Import Cobalt 21.master.0.301323
diff --git a/src/third_party/v8/tools/BUILD.gn b/src/third_party/v8/tools/BUILD.gn
new file mode 100644
index 0000000..2f8197d
--- /dev/null
+++ b/src/third_party/v8/tools/BUILD.gn
@@ -0,0 +1,65 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/config/sanitizers/sanitizers.gni")
+import("../gni/v8.gni")
+
+group("gn_all") {
+  testonly = true
+
+  data_deps = [
+    ":v8_check_static_initializers",
+    "debug_helper:v8_debug_helper",
+    "gcmole:v8_run_gcmole",
+    "jsfunfuzz:v8_jsfunfuzz",
+  ]
+
+  if (is_win) {
+    data_deps += [ "v8windbg" ]
+  }
+}
+
+group("v8_check_static_initializers") {
+  data_deps = [ "..:d8" ]
+
+  data = [ "check-static-initializers.sh" ]
+}
+
+group("v8_android_test_runner_deps") {
+  testonly = true
+
+  if (is_android && !build_with_chromium) {
+    data_deps = [ "//build/android:test_runner_py" ]
+    data = [
+      # This is used by android.py, but not included by test_runner_py above.
+      "//third_party/catapult/devil/devil/android/perf/",
+    ]
+  }
+}
+
+group("v8_testrunner") {
+  testonly = true
+
+  data_deps = [
+    ":v8_android_test_runner_deps",
+    "..:v8_dump_build_config",
+    "..:v8_python_base",
+  ]
+
+  data = [
+    # Also add the num-fuzzer wrapper script in order to be able to run the
+    # num-fuzzer on all existing isolated V8 test suites.
+    "predictable_wrapper.py",
+    "run-num-fuzzer.py",
+    "run-tests.py",
+    "testrunner/",
+  ]
+
+  if (v8_code_coverage && sanitizer_coverage_flags == "bb,trace-pc-guard") {
+    data += [
+      "sanitizers/sancov_merger.py",
+      "../third_party/llvm/projects/compiler-rt/lib/sanitizer_common/scripts/sancov.py",
+    ]
+  }
+}
diff --git a/src/third_party/v8/tools/Makefile.tags b/src/third_party/v8/tools/Makefile.tags
new file mode 100644
index 0000000..372824d
--- /dev/null
+++ b/src/third_party/v8/tools/Makefile.tags
@@ -0,0 +1,30 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+# Variable default definitions. Override them by exporting them in your shell.
+V8_DIR ?= $(realpath $(dir $(lastword $(MAKEFILE_LIST)))/..)
+
+# Support for the GNU GLOBAL Source Code Tag System.
+$(V8_DIR)/gtags.files: $(wildcard $(addprefix $(V8_DIR)/,$(shell cat $(V8_DIR)/gtags.files 2> /dev/null)))
+	@(cd $(V8_DIR) && find include src test -name '*.h' -o -name '*.cc' -o -name '*.c') > $@
+
+# We need to manually set the stack limit here, to work around bugs in
+# gmake-3.81 and global-5.7.1 on recent 64-bit Linux systems.
+# Using $(wildcard ...) gracefully ignores non-existing files, so that stale
+# gtags.files after switching branches don't cause recipe failures.
+$(V8_DIR)/GPATH $(V8_DIR)/GRTAGS $(V8_DIR)/GSYMS $(V8_DIR)/GTAGS: $(V8_DIR)/gtags.files $(wildcard $(addprefix $(V8_DIR)/,$(shell cat $(V8_DIR)/gtags.files 2> /dev/null)))
+	@cd $(V8_DIR) && bash -c 'ulimit -s 10240 && GTAGSFORCECPP=yes gtags -i -q -f $<'
+
+$(V8_DIR)/tags: $(V8_DIR)/gtags.files $(wildcard $(addprefix $(V8_DIR)/,$(shell cat $(V8_DIR)/gtags.files 2> /dev/null)))
+	@(ctags --version | grep 'Exuberant Ctags' >/dev/null) || \
+	  (echo "Please install Exuberant Ctags (check 'ctags --version')" >&2; false)
+	@cd $(V8_DIR) && ctags --fields=+l -L gtags.files
+
+tags: $(V8_DIR)/tags
+
+tags.clean:
+	@rm -f $(addprefix $(V8_DIR), gtags.files GPATH GRTAGS GSYMS GTAGS tags)
+
+clean: tags.clean
diff --git a/src/third_party/v8/tools/SourceMap.js b/src/third_party/v8/tools/SourceMap.js
new file mode 100644
index 0000000..4635450
--- /dev/null
+++ b/src/third_party/v8/tools/SourceMap.js
@@ -0,0 +1,382 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This is a copy from blink dev tools, see:
+// http://src.chromium.org/viewvc/blink/trunk/Source/devtools/front_end/SourceMap.js
+// revision: 153407
+
+// Added to make the file work without dev tools
+WebInspector = {};
+WebInspector.ParsedURL = {};
+WebInspector.ParsedURL.completeURL = function(){};
+// start of original file content
+
+/*
+ * Copyright (C) 2012 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Implements Source Map V3 model. See http://code.google.com/p/closure-compiler/wiki/SourceMaps
+ * for format description.
+ * @constructor
+ * @param {string} sourceMappingURL
+ * @param {SourceMapV3} payload
+ */
+WebInspector.SourceMap = function(sourceMappingURL, payload)
+{
+    if (!WebInspector.SourceMap.prototype._base64Map) {
+        const base64Digits = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+        WebInspector.SourceMap.prototype._base64Map = {};
+        for (var i = 0; i < base64Digits.length; ++i)
+            WebInspector.SourceMap.prototype._base64Map[base64Digits.charAt(i)] = i;
+    }
+
+    this._sourceMappingURL = sourceMappingURL;
+    this._reverseMappingsBySourceURL = {};
+    this._mappings = [];
+    this._sources = {};
+    this._sourceContentByURL = {};
+    this._parseMappingPayload(payload);
+}
+
+/**
+ * @param {string} sourceMapURL
+ * @param {string} compiledURL
+ * @param {function(WebInspector.SourceMap)} callback
+ */
+WebInspector.SourceMap.load = function(sourceMapURL, compiledURL, callback)
+{
+    NetworkAgent.loadResourceForFrontend(WebInspector.resourceTreeModel.mainFrame.id, sourceMapURL, undefined, contentLoaded.bind(this));
+
+    /**
+     * @param {?Protocol.Error} error
+     * @param {number} statusCode
+     * @param {NetworkAgent.Headers} headers
+     * @param {string} content
+     */
+    function contentLoaded(error, statusCode, headers, content)
+    {
+        if (error || !content || statusCode >= 400) {
+            console.error("Could not load content for " + sourceMapURL + " : " + (error || ("HTTP status code: " + statusCode)));
+            callback(null);
+            return;
+        }
+
+        if (content.slice(0, 3) === ")]}")
+            content = content.substring(content.indexOf('\n'));
+        try {
+            var payload = /** @type {SourceMapV3} */ (JSON.parse(content));
+            var baseURL = sourceMapURL.startsWith("data:") ? compiledURL : sourceMapURL;
+            callback(new WebInspector.SourceMap(baseURL, payload));
+        } catch(e) {
+            console.error(e.message);
+            callback(null);
+        }
+    }
+}
+
+WebInspector.SourceMap.prototype = {
+    /**
+     * @return {Array.<string>}
+     */
+    sources: function()
+    {
+        return Object.keys(this._sources);
+    },
+
+    /**
+     * @param {string} sourceURL
+     * @return {string|undefined}
+     */
+    sourceContent: function(sourceURL)
+    {
+        return this._sourceContentByURL[sourceURL];
+    },
+
+    /**
+     * @param {string} sourceURL
+     * @param {WebInspector.ResourceType} contentType
+     * @return {WebInspector.ContentProvider}
+     */
+    sourceContentProvider: function(sourceURL, contentType)
+    {
+        var lastIndexOfDot = sourceURL.lastIndexOf(".");
+        var extension = lastIndexOfDot !== -1 ? sourceURL.substr(lastIndexOfDot + 1) : "";
+        var mimeType = WebInspector.ResourceType.mimeTypesForExtensions[extension.toLowerCase()];
+        var sourceContent = this.sourceContent(sourceURL);
+        if (sourceContent)
+            return new WebInspector.StaticContentProvider(contentType, sourceContent, mimeType);
+        return new WebInspector.CompilerSourceMappingContentProvider(sourceURL, contentType, mimeType);
+    },
+
+    /**
+     * @param {SourceMapV3} mappingPayload
+     */
+    _parseMappingPayload: function(mappingPayload)
+    {
+        if (mappingPayload.sections)
+            this._parseSections(mappingPayload.sections);
+        else
+            this._parseMap(mappingPayload, 0, 0);
+    },
+
+    /**
+     * @param {Array.<SourceMapV3.Section>} sections
+     */
+    _parseSections: function(sections)
+    {
+        for (var i = 0; i < sections.length; ++i) {
+            var section = sections[i];
+            this._parseMap(section.map, section.offset.line, section.offset.column);
+        }
+    },
+
+    /**
+     * @param {number} lineNumber in compiled resource
+     * @param {number} columnNumber in compiled resource
+     * @return {?Array}
+     */
+    findEntry: function(lineNumber, columnNumber)
+    {
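+        // Binary search for the last mapping whose generated (line, column)
+        // position is at or before the requested position; returns null when
+        // the position precedes the first mapping.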
+        var first = 0;
+        var count = this._mappings.length;
+        while (count > 1) {
+          var step = count >> 1;
+          var middle = first + step;
+          var mapping = this._mappings[middle];
+          if (lineNumber < mapping[0] || (lineNumber === mapping[0] && columnNumber < mapping[1]))
+              count = step;
+          else {
+              first = middle;
+              count -= step;
+          }
+        }
+        var entry = this._mappings[first];
+        if (!first && entry && (lineNumber < entry[0] || (lineNumber === entry[0] && columnNumber < entry[1])))
+            return null;
+        return entry;
+    },
+
+    /**
+     * @param {string} sourceURL of the originating resource
+     * @param {number} lineNumber in the originating resource
+     * @return {Array}
+     */
+    findEntryReversed: function(sourceURL, lineNumber)
+    {
+        var mappings = this._reverseMappingsBySourceURL[sourceURL];
+        for ( ; lineNumber < mappings.length; ++lineNumber) {
+            var mapping = mappings[lineNumber];
+            if (mapping)
+                return mapping;
+        }
+        return this._mappings[0];
+    },
+
+    /**
+     * @override
+     */
+    _parseMap: function(map, lineNumber, columnNumber)
+    {
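+        // Decodes the VLQ-encoded "mappings" string. Each entry pushed onto
+        // this._mappings is [generatedLine, generatedColumn] for positions
+        // without a source, or [generatedLine, generatedColumn, sourceURL,
+        // sourceLine, sourceColumn] for mapped positions.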
+        var sourceIndex = 0;
+        var sourceLineNumber = 0;
+        var sourceColumnNumber = 0;
+        var nameIndex = 0;
+
+        var sources = [];
+        var originalToCanonicalURLMap = {};
+        for (var i = 0; i < map.sources.length; ++i) {
+            var originalSourceURL = map.sources[i];
+            var sourceRoot = map.sourceRoot || "";
+            if (sourceRoot && !sourceRoot.endsWith("/"))
+                sourceRoot += "/";
+            var href = sourceRoot + originalSourceURL;
+            var url = WebInspector.ParsedURL.completeURL(this._sourceMappingURL, href) || href;
+            originalToCanonicalURLMap[originalSourceURL] = url;
+            sources.push(url);
+            this._sources[url] = true;
+
+            if (map.sourcesContent && map.sourcesContent[i])
+                this._sourceContentByURL[url] = map.sourcesContent[i];
+        }
+
+        var stringCharIterator = new WebInspector.SourceMap.StringCharIterator(map.mappings);
+        var sourceURL = sources[sourceIndex];
+
+        while (true) {
+            if (stringCharIterator.peek() === ",")
+                stringCharIterator.next();
+            else {
+                while (stringCharIterator.peek() === ";") {
+                    lineNumber += 1;
+                    columnNumber = 0;
+                    stringCharIterator.next();
+                }
+                if (!stringCharIterator.hasNext())
+                    break;
+            }
+
+            columnNumber += this._decodeVLQ(stringCharIterator);
+            if (this._isSeparator(stringCharIterator.peek())) {
+                this._mappings.push([lineNumber, columnNumber]);
+                continue;
+            }
+
+            var sourceIndexDelta = this._decodeVLQ(stringCharIterator);
+            if (sourceIndexDelta) {
+                sourceIndex += sourceIndexDelta;
+                sourceURL = sources[sourceIndex];
+            }
+            sourceLineNumber += this._decodeVLQ(stringCharIterator);
+            sourceColumnNumber += this._decodeVLQ(stringCharIterator);
+            if (!this._isSeparator(stringCharIterator.peek()))
+                nameIndex += this._decodeVLQ(stringCharIterator);
+
+            this._mappings.push([lineNumber, columnNumber, sourceURL, sourceLineNumber, sourceColumnNumber]);
+        }
+
+        for (var i = 0; i < this._mappings.length; ++i) {
+            var mapping = this._mappings[i];
+            var url = mapping[2];
+            if (!url)
+                continue;
+            if (!this._reverseMappingsBySourceURL[url])
+                this._reverseMappingsBySourceURL[url] = [];
+            var reverseMappings = this._reverseMappingsBySourceURL[url];
+            var sourceLine = mapping[3];
+            if (!reverseMappings[sourceLine])
+                reverseMappings[sourceLine] = [mapping[0], mapping[1]];
+        }
+    },
+
+    /**
+     * @param {string} char
+     * @return {boolean}
+     */
+    _isSeparator: function(char)
+    {
+        return char === "," || char === ";";
+    },
+
+    /**
+     * @param {WebInspector.SourceMap.StringCharIterator} stringCharIterator
+     * @return {number}
+     */
+    _decodeVLQ: function(stringCharIterator)
+    {
+        // Read unsigned value.
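+        // Each base64 digit carries 5 payload bits plus a continuation bit;
+        // e.g. "gB" decodes as (32 & 31) + ((1 & 31) << 5) = 32, which the
+        // sign handling below turns into the value 16.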
+        var result = 0;
+        var shift = 0;
+        do {
+            var digit = this._base64Map[stringCharIterator.next()];
+            result += (digit & this._VLQ_BASE_MASK) << shift;
+            shift += this._VLQ_BASE_SHIFT;
+        } while (digit & this._VLQ_CONTINUATION_MASK);
+
+        // Fix the sign.
+        var negative = result & 1;
+        // Use unsigned right shift, so that the 32nd bit is properly shifted
+        // to the 31st, and the 32nd becomes unset.
+        result >>>= 1;
+        if (negative) {
+          // We need to OR 0x80000000 here to ensure the 32nd bit (the sign bit
+          // in a 32bit int) is always set for negative numbers. If `result`
+          // were 1, (meaning `negative` is true and all other bits were zeros),
+          // `result` would now be 0. But -0 doesn't flip the 32nd bit as
+          // intended. All other numbers will successfully set the 32nd bit
+          // without issue, so doing this is a noop for them.
+          return -result | 0x80000000;
+        }
+        return result;
+    },
+
+    _VLQ_BASE_SHIFT: 5,
+    _VLQ_BASE_MASK: (1 << 5) - 1,
+    _VLQ_CONTINUATION_MASK: 1 << 5
+}
+
+/**
+ * @constructor
+ * @param {string} string
+ */
+WebInspector.SourceMap.StringCharIterator = function(string)
+{
+    this._string = string;
+    this._position = 0;
+}
+
+WebInspector.SourceMap.StringCharIterator.prototype = {
+    /**
+     * @return {string}
+     */
+    next: function()
+    {
+        return this._string.charAt(this._position++);
+    },
+
+    /**
+     * @return {string}
+     */
+    peek: function()
+    {
+        return this._string.charAt(this._position);
+    },
+
+    /**
+     * @return {boolean}
+     */
+    hasNext: function()
+    {
+        return this._position < this._string.length;
+    }
+}
diff --git a/src/third_party/v8/tools/__init__.py b/src/third_party/v8/tools/__init__.py
new file mode 100644
index 0000000..3841a86
--- /dev/null
+++ b/src/third_party/v8/tools/__init__.py
@@ -0,0 +1,4 @@
+#!/usr/bin/env python
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/src/third_party/v8/tools/adb-d8.py b/src/third_party/v8/tools/adb-d8.py
new file mode 100755
index 0000000..f2e67e8
--- /dev/null
+++ b/src/third_party/v8/tools/adb-d8.py
@@ -0,0 +1,241 @@
+#!/usr/bin/env python
+
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Runs an android build of d8 over adb, with any given arguments. Files
+# requested by d8 are transferred on-demand from the caller, by reverse port
+# forwarding a simple TCP file server from the computer to the android device.
+#
+# Usage:
+#    adb-d8.py <build_dir> [<d8_args>...]
+#
+# Options:
+#    <build_dir>    The directory containing the android build of d8.
+#    <d8_args>...   The arguments passed through to d8.
+#
+# Run adb-d8.py --help for complete usage information.
+
+from __future__ import print_function
+
+import os
+import sys
+import struct
+import threading
+import subprocess
+import SocketServer # TODO(leszeks): python 3 compatibility
+
+def CreateFileHandlerClass(root_dirs, verbose):
+  class FileHandler(SocketServer.BaseRequestHandler):
+    def handle(self):
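+      # Protocol spoken with d8 on the device (see --read-from-tcp-port in
+      # AdbRunD8 below): the client sends a NUL-terminated file path; the
+      # server replies with a 4-byte big-endian length followed by the file
+      # contents, or a length of -1 if the file cannot be served.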
+      data = self.request.recv(1024)
+      while data[-1] != "\0":
+        data += self.request.recv(1024)
+
+      filename = data[0:-1]
+
+      try:
+        filename = os.path.abspath(filename)
+
+        if not any(filename.startswith(root) for root in root_dirs):
+          raise Exception("{} not in roots {}".format(filename, root_dirs))
+        if not os.path.isfile(filename):
+          raise Exception("{} is not a file".format(filename))
+
+        if verbose:
+          sys.stdout.write("Serving {}\r\n".format(os.path.relpath(filename)))
+
+        with open(filename) as f:
+          contents = f.read()
+          self.request.sendall(struct.pack("!i", len(contents)))
+          self.request.sendall(contents)
+
+      except Exception as e:
+        if verbose:
+          sys.stderr.write(
+            "Request failed ({})\n".format(e).replace('\n','\r\n'))
+        self.request.sendall(struct.pack("!i", -1))
+
+  return FileHandler
+
+
+def TransferD8ToDevice(adb, build_dir, device_d8_dir, verbose):
+  files_to_copy = ["d8", "snapshot_blob.bin"]
+
+  # Pipe the output of md5sum from the local computer to the device, checking
+  # the md5 hashes on the device.
+  local_md5_sum_proc = subprocess.Popen(
+    ["md5sum"] + files_to_copy,
+    cwd=build_dir,
+    stdout=subprocess.PIPE
+  )
+  device_md5_check_proc = subprocess.Popen(
+    [
+      adb, "shell",
+      "mkdir -p '{0}' ; cd '{0}' ; md5sum -c -".format(device_d8_dir)
+    ],
+    stdin=local_md5_sum_proc.stdout,
+    stdout=subprocess.PIPE,
+    stderr=subprocess.PIPE
+  )
+
+  # Push any files which failed the md5 check.
+  (stdoutdata, stderrdata) = device_md5_check_proc.communicate()
+  for line in stdoutdata.split('\n'):
+    if line.endswith(": FAILED"):
+      filename = line[:-len(": FAILED")]
+      if verbose:
+        print("Updating {}...".format(filename))
+      subprocess.check_call([
+        adb, "push",
+        os.path.join(build_dir, filename),
+        device_d8_dir
+      ], stdout=sys.stdout if verbose else open(os.devnull, 'wb'))
+
+
+def AdbForwardDeviceToLocal(adb, device_port, server_port, verbose):
+  if verbose:
+    print("Forwarding device:{} to localhost:{}...".format(
+      device_port, server_port))
+
+  subprocess.check_call([
+    adb, "reverse",
+    "tcp:{}".format(device_port),
+    "tcp:{}".format(server_port)
+  ])
+
+
+def AdbRunD8(adb, device_d8_dir, device_port, d8_args, verbose):
+  # Single-quote the arguments to d8, and concatenate them into a string.
+  d8_arg_str = " ".join("'{}'".format(a) for a in d8_args)
+  d8_arg_str = "--read-from-tcp-port='{}' ".format(device_port) + d8_arg_str
+
+  # Don't use os.path.join for d8 because we care about the device's os, not
+  # the host os.
+  d8_str = "{}/d8 {}".format(device_d8_dir, d8_arg_str)
+
+  if sys.stdout.isatty():
+    # Run adb shell with -t to have a tty if we run d8 without a script.
+    cmd = [adb, "shell", "-t", d8_str]
+  else:
+    cmd = [adb, "shell", d8_str]
+
+  if verbose:
+    print("Running {}".format(" ".join(cmd)))
+  return subprocess.call(cmd)
+
+
+def PrintUsage(file=sys.stdout):
+  print("Usage: adb-d8.py [-v|--verbose] [--] <build_dir> [<d8 args>...]",
+    file=file)
+
+
+def PrintHelp(file=sys.stdout):
+  print("""Usage:
+   adb-d8.py [options] [--] <build_dir> [<d8_args>...]
+   adb-d8.py -h|--help
+
+Options:
+   -h|--help             Show this help message and exit.
+   -v|--verbose          Print verbose output.
+   --device-dir=DIR      Specify which directory on the device should be used
+                         for the d8 binary. [default: /data/local/tmp/v8]
+   --extra-root-dir=DIR  In addition to the current directory, allow d8 to
+                         access files inside DIR. Multiple additional roots
+                         can be specified.
+   <build_dir>           The directory containing the android build of d8.
+   <d8_args>...          The arguments passed through to d8.""", file=file)
+
+
+def Main():
+  if len(sys.argv) < 2:
+    PrintUsage(sys.stderr)
+    return 1
+
+  script_dir = os.path.dirname(sys.argv[0])
+  # Use the platform-tools version of adb so that we know it has the reverse
+  # command.
+  adb = os.path.join(
+    script_dir,
+    "../third_party/android_sdk/public/platform-tools/adb"
+  )
+
+  # Read off any command line flags before build_dir (or --). Do this
+  # manually, rather than using something like argparse, to be able to split
+  # the adb-d8 options from the passthrough d8 options.
+  verbose = False
+  device_d8_dir = '/data/local/tmp/v8'
+  root_dirs = []
+  arg_index = 1
+  while arg_index < len(sys.argv):
+    arg = sys.argv[arg_index]
+    if not arg.startswith("-"):
+      break
+    elif arg == "--":
+      arg_index += 1
+      break
+    elif arg == "-h" or arg == "--help":
+      PrintHelp(sys.stdout)
+      return 0
+    elif arg == "-v" or arg == "--verbose":
+      verbose = True
+
+    elif arg == "--device-dir":
+      arg_index += 1
+      device_d8_dir = sys.argv[arg_index]
+    elif arg.startswith("--device-dir="):
+      device_d8_dir = arg[len("--device-dir="):]
+
+    elif arg == "--extra-root-dir":
+      arg_index += 1
+      root_dirs.append(sys.argv[arg_index])
+    elif arg.startswith("--extra-root-dir="):
+      root_dirs.append(arg[len("--extra-root-dir="):])
+
+    else:
+      print("ERROR: Unrecognised option: {}".format(arg))
+      PrintUsage(sys.stderr)
+      return 1
+
+    arg_index += 1
+
+  # Transfer d8 (and dependencies) to the device.
+  build_dir = os.path.abspath(sys.argv[arg_index])
+
+  TransferD8ToDevice(adb, build_dir, device_d8_dir, verbose)
+
+  # Start a file server for the files d8 might need.
+  script_root_dir = os.path.abspath(os.curdir)
+  root_dirs.append(script_root_dir)
+  server = SocketServer.TCPServer(
+    ("localhost", 0), # 0 means an arbitrary unused port.
+    CreateFileHandlerClass(root_dirs, verbose)
+  )
+
+  try:
+    # Start the file server in its own thread.
+    server_thread = threading.Thread(target=server.serve_forever)
+    server_thread.daemon = True
+    server_thread.start()
+
+    # Port-forward the given device port to the file server.
+    # TODO(leszeks): Pick an unused device port.
+    # TODO(leszeks): Remove the port forwarding on exit.
+    server_ip, server_port = server.server_address
+    device_port = 4444
+    AdbForwardDeviceToLocal(adb, device_port, server_port, verbose)
+
+    # Run d8 over adb with the remaining arguments, using the given device
+    # port to forward file reads.
+    return AdbRunD8(
+      adb, device_d8_dir, device_port, sys.argv[arg_index+1:], verbose)
+
+  finally:
+    if verbose:
+      print("Shutting down file server...")
+    server.shutdown()
+    server.server_close()
+
+if __name__ == '__main__':
+  sys.exit(Main())
diff --git a/src/third_party/v8/tools/android-build.sh b/src/third_party/v8/tools/android-build.sh
new file mode 100755
index 0000000..e69de29
--- /dev/null
+++ b/src/third_party/v8/tools/android-build.sh
diff --git a/src/third_party/v8/tools/android-ll-prof.sh b/src/third_party/v8/tools/android-ll-prof.sh
new file mode 100755
index 0000000..436f262
--- /dev/null
+++ b/src/third_party/v8/tools/android-ll-prof.sh
@@ -0,0 +1,69 @@
+#!/bin/bash
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Runs d8 with the given arguments on the device under 'perf' and
+# processes the profiler trace and v8 logs using ll_prof.py.
+# 
+# Usage:
+# > ./tools/android-ll-prof.sh (debug|release) "args to d8" "args to ll_prof.py"
+#
+# The script creates the deploy directory deploy/data/local/tmp/v8, copies the
+# d8 binary there from either out/android_arm.release or out/android_arm.debug,
+# and then syncs the deploy directory with /data/local/tmp/v8 on the device.
+# You can put JS files in the deploy directory before running the script.
+# Note: $ANDROID_NDK_ROOT must be set.
+
+MODE=$1
+RUN_ARGS=$2
+LL_PROF_ARGS=$3
+
+BASE=`cd $(dirname "$0")/..; pwd`
+DEPLOY="$BASE/deploy"
+
+set +e
+mkdir -p "$DEPLOY/data/local/tmp/v8"
+
+cp "$BASE/out/android_arm.$MODE/d8" "$DEPLOY/data/local/tmp/v8/d8"
+
+adb -p "$DEPLOY" sync data
+
+adb shell "cd /data/local/tmp/v8;\
+           perf record -R -e cycles -c 10000 -f -i \
+           ./d8 --ll_prof --gc-fake-mmap=/data/local/tmp/__v8_gc__ $RUN_ARGS"
+
+adb pull /data/local/tmp/v8/v8.log .
+adb pull /data/local/tmp/v8/v8.log.ll .
+adb pull /data/perf.data .
+
+ARCH=arm-linux-androideabi-4.6
+TOOLCHAIN="${ANDROID_NDK_ROOT}/toolchains/$ARCH/prebuilt/linux-x86/bin"
+
+$BASE/tools/ll_prof.py --host-root="$BASE/deploy" \
+                       --gc-fake-mmap=/data/local/tmp/__v8_gc__ \
+                       --objdump="$TOOLCHAIN/arm-linux-androideabi-objdump" \
+                       $LL_PROF_ARGS
diff --git a/src/third_party/v8/tools/android-run.py b/src/third_party/v8/tools/android-run.py
new file mode 100755
index 0000000..66d333a
--- /dev/null
+++ b/src/third_party/v8/tools/android-run.py
@@ -0,0 +1,113 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This script executes the passed command line on Android device
+# using 'adb shell' command. Unfortunately, 'adb shell' always
+# returns exit code 0, ignoring the exit code of executed command.
+# Since we need to return non-zero exit code if the command failed,
+# we augment the passed command line with exit code checking statement
+# and output special error string in case of non-zero exit code.
+# Then we parse the output of 'adb shell' and look for that error string.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import os
+from os.path import join, dirname, abspath
+import subprocess
+import sys
+import tempfile
+
+def Check(output, errors):
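+  # 'adb shell' always exits with 0, so failure is detected by scanning the
+  # output for shell error messages or for the "ANDROID:" marker echoed by
+  # the wrapper script built in Main() below.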
+  failed = any([s.startswith('/system/bin/sh:') or s.startswith('ANDROID')
+                for s in output.split('\n')])
+  return 1 if failed else 0
+
+def Execute(cmdline):
+  (fd_out, outname) = tempfile.mkstemp()
+  (fd_err, errname) = tempfile.mkstemp()
+  process = subprocess.Popen(
+    args=cmdline,
+    shell=True,
+    stdout=fd_out,
+    stderr=fd_err,
+  )
+  exit_code = process.wait()
+  os.close(fd_out)
+  os.close(fd_err)
+  output = open(outname).read()
+  errors = open(errname).read()
+  os.unlink(outname)
+  os.unlink(errname)
+  sys.stdout.write(output)
+  sys.stderr.write(errors)
+  return exit_code or Check(output, errors)
+
+def Escape(arg):
+  def ShouldEscape():
+    for x in arg:
+      if not x.isalnum() and x != '-' and x != '_':
+        return True
+    return False
+
+  return arg if not ShouldEscape() else '"%s"' % (arg.replace('"', '\\"'))
+
+def WriteToTemporaryFile(data):
+  (fd, fname) = tempfile.mkstemp()
+  os.close(fd)
+  tmp_file = open(fname, "w")
+  tmp_file.write(data)
+  tmp_file.close()
+  return fname
+
+def Main():
+  if (len(sys.argv) == 1):
+    print("Usage: %s <command-to-run-on-device>" % sys.argv[0])
+    return 1
+  workspace = abspath(join(dirname(sys.argv[0]), '..'))
+  v8_root = "/data/local/tmp/v8"
+  android_workspace = os.getenv("ANDROID_V8", v8_root)
+  args = [Escape(arg) for arg in sys.argv[1:]]
+  script = (" ".join(args) + "\n"
+            "case $? in\n"
+            "  0) ;;\n"
+            "  *) echo \"ANDROID: Error returned by test\";;\n"
+            "esac\n")
+  script = script.replace(workspace, android_workspace)
+  script_file = WriteToTemporaryFile(script)
+  android_script_file = android_workspace + "/" + script_file
+  command =  ("adb push '%s' %s;" % (script_file, android_script_file) +
+              "adb shell 'cd %s && sh %s';" % (v8_root, android_script_file) +
+              "adb shell 'rm %s'" % android_script_file)
+  error_code = Execute(command)
+  os.unlink(script_file)
+  return error_code
+
+if __name__ == '__main__':
+  sys.exit(Main())
diff --git a/src/third_party/v8/tools/android-sync.sh b/src/third_party/v8/tools/android-sync.sh
new file mode 100755
index 0000000..66d7aed
--- /dev/null
+++ b/src/third_party/v8/tools/android-sync.sh
@@ -0,0 +1,108 @@
+#!/bin/bash
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This script pushes android binaries and test data to the device.
+# The first argument can be either "android.release" or "android.debug".
+# The second argument is a relative path to the output directory with binaries.
+# The third argument is the absolute path to the V8 directory on the host.
+# The fourth argument is the absolute path to the V8 directory on the device.
+
+if [ ${#@} -lt 4 ] ; then
+  echo "$0: Error: need 4 arguments"
+  exit 1
+fi
+
+ARCH_MODE=$1
+OUTDIR=$2
+HOST_V8=$3
+ANDROID_V8=$4
+
+function LINUX_MD5 {
+  local HASH=$(md5sum $1)
+  echo ${HASH%% *}
+}
+
+function DARWIN_MD5 {
+  local HASH=$(md5 $1)
+  echo ${HASH} | cut -f2 -d "=" | cut -f2 -d " "
+}
+
+host_os=$(uname -s)
+case "${host_os}" in
+  "Linux")
+    MD5=LINUX_MD5
+    ;;
+  "Darwin")
+    MD5=DARWIN_MD5
+    ;;
+  *)
+    echo "$0: Host platform ${host_os} is not supported" >& 2
+    exit 1
+esac
+
+function sync_file {
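+  # Push the file only when its md5 digest on the device differs from the
+  # host copy, so unchanged files are not re-transferred.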
+  local FILE=$1
+  local ANDROID_HASH=$(adb shell "md5 \"$ANDROID_V8/$FILE\"")
+  local HOST_HASH=$($MD5 "$HOST_V8/$FILE")
+  if [ "${ANDROID_HASH%% *}" != "${HOST_HASH}" ]; then
+    adb push "$HOST_V8/$FILE" "$ANDROID_V8/$FILE" &> /dev/null
+  fi
+  echo -n "."
+}
+
+function sync_dir {
+  local DIR=$1
+  echo -n "sync to $ANDROID_V8/$DIR"
+  for FILE in $(find "$HOST_V8/$DIR" -not -path "*.svn*" -type f); do
+    local RELATIVE_FILE=${FILE:${#HOST_V8}}
+    sync_file "$RELATIVE_FILE"
+  done
+  echo ""
+}
+
+echo -n "sync to $ANDROID_V8/$OUTDIR/$ARCH_MODE"
+sync_file "$OUTDIR/$ARCH_MODE/cctest"
+sync_file "$OUTDIR/$ARCH_MODE/d8"
+sync_file "$OUTDIR/$ARCH_MODE/snapshot_blob.bin"
+sync_file "$OUTDIR/$ARCH_MODE/unittests"
+echo ""
+echo -n "sync to $ANDROID_V8/tools"
+sync_file tools/arguments.mjs
+sync_file tools/codemap.mjs
+sync_file tools/consarray.mjs
+sync_file tools/csvparser.mjs
+sync_file tools/dumpcpp.mjs
+sync_file tools/logreader.mjs
+sync_file tools/profile.mjs
+sync_file tools/profile_view.mjs
+sync_file tools/splaytree.mjs
+sync_file tools/tickprocessor.mjs
+echo ""
+sync_dir test/intl
+sync_dir test/message
+sync_dir test/mjsunit
diff --git a/src/third_party/v8/tools/arguments.js b/src/third_party/v8/tools/arguments.js
new file mode 100644
index 0000000..c2b3d1b
--- /dev/null
+++ b/src/third_party/v8/tools/arguments.js
@@ -0,0 +1,78 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+class BaseArgumentsProcessor {
+  constructor(args) {
+    this.args_ = args;
+    this.result_ = this.getDefaultResults();
+    console.assert(this.result_ !== undefined);
+    console.assert(this.result_.logFileName !== undefined);
+    this.argsDispatch_ = this.getArgsDispatch();
+    console.assert(this.argsDispatch_ !== undefined);
+  }
+
+  getDefaultResults() {
+    throw "Implement in getDefaultResults in subclass";
+  }
+
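+  // Subclasses return a dispatch table mapping each flag name to a triple:
+  // [result property, default value or value-transforming function, help
+  // text], consumed by parse() and printUsageAndExit() below.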
+  getArgsDispatch() {
+    throw "Implement getArgsDispatch in subclass";
+  }
+
+  result() { return this.result_ }
+
+  printUsageAndExit() {
+    print('Cmdline args: [options] [log-file-name]\n' +
+          'Default log file name is "' +
+          this.result_.logFileName + '".\n');
+    print('Options:');
+    for (var arg in this.argsDispatch_) {
+      var synonyms = [arg];
+      var dispatch = this.argsDispatch_[arg];
+      for (var synArg in this.argsDispatch_) {
+        if (arg !== synArg && dispatch === this.argsDispatch_[synArg]) {
+          synonyms.push(synArg);
+          delete this.argsDispatch_[synArg];
+        }
+      }
+      print('  ' + synonyms.join(', ').padEnd(20) + " " + dispatch[2]);
+    }
+    quit(2);
+  }
+
+  parse() {
+    while (this.args_.length) {
+      var arg = this.args_.shift();
+      if (arg.charAt(0) != '-') {
+        this.result_.logFileName = arg;
+        continue;
+      }
+      var userValue = null;
+      var eqPos = arg.indexOf('=');
+      if (eqPos != -1) {
+        userValue = arg.substr(eqPos + 1);
+        arg = arg.substr(0, eqPos);
+      }
+      if (arg in this.argsDispatch_) {
+        var dispatch = this.argsDispatch_[arg];
+        var property = dispatch[0];
+        var defaultValue = dispatch[1];
+        if (typeof defaultValue == "function") {
+          userValue = defaultValue(userValue);
+        } else if (userValue == null) {
+          userValue = defaultValue;
+        }
+        this.result_[property] = userValue;
+      } else {
+        return false;
+      }
+    }
+    return true;
+  }
+}
+
+function parseBool(str) {
+  if (str == "true" || str == "1") return true;
+  return false;
+}
diff --git a/src/third_party/v8/tools/arguments.mjs b/src/third_party/v8/tools/arguments.mjs
new file mode 100644
index 0000000..4e607b7
--- /dev/null
+++ b/src/third_party/v8/tools/arguments.mjs
@@ -0,0 +1,78 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export class BaseArgumentsProcessor {
+  constructor(args) {
+    this.args_ = args;
+    this.result_ = this.getDefaultResults();
+    console.assert(this.result_ !== undefined);
+    console.assert(this.result_.logFileName !== undefined);
+    this.argsDispatch_ = this.getArgsDispatch();
+    console.assert(this.argsDispatch_ !== undefined);
+  }
+
+  getDefaultResults() {
+    throw "Implement in getDefaultResults in subclass";
+  }
+
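+  // The dispatch table maps each flag name to [result property, default value
+  // or value-transforming function, help text]; see parse() and
+  // printUsageAndExit() below.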
+  getArgsDispatch() {
+    throw "Implement getArgsDispatch in subclass";
+  }
+
+  result() { return this.result_ }
+
+  printUsageAndExit() {
+    print('Cmdline args: [options] [log-file-name]\n' +
+          'Default log file name is "' +
+          this.result_.logFileName + '".\n');
+    print('Options:');
+    for (const arg in this.argsDispatch_) {
+      const synonyms = [arg];
+      const dispatch = this.argsDispatch_[arg];
+      for (const synArg in this.argsDispatch_) {
+        if (arg !== synArg && dispatch === this.argsDispatch_[synArg]) {
+          synonyms.push(synArg);
+          delete this.argsDispatch_[synArg];
+        }
+      }
+      print(`  ${synonyms.join(', ').padEnd(20)} ${dispatch[2]}`);
+    }
+    quit(2);
+  }
+
+  parse() {
+    while (this.args_.length) {
+      let arg = this.args_.shift();
+      if (arg.charAt(0) != '-') {
+        this.result_.logFileName = arg;
+        continue;
+      }
+      let userValue = null;
+      const eqPos = arg.indexOf('=');
+      if (eqPos != -1) {
+        userValue = arg.substr(eqPos + 1);
+        arg = arg.substr(0, eqPos);
+      }
+      if (arg in this.argsDispatch_) {
+        const dispatch = this.argsDispatch_[arg];
+        const property = dispatch[0];
+        const defaultValue = dispatch[1];
+        if (typeof defaultValue == "function") {
+          userValue = defaultValue(userValue);
+        } else if (userValue == null) {
+          userValue = defaultValue;
+        }
+        this.result_[property] = userValue;
+      } else {
+        return false;
+      }
+    }
+    return true;
+  }
+}
+
+export function parseBool(str) {
+  if (str == "true" || str == "1") return true;
+  return false;
+}
diff --git a/src/third_party/v8/tools/avg.py b/src/third_party/v8/tools/avg.py
new file mode 100755
index 0000000..5741acd
--- /dev/null
+++ b/src/third_party/v8/tools/avg.py
@@ -0,0 +1,248 @@
+#!/usr/bin/env python
+
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can
+# be found in the LICENSE file.
+"""
+This script averages numbers output from another script. It is useful
+to average over a benchmark that outputs one or more results of the form
+  <key> <number> <unit>
+key and unit are optional, but only one number per line is processed.
+
+For example, if
+  $ bch --allow-natives-syntax toNumber.js
+outputs
+  Number('undefined'):  155763
+  (+'undefined'):  193050 Kps
+  23736 Kps
+then
+  $ avg.py 10 bch --allow-natives-syntax toNumber.js
+will output
+  [10/10] (+'undefined')         : avg 192,240.40 stddev   6,486.24 (185,529.00 - 206,186.00)
+  [10/10] Number('undefined')    : avg 156,990.10 stddev  16,327.56 (144,718.00 - 202,840.00) Kps
+  [10/10] [default]              : avg  22,885.80 stddev   1,941.80 ( 17,584.00 -  24,266.00) Kps
+"""
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import argparse
+import math
+import re
+import signal
+import subprocess
+import sys
+
+PARSER = argparse.ArgumentParser(
+    description="A script that averages numbers from another script's output",
+    epilog="Example:\n\tavg.py 10 bash -c \"echo A: 100; echo B 120; sleep .1\""
+)
+PARSER.add_argument(
+    'repetitions',
+    type=int,
+    help="number of times the command should be repeated")
+PARSER.add_argument(
+    'command',
+    nargs=argparse.REMAINDER,
+    help="command to run (no quotes needed)")
+PARSER.add_argument(
+    '--echo',
+    '-e',
+    action='store_true',
+    default=False,
+    help="set this flag to echo the command's output")
+
+ARGS = vars(PARSER.parse_args())
+
+if not ARGS['command']:
+  print("No command provided.")
+  exit(1)
+
+
+class FieldWidth:
+
+  def __init__(self, points=0, key=0, average=0, stddev=0, min_width=0, max_width=0):
+    self.widths = dict(points=points, key=key, average=average, stddev=stddev,
+                       min=min_width, max=max_width)
+
+  def max_widths(self, other):
+    self.widths = {k: max(v, other.widths[k]) for k, v in self.widths.items()}
+
+  def __getattr__(self, key):
+    return self.widths[key]
+
+
+def fmtS(string, width=0):
+  return "{0:<{1}}".format(string, width)
+
+
+def fmtN(num, width=0):
+  return "{0:>{1},.2f}".format(num, width)
+
+
+def fmt(num):
+  return "{0:>,.2f}".format(num)
+
+
+def format_line(points, key, average, stddev, min_value, max_value,
+                unit_string, widths):
+  return "{:>{}};  {:<{}};  {:>{}};  {:>{}};  {:>{}};  {:>{}};  {}".format(
+      points, widths.points,
+      key, widths.key,
+      average, widths.average,
+      stddev, widths.stddev,
+      min_value, widths.min,
+      max_value, widths.max,
+      unit_string)
+
+
+def fmt_reps(msrmnt):
+  rep_string = str(ARGS['repetitions'])
+  return "[{0:>{1}}/{2}]".format(msrmnt.size(), len(rep_string), rep_string)
+
+
+class Measurement:
+
+  def __init__(self, key, unit):
+    self.key = key
+    self.unit = unit
+    self.values = []
+    self.average = 0
+    self.count = 0
+    self.M2 = 0
+    self.min = float("inf")
+    self.max = -float("inf")
+
+  def addValue(self, value):
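+    # Maintain running statistics with Welford's online algorithm: `average`
+    # is the running mean and `M2` accumulates squared deviations, which
+    # variance() divides by (count - 1).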
+    try:
+      num_value = float(value)
+      self.values.append(num_value)
+      self.min = min(self.min, num_value)
+      self.max = max(self.max, num_value)
+      self.count = self.count + 1
+      delta = num_value - self.average
+      self.average = self.average + delta / self.count
+      delta2 = num_value - self.average
+      self.M2 = self.M2 + delta * delta2
+    except ValueError:
+      print("Ignoring non-numeric value", value)
+
+  def status(self, widths):
+    return "{} {}: avg {} stddev {} ({} - {}) {}".format(
+        fmt_reps(self),
+        fmtS(self.key, widths.key), fmtN(self.average, widths.average),
+        fmtN(self.stddev(), widths.stddev), fmtN(self.min, widths.min),
+        fmtN(self.max, widths.max), fmtS(self.unit_string()))
+
+  def result(self, widths):
+    return format_line(self.size(), self.key, fmt(self.average),
+                       fmt(self.stddev()), fmt(self.min),
+                       fmt(self.max), self.unit_string(),
+                       widths)
+
+  def unit_string(self):
+    if not self.unit:
+      return ""
+    return self.unit
+
+  def variance(self):
+    if self.count < 2:
+      return float('NaN')
+    return self.M2 / (self.count - 1)
+
+  def stddev(self):
+    return math.sqrt(self.variance())
+
+  def size(self):
+    return len(self.values)
+
+  def widths(self):
+    return FieldWidth(
+        points=len("{}".format(self.size())) + 2,
+        key=len(self.key),
+        average=len(fmt(self.average)),
+        stddev=len(fmt(self.stddev())),
+        min_width=len(fmt(self.min)),
+        max_width=len(fmt(self.max)))
+
+
+def result_header(widths):
+  return format_line("#/{}".format(ARGS['repetitions']),
+                     "id", "avg", "stddev", "min", "max", "unit", widths)
+
+
+class Measurements:
+
+  def __init__(self):
+    self.all = {}
+    self.default_key = '[default]'
+    self.max_widths = FieldWidth(
+        points=len("{}".format(ARGS['repetitions'])) + 2,
+        key=len("id"),
+        average=len("avg"),
+        stddev=len("stddev"),
+        min_width=len("min"),
+        max_width=len("max"))
+    self.last_status_len = 0
+
+  def record(self, key, value, unit):
+    if not key:
+      key = self.default_key
+    if key not in self.all:
+      self.all[key] = Measurement(key, unit)
+    self.all[key].addValue(value)
+    self.max_widths.max_widths(self.all[key].widths())
+
+  def any(self):
+    if self.all:
+      return next(iter(self.all.values()))
+    return None
+
+  def print_results(self):
+    print("{:<{}}".format("", self.last_status_len), end="\r")
+    print(result_header(self.max_widths), sep=" ")
+    for key in sorted(self.all):
+      print(self.all[key].result(self.max_widths), sep=" ")
+
+  def print_status(self):
+    status = "No results found. Check format?"
+    measurement = MEASUREMENTS.any()
+    if measurement:
+      status = measurement.status(MEASUREMENTS.max_widths)
+    print("{:<{}}".format(status, self.last_status_len), end="\r")
+    self.last_status_len = len(status)
+
+
+MEASUREMENTS = Measurements()
+
+
+def signal_handler(signum, frame):
+  print("", end="\r")
+  MEASUREMENTS.print_results()
+  sys.exit(0)
+
+
+signal.signal(signal.SIGINT, signal_handler)
+
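+# Matches one "<key> <value> <unit>" result per line, where key and unit are
+# optional; the named groups are consumed in the measurement loop below.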
+SCORE_REGEX = (r'\A((console.timeEnd: )?'
+               r'(?P<key>[^\s:,]+)[,:]?)?'
+               r'(^\s*|\s+)'
+               r'(?P<value>[0-9]+(.[0-9]+)?)'
+               r'\ ?(?P<unit>[^\d\W]\w*)?[.\s]*\Z')
+
+for x in range(0, ARGS['repetitions']):
+  proc = subprocess.Popen(ARGS['command'], stdout=subprocess.PIPE)
+  for line in proc.stdout:
+    if ARGS['echo']:
+      print(line.decode(), end="")
+    for m in re.finditer(SCORE_REGEX, line.decode()):
+      MEASUREMENTS.record(m.group('key'), m.group('value'), m.group('unit'))
+  proc.wait()
+  if proc.returncode != 0:
+    print("Child exited with status %d" % proc.returncode)
+    break
+
+  MEASUREMENTS.print_status()
+
+# Print final results
+MEASUREMENTS.print_results()
diff --git a/src/third_party/v8/tools/bash-completion.sh b/src/third_party/v8/tools/bash-completion.sh
new file mode 100755
index 0000000..27e73b7
--- /dev/null
+++ b/src/third_party/v8/tools/bash-completion.sh
@@ -0,0 +1,59 @@
+#!/bin/bash
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Inspired by and based on:
+# http://src.chromium.org/viewvc/chrome/trunk/src/tools/bash-completion
+
+# Flag completion rule for bash.
+# To load in your shell, "source path/to/this/file".
+
+v8_source=$(readlink -f $(dirname $BASH_SOURCE)/..)
+
+_v8_flag() {
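+  # Build the candidate list from DEFINE_* flags and harmony features in
+  # flag-definitions.h (underscores become dashes, bools also get a --no
+  # variant) plus the options matched via strcmp in d8.cc.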
+  local cur defines targets
+  cur="${COMP_WORDS[COMP_CWORD]}"
+  defines=$(cat $v8_source/src/flags/flag-definitions.h \
+    | grep "^DEFINE" \
+    | grep -v "DEFINE_IMPLICATION" \
+    | sed -e 's/_/-/g'; \
+    cat $v8_source/src/flags/flag-definitions.h \
+    | grep "^  V(harmony_" \
+    | sed -e 's/^  V/DEFINE-BOOL/' \
+    | sed -e 's/_/-/g')
+  targets=$(echo "$defines" \
+    | sed -ne 's/^DEFINE-[^(]*(\([^,]*\).*/--\1/p'; \
+    echo "$defines" \
+    | sed -ne 's/^DEFINE-BOOL(\([^,]*\).*/--no\1/p'; \
+    cat $v8_source/src/d8/d8.cc \
+    | grep "strcmp(argv\[i\]" \
+    | sed -ne 's/^[^"]*"--\([^"]*\)".*/--\1/p')
+  COMPREPLY=($(compgen -W "$targets" -- "$cur"))
+  return 0
+}
+
+complete -F _v8_flag -f d8
diff --git a/src/third_party/v8/tools/bigint-tester.py b/src/third_party/v8/tools/bigint-tester.py
new file mode 100755
index 0000000..0940369
--- /dev/null
+++ b/src/third_party/v8/tools/bigint-tester.py
@@ -0,0 +1,352 @@
+#!/usr/bin/python
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import argparse
+import math
+import multiprocessing
+import os
+import random
+import subprocess
+import sys
+import tempfile
+
+# Configuration.
+kChars = "0123456789abcdef"
+kBase = 16
+kLineLength = 70  # A bit less than 80.
+kNumInputsGenerate = 20
+kNumInputsStress = 1000
+
+# Internally used sentinels.
+kNo = 0
+kYes = 1
+kRandom = 2
+
+TEST_HEADER = """\
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Generated by %s.
+""" % sys.argv[0]
+
+TEST_BODY = """
+var error_count = 0;
+for (var i = 0; i < data.length; i++) {
+  var d = data[i];
+%s
+}
+if (error_count !== 0) {
+  print("Finished with " + error_count + " errors.")
+  quit(1);
+}"""
+
+def GenRandom(length, negative=kRandom):
+  if length == 0: return "0n"
+  s = []
+  if negative == kYes or (negative == kRandom and (random.randint(0, 1) == 0)):
+    s.append("-")  # 50% chance of negative.
+  s.append("0x")
+  s.append(kChars[random.randint(1, kBase - 1)])  # No leading zero.
+  for i in range(1, length):
+    s.append(kChars[random.randint(0, kBase - 1)])
+  s.append("n")
+  return "".join(s)
+
+def Parse(x):
+  assert x[-1] == 'n', x
+  return int(x[:-1], kBase)
+
+def Format(x):
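+  # Inverse of Parse: renders a Python integer as the "0x...n" BigInt literal
+  # used in the generated test data.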
+  original = x
+  negative = False
+  if x == 0: return "0n"
+  if x < 0:
+    negative = True
+    x = -x
+  s = ""
+  while x > 0:
+    s = kChars[x % kBase] + s
+    x = x / kBase
+  s = "0x" + s + "n"
+  if negative:
+    s = "-" + s
+  assert Parse(s) == original
+  return s
+
+class TestGenerator(object):
+  # Subclasses must implement these.
+  # Returns a JSON snippet defining inputs and expected output for one test.
+  def EmitOne(self): raise NotImplementedError
+  # Returns a snippet of JavaScript that will operate on a variable "d"
+  # whose content is defined by the result of a call to "EmitOne".
+  def EmitTestCore(self): raise NotImplementedError
+
+  def EmitHeader(self):
+    return TEST_HEADER
+
+  def EmitData(self, count):
+    s = []
+    for i in range(count):
+      s.append(self.EmitOne())
+    return "var data = [" + ", ".join(s) + "];"
+
+  def EmitTestBody(self):
+    return TEST_BODY % self.EmitTestCore()
+
+  def PrintTest(self, count):
+    print(self.EmitHeader())
+    print(self.EmitData(count))
+    print(self.EmitTestBody())
+
+  def RunTest(self, count, binary):
+    # Write the generated test to a temporary file, run the binary on it,
+    # then clean up.
+    fd, path = tempfile.mkstemp(suffix=".js", prefix="bigint-test-")
+    try:
+      with os.fdopen(fd, "w") as f:
+        f.write(self.EmitData(count))
+        f.write(self.EmitTestBody())
+      return subprocess.call("%s %s" % (binary, path),
+                             shell=True)
+    finally:
+      os.remove(path)
+
+class UnaryOp(TestGenerator):
+  # Subclasses must implement these two.
+  def GetOpString(self): raise NotImplementedError
+  def GenerateResult(self, x): raise NotImplementedError
+
+  # Subclasses may override this:
+  def GenerateInput(self):
+    return GenRandom(random.randint(0, kLineLength))
+
+  # Subclasses should not override anything below.
+  def EmitOne(self):
+    x_str = self.GenerateInput()
+    x_num = Parse(x_str)
+    result_num = self.GenerateResult(x_num)
+    result_str = Format(result_num)
+    return "{\n  a: %s,\n  r: %s\n}" % (x_str, result_str)
+
+  def EmitTestCore(self):
+    return """\
+  var r = %(op)sd.a;
+  if (d.r !== r) {
+    print("Input:    " + d.a.toString(%(base)d));
+    print("Result:   " + r.toString(%(base)d));
+    print("Expected: " + d.r);
+    error_count++;
+  }""" % {"op": self.GetOpString(), "base": kBase}
+
+class BinaryOp(TestGenerator):
+  # Subclasses must implement these two.
+  def GetOpString(self): raise NotImplementedError
+  def GenerateResult(self, left, right): raise NotImplementedError
+
+  # Subclasses may override these:
+  def GenerateInputLengths(self):
+    return random.randint(0, kLineLength), random.randint(0, kLineLength)
+
+  def GenerateInputs(self):
+    left_length, right_length = self.GenerateInputLengths()
+    return GenRandom(left_length), GenRandom(right_length)
+
+  # Subclasses should not override anything below.
+  def EmitOne(self):
+    left_str, right_str = self.GenerateInputs()
+    left_num = Parse(left_str)
+    right_num = Parse(right_str)
+    result_num = self.GenerateResult(left_num, right_num)
+    result_str = Format(result_num)
+    return ("{\n  a: %s,\n  b: %s,\n  r: %s\n}" %
+            (left_str, right_str, result_str))
+
+  def EmitTestCore(self):
+    return """\
+  var r = d.a %(op)s d.b;
+  if (d.r !== r) {
+    print("Input A:  " + d.a.toString(%(base)d));
+    print("Input B:  " + d.b.toString(%(base)d));
+    print("Result:   " + r.toString(%(base)d));
+    print("Expected: " + d.r);
+    print("Op: %(op)s");
+    error_count++;
+  }""" % {"op": self.GetOpString(), "base": kBase}
+
+class Neg(UnaryOp):
+  def GetOpString(self): return "-"
+  def GenerateResult(self, x): return -x
+
+class BitNot(UnaryOp):
+  def GetOpString(self): return "~"
+  def GenerateResult(self, x): return ~x
+
+class Inc(UnaryOp):
+  def GetOpString(self): return "++"
+  def GenerateResult(self, x): return x + 1
+
+class Dec(UnaryOp):
+  def GetOpString(self): return "--"
+  def GenerateResult(self, x): return x - 1
+
+class Add(BinaryOp):
+  def GetOpString(self): return "+"
+  def GenerateResult(self, a, b): return a + b
+
+class Sub(BinaryOp):
+  def GetOpString(self): return "-"
+  def GenerateResult(self, a, b): return a - b
+
+class Mul(BinaryOp):
+  def GetOpString(self): return "*"
+  def GenerateResult(self, a, b): return a * b
+  def GenerateInputLengths(self):
+    left_length = random.randint(1, kLineLength)
+    return left_length, kLineLength - left_length
+
+class Div(BinaryOp):
+  def GetOpString(self): return "/"
+  def GenerateResult(self, a, b):
+    result = abs(a) // abs(b)
+    if (a < 0) != (b < 0): result = -result
+    return result
+  def GenerateInputLengths(self):
+    # Let the left side be longer than the right side with high probability,
+    # because that case is more interesting.
+    min_left = kLineLength * 6 // 10
+    max_right = kLineLength * 7 // 10
+    return random.randint(min_left, kLineLength), random.randint(1, max_right)
+
+class Mod(Div):  # Sharing GenerateInputLengths.
+  def GetOpString(self): return "%"
+  def GenerateResult(self, a, b):
+    result = a % b
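+    # Python's % takes the sign of the divisor, JS BigInt's remainder takes
+    # the sign of the dividend; adjust the result to match JS.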
+    if a < 0 and result > 0:
+      result -= abs(b)
+    if a > 0 and result < 0:
+      result += abs(b)
+    return result
+
+class Shl(BinaryOp):
+  def GetOpString(self): return "<<"
+  def GenerateInputsInternal(self, small_shift_positive):
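+    # Keep the results bounded: a small shift amount is applied in the
+    # "growing" direction of this operator, while a large one gets the sign
+    # that shrinks the value (a huge << turns into a >>, and vice versa).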
+    left_length = random.randint(0, kLineLength - 1)
+    left = GenRandom(left_length)
+    small_shift = random.randint(0, 1) == 0
+    if small_shift:
+      right_length = 1 + int(math.log((kLineLength - left_length), kBase))
+      neg = kNo if small_shift_positive else kYes
+    else:
+      right_length = random.randint(0, 3)
+      neg = kYes if small_shift_positive else kNo
+    right = GenRandom(right_length, negative=neg)
+    return left, right
+
+  def GenerateInputs(self): return self.GenerateInputsInternal(True)
+  def GenerateResult(self, a, b):
+    if b < 0: return a >> -b
+    return a << b
+
+class Sar(Shl):  # Sharing GenerateInputsInternal.
+  def GetOpString(self): return ">>"
+  def GenerateInputs(self):
+    return self.GenerateInputsInternal(False)
+  def GenerateResult(self, a, b):
+    if b < 0: return a << -b
+    return a >> b
+
+class BitAnd(BinaryOp):
+  def GetOpString(self): return "&"
+  def GenerateResult(self, a, b): return a & b
+
+class BitOr(BinaryOp):
+  def GetOpString(self): return "|"
+  def GenerateResult(self, a, b): return a | b
+
+class BitXor(BinaryOp):
+  def GetOpString(self): return "^"
+  def GenerateResult(self, a, b): return a ^ b
+
+OPS = {
+  "add": Add,
+  "sub": Sub,
+  "mul": Mul,
+  "div": Div,
+  "mod": Mod,
+  "inc": Inc,
+  "dec": Dec,
+  "neg": Neg,
+  "not": BitNot,
+  "shl": Shl,
+  "sar": Sar,
+  "and": BitAnd,
+  "or": BitOr,
+  "xor": BitXor
+}
+
+OPS_NAMES = ", ".join(sorted(OPS.keys()))
+
+def RunOne(op, num_inputs, binary):
+  return OPS[op]().RunTest(num_inputs, binary)
+def WrapRunOne(args):
+  return RunOne(*args)
+def RunAll(args):
+  for op in args.op:
+    for r in range(args.runs):
+      yield (op, args.num_inputs, args.binary)
+
+def Main():
+  parser = argparse.ArgumentParser(
+      description="Helper for generating or running BigInt tests.")
+  parser.add_argument(
+      "action", help="Action to perform: 'generate' or 'stress'")
+  parser.add_argument(
+      "op", nargs="+",
+      help="Operation(s) to test, one or more of: %s. In 'stress' mode, "
+           "special op 'all' tests all ops." % OPS_NAMES)
+  parser.add_argument(
+      "-n", "--num-inputs", type=int, default=-1,
+      help="Number of input/output sets in each generated test. Defaults to "
+           "%d for 'generate' and '%d' for 'stress' mode." %
+           (kNumInputsGenerate, kNumInputsStress))
+
+  stressopts = parser.add_argument_group("'stress' mode arguments")
+  stressopts.add_argument(
+      "-r", "--runs", type=int, default=1000,
+      help="Number of tests (with NUM_INPUTS each) to generate and run. "
+           "Default: %(default)s.")
+  stressopts.add_argument(
+      "-b", "--binary", default="out/x64.debug/d8",
+      help="The 'd8' binary to use. Default: %(default)s.")
+  args = parser.parse_args()
+
+  for op in args.op:
+    if op not in OPS.keys() and op != "all":
+      print("Invalid op '%s'. See --help." % op)
+      return 1
+
+  if len(args.op) == 1 and args.op[0] == "all":
+    args.op = OPS.keys()
+
+  if args.action == "generate":
+    if args.num_inputs < 0: args.num_inputs = kNumInputsGenerate
+    for op in args.op:
+      OPS[op]().PrintTest(args.num_inputs)
+  elif args.action == "stress":
+    if args.num_inputs < 0: args.num_inputs = kNumInputsStress
+    result = 0
+    pool = multiprocessing.Pool(multiprocessing.cpu_count())
+    for r in pool.imap_unordered(WrapRunOne, RunAll(args)):
+      result = result or r
+    return result
+  else:
+    print("Invalid action '%s'. See --help." % args.action)
+    return 1
+
+if __name__ == "__main__":
+  sys.exit(Main())
diff --git a/src/third_party/v8/tools/blink_tests/TestExpectations b/src/third_party/v8/tools/blink_tests/TestExpectations
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/third_party/v8/tools/blink_tests/TestExpectations
diff --git a/src/third_party/v8/tools/callstats-from-telemetry.sh b/src/third_party/v8/tools/callstats-from-telemetry.sh
new file mode 100755
index 0000000..ead482a
--- /dev/null
+++ b/src/third_party/v8/tools/callstats-from-telemetry.sh
@@ -0,0 +1,61 @@
+#!/usr/bin/env bash
+set -e
+
+usage() {
+cat << EOF
+usage: $0 OPTIONS RESULTS_DIR
+
+Convert a telemetry JSON trace result to a callstats.html-compatible
+version at ./out.json
+
+OPTIONS:
+  -h           Show this message.
+  RESULTS_DIR  tools/perf/artifacts/run_XXX
+EOF
+}
+
+
+while getopts ":h" OPTION ; do
+  case $OPTION in
+    h)  usage
+        exit 0
+        ;;
+    ?)  echo "Illegal option: -$OPTARG"
+        usage
+        exit 1
+        ;;
+  esac
+done
+
+# =======================================================================
+
+RESULTS_DIR=$1
+
+if [[ ! -e "$RESULTS_DIR" ]]; then
+  echo "RESULTS_DIR '$RESULTS_DIR' not found";
+  usage;
+  exit 1;
+fi
+
+
+OUT=out.json
+
+if [[ -e $OUT ]]; then
+  cp --backup=numbered $OUT $OUT.bak
+fi
+
+
+echo '{ "telemetry-results": { "placeholder":{}' > $OUT
+
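+# Append one "PAGE": [...] entry per page directory, holding the
+# runtime-call-stats objects extracted from the converted trace events.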
+for PAGE_DIR in $RESULTS_DIR/*_1; do
+  PAGE=`basename $PAGE_DIR`;
+  JSON="$PAGE_DIR/trace/traceEvents/*_converted.json";
+  du -sh $JSON;
+  echo "Converting PAGE=$PAGE";
+  echo "," >> $OUT;
+  echo "\"$PAGE\": " >> $OUT;
+  jq '[.traceEvents[].args | select(."runtime-call-stats" != null) | ."runtime-call-stats"]' $JSON >> $OUT;
+done
+
+
+echo '}}' >> $OUT
diff --git a/src/third_party/v8/tools/callstats.html b/src/third_party/v8/tools/callstats.html
new file mode 100644
index 0000000..5e691ed
--- /dev/null
+++ b/src/third_party/v8/tools/callstats.html
@@ -0,0 +1,2234 @@
+<!DOCTYPE html>
+<html>
+<!--
+Copyright 2016 the V8 project authors. All rights reserved.  Use of this source
+code is governed by a BSD-style license that can be found in the LICENSE file.
+-->
+
+<head>
+  <meta charset="utf-8">
+  <title>V8 Runtime Stats Komparator</title>
+  <style>
+    body {
+      font-family: arial;
+    }
+
+    table {
+      display: table;
+      border-spacing: 0px;
+    }
+
+    tr {
+      border-spacing: 0px;
+      padding: 10px;
+    }
+
+    td,
+    th {
+      padding: 3px 10px 3px 5px;
+    }
+
+    .inline {
+      display: inline-block;
+      vertical-align: top;
+    }
+
+    h2,
+    h3 {
+      margin-bottom: 0px;
+    }
+
+    .hidden {
+      display: none;
+    }
+
+    .view {
+      display: table;
+    }
+
+    .column {
+      display: table-cell;
+      border-right: 1px black dotted;
+      min-width: 200px;
+    }
+
+    .column .header {
+      padding: 0 10px 0 10px
+    }
+
+    #column {
+      display: none;
+    }
+
+    .list {
+      width: 100%;
+    }
+
+    select {
+      width: 100%
+    }
+
+    .list tbody {
+      cursor: pointer;
+    }
+
+    .list tr:nth-child(even) {
+      background-color: #EFEFEF;
+    }
+
+    .list tr:nth-child(even).selected {
+      background-color: #DDD;
+    }
+
+    .list tr.child {
+      display: none;
+    }
+
+    .list tr.child.visible {
+      display: table-row;
+    }
+
+    .list .child .name {
+      padding-left: 20px;
+    }
+
+    .list .parent td {
+      border-top: 1px solid #AAA;
+    }
+
+    .list .total {
+      font-weight: bold
+    }
+
+    .list tr.parent {
+      background-color: #FFF;
+    }
+
+    .list tr.parent.selected {
+      background-color: #DDD;
+    }
+
+    tr.selected {
+      background-color: #DDD;
+    }
+
+    .codeSearch {
+      display: block-inline;
+      float: right;
+      border-radius: 5px;
+      background-color: #EEE;
+      width: 1em;
+      text-align: center;
+    }
+
+    .list .position {
+      text-align: right;
+      display: none;
+    }
+
+    .list div.toggle {
+      cursor: pointer;
+    }
+
+    #column_0 .position {
+      display: table-cell;
+    }
+
+    #column_0 .name {
+      display: table-cell;
+    }
+
+    .list .name {
+      display: none;
+      white-space: nowrap;
+    }
+
+    .value {
+      text-align: right;
+    }
+
+    .selectedVersion {
+      font-weight: bold;
+    }
+
+    #baseline {
+      width: auto;
+    }
+
+    .compareSelector {
+      padding-bottom: 20px;
+    }
+
+    .pageDetailTable tbody {
+      cursor: pointer
+    }
+
+    .pageDetailTable tfoot td {
+      border-top: 1px grey solid;
+    }
+
+    #popover {
+      position: absolute;
+      transform: translateY(-50%) translateX(40px);
+      box-shadow: -2px 10px 44px -10px #000;
+      border-radius: 5px;
+      z-index: 1;
+      background-color: #FFF;
+      display: none;
+      white-space: nowrap;
+    }
+
+    #popover table {
+      position: relative;
+      z-index: 1;
+      text-align: right;
+      margin: 10px;
+    }
+    #popover td {
+      padding: 3px 0px 3px 5px;
+      white-space: nowrap;
+    }
+
+    .popoverArrow {
+      background-color: #FFF;
+      position: absolute;
+      width: 30px;
+      height: 30px;
+      transform: translateY(-50%)rotate(45deg);
+      top: 50%;
+      left: -10px;
+      z-index: 0;
+    }
+
+    #popover .name {
+      padding: 5px;
+      font-weight: bold;
+      text-align: center;
+    }
+
+    #popover table .compare {
+      display: none
+    }
+
+    #popover table.compare .compare {
+      display: table-cell;
+    }
+
+    #popover .compare .time,
+    #popover .compare .version {
+      padding-left: 10px;
+    }
+    .graph,
+    .graph .content {
+      width: 100%;
+    }
+
+    .diff .hideDiff {
+      display: none;
+    }
+    .noDiff .hideNoDiff {
+      display: none;
+    }
+  </style>
+  <script src="https://www.gstatic.com/charts/loader.js"></script>
+  <script>
+    "use strict"
+    google.charts.load('current', {packages: ['corechart']});
+
+    // Did anybody say monkeypatching?
+    if (!NodeList.prototype.forEach) {
+      NodeList.prototype.forEach = function(func) {
+        for (let i = 0; i < this.length; i++) {
+          func(this[i]);
+        }
+      }
+    }
+
+    let versions;
+    let pages;
+    let selectedPage;
+    let baselineVersion;
+    let selectedEntry;
+
+    // Marker to programmatically replace the defaultData.
+    let defaultData = /*default-data-start*/undefined/*default-data-end*/;
+
+    function initialize() {
+      // Initialize the stats table and toggle lists.
+      let original = $("column");
+      let view = document.createElement('div');
+      view.id = 'view';
+      let i = 0;
+      versions.forEach((version) =>  {
+        if (!version.enabled) return;
+        // add column
+        let column = original.cloneNode(true);
+        column.id = "column_" + i;
+        // Fill in all versions
+        let select = column.querySelector(".version");
+        select.id = "selectVersion_" + i;
+        // add all select options
+        versions.forEach((version) => {
+          if (!version.enabled) return;
+          let option = document.createElement("option");
+          option.textContent = version.name;
+          option.version = version;
+          select.appendChild(option);
+        });
+        // Fill in all page versions
+        select = column.querySelector(".pageVersion");
+        select.id = "select_" + i;
+        // add all pages
+        versions.forEach((version) => {
+          if (!version.enabled) return;
+          let optgroup = document.createElement("optgroup");
+          optgroup.label = version.name;
+          optgroup.version = version;
+          version.forEachPage((page) => {
+            let option = document.createElement("option");
+            option.textContent = page.name;
+            option.page = page;
+            optgroup.appendChild(option);
+          });
+          select.appendChild(optgroup);
+        });
+        view.appendChild(column);
+        i++;
+      });
+      let oldView = $('view');
+      oldView.parentNode.replaceChild(view, oldView);
+
+      let select = $('baseline');
+      removeAllChildren(select);
+      select.appendChild(document.createElement('option'));
+      versions.forEach((version) => {
+        let option = document.createElement("option");
+        option.textContent = version.name;
+        option.version = version;
+        select.appendChild(option);
+      });
+      initializeToggleList(versions.versions, $('versionSelector'));
+      initializeToggleList(pages.values(), $('pageSelector'));
+      initializeToggleList(Group.groups.values(), $('groupSelector'));
+      initializeToggleContentVisibility();
+    }
+
+    function initializeToggleList(items, node) {
+      let list = node.querySelector('ul');
+      removeAllChildren(list);
+      items = Array.from(items);
+      items.sort(NameComparator);
+      items.forEach((item) => {
+        let li = document.createElement('li');
+        let checkbox = document.createElement('input');
+        checkbox.type = 'checkbox';
+        checkbox.checked = item.enabled;
+        checkbox.item = item;
+        checkbox.addEventListener('click', handleToggleVersionOrPageEnable);
+        li.appendChild(checkbox);
+        li.appendChild(document.createTextNode(item.name));
+        list.appendChild(li);
+      });
+      $('results').querySelectorAll('#results > .hidden').forEach((node) => {
+        toggleCssClass(node, 'hidden', false);
+      })
+    }
+
+    function initializeToggleContentVisibility() {
+      let nodes = document.querySelectorAll('.toggleContentVisibility');
+      nodes.forEach((node) => {
+        let content = node.querySelector('.content');
+        let header = node.querySelector('h1,h2,h3');
+        if (content === undefined || header === undefined) return;
+        if (header.querySelector('input') != undefined) return;
+        let checkbox = document.createElement('input');
+        checkbox.type = 'checkbox';
+        checkbox.checked = content.className.indexOf('hidden') == -1;
+        checkbox.contentNode = content;
+        checkbox.addEventListener('click', handleToggleContentVisibility);
+        header.insertBefore(checkbox, header.childNodes[0]);
+      });
+    }
+
+    window.addEventListener('popstate', (event) => {
+      popHistoryState(event.state);
+    });
+
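+    // Restores the selection (version, page, entry) described by a history
+    // state object; returns true if a matching entry could be shown.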
+    function popHistoryState(state) {
+      if (!state.version) return false;
+      if (!versions) return false;
+      let version = versions.getByName(state.version);
+      if (!version) return false;
+      let page = version.get(state.page);
+      if (!page) return false;
+      if (!state.entry) {
+        showEntry(page.total);
+      } else {
+        let entry = page.get(state.entry);
+        if (!entry) {
+          showEntry(page.total);
+        } else {
+          showEntry(entry);
+        }
+      }
+      return true;
+    }
+
+    function pushHistoryState() {
+      let selection = selectedEntry ? selectedEntry : selectedPage;
+      if (!selection) return;
+      let state = selection.urlParams();
+      // Don't push a history state if it didn't change.
+      if (JSON.stringify(window.history.state) === JSON.stringify(state)) return;
+      let params = "?";
+      for (let pairs of Object.entries(state)) {
+        params += encodeURIComponent(pairs[0]) + "="
+            + encodeURIComponent(pairs[1]) + "&";
+      }
+      window.history.pushState(state, selection.toString(), params);
+    }
+
+    function showSelectedEntryInPage(page) {
+      if (!selectedEntry) return showPage(page);
+      let entry = page.get(selectedEntry.name);
+      if (!entry) return showPage(page);
+      selectEntry(entry);
+    }
+
+    function showPage(firstPage) {
+      let changeSelectedEntry = selectedEntry !== undefined
+          && selectedEntry.page === selectedPage;
+      selectedPage = firstPage;
+      selectedPage.sort();
+      showPageInColumn(firstPage, 0);
+      // Show the other versions of this page in the following columns.
+      let pageVersions = versions.getPageVersions(firstPage);
+      let index = 1;
+      pageVersions.forEach((page) => {
+        if (page !== firstPage) {
+          showPageInColumn(page, index);
+          index++;
+        }
+      });
+      if (changeSelectedEntry) {
+        showEntryDetail(selectedPage.getEntry(selectedEntry));
+      }
+      showImpactList(selectedPage);
+      pushHistoryState();
+    }
+
+    function showPageInColumn(page, columnIndex) {
+      page.sort();
+      let showDiff = (baselineVersion === undefined && columnIndex !== 0) ||
+        (baselineVersion !== undefined && page.version !== baselineVersion);
+      let diffStatus = (td, a, b) => {};
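+      // diffStatus colors a value cell: with a baseline selected it keys off
+      // the sign of the shown value, otherwise off the ratio between the two
+      // compared values.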
+      if (showDiff) {
+        if (baselineVersion !== undefined) {
+          diffStatus = (td, a, b) => {
+            if (a == 0) return;
+            td.style.color = a < 0 ? '#FF0000' : '#00BB00';
+          };
+        } else {
+          diffStatus = (td, a, b) => {
+            if (a == b) return;
+            let color;
+            let ratio = a / b;
+            if (ratio > 1) {
+              ratio = Math.min(Math.round((ratio - 1) * 255 * 10), 200);
+              color = '#' + ratio.toString(16) + "0000";
+            } else {
+              ratio = Math.min(Math.round((1 - ratio) * 255 * 10), 200);
+              color = '#00' + ratio.toString(16) + "00";
+            }
+            td.style.color = color;
+          }
+        }
+      }
+
+      let column = $('column_' + columnIndex);
+      let select = $('select_' + columnIndex);
+      // Find the matching option
+      selectOption(select, (i, option) => {
+        return option.page == page
+      });
+      let table = column.querySelector("table");
+      let oldTbody = table.querySelector('tbody');
+      let tbody = document.createElement('tbody');
+      let referencePage = selectedPage;
+      page.forEachSorted(selectedPage, (parentEntry, entry, referenceEntry) => {
+        let tr = document.createElement('tr');
+        tbody.appendChild(tr);
+        tr.entry = entry;
+        tr.parentEntry = parentEntry;
+        tr.className = parentEntry === undefined ? 'parent' : 'child';
+        // Don't show entries that do not exist on the current page or if we
+        // compare against the current page
+        if (entry !== undefined && page.version !== baselineVersion) {
+          // If we show a diff, use the baselineVersion as the referenceEntry
+          if (baselineVersion !== undefined) {
+            let baselineEntry = baselineVersion.getEntry(entry);
+            if (baselineEntry !== undefined) referenceEntry = baselineEntry
+          }
+          if (!parentEntry) {
+            let node = td(tr, '<div class="toggle">►</div>', 'position');
+            node.firstChild.addEventListener('click', handleToggleGroup);
+          } else {
+            td(tr, entry.position == 0 ? '' : entry.position, 'position');
+          }
+          addCodeSearchButton(entry,
+              td(tr, entry.name, 'name ' + entry.cssClass()));
+
+          diffStatus(
+            td(tr, ms(entry.time), 'value time'),
+            entry.time, referenceEntry.time);
+          diffStatus(
+            td(tr, percent(entry.timePercent), 'value time'),
+            entry.time, referenceEntry.time);
+          diffStatus(
+            td(tr, count(entry.count), 'value count'),
+            entry.count, referenceEntry.count);
+        } else if (baselineVersion !== undefined && referenceEntry
+            && page.version !== baselineVersion) {
+          // Show comparison of entry that does not exist on the current page.
+          tr.entry = new Entry(0, referenceEntry.name);
+          tr.entry.page = page;
+          td(tr, '-', 'position');
+          td(tr, referenceEntry.name, 'name');
+          diffStatus(
+            td(tr, ms(referenceEntry.time), 'value time'),
+            referenceEntry.time, 0);
+          diffStatus(
+            td(tr, percent(referenceEntry.timePercent), 'value time'),
+            referenceEntry.timePercent, 0);
+          diffStatus(
+            td(tr, count(referenceEntry.count), 'value count'),
+            referenceEntry.count, 0);
+        } else {
+          // Display empty entry / baseline entry
+          let showBaselineEntry = entry !== undefined;
+          if (showBaselineEntry) {
+            if (!parentEntry) {
+              let node = td(tr, '<div class="toggle">►</div>', 'position');
+              node.firstChild.addEventListener('click', handleToggleGroup);
+            } else {
+              td(tr, entry.position == 0 ? '' : entry.position, 'position');
+            }
+            td(tr, entry.name, 'name');
+            td(tr, ms(entry.time, false), 'value time');
+            td(tr, percent(entry.timePercent, false), 'value time');
+            td(tr, count(entry.count, false), 'value count');
+          } else {
+            td(tr, '-', 'position');
+            td(tr, referenceEntry.name, 'name');
+            td(tr, '-', 'value time');
+            td(tr, '-', 'value time');
+            td(tr, '-', 'value count');
+          }
+        }
+      });
+      table.replaceChild(tbody, oldTbody);
+      let versionSelect = column.querySelector('select.version');
+      selectOption(versionSelect, (index, option) => {
+        return option.version == page.version
+      });
+    }
+
+    function showEntry(entry) {
+      selectEntry(entry, true);
+    }
+
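+    // Selects an entry: switches the first column to the entry's page if
+    // needed, highlights the matching rows in every column and refreshes the
+    // detail views.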
+    function selectEntry(entry, updateSelectedPage) {
+      let needsPageSwitch = true;
+      if (updateSelectedPage && selectedPage) {
+        entry = selectedPage.version.getEntry(entry);
+        needsPageSwitch = updateSelectedPage && entry.page != selectedPage;
+      }
+      let rowIndex = 0;
+      // If clicked in the detail row change the first column to that page.
+      if (needsPageSwitch) showPage(entry.page);
+      let childNodes = $('column_0').querySelector('.list tbody').childNodes;
+      for (let i = 0; i < childNodes.length; i++) {
+        if (childNodes[i].entry !== undefined &&
+            childNodes[i].entry.name == entry.name) {
+          rowIndex = i;
+          break;
+        }
+      }
+      let firstEntry = childNodes[rowIndex].entry;
+      if (rowIndex) {
+        if (firstEntry.parent) showGroup(firstEntry.parent);
+      }
+      // Deselect all
+      $('view').querySelectorAll('.list tbody tr').forEach((tr) => {
+        toggleCssClass(tr, 'selected', false);
+      });
+      // Select the entry row
+      $('view').querySelectorAll("tbody").forEach((body) => {
+        let row = body.childNodes[rowIndex];
+        if (!row) return;
+        toggleCssClass(row, 'selected', row.entry && row.entry.name ==
+          firstEntry.name);
+      });
+      if (updateSelectedPage && selectedEntry) {
+        entry = selectedEntry.page.version.getEntry(entry);
+      }
+      if (entry !== selectedEntry) {
+        selectedEntry = entry;
+        showEntryDetail(entry);
+      }
+    }
+
+    function showEntryDetail(entry) {
+      showVersionDetails(entry);
+      showPageDetails(entry);
+      showImpactList(entry.page);
+      showGraphs(entry.page);
+      pushHistoryState();
+    }
+
+    function showVersionDetails(entry) {
+      let table, tbody, entries;
+      table = $('detailView').querySelector('.versionDetailTable');
+      tbody = document.createElement('tbody');
+      if (entry !== undefined) {
+        $('detailView').querySelector('.versionDetail h3 span').textContent =
+          entry.name + ' in ' + entry.page.name;
+        entries = versions.getPageVersions(entry.page).map(
+          (page) => {
+            return page.get(entry.name)
+          });
+        entries.sort((a, b) => {
+          return a.time - b.time
+        });
+        entries.forEach((pageEntry) => {
+          if (pageEntry === undefined) return;
+          let tr = document.createElement('tr');
+          if (pageEntry == entry) tr.className += 'selected';
+          tr.entry = pageEntry;
+          let isBaselineEntry = pageEntry.page.version == baselineVersion;
+          td(tr, pageEntry.page.version.name, 'version');
+          td(tr, ms(pageEntry.time, !isBaselineEntry), 'value time');
+          td(tr, percent(pageEntry.timePercent, !isBaselineEntry), 'value time');
+          td(tr, count(pageEntry.count, !isBaselineEntry), 'value count');
+          tbody.appendChild(tr);
+        });
+      }
+      table.replaceChild(tbody, table.querySelector('tbody'));
+    }
+
+    function showPageDetails(entry) {
+      let table, tbody, entries;
+      table = $('detailView').querySelector('.pageDetailTable');
+      tbody = document.createElement('tbody');
+      if (entry === undefined) {
+        table.replaceChild(tbody, table.querySelector('tbody'));
+        return;
+      }
+      let version = entry.page.version;
+      let showDiff = version !== baselineVersion;
+      $('detailView').querySelector('.pageDetail h3 span').textContent =
+        version.name;
+      entries = version.pages.map((page) => {
+          if (!page.enabled) return;
+          return page.get(entry.name)
+        });
+      entries.sort((a, b) => {
+        let cmp = b.timePercent - a.timePercent;
+        if (cmp.toFixed(1) == 0) return b.time - a.time;
+        return cmp
+      });
+      entries.forEach((pageEntry) => {
+        if (pageEntry === undefined) return;
+        let tr = document.createElement('tr');
+        if (pageEntry === entry) tr.className += 'selected';
+        tr.entry = pageEntry;
+        td(tr, pageEntry.page.name, 'name');
+        td(tr, ms(pageEntry.time, showDiff), 'value time');
+        td(tr, percent(pageEntry.timePercent, showDiff), 'value time');
+        td(tr, percent(pageEntry.timePercentPerEntry, showDiff),
+            'value time hideNoDiff');
+        td(tr, count(pageEntry.count, showDiff), 'value count');
+        tbody.appendChild(tr);
+      });
+      // show the total for all pages
+      let tds = table.querySelectorAll('tfoot td');
+      tds[1].textContent = ms(entry.getTimeImpact(), showDiff);
+      // Only show the percentage total if we are in diff mode:
+      tds[2].textContent = percent(entry.getTimePercentImpact(), showDiff);
+      tds[3].textContent = '';
+      tds[4].textContent = count(entry.getCountImpact(), showDiff);
+      table.replaceChild(tbody, table.querySelector('tbody'));
+    }
+
+    function showImpactList(page) {
+      let impactView = $('detailView').querySelector('.impactView');
+      impactView.querySelector('h3 span').textContent = page.version.name;
+
+      let table = impactView.querySelector('table');
+      let tbody = document.createElement('tbody');
+      let version = page.version;
+      let entries = version.allEntries();
+      if (selectedEntry !== undefined && selectedEntry.isGroup) {
+        impactView.querySelector('h3 span').textContent += " " + selectedEntry.name;
+        entries = entries.filter((entry) => {
+          return entry.name == selectedEntry.name ||
+            (entry.parent && entry.parent.name == selectedEntry.name)
+        });
+      }
+      let isCompareView = baselineVersion !== undefined;
+      entries = entries.filter((entry) => {
+        if (isCompareView) {
+          let impact = entry.getTimeImpact();
+          return impact < -1 || 1 < impact
+        }
+        return entry.getTimePercentImpact() > 0.01;
+      });
+      entries = entries.slice(0, 50);
+      entries.sort((a, b) => {
+        let cmp = b.getTimePercentImpact() - a.getTimePercentImpact();
+        if (isCompareView || cmp.toFixed(1) == 0) {
+          return b.getTimeImpact() - a.getTimeImpact();
+        }
+        return cmp
+      });
+      entries.forEach((entry) => {
+        let tr = document.createElement('tr');
+        tr.entry = entry;
+        td(tr, entry.name, 'name');
+        td(tr, ms(entry.getTimeImpact()), 'value time');
+        let percentImpact = entry.getTimePercentImpact();
+        td(tr, percentImpact > 1000 ? '-' : percent(percentImpact), 'value time');
+        let topPages = entry.getPagesByPercentImpact().slice(0, 3)
+          .map((each) => {
+            return each.name + ' (' + percent(each.getEntry(entry).timePercent) +
+              ')'
+          });
+        td(tr, topPages.join(', '), 'name');
+        tbody.appendChild(tr);
+      });
+      table.replaceChild(tbody, table.querySelector('tbody'));
+    }
+
+    function showGraphs(page) {
+      let groups = page.groups.filter(each => each.enabled);
+      // Sort groups by the biggest impact
+      groups.sort((a, b) => {
+        return b.getTimeImpact() - a.getTimeImpact();
+      });
+      if (selectedGroup == undefined) {
+        selectedGroup = groups[0];
+      } else {
+        groups = groups.filter(each => each.name != selectedGroup.name);
+        groups.unshift(selectedGroup);
+      }
+      showPageGraph(groups, page);
+      showVersionGraph(groups, page);
+      showPageVersionGraph(groups, page);
+    }
+
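+    // Builds a DataTable with a string label column plus one number column
+    // per group (column names drop the "Group-" prefix).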
+    function getGraphDataTable(groups) {
+      let dataTable = new google.visualization.DataTable();
+      dataTable.addColumn('string', 'Name');
+      groups.forEach(group => {
+        let column = dataTable.addColumn('number', group.name.substring(6));
+        dataTable.setColumnProperty(column, 'group', group);
+      });
+      return dataTable;
+    }
+
+    let selectedGroup;
+    function showPageGraph(groups, page) {
+      let isDiffView = baselineVersion !== undefined;
+      let dataTable = getGraphDataTable(groups);
+      // Calculate the average row
+      let row = ['Average'];
+      groups.forEach((group) => {
+        if (isDiffView) {
+          row.push(group.isTotal ? 0 : group.getAverageTimeImpact());
+        } else {
+          row.push(group.isTotal ? 0 : group.getTimeImpact());
+        }
+      });
+      dataTable.addRow(row);
+      // Sort the pages by the selected group.
+      let pages = page.version.pages.filter(page => page.enabled);
+      function sumDiff(page) {
+        let sum = 0;
+        groups.forEach(group => {
+          let value = group.getTimePercentImpact() -
+            page.getEntry(group).timePercent;
+          sum += value * value;
+        });
+        return sum;
+      }
+      if (isDiffView) {
+        pages.sort((a, b) => {
+          return b.getEntry(selectedGroup).time -
+            a.getEntry(selectedGroup).time;
+        });
+      } else {
+        pages.sort((a, b) => {
+          return b.getEntry(selectedGroup).timePercent -
+            a.getEntry(selectedGroup).timePercent;
+        });
+      }
+      // Sort by sum of squared distance to the average.
+      // pages.sort((a, b) => {
+      //   return a.distanceFromTotalPercent() - b.distanceFromTotalPercent();
+      // });
+      // Calculate the entries for the pages
+      pages.forEach((page) => {
+        row = [page.name];
+        groups.forEach((group) => {
+          row.push(group.isTotal ? 0 : page.getEntry(group).time);
+        });
+        let rowIndex = dataTable.addRow(row);
+        dataTable.setRowProperty(rowIndex, 'page', page);
+      });
+      renderGraph('Pages for ' + page.version.name, groups, dataTable,
+          'pageGraph', isDiffView ? true : 'percent');
+    }
+
+    function showVersionGraph(groups, page) {
+      let dataTable = getGraphDataTable(groups);
+      let row;
+      let vs = versions.versions.filter(version => version.enabled);
+      vs.sort((a, b) => {
+        return b.getEntry(selectedGroup).getTimeImpact() -
+          a.getEntry(selectedGroup).getTimeImpact();
+      });
+      // Calculate the entries for the versions
+      vs.forEach((version) => {
+        row = [version.name];
+        groups.forEach((group) => {
+          row.push(group.isTotal ? 0 : version.getEntry(group).getTimeImpact());
+        });
+        let rowIndex = dataTable.addRow(row);
+        dataTable.setRowProperty(rowIndex, 'page', page);
+      });
+      renderGraph('Versions Total Time over all Pages', groups, dataTable,
+          'versionGraph', true);
+    }
+
+    function showPageVersionGraph(groups, page) {
+      let dataTable = getGraphDataTable(groups);
+      let row;
+      let vs = versions.getPageVersions(page);
+      vs.sort((a, b) => {
+        return b.getEntry(selectedGroup).time - a.getEntry(selectedGroup).time;
+      });
+      // Calculate the entries for the versions
+      vs.forEach((page) => {
+        row = [page.version.name];
+        groups.forEach((group) => {
+          row.push(group.isTotal ? 0 : page.getEntry(group).time);
+        });
+        let rowIndex = dataTable.addRow(row);
+        dataTable.setRowProperty(rowIndex, 'page', page);
+      });
+      renderGraph('Versions for ' + page.name, groups, dataTable,
+          'pageVersionGraph', true);
+    }
+
+    function renderGraph(title, groups, dataTable, id, isStacked) {
+      let isDiffView = baselineVersion !== undefined;
+      let formatter = new google.visualization.NumberFormat({
+        suffix: (isDiffView ? 'msΔ' : 'ms'),
+        negativeColor: 'red',
+        groupingSymbol: "'"
+      });
+      for (let i = 1; i < dataTable.getNumberOfColumns(); i++) {
+        formatter.format(dataTable, i);
+      }
+      let height = 85 + 28 * dataTable.getNumberOfRows();
+      let options = {
+        isStacked: isStacked,
+        height: height,
+        hAxis: {
+          minValue: 0,
+          textStyle: { fontSize: 14 }
+        },
+        animation:{
+          duration: dataTable.getNumberOfRows() > 50 ? 0 : 500 ,
+          easing: 'out',
+        },
+        vAxis: {
+          textStyle: { fontSize: 14 }
+        },
+        tooltip: { textStyle: { fontSize: 14 }},
+        explorer: {
+          actions: ['dragToZoom', 'rightClickToReset'],
+          maxZoomIn: 0.01
+        },
+        legend: {position:'top', maxLines: 1, textStyle: { fontSize: 14 }},
+        chartArea: {left:200, top:50, width:'98%', height:'80%'},
+        colors: groups.map(each => each.color)
+      };
+      let parentNode = $(id);
+      parentNode.querySelector('h2>span, h3>span').textContent = title;
+      let graphNode = parentNode.querySelector('.content');
+
+      let chart = graphNode.chart;
+      if (chart === undefined) {
+        chart = graphNode.chart = new google.visualization.BarChart(graphNode);
+      } else {
+        google.visualization.events.removeAllListeners(chart);
+      }
+      google.visualization.events.addListener(chart, 'select', selectHandler);
+      function getChartEntry(selection) {
+        if (!selection) return undefined;
+        let column = selection.column;
+        if (column == undefined) return undefined;
+        let selectedGroup = dataTable.getColumnProperty(column, 'group');
+        let row = selection.row;
+        if (row == null) return selectedGroup;
+        let page = dataTable.getRowProperty(row, 'page');
+        if (!page) return selectedGroup;
+        return page.getEntry(selectedGroup);
+      }
+      function selectHandler() {
+        selectedGroup = getChartEntry(chart.getSelection()[0])
+        if (!selectedGroup) return;
+        selectEntry(selectedGroup, true);
+      }
+
+      // Make our global tooltips work
+      google.visualization.events.addListener(chart, 'onmouseover', mouseOverHandler);
+      function mouseOverHandler(selection) {
+        graphNode.entry = getChartEntry(selection);
+      }
+      chart.draw(dataTable, options);
+    }
+
+    function showGroup(entry) {
+      toggleGroup(entry, true);
+    }
+
+    function toggleGroup(group, show) {
+      $('view').querySelectorAll(".child").forEach((tr) => {
+        let entry = tr.parentEntry;
+        if (!entry) return;
+        if (entry.name !== group.name) return;
+        toggleCssClass(tr, 'visible', show);
+      });
+    }
+
+    function showPopover(entry) {
+      let popover = $('popover');
+      popover.querySelector('td.name').textContent = entry.name;
+      popover.querySelector('td.page').textContent = entry.page.name;
+      setPopoverDetail(popover, entry, '');
+      popover.querySelector('table').className = "";
+      if (baselineVersion !== undefined) {
+        entry = baselineVersion.getEntry(entry);
+        setPopoverDetail(popover, entry, '.compare');
+        popover.querySelector('table').className = "compare";
+      }
+    }
+
+    function setPopoverDetail(popover, entry, prefix) {
+      let node = (name) => popover.querySelector(prefix + name);
+      if (entry == undefined) {
+        node('.version').textContent = baselineVersion.name;
+        node('.time').textContent = '-';
+        node('.timeVariance').textContent = '-';
+        node('.percent').textContent = '-';
+        node('.percentPerEntry').textContent = '-';
+        node('.percentVariance').textContent  = '-';
+        node('.count').textContent =  '-';
+        node('.countVariance').textContent = '-';
+        node('.timeImpact').textContent = '-';
+        node('.timePercentImpact').textContent = '-';
+      } else {
+        node('.version').textContent = entry.page.version.name;
+        node('.time').textContent = ms(entry._time, false);
+        node('.timeVariance').textContent
+            = percent(entry.timeVariancePercent, false);
+        node('.percent').textContent = percent(entry.timePercent, false);
+        node('.percentPerEntry').textContent
+            = percent(entry.timePercentPerEntry, false);
+        node('.percentVariance').textContent
+            = percent(entry.timePercentVariancePercent, false);
+        node('.count').textContent = count(entry._count, false);
+        node('.countVariance').textContent
+            = percent(entry.timeVariancePercent, false);
+        node('.timeImpact').textContent
+            = ms(entry.getTimeImpact(false), false);
+        node('.timePercentImpact').textContent
+            = percent(entry.getTimeImpactVariancePercent(false), false);
+      }
+    }
+  </script>
+  <script>
+  "use strict"
+    // =========================================================================
+    // Helpers
+    function $(id) {
+      return document.getElementById(id)
+    }
+
+    function removeAllChildren(node) {
+      while (node.firstChild) {
+        node.removeChild(node.firstChild);
+      }
+    }
+
+    function selectOption(select, match) {
+      let options = select.options;
+      for (let i = 0; i < options.length; i++) {
+        if (match(i, options[i])) {
+          select.selectedIndex = i;
+          return;
+        }
+      }
+    }
+
+    function addCodeSearchButton(entry, node) {
+      if (entry.isGroup) return;
+      let button = document.createElement("div");
+      button.textContent = '?'
+      button.className = "codeSearch"
+      button.addEventListener('click', handleCodeSearch);
+      node.appendChild(button);
+      return node;
+    }
+
+    function td(tr, content, className) {
+      let td = document.createElement("td");
+      if (content[0] == '<') {
+        td.innerHTML = content;
+      } else {
+        td.textContent = content;
+      }
+      td.className = className
+      tr.appendChild(td);
+      return td
+    }
+
+    function nodeIndex(node) {
+      let children = node.parentNode.childNodes,
+        i = 0;
+      for (; i < children.length; i++) {
+        if (children[i] == node) {
+          return i;
+        }
+      }
+      return -1;
+    }
+
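+    // Adds or removes cssClass on node; if toggleState is given, forces that
+    // state instead of toggling.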
+    function toggleCssClass(node, cssClass, toggleState) {
+      let index = -1;
+      let classes;
+      if (node.className != undefined) {
+        classes = node.className.split(' ');
+        index = classes.indexOf(cssClass);
+      }
+      if (index == -1) {
+        if (toggleState === false) return;
+        node.className += ' ' + cssClass;
+        return;
+      }
+      if (toggleState === true) return;
+      classes.splice(index, 1);
+      node.className = classes.join(' ');
+    }
+
+    function NameComparator(a, b) {
+      if (a.name > b.name) return 1;
+      if (a.name < b.name) return -1;
+      return 0
+    }
+
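+    // Formats a value with the given precision and unit; when a baseline is
+    // selected (and showDiff isn't false) it is rendered as a signed delta
+    // with a trailing Δ.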
+    function diffSign(value, digits, unit, showDiff) {
+      if (showDiff === false || baselineVersion == undefined) {
+        if (value === undefined) return '';
+        return value.toFixed(digits) + unit;
+      }
+      return (value >= 0 ? '+' : '') + value.toFixed(digits) + unit + 'Δ';
+    }
+
+    function ms(value, showDiff) {
+      return diffSign(value, 1, 'ms', showDiff);
+    }
+
+    function count(value, showDiff) {
+      return diffSign(value, 0, '#', showDiff);
+    }
+
+    function percent(value, showDiff) {
+      return diffSign(value, 1, '%', showDiff);
+    }
+
+  </script>
+  <script>
+  "use strict"
+    // =========================================================================
+    // EventHandlers
+    function handleBodyLoad() {
+      $('uploadInput').focus();
+      if (defaultData) {
+        handleLoadJSON(defaultData);
+      } else if (window.location.protocol !== 'file:') {
+        tryLoadDefaultResults();
+      }
+    }
+
+    function tryLoadDefaultResults() {
+      // Try to load a results.json file adjacent to this file.
+      let xhr = new XMLHttpRequest();
+      // The markers on the following line can be used to replace the url easily
+      // with scripts.
+      xhr.open('GET', /*results-url-start*/'results.json'/*results-url-end*/, true);
+      xhr.onreadystatechange = function(e) {
+        if (this.readyState !== XMLHttpRequest.DONE || this.status !== 200) return;
+        handleLoadText(this.responseText);
+      };
+      xhr.send();
+    }
+
+    function handleAppendFile() {
+      let files = document.getElementById("appendInput").files;
+      loadFiles(files, true);
+    }
+
+    function handleLoadFile() {
+      let files = document.getElementById("uploadInput").files;
+      loadFiles(files, false)
+    }
+
+    function loadFiles(files, append) {
+      let file = files[0];
+      let reader = new FileReader();
+
+      reader.onload = function(evt) {
+        handleLoadText(this.result, append, file.name);
+      }
+      reader.readAsText(file);
+    }
+
+    function handleLoadText(text, append, fileName) {
+      try {
+        handleLoadJSON(JSON.parse(text), append, fileName);
+      } catch(e) {
+        if (!fileName.endsWith('.txt')) {
+          alert(`Error parsing "${fileName}"`);
+          console.error(e);
+        }
+        handleLoadTXT(text, append, fileName);
+      }
+    }
+
+    function getStateFromParams() {
+      let query = window.location.search.substr(1);
+      let result = {};
+      query.split("&").forEach((part) => {
+        let item = part.split("=");
+        let key = decodeURIComponent(item[0])
+        result[key] = decodeURIComponent(item[1]);
+      });
+      return result;
+    }
+
+    function handleLoadJSON(json, append, fileName) {
+      let isFirstLoad = pages === undefined;
+      json = fixClusterTelemetryResults(json);
+      json = fixTraceImportJSON(json);
+      json = fixSingleVersionJSON(json, fileName);
+      if (append && !isFirstLoad) {
+        json = createUniqueVersions(json)
+      }
+      if (!append || isFirstLoad) {
+        pages = new Pages();
+        versions = Versions.fromJSON(json);
+      } else {
+        Versions.fromJSON(json).forEach(e => versions.add(e))
+      }
+      displayResultsAfterLoading(isFirstLoad)
+    }
+
+    function handleLoadTXT(txt, append, fileName) {
+      let isFirstLoad = pages === undefined;
+      // Load raw RCS output which contains a single page
+      if (!append || isFirstLoad) {
+        pages = new Pages();
+        versions = new Versions()
+      }
+      versions.add(Version.fromTXT(fileName, txt))
+      displayResultsAfterLoading(isFirstLoad)
+    }
+
+    function displayResultsAfterLoading(isFirstLoad) {
+      let state = getStateFromParams();
+      initialize()
+      if (isFirstLoad && !popHistoryState(state) && selectedPage) {
+        showEntry(selectedPage.total);
+        return;
+      }
+      selectedPage = versions.versions[0].pages[0]
+      if (selectedPage == undefined) return;
+      showPage(selectedPage);
+    }
+
+    function fixClusterTelemetryResults(json) {
+      // Convert CT results to callstats compatible JSON
+      // Input:
+      // { VERSION_NAME: { PAGE: { METRIC: { "count": {XX}, "duration": {XX} }.. }}.. }
+      let firstEntry;
+      for (let key in json) {
+        firstEntry = json[key];
+        break;
+      }
+      // Return the original JSON if it is not a CT result.
+      if (firstEntry.pairs === undefined) return json;
+      // The results include already the group totals, remove them by filtering.
+      let groupNames = new Set(Array.from(Group.groups.values()).map(e => e.name));
+      let result = Object.create(null);
+      for (let file_name in json) {
+        let entries = [];
+        let file_data = json[file_name].pairs;
+        for (let name in file_data) {
+          if(name != "Total" && groupNames.has(name)) continue;
+          let entry = file_data[name];
+          let count = entry.count;
+          let time = entry.time;
+          entries.push([name, time, 0, 0, count, 0, 0]);
+        } 
+        let domain = file_name.split("/").slice(-1)[0];
+        result[domain] = entries;
+      }
+      return {__proto__:null, ClusterTelemetry: result};
+    }
+
+    function fixTraceImportJSON(json) {
+      // Fix json file that was created by converting a trace json output
+      if (!('telemetry-results' in json)) return json;
+      // { telemetry-results: { PAGE:[ { METRIC: [ COUNT TIME ], ... }, ... ]}}
+      let version_data = {__proto__:null};
+      json = json["telemetry-results"];
+      for (let page_name in json) {
+        if (page_name == "placeholder") continue;
+        let page_data = {
+              __proto__:null,
+              Total: {
+                duration: {average: 0, stddev: 0},
+                count: {average:0, stddev: 0}
+              }
+            };
+        let page = json[page_name];
+        for (let slice of page ) {
+          for (let metric_name in slice) {
+            if (metric_name == "Blink_V8") continue;
+            // sum up entries
+            if (!(metric_name in page_data)) {
+              page_data[metric_name] = {
+                duration: {average: 0, stddev: 0},
+                count: {average:0, stddev: 0}
+              }
+            }
+            let [metric_count, metric_duration] = slice[metric_name]
+            let metric = page_data[metric_name];
+            const kMicroToMilli = 1/1000;
+            metric.duration.average += metric_duration * kMicroToMilli;
+            metric.count.average += metric_count;
+
+            if (metric_name.startsWith('Blink_')) continue;
+            let total = page_data['Total'];
+            total.duration.average += metric_duration * kMicroToMilli;
+            total.count.average += metric_count;
+          } 
+        }
+        version_data[page_name] = page_data;
+      }
+      return version_data;
+    }
+
+    function fixSingleVersionJSON(json, name) {
+      // Try to detect the single-version case, where we're missing the toplevel
+      // version object. The incoming JSON is of the form:
+      //   { PAGE: ..., PAGE_2: ... }
+      // Instead of the default multi-page JSON:
+      //    {"Version 1": { "Page 1": ..., ...}, "Version 2": {...}, ...}
+      // In this case insert a single "Default" version as top-level entry.
+      let firstProperty = (object) => {
+        for (let key in object) return object[key];
+      };
+      let maybePage = firstProperty(json);
+      let maybeMetrics = firstProperty(maybePage);
+      let tempName = name ? name : new Date().toISOString();
+      tempName = window.prompt('Enter a name for the loaded file:', tempName);
+      if ('count' in maybeMetrics && 'duration' in maybeMetrics) {
+        return {[tempName]: json}
+      }
+      // Legacy fallback where the metrics are encoded as arrays:
+      //  { PAGE: [[metric_name, ...], [...], ]}
+      if (Array.isArray(maybeMetrics)) {
+        return {[tempName]: json}
+      }
+      return json
+    }
+
+    let appendIndex = 0;
+    function createUniqueVersions(json) {
+      // Make sure all toplevel entries are unique names and added properly
+      appendIndex++;
+      let result = {__proto__:null}
+      for (let key in json) {
+        result[key+"_"+appendIndex] = json[key];
+      }
+      return result
+    }
+
+    function handleToggleGroup(event) {
+      let group = event.target.parentNode.parentNode.entry;
+      toggleGroup(selectedPage.get(group.name));
+    }
+
+    function handleSelectPage(select, event) {
+      let option = select.options[select.selectedIndex];
+      if (select.id == "select_0") {
+        showSelectedEntryInPage(option.page);
+      } else {
+        let columnIndex = select.id.split('_')[1];
+        showPageInColumn(option.page, columnIndex);
+      }
+    }
+
+    function handleSelectVersion(select, event) {
+      let option = select.options[select.selectedIndex];
+      let version = option.version;
+      if (select.id == "selectVersion_0") {
+        let page = version.get(selectedPage.name);
+        showSelectedEntryInPage(page);
+      } else {
+        let columnIndex = select.id.split('_')[1];
+        let pageSelect = $('select_' + columnIndex);
+        let page = pageSelect.options[pageSelect.selectedIndex].page;
+        page = version.get(page.name);
+        showPageInColumn(page, columnIndex);
+      }
+    }
+
+    function handleSelectDetailRow(table, event) {
+      if (event.target.tagName != 'TD') return;
+      let tr = event.target.parentNode;
+      if (tr.tagName != 'TR') return;
+      if (tr.entry === undefined) return;
+      selectEntry(tr.entry, true);
+    }
+
+    function handleSelectRow(table, event, fromDetail) {
+      if (event.target.tagName != 'TD') return;
+      let tr = event.target.parentNode;
+      if (tr.tagName != 'TR') return;
+      if (tr.entry === undefined) return;
+      selectEntry(tr.entry, false);
+    }
+
+    function handleSelectBaseline(select, event) {
+      let option = select.options[select.selectedIndex];
+      baselineVersion = option.version;
+      let showingDiff = baselineVersion !== undefined;
+      let body = $('body');
+      toggleCssClass(body, 'diff', showingDiff);
+      toggleCssClass(body, 'noDiff', !showingDiff);
+      showPage(selectedPage);
+      if (selectedEntry === undefined) return;
+      selectEntry(selectedEntry, true);
+    }
+
+    function findEntry(event) {
+      let target = event.target;
+      while (target.entry === undefined) {
+        target = target.parentNode;
+        if (!target) return undefined;
+      }
+      return target.entry;
+    }
+
+    function handleUpdatePopover(event) {
+      let popover = $('popover');
+      popover.style.left = event.pageX + 'px';
+      popover.style.top = event.pageY + 'px';
+      popover.style.display = 'none';
+      popover.style.display = event.shiftKey ? 'block' : 'none';
+      let entry = findEntry(event);
+      if (entry === undefined) return;
+      showPopover(entry);
+    }
+
+    function handleToggleVersionOrPageEnable(event) {
+      let item = this.item;
+      if (item === undefined) return;
+      item.enabled = this.checked;
+      initialize();
+      let page = selectedPage;
+      if (page === undefined || !page.version.enabled) {
+        page = versions.getEnabledPage(page.name);
+      }
+      if (!page.enabled) {
+        page = page.getNextPage();
+      }
+      showPage(page);
+    }
+
+    function handleToggleContentVisibility(event) {
+      let content = event.target.contentNode;
+      toggleCssClass(content, 'hidden');
+    }
+
+    function handleCodeSearch(event) {
+      let entry = findEntry(event);
+      if (entry === undefined) return;
+      let url = "https://cs.chromium.org/search/?sq=package:chromium&type=cs&q=";
+      let name = entry.name;
+      if (name.startsWith("API_")) {
+        name = name.substring(4);
+      }
+      url += encodeURIComponent(name) + "+file:src/v8/src";
+      window.open(url,'_blank');
+    }
+  </script>
+  <script>
+  "use strict"
+    // =========================================================================
+    class Versions {
+      constructor() {
+        this.versions = [];
+      }
+      add(version) {
+        this.versions.push(version)
+      }
+      getPageVersions(page) {
+        let result = [];
+        this.versions.forEach((version) => {
+          if (!version.enabled) return;
+          let versionPage = version.get(page.name);
+          if (versionPage  !== undefined) result.push(versionPage);
+        });
+        return result;
+      }
+      get length() {
+        return this.versions.length
+      }
+      get(index) {
+        return this.versions[index]
+      }
+      getByName(name) {
+        return this.versions.find((each) => each.name == name);
+      }
+      forEach(f) {
+        this.versions.forEach(f);
+      }
+      sort() {
+        this.versions.sort(NameComparator);
+      }
+      getEnabledPage(name) {
+        for (let i = 0; i < this.versions.length; i++) {
+          let version = this.versions[i];
+          if (!version.enabled) continue;
+          let page = version.get(name);
+          if (page !== undefined) return page;
+        }
+      }
+
+      static fromJSON(json) {
+        let versions = new Versions();
+        for (let version in json) {
+          versions.add(Version.fromJSON(version, json[version]));
+        }
+        versions.sort();
+        return versions;
+      }
+    }
+
+    class Version {
+      constructor(name) {
+        this.name = name;
+        this.enabled = true;
+        this.pages = [];
+      }
+      add(page) {
+        this.pages.push(page);
+      }
+      indexOf(name) {
+        for (let i = 0; i < this.pages.length; i++) {
+          if (this.pages[i].name == name) return i;
+        }
+        return -1;
+      }
+      getNextPage(page) {
+        if (this.length == 0) return undefined;
+        return this.pages[(this.indexOf(page.name) + 1) % this.length];
+      }
+      get(name) {
+        let index = this.indexOf(name);
+        if (0 <= index) return this.pages[index];
+        return undefined
+      }
+      get length() {
+        return this.pages.length
+      }
+      getEntry(entry) {
+        if (entry === undefined) return undefined;
+        let page = this.get(entry.page.name);
+        if (page === undefined) return undefined;
+        return page.get(entry.name);
+      }
+      forEachEntry(fun) {
+        this.forEachPage((page) => {
+          page.forEach(fun);
+        });
+      }
+      forEachPage(fun) {
+        this.pages.forEach((page) => {
+          if (!page.enabled) return;
+          fun(page);
+        })
+      }
+      allEntries() {
+        let map = new Map();
+        this.forEachEntry((group, entry) => {
+          if (!map.has(entry.name)) map.set(entry.name, entry);
+        });
+        return Array.from(map.values());
+      }
+      getTotalValue(name, property) {
+        if (name === undefined) name = this.pages[0].total.name;
+        let sum = 0;
+        this.forEachPage((page) => {
+          let entry = page.get(name);
+          if (entry !== undefined) sum += entry[property];
+        });
+        return sum;
+      }
+      getTotalTime(name, showDiff) {
+        return this.getTotalValue(name, showDiff === false ? '_time' : 'time');
+      }
+      getTotalTimePercent(name, showDiff) {
+        if (baselineVersion === undefined || showDiff === false) {
+          // Return the overall average percent of the given entry name.
+          return this.getTotalValue(name, 'time') /
+            this.getTotalTime('Group-Total') * 100;
+        }
+        // Otherwise return the difference to the sum of the baseline version.
+        let baselineValue = baselineVersion.getTotalTime(name, false);
+        let total = this.getTotalValue(name, '_time');
+        return (total / baselineValue - 1)  * 100;
+      }
+      getTotalTimeVariance(name, showDiff) {
+        // Calculate the overall error for a given entry name
+        let sum = 0;
+        this.forEachPage((page) => {
+          let entry = page.get(name);
+          if (entry === undefined) return;
+          sum += entry.timeVariance * entry.timeVariance;
+        });
+        return Math.sqrt(sum);
+      }
+      getTotalTimeVariancePercent(name, showDiff) {
+        return this.getTotalTimeVariance(name, showDiff) /
+          this.getTotalTime(name, showDiff) * 100;
+      }
+      getTotalCount(name, showDiff) {
+        return this.getTotalValue(name, showDiff === false ? '_count' : 'count');
+      }
+      getAverageTimeImpact(name, showDiff) {
+        return this.getTotalTime(name, showDiff) / this.pages.length;
+      }
+      getPagesByPercentImpact(name) {
+        let sortedPages =
+          this.pages.filter((each) => {
+            return each.get(name) !== undefined
+          });
+        sortedPages.sort((a, b) => {
+          return b.get(name).timePercent - a.get(name).timePercent;
+        });
+        return sortedPages;
+      }
+      sort() {
+        this.pages.sort(NameComparator)
+      }
+
+      static fromJSON(name, data) {
+        let version = new Version(name);
+        for (let pageName in data) {
+          version.add(PageVersion.fromJSON(version, pageName, data[pageName]));
+        }
+        version.sort();
+        return version;
+      }
+
+      static fromTXT(name, txt) {
+        let version = new Version(name);
+        let pageName = "RAW DATA";
+        version.add(PageVersion.fromTXT(version, pageName, txt));
+        return version;
+      }
+    }
+
+    class Pages extends Map {
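+      // Normalizes page names by dropping a leading 'www.' so the same site
+      // maps to a single Page instance across versions; get() also creates
+      // the Page on first use.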
+      get(name) {
+        if (name.indexOf('www.') == 0) {
+          name = name.substring(4);
+        }
+        if (!this.has(name)) {
+          this.set(name, new Page(name));
+        }
+        return super.get(name);
+      }
+    }
+
+    class Page {
+      constructor(name) {
+        this.name = name;
+        this.enabled = true;
+        this.versions = [];
+      }
+      add(page) {
+        this.versions.push(page);
+      }
+    }
+
+    class PageVersion {
+      constructor(version, page) {
+        this.page = page;
+        this.page.add(this);
+        this.total = Group.groups.get('total').entry();
+        this.total.isTotal = true;
+        this.unclassified = new UnclassifiedEntry(this)
+        this.groups = [
+          this.total,
+          Group.groups.get('ic').entry(),
+          Group.groups.get('optimize-background').entry(),
+          Group.groups.get('optimize').entry(),
+          Group.groups.get('compile-background').entry(),
+          Group.groups.get('compile').entry(),
+          Group.groups.get('parse-background').entry(),
+          Group.groups.get('parse').entry(),
+          Group.groups.get('blink').entry(),
+          Group.groups.get('callback').entry(),
+          Group.groups.get('api').entry(),
+          Group.groups.get('gc').entry(),
+          Group.groups.get('javascript').entry(),
+          Group.groups.get('runtime').entry(),
+          this.unclassified
+        ];
+        this.entryDict = new Map();
+        this.groups.forEach((entry) => {
+          entry.page = this;
+          this.entryDict.set(entry.name, entry);
+        });
+        this.version = version;
+      }
+      toString() {
+        return this.version.name + ": " + this.name;
+      }
+      urlParams() {
+        return { version: this.version.name, page: this.name};
+      }
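+      // Entries are assigned to the first group whose regexp matches. The
+      // 'runtime' catch-all (/.*/) claims anything not matched earlier, so the
+      // trailing UnclassifiedEntry is never filled here; it instead derives
+      // its time and count as the remainder of the page total.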
+      add(entry) {
+        // Ignore accidentally added Group entries.
+        if (entry.name.startsWith(GroupedEntry.prefix)) return;
+        entry.page = this;
+        this.entryDict.set(entry.name, entry);
+        for (let group of this.groups) {
+          if (group.add(entry)) return;
+        }
+        console.error("Should not get here", entry);
+      }
+      get(name) {
+        return this.entryDict.get(name)
+      }
+      getEntry(entry) {
+        if (entry === undefined) return undefined;
+        return this.get(entry.name);
+      }
+      get length() {
+        return this.versions.length
+      }
+      get name() { return this.page.name }
+      get enabled() { return this.page.enabled }
+      forEachSorted(referencePage, func) {
+        // Iterate over all the entries in the order they appear on the
+        // reference page.
+        referencePage.forEach((parent, referenceEntry) => {
+          let entry;
+          if (parent) parent = this.entryDict.get(parent.name);
+          if (referenceEntry) entry = this.entryDict.get(referenceEntry.name);
+          func(parent, entry, referenceEntry);
+        });
+      }
+      forEach(fun) {
+        this.forEachGroup((group) => {
+          fun(undefined, group);
+          group.forEach((entry) => {
+            fun(group, entry)
+          });
+        });
+      }
+      forEachGroup(fun) {
+        this.groups.forEach(fun)
+      }
+      sort() {
+        this.groups.sort((a, b) => {
+          return b.time - a.time;
+        });
+        this.groups.forEach((group) => {
+          group.sort()
+        });
+      }
+      distanceFromTotalPercent() {
+        let sum = 0;
+        this.groups.forEach(group => {
+          if (group == this.total) return;
+          let value = group.getTimePercentImpact() -
+              this.getEntry(group).timePercent;
+          sum += value * value;
+        });
+        return sum;
+      }
+      getNextPage() {
+        return this.version.getNextPage(this);
+      }
+
+      static fromJSON(version, name, data) {
+        let page = new PageVersion(version, pages.get(name));
+        // Distinguish between the legacy format which just uses Arrays,
+        // or the new object style.
+        if (Array.isArray(data)) {
+          for (let i = 0; i < data.length; i++) {
+            page.add(Entry.fromLegacyJSON(i, data[data.length - i - 1]));
+          }
+        } else {
+          let position = 0;
+          for (let metric_name in data) {
+            page.add(Entry.fromJSON(position, metric_name, data[metric_name]));
+            position++;
+          }
+        }
+        page.sort();
+        return page
+      }
+
+      static fromTXT(version, name, txt) {
+        let pageVersion = new PageVersion(version, pages.get(name));
+        let lines = txt.split('\n');
+        let split = / +/g
+        // Skip the first two lines (HEADER and SEPARATOR)
+        for (let i = 2; i < lines.length; i++) {
+          let line = lines[i].trim().split(split)
+          if (line.length != 5) continue;
+          let position = i-2;
+          pageVersion.add(Entry.fromTXT(position, line));
+        }
+        return pageVersion;
+      }
+    }
+
+
+    class Entry {
+      constructor(position, name, time, timeVariance, timeVariancePercent,
+        count,
+        countVariance, countVariancePercent) {
+        this.position = position;
+        this.name = name;
+        this._time = time;
+        this._timeVariance = timeVariance;
+        this._timeVariancePercent = timeVariancePercent;
+        this._count = count;
+        this.countVariance = countVariance;
+        this.countVariancePercent = countVariancePercent;
+        this.page = undefined;
+        this.parent = undefined;
+        this.isTotal = false;
+      }
+      urlParams() {
+        let params = this.page.urlParams();
+        params.entry = this.name;
+        return params;
+      }
+      getCompareWithBaseline(value, property) {
+        if (baselineVersion == undefined) return value;
+        let baselineEntry = baselineVersion.getEntry(this);
+        if (!baselineEntry) return value;
+        if (baselineVersion === this.page.version) return value;
+        return value - baselineEntry[property];
+      }
+      cssClass() {
+        return ''
+      }
+      get time() {
+        return this.getCompareWithBaseline(this._time, '_time');
+      }
+      get count() {
+        return this.getCompareWithBaseline(this._count, '_count');
+      }
+      get timePercent() {
+        let value = this._time / this.page.total._time * 100;
+        if (baselineVersion == undefined) return value;
+        let baselineEntry = baselineVersion.getEntry(this);
+        if (!baselineEntry) return value;
+        if (baselineVersion === this.page.version) return value;
+        return (this._time - baselineEntry._time) / this.page.total._time *
+          100;
+      }
+      get timePercentPerEntry() {
+        let value = this._time / this.page.total._time * 100;
+        if (baselineVersion == undefined) return value;
+        let baselineEntry = baselineVersion.getEntry(this);
+        if (!baselineEntry) return value;
+        if (baselineVersion === this.page.version) return value;
+        return (this._time / baselineEntry._time - 1) * 100;
+      }
+      get timePercentVariancePercent() {
+        // Get the absolute values for the percentages
+        return this.timeVariance / this.page.total._time * 100;
+      }
+      getTimeImpact(showDiff) {
+        return this.page.version.getTotalTime(this.name, showDiff);
+      }
+      getTimeImpactVariancePercent(showDiff) {
+        return this.page.version.getTotalTimeVariancePercent(this.name, showDiff);
+      }
+      getTimePercentImpact(showDiff) {
+        return this.page.version.getTotalTimePercent(this.name, showDiff);
+      }
+      getCountImpact(showDiff) {
+        return this.page.version.getTotalCount(this.name, showDiff);
+      }
+      getAverageTimeImpact(showDiff) {
+        return this.page.version.getAverageTimeImpact(this.name, showDiff);
+      }
+      getPagesByPercentImpact() {
+        return this.page.version.getPagesByPercentImpact(this.name);
+      }
+      get isGroup() {
+        return false
+      }
+      get timeVariance() {
+        return this._timeVariance
+      }
+      get timeVariancePercent() {
+        return this._timeVariancePercent
+      }
+
+      static fromLegacyJSON(position, data) {
+        return new Entry(position, ...data);
+      }
+
+      static fromJSON(position, name, data) {
+        let time = data.duration;
+        let count = data.count;
+        return new Entry(position, name, time.average, time.stddev, 0,
+                        count.average, count.stddev, 0);
+      }
+
+      static fromTXT(position, splitLine) {
+        let [name, time, timePercent, count, countPercent] = splitLine;
+        time = time.split('ms')
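+        // split('ms') yields e.g. ['12.3', '']; Number.parseFloat coerces the
+        // array to the string '12.3,' and parses the leading number.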
+        let timeDeviation = 0, countDeviation = 0;
+        let timeDeviationPercent = 0, countDeviationPercent = 0
+        return new Entry(position, name,
+          Number.parseFloat(time), timeDeviation, timeDeviationPercent,
+          Number.parseInt(count), countDeviation, countDeviationPercent)
+      }
+    }
+
+    class Group {
+      constructor(name, regexp, color, enabled=true, addsToTotal=true) {
+        this.name = name;
+        this.regexp = regexp;
+        this.color = color;
+        this.enabled = enabled;
+        this.addsToTotal = addsToTotal;
+      }
+      entry() { return new GroupedEntry(this) };
+    }
+    Group.groups = new Map();
+    Group.add = function(name, group) {
+      this.groups.set(name, group);
+      return group;
+    }
+    Group.add('total', new Group('Total', /.*Total.*/, '#BBB', true, false));
+    Group.add('ic', new Group('IC', /.*IC_.*/, "#3366CC"));
+    Group.add('optimize-background', new Group('Optimize-Background',
+        /(.*OptimizeBackground.*)/, "#702000"));
+    Group.add('optimize', new Group('Optimize',
+        /StackGuard|.*Optimize.*|.*Deoptimize.*|Recompile.*/, "#DC3912"));
+    Group.add('compile-background', new Group('Compile-Background',
+        /(.*CompileBackground.*)/, "#b08000"));
+    Group.add('compile', new Group('Compile',
+        /(^Compile.*)|(.*_Compile.*)/, "#FFAA00"));
+    Group.add('parse-background',
+        new Group('Parse-Background', /.*ParseBackground.*/, "#c05000"));
+    Group.add('parse', new Group('Parse', /.*Parse.*/, "#FF6600"));
+    Group.add('callback', new Group('Blink C++', /.*Callback.*/, "#109618"));
+    Group.add('api', new Group('API', /.*API.*/, "#990099"));
+    Group.add('gc-custom', new Group('GC-Custom', /GC_Custom_.*/, "#0099C6"));
+    Group.add('gc-background',
+        new Group('GC-Background', /.*GC.*BACKGROUND.*/, "#00597c"));
+    Group.add('gc', new Group('GC', /GC_.*|AllocateInTargetSpace/, "#00799c"));
+    Group.add('javascript', new Group('JavaScript', /JS_Execution/, "#DD4477"));
+    Group.add('runtime', new Group('V8 C++', /.*/, "#88BB00"));
+    Group.add('blink', new Group('Blink RCS', /.*Blink_.*/, "#006600", false, false));
+    Group.add('unclassified', new Group('Unclassified', /.*/, "#000", false));
+
+    class GroupedEntry extends Entry {
+      constructor(group) {
+        super(0, GroupedEntry.prefix + group.name, 0, 0, 0, 0, 0, 0);
+        this.group = group;
+        this.entries = [];
+        this.missingEntries = null;
+        this.addsToTotal = group.addsToTotal;
+      }
+      get regexp() { return this.group.regexp }
+      get color() { return this.group.color }
+      get enabled() { return this.group.enabled }
+      add(entry) {
+        if (!this.regexp.test(entry.name)) return false;
+        this._time += entry.time;
+        this._count += entry.count;
+        // TODO: sum up variance
+        this.entries.push(entry);
+        entry.parent = this;
+        return true;
+      }
+      _initializeMissingEntries() {
+        let dummyEntryNames = new Set();
+        versions.forEach((version) => {
+          let groupEntry = version.getEntry(this);
+          if (groupEntry != this) {
+            for (let entry of groupEntry.entries) {
+              if (this.page.get(entry.name) == undefined) {
+                dummyEntryNames.add(entry.name);
+              }
+            }
+          }
+        });
+        this.missingEntries = [];
+        for (let name of dummyEntryNames) {
+          let tmpEntry = new Entry(0, name, 0, 0, 0, 0, 0, 0);
+          tmpEntry.page = this.page;
+          this.missingEntries.push(tmpEntry);
+        }
+      }
+
+      forEach(fun) {
+        // Show also all entries which are in at least one version.
+        // Concatenate our real entries.
+        if (this.missingEntries == null) {
+          this._initializeMissingEntries();
+        }
+        let tmpEntries = this.missingEntries.concat(this.entries);
+
+        // The compared entries are sorted by absolute impact.
+        tmpEntries.sort((a, b) => {
+          return b.time - a.time
+        });
+        tmpEntries.forEach(fun);
+      }
+      sort() {
+        this.entries.sort((a, b) => {
+          return b.time - a.time;
+        });
+      }
+      cssClass() {
+        if (this.page.total == this) return 'total';
+        return '';
+      }
+      get isGroup() {
+        return true
+      }
+      getVarianceForProperty(property) {
+        let sum = 0;
+        this.entries.forEach((entry) => {
+          sum += entry[property + 'Variance'] * entry[property +
+            'Variance'];
+        });
+        return Math.sqrt(sum);
+      }
+      get timeVariancePercent() {
+        if (this._time == 0) return 0;
+        return this.getVarianceForProperty('time') / this._time * 100
+      }
+      get timeVariance() {
+        return this.getVarianceForProperty('time')
+      }
+    }
+    GroupedEntry.prefix = 'Group-';
+
+    class UnclassifiedEntry extends GroupedEntry {
+      constructor(page) {
+        super(Group.groups.get('unclassified'));
+        this.page = page;
+        this._time = undefined;
+        this._count = undefined;
+      }
+      add(entry) {
+        console.log("Adding unclassified:", entry);
+        this.entries.push(entry);
+        entry.parent = this;
+        return true;
+      }
+      forEachPageGroup(fun) {
+        this.page.forEachGroup((group) => {
+          if (group == this) return;
+          if (group == this.page.total) return;
+          fun(group);
+        });
+      }
+      get time() {
+        if (this._time === undefined) {
+          this._time = this.page.total._time;
+          this.forEachPageGroup((group) => {
+            if (group.addsToTotal) this._time -= group._time;
+          });
+        }
+        return this.getCompareWithBaseline(this._time, '_time');
+      }
+      get count() {
+        if (this._count === undefined) {
+          this._count = this.page.total._count;
+          this.forEachPageGroup((group) => {
+            this._count -= group._count;
+          });
+        }
+        return this.getCompareWithBaseline(this._count, '_count');
+      }
+    }
+  </script>
+</head>
+
+<body id="body" onmousemove="handleUpdatePopover(event)" onload="handleBodyLoad()" class="noDiff">
+  <h1>Runtime Stats Komparator</h1>
+
+  <div id="results">
+    <div class="inline">
+      <h2>Data</h2>
+      <form name="fileForm">
+        <p>
+          <label for="uploadInput">Load File:</label>
+          <input id="uploadInput" type="file" name="files" onchange="handleLoadFile();" accept=".json,.txt">
+        </p>
+        <p>
+          <label for="appendInput">Append File:</label>
+          <input id="appendInput" type="file" name="files" onchange="handleAppendFile();" accept=".json,.txt">
+        </p>
+      </form>
+    </div>
+
+    <div class="inline hidden">
+      <h2>Result</h2>
+      <div class="compareSelector inline">
+        Compare against:&nbsp;<select id="baseline" onchange="handleSelectBaseline(this, event)"></select><br/>
+        <span style="color: #060">Green</span> the selected version above performs
+        better on this measurement.
+      </div>
+    </div>
+
+    <div id="versionSelector" class="inline toggleContentVisibility">
+      <h2>Versions</h2>
+      <div class="content hidden">
+        <ul></ul>
+      </div>
+    </div>
+
+    <div id="pageSelector" class="inline toggleContentVisibility">
+      <h2>Pages</h2>
+      <div class="content hidden">
+        <ul></ul>
+      </div>
+    </div>
+
+    <div id="groupSelector" class="inline toggleContentVisibility">
+      <h2>Groups</h2>
+      <div class="content hidden">
+        <ul></ul>
+      </div>
+    </div>
+
+    <div id="view">
+    </div>
+
+    <div id="detailView" class="hidden">
+      <div class="versionDetail inline toggleContentVisibility">
+        <h3><span></span></h3>
+        <div class="content">
+          <table class="versionDetailTable" onclick="handleSelectDetailRow(this, event);">
+            <thead>
+              <tr>
+                <th class="version">Version&nbsp;</th>
+                <th class="position">Pos.&nbsp;</th>
+                <th class="value time">Time▴&nbsp;</th>
+                <th class="value time">Percent&nbsp;</th>
+                <th class="value count">Count&nbsp;</th>
+              </tr>
+            </thead>
+            <tbody></tbody>
+          </table>
+        </div>
+      </div>
+      <div class="pageDetail inline toggleContentVisibility">
+        <h3>Page Comparison for <span></span></h3>
+        <div class="content">
+          <table class="pageDetailTable" onclick="handleSelectDetailRow(this, event);">
+            <thead>
+              <tr>
+                <th class="page">Page&nbsp;</th>
+                <th class="value time">Time&nbsp;</th>
+                <th class="value time">Percent▾&nbsp;</th>
+                <th class="value time hideNoDiff">%/Entry&nbsp;</th>
+                <th class="value count">Count&nbsp;</th>
+              </tr>
+            </thead>
+            <tfoot>
+              <tr>
+                <td class="page">Total:</td>
+                <td class="value time"></td>
+                <td class="value time"></td>
+                <td class="value time hideNoDiff"></td>
+                <td class="value count"></td>
+              </tr>
+            </tfoot>
+            <tbody></tbody>
+          </table>
+        </div>
+      </div>
+      <div class="impactView inline toggleContentVisibility">
+        <h3>Impact list for <span></span></h3>
+        <div class="content">
+          <table class="pageDetailTable" onclick="handleSelectDetailRow(this, event);">
+            <thead>
+              <tr>
+                <th class="page">Name&nbsp;</th>
+                <th class="value time">Time&nbsp;</th>
+                <th class="value time">Percent▾&nbsp;</th>
+                <th class="">Top Pages</th>
+              </tr>
+            </thead>
+            <tbody></tbody>
+          </table>
+        </div>
+      </div>
+    </div>
+    <div id="pageVersionGraph" class="graph hidden toggleContentVisibility">
+      <h3><span></span></h3>
+      <div class="content"></div>
+    </div>
+    <div id="pageGraph" class="graph hidden toggleContentVisibility">
+      <h3><span></span></h3>
+      <div class="content"></div>
+    </div>
+    <div id="versionGraph" class="graph hidden toggleContentVisibility">
+      <h3><span></span></h3>
+      <div class="content"></div>
+    </div>
+
+    <div id="column" class="column">
+      <div class="header">
+        <select class="version" onchange="handleSelectVersion(this, event);"></select>
+        <select class="pageVersion" onchange="handleSelectPage(this, event);"></select>
+      </div>
+      <table class="list" onclick="handleSelectRow(this, event);">
+        <thead>
+          <tr>
+            <th class="position">Pos.&nbsp;</th>
+            <th class="name">Name&nbsp;</th>
+            <th class="value time">Time&nbsp;</th>
+            <th class="value time">Percent&nbsp;</th>
+            <th class="value count">Count&nbsp;</th>
+          </tr>
+        </thead>
+        <tbody></tbody>
+      </table>
+    </div>
+  </div>
+
+  <div class="inline">
+    <h2>Usage</h2>
+    <ol>
+      <li>Build chrome.</li>
+    </ol>
+    <h3>Telemetry benchmark</h3>
+    <ol>
+      <li>Run <code>v8.browsing</code> benchmarks:
+        <pre>$CHROMIUM_DIR/tools/perf/run_benchmark run v8.browsing_desktop \
+          --browser=exact --browser-executable=$CHROMIUM_DIR/out/release/chrome \
+          --story-filter='.*2020 ' \
+          --also-run-disabled-tests
+        </pre>
+      </li>
+      <li>Install <a href="https://stedolan.github.io/jq/">jq</a>.</li>
+      <li>Convert the telemetry JSON files to callstats JSON file:
+        <pre>
+          $V8_DIR/tools/callstats-from-telemetry.sh $CHROMIUM_DIR/tools/perf/artifacts/run_XXXX
+        </pre>
+      </li>
+      <li>Load the generated <code>out.json</code></li>
+    </ol>
+    <h3>Raw approach</h3>
+    <ol>
+      <li>Install scipy, e.g. <code>sudo aptitude install python-scipy</code></li>
+      <li>Check out a known working version of webpagereplay:
+        <pre>git -C $CHROME_DIR/third_party/webpagereplay checkout 7dbd94752d1cde5536ffc623a9e10a51721eff1d</pre>
+      </li>
+      <li>Run <code>callstats.py</code> with a web-page-replay archive:
+        <pre>$V8_DIR/tools/callstats.py run \
+        --replay-bin=$CHROME_SRC/third_party/webpagereplay/replay.py \
+        --replay-wpr=$INPUT_DIR/top25.wpr \
+        --js-flags="" \
+        --with-chrome=$CHROME_SRC/out/Release/chrome \
+        --sites-file=$INPUT_DIR/top25.json</pre>
+      </li>
+      <li>Move results file to a subdirectory: <code>mkdir $VERSION_DIR; mv *.txt $VERSION_DIR</code></li>
+      <li>Repeat from step 1 with a different configuration (e.g. <code>--js-flags="--nolazy"</code>).</li>
+      <li>Create the final results file: <code>./callstats.py json $VERSION_DIR1 $VERSION_DIR2 > result.json</code></li>
+      <li>Use <code>result.json</code> on this site.</li>
+    </ol>
+  </div>
+
+  <div id="popover">
+    <div class="popoverArrow"></div>
+    <table>
+      <tr>
+        <td class="name" colspan="6"></td>
+      </tr>
+      <tr>
+        <td>Page:</td>
+        <td class="page name" colspan="6"></td>
+      </tr>
+      <tr>
+        <td>Version:</td>
+        <td class="version name" colspan="3"></td>
+        <td class="compare version name" colspan="3"></td>
+      </tr>
+      <tr>
+        <td>Time:</td>
+        <td class="time"></td><td>±</td><td class="timeVariance"></td>
+        <td class="compare time"></td><td class="compare"> ± </td><td class="compare timeVariance"></td>
+      </tr>
+      <tr>
+        <td>Percent:</td>
+        <td class="percent"></td><td>±</td><td class="percentVariance"></td>
+        <td class="compare percent"></td><td class="compare"> ± </td><td class="compare percentVariance"></td>
+      </tr>
+      <tr>
+        <td>Percent per Entry:</td>
+        <td class="percentPerEntry"></td><td colspan=2></td>
+        <td class="compare percentPerEntry"></td><td colspan=2></td>
+      </tr>
+      <tr>
+        <td>Count:</td>
+        <td class="count"></td><td>±</td><td class="countVariance"></td>
+        <td class="compare count"></td><td class="compare"> ± </td><td class="compare countVariance"></td>
+      </tr>
+      <tr>
+        <td>Overall Impact:</td>
+        <td class="timeImpact"></td><td>±</td><td class="timePercentImpact"></td>
+        <td class="compare timeImpact"></td><td class="compare"> ± </td><td class="compare timePercentImpact"></td>
+      </tr>
+    </table>
+  </div>
+</body>
+</html>
diff --git a/src/third_party/v8/tools/callstats.py b/src/third_party/v8/tools/callstats.py
new file mode 100755
index 0000000..f756757
--- /dev/null
+++ b/src/third_party/v8/tools/callstats.py
@@ -0,0 +1,777 @@
+#!/usr/bin/env python
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+'''
+Usage: callstats.py [-h] <command> ...
+
+Optional arguments:
+  -h, --help  show this help message and exit
+
+Commands:
+  run         run chrome with --runtime-call-stats and generate logs
+  stats       process logs and print statistics
+  json        process logs from several versions and generate JSON
+  help        help information
+
+For each command, you can try ./callstats.py help <command>.
+'''
+
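+# Example invocations (illustrative; adjust paths and URLs for your setup):
+#   ./callstats.py run -c out/Release/chrome --sites http://example.com
+#   ./callstats.py stats version_dir/*.txt
+#   ./callstats.py json version_dir1 version_dir2 > result.json
+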
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import argparse
+import json
+import os
+import re
+import shutil
+import subprocess
+import sys
+import tempfile
+import operator
+from callstats_groups import RUNTIME_CALL_STATS_GROUPS
+
+import numpy
+from math import sqrt
+
+
+MAX_NOF_RETRIES = 5
+
+
+# Run benchmarks.
+
+def print_command(cmd_args):
+  def fix_for_printing(arg):
+    m = re.match(r'^--([^=]+)=(.*)$', arg)
+    if m and (' ' in m.group(2) or m.group(2).startswith('-')):
+      arg = "--{}='{}'".format(m.group(1), m.group(2))
+    elif ' ' in arg:
+      arg = "'{}'".format(arg)
+    return arg
+  print(" ".join(map(fix_for_printing, cmd_args)))
+
+
+def start_replay_server(args, sites, discard_output=True):
+  with tempfile.NamedTemporaryFile(prefix='callstats-inject-', suffix='.js',
+                                   mode='wt', delete=False) as f:
+    injection = f.name
+    generate_injection(f, sites, args.refresh)
+  http_port = 4080 + args.port_offset
+  https_port = 4443 + args.port_offset
+  cmd_args = [
+      args.replay_bin,
+      "--port=%s" % http_port,
+      "--ssl_port=%s" % https_port,
+      "--no-dns_forwarding",
+      "--use_closest_match",
+      "--no-diff_unknown_requests",
+      "--inject_scripts=deterministic.js,{}".format(injection),
+      args.replay_wpr,
+  ]
+  print("=" * 80)
+  print_command(cmd_args)
+  if discard_output:
+    with open(os.devnull, 'w') as null:
+      server = subprocess.Popen(cmd_args, stdout=null, stderr=null)
+  else:
+    server = subprocess.Popen(cmd_args)
+  print("RUNNING REPLAY SERVER: %s with PID=%s" % (args.replay_bin, server.pid))
+  print("=" * 80)
+  return {'process': server, 'injection': injection}
+
+
+def stop_replay_server(server):
+  print("SHUTTING DOWN REPLAY SERVER %s" % server['process'].pid)
+  server['process'].terminate()
+  os.remove(server['injection'])
+
+
+def generate_injection(f, sites, refreshes=0):
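+  # The injected script reports stats via the %GetAndResetRuntimeCallStats
+  # native, which is why run_site() adds --allow-natives-syntax when replaying.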
+  print("""\
+(function() {
+  var s = window.sessionStorage.getItem("refreshCounter");
+  var refreshTotal = """, refreshes, """;
+  var refreshCounter = s ? parseInt(s) : refreshTotal;
+  var refreshId = refreshTotal - refreshCounter;
+  if (refreshCounter > 0) {
+    window.sessionStorage.setItem("refreshCounter", refreshCounter-1);
+  }
+  function match(url, item) {
+    if ('regexp' in item) { return url.match(item.regexp) !== null };
+    var url_wanted = item.url;
+    /* Allow automatic redirections from http to https. */
+    if (url_wanted.startsWith("http://") && url.startsWith("https://")) {
+      url_wanted = "https://" + url_wanted.substr(7);
+    }
+    return url.startsWith(url_wanted);
+  };
+  function onLoad(url) {
+    for (var item of sites) {
+      if (!match(url, item)) continue;
+      var timeout = 'timeline' in item ? 2000 * item.timeline
+                  : 'timeout'  in item ? 1000 * (item.timeout - 3)
+                  : 10000;
+      console.log("Setting time out of " + timeout + " for: " + url);
+      window.setTimeout(function() {
+        console.log("Time is out for: " + url);
+        var msg = "STATS: (" + refreshId + ") " + url;
+        %GetAndResetRuntimeCallStats(1, msg);
+        if (refreshCounter > 0) {
+          console.log(
+              "Refresh counter is " + refreshCounter + ", refreshing: " + url);
+          window.location.reload();
+        }
+      }, timeout);
+      return;
+    }
+    console.log("Ignoring: " + url);
+  };
+  var sites =
+    """, json.dumps(sites), """;
+  onLoad(window.location.href);
+})();""", file=f)
+
+def get_chrome_flags(js_flags, user_data_dir, arg_delimiter=""):
+  return [
+      "--no-default-browser-check",
+      "--no-sandbox",
+      "--disable-translate",
+      "--enable-benchmarking",
+      "--enable-stats-table",
+      "--js-flags={}{}{}".format(arg_delimiter, js_flags, arg_delimiter),
+      "--no-first-run",
+      "--user-data-dir={}{}{}".format(arg_delimiter, user_data_dir,
+                                      arg_delimiter),
+      "--data-path={}{}{}".format(arg_delimiter,
+          os.path.join(user_data_dir, 'content-shell-data'), arg_delimiter),
+    ]
+
+def get_chrome_replay_flags(args, arg_delimiter=""):
+  http_port = 4080 + args.port_offset
+  https_port = 4443 + args.port_offset
+  return [
+      "--host-resolver-rules=%sMAP *:80 localhost:%s, "  \
+                              "MAP *:443 localhost:%s, " \
+                              "EXCLUDE localhost%s" % (
+                               arg_delimiter, http_port, https_port,
+                               arg_delimiter),
+      "--ignore-certificate-errors",
+      "--disable-seccomp-sandbox",
+      "--disable-web-security",
+      "--reduce-security-for-testing",
+      "--allow-insecure-localhost",
+    ]
+
+def run_site(site, domain, args, timeout=None):
+  print("="*80)
+  print("RUNNING DOMAIN %s" % domain)
+  print("="*80)
+  result_template = "{domain}#{count}.txt" if args.repeat else "{domain}.txt"
+  count = 0
+  if timeout is None: timeout = args.timeout
+  if args.replay_wpr:
+    timeout *= 1 + args.refresh
+    timeout += 1
+  retries_since_good_run = 0
+  while count == 0 or (args.repeat is not None and count < args.repeat):
+    count += 1
+    result = result_template.format(domain=domain, count=count)
+    retries = 0
+    while args.retries is None or retries < args.retries:
+      retries += 1
+      try:
+        if args.user_data_dir:
+          user_data_dir = args.user_data_dir
+        else:
+          user_data_dir = tempfile.mkdtemp(prefix="chr_")
+        js_flags = "--runtime-call-stats"
+        if args.replay_wpr: js_flags += " --allow-natives-syntax"
+        if args.js_flags: js_flags += " " + args.js_flags
+        chrome_flags = get_chrome_flags(js_flags, user_data_dir)
+        if args.replay_wpr:
+          chrome_flags += get_chrome_replay_flags(args)
+        else:
+          chrome_flags += [ "--single-process", ]
+        if args.chrome_flags:
+          chrome_flags += args.chrome_flags.split()
+        cmd_args = [
+            "timeout", str(timeout),
+            args.with_chrome
+        ] + chrome_flags + [ site ]
+        print("- " * 40)
+        print_command(cmd_args)
+        print("- " * 40)
+        with open(result, "wt") as f:
+          with open(args.log_stderr or os.devnull, 'at') as err:
+            status = subprocess.call(cmd_args, stdout=f, stderr=err)
+        # 124 means timeout killed chrome, 0 means the user was bored first!
+        # If none of these two happened, then chrome apparently crashed, so
+        # it must be called again.
+        if status != 124 and status != 0:
+          print("CHROME CRASHED, REPEATING RUN");
+          continue
+        # If the stats file is empty, chrome must be called again.
+        if os.path.isfile(result) and os.path.getsize(result) > 0:
+          if args.print_url:
+            with open(result, "at") as f:
+              print(file=f)
+              print("URL: {}".format(site), file=f)
+          retries_since_good_run = 0
+          break
+        if retries_since_good_run > MAX_NOF_RETRIES:
+          # Abort after too many retries, no point in ever increasing the
+          # timeout.
+          print("TOO MANY EMPTY RESULTS ABORTING RUN")
+          return
+        timeout += 2 ** retries_since_good_run
+        retries_since_good_run += 1
+        print("EMPTY RESULT, REPEATING RUN ({})".format(
+            retries_since_good_run));
+      finally:
+        if not args.user_data_dir:
+          shutil.rmtree(user_data_dir)
+
+
+def read_sites_file(args):
+  try:
+    sites = []
+    try:
+      with open(args.sites_file, "rt") as f:
+        for item in json.load(f):
+          if 'timeout' not in item:
+            # This is more-or-less arbitrary.
+            item['timeout'] = int(1.5 * item['timeline'] + 7)
+          if item['timeout'] > args.timeout: item['timeout'] = args.timeout
+          sites.append(item)
+    except ValueError:
+      args.error("Warning: Could not read sites file as JSON, falling back to "
+                 "primitive file")
+      with open(args.sites_file, "rt") as f:
+        for line in f:
+          line = line.strip()
+          if not line or line.startswith('#'): continue
+          sites.append({'url': line, 'timeout': args.timeout})
+    return sites
+  except IOError as e:
+    args.error("Cannot read from {}. {}.".format(args.sites_file, e.strerror))
+    sys.exit(1)
+
+
+def read_sites(args):
+  # Determine the websites to benchmark.
+  if args.sites_file:
+    return read_sites_file(args)
+  return [{'url': site, 'timeout': args.timeout} for site in args.sites]
+
+def do_run(args):
+  sites = read_sites(args)
+  replay_server = start_replay_server(args, sites) if args.replay_wpr else None
+  # Disambiguate domains, if needed.
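+  # Each entry is [site, domain, count, timeout]; for domains that occur more
+  # than once, 'count' numbers the occurrences so the result files written by
+  # run_site() do not collide (the domain gets a '%<n>' suffix below).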
+  L = []
+  domains = {}
+  for item in sites:
+    site = item['url']
+    domain = None
+    if args.domain:
+      domain = args.domain
+    elif 'domain' in item:
+      domain = item['domain']
+    else:
+      m = re.match(r'^(https?://)?([^/]+)(/.*)?$', site)
+      if not m:
+        args.error("Invalid URL {}.".format(site))
+        continue
+      domain = m.group(2)
+    entry = [site, domain, None, item['timeout']]
+    if domain not in domains:
+      domains[domain] = entry
+    else:
+      if not isinstance(domains[domain], int):
+        domains[domain][2] = 1
+        domains[domain] = 1
+      domains[domain] += 1
+      entry[2] = domains[domain]
+    L.append(entry)
+  try:
+    # Run them.
+    for site, domain, count, timeout in L:
+      if count is not None: domain = "{}%{}".format(domain, count)
+      print((site, domain, timeout))
+      run_site(site, domain, args, timeout)
+  finally:
+    if replay_server:
+      stop_replay_server(replay_server)
+
+
+def do_run_replay_server(args):
+  sites = read_sites(args)
+  print("- " * 40)
+  print("Available URLs:")
+  for site in sites:
+    print("    "+site['url'])
+  print("- " * 40)
+  print("Launch chromium with the following commands for debugging:")
+  flags = get_chrome_flags("--runtime-call-stats --allow-natives-syntax",
+                           "/var/tmp/`date +%s`", '"')
+  flags += get_chrome_replay_flags(args, "'")
+  print("    $CHROMIUM_DIR/out/Release/chrome " + (" ".join(flags)) + " <URL>")
+  print("- " * 40)
+  replay_server = start_replay_server(args, sites, discard_output=False)
+  try:
+    replay_server['process'].wait()
+  finally:
+    stop_replay_server(replay_server)
+
+
+# Calculate statistics.
+
+def statistics(data):
+  # NOTE(V8:10269): imports moved here to mitigate the outage.
+  import scipy
+  import scipy.stats
+
+  N = len(data)
+  average = numpy.average(data)
+  median = numpy.median(data)
+  low = numpy.min(data)
+  high= numpy.max(data)
+  if N > 1:
+    # evaluate sample variance by setting delta degrees of freedom (ddof) to
+    # 1. The degree used in calculations is N - ddof
+    stddev = numpy.std(data, ddof=1)
+    # Get the endpoints of the range that contains 95% of the distribution
+    t_bounds = scipy.stats.t.interval(0.95, N-1)
+    #assert abs(t_bounds[0] + t_bounds[1]) < 1e-6
+    # sum mean to the confidence interval
+    ci = {
+        'abs': t_bounds[1] * stddev / sqrt(N),
+        'low': average + t_bounds[0] * stddev / sqrt(N),
+        'high': average + t_bounds[1] * stddev / sqrt(N)
+    }
+  else:
+    stddev = 0
+    ci = { 'abs': 0, 'low': average, 'high': average }
+  if abs(stddev) > 0.0001 and abs(average) > 0.0001:
+    ci['perc'] = t_bounds[1] * stddev / sqrt(N) / average * 100
+  else:
+    ci['perc'] = 0
+  return { 'samples': N, 'average': average, 'median': median,
+           'stddev': stddev, 'min': low, 'max': high, 'ci': ci }
+
+
+def add_category_total(entries, groups, category_prefix):
+  group_data = { 'time': 0, 'count': 0 }
+  for group_name, regexp in groups:
+    if not group_name.startswith('Group-' + category_prefix): continue
+    group_data['time'] += entries[group_name]['time']
+    group_data['count'] += entries[group_name]['count']
+  entries['Group-' + category_prefix + '-Total'] = group_data
+
+
+def read_stats(path, domain, args):
+  groups = []
+  if args.aggregate:
+    groups = [
+        ('Group-IC', re.compile(".*IC_.*")),
+        ('Group-OptimizeBackground', re.compile(".*OptimizeBackground.*")),
+        ('Group-Optimize',
+         re.compile("StackGuard|.*Optimize.*|.*Deoptimize.*|Recompile.*")),
+        ('Group-CompileBackground', re.compile("(.*CompileBackground.*)")),
+        ('Group-Compile', re.compile("(^Compile.*)|(.*_Compile.*)")),
+        ('Group-ParseBackground', re.compile(".*ParseBackground.*")),
+        ('Group-Parse', re.compile(".*Parse.*")),
+        ('Group-Callback', re.compile(".*Callback.*")),
+        ('Group-API', re.compile(".*API.*")),
+        ('Group-GC-Custom', re.compile("GC_Custom_.*")),
+        ('Group-GC-Background', re.compile(".*GC.*BACKGROUND.*")),
+        ('Group-GC', re.compile("GC_.*|AllocateInTargetSpace")),
+        ('Group-JavaScript', re.compile("JS_Execution")),
+        ('Group-Runtime', re.compile(".*"))]
+  with open(path, "rt") as f:
+    # Process the whole file and sum repeating entries.
+    entries = { 'Sum': {'time': 0, 'count': 0} }
+    for group_name, regexp in groups:
+      entries[group_name] = { 'time': 0, 'count': 0 }
+    for line in f:
+      line = line.strip()
+      # Discard headers and footers.
+      if not line: continue
+      if line.startswith("Runtime Function"): continue
+      if line.startswith("===="): continue
+      if line.startswith("----"): continue
+      if line.startswith("URL:"): continue
+      if line.startswith("STATS:"): continue
+      # We have a regular line.
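+      # Data lines have five columns: name, time ("<n>ms"), time percent,
+      # count, count percent; only fields 0, 1 and 3 are used here.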
+      fields = line.split()
+      key = fields[0]
+      time = float(fields[1].replace("ms", ""))
+      count = int(fields[3])
+      if key not in entries: entries[key] = { 'time': 0, 'count': 0 }
+      entries[key]['time'] += time
+      entries[key]['count'] += count
+      # We calculate the sum, if it's not the "total" line.
+      if key != "Total":
+        entries['Sum']['time'] += time
+        entries['Sum']['count'] += count
+        for group_name, regexp in groups:
+          if not regexp.match(key): continue
+          entries[group_name]['time'] += time
+          entries[group_name]['count'] += count
+          break
+    # Calculate the V8-Total (all groups except Callback)
+    group_data = { 'time': 0, 'count': 0 }
+    for group_name, regexp in groups:
+      if group_name == 'Group-Callback': continue
+      group_data['time'] += entries[group_name]['time']
+      group_data['count'] += entries[group_name]['count']
+    entries['Group-Total-V8'] = group_data
+    # Calculate the Parse-Total, Compile-Total and Optimize-Total groups
+    add_category_total(entries, groups, 'Parse')
+    add_category_total(entries, groups, 'Compile')
+    add_category_total(entries, groups, 'Optimize')
+    # Append the sums as single entries to domain.
+    for key in entries:
+      if key not in domain: domain[key] = { 'time_list': [], 'count_list': [] }
+      domain[key]['time_list'].append(entries[key]['time'])
+      domain[key]['count_list'].append(entries[key]['count'])
+
+
+def print_stats(S, args):
+  # Sort by ascending/descending time average, then by ascending/descending
+  # count average, then by ascending name.
+  def sort_asc_func(item):
+    return (item[1]['time_stat']['average'],
+            item[1]['count_stat']['average'],
+            item[0])
+  def sort_desc_func(item):
+    return (-item[1]['time_stat']['average'],
+            -item[1]['count_stat']['average'],
+            item[0])
+  # Sorting order is given in the command-line arguments.
+  sort_func = sort_asc_func if args.sort == "asc" else sort_desc_func
+  # Possibly limit how many elements to print.
+  L = [item for item in sorted(S.items(), key=sort_func)
+       if item[0] not in ["Total", "Sum"]]
+  N = len(L)
+  if args.limit == 0:
+    low, high = 0, N
+  elif args.sort == "desc":
+    low, high = 0, args.limit
+  else:
+    low, high = N-args.limit, N
+  # How to print entries.
+  def print_entry(key, value):
+    def stats(s, units=""):
+      conf = "{:0.1f}({:0.2f}%)".format(s['ci']['abs'], s['ci']['perc'])
+      return "{:8.1f}{} +/- {:15s}".format(s['average'], units, conf)
+    print("{:>50s}  {}  {}".format(
+      key,
+      stats(value['time_stat'], units="ms"),
+      stats(value['count_stat'])
+    ))
+  # Print and calculate partial sums, if necessary.
+  for i in range(low, high):
+    print_entry(*L[i])
+    if args.totals and args.limit != 0 and not args.aggregate:
+      if i == low:
+        partial = { 'time_list': [0] * len(L[i][1]['time_list']),
+                    'count_list': [0] * len(L[i][1]['count_list']) }
+      assert len(partial['time_list']) == len(L[i][1]['time_list'])
+      assert len(partial['count_list']) == len(L[i][1]['count_list'])
+      for j, v in enumerate(L[i][1]['time_list']):
+        partial['time_list'][j] += v
+      for j, v in enumerate(L[i][1]['count_list']):
+        partial['count_list'][j] += v
+  # Print totals, if necessary.
+  if args.totals:
+    print('-' * 80)
+    if args.limit != 0 and not args.aggregate:
+      partial['time_stat'] = statistics(partial['time_list'])
+      partial['count_stat'] = statistics(partial['count_list'])
+      print_entry("Partial", partial)
+    print_entry("Sum", S["Sum"])
+    print_entry("Total", S["Total"])
+
+
+def do_stats(args):
+  domains = {}
+  for path in args.logfiles:
+    filename = os.path.basename(path)
+    m = re.match(r'^([^#]+)(#.*)?$', filename)
+    domain = m.group(1)
+    if domain not in domains: domains[domain] = {}
+    read_stats(path, domains[domain], args)
+  if args.aggregate:
+    create_total_page_stats(domains, args)
+  for i, domain in enumerate(sorted(domains)):
+    if len(domains) > 1:
+      if i > 0: print()
+      print("{}:".format(domain))
+      print('=' * 80)
+    domain_stats = domains[domain]
+    for key in domain_stats:
+      domain_stats[key]['time_stat'] = \
+          statistics(domain_stats[key]['time_list'])
+      domain_stats[key]['count_stat'] = \
+          statistics(domain_stats[key]['count_list'])
+    print_stats(domain_stats, args)
+
+
+# Create a Total page with all entries summed up.
+def create_total_page_stats(domains, args):
+  total = {}
+  def sum_up(parent, key, other):
+    sums = parent[key]
+    for i, item in enumerate(other[key]):
+      if i >= len(sums):
+        sums.extend([0] * (i - len(sums) + 1))
+      if item is not None:
+        sums[i] += item
+  # Exclude adwords and speedometer pages from the aggregate total, since
+  # adwords dominates execution time and speedometer is measured elsewhere.
+  excluded_domains = ['adwords.google.com', 'speedometer-angular',
+                      'speedometer-jquery', 'speedometer-backbone',
+                      'speedometer-ember', 'speedometer-vanilla']
+  # Sum up all the entries/metrics from all non-excluded domains
+  for domain, entries in domains.items():
+    if domain in excluded_domains:
+      continue
+    for key, domain_stats in entries.items():
+      if key not in total:
+        total[key] = {}
+        total[key]['time_list'] = list(domain_stats['time_list'])
+        total[key]['count_list'] = list(domain_stats['count_list'])
+      else:
+        sum_up(total[key], 'time_list', domain_stats)
+        sum_up(total[key], 'count_list', domain_stats)
+  # Add a new "Total" page containing the summed up metrics.
+  domains['Total'] = total
+
+# Generate Raw JSON file.
+
+def _read_logs(args):
+  versions = {}
+  for path in args.logdirs:
+    if os.path.isdir(path):
+      for root, dirs, files in os.walk(path):
+        version = os.path.basename(root)
+        if version not in versions: versions[version] = {}
+        for filename in files:
+          if filename.endswith(".txt"):
+            m = re.match(r'^([^#]+)(#.*)?\.txt$', filename)
+            domain = m.group(1)
+            if domain not in versions[version]: versions[version][domain] = {}
+            read_stats(os.path.join(root, filename),
+                       versions[version][domain], args)
+
+  return versions
+
+def do_raw_json(args):
+  versions = _read_logs(args)
+
+  for version, domains in versions.items():
+    if args.aggregate:
+      create_total_page_stats(domains, args)
+    for domain, entries in domains.items():
+      raw_entries = []
+      for name, value in entries.items():
+        # We don't want the calculated sum in the JSON file.
+        if name == "Sum": continue
+        raw_entries.append({
+          'name': name,
+          'duration': value['time_list'],
+          'count': value['count_list'],
+        })
+
+      domains[domain] = raw_entries
+
+  print(json.dumps(versions, separators=(',', ':')))
+
+
+# Generate JSON file.
+
+def do_json(args):
+  versions = _read_logs(args)
+
+  for version, domains in versions.items():
+    if args.aggregate:
+      create_total_page_stats(domains, args)
+    for domain, entries in domains.items():
+      stats = []
+      for name, value in entries.items():
+        # We don't want the calculated sum in the JSON file.
+        if name == "Sum": continue
+        entry = [name]
+        for x in ['time_list', 'count_list']:
+          s = statistics(entries[name][x])
+          entry.append(round(s['average'], 1))
+          entry.append(round(s['ci']['abs'], 1))
+          entry.append(round(s['ci']['perc'], 2))
+        stats.append(entry)
+      domains[domain] = stats
+  print(json.dumps(versions, separators=(',', ':')))
+
+
+# Help.
+
+def do_help(parser, subparsers, args):
+  if args.help_cmd:
+    if args.help_cmd in subparsers:
+      subparsers[args.help_cmd].print_help()
+    else:
+      args.error("Unknown command '{}'".format(args.help_cmd))
+  else:
+    parser.print_help()
+
+
+# Main program, parse command line and execute.
+
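+# Returns True when either all or none of the given values are set; used below
+# both to require exactly one of --sites-file/--sites and to require
+# --replay-wpr and --replay-bin together.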
+def coexist(*l):
+  given = sum(1 for x in l if x)
+  return given == 0 or given == len(l)
+
+def main():
+  parser = argparse.ArgumentParser()
+  subparser_adder = parser.add_subparsers(title="commands", dest="command",
+                                          metavar="<command>")
+  subparsers = {}
+  # Command: run.
+  subparsers["run"] = subparser_adder.add_parser(
+      "run", help="Replay websites and collect runtime stats data.")
+  subparsers["run"].set_defaults(
+      func=do_run, error=subparsers["run"].error)
+  subparsers["run"].add_argument(
+      "--chrome-flags", type=str, default="",
+      help="specify additional chrome flags")
+  subparsers["run"].add_argument(
+      "--js-flags", type=str, default="",
+      help="specify additional V8 flags")
+  subparsers["run"].add_argument(
+      "-u", "--user-data-dir", type=str, metavar="<path>",
+      help="specify user data dir (default is temporary)")
+  subparsers["run"].add_argument(
+      "-c", "--with-chrome", type=str, metavar="<path>",
+      default="/usr/bin/google-chrome",
+      help="specify chrome executable to use")
+  subparsers["run"].add_argument(
+      "-r", "--retries", type=int, metavar="<num>",
+      help="specify retries if website is down (default: forever)")
+  subparsers["run"].add_argument(
+      "--no-url", dest="print_url", action="store_false", default=True,
+      help="do not include url in statistics file")
+  subparsers["run"].add_argument(
+      "--domain", type=str, default="",
+      help="specify the output file domain name")
+  subparsers["run"].add_argument(
+      "-n", "--repeat", type=int, metavar="<num>",
+      help="specify iterations for each website (default: once)")
+
+  def add_replay_args(subparser):
+    subparser.add_argument(
+        "-k", "--refresh", type=int, metavar="<num>", default=0,
+        help="specify refreshes for each iteration (default: 0)")
+    subparser.add_argument(
+        "--replay-wpr", type=str, metavar="<path>",
+        help="use the specified web page replay (.wpr) archive")
+    subparser.add_argument(
+        "--replay-bin", type=str, metavar="<path>",
+        help="specify the replay.py script typically located in " \
+             "$CHROMIUM/src/third_party/webpagereplay/replay.py")
+    subparser.add_argument(
+        "-f", "--sites-file", type=str, metavar="<path>",
+        help="specify file containing benchmark websites")
+    subparser.add_argument(
+        "-t", "--timeout", type=int, metavar="<seconds>", default=60,
+        help="specify seconds before chrome is killed")
+    subparser.add_argument(
+        "-p", "--port-offset", type=int, metavar="<offset>", default=0,
+        help="specify the offset for the replay server's default ports")
+    subparser.add_argument(
+        "-l", "--log-stderr", type=str, metavar="<path>",
+        help="specify where chrome's stderr should go (default: /dev/null)")
+    subparser.add_argument(
+        "--sites", type=str, metavar="<URL>", nargs="*",
+        help="specify benchmark website")
+  add_replay_args(subparsers["run"])
+
+  # Command: replay-server
+  subparsers["replay"] = subparser_adder.add_parser(
+      "replay", help="Run the replay server for debugging purposes")
+  subparsers["replay"].set_defaults(
+      func=do_run_replay_server, error=subparsers["replay"].error)
+  add_replay_args(subparsers["replay"])
+
+  # Command: stats.
+  subparsers["stats"] = subparser_adder.add_parser(
+      "stats", help="Analize the results file create by the 'run' command.")
+  subparsers["stats"].set_defaults(
+      func=do_stats, error=subparsers["stats"].error)
+  subparsers["stats"].add_argument(
+      "-l", "--limit", type=int, metavar="<num>", default=0,
+      help="limit how many items to print (default: none)")
+  subparsers["stats"].add_argument(
+      "-s", "--sort", choices=["asc", "desc"], default="asc",
+      help="specify sorting order (default: ascending)")
+  subparsers["stats"].add_argument(
+      "-n", "--no-total", dest="totals", action="store_false", default=True,
+      help="do not print totals")
+  subparsers["stats"].add_argument(
+      "logfiles", type=str, metavar="<logfile>", nargs="*",
+      help="specify log files to parse")
+  subparsers["stats"].add_argument(
+      "--aggregate", dest="aggregate", action="store_true", default=False,
+      help="Create aggregated entries. Adds Group-* entries at the toplevel. " \
+      "Additionally creates a Total page with all entries.")
+
+  # Command: json.
+  subparsers["json"] = subparser_adder.add_parser(
+      "json", help="Collect results file created by the 'run' command into" \
+          "a single json file.")
+  subparsers["json"].set_defaults(
+      func=do_json, error=subparsers["json"].error)
+  subparsers["json"].add_argument(
+      "logdirs", type=str, metavar="<logdir>", nargs="*",
+      help="specify directories with log files to parse")
+  subparsers["json"].add_argument(
+      "--aggregate", dest="aggregate", action="store_true", default=False,
+      help="Create aggregated entries. Adds Group-* entries at the toplevel. " \
+      "Additionally creates a Total page with all entries.")
+
+  # Command: raw-json.
+  subparsers["raw-json"] = subparser_adder.add_parser(
+      "raw-json", help="Collect raw results from 'run' command into" \
+          "a single json file.")
+  subparsers["raw-json"].set_defaults(
+      func=do_raw_json, error=subparsers["raw-json"].error)
+  subparsers["raw-json"].add_argument(
+      "logdirs", type=str, metavar="<logdir>", nargs="*",
+      help="specify directories with log files to parse")
+  subparsers["raw-json"].add_argument(
+      "--aggregate", dest="aggregate", action="store_true", default=False,
+      help="Create aggregated entries. Adds Group-* entries at the toplevel. " \
+      "Additionally creates a Total page with all entries.")
+
+  # Command: help.
+  subparsers["help"] = subparser_adder.add_parser(
+      "help", help="help information")
+  subparsers["help"].set_defaults(
+      func=lambda args: do_help(parser, subparsers, args),
+      error=subparsers["help"].error)
+  subparsers["help"].add_argument(
+      "help_cmd", type=str, metavar="<command>", nargs="?",
+      help="command for which to display help")
+
+  # Execute the command.
+  args = parser.parse_args()
+  setattr(args, 'script_path', os.path.dirname(sys.argv[0]))
+  if args.command == "run" and coexist(args.sites_file, args.sites):
+    args.error("use either option --sites-file or site URLs")
+    sys.exit(1)
+  elif args.command == "run" and not coexist(args.replay_wpr, args.replay_bin):
+    args.error("options --replay-wpr and --replay-bin must be used together")
+    sys.exit(1)
+  else:
+    args.func(args)
+
+if __name__ == "__main__":
+  sys.exit(main())
diff --git a/src/third_party/v8/tools/callstats.py.vpython b/src/third_party/v8/tools/callstats.py.vpython
new file mode 100644
index 0000000..11e3f34
--- /dev/null
+++ b/src/third_party/v8/tools/callstats.py.vpython
@@ -0,0 +1,43 @@
+# This is a vpython "spec" file.
+#
+# It describes patterns for python wheel dependencies of the callstats.py
+# script, particularly for dependencies that have compiled components
+# (since pure-python dependencies can be easily vendored into third_party).
+#
+# When vpython is invoked, it finds this file and builds a python VirtualEnv,
+# containing all of the dependencies described in this file, fetching them from
+# CIPD (the "Chrome Infrastructure Package Deployer" service). Unlike `pip`,
+# this never requires the end-user machine to have a working python extension
+# compilation environment. All of these packages are built using:
+#   https://chromium.googlesource.com/infra/infra/+/master/infra/tools/dockerbuild/
+#
+# All python scripts in the repo share this same spec, to avoid dependency
+# fragmentation.
+#
+# If you have depot_tools installed in your $PATH, you can invoke python scripts
+# in this repo by running them as you normally would run them, except
+# substituting `vpython` instead of `python` on the command line, e.g.:
+#   vpython path/to/script.py some --arguments
+#
+# Read more about `vpython` and how to modify this file here:
+#   https://chromium.googlesource.com/infra/infra/+/master/doc/users/vpython.md
+
+python_version: "2.7"
+
+wheel: <
+  name: "infra/python/wheels/numpy/${vpython_platform}"
+  version: "version:1.11.3"
+>
+
+wheel: <
+  name: "infra/python/wheels/scipy/${vpython_platform}"
+  version: "version:0.19.0"
+  match_tag: <
+    abi: "cp27mu"
+    platform: "manylinux1_i686"
+  >
+  match_tag: <
+    abi: "cp27mu"
+    platform: "manylinux1_x86_64"
+  >
+>
diff --git a/src/third_party/v8/tools/callstats_groups.py b/src/third_party/v8/tools/callstats_groups.py
new file mode 100644
index 0000000..62898c6
--- /dev/null
+++ b/src/third_party/v8/tools/callstats_groups.py
@@ -0,0 +1,22 @@
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import re
+
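+# Groups are ordered from specific patterns to the Group-Runtime catch-all;
+# consumers typically assign a stat to the first group whose pattern matches.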
+RUNTIME_CALL_STATS_GROUPS = [
+    ('Group-IC', re.compile(".*IC_.*")),
+    ('Group-OptimizeBackground', re.compile(".*OptimizeBackground.*")),
+    ('Group-Optimize',
+     re.compile("StackGuard|.*Optimize.*|.*Deoptimize.*|Recompile.*")),
+    ('Group-CompileBackground', re.compile("(.*CompileBackground.*)")),
+    ('Group-Compile', re.compile("(^Compile.*)|(.*_Compile.*)")),
+    ('Group-ParseBackground', re.compile(".*ParseBackground.*")),
+    ('Group-Parse', re.compile(".*Parse.*")),
+    ('Group-Callback', re.compile(".*Callback.*")),
+    ('Group-API', re.compile(".*API.*")),
+    ('Group-GC-Custom', re.compile("GC_Custom_.*")),
+    ('Group-GC-Background', re.compile(".*GC.*BACKGROUND.*")),
+    ('Group-GC', re.compile("GC_.*|AllocateInTargetSpace")),
+    ('Group-JavaScript', re.compile("JS_Execution")),
+    ('Group-Runtime', re.compile(".*"))]
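+
+
+# Illustrative helper (hypothetical; not used by the scripts above): assigns a
+# runtime call stat name to the first matching group.
+def group_for(name):
+  for group_name, pattern in RUNTIME_CALL_STATS_GROUPS:
+    if pattern.match(name):
+      return group_name
+  return None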
diff --git a/src/third_party/v8/tools/cfi/ignores.txt b/src/third_party/v8/tools/cfi/ignores.txt
new file mode 100644
index 0000000..9886fd3
--- /dev/null
+++ b/src/third_party/v8/tools/cfi/ignores.txt
@@ -0,0 +1,26 @@
+# All std:: types
+# This should be possible to remove, if/when we build against
+# a statically linked libc++.
+type:std::*
+
+# The following entries are taken from chromium's tools/cfi/blacklist.txt
+[cfi-icall]
+
+######### Function pointers cast to incorrect type signatures
+
+# libicu is currently compiled such that in libicu the 'UChar' type is
+# defined as a char16_t internally, but for the rest of chromium it's an
+# unsigned short, causing mismatched type signatures for icalls to/from icu
+# v8/src/intl.cc
+fun:*LocaleConvertCase*
+
+# PropertyCallbackArguments::Call methods cast function pointers
+src:*src/api/api-arguments-inl.h
+
+# v8 callback that casts argument template parameters
+fun:*PendingPhantomCallback*Invoke*
+
+# weak_callback_ is cast from original type.
+fun:*GlobalHandles*PostGarbageCollectionProcessing*
+
+fun:*InvokeAccessorGetterCallback*
diff --git a/src/third_party/v8/tools/check-inline-includes.sh b/src/third_party/v8/tools/check-inline-includes.sh
new file mode 100755
index 0000000..6def974
--- /dev/null
+++ b/src/third_party/v8/tools/check-inline-includes.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+v8_root=$(readlink -f $(dirname $BASH_SOURCE)/../)
+directories="src test/cctest test/unittests"
+
+for directory in $directories; do
+  headers=$(find "$v8_root/$directory" -name '*.h' -not -name '*-inl.h')
+  for header in $headers; do
+    inline_header_include=$(grep '#include ".*-inl.h"' "$header")
+    if [ -n "$inline_header_include" ]; then
+      echo "The following non-inline header seems to include an inline header:"
+      echo "  Header : $header"
+      echo "  Include: $inline_header_include"
+      echo
+    fi
+  done
+done
+
+echo "Kthxbye."
diff --git a/src/third_party/v8/tools/check-static-initializers.sh b/src/third_party/v8/tools/check-static-initializers.sh
new file mode 100755
index 0000000..fdd1e84
--- /dev/null
+++ b/src/third_party/v8/tools/check-static-initializers.sh
@@ -0,0 +1,61 @@
+#!/bin/bash
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Checks that the number of compilation units having at least one static
+# initializer in d8 matches the expected count defined below.
+
+# Allow:
+# _GLOBAL__sub_I_d8.cc
+# _GLOBAL__sub_I_iostream.cpp
+expected_static_init_count=2
+
+v8_root=$(readlink -f $(dirname $BASH_SOURCE)/../)
+
+if [ -n "$1" ] ; then
+  d8="${v8_root}/$1"
+else
+  d8="${v8_root}/d8"
+fi
+
+if [ ! -f "$d8" ]; then
+  echo "d8 binary not found: $d8"
+  exit 1
+fi
+
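+# nm lists all symbols in the binary; the _GLOBAL__sub_I_* entries are the
+# compiler-generated static initializers, and awk keeps only the symbol names.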
+static_inits=$(nm "$d8" | grep _GLOBAL_ | grep _I_ | awk '{ print $NF; }')
+
+static_init_count=$(echo "$static_inits" | wc -l)
+
+if [ $static_init_count -gt $expected_static_init_count ]; then
+  echo "Too many static initializers."
+  echo "$static_inits"
+  exit 1
+else
+  echo "Static initializer check passed ($static_init_count initializers)."
+  exit 0
+fi
diff --git a/src/third_party/v8/tools/check-unused-bailouts.sh b/src/third_party/v8/tools/check-unused-bailouts.sh
new file mode 100755
index 0000000..68c2c91
--- /dev/null
+++ b/src/third_party/v8/tools/check-unused-bailouts.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+v8_root=$(readlink -f $(dirname $BASH_SOURCE)/../)
+bailouts=$(
+    grep \
+        --only-matching \
+        --perl-regexp 'V\(\K(k[^,]*)' \
+        -- "$v8_root/src/bailout-reason.h")
+
+# Find bailouts which appear exactly once (in bailout-reason.h)
+grep \
+    --only-matching \
+    --no-filename \
+    --recursive \
+    --word-regexp \
+    --fixed-strings "$bailouts" \
+    -- "$v8_root/src" "$v8_root/test/cctest" \
+| sort \
+| uniq -u \
+| sed -e 's/.*/Bailout reason "&" seems to be unused./'
+
+echo "Kthxbye."
diff --git a/src/third_party/v8/tools/check-unused-symbols.sh b/src/third_party/v8/tools/check-unused-symbols.sh
new file mode 100755
index 0000000..0348938
--- /dev/null
+++ b/src/third_party/v8/tools/check-unused-symbols.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+v8_root=$(readlink -f $(dirname $BASH_SOURCE)/../)
+symbols=$(
+    grep \
+        --only-matching \
+        --perl-regexp 'V\(_, \K([^,\)]*)' \
+        -- "$v8_root/src/heap-symbols.h")
+
+# Find symbols which appear exactly once (in heap-symbols.h)
+grep \
+    --only-matching \
+    --no-filename \
+    --recursive \
+    --fixed-strings "$symbols" \
+    -- "$v8_root/src" "$v8_root/test/cctest" \
+| sort \
+| uniq -u \
+| sed -e 's/.*/Heap symbol "&" seems to be unused./'
+
+echo "Kthxbye."
diff --git a/src/third_party/v8/tools/clusterfuzz/BUILD.gn b/src/third_party/v8/tools/clusterfuzz/BUILD.gn
new file mode 100644
index 0000000..d75e6f9
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/BUILD.gn
@@ -0,0 +1,25 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("../../gni/v8.gni")
+
+if (v8_correctness_fuzzer) {
+  copy("v8_correctness_fuzzer_resources") {
+    sources = [
+      "v8_commands.py",
+      "v8_foozzie.py",
+      "v8_foozzie_harness_adjust.js",
+      "v8_fuzz_config.py",
+      "v8_fuzz_experiments.json",
+      "v8_fuzz_flags.json",
+      "v8_mock.js",
+      "v8_mock_archs.js",
+      "v8_mock_webassembly.js",
+      "v8_sanity_checks.js",
+      "v8_suppressions.js",
+      "v8_suppressions.py",
+    ]
+    outputs = [ "$root_out_dir/{{source_file_part}}" ]
+  }
+}
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/.eslintrc.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/.eslintrc.js
new file mode 100644
index 0000000..f3ba3c9
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/.eslintrc.js
@@ -0,0 +1,22 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+module.exports = {
+    "env": {
+        "node": true,
+        "commonjs": true,
+        "es6": true,
+        "mocha": true
+    },
+    "extends": "eslint:recommended",
+    "globals": {
+        "Atomics": "readonly",
+        "SharedArrayBuffer": "readonly"
+    },
+    "parserOptions": {
+        "ecmaVersion": 2018
+    },
+    "rules": {
+    }
+};
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/DIR_METADATA b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/DIR_METADATA
new file mode 100644
index 0000000..9fc1320
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/DIR_METADATA
@@ -0,0 +1,11 @@
+# Metadata information for this directory.
+#
+# For more information on DIR_METADATA files, see:
+#   https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/README.md
+#
+# For the schema of this file, see Metadata message:
+#   https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
+
+monorail {
+  component: "Infra>Client>V8"
+}
\ No newline at end of file
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/README.md b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/README.md
new file mode 100644
index 0000000..a537ad7
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/README.md
@@ -0,0 +1,119 @@
+# JS-Fuzzer
+
+JavaScript fuzzer for stand-alone shells like D8, Chakra, JSC, or SpiderMonkey.
+
+Original author: Oliver Chang
+
+# Building
+
+This fuzzer may require versions of node that are newer than those available on
+ClusterFuzz, so we use [pkg](https://github.com/zeit/pkg) to create a
+self-contained binary out of it.
+
+## Prereqs
+You need to install nodejs and npm. Run `npm install` in this directory.
+
+## Fuzzing DB
+This fuzzer requires a fuzzing DB. To build one, get the latest web_tests.zip
+from `gs://clusterfuzz-data/web_tests.zip` and run:
+
+```bash
+$ mkdir db
+$ node build_db.js -i /path/to/web_tests -o db chakra v8 spidermonkey WebKit/JSTests
+```
+
+This may take a while. Optionally test the fuzzing DB with:
+
+```bash
+$ node test_db.js -i db
+```
+
+## Building fuzzer
+Then, to build the fuzzer:
+```bash
+$ ./node_modules/.bin/pkg -t node10-linux-x64 .
+```
+
+Replace "linux" with either "win" or "macos" for those platforms.
+
+This builds a binary named `ochang_js_fuzzer` for Linux / macOS OR
+`ochang_js_fuzzer.exe` for Windows.
+
+## Packaging
+Use `./package.sh`, `./package.sh win` or `./package.sh macos` to build and
+create the `output.zip` archive or use these raw commands:
+```bash
+$ mkdir output
+$ cd output
+$ ln -s ../db db
+$ ln -s ../ochang_js_fuzzer run
+$ zip -r /path/output.zip *
+```
+
+**NOTE**: Add `.exe` to the `ochang_js_fuzzer` and `run` filenames above if
+archiving for the Windows platform.
+
+# Development
+
+Run the tests with:
+
+```bash
+$ npm test
+```
+
+When test expectations change, generate them with:
+
+```bash
+$ GENERATE=1 npm test
+```
+
+# Generating exceptional configurations
+
+Tests that fail to parse or show very bad performance can be automatically
+skipped or soft-skipped with the following script (takes >1h):
+
+```bash
+$ WEB_TESTS=/path/to/web_tests OUTPUT=/path/to/output/folder ./gen_exceptions.sh
+```
+
+# Experimenting (limited to differential fuzzing)
+
+To locally evaluate the fuzzer, set up a work directory as follows:
+
+```bash
+workdir/
+workdir/app_dir
+workdir/fuzzer
+workdir/input
+workdir/output
+```
+
+The `app_dir` folder can be a symlink to, or a copy of, the bundled
+version of `d8` with all files required for execution.
+Then copy the packaged `ochang_js_fuzzer` executable and the `db` folder
+to the `fuzzer` directory, or use a symlink.
+The `input` directory is the root folder of the corpus, i.e. pointing
+to the unzipped data of `gs://clusterfuzz-data/web_tests.zip`.
+The `output` directory is expected to be empty. It'll contain all
+output of the fuzzing session. Start the experiments with:
+
+```bash
+$ # Around 40000 runs corresponds to 24h of fuzzing on a workstation.
+$ NUM_RUNS=40000
+$ python tools/workbench.py $NUM_RUNS
+```
+
+You can check current stats with:
+
+```bash
+$ cat workdir/output/stats.json | python -m json.tool
+```
+
+When failures are found, you can forge minimization command lines with:
+
+```bash
+$ MINIMIZER_PATH=path/to/minimizer
+$ python tools/minimize.py $MINIMIZER_PATH
+```
+
+The path should point to a local checkout of the [minimizer](https://chrome-internal.googlesource.com/chrome/tools/clusterfuzz/+/refs/heads/master/src/python/bot/minimizer/).
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/build_db.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/build_db.js
new file mode 100644
index 0000000..675a322
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/build_db.js
@@ -0,0 +1,66 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Collect JS nodes.
+ */
+
+const program = require('commander');
+
+const corpus = require('./corpus.js');
+const db = require('./db.js');
+const path = require('path');
+
+const sourceHelpers = require('./source_helpers.js');
+
+function main() {
+  Error.stackTraceLimit = Infinity;
+
+  program
+    .version('0.0.1')
+    .option('-i, --input_dir <path>', 'Input directory.')
+    .option('-o, --output_dir <path>', 'Output directory.')
+    .parse(process.argv);
+
+  if (!program.args.length) {
+    console.log('Need to specify corpora.');
+    return;
+  }
+
+  if (!program.output_dir) {
+    console.log('Need to specify output dir.');
+    return;
+  }
+
+  const mutateDb = new db.MutateDbWriter(program.output_dir);
+  const expressions = new Set();
+
+  const inputDir = path.resolve(program.input_dir);
+  for (const corpusName of program.args) {
+    const curCorpus = new corpus.Corpus(inputDir, corpusName);
+    for (const relPath of curCorpus.relFiles()) {
+      let source;
+      try {
+        source = sourceHelpers.loadSource(inputDir, relPath);
+      } catch (e) {
+        console.log(e);
+        continue;
+      }
+
+      if (!source) {
+        continue;
+      }
+
+      try{
+        mutateDb.process(source, expressions);
+      } catch (e) {
+        console.log(e);
+      }
+    }
+  }
+
+  mutateDb.writeIndex();
+}
+
+main();
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/corpus.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/corpus.js
new file mode 100644
index 0000000..d186ce8
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/corpus.js
@@ -0,0 +1,141 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Corpus
+ */
+
+const program = require('commander');
+const fs = require('fs');
+const path = require('path');
+
+const exceptions = require('./exceptions.js');
+const random = require('./random.js');
+const sourceHelpers = require('./source_helpers.js');
+
+function* walkDirectory(directory, filter) {
+  // Generator that recursively walks a directory.
+  for (const filePath of fs.readdirSync(directory)) {
+    const currentPath = path.join(directory, filePath);
+    const stat = fs.lstatSync(currentPath);
+    if (stat.isFile()) {
+      if (!filter || filter(currentPath)) {
+        yield currentPath;
+      }
+      continue;
+    }
+
+    if (stat.isDirectory()) {
+      for (let childFilePath of walkDirectory(currentPath, filter)) {
+        yield childFilePath;
+      }
+    }
+  }
+}
+
+class Corpus {
+  // Input corpus.
+  constructor(inputDir, corpusName, extraStrict=false) {
+    this.inputDir = inputDir;
+    this.extraStrict = extraStrict;
+
+    // Filter for permitted JS files.
+    function isPermittedJSFile(absPath) {
+      return (absPath.endsWith('.js') &&
+              !exceptions.isTestSkippedAbs(absPath));
+    }
+
+    // Cache relative paths of all files in corpus.
+    this.skippedFiles = [];
+    this.softSkippedFiles = [];
+    this.permittedFiles = [];
+    const directory = path.join(inputDir, corpusName);
+    for (const absPath of walkDirectory(directory, isPermittedJSFile)) {
+      const relPath = path.relative(this.inputDir, absPath);
+      if (exceptions.isTestSkippedRel(relPath)) {
+        this.skippedFiles.push(relPath);
+      } else if (exceptions.isTestSoftSkippedAbs(absPath) ||
+          exceptions.isTestSoftSkippedRel(relPath)) {
+        this.softSkippedFiles.push(relPath);
+      } else {
+        this.permittedFiles.push(relPath);
+      }
+    }
+    random.shuffle(this.softSkippedFiles);
+    random.shuffle(this.permittedFiles);
+  }
+
+  // Relative paths of all files in corpus.
+  *relFiles() {
+    for (const relPath of this.permittedFiles) {
+      yield relPath;
+    }
+    for (const relPath of this.softSkippedFiles) {
+      yield relPath;
+    }
+  }
+
+  // Relative paths of all files in corpus including generated skipped.
+  *relFilesForGenSkipped() {
+    for (const relPath of this.relFiles()) {
+      yield relPath;
+    }
+    for (const relPath of this.skippedFiles) {
+      yield relPath;
+    }
+  }
+
+  /**
+   * Returns "count" relative test paths, randomly selected from soft-skipped
+   * and permitted files. Permitted files have a 4 times higher chance to
+   * be chosen.
+   */
+  getRandomTestcasePaths(count) {
+    return random.twoBucketSample(
+        this.softSkippedFiles, this.permittedFiles, 4, count);
+  }
+
+  loadTestcase(relPath, strict, label) {
+    const start = Date.now();
+    try {
+      const source = sourceHelpers.loadSource(this.inputDir, relPath, strict);
+      if (program.verbose) {
+        const duration = Date.now() - start;
+        console.log(`Parsing ${relPath} ${label} took ${duration} ms.`);
+      }
+      return source;
+    } catch (e) {
+      console.log(`WARNING: failed to ${label} parse ${relPath}`);
+      console.log(e);
+    }
+    return undefined;
+  }
+
+  *loadTestcases(relPaths) {
+    for (const relPath of relPaths) {
+      if (this.extraStrict) {
+        // When re-generating the files marked sloppy, we additionally test if
+        // the file parses in strict mode.
+        this.loadTestcase(relPath, true, 'strict');
+      }
+      const source = this.loadTestcase(relPath, false, 'sloppy');
+      if (source) {
+        yield source;
+      }
+    }
+  }
+
+  getRandomTestcases(count) {
+    return Array.from(this.loadTestcases(this.getRandomTestcasePaths(count)));
+  }
+
+  getAllTestcases() {
+    return this.loadTestcases(this.relFilesForGenSkipped());
+  }
+}
+
+module.exports = {
+  Corpus: Corpus,
+  walkDirectory: walkDirectory,
+}
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/db.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/db.js
new file mode 100644
index 0000000..e96265b
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/db.js
@@ -0,0 +1,455 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Mutation Db.
+ */
+
+const crypto = require('crypto');
+const fs = require('fs');
+const fsPath = require('path');
+
+const babelGenerator = require('@babel/generator').default;
+const babelTraverse = require('@babel/traverse').default;
+const babelTypes = require('@babel/types');
+const globals = require('globals');
+
+const random = require('./random.js');
+
+const globalIdentifiers = new Set(Object.keys(globals.builtin));
+const propertyNames = new Set([
+    // Parsed from https://github.com/tc39/ecma262/blob/master/spec.html
+    'add',
+    'anchor',
+    'apply',
+    'big',
+    'bind',
+    'blink',
+    'bold',
+    'buffer',
+    'byteLength',
+    'byteOffset',
+    'BYTES_PER_ELEMENT',
+    'call',
+    'catch',
+    'charAt',
+    'charCodeAt',
+    'clear',
+    'codePointAt',
+    'compile',
+    'concat',
+    'constructor',
+    'copyWithin',
+    '__defineGetter__',
+    '__defineSetter__',
+    'delete',
+    'endsWith',
+    'entries',
+    'every',
+    'exec',
+    'fill',
+    'filter',
+    'find',
+    'findIndex',
+    'fixed',
+    'flags',
+    'fontcolor',
+    'fontsize',
+    'forEach',
+    'get',
+    'getDate',
+    'getDay',
+    'getFloat32',
+    'getFloat64',
+    'getFullYear',
+    'getHours',
+    'getInt16',
+    'getInt32',
+    'getInt8',
+    'getMilliseconds',
+    'getMinutes',
+    'getMonth',
+    'getSeconds',
+    'getTime',
+    'getTimezoneOffset',
+    'getUint16',
+    'getUint32',
+    'getUint8',
+    'getUTCDate',
+    'getUTCDay',
+    'getUTCFullYear',
+    'getUTCHours',
+    'getUTCMilliseconds',
+    'getUTCMinutes',
+    'getUTCMonth',
+    'getUTCSeconds',
+    'getYear',
+    'global',
+    'has',
+    'hasInstance',
+    'hasOwnProperty',
+    'ignoreCase',
+    'includes',
+    'indexOf',
+    'isConcatSpreadable',
+    'isPrototypeOf',
+    'italics',
+    'iterator',
+    'join',
+    'keys',
+    'lastIndexOf',
+    'length',
+    'link',
+    'localeCompare',
+    '__lookupGetter__',
+    '__lookupSetter__',
+    'map',
+    'match',
+    'match',
+    'message',
+    'multiline',
+    'name',
+    'next',
+    'normalize',
+    'padEnd',
+    'padStart',
+    'pop',
+    'propertyIsEnumerable',
+    '__proto__',
+    'prototype',
+    'push',
+    'reduce',
+    'reduceRight',
+    'repeat',
+    'replace',
+    'replace',
+    'return',
+    'reverse',
+    'search',
+    'search',
+    'set',
+    'set',
+    'setDate',
+    'setFloat32',
+    'setFloat64',
+    'setFullYear',
+    'setHours',
+    'setInt16',
+    'setInt32',
+    'setInt8',
+    'setMilliseconds',
+    'setMinutes',
+    'setMonth',
+    'setSeconds',
+    'setTime',
+    'setUint16',
+    'setUint32',
+    'setUint8',
+    'setUTCDate',
+    'setUTCFullYear',
+    'setUTCHours',
+    'setUTCMilliseconds',
+    'setUTCMinutes',
+    'setUTCMonth',
+    'setUTCSeconds',
+    'setYear',
+    'shift',
+    'size',
+    'slice',
+    'slice',
+    'small',
+    'some',
+    'sort',
+    'source',
+    'species',
+    'splice',
+    'split',
+    'split',
+    'startsWith',
+    'sticky',
+    'strike',
+    'sub',
+    'subarray',
+    'substr',
+    'substring',
+    'sup',
+    'test',
+    'then',
+    'throw',
+    'toDateString',
+    'toExponential',
+    'toFixed',
+    'toGMTString',
+    'toISOString',
+    'toJSON',
+    'toLocaleDateString',
+    'toLocaleLowerCase',
+    'toLocaleString',
+    'toLocaleTimeString',
+    'toLocaleUpperCase',
+    'toLowerCase',
+    'toPrecision',
+    'toPrimitive',
+    'toString',
+    'toStringTag',
+    'toTimeString',
+    'toUpperCase',
+    'toUTCString',
+    'trim',
+    'unicode',
+    'unscopables',
+    'unshift',
+    'valueOf',
+    'values',
+]);
+
+const MAX_DEPENDENCIES = 2;
+
+class Expression {
+  constructor(type, source, isStatement, originalPath,
+              dependencies, needsSuper) {
+    this.type = type;
+    this.source = source;
+    this.isStatement = isStatement;
+    this.originalPath = originalPath;
+    this.dependencies = dependencies;
+    this.needsSuper = needsSuper;
+  }
+}
+
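+// Builds a normalized key for de-duplication: occurrences of an expression's
+// renamed dependencies (VAR_*) are replaced with a placeholder so structurally
+// identical snippets map to the same key.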
+function dedupKey(expression) {
+  if (!expression.dependencies) {
+    return expression.source;
+  }
+
+  let result = expression.source;
+  for (let dependency of expression.dependencies) {
+    result = result.replace(new RegExp(dependency, 'g'), 'ID');
+  }
+
+  return result;
+}
+
+function _markSkipped(path) {
+  while (path) {
+    path.node.__skipped = true;
+    path = path.parentPath;
+  }
+}
+
+class MutateDbWriter {
+  constructor(outputDir) {
+    this.seen = new Set();
+    this.outputDir = fsPath.resolve(outputDir);
+    this.index = {
+      statements: [],
+      superStatements: [],
+      all: [],
+    };
+  }
+
+  process(source) {
+    let self = this;
+
+    let varIndex = 0;
+
+    // First pass to collect dependency information.
+    babelTraverse(source.ast, {
+      Super(path) {
+        while (path) {
+          path.node.__needsSuper = true;
+          path = path.parentPath;
+        }
+      },
+
+      YieldExpression(path) {
+        // Don't include yield expressions in DB.
+        _markSkipped(path);
+      },
+
+      Identifier(path) {
+        if (globalIdentifiers.has(path.node.name) &&
+            path.node.name != 'eval') {
+          // Global name.
+          return;
+        }
+
+        if (propertyNames.has(path.node.name) &&
+            path.parentPath.isMemberExpression() &&
+            path.parentKey !== 'object') {
+          // Builtin property name.
+          return;
+        }
+
+        let binding = path.scope.getBinding(path.node.name);
+        if (!binding) {
+          // Unknown dependency. Don't handle this.
+          _markSkipped(path);
+          return;
+        }
+
+        let newName;
+        if (path.node.name.startsWith('VAR_')) {
+          newName = path.node.name;
+        } else if (babelTypes.isFunctionDeclaration(binding.path.node) ||
+                   babelTypes.isFunctionExpression(binding.path.node) ||
+                   babelTypes.isDeclaration(binding.path.node)) {
+          // Unknown dependency. Don't handle this.
+          _markSkipped(path);
+          return;
+        } else {
+          newName = 'VAR_' + varIndex++;
+          path.scope.rename(path.node.name, newName);
+        }
+
+        // Mark all parents as having a dependency.
+        while (path) {
+          path.node.__idDependencies = path.node.__idDependencies || [];
+          if (path.node.__idDependencies.length <= MAX_DEPENDENCIES) {
+            path.node.__idDependencies.push(newName);
+          }
+          path = path.parentPath;
+        }
+      }
+    });
+
+    babelTraverse(source.ast, {
+      Expression(path) {
+        if (!path.parentPath.isExpressionStatement()) {
+          return;
+        }
+
+        if (path.node.__skipped ||
+            (path.node.__idDependencies &&
+             path.node.__idDependencies.length > MAX_DEPENDENCIES)) {
+          return;
+        }
+
+        if (path.isIdentifier() || path.isMemberExpression() ||
+            path.isConditionalExpression() ||
+            path.isBinaryExpression() || path.isDoExpression() ||
+            path.isLiteral() ||
+            path.isObjectExpression() || path.isArrayExpression()) {
+          // Skip:
+          //   - Identifiers.
+          //   - Member expressions (too many and too context dependent).
+          //   - Conditional expressions (too many and too context dependent).
+          //   - Binary expressions (too many).
+          //   - Literals (too many).
+          //   - Object/array expressions (too many).
+          return;
+        }
+
+        if (path.isAssignmentExpression()) {
+          if (!babelTypes.isMemberExpression(path.node.left)) {
+            // Skip assignments that aren't to properties.
+            return;
+          }
+
+          if (babelTypes.isIdentifier(path.node.left.object)) {
+            if (babelTypes.isNumericLiteral(path.node.left.property)) {
+              // Skip VAR[\d+] = ...;
+              // There are too many and they generally aren't very useful.
+              return;
+            }
+
+            if (babelTypes.isStringLiteral(path.node.left.property) &&
+                !propertyNames.has(path.node.left.property.value)) {
+              // Skip custom properties. e.g.
+              // VAR["abc"] = ...;
+              // There are too many and they generally aren't very useful.
+              return;
+            }
+          }
+        }
+
+        if (path.isCallExpression() &&
+            babelTypes.isIdentifier(path.node.callee) &&
+            !globalIdentifiers.has(path.node.callee.name)) {
+          // Skip VAR(...) calls since there's too much context we're missing.
+          return;
+        }
+
+        if (path.isUnaryExpression() && path.node.operator == '-') {
+          // Skip -... since there are too many.
+          return;
+        }
+
+        // Make the template.
+        let generated = babelGenerator(path.node, { concise: true }).code;
+        let expression = new Expression(
+            path.node.type,
+            generated,
+            path.parentPath.isExpressionStatement(),
+            source.relPath,
+            path.node.__idDependencies,
+            Boolean(path.node.__needsSuper));
+
+        // Try to de-dupe similar expressions.
+        let key = dedupKey(expression);
+        if (self.seen.has(key)) {
+          return;
+        }
+
+        // Write results.
+        let dirPath = fsPath.join(self.outputDir, expression.type);
+        if (!fs.existsSync(dirPath)) {
+          fs.mkdirSync(dirPath);
+        }
+
+        let sha1sum = crypto.createHash('sha1');
+        sha1sum.update(key);
+
+        let filePath = fsPath.join(dirPath, sha1sum.digest('hex') + '.json');
+        fs.writeFileSync(filePath, JSON.stringify(expression));
+
+        let relPath = fsPath.relative(self.outputDir, filePath);
+
+        // Update index.
+        self.seen.add(key);
+        self.index.all.push(relPath);
+
+        if (expression.needsSuper) {
+          self.index.superStatements.push(relPath);
+        } else {
+          self.index.statements.push(relPath);
+        }
+      }
+    });
+  }
+
+  writeIndex() {
+    fs.writeFileSync(
+        fsPath.join(this.outputDir, 'index.json'),
+        JSON.stringify(this.index));
+  }
+}
+
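+// Read-only view of a DB directory previously produced by MutateDbWriter.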
+class MutateDb {
+  constructor(outputDir) {
+    this.outputDir = fsPath.resolve(outputDir);
+    this.index = JSON.parse(
+        fs.readFileSync(fsPath.join(outputDir, 'index.json'), 'utf-8'));
+  }
+
+  getRandomStatement({canHaveSuper=false} = {}) {
+    let choices;
+    if (canHaveSuper) {
+      choices = random.randInt(0, 1) ?
+          this.index.all : this.index.superStatements;
+    } else {
+      choices = this.index.statements;
+    }
+
+    let path = fsPath.join(
+        this.outputDir, choices[random.randInt(0, choices.length - 1)]);
+    return JSON.parse(fs.readFileSync(path, 'utf-8'));
+  }
+}
+
+module.exports = {
+  MutateDb: MutateDb,
+  MutateDbWriter: MutateDbWriter,
+}
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/differential_script_mutator.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/differential_script_mutator.js
new file mode 100644
index 0000000..6c43f64
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/differential_script_mutator.js
@@ -0,0 +1,168 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Script mutator for differential fuzzing.
+ */
+
+'use strict';
+
+const assert = require('assert');
+const fs = require('fs');
+const path = require('path');
+
+const common = require('./mutators/common.js');
+const random = require('./random.js');
+const sourceHelpers = require('./source_helpers.js');
+
+const { filterDifferentialFuzzFlags } = require('./exceptions.js');
+const { DifferentialFuzzMutator, DifferentialFuzzSuppressions } = require(
+    './mutators/differential_fuzz_mutator.js');
+const { ScriptMutator } = require('./script_mutator.js');
+
+
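+// Probability of forwarding a test's original JS flags to both the baseline
+// and the comparison configuration.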
+const USE_ORIGINAL_FLAGS_PROB = 0.2;
+
+/**
+ * Randomly chooses a configuration from experiments. The configuration
+ * parameters are expected to be passed from a bundled V8 build. Constraints
+ * mentioned below are enforced by PRESUBMIT checks on the V8 side.
+ *
+ * @param {Object[]} experiments List of tuples (probability, first config name,
+ *     second config name, second d8 name). The probabilities are integers in
+ *     [0,100]. We assume the sum of all probabilities is 100.
+ * @param {Object[]} additionalFlags List of tuples (probability, flag strings).
+ *     Probability is in [0,1).
+ * @return {string[]} List of flags for v8_foozzie.py.
+ */
+function chooseRandomFlags(experiments, additionalFlags) {
+  // Add additional flags to second config based on experiment percentages.
+  const extra_flags = [];
+  for (const [p, flags] of additionalFlags) {
+    if (random.choose(p)) {
+      for (const flag of flags.split(' ')) {
+        extra_flags.push('--second-config-extra-flags=' + flag);
+      }
+    }
+  }
+
+  // Calculate flags determining the experiment.
+  let acc = 0;
+  const threshold = random.random() * 100;
+  for (let [prob, first_config, second_config, second_d8] of experiments) {
+    acc += prob;
+    if (acc > threshold) {
+      return [
+        '--first-config=' + first_config,
+        '--second-config=' + second_config,
+        '--second-d8=' + second_d8,
+      ].concat(extra_flags);
+    }
+  }
+  // Unreachable.
+  assert(false);
+}
+
+function loadJSONFromBuild(name) {
+  assert(process.env.APP_DIR);
+  const fullPath = path.join(path.resolve(process.env.APP_DIR), name);
+  return JSON.parse(fs.readFileSync(fullPath, 'utf-8'));
+}
+
+function hasMjsunit(dependencies) {
+  return dependencies.some(dep => dep.relPath.endsWith('mjsunit.js'));
+}
+
+function hasJSTests(dependencies) {
+  return dependencies.some(dep => dep.relPath.endsWith('jstest_stubs.js'));
+}
+
+class DifferentialScriptMutator extends ScriptMutator {
+  constructor(settings, db_path) {
+    super(settings, db_path);
+
+    // Mutators for differential fuzzing.
+    this.differential = [
+      new DifferentialFuzzSuppressions(settings),
+      new DifferentialFuzzMutator(settings),
+    ];
+
+    // Flag configurations from the V8 build directory.
+    this.experiments = loadJSONFromBuild('v8_fuzz_experiments.json');
+    this.additionalFlags = loadJSONFromBuild('v8_fuzz_flags.json');
+  }
+
+  /**
+   * Performs the high-level mutation and afterwards adds flags for the
+   * v8_foozzie.py harness.
+   */
+  mutateMultiple(inputs) {
+    const result = super.mutateMultiple(inputs);
+    const originalFlags = [];
+
+    // Keep original JS flags in some cases. Let the harness pass them to
+    // baseline _and_ comparison run.
+    if (random.choose(USE_ORIGINAL_FLAGS_PROB)) {
+      for (const flag of filterDifferentialFuzzFlags(result.flags)) {
+        originalFlags.push('--first-config-extra-flags=' + flag);
+        originalFlags.push('--second-config-extra-flags=' + flag);
+      }
+    }
+
+    // Add flags for the differential-fuzzing settings.
+    const fuzzFlags = chooseRandomFlags(this.experiments, this.additionalFlags);
+    result.flags = fuzzFlags.concat(originalFlags);
+    return result;
+  }
+
+  /**
+   * Mutates a set of inputs.
+   *
+   * Additionally we prepare inputs by tagging each with the original source
+   * path for later printing. The mutated sources are post-processed by the
+   * differential-fuzz mutators, adding extra printing and other substitutions.
+   */
+  mutateInputs(inputs) {
+    inputs.forEach(input => common.setOriginalPath(input, input.relPath));
+
+    const result = super.mutateInputs(inputs);
+    this.differential.forEach(mutator => mutator.mutate(result));
+    return result;
+  }
+
+  /**
+   * Adds extra dependencies for differential fuzzing.
+   */
+  resolveDependencies(inputs) {
+    const dependencies = super.resolveDependencies(inputs);
+    // The suppression file neuters functions not working with differential
+    // fuzzing. It can also be used to temporarily silence some functionality
+    // leading to dupes of an active bug.
+    dependencies.push(
+        sourceHelpers.loadResource('differential_fuzz_suppressions.js'));
+    // Extra printing and tracking functionality.
+    dependencies.push(
+        sourceHelpers.loadResource('differential_fuzz_library.js'));
+    // Make Chakra tests print more.
+    dependencies.push(
+        sourceHelpers.loadResource('differential_fuzz_chakra.js'));
+
+    if (hasMjsunit(dependencies)) {
+      // Make V8 tests print more. We guard this as the functionality
+      // relies on mjsunit.js.
+      dependencies.push(sourceHelpers.loadResource('differential_fuzz_v8.js'));
+    }
+
+    if (hasJSTests(dependencies)) {
+      dependencies.push(
+          sourceHelpers.loadResource('differential_fuzz_jstest.js'));
+    }
+
+    return dependencies;
+  }
+}
+
+module.exports = {
+  DifferentialScriptMutator: DifferentialScriptMutator,
+};
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/exceptions.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/exceptions.js
new file mode 100644
index 0000000..41255aa
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/exceptions.js
@@ -0,0 +1,281 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Blacklists for fuzzer.
+ */
+
+'use strict';
+
+const fs = require('fs');
+const path = require('path');
+
+const random = require('./random.js');
+
+const {generatedSloppy, generatedSoftSkipped, generatedSkipped} = require(
+    './generated/exceptions.js');
+
+const SKIPPED_FILES = [
+    // Disabled for unexpected test behavior, specific to d8 shell.
+    'd8-os.js',
+    'd8-readbuffer.js',
+
+    // Passes JS flags.
+    'd8-arguments.js',
+
+    // Slow tests or tests that are too large to be used as input.
+    /numops-fuzz-part.*.js/,
+    'regexp-pcre.js',
+    'unicode-test.js',
+    'unicodelctest.js',
+    'unicodelctest-no-optimization.js',
+
+    // Unsupported modules.
+    /^modules.*\.js/,
+
+    // Unsupported property escapes.
+    /^regexp-property-.*\.js/,
+
+    // Bad testcases that just loads a script that always throws errors.
+    'regress-444805.js',
+    'regress-crbug-489597.js',
+    'regress-crbug-620253.js',
+
+    // Just recursively loads itself.
+    'regress-8510.js',
+];
+
+const SKIPPED_DIRECTORIES = [
+    // Slow tests or tests that are too large to be used as input.
+    'embenchen',
+    'poppler',
+    'sqlite',
+
+    // Causes lots of failures.
+    'test262',
+
+    // Unavailable debug.Debug.
+    'v8/test/debugger',
+    'v8/test/inspector',
+
+    // Unsupported modules.
+    'v8/test/js-perf-test/Modules',
+
+    // Contains tests expected to error out on parsing.
+    'v8/test/message',
+
+    // Needs specific dependencies for load of various tests.
+    'v8/test/mjsunit/tools',
+
+    // Unsupported e4x standard.
+    'mozilla/data/e4x',
+
+    // Bails out fast without ReadableStream support.
+    'spidermonkey/non262/ReadableStream',
+];
+
+// Files used with a lower probability.
+const SOFT_SKIPPED_FILES = [
+    // Tests with large binary content.
+    /^binaryen.*\.js/,
+
+    // Tests slow to parse.
+    // CrashTests:
+    /^jquery.*\.js/,
+    // Spidermonkey:
+    'regress-308085.js',
+    'regress-74474-002.js',
+    'regress-74474-003.js',
+    // V8:
+    'object-literal.js',
+];
+
+// Flags that lead to false positives or that are already passed by default.
+const DISALLOWED_FLAGS = [
+    // Disallowed because features prefixed with "experimental" are not
+    // stabilized yet and would cause too much noise when enabled.
+    /^--experimental-.*/,
+
+    // Disallowed due to noise. We explicitly add --es-staging to job
+    // definitions, and all of these features are staged before launch.
+    /^--harmony-.*/,
+
+    // Disallowed because they are passed explicitly on the command line.
+    '--allow-natives-syntax',
+    '--debug-code',
+    '--es-staging',
+    '--wasm-staging',
+    '--expose-gc',
+    '--expose_gc',
+    '--icu-data-file',
+    '--random-seed',
+
+    // Disallowed due to false positives.
+    '--check-handle-count',
+    '--expose-debug-as',
+    '--expose-natives-as',
+    '--expose-trigger-failure',
+    '--mock-arraybuffer-allocator',
+    'natives',  // Used in conjunction with --expose-natives-as.
+    /^--trace-path.*/,
+];
+
+// Flags only used with 25% probability.
+const LOW_PROB_FLAGS_PROB = 0.25;
+const LOW_PROB_FLAGS = [
+    // Flags that lead to slow test performance.
+    /^--gc-interval.*/,
+    /^--deopt-every-n-times.*/,
+];
+
+
+// Flags printing data, leading to false positives in differential fuzzing.
+const DISALLOWED_DIFFERENTIAL_FUZZ_FLAGS = [
+    /^--gc-interval.*/,
+    /^--perf.*/,
+    /^--print.*/,
+    /^--stress-runs.*/,
+    /^--trace.*/,
+    '--expose-externalize-string',
+    '--interpreted-frames-native-stack',
+    '--stress-opt',
+    '--validate-asm',
+];
+
+const ALLOWED_RUNTIME_FUNCTIONS = new Set([
+    // List of allowed runtime functions. Others will be replaced with no-ops.
+    'ArrayBufferDetach',
+    'DeoptimizeFunction',
+    'DeoptimizeNow',
+    'EnableCodeLoggingForTesting',
+    'GetUndetectable',
+    'HeapObjectVerify',
+    'IsBeingInterpreted',
+    'NeverOptimizeFunction',
+    'OptimizeFunctionOnNextCall',
+    'OptimizeOsr',
+    'PrepareFunctionForOptimization',
+    'SetAllocationTimeout',
+    'SimulateNewspaceFull',
+]);
+
+const MAX_FILE_SIZE_BYTES = 128 * 1024;  // 128KB
+const MEDIUM_FILE_SIZE_BYTES = 32 * 1024;  // 32KB
+
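+// Returns true if the candidate matches any entry in the iterable; entries can
+// be exact strings or regular expressions.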
+function _findMatch(iterable, candidate) {
+  for (const entry of iterable) {
+    if (typeof entry === 'string') {
+      if (entry === candidate) {
+        return true;
+      }
+    } else {
+      if (entry.test(candidate)) {
+        return true;
+      }
+    }
+  }
+
+  return false;
+}
+
+function _doesntMatch(iterable, candidate) {
+  return !_findMatch(iterable, candidate);
+}
+
+// Convert Windows path separators.
+function normalize(testPath) {
+  return path.normalize(testPath).replace(/\\/g, '/');
+}
+
+function isTestSkippedAbs(absPath) {
+  const basename = path.basename(absPath);
+  if (_findMatch(SKIPPED_FILES, basename)) {
+    return true;
+  }
+
+  const normalizedTestPath = normalize(absPath);
+  for (const entry of SKIPPED_DIRECTORIES) {
+    if (normalizedTestPath.includes(entry))  {
+      return true;
+    }
+  }
+
+  // Avoid OOM/hangs through huge inputs.
+  const stat = fs.statSync(absPath);
+  return (stat && stat.size >= MAX_FILE_SIZE_BYTES);
+}
+
+function isTestSkippedRel(relPath) {
+  return generatedSkipped.has(normalize(relPath));
+}
+
+// For testing.
+function getSoftSkipped() {
+  return SOFT_SKIPPED_FILES;
+}
+
+// For testing.
+function getGeneratedSoftSkipped() {
+  return generatedSoftSkipped;
+}
+
+// For testing.
+function getGeneratedSloppy() {
+  return generatedSloppy;
+}
+
+function isTestSoftSkippedAbs(absPath) {
+  const basename = path.basename(absPath);
+  if (_findMatch(this.getSoftSkipped(), basename)) {
+    return true;
+  }
+
+  // Graylist medium size files.
+  const stat = fs.statSync(absPath);
+  return (stat && stat.size >= MEDIUM_FILE_SIZE_BYTES);
+}
+
+function isTestSoftSkippedRel(relPath) {
+  return this.getGeneratedSoftSkipped().has(normalize(relPath));
+}
+
+function isTestSloppyRel(relPath) {
+  return this.getGeneratedSloppy().has(normalize(relPath));
+}
+
+function filterFlags(flags) {
+  return flags.filter(flag => {
+    return (
+        _doesntMatch(DISALLOWED_FLAGS, flag) &&
+        (_doesntMatch(LOW_PROB_FLAGS, flag) ||
+         random.choose(LOW_PROB_FLAGS_PROB)));
+  });
+}
+
+function filterDifferentialFuzzFlags(flags) {
+  return flags.filter(
+      flag => _doesntMatch(DISALLOWED_DIFFERENTIAL_FUZZ_FLAGS, flag));
+}
+
+function isAllowedRuntimeFunction(name) {
+  if (process.env.APP_NAME != 'd8') {
+    return false;
+  }
+
+  return ALLOWED_RUNTIME_FUNCTIONS.has(name);
+}
+
+module.exports = {
+  filterDifferentialFuzzFlags: filterDifferentialFuzzFlags,
+  filterFlags: filterFlags,
+  getGeneratedSoftSkipped: getGeneratedSoftSkipped,
+  getGeneratedSloppy: getGeneratedSloppy,
+  getSoftSkipped: getSoftSkipped,
+  isAllowedRuntimeFunction: isAllowedRuntimeFunction,
+  isTestSkippedAbs: isTestSkippedAbs,
+  isTestSkippedRel: isTestSkippedRel,
+  isTestSoftSkippedAbs: isTestSoftSkippedAbs,
+  isTestSoftSkippedRel: isTestSoftSkippedRel,
+  isTestSloppyRel: isTestSloppyRel,
+}
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/foozzie_launcher.py b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/foozzie_launcher.py
new file mode 100644
index 0000000..b1f892c
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/foozzie_launcher.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+# Copyright 2020 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+"""
+Launcher for the foozzie differential-fuzzing harness. Wraps foozzie
+with Python2 for backwards-compatibility when bisecting.
+"""
+
+import os
+import re
+import subprocess
+import sys
+
+if __name__ == '__main__':
+  # In some cases or older versions, the python executable is passed as
+  # first argument. Let's be robust either way, with or without full
+  # path or version.
+  if re.match(r'.*python.*', sys.argv[1]):
+    args = sys.argv[2:]
+  else:
+    args = sys.argv[1:]
+  process = subprocess.Popen(['python2'] + args)
+  process.communicate()
+  sys.exit(process.returncode)
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/gen_exceptions.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/gen_exceptions.js
new file mode 100644
index 0000000..19e40ed
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/gen_exceptions.js
@@ -0,0 +1,196 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Generate exceptions from full corpus test report.
+ */
+
+const program = require('commander');
+
+const assert = require('assert');
+const babelGenerator = require('@babel/generator').default;
+const babelTemplate = require('@babel/template').default;
+const babelTypes = require('@babel/types');
+const fs = require('fs');
+const p = require('path');
+const prettier = require("prettier");
+
+const SPLIT_LINES_RE = /^.*([\n\r]+|$)/gm;
+const PARSE_RE = /^Parsing (.*) sloppy took (\d+) ms\.\n$/;
+const MUTATE_RE = /^Mutating (.*) took (\d+) ms\.\n$/;
+const PARSE_FAILED_RE = /^WARNING: failed to sloppy parse (.*)\n$/;
+const PARSE_STRICT_FAILED_RE = /^WARNING: failed to strict parse (.*)\n$/;
+const MUTATE_FAILED_RE = /^ERROR: Exception during mutate: (.*)\n$/;
+
+// Add tests matching error regexp to result array.
+function matchError(regexp, line, resultArray){
+  const match = line.match(regexp);
+  if (!match) return false;
+  const relPath = match[1];
+  assert(relPath);
+  resultArray.push(relPath);
+  return true;
+}
+
+// Sum up total duration of tests matching the duration regexp and
+// map test -> duration in result map.
+function matchDuration(regexp, line, resultMap){
+  const match = line.match(regexp);
+  if (!match) return false;
+  const relPath = match[1];
+  assert(relPath);
+  resultMap[relPath] = (resultMap[relPath] || 0) + parseInt(match[2]);
+  return true;
+}
+
+// Create lists of failed and slow tests from stdout of a fuzzer run.
+function processFuzzOutput(outputFile){
+  const text = fs.readFileSync(outputFile, 'utf-8');
+  const lines = text.match(SPLIT_LINES_RE);
+
+  const failedParse = [];
+  const failedParseStrict = [];
+  const failedMutate = [];
+  const durationsMap = {};
+
+  for (const line of lines) {
+    if (matchError(PARSE_FAILED_RE, line, failedParse))
+      continue;
+    if (matchError(PARSE_STRICT_FAILED_RE, line, failedParseStrict))
+      continue;
+    if (matchError(MUTATE_FAILED_RE, line, failedMutate))
+      continue;
+    if (matchDuration(PARSE_RE, line, durationsMap))
+      continue;
+    if (matchDuration(MUTATE_RE, line, durationsMap))
+      continue;
+  }
+
+  // Tuples (absPath, duration).
+  const total = Object.entries(durationsMap);
+  // Tuples (absPath, duration) with 2s < duration <= 10s.
+  const slow = total.filter(t => t[1] > 2000 && t[1] <= 10000);
+  // Tuples (absPath, duration) with 10s < duration.
+  const verySlow = total.filter(t => t[1] > 10000);
+
+  // Assert there's nothing horribly wrong with the results.
+  // We have at least 2500 tests in the output.
+  assert(total.length > 2500);
+  // No more than 5% parse/mutation errors.
+  assert(failedParse.length + failedMutate.length < total.length / 20);
+  // No more than 10% slow tests
+  assert(slow.length < total.length / 10);
+  // No more than 2% very slow tests.
+  assert(verySlow.length < total.length / 50);
+
+  // Sort everything.
+  failedParse.sort();
+  failedParseStrict.sort();
+  failedMutate.sort();
+
+  function slowestFirst(a, b) {
+    return b[1] - a[1];
+  }
+
+  slow.sort(slowestFirst);
+  verySlow.sort(slowestFirst);
+
+  return [failedParse, failedParseStrict, failedMutate, slow, verySlow];
+}
+
+// List of string literals of failed tests.
+function getLiteralsForFailed(leadingComment, failedList) {
+  const result = failedList.map(path => babelTypes.stringLiteral(path));
+  if (result.length) {
+    babelTypes.addComment(result[0], 'leading', leadingComment);
+  }
+  return result;
+}
+
+// List of string literals of slow tests with duration comments.
+function getLiteralsForSlow(leadingComment, slowList) {
+  const result = slowList.map(([path, duration]) => {
+    const literal = babelTypes.stringLiteral(path);
+    babelTypes.addComment(
+        literal, 'trailing', ` ${duration / 1000}s`, true);
+    return literal;
+  });
+  if (result.length) {
+    babelTypes.addComment(result[0], 'leading', leadingComment);
+  }
+  return result;
+}
+
+function main() {
+  program
+    .version('0.0.1')
+    .parse(process.argv);
+
+  if (!program.args.length) {
+    console.log('Need to specify stdout reports of fuzz runs.');
+    return;
+  }
+
+  let skipped = [];
+  let softSkipped = [];
+  let sloppy = [];
+  for (const outputFile of program.args) {
+    const [failedParse, failedParseStrict, failedMutate, slow, verySlow] = (
+        processFuzzOutput(outputFile));
+    const name = p.basename(outputFile, p.extname(outputFile));
+
+    // Skip tests that fail to parse/mutate or are very slow.
+    skipped = skipped.concat(getLiteralsForFailed(
+        ` Tests with parse errors from ${name} `, failedParse));
+    skipped = skipped.concat(getLiteralsForFailed(
+        ` Tests with mutation errors from ${name} `, failedMutate));
+    skipped = skipped.concat(getLiteralsForSlow(
+        ` Very slow tests from ${name} `, verySlow));
+
+    // Soft-skip slow but not very slow tests.
+    softSkipped = softSkipped.concat(getLiteralsForSlow(
+        ` Slow tests from ${name} `, slow));
+
+    // Mark sloppy tests.
+    sloppy = sloppy.concat(getLiteralsForFailed(
+        ` Tests requiring sloppy mode from ${name} `, failedParseStrict));
+  }
+
+  const fileTemplate = babelTemplate(`
+    /**
+     * @fileoverview Autogenerated exceptions. Created with gen_exceptions.js.
+     */
+
+    'use strict';
+
+    const skipped = SKIPPED;
+
+    const softSkipped = SOFTSKIPPED;
+
+    const sloppy = SLOPPY;
+
+    module.exports = {
+      generatedSkipped: new Set(skipped),
+      generatedSoftSkipped: new Set(softSkipped),
+      generatedSloppy: new Set(sloppy),
+    }
+  `, {preserveComments: true});
+
+  const skippedArray = babelTypes.arrayExpression(skipped);
+  const softSkippedArray = babelTypes.arrayExpression(softSkipped);
+  const sloppyArray = babelTypes.arrayExpression(sloppy);
+
+  const statements = fileTemplate({
+    SKIPPED: skippedArray,
+    SOFTSKIPPED: softSkippedArray,
+    SLOPPY: sloppyArray,
+  });
+
+  const resultProgram = babelTypes.program(statements);
+  const code = babelGenerator(resultProgram, { comments: true }).code;
+  const prettyCode = prettier.format(code, { parser: "babel" });
+  fs.writeFileSync('generated/exceptions.js', prettyCode);
+}
+
+main();
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/gen_exceptions.sh b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/gen_exceptions.sh
new file mode 100644
index 0000000..b2a90b8
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/gen_exceptions.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+# Copyright 2020 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+APP_NAME=d8 node run.js -i $WEB_TESTS -o $OUTPUT -z -v -e -c chakra > chakra.log
+APP_NAME=d8 node run.js -i $WEB_TESTS -o $OUTPUT -z -v -e -c v8 > v8.log
+APP_NAME=d8 node run.js -i $WEB_TESTS -o $OUTPUT -z -v -e -c spidermonkey > spidermonkey.log
+APP_NAME=d8 node run.js -i $WEB_TESTS -o $OUTPUT -z -v -e -c WebKit/JSTests > jstests.log
+APP_NAME=d8 node run.js -i $WEB_TESTS -o $OUTPUT -z -v -e -c CrashTests > crashtests.log
+
+node gen_exceptions.js v8.log spidermonkey.log chakra.log jstests.log crashtests.log
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/generated/exceptions.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/generated/exceptions.js
new file mode 100644
index 0000000..5b4bd7c
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/generated/exceptions.js
@@ -0,0 +1,1928 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Autogenerated exceptions. Created with gen_exceptions.js.
+ */
+"use strict";
+
+const skipped = [
+  /* Tests with parse errors from v8 */
+  "v8/test/mjsunit/es6/block-let-contextual-sloppy.js",
+  "v8/test/mjsunit/es6/generators-parsing.js",
+  "v8/test/mjsunit/es6/object-literals-property-shorthand.js",
+  "v8/test/mjsunit/es6/sloppy-restrictive-block-function.js",
+  "v8/test/mjsunit/es7/exponentiation-operator.js",
+  "v8/test/mjsunit/harmony/bigint/property-names.js",
+  "v8/test/mjsunit/harmony/import-from-compilation-errored.js",
+  "v8/test/mjsunit/harmony/import-from-evaluation-errored.js",
+  "v8/test/mjsunit/harmony/import-from-fetch-errored.js",
+  "v8/test/mjsunit/harmony/import-from-instantiation-errored.js",
+  "v8/test/mjsunit/harmony/numeric-separator.js",
+  "v8/test/mjsunit/harmony/private-fields-special-object.js",
+  "v8/test/mjsunit/html-comments.js",
+  "v8/test/mjsunit/ignition/dead-code-source-position.js",
+  "v8/test/mjsunit/regress/regress-436893.js",
+  "v8/test/mjsunit/regress/regress-5692.js",
+  "v8/test/mjsunit/regress/regress-740694.js",
+  "v8/test/mjsunit/regress/regress-744292.js",
+  "v8/test/mjsunit/regress/regress-797581.js",
+  "v8/test/mjsunit/regress/regress-800651.js",
+  "v8/test/mjsunit/regress/regress-crbug-412208.js",
+  "v8/test/mjsunit/regress/regress-crbug-934166.js",
+  "v8/test/mjsunit/serialize-embedded-error.js",
+  "v8/test/mjsunit/switch.js",
+  /* Tests with mutation errors from v8 */
+  "v8/test/mjsunit/harmony/private-accessors.js",
+  "v8/test/mjsunit/harmony/private-methods-empty-inner.js",
+  "v8/test/mjsunit/harmony/private-methods.js",
+  "v8/test/mjsunit/harmony/static-private-methods.js",
+  /* Very slow tests from v8 */
+  "v8/test/mjsunit/regress/wasm/regress-810973b.js", // 12.121s
+  /* Tests with parse errors from spidermonkey */
+  "spidermonkey/non262/BigInt/property-name-guessed-name.js",
+  "spidermonkey/non262/BigInt/property-name.js",
+  "spidermonkey/non262/Exceptions/catchguard-002-n.js",
+  "spidermonkey/non262/Exceptions/catchguard-003-n.js",
+  "spidermonkey/non262/Function/rest-parameter-names.js",
+  "spidermonkey/non262/Unicode/regress-352044-02-n.js",
+  "spidermonkey/non262/arrow-functions/arrow-not-as-end-of-statement.js",
+  "spidermonkey/non262/arrow-functions/arrow-returning-arrow-with-block-body-followed-by-regexp.js",
+  "spidermonkey/non262/async-functions/duplicate-__proto__.js",
+  "spidermonkey/non262/destructuring/bug1396261.js",
+  "spidermonkey/non262/destructuring/duplicate-__proto__.js",
+  "spidermonkey/non262/destructuring/yield-in-object-destr-function.js",
+  "spidermonkey/non262/destructuring/yield-in-object-destr-script.js",
+  "spidermonkey/non262/destructuring/yield-with-escape-in-object-destr-function.js",
+  "spidermonkey/non262/destructuring/yield-with-escape-in-object-destr-script.js",
+  "spidermonkey/non262/extensions/inc-dec-functioncall.js",
+  "spidermonkey/non262/extensions/regress-406572.js",
+  "spidermonkey/non262/fields/await-identifier-module-1.js",
+  "spidermonkey/non262/fields/await-identifier-module-2.js",
+  "spidermonkey/non262/fields/await-identifier-script.js",
+  "spidermonkey/non262/generators/syntax.js",
+  "spidermonkey/non262/generators/yield-non-regexp.js",
+  "spidermonkey/non262/jit/regress-451673.js",
+  "spidermonkey/non262/lexical-environment/block-scoped-functions-annex-b-if.js",
+  "spidermonkey/non262/lexical-environment/block-scoped-functions-annex-b-parameter.js",
+  "spidermonkey/non262/module/bug1488117-import-namespace.js",
+  "spidermonkey/non262/module/bug1488117.js",
+  "spidermonkey/non262/regress/regress-243389-n.js",
+  "spidermonkey/non262/regress/regress-319391.js",
+  "spidermonkey/non262/regress/regress-350253.js",
+  "spidermonkey/non262/regress/regress-351515.js",
+  "spidermonkey/non262/regress/regress-452498-168-2.js",
+  "spidermonkey/non262/regress/regress-462292.js",
+  "spidermonkey/non262/regress/regress-467495-03.js",
+  "spidermonkey/non262/regress/regress-467495-04.js",
+  "spidermonkey/non262/regress/regress-467495-05.js",
+  "spidermonkey/non262/regress/regress-467495-06.js",
+  "spidermonkey/non262/regress/regress-609617.js",
+  "spidermonkey/non262/regress/regress-98901.js",
+  "spidermonkey/non262/statements/for-of-iterator-close.js",
+  "spidermonkey/test/expected/export/multi-header.js",
+  "spidermonkey/test/expected/export/reftest-error-syntaxerror.js",
+  "spidermonkey/test/expected/export/regular.js",
+  "spidermonkey/test/expected/import/files/local/smTempBranch/language/export/escaped-foobarbaz.js",
+  "spidermonkey/test/fixtures/export/.ignore.js",
+  "spidermonkey/test/fixtures/export/multi-header.js",
+  "spidermonkey/test/fixtures/export/reftest-error-syntaxerror.js",
+  "spidermonkey/test/fixtures/export/regular.js",
+  /* Very slow tests from spidermonkey */
+  "spidermonkey/non262/regress/regress-308085.js", // 14.519s
+  /* Tests with parse errors from chakra */
+  "chakra/Basics/keyword.js",
+  "chakra/Bugs/OS_4341640.js",
+  "chakra/Bugs/SuperAccessInGlobalLambda.js",
+  "chakra/Closures/bug_OS_2525694.js",
+  "chakra/Function/callsideeffects.js",
+  "chakra/Function/crosssite_bind_main.js",
+  "chakra/Function/deferredBadContinue.js",
+  "chakra/LetConst/defer1.js",
+  "chakra/LetConst/letlet.js",
+  "chakra/LetConst/tdz2.js",
+  "chakra/Optimizer/test115.js",
+  "chakra/Optimizer/test135.js",
+  "chakra/Optimizer/test146.js",
+  "chakra/Scanner/InvalidCharacter.js",
+  "chakra/Strings/long_concatstr.js",
+  "chakra/benchmarks/ARES-6/Babylon/babylon-blob.js",
+  "chakra/es6/DeferParseMethods.js",
+  "chakra/es6/ES6Iterators-forof.js",
+  "chakra/es6/blockscope-functionbinding.js",
+  "chakra/es6/bug_OS17895855.js",
+  "chakra/es6/bug_issue_3247_dep.js",
+  "chakra/es6/bug_issue_5994.js",
+  "chakra/es6/forInEdgeCases.js",
+  "chakra/es6/generators-syntax.js",
+  "chakra/es6/globalCatchNewTargetSyntaxError.js",
+  "chakra/es6/globalNewTargetSyntaxError.js",
+  "chakra/es6/globalParamCatchNewTargetSyntaxError.js",
+  "chakra/es6/lambda-expr.js",
+  "chakra/es6/objlit.js",
+  "chakra/es6/supersyntax02.js",
+  "chakra/es6/supersyntax05.js",
+  "chakra/es6/supersyntax06.js",
+  "chakra/es6/unicode_6_identifier_Blue511452.js",
+  "chakra/es6/unicode_6_identifier_Blue524737.js",
+  "chakra/es6module/GetModuleNamespace.js",
+  "chakra/es6module/ModuleCircularBar.js",
+  "chakra/es6module/ModuleCircularFoo.js",
+  "chakra/es6module/ModuleComplexExports.js",
+  "chakra/es6module/ModuleComplexReexports.js",
+  "chakra/es6module/ModuleDefaultExport1.js",
+  "chakra/es6module/ModuleDefaultExport2.js",
+  "chakra/es6module/ModuleDefaultExport3.js",
+  "chakra/es6module/ModuleDefaultExport4.js",
+  "chakra/es6module/ModuleDefaultReexport.js",
+  "chakra/es6module/ModuleReexportDefault.js",
+  "chakra/es6module/ModuleSimpleExport.js",
+  "chakra/es6module/ModuleSimpleReexport.js",
+  "chakra/es6module/ValidExportDefaultStatement1.js",
+  "chakra/es6module/ValidExportDefaultStatement2.js",
+  "chakra/es6module/ValidExportStatements.js",
+  "chakra/es6module/ValidExportStatements2.js",
+  "chakra/es6module/ValidImportStatements.js",
+  "chakra/es6module/ValidReExportStatements.js",
+  "chakra/es6module/bug_OS12095746.js",
+  "chakra/es6module/bug_OS12095746_mod0.js",
+  "chakra/es6module/bug_OS12095746_mod1.js",
+  "chakra/es6module/bug_OS12095746_mod2.js",
+  "chakra/es6module/bug_OS12095746_moddep.js",
+  "chakra/es6module/bug_OS12113549_module1.js",
+  "chakra/es6module/bug_OS14562349.js",
+  "chakra/es6module/bug_issue_3076.js",
+  "chakra/es6module/bug_issue_3257/mod/mod0.js",
+  "chakra/es6module/bug_issue_3257/mod1.js",
+  "chakra/es6module/bug_issue_3257/mod2/mod2.js",
+  "chakra/es6module/bug_issue_3257/script/script0.js",
+  "chakra/es6module/bug_issue_5777.js",
+  "chakra/es6module/dynamic_import_promises_5796.js",
+  "chakra/es6module/exportBinding.js",
+  "chakra/es6module/exportmodule.js",
+  "chakra/es6module/module-3250-bug-dep.js",
+  "chakra/es6module/module-3250-bug-dep2.js",
+  "chakra/es6module/module-3250-ext-a.js",
+  "chakra/es6module/module-3250-ext-b.js",
+  "chakra/es6module/moduleExport1.js",
+  "chakra/es6module/moduleImportTheError.js",
+  "chakra/es6module/moduleThrowAnError.js",
+  "chakra/es6module/module_1_2645.js",
+  "chakra/es6module/module_2_2645.js",
+  "chakra/es6module/module_4482_dep1.js",
+  "chakra/es6module/module_4482_dep2.js",
+  "chakra/es6module/module_4482_dep3.js",
+  "chakra/es6module/module_4570_dep1.js",
+  "chakra/es6module/module_4570_dep2.js",
+  "chakra/es6module/moduletest2_mod0.js",
+  "chakra/es6module/moduletest2_mod1a.js",
+  "chakra/es6module/moduletest2_mod1b.js",
+  "chakra/es6module/moduletest2_mod2a.js",
+  "chakra/es6module/otherModule.js",
+  "chakra/es6module/passmodule.js",
+  "chakra/es6module/testDynamicImportfromModule.js",
+  "chakra/es7/json_superset.js",
+  "chakra/inlining/bug_gh6303.js",
+  "chakra/inlining/profilingbug.js",
+  "chakra/stackfunc/602481.js",
+  "chakra/strict/bug212755.js",
+  "chakra/strict/delete.js",
+  "chakra/strict/formal_samename1.js",
+  "chakra/strict/formal_samename2.js",
+  "chakra/strict/multiunit.js",
+  "chakra/strict/nonSimpleParameterList.js",
+  "chakra/strict/strictargs-deferred.js",
+  "chakra/strict/strictargs2-deferred.js",
+  "chakra/strict/strictargs3-deferred.js",
+  "chakra/strict/stricteval-deferred.js",
+  "chakra/strict/stricteval2-deferred.js",
+  "chakra/strict/strictkwd-deferred.js",
+  /* Tests with mutation errors from chakra */
+  "chakra/es6module/bug_issue_3257.js",
+  "chakra/es6module/moduleUrlInError.js",
+  "chakra/es6module/moduletest1.js",
+  "chakra/es6module/moduletest2.js",
+  /* Very slow tests from chakra */
+  "chakra/benchmarks/ARES-6/Air/payload-imaging-gaussian-blur-gaussianBlur.js", // 21.782s
+  "chakra/benchmarks/ARES-6/Air/payload-gbemu-executeIteration.js", // 18.461s
+  /* Tests with parse errors from jstests */
+  "WebKit/JSTests/es6/non-strict_function_semantics_function_statements_in_if-statement_clauses.js",
+  "WebKit/JSTests/stress/arrowfunction-lexical-bind-this-8.js",
+  "WebKit/JSTests/stress/big-int-as-property-name.js",
+  "WebKit/JSTests/stress/for-let-comma.js",
+  "WebKit/JSTests/stress/import-basic.js",
+  "WebKit/JSTests/stress/import-from-eval.js",
+  "WebKit/JSTests/stress/import-reject-with-exception.js",
+  "WebKit/JSTests/stress/import-tests/cocoa.js",
+  "WebKit/JSTests/stress/import-tests/multiple.js",
+  "WebKit/JSTests/stress/import-tests/multiple2.js",
+  "WebKit/JSTests/stress/import-tests/should.js",
+  "WebKit/JSTests/stress/import-with-empty-string.js",
+  "WebKit/JSTests/stress/module-namespace-access-change.js",
+  "WebKit/JSTests/stress/module-namespace-access-non-constant.js",
+  "WebKit/JSTests/stress/module-namespace-access-poly.js",
+  "WebKit/JSTests/stress/module-namespace-access-transitive-exports.js",
+  "WebKit/JSTests/stress/module-namespace-access.js",
+  "WebKit/JSTests/stress/printableModuleKey-should-never-throw.js",
+  "WebKit/JSTests/stress/re-execute-error-module.js",
+  "WebKit/JSTests/stress/regress-170732.js",
+  "WebKit/JSTests/stress/regress-191856.js",
+  "WebKit/JSTests/stress/resources/error-module.js",
+  "WebKit/JSTests/stress/resources/module-namespace-access-transitive-exports-2.js",
+  "WebKit/JSTests/stress/resources/module-namespace-access-transitive-exports.js",
+  "WebKit/JSTests/stress/resources/module-namespace-access.js",
+  "WebKit/JSTests/stress/sloppy-mode-function-hoisting.js",
+  "WebKit/JSTests/stress/yield-label.js",
+  /* Tests with parse errors from crashtests */
+  "CrashTests/115674352/util.js",
+  "CrashTests/132918471/fast/js/resources/js-test-post.js",
+  "CrashTests/135733397/resources/cookies-test-post.js",
+  "CrashTests/158344541/support/refTestWait.js",
+  "CrashTests/178811021/media-file.js",
+  "CrashTests/178811021/video-test.js",
+  "CrashTests/31681819/fast/js/resources/js-test-post.js",
+  "CrashTests/4506116685037568/00422.js",
+  "CrashTests/4523272292270080/02509.js",
+  "CrashTests/4531783459405824/01124.js",
+  "CrashTests/4563969814560768/1.0.2/conformance/resources/webgl-test-utils.js",
+  "CrashTests/4563969814560768/1.0.2/conformance/resources/webgl-test.js",
+  "CrashTests/4563969814560768/1.0.2/resources/js-test-pre.js",
+  "CrashTests/4592095397150720/619.js",
+  "CrashTests/4620742728613888/02272.js",
+  "CrashTests/4675875294674944/04443.js",
+  "CrashTests/4676310267068416/00041.js",
+  "CrashTests/4676310267068416/meta-00041.js",
+  "CrashTests/4689880216240128/meta-00116.js",
+  "CrashTests/4710304564903936/resources/js-test-post.js",
+  "CrashTests/4715062213476352/meta-00915.js",
+  "CrashTests/4730791635451904/00498.js",
+  "CrashTests/4731918950203392/04316.js",
+  "CrashTests/4747813124571136/00737.js",
+  "CrashTests/4777280799506432/01745.js",
+  "CrashTests/4784915024707584/meta-00090.js",
+  "CrashTests/4786126975139840/00686.js",
+  "CrashTests/4804606392795136/00345.js",
+  "CrashTests/4823335117783040/meta-00192.js",
+  "CrashTests/4827148775849984/02704.js",
+  "CrashTests/4835573090222080/00096.js",
+  "CrashTests/4835573090222080/meta-00096.js",
+  "CrashTests/4837730048278528/03052.js",
+  "CrashTests/4843490131312640/03475.js",
+  "CrashTests/4850895428517888/2670.js",
+  "CrashTests/4854644212105216/392.js",
+  "CrashTests/4855156194934784/meta-00080.js",
+  "CrashTests/4899093893742592/02412.js",
+  "CrashTests/4914294111076352/resources/worker-common.js",
+  "CrashTests/4920133693472768/09913.js",
+  "CrashTests/4924640359088128/resources/methods.js",
+  "CrashTests/4966722004058112/01202.js",
+  "CrashTests/4996582417891328/00314.js",
+  "CrashTests/5010966233481216/05443.js",
+  "CrashTests/5016449390477312/02595.js",
+  "CrashTests/5019028805124096/01923.js",
+  "CrashTests/5024919414112256/02190.js",
+  "CrashTests/5026147325968384/01281.js",
+  "CrashTests/5034236802498560/00038.js",
+  "CrashTests/5034236802498560/meta-00038.js",
+  "CrashTests/5036266378756096/resources/js-test.js",
+  "CrashTests/5040821977219072/07176.js",
+  "CrashTests/5047964758441984/02170.js",
+  "CrashTests/5064701201350656/mjsunit_modified.js",
+  "CrashTests/5071018176282624/05782.js",
+  "CrashTests/5073353348087808/03720.js",
+  "CrashTests/5083537469079552/03453.js",
+  "CrashTests/5091969183776768/js/angular.js",
+  "CrashTests/5104674803023872/meta-00066.js",
+  "CrashTests/5110246766673920/117.js",
+  "CrashTests/5126730184654848/00846.js",
+  "CrashTests/5140656268640256/resources/interpolation-test.js",
+  "CrashTests/5151090662178816/01492.js",
+  "CrashTests/5153368765628416/00787.js",
+  "CrashTests/5157575680327680/06055.js",
+  "CrashTests/5159014924288000/01496.js",
+  "CrashTests/5164793027624960/01357.js",
+  "CrashTests/5165045173846016/Common/MV.js",
+  "CrashTests/5168023154720768/meta-00125.js",
+  "CrashTests/5171658670473216/02099.js",
+  "CrashTests/5190716938387456/05795.js",
+  "CrashTests/5192473061359616/meta-00780.js",
+  "CrashTests/5197954434596864/01324.js",
+  "CrashTests/5200019587334144/meta-00398.js",
+  "CrashTests/5222394685292544/resources/stress-js-execution.js",
+  "CrashTests/5226258591121408/04850.js",
+  "CrashTests/5226692407984128/meta-00030.js",
+  "CrashTests/5238861996490752/01351.js",
+  "CrashTests/5242104612651008/mjsunit_modified.js",
+  "CrashTests/5254331529166848/meta-00409.js",
+  "CrashTests/5254663852261376/meta-00497.js",
+  "CrashTests/5280655383724032/meta-00355.js",
+  "CrashTests/5283736101322752/00921.js",
+  "CrashTests/5289653387919360/635.js",
+  "CrashTests/5301431549820928/meta-00738.js",
+  "CrashTests/5335637787934720/mersenne.js",
+  "CrashTests/5337864091729920/00078.js",
+  "CrashTests/5346139261108224/meta-00086.js",
+  "CrashTests/5354535555825664/01561.js",
+  "CrashTests/5402562393276416/meta-00565.js",
+  "CrashTests/5418169017303040/156.js",
+  "CrashTests/5435516837429248/02591.js",
+  "CrashTests/5443796254064640/08661.js",
+  "CrashTests/5474124668600320/meta-00992.js",
+  "CrashTests/5486126228570112/02840.js",
+  "CrashTests/5487631620112384/animations/resources/animation-test-helpers.js",
+  "CrashTests/5521703332741120/00376.js",
+  "CrashTests/5530153792045056/meta-00910.js",
+  "CrashTests/5540548491608064/01146.js",
+  "CrashTests/5541203771916288/currentscript.js",
+  "CrashTests/5559694775025664/imported/w3c/html-templates/testcommon.js",
+  "CrashTests/5559694775025664/resources/testharness.js",
+  "CrashTests/5559694775025664/resources/testharnessreport.js",
+  "CrashTests/5568247077011456/04042.js",
+  "CrashTests/5593713261412352/319.js",
+  "CrashTests/5606041047007232/02795.js",
+  "CrashTests/5606702255964160/meta-00168.js",
+  "CrashTests/5615411944226816/02684.js",
+  "CrashTests/5630063870214144/02130.js",
+  "CrashTests/5630225822121984/03637.js",
+  "CrashTests/5635645835182080/worker-serviceworker-7636bedbbb1f120d.js",
+  "CrashTests/5639628713492480/04139.js",
+  "CrashTests/5642849944993792/resources/js-test.js",
+  "CrashTests/5644307466878976/__MACOSX/._audio.js",
+  "CrashTests/5644307466878976/__MACOSX/._processor.js",
+  "CrashTests/5649862583648256/meta-00236.js",
+  "CrashTests/5650039238033408/00812.js",
+  "CrashTests/5651703040835584/resources/testharnessreport.js",
+  "CrashTests/5652465613406208/4584.js",
+  "CrashTests/5661345388167168/resources/stress-js-execution.js",
+  "CrashTests/5668694740172800/meta-00294.js",
+  "CrashTests/5672678890405888/resources/js-test.js",
+  "CrashTests/5685487336161280/03642.js",
+  "CrashTests/5686447370665984/sdk/tests/conformance/ogles/ogles-utils.js",
+  "CrashTests/5686447370665984/sdk/tests/conformance/resources/glsl-conformance-test.js",
+  "CrashTests/5686447370665984/sdk/tests/conformance/resources/webgl-test-utils.js",
+  "CrashTests/5686447370665984/sdk/tests/resources/js-test-pre.js",
+  "CrashTests/5688866685321216/09854.js",
+  "CrashTests/5689884189392896/02723.js",
+  "CrashTests/5694701996867584/conformance/resources/webgl-test.js",
+  "CrashTests/5703976838234112/test.js",
+  "CrashTests/5707472246472704/1443.js",
+  "CrashTests/5721502735532032/03042.js",
+  "CrashTests/5734750167105536/01271.js",
+  "CrashTests/5735023732064256/meta-00070.js",
+  "CrashTests/5736353084342272/resources/testharness.js",
+  "CrashTests/5737388710821888/resources/js-test.js",
+  "CrashTests/5744365229441024/resources/testharness.js",
+  "CrashTests/5745342726537216/meta-00053.js",
+  "CrashTests/5755508264534016/00224.js",
+  "CrashTests/5763511307337728/04651.js",
+  "CrashTests/5774432061095936/00972.js",
+  "CrashTests/5798263663099904/01459.js",
+  "CrashTests/5802116248764416/06966.js",
+  "CrashTests/5806021251432448/background.js",
+  "CrashTests/58219635/fast/js/resources/js-test-post.js",
+  "CrashTests/5826758986170368/resources/js-test-post.js",
+  "CrashTests/5841445051170816/resources/js-test-pre.js",
+  "CrashTests/5842510916091904/resources/webgl_test_files/resources/js-test-post.js",
+  "CrashTests/58693299/selfhtml.js",
+  "CrashTests/5910324886634496/02597.js",
+  "CrashTests/5925149103357952/webaudio/resources/audit.js",
+  "CrashTests/5931087833333760/03890.js",
+  "CrashTests/5933875666616320/01048.js",
+  "CrashTests/5949184339083264/poc.js",
+  "CrashTests/5950573451804672/494.js",
+  "CrashTests/5950617700007936/01848.js",
+  "CrashTests/5970316012290048/414.js",
+  "CrashTests/5996165326962688/meta-00146.js",
+  "CrashTests/6010230465626112/resources/webgl_test_files/js/tests/tex-image-and-sub-image-with-image-bitmap-utils.js",
+  "CrashTests/6018592823836672/support/refTestWait.js",
+  "CrashTests/6026840030314496/00848.js",
+  "CrashTests/6037361396809728/02741.js",
+  "CrashTests/6049932086083584/03169.js",
+  "CrashTests/6051257375784960/03082.js",
+  "CrashTests/6071110049988608/00548.js",
+  "CrashTests/6073192676327424/resources/js-test-pre.js",
+  "CrashTests/6085702952681472/00521.js",
+  "CrashTests/6113149884563456/02823.js",
+  "CrashTests/6150179231105024/conformance/resources/webgl-test.js",
+  "CrashTests/6158905865666560/meta-00624.js",
+  "CrashTests/6179220893204480/02159.js",
+  "CrashTests/6183415490019328/02656.js",
+  "CrashTests/6198951751188480/1248.js",
+  "CrashTests/6204924829630464/meta-00272.js",
+  "CrashTests/6216405020835840/03985.js",
+  "CrashTests/6218202061209600/mersenne.js",
+  "CrashTests/6220111297118208/meta-00050.js",
+  "CrashTests/6223202209234944/02648.js",
+  "CrashTests/6239384157552640/meta-00271.js",
+  "CrashTests/6250055858126848/04619.js",
+  "CrashTests/6255231244697600/meta-00216.js",
+  "CrashTests/6255916311379968/1372.js",
+  "CrashTests/6259138054324224/meta-00172.js",
+  "CrashTests/6278159702425600/01463.js",
+  "CrashTests/6280577705705472/1146.js",
+  "CrashTests/6285336190124032/01621.js",
+  "CrashTests/6295241556492288/01763.js",
+  "CrashTests/6304143111356416/00782.js",
+  "CrashTests/6328755580567552/resources/js-test.js",
+  "CrashTests/6328755580567552/svg/dynamic-updates/resources/SVGTestCase.js",
+  "CrashTests/6330764245073920/poc1.js",
+  "CrashTests/6332832186761216/00681.js",
+  "CrashTests/6339944789049344/04142.js",
+  "CrashTests/6345007341764608/00699.js",
+  "CrashTests/6351789088833536/meta-00122.js",
+  "CrashTests/6352599495475200/meta-00093.js",
+  "CrashTests/6358320674242560/resources/js-test.js",
+  "CrashTests/6359996105359360/934166-poc.js",
+  "CrashTests/6362821967740928/00524.js",
+  "CrashTests/6374053756272640/824.js",
+  "CrashTests/6387037962240000/02996.js",
+  "CrashTests/6394941698998272/meta-00167.js",
+  "CrashTests/6403694305476608/meta-00079.js",
+  "CrashTests/6405187880353792/meta-00826.js",
+  "CrashTests/6406267560263680/02111.js",
+  "CrashTests/6406622734974976/meta-00188.js",
+  "CrashTests/6427802493779968/meta-00335.js",
+  "CrashTests/6440904627060736/meta-00149.js",
+  "CrashTests/6443097497010176/1.0.2/resources/webgl_test_files/conformance/resources/webgl-test-utils.js",
+  "CrashTests/6443097497010176/1.0.2/resources/webgl_test_files/conformance/resources/webgl-test.js",
+  "CrashTests/6443097497010176/1.0.2/resources/webgl_test_files/resources/js-test-pre.js",
+  "CrashTests/6443097497010176/1.0.2/resources/webkit-webgl-test-harness.js",
+  "CrashTests/6449605693931520/meta-00261.js",
+  "CrashTests/6452658006392832/meta-00824.js",
+  "CrashTests/6463535346614272/meta-00886.js",
+  "CrashTests/6509855193169920/03031.js",
+  "CrashTests/6530413356122112/meta-00391.js",
+  "CrashTests/6541223017054208/01484.js",
+  "CrashTests/6550225930944512/mnt/scratch0/clusterfuzz/slave-bot/inputs/fuzzers/inferno_twister_custom_bundle/inferno_twister_custom_bundle_data/moz_tests/dom/workers/test/threadErrors_worker1.js",
+  "CrashTests/6552552797503488/bug_41414141.js",
+  "CrashTests/6576437049950208/conformance/resources/glsl-generator.js",
+  "CrashTests/6576437049950208/resources/js-test-pre.js",
+  "CrashTests/6586504922267648/00672.js",
+  "CrashTests/6597230699216896/meta-00299.js",
+  "CrashTests/6613865297084416/builds/chromium-browser-syzyasan_win32-release/revisions/asan-win32-release-276100/resources/inspector/main/Main.js",
+  "CrashTests/6622275291840512/resources/js-test.js",
+  "CrashTests/6644133880397824/00752.js",
+  "CrashTests/6646069054013440/poc.js",
+  "CrashTests/6658388688371712/00042.js",
+  "CrashTests/6659440421109760/00027.js",
+  "CrashTests/6695292278931456/04706.js",
+  "CrashTests/6727300586405888/builds/chromium-browser-syzyasan_win32-release/revisions/asan-win32-release-259551/resources/inspector/Main.js",
+  "CrashTests/6731147175526400/meta-00107.js",
+  "CrashTests/6744125769252864/494.js",
+  /* Tests with mutation errors from crashtests */
+  "CrashTests/4542853924782080/01450.js",
+  "CrashTests/4575654914293760/01532.js",
+  "CrashTests/4652594229411840/00000.js",
+  "CrashTests/4656490341466112/00126.js",
+  "CrashTests/4672370177736704/00528.js",
+  "CrashTests/4798856567717888/04694.js",
+  "CrashTests/4804923870150656/03027.js",
+  "CrashTests/4895570342707200/02467.js",
+  "CrashTests/4983976359100416/02090.js",
+  "CrashTests/5087167542853632/02505.js",
+  "CrashTests/5136618234314752/00136.js",
+  "CrashTests/5518580170096640/00960.js",
+  "CrashTests/5604116503199744/00316.js",
+  "CrashTests/5631123291111424/00708.js",
+  "CrashTests/5701970444288000/00241.js",
+  "CrashTests/5834909260709888/01407.js",
+  "CrashTests/5927058168610816/01389.js",
+  "CrashTests/6005687605002240/00969.js",
+  "CrashTests/6396053053243392/00161.js",
+  "CrashTests/6491889133158400/01408.js",
+  "CrashTests/6666268416671744/09877.js",
+  /* Very slow tests from crashtests */
+  "CrashTests/5680390288441344/scripts/extension.js", // 48.473s
+  "CrashTests/5680390288441344/scripts/feedback.js", // 24.001s
+  "CrashTests/5666182802309120/binaryen-1564.js", // 18.342s
+  "CrashTests/5306741051621376/binaryen-2962.js", // 16.455s
+  "CrashTests/6071297320747008/324.js", // 14.031s
+  "CrashTests/6250982932086784/binaryen-538.js", // 11.258s
+  "CrashTests/5187171718529024/844.js", // 10.189s
+  "CrashTests/4741082707132416/binaryen-1700.js", // 10.129s
+];
+const softSkipped = [
+  /* Slow tests from v8 */
+  "v8/test/mjsunit/object-literal.js", // 4.219s
+  "v8/test/mjsunit/wasm/wasm-module-builder.js", // 4.07s
+  /* Slow tests from spidermonkey */
+  "spidermonkey/non262/statements/regress-74474-003.js", // 7.228s
+  "spidermonkey/non262/statements/regress-74474-002.js", // 7.209s
+  "spidermonkey/non262/extensions/dataview.js", // 3.845s
+  /* Slow tests from chakra */
+  "chakra/TaggedIntegers/loops.js", // 7.354s
+  "chakra/benchmarks/ARES-6/Air/payload-typescript-scanIdentifier.js", // 7.011s
+  "chakra/benchmarks/Octane/crypto.js", // 4.004s
+  "chakra/benchmarks/Octane_Closure/crypto.js", // 3.178s
+  "chakra/benchmarks/ARES-6/Air/payload-airjs-ACLj8C.js", // 2.918s
+  /* Slow tests from jstests */
+  "WebKit/JSTests/stress/v8-crypto-strict.js", // 3.023s
+  "WebKit/JSTests/stress/v8-regexp-strict.js", // 2.555s
+  /* Slow tests from crashtests */
+  "CrashTests/5082337238712320/binaryen-3268.js", // 9.621s
+  "CrashTests/4602127226241024/js/jquery.js", // 9.337s
+  "CrashTests/6472801805664256/common/js/frameworks/jquery-1.8.2.min.js", // 8.859s
+  "CrashTests/5657116044951552/scripts/libs/jquery.js", // 8.649s
+  "CrashTests/4614296351277056/js/jquery-1.8.0.min.js", // 8.446s
+  "CrashTests/5550653104455680/js/jquery-1.8.0.min.js", // 8.426s
+  "CrashTests/5091969183776768/js/jquery.js", // 8.396s
+  "CrashTests/4612142496743424/binaryen-1882.js", // 8.101s
+  "CrashTests/5049543056424960/inc/jquery-2.1.0.min.js", // 7.912s
+  "CrashTests/6183950024441856/common/widget-api/widgets/common/jquery-1.7.1.min.js", // 7.454s
+  "CrashTests/6183950024441856/common/components/menu/js/jquery-1.7.1.min.js", // 7.409s
+  "CrashTests/5365583999664128/extensionData/plugins/4_jquery_1_7_1.js", // 7.298s
+  "CrashTests/4615141375344640/lib/jquery.js", // 7.144s
+  "CrashTests/6183950024441856/common/js/jquery-1.7.1.min.js", // 7.133s
+  "CrashTests/5657174977806336/binaryen-1398.js", // 6.913s
+  "CrashTests/6327982568898560/binaryen-862.js", // 6.736s
+  "CrashTests/4633495124312064/634.js", // 5.399s
+  "CrashTests/5689977077891072/01770.js", // 5.345s
+  "CrashTests/6636948839202816/121.js", // 5.31s
+  "CrashTests/5365583999664128/extensionData/plugins/17_jQuery.js", // 5.234s
+  "CrashTests/5533984447266816/626.js", // 5.002s
+  "CrashTests/4528969625894912/encaiiljifbdbjlphpgpiimidegddhic/lib/3rdparty/jquery.js", // 4.998s
+  "CrashTests/5274731158568960/test2.js", // 4.907s
+  "CrashTests/4528969625894912/lib/3rdparty/jquery.js", // 4.847s
+  "CrashTests/6103088053354496/965.js", // 4.574s
+  "CrashTests/5293298093391872/65.js", // 3.944s
+  "CrashTests/6215250211504128/05886.js", // 3.928s
+  "CrashTests/6107728614522880/wasm-hashset/many-test.js", // 3.235s
+  "CrashTests/5157721919979520/00935.js", // 3.224s
+  "CrashTests/5804707603021824/workers/wasm-hashset/worker.js", // 3.116s
+  "CrashTests/6107728614522880/wasm-hashset/worker.js", // 3.115s
+  "CrashTests/4986854798262272/js/webgl-test-utils.js", // 3.098s
+  "CrashTests/4764215218012160/workers/wasm-hashset/worker.js", // 3.092s
+  "CrashTests/4764215218012160/workers/wasm-hashset/test.js", // 3.064s
+  "CrashTests/5970862301904896/wasm-hashset/many-test.js", // 3.037s
+  "CrashTests/6264668110323712/js/webgl-test-utils.js", // 3.031s
+  "CrashTests/5144726426222592/957.js", // 3.028s
+  "CrashTests/4521096081309696/workers/wasm-hashset/many-worker-2.js", // 3.007s
+  "CrashTests/4727886732066816/03031.js", // 2.945s
+  "CrashTests/6171607952523264/workers/wasm-hashset/many-test-2.js", // 2.924s
+  "CrashTests/5804707603021824/workers/wasm-hashset/many-test.js", // 2.92s
+  "CrashTests/5903614327128064/js/webgl-test-utils.js", // 2.892s
+  "CrashTests/5474186315956224/js/webgl-test-utils.js", // 2.881s
+  "CrashTests/5720170289692672/js/webgl-test-utils.js", // 2.88s
+  "CrashTests/5709689405571072/js/webgl-test-utils.js", // 2.87s
+  "CrashTests/4808534067838976/113.js", // 2.852s
+  "CrashTests/5150788929454080/js/webgl-test-utils.js", // 2.842s
+  "CrashTests/4521096081309696/workers/wasm-hashset/many-test-2.js", // 2.839s
+  "CrashTests/4750804070957056/js/webgl-test-utils.js", // 2.837s
+  "CrashTests/5877660912451584/js/webgl-test-utils.js", // 2.831s
+  "CrashTests/6117827240263680/js/webgl-test-utils.js", // 2.821s
+  "CrashTests/5649522772541440/js/webgl-test-utils.js", // 2.821s
+  "CrashTests/6207235662020608/js/webgl-test-utils.js", // 2.81s
+  "CrashTests/5081168717545472/js/webgl-test-utils.js", // 2.793s
+  "CrashTests/6113858805301248/js/webgl-test-utils.js", // 2.781s
+  "CrashTests/4895116383485952/js/webgl-test-utils.js", // 2.767s
+  "CrashTests/5205072808771584/js/webgl-test-utils.js", // 2.766s
+  "CrashTests/5550653104455680/js/esprima.js", // 2.758s
+  "CrashTests/5540518327746560/js/webgl-test-utils.js", // 2.751s
+  "CrashTests/6307834848608256/js/webgl-test-utils.js", // 2.723s
+  "CrashTests/4561088605323264/js/webgl-test-utils.js", // 2.722s
+  "CrashTests/5152046202093568/binaryen-397.js", // 2.721s
+  "CrashTests/4614296351277056/js/esprima.js", // 2.72s
+  "CrashTests/5289255386742784/js/webgl-test-utils.js", // 2.718s
+  "CrashTests/5636770818686976/00408.js", // 2.718s
+  "CrashTests/6021155845308416/js/webgl-test-utils.js", // 2.708s
+  "CrashTests/5316130750332928/js/webgl-test-utils.js", // 2.694s
+  "CrashTests/5630410519478272/916.js", // 2.685s
+  "CrashTests/4763495091142656/js/webgl-test-utils.js", // 2.676s
+  "CrashTests/6643859697434624/00989.js", // 2.672s
+  "CrashTests/6578304131006464/js/webgl-test-utils.js", // 2.63s
+  "CrashTests/5921882795933696/js/webgl-test-utils.js", // 2.613s
+  "CrashTests/5720530023612416/binaryen-1954.js", // 2.592s
+  "CrashTests/5753604559470592/03311.js", // 2.589s
+  "CrashTests/4780408753094656/js/webgl-test-utils.js", // 2.584s
+  "CrashTests/6103004909666304/js/webgl-test-utils.js", // 2.582s
+  "CrashTests/5940011987107840/js/webgl-test-utils.js", // 2.569s
+  "CrashTests/6612369747476480/04469.js", // 2.566s
+  "CrashTests/5766886287081472/js/webgl-test-utils.js", // 2.561s
+  "CrashTests/5130481752735744/817.js", // 2.557s
+  "CrashTests/5667434598760448/js/webgl-test-utils.js", // 2.557s
+  "CrashTests/5304417640513536/js/webgl-test-utils.js", // 2.557s
+  "CrashTests/5069958615400448/js/webgl-test-utils.js", // 2.539s
+  "CrashTests/5803513008095232/js/webgl-test-utils.js", // 2.524s
+  "CrashTests/5684927436423168/js/webgl-test-utils.js", // 2.521s
+  "CrashTests/6343749881036800/01604.js", // 2.516s
+  "CrashTests/6159546553466880/js/webgl-test-utils.js", // 2.506s
+  "CrashTests/5436877461782528/binaryen-4415.js", // 2.492s
+  "CrashTests/5246233363611648/js/webgl-test-utils.js", // 2.478s
+  "CrashTests/5154715558084608/572.js", // 2.472s
+  "CrashTests/5216366704721920/js/webgl-test-utils.js", // 2.47s
+  "CrashTests/5020463045804032/js/webgl-test-utils.js", // 2.44s
+  "CrashTests/6231966593318912/js/webgl-test-utils.js", // 2.438s
+  "CrashTests/4712093587865600/js/webgl-test-utils.js", // 2.421s
+  "CrashTests/4722289303355392/js/webgl-test-utils.js", // 2.415s
+  "CrashTests/6446057308028928/js/webgl-test-utils.js", // 2.414s
+  "CrashTests/6585627176992768/binaryen-655.js", // 2.411s
+  "CrashTests/6371786506371072/js/webgl-test-utils.js", // 2.408s
+  "CrashTests/5875816496627712/js/webgl-test-utils.js", // 2.404s
+  "CrashTests/4571384448811008/fast/canvas/webgl/resources/webgl-test-utils-full.js", // 2.404s
+  "CrashTests/4902839495032832/2.0.0/resources/webgl_test_files/js/webgl-test-utils.js", // 2.391s
+  "CrashTests/6396634260570112/builds/chromium-browser-asan_linux-release_4392242b7f59878a2775b4607420a2b37e17ff13/revisions/asan-linux-release-616366/gen/third_party/blink/public/mojom/payments/payment_request.mojom.js", // 2.379s
+  "CrashTests/5973030224527360/builds/chrome-test-builds_media_mac-release_e6940505d6c387d688e04a7feeb7e2019c3efe81/revisions/asan-mac-release-405858/resources/inspector/heap_snapshot_worker.js", // 2.376s
+  "CrashTests/4928460350029824/js/webgl-test-utils.js", // 2.371s
+  "CrashTests/5447031043915776/js/webgl-test-utils.js", // 2.35s
+  "CrashTests/5097133477462016/binaryen-1557.js", // 2.339s
+  "CrashTests/5748791416979456/js/webgl-test-utils.js", // 2.335s
+  "CrashTests/4979734430351360/builds/chromium-browser-asan_linux-release_4392242b7f59878a2775b4607420a2b37e17ff13/revisions/asan-linux-release-587925/gen/third_party/blink/public/mojom/payments/payment_request.mojom.js", // 2.329s
+  "CrashTests/5882955910873088/test.js", // 2.329s
+  "CrashTests/6030846597005312/binaryen-97.js", // 2.31s
+  "CrashTests/5934321914609664/js/webgl-test-utils.js", // 2.306s
+  "CrashTests/4872723313197056/builds/chromium-browser-asan_linux-release_4392242b7f59878a2775b4607420a2b37e17ff13/revisions/asan-linux-release-589752/gen/third_party/blink/public/mojom/payments/payment_request.mojom.js", // 2.291s
+  "CrashTests/4864843149213696/builds/chromium-browser-asan_linux-release_4392242b7f59878a2775b4607420a2b37e17ff13/revisions/asan-linux-release-588015/gen/third_party/blink/public/mojom/payments/payment_request.mojom.js", // 2.281s
+  "CrashTests/4526031242788864/rf_onloadcontent.js", // 2.261s
+  "CrashTests/5673981645684736/js/webgl-test-utils.js", // 2.247s
+  "CrashTests/5112085437743104/js/webgl-test-utils.js", // 2.223s
+  "CrashTests/4544669955129344/binaryen-1549.js", // 2.211s
+  "CrashTests/4661285908905984/builds/chromium-browser-asan_linux-release_4392242b7f59878a2775b4607420a2b37e17ff13/revisions/asan-linux-release-578254/gen/third_party/blink/public/platform/modules/payments/payment_request.mojom.js", // 2.204s
+  "CrashTests/5710180189995008/js/webgl-test-utils.js", // 2.188s
+  "CrashTests/6522661136760832/js/webgl-test-utils.js", // 2.176s
+  "CrashTests/6158076232990720/resources/testharness.js", // 2.174s
+  "CrashTests/5657181087727616/binaryen-125.js", // 2.159s
+  "CrashTests/4714207862587392/03389.js", // 2.145s
+  "CrashTests/5716123902410752/resources/testharness.js", // 2.135s
+  "CrashTests/6203771342159872/builds/chromium-browser-asan_linux-release_4392242b7f59878a2775b4607420a2b37e17ff13/revisions/asan-linux-release-707359/gen/media/mojo/mojom/media_types.mojom.js", // 2.133s
+  "CrashTests/6393868459180032/jquery.flot.js", // 2.114s
+  "CrashTests/5186189903396864/resources/testharness.js", // 2.111s
+  "CrashTests/5490620452044800/00601.js", // 2.089s
+  "CrashTests/4656197324767232/gen/third_party/blink/public/platform/modules/payments/payment_request.mojom.js", // 2.081s
+  "CrashTests/5873758480105472/conformance/resources/webgl-test-utils.js", // 2.079s
+  "CrashTests/5308016126853120/conformance/resources/webgl-test-utils.js", // 2.075s
+  "CrashTests/6693648314400768/1.0.3/resources/webgl_test_files/conformance/resources/webgl-test-utils.js", // 2.07s
+  "CrashTests/4607827521568768/resources/testharness.js", // 2.066s
+  "CrashTests/6444261469847552/jquery.flot.js", // 2.043s
+  "CrashTests/5949856401326080/conformance/resources/webgl-test-utils.js", // 2.028s
+  "CrashTests/5320478993678336/conformance/resources/webgl-test-utils.js", // 2.024s
+  "CrashTests/4871780976099328/LayoutTests/resources/testharness.js", // 2.024s
+  "CrashTests/5195343992586240/binaryen-2577.js", // 2.022s
+  "CrashTests/5170518889332736/resources/webgl-test-utils.js", // 2.019s
+  "CrashTests/4942847902220288/conformance/resources/webgl-test-utils.js", // 2.005s
+  "CrashTests/6459909679087616/conformance/resources/webgl-test-utils.js", // 2.001s
+];
+const sloppy = [
+  /* Tests requiring sloppy mode from v8 */
+  "v8/test/mjsunit/accessors-on-global-object.js",
+  "v8/test/mjsunit/argument-assigned.js",
+  "v8/test/mjsunit/argument-named-arguments.js",
+  "v8/test/mjsunit/arguments-apply.js",
+  "v8/test/mjsunit/arguments-lazy.js",
+  "v8/test/mjsunit/arguments-read-and-assignment.js",
+  "v8/test/mjsunit/arguments.js",
+  "v8/test/mjsunit/array-indexing-receiver.js",
+  "v8/test/mjsunit/arrow-with.js",
+  "v8/test/mjsunit/asm-directive.js",
+  "v8/test/mjsunit/compiler/delete.js",
+  "v8/test/mjsunit/compiler/global-delete.js",
+  "v8/test/mjsunit/compiler/global-var-delete.js",
+  "v8/test/mjsunit/compiler/opt-next-call-turbo.js",
+  "v8/test/mjsunit/compiler/optimized-for-in.js",
+  "v8/test/mjsunit/compiler/optimized-with.js",
+  "v8/test/mjsunit/compiler/regress-1037771.js",
+  "v8/test/mjsunit/compiler/regress-665680.js",
+  "v8/test/mjsunit/compiler/regress-669517.js",
+  "v8/test/mjsunit/compiler/rotate.js",
+  "v8/test/mjsunit/compiler/try-catch-deopt.js",
+  "v8/test/mjsunit/compiler/try-context.js",
+  "v8/test/mjsunit/compiler/uint32.js",
+  "v8/test/mjsunit/compiler/variables.js",
+  "v8/test/mjsunit/context-calls-maintained.js",
+  "v8/test/mjsunit/delete-global-properties.js",
+  "v8/test/mjsunit/delete-in-with.js",
+  "v8/test/mjsunit/delete-vars-from-eval.js",
+  "v8/test/mjsunit/delete.js",
+  "v8/test/mjsunit/deopt-with-fp-regs.js",
+  "v8/test/mjsunit/elements-kind.js",
+  "v8/test/mjsunit/es6/array-concat-sloppy-arguments-with-dupes.js",
+  "v8/test/mjsunit/es6/arrow-functions-lexical-arguments.js",
+  "v8/test/mjsunit/es6/arrow-functions-this.js",
+  "v8/test/mjsunit/es6/block-eval-var-over-let.js",
+  "v8/test/mjsunit/es6/block-let-contextual-sloppy.js",
+  "v8/test/mjsunit/es6/block-sloppy-function.js",
+  "v8/test/mjsunit/es6/classes.js",
+  "v8/test/mjsunit/es6/default-parameters.js",
+  "v8/test/mjsunit/es6/destructuring-assignment.js",
+  "v8/test/mjsunit/es6/generator-destructuring.js",
+  "v8/test/mjsunit/es6/generators-iteration.js",
+  "v8/test/mjsunit/es6/generators-parsing.js",
+  "v8/test/mjsunit/es6/global-proto-proxy.js",
+  "v8/test/mjsunit/es6/new-target.js",
+  "v8/test/mjsunit/es6/object-literals-property-shorthand.js",
+  "v8/test/mjsunit/es6/proxies-function.js",
+  "v8/test/mjsunit/es6/proxies-with-unscopables.js",
+  "v8/test/mjsunit/es6/proxies-with.js",
+  "v8/test/mjsunit/es6/regress/regress-crbug-461520.js",
+  "v8/test/mjsunit/es6/sloppy-restrictive-block-function.js",
+  "v8/test/mjsunit/es6/super.js",
+  "v8/test/mjsunit/es6/templates.js",
+  "v8/test/mjsunit/es6/unscopables.js",
+  "v8/test/mjsunit/es7/array-includes-receiver.js",
+  "v8/test/mjsunit/es7/exponentiation-operator.js",
+  "v8/test/mjsunit/es8/async-await-basic.js",
+  "v8/test/mjsunit/es8/async-destructuring.js",
+  "v8/test/mjsunit/eval.js",
+  "v8/test/mjsunit/for-in.js",
+  "v8/test/mjsunit/function-arguments-duplicate.js",
+  "v8/test/mjsunit/getter-in-prototype.js",
+  "v8/test/mjsunit/global-arrow-delete-this.js",
+  "v8/test/mjsunit/global-infinity.js",
+  "v8/test/mjsunit/global-load-from-eval-in-with.js",
+  "v8/test/mjsunit/global-load-from-nested-eval.js",
+  "v8/test/mjsunit/global-nan.js",
+  "v8/test/mjsunit/global-undefined.js",
+  "v8/test/mjsunit/global-vars-with.js",
+  "v8/test/mjsunit/harmony/bigint/property-names.js",
+  "v8/test/mjsunit/harmony/global-configurable.js",
+  "v8/test/mjsunit/harmony/import-from-compilation-errored.js",
+  "v8/test/mjsunit/harmony/import-from-evaluation-errored.js",
+  "v8/test/mjsunit/harmony/import-from-fetch-errored.js",
+  "v8/test/mjsunit/harmony/import-from-instantiation-errored.js",
+  "v8/test/mjsunit/harmony/numeric-separator.js",
+  "v8/test/mjsunit/harmony/private-fields-special-object.js",
+  "v8/test/mjsunit/ignition/dead-code-source-position.js",
+  "v8/test/mjsunit/ignition/regress-616064.js",
+  "v8/test/mjsunit/no-octal-constants-above-256.js",
+  "v8/test/mjsunit/override-read-only-property.js",
+  "v8/test/mjsunit/receiver-in-with-calls.js",
+  "v8/test/mjsunit/regress/regress-1030466.js",
+  "v8/test/mjsunit/regress/regress-1079.js",
+  "v8/test/mjsunit/regress/regress-1125.js",
+  "v8/test/mjsunit/regress/regress-1132.js",
+  "v8/test/mjsunit/regress/regress-124.js",
+  "v8/test/mjsunit/regress/regress-1528.js",
+  "v8/test/mjsunit/regress/regress-2071.js",
+  "v8/test/mjsunit/regress/regress-2594.js",
+  "v8/test/mjsunit/regress/regress-263.js",
+  "v8/test/mjsunit/regress/regress-2690.js",
+  "v8/test/mjsunit/regress/regress-3138.js",
+  "v8/test/mjsunit/regress/regress-318420.js",
+  "v8/test/mjsunit/regress/regress-3185905.js",
+  "v8/test/mjsunit/regress/regress-353058.js",
+  "v8/test/mjsunit/regress/regress-392.js",
+  "v8/test/mjsunit/regress/regress-410030.js",
+  "v8/test/mjsunit/regress/regress-410912.js",
+  "v8/test/mjsunit/regress/regress-4169.js",
+  "v8/test/mjsunit/regress/regress-4214.js",
+  "v8/test/mjsunit/regress/regress-436893.js",
+  "v8/test/mjsunit/regress/regress-4577.js",
+  "v8/test/mjsunit/regress/regress-492.js",
+  "v8/test/mjsunit/regress/regress-5205.js",
+  "v8/test/mjsunit/regress/regress-5405.js",
+  "v8/test/mjsunit/regress/regress-5692.js",
+  "v8/test/mjsunit/regress/regress-583260.js",
+  "v8/test/mjsunit/regress/regress-587004.js",
+  "v8/test/mjsunit/regress/regress-592353.js",
+  "v8/test/mjsunit/regress/regress-608630.js",
+  "v8/test/mjsunit/regress/regress-649067.js",
+  "v8/test/mjsunit/regress/regress-6677.js",
+  "v8/test/mjsunit/regress/regress-670147.js",
+  "v8/test/mjsunit/regress/regress-6733.js",
+  "v8/test/mjsunit/regress/regress-678525.js",
+  "v8/test/mjsunit/regress/regress-70066.js",
+  "v8/test/mjsunit/regress/regress-74.js",
+  "v8/test/mjsunit/regress/regress-740694.js",
+  "v8/test/mjsunit/regress/regress-744292.js",
+  "v8/test/mjsunit/regress/regress-784080.js",
+  "v8/test/mjsunit/regress/regress-797581.js",
+  "v8/test/mjsunit/regress/regress-800651.js",
+  "v8/test/mjsunit/regress/regress-801772.js",
+  "v8/test/mjsunit/regress/regress-95485.js",
+  "v8/test/mjsunit/regress/regress-96523.js",
+  "v8/test/mjsunit/regress/regress-969.js",
+  "v8/test/mjsunit/regress/regress-abort-context-allocate-params.js",
+  "v8/test/mjsunit/regress/regress-abort-preparsing-params.js",
+  "v8/test/mjsunit/regress/regress-crbug-1041210.js",
+  "v8/test/mjsunit/regress/regress-crbug-1041616.js",
+  "v8/test/mjsunit/regress/regress-crbug-135008.js",
+  "v8/test/mjsunit/regress/regress-crbug-412208.js",
+  "v8/test/mjsunit/regress/regress-crbug-450642.js",
+  "v8/test/mjsunit/regress/regress-crbug-455644.js",
+  "v8/test/mjsunit/regress/regress-crbug-505907.js",
+  "v8/test/mjsunit/regress/regress-crbug-506956.js",
+  "v8/test/mjsunit/regress/regress-crbug-552304.js",
+  "v8/test/mjsunit/regress/regress-crbug-628573.js",
+  "v8/test/mjsunit/regress/regress-crbug-631027.js",
+  "v8/test/mjsunit/regress/regress-crbug-648740.js",
+  "v8/test/mjsunit/regress/regress-crbug-663750.js",
+  "v8/test/mjsunit/regress/regress-crbug-691687.js",
+  "v8/test/mjsunit/regress/regress-crbug-851393.js",
+  "v8/test/mjsunit/regress/regress-crbug-934166.js",
+  "v8/test/mjsunit/regress/regress-sloppy-block-function-hoisting-dynamic.js",
+  "v8/test/mjsunit/regress/regress-v8-9394-2.js",
+  "v8/test/mjsunit/regress/regress-v8-9394.js",
+  "v8/test/mjsunit/regress/wasm/loop-stack-check.js",
+  "v8/test/mjsunit/regress/wasm/regress-648079.js",
+  "v8/test/mjsunit/regress/wasm/regress-654377.js",
+  "v8/test/mjsunit/regress/wasm/regress-753496.js",
+  "v8/test/mjsunit/scope-calls-eval.js",
+  "v8/test/mjsunit/serialize-embedded-error.js",
+  "v8/test/mjsunit/strict-mode-implicit-receiver.js",
+  "v8/test/mjsunit/strict-mode.js",
+  "v8/test/mjsunit/switch.js",
+  "v8/test/mjsunit/throw-and-catch-function.js",
+  "v8/test/mjsunit/unused-context-in-with.js",
+  "v8/test/mjsunit/value-wrapper.js",
+  "v8/test/mjsunit/wasm/import-mutable-global.js",
+  "v8/test/mjsunit/with-leave.js",
+  "v8/test/mjsunit/with-parameter-access.js",
+  "v8/test/mjsunit/with-prototype.js",
+  "v8/test/mjsunit/with-readonly.js",
+  "v8/test/mjsunit/with-value.js",
+  /* Tests requiring sloppy mode from spidermonkey */
+  "spidermonkey/non262/Array/unscopables.js",
+  "spidermonkey/non262/Array/values.js",
+  "spidermonkey/non262/BigInt/property-name-guessed-name.js",
+  "spidermonkey/non262/BigInt/property-name.js",
+  "spidermonkey/non262/Date/time-zones-posix.js",
+  "spidermonkey/non262/Date/time-zones.js",
+  "spidermonkey/non262/Exceptions/catchguard-002-n.js",
+  "spidermonkey/non262/Exceptions/catchguard-003-n.js",
+  "spidermonkey/non262/Exceptions/regress-273931.js",
+  "spidermonkey/non262/Function/10.2.1.1.6.js",
+  "spidermonkey/non262/Function/arguments-parameter-shadowing.js",
+  "spidermonkey/non262/Function/arrow-has-duplicated.js",
+  "spidermonkey/non262/Function/regress-131964.js",
+  "spidermonkey/non262/Function/regress-94506.js",
+  "spidermonkey/non262/Function/regress-97921.js",
+  "spidermonkey/non262/Function/rest-has-duplicated.js",
+  "spidermonkey/non262/Function/rest-parameter-names.js",
+  "spidermonkey/non262/GC/regress-383269-02.js",
+  "spidermonkey/non262/RegExp/regress-6359.js",
+  "spidermonkey/non262/RegExp/regress-85721.js",
+  "spidermonkey/non262/Scope/regress-184107.js",
+  "spidermonkey/non262/Scope/regress-185485.js",
+  "spidermonkey/non262/Scope/regress-192226.js",
+  "spidermonkey/non262/Scope/regress-208496-001.js",
+  "spidermonkey/non262/Scope/regress-208496-002.js",
+  "spidermonkey/non262/Scope/scope-004.js",
+  "spidermonkey/non262/Script/delete-001.js",
+  "spidermonkey/non262/Script/new-001.js",
+  "spidermonkey/non262/String/regress-392378.js",
+  "spidermonkey/non262/TypedObject/method_from.js",
+  "spidermonkey/non262/TypedObject/method_map.js",
+  "spidermonkey/non262/Unicode/regress-352044-02-n.js",
+  "spidermonkey/non262/arrow-functions/arrow-not-as-end-of-statement.js",
+  "spidermonkey/non262/arrow-functions/arrow-returning-arrow-with-block-body-followed-by-regexp.js",
+  "spidermonkey/non262/async-functions/async-contains-unicode-escape.js",
+  "spidermonkey/non262/async-functions/duplicate-__proto__.js",
+  "spidermonkey/non262/class/outerBinding.js",
+  "spidermonkey/non262/destructuring/bug1396261.js",
+  "spidermonkey/non262/destructuring/duplicate-__proto__.js",
+  "spidermonkey/non262/destructuring/yield-in-object-destr-function.js",
+  "spidermonkey/non262/destructuring/yield-in-object-destr-script.js",
+  "spidermonkey/non262/destructuring/yield-with-escape-in-object-destr-function.js",
+  "spidermonkey/non262/destructuring/yield-with-escape-in-object-destr-script.js",
+  "spidermonkey/non262/eval/redeclared-arguments-in-param-expression-eval.js",
+  "spidermonkey/non262/execution-contexts/regress-448595-01.js",
+  "spidermonkey/non262/expressions/delete-constant-folded-and-or.js",
+  "spidermonkey/non262/extensions/clone-leaf-object.js",
+  "spidermonkey/non262/extensions/clone-simple.js",
+  "spidermonkey/non262/extensions/cross-global-eval-is-indirect.js",
+  "spidermonkey/non262/extensions/eval-native-callback-is-indirect.js",
+  "spidermonkey/non262/extensions/function-definition-with.js",
+  "spidermonkey/non262/extensions/inc-dec-functioncall.js",
+  "spidermonkey/non262/extensions/recursion.js",
+  "spidermonkey/non262/extensions/regress-104077.js",
+  "spidermonkey/non262/extensions/regress-226078.js",
+  "spidermonkey/non262/extensions/regress-352604.js",
+  "spidermonkey/non262/extensions/regress-365527.js",
+  "spidermonkey/non262/extensions/regress-406572.js",
+  "spidermonkey/non262/extensions/regress-416834.js",
+  "spidermonkey/non262/extensions/regress-476414-01.js",
+  "spidermonkey/non262/extensions/regress-476414-02.js",
+  "spidermonkey/non262/extensions/uneval/bug496985.js",
+  "spidermonkey/non262/extensions/uneval/regress-385393-03.js",
+  "spidermonkey/non262/extensions/uneval/regress-452498-101.js",
+  "spidermonkey/non262/extensions/uneval/regress-452498-117.js",
+  "spidermonkey/non262/extensions/uneval/regress-624199.js",
+  "spidermonkey/non262/fields/await-identifier-module-1.js",
+  "spidermonkey/non262/fields/await-identifier-module-2.js",
+  "spidermonkey/non262/fields/await-identifier-script.js",
+  "spidermonkey/non262/generators/iteration.js",
+  "spidermonkey/non262/generators/syntax.js",
+  "spidermonkey/non262/generators/yield-non-regexp.js",
+  "spidermonkey/non262/global/direct-eval-but-not.js",
+  "spidermonkey/non262/global/eval-02.js",
+  "spidermonkey/non262/global/eval-inside-with-is-direct.js",
+  "spidermonkey/non262/global/eval-native-callback-is-indirect.js",
+  "spidermonkey/non262/jit/regress-451673.js",
+  "spidermonkey/non262/lexical-conventions/lexical-001.js",
+  "spidermonkey/non262/lexical-conventions/regress-177314.js",
+  "spidermonkey/non262/lexical-environment/block-scoped-functions-annex-b-arguments.js",
+  "spidermonkey/non262/lexical-environment/block-scoped-functions-annex-b-eval.js",
+  "spidermonkey/non262/lexical-environment/block-scoped-functions-annex-b-if.js",
+  "spidermonkey/non262/lexical-environment/block-scoped-functions-annex-b-parameter.js",
+  "spidermonkey/non262/lexical-environment/block-scoped-functions-annex-b-with.js",
+  "spidermonkey/non262/lexical-environment/block-scoped-functions-deprecated-redecl.js",
+  "spidermonkey/non262/lexical-environment/implicit-this-in-with.js",
+  "spidermonkey/non262/lexical-environment/redeclaring-global-properties.js",
+  "spidermonkey/non262/lexical-environment/unscopables-basics.js",
+  "spidermonkey/non262/lexical-environment/unscopables-closures.js",
+  "spidermonkey/non262/lexical-environment/unscopables-const.js",
+  "spidermonkey/non262/lexical-environment/unscopables-delete.js",
+  "spidermonkey/non262/lexical-environment/unscopables-getters.js",
+  "spidermonkey/non262/lexical-environment/unscopables-global.js",
+  "spidermonkey/non262/lexical-environment/unscopables-ignored.js",
+  "spidermonkey/non262/lexical-environment/unscopables-miss.js",
+  "spidermonkey/non262/lexical-environment/unscopables-mutation-frozen.js",
+  "spidermonkey/non262/lexical-environment/unscopables-mutation.js",
+  "spidermonkey/non262/lexical-environment/unscopables-proto.js",
+  "spidermonkey/non262/lexical-environment/unscopables-proxy.js",
+  "spidermonkey/non262/lexical-environment/unscopables-strict.js",
+  "spidermonkey/non262/lexical-environment/unscopables-tdz.js",
+  "spidermonkey/non262/lexical-environment/with-global-ignores-global-let-variables.js",
+  "spidermonkey/non262/module/bug1488117-import-namespace.js",
+  "spidermonkey/non262/module/bug1488117.js",
+  "spidermonkey/non262/reflect-parse/PatternBuilders.js",
+  "spidermonkey/non262/reflect-parse/classes.js",
+  "spidermonkey/non262/regress/regress-104077.js",
+  "spidermonkey/non262/regress/regress-131510-001.js",
+  "spidermonkey/non262/regress/regress-146596.js",
+  "spidermonkey/non262/regress/regress-170193.js",
+  "spidermonkey/non262/regress/regress-230216-1.js",
+  "spidermonkey/non262/regress/regress-230216-3.js",
+  "spidermonkey/non262/regress/regress-243389-n.js",
+  "spidermonkey/non262/regress/regress-252892.js",
+  "spidermonkey/non262/regress/regress-290656.js",
+  "spidermonkey/non262/regress/regress-319391.js",
+  "spidermonkey/non262/regress/regress-336100.js",
+  "spidermonkey/non262/regress/regress-343713.js",
+  "spidermonkey/non262/regress/regress-344959.js",
+  "spidermonkey/non262/regress/regress-349482-02.js",
+  "spidermonkey/non262/regress/regress-350253.js",
+  "spidermonkey/non262/regress/regress-351515.js",
+  "spidermonkey/non262/regress/regress-352604.js",
+  "spidermonkey/non262/regress/regress-428366.js",
+  "spidermonkey/non262/regress/regress-441477-01.js",
+  "spidermonkey/non262/regress/regress-452498-072.js",
+  "spidermonkey/non262/regress/regress-452498-102.js",
+  "spidermonkey/non262/regress/regress-452498-117.js",
+  "spidermonkey/non262/regress/regress-452498-135.js",
+  "spidermonkey/non262/regress/regress-452498-168-2.js",
+  "spidermonkey/non262/regress/regress-452498-181.js",
+  "spidermonkey/non262/regress/regress-452498-192.js",
+  "spidermonkey/non262/regress/regress-452742-01.js",
+  "spidermonkey/non262/regress/regress-452742-02.js",
+  "spidermonkey/non262/regress/regress-462292.js",
+  "spidermonkey/non262/regress/regress-467495-03.js",
+  "spidermonkey/non262/regress/regress-467495-04.js",
+  "spidermonkey/non262/regress/regress-467495-05.js",
+  "spidermonkey/non262/regress/regress-467495-06.js",
+  "spidermonkey/non262/regress/regress-477234.js",
+  "spidermonkey/non262/regress/regress-483749.js",
+  "spidermonkey/non262/regress/regress-509354.js",
+  "spidermonkey/non262/regress/regress-551763-0.js",
+  "spidermonkey/non262/regress/regress-551763-1.js",
+  "spidermonkey/non262/regress/regress-551763-2.js",
+  "spidermonkey/non262/regress/regress-554955-1.js",
+  "spidermonkey/non262/regress/regress-554955-2.js",
+  "spidermonkey/non262/regress/regress-554955-3.js",
+  "spidermonkey/non262/regress/regress-569306.js",
+  "spidermonkey/non262/regress/regress-591897.js",
+  "spidermonkey/non262/regress/regress-593256.js",
+  "spidermonkey/non262/regress/regress-597945-1.js",
+  "spidermonkey/non262/regress/regress-602621.js",
+  "spidermonkey/non262/regress/regress-609617.js",
+  "spidermonkey/non262/regress/regress-633741.js",
+  "spidermonkey/non262/regress/regress-672892.js",
+  "spidermonkey/non262/regress/regress-68498-001.js",
+  "spidermonkey/non262/regress/regress-68498-002.js",
+  "spidermonkey/non262/regress/regress-68498-003.js",
+  "spidermonkey/non262/regress/regress-68498-004.js",
+  "spidermonkey/non262/regress/regress-98901.js",
+  "spidermonkey/non262/statements/for-in-with-assignment-semantics.js",
+  "spidermonkey/non262/statements/for-in-with-assignments.js",
+  "spidermonkey/non262/statements/for-of-iterator-close.js",
+  "spidermonkey/non262/strict/directive-prologue-01.js",
+  "spidermonkey/non262/syntax/escaped-strict-reserved-words-and-yield.js",
+  "spidermonkey/non262/template-strings/noSubst.js",
+  "spidermonkey/test/expected/export/multi-header.js",
+  "spidermonkey/test/expected/export/reftest-error-syntaxerror.js",
+  "spidermonkey/test/expected/export/regular.js",
+  "spidermonkey/test/expected/import/files/local/smTempBranch/language/export/escaped-foobarbaz.js",
+  "spidermonkey/test/fixtures/export/.ignore.js",
+  "spidermonkey/test/fixtures/export/multi-header.js",
+  "spidermonkey/test/fixtures/export/reftest-error-syntaxerror.js",
+  "spidermonkey/test/fixtures/export/regular.js",
+  /* Tests requiring sloppy mode from chakra */
+  "chakra/AsmJs/evalbug.js",
+  "chakra/AsmJs/switchBinaryTraverse.js",
+  "chakra/Basics/SpecialSymbolCapture.js",
+  "chakra/Basics/With-defer-block-scope.js",
+  "chakra/Basics/With.js",
+  "chakra/Basics/With2.js",
+  "chakra/Basics/keyword.js",
+  "chakra/Basics/scopedaccessors.js",
+  "chakra/Basics/with3.js",
+  "chakra/Basics/withBug940841.js",
+  "chakra/Basics/withBug940841_2.js",
+  "chakra/Basics/witheval.js",
+  "chakra/Bugs/OS_4341640.js",
+  "chakra/Bugs/SuperAccessInGlobalLambda.js",
+  "chakra/Bugs/blue_1096569.js",
+  "chakra/Bugs/bug10191241.js",
+  "chakra/Bugs/bug56026.js",
+  "chakra/Bugs/bug56026_minimal.js",
+  "chakra/Bugs/bug56026_minimalWithProperties.js",
+  "chakra/Bugs/bug56026_nested.js",
+  "chakra/Bugs/bug56026_trycatch.js",
+  "chakra/Bugs/bug_OS18260560.js",
+  "chakra/Bugs/misc_bugs.js",
+  "chakra/Bugs/withSplitScope.js",
+  "chakra/Bugs/withnonativeApplyOptimizationBug3433559.js",
+  "chakra/Closures/bug_OS_10735999.js",
+  "chakra/Closures/bug_OS_13412380.js",
+  "chakra/Closures/bug_OS_2525694.js",
+  "chakra/Closures/cachedscope_1.js",
+  "chakra/Closures/initcachedscope.js",
+  "chakra/ControlFlow/DoWhile.js",
+  "chakra/DebuggerCommon/ES6_letconst_shadow_eval_with.js",
+  "chakra/DebuggerCommon/blockScopeGlobalSlotArrayTest.bug222631.js",
+  "chakra/DebuggerCommon/blockScopeSibling.bug263635.js",
+  "chakra/DebuggerCommon/blockScopeWithTest.js",
+  "chakra/DebuggerCommon/bug_204064.js",
+  "chakra/DebuggerCommon/default.js",
+  "chakra/DebuggerCommon/frames_inspection_arrayES5.js",
+  "chakra/DebuggerCommon/funcSource.js",
+  "chakra/DebuggerCommon/globalFuncVars.js",
+  "chakra/DebuggerCommon/level_1.js",
+  "chakra/DebuggerCommon/protoTest2.js",
+  "chakra/DebuggerCommon/shadow_with.js",
+  "chakra/DebuggerCommon/step_in_only_debugJIT_attach.js",
+  "chakra/DebuggerCommon/with_shadow.js",
+  "chakra/EH/optional-catch-binding.js",
+  "chakra/EH/try2.js",
+  "chakra/EH/try5-ES3.js",
+  "chakra/EH/tryfinallyinlineswbug.js",
+  "chakra/Function/LabelFuncAsWithStmt.js",
+  "chakra/Function/arguments1.js",
+  "chakra/Function/arguments2.js",
+  "chakra/Function/arguments4.js",
+  "chakra/Function/argumentsMisc.js",
+  "chakra/Function/bug542360.js",
+  "chakra/Function/caller_replaced_proto.js",
+  "chakra/Function/callsideeffects.js",
+  "chakra/Function/crosssite_bind_main.js",
+  "chakra/Function/defernested.js",
+  "chakra/Function/deferredBadContinue.js",
+  "chakra/Function/deferredParsing.js",
+  "chakra/Function/deferredWith.js",
+  "chakra/Function/deferredWith2.js",
+  "chakra/Function/funcExpr.js",
+  "chakra/Function/sameNamePara.js",
+  "chakra/Function/someMoreArguments.js",
+  "chakra/Function/stackargs.js",
+  "chakra/Generated/add.js",
+  "chakra/Generated/add0.js",
+  "chakra/Generated/add1.js",
+  "chakra/Generated/add2.js",
+  "chakra/Generated/add3.js",
+  "chakra/Generated/and.js",
+  "chakra/Generated/and0.js",
+  "chakra/Generated/and1.js",
+  "chakra/Generated/and2.js",
+  "chakra/Generated/and3.js",
+  "chakra/Generated/div.js",
+  "chakra/Generated/div0.js",
+  "chakra/Generated/div1.js",
+  "chakra/Generated/div2.js",
+  "chakra/Generated/div3.js",
+  "chakra/Generated/eq.js",
+  "chakra/Generated/eq0.js",
+  "chakra/Generated/eq1.js",
+  "chakra/Generated/eq2.js",
+  "chakra/Generated/eq3.js",
+  "chakra/Generated/ge.js",
+  "chakra/Generated/ge0.js",
+  "chakra/Generated/ge1.js",
+  "chakra/Generated/ge2.js",
+  "chakra/Generated/ge3.js",
+  "chakra/Generated/gt.js",
+  "chakra/Generated/gt0.js",
+  "chakra/Generated/gt1.js",
+  "chakra/Generated/gt2.js",
+  "chakra/Generated/gt3.js",
+  "chakra/Generated/land.js",
+  "chakra/Generated/land0.js",
+  "chakra/Generated/land1.js",
+  "chakra/Generated/land2.js",
+  "chakra/Generated/land3.js",
+  "chakra/Generated/le.js",
+  "chakra/Generated/le0.js",
+  "chakra/Generated/le1.js",
+  "chakra/Generated/le2.js",
+  "chakra/Generated/le3.js",
+  "chakra/Generated/lor.js",
+  "chakra/Generated/lor0.js",
+  "chakra/Generated/lor1.js",
+  "chakra/Generated/lor2.js",
+  "chakra/Generated/lor3.js",
+  "chakra/Generated/lsh.js",
+  "chakra/Generated/lsh0.js",
+  "chakra/Generated/lsh1.js",
+  "chakra/Generated/lsh2.js",
+  "chakra/Generated/lsh3.js",
+  "chakra/Generated/lt.js",
+  "chakra/Generated/lt0.js",
+  "chakra/Generated/lt1.js",
+  "chakra/Generated/lt2.js",
+  "chakra/Generated/lt3.js",
+  "chakra/Generated/mod.js",
+  "chakra/Generated/mod0.js",
+  "chakra/Generated/mod1.js",
+  "chakra/Generated/mod2.js",
+  "chakra/Generated/mod3.js",
+  "chakra/Generated/mul.js",
+  "chakra/Generated/mul0.js",
+  "chakra/Generated/mul1.js",
+  "chakra/Generated/mul2.js",
+  "chakra/Generated/mul3.js",
+  "chakra/Generated/ne.js",
+  "chakra/Generated/ne0.js",
+  "chakra/Generated/ne1.js",
+  "chakra/Generated/ne2.js",
+  "chakra/Generated/ne3.js",
+  "chakra/Generated/or.js",
+  "chakra/Generated/or0.js",
+  "chakra/Generated/or1.js",
+  "chakra/Generated/or2.js",
+  "chakra/Generated/or3.js",
+  "chakra/Generated/rsh.js",
+  "chakra/Generated/rsh0.js",
+  "chakra/Generated/rsh1.js",
+  "chakra/Generated/rsh2.js",
+  "chakra/Generated/rsh3.js",
+  "chakra/Generated/rshu.js",
+  "chakra/Generated/rshu0.js",
+  "chakra/Generated/rshu1.js",
+  "chakra/Generated/rshu2.js",
+  "chakra/Generated/rshu3.js",
+  "chakra/Generated/seq.js",
+  "chakra/Generated/seq0.js",
+  "chakra/Generated/seq1.js",
+  "chakra/Generated/seq2.js",
+  "chakra/Generated/seq3.js",
+  "chakra/Generated/sne.js",
+  "chakra/Generated/sne0.js",
+  "chakra/Generated/sne1.js",
+  "chakra/Generated/sne2.js",
+  "chakra/Generated/sne3.js",
+  "chakra/Generated/sub.js",
+  "chakra/Generated/sub0.js",
+  "chakra/Generated/sub1.js",
+  "chakra/Generated/sub2.js",
+  "chakra/Generated/sub3.js",
+  "chakra/Generated/xor.js",
+  "chakra/Generated/xor0.js",
+  "chakra/Generated/xor1.js",
+  "chakra/Generated/xor2.js",
+  "chakra/Generated/xor3.js",
+  "chakra/GlobalFunctions/ParseInt1.js",
+  "chakra/GlobalFunctions/eval1.js",
+  "chakra/GlobalFunctions/parseInt.js",
+  "chakra/GlobalFunctions/toString.js",
+  "chakra/InlineCaches/test6.js",
+  "chakra/LetConst/arguments.js",
+  "chakra/LetConst/constreassign1.js",
+  "chakra/LetConst/defer1.js",
+  "chakra/LetConst/eval1.js",
+  "chakra/LetConst/letlet.js",
+  "chakra/LetConst/scopegen1.js",
+  "chakra/LetConst/tdz1.js",
+  "chakra/LetConst/tdz2.js",
+  "chakra/LetConst/with.js",
+  "chakra/Operators/delete2.js",
+  "chakra/Operators/delete3.js",
+  "chakra/Optimizer/Miscellaneous_MaxInterpret.js",
+  "chakra/Optimizer/bugsimplepathbrfoldgetter.js",
+  "chakra/Optimizer/test115.js",
+  "chakra/Optimizer/test135.js",
+  "chakra/Optimizer/test146.js",
+  "chakra/PerfHint/try_with_eval_perfhint.js",
+  "chakra/Regex/regex1.js",
+  "chakra/Scanner/InvalidCharacter.js",
+  "chakra/Strings/concat4.js",
+  "chakra/Strings/long_concatstr.js",
+  "chakra/UnifiedRegex/bugFixRegression.js",
+  "chakra/WasmSpec/testsuite/js-api/constructor/instantiate-bad-imports.any.js",
+  "chakra/WasmSpec/testsuite/js-api/instance/constructor-bad-imports.any.js",
+  "chakra/benchmarks/ARES-6/Babylon/babylon-blob.js",
+  "chakra/es6/DeferParseLambda.js",
+  "chakra/es6/DeferParseMethods.js",
+  "chakra/es6/ES6Iterators-forof.js",
+  "chakra/es6/ES6NewTarget.js",
+  "chakra/es6/blockscope-deferred.js",
+  "chakra/es6/blockscope-functionbinding.js",
+  "chakra/es6/boundConstruction.js",
+  "chakra/es6/bug_OS17895855.js",
+  "chakra/es6/bug_OS_4498031.js",
+  "chakra/es6/bug_issue_3247_dep.js",
+  "chakra/es6/bug_issue_5994.js",
+  "chakra/es6/default-splitscope-undodeferparse.js",
+  "chakra/es6/default-splitscope.js",
+  "chakra/es6/default.js",
+  "chakra/es6/destructuring_params.js",
+  "chakra/es6/destructuring_params_arguments_override.js",
+  "chakra/es6/forInEdgeCases.js",
+  "chakra/es6/generators-functionality.js",
+  "chakra/es6/generators-syntax.js",
+  "chakra/es6/globalCatchNewTargetSyntaxError.js",
+  "chakra/es6/globalNewTargetSyntaxError.js",
+  "chakra/es6/globalParamCatchNewTargetSyntaxError.js",
+  "chakra/es6/lambda-expr.js",
+  "chakra/es6/lambda1.js",
+  "chakra/es6/letconst_global_shadow_deleted.js",
+  "chakra/es6/objlit.js",
+  "chakra/es6/rest.js",
+  "chakra/es6/spreadIterator.js",
+  "chakra/es6/supersyntax02.js",
+  "chakra/es6/supersyntax05.js",
+  "chakra/es6/supersyntax06.js",
+  "chakra/es6/unicode_6_identifier_Blue511452.js",
+  "chakra/es6/unicode_6_identifier_Blue524737.js",
+  "chakra/es6/unscopablesWithScopeTest.js",
+  "chakra/es6module/GetModuleNamespace.js",
+  "chakra/es6module/ModuleCircularBar.js",
+  "chakra/es6module/ModuleCircularFoo.js",
+  "chakra/es6module/ModuleComplexExports.js",
+  "chakra/es6module/ModuleComplexReexports.js",
+  "chakra/es6module/ModuleDefaultExport1.js",
+  "chakra/es6module/ModuleDefaultExport2.js",
+  "chakra/es6module/ModuleDefaultExport3.js",
+  "chakra/es6module/ModuleDefaultExport4.js",
+  "chakra/es6module/ModuleDefaultReexport.js",
+  "chakra/es6module/ModuleReexportDefault.js",
+  "chakra/es6module/ModuleSimpleExport.js",
+  "chakra/es6module/ModuleSimpleReexport.js",
+  "chakra/es6module/ValidExportDefaultStatement1.js",
+  "chakra/es6module/ValidExportDefaultStatement2.js",
+  "chakra/es6module/ValidExportStatements.js",
+  "chakra/es6module/ValidExportStatements2.js",
+  "chakra/es6module/ValidImportStatements.js",
+  "chakra/es6module/ValidReExportStatements.js",
+  "chakra/es6module/bug_OS12095746.js",
+  "chakra/es6module/bug_OS12095746_mod0.js",
+  "chakra/es6module/bug_OS12095746_mod1.js",
+  "chakra/es6module/bug_OS12095746_mod2.js",
+  "chakra/es6module/bug_OS12095746_moddep.js",
+  "chakra/es6module/bug_OS12113549_module1.js",
+  "chakra/es6module/bug_OS14562349.js",
+  "chakra/es6module/bug_issue_3076.js",
+  "chakra/es6module/bug_issue_3257/mod/mod0.js",
+  "chakra/es6module/bug_issue_3257/mod1.js",
+  "chakra/es6module/bug_issue_3257/mod2/mod2.js",
+  "chakra/es6module/bug_issue_3257/script/script0.js",
+  "chakra/es6module/bug_issue_5777.js",
+  "chakra/es6module/dynamic_import_promises_5796.js",
+  "chakra/es6module/exportBinding.js",
+  "chakra/es6module/exportmodule.js",
+  "chakra/es6module/module-3250-bug-dep.js",
+  "chakra/es6module/module-3250-bug-dep2.js",
+  "chakra/es6module/module-3250-ext-a.js",
+  "chakra/es6module/module-3250-ext-b.js",
+  "chakra/es6module/moduleExport1.js",
+  "chakra/es6module/moduleImportTheError.js",
+  "chakra/es6module/moduleThrowAnError.js",
+  "chakra/es6module/module_1_2645.js",
+  "chakra/es6module/module_2_2645.js",
+  "chakra/es6module/module_4482_dep1.js",
+  "chakra/es6module/module_4482_dep2.js",
+  "chakra/es6module/module_4482_dep3.js",
+  "chakra/es6module/module_4570_dep1.js",
+  "chakra/es6module/module_4570_dep2.js",
+  "chakra/es6module/moduletest2_mod0.js",
+  "chakra/es6module/moduletest2_mod1a.js",
+  "chakra/es6module/moduletest2_mod1b.js",
+  "chakra/es6module/moduletest2_mod2a.js",
+  "chakra/es6module/otherModule.js",
+  "chakra/es6module/passmodule.js",
+  "chakra/es6module/testDynamicImportfromModule.js",
+  "chakra/es7/asyncawait-functionality.js",
+  "chakra/es7/json_superset.js",
+  "chakra/fieldopts/equiv-mismatch2.js",
+  "chakra/fieldopts/markTemp.js",
+  "chakra/inlining/bug_gh6303.js",
+  "chakra/inlining/profilingbug.js",
+  "chakra/loop/loop.js",
+  "chakra/stackfunc/602481.js",
+  "chakra/stackfunc/arguments_assignment.js",
+  "chakra/stackfunc/with_crossscope.js",
+  "chakra/stackfunc/with_existing.js",
+  "chakra/stackfunc/with_namedfunc.js",
+  "chakra/stackfunc/withref_delayobjscope.js",
+  "chakra/strict/basics_function_in_SM.js",
+  "chakra/strict/bug212755.js",
+  "chakra/strict/comma_bug219390.js",
+  "chakra/strict/delete.js",
+  "chakra/strict/formal_samename1.js",
+  "chakra/strict/formal_samename2.js",
+  "chakra/strict/multiunit.js",
+  "chakra/strict/nonSimpleParameterList.js",
+  "chakra/strict/strictargs-deferred.js",
+  "chakra/strict/strictargs2-deferred.js",
+  "chakra/strict/strictargs3-deferred.js",
+  "chakra/strict/stricteval-deferred.js",
+  "chakra/strict/stricteval2-deferred.js",
+  "chakra/strict/strictkwd-deferred.js",
+  "chakra/wasm.simd/int64x2Tests.js",
+  /* Tests requiring sloppy mode from jstests */
+  "WebKit/JSTests/es6/Proxy_internal_get_calls_HasBinding.js",
+  "WebKit/JSTests/es6/non-strict_function_semantics_function_statements_in_if-statement_clauses.js",
+  "WebKit/JSTests/es6/non-strict_function_semantics_labeled_function_statements.js",
+  "WebKit/JSTests/es6/well-known_symbols_Symbol.unscopables.js",
+  "WebKit/JSTests/stress/adhoc-setter-frame-should-not-be-killed.js",
+  "WebKit/JSTests/stress/allocation-sinking-hints-are-valid-ssa-2.js",
+  "WebKit/JSTests/stress/allocation-sinking-hints-are-valid-ssa.js",
+  "WebKit/JSTests/stress/array-copywithin.js",
+  "WebKit/JSTests/stress/arrow-function-and-use-strict-directive.js",
+  "WebKit/JSTests/stress/arrow-functions-as-default-parameter-values.js",
+  "WebKit/JSTests/stress/arrowfunction-lexical-bind-arguments-non-strict-1.js",
+  "WebKit/JSTests/stress/arrowfunction-lexical-bind-arguments-non-strict-2.js",
+  "WebKit/JSTests/stress/arrowfunction-lexical-bind-this-8.js",
+  "WebKit/JSTests/stress/big-int-as-property-name.js",
+  "WebKit/JSTests/stress/const-and-with-statement.js",
+  "WebKit/JSTests/stress/const-not-strict-mode.js",
+  "WebKit/JSTests/stress/constant-closure-var-with-dynamic-invalidation.js",
+  "WebKit/JSTests/stress/do-eval-virtual-call-correctly.js",
+  "WebKit/JSTests/stress/es6-default-parameters.js",
+  "WebKit/JSTests/stress/eval-cached.js",
+  "WebKit/JSTests/stress/eval-func-decl-block-with-var-and-remove.js",
+  "WebKit/JSTests/stress/eval-func-decl-in-eval-within-with-scope.js",
+  "WebKit/JSTests/stress/eval-that-is-not-eval.js",
+  "WebKit/JSTests/stress/for-in-tests.js",
+  "WebKit/JSTests/stress/for-let-comma.js",
+  "WebKit/JSTests/stress/generator-syntax.js",
+  "WebKit/JSTests/stress/get-argument-by-val-safe-in-inlined-varargs-call-out-of-bounds.js",
+  "WebKit/JSTests/stress/get-by-offset-should-use-correct-child.js",
+  "WebKit/JSTests/stress/get-from-scope-dynamic-onto-proxy.js",
+  "WebKit/JSTests/stress/get-my-argument-by-val-safe-out-of-bounds.js",
+  "WebKit/JSTests/stress/get-my-argument-by-val-safe-wrap-around.js",
+  "WebKit/JSTests/stress/getter-setter-should-be-cell.js",
+  "WebKit/JSTests/stress/global-environment-does-not-trap-unscopables.js",
+  "WebKit/JSTests/stress/global-lexical-variable-with-statement.js",
+  "WebKit/JSTests/stress/global-object-proto-getter.js",
+  "WebKit/JSTests/stress/hashbang.js",
+  "WebKit/JSTests/stress/import-basic.js",
+  "WebKit/JSTests/stress/import-from-eval.js",
+  "WebKit/JSTests/stress/import-reject-with-exception.js",
+  "WebKit/JSTests/stress/import-tests/cocoa.js",
+  "WebKit/JSTests/stress/import-tests/multiple.js",
+  "WebKit/JSTests/stress/import-tests/multiple2.js",
+  "WebKit/JSTests/stress/import-tests/should.js",
+  "WebKit/JSTests/stress/import-with-empty-string.js",
+  "WebKit/JSTests/stress/lazy-global-object-property-materialization-should-not-putDirectWithoutTransition.js",
+  "WebKit/JSTests/stress/lexical-let-and-with-statement.js",
+  "WebKit/JSTests/stress/lexical-let-not-strict-mode.js",
+  "WebKit/JSTests/stress/licm-should-handle-if-a-hoist-causes-a-provable-osr-exit.js",
+  "WebKit/JSTests/stress/module-namespace-access-change.js",
+  "WebKit/JSTests/stress/module-namespace-access-non-constant.js",
+  "WebKit/JSTests/stress/module-namespace-access-poly.js",
+  "WebKit/JSTests/stress/module-namespace-access-transitive-exports.js",
+  "WebKit/JSTests/stress/module-namespace-access.js",
+  "WebKit/JSTests/stress/object-allocation-sinking-interpretation-can-interpret-edges-that-can-be-proven-unreachable-in-ai.js",
+  "WebKit/JSTests/stress/object-allocation-sinking-phase-needs-to-write-to-each-scope-offset.js",
+  "WebKit/JSTests/stress/printableModuleKey-should-never-throw.js",
+  "WebKit/JSTests/stress/proxy-call-apply-handler-to-this.js",
+  "WebKit/JSTests/stress/proxy-getter-stack-overflow.js",
+  "WebKit/JSTests/stress/proxy-stack-overflow-exceptions.js",
+  "WebKit/JSTests/stress/proxy-with-statement.js",
+  "WebKit/JSTests/stress/put-dynamic-var-strict-and-sloppy.js",
+  "WebKit/JSTests/stress/re-execute-error-module.js",
+  "WebKit/JSTests/stress/regress-159954.js",
+  "WebKit/JSTests/stress/regress-170732.js",
+  "WebKit/JSTests/stress/regress-178890.js",
+  "WebKit/JSTests/stress/regress-191856.js",
+  "WebKit/JSTests/stress/regress-192626.js",
+  "WebKit/JSTests/stress/resources/error-module.js",
+  "WebKit/JSTests/stress/resources/module-namespace-access-transitive-exports-2.js",
+  "WebKit/JSTests/stress/resources/module-namespace-access-transitive-exports.js",
+  "WebKit/JSTests/stress/resources/module-namespace-access.js",
+  "WebKit/JSTests/stress/sloppy-mode-function-hoisting.js",
+  "WebKit/JSTests/stress/sloppy-mode-hoist-arguments-function-non-simple-parameter-list.js",
+  "WebKit/JSTests/stress/string-prototype-scopes.js",
+  "WebKit/JSTests/stress/tagged-templates-this.js",
+  "WebKit/JSTests/stress/to-this-before-arrow-function-closes-over-this-that-starts-as-lexical-environment.js",
+  "WebKit/JSTests/stress/unscopables.js",
+  "WebKit/JSTests/stress/use-arguments-as-object-pointer.js",
+  "WebKit/JSTests/stress/values-unscopables.js",
+  "WebKit/JSTests/stress/variable-named-eval-under-tdz.js",
+  "WebKit/JSTests/stress/with.js",
+  "WebKit/JSTests/stress/with_and_arith.js",
+  "WebKit/JSTests/stress/yield-label.js",
+  /* Tests requiring sloppy mode from crashtests */
+  "CrashTests/115674352/util.js",
+  "CrashTests/132918471/fast/js/resources/js-test-post.js",
+  "CrashTests/135733397/resources/cookies-test-post.js",
+  "CrashTests/158344541/support/refTestWait.js",
+  "CrashTests/178811021/media-file.js",
+  "CrashTests/178811021/video-test.js",
+  "CrashTests/31681819/fast/js/resources/js-test-post.js",
+  "CrashTests/4506116685037568/00422.js",
+  "CrashTests/4506610717425664/00502.js",
+  "CrashTests/4523272292270080/02509.js",
+  "CrashTests/4527439453618176/00122.js",
+  "CrashTests/4528969625894912/encaiiljifbdbjlphpgpiimidegddhic/lib/tweet_manager.js",
+  "CrashTests/4528969625894912/lib/tweet_manager.js",
+  "CrashTests/4531783459405824/01124.js",
+  "CrashTests/4542853924782080/01450.js",
+  "CrashTests/4553411667165184/00361.js",
+  "CrashTests/4558140288794624/00118.js",
+  "CrashTests/4559554636677120/00921.js",
+  "CrashTests/4563969814560768/1.0.2/conformance/resources/webgl-test-utils.js",
+  "CrashTests/4563969814560768/1.0.2/conformance/resources/webgl-test.js",
+  "CrashTests/4563969814560768/1.0.2/resources/js-test-pre.js",
+  "CrashTests/4592095397150720/619.js",
+  "CrashTests/4599018605772800/00095.js",
+  "CrashTests/4609052021096448/02286.js",
+  "CrashTests/4620742728613888/02272.js",
+  "CrashTests/4625478540066816/02759.js",
+  "CrashTests/4632675287826432/01188.js",
+  "CrashTests/4636862568726528/02064.js",
+  "CrashTests/4637950708285440/02664.js",
+  "CrashTests/4646057439133696/01096.js",
+  "CrashTests/4665806235369472/02400.js",
+  "CrashTests/4666942225383424/00033.js",
+  "CrashTests/4675875294674944/04443.js",
+  "CrashTests/4676310267068416/00041.js",
+  "CrashTests/4676310267068416/meta-00041.js",
+  "CrashTests/4678125190643712/01568.js",
+  "CrashTests/4689880216240128/meta-00116.js",
+  "CrashTests/4692754406047744/00819.js",
+  "CrashTests/4704815858057216/02665.js",
+  "CrashTests/4709921473167360/00418.js",
+  "CrashTests/4710304564903936/resources/js-test-post.js",
+  "CrashTests/4714207862587392/03389.js",
+  "CrashTests/4714965806153728/00222.js",
+  "CrashTests/4715062213476352/meta-00915.js",
+  "CrashTests/4730791635451904/00498.js",
+  "CrashTests/4731918950203392/04316.js",
+  "CrashTests/4736176662773760/01784.js",
+  "CrashTests/4747467118084096/02508.js",
+  "CrashTests/4747813124571136/00737.js",
+  "CrashTests/4762878654545920/00656.js",
+  "CrashTests/4763433329491968/01684.js",
+  "CrashTests/4769018565623808/00203.js",
+  "CrashTests/4769117811507200/01576.js",
+  "CrashTests/4776648177352704/02684.js",
+  "CrashTests/4777280799506432/01745.js",
+  "CrashTests/4778588336291840/03439.js",
+  "CrashTests/4784915024707584/meta-00090.js",
+  "CrashTests/4786126975139840/00686.js",
+  "CrashTests/4804556499451904/07362.js",
+  "CrashTests/4804606392795136/00345.js",
+  "CrashTests/4808505226690560/00570.js",
+  "CrashTests/4816373903785984/01394.js",
+  "CrashTests/4817362373836800/01654.js",
+  "CrashTests/4823335117783040/meta-00192.js",
+  "CrashTests/4827148775849984/02704.js",
+  "CrashTests/4828749863583744/00863.js",
+  "CrashTests/4830304484196352/01110.js",
+  "CrashTests/4835573090222080/00096.js",
+  "CrashTests/4835573090222080/meta-00096.js",
+  "CrashTests/4837730048278528/03052.js",
+  "CrashTests/4843490131312640/03475.js",
+  "CrashTests/4850895428517888/2670.js",
+  "CrashTests/4854644212105216/392.js",
+  "CrashTests/4855156194934784/meta-00080.js",
+  "CrashTests/4863599620390912/01845.js",
+  "CrashTests/4865459476234240/02997.js",
+  "CrashTests/4867582249664512/01063.js",
+  "CrashTests/4876270013382656/test.js",
+  "CrashTests/4877247254626304/00194.js",
+  "CrashTests/4888824389304320/00897.js",
+  "CrashTests/4895370524491776/00981.js",
+  "CrashTests/4897972860223488/00795.js",
+  "CrashTests/4899093893742592/02412.js",
+  "CrashTests/4904085663776768/01119.js",
+  "CrashTests/4907797561212928/01094.js",
+  "CrashTests/4907899447410688/lib/prototype.js",
+  "CrashTests/4908800751173632/02190.js",
+  "CrashTests/4914294111076352/resources/worker-common.js",
+  "CrashTests/4917356818071552/00992.js",
+  "CrashTests/4920133693472768/09913.js",
+  "CrashTests/4924640359088128/resources/methods.js",
+  "CrashTests/4930344755658752/00813.js",
+  "CrashTests/4932372025311232/01177.js",
+  "CrashTests/4940906897866752/01190.js",
+  "CrashTests/4966722004058112/01202.js",
+  "CrashTests/4972148754743296/01007.js",
+  "CrashTests/4976392256618496/01500.js",
+  "CrashTests/4982480049274880/00401.js",
+  "CrashTests/4991878320095232/01486.js",
+  "CrashTests/4996582417891328/00314.js",
+  "CrashTests/5000906812162048/02217.js",
+  "CrashTests/5006028967247872/01936.js",
+  "CrashTests/5010596302946304/03143.js",
+  "CrashTests/5010966233481216/05443.js",
+  "CrashTests/5015563071913984/00891.js",
+  "CrashTests/5016449390477312/02595.js",
+  "CrashTests/5017709745274880/00480.js",
+  "CrashTests/5019028805124096/01923.js",
+  "CrashTests/5020202404937728/04355.js",
+  "CrashTests/5024919414112256/02190.js",
+  "CrashTests/5026147325968384/01281.js",
+  "CrashTests/5026251190829056/01496.js",
+  "CrashTests/5034236802498560/00038.js",
+  "CrashTests/5034236802498560/meta-00038.js",
+  "CrashTests/5036266378756096/resources/js-test.js",
+  "CrashTests/5040821977219072/07176.js",
+  "CrashTests/5047964758441984/02170.js",
+  "CrashTests/5048503350853632/00378.js",
+  "CrashTests/5055795031965696/00903.js",
+  "CrashTests/5064701201350656/mjsunit_modified.js",
+  "CrashTests/5068740508516352/00968.js",
+  "CrashTests/5068815053619200/02921.js",
+  "CrashTests/5070483825885184/01709.js",
+  "CrashTests/5071018176282624/05782.js",
+  "CrashTests/5072932293050368/01965.js",
+  "CrashTests/5073353348087808/03720.js",
+  "CrashTests/5076362928848896/00103.js",
+  "CrashTests/5080979047317504/04300.js",
+  "CrashTests/5083229709664256/00286.js",
+  "CrashTests/5083537469079552/03453.js",
+  "CrashTests/5086848684654592/00140.js",
+  "CrashTests/5089350304661504/04000.js",
+  "CrashTests/5090843606515712/4564.js",
+  "CrashTests/5091969183776768/js/angular.js",
+  "CrashTests/5092426574987264/02412.js",
+  "CrashTests/5092539386822656/00729.js",
+  "CrashTests/5096882252677120/05262.js",
+  "CrashTests/5097225136504832/00340.js",
+  "CrashTests/5104674803023872/meta-00066.js",
+  "CrashTests/5107484490989568/00655.js",
+  "CrashTests/5110246766673920/117.js",
+  "CrashTests/5113028242702336/03897.js",
+  "CrashTests/5114377424601088/00224.js",
+  "CrashTests/5126302418337792/00216.js",
+  "CrashTests/5126730184654848/00846.js",
+  "CrashTests/5127274311843840/769.js",
+  "CrashTests/5128141337133056/898.js",
+  "CrashTests/5129004072042496/02689.js",
+  "CrashTests/5130481752735744/817.js",
+  "CrashTests/5140656268640256/resources/interpolation-test.js",
+  "CrashTests/5150697335816192/04726.js",
+  "CrashTests/5151090662178816/01492.js",
+  "CrashTests/5152408142086144/01220.js",
+  "CrashTests/5153368765628416/00787.js",
+  "CrashTests/5157575680327680/06055.js",
+  "CrashTests/5159014924288000/01496.js",
+  "CrashTests/5164793027624960/01357.js",
+  "CrashTests/5165045173846016/Common/MV.js",
+  "CrashTests/5165672034205696/05364.js",
+  "CrashTests/5168023154720768/meta-00125.js",
+  "CrashTests/5171157051899904/00383.js",
+  "CrashTests/5171658670473216/02099.js",
+  "CrashTests/5174188858146816/00365.js",
+  "CrashTests/5190716938387456/05795.js",
+  "CrashTests/5192473061359616/meta-00780.js",
+  "CrashTests/5197954434596864/01324.js",
+  "CrashTests/5200019587334144/meta-00398.js",
+  "CrashTests/5214085959909376/00916.js",
+  "CrashTests/5219122255757312/00999.js",
+  "CrashTests/5222394685292544/resources/stress-js-execution.js",
+  "CrashTests/5225766790889472/03395.js",
+  "CrashTests/5226258591121408/04850.js",
+  "CrashTests/5226692407984128/meta-00030.js",
+  "CrashTests/5226950361612288/01783.js",
+  "CrashTests/5231597301334016/00307.js",
+  "CrashTests/5238861996490752/01351.js",
+  "CrashTests/5242104612651008/mjsunit_modified.js",
+  "CrashTests/5254331529166848/meta-00409.js",
+  "CrashTests/5254663852261376/meta-00497.js",
+  "CrashTests/5257477901778944/06912.js",
+  "CrashTests/5265513080946688/00263.js",
+  "CrashTests/5271669179678720/01206.js",
+  "CrashTests/5280655383724032/meta-00355.js",
+  "CrashTests/5283736101322752/00921.js",
+  "CrashTests/5289653387919360/635.js",
+  "CrashTests/5292566928162816/05416.js",
+  "CrashTests/5301431549820928/meta-00738.js",
+  "CrashTests/5313740812320768/00284.js",
+  "CrashTests/5326135539793920/01618.js",
+  "CrashTests/5327466171465728/benchmarks/propertyTreeSplitting.js",
+  "CrashTests/5335637787934720/mersenne.js",
+  "CrashTests/5337864091729920/00078.js",
+  "CrashTests/5346139261108224/meta-00086.js",
+  "CrashTests/5351139388424192/00966.js",
+  "CrashTests/5351969500168192/02350.js",
+  "CrashTests/5352173552795648/00798.js",
+  "CrashTests/5352347554545664/00293.js",
+  "CrashTests/5354535555825664/01561.js",
+  "CrashTests/5361893420302336/03633.js",
+  "CrashTests/5363987624493056/00401.js",
+  "CrashTests/5367250713575424/00858.js",
+  "CrashTests/5384147026837504/00846.js",
+  "CrashTests/5385945858179072/01283.js",
+  "CrashTests/5385947877998592/00345.js",
+  "CrashTests/5390684309946368/00499.js",
+  "CrashTests/5397761290403840/00742.js",
+  "CrashTests/5397969593958400/02399.js",
+  "CrashTests/5400975855321088/04550.js",
+  "CrashTests/5402562393276416/meta-00565.js",
+  "CrashTests/5417888764657664/01529.js",
+  "CrashTests/5418169017303040/156.js",
+  "CrashTests/5420098825748480/01174.js",
+  "CrashTests/5435516837429248/02591.js",
+  "CrashTests/5439052443025408/01562.js",
+  "CrashTests/5443705157976064/04908.js",
+  "CrashTests/5443796254064640/08661.js",
+  "CrashTests/5474124668600320/meta-00992.js",
+  "CrashTests/5480706662727680/02537.js",
+  "CrashTests/5486126228570112/02840.js",
+  "CrashTests/5487631620112384/animations/resources/animation-test-helpers.js",
+  "CrashTests/5494987147444224/01679.js",
+  "CrashTests/5513876092223488/00005.js",
+  "CrashTests/5521703332741120/00376.js",
+  "CrashTests/5528671464456192/01324.js",
+  "CrashTests/5530121586081792/03646.js",
+  "CrashTests/5530153792045056/meta-00910.js",
+  "CrashTests/5536392074493952/01051.js",
+  "CrashTests/5540548491608064/01146.js",
+  "CrashTests/5541203771916288/currentscript.js",
+  "CrashTests/5544125599580160/00526.js",
+  "CrashTests/5559694775025664/imported/w3c/html-templates/testcommon.js",
+  "CrashTests/5559694775025664/resources/testharness.js",
+  "CrashTests/5559694775025664/resources/testharnessreport.js",
+  "CrashTests/5568247077011456/04042.js",
+  "CrashTests/5577681470488576/00437.js",
+  "CrashTests/5593713261412352/319.js",
+  "CrashTests/5603057343660032/01771.js",
+  "CrashTests/5605754113884160/01777.js",
+  "CrashTests/5606041047007232/02795.js",
+  "CrashTests/5606702255964160/meta-00168.js",
+  "CrashTests/5612290518810624/dist/jquery.js",
+  "CrashTests/5615411944226816/02684.js",
+  "CrashTests/5620205313589248/01035.js",
+  "CrashTests/5625391309127680/02293.js",
+  "CrashTests/5630063870214144/02130.js",
+  "CrashTests/5630225822121984/03637.js",
+  "CrashTests/5630410519478272/916.js",
+  "CrashTests/5635092719599616/00041.js",
+  "CrashTests/5635645835182080/worker-serviceworker-7636bedbbb1f120d.js",
+  "CrashTests/5639584467910656/00441.js",
+  "CrashTests/5639628713492480/04139.js",
+  "CrashTests/5642849944993792/resources/js-test.js",
+  "CrashTests/5644307466878976/__MACOSX/._audio.js",
+  "CrashTests/5644307466878976/__MACOSX/._processor.js",
+  "CrashTests/5645896422522880/00670.js",
+  "CrashTests/5648004624678912/01755.js",
+  "CrashTests/5649862583648256/meta-00236.js",
+  "CrashTests/5650039238033408/00812.js",
+  "CrashTests/5651439780495360/250.js",
+  "CrashTests/5651703040835584/resources/testharnessreport.js",
+  "CrashTests/5652465613406208/4584.js",
+  "CrashTests/5657116044951552/scripts/options.js",
+  "CrashTests/5657183745998848/01385.js",
+  "CrashTests/5657306247462912/00434.js",
+  "CrashTests/5661345388167168/resources/stress-js-execution.js",
+  "CrashTests/5662199714480128/00467.js",
+  "CrashTests/5662596912513024/265.js",
+  "CrashTests/5664971082694656/02223.js",
+  "CrashTests/5665078627663872/262.js",
+  "CrashTests/5668694740172800/meta-00294.js",
+  "CrashTests/5672678890405888/resources/js-test.js",
+  "CrashTests/5672678890405888/webaudio/resources/distance-model-testing.js",
+  "CrashTests/5675659103830016/04982.js",
+  "CrashTests/5676981169487872/01110.js",
+  "CrashTests/5677821452091392/02168.js",
+  "CrashTests/5679632916676608/04152.js",
+  "CrashTests/5684463616917504/01498.js",
+  "CrashTests/5685487336161280/03642.js",
+  "CrashTests/5686447370665984/sdk/tests/conformance/ogles/ogles-utils.js",
+  "CrashTests/5686447370665984/sdk/tests/conformance/resources/glsl-conformance-test.js",
+  "CrashTests/5686447370665984/sdk/tests/conformance/resources/webgl-test-utils.js",
+  "CrashTests/5686447370665984/sdk/tests/resources/js-test-pre.js",
+  "CrashTests/5688866685321216/09854.js",
+  "CrashTests/5689884189392896/02723.js",
+  "CrashTests/5690371071803392/01662.js",
+  "CrashTests/5690744270487552/oob_write.js",
+  "CrashTests/5694376231632896/1033966.js",
+  "CrashTests/5694701996867584/conformance/resources/webgl-test.js",
+  "CrashTests/5696049601314816/7.js",
+  "CrashTests/5697903049441280/03188.js",
+  "CrashTests/5703976838234112/test.js",
+  "CrashTests/5707472246472704/1443.js",
+  "CrashTests/5713776938582016/00793.js",
+  "CrashTests/5721502735532032/03042.js",
+  "CrashTests/5733293570392064/00764.js",
+  "CrashTests/5734750167105536/01271.js",
+  "CrashTests/5735023732064256/meta-00070.js",
+  "CrashTests/5736353084342272/resources/testharness.js",
+  "CrashTests/5737388710821888/resources/js-test.js",
+  "CrashTests/5744365229441024/resources/testharness.js",
+  "CrashTests/5745342726537216/meta-00053.js",
+  "CrashTests/5750922200875008/747.js",
+  "CrashTests/5753604559470592/03311.js",
+  "CrashTests/5754855756136448/00202.js",
+  "CrashTests/5755508264534016/00224.js",
+  "CrashTests/5763511307337728/04651.js",
+  "CrashTests/5763879718617088/lib/prototype.js",
+  "CrashTests/5764427184865280/00843.js",
+  "CrashTests/5765576144060416/815.js",
+  "CrashTests/5767941953945600/02556.js",
+  "CrashTests/5774432061095936/00972.js",
+  "CrashTests/5780358435962880/02297.js",
+  "CrashTests/5781432505466880/02329.js",
+  "CrashTests/5784274577129472/146.js",
+  "CrashTests/5784274665996288/02175.js",
+  "CrashTests/5794086030147584/04345.js",
+  "CrashTests/5798263663099904/01459.js",
+  "CrashTests/5802116248764416/06966.js",
+  "CrashTests/5806021251432448/background.js",
+  "CrashTests/58219635/fast/js/resources/js-test-post.js",
+  "CrashTests/5826758986170368/resources/js-test-post.js",
+  "CrashTests/5830309687853056/00112.js",
+  "CrashTests/5830406377832448/01330.js",
+  "CrashTests/5832728473239552/02422.js",
+  "CrashTests/5841445051170816/resources/js-test-pre.js",
+  "CrashTests/5842510916091904/resources/webgl_test_files/resources/js-test-post.js",
+  "CrashTests/58693299/selfhtml.js",
+  "CrashTests/5878747354365952/02158.js",
+  "CrashTests/5910324886634496/02597.js",
+  "CrashTests/5913894233833472/05410.js",
+  "CrashTests/5919491238920192/00154.js",
+  "CrashTests/5925149103357952/webaudio/resources/audit.js",
+  "CrashTests/5931087833333760/03890.js",
+  "CrashTests/5931608799707136/04633.js",
+  "CrashTests/5933875666616320/01048.js",
+  "CrashTests/5936525417644032/00317.js",
+  "CrashTests/5941859303555072/01996.js",
+  "CrashTests/5949184339083264/poc.js",
+  "CrashTests/5950573451804672/494.js",
+  "CrashTests/5950617700007936/01848.js",
+  "CrashTests/5957695718031360/01051.js",
+  "CrashTests/5969639398440960/00249.js",
+  "CrashTests/5970316012290048/414.js",
+  "CrashTests/5978056946876416/00920.js",
+  "CrashTests/5989158878183424/02865.js",
+  "CrashTests/5990019710320640/00076.js",
+  "CrashTests/5996165326962688/meta-00146.js",
+  "CrashTests/6002230377840640/01046.js",
+  "CrashTests/6007370099195904/HTML5/Bugbash/Demo.js",
+  "CrashTests/6010230465626112/resources/webgl_test_files/js/tests/tex-image-and-sub-image-with-image-bitmap-utils.js",
+  "CrashTests/6018592823836672/support/refTestWait.js",
+  "CrashTests/6022256973840384/04395.js",
+  "CrashTests/6022630427590656/03625.js",
+  "CrashTests/6023488715620352/04168.js",
+  "CrashTests/6026840030314496/00848.js",
+  "CrashTests/6034116338909184/513.js",
+  "CrashTests/6037361396809728/02741.js",
+  "CrashTests/6043736092966912/00712.js",
+  "CrashTests/6049932086083584/03169.js",
+  "CrashTests/6051257375784960/03082.js",
+  "CrashTests/6054819434070016/02737.js",
+  "CrashTests/6059329424457728/01928.js",
+  "CrashTests/6068082133696512/01044.js",
+  "CrashTests/6071110049988608/00548.js",
+  "CrashTests/6073192676327424/resources/js-test-pre.js",
+  "CrashTests/6081148422979584/00926.js",
+  "CrashTests/6081568072531968/02867.js",
+  "CrashTests/6085702952681472/00521.js",
+  "CrashTests/6096783899361280/01310.js",
+  "CrashTests/6099421385981952/04526.js",
+  "CrashTests/6103999282413568/fast/dom/script-tests/event-handlers.js",
+  "CrashTests/6107917668319232/00571.js",
+  "CrashTests/6113149884563456/02823.js",
+  "CrashTests/6124318079582208/00744.js",
+  "CrashTests/6134706385977344/00149.js",
+  "CrashTests/6136495474737152/00377.js",
+  "CrashTests/6150179231105024/conformance/resources/webgl-test.js",
+  "CrashTests/6156744933769216/02993.js",
+  "CrashTests/6158905865666560/meta-00624.js",
+  "CrashTests/6162601424453632/00565.js",
+  "CrashTests/6166378025320448/dist/jquery.js",
+  "CrashTests/6169525843394560/00344.js",
+  "CrashTests/6171947516297216/00845.js",
+  "CrashTests/6179220893204480/02159.js",
+  "CrashTests/6179752663842816/01196.js",
+  "CrashTests/6183005912825856/00515.js",
+  "CrashTests/6183415490019328/02656.js",
+  "CrashTests/6198951751188480/1248.js",
+  "CrashTests/6199083597824000/00227.js",
+  "CrashTests/6202558821957632/02015.js",
+  "CrashTests/6204924829630464/meta-00272.js",
+  "CrashTests/6211017381249024/01253.js",
+  "CrashTests/6213317429559296/02944.js",
+  "CrashTests/6216405020835840/03985.js",
+  "CrashTests/6218202061209600/mersenne.js",
+  "CrashTests/6220111297118208/meta-00050.js",
+  "CrashTests/6223202209234944/02648.js",
+  "CrashTests/6234705257168896/01780.js",
+  "CrashTests/6239384157552640/meta-00271.js",
+  "CrashTests/6248723961348096/01050.js",
+  "CrashTests/6249957991645184/00503.js",
+  "CrashTests/6250003584909312/1114.js",
+  "CrashTests/6250055858126848/04619.js",
+  "CrashTests/6255231244697600/meta-00216.js",
+  "CrashTests/6255916311379968/1372.js",
+  "CrashTests/6259138054324224/meta-00172.js",
+  "CrashTests/6269363175555072/00815.js",
+  "CrashTests/6273728140017664/03583.js",
+  "CrashTests/6277052166832128/00830.js",
+  "CrashTests/6278159702425600/01463.js",
+  "CrashTests/6280577705705472/1146.js",
+  "CrashTests/6285336190124032/01621.js",
+  "CrashTests/6292792642371584/00047.js",
+  "CrashTests/6294597573541888/00725.js",
+  "CrashTests/6294835115065344/00805.js",
+  "CrashTests/6295241556492288/01763.js",
+  "CrashTests/6300171514675200/00115.js",
+  "CrashTests/6304143111356416/00782.js",
+  "CrashTests/6319065615040512/04012.js",
+  "CrashTests/6328755580567552/resources/js-test.js",
+  "CrashTests/6328755580567552/svg/dynamic-updates/resources/SVGTestCase.js",
+  "CrashTests/6330764245073920/poc1.js",
+  "CrashTests/6332832186761216/00681.js",
+  "CrashTests/6332904701427712/00888.js",
+  "CrashTests/6332930432958464/02637.js",
+  "CrashTests/6339944789049344/04142.js",
+  "CrashTests/6345007341764608/00699.js",
+  "CrashTests/6346448656400384/dist/jquery.js",
+  "CrashTests/6351789088833536/meta-00122.js",
+  "CrashTests/6352599495475200/meta-00093.js",
+  "CrashTests/6353579392630784/04156.js",
+  "CrashTests/6358320674242560/resources/js-test.js",
+  "CrashTests/6359996105359360/934166-poc.js",
+  "CrashTests/6362821967740928/00524.js",
+  "CrashTests/6364115106004992/03982.js",
+  "CrashTests/6374053756272640/824.js",
+  "CrashTests/6384055239835648/00899.js",
+  "CrashTests/6387037962240000/02996.js",
+  "CrashTests/6391863691706368/01129.js",
+  "CrashTests/6394941698998272/meta-00167.js",
+  "CrashTests/6403694305476608/meta-00079.js",
+  "CrashTests/6405187880353792/meta-00826.js",
+  "CrashTests/6406267560263680/02111.js",
+  "CrashTests/6406622734974976/meta-00188.js",
+  "CrashTests/6408374819487744/02912.js",
+  "CrashTests/6413480679964672/01826.js",
+  "CrashTests/6417030613041152/02923.js",
+  "CrashTests/6417972328660992/09282.js",
+  "CrashTests/6419282024726528/00370.js",
+  "CrashTests/6423571127599104/04989.js",
+  "CrashTests/6427802493779968/meta-00335.js",
+  "CrashTests/6440904627060736/meta-00149.js",
+  "CrashTests/6441460252803072/04234.js",
+  "CrashTests/6443097497010176/1.0.2/resources/webgl_test_files/conformance/resources/webgl-test-utils.js",
+  "CrashTests/6443097497010176/1.0.2/resources/webgl_test_files/conformance/resources/webgl-test.js",
+  "CrashTests/6443097497010176/1.0.2/resources/webgl_test_files/resources/js-test-pre.js",
+  "CrashTests/6443097497010176/1.0.2/resources/webkit-webgl-test-harness.js",
+  "CrashTests/6449605693931520/meta-00261.js",
+  "CrashTests/6452658006392832/meta-00824.js",
+  "CrashTests/6454378660626432/00345.js",
+  "CrashTests/6463535346614272/meta-00886.js",
+  "CrashTests/6486287733489664/01857.js",
+  "CrashTests/6491889133158400/01408.js",
+  "CrashTests/6499287285366784/01193.js",
+  "CrashTests/6506410742120448/04615.js",
+  "CrashTests/6507668386873344/02815.js",
+  "CrashTests/6509855193169920/03031.js",
+  "CrashTests/6530413356122112/meta-00391.js",
+  "CrashTests/6534217117990912/01172.js",
+  "CrashTests/6541223017054208/01484.js",
+  "CrashTests/6550225930944512/mnt/scratch0/clusterfuzz/slave-bot/inputs/fuzzers/inferno_twister_custom_bundle/inferno_twister_custom_bundle_data/moz_tests/dom/workers/test/threadErrors_worker1.js",
+  "CrashTests/6552552797503488/bug_41414141.js",
+  "CrashTests/6566622022860800/05257.js",
+  "CrashTests/6566953431597056/02044.js",
+  "CrashTests/6574969751601152/01903.js",
+  "CrashTests/6576437049950208/conformance/resources/glsl-generator.js",
+  "CrashTests/6576437049950208/resources/js-test-pre.js",
+  "CrashTests/6576519795965952/04466.js",
+  "CrashTests/6586504922267648/00672.js",
+  "CrashTests/6588129316438016/04328.js",
+  "CrashTests/6590585390235648/00180.js",
+  "CrashTests/6597230699216896/meta-00299.js",
+  "CrashTests/6598446651015168/02933.js",
+  "CrashTests/6603081141977088/00482.js",
+  "CrashTests/6603770342408192/00211.js",
+  "CrashTests/6613865297084416/builds/chromium-browser-syzyasan_win32-release/revisions/asan-win32-release-276100/resources/inspector/main/Main.js",
+  "CrashTests/6616252740009984/01288.js",
+  "CrashTests/6622275291840512/resources/js-test.js",
+  "CrashTests/6637202159960064/01577.js",
+  "CrashTests/6637774979465216/01973.js",
+  "CrashTests/6644133880397824/00752.js",
+  "CrashTests/6645639153188864/00207.js",
+  "CrashTests/6646069054013440/poc.js",
+  "CrashTests/6652514189508608/poc.js",
+  "CrashTests/6658388688371712/00042.js",
+  "CrashTests/6659440421109760/00027.js",
+  "CrashTests/6669656538742784/00851.js",
+  "CrashTests/6669776086630400/01043.js",
+  "CrashTests/6681695539429376/01791.js",
+  "CrashTests/6694038640001024/01601.js",
+  "CrashTests/6695292278931456/04706.js",
+  "CrashTests/6701111818059776/00487.js",
+  "CrashTests/6710149884674048/00808.js",
+  "CrashTests/6710629039079424/01300.js",
+  "CrashTests/6720085192343552/00257.js",
+  "CrashTests/6727300586405888/builds/chromium-browser-syzyasan_win32-release/revisions/asan-win32-release-259551/resources/inspector/Main.js",
+  "CrashTests/6731147175526400/meta-00107.js",
+  "CrashTests/6744125769252864/494.js",
+];
+module.exports = {
+  generatedSkipped: new Set(skipped),
+  generatedSoftSkipped: new Set(softSkipped),
+  generatedSloppy: new Set(sloppy),
+};
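A minimal sketch of how these generated sets might be consulted; the require path and the corpus-relative key below are illustrative only, not the module's real location:

  // Hypothetical lookup against the generated exception sets.
  const exceptions = require('./generated/exceptions.js');

  const relPath = 'chakra/Basics/With.js';
  if (exceptions.generatedSkipped.has(relPath)) {
    // Input is excluded from the corpus entirely.
  } else if (exceptions.generatedSloppy.has(relPath)) {
    // Input must run in sloppy mode (no 'use strict' wrapper).
  }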
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/array_mutator.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/array_mutator.js
new file mode 100644
index 0000000..bcfb601
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/array_mutator.js
@@ -0,0 +1,115 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Mutator for array expressions.
+ */
+
+'use strict';
+
+const babelTypes = require('@babel/types');
+
+const common = require('./common.js');
+const mutator = require('./mutator.js');
+const random = require('../random.js');
+
+// Blueprint for choosing the maximum number of mutations. Bias towards
+// performing only one mutation.
+const MUTATION_CHOICES = [1, 1, 1, 1, 1, 2, 2, 2, 3];
+
+const MAX_ARRAY_LENGTH = 50;
+
+class ArrayMutator extends mutator.Mutator {
+  constructor(settings) {
+    super();
+    this.settings = settings;
+  }
+
+  get visitor() {
+    const thisMutator = this;
+
+    return {
+      ArrayExpression(path) {
+        const elements = path.node.elements;
+        if (!random.choose(thisMutator.settings.MUTATE_ARRAYS) ||
+            elements.length > MAX_ARRAY_LENGTH) {
+          return;
+        }
+
+        // Annotate the array expression with the action taken, indicating
+        // whether we also replaced elements.
+        function annotate(message, replace) {
+          if (replace) message += ' (replaced)';
+          thisMutator.annotate(path.node, message);
+        }
+
+        // Add or replace elements at a random index.
+        function randomSplice(replace, ...args) {
+          // Choose an index that's small enough to replace all desired items.
+          const index = random.randInt(0, elements.length - replace);
+          elements.splice(index, replace, ...args);
+        }
+
+        function duplicateElement(replace) {
+          const element = random.single(elements);
+          if (!element || common.isLargeNode(element)) {
+            return;
+          }
+          annotate('Duplicate an element', replace);
+          randomSplice(replace, babelTypes.cloneDeep(element));
+        }
+
+        function insertRandomValue(replace) {
+          annotate('Insert a random value', replace);
+          randomSplice(replace, common.randomValue(path));
+        }
+
+        function insertHole(replace) {
+          annotate('Insert a hole', replace);
+          randomSplice(replace, null);
+        }
+
+        function removeElements(count) {
+          annotate('Remove elements');
+          randomSplice(random.randInt(1, count));
+        }
+
+        function shuffle() {
+          annotate('Shuffle array');
+          random.shuffle(elements);
+        }
+
+        // Mutation options. Repeated mutations have a higher probability.
+        const mutations = [
+          () => duplicateElement(1),
+          () => duplicateElement(1),
+          () => duplicateElement(1),
+          () => duplicateElement(0),
+          () => duplicateElement(0),
+          () => insertRandomValue(1),
+          () => insertRandomValue(1),
+          () => insertRandomValue(0),
+          () => insertHole(1),
+          () => insertHole(0),
+          () => removeElements(1),
+          () => removeElements(elements.length),
+          shuffle,
+        ];
+
+        // Perform several mutations.
+        const count = random.single(MUTATION_CHOICES);
+        for (let i = 0; i < count; i++) {
+          random.single(mutations)();
+        }
+
+        // Don't recurse on nested arrays.
+        path.skip();
+      },
+    }
+  }
+}
+
+module.exports = {
+  ArrayMutator: ArrayMutator,
+};
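A minimal usage sketch for ArrayMutator, under the assumption that its visitor can be driven directly with @babel/traverse and that settings.MUTATE_ARRAYS is a mutation probability (the fuzzer itself wires mutators up elsewhere; the require path below assumes the js_fuzzer directory as the working directory):

  // Sketch only: parse a tiny program, force array mutation, and print the
  // mutated source back out.
  const babelTraverse = require('@babel/traverse').default;
  const babelGenerator = require('@babel/generator').default;
  const babylon = require('@babel/parser');
  const { ArrayMutator } = require('./mutators/array_mutator.js');

  const ast = babylon.parse('const __v_0 = [1, 2, 3, 4];');
  const arrayMutator = new ArrayMutator({ MUTATE_ARRAYS: 1.0 });
  babelTraverse(ast, arrayMutator.visitor);
  console.log(babelGenerator(ast).code);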
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/common.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/common.js
new file mode 100644
index 0000000..64a1645
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/common.js
@@ -0,0 +1,376 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Common mutator utilities.
+ */
+
+const babelTemplate = require('@babel/template').default;
+const babelTypes = require('@babel/types');
+const babylon = require('@babel/parser');
+
+const sourceHelpers = require('../source_helpers.js');
+const random = require('../random.js');
+
+const INTERESTING_NUMBER_VALUES = [
+    -1, -0.0, 0, 1,
+
+    // Float values.
+    -0.000000000000001, 0.000000000000001,
+
+    // Special values.
+    NaN, +Infinity, -Infinity,
+
+    // Boundaries of int, signed, unsigned, SMI (near +/- 2^30, 2^31, 2^32).
+     0x03fffffff,  0x040000000,  0x040000001,
+    -0x03fffffff, -0x040000000, -0x040000001,
+     0x07fffffff,  0x080000000,  0x080000001,
+    -0x07fffffff, -0x080000000, -0x080000001,
+     0x0ffffffff,  0x100000000,  0x100000001,
+    -0x0ffffffff, -0x100000000, -0x100000001,
+
+    // Boundaries of maximum safe integer (near +/- 2^53).
+     9007199254740990,  9007199254740991,  9007199254740992,
+    -9007199254740990, -9007199254740991, -9007199254740992,
+
+    // Boundaries of double.
+     5e-324, 1.7976931348623157e+308,
+    -5e-324,-1.7976931348623157e+308,
+]
+
+const INTERESTING_NON_NUMBER_VALUES = [
+    // Simple arrays.
+    '[]',
+    'Array(0x8000).fill("a")',
+
+    // Simple object.
+    '{}',
+    '{a: "foo", b: 10, c: {}}',
+
+    // Simple strings.
+    '"foo"',
+    '""',
+
+    // Simple regex.
+    '/0/',
+    '"/0/"',
+
+    // Simple symbol.
+    'Symbol("foo")',
+
+    // Long string.
+    'Array(0x8000).join("a")',
+
+    // Math.PI
+    'Math.PI',
+
+    // Others.
+    'false',
+    'true',
+    'undefined',
+    'null',
+    'this',
+    'this[0]',
+    'this[1]',
+
+    // Empty function.
+    '(function() {return 0;})',
+
+    // Objects with functions.
+    '({toString:function(){return "0";}})',
+    '({valueOf:function(){return 0;}})',
+    '({valueOf:function(){return "0";}})',
+
+    // Objects for primitive types created using new.
+    '(new Boolean(false))',
+    '(new Boolean(true))',
+    '(new String(""))',
+    '(new Number(0))',
+    '(new Number(-0))',
+]
+
+const LARGE_NODE_SIZE = 100;
+const MAX_ARGUMENT_COUNT = 10;
+
+function _identifier(identifier) {
+  return babelTypes.identifier(identifier);
+}
+
+function _numericLiteral(number) {
+  return babelTypes.numericLiteral(number);
+}
+
+function _unwrapExpressionStatement(value) {
+  if (babelTypes.isExpressionStatement(value)) {
+    return value.expression;
+  }
+
+  return value;
+}
+
+function isVariableIdentifier(name) {
+  return /__v_[0-9]+/.test(name);
+}
+
+function isFunctionIdentifier(name) {
+  return /__f_[0-9]+/.test(name);
+}
+
+function isInForLoopCondition(path) {
+  // Return whether we're in the init/test/update parts of a for loop (but
+  // not the body). Mutating variables in the init/test/update will likely
+  // modify loop variables and cause infinite loops.
+  const forStatementChild = path.find(
+      p => p.parent && babelTypes.isForStatement(p.parent));
+
+  return (forStatementChild && forStatementChild.parentKey !== 'body');
+}
+
+function isInWhileLoop(path) {
+  // Return whether we're in a while loop.
+  const whileStatement = path.find(p => babelTypes.isWhileStatement(p));
+  return Boolean(whileStatement);
+}
+
+function _availableIdentifiers(path, filter) {
+  // TODO(ochang): Consider globals that aren't declared with let/var etc.
+  const available = new Array();
+  const allBindings = path.scope.getAllBindings();
+  for (const key of Object.keys(allBindings)) {
+    if (!filter(key)) {
+      continue;
+    }
+
+    if (filter === isVariableIdentifier &&
+        path.willIMaybeExecuteBefore(allBindings[key].path)) {
+      continue;
+    }
+
+    available.push(_identifier(key));
+  }
+
+  return available;
+}
+
+function availableVariables(path) {
+  return _availableIdentifiers(path, isVariableIdentifier);
+}
+
+function availableFunctions(path) {
+  return _availableIdentifiers(path, isFunctionIdentifier);
+}
+
+function randomVariable(path) {
+  return random.single(availableVariables(path));
+}
+
+function randomFunction(path) {
+  return random.single(availableFunctions(path));
+}
+
+function randomSeed() {
+  return random.randInt(0, 2**20);
+}
+
+function randomObject(seed) {
+  if (seed === undefined) {
+    seed = randomSeed();
+  }
+
+  const template = babelTemplate('__getRandomObject(SEED)');
+  return template({
+    SEED: _numericLiteral(seed),
+  }).expression;
+}
+
+function randomProperty(identifier, seed) {
+  if (seed === undefined) {
+    seed = randomSeed();
+  }
+
+  const template = babelTemplate('__getRandomProperty(IDENTIFIER, SEED)');
+  return template({
+    IDENTIFIER: identifier,
+    SEED: _numericLiteral(seed),
+  }).expression;
+}
+
+function randomArguments(path) {
+  const numArgs = random.randInt(0, MAX_ARGUMENT_COUNT);
+  const args = [];
+
+  for (let i = 0; i < numArgs; i++) {
+    args.push(randomValue(path));
+  }
+
+  return args.map(_unwrapExpressionStatement);
+}
+
+function randomValue(path) {
+  const probability = random.random();
+
+  if (probability < 0.01) {
+    const randomFunc = randomFunction(path);
+    if (randomFunc) {
+      return randomFunc;
+    }
+  }
+
+  if (probability < 0.25) {
+    const randomVar = randomVariable(path);
+    if (randomVar) {
+      return randomVar;
+    }
+  }
+
+  if (probability < 0.5) {
+    return randomInterestingNumber();
+  }
+
+  if (probability < 0.75) {
+    return randomInterestingNonNumber();
+  }
+
+  return randomObject();
+}
+
+function callRandomFunction(path, identifier, seed) {
+  if (seed === undefined) {
+    seed = randomSeed();
+  }
+
+  let args = [
+      identifier,
+      _numericLiteral(seed)
+  ];
+
+  args = args.map(_unwrapExpressionStatement);
+  args = args.concat(randomArguments(path));
+
+  return babelTypes.callExpression(
+      babelTypes.identifier('__callRandomFunction'),
+      args);
+}
+
+function nearbyRandomNumber(value) {
+  const probability = random.random();
+
+  if (probability < 0.9) {
+    return _numericLiteral(value + random.randInt(-0x10, 0x10));
+  } else if (probability < 0.95) {
+    return _numericLiteral(value + random.randInt(-0x100, 0x100));
+  } else if (probability < 0.99) {
+    return _numericLiteral(value + random.randInt(-0x1000, 0x1000));
+  }
+
+  return _numericLiteral(value + random.randInt(-0x10000, 0x10000));
+}
+
+function randomInterestingNumber() {
+  const value = random.single(INTERESTING_NUMBER_VALUES);
+  if (random.choose(0.05)) {
+    return nearbyRandomNumber(value);
+  }
+  return _numericLiteral(value);
+}
+
+function randomInterestingNonNumber() {
+  return babylon.parseExpression(random.single(INTERESTING_NON_NUMBER_VALUES));
+}
+
+function concatFlags(inputs) {
+  const flags = new Set();
+  for (const input of inputs) {
+    for (const flag of input.flags || []) {
+      flags.add(flag);
+    }
+  }
+  return Array.from(flags.values());
+}
+
+function concatPrograms(inputs) {
+  // Concatenate programs.
+  const resultProgram = babelTypes.program([]);
+  const result = babelTypes.file(resultProgram, [], null);
+
+  for (const input of inputs) {
+    const ast = input.ast.program;
+    resultProgram.body = resultProgram.body.concat(ast.body);
+    resultProgram.directives = resultProgram.directives.concat(ast.directives);
+  }
+
+  // TODO(machenbach): Concat dependencies here as soon as they are cached.
+  const combined = new sourceHelpers.ParsedSource(
+      result, '', '', concatFlags(inputs));
+  // If any input file is sloppy, the combined result is sloppy.
+  combined.sloppy = inputs.some(input => input.isSloppy());
+  return combined;
+}
+
+function setSourceLoc(source, index, total) {
+  const noop = babelTypes.noop();
+  noop.__loc = index / total;
+  noop.__self = noop;
+  source.ast.program.body.unshift(noop);
+}
+
+function getSourceLoc(node) {
+  // Source location is invalid in cloned nodes.
+  if (node !== node.__self) {
+    return undefined;
+  }
+  return node.__loc;
+}
+
+function setOriginalPath(source, originalPath) {
+  const noop = babelTypes.noop();
+  noop.__path = originalPath;
+  noop.__self = noop;
+  source.ast.program.body.unshift(noop);
+}
+
+function getOriginalPath(node) {
+  // Original path is invalid in cloned nodes.
+  if (node !== node.__self) {
+    return undefined;
+  }
+  return node.__path;
+}
+
+// Estimate the size of a node in raw source characters.
+function isLargeNode(node) {
+  // Ignore array holes inserted by us (null) or previously cloned nodes
+  // (they have no start/end).
+  if (!node || node.start === undefined || node.end === undefined ) {
+    return false;
+  }
+  return node.end - node.start > LARGE_NODE_SIZE;
+}
+
+module.exports = {
+  callRandomFunction: callRandomFunction,
+  concatFlags: concatFlags,
+  concatPrograms: concatPrograms,
+  availableVariables: availableVariables,
+  availableFunctions: availableFunctions,
+  randomFunction: randomFunction,
+  randomVariable: randomVariable,
+  isInForLoopCondition: isInForLoopCondition,
+  isInWhileLoop: isInWhileLoop,
+  isLargeNode: isLargeNode,
+  isVariableIdentifier: isVariableIdentifier,
+  isFunctionIdentifier: isFunctionIdentifier,
+  nearbyRandomNumber: nearbyRandomNumber,
+  randomArguments: randomArguments,
+  randomInterestingNonNumber: randomInterestingNonNumber,
+  randomInterestingNumber: randomInterestingNumber,
+  randomObject: randomObject,
+  randomProperty: randomProperty,
+  randomSeed: randomSeed,
+  randomValue: randomValue,
+  getOriginalPath: getOriginalPath,
+  setOriginalPath: setOriginalPath,
+  getSourceLoc: getSourceLoc,
+  setSourceLoc: setSourceLoc,
+}
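A small sketch of a couple of the value helpers above (assuming common.js is required relative to the js_fuzzer directory; the seed 42 and the boundary constant are arbitrary choices):

  // Sketch only: build AST nodes with the helpers and print them as source.
  const babelGenerator = require('@babel/generator').default;
  const common = require('./mutators/common.js');

  // A numeric literal near the 2^30 boundary.
  console.log(babelGenerator(common.nearbyRandomNumber(0x40000000)).code);

  // A call to the runtime helper __getRandomObject with a fixed seed.
  console.log(babelGenerator(common.randomObject(42)).code);  // __getRandomObject(42)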
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/crossover_mutator.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/crossover_mutator.js
new file mode 100644
index 0000000..7e3c495
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/crossover_mutator.js
@@ -0,0 +1,88 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Expression mutator.
+ */
+
+'use strict';
+
+const babelTemplate = require('@babel/template').default;
+
+const common = require('./common.js');
+const random = require('../random.js');
+const mutator = require('./mutator.js');
+const sourceHelpers = require('../source_helpers.js');
+
+class CrossOverMutator extends mutator.Mutator {
+  constructor(settings, db) {
+    super();
+    this.settings = settings;
+    this.db = db;
+  }
+
+  get visitor() {
+    const thisMutator = this;
+
+    return [{
+      ExpressionStatement(path) {
+        if (!random.choose(thisMutator.settings.MUTATE_CROSSOVER_INSERT)) {
+          return;
+        }
+
+        const canHaveSuper = Boolean(path.findParent(x => x.isClassMethod()));
+        const randomExpression = thisMutator.db.getRandomStatement(
+            {canHaveSuper: canHaveSuper});
+
+        // Insert the statement.
+        var templateOptions = Object.assign({}, sourceHelpers.BABYLON_OPTIONS);
+        templateOptions['placeholderPattern'] = /^VAR_[0-9]+$/;
+
+        let toInsert = babelTemplate(
+            randomExpression.source,
+            templateOptions);
+        const dependencies = {};
+
+        if (randomExpression.dependencies) {
+          const variables = common.availableVariables(path);
+          if (!variables.length) {
+            return;
+          }
+          for (const dependency of randomExpression.dependencies) {
+            dependencies[dependency] = random.single(variables);
+          }
+        }
+
+        try {
+          toInsert = toInsert(dependencies);
+        } catch (e) {
+          if (thisMutator.settings.testing) {
+            // Fail early in tests.
+            throw e;
+          }
+          console.log('ERROR: Failed to parse:', randomExpression.source);
+          console.log(e);
+          return;
+        }
+
+        thisMutator.annotate(
+            toInsert,
+            'Crossover from ' + randomExpression.originalPath);
+
+        if (random.choose(0.5)) {
+          thisMutator.insertBeforeSkip(path, toInsert);
+        } else {
+          thisMutator.insertAfterSkip(path, toInsert);
+        }
+
+        path.skip();
+      },
+    }, {
+    }];
+  }
+}
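+// Illustrative example: for a database entry whose source is
+// 'VAR_0[VAR_1] = VAR_0;' with dependencies ['VAR_0', 'VAR_1'], each
+// placeholder is bound to a randomly chosen in-scope variable and the
+// resulting statement is inserted before or after the visited expression
+// statement (50/50).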
+
+module.exports = {
+  CrossOverMutator: CrossOverMutator,
+};
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/differential_fuzz_mutator.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/differential_fuzz_mutator.js
new file mode 100644
index 0000000..92c8df6
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/differential_fuzz_mutator.js
@@ -0,0 +1,225 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Mutator for differential fuzzing.
+ */
+
+'use strict';
+
+const babelTemplate = require('@babel/template').default;
+const babelTypes = require('@babel/types');
+
+const common = require('./common.js');
+const mutator = require('./mutator.js');
+const random = require('../random.js');
+
+// Templates for various statements.
+const incCaught = babelTemplate('__caught++;');
+const printValue = babelTemplate('print(VALUE);');
+const printCaught = babelTemplate('print("Caught: " + __caught);');
+const printHash = babelTemplate('print("Hash: " + __hash);');
+const prettyPrint = babelTemplate('__prettyPrint(ID);');
+const prettyPrintExtra = babelTemplate('__prettyPrintExtra(ID);');
+
+// This section prefix is expected by v8_foozzie.py. Existing prefixes
+// (e.g. from CrashTests) are cleaned up with CLEANED_PREFIX.
+const SECTION_PREFIX = 'v8-foozzie source: ';
+const CLEANED_PREFIX = 'v***************e: ';
+
+/**
+ * Babel statement for calling deep printing from the fuzz library.
+ */
+function prettyPrintStatement(variable) {
+  return prettyPrint({ ID: babelTypes.cloneDeep(variable) });
+}
+
+/**
+ * As above, but using the "extra" variant, which will reduce printing
+ * after too many calls to prevent I/O flooding.
+ */
+function prettyPrintExtraStatement(variable) {
+  return prettyPrintExtra({ ID: babelTypes.cloneDeep(variable) });
+}
+
+/**
+ * Mutator for suppressing known and/or unfixable issues.
+ */
+class DifferentialFuzzSuppressions extends mutator.Mutator {
+  get visitor() {
+    let thisMutator = this;
+
+    return {
+      // Clean up strings containing the magic section prefix. Those can come
+      // e.g. from CrashTests and would confuse the deduplication in
+      // v8_foozzie.py.
+      StringLiteral(path) {
+        if (path.node.value.startsWith(SECTION_PREFIX)) {
+          const postfix = path.node.value.substring(SECTION_PREFIX.length);
+          path.node.value = CLEANED_PREFIX + postfix;
+          thisMutator.annotate(path.node, 'Replaced magic string');
+        }
+      },
+      // Known precision differences: https://crbug.com/1063568
+      BinaryExpression(path) {
+        if (path.node.operator == '**') {
+          path.node.operator = '+';
+          thisMutator.annotate(path.node, 'Replaced **');
+        }
+      },
+      // Unsupported language feature: https://crbug.com/1020573
+      MemberExpression(path) {
+        if (path.node.property.name == "arguments") {
+          let replacement = common.randomVariable(path);
+          if (!replacement) {
+            replacement = babelTypes.thisExpression();
+          }
+          thisMutator.annotate(replacement, 'Replaced .arguments');
+          thisMutator.replaceWithSkip(path, replacement);
+        }
+      },
+    };
+  }
+}
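+// Example: `a ** b` is rewritten to `a + b`, `f.arguments` is replaced by a
+// random in-scope variable (or `this`), and string literals starting with
+// SECTION_PREFIX get the defused CLEANED_PREFIX instead, so fuzzed inputs
+// cannot forge the section markers used by v8_foozzie.py.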
+
+/**
+ * Mutator for tracking original input files and for extra printing.
+ */
+class DifferentialFuzzMutator extends mutator.Mutator {
+  constructor(settings) {
+    super();
+    this.settings = settings;
+  }
+
+  /**
+   * Looks for the dummy node that marks the beginning of an input file
+   * from the corpus.
+   */
+  isSectionStart(path) {
+    return !!common.getOriginalPath(path.node);
+  }
+
+  /**
+   * Create print statements for printing the magic section prefix that's
+   * expected by v8_foozzie.py to differentiate different source files.
+   */
+  getSectionHeader(path) {
+    const orig = common.getOriginalPath(path.node);
+    return printValue({
+      VALUE: babelTypes.stringLiteral(SECTION_PREFIX + orig),
+    });
+  }
+
+  /**
+   * Create statements for extra printing at the end of a section. We print
+   * the number of caught exceptions, a generic hash of all observed values
+   * and the contents of all variables in scope.
+   */
+  getSectionFooter(path) {
+    const variables = common.availableVariables(path);
+    const statements = variables.map(prettyPrintStatement);
+    statements.unshift(printCaught());
+    statements.unshift(printHash());
+    const statement = babelTypes.tryStatement(
+        babelTypes.blockStatement(statements),
+        babelTypes.catchClause(
+            babelTypes.identifier('e'),
+            babelTypes.blockStatement([])));
+    this.annotate(statement, 'Print variables and exceptions from section');
+    return statement;
+  }
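+  // For example, with __v_0 and __v_1 in scope the footer is:
+  //   try {
+  //     print("Hash: " + __hash);
+  //     print("Caught: " + __caught);
+  //     __prettyPrint(__v_0);
+  //     __prettyPrint(__v_1);
+  //   } catch (e) {}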
+
+  /**
+   * Helper for printing the contents of several variables.
+   */
+  printVariables(path, nodes) {
+    const statements = [];
+    for (const node of nodes) {
+      if (!babelTypes.isIdentifier(node) ||
+          !common.isVariableIdentifier(node.name))
+        continue;
+      statements.push(prettyPrintExtraStatement(node));
+    }
+    if (statements.length) {
+      this.annotate(statements[0], 'Extra variable printing');
+      this.insertAfterSkip(path, statements);
+    }
+  }
+
+  get visitor() {
+    const thisMutator = this;
+    const settings = this.settings;
+
+    return {
+      // Replace existing normal print statements with deep printing.
+      CallExpression(path) {
+        if (babelTypes.isIdentifier(path.node.callee) &&
+            path.node.callee.name == 'print') {
+          path.node.callee = babelTypes.identifier('__prettyPrintExtra');
+          thisMutator.annotate(path.node, 'Pretty printing');
+        }
+      },
+      // Either print or track caught exceptions, guarded by a probability.
+      CatchClause(path) {
+        const probability = random.random();
+        if (probability < settings.DIFF_FUZZ_EXTRA_PRINT &&
+            path.node.param &&
+            babelTypes.isIdentifier(path.node.param)) {
+          const statement = prettyPrintExtraStatement(path.node.param);
+          path.node.body.body.unshift(statement);
+        } else if (probability < settings.DIFF_FUZZ_TRACK_CAUGHT) {
+          path.node.body.body.unshift(incCaught());
+        }
+      },
+      // Insert section headers and footers between the contents of two
+      // original source files. We detect the dummy no-op nodes that were
+      // previously tagged with the original path of the file.
+      Noop(path) {
+        if (!thisMutator.isSectionStart(path)) {
+          return;
+        }
+        const header = thisMutator.getSectionHeader(path);
+        const footer = thisMutator.getSectionFooter(path);
+        thisMutator.insertBeforeSkip(path, footer);
+        thisMutator.insertBeforeSkip(path, header);
+      },
+      // Additionally, we print one footer at the end.
+      Program: {
+        exit(path) {
+          const footer = thisMutator.getSectionFooter(path);
+          path.node.body.push(footer);
+        },
+      },
+      // Print contents of variables after assignments, guarded by a
+      // probability.
+      ExpressionStatement(path) {
+        if (!babelTypes.isAssignmentExpression(path.node.expression) ||
+            !random.choose(settings.DIFF_FUZZ_EXTRA_PRINT)) {
+          return;
+        }
+        const left = path.node.expression.left;
+        if (babelTypes.isMemberExpression(left)) {
+          thisMutator.printVariables(path, [left.object]);
+        } else {
+          thisMutator.printVariables(path, [left]);
+        }
+      },
+      // Print contents of variables after declaration, guarded by a
+      // probability.
+      VariableDeclaration(path) {
+        if (babelTypes.isLoop(path.parent) ||
+            !random.choose(settings.DIFF_FUZZ_EXTRA_PRINT)) {
+          return;
+        }
+        const identifiers = path.node.declarations.map(decl => decl.id);
+        thisMutator.printVariables(path, identifiers);
+      },
+    };
+  }
+}
+
+module.exports = {
+  DifferentialFuzzMutator: DifferentialFuzzMutator,
+  DifferentialFuzzSuppressions: DifferentialFuzzSuppressions,
+};
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/expression_mutator.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/expression_mutator.js
new file mode 100644
index 0000000..516d65a
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/expression_mutator.js
@@ -0,0 +1,63 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Expression mutator.
+ */
+
+'use strict';
+
+const babelTypes = require('@babel/types');
+
+const random = require('../random.js');
+const mutator = require('./mutator.js');
+
+class ExpressionMutator extends mutator.Mutator {
+  constructor(settings) {
+    super();
+    this.settings = settings;
+  }
+
+  get visitor() {
+    const thisMutator = this;
+
+    return {
+      ExpressionStatement(path) {
+        if (!random.choose(thisMutator.settings.MUTATE_EXPRESSIONS)) {
+          return;
+        }
+
+        const probability = random.random();
+
+        if (probability < 0.7) {
+          const repeated = babelTypes.cloneDeep(path.node);
+          thisMutator.annotate(repeated, 'Repeated');
+          thisMutator.insertBeforeSkip(path, repeated);
+        } else if (path.key > 0) {
+          // Get a random previous sibling.
+          const prev = path.getSibling(random.randInt(0, path.key - 1));
+          if (!prev || !prev.node) {
+            return;
+          }
+          // Select either the previous sibling or the current node to clone.
+          const [selected, destination] = random.shuffle([prev, path]);
+          if (selected.isDeclaration()) {
+            return;
+          }
+          const cloned = babelTypes.cloneDeep(selected.node);
+          thisMutator.annotate(cloned, 'Cloned sibling');
+          if (random.choose(0.5)) {
+            thisMutator.insertBeforeSkip(destination, cloned);
+          } else {
+            thisMutator.insertAfterSkip(destination, cloned);
+          }
+        }
+      },
+    };
+  }
+}
+
+module.exports = {
+  ExpressionMutator: ExpressionMutator,
+};
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/function_call_mutator.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/function_call_mutator.js
new file mode 100644
index 0000000..4f34e15
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/function_call_mutator.js
@@ -0,0 +1,126 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Function calls mutator.
+ */
+
+'use strict';
+
+const babelTemplate = require('@babel/template').default;
+const babelTypes = require('@babel/types');
+
+const common = require('./common.js');
+const random = require('../random.js');
+const mutator = require('./mutator.js');
+
+function _liftExpressionsToStatements(path, nodes) {
+  // If the node we're replacing is an expression in an expression statement,
+  // lift the replacement nodes into statements too.
+  if (!babelTypes.isExpressionStatement(path.parent)) {
+    return nodes;
+  }
+
+  return nodes.map(n => babelTypes.expressionStatement(n));
+}
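+// Example: when the visited call is itself an expression statement (`f();`),
+// the generated nodes are wrapped in expression statements of their own;
+// inside a larger expression (`x = f()`), the raw expressions are returned so
+// the caller can combine them into a sequence expression instead.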
+
+class FunctionCallMutator extends mutator.Mutator {
+  constructor(settings) {
+    super();
+    this.settings = settings;
+  }
+
+  get visitor() {
+    const thisMutator = this;
+
+    return {
+      CallExpression(path) {
+        if (!babelTypes.isIdentifier(path.node.callee)) {
+          return;
+        }
+
+        if (!common.isFunctionIdentifier(path.node.callee.name)) {
+          return;
+        }
+
+        if (!random.choose(thisMutator.settings.MUTATE_FUNCTION_CALLS)) {
+          return;
+        }
+
+        const probability = random.random();
+        if (probability < 0.5) {
+          const randFunc = common.randomFunction(path);
+          if (randFunc) {
+            thisMutator.annotate(
+                path.node,
+                `Replaced ${path.node.callee.name} with ${randFunc.name}`);
+
+            path.node.callee = randFunc;
+          }
+        } else if (probability < 0.7 && thisMutator.settings.engine == 'V8') {
+          const prepareTemplate = babelTemplate(
+              '__V8BuiltinPrepareFunctionForOptimization(ID)');
+          const optimizeTemplate = babelTemplate(
+              '__V8BuiltinOptimizeFunctionOnNextCall(ID)');
+
+          const nodes = [
+              prepareTemplate({
+                ID: babelTypes.cloneDeep(path.node.callee),
+              }).expression,
+              babelTypes.cloneDeep(path.node),
+              babelTypes.cloneDeep(path.node),
+              optimizeTemplate({
+                ID: babelTypes.cloneDeep(path.node.callee),
+              }).expression,
+          ];
+
+          thisMutator.annotate(
+              path.node,
+              `Optimizing ${path.node.callee.name}`);
+          if (!babelTypes.isExpressionStatement(path.parent)) {
+            nodes.push(path.node);
+            thisMutator.replaceWithSkip(
+                path, babelTypes.sequenceExpression(nodes));
+          } else {
+            thisMutator.insertBeforeSkip(
+                path, _liftExpressionsToStatements(path, nodes));
+          }
+        } else if (probability < 0.85 &&
+                   thisMutator.settings.engine == 'V8') {
+          const template = babelTemplate(
+              '__V8BuiltinDeoptimizeFunction(ID)');
+          const insert = _liftExpressionsToStatements(path, [
+              template({
+                ID: babelTypes.cloneDeep(path.node.callee),
+              }).expression,
+          ]);
+
+          thisMutator.annotate(
+              path.node,
+              `Deoptimizing ${path.node.callee.name}`);
+
+          thisMutator.insertAfterSkip(path, insert);
+        } else {
+          const template = babelTemplate(
+              'runNearStackLimit(() => { return CALL });');
+          thisMutator.annotate(
+              path.node,
+              `Run to stack limit ${path.node.callee.name}`);
+
+          thisMutator.replaceWithSkip(
+              path,
+              template({
+                CALL: path.node,
+              }).expression);
+        }
+
+        path.skip();
+      },
+    }
+  }
+}
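+// Illustrative output of the optimization branch for a statement `__f_0();`:
+//   __V8BuiltinPrepareFunctionForOptimization(__f_0);
+//   __f_0();
+//   __f_0();
+//   __V8BuiltinOptimizeFunctionOnNextCall(__f_0);
+// followed by the original call, which stays in place.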
+
+module.exports = {
+  FunctionCallMutator: FunctionCallMutator,
+};
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/mutator.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/mutator.js
new file mode 100644
index 0000000..9f27d16
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/mutator.js
@@ -0,0 +1,98 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Mutator
+ */
+'use strict';
+
+const babelTraverse = require('@babel/traverse').default;
+const babelTypes = require('@babel/types');
+
+class Mutator {
+  get visitor() {
+    return null;
+  }
+
+  _traverse(ast, visitor) {
+    let oldEnter = null;
+    if (Object.prototype.hasOwnProperty.call(visitor, 'enter')) {
+      oldEnter = visitor['enter'];
+    }
+
+    // Transparently skip nodes that are marked.
+    visitor['enter'] = (path) => {
+      if (this.shouldSkip(path.node)) {
+        path.skip();
+        return;
+      }
+
+      if (oldEnter) {
+        oldEnter(path);
+      }
+    }
+
+    babelTraverse(ast, visitor);
+  }
+
+  mutate(source) {
+    if (Array.isArray(this.visitor)) {
+      for (const visitor of this.visitor) {
+        this._traverse(source.ast, visitor);
+      }
+    } else {
+      this._traverse(source.ast, this.visitor);
+    }
+  }
+
+  get _skipPropertyName() {
+    return '__skip' + this.constructor.name;
+  }
+
+  shouldSkip(node) {
+    return Boolean(node[this._skipPropertyName]);
+  }
+
+  skipMutations(node) {
+    // Mark a node to skip further mutations of the same kind.
+    if (Array.isArray(node)) {
+      for (const item of node) {
+        item[this._skipPropertyName] = true;
+      }
+    } else {
+      node[this._skipPropertyName] = true;
+    }
+
+    return node;
+  }
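+  // Example: nodes inserted by FunctionCallMutator carry the property
+  // __skipFunctionCallMutator, so the enter wrapper in _traverse skips them
+  // for that mutator while other mutators still visit them.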
+
+  insertBeforeSkip(path, node) {
+    this.skipMutations(node);
+    path.insertBefore(node);
+  }
+
+  insertAfterSkip(path, node) {
+    this.skipMutations(node);
+    path.insertAfter(node);
+  }
+
+  replaceWithSkip(path, node) {
+    this.skipMutations(node);
+    path.replaceWith(node);
+  }
+
+  replaceWithMultipleSkip(path, node) {
+    this.skipMutations(node);
+    path.replaceWithMultiple(node);
+  }
+
+  annotate(node, message) {
+    babelTypes.addComment(
+        node, 'leading', ` ${this.constructor.name}: ${message} `);
+  }
+}
+
+module.exports = {
+  Mutator: Mutator,
+}
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/normalizer.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/normalizer.js
new file mode 100644
index 0000000..7b5a35a
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/normalizer.js
@@ -0,0 +1,89 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Normalizer.
+ * This renames variables so that we don't have collisions when combining
+ * different files. It also simplifies other logic when e.g. determining the
+ * type of an identifier.
+ */
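+//
+// Example: `function add(a, b) { return a + b; }` becomes roughly
+// `function __f_0(__v_0, __v_1) { return __v_0 + __v_1; }`; class names get
+// __c_N, and globals assigned without a declaration are renamed to __v_N in
+// the second pass.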
+'use strict';
+
+const babelTypes = require('@babel/types');
+
+const mutator = require('./mutator.js');
+
+class NormalizerContext {
+  constructor() {
+    this.funcIndex = 0;
+    this.varIndex = 0;
+    this.classIndex = 0;
+  }
+}
+
+class IdentifierNormalizer extends mutator.Mutator {
+  constructor() {
+    super();
+    this.context = new NormalizerContext();
+  }
+
+  get visitor() {
+    const context = this.context;
+    const renamed = new WeakSet();
+    const globalMappings = new Map();
+
+    return [{
+      Scope(path) {
+        for (const [name, binding] of Object.entries(path.scope.bindings)) {
+          if (renamed.has(binding.identifier)) {
+            continue;
+          }
+
+          renamed.add(binding.identifier);
+
+          if (babelTypes.isClassDeclaration(binding.path.node) ||
+              babelTypes.isClassExpression(binding.path.node)) {
+            path.scope.rename(name, '__c_' + context.classIndex++);
+          } else if (babelTypes.isFunctionDeclaration(binding.path.node) ||
+                     babelTypes.isFunctionExpression(binding.path.node)) {
+            path.scope.rename(name, '__f_' + context.funcIndex++);
+          } else {
+            path.scope.rename(name, '__v_' + context.varIndex++);
+          }
+        }
+      },
+
+      AssignmentExpression(path) {
+        // Find assignments for which we have no binding in the scope. We assume
+        // that these are globals which are local to our script (which weren't
+        // declared with var/let/const etc).
+        const ids = path.getBindingIdentifiers();
+        for (const name in ids) {
+          if (!path.scope.getBinding(name)) {
+            globalMappings.set(name, '__v_' + context.varIndex++);
+          }
+        }
+      }
+    }, {
+      // Second pass to rename globals that weren't declared with
+      // var/let/const etc.
+      Identifier(path) {
+        if (!globalMappings.has(path.node.name)) {
+          return;
+        }
+
+        if (path.scope.getBinding(path.node.name)) {
+          // Don't rename if there is a binding that hides the global.
+          return;
+        }
+
+        path.node.name = globalMappings.get(path.node.name);
+      }
+    }];
+  }
+}
+
+module.exports = {
+  IdentifierNormalizer: IdentifierNormalizer,
+};
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/number_mutator.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/number_mutator.js
new file mode 100644
index 0000000..c0f3ea8
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/number_mutator.js
@@ -0,0 +1,105 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Numbers mutator.
+ */
+
+'use strict';
+
+const babelTypes = require('@babel/types');
+
+const common = require('./common.js');
+const random = require('../random.js');
+const mutator = require('./mutator.js');
+
+const MIN_SAFE_INTEGER = -9007199254740991;
+const MAX_SAFE_INTEGER = 9007199254740991;
+
+
+function isObjectKey(path) {
+  return (path.parent &&
+          babelTypes.isObjectMember(path.parent) &&
+          path.parent.key === path.node);
+}
+
+function createRandomNumber(value) {
+  // TODO(ochang): Maybe replace with variable.
+  const probability = random.random();
+  if (probability < 0.01) {
+    return babelTypes.numericLiteral(
+        random.randInt(MIN_SAFE_INTEGER, MAX_SAFE_INTEGER));
+  } else if (probability < 0.06) {
+    return common.randomInterestingNumber();
+  } else {
+    return common.nearbyRandomNumber(value);
+  }
+}
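+// Example: for an original value of 42, about 1% of replacements are a
+// uniformly random safe integer, another 5% come from
+// common.randomInterestingNumber(), and the rest are values near 42 from
+// common.nearbyRandomNumber().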
+
+class NumberMutator extends mutator.Mutator {
+  constructor(settings) {
+    super();
+    this.settings = settings;
+  }
+
+  ignore(path) {
+    return !random.choose(this.settings.MUTATE_NUMBERS) ||
+           common.isInForLoopCondition(path) ||
+           common.isInWhileLoop(path);
+  }
+
+  randomReplace(path, value, forcePositive=false) {
+    const randomNumber = createRandomNumber(value);
+
+    if (forcePositive) {
+      randomNumber.value = Math.abs(randomNumber.value);
+    }
+
+    this.annotate(
+        path.node,
+        `Replaced ${value} with ${randomNumber.value}`);
+
+    this.replaceWithSkip(path, randomNumber);
+  }
+
+  get visitor() {
+    const thisMutator = this;
+
+    return {
+      NumericLiteral(path) {
+        if (thisMutator.ignore(path)) {
+          return;
+        }
+
+        // We handle negative unary expressions separately to replace the whole
+        // expression below. E.g. -5 is UnaryExpression(-, NumericLiteral(5)).
+        if (path.parent && babelTypes.isUnaryExpression(path.parent) &&
+            path.parent.operator === '-') {
+          return;
+        }
+
+        // Enforce positive numbers if the literal is the key of an object
+        // property or method. Negative keys cause syntax errors.
+        const forcePositive = isObjectKey(path);
+
+        thisMutator.randomReplace(path, path.node.value, forcePositive);
+      },
+      UnaryExpression(path) {
+        if (thisMutator.ignore(path)) {
+          return;
+        }
+
+        // Handle the case we ignore above.
+        if (path.node.operator === '-' &&
+            babelTypes.isNumericLiteral(path.node.argument)) {
+          thisMutator.randomReplace(path, -path.node.argument.value);
+        }
+      }
+    };
+  }
+}
+
+module.exports = {
+  NumberMutator: NumberMutator,
+};
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/object_mutator.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/object_mutator.js
new file mode 100644
index 0000000..7906b4f
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/object_mutator.js
@@ -0,0 +1,135 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Mutator for object expressions.
+ */
+
+'use strict';
+
+const babelTypes = require('@babel/types');
+
+const common = require('./common.js');
+const mutator = require('./mutator.js');
+const random = require('../random.js');
+
+const MAX_PROPERTIES = 50;
+
+/**
+ * Turn the key of an object property into a string literal.
+ */
+function keyToString(key) {
+  if (babelTypes.isNumericLiteral(key)) {
+    return babelTypes.stringLiteral(key.value.toString());
+  }
+  if (babelTypes.isIdentifier(key)) {
+    return babelTypes.stringLiteral(key.name);
+  }
+  // Already a string literal.
+  return key;
+}
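+// Example: the key in `{1: x}` becomes the string literal '1', the key in
+// `{foo: x}` becomes 'foo', and keys that are already string literals are
+// returned unchanged.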
+
+class ObjectMutator extends mutator.Mutator {
+  constructor(settings) {
+    super();
+    this.settings = settings;
+  }
+
+  get visitor() {
+    const thisMutator = this;
+
+    return {
+      ObjectExpression(path) {
+        const properties = path.node.properties;
+        if (!random.choose(thisMutator.settings.MUTATE_OBJECTS) ||
+            properties.length > MAX_PROPERTIES) {
+          return;
+        }
+
+        // Use the indices of object properties for mutations. We ignore
+        // getters and setters.
+        const propertyIndicies = [];
+        for (const [index, property] of properties.entries()) {
+          if (babelTypes.isObjectProperty(property)) {
+            propertyIndicies.push(index);
+          }
+        }
+
+        // The mutations below require at least one property.
+        if (!propertyIndicies.length) {
+          return;
+        }
+
+        // Annotate object expression with the action taken.
+        function annotate(message) {
+          thisMutator.annotate(path.node, message);
+        }
+
+        function getOneRandomProperty() {
+          return properties[random.single(propertyIndicies)];
+        }
+
+        function getTwoRandomProperties() {
+          const [a, b] = random.sample(propertyIndicies, 2);
+          return [properties[a], properties[b]];
+        }
+
+        function swapPropertyValues() {
+          if (propertyIndicies.length > 1) {
+            annotate('Swap properties');
+            const [a, b] = getTwoRandomProperties();
+            [a.value, b.value] = [b.value, a.value];
+          }
+        }
+
+        function duplicatePropertyValue() {
+          if (propertyIndicies.length > 1) {
+            const [a, b] = random.shuffle(getTwoRandomProperties());
+            if (common.isLargeNode(b.value)) {
+              return;
+            }
+            annotate('Duplicate a property value');
+            a.value = babelTypes.cloneDeep(b.value);
+          }
+        }
+
+        function insertRandomValue() {
+          annotate('Insert a random value');
+          const property = getOneRandomProperty();
+          property.value = common.randomValue(path);
+        }
+
+        function stringifyKey() {
+          annotate('Stringify a property key');
+          const property = getOneRandomProperty();
+          property.key = keyToString(property.key);
+        }
+
+        function removeProperty() {
+          annotate('Remove a property');
+          properties.splice(random.single(propertyIndicies), 1);
+        }
+
+        // Mutation options. Repeated mutations have a higher probability.
+        const mutations = [
+          swapPropertyValues,
+          swapPropertyValues,
+          duplicatePropertyValue,
+          duplicatePropertyValue,
+          insertRandomValue,
+          insertRandomValue,
+          removeProperty,
+          stringifyKey,
+        ];
+
+        // Perform mutation.
+        random.single(mutations)();
+      },
+    }
+  }
+}
+
+module.exports = {
+  ObjectMutator: ObjectMutator,
+};
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/try_catch.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/try_catch.js
new file mode 100644
index 0000000..57f81f2
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/try_catch.js
@@ -0,0 +1,175 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Try catch wrapper.
+ */
+
+const babelTypes = require('@babel/types');
+
+const common = require('./common.js');
+const mutator = require('./mutator.js');
+const random = require('../random.js');
+
+// Default target probability for skipping try-catch completely.
+const DEFAULT_SKIP_PROB = 0.2;
+
+// Default target probability to wrap only on toplevel, i.e. to not nest
+// try-catch.
+const DEFAULT_TOPLEVEL_PROB = 0.3;
+
+// Probability to deviate from defaults and use extreme cases.
+const IGNORE_DEFAULT_PROB = 0.05;
+
+// Member expressions to be wrapped. List of (object, property) identifier
+// tuples.
+const WRAPPED_MEMBER_EXPRESSIONS = [
+  ['WebAssembly', 'Module'],
+  ['WebAssembly', 'Instantiate'],
+];
+
+function wrapTryCatch(node) {
+  return babelTypes.tryStatement(
+      babelTypes.blockStatement([node]),
+      babelTypes.catchClause(
+          babelTypes.identifier('e'),
+          babelTypes.blockStatement([])));
+}
+
+function wrapTryCatchInFunction(node) {
+  const ret = wrapTryCatch(babelTypes.returnStatement(node));
+  const anonymousFun = babelTypes.functionExpression(
+      null, [], babelTypes.blockStatement([ret]));
+  return babelTypes.callExpression(anonymousFun, []);
+}
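+// Example: wrapTryCatchInFunction(EXPR) yields roughly
+// `(function() { try { return EXPR; } catch (e) {} })()`, which keeps the
+// wrapped expression usable in value position, e.g. in a `let`/`const`
+// initializer.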
+
+// Wrap particular member expressions after `new` that are known to appear
+// in initializer lists of `let` and `const`.
+function replaceNewExpression(path) {
+  const callee = path.node.callee;
+  if (!babelTypes.isMemberExpression(callee) ||
+      !babelTypes.isIdentifier(callee.object) ||
+      !babelTypes.isIdentifier(callee.property)) {
+    return;
+  }
+  if (WRAPPED_MEMBER_EXPRESSIONS.some(
+      ([object, property]) => callee.object.name === object &&
+                              callee.property.name === property)) {
+    path.replaceWith(wrapTryCatchInFunction(path.node));
+    path.skip();
+  }
+}
+
+function replaceAndSkip(path) {
+  if (!babelTypes.isLabeledStatement(path.parent) ||
+      !babelTypes.isLoop(path.node)) {
+    // Don't wrap loops with labels as it makes continue
+    // statements syntactically invalid. We wrap the label
+    // instead below.
+    path.replaceWith(wrapTryCatch(path.node));
+  }
+  // Prevent infinite looping.
+  path.skip();
+}
+
+class AddTryCatchMutator extends mutator.Mutator {
+  callWithProb(path, fun) {
+    const probability = random.random();
+    if (probability < this.skipProb * this.loc) {
+      // Entirely skip try-catch wrapper.
+      path.skip();
+    } else if (probability < (this.skipProb + this.toplevelProb) * this.loc) {
+      // Only wrap on top-level.
+      fun(path);
+    }
+  }
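+  // Example: with the defaults (skipProb 0.2, toplevelProb 0.3) and a node at
+  // source fraction loc = 0.5, wrapping is skipped entirely with probability
+  // 0.1 and applied only at the top level with probability 0.15; otherwise
+  // the corresponding exit handler wraps the node after its children were
+  // visited, allowing nested try-catch.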
+
+  get visitor() {
+    const thisMutator = this;
+    const accessStatement = {
+      enter(path) {
+        thisMutator.callWithProb(path, replaceAndSkip);
+      },
+      exit(path) {
+        // Apply nested wrapping (is only executed if not skipped above).
+        replaceAndSkip(path);
+      }
+    };
+    return {
+      Program: {
+        enter(path) {
+          // Track original source location fraction in [0, 1).
+          thisMutator.loc = 0;
+          // Target probability for skipping try-catch.
+          thisMutator.skipProb = DEFAULT_SKIP_PROB;
+          // Target probability for not nesting try-catch.
+          thisMutator.toplevelProb = DEFAULT_TOPLEVEL_PROB;
+          // Maybe deviate from target probability for the entire test.
+          if (random.choose(IGNORE_DEFAULT_PROB)) {
+            thisMutator.skipProb = random.uniform(0, 1);
+            thisMutator.toplevelProb = random.uniform(0, 1);
+            thisMutator.annotate(
+                path.node,
+                'Target skip probability ' + thisMutator.skipProb +
+                ' and toplevel probability ' + thisMutator.toplevelProb);
+          }
+        }
+      },
+      Noop: {
+        enter(path) {
+          if (common.getSourceLoc(path.node)) {
+            thisMutator.loc = common.getSourceLoc(path.node);
+          }
+        },
+      },
+      ExpressionStatement: accessStatement,
+      IfStatement: accessStatement,
+      LabeledStatement: {
+        enter(path) {
+          // Apply an extra try-catch around the label of a loop, since we
+          // ignore the loop itself if it has a label.
+          if (babelTypes.isLoop(path.node.body)) {
+            thisMutator.callWithProb(path, replaceAndSkip);
+          }
+        },
+        exit(path) {
+          // Apply nested wrapping (is only executed if not skipped above).
+          if (babelTypes.isLoop(path.node.body)) {
+            replaceAndSkip(path);
+          }
+        },
+      },
+      // This covers {While|DoWhile|ForIn|ForOf|For}Statement.
+      Loop: accessStatement,
+      NewExpression: {
+        enter(path) {
+          thisMutator.callWithProb(path, replaceNewExpression);
+        },
+        exit(path) {
+          // Apply nested wrapping (is only executed if not skipped above).
+          replaceNewExpression(path);
+        }
+      },
+      SwitchStatement: accessStatement,
+      VariableDeclaration: {
+        enter(path) {
+          if (path.node.kind !== 'var' || babelTypes.isLoop(path.parent))
+            return;
+          thisMutator.callWithProb(path, replaceAndSkip);
+        },
+        exit(path) {
+          if (path.node.kind !== 'var' || babelTypes.isLoop(path.parent))
+            return;
+          // Apply nested wrapping (is only executed if not skipped above).
+          replaceAndSkip(path);
+        }
+      },
+      WithStatement: accessStatement,
+    };
+  }
+}
+
+module.exports = {
+  AddTryCatchMutator: AddTryCatchMutator,
+}
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/variable_mutator.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/variable_mutator.js
new file mode 100644
index 0000000..4222932
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/variable_mutator.js
@@ -0,0 +1,73 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Variables mutator.
+ */
+
+'use strict';
+
+const babelTypes = require('@babel/types');
+
+const common = require('./common.js');
+const random = require('../random.js');
+const mutator = require('./mutator.js');
+
+function _isInFunctionParam(path) {
+  const child = path.find(p => p.parent && babelTypes.isFunction(p.parent));
+  return child && child.parentKey === 'params';
+}
+
+class VariableMutator extends mutator.Mutator {
+  constructor(settings) {
+    super();
+    this.settings = settings;
+  }
+
+  get visitor() {
+    const thisMutator = this;
+
+    return {
+      Identifier(path) {
+        if (!random.choose(thisMutator.settings.MUTATE_VARIABLES)) {
+          return;
+        }
+
+        if (!common.isVariableIdentifier(path.node.name)) {
+          return;
+        }
+
+        // Don't mutate variables that are being declared.
+        if (babelTypes.isVariableDeclarator(path.parent)) {
+          return;
+        }
+
+        // Don't mutate function params.
+        if (_isInFunctionParam(path)) {
+          return;
+        }
+
+        if (common.isInForLoopCondition(path) ||
+            common.isInWhileLoop(path)) {
+          return;
+        }
+
+        const randVar = common.randomVariable(path);
+        if (!randVar) {
+          return;
+        }
+
+        const newName = randVar.name;
+        thisMutator.annotate(
+            path.node,
+            `Replaced ${path.node.name} with ${newName}`);
+        path.node.name = newName;
+      }
+    };
+  }
+}
+
+module.exports = {
+  VariableMutator: VariableMutator,
+};
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/variable_or_object_mutation.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/variable_or_object_mutation.js
new file mode 100644
index 0000000..3c96464
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/mutators/variable_or_object_mutation.js
@@ -0,0 +1,154 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Variable or object mutator.
+ */
+
+'use strict';
+
+const babelTemplate = require('@babel/template').default;
+const babelTypes = require('@babel/types');
+
+const common = require('./common.js');
+const random = require('../random.js');
+const mutator = require('./mutator.js');
+
+const MAX_MUTATION_RECURSION_DEPTH = 5;
+
+class VariableOrObjectMutator extends mutator.Mutator {
+  constructor(settings) {
+    super();
+    this.settings = settings;
+  }
+
+  _randomVariableOrObject(path) {
+    const randomVar = common.randomVariable(path);
+    if (random.choose(0.05) || !randomVar) {
+      return common.randomObject();
+    }
+
+    return randomVar;
+  }
+
+  _randomVariableOrObjectMutations(path, recurseDepth=0) {
+    if (recurseDepth >= MAX_MUTATION_RECURSION_DEPTH) {
+      return new Array();
+    }
+
+    const probability = random.random();
+
+    if (probability < 0.3) {
+      const first = this._randomVariableOrObjectMutations(path, recurseDepth + 1);
+      const second = this._randomVariableOrObjectMutations(
+          path, recurseDepth + 1);
+      return first.concat(second);
+    }
+
+    const randVarOrObject = this._randomVariableOrObject(path);
+    const randProperty = common.randomProperty(randVarOrObject);
+    let newRandVarOrObject = randVarOrObject;
+    if (random.choose(0.2)) {
+      newRandVarOrObject = this._randomVariableOrObject(path);
+    }
+
+    const mutations = new Array();
+
+    if (probability < 0.4) {
+      const template = babelTemplate(
+          'delete IDENTIFIER[PROPERTY], __callGC()')
+      mutations.push(template({
+        IDENTIFIER: randVarOrObject,
+        PROPERTY: randProperty
+      }));
+    } else if (probability < 0.5) {
+      const template = babelTemplate(
+          'IDENTIFIER[PROPERTY], __callGC()')
+      mutations.push(template({
+        IDENTIFIER: randVarOrObject,
+        PROPERTY: randProperty
+      }));
+    } else if (probability < 0.6) {
+      const template = babelTemplate(
+          'IDENTIFIER[PROPERTY] = RANDOM, __callGC()')
+      mutations.push(template({
+        IDENTIFIER: randVarOrObject,
+        PROPERTY: randProperty,
+        RANDOM: common.randomValue(path),
+      }));
+    } else if (probability < 0.7) {
+      mutations.push(
+          babelTypes.expressionStatement(
+              common.callRandomFunction(path, randVarOrObject)));
+    } else if (probability < 0.8) {
+      const template = babelTemplate(
+          'VAR = IDENTIFIER, __callGC()')
+      var randomVar = common.randomVariable(path);
+      if (!randomVar) {
+        return mutations;
+      }
+
+      mutations.push(template({
+        VAR: randomVar,
+        IDENTIFIER: randVarOrObject,
+      }));
+    } else if (probability < 0.9) {
+      const template = babelTemplate(
+          'if (IDENTIFIER != null && typeof(IDENTIFIER) == "object") ' +
+          'Object.defineProperty(IDENTIFIER, PROPERTY, {value: VALUE})')
+      mutations.push(template({
+          IDENTIFIER: newRandVarOrObject,
+          PROPERTY: randProperty,
+          VALUE: common.randomValue(path),
+      }));
+    } else {
+      const template = babelTemplate(
+          'if (IDENTIFIER != null && typeof(IDENTIFIER) == "object") ' +
+          'Object.defineProperty(IDENTIFIER, PROPERTY, {' +
+          'get: function() { GETTER_MUTATION ; return VALUE; },' +
+          'set: function(value) { SETTER_MUTATION; }' +
+          '})');
+      mutations.push(template({
+          IDENTIFIER: newRandVarOrObject,
+          PROPERTY: randProperty,
+          GETTER_MUTATION: this._randomVariableOrObjectMutations(
+              path, recurseDepth + 1),
+          SETTER_MUTATION: this._randomVariableOrObjectMutations(
+              path, recurseDepth + 1),
+          VALUE: common.randomValue(path),
+      }));
+    }
+
+    return mutations;
+  }
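+  // Example: with probability 0.3 the call recurses and concatenates two
+  // independently generated mutation lists (bounded by
+  // MAX_MUTATION_RECURSION_DEPTH); otherwise exactly one of the templates
+  // above is instantiated, e.g. `delete IDENTIFIER[PROPERTY], __callGC()`
+  // for the 0.3..0.4 probability band.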
+
+
+  get visitor() {
+    const settings = this.settings;
+    const thisMutator = this;
+
+    return {
+      ExpressionStatement(path) {
+        if (!random.choose(settings.ADD_VAR_OR_OBJ_MUTATIONS)) {
+          return;
+        }
+
+        const mutations = thisMutator._randomVariableOrObjectMutations(path);
+        thisMutator.annotate(mutations[0], 'Random mutation');
+
+        if (random.choose(0.5)) {
+          thisMutator.insertBeforeSkip(path, mutations);
+        } else {
+          thisMutator.insertAfterSkip(path, mutations);
+        }
+
+        path.skip();
+      }
+    };
+  }
+}
+
+module.exports = {
+  VariableOrObjectMutator: VariableOrObjectMutator,
+};
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/package-lock.json b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/package-lock.json
new file mode 100644
index 0000000..85eb89d
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/package-lock.json
@@ -0,0 +1,3556 @@
+{
+  "name": "ochang_js_fuzzer",
+  "version": "1.0.0",
+  "lockfileVersion": 1,
+  "requires": true,
+  "dependencies": {
+    "@babel/code-frame": {
+      "version": "7.0.0",
+      "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.0.0.tgz",
+      "integrity": "sha512-OfC2uemaknXr87bdLUkWog7nYuliM9Ij5HUcajsVcMCpQrcLmtxRbVFTIqmcSkSeYRBFBRxs2FiUqFJDLdiebA==",
+      "requires": {
+        "@babel/highlight": "^7.0.0"
+      }
+    },
+    "@babel/generator": {
+      "version": "7.1.3",
+      "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.1.3.tgz",
+      "integrity": "sha512-ZoCZGcfIJFJuZBqxcY9OjC1KW2lWK64qrX1o4UYL3yshVhwKFYgzpWZ0vvtGMNJdTlvkw0W+HR1VnYN8q3QPFQ==",
+      "requires": {
+        "@babel/types": "^7.1.3",
+        "jsesc": "^2.5.1",
+        "lodash": "^4.17.10",
+        "source-map": "^0.5.0",
+        "trim-right": "^1.0.1"
+      }
+    },
+    "@babel/helper-function-name": {
+      "version": "7.1.0",
+      "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.1.0.tgz",
+      "integrity": "sha512-A95XEoCpb3TO+KZzJ4S/5uW5fNe26DjBGqf1o9ucyLyCmi1dXq/B3c8iaWTfBk3VvetUxl16e8tIrd5teOCfGw==",
+      "requires": {
+        "@babel/helper-get-function-arity": "^7.0.0",
+        "@babel/template": "^7.1.0",
+        "@babel/types": "^7.0.0"
+      }
+    },
+    "@babel/helper-get-function-arity": {
+      "version": "7.0.0",
+      "resolved": "https://registry.npmjs.org/@babel/helper-get-function-arity/-/helper-get-function-arity-7.0.0.tgz",
+      "integrity": "sha512-r2DbJeg4svYvt3HOS74U4eWKsUAMRH01Z1ds1zx8KNTPtpTL5JAsdFv8BNyOpVqdFhHkkRDIg5B4AsxmkjAlmQ==",
+      "requires": {
+        "@babel/types": "^7.0.0"
+      }
+    },
+    "@babel/helper-split-export-declaration": {
+      "version": "7.0.0",
+      "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.0.0.tgz",
+      "integrity": "sha512-MXkOJqva62dfC0w85mEf/LucPPS/1+04nmmRMPEBUB++hiiThQ2zPtX/mEWQ3mtzCEjIJvPY8nuwxXtQeQwUag==",
+      "requires": {
+        "@babel/types": "^7.0.0"
+      }
+    },
+    "@babel/highlight": {
+      "version": "7.0.0",
+      "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.0.0.tgz",
+      "integrity": "sha512-UFMC4ZeFC48Tpvj7C8UgLvtkaUuovQX+5xNWrsIoMG8o2z+XFKjKaN9iVmS84dPwVN00W4wPmqvYoZF3EGAsfw==",
+      "requires": {
+        "chalk": "^2.0.0",
+        "esutils": "^2.0.2",
+        "js-tokens": "^4.0.0"
+      }
+    },
+    "@babel/parser": {
+      "version": "7.1.3",
+      "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.1.3.tgz",
+      "integrity": "sha512-gqmspPZOMW3MIRb9HlrnbZHXI1/KHTOroBwN1NcLL6pWxzqzEKGvRTq0W/PxS45OtQGbaFikSQpkS5zbnsQm2w=="
+    },
+    "@babel/template": {
+      "version": "7.1.2",
+      "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.1.2.tgz",
+      "integrity": "sha512-SY1MmplssORfFiLDcOETrW7fCLl+PavlwMh92rrGcikQaRq4iWPVH0MpwPpY3etVMx6RnDjXtr6VZYr/IbP/Ag==",
+      "requires": {
+        "@babel/code-frame": "^7.0.0",
+        "@babel/parser": "^7.1.2",
+        "@babel/types": "^7.1.2"
+      }
+    },
+    "@babel/traverse": {
+      "version": "7.1.4",
+      "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.1.4.tgz",
+      "integrity": "sha512-my9mdrAIGdDiSVBuMjpn/oXYpva0/EZwWL3sm3Wcy/AVWO2eXnsoZruOT9jOGNRXU8KbCIu5zsKnXcAJ6PcV6Q==",
+      "requires": {
+        "@babel/code-frame": "^7.0.0",
+        "@babel/generator": "^7.1.3",
+        "@babel/helper-function-name": "^7.1.0",
+        "@babel/helper-split-export-declaration": "^7.0.0",
+        "@babel/parser": "^7.1.3",
+        "@babel/types": "^7.1.3",
+        "debug": "^3.1.0",
+        "globals": "^11.1.0",
+        "lodash": "^4.17.10"
+      },
+      "dependencies": {
+        "debug": {
+          "version": "3.2.6",
+          "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.6.tgz",
+          "integrity": "sha512-mel+jf7nrtEl5Pn1Qx46zARXKDpBbvzezse7p7LqINmdoIk8PYP5SySaxEmYv6TZ0JyEKA1hsCId6DIhgITtWQ==",
+          "requires": {
+            "ms": "^2.1.1"
+          }
+        },
+        "globals": {
+          "version": "11.8.0",
+          "resolved": "https://registry.npmjs.org/globals/-/globals-11.8.0.tgz",
+          "integrity": "sha512-io6LkyPVuzCHBSQV9fmOwxZkUk6nIaGmxheLDgmuFv89j0fm2aqDbIXKAGfzCMHqz3HLF2Zf8WSG6VqMh2qFmA=="
+        },
+        "ms": {
+          "version": "2.1.1",
+          "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.1.tgz",
+          "integrity": "sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg=="
+        }
+      }
+    },
+    "@babel/types": {
+      "version": "7.1.3",
+      "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.1.3.tgz",
+      "integrity": "sha512-RpPOVfK+yatXyn8n4PB1NW6k9qjinrXrRR8ugBN8fD6hCy5RXI6PSbVqpOJBO9oSaY7Nom4ohj35feb0UR9hSA==",
+      "requires": {
+        "esutils": "^2.0.2",
+        "lodash": "^4.17.10",
+        "to-fast-properties": "^2.0.0"
+      }
+    },
+    "@mrmlnc/readdir-enhanced": {
+      "version": "2.2.1",
+      "resolved": "https://registry.npmjs.org/@mrmlnc/readdir-enhanced/-/readdir-enhanced-2.2.1.tgz",
+      "integrity": "sha512-bPHp6Ji8b41szTOcaP63VlnbbO5Ny6dwAATtY6JTjh5N2OLrb5Qk/Th5cRkRQhkWCt+EJsYrNB0MiL+Gpn6e3g==",
+      "dev": true,
+      "requires": {
+        "call-me-maybe": "^1.0.1",
+        "glob-to-regexp": "^0.3.0"
+      }
+    },
+    "@nodelib/fs.stat": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-1.1.2.tgz",
+      "integrity": "sha512-yprFYuno9FtNsSHVlSWd+nRlmGoAbqbeCwOryP6sC/zoCjhpArcRMYp19EvpSUSizJAlsXEwJv+wcWS9XaXdMw==",
+      "dev": true
+    },
+    "@types/color-name": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/@types/color-name/-/color-name-1.1.1.tgz",
+      "integrity": "sha512-rr+OQyAjxze7GgWrSaJwydHStIhHq2lvY3BOC2Mj7KnzI7XK0Uw1TOOdI9lDoajEbSWLiYgoo4f1R51erQfhPQ==",
+      "dev": true
+    },
+    "acorn": {
+      "version": "7.1.1",
+      "resolved": "https://registry.npmjs.org/acorn/-/acorn-7.1.1.tgz",
+      "integrity": "sha512-add7dgA5ppRPxCFJoAGfMDi7PIBXq1RtGo7BhbLaxwrXPOmw8gq48Y9ozT01hUKy9byMjlR20EJhu5zlkErEkg==",
+      "dev": true
+    },
+    "acorn-jsx": {
+      "version": "5.2.0",
+      "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.2.0.tgz",
+      "integrity": "sha512-HiUX/+K2YpkpJ+SzBffkM/AQ2YE03S0U1kjTLVpoJdhZMOWy8qvXVN9JdLqv2QsaQ6MPYQIuNmwD8zOiYUofLQ==",
+      "dev": true
+    },
+    "ajv": {
+      "version": "5.5.2",
+      "resolved": "https://registry.npmjs.org/ajv/-/ajv-5.5.2.tgz",
+      "integrity": "sha1-c7Xuyj+rZT49P5Qis0GtQiBdyWU=",
+      "dev": true,
+      "requires": {
+        "co": "^4.6.0",
+        "fast-deep-equal": "^1.0.0",
+        "fast-json-stable-stringify": "^2.0.0",
+        "json-schema-traverse": "^0.3.0"
+      }
+    },
+    "ansi-escapes": {
+      "version": "4.3.1",
+      "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.1.tgz",
+      "integrity": "sha512-JWF7ocqNrp8u9oqpgV+wH5ftbt+cfvv+PTjOvKLT3AdYly/LmORARfEVT1iyjwN+4MqE5UmVKoAdIBqeoCHgLA==",
+      "dev": true,
+      "requires": {
+        "type-fest": "^0.11.0"
+      },
+      "dependencies": {
+        "type-fest": {
+          "version": "0.11.0",
+          "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.11.0.tgz",
+          "integrity": "sha512-OdjXJxnCN1AvyLSzeKIgXTXxV+99ZuXl3Hpo9XpJAv9MBcHrrJOQ5kV7ypXOuQie+AmWG25hLbiKdwYTifzcfQ==",
+          "dev": true
+        }
+      }
+    },
+    "ansi-regex": {
+      "version": "5.0.0",
+      "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz",
+      "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==",
+      "dev": true
+    },
+    "ansi-styles": {
+      "version": "3.2.1",
+      "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
+      "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
+      "requires": {
+        "color-convert": "^1.9.0"
+      }
+    },
+    "argparse": {
+      "version": "1.0.10",
+      "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz",
+      "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==",
+      "dev": true,
+      "requires": {
+        "sprintf-js": "~1.0.2"
+      }
+    },
+    "arr-diff": {
+      "version": "4.0.0",
+      "resolved": "https://registry.npmjs.org/arr-diff/-/arr-diff-4.0.0.tgz",
+      "integrity": "sha1-1kYQdP6/7HHn4VI1dhoyml3HxSA=",
+      "dev": true
+    },
+    "arr-flatten": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/arr-flatten/-/arr-flatten-1.1.0.tgz",
+      "integrity": "sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg==",
+      "dev": true
+    },
+    "arr-union": {
+      "version": "3.1.0",
+      "resolved": "https://registry.npmjs.org/arr-union/-/arr-union-3.1.0.tgz",
+      "integrity": "sha1-45sJrqne+Gao8gbiiK9jkZuuOcQ=",
+      "dev": true
+    },
+    "array-union": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/array-union/-/array-union-1.0.2.tgz",
+      "integrity": "sha1-mjRBDk9OPaI96jdb5b5w8kd47Dk=",
+      "dev": true,
+      "requires": {
+        "array-uniq": "^1.0.1"
+      }
+    },
+    "array-uniq": {
+      "version": "1.0.3",
+      "resolved": "https://registry.npmjs.org/array-uniq/-/array-uniq-1.0.3.tgz",
+      "integrity": "sha1-r2rId6Jcx/dOBYiUdThY39sk/bY=",
+      "dev": true
+    },
+    "array-unique": {
+      "version": "0.3.2",
+      "resolved": "https://registry.npmjs.org/array-unique/-/array-unique-0.3.2.tgz",
+      "integrity": "sha1-qJS3XUvE9s1nnvMkSp/Y9Gri1Cg=",
+      "dev": true
+    },
+    "arrify": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/arrify/-/arrify-1.0.1.tgz",
+      "integrity": "sha1-iYUI2iIm84DfkEcoRWhJwVAaSw0=",
+      "dev": true
+    },
+    "asn1": {
+      "version": "0.2.4",
+      "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.4.tgz",
+      "integrity": "sha512-jxwzQpLQjSmWXgwaCZE9Nz+glAG01yF1QnWgbhGwHI5A6FRIEY6IVqtHhIepHqI7/kyEyQEagBC5mBEFlIYvdg==",
+      "dev": true,
+      "requires": {
+        "safer-buffer": "~2.1.0"
+      }
+    },
+    "assert-plus": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz",
+      "integrity": "sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU=",
+      "dev": true
+    },
+    "assign-symbols": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/assign-symbols/-/assign-symbols-1.0.0.tgz",
+      "integrity": "sha1-WWZ/QfrdTyDMvCu5a41Pf3jsA2c=",
+      "dev": true
+    },
+    "astral-regex": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/astral-regex/-/astral-regex-1.0.0.tgz",
+      "integrity": "sha512-+Ryf6g3BKoRc7jfp7ad8tM4TtMiaWvbF/1/sQcZPkkS7ag3D5nMBCe2UfOTONtAkaG0tO0ij3C5Lwmf1EiyjHg==",
+      "dev": true
+    },
+    "asynckit": {
+      "version": "0.4.0",
+      "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
+      "integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k=",
+      "dev": true
+    },
+    "atob": {
+      "version": "2.1.2",
+      "resolved": "https://registry.npmjs.org/atob/-/atob-2.1.2.tgz",
+      "integrity": "sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg==",
+      "dev": true
+    },
+    "aws-sign2": {
+      "version": "0.7.0",
+      "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz",
+      "integrity": "sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg=",
+      "dev": true
+    },
+    "aws4": {
+      "version": "1.8.0",
+      "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.8.0.tgz",
+      "integrity": "sha512-ReZxvNHIOv88FlT7rxcXIIC0fPt4KZqZbOlivyWtXLt8ESx84zd3kMC6iK5jVeS2qt+g7ftS7ye4fi06X5rtRQ==",
+      "dev": true
+    },
+    "babel-runtime": {
+      "version": "6.26.0",
+      "resolved": "https://registry.npmjs.org/babel-runtime/-/babel-runtime-6.26.0.tgz",
+      "integrity": "sha1-llxwWGaOgrVde/4E/yM3vItWR/4=",
+      "dev": true,
+      "requires": {
+        "core-js": "^2.4.0",
+        "regenerator-runtime": "^0.11.0"
+      }
+    },
+    "balanced-match": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz",
+      "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=",
+      "dev": true
+    },
+    "base": {
+      "version": "0.11.2",
+      "resolved": "https://registry.npmjs.org/base/-/base-0.11.2.tgz",
+      "integrity": "sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg==",
+      "dev": true,
+      "requires": {
+        "cache-base": "^1.0.1",
+        "class-utils": "^0.3.5",
+        "component-emitter": "^1.2.1",
+        "define-property": "^1.0.0",
+        "isobject": "^3.0.1",
+        "mixin-deep": "^1.2.0",
+        "pascalcase": "^0.1.1"
+      },
+      "dependencies": {
+        "define-property": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz",
+          "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=",
+          "dev": true,
+          "requires": {
+            "is-descriptor": "^1.0.0"
+          }
+        },
+        "is-accessor-descriptor": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
+          "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
+          "dev": true,
+          "requires": {
+            "kind-of": "^6.0.0"
+          }
+        },
+        "is-data-descriptor": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
+          "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
+          "dev": true,
+          "requires": {
+            "kind-of": "^6.0.0"
+          }
+        },
+        "is-descriptor": {
+          "version": "1.0.2",
+          "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
+          "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
+          "dev": true,
+          "requires": {
+            "is-accessor-descriptor": "^1.0.0",
+            "is-data-descriptor": "^1.0.0",
+            "kind-of": "^6.0.2"
+          }
+        }
+      }
+    },
+    "bcrypt-pbkdf": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz",
+      "integrity": "sha1-pDAdOJtqQ/m2f/PKEaP2Y342Dp4=",
+      "dev": true,
+      "requires": {
+        "tweetnacl": "^0.14.3"
+      }
+    },
+    "boom": {
+      "version": "4.3.1",
+      "resolved": "https://registry.npmjs.org/boom/-/boom-4.3.1.tgz",
+      "integrity": "sha1-T4owBctKfjiJ90kDD9JbluAdLjE=",
+      "dev": true,
+      "requires": {
+        "hoek": "4.x.x"
+      }
+    },
+    "brace-expansion": {
+      "version": "1.1.8",
+      "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.8.tgz",
+      "integrity": "sha1-wHshHHyVLsH479Uad+8NHTmQopI=",
+      "dev": true,
+      "requires": {
+        "balanced-match": "^1.0.0",
+        "concat-map": "0.0.1"
+      }
+    },
+    "braces": {
+      "version": "2.3.2",
+      "resolved": "https://registry.npmjs.org/braces/-/braces-2.3.2.tgz",
+      "integrity": "sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w==",
+      "dev": true,
+      "requires": {
+        "arr-flatten": "^1.1.0",
+        "array-unique": "^0.3.2",
+        "extend-shallow": "^2.0.1",
+        "fill-range": "^4.0.0",
+        "isobject": "^3.0.1",
+        "repeat-element": "^1.1.2",
+        "snapdragon": "^0.8.1",
+        "snapdragon-node": "^2.0.1",
+        "split-string": "^3.0.2",
+        "to-regex": "^3.0.1"
+      },
+      "dependencies": {
+        "extend-shallow": {
+          "version": "2.0.1",
+          "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+          "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
+          "dev": true,
+          "requires": {
+            "is-extendable": "^0.1.0"
+          }
+        }
+      }
+    },
+    "browser-stdout": {
+      "version": "1.3.0",
+      "resolved": "https://registry.npmjs.org/browser-stdout/-/browser-stdout-1.3.0.tgz",
+      "integrity": "sha1-81HTKWnTL6XXpVZxVCY9korjvR8=",
+      "dev": true
+    },
+    "byline": {
+      "version": "5.0.0",
+      "resolved": "https://registry.npmjs.org/byline/-/byline-5.0.0.tgz",
+      "integrity": "sha1-dBxSFkaOrcRXsDQQEYrXfejB3bE=",
+      "dev": true
+    },
+    "cache-base": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/cache-base/-/cache-base-1.0.1.tgz",
+      "integrity": "sha512-AKcdTnFSWATd5/GCPRxr2ChwIJ85CeyrEyjRHlKxQ56d4XJMGym0uAiKn0xbLOGOl3+yRpOTi484dVCEc5AUzQ==",
+      "dev": true,
+      "requires": {
+        "collection-visit": "^1.0.0",
+        "component-emitter": "^1.2.1",
+        "get-value": "^2.0.6",
+        "has-value": "^1.0.0",
+        "isobject": "^3.0.1",
+        "set-value": "^2.0.0",
+        "to-object-path": "^0.3.0",
+        "union-value": "^1.0.0",
+        "unset-value": "^1.0.0"
+      }
+    },
+    "call-me-maybe": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/call-me-maybe/-/call-me-maybe-1.0.1.tgz",
+      "integrity": "sha1-JtII6onje1y95gJQoV8DHBak1ms=",
+      "dev": true
+    },
+    "callsites": {
+      "version": "3.1.0",
+      "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz",
+      "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==",
+      "dev": true
+    },
+    "caseless": {
+      "version": "0.12.0",
+      "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz",
+      "integrity": "sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw=",
+      "dev": true
+    },
+    "chalk": {
+      "version": "2.4.1",
+      "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz",
+      "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==",
+      "requires": {
+        "ansi-styles": "^3.2.1",
+        "escape-string-regexp": "^1.0.5",
+        "supports-color": "^5.3.0"
+      },
+      "dependencies": {
+        "has-flag": {
+          "version": "3.0.0",
+          "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
+          "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0="
+        },
+        "supports-color": {
+          "version": "5.5.0",
+          "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
+          "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
+          "requires": {
+            "has-flag": "^3.0.0"
+          }
+        }
+      }
+    },
+    "chardet": {
+      "version": "0.7.0",
+      "resolved": "https://registry.npmjs.org/chardet/-/chardet-0.7.0.tgz",
+      "integrity": "sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==",
+      "dev": true
+    },
+    "class-utils": {
+      "version": "0.3.6",
+      "resolved": "https://registry.npmjs.org/class-utils/-/class-utils-0.3.6.tgz",
+      "integrity": "sha512-qOhPa/Fj7s6TY8H8esGu5QNpMMQxz79h+urzrNYN6mn+9BnxlDGf5QZ+XeCDsxSjPqsSR56XOZOJmpeurnLMeg==",
+      "dev": true,
+      "requires": {
+        "arr-union": "^3.1.0",
+        "define-property": "^0.2.5",
+        "isobject": "^3.0.0",
+        "static-extend": "^0.1.1"
+      },
+      "dependencies": {
+        "define-property": {
+          "version": "0.2.5",
+          "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
+          "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
+          "dev": true,
+          "requires": {
+            "is-descriptor": "^0.1.0"
+          }
+        }
+      }
+    },
+    "cli-cursor": {
+      "version": "3.1.0",
+      "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-3.1.0.tgz",
+      "integrity": "sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==",
+      "dev": true,
+      "requires": {
+        "restore-cursor": "^3.1.0"
+      }
+    },
+    "cli-width": {
+      "version": "2.2.0",
+      "resolved": "https://registry.npmjs.org/cli-width/-/cli-width-2.2.0.tgz",
+      "integrity": "sha1-/xnt6Kml5XkyQUewwR8PvLq+1jk=",
+      "dev": true
+    },
+    "co": {
+      "version": "4.6.0",
+      "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz",
+      "integrity": "sha1-bqa989hTrlTMuOR7+gvz+QMfsYQ=",
+      "dev": true
+    },
+    "collection-visit": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/collection-visit/-/collection-visit-1.0.0.tgz",
+      "integrity": "sha1-S8A3PBZLwykbTTaMgpzxqApZ3KA=",
+      "dev": true,
+      "requires": {
+        "map-visit": "^1.0.0",
+        "object-visit": "^1.0.0"
+      }
+    },
+    "color-convert": {
+      "version": "1.9.0",
+      "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.0.tgz",
+      "integrity": "sha1-Gsz5fdc5uYO/mU1W/sj5WFNkG3o=",
+      "requires": {
+        "color-name": "^1.1.1"
+      }
+    },
+    "color-name": {
+      "version": "1.1.3",
+      "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
+      "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU="
+    },
+    "combined-stream": {
+      "version": "1.0.7",
+      "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.7.tgz",
+      "integrity": "sha512-brWl9y6vOB1xYPZcpZde3N9zDByXTosAeMDo4p1wzo6UMOX4vumB+TP1RZ76sfE6Md68Q0NJSrE/gbezd4Ul+w==",
+      "dev": true,
+      "requires": {
+        "delayed-stream": "~1.0.0"
+      }
+    },
+    "commander": {
+      "version": "2.11.0",
+      "resolved": "https://registry.npmjs.org/commander/-/commander-2.11.0.tgz",
+      "integrity": "sha512-b0553uYA5YAEGgyYIGYROzKQ7X5RAqedkfjiZxwi0kL1g3bOaBNNZfYkzt/CL0umgD5wc9Jec2FbB98CjkMRvQ=="
+    },
+    "component-emitter": {
+      "version": "1.2.1",
+      "resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.2.1.tgz",
+      "integrity": "sha1-E3kY1teCg/ffemt8WmPhQOaUJeY=",
+      "dev": true
+    },
+    "concat-map": {
+      "version": "0.0.1",
+      "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
+      "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=",
+      "dev": true
+    },
+    "copy-descriptor": {
+      "version": "0.1.1",
+      "resolved": "https://registry.npmjs.org/copy-descriptor/-/copy-descriptor-0.1.1.tgz",
+      "integrity": "sha1-Z29us8OZl8LuGsOpJP1hJHSPV40=",
+      "dev": true
+    },
+    "core-js": {
+      "version": "2.5.7",
+      "resolved": "https://registry.npmjs.org/core-js/-/core-js-2.5.7.tgz",
+      "integrity": "sha512-RszJCAxg/PP6uzXVXL6BsxSXx/B05oJAQ2vkJRjyjrEcNVycaqOmNb5OTxZPE3xa5gwZduqza6L9JOCenh/Ecw==",
+      "dev": true
+    },
+    "core-util-is": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz",
+      "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=",
+      "dev": true
+    },
+    "cross-spawn": {
+      "version": "6.0.5",
+      "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-6.0.5.tgz",
+      "integrity": "sha512-eTVLrBSt7fjbDygz805pMnstIs2VTBNkRm0qxZd+M7A5XDdxVRWO5MxGBXZhjY4cqLYLdtrGqRf8mBPmzwSpWQ==",
+      "dev": true,
+      "requires": {
+        "nice-try": "^1.0.4",
+        "path-key": "^2.0.1",
+        "semver": "^5.5.0",
+        "shebang-command": "^1.2.0",
+        "which": "^1.2.9"
+      }
+    },
+    "cryptiles": {
+      "version": "3.1.2",
+      "resolved": "https://registry.npmjs.org/cryptiles/-/cryptiles-3.1.2.tgz",
+      "integrity": "sha1-qJ+7Ig9c4l7FboxKqKT9e1sNKf4=",
+      "dev": true,
+      "requires": {
+        "boom": "5.x.x"
+      },
+      "dependencies": {
+        "boom": {
+          "version": "5.2.0",
+          "resolved": "https://registry.npmjs.org/boom/-/boom-5.2.0.tgz",
+          "integrity": "sha512-Z5BTk6ZRe4tXXQlkqftmsAUANpXmuwlsF5Oov8ThoMbQRzdGTA1ngYRW160GexgOgjsFOKJz0LYhoNi+2AMBUw==",
+          "dev": true,
+          "requires": {
+            "hoek": "4.x.x"
+          }
+        }
+      }
+    },
+    "crypto-random-string": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/crypto-random-string/-/crypto-random-string-2.0.0.tgz",
+      "integrity": "sha512-v1plID3y9r/lPhviJ1wrXpLeyUIGAZ2SHNYTEapm7/8A9nLPoyvVp3RK/EPFqn5kEznyWgYZNsRtYYIWbuG8KA=="
+    },
+    "dashdash": {
+      "version": "1.14.1",
+      "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz",
+      "integrity": "sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA=",
+      "dev": true,
+      "requires": {
+        "assert-plus": "^1.0.0"
+      }
+    },
+    "debug": {
+      "version": "2.6.8",
+      "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.8.tgz",
+      "integrity": "sha1-5zFTHKLt4n0YgiJCfaF4IdaP9Pw=",
+      "dev": true,
+      "requires": {
+        "ms": "2.0.0"
+      }
+    },
+    "decode-uri-component": {
+      "version": "0.2.0",
+      "resolved": "https://registry.npmjs.org/decode-uri-component/-/decode-uri-component-0.2.0.tgz",
+      "integrity": "sha1-6zkTMzRYd1y4TNGh+uBiEGu4dUU=",
+      "dev": true
+    },
+    "deep-is": {
+      "version": "0.1.3",
+      "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.3.tgz",
+      "integrity": "sha1-s2nW+128E+7PUk+RsHD+7cNXzzQ=",
+      "dev": true
+    },
+    "define-property": {
+      "version": "2.0.2",
+      "resolved": "https://registry.npmjs.org/define-property/-/define-property-2.0.2.tgz",
+      "integrity": "sha512-jwK2UV4cnPpbcG7+VRARKTZPUWowwXA8bzH5NP6ud0oeAxyYPuGZUAC7hMugpCdz4BeSZl2Dl9k66CHJ/46ZYQ==",
+      "dev": true,
+      "requires": {
+        "is-descriptor": "^1.0.2",
+        "isobject": "^3.0.1"
+      },
+      "dependencies": {
+        "is-accessor-descriptor": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
+          "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
+          "dev": true,
+          "requires": {
+            "kind-of": "^6.0.0"
+          }
+        },
+        "is-data-descriptor": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
+          "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
+          "dev": true,
+          "requires": {
+            "kind-of": "^6.0.0"
+          }
+        },
+        "is-descriptor": {
+          "version": "1.0.2",
+          "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
+          "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
+          "dev": true,
+          "requires": {
+            "is-accessor-descriptor": "^1.0.0",
+            "is-data-descriptor": "^1.0.0",
+            "kind-of": "^6.0.2"
+          }
+        }
+      }
+    },
+    "delayed-stream": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
+      "integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk=",
+      "dev": true
+    },
+    "diff": {
+      "version": "3.2.0",
+      "resolved": "https://registry.npmjs.org/diff/-/diff-3.2.0.tgz",
+      "integrity": "sha1-yc45Okt8vQsFinJck98pkCeGj/k=",
+      "dev": true
+    },
+    "dir-glob": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-2.0.0.tgz",
+      "integrity": "sha512-37qirFDz8cA5fimp9feo43fSuRo2gHwaIn6dXL8Ber1dGwUosDrGZeCCXq57WnIqE4aQ+u3eQZzsk1yOzhdwag==",
+      "dev": true,
+      "requires": {
+        "arrify": "^1.0.1",
+        "path-type": "^3.0.0"
+      }
+    },
+    "doctrine": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz",
+      "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==",
+      "dev": true,
+      "requires": {
+        "esutils": "^2.0.2"
+      }
+    },
+    "ecc-jsbn": {
+      "version": "0.1.2",
+      "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz",
+      "integrity": "sha1-OoOpBOVDUyh4dMVkt1SThoSamMk=",
+      "dev": true,
+      "requires": {
+        "jsbn": "~0.1.0",
+        "safer-buffer": "^2.1.0"
+      }
+    },
+    "emoji-regex": {
+      "version": "8.0.0",
+      "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
+      "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==",
+      "dev": true
+    },
+    "escape-string-regexp": {
+      "version": "1.0.5",
+      "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz",
+      "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ="
+    },
+    "escodegen": {
+      "version": "1.10.0",
+      "resolved": "https://registry.npmjs.org/escodegen/-/escodegen-1.10.0.tgz",
+      "integrity": "sha512-fjUOf8johsv23WuIKdNQU4P9t9jhQ4Qzx6pC2uW890OloK3Zs1ZAoCNpg/2larNF501jLl3UNy0kIRcF6VI22g==",
+      "dev": true,
+      "requires": {
+        "esprima": "^3.1.3",
+        "estraverse": "^4.2.0",
+        "esutils": "^2.0.2",
+        "optionator": "^0.8.1",
+        "source-map": "~0.6.1"
+      },
+      "dependencies": {
+        "source-map": {
+          "version": "0.6.1",
+          "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+          "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
+          "dev": true,
+          "optional": true
+        }
+      }
+    },
+    "eslint": {
+      "version": "6.8.0",
+      "resolved": "https://registry.npmjs.org/eslint/-/eslint-6.8.0.tgz",
+      "integrity": "sha512-K+Iayyo2LtyYhDSYwz5D5QdWw0hCacNzyq1Y821Xna2xSJj7cijoLLYmLxTQgcgZ9mC61nryMy9S7GRbYpI5Ig==",
+      "dev": true,
+      "requires": {
+        "@babel/code-frame": "^7.0.0",
+        "ajv": "^6.10.0",
+        "chalk": "^2.1.0",
+        "cross-spawn": "^6.0.5",
+        "debug": "^4.0.1",
+        "doctrine": "^3.0.0",
+        "eslint-scope": "^5.0.0",
+        "eslint-utils": "^1.4.3",
+        "eslint-visitor-keys": "^1.1.0",
+        "espree": "^6.1.2",
+        "esquery": "^1.0.1",
+        "esutils": "^2.0.2",
+        "file-entry-cache": "^5.0.1",
+        "functional-red-black-tree": "^1.0.1",
+        "glob-parent": "^5.0.0",
+        "globals": "^12.1.0",
+        "ignore": "^4.0.6",
+        "import-fresh": "^3.0.0",
+        "imurmurhash": "^0.1.4",
+        "inquirer": "^7.0.0",
+        "is-glob": "^4.0.0",
+        "js-yaml": "^3.13.1",
+        "json-stable-stringify-without-jsonify": "^1.0.1",
+        "levn": "^0.3.0",
+        "lodash": "^4.17.14",
+        "minimatch": "^3.0.4",
+        "mkdirp": "^0.5.1",
+        "natural-compare": "^1.4.0",
+        "optionator": "^0.8.3",
+        "progress": "^2.0.0",
+        "regexpp": "^2.0.1",
+        "semver": "^6.1.2",
+        "strip-ansi": "^5.2.0",
+        "strip-json-comments": "^3.0.1",
+        "table": "^5.2.3",
+        "text-table": "^0.2.0",
+        "v8-compile-cache": "^2.0.3"
+      },
+      "dependencies": {
+        "ajv": {
+          "version": "6.12.0",
+          "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.0.tgz",
+          "integrity": "sha512-D6gFiFA0RRLyUbvijN74DWAjXSFxWKaWP7mldxkVhyhAV3+SWA9HEJPHQ2c9soIeTFJqcSdFDGFgdqs1iUU2Hw==",
+          "dev": true,
+          "requires": {
+            "fast-deep-equal": "^3.1.1",
+            "fast-json-stable-stringify": "^2.0.0",
+            "json-schema-traverse": "^0.4.1",
+            "uri-js": "^4.2.2"
+          }
+        },
+        "debug": {
+          "version": "4.1.1",
+          "resolved": "https://registry.npmjs.org/debug/-/debug-4.1.1.tgz",
+          "integrity": "sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==",
+          "dev": true,
+          "requires": {
+            "ms": "^2.1.1"
+          }
+        },
+        "fast-deep-equal": {
+          "version": "3.1.1",
+          "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.1.tgz",
+          "integrity": "sha512-8UEa58QDLauDNfpbrX55Q9jrGHThw2ZMdOky5Gl1CDtVeJDPVrG4Jxx1N8jw2gkWaff5UUuX1KJd+9zGe2B+ZA==",
+          "dev": true
+        },
+        "glob-parent": {
+          "version": "5.1.1",
+          "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.1.tgz",
+          "integrity": "sha512-FnI+VGOpnlGHWZxthPGR+QhR78fuiK0sNLkHQv+bL9fQi57lNNdquIbna/WrfROrolq8GK5Ek6BiMwqL/voRYQ==",
+          "dev": true,
+          "requires": {
+            "is-glob": "^4.0.1"
+          },
+          "dependencies": {
+            "is-glob": {
+              "version": "4.0.1",
+              "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.1.tgz",
+              "integrity": "sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg==",
+              "dev": true,
+              "requires": {
+                "is-extglob": "^2.1.1"
+              }
+            }
+          }
+        },
+        "globals": {
+          "version": "12.4.0",
+          "resolved": "https://registry.npmjs.org/globals/-/globals-12.4.0.tgz",
+          "integrity": "sha512-BWICuzzDvDoH54NHKCseDanAhE3CeDorgDL5MT6LMXXj2WCnd9UC2szdk4AWLfjdgNBCXLUanXYcpBBKOSWGwg==",
+          "dev": true,
+          "requires": {
+            "type-fest": "^0.8.1"
+          }
+        },
+        "ignore": {
+          "version": "4.0.6",
+          "resolved": "https://registry.npmjs.org/ignore/-/ignore-4.0.6.tgz",
+          "integrity": "sha512-cyFDKrqc/YdcWFniJhzI42+AzS+gNwmUzOSFcRCQYwySuBBBy/KjuxWLZ/FHEH6Moq1NizMOBWyTcv8O4OZIMg==",
+          "dev": true
+        },
+        "json-schema-traverse": {
+          "version": "0.4.1",
+          "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz",
+          "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==",
+          "dev": true
+        },
+        "lodash": {
+          "version": "4.17.15",
+          "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.15.tgz",
+          "integrity": "sha512-8xOcRHvCjnocdS5cpwXQXVzmmh5e5+saE2QGoeQmbKmRS6J3VQppPOIt0MnmE+4xlZoumy0GPG0D0MVIQbNA1A==",
+          "dev": true
+        },
+        "ms": {
+          "version": "2.1.2",
+          "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+          "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==",
+          "dev": true
+        },
+        "optionator": {
+          "version": "0.8.3",
+          "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.8.3.tgz",
+          "integrity": "sha512-+IW9pACdk3XWmmTXG8m3upGUJst5XRGzxMRjXzAuJ1XnIFNvfhjjIuYkDvysnPQ7qzqVzLt78BCruntqRhWQbA==",
+          "dev": true,
+          "requires": {
+            "deep-is": "~0.1.3",
+            "fast-levenshtein": "~2.0.6",
+            "levn": "~0.3.0",
+            "prelude-ls": "~1.1.2",
+            "type-check": "~0.3.2",
+            "word-wrap": "~1.2.3"
+          }
+        },
+        "semver": {
+          "version": "6.3.0",
+          "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.0.tgz",
+          "integrity": "sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw==",
+          "dev": true
+        }
+      }
+    },
+    "eslint-scope": {
+      "version": "5.0.0",
+      "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.0.0.tgz",
+      "integrity": "sha512-oYrhJW7S0bxAFDvWqzvMPRm6pcgcnWc4QnofCAqRTRfQC0JcwenzGglTtsLyIuuWFfkqDG9vz67cnttSd53djw==",
+      "dev": true,
+      "requires": {
+        "esrecurse": "^4.1.0",
+        "estraverse": "^4.1.1"
+      }
+    },
+    "eslint-utils": {
+      "version": "1.4.3",
+      "resolved": "https://registry.npmjs.org/eslint-utils/-/eslint-utils-1.4.3.tgz",
+      "integrity": "sha512-fbBN5W2xdY45KulGXmLHZ3c3FHfVYmKg0IrAKGOkT/464PQsx2UeIzfz1RmEci+KLm1bBaAzZAh8+/E+XAeZ8Q==",
+      "dev": true,
+      "requires": {
+        "eslint-visitor-keys": "^1.1.0"
+      }
+    },
+    "eslint-visitor-keys": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-1.1.0.tgz",
+      "integrity": "sha512-8y9YjtM1JBJU/A9Kc+SbaOV4y29sSWckBwMHa+FGtVj5gN/sbnKDf6xJUl+8g7FAij9LVaP8C24DUiH/f/2Z9A==",
+      "dev": true
+    },
+    "espree": {
+      "version": "6.2.1",
+      "resolved": "https://registry.npmjs.org/espree/-/espree-6.2.1.tgz",
+      "integrity": "sha512-ysCxRQY3WaXJz9tdbWOwuWr5Y/XrPTGX9Kiz3yoUXwW0VZ4w30HTkQLaGx/+ttFjF8i+ACbArnB4ce68a9m5hw==",
+      "dev": true,
+      "requires": {
+        "acorn": "^7.1.1",
+        "acorn-jsx": "^5.2.0",
+        "eslint-visitor-keys": "^1.1.0"
+      }
+    },
+    "esprima": {
+      "version": "3.1.3",
+      "resolved": "https://registry.npmjs.org/esprima/-/esprima-3.1.3.tgz",
+      "integrity": "sha1-/cpRzuYTOJXjyI1TXOSdv/YqRjM=",
+      "dev": true
+    },
+    "esquery": {
+      "version": "1.2.0",
+      "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.2.0.tgz",
+      "integrity": "sha512-weltsSqdeWIX9G2qQZz7KlTRJdkkOCTPgLYJUz1Hacf48R4YOwGPHO3+ORfWedqJKbq5WQmsgK90n+pFLIKt/Q==",
+      "dev": true,
+      "requires": {
+        "estraverse": "^5.0.0"
+      },
+      "dependencies": {
+        "estraverse": {
+          "version": "5.0.0",
+          "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.0.0.tgz",
+          "integrity": "sha512-j3acdrMzqrxmJTNj5dbr1YbjacrYgAxVMeF0gK16E3j494mOe7xygM/ZLIguEQ0ETwAg2hlJCtHRGav+y0Ny5A==",
+          "dev": true
+        }
+      }
+    },
+    "esrecurse": {
+      "version": "4.2.1",
+      "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.2.1.tgz",
+      "integrity": "sha512-64RBB++fIOAXPw3P9cy89qfMlvZEXZkqqJkjqqXIvzP5ezRZjW+lPWjw35UX/3EhUPFYbg5ER4JYgDw4007/DQ==",
+      "dev": true,
+      "requires": {
+        "estraverse": "^4.1.0"
+      }
+    },
+    "estraverse": {
+      "version": "4.2.0",
+      "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.2.0.tgz",
+      "integrity": "sha1-De4/7TH81GlhjOc0IJn8GvoL2xM=",
+      "dev": true
+    },
+    "esutils": {
+      "version": "2.0.2",
+      "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.2.tgz",
+      "integrity": "sha1-Cr9PHKpbyx96nYrMbepPqqBLrJs="
+    },
+    "expand-brackets": {
+      "version": "2.1.4",
+      "resolved": "https://registry.npmjs.org/expand-brackets/-/expand-brackets-2.1.4.tgz",
+      "integrity": "sha1-t3c14xXOMPa27/D4OwQVGiJEliI=",
+      "dev": true,
+      "requires": {
+        "debug": "^2.3.3",
+        "define-property": "^0.2.5",
+        "extend-shallow": "^2.0.1",
+        "posix-character-classes": "^0.1.0",
+        "regex-not": "^1.0.0",
+        "snapdragon": "^0.8.1",
+        "to-regex": "^3.0.1"
+      },
+      "dependencies": {
+        "define-property": {
+          "version": "0.2.5",
+          "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
+          "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
+          "dev": true,
+          "requires": {
+            "is-descriptor": "^0.1.0"
+          }
+        },
+        "extend-shallow": {
+          "version": "2.0.1",
+          "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+          "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
+          "dev": true,
+          "requires": {
+            "is-extendable": "^0.1.0"
+          }
+        }
+      }
+    },
+    "expand-template": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/expand-template/-/expand-template-1.1.1.tgz",
+      "integrity": "sha512-cebqLtV8KOZfw0UI8TEFWxtczxxC1jvyUvx6H4fyp1K1FN7A4Q+uggVUlOsI1K8AGU0rwOGqP8nCapdrw8CYQg==",
+      "dev": true
+    },
+    "extend": {
+      "version": "3.0.2",
+      "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz",
+      "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==",
+      "dev": true
+    },
+    "extend-shallow": {
+      "version": "3.0.2",
+      "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz",
+      "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=",
+      "dev": true,
+      "requires": {
+        "assign-symbols": "^1.0.0",
+        "is-extendable": "^1.0.1"
+      },
+      "dependencies": {
+        "is-extendable": {
+          "version": "1.0.1",
+          "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz",
+          "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==",
+          "dev": true,
+          "requires": {
+            "is-plain-object": "^2.0.4"
+          }
+        }
+      }
+    },
+    "external-editor": {
+      "version": "3.1.0",
+      "resolved": "https://registry.npmjs.org/external-editor/-/external-editor-3.1.0.tgz",
+      "integrity": "sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew==",
+      "dev": true,
+      "requires": {
+        "chardet": "^0.7.0",
+        "iconv-lite": "^0.4.24",
+        "tmp": "^0.0.33"
+      }
+    },
+    "extglob": {
+      "version": "2.0.4",
+      "resolved": "https://registry.npmjs.org/extglob/-/extglob-2.0.4.tgz",
+      "integrity": "sha512-Nmb6QXkELsuBr24CJSkilo6UHHgbekK5UiZgfE6UHD3Eb27YC6oD+bhcT+tJ6cl8dmsgdQxnWlcry8ksBIBLpw==",
+      "dev": true,
+      "requires": {
+        "array-unique": "^0.3.2",
+        "define-property": "^1.0.0",
+        "expand-brackets": "^2.1.4",
+        "extend-shallow": "^2.0.1",
+        "fragment-cache": "^0.2.1",
+        "regex-not": "^1.0.0",
+        "snapdragon": "^0.8.1",
+        "to-regex": "^3.0.1"
+      },
+      "dependencies": {
+        "define-property": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz",
+          "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=",
+          "dev": true,
+          "requires": {
+            "is-descriptor": "^1.0.0"
+          }
+        },
+        "extend-shallow": {
+          "version": "2.0.1",
+          "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+          "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
+          "dev": true,
+          "requires": {
+            "is-extendable": "^0.1.0"
+          }
+        },
+        "is-accessor-descriptor": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
+          "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
+          "dev": true,
+          "requires": {
+            "kind-of": "^6.0.0"
+          }
+        },
+        "is-data-descriptor": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
+          "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
+          "dev": true,
+          "requires": {
+            "kind-of": "^6.0.0"
+          }
+        },
+        "is-descriptor": {
+          "version": "1.0.2",
+          "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
+          "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
+          "dev": true,
+          "requires": {
+            "is-accessor-descriptor": "^1.0.0",
+            "is-data-descriptor": "^1.0.0",
+            "kind-of": "^6.0.2"
+          }
+        }
+      }
+    },
+    "extsprintf": {
+      "version": "1.3.0",
+      "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz",
+      "integrity": "sha1-lpGEQOMEGnpBT4xS48V06zw+HgU=",
+      "dev": true
+    },
+    "fast-deep-equal": {
+      "version": "1.1.0",
+      "resolved": "http://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-1.1.0.tgz",
+      "integrity": "sha1-wFNHeBfIa1HaqFPIHgWbcz0CNhQ=",
+      "dev": true
+    },
+    "fast-glob": {
+      "version": "2.2.3",
+      "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-2.2.3.tgz",
+      "integrity": "sha512-NiX+JXjnx43RzvVFwRWfPKo4U+1BrK5pJPsHQdKMlLoFHrrGktXglQhHliSihWAq+m1z6fHk3uwGHrtRbS9vLA==",
+      "dev": true,
+      "requires": {
+        "@mrmlnc/readdir-enhanced": "^2.2.1",
+        "@nodelib/fs.stat": "^1.0.1",
+        "glob-parent": "^3.1.0",
+        "is-glob": "^4.0.0",
+        "merge2": "^1.2.1",
+        "micromatch": "^3.1.10"
+      }
+    },
+    "fast-json-stable-stringify": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.0.0.tgz",
+      "integrity": "sha1-1RQsDK7msRifh9OnYREGT4bIu/I=",
+      "dev": true
+    },
+    "fast-levenshtein": {
+      "version": "2.0.6",
+      "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz",
+      "integrity": "sha1-PYpcZog6FqMMqGQ+hR8Zuqd5eRc=",
+      "dev": true
+    },
+    "figures": {
+      "version": "3.2.0",
+      "resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz",
+      "integrity": "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==",
+      "dev": true,
+      "requires": {
+        "escape-string-regexp": "^1.0.5"
+      }
+    },
+    "file-entry-cache": {
+      "version": "5.0.1",
+      "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-5.0.1.tgz",
+      "integrity": "sha512-bCg29ictuBaKUwwArK4ouCaqDgLZcysCFLmM/Yn/FDoqndh/9vNuQfXRDvTuXKLxfD/JtZQGKFT8MGcJBK644g==",
+      "dev": true,
+      "requires": {
+        "flat-cache": "^2.0.1"
+      }
+    },
+    "fill-range": {
+      "version": "4.0.0",
+      "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-4.0.0.tgz",
+      "integrity": "sha1-1USBHUKPmOsGpj3EAtJAPDKMOPc=",
+      "dev": true,
+      "requires": {
+        "extend-shallow": "^2.0.1",
+        "is-number": "^3.0.0",
+        "repeat-string": "^1.6.1",
+        "to-regex-range": "^2.1.0"
+      },
+      "dependencies": {
+        "extend-shallow": {
+          "version": "2.0.1",
+          "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+          "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
+          "dev": true,
+          "requires": {
+            "is-extendable": "^0.1.0"
+          }
+        }
+      }
+    },
+    "flat-cache": {
+      "version": "2.0.1",
+      "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-2.0.1.tgz",
+      "integrity": "sha512-LoQe6yDuUMDzQAEH8sgmh4Md6oZnc/7PjtwjNFSzveXqSHt6ka9fPBuso7IGf9Rz4uqnSnWiFH2B/zj24a5ReA==",
+      "dev": true,
+      "requires": {
+        "flatted": "^2.0.0",
+        "rimraf": "2.6.3",
+        "write": "1.0.3"
+      }
+    },
+    "flatted": {
+      "version": "2.0.2",
+      "resolved": "https://registry.npmjs.org/flatted/-/flatted-2.0.2.tgz",
+      "integrity": "sha512-r5wGx7YeOwNWNlCA0wQ86zKyDLMQr+/RB8xy74M4hTphfmjlijTSSXGuH8rnvKZnfT9i+75zmd8jcKdMR4O6jA==",
+      "dev": true
+    },
+    "for-in": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/for-in/-/for-in-1.0.2.tgz",
+      "integrity": "sha1-gQaNKVqBQuwKxybG4iAMMPttXoA=",
+      "dev": true
+    },
+    "forever-agent": {
+      "version": "0.6.1",
+      "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz",
+      "integrity": "sha1-+8cfDEGt6zf5bFd60e1C2P2sypE=",
+      "dev": true
+    },
+    "form-data": {
+      "version": "2.3.3",
+      "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz",
+      "integrity": "sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ==",
+      "dev": true,
+      "requires": {
+        "asynckit": "^0.4.0",
+        "combined-stream": "^1.0.6",
+        "mime-types": "^2.1.12"
+      }
+    },
+    "formatio": {
+      "version": "1.2.0",
+      "resolved": "https://registry.npmjs.org/formatio/-/formatio-1.2.0.tgz",
+      "integrity": "sha1-87IWfZBoxGmKjVH092CjmlTYGOs=",
+      "dev": true,
+      "requires": {
+        "samsam": "1.x"
+      }
+    },
+    "fragment-cache": {
+      "version": "0.2.1",
+      "resolved": "https://registry.npmjs.org/fragment-cache/-/fragment-cache-0.2.1.tgz",
+      "integrity": "sha1-QpD60n8T6Jvn8zeZxrxaCr//DRk=",
+      "dev": true,
+      "requires": {
+        "map-cache": "^0.2.2"
+      }
+    },
+    "from2": {
+      "version": "2.3.0",
+      "resolved": "https://registry.npmjs.org/from2/-/from2-2.3.0.tgz",
+      "integrity": "sha1-i/tVAr3kpNNs/e6gB/zKIdfjgq8=",
+      "dev": true,
+      "requires": {
+        "inherits": "^2.0.1",
+        "readable-stream": "^2.0.0"
+      }
+    },
+    "fs-extra": {
+      "version": "6.0.1",
+      "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-6.0.1.tgz",
+      "integrity": "sha512-GnyIkKhhzXZUWFCaJzvyDLEEgDkPfb4/TPvJCJVuS8MWZgoSsErf++QpiAlDnKFcqhRlm+tIOcencCjyJE6ZCA==",
+      "dev": true,
+      "requires": {
+        "graceful-fs": "^4.1.2",
+        "jsonfile": "^4.0.0",
+        "universalify": "^0.1.0"
+      }
+    },
+    "fs.realpath": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
+      "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=",
+      "dev": true
+    },
+    "functional-red-black-tree": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/functional-red-black-tree/-/functional-red-black-tree-1.0.1.tgz",
+      "integrity": "sha1-GwqzvVU7Kg1jmdKcDj6gslIHgyc=",
+      "dev": true
+    },
+    "get-value": {
+      "version": "2.0.6",
+      "resolved": "https://registry.npmjs.org/get-value/-/get-value-2.0.6.tgz",
+      "integrity": "sha1-3BXKHGcjh8p2vTesCjlbogQqLCg=",
+      "dev": true
+    },
+    "getpass": {
+      "version": "0.1.7",
+      "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz",
+      "integrity": "sha1-Xv+OPmhNVprkyysSgmBOi6YhSfo=",
+      "dev": true,
+      "requires": {
+        "assert-plus": "^1.0.0"
+      }
+    },
+    "glob": {
+      "version": "7.1.1",
+      "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.1.tgz",
+      "integrity": "sha1-gFIR3wT6rxxjo2ADBs31reULLsg=",
+      "dev": true,
+      "requires": {
+        "fs.realpath": "^1.0.0",
+        "inflight": "^1.0.4",
+        "inherits": "2",
+        "minimatch": "^3.0.2",
+        "once": "^1.3.0",
+        "path-is-absolute": "^1.0.0"
+      }
+    },
+    "glob-parent": {
+      "version": "3.1.0",
+      "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-3.1.0.tgz",
+      "integrity": "sha1-nmr2KZ2NO9K9QEMIMr0RPfkGxa4=",
+      "dev": true,
+      "requires": {
+        "is-glob": "^3.1.0",
+        "path-dirname": "^1.0.0"
+      },
+      "dependencies": {
+        "is-glob": {
+          "version": "3.1.0",
+          "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-3.1.0.tgz",
+          "integrity": "sha1-e6WuJCF4BKxwcHuWkiVnSGzD6Eo=",
+          "dev": true,
+          "requires": {
+            "is-extglob": "^2.1.0"
+          }
+        }
+      }
+    },
+    "glob-to-regexp": {
+      "version": "0.3.0",
+      "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.3.0.tgz",
+      "integrity": "sha1-jFoUlNIGbFcMw7/kSWF1rMTVAqs=",
+      "dev": true
+    },
+    "globals": {
+      "version": "10.1.0",
+      "resolved": "https://registry.npmjs.org/globals/-/globals-10.1.0.tgz",
+      "integrity": "sha1-RCWhiBvg0za0qCOoKnvnJdXdmHw="
+    },
+    "globby": {
+      "version": "8.0.1",
+      "resolved": "https://registry.npmjs.org/globby/-/globby-8.0.1.tgz",
+      "integrity": "sha512-oMrYrJERnKBLXNLVTqhm3vPEdJ/b2ZE28xN4YARiix1NOIOBPEpOUnm844K1iu/BkphCaf2WNFwMszv8Soi1pw==",
+      "dev": true,
+      "requires": {
+        "array-union": "^1.0.1",
+        "dir-glob": "^2.0.0",
+        "fast-glob": "^2.0.2",
+        "glob": "^7.1.2",
+        "ignore": "^3.3.5",
+        "pify": "^3.0.0",
+        "slash": "^1.0.0"
+      },
+      "dependencies": {
+        "glob": {
+          "version": "7.1.3",
+          "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.3.tgz",
+          "integrity": "sha512-vcfuiIxogLV4DlGBHIUOwI0IbrJ8HWPc4MU7HzviGeNho/UJDfi6B5p3sHeWIQ0KGIU0Jpxi5ZHxemQfLkkAwQ==",
+          "dev": true,
+          "requires": {
+            "fs.realpath": "^1.0.0",
+            "inflight": "^1.0.4",
+            "inherits": "2",
+            "minimatch": "^3.0.4",
+            "once": "^1.3.0",
+            "path-is-absolute": "^1.0.0"
+          }
+        }
+      }
+    },
+    "graceful-fs": {
+      "version": "4.1.11",
+      "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.1.11.tgz",
+      "integrity": "sha1-Dovf5NHduIVNZOBOp8AOKgJuVlg=",
+      "dev": true
+    },
+    "graceful-readlink": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/graceful-readlink/-/graceful-readlink-1.0.1.tgz",
+      "integrity": "sha1-TK+tdrxi8C+gObL5Tpo906ORpyU=",
+      "dev": true
+    },
+    "growl": {
+      "version": "1.9.2",
+      "resolved": "https://registry.npmjs.org/growl/-/growl-1.9.2.tgz",
+      "integrity": "sha1-Dqd0NxXbjY3ixe3hd14bRayFwC8=",
+      "dev": true
+    },
+    "har-schema": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz",
+      "integrity": "sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI=",
+      "dev": true
+    },
+    "har-validator": {
+      "version": "5.0.3",
+      "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.0.3.tgz",
+      "integrity": "sha1-ukAsJmGU8VlW7xXg/PJCmT9qff0=",
+      "dev": true,
+      "requires": {
+        "ajv": "^5.1.0",
+        "har-schema": "^2.0.0"
+      }
+    },
+    "has-flag": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-1.0.0.tgz",
+      "integrity": "sha1-nZ55MWXOAXoA8AQYxD+UKnsdEfo=",
+      "dev": true
+    },
+    "has-value": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/has-value/-/has-value-1.0.0.tgz",
+      "integrity": "sha1-GLKB2lhbHFxR3vJMkw7SmgvmsXc=",
+      "dev": true,
+      "requires": {
+        "get-value": "^2.0.6",
+        "has-values": "^1.0.0",
+        "isobject": "^3.0.0"
+      }
+    },
+    "has-values": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/has-values/-/has-values-1.0.0.tgz",
+      "integrity": "sha1-lbC2P+whRmGab+V/51Yo1aOe/k8=",
+      "dev": true,
+      "requires": {
+        "is-number": "^3.0.0",
+        "kind-of": "^4.0.0"
+      },
+      "dependencies": {
+        "kind-of": {
+          "version": "4.0.0",
+          "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-4.0.0.tgz",
+          "integrity": "sha1-IIE989cSkosgc3hpGkUGb65y3Vc=",
+          "dev": true,
+          "requires": {
+            "is-buffer": "^1.1.5"
+          }
+        }
+      }
+    },
+    "hawk": {
+      "version": "6.0.2",
+      "resolved": "https://registry.npmjs.org/hawk/-/hawk-6.0.2.tgz",
+      "integrity": "sha512-miowhl2+U7Qle4vdLqDdPt9m09K6yZhkLDTWGoUiUzrQCn+mHHSmfJgAyGaLRZbPmTqfFFjRV1QWCW0VWUJBbQ==",
+      "dev": true,
+      "requires": {
+        "boom": "4.x.x",
+        "cryptiles": "3.x.x",
+        "hoek": "4.x.x",
+        "sntp": "2.x.x"
+      }
+    },
+    "he": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/he/-/he-1.1.1.tgz",
+      "integrity": "sha1-k0EP0hsAlzUVH4howvJx80J+I/0=",
+      "dev": true
+    },
+    "hoek": {
+      "version": "4.2.1",
+      "resolved": "https://registry.npmjs.org/hoek/-/hoek-4.2.1.tgz",
+      "integrity": "sha512-QLg82fGkfnJ/4iy1xZ81/9SIJiq1NGFUMGs6ParyjBZr6jW2Ufj/snDqTHixNlHdPNwN2RLVD0Pi3igeK9+JfA==",
+      "dev": true
+    },
+    "http-signature": {
+      "version": "1.2.0",
+      "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz",
+      "integrity": "sha1-muzZJRFHcvPZW2WmCruPfBj7rOE=",
+      "dev": true,
+      "requires": {
+        "assert-plus": "^1.0.0",
+        "jsprim": "^1.2.2",
+        "sshpk": "^1.7.0"
+      }
+    },
+    "iconv-lite": {
+      "version": "0.4.24",
+      "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz",
+      "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==",
+      "dev": true,
+      "requires": {
+        "safer-buffer": ">= 2.1.2 < 3"
+      }
+    },
+    "ignore": {
+      "version": "3.3.10",
+      "resolved": "https://registry.npmjs.org/ignore/-/ignore-3.3.10.tgz",
+      "integrity": "sha512-Pgs951kaMm5GXP7MOvxERINe3gsaVjUWFm+UZPSq9xYriQAksyhg0csnS0KXSNRD5NmNdapXEpjxG49+AKh/ug==",
+      "dev": true
+    },
+    "import-fresh": {
+      "version": "3.2.1",
+      "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.2.1.tgz",
+      "integrity": "sha512-6e1q1cnWP2RXD9/keSkxHScg508CdXqXWgWBaETNhyuBFz+kUZlKboh+ISK+bU++DmbHimVBrOz/zzPe0sZ3sQ==",
+      "dev": true,
+      "requires": {
+        "parent-module": "^1.0.0",
+        "resolve-from": "^4.0.0"
+      }
+    },
+    "imurmurhash": {
+      "version": "0.1.4",
+      "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz",
+      "integrity": "sha1-khi5srkoojixPcT7a21XbyMUU+o=",
+      "dev": true
+    },
+    "in-publish": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/in-publish/-/in-publish-2.0.0.tgz",
+      "integrity": "sha1-4g/146KvwmkDILbcVSaCqcf631E=",
+      "dev": true
+    },
+    "inflight": {
+      "version": "1.0.6",
+      "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
+      "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=",
+      "dev": true,
+      "requires": {
+        "once": "^1.3.0",
+        "wrappy": "1"
+      }
+    },
+    "inherits": {
+      "version": "2.0.3",
+      "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz",
+      "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=",
+      "dev": true
+    },
+    "inquirer": {
+      "version": "7.1.0",
+      "resolved": "https://registry.npmjs.org/inquirer/-/inquirer-7.1.0.tgz",
+      "integrity": "sha512-5fJMWEmikSYu0nv/flMc475MhGbB7TSPd/2IpFV4I4rMklboCH2rQjYY5kKiYGHqUF9gvaambupcJFFG9dvReg==",
+      "dev": true,
+      "requires": {
+        "ansi-escapes": "^4.2.1",
+        "chalk": "^3.0.0",
+        "cli-cursor": "^3.1.0",
+        "cli-width": "^2.0.0",
+        "external-editor": "^3.0.3",
+        "figures": "^3.0.0",
+        "lodash": "^4.17.15",
+        "mute-stream": "0.0.8",
+        "run-async": "^2.4.0",
+        "rxjs": "^6.5.3",
+        "string-width": "^4.1.0",
+        "strip-ansi": "^6.0.0",
+        "through": "^2.3.6"
+      },
+      "dependencies": {
+        "ansi-styles": {
+          "version": "4.2.1",
+          "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.2.1.tgz",
+          "integrity": "sha512-9VGjrMsG1vePxcSweQsN20KY/c4zN0h9fLjqAbwbPfahM3t+NL+M9HC8xeXG2I8pX5NoamTGNuomEUFI7fcUjA==",
+          "dev": true,
+          "requires": {
+            "@types/color-name": "^1.1.1",
+            "color-convert": "^2.0.1"
+          }
+        },
+        "chalk": {
+          "version": "3.0.0",
+          "resolved": "https://registry.npmjs.org/chalk/-/chalk-3.0.0.tgz",
+          "integrity": "sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg==",
+          "dev": true,
+          "requires": {
+            "ansi-styles": "^4.1.0",
+            "supports-color": "^7.1.0"
+          }
+        },
+        "color-convert": {
+          "version": "2.0.1",
+          "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
+          "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
+          "dev": true,
+          "requires": {
+            "color-name": "~1.1.4"
+          }
+        },
+        "color-name": {
+          "version": "1.1.4",
+          "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
+          "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
+          "dev": true
+        },
+        "has-flag": {
+          "version": "4.0.0",
+          "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
+          "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
+          "dev": true
+        },
+        "lodash": {
+          "version": "4.17.15",
+          "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.15.tgz",
+          "integrity": "sha512-8xOcRHvCjnocdS5cpwXQXVzmmh5e5+saE2QGoeQmbKmRS6J3VQppPOIt0MnmE+4xlZoumy0GPG0D0MVIQbNA1A==",
+          "dev": true
+        },
+        "strip-ansi": {
+          "version": "6.0.0",
+          "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz",
+          "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==",
+          "dev": true,
+          "requires": {
+            "ansi-regex": "^5.0.0"
+          }
+        },
+        "supports-color": {
+          "version": "7.1.0",
+          "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.1.0.tgz",
+          "integrity": "sha512-oRSIpR8pxT1Wr2FquTNnGet79b3BWljqOuoW/h4oBhxJ/HUbX5nX6JSruTkvXDCFMwDPvsaTTbvMLKZWSy0R5g==",
+          "dev": true,
+          "requires": {
+            "has-flag": "^4.0.0"
+          }
+        }
+      }
+    },
+    "into-stream": {
+      "version": "3.1.0",
+      "resolved": "http://registry.npmjs.org/into-stream/-/into-stream-3.1.0.tgz",
+      "integrity": "sha1-lvsKk2wSur1v8XUqF9BWFqvQlMY=",
+      "dev": true,
+      "requires": {
+        "from2": "^2.1.1",
+        "p-is-promise": "^1.1.0"
+      }
+    },
+    "is-accessor-descriptor": {
+      "version": "0.1.6",
+      "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz",
+      "integrity": "sha1-qeEss66Nh2cn7u84Q/igiXtcmNY=",
+      "dev": true,
+      "requires": {
+        "kind-of": "^3.0.2"
+      },
+      "dependencies": {
+        "kind-of": {
+          "version": "3.2.2",
+          "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+          "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+          "dev": true,
+          "requires": {
+            "is-buffer": "^1.1.5"
+          }
+        }
+      }
+    },
+    "is-buffer": {
+      "version": "1.1.6",
+      "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz",
+      "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w==",
+      "dev": true
+    },
+    "is-data-descriptor": {
+      "version": "0.1.4",
+      "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz",
+      "integrity": "sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y=",
+      "dev": true,
+      "requires": {
+        "kind-of": "^3.0.2"
+      },
+      "dependencies": {
+        "kind-of": {
+          "version": "3.2.2",
+          "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+          "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+          "dev": true,
+          "requires": {
+            "is-buffer": "^1.1.5"
+          }
+        }
+      }
+    },
+    "is-descriptor": {
+      "version": "0.1.6",
+      "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz",
+      "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==",
+      "dev": true,
+      "requires": {
+        "is-accessor-descriptor": "^0.1.6",
+        "is-data-descriptor": "^0.1.4",
+        "kind-of": "^5.0.0"
+      },
+      "dependencies": {
+        "kind-of": {
+          "version": "5.1.0",
+          "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz",
+          "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw==",
+          "dev": true
+        }
+      }
+    },
+    "is-extendable": {
+      "version": "0.1.1",
+      "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz",
+      "integrity": "sha1-YrEQ4omkcUGOPsNqYX1HLjAd/Ik=",
+      "dev": true
+    },
+    "is-extglob": {
+      "version": "2.1.1",
+      "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz",
+      "integrity": "sha1-qIwCU1eR8C7TfHahueqXc8gz+MI=",
+      "dev": true
+    },
+    "is-fullwidth-code-point": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
+      "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
+      "dev": true
+    },
+    "is-glob": {
+      "version": "4.0.0",
+      "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.0.tgz",
+      "integrity": "sha1-lSHHaEXMJhCoUgPd8ICpWML/q8A=",
+      "dev": true,
+      "requires": {
+        "is-extglob": "^2.1.1"
+      }
+    },
+    "is-number": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz",
+      "integrity": "sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU=",
+      "dev": true,
+      "requires": {
+        "kind-of": "^3.0.2"
+      },
+      "dependencies": {
+        "kind-of": {
+          "version": "3.2.2",
+          "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+          "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+          "dev": true,
+          "requires": {
+            "is-buffer": "^1.1.5"
+          }
+        }
+      }
+    },
+    "is-plain-object": {
+      "version": "2.0.4",
+      "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz",
+      "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==",
+      "dev": true,
+      "requires": {
+        "isobject": "^3.0.1"
+      }
+    },
+    "is-promise": {
+      "version": "2.1.0",
+      "resolved": "https://registry.npmjs.org/is-promise/-/is-promise-2.1.0.tgz",
+      "integrity": "sha1-eaKp7OfwlugPNtKy87wWwf9L8/o=",
+      "dev": true
+    },
+    "is-stream": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.0.tgz",
+      "integrity": "sha512-XCoy+WlUr7d1+Z8GgSuXmpuUFC9fOhRXglJMx+dwLKTkL44Cjd4W1Z5P+BQZpr+cR93aGP4S/s7Ftw6Nd/kiEw=="
+    },
+    "is-typedarray": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz",
+      "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=",
+      "dev": true
+    },
+    "is-windows": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz",
+      "integrity": "sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==",
+      "dev": true
+    },
+    "isarray": {
+      "version": "0.0.1",
+      "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz",
+      "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=",
+      "dev": true
+    },
+    "isexe": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
+      "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=",
+      "dev": true
+    },
+    "isobject": {
+      "version": "3.0.1",
+      "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz",
+      "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8=",
+      "dev": true
+    },
+    "isstream": {
+      "version": "0.1.2",
+      "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz",
+      "integrity": "sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo=",
+      "dev": true
+    },
+    "js-tokens": {
+      "version": "4.0.0",
+      "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
+      "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ=="
+    },
+    "js-yaml": {
+      "version": "3.13.1",
+      "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.13.1.tgz",
+      "integrity": "sha512-YfbcO7jXDdyj0DGxYVSlSeQNHbD7XPWvrVWeVUujrQEoZzWJIRrCPoyk6kL6IAjAG2IolMK4T0hNUe0HOUs5Jw==",
+      "dev": true,
+      "requires": {
+        "argparse": "^1.0.7",
+        "esprima": "^4.0.0"
+      },
+      "dependencies": {
+        "esprima": {
+          "version": "4.0.1",
+          "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz",
+          "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==",
+          "dev": true
+        }
+      }
+    },
+    "jsbn": {
+      "version": "0.1.1",
+      "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz",
+      "integrity": "sha1-peZUwuWi3rXyAdls77yoDA7y9RM=",
+      "dev": true
+    },
+    "jsesc": {
+      "version": "2.5.1",
+      "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.1.tgz",
+      "integrity": "sha1-5CGiqOINawgZ3yiQj3glJrlt0f4="
+    },
+    "json-schema": {
+      "version": "0.2.3",
+      "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.2.3.tgz",
+      "integrity": "sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM=",
+      "dev": true
+    },
+    "json-schema-traverse": {
+      "version": "0.3.1",
+      "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.3.1.tgz",
+      "integrity": "sha1-NJptRMU6Ud6JtAgFxdXlm0F9M0A=",
+      "dev": true
+    },
+    "json-stable-stringify-without-jsonify": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz",
+      "integrity": "sha1-nbe1lJatPzz+8wp1FC0tkwrXJlE=",
+      "dev": true
+    },
+    "json-stringify-safe": {
+      "version": "5.0.1",
+      "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz",
+      "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus=",
+      "dev": true
+    },
+    "json3": {
+      "version": "3.3.2",
+      "resolved": "https://registry.npmjs.org/json3/-/json3-3.3.2.tgz",
+      "integrity": "sha1-PAQ0dD35Pi9cQq7nsZvLSDV19OE=",
+      "dev": true
+    },
+    "jsonfile": {
+      "version": "4.0.0",
+      "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz",
+      "integrity": "sha1-h3Gq4HmbZAdrdmQPygWPnBDjPss=",
+      "dev": true,
+      "requires": {
+        "graceful-fs": "^4.1.6"
+      }
+    },
+    "jsprim": {
+      "version": "1.4.1",
+      "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.1.tgz",
+      "integrity": "sha1-MT5mvB5cwG5Di8G3SZwuXFastqI=",
+      "dev": true,
+      "requires": {
+        "assert-plus": "1.0.0",
+        "extsprintf": "1.3.0",
+        "json-schema": "0.2.3",
+        "verror": "1.10.0"
+      }
+    },
+    "just-extend": {
+      "version": "1.1.22",
+      "resolved": "https://registry.npmjs.org/just-extend/-/just-extend-1.1.22.tgz",
+      "integrity": "sha1-MzCvdWyralQnAMZLLk5KoGLVL/8=",
+      "dev": true
+    },
+    "kind-of": {
+      "version": "6.0.2",
+      "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz",
+      "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA==",
+      "dev": true
+    },
+    "levn": {
+      "version": "0.3.0",
+      "resolved": "https://registry.npmjs.org/levn/-/levn-0.3.0.tgz",
+      "integrity": "sha1-OwmSTt+fCDwEkP3UwLxEIeBHZO4=",
+      "dev": true,
+      "requires": {
+        "prelude-ls": "~1.1.2",
+        "type-check": "~0.3.2"
+      }
+    },
+    "lodash": {
+      "version": "4.17.11",
+      "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.11.tgz",
+      "integrity": "sha512-cQKh8igo5QUhZ7lg38DYWAxMvjSAKG0A8wGSVimP07SIUEK2UO+arSRKbRZWtelMtN5V0Hkwh5ryOto/SshYIg=="
+    },
+    "lodash._baseassign": {
+      "version": "3.2.0",
+      "resolved": "https://registry.npmjs.org/lodash._baseassign/-/lodash._baseassign-3.2.0.tgz",
+      "integrity": "sha1-jDigmVAPIVrQnlnxci/QxSv+Ck4=",
+      "dev": true,
+      "requires": {
+        "lodash._basecopy": "^3.0.0",
+        "lodash.keys": "^3.0.0"
+      }
+    },
+    "lodash._basecopy": {
+      "version": "3.0.1",
+      "resolved": "https://registry.npmjs.org/lodash._basecopy/-/lodash._basecopy-3.0.1.tgz",
+      "integrity": "sha1-jaDmqHbPNEwK2KVIghEd08XHyjY=",
+      "dev": true
+    },
+    "lodash._basecreate": {
+      "version": "3.0.3",
+      "resolved": "https://registry.npmjs.org/lodash._basecreate/-/lodash._basecreate-3.0.3.tgz",
+      "integrity": "sha1-G8ZhYU2qf8MRt9A78WgGoCE8+CE=",
+      "dev": true
+    },
+    "lodash._getnative": {
+      "version": "3.9.1",
+      "resolved": "https://registry.npmjs.org/lodash._getnative/-/lodash._getnative-3.9.1.tgz",
+      "integrity": "sha1-VwvH3t5G1hzc3mh9ZdPuy6o6r/U=",
+      "dev": true
+    },
+    "lodash._isiterateecall": {
+      "version": "3.0.9",
+      "resolved": "https://registry.npmjs.org/lodash._isiterateecall/-/lodash._isiterateecall-3.0.9.tgz",
+      "integrity": "sha1-UgOte6Ql+uhCRg5pbbnPPmqsBXw=",
+      "dev": true
+    },
+    "lodash.create": {
+      "version": "3.1.1",
+      "resolved": "https://registry.npmjs.org/lodash.create/-/lodash.create-3.1.1.tgz",
+      "integrity": "sha1-1/KEnw29p+BGgruM1yqwIkYd6+c=",
+      "dev": true,
+      "requires": {
+        "lodash._baseassign": "^3.0.0",
+        "lodash._basecreate": "^3.0.0",
+        "lodash._isiterateecall": "^3.0.0"
+      }
+    },
+    "lodash.get": {
+      "version": "4.4.2",
+      "resolved": "https://registry.npmjs.org/lodash.get/-/lodash.get-4.4.2.tgz",
+      "integrity": "sha1-LRd/ZS+jHpObRDjVNBSZ36OCXpk=",
+      "dev": true
+    },
+    "lodash.isarguments": {
+      "version": "3.1.0",
+      "resolved": "https://registry.npmjs.org/lodash.isarguments/-/lodash.isarguments-3.1.0.tgz",
+      "integrity": "sha1-L1c9hcaiQon/AGY7SRwdM4/zRYo=",
+      "dev": true
+    },
+    "lodash.isarray": {
+      "version": "3.0.4",
+      "resolved": "https://registry.npmjs.org/lodash.isarray/-/lodash.isarray-3.0.4.tgz",
+      "integrity": "sha1-eeTriMNqgSKvhvhEqpvNhRtfu1U=",
+      "dev": true
+    },
+    "lodash.keys": {
+      "version": "3.1.2",
+      "resolved": "https://registry.npmjs.org/lodash.keys/-/lodash.keys-3.1.2.tgz",
+      "integrity": "sha1-TbwEcrFWvlCgsoaFXRvQsMZWCYo=",
+      "dev": true,
+      "requires": {
+        "lodash._getnative": "^3.0.0",
+        "lodash.isarguments": "^3.0.0",
+        "lodash.isarray": "^3.0.0"
+      }
+    },
+    "lolex": {
+      "version": "2.1.2",
+      "resolved": "https://registry.npmjs.org/lolex/-/lolex-2.1.2.tgz",
+      "integrity": "sha1-JpS5U8nqTQE+W4v7qJHJkQJbJik=",
+      "dev": true
+    },
+    "map-cache": {
+      "version": "0.2.2",
+      "resolved": "https://registry.npmjs.org/map-cache/-/map-cache-0.2.2.tgz",
+      "integrity": "sha1-wyq9C9ZSXZsFFkW7TyasXcmKDb8=",
+      "dev": true
+    },
+    "map-visit": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/map-visit/-/map-visit-1.0.0.tgz",
+      "integrity": "sha1-7Nyo8TFE5mDxtb1B8S80edmN+48=",
+      "dev": true,
+      "requires": {
+        "object-visit": "^1.0.0"
+      }
+    },
+    "merge2": {
+      "version": "1.2.3",
+      "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.2.3.tgz",
+      "integrity": "sha512-gdUU1Fwj5ep4kplwcmftruWofEFt6lfpkkr3h860CXbAB9c3hGb55EOL2ali0Td5oebvW0E1+3Sr+Ur7XfKpRA==",
+      "dev": true
+    },
+    "micromatch": {
+      "version": "3.1.10",
+      "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-3.1.10.tgz",
+      "integrity": "sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg==",
+      "dev": true,
+      "requires": {
+        "arr-diff": "^4.0.0",
+        "array-unique": "^0.3.2",
+        "braces": "^2.3.1",
+        "define-property": "^2.0.2",
+        "extend-shallow": "^3.0.2",
+        "extglob": "^2.0.4",
+        "fragment-cache": "^0.2.1",
+        "kind-of": "^6.0.2",
+        "nanomatch": "^1.2.9",
+        "object.pick": "^1.3.0",
+        "regex-not": "^1.0.0",
+        "snapdragon": "^0.8.1",
+        "to-regex": "^3.0.2"
+      }
+    },
+    "mime-db": {
+      "version": "1.37.0",
+      "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.37.0.tgz",
+      "integrity": "sha512-R3C4db6bgQhlIhPU48fUtdVmKnflq+hRdad7IyKhtFj06VPNVdk2RhiYL3UjQIlso8L+YxAtFkobT0VK+S/ybg==",
+      "dev": true
+    },
+    "mime-types": {
+      "version": "2.1.21",
+      "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.21.tgz",
+      "integrity": "sha512-3iL6DbwpyLzjR3xHSFNFeb9Nz/M8WDkX33t1GFQnFOllWk8pOrh/LSrB5OXlnlW5P9LH73X6loW/eogc+F5lJg==",
+      "dev": true,
+      "requires": {
+        "mime-db": "~1.37.0"
+      }
+    },
+    "mimic-fn": {
+      "version": "2.1.0",
+      "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz",
+      "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==",
+      "dev": true
+    },
+    "minimatch": {
+      "version": "3.0.4",
+      "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz",
+      "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==",
+      "dev": true,
+      "requires": {
+        "brace-expansion": "^1.1.7"
+      }
+    },
+    "minimist": {
+      "version": "0.0.8",
+      "resolved": "https://registry.npmjs.org/minimist/-/minimist-0.0.8.tgz",
+      "integrity": "sha1-hX/Kv8M5fSYluCKCYuhqp6ARsF0=",
+      "dev": true
+    },
+    "mixin-deep": {
+      "version": "1.3.1",
+      "resolved": "https://registry.npmjs.org/mixin-deep/-/mixin-deep-1.3.1.tgz",
+      "integrity": "sha512-8ZItLHeEgaqEvd5lYBXfm4EZSFCX29Jb9K+lAHhDKzReKBQKj3R+7NOF6tjqYi9t4oI8VUfaWITJQm86wnXGNQ==",
+      "dev": true,
+      "requires": {
+        "for-in": "^1.0.2",
+        "is-extendable": "^1.0.1"
+      },
+      "dependencies": {
+        "is-extendable": {
+          "version": "1.0.1",
+          "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz",
+          "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==",
+          "dev": true,
+          "requires": {
+            "is-plain-object": "^2.0.4"
+          }
+        }
+      }
+    },
+    "mkdirp": {
+      "version": "0.5.1",
+      "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.1.tgz",
+      "integrity": "sha1-MAV0OOrGz3+MR2fzhkjWaX11yQM=",
+      "dev": true,
+      "requires": {
+        "minimist": "0.0.8"
+      }
+    },
+    "mocha": {
+      "version": "3.5.3",
+      "resolved": "https://registry.npmjs.org/mocha/-/mocha-3.5.3.tgz",
+      "integrity": "sha512-/6na001MJWEtYxHOV1WLfsmR4YIynkUEhBwzsb+fk2qmQ3iqsi258l/Q2MWHJMImAcNpZ8DEdYAK72NHoIQ9Eg==",
+      "dev": true,
+      "requires": {
+        "browser-stdout": "1.3.0",
+        "commander": "2.9.0",
+        "debug": "2.6.8",
+        "diff": "3.2.0",
+        "escape-string-regexp": "1.0.5",
+        "glob": "7.1.1",
+        "growl": "1.9.2",
+        "he": "1.1.1",
+        "json3": "3.3.2",
+        "lodash.create": "3.1.1",
+        "mkdirp": "0.5.1",
+        "supports-color": "3.1.2"
+      },
+      "dependencies": {
+        "commander": {
+          "version": "2.9.0",
+          "resolved": "https://registry.npmjs.org/commander/-/commander-2.9.0.tgz",
+          "integrity": "sha1-nJkJQXbhIkDLItbFFGCYQA/g99Q=",
+          "dev": true,
+          "requires": {
+            "graceful-readlink": ">= 1.0.0"
+          }
+        }
+      }
+    },
+    "ms": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+      "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=",
+      "dev": true
+    },
+    "multistream": {
+      "version": "2.1.1",
+      "resolved": "https://registry.npmjs.org/multistream/-/multistream-2.1.1.tgz",
+      "integrity": "sha512-xasv76hl6nr1dEy3lPvy7Ej7K/Lx3O/FCvwge8PeVJpciPPoNCbaANcNiBug3IpdvTveZUcAV0DJzdnUDMesNQ==",
+      "dev": true,
+      "requires": {
+        "inherits": "^2.0.1",
+        "readable-stream": "^2.0.5"
+      }
+    },
+    "mute-stream": {
+      "version": "0.0.8",
+      "resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-0.0.8.tgz",
+      "integrity": "sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA==",
+      "dev": true
+    },
+    "nanomatch": {
+      "version": "1.2.13",
+      "resolved": "https://registry.npmjs.org/nanomatch/-/nanomatch-1.2.13.tgz",
+      "integrity": "sha512-fpoe2T0RbHwBTBUOftAfBPaDEi06ufaUai0mE6Yn1kacc3SnTErfb/h+X94VXzI64rKFHYImXSvdwGGCmwOqCA==",
+      "dev": true,
+      "requires": {
+        "arr-diff": "^4.0.0",
+        "array-unique": "^0.3.2",
+        "define-property": "^2.0.2",
+        "extend-shallow": "^3.0.2",
+        "fragment-cache": "^0.2.1",
+        "is-windows": "^1.0.2",
+        "kind-of": "^6.0.2",
+        "object.pick": "^1.3.0",
+        "regex-not": "^1.0.0",
+        "snapdragon": "^0.8.1",
+        "to-regex": "^3.0.1"
+      }
+    },
+    "native-promise-only": {
+      "version": "0.8.1",
+      "resolved": "https://registry.npmjs.org/native-promise-only/-/native-promise-only-0.8.1.tgz",
+      "integrity": "sha1-IKMYwwy0X3H+et+/eyHJnBRy7xE=",
+      "dev": true
+    },
+    "natural-compare": {
+      "version": "1.4.0",
+      "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz",
+      "integrity": "sha1-Sr6/7tdUHywnrPspvbvRXI1bpPc=",
+      "dev": true
+    },
+    "nice-try": {
+      "version": "1.0.5",
+      "resolved": "https://registry.npmjs.org/nice-try/-/nice-try-1.0.5.tgz",
+      "integrity": "sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==",
+      "dev": true
+    },
+    "nise": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/nise/-/nise-1.1.0.tgz",
+      "integrity": "sha512-lIFidCxB0mJGyq1i33tLRNojtMoYX95EAI7WQEU+/ees0w6hvXZQHZ7WD130Tjeh5+YJAUVLfQ3k/s9EA8jj+w==",
+      "dev": true,
+      "requires": {
+        "formatio": "^1.2.0",
+        "just-extend": "^1.1.22",
+        "lolex": "^1.6.0",
+        "path-to-regexp": "^1.7.0",
+        "text-encoding": "^0.6.4"
+      },
+      "dependencies": {
+        "lolex": {
+          "version": "1.6.0",
+          "resolved": "https://registry.npmjs.org/lolex/-/lolex-1.6.0.tgz",
+          "integrity": "sha1-OpoCg0UqR9dDnnJzG54H1zhuSfY=",
+          "dev": true
+        }
+      }
+    },
+    "oauth-sign": {
+      "version": "0.8.2",
+      "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.8.2.tgz",
+      "integrity": "sha1-Rqarfwrq2N6unsBWV4C31O/rnUM=",
+      "dev": true
+    },
+    "object-copy": {
+      "version": "0.1.0",
+      "resolved": "https://registry.npmjs.org/object-copy/-/object-copy-0.1.0.tgz",
+      "integrity": "sha1-fn2Fi3gb18mRpBupde04EnVOmYw=",
+      "dev": true,
+      "requires": {
+        "copy-descriptor": "^0.1.0",
+        "define-property": "^0.2.5",
+        "kind-of": "^3.0.3"
+      },
+      "dependencies": {
+        "define-property": {
+          "version": "0.2.5",
+          "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
+          "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
+          "dev": true,
+          "requires": {
+            "is-descriptor": "^0.1.0"
+          }
+        },
+        "kind-of": {
+          "version": "3.2.2",
+          "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+          "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+          "dev": true,
+          "requires": {
+            "is-buffer": "^1.1.5"
+          }
+        }
+      }
+    },
+    "object-visit": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/object-visit/-/object-visit-1.0.1.tgz",
+      "integrity": "sha1-95xEk68MU3e1n+OdOV5BBC3QRbs=",
+      "dev": true,
+      "requires": {
+        "isobject": "^3.0.0"
+      }
+    },
+    "object.pick": {
+      "version": "1.3.0",
+      "resolved": "https://registry.npmjs.org/object.pick/-/object.pick-1.3.0.tgz",
+      "integrity": "sha1-h6EKxMFpS9Lhy/U1kaZhQftd10c=",
+      "dev": true,
+      "requires": {
+        "isobject": "^3.0.1"
+      }
+    },
+    "once": {
+      "version": "1.4.0",
+      "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
+      "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=",
+      "dev": true,
+      "requires": {
+        "wrappy": "1"
+      }
+    },
+    "onetime": {
+      "version": "5.1.0",
+      "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.0.tgz",
+      "integrity": "sha512-5NcSkPHhwTVFIQN+TUqXoS5+dlElHXdpAWu9I0HP20YOtIi+aZ0Ct82jdlILDxjLEAWwvm+qj1m6aEtsDVmm6Q==",
+      "dev": true,
+      "requires": {
+        "mimic-fn": "^2.1.0"
+      }
+    },
+    "optionator": {
+      "version": "0.8.2",
+      "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.8.2.tgz",
+      "integrity": "sha1-NkxeQJ0/TWMB1sC0wFu6UBgK62Q=",
+      "dev": true,
+      "requires": {
+        "deep-is": "~0.1.3",
+        "fast-levenshtein": "~2.0.4",
+        "levn": "~0.3.0",
+        "prelude-ls": "~1.1.2",
+        "type-check": "~0.3.2",
+        "wordwrap": "~1.0.0"
+      }
+    },
+    "os-tmpdir": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz",
+      "integrity": "sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ=",
+      "dev": true
+    },
+    "p-is-promise": {
+      "version": "1.1.0",
+      "resolved": "http://registry.npmjs.org/p-is-promise/-/p-is-promise-1.1.0.tgz",
+      "integrity": "sha1-nJRWmJ6fZYgBewQ01WCXZ1w9oF4=",
+      "dev": true
+    },
+    "parent-module": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz",
+      "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==",
+      "dev": true,
+      "requires": {
+        "callsites": "^3.0.0"
+      }
+    },
+    "pascalcase": {
+      "version": "0.1.1",
+      "resolved": "https://registry.npmjs.org/pascalcase/-/pascalcase-0.1.1.tgz",
+      "integrity": "sha1-s2PlXoAGym/iF4TS2yK9FdeRfxQ=",
+      "dev": true
+    },
+    "path-dirname": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/path-dirname/-/path-dirname-1.0.2.tgz",
+      "integrity": "sha1-zDPSTVJeCZpTiMAzbG4yuRYGCeA=",
+      "dev": true
+    },
+    "path-is-absolute": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
+      "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=",
+      "dev": true
+    },
+    "path-key": {
+      "version": "2.0.1",
+      "resolved": "https://registry.npmjs.org/path-key/-/path-key-2.0.1.tgz",
+      "integrity": "sha1-QRyttXTFoUDTpLGRDUDYDMn0C0A=",
+      "dev": true
+    },
+    "path-parse": {
+      "version": "1.0.6",
+      "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.6.tgz",
+      "integrity": "sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw==",
+      "dev": true
+    },
+    "path-to-regexp": {
+      "version": "1.7.0",
+      "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.7.0.tgz",
+      "integrity": "sha1-Wf3g9DW62suhA6hOnTvGTpa5k30=",
+      "dev": true,
+      "requires": {
+        "isarray": "0.0.1"
+      }
+    },
+    "path-type": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/path-type/-/path-type-3.0.0.tgz",
+      "integrity": "sha512-T2ZUsdZFHgA3u4e5PfPbjd7HDDpxPnQb5jN0SrDsjNSuVXHJqtwTnWqG0B1jZrgmJ/7lj1EmVIByWt1gxGkWvg==",
+      "dev": true,
+      "requires": {
+        "pify": "^3.0.0"
+      }
+    },
+    "performance-now": {
+      "version": "2.1.0",
+      "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz",
+      "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns=",
+      "dev": true
+    },
+    "pify": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz",
+      "integrity": "sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY=",
+      "dev": true
+    },
+    "pkg": {
+      "version": "4.3.4",
+      "resolved": "https://registry.npmjs.org/pkg/-/pkg-4.3.4.tgz",
+      "integrity": "sha512-/vHksmZRu68BCZi+gX4ghCMKVPMXkoo6E16HbI0jAR1/wDMrRFGCXFIho2BQLTiFwtx80KH9FpZixEs/gYxfwA==",
+      "dev": true,
+      "requires": {
+        "@babel/parser": "7.0.0-beta.51",
+        "babel-runtime": "6.26.0",
+        "chalk": "2.4.1",
+        "escodegen": "1.10.0",
+        "fs-extra": "6.0.1",
+        "globby": "8.0.1",
+        "into-stream": "3.1.0",
+        "minimist": "1.2.0",
+        "multistream": "2.1.1",
+        "pkg-fetch": "2.5.6",
+        "progress": "2.0.0",
+        "resolve": "1.6.0",
+        "stream-meter": "1.0.4"
+      },
+      "dependencies": {
+        "@babel/parser": {
+          "version": "7.0.0-beta.51",
+          "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.0.0-beta.51.tgz",
+          "integrity": "sha1-J87C30Cd9gr1gnDtj2qlVAnqhvY=",
+          "dev": true
+        },
+        "minimist": {
+          "version": "1.2.0",
+          "resolved": "http://registry.npmjs.org/minimist/-/minimist-1.2.0.tgz",
+          "integrity": "sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ=",
+          "dev": true
+        }
+      }
+    },
+    "pkg-fetch": {
+      "version": "2.5.6",
+      "resolved": "https://registry.npmjs.org/pkg-fetch/-/pkg-fetch-2.5.6.tgz",
+      "integrity": "sha512-Bmae7NUnImRzFL7OtWfC0fnzhTqQi3vXpZjlruww882hQEVt/Z9Jcq8o7KdL6VGJPGo72JTZHDa+PRD++HkAXQ==",
+      "dev": true,
+      "requires": {
+        "babel-runtime": "6.26.0",
+        "byline": "5.0.0",
+        "chalk": "2.4.1",
+        "expand-template": "1.1.1",
+        "fs-extra": "6.0.1",
+        "in-publish": "2.0.0",
+        "minimist": "1.2.0",
+        "progress": "2.0.0",
+        "request": "2.85.0",
+        "request-progress": "3.0.0",
+        "semver": "5.5.0",
+        "unique-temp-dir": "1.0.0"
+      },
+      "dependencies": {
+        "minimist": {
+          "version": "1.2.0",
+          "resolved": "http://registry.npmjs.org/minimist/-/minimist-1.2.0.tgz",
+          "integrity": "sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ=",
+          "dev": true
+        }
+      }
+    },
+    "posix-character-classes": {
+      "version": "0.1.1",
+      "resolved": "https://registry.npmjs.org/posix-character-classes/-/posix-character-classes-0.1.1.tgz",
+      "integrity": "sha1-AerA/jta9xoqbAL+q7jB/vfgDqs=",
+      "dev": true
+    },
+    "prelude-ls": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.1.2.tgz",
+      "integrity": "sha1-IZMqVJ9eUv/ZqCf1cOBL5iqX2lQ=",
+      "dev": true
+    },
+    "prettier": {
+      "version": "2.0.5",
+      "resolved": "https://registry.npmjs.org/prettier/-/prettier-2.0.5.tgz",
+      "integrity": "sha512-7PtVymN48hGcO4fGjybyBSIWDsLU4H4XlvOHfq91pz9kkGlonzwTfYkaIEwiRg/dAJF9YlbsduBAgtYLi+8cFg==",
+      "dev": true
+    },
+    "process-nextick-args": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.0.tgz",
+      "integrity": "sha512-MtEC1TqN0EU5nephaJ4rAtThHtC86dNN9qCuEhtshvpVBkAW5ZO7BASN9REnF9eoXGcRub+pFuKEpOHE+HbEMw==",
+      "dev": true
+    },
+    "progress": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/progress/-/progress-2.0.0.tgz",
+      "integrity": "sha1-ihvjZr+Pwj2yvSPxDG/pILQ4nR8=",
+      "dev": true
+    },
+    "punycode": {
+      "version": "1.4.1",
+      "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz",
+      "integrity": "sha1-wNWmOycYgArY4esPpSachN1BhF4=",
+      "dev": true
+    },
+    "qs": {
+      "version": "6.5.2",
+      "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz",
+      "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==",
+      "dev": true
+    },
+    "readable-stream": {
+      "version": "2.3.6",
+      "resolved": "http://registry.npmjs.org/readable-stream/-/readable-stream-2.3.6.tgz",
+      "integrity": "sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw==",
+      "dev": true,
+      "requires": {
+        "core-util-is": "~1.0.0",
+        "inherits": "~2.0.3",
+        "isarray": "~1.0.0",
+        "process-nextick-args": "~2.0.0",
+        "safe-buffer": "~5.1.1",
+        "string_decoder": "~1.1.1",
+        "util-deprecate": "~1.0.1"
+      },
+      "dependencies": {
+        "isarray": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
+          "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=",
+          "dev": true
+        }
+      }
+    },
+    "regenerator-runtime": {
+      "version": "0.11.1",
+      "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.11.1.tgz",
+      "integrity": "sha512-MguG95oij0fC3QV3URf4V2SDYGJhJnJGqvIIgdECeODCT98wSWDAJ94SSuVpYQUoTcGUIL6L4yNB7j1DFFHSBg==",
+      "dev": true
+    },
+    "regex-not": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/regex-not/-/regex-not-1.0.2.tgz",
+      "integrity": "sha512-J6SDjUgDxQj5NusnOtdFxDwN/+HWykR8GELwctJ7mdqhcyy1xEc4SRFHUXvxTp661YaVKAjfRLZ9cCqS6tn32A==",
+      "dev": true,
+      "requires": {
+        "extend-shallow": "^3.0.2",
+        "safe-regex": "^1.1.0"
+      }
+    },
+    "regexpp": {
+      "version": "2.0.1",
+      "resolved": "https://registry.npmjs.org/regexpp/-/regexpp-2.0.1.tgz",
+      "integrity": "sha512-lv0M6+TkDVniA3aD1Eg0DVpfU/booSu7Eev3TDO/mZKHBfVjgCGTV4t4buppESEYDtkArYFOxTJWv6S5C+iaNw==",
+      "dev": true
+    },
+    "repeat-element": {
+      "version": "1.1.3",
+      "resolved": "https://registry.npmjs.org/repeat-element/-/repeat-element-1.1.3.tgz",
+      "integrity": "sha512-ahGq0ZnV5m5XtZLMb+vP76kcAM5nkLqk0lpqAuojSKGgQtn4eRi4ZZGm2olo2zKFH+sMsWaqOCW1dqAnOru72g==",
+      "dev": true
+    },
+    "repeat-string": {
+      "version": "1.6.1",
+      "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz",
+      "integrity": "sha1-jcrkcOHIirwtYA//Sndihtp15jc=",
+      "dev": true
+    },
+    "request": {
+      "version": "2.85.0",
+      "resolved": "http://registry.npmjs.org/request/-/request-2.85.0.tgz",
+      "integrity": "sha512-8H7Ehijd4js+s6wuVPLjwORxD4zeuyjYugprdOXlPSqaApmL/QOy+EB/beICHVCHkGMKNh5rvihb5ov+IDw4mg==",
+      "dev": true,
+      "requires": {
+        "aws-sign2": "~0.7.0",
+        "aws4": "^1.6.0",
+        "caseless": "~0.12.0",
+        "combined-stream": "~1.0.5",
+        "extend": "~3.0.1",
+        "forever-agent": "~0.6.1",
+        "form-data": "~2.3.1",
+        "har-validator": "~5.0.3",
+        "hawk": "~6.0.2",
+        "http-signature": "~1.2.0",
+        "is-typedarray": "~1.0.0",
+        "isstream": "~0.1.2",
+        "json-stringify-safe": "~5.0.1",
+        "mime-types": "~2.1.17",
+        "oauth-sign": "~0.8.2",
+        "performance-now": "^2.1.0",
+        "qs": "~6.5.1",
+        "safe-buffer": "^5.1.1",
+        "stringstream": "~0.0.5",
+        "tough-cookie": "~2.3.3",
+        "tunnel-agent": "^0.6.0",
+        "uuid": "^3.1.0"
+      }
+    },
+    "request-progress": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/request-progress/-/request-progress-3.0.0.tgz",
+      "integrity": "sha1-TKdUCBx/7GP1BeT6qCWqBs1mnb4=",
+      "dev": true,
+      "requires": {
+        "throttleit": "^1.0.0"
+      }
+    },
+    "resolve": {
+      "version": "1.6.0",
+      "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.6.0.tgz",
+      "integrity": "sha512-mw7JQNu5ExIkcw4LPih0owX/TZXjD/ZUF/ZQ/pDnkw3ZKhDcZZw5klmBlj6gVMwjQ3Pz5Jgu7F3d0jcDVuEWdw==",
+      "dev": true,
+      "requires": {
+        "path-parse": "^1.0.5"
+      }
+    },
+    "resolve-from": {
+      "version": "4.0.0",
+      "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz",
+      "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==",
+      "dev": true
+    },
+    "resolve-url": {
+      "version": "0.2.1",
+      "resolved": "https://registry.npmjs.org/resolve-url/-/resolve-url-0.2.1.tgz",
+      "integrity": "sha1-LGN/53yJOv0qZj/iGqkIAGjiBSo=",
+      "dev": true
+    },
+    "restore-cursor": {
+      "version": "3.1.0",
+      "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-3.1.0.tgz",
+      "integrity": "sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==",
+      "dev": true,
+      "requires": {
+        "onetime": "^5.1.0",
+        "signal-exit": "^3.0.2"
+      }
+    },
+    "ret": {
+      "version": "0.1.15",
+      "resolved": "https://registry.npmjs.org/ret/-/ret-0.1.15.tgz",
+      "integrity": "sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg==",
+      "dev": true
+    },
+    "rimraf": {
+      "version": "2.6.3",
+      "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.6.3.tgz",
+      "integrity": "sha512-mwqeW5XsA2qAejG46gYdENaxXjx9onRNCfn7L0duuP4hCuTIi/QO7PDK07KJfp1d+izWPrzEJDcSqBa0OZQriA==",
+      "dev": true,
+      "requires": {
+        "glob": "^7.1.3"
+      },
+      "dependencies": {
+        "glob": {
+          "version": "7.1.6",
+          "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz",
+          "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==",
+          "dev": true,
+          "requires": {
+            "fs.realpath": "^1.0.0",
+            "inflight": "^1.0.4",
+            "inherits": "2",
+            "minimatch": "^3.0.4",
+            "once": "^1.3.0",
+            "path-is-absolute": "^1.0.0"
+          }
+        }
+      }
+    },
+    "run-async": {
+      "version": "2.4.0",
+      "resolved": "https://registry.npmjs.org/run-async/-/run-async-2.4.0.tgz",
+      "integrity": "sha512-xJTbh/d7Lm7SBhc1tNvTpeCHaEzoyxPrqNlvSdMfBTYwaY++UJFyXUOxAtsRUXjlqOfj8luNaR9vjCh4KeV+pg==",
+      "dev": true,
+      "requires": {
+        "is-promise": "^2.1.0"
+      }
+    },
+    "rxjs": {
+      "version": "6.5.4",
+      "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-6.5.4.tgz",
+      "integrity": "sha512-naMQXcgEo3csAEGvw/NydRA0fuS2nDZJiw1YUWFKU7aPPAPGZEsD4Iimit96qwCieH6y614MCLYwdkrWx7z/7Q==",
+      "dev": true,
+      "requires": {
+        "tslib": "^1.9.0"
+      }
+    },
+    "safe-buffer": {
+      "version": "5.1.2",
+      "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
+      "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==",
+      "dev": true
+    },
+    "safe-regex": {
+      "version": "1.1.0",
+      "resolved": "http://registry.npmjs.org/safe-regex/-/safe-regex-1.1.0.tgz",
+      "integrity": "sha1-QKNmnzsHfR6UPURinhV91IAjvy4=",
+      "dev": true,
+      "requires": {
+        "ret": "~0.1.10"
+      }
+    },
+    "safer-buffer": {
+      "version": "2.1.2",
+      "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
+      "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==",
+      "dev": true
+    },
+    "samsam": {
+      "version": "1.2.1",
+      "resolved": "https://registry.npmjs.org/samsam/-/samsam-1.2.1.tgz",
+      "integrity": "sha1-7dOQk6MYQ3DLhZJDsr3yVefY6mc=",
+      "dev": true
+    },
+    "semver": {
+      "version": "5.5.0",
+      "resolved": "https://registry.npmjs.org/semver/-/semver-5.5.0.tgz",
+      "integrity": "sha512-4SJ3dm0WAwWy/NVeioZh5AntkdJoWKxHxcmyP622fOkgHa4z3R0TdBJICINyaSDE6uNwVc8gZr+ZinwZAH4xIA==",
+      "dev": true
+    },
+    "set-value": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/set-value/-/set-value-2.0.0.tgz",
+      "integrity": "sha512-hw0yxk9GT/Hr5yJEYnHNKYXkIA8mVJgd9ditYZCe16ZczcaELYYcfvaXesNACk2O8O0nTiPQcQhGUQj8JLzeeg==",
+      "dev": true,
+      "requires": {
+        "extend-shallow": "^2.0.1",
+        "is-extendable": "^0.1.1",
+        "is-plain-object": "^2.0.3",
+        "split-string": "^3.0.1"
+      },
+      "dependencies": {
+        "extend-shallow": {
+          "version": "2.0.1",
+          "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+          "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
+          "dev": true,
+          "requires": {
+            "is-extendable": "^0.1.0"
+          }
+        }
+      }
+    },
+    "shebang-command": {
+      "version": "1.2.0",
+      "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-1.2.0.tgz",
+      "integrity": "sha1-RKrGW2lbAzmJaMOfNj/uXer98eo=",
+      "dev": true,
+      "requires": {
+        "shebang-regex": "^1.0.0"
+      }
+    },
+    "shebang-regex": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-1.0.0.tgz",
+      "integrity": "sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM=",
+      "dev": true
+    },
+    "signal-exit": {
+      "version": "3.0.3",
+      "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.3.tgz",
+      "integrity": "sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA==",
+      "dev": true
+    },
+    "sinon": {
+      "version": "4.0.0",
+      "resolved": "https://registry.npmjs.org/sinon/-/sinon-4.0.0.tgz",
+      "integrity": "sha1-pUpfAjeqHdIhXl6ByJtCtQxP22s=",
+      "dev": true,
+      "requires": {
+        "diff": "^3.1.0",
+        "formatio": "1.2.0",
+        "lodash.get": "^4.4.2",
+        "lolex": "^2.1.2",
+        "native-promise-only": "^0.8.1",
+        "nise": "^1.1.0",
+        "path-to-regexp": "^1.7.0",
+        "samsam": "^1.1.3",
+        "text-encoding": "0.6.4",
+        "type-detect": "^4.0.0"
+      }
+    },
+    "slash": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/slash/-/slash-1.0.0.tgz",
+      "integrity": "sha1-xB8vbDn8FtHNF61LXYlhFK5HDVU=",
+      "dev": true
+    },
+    "slice-ansi": {
+      "version": "2.1.0",
+      "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-2.1.0.tgz",
+      "integrity": "sha512-Qu+VC3EwYLldKa1fCxuuvULvSJOKEgk9pi8dZeCVK7TqBfUNTH4sFkk4joj8afVSfAYgJoSOetjx9QWOJ5mYoQ==",
+      "dev": true,
+      "requires": {
+        "ansi-styles": "^3.2.0",
+        "astral-regex": "^1.0.0",
+        "is-fullwidth-code-point": "^2.0.0"
+      },
+      "dependencies": {
+        "is-fullwidth-code-point": {
+          "version": "2.0.0",
+          "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz",
+          "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=",
+          "dev": true
+        }
+      }
+    },
+    "snapdragon": {
+      "version": "0.8.2",
+      "resolved": "https://registry.npmjs.org/snapdragon/-/snapdragon-0.8.2.tgz",
+      "integrity": "sha512-FtyOnWN/wCHTVXOMwvSv26d+ko5vWlIDD6zoUJ7LW8vh+ZBC8QdljveRP+crNrtBwioEUWy/4dMtbBjA4ioNlg==",
+      "dev": true,
+      "requires": {
+        "base": "^0.11.1",
+        "debug": "^2.2.0",
+        "define-property": "^0.2.5",
+        "extend-shallow": "^2.0.1",
+        "map-cache": "^0.2.2",
+        "source-map": "^0.5.6",
+        "source-map-resolve": "^0.5.0",
+        "use": "^3.1.0"
+      },
+      "dependencies": {
+        "define-property": {
+          "version": "0.2.5",
+          "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
+          "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
+          "dev": true,
+          "requires": {
+            "is-descriptor": "^0.1.0"
+          }
+        },
+        "extend-shallow": {
+          "version": "2.0.1",
+          "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+          "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
+          "dev": true,
+          "requires": {
+            "is-extendable": "^0.1.0"
+          }
+        }
+      }
+    },
+    "snapdragon-node": {
+      "version": "2.1.1",
+      "resolved": "https://registry.npmjs.org/snapdragon-node/-/snapdragon-node-2.1.1.tgz",
+      "integrity": "sha512-O27l4xaMYt/RSQ5TR3vpWCAB5Kb/czIcqUFOM/C4fYcLnbZUc1PkjTAMjof2pBWaSTwOUd6qUHcFGVGj7aIwnw==",
+      "dev": true,
+      "requires": {
+        "define-property": "^1.0.0",
+        "isobject": "^3.0.0",
+        "snapdragon-util": "^3.0.1"
+      },
+      "dependencies": {
+        "define-property": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz",
+          "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=",
+          "dev": true,
+          "requires": {
+            "is-descriptor": "^1.0.0"
+          }
+        },
+        "is-accessor-descriptor": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
+          "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
+          "dev": true,
+          "requires": {
+            "kind-of": "^6.0.0"
+          }
+        },
+        "is-data-descriptor": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
+          "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
+          "dev": true,
+          "requires": {
+            "kind-of": "^6.0.0"
+          }
+        },
+        "is-descriptor": {
+          "version": "1.0.2",
+          "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
+          "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
+          "dev": true,
+          "requires": {
+            "is-accessor-descriptor": "^1.0.0",
+            "is-data-descriptor": "^1.0.0",
+            "kind-of": "^6.0.2"
+          }
+        }
+      }
+    },
+    "snapdragon-util": {
+      "version": "3.0.1",
+      "resolved": "https://registry.npmjs.org/snapdragon-util/-/snapdragon-util-3.0.1.tgz",
+      "integrity": "sha512-mbKkMdQKsjX4BAL4bRYTj21edOf8cN7XHdYUJEe+Zn99hVEYcMvKPct1IqNe7+AZPirn8BCDOQBHQZknqmKlZQ==",
+      "dev": true,
+      "requires": {
+        "kind-of": "^3.2.0"
+      },
+      "dependencies": {
+        "kind-of": {
+          "version": "3.2.2",
+          "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+          "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+          "dev": true,
+          "requires": {
+            "is-buffer": "^1.1.5"
+          }
+        }
+      }
+    },
+    "sntp": {
+      "version": "2.1.0",
+      "resolved": "https://registry.npmjs.org/sntp/-/sntp-2.1.0.tgz",
+      "integrity": "sha512-FL1b58BDrqS3A11lJ0zEdnJ3UOKqVxawAkF3k7F0CVN7VQ34aZrV+G8BZ1WC9ZL7NyrwsW0oviwsWDgRuVYtJg==",
+      "dev": true,
+      "requires": {
+        "hoek": "4.x.x"
+      }
+    },
+    "source-map": {
+      "version": "0.5.7",
+      "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz",
+      "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w="
+    },
+    "source-map-resolve": {
+      "version": "0.5.2",
+      "resolved": "https://registry.npmjs.org/source-map-resolve/-/source-map-resolve-0.5.2.tgz",
+      "integrity": "sha512-MjqsvNwyz1s0k81Goz/9vRBe9SZdB09Bdw+/zYyO+3CuPk6fouTaxscHkgtE8jKvf01kVfl8riHzERQ/kefaSA==",
+      "dev": true,
+      "requires": {
+        "atob": "^2.1.1",
+        "decode-uri-component": "^0.2.0",
+        "resolve-url": "^0.2.1",
+        "source-map-url": "^0.4.0",
+        "urix": "^0.1.0"
+      }
+    },
+    "source-map-url": {
+      "version": "0.4.0",
+      "resolved": "https://registry.npmjs.org/source-map-url/-/source-map-url-0.4.0.tgz",
+      "integrity": "sha1-PpNdfd1zYxuXZZlW1VEo6HtQhKM=",
+      "dev": true
+    },
+    "split-string": {
+      "version": "3.1.0",
+      "resolved": "https://registry.npmjs.org/split-string/-/split-string-3.1.0.tgz",
+      "integrity": "sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw==",
+      "dev": true,
+      "requires": {
+        "extend-shallow": "^3.0.0"
+      }
+    },
+    "sprintf-js": {
+      "version": "1.0.3",
+      "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz",
+      "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=",
+      "dev": true
+    },
+    "sshpk": {
+      "version": "1.15.1",
+      "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.15.1.tgz",
+      "integrity": "sha512-mSdgNUaidk+dRU5MhYtN9zebdzF2iG0cNPWy8HG+W8y+fT1JnSkh0fzzpjOa0L7P8i1Rscz38t0h4gPcKz43xA==",
+      "dev": true,
+      "requires": {
+        "asn1": "~0.2.3",
+        "assert-plus": "^1.0.0",
+        "bcrypt-pbkdf": "^1.0.0",
+        "dashdash": "^1.12.0",
+        "ecc-jsbn": "~0.1.1",
+        "getpass": "^0.1.1",
+        "jsbn": "~0.1.0",
+        "safer-buffer": "^2.0.2",
+        "tweetnacl": "~0.14.0"
+      }
+    },
+    "static-extend": {
+      "version": "0.1.2",
+      "resolved": "https://registry.npmjs.org/static-extend/-/static-extend-0.1.2.tgz",
+      "integrity": "sha1-YICcOcv/VTNyJv1eC1IPNB8ftcY=",
+      "dev": true,
+      "requires": {
+        "define-property": "^0.2.5",
+        "object-copy": "^0.1.0"
+      },
+      "dependencies": {
+        "define-property": {
+          "version": "0.2.5",
+          "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
+          "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
+          "dev": true,
+          "requires": {
+            "is-descriptor": "^0.1.0"
+          }
+        }
+      }
+    },
+    "stream-meter": {
+      "version": "1.0.4",
+      "resolved": "https://registry.npmjs.org/stream-meter/-/stream-meter-1.0.4.tgz",
+      "integrity": "sha1-Uq+Vql6nYKJJFxZwTb/5D3Ov3R0=",
+      "dev": true,
+      "requires": {
+        "readable-stream": "^2.1.4"
+      }
+    },
+    "string-width": {
+      "version": "4.2.0",
+      "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.0.tgz",
+      "integrity": "sha512-zUz5JD+tgqtuDjMhwIg5uFVV3dtqZ9yQJlZVfq4I01/K5Paj5UHj7VyrQOJvzawSVlKpObApbfD0Ed6yJc+1eg==",
+      "dev": true,
+      "requires": {
+        "emoji-regex": "^8.0.0",
+        "is-fullwidth-code-point": "^3.0.0",
+        "strip-ansi": "^6.0.0"
+      },
+      "dependencies": {
+        "strip-ansi": {
+          "version": "6.0.0",
+          "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz",
+          "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==",
+          "dev": true,
+          "requires": {
+            "ansi-regex": "^5.0.0"
+          }
+        }
+      }
+    },
+    "string_decoder": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz",
+      "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==",
+      "dev": true,
+      "requires": {
+        "safe-buffer": "~5.1.0"
+      }
+    },
+    "stringstream": {
+      "version": "0.0.6",
+      "resolved": "https://registry.npmjs.org/stringstream/-/stringstream-0.0.6.tgz",
+      "integrity": "sha512-87GEBAkegbBcweToUrdzf3eLhWNg06FJTebl4BVJz/JgWy8CvEr9dRtX5qWphiynMSQlxxi+QqN0z5T32SLlhA==",
+      "dev": true
+    },
+    "strip-ansi": {
+      "version": "5.2.0",
+      "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-5.2.0.tgz",
+      "integrity": "sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA==",
+      "dev": true,
+      "requires": {
+        "ansi-regex": "^4.1.0"
+      },
+      "dependencies": {
+        "ansi-regex": {
+          "version": "4.1.0",
+          "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-4.1.0.tgz",
+          "integrity": "sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg==",
+          "dev": true
+        }
+      }
+    },
+    "strip-json-comments": {
+      "version": "3.0.1",
+      "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.0.1.tgz",
+      "integrity": "sha512-VTyMAUfdm047mwKl+u79WIdrZxtFtn+nBxHeb844XBQ9uMNTuTHdx2hc5RiAJYqwTj3wc/xe5HLSdJSkJ+WfZw==",
+      "dev": true
+    },
+    "supports-color": {
+      "version": "3.1.2",
+      "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-3.1.2.tgz",
+      "integrity": "sha1-cqJiiU2dQIuVbKBf83su2KbiotU=",
+      "dev": true,
+      "requires": {
+        "has-flag": "^1.0.0"
+      }
+    },
+    "table": {
+      "version": "5.4.6",
+      "resolved": "https://registry.npmjs.org/table/-/table-5.4.6.tgz",
+      "integrity": "sha512-wmEc8m4fjnob4gt5riFRtTu/6+4rSe12TpAELNSqHMfF3IqnA+CH37USM6/YR3qRZv7e56kAEAtd6nKZaxe0Ug==",
+      "dev": true,
+      "requires": {
+        "ajv": "^6.10.2",
+        "lodash": "^4.17.14",
+        "slice-ansi": "^2.1.0",
+        "string-width": "^3.0.0"
+      },
+      "dependencies": {
+        "ajv": {
+          "version": "6.12.0",
+          "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.0.tgz",
+          "integrity": "sha512-D6gFiFA0RRLyUbvijN74DWAjXSFxWKaWP7mldxkVhyhAV3+SWA9HEJPHQ2c9soIeTFJqcSdFDGFgdqs1iUU2Hw==",
+          "dev": true,
+          "requires": {
+            "fast-deep-equal": "^3.1.1",
+            "fast-json-stable-stringify": "^2.0.0",
+            "json-schema-traverse": "^0.4.1",
+            "uri-js": "^4.2.2"
+          }
+        },
+        "emoji-regex": {
+          "version": "7.0.3",
+          "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz",
+          "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==",
+          "dev": true
+        },
+        "fast-deep-equal": {
+          "version": "3.1.1",
+          "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.1.tgz",
+          "integrity": "sha512-8UEa58QDLauDNfpbrX55Q9jrGHThw2ZMdOky5Gl1CDtVeJDPVrG4Jxx1N8jw2gkWaff5UUuX1KJd+9zGe2B+ZA==",
+          "dev": true
+        },
+        "is-fullwidth-code-point": {
+          "version": "2.0.0",
+          "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz",
+          "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=",
+          "dev": true
+        },
+        "json-schema-traverse": {
+          "version": "0.4.1",
+          "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz",
+          "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==",
+          "dev": true
+        },
+        "lodash": {
+          "version": "4.17.15",
+          "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.15.tgz",
+          "integrity": "sha512-8xOcRHvCjnocdS5cpwXQXVzmmh5e5+saE2QGoeQmbKmRS6J3VQppPOIt0MnmE+4xlZoumy0GPG0D0MVIQbNA1A==",
+          "dev": true
+        },
+        "string-width": {
+          "version": "3.1.0",
+          "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz",
+          "integrity": "sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w==",
+          "dev": true,
+          "requires": {
+            "emoji-regex": "^7.0.1",
+            "is-fullwidth-code-point": "^2.0.0",
+            "strip-ansi": "^5.1.0"
+          }
+        }
+      }
+    },
+    "temp-dir": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/temp-dir/-/temp-dir-2.0.0.tgz",
+      "integrity": "sha512-aoBAniQmmwtcKp/7BzsH8Cxzv8OL736p7v1ihGb5e9DJ9kTwGWHrQrVB5+lfVDzfGrdRzXch+ig7LHaY1JTOrg=="
+    },
+    "tempfile": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/tempfile/-/tempfile-3.0.0.tgz",
+      "integrity": "sha512-uNFCg478XovRi85iD42egu+eSFUmmka750Jy7L5tfHI5hQKKtbPnxaSaXAbBqCDYrw3wx4tXjKwci4/QmsZJxw==",
+      "requires": {
+        "temp-dir": "^2.0.0",
+        "uuid": "^3.3.2"
+      }
+    },
+    "tempy": {
+      "version": "0.5.0",
+      "resolved": "https://registry.npmjs.org/tempy/-/tempy-0.5.0.tgz",
+      "integrity": "sha512-VEY96x7gbIRfsxqsafy2l5yVxxp3PhwAGoWMyC2D2Zt5DmEv+2tGiPOrquNRpf21hhGnKLVEsuqleqiZmKG/qw==",
+      "requires": {
+        "is-stream": "^2.0.0",
+        "temp-dir": "^2.0.0",
+        "type-fest": "^0.12.0",
+        "unique-string": "^2.0.0"
+      },
+      "dependencies": {
+        "type-fest": {
+          "version": "0.12.0",
+          "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.12.0.tgz",
+          "integrity": "sha512-53RyidyjvkGpnWPMF9bQgFtWp+Sl8O2Rp13VavmJgfAP9WWG6q6TkrKU8iyJdnwnfgHI6k2hTlgqH4aSdjoTbg=="
+        }
+      }
+    },
+    "text-encoding": {
+      "version": "0.6.4",
+      "resolved": "https://registry.npmjs.org/text-encoding/-/text-encoding-0.6.4.tgz",
+      "integrity": "sha1-45mpgiV6J22uQou5KEXLcb3CbRk=",
+      "dev": true
+    },
+    "text-table": {
+      "version": "0.2.0",
+      "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz",
+      "integrity": "sha1-f17oI66AUgfACvLfSoTsP8+lcLQ=",
+      "dev": true
+    },
+    "throttleit": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/throttleit/-/throttleit-1.0.0.tgz",
+      "integrity": "sha1-nnhYNtr0Z0MUWlmEtiaNgoUorGw=",
+      "dev": true
+    },
+    "through": {
+      "version": "2.3.8",
+      "resolved": "https://registry.npmjs.org/through/-/through-2.3.8.tgz",
+      "integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=",
+      "dev": true
+    },
+    "tmp": {
+      "version": "0.0.33",
+      "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.33.tgz",
+      "integrity": "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==",
+      "dev": true,
+      "requires": {
+        "os-tmpdir": "~1.0.2"
+      }
+    },
+    "to-fast-properties": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz",
+      "integrity": "sha1-3F5pjL0HkmW8c+A3doGk5Og/YW4="
+    },
+    "to-object-path": {
+      "version": "0.3.0",
+      "resolved": "https://registry.npmjs.org/to-object-path/-/to-object-path-0.3.0.tgz",
+      "integrity": "sha1-KXWIt7Dn4KwI4E5nL4XB9JmeF68=",
+      "dev": true,
+      "requires": {
+        "kind-of": "^3.0.2"
+      },
+      "dependencies": {
+        "kind-of": {
+          "version": "3.2.2",
+          "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+          "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+          "dev": true,
+          "requires": {
+            "is-buffer": "^1.1.5"
+          }
+        }
+      }
+    },
+    "to-regex": {
+      "version": "3.0.2",
+      "resolved": "https://registry.npmjs.org/to-regex/-/to-regex-3.0.2.tgz",
+      "integrity": "sha512-FWtleNAtZ/Ki2qtqej2CXTOayOH9bHDQF+Q48VpWyDXjbYxA4Yz8iDB31zXOBUlOHHKidDbqGVrTUvQMPmBGBw==",
+      "dev": true,
+      "requires": {
+        "define-property": "^2.0.2",
+        "extend-shallow": "^3.0.2",
+        "regex-not": "^1.0.2",
+        "safe-regex": "^1.1.0"
+      }
+    },
+    "to-regex-range": {
+      "version": "2.1.1",
+      "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-2.1.1.tgz",
+      "integrity": "sha1-fIDBe53+vlmeJzZ+DU3VWQFB2zg=",
+      "dev": true,
+      "requires": {
+        "is-number": "^3.0.0",
+        "repeat-string": "^1.6.1"
+      }
+    },
+    "tough-cookie": {
+      "version": "2.3.4",
+      "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.3.4.tgz",
+      "integrity": "sha512-TZ6TTfI5NtZnuyy/Kecv+CnoROnyXn2DN97LontgQpCwsX2XyLYCC0ENhYkehSOwAp8rTQKc/NUIF7BkQ5rKLA==",
+      "dev": true,
+      "requires": {
+        "punycode": "^1.4.1"
+      }
+    },
+    "trim-right": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/trim-right/-/trim-right-1.0.1.tgz",
+      "integrity": "sha1-yy4SAwZ+DI3h9hQJS5/kVwTqYAM="
+    },
+    "tslib": {
+      "version": "1.11.1",
+      "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.11.1.tgz",
+      "integrity": "sha512-aZW88SY8kQbU7gpV19lN24LtXh/yD4ZZg6qieAJDDg+YBsJcSmLGK9QpnUjAKVG/xefmvJGd1WUmfpT/g6AJGA==",
+      "dev": true
+    },
+    "tunnel-agent": {
+      "version": "0.6.0",
+      "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz",
+      "integrity": "sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0=",
+      "dev": true,
+      "requires": {
+        "safe-buffer": "^5.0.1"
+      }
+    },
+    "tweetnacl": {
+      "version": "0.14.5",
+      "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz",
+      "integrity": "sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q=",
+      "dev": true
+    },
+    "type-check": {
+      "version": "0.3.2",
+      "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.3.2.tgz",
+      "integrity": "sha1-WITKtRLPHTVeP7eE8wgEsrUg23I=",
+      "dev": true,
+      "requires": {
+        "prelude-ls": "~1.1.2"
+      }
+    },
+    "type-detect": {
+      "version": "4.0.3",
+      "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.3.tgz",
+      "integrity": "sha1-Dj8mcLRAmbC0bChNE2p+9Jx0wuo=",
+      "dev": true
+    },
+    "type-fest": {
+      "version": "0.8.1",
+      "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz",
+      "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==",
+      "dev": true
+    },
+    "uid2": {
+      "version": "0.0.3",
+      "resolved": "https://registry.npmjs.org/uid2/-/uid2-0.0.3.tgz",
+      "integrity": "sha1-SDEm4Rd03y9xuLY53NeZw3YWK4I=",
+      "dev": true
+    },
+    "union-value": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/union-value/-/union-value-1.0.0.tgz",
+      "integrity": "sha1-XHHDTLW61dzr4+oM0IIHulqhrqQ=",
+      "dev": true,
+      "requires": {
+        "arr-union": "^3.1.0",
+        "get-value": "^2.0.6",
+        "is-extendable": "^0.1.1",
+        "set-value": "^0.4.3"
+      },
+      "dependencies": {
+        "extend-shallow": {
+          "version": "2.0.1",
+          "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+          "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
+          "dev": true,
+          "requires": {
+            "is-extendable": "^0.1.0"
+          }
+        },
+        "set-value": {
+          "version": "0.4.3",
+          "resolved": "https://registry.npmjs.org/set-value/-/set-value-0.4.3.tgz",
+          "integrity": "sha1-fbCPnT0i3H945Trzw79GZuzfzPE=",
+          "dev": true,
+          "requires": {
+            "extend-shallow": "^2.0.1",
+            "is-extendable": "^0.1.1",
+            "is-plain-object": "^2.0.1",
+            "to-object-path": "^0.3.0"
+          }
+        }
+      }
+    },
+    "unique-string": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/unique-string/-/unique-string-2.0.0.tgz",
+      "integrity": "sha512-uNaeirEPvpZWSgzwsPGtU2zVSTrn/8L5q/IexZmH0eH6SA73CmAA5U4GwORTxQAZs95TAXLNqeLoPPNO5gZfWg==",
+      "requires": {
+        "crypto-random-string": "^2.0.0"
+      }
+    },
+    "unique-temp-dir": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/unique-temp-dir/-/unique-temp-dir-1.0.0.tgz",
+      "integrity": "sha1-bc6VsmgcoAPuv7MEpBX5y6vMU4U=",
+      "dev": true,
+      "requires": {
+        "mkdirp": "^0.5.1",
+        "os-tmpdir": "^1.0.1",
+        "uid2": "0.0.3"
+      }
+    },
+    "universalify": {
+      "version": "0.1.2",
+      "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz",
+      "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==",
+      "dev": true
+    },
+    "unset-value": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/unset-value/-/unset-value-1.0.0.tgz",
+      "integrity": "sha1-g3aHP30jNRef+x5vw6jtDfyKtVk=",
+      "dev": true,
+      "requires": {
+        "has-value": "^0.3.1",
+        "isobject": "^3.0.0"
+      },
+      "dependencies": {
+        "has-value": {
+          "version": "0.3.1",
+          "resolved": "https://registry.npmjs.org/has-value/-/has-value-0.3.1.tgz",
+          "integrity": "sha1-ex9YutpiyoJ+wKIHgCVlSEWZXh8=",
+          "dev": true,
+          "requires": {
+            "get-value": "^2.0.3",
+            "has-values": "^0.1.4",
+            "isobject": "^2.0.0"
+          },
+          "dependencies": {
+            "isobject": {
+              "version": "2.1.0",
+              "resolved": "https://registry.npmjs.org/isobject/-/isobject-2.1.0.tgz",
+              "integrity": "sha1-8GVWEJaj8dou9GJy+BXIQNh+DIk=",
+              "dev": true,
+              "requires": {
+                "isarray": "1.0.0"
+              }
+            }
+          }
+        },
+        "has-values": {
+          "version": "0.1.4",
+          "resolved": "https://registry.npmjs.org/has-values/-/has-values-0.1.4.tgz",
+          "integrity": "sha1-bWHeldkd/Km5oCCJrThL/49it3E=",
+          "dev": true
+        },
+        "isarray": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
+          "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=",
+          "dev": true
+        }
+      }
+    },
+    "uri-js": {
+      "version": "4.2.2",
+      "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.2.2.tgz",
+      "integrity": "sha512-KY9Frmirql91X2Qgjry0Wd4Y+YTdrdZheS8TFwvkbLWf/G5KNJDCh6pKL5OZctEW4+0Baa5idK2ZQuELRwPznQ==",
+      "dev": true,
+      "requires": {
+        "punycode": "^2.1.0"
+      },
+      "dependencies": {
+        "punycode": {
+          "version": "2.1.1",
+          "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz",
+          "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==",
+          "dev": true
+        }
+      }
+    },
+    "urix": {
+      "version": "0.1.0",
+      "resolved": "https://registry.npmjs.org/urix/-/urix-0.1.0.tgz",
+      "integrity": "sha1-2pN/emLiH+wf0Y1Js1wpNQZ6bHI=",
+      "dev": true
+    },
+    "use": {
+      "version": "3.1.1",
+      "resolved": "https://registry.npmjs.org/use/-/use-3.1.1.tgz",
+      "integrity": "sha512-cwESVXlO3url9YWlFW/TA9cshCEhtu7IKJ/p5soJ/gGpj7vbvFrAY/eIioQ6Dw23KjZhYgiIo8HOs1nQ2vr/oQ==",
+      "dev": true
+    },
+    "util-deprecate": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
+      "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=",
+      "dev": true
+    },
+    "uuid": {
+      "version": "3.3.2",
+      "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.3.2.tgz",
+      "integrity": "sha512-yXJmeNaw3DnnKAOKJE51sL/ZaYfWJRl1pK9dr19YFCu0ObS231AB1/LbqTKRAQ5kw8A90rA6fr4riOUpTZvQZA=="
+    },
+    "v8-compile-cache": {
+      "version": "2.1.0",
+      "resolved": "https://registry.npmjs.org/v8-compile-cache/-/v8-compile-cache-2.1.0.tgz",
+      "integrity": "sha512-usZBT3PW+LOjM25wbqIlZwPeJV+3OSz3M1k1Ws8snlW39dZyYL9lOGC5FgPVHfk0jKmjiDV8Z0mIbVQPiwFs7g==",
+      "dev": true
+    },
+    "verror": {
+      "version": "1.10.0",
+      "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz",
+      "integrity": "sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA=",
+      "dev": true,
+      "requires": {
+        "assert-plus": "^1.0.0",
+        "core-util-is": "1.0.2",
+        "extsprintf": "^1.2.0"
+      }
+    },
+    "which": {
+      "version": "1.3.1",
+      "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz",
+      "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==",
+      "dev": true,
+      "requires": {
+        "isexe": "^2.0.0"
+      }
+    },
+    "word-wrap": {
+      "version": "1.2.3",
+      "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.3.tgz",
+      "integrity": "sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==",
+      "dev": true
+    },
+    "wordwrap": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz",
+      "integrity": "sha1-J1hIEIkUVqQXHI0CJkQa3pDLyus=",
+      "dev": true
+    },
+    "wrappy": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
+      "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=",
+      "dev": true
+    },
+    "write": {
+      "version": "1.0.3",
+      "resolved": "https://registry.npmjs.org/write/-/write-1.0.3.tgz",
+      "integrity": "sha512-/lg70HAjtkUgWPVZhZcm+T4hkL8Zbtp1nFNOn3lRrxnlv50SRBv7cR7RqR+GMsd3hUXy9hWBo4CHTbFTcOYwig==",
+      "dev": true,
+      "requires": {
+        "mkdirp": "^0.5.1"
+      }
+    }
+  }
+}
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/package.json b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/package.json
new file mode 100644
index 0000000..5a7796e
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/package.json
@@ -0,0 +1,33 @@
+{
+  "name": "ochang_js_fuzzer",
+  "version": "1.0.0",
+  "description": "",
+  "main": "run.js",
+  "scripts": {
+    "test": "APP_NAME=d8 mocha"
+  },
+  "bin": "run.js",
+  "author": "ochang@google.com",
+  "license": "ISC",
+  "dependencies": {
+    "@babel/generator": "^7.1.3",
+    "@babel/template": "^7.1.2",
+    "@babel/traverse": "^7.1.4",
+    "@babel/types": "^7.1.3",
+    "@babel/parser": "^7.1.3",
+    "commander": "^2.11.0",
+    "globals": "^10.1.0",
+    "tempfile": "^3.0.0",
+    "tempy": "^0.5.0"
+  },
+  "devDependencies": {
+    "eslint": "^6.8.0",
+    "mocha": "^3.5.3",
+    "pkg": "^4.3.4",
+    "prettier": "2.0.5",
+    "sinon": "^4.0.0"
+  },
+  "pkg": {
+    "assets": "resources/**/*"
+  }
+}
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/package.sh b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/package.sh
new file mode 100755
index 0000000..61ac465
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/package.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+# Copyright 2020 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+
+OS="linux"
+OS_LABEL="Linux"
+SUFFIX=""
+if [[ -n "$1" && $1 == "win" ]]; then
+  OS="win"
+  OS_LABEL="Windows"
+  SUFFIX=".exe"
+elif [[ -n "$1" && $1 == "macos" ]]; then
+  OS="macos"
+  OS_LABEL="MacOS"
+fi
+
+echo "Building and packaging for $OS_LABEL..."
+(set -x; $DIR/node_modules/.bin/pkg -t node10-$OS-x64 $DIR)
+
+rm -rf $DIR/output > /dev/null 2>&1 || true
+rm $DIR/output.zip > /dev/null 2>&1 || true
+
+mkdir $DIR/output
+cd $DIR/output
+ln -s ../db db
+ln -s ../ochang_js_fuzzer$SUFFIX run$SUFFIX
+ln -s ../foozzie_launcher.py foozzie_launcher.py
+echo "Creating $DIR/output.zip"
+(set -x; zip -r $DIR/output.zip * > /dev/null)
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/random.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/random.js
new file mode 100644
index 0000000..f6d73b6
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/random.js
@@ -0,0 +1,113 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Random helpers.
+ */
+
+'use strict';
+
+const assert = require('assert');
+
+function randInt(min, max) {
+  return Math.floor(Math.random() * (max - min + 1)) + min;
+}
+
+function choose(probability) {
+  return Math.random() < probability;
+}
+
+function random() {
+  return Math.random();
+}
+
+function uniform(min, max) {
+  return Math.random() * (max - min) + min;
+}
+
+function sample(iterable, count) {
+  const result = new Array(count);
+  let index = 0;
+
+  for (const item of iterable) {
+    if (index < count) {
+      result[index] = item;
+    } else {
+      const randIndex = randInt(0, index);
+      if (randIndex < count) {
+        result[randIndex] = item;
+      }
+    }
+
+    index++;
+  }
+
+  if (index < count) {
+    // Not enough items.
+    result.length = index;
+  }
+
+  return result;
+}
+
+function swap(array, p1, p2) {
+  [array[p1], array[p2]] = [array[p2], array[p1]];
+}
+
+/**
+ * Returns "count" elements, randomly selected from "highProbArray" and
+ * "lowProbArray". Elements from highProbArray have a "factor" times
+ * higher chance to be chosen. As a side effect, this swaps the chosen
+ * elements to the end of the respective input arrays. The complexity is
+ * O(count).
+ */
+function twoBucketSample(lowProbArray, highProbArray, factor, count) {
+  // Track number of available elements for choosing.
+  let low = lowProbArray.length;
+  let high = highProbArray.length;
+  assert(low + high >= count);
+  const result = [];
+  for (let i = 0; i < count; i++) {
+    // Map a random number to the summarized indices of both arrays. Give
+    // highProbArray elements a "factor" times higher probability.
+    const p = random();
+    const index = Math.floor(p * (high * factor + low));
+    if (index < low) {
+      // If the index is in the low part, draw the element and discard it.
+      result.push(lowProbArray[index]);
+      swap(lowProbArray, index, --low);
+    } else {
+      // Same as above but for a highProbArray element. The index is first
+      // mapped back to the array's range.
+      const highIndex = Math.floor((index - low) / factor);
+      result.push(highProbArray[highIndex]);
+      swap(highProbArray, highIndex, --high);
+    }
+  }
+  return result;
+}
+
+function single(array) {
+  return array[randInt(0, array.length - 1)];
+}
+
+function shuffle(array) {
+  for (let i = 0; i < array.length - 1; i++) {
+    const j = randInt(i, array.length - 1);
+    swap(array, i, j);
+  }
+
+  return array;
+}
+
+module.exports = {
+  choose: choose,
+  randInt: randInt,
+  random: random,
+  sample: sample,
+  shuffle: shuffle,
+  single: single,
+  twoBucketSample: twoBucketSample,
+  uniform: uniform,
+}
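
The doc comment above twoBucketSample describes the weighted draw; below is a minimal usage sketch (the arrays, factor and count are made-up illustration values, and the snippet assumes it lives next to random.js):

const random = require('./random.js');

// Two candidate pools; elements of highProb are 5x as likely to be picked.
const lowProb = ['a', 'b', 'c', 'd'];
const highProb = ['x', 'y'];

// Draws 3 elements in O(3). As a side effect the chosen elements are swapped
// out of the "available" range of their input array, so no element is
// returned twice.
const picks = random.twoBucketSample(lowProb, highProb, 5, 3);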
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/resources/differential_fuzz_chakra.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/resources/differential_fuzz_chakra.js
new file mode 100644
index 0000000..250d0a3
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/resources/differential_fuzz_chakra.js
@@ -0,0 +1,17 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+// Adjust chakra behavior for differential fuzzing.
+
+this.WScript = new Proxy({}, {
+  get(target, name) {
+    switch (name) {
+      case 'Echo':
+        return __prettyPrintExtra;
+      default:
+        return {};
+    }
+  }
+});
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/resources/differential_fuzz_jstest.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/resources/differential_fuzz_jstest.js
new file mode 100644
index 0000000..5c4885c
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/resources/differential_fuzz_jstest.js
@@ -0,0 +1,11 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function debug(msg) {
+  __prettyPrintExtra(msg);
+}
+
+function shouldBe(_a) {
+  __prettyPrintExtra((typeof _a == "function" ? _a() : eval(_a)));
+}
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/resources/differential_fuzz_library.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/resources/differential_fuzz_library.js
new file mode 100644
index 0000000..37b95d1
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/resources/differential_fuzz_library.js
@@ -0,0 +1,122 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+// Helpers for printing in correctness fuzzing.
+
+// Global helper functions for printing.
+var __prettyPrint;
+var __prettyPrintExtra;
+
+// Track caught exceptions.
+var __caught = 0;
+
+// Track a hash of all printed values - printing is cut off after a
+// certain size.
+var __hash = 0;
+
+(function() {
+  const charCodeAt = String.prototype.charCodeAt;
+  const join = Array.prototype.join;
+  const map = Array.prototype.map;
+  const substring = String.prototype.substring;
+  const toString = Object.prototype.toString;
+
+  // Same as in mjsunit.js.
+  const classOf = function(object) {
+    // Argument must not be null or undefined.
+    const string = toString.call(object);
+    // String has format [object <ClassName>].
+    return substring.call(string, 8, string.length - 1);
+  };
+
+  // For standard cases use original prettyPrinted from mjsunit.
+  const origPrettyPrinted = prettyPrinted;
+
+  // Override prettyPrinted with a version that also recursively prints
+  // objects and arrays up to a depth of 4. We don't track cycles, but the
+  // depth limit cuts off any cyclic structures.
+  prettyPrinted = function prettyPrinted(value, depth=4) {
+    if (depth <= 0) {
+      return "...";
+    }
+    switch (typeof value) {
+      case "object":
+        if (value === null) return "null";
+        switch (classOf(value)) {
+          case "Array":
+            return prettyPrintedArray(value, depth);
+          case "Object":
+            return prettyPrintedObject(value, depth);
+        }
+    }
+    // Fall through to original version for all other types.
+    return origPrettyPrinted(value);
+  }
+
+  // Helper for pretty array with depth.
+  function prettyPrintedArray(array, depth) {
+    const result = map.call(array, (value, index, array) => {
+      if (value === undefined && !(index in array)) return "";
+      return prettyPrinted(value, depth - 1);
+    });
+    return `[${join.call(result, ", ")}]`;
+  }
+
+  // Helper for pretty objects with depth.
+  function prettyPrintedObject(object, depth) {
+    const keys = Object.keys(object);
+    const prettyValues = map.call(keys, (key) => {
+      return `${key}: ${prettyPrinted(object[key], depth - 1)}`;
+    });
+    const content = join.call(prettyValues, ", ");
+    return `${object.constructor.name || "Object"}{${content}}`;
+  }
+
+  // Helper for calculating a hash code of a string.
+  function hashCode(str) {
+    let hash = 0;
+    if (str.length == 0) {
+      return hash;
+    }
+    for (let i = 0; i < str.length; i++) {
+      const char = charCodeAt.call(str, i);
+      hash = ((hash << 5) - hash) + char;
+      hash = hash & hash;
+    }
+    return hash;
+  }
+
+  // Upper limit for extra printing. Once reached, extra values are no
+  // longer printed, but they are still tracked via __hash.
+  let maxExtraPrinting = 100;
+
+  // Helper for pretty printing.
+  __prettyPrint = function(value, extra=false) {
+    let str = prettyPrinted(value);
+
+    // Fold the contents of the full string into __hash so that
+    // differences are still tracked even when we don't print.
+    const hash = hashCode(str);
+    __hash = hashCode(hash + __hash.toString());
+
+    if (extra && maxExtraPrinting-- <= 0) {
+      return;
+    }
+
+    // Cut off long strings to prevent overloading I/O. We still track
+    // the hash of the full string.
+    if (str.length > 64) {
+      const head = substring.call(str, 0, 54);
+      const tail = substring.call(str, str.length - 10, str.length - 1);
+      str = `${head}[...]${tail}`;
+    }
+
+    print(str);
+  };
+
+  __prettyPrintExtra = function (value) {
+    __prettyPrint(value, true);
+  }
+})();
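
A rough illustration of the printing behavior defined above (it assumes a shell that provides a global print(), e.g. d8, and that prettyPrinted came from mjsunit.js or the substitute in differential_fuzz_mjsunit.js):

__prettyPrint({a: [1, 2, {b: 3}]});
// Prints something like: Object{a: [1, 2, Object{b: 3}]}
// The hash of the full output string is folded into __hash first, so values
// that are later truncated or suppressed by maxExtraPrinting still influence
// the final hash.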
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/resources/differential_fuzz_mjsunit.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/resources/differential_fuzz_mjsunit.js
new file mode 100644
index 0000000..901299b
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/resources/differential_fuzz_mjsunit.js
@@ -0,0 +1,8 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+// Substitute for mjsunit. We reuse prettyPrinted from mjsunit, but only if
+// it is loaded. If not, we use this substitute instead.
+let prettyPrinted = value => value;
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/resources/differential_fuzz_suppressions.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/resources/differential_fuzz_suppressions.js
new file mode 100644
index 0000000..b964f78
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/resources/differential_fuzz_suppressions.js
@@ -0,0 +1,12 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+// Don't breach stack limit in differential fuzzing as it leads to
+// early bailout.
+runNearStackLimit = function(f) {
+  try {
+    f();
+  } catch (e) {}
+};
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/resources/differential_fuzz_v8.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/resources/differential_fuzz_v8.js
new file mode 100644
index 0000000..042f1ce
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/resources/differential_fuzz_v8.js
@@ -0,0 +1,27 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+// Adjust mjsunit behavior for differential fuzzing.
+
+// We're not interested in stack traces.
+MjsUnitAssertionError = () => {};
+
+// Do more printing in assertions for more correctness coverage.
+failWithMessage = message => { __prettyPrint(message); };
+assertSame = (expected, found, name_opt) => { __prettyPrint(found); };
+assertNotSame = (expected, found, name_opt) => { __prettyPrint(found); };
+assertEquals = (expected, found, name_opt) => { __prettyPrint(found); };
+assertNotEquals = (expected, found, name_opt) => { __prettyPrint(found); };
+assertNull = (value, name_opt) => { __prettyPrint(value); };
+assertNotNull = (value, name_opt) => { __prettyPrint(value); };
+
+// Suppress optimization status as it leads to false positives.
+assertUnoptimized = () => {};
+assertOptimized = () => {};
+isNeverOptimize = () => {};
+isAlwaysOptimize = () => {};
+isInterpreted = () => {};
+isOptimized = () => {};
+isTurboFanned = () => {};
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/resources/fuzz_library.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/resources/fuzz_library.js
new file mode 100644
index 0000000..343b4d1
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/resources/fuzz_library.js
@@ -0,0 +1,116 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Slightly modified variants from http://code.fitness/post/2016/01/javascript-enumerate-methods.html.
+function __isPropertyOfType(obj, name, type) {
+  let desc;
+  try {
+    desc = Object.getOwnPropertyDescriptor(obj, name);
+  } catch(e) {
+    return false;
+  }
+
+  if (!desc)
+    return false;
+
+  return typeof type === 'undefined' || typeof desc.value === type;
+}
+
+function __getProperties(obj, type) {
+  if (typeof obj === "undefined" || obj === null)
+    return [];
+
+  let properties = [];
+  for (let name of Object.getOwnPropertyNames(obj)) {
+    if (__isPropertyOfType(obj, name, type))
+      properties.push(name);
+  }
+
+  let proto = Object.getPrototypeOf(obj);
+  while (proto && proto != Object.prototype) {
+    Object.getOwnPropertyNames(proto)
+      .forEach (name => {
+        if (name !== 'constructor') {
+          if (__isPropertyOfType(proto, name, type))
+            properties.push(name);
+        }
+      });
+    proto = Object.getPrototypeOf(proto);
+  }
+  return properties;
+}
+
+function* __getObjects(root = this, level = 0) {
+    if (level > 4)
+      return;
+
+    let obj_names = __getProperties(root, 'object');
+    for (let obj_name of obj_names) {
+      let obj = root[obj_name];
+      if (obj === root)
+        continue;
+
+      yield obj;
+      yield* __getObjects(obj, level + 1);
+    }
+}
+
+function __getRandomObject(seed) {
+  let objects = [];
+  for (let obj of __getObjects()) {
+    objects.push(obj);
+  }
+
+  return objects[seed % objects.length];
+}
+
+function __getRandomProperty(obj, seed) {
+  let properties = __getProperties(obj);
+  if (!properties.length)
+    return undefined;
+
+  return properties[seed % properties.length];
+}
+
+function __callRandomFunction(obj, seed, ...args)
+{
+  let functions = __getProperties(obj, 'function');
+  if (!functions.length)
+    return;
+
+  let random_function = functions[seed % functions.length];
+  try {
+    obj[random_function](...args);
+  } catch(e) { }
+}
+
+function runNearStackLimit(f) {
+  function t() {
+    try {
+      return t();
+    } catch (e) {
+      return f();
+    }
+  };
+  try {
+    return t();
+  } catch (e) {}
+}
+
+// Limit number of times we cause major GCs in tests to reduce hangs
+// when called within larger loops.
+let __callGC;
+(function() {
+  let countGC = 0;
+  __callGC = function() {
+    if (countGC++ < 50) {
+      gc();
+    }
+  };
+})();
+
+// Neuter common test functions.
+try { this.failWithMessage = nop; } catch(e) { }
+try { this.triggerAssertFalse = nop; } catch(e) { }
+try { this.quit = nop; } catch(e) { }
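
A sketch of how mutated test cases might call the helpers above (the seed values are arbitrary examples):

const obj = __getRandomObject(12345);        // deterministic pick for a fixed seed
const prop = __getRandomProperty(obj, 678);  // may be undefined for empty objects
__callRandomFunction(obj, 91011, 0, 'arg');  // any thrown exception is swallowed
__callGC();                                  // calls gc(), capped at 50 invocations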
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/resources/jstest_stubs.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/resources/jstest_stubs.js
new file mode 100644
index 0000000..b5921e0
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/resources/jstest_stubs.js
@@ -0,0 +1,41 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Minimally stub out methods from JSTest's standalone-pre.js.
+function description(msg) {}
+function debug(msg) {}
+
+function shouldBe(_a) {
+  print((typeof _a == "function" ? _a() : eval(_a)));
+}
+
+function shouldBeTrue(_a) { shouldBe(_a); }
+function shouldBeFalse(_a) { shouldBe(_a); }
+function shouldBeNaN(_a) { shouldBe(_a); }
+function shouldBeNull(_a) { shouldBe(_a); }
+function shouldNotThrow(_a) { shouldBe(_a); }
+function shouldThrow(_a) { shouldBe(_a); }
+
+function noInline() {}
+function finishJSTest() {}
+
+// Stub out $vm.
+try {
+  $vm;
+} catch(e) {
+  const handler = {
+    get: function(x, prop) {
+      if (prop == Symbol.toPrimitive) {
+        return function() { return undefined; };
+      }
+      return dummy;
+    },
+  };
+  const dummy = new Proxy(function() { return dummy; }, handler);
+  this.$vm = dummy;
+}
+
+// Other functions.
+function ensureArrayStorage() {}
+function transferArrayBuffer() {}
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/resources/stubs.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/resources/stubs.js
new file mode 100644
index 0000000..37d1c62
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/resources/stubs.js
@@ -0,0 +1,36 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Helper neuter function.
+function nop() { return false; }
+
+// Stubs for non-standard functions.
+try { gc; } catch(e) {
+  this.gc = function () {
+    for (let i = 0; i < 10000; i++) {
+      let s = new String("AAAA" + Math.random());
+    }
+  }
+}
+try { uneval; } catch(e) { this.uneval = this.nop; }
+
+try {
+  // For Chakra tests.
+  WScript;
+} catch(e) {
+  this.WScript = new Proxy({}, {
+    get(target, name) {
+      switch (name) {
+        case 'Echo':
+          return print;
+        default:
+          return {};
+      }
+
+    }
+  });
+}
+
+try { this.alert = console.log; } catch(e) { }
+try { this.print = console.log; } catch(e) { }
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/run.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/run.js
new file mode 100644
index 0000000..3712172
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/run.js
@@ -0,0 +1,239 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Main driver that generates mutated fuzz test cases.
+ */
+
+'use strict';
+
+const assert = require('assert');
+const fs = require('fs');
+const path = require('path');
+
+const program = require('commander');
+
+const corpus = require('./corpus.js');
+const differentialScriptMutator = require('./differential_script_mutator.js');
+const random = require('./random.js');
+const scriptMutator = require('./script_mutator.js');
+const sourceHelpers = require('./source_helpers.js');
+
+// Maximum number of test inputs to use for one fuzz test.
+const MAX_TEST_INPUTS_PER_TEST = 10;
+
+// Base implementations for default or differential fuzzing.
+const SCRIPT_MUTATORS = {
+  default: scriptMutator.ScriptMutator,
+  foozzie: differentialScriptMutator.DifferentialScriptMutator,
+};
+
+function getRandomInputs(primaryCorpus, secondaryCorpora, count) {
+  count = random.randInt(2, count);
+
+  // Choose 40%-80% of inputs from primary corpus.
+  const primaryCount = Math.floor(random.uniform(0.4, 0.8) * count);
+  count -= primaryCount;
+
+  let inputs = primaryCorpus.getRandomTestcases(primaryCount);
+
+  // Split remainder equally between the secondary corpora.
+  const secondaryCount = Math.floor(count / secondaryCorpora.length);
+
+  for (let i = 0; i < secondaryCorpora.length; i++) {
+    let currentCount = secondaryCount;
+    if (i == secondaryCorpora.length - 1) {
+      // Last one takes the remainder.
+      currentCount = count;
+    }
+
+    count -= currentCount;
+    if (currentCount) {
+      inputs = inputs.concat(
+          secondaryCorpora[i].getRandomTestcases(currentCount));
+    }
+  }
+
+  return random.shuffle(inputs);
+}
+
+function collect(value, total) {
+  total.push(value);
+  return total;
+}
+
+function overrideSettings(settings, settingOverrides) {
+  for (const setting of settingOverrides) {
+    const parts = setting.split('=');
+    settings[parts[0]] = parseFloat(parts[1]);
+  }
+}
+
+function* randomInputGen(engine) {
+  const inputDir = path.resolve(program.input_dir);
+
+  const v8Corpus = new corpus.Corpus(inputDir, 'v8');
+  const chakraCorpus = new corpus.Corpus(inputDir, 'chakra');
+  const spiderMonkeyCorpus = new corpus.Corpus(inputDir, 'spidermonkey');
+  const jscCorpus = new corpus.Corpus(inputDir, 'WebKit/JSTests');
+  const crashTestsCorpus = new corpus.Corpus(inputDir, 'CrashTests');
+
+  for (let i = 0; i < program.no_of_files; i++) {
+    let inputs;
+    if (engine === 'V8') {
+      inputs = getRandomInputs(
+          v8Corpus,
+          random.shuffle([chakraCorpus, spiderMonkeyCorpus, jscCorpus,
+                          crashTestsCorpus, v8Corpus]),
+          MAX_TEST_INPUTS_PER_TEST);
+    } else if (engine == 'chakra') {
+      inputs = getRandomInputs(
+          chakraCorpus,
+          random.shuffle([v8Corpus, spiderMonkeyCorpus, jscCorpus,
+                          crashTestsCorpus]),
+          MAX_TEST_INPUTS_PER_TEST);
+    } else if (engine == 'spidermonkey') {
+      inputs = getRandomInputs(
+          spiderMonkeyCorpus,
+          random.shuffle([v8Corpus, chakraCorpus, jscCorpus,
+                          crashTestsCorpus]),
+          MAX_TEST_INPUTS_PER_TEST);
+    } else {
+      inputs = getRandomInputs(
+          jscCorpus,
+          random.shuffle([chakraCorpus, spiderMonkeyCorpus, v8Corpus,
+                          crashTestsCorpus]),
+          MAX_TEST_INPUTS_PER_TEST);
+    }
+
+    if (inputs.length > 0) {
+      yield inputs;
+    }
+  }
+}
+
+function* corpusInputGen() {
+  const inputCorpus = new corpus.Corpus(
+      path.resolve(program.input_dir),
+      program.mutate_corpus,
+      program.extra_strict);
+  for (const input of inputCorpus.getAllTestcases()) {
+    yield [input];
+  }
+}
+
+function* enumerate(iterable) {
+  let i = 0;
+  for (const value of iterable) {
+    yield [i, value];
+    i++;
+  }
+}
+
+function main() {
+  Error.stackTraceLimit = Infinity;
+
+  program
+    .version('0.0.1')
+    .option('-i, --input_dir <path>', 'Input directory.')
+    .option('-o, --output_dir <path>', 'Output directory.')
+    .option('-n, --no_of_files <n>', 'Number of output files.', parseInt)
+    .option('-c, --mutate_corpus <name>', 'Mutate single files in a corpus.')
+    .option('-e, --extra_strict', 'Additionally parse files in strict mode.')
+    .option('-m, --mutate <path>', 'Mutate a file and output results.')
+    .option('-s, --setting [setting]', 'Settings overrides.', collect, [])
+    .option('-v, --verbose', 'More verbose printing.')
+    .option('-z, --zero_settings', 'Zero all settings.')
+    .parse(process.argv);
+
+  const settings = scriptMutator.defaultSettings();
+  if (program.zero_settings) {
+    for (const key of Object.keys(settings)) {
+      settings[key] = 0.0;
+    }
+  }
+
+  if (program.setting.length > 0) {
+    overrideSettings(settings, program.setting);
+  }
+
+  let app_name = process.env.APP_NAME;
+  if (app_name && app_name.endsWith('.exe')) {
+    app_name = app_name.substr(0, app_name.length - 4);
+  }
+
+  if (app_name === 'd8' || app_name === 'v8_foozzie.py') {
+    // V8 supports running the raw d8 executable or the differential fuzzing
+    // harness 'foozzie'.
+    settings.engine = 'V8';
+  } else if (app_name === 'ch') {
+    settings.engine = 'chakra';
+  } else if (app_name === 'js') {
+    settings.engine = 'spidermonkey';
+  } else if (app_name === 'jsc') {
+    settings.engine = 'jsc';
+  } else {
+    console.log('ERROR: Invalid APP_NAME');
+    process.exit(1);
+  }
+
+  const mode = process.env.FUZZ_MODE || 'default';
+  assert(mode in SCRIPT_MUTATORS, `Unknown mode ${mode}`);
+  const mutator = new SCRIPT_MUTATORS[mode](settings);
+
+  if (program.mutate) {
+    const absPath = path.resolve(program.mutate);
+    const baseDir = path.dirname(absPath);
+    const fileName = path.basename(absPath);
+    const input = sourceHelpers.loadSource(
+        baseDir, fileName, program.extra_strict);
+    const mutated = mutator.mutateMultiple([input]);
+    console.log(mutated.code);
+    return;
+  }
+
+  let inputGen;
+
+  if (program.mutate_corpus) {
+    inputGen = corpusInputGen();
+  } else {
+    inputGen = randomInputGen(settings.engine);
+  }
+
+  for (const [i, inputs] of enumerate(inputGen)) {
+    const outputPath = path.join(program.output_dir, 'fuzz-' + i + '.js');
+
+    const start = Date.now();
+    const paths = inputs.map(input => input.relPath);
+
+    try {
+      const mutated = mutator.mutateMultiple(inputs);
+      fs.writeFileSync(outputPath, mutated.code);
+
+      if (settings.engine === 'V8' && mutated.flags && mutated.flags.length > 0) {
+        const flagsPath = path.join(program.output_dir, 'flags-' + i + '.js');
+        fs.writeFileSync(flagsPath, mutated.flags.join(' '));
+      }
+    } catch (e) {
+      if (e.message.startsWith('ENOSPC')) {
+        console.log('ERROR: No space left. Bailing out...');
+        console.log(e);
+        return;
+      }
+      console.log(`ERROR: Exception during mutate: ${paths}`);
+      console.log(e);
+      continue;
+    } finally {
+      if (program.verbose) {
+        const duration = Date.now() - start;
+        console.log(`Mutating ${paths} took ${duration} ms.`);
+      }
+    }
+    if ((i + 1) % 10 == 0) {
+      console.log('Up to ', i + 1);
+    }
+  }
+}
+
+main();
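
A hypothetical invocation based on the commander options registered in main() (paths and counts are placeholders): APP_NAME=d8 node run.js -i /path/to/corpora -o /tmp/output -n 100 -s MUTATE_NUMBERS=0.1. Each -s entry is collected into an array and applied by overrideSettings, which splits on '=' and parses the value with parseFloat; a minimal sketch of that parsing:

const settings = scriptMutator.defaultSettings();
overrideSettings(settings, ['MUTATE_NUMBERS=0.1', 'MUTATE_OBJECTS=0.2']);
// settings.MUTATE_NUMBERS === 0.1 and settings.MUTATE_OBJECTS === 0.2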
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/script_mutator.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/script_mutator.js
new file mode 100644
index 0000000..f0e1fd7
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/script_mutator.js
@@ -0,0 +1,225 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Script mutator.
+ */
+
+'use strict';
+
+const fs = require('fs');
+const path = require('path');
+
+const common = require('./mutators/common.js');
+const db = require('./db.js');
+const sourceHelpers = require('./source_helpers.js');
+
+const { AddTryCatchMutator } = require('./mutators/try_catch.js');
+const { ArrayMutator } = require('./mutators/array_mutator.js');
+const { CrossOverMutator } = require('./mutators/crossover_mutator.js');
+const { ExpressionMutator } = require('./mutators/expression_mutator.js');
+const { FunctionCallMutator } = require('./mutators/function_call_mutator.js');
+const { IdentifierNormalizer } = require('./mutators/normalizer.js');
+const { NumberMutator } = require('./mutators/number_mutator.js');
+const { ObjectMutator } = require('./mutators/object_mutator.js');
+const { VariableMutator } = require('./mutators/variable_mutator.js');
+const { VariableOrObjectMutator } = require('./mutators/variable_or_object_mutation.js');
+
+function defaultSettings() {
+  return {
+    ADD_VAR_OR_OBJ_MUTATIONS: 0.1,
+    DIFF_FUZZ_EXTRA_PRINT: 0.1,
+    DIFF_FUZZ_TRACK_CAUGHT: 0.4,
+    MUTATE_ARRAYS: 0.1,
+    MUTATE_CROSSOVER_INSERT: 0.05,
+    MUTATE_EXPRESSIONS: 0.1,
+    MUTATE_FUNCTION_CALLS: 0.1,
+    MUTATE_NUMBERS: 0.05,
+    MUTATE_OBJECTS: 0.1,
+    MUTATE_VARIABLES: 0.075,
+  };
+}
+
+class Result {
+  constructor(code, flags) {
+    this.code = code;
+    this.flags = flags;
+  }
+}
+
+class ScriptMutator {
+  constructor(settings, db_path=undefined) {
+    // Use process.cwd() to bypass pkg's snapshot filesystem.
+    this.mutateDb = new db.MutateDb(db_path || path.join(process.cwd(), 'db'));
+    this.mutators = [
+      new ArrayMutator(settings),
+      new ObjectMutator(settings),
+      new VariableMutator(settings),
+      new NumberMutator(settings),
+      new CrossOverMutator(settings, this.mutateDb),
+      new ExpressionMutator(settings),
+      new FunctionCallMutator(settings),
+      new VariableOrObjectMutator(settings),
+      new AddTryCatchMutator(settings),
+    ];
+  }
+
+  _addMjsunitIfNeeded(dependencies, input) {
+    if (dependencies.has('mjsunit')) {
+      return;
+    }
+
+    if (!input.absPath.includes('mjsunit')) {
+      return;
+    }
+
+    // Find mjsunit.js
+    let mjsunitPath = input.absPath;
+    while (path.dirname(mjsunitPath) != mjsunitPath &&
+           path.basename(mjsunitPath) != 'mjsunit') {
+      mjsunitPath = path.dirname(mjsunitPath);
+    }
+
+    if (path.basename(mjsunitPath) == 'mjsunit') {
+      mjsunitPath = path.join(mjsunitPath, 'mjsunit.js');
+      dependencies.set('mjsunit', sourceHelpers.loadDependencyAbs(
+          input.baseDir, mjsunitPath));
+      return;
+    }
+
+    console.log('ERROR: Failed to find mjsunit.js');
+  }
+
+  _addSpiderMonkeyShellIfNeeded(dependencies, input) {
+    // Find shell.js files
+    const shellJsPaths = new Array();
+    let currentDir = path.dirname(input.absPath);
+
+    while (path.dirname(currentDir) != currentDir) {
+      const shellJsPath = path.join(currentDir, 'shell.js');
+      if (fs.existsSync(shellJsPath)) {
+         shellJsPaths.push(shellJsPath);
+      }
+
+      if (currentDir == 'spidermonkey') {
+        break;
+      }
+      currentDir = path.dirname(currentDir);
+    }
+
+    // Add shell.js dependencies in reverse to add ones that are higher up in
+    // the directory tree first.
+    for (let i = shellJsPaths.length - 1; i >= 0; i--) {
+      if (!dependencies.has(shellJsPaths[i])) {
+        const dependency = sourceHelpers.loadDependencyAbs(
+            input.baseDir, shellJsPaths[i]);
+        dependencies.set(shellJsPaths[i], dependency);
+      }
+    }
+  }
+
+  _addJSTestStubsIfNeeded(dependencies, input) {
+    if (dependencies.has('jstest_stubs') ||
+        !input.absPath.includes('JSTests')) {
+      return;
+    }
+    dependencies.set(
+        'jstest_stubs', sourceHelpers.loadResource('jstest_stubs.js'));
+  }
+
+  mutate(source) {
+    for (const mutator of this.mutators) {
+      mutator.mutate(source);
+    }
+  }
+
+  // Returns parsed dependencies for inputs.
+  resolveInputDependencies(inputs) {
+    const dependencies = new Map();
+
+    // Resolve test harness files.
+    inputs.forEach(input => {
+      try {
+        // TODO(machenbach): Some harness files contain load expressions
+        // that are not recursively resolved. We already remove them, but we
+        // also need to load the dependencies they point to.
+        this._addJSTestStubsIfNeeded(dependencies, input);
+        this._addMjsunitIfNeeded(dependencies, input);
+        this._addSpiderMonkeyShellIfNeeded(dependencies, input);
+      } catch (e) {
+        console.log(
+            'ERROR: Failed to resolve test harness for', input.relPath);
+        throw e;
+      }
+    });
+
+    // Resolve dependencies loaded within the input files.
+    inputs.forEach(input => {
+      try {
+        input.loadDependencies(dependencies);
+      } catch (e) {
+        console.log(
+            'ERROR: Failed to resolve dependencies for', input.relPath);
+        throw e;
+      }
+    });
+
+    // Map.values() returns values in insertion order.
+    return Array.from(dependencies.values());
+  }
+
+  // Combines input dependencies with fuzzer resources.
+  resolveDependencies(inputs) {
+    const dependencies = this.resolveInputDependencies(inputs);
+
+    // Add stubs for non-standard functions in the beginning.
+    dependencies.unshift(sourceHelpers.loadResource('stubs.js'));
+
+    // Add our fuzzing support helpers. This also overrides some common test
+    // functions from earlier dependencies that cause early bailouts.
+    dependencies.push(sourceHelpers.loadResource('fuzz_library.js'));
+
+    return dependencies;
+  }
+
+  // Normalizes, combines and mutates multiple inputs.
+  mutateInputs(inputs) {
+    const normalizerMutator = new IdentifierNormalizer();
+
+    for (const [index, input] of inputs.entries()) {
+      try {
+        normalizerMutator.mutate(input);
+      } catch (e) {
+        console.log('ERROR: Failed to normalize ', input.relPath);
+        throw e;
+      }
+
+      common.setSourceLoc(input, index, inputs.length);
+    }
+
+    // Combine ASTs into one. This is so that mutations have more context to
+    // cross over content between ASTs (e.g. variables).
+    const combinedSource = common.concatPrograms(inputs);
+    this.mutate(combinedSource);
+
+    return combinedSource;
+  }
+
+  mutateMultiple(inputs) {
+    // High level operation:
+    // 1) Compute dependencies from inputs.
+    // 2) Normalize, combine and mutate inputs.
+    // 3) Generate code with dependency code prepended.
+    const dependencies = this.resolveDependencies(inputs);
+    const combinedSource = this.mutateInputs(inputs);
+    const code = sourceHelpers.generateCode(combinedSource, dependencies);
+    const flags = common.concatFlags(dependencies.concat([combinedSource]));
+    return new Result(code, flags);
+  }
+}
+
+module.exports = {
+  defaultSettings: defaultSettings,
+  ScriptMutator: ScriptMutator,
+};
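
A minimal usage sketch of the mutator above (the corpus path and file name are placeholders; the 'db' directory must contain a mutation database in the format db.MutateDb expects):

const sourceHelpers = require('./source_helpers.js');
const { ScriptMutator, defaultSettings } = require('./script_mutator.js');

const mutator = new ScriptMutator(defaultSettings(), 'db');
const input = sourceHelpers.loadSource('/path/to/corpus', 'v8/test/mjsunit/foo.js');
const result = mutator.mutateMultiple([input]);
// result.code: combined, mutated test case with stubs and harness prepended.
// result.flags: d8 flags collected from the inputs and their dependencies.
console.log(result.code);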
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/source_helpers.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/source_helpers.js
new file mode 100644
index 0000000..2647346
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/source_helpers.js
@@ -0,0 +1,477 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Source loader.
+ */
+
+const fs = require('fs');
+const fsPath = require('path');
+
+const { EOL } = require('os');
+
+const babelGenerator = require('@babel/generator').default;
+const babelTraverse = require('@babel/traverse').default;
+const babelTypes = require('@babel/types');
+const babylon = require('@babel/parser');
+
+const exceptions = require('./exceptions.js');
+
+const SCRIPT = Symbol('SCRIPT');
+const MODULE = Symbol('MODULE');
+
+const V8_BUILTIN_PREFIX = '__V8Builtin';
+const V8_REPLACE_BUILTIN_REGEXP = new RegExp(
+    V8_BUILTIN_PREFIX + '(\\w+)\\(', 'g');
+
+const BABYLON_OPTIONS = {
+    sourceType: 'script',
+    allowReturnOutsideFunction: true,
+    tokens: false,
+    ranges: false,
+    plugins: [
+        'asyncGenerators',
+        'bigInt',
+        'classPrivateMethods',
+        'classPrivateProperties',
+        'classProperties',
+        'doExpressions',
+        'exportDefaultFrom',
+        'nullishCoalescingOperator',
+        'numericSeparator',
+        'objectRestSpread',
+        'optionalCatchBinding',
+        'optionalChaining',
+    ],
+}
+
+function _isV8OrSpiderMonkeyLoad(path) {
+  // 'load' and 'loadRelativeToScript' used by V8 and SpiderMonkey.
+  return (babelTypes.isIdentifier(path.node.callee) &&
+          (path.node.callee.name == 'load' ||
+           path.node.callee.name == 'loadRelativeToScript') &&
+          path.node.arguments.length == 1 &&
+          babelTypes.isStringLiteral(path.node.arguments[0]));
+}
+
+function _isChakraLoad(path) {
+  // 'WScript.LoadScriptFile' used by Chakra.
+  // TODO(ochang): The optional second argument can change semantics ("self",
+  // "samethread", "crossthread" etc).
+  // Investigate whether it still makes sense to include them.
+  return (babelTypes.isMemberExpression(path.node.callee) &&
+          babelTypes.isIdentifier(path.node.callee.property) &&
+          path.node.callee.property.name == 'LoadScriptFile' &&
+          path.node.arguments.length >= 1 &&
+          babelTypes.isStringLiteral(path.node.arguments[0]));
+}
+
+function _findPath(path, caseSensitive=true) {
+  // If the path exists, return the path. Otherwise return null. Used to handle
+  // case insensitive matches for Chakra tests.
+  if (caseSensitive) {
+    return fs.existsSync(path) ? path : null;
+  }
+
+  path = fsPath.normalize(fsPath.resolve(path));
+  const pathComponents = path.split(fsPath.sep);
+  let realPath = fsPath.resolve(fsPath.sep);
+
+  for (let i = 1; i < pathComponents.length; i++) {
+    // For each path component, do a directory listing to see if there is a case
+    // insensitive match.
+    const curListing = fs.readdirSync(realPath);
+    let realComponent = null;
+    for (const component of curListing) {
+      if (i < pathComponents.length - 1 &&
+          !fs.statSync(fsPath.join(realPath, component)).isDirectory()) {
+        continue;
+      }
+
+      if (component.toLowerCase() == pathComponents[i].toLowerCase()) {
+        realComponent = component;
+        break;
+      }
+    }
+
+    if (!realComponent) {
+      return null;
+    }
+
+    realPath = fsPath.join(realPath, realComponent);
+  }
+
+  return realPath;
+}
+
+function _findDependentCodePath(filePath, baseDirectory, caseSensitive=true) {
+  const fullPath = fsPath.join(baseDirectory, filePath);
+
+  const realPath = _findPath(fullPath, caseSensitive);
+  if (realPath) {
+    // Check base directory of current file.
+    return realPath;
+  }
+
+  while (fsPath.dirname(baseDirectory) != baseDirectory) {
+    // Walk up the directory tree.
+    const testPath = fsPath.join(baseDirectory, filePath);
+    const realPath = _findPath(testPath, caseSensitive);
+    if (realPath) {
+      return realPath;
+    }
+
+    baseDirectory = fsPath.dirname(baseDirectory);
+  }
+
+  return null;
+}
+
+/**
+ * Removes V8/SpiderMonkey/Chakra load expressions from a source AST and
+ * returns the resolved paths of the loaded dependencies in an array.
+ *
+ * @param {string} originalFilePath Absolute path to file.
+ * @param {AST} ast Babel AST of the sources.
+ */
+function resolveLoads(originalFilePath, ast) {
+  const dependencies = [];
+
+  babelTraverse(ast, {
+    CallExpression(path) {
+      const isV8OrSpiderMonkeyLoad = _isV8OrSpiderMonkeyLoad(path);
+      const isChakraLoad = _isChakraLoad(path);
+      if (!isV8OrSpiderMonkeyLoad && !isChakraLoad) {
+        return;
+      }
+
+      let loadValue = path.node.arguments[0].extra.rawValue;
+      // Normalize Windows path separators.
+      loadValue = loadValue.replace(/\\/g, fsPath.sep);
+
+      // Remove load call.
+      path.remove();
+
+      const resolvedPath = _findDependentCodePath(
+          loadValue, fsPath.dirname(originalFilePath), !isChakraLoad);
+      if (!resolvedPath) {
+        console.log('ERROR: Could not find dependent path for', loadValue);
+        return;
+      }
+
+      if (exceptions.isTestSkippedAbs(resolvedPath)) {
+        // Dependency is skipped.
+        return;
+      }
+
+      // Add the dependency path.
+      dependencies.push(resolvedPath);
+    }
+  });
+  return dependencies;
+}
+
+function isStrictDirective(directive) {
+  return (directive.value &&
+          babelTypes.isDirectiveLiteral(directive.value) &&
+          directive.value.value === 'use strict');
+}
+
+function replaceV8Builtins(code) {
+  return code.replace(/%(\w+)\(/g, V8_BUILTIN_PREFIX + '$1(');
+}
+
+function restoreV8Builtins(code) {
+  return code.replace(V8_REPLACE_BUILTIN_REGEXP, '%$1(');
+}
+
+function maybeUseStrict(code, useStrict) {
+  if (useStrict) {
+    return `'use strict';${EOL}${EOL}${code}`;
+  }
+  return code;
+}
+
+class Source {
+  constructor(baseDir, relPath, flags, dependentPaths) {
+    this.baseDir = baseDir;
+    this.relPath = relPath;
+    this.flags = flags;
+    this.dependentPaths = dependentPaths;
+    this.sloppy = exceptions.isTestSloppyRel(relPath);
+  }
+
+  get absPath() {
+    return fsPath.join(this.baseDir, this.relPath);
+  }
+
+  /**
+   * Specifies if the source isn't compatible with strict mode.
+   */
+  isSloppy() {
+    return this.sloppy;
+  }
+
+  /**
+   * Specifies if the source has a top-level 'use strict' directive.
+   */
+  isStrict() {
+    throw Error('Not implemented');
+  }
+
+  /**
+   * Generates the code as a string without any top-level 'use strict'
+   * directives. V8 natives that were replaced before parsing are restored.
+   */
+  generateNoStrict() {
+    throw Error('Not implemented');
+  }
+
+  /**
+   * Recursively adds dependencies of this source file.
+   *
+   * @param {Map} dependencies Dependency map to which to add new, parsed
+   *     dependencies unless they are already in the map.
+   * @param {Set} visitedDependencies A set of visited paths to avoid loops.
+   */
+  loadDependencies(dependencies, visitedDependencies) {
+    visitedDependencies = visitedDependencies || new Set();
+
+    for (const absPath of this.dependentPaths) {
+      if (dependencies.has(absPath) ||
+          visitedDependencies.has(absPath)) {
+        // Already added.
+        continue;
+      }
+
+      // Prevent infinite loops.
+      visitedDependencies.add(absPath);
+
+      // Recursively load dependencies.
+      const dependency = loadDependencyAbs(this.baseDir, absPath);
+      dependency.loadDependencies(dependencies, visitedDependencies);
+
+      // Add the dependency.
+      dependencies.set(absPath, dependency);
+    }
+  }
+}
+
+/**
+ * Represents sources whose AST can be manipulated.
+ */
+class ParsedSource extends Source {
+  constructor(ast, baseDir, relPath, flags, dependentPaths) {
+    super(baseDir, relPath, flags, dependentPaths);
+    this.ast = ast;
+  }
+
+  isStrict() {
+    return !!this.ast.program.directives.filter(isStrictDirective).length;
+  }
+
+  generateNoStrict() {
+    const allDirectives = this.ast.program.directives;
+    this.ast.program.directives = this.ast.program.directives.filter(
+        directive => !isStrictDirective(directive));
+    try {
+      const code = babelGenerator(this.ast.program, {comments: true}).code;
+      return restoreV8Builtins(code);
+    } finally {
+      this.ast.program.directives = allDirectives;
+    }
+  }
+}
+
+/**
+ * Represents sources with cached code.
+ */
+class CachedSource extends Source {
+  constructor(source) {
+    super(source.baseDir, source.relPath, source.flags, source.dependentPaths);
+    this.use_strict = source.isStrict();
+    this.code = source.generateNoStrict();
+  }
+
+  isStrict() {
+    return this.use_strict;
+  }
+
+  generateNoStrict() {
+    return this.code;
+  }
+}
+
+/**
+ * Read file path into an AST.
+ *
+ * Post-processes the AST by replacing V8 natives and removing disallowed
+ * natives, as well as removing load expressions and adding the paths-to-load
+ * as meta data.
+ */
+function loadSource(baseDir, relPath, parseStrict=false) {
+  const absPath = fsPath.resolve(fsPath.join(baseDir, relPath));
+  const data = fs.readFileSync(absPath, 'utf-8');
+
+  if (guessType(data) !== SCRIPT) {
+    return null;
+  }
+
+  const preprocessed = maybeUseStrict(replaceV8Builtins(data), parseStrict);
+  const ast = babylon.parse(preprocessed, BABYLON_OPTIONS);
+
+  removeComments(ast);
+  cleanAsserts(ast);
+  neuterDisallowedV8Natives(ast);
+  annotateWithOriginalPath(ast, relPath);
+
+  const flags = loadFlags(data);
+  const dependentPaths = resolveLoads(absPath, ast);
+
+  return new ParsedSource(ast, baseDir, relPath, flags, dependentPaths);
+}
+
+function guessType(data) {
+  if (data.includes('// MODULE')) {
+    return MODULE;
+  }
+
+  return SCRIPT;
+}
+
+/**
+ * Remove existing comments.
+ */
+function removeComments(ast) {
+  babelTraverse(ast, {
+    enter(path) {
+      babelTypes.removeComments(path.node);
+    }
+  });
+}
+
+/**
+ * Removes "Assert" from strings in spidermonkey shells or from older
+ * crash tests: https://crbug.com/1068268
+ */
+function cleanAsserts(ast) {
+  function replace(string) {
+    return string.replace(/Assert/g, 'A****t');
+  }
+  babelTraverse(ast, {
+    StringLiteral(path) {
+      path.node.value = replace(path.node.value);
+      path.node.extra.raw = replace(path.node.extra.raw);
+      path.node.extra.rawValue = replace(path.node.extra.rawValue);
+    },
+    TemplateElement(path) {
+      path.node.value.cooked = replace(path.node.value.cooked);
+      path.node.value.raw = replace(path.node.value.raw);
+    },
+  });
+}
+
+/**
+ * Filter out disallowed V8 runtime functions.
+ */
+function neuterDisallowedV8Natives(ast) {
+  babelTraverse(ast, {
+    CallExpression(path) {
+      if (!babelTypes.isIdentifier(path.node.callee) ||
+          !path.node.callee.name.startsWith(V8_BUILTIN_PREFIX)) {
+        return;
+      }
+
+      const functionName = path.node.callee.name.substr(
+          V8_BUILTIN_PREFIX.length);
+
+      if (!exceptions.isAllowedRuntimeFunction(functionName)) {
+        path.replaceWith(babelTypes.callExpression(
+            babelTypes.identifier('nop'), []));
+      }
+    }
+  });
+}
+
+/**
+ * Annotate code with original file path.
+ */
+function annotateWithOriginalPath(ast, relPath) {
+  if (ast.program && ast.program.body && ast.program.body.length > 0) {
+    babelTypes.addComment(
+        ast.program.body[0], 'leading', ' Original: ' + relPath, true);
+  }
+}
+
+// TODO(machenbach): Move this into the V8 corpus. Other test suites don't
+// use this flag logic.
+function loadFlags(data) {
+  const result = [];
+  let count = 0;
+  for (const line of data.split('\n')) {
+    if (count++ > 40) {
+      // No need to process the whole file. Flags are always added after the
+      // copyright header.
+      break;
+    }
+    const match = line.match(/\/\/ Flags:\s*(.*)\s*/);
+    if (!match) {
+      continue;
+    }
+    for (const flag of exceptions.filterFlags(match[1].split(/\s+/))) {
+      result.push(flag);
+    }
+  }
+  return result;
+}
+
+// Convenience helper to load sources with absolute paths.
+function loadSourceAbs(baseDir, absPath) {
+  return loadSource(baseDir, fsPath.relative(baseDir, absPath));
+}
+
+const dependencyCache = new Map();
+
+function loadDependency(baseDir, relPath) {
+  const absPath = fsPath.join(baseDir, relPath);
+  let dependency = dependencyCache.get(absPath);
+  if (!dependency) {
+    const source = loadSource(baseDir, relPath);
+    dependency = new CachedSource(source);
+    dependencyCache.set(absPath, dependency);
+  }
+  return dependency;
+}
+
+function loadDependencyAbs(baseDir, absPath) {
+  return loadDependency(baseDir, fsPath.relative(baseDir, absPath));
+}
+
+// Convenience helper to load a file from the resources directory.
+function loadResource(fileName) {
+  return loadDependency(__dirname, fsPath.join('resources', fileName));
+}
+
+function generateCode(source, dependencies=[]) {
+  const allSources = dependencies.concat([source]);
+  const codePieces = allSources.map(
+      source => source.generateNoStrict());
+
+  if (allSources.some(source => source.isStrict()) &&
+      !allSources.some(source => source.isSloppy())) {
+    codePieces.unshift('\'use strict\';');
+  }
+
+  return codePieces.join(EOL + EOL);
+}
+
+module.exports = {
+  BABYLON_OPTIONS: BABYLON_OPTIONS,
+  generateCode: generateCode,
+  loadDependencyAbs: loadDependencyAbs,
+  loadResource: loadResource,
+  loadSource: loadSource,
+  loadSourceAbs: loadSourceAbs,
+  ParsedSource: ParsedSource,
+}
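
One detail worth illustrating from above: %-prefixed V8 runtime calls are not valid JavaScript, so they are rewritten before parsing and restored at code-generation time. A sketch using the module-internal helpers (%OptimizeFunctionOnNextCall is just an example runtime function):

replaceV8Builtins('%OptimizeFunctionOnNextCall(f);');
// -> '__V8BuiltinOptimizeFunctionOnNextCall(f);'
restoreV8Builtins('__V8BuiltinOptimizeFunctionOnNextCall(f);');
// -> '%OptimizeFunctionOnNextCall(f);'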
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/helpers.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/helpers.js
new file mode 100644
index 0000000..172f8c8
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/helpers.js
@@ -0,0 +1,75 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Test helpers.
+ */
+
+'use strict';
+
+const assert = require('assert');
+const path = require('path');
+const fs = require('fs');
+
+const sourceHelpers = require('../source_helpers.js');
+
+const BASE_DIR = path.join(path.dirname(__dirname), 'test_data');
+const DB_DIR = path.join(BASE_DIR, 'fake_db');
+
+const HEADER = `// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+`;
+
+/**
+ * Create a function that returns one of `probs` when called. It rotates
+ * through the values. Useful to replace `random.random()` in tests using
+ * the probabilities that trigger different interesting cases.
+ */
+function cycleProbabilitiesFun(probs) {
+  let index = 0;
+  return () => {
+    index = index % probs.length;
+    return probs[index++];
+  };
+}
+
+/**
+ * Replace Math.random with a deterministic pseudo-random function.
+ */
+function deterministicRandom(sandbox) {
+  let seed = 1;
+  function random() {
+    const x = Math.sin(seed++) * 10000;
+    return x - Math.floor(x);
+  }
+  sandbox.stub(Math, 'random').callsFake(() => { return random(); });
+}
+
+function loadTestData(relPath) {
+  return sourceHelpers.loadSource(BASE_DIR, relPath);
+}
+
+function assertExpectedResult(expectedPath, result) {
+  const absPath = path.join(BASE_DIR, expectedPath);
+  if (process.env.GENERATE) {
+    fs.writeFileSync(absPath, HEADER + result.trim() + '\n');
+    return;
+  }
+
+  // Omit copyright header when comparing files.
+  const expected = fs.readFileSync(absPath, 'utf-8').trim().split('\n');
+  expected.splice(0, 4);
+  assert.strictEqual(expected.join('\n'), result.trim());
+}
+
+module.exports = {
+  BASE_DIR: BASE_DIR,
+  DB_DIR: DB_DIR,
+  assertExpectedResult: assertExpectedResult,
+  cycleProbabilitiesFun: cycleProbabilitiesFun,
+  deterministicRandom: deterministicRandom,
+  loadTestData: loadTestData,
+}
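
An example of pinning the fuzzer's randomness in a mocha test with the helper above (a sketch; sinon comes from the package's devDependencies, and the probabilities are arbitrary):

const sinon = require('sinon');
const random = require('../random.js');
const helpers = require('./helpers.js');

const sandbox = sinon.createSandbox();
sandbox.stub(random, 'random').callsFake(
    helpers.cycleProbabilitiesFun([0.2, 0.8]));
// random.random() now returns 0.2, 0.8, 0.2, 0.8, ... until sandbox.restore().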
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_available_variables.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_available_variables.js
new file mode 100644
index 0000000..bf699ec
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_available_variables.js
@@ -0,0 +1,34 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Tests for collecting available variables and functions.
+ */
+
+'use strict';
+
+const babelTraverse = require('@babel/traverse').default;
+
+const common = require('../mutators/common.js');
+const helpers = require('./helpers.js');
+
+describe('Available variables and functions', () => {
+  it('test', () => {
+    const source = helpers.loadTestData('available_variables.js');
+    const result = new Array();
+
+    babelTraverse(source.ast, {
+      CallExpression(path) {
+        result.push({
+          variables: common.availableVariables(path),
+          functions: common.availableFunctions(path),
+        });
+      }
+    });
+
+    helpers.assertExpectedResult(
+        'available_variables_expected.js',
+        JSON.stringify(result, null, 2));
+  });
+});
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_corpus.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_corpus.js
new file mode 100644
index 0000000..1f35199
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_corpus.js
@@ -0,0 +1,113 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Corpus loading.
+ */
+
+'use strict';
+
+const assert = require('assert');
+const sinon = require('sinon');
+
+const exceptions = require('../exceptions.js');
+const corpus = require('../corpus.js');
+
+const sandbox = sinon.createSandbox();
+
+function testSoftSkipped(count, softSkipped, paths) {
+  sandbox.stub(exceptions, 'getSoftSkipped').callsFake(() => {
+    return softSkipped;
+  });
+  const mjsunit = new corpus.Corpus('test_data', 'mjsunit_softskipped');
+  const cases = mjsunit.getRandomTestcasePaths(count);
+  assert.deepEqual(paths, cases);
+}
+
+describe('Loading corpus', () => {
+  afterEach(() => {
+    sandbox.restore();
+  });
+
+  it('keeps all tests with no soft-skipped tests', () => {
+    sandbox.stub(Math, 'random').callsFake(() => 0.9);
+    testSoftSkipped(
+        3,
+        [],
+        ['mjsunit_softskipped/permitted.js',
+         'mjsunit_softskipped/object-literal.js',
+         'mjsunit_softskipped/regress/binaryen-123.js']);
+  });
+
+  it('chooses one test with no soft-skipped tests', () => {
+    sandbox.stub(Math, 'random').callsFake(() => 0.9);
+    testSoftSkipped(
+        1,
+        [],
+        ['mjsunit_softskipped/permitted.js']);
+  });
+
+  it('keeps soft-skipped tests', () => {
+    sandbox.stub(Math, 'random').callsFake(() => 0.9);
+    testSoftSkipped(
+        1,
+        [/^binaryen.*\.js/, 'object-literal.js'],
+        ['mjsunit_softskipped/permitted.js']);
+  });
+
+  it('keeps no generated soft-skipped tests', () => {
+    sandbox.stub(Math, 'random').callsFake(() => 0.9);
+    const softSkipped = [
+      // Correctly listed full relative path of test case.
+      'mjsunit_softskipped/regress/binaryen-123.js',
+      // Only basename doesn't match.
+      'object-literal.js',
+      // Only pieces of the path don't match.
+      'mjsunit_softskipped',
+    ];
+    sandbox.stub(exceptions, 'getGeneratedSoftSkipped').callsFake(
+        () => { return new Set(softSkipped); });
+    testSoftSkipped(
+        2,
+        // None soft-skipped for basenames and regexps.
+        [],
+        // Only binaryen-123.js gets filtered out.
+        ['mjsunit_softskipped/object-literal.js',
+         'mjsunit_softskipped/permitted.js']);
+  });
+
+  it('keeps soft-skipped tests by chance', () => {
+    sandbox.stub(Math, 'random').callsFake(() => 0);
+    testSoftSkipped(
+        3,
+        [/^binaryen.*\.js/, 'object-literal.js'],
+        ['mjsunit_softskipped/object-literal.js',
+         'mjsunit_softskipped/regress/binaryen-123.js',
+         'mjsunit_softskipped/permitted.js']);
+  });
+
+  it('caches relative paths', () => {
+    sandbox.stub(Math, 'random').callsFake(() => 0);
+    sandbox.stub(exceptions, 'getSoftSkipped').callsFake(
+        () => { return ['object-literal.js']; });
+    const generatedSoftSkipped = [
+      'mjsunit_softskipped/regress/binaryen-123.js',
+    ];
+    sandbox.stub(exceptions, 'getGeneratedSoftSkipped').callsFake(
+        () => { return new Set(generatedSoftSkipped); });
+    const mjsunit = new corpus.Corpus('test_data', 'mjsunit_softskipped');
+    assert.deepEqual(
+        ['mjsunit_softskipped/object-literal.js',
+         'mjsunit_softskipped/regress/binaryen-123.js'],
+        mjsunit.softSkippedFiles);
+    assert.deepEqual(
+        ['mjsunit_softskipped/permitted.js'],
+        mjsunit.permittedFiles);
+    assert.deepEqual(
+        ['mjsunit_softskipped/permitted.js',
+         'mjsunit_softskipped/object-literal.js',
+         'mjsunit_softskipped/regress/binaryen-123.js'],
+        Array.from(mjsunit.relFiles()));
+  });
+});
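For orientation, the Corpus API exercised above can also be used without any stubbing; a small sketch (file order is whatever relFiles() yields):

const corpus = require('../corpus.js');

// Enumerate the same test corpus directly.
const mjsunit = new corpus.Corpus('test_data', 'mjsunit_softskipped');
for (const relPath of mjsunit.relFiles()) {
  console.log(relPath);  // e.g. 'mjsunit_softskipped/permitted.js'
}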
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_differential_fuzz.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_differential_fuzz.js
new file mode 100644
index 0000000..31fe989
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_differential_fuzz.js
@@ -0,0 +1,141 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Tests for differential fuzzing.
+ */
+
+'use strict';
+
+const assert = require('assert');
+const program = require('commander');
+const sinon = require('sinon');
+
+const helpers = require('./helpers.js');
+const scriptMutator = require('../script_mutator.js');
+const sourceHelpers = require('../source_helpers.js');
+const random = require('../random.js');
+
+const { DifferentialFuzzMutator, DifferentialFuzzSuppressions } = require(
+    '../mutators/differential_fuzz_mutator.js');
+const { DifferentialScriptMutator } = require(
+    '../differential_script_mutator.js');
+
+const sandbox = sinon.createSandbox();
+
+function testMutators(settings, mutatorClass, inputFile, expectedFile) {
+  const source = helpers.loadTestData('differential_fuzz/' + inputFile);
+
+  const mutator = new mutatorClass(settings);
+  mutator.mutate(source);
+
+  const mutated = sourceHelpers.generateCode(source);
+  helpers.assertExpectedResult(
+      'differential_fuzz/' + expectedFile, mutated);
+}
+
+describe('Differential fuzzing', () => {
+  beforeEach(() => {
+    // Zero settings for all mutators.
+    this.settings = scriptMutator.defaultSettings();
+    for (const key of Object.keys(this.settings)) {
+      this.settings[key] = 0.0;
+    }
+    // By default, deterministically use all mutations of differential
+    // fuzzing.
+    this.settings['DIFF_FUZZ_EXTRA_PRINT'] = 1.0;
+    this.settings['DIFF_FUZZ_TRACK_CAUGHT'] = 1.0;
+
+    // Fake fuzzer being called with --input_dir flag.
+    this.oldInputDir = program.input_dir;
+    program.input_dir = helpers.BASE_DIR;
+  });
+
+  afterEach(() => {
+    sandbox.restore();
+    program.input_dir = this.oldInputDir;
+  });
+
+  it('applies suppressions', () => {
+    // This selects the first random variable when replacing .arguments.
+    sandbox.stub(random, 'single').callsFake(a => a[0]);
+    testMutators(
+        this.settings,
+        DifferentialFuzzSuppressions,
+        'suppressions.js',
+        'suppressions_expected.js');
+  });
+
+  it('adds extra printing', () => {
+    testMutators(
+        this.settings,
+        DifferentialFuzzMutator,
+        'mutations.js',
+        'mutations_expected.js');
+  });
+
+  it('does no extra printing', () => {
+    this.settings['DIFF_FUZZ_EXTRA_PRINT'] = 0.0;
+    testMutators(
+        this.settings,
+        DifferentialFuzzMutator,
+        'exceptions.js',
+        'exceptions_expected.js');
+  });
+
+  it('runs end to end', () => {
+    // Don't choose any zeroed settings or IGNORE_DEFAULT_PROB in try-catch
+    // mutator. Choose using original flags with >= 2%.
+    const chooseOrigFlagsProb = 0.2;
+    sandbox.stub(random, 'choose').callsFake((p) => p >= chooseOrigFlagsProb);
+
+    // Fake build directory from which two json configurations for flags are
+    // loaded.
+    const env = {
+      APP_DIR: 'test_data/differential_fuzz',
+      GENERATE: process.env.GENERATE,
+    };
+    sandbox.stub(process, 'env').value(env);
+
+    // Fake loading resources and instead load one fixed fake file for each.
+    sandbox.stub(sourceHelpers, 'loadResource').callsFake(() => {
+      return helpers.loadTestData('differential_fuzz/fake_resource.js');
+    });
+
+    // Load input files.
+    const files = [
+      'differential_fuzz/input1.js',
+      'differential_fuzz/input2.js',
+    ];
+    const sources = files.map(helpers.loadTestData);
+
+    // Apply top-level fuzzing, with all probabilistic configs switched off.
+    this.settings['DIFF_FUZZ_EXTRA_PRINT'] = 0.0;
+    this.settings['DIFF_FUZZ_TRACK_CAUGHT'] = 0.0;
+    const mutator = new DifferentialScriptMutator(
+        this.settings, helpers.DB_DIR);
+    const mutated = mutator.mutateMultiple(sources);
+    helpers.assertExpectedResult(
+        'differential_fuzz/combined_expected.js', mutated.code);
+
+    // Flags for v8_foozzie.py are calculated from v8_fuzz_experiments.json and
+    // v8_fuzz_flags.json in test_data/differential_fuzz.
+    const expectedFlags = [
+      '--first-config=ignition',
+      '--second-config=ignition_turbo',
+      '--second-d8=d8',
+      '--second-config-extra-flags=--foo1',
+      '--second-config-extra-flags=--foo2',
+      '--first-config-extra-flags=--flag1',
+      '--second-config-extra-flags=--flag1',
+      '--first-config-extra-flags=--flag2',
+      '--second-config-extra-flags=--flag2',
+      '--first-config-extra-flags=--flag3',
+      '--second-config-extra-flags=--flag3',
+      '--first-config-extra-flags=--flag4',
+      '--second-config-extra-flags=--flag4'
+    ];
+    assert.deepEqual(expectedFlags, mutated.flags);
+  });
+});
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_differential_fuzz_library.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_differential_fuzz_library.js
new file mode 100644
index 0000000..58e7c88
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_differential_fuzz_library.js
@@ -0,0 +1,113 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Tests for the differential-fuzzing library files.
+ */
+
+'use strict';
+
+const assert = require('assert');
+const fs = require('fs');
+const path = require('path');
+
+const libPath = path.resolve(
+    path.join(__dirname, '..', 'resources', 'differential_fuzz_library.js'));
+const code = fs.readFileSync(libPath, 'utf-8');
+
+// We wire the print function to write to this result variable.
+const resultDummy = 'let result; const print = text => { result = text; };';
+
+// The prettyPrinted function from mjsunit is reused in the library.
+const prettyPrint = 'let prettyPrinted = value => value;';
+
+const hookedUpCode = resultDummy + prettyPrint + code;
+
+// Runs the library, adds test code and verifies the result.
+function testLibrary(testCode, expected) {
+  // The code isn't structured as a module. The test code is expected to
+  // evaluate to a result which we store in actual.
+  const actual = eval(hookedUpCode + testCode);
+  assert.deepEqual(expected, actual);
+}
+
+describe('Differential fuzzing library', () => {
+  it('prints objects', () => {
+    testLibrary(
+        '__prettyPrint([0, 1, 2, 3]); result;',
+        '[0, 1, 2, 3]');
+    testLibrary(
+        '__prettyPrint({0: 1, 2: 3}); result;',
+        'Object{0: 1, 2: 3}');
+    testLibrary(
+        'const o = {}; o.k = 42;__prettyPrint(o); result;',
+        'Object{k: 42}');
+  });
+
+  it('cuts off deep nesting', () => {
+    // We print only until a nesting depth of 4.
+    testLibrary(
+        '__prettyPrint({0: [1, 2, [3, {4: []}]]}); result;',
+        'Object{0: [1, 2, [3, Object{4: ...}]]}');
+  });
+
+  it('cuts off long strings', () => {
+    const long = new Array(66).join('a');
+    const head = new Array(55).join('a');
+    const tail = new Array(10).join('a');
+    testLibrary(
+        `__prettyPrint("${long}"); result;`,
+        `${head}[...]${tail}`);
+    // If the string gets longer, the cut-off version is still the same.
+    const veryLong = new Array(100).join('a');
+    testLibrary(
+        `__prettyPrint("${veryLong}"); result;`,
+        `${head}[...]${tail}`);
+  });
+
+  it('tracks hash difference', () => {
+    // Test that we track a hash value for each string we print.
+    const long = new Array(66).join('a');
+    testLibrary(
+        `__prettyPrint("${long}"); __hash;`,
+        2097980794);
+    // Test that the hash value differs, also when the cut-off result doesn't.
+    const veryLong = new Array(100).join('a');
+    testLibrary(
+        `__prettyPrint("${veryLong}"); __hash;`,
+        -428472866);
+    // Test that repeated calls update the hash.
+    testLibrary(
+        `__prettyPrint("${long}");__prettyPrint("${long}"); __hash;`,
+        -909224493);
+  });
+
+  it('limits extra printing', () => {
+    // Test that after exceeding the limit for calling extra printing, there
+    // is no new string printed (in the test case no new result added).
+    testLibrary(
+        'for (let i = 0; i < 20; i++) __prettyPrintExtra(i); result;',
+        '19');
+    testLibrary(
+        'for (let i = 0; i < 101; i++) __prettyPrintExtra(i); result;',
+        '99');
+    testLibrary(
+        'for (let i = 0; i < 102; i++) __prettyPrintExtra(i); result;',
+        '99');
+  });
+
+  it('tracks hash after limit', () => {
+    // Test that after exceeding the limit for calling extra printing, the
+    // hash is still updated.
+    testLibrary(
+        'for (let i = 0; i < 20; i++) __prettyPrintExtra(i); __hash;',
+        -945753644);
+    testLibrary(
+        'for (let i = 0; i < 101; i++) __prettyPrintExtra(i); __hash;',
+        1907055979);
+    testLibrary(
+        'for (let i = 0; i < 102; i++) __prettyPrintExtra(i); __hash;',
+        -590842070);
+  });
+});
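A further case would reuse the testLibrary harness from this file; the sketch below assumes nested arrays are rendered the same way as the flat array above (the expected string is an unverified assumption):

// Hypothetical extra case; expected output assumed, not verified.
testLibrary(
    '__prettyPrint([1, [2, [3]]]); result;',
    '[1, [2, [3]]]');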
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_load.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_load.js
new file mode 100644
index 0000000..202bbbe
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_load.js
@@ -0,0 +1,69 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Tests for test case loading and dependency resolution.
+ */
+
+'use strict';
+
+const sinon = require('sinon');
+
+const helpers = require('./helpers.js');
+const sourceHelpers = require('../source_helpers.js');
+
+const { ScriptMutator } = require('../script_mutator.js');
+
+const sandbox = sinon.createSandbox();
+
+function testLoad(testPath, expectedPath) {
+  const mutator = new ScriptMutator({}, helpers.DB_DIR);
+  const source = helpers.loadTestData(testPath);
+  const dependencies = mutator.resolveInputDependencies([source]);
+  const code = sourceHelpers.generateCode(source, dependencies);
+  helpers.assertExpectedResult(expectedPath, code);
+}
+
+describe('V8 dependencies', () => {
+  it('test', () => {
+    testLoad(
+        'mjsunit/test_load.js',
+        'mjsunit/test_load_expected.js');
+  });
+  it('does not loop indefinitely', () => {
+    testLoad(
+        'mjsunit/test_load_self.js',
+        'mjsunit/test_load_self_expected.js');
+  });
+});
+
+describe('Chakra dependencies', () => {
+  it('test', () => {
+    testLoad(
+        'chakra/load.js',
+        'chakra/load_expected.js');
+  });
+});
+
+describe('JSTest dependencies', () => {
+  afterEach(() => {
+    sandbox.restore();
+  });
+
+  it('test', () => {
+    const fakeStubs = sourceHelpers.loadSource(
+        helpers.BASE_DIR, 'JSTests/fake_stub.js');
+    sandbox.stub(sourceHelpers, 'loadResource').callsFake(() => fakeStubs);
+    testLoad('JSTests/load.js', 'JSTests/load_expected.js');
+  });
+});
+
+describe('SpiderMonkey dependencies', () => {
+  it('test', () => {
+    testLoad(
+        'spidermonkey/test/load.js',
+        'spidermonkey/test/load_expected.js');
+  });
+});
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_mutate_arrays.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_mutate_arrays.js
new file mode 100644
index 0000000..1af1094
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_mutate_arrays.js
@@ -0,0 +1,47 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Tests for mutating arrays
+ */
+
+'use strict';
+
+const sinon = require('sinon');
+
+const babylon = require('@babel/parser');
+
+const common = require('../mutators/common.js');
+const helpers = require('./helpers.js');
+const scriptMutator = require('../script_mutator.js');
+const sourceHelpers = require('../source_helpers.js');
+
+const {ArrayMutator} = require('../mutators/array_mutator.js');
+
+const sandbox = sinon.createSandbox();
+
+describe('Mutate arrays', () => {
+  afterEach(() => {
+    sandbox.restore();
+  });
+
+  it('performs all mutations', () => {
+    // Make random operations deterministic.
+    sandbox.stub(common, 'randomValue').callsFake(
+        () => babylon.parseExpression('""'));
+    helpers.deterministicRandom(sandbox);
+
+    const source = helpers.loadTestData('mutate_arrays.js');
+
+    const settings = scriptMutator.defaultSettings();
+    settings['MUTATE_ARRAYS'] = 1.0;
+
+    const mutator = new ArrayMutator(settings);
+    mutator.mutate(source);
+
+    const mutated = sourceHelpers.generateCode(source);
+    helpers.assertExpectedResult(
+        'mutate_arrays_expected.js', mutated);
+  });
+});
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_mutate_expressions.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_mutate_expressions.js
new file mode 100644
index 0000000..0b808a7
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_mutate_expressions.js
@@ -0,0 +1,79 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Tests for mutating expressions
+ */
+
+'use strict';
+
+const assert = require('assert');
+
+const babelTypes = require('@babel/types');
+const sinon = require('sinon');
+
+const common = require('../mutators/common.js');
+const expressionMutator = require('../mutators/expression_mutator.js');
+const helpers = require('./helpers.js');
+const scriptMutator = require('../script_mutator.js');
+const sourceHelpers = require('../source_helpers.js');
+const random = require('../random.js');
+
+const sandbox = sinon.createSandbox();
+
+function testCloneSiblings(expected_file) {
+  const source = helpers.loadTestData('mutate_expressions.js');
+
+  const settings = scriptMutator.defaultSettings();
+  settings['MUTATE_EXPRESSIONS'] = 1.0;
+
+  const mutator = new expressionMutator.ExpressionMutator(settings);
+  mutator.mutate(source);
+
+  const mutated = sourceHelpers.generateCode(source);
+  helpers.assertExpectedResult(expected_file, mutated);
+}
+
+describe('Mutate expressions', () => {
+  beforeEach(() => {
+    // Select the previous sibling.
+    sandbox.stub(random, 'randInt').callsFake((a, b) => b);
+    // This chooses cloning siblings.
+    sandbox.stub(random, 'random').callsFake(() => 0.8);
+  });
+
+  afterEach(() => {
+    sandbox.restore();
+  });
+
+  it('clones previous to current', () => {
+    // Keep the order of [previous, current], select previous.
+    sandbox.stub(random, 'shuffle').callsFake(a => a);
+    // Insert after. Keep returning true for the MUTATE_EXPRESSIONS check.
+    sandbox.stub(random, 'choose').callsFake(a => a === 1);
+
+    testCloneSiblings('mutate_expressions_previous_expected.js');
+  });
+
+  it('clones current to previous', () => {
+    // Switch the order of [previous, current], select current.
+    sandbox.stub(random, 'shuffle').callsFake(a => [a[1], a[0]]);
+    // Insert before.
+    sandbox.stub(random, 'choose').callsFake(() => true);
+
+    testCloneSiblings('mutate_expressions_current_expected.js');
+  });
+});
+
+describe('Cloning', () => {
+  // Ensure that the source location we add are not cloned.
+  it('is not copying added state', () => {
+    const source = helpers.loadTestData('mutate_expressions.js');
+    common.setSourceLoc(source, 5, 10);
+    const noopNode = source.ast.program.body[0];
+    assert.equal(0.5, common.getSourceLoc(noopNode));
+    const cloned = babelTypes.cloneDeep(noopNode);
+    assert.equal(undefined, common.getSourceLoc(cloned));
+  });
+});
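The stored source location is the fraction index / total of the input file, so setSourceLoc(source, 9, 10) marks the last of ten files; a minimal sketch mirroring the test above:

// Sketch: the fraction 9 / 10 = 0.9 is attached to the source's nodes.
const source = helpers.loadTestData('mutate_expressions.js');
common.setSourceLoc(source, 9, 10);
assert.equal(0.9, common.getSourceLoc(source.ast.program.body[0]));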
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_mutate_function_calls.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_mutate_function_calls.js
new file mode 100644
index 0000000..30793b3
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_mutate_function_calls.js
@@ -0,0 +1,65 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Tests for mutating function calls.
+ */
+
+'use strict';
+
+const sinon = require('sinon');
+
+const helpers = require('./helpers.js');
+const random = require('../random.js');
+const scriptMutator = require('../script_mutator.js');
+const sourceHelpers = require('../source_helpers.js');
+const functionCallMutator = require('../mutators/function_call_mutator.js');
+
+const sandbox = sinon.createSandbox();
+
+function loadAndMutate(input_file) {
+  const source = helpers.loadTestData(input_file);
+
+  const settings = scriptMutator.defaultSettings();
+  settings['engine'] = 'V8';
+  settings['MUTATE_FUNCTION_CALLS'] = 1.0;
+
+  const mutator = new functionCallMutator.FunctionCallMutator(settings);
+  mutator.mutate(source);
+  return source;
+}
+
+describe('Mutate functions', () => {
+  afterEach(() => {
+    sandbox.restore();
+  });
+
+  it('is robust without available functions', () => {
+    // This chooses replacing functions.
+    sandbox.stub(random, 'random').callsFake(() => { return 0.4; });
+
+    // We just ensure here that mutating this file doesn't throw.
+    loadAndMutate('mutate_function_call.js');
+  });
+
+  it('optimizes functions in V8', () => {
+    // This omits function replacement and chooses V8 optimizations.
+    sandbox.stub(random, 'random').callsFake(() => { return 0.6; });
+
+    const source = loadAndMutate('mutate_function_call.js');
+    const mutated = sourceHelpers.generateCode(source);
+    helpers.assertExpectedResult(
+        'mutate_function_call_expected.js', mutated);
+  });
+
+  it('deoptimizes functions in V8', () => {
+    // This chooses V8 deoptimization.
+    sandbox.stub(random, 'random').callsFake(() => { return 0.8; });
+
+    const source = loadAndMutate('mutate_function_call.js');
+    const mutated = sourceHelpers.generateCode(source);
+    helpers.assertExpectedResult(
+        'mutate_function_call_deopt_expected.js', mutated);
+  });
+});
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_mutate_numbers.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_mutate_numbers.js
new file mode 100644
index 0000000..b9d6311
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_mutate_numbers.js
@@ -0,0 +1,54 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Tests for mutating numbers
+ */
+
+'use strict';
+
+const babelTypes = require('@babel/types');
+const sinon = require('sinon');
+
+const common = require('../mutators/common.js');
+const helpers = require('./helpers.js');
+const scriptMutator = require('../script_mutator.js');
+const sourceHelpers = require('../source_helpers.js');
+const numberMutator = require('../mutators/number_mutator.js');
+const random = require('../random.js');
+
+const sandbox = sinon.createSandbox();
+
+describe('Mutate numbers', () => {
+  beforeEach(() => {
+    sandbox.stub(common, 'nearbyRandomNumber').callsFake(
+        () => { return babelTypes.numericLiteral(-3) });
+    sandbox.stub(common, 'randomInterestingNumber').callsFake(
+        () => { return babelTypes.numericLiteral(-4) });
+    sandbox.stub(random, 'randInt').callsFake(() => { return -5 });
+
+    // Interesting cases from number mutator.
+    const interestingProbs = [0.009, 0.05, 0.5];
+    sandbox.stub(random, 'random').callsFake(
+        helpers.cycleProbabilitiesFun(interestingProbs));
+  });
+
+  afterEach(() => {
+    sandbox.restore();
+  });
+
+  it('test', () => {
+    const source = helpers.loadTestData('mutate_numbers.js');
+
+    const settings = scriptMutator.defaultSettings();
+    settings['MUTATE_NUMBERS'] = 1.0;
+
+    const mutator = new numberMutator.NumberMutator(settings);
+    mutator.mutate(source);
+
+    const mutated = sourceHelpers.generateCode(source);
+    helpers.assertExpectedResult(
+        'mutate_numbers_expected.js', mutated);
+  });
+});
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_mutate_objects.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_mutate_objects.js
new file mode 100644
index 0000000..184e627
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_mutate_objects.js
@@ -0,0 +1,47 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Tests for mutating object expressions
+ */
+
+'use strict';
+
+const sinon = require('sinon');
+
+const babylon = require('@babel/parser');
+
+const common = require('../mutators/common.js');
+const helpers = require('./helpers.js');
+const scriptMutator = require('../script_mutator.js');
+const sourceHelpers = require('../source_helpers.js');
+
+const {ObjectMutator} = require('../mutators/object_mutator.js');
+
+const sandbox = sinon.createSandbox();
+
+describe('Mutate objects', () => {
+  afterEach(() => {
+    sandbox.restore();
+  });
+
+  it('performs all mutations', () => {
+    // Make random operations deterministic.
+    sandbox.stub(common, 'randomValue').callsFake(
+        () => babylon.parseExpression('""'));
+    helpers.deterministicRandom(sandbox);
+
+    const source = helpers.loadTestData('mutate_objects.js');
+
+    const settings = scriptMutator.defaultSettings();
+    settings['MUTATE_OBJECTS'] = 1.0;
+
+    const mutator = new ObjectMutator(settings);
+    mutator.mutate(source);
+
+    const mutated = sourceHelpers.generateCode(source);
+    helpers.assertExpectedResult(
+        'mutate_objects_expected.js', mutated);
+  });
+});
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_mutate_variable_or_object.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_mutate_variable_or_object.js
new file mode 100644
index 0000000..7bdfe90
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_mutate_variable_or_object.js
@@ -0,0 +1,72 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Test variable-or-object mutator.
+ */
+
+'use strict';
+
+const babylon = require('@babel/parser');
+const sinon = require('sinon');
+
+const common = require('../mutators/common.js');
+const helpers = require('./helpers.js');
+const variableOrObject = require('../mutators/variable_or_object_mutation.js');
+const random = require('../random.js');
+const sourceHelpers = require('../source_helpers.js');
+
+const sandbox = sinon.createSandbox();
+
+function testMutations(testPath, expectedPath) {
+  const source = helpers.loadTestData(testPath);
+
+  const mutator = new variableOrObject.VariableOrObjectMutator(
+      { ADD_VAR_OR_OBJ_MUTATIONS: 1.0 });
+
+  mutator.mutate(source);
+
+  const mutated = sourceHelpers.generateCode(source);
+  helpers.assertExpectedResult(expectedPath, mutated);
+}
+
+describe('Variable or object mutator', () => {
+  beforeEach(() => {
+    // Make before/after insertion deterministic. This also chooses
+    // random objects.
+    sandbox.stub(random, 'choose').callsFake(() => { return true; });
+    // This stubs out the random seed.
+    sandbox.stub(random, 'randInt').callsFake(() => { return 123; });
+    // Random value is itself dependent on too much randomization.
+    sandbox.stub(common, 'randomValue').callsFake(
+        () => { return babylon.parseExpression('0'); });
+  });
+
+  afterEach(() => {
+    sandbox.restore();
+  });
+
+  it('test', () => {
+    let index = 0;
+    // Test different cases of _randomVariableOrObjectMutations in
+    // variable_or_object_mutation.js.
+    const choices = [
+      0.2, // Trigger recursive case.
+      0.3, // Recursion 1: Delete.
+      0.4, // Recursion 2: Property access.
+      0.5, // Random assignment.
+      // 0.6 case for randomFunction omitted as it has too much randomization.
+      0.7, // Variable assignment.
+      0.8, // Object.defineProperty.
+      0.9, // Object.defineProperty recursive.
+      0.3, // Recursion 1: Delete.
+      0.4, // Recursion 2: Property access.
+    ];
+    sandbox.stub(random, 'random').callsFake(
+        () => { return choices[index++]; });
+    testMutations(
+        'mutate_var_or_obj.js',
+        'mutate_var_or_obj_expected.js');
+  });
+});
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_mutate_variables.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_mutate_variables.js
new file mode 100644
index 0000000..e544599
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_mutate_variables.js
@@ -0,0 +1,47 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Tests for mutating variables
+ */
+
+'use strict';
+
+const babelTypes = require('@babel/types');
+const sinon = require('sinon');
+
+const common = require('../mutators/common.js');
+const helpers = require('./helpers.js');
+const scriptMutator = require('../script_mutator.js');
+const sourceHelpers = require('../source_helpers.js');
+const variableMutator = require('../mutators/variable_mutator.js');
+
+const sandbox = sinon.createSandbox();
+
+describe('Mutate variables', () => {
+  beforeEach(() => {
+    sandbox.stub(
+        common, 'randomVariable').callsFake(
+            () => { return babelTypes.identifier('REPLACED') });
+  });
+
+  afterEach(() => {
+    sandbox.restore();
+  });
+
+  it('test', () => {
+    const source = helpers.loadTestData('mutate_variables.js');
+
+    const settings = scriptMutator.defaultSettings();
+    settings['MUTATE_VARIABLES'] = 1.0;
+
+    const mutator = new variableMutator.VariableMutator(settings);
+    mutator.mutate(source);
+
+    const mutated = sourceHelpers.generateCode(source);
+    helpers.assertExpectedResult(
+        'mutate_variables_expected.js', mutated);
+  });
+});
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_normalize.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_normalize.js
new file mode 100644
index 0000000..d92ad57
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_normalize.js
@@ -0,0 +1,42 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Test normalization.
+ */
+
+'use strict';
+
+const helpers = require('./helpers.js');
+const normalizer = require('../mutators/normalizer.js');
+const sourceHelpers = require('../source_helpers.js');
+
+describe('Normalize', () => {
+  it('test basic', () => {
+    const source = helpers.loadTestData('normalize.js');
+
+    const mutator = new normalizer.IdentifierNormalizer();
+    mutator.mutate(source);
+
+    const normalized_0 = sourceHelpers.generateCode(source);
+    helpers.assertExpectedResult(
+        'normalize_expected_0.js', normalized_0);
+
+    mutator.mutate(source);
+    const normalized_1 = sourceHelpers.generateCode(source);
+    helpers.assertExpectedResult(
+        'normalize_expected_1.js', normalized_1);
+  });
+
+  it('test simple_test.js', () => {
+    const source = helpers.loadTestData('simple_test.js');
+
+    const mutator = new normalizer.IdentifierNormalizer();
+    mutator.mutate(source);
+
+    const normalized = sourceHelpers.generateCode(source);
+    helpers.assertExpectedResult(
+        'simple_test_expected.js', normalized);
+  });
+});
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_random.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_random.js
new file mode 100644
index 0000000..b27b992
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_random.js
@@ -0,0 +1,51 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Test random utilities.
+ */
+
+'use strict';
+
+const assert = require('assert');
+const sinon = require('sinon');
+
+const { twoBucketSample } = require('../random.js');
+
+const sandbox = sinon.createSandbox();
+
+
+describe('Two-bucket choosing', () => {
+  afterEach(() => {
+    sandbox.restore();
+  });
+
+  it('with one empty', () => {
+    sandbox.stub(Math, 'random').callsFake(() => 0.5);
+    assert.deepEqual([1, 2], twoBucketSample([0, 1, 2], [], 1, 2));
+    assert.deepEqual([1, 2], twoBucketSample([], [0, 1, 2], 1, 2));
+    assert.deepEqual([0], twoBucketSample([0], [], 1, 1));
+    assert.deepEqual([0], twoBucketSample([], [0], 1, 1));
+  });
+
+  it('chooses with 0.3', () => {
+    sandbox.stub(Math, 'random').callsFake(() => 0.3);
+    assert.deepEqual([1, 2], twoBucketSample([0, 1, 2], [3, 4, 5], 1, 2));
+    // Higher factor.
+    assert.deepEqual([3, 5], twoBucketSample([0, 1, 2], [3, 4, 5], 4, 2));
+  });
+
+  it('chooses with 0.7', () => {
+    sandbox.stub(Math, 'random').callsFake(() => 0.7);
+    assert.deepEqual([4, 3], twoBucketSample([0, 1, 2], [3, 4, 5], 1, 2));
+  });
+
+  it('chooses with 0.5', () => {
+    sandbox.stub(Math, 'random').callsFake(() => 0.5);
+    assert.deepEqual([3], twoBucketSample([0, 1], [2, 3, 4, 5], 1, 1));
+    assert.deepEqual([3], twoBucketSample([0, 1, 2, 3], [4, 5], 1, 1));
+    // Higher factor.
+    assert.deepEqual([4], twoBucketSample([0, 1, 2, 3], [4, 5], 2, 1));
+  });
+});
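As the assertions above suggest, twoBucketSample(first, second, factor, count) samples count elements from the two buckets, with the bucket choice biased toward the second bucket by factor; a hedged usage sketch (exact picks depend on Math.random):

const { twoBucketSample } = require('../random.js');

// Draw two elements; with factor 4 the second bucket is strongly preferred.
const picks = twoBucketSample(['a', 'b'], ['c', 'd', 'e'], 4, 2);
console.log(picks);  // e.g. ['c', 'e'], depending on Math.random.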
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_regressions.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_regressions.js
new file mode 100644
index 0000000..a753c1c
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_regressions.js
@@ -0,0 +1,159 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Regression tests.
+ */
+
+'use strict';
+
+const assert = require('assert');
+const { execSync } = require("child_process");
+const fs = require('fs');
+const sinon = require('sinon');
+const tempfile = require('tempfile');
+const tempy = require('tempy');
+
+const exceptions = require('../exceptions.js');
+const helpers = require('./helpers.js');
+const scriptMutator = require('../script_mutator.js');
+
+const sandbox = sinon.createSandbox();
+
+const SYNTAX_ERROR_RE = /.*SyntaxError.*/;
+
+function createFuzzTest(fake_db, settings, inputFiles) {
+  const sources = inputFiles.map(input => helpers.loadTestData(input));
+
+  const mutator = new scriptMutator.ScriptMutator(settings, fake_db);
+  const result = mutator.mutateMultiple(sources);
+
+  const output_file = tempfile('.js');
+  fs.writeFileSync(output_file, result.code);
+  return output_file;
+}
+
+function execFile(jsFile) {
+  execSync("node " + jsFile, {stdio: ['pipe']});
+}
+
+function buildDb(inputDir, corpusName, outputDir) {
+  execSync(
+      `node build_db.js -i ${inputDir} -o ${outputDir} ${corpusName}`,
+      {stdio: ['pipe']});
+}
+
+function assertFuzzWithDbThrows(dbInputDir, corpusName, settings, regexp) {
+  const outPath = tempy.directory();
+  buildDb(dbInputDir, corpusName, outPath);
+
+  settings['MUTATE_CROSSOVER_INSERT'] = 1.0;
+  assert.throws(
+    () => {
+      createFuzzTest(
+          outPath, settings,
+          ['regress/build_db/cross_over_mutator_input.js']);
+    },
+    err => {
+      assert(regexp.test(err));
+      return true;
+    },
+    'unexpected error',
+  );
+}
+
+describe('Regression tests', () => {
+  beforeEach(() => {
+    helpers.deterministicRandom(sandbox);
+
+    this.settings = {
+      ADD_VAR_OR_OBJ_MUTATIONS: 0.0,
+      MUTATE_CROSSOVER_INSERT: 0.0,
+      MUTATE_EXPRESSIONS: 0.0,
+      MUTATE_FUNCTION_CALLS: 0.0,
+      MUTATE_NUMBERS: 0.0,
+      MUTATE_VARIABLES: 0.0,
+      engine: 'V8',
+      testing: true,
+    };
+  });
+
+  afterEach(() => {
+    sandbox.restore();
+  });
+
+  it('combine strict and with', () => {
+    // Test that when a file with "use strict" is used in the inputs,
+    // the result is only strict if no other file contains anything
+    // prohibited in strict mode (here a with statement).
+    // It is assumed that such input files are marked as sloppy in the
+    // auto generated exceptions.
+    sandbox.stub(exceptions, 'getGeneratedSloppy').callsFake(
+        () => { return new Set(['regress/strict/input_with.js']); });
+    const file = createFuzzTest(
+        'test_data/regress/strict/db',
+        this.settings,
+        ['regress/strict/input_strict.js', 'regress/strict/input_with.js']);
+    execFile(file);
+  });
+
+  it('combine strict and delete', () => {
+    // As above with unqualified delete.
+    sandbox.stub(exceptions, 'getGeneratedSloppy').callsFake(
+        () => { return new Set(['regress/strict/input_delete.js']); });
+    const file = createFuzzTest(
+        'test_data/regress/strict/db',
+        this.settings,
+        ['regress/strict/input_strict.js', 'regress/strict/input_delete.js']);
+    execFile(file);
+  });
+
+  it('mutates negative value', () => {
+    // This tests that the combination of number, function call and expression
+    // mutator does't produce an update expression.
+    // Previously the 1 in -1 was replaced with another negative number leading
+    // to e.g. -/*comment/*-2. Then cloning the expression removed the
+    // comment and produced --2 in the end.
+    this.settings['MUTATE_NUMBERS'] = 1.0;
+    this.settings['MUTATE_FUNCTION_CALLS'] = 1.0;
+    this.settings['MUTATE_EXPRESSIONS'] = 1.0;
+    const file = createFuzzTest(
+        'test_data/regress/numbers/db',
+        this.settings,
+        ['regress/numbers/input_negative.js']);
+    execFile(file);
+  });
+
+  it('mutates indices', () => {
+    // Test that indices are not replaced with a negative number causing a
+    // syntax error (e.g. {-1: ""}).
+    this.settings['MUTATE_NUMBERS'] = 1.0;
+    const file = createFuzzTest(
+        'test_data/regress/numbers/db',
+        this.settings,
+        ['regress/numbers/input_indices.js']);
+    execFile(file);
+  });
+
+  it('create call expression', () => {
+    // TODO(machenbach): Build_db extracts a function expression without
+    // parentheses, re-parsing this later fails in cross-over mutator.
+    assertFuzzWithDbThrows(
+        'test_data/regress/build_db',
+        'destructuring',
+        this.settings,
+        SYNTAX_ERROR_RE);
+  });
+
+  it('create assignment expression', () => {
+    // TODO(machenbach): Build_db extracts some assignment expressions with a
+    // spurious dependency. This leads to an "unknown substitution" error
+    // when applying the template.
+    assertFuzzWithDbThrows(
+        'test_data/regress/build_db',
+        'this',
+        this.settings,
+        /.*Unknown substitution.*/);
+  });
+});
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_try_catch.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_try_catch.js
new file mode 100644
index 0000000..b7cd296
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test/test_try_catch.js
@@ -0,0 +1,85 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Tests for wrapping code in try-catch.
+ */
+
+'use strict';
+
+const sinon = require('sinon');
+
+const common = require('../mutators/common.js');
+const helpers = require('./helpers.js');
+const random = require('../random.js');
+const sourceHelpers = require('../source_helpers.js');
+const tryCatch = require('../mutators/try_catch.js');
+
+const sandbox = sinon.createSandbox();
+
+function loadSource() {
+  return helpers.loadTestData('try_catch.js');
+}
+
+function testTryCatch(source, expected) {
+  const mutator = new tryCatch.AddTryCatchMutator();
+  mutator.mutate(source);
+
+  const mutated = sourceHelpers.generateCode(source);
+  helpers.assertExpectedResult(expected, mutated);
+}
+
+describe('Try catch', () => {
+  afterEach(() => {
+    sandbox.restore();
+  });
+
+  // Wrap on exit, hence wrap everything nested.
+  it('wraps all', () => {
+    sandbox.stub(random, 'choose').callsFake(() => { return false; });
+    sandbox.stub(random, 'random').callsFake(() => { return 0.7; });
+    testTryCatch(loadSource(), 'try_catch_expected.js');
+  });
+
+  // Wrap on enter and skip.
+  it('wraps toplevel', () => {
+    sandbox.stub(random, 'choose').callsFake(() => { return false; });
+    sandbox.stub(random, 'random').callsFake(() => { return 0.04; });
+    const source = loadSource();
+
+    // Fake source fraction 0.1 (i.e. the second of 10 files).
+    // Probability for toplevel try-catch is 0.05.
+    common.setSourceLoc(source, 1, 10);
+
+    testTryCatch(source, 'try_catch_toplevel_expected.js');
+  });
+
+  // Choose the rare case of skipping try-catch.
+  it('wraps nothing', () => {
+    sandbox.stub(random, 'choose').callsFake(() => { return false; });
+    sandbox.stub(random, 'random').callsFake(() => { return 0.01; });
+    const source = loadSource();
+
+    // Fake source fraction 0.1 (i.e. the second of 10 files).
+    // Probability for skipping is 0.02.
+    common.setSourceLoc(source, 1, 10);
+
+    testTryCatch(source, 'try_catch_nothing_expected.js');
+  });
+
+  // Choose to alter the target probability to 0.9 resulting in skipping
+  // all try-catch.
+  it('wraps nothing with high target probability', () => {
+    sandbox.stub(random, 'choose').callsFake(() => { return true; });
+    sandbox.stub(random, 'uniform').callsFake(() => { return 0.9; });
+    sandbox.stub(random, 'random').callsFake(() => { return 0.8; });
+    const source = loadSource();
+
+    // Fake source fraction 0.9 (i.e. the last of 10 files).
+    // Probability for skipping is 0.81 (0.9 * 0.9).
+    common.setSourceLoc(source, 9, 10);
+
+    testTryCatch(source, 'try_catch_alternate_expected.js');
+  });
+});
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/JSTests/fake_stub.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/JSTests/fake_stub.js
new file mode 100644
index 0000000..57f5006
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/JSTests/fake_stub.js
@@ -0,0 +1,5 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+print("Fake stub");
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/JSTests/load.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/JSTests/load.js
new file mode 100644
index 0000000..41582e0
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/JSTests/load.js
@@ -0,0 +1,5 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+print("JSTest");
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/JSTests/load_expected.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/JSTests/load_expected.js
new file mode 100644
index 0000000..113397c
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/JSTests/load_expected.js
@@ -0,0 +1,9 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Original: JSTests/fake_stub.js
+print("Fake stub");
+
+// Original: JSTests/load.js
+print("JSTest");
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/available_variables.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/available_variables.js
new file mode 100644
index 0000000..d711567
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/available_variables.js
@@ -0,0 +1,33 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let __v_0 = 0;
+let __v_1 = 0;
+
+console.log(__v_0, __v_1, __f_0, __f_1);
+
+function __f_0() {
+  let __v_2 = 0;
+  console.log(__v_0, __v_1, __v_2, __f_0, __f_1);
+}
+
+let __v_3 = 0;
+
+console.log(__v_0, __v_1, __v_3, __f_0, __f_1);
+
+function __f_1(__v_7) {
+  let __v_4 = 0;
+
+  console.log(__v_0, __v_1, __v_3, __v_4, __v_7, __f_0, __f_1);
+  {
+    let __v_5 = 0;
+    var __v_6 = 0;
+    console.log(__v_0, __v_1, __v_3, __v_4, __v_5, __v_6, __v_7, __f_0, __f_1, __f_2);
+    function __f_2 () {};
+    console.log(__v_0, __v_1, __v_3, __v_4, __v_5, __v_6, __v_7, __f_0, __f_1, __f_2);
+  }
+  // TODO(machenbach): __f_2 is missing as available identifier.
+  console.log(__v_0, __v_1, __v_3, __v_4, __v_6, __v_7, __f_0, __f_1, __f_2);
+}
+console.log(__v_0, __v_1, __v_3, __f_0, __f_1);
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/available_variables_expected.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/available_variables_expected.js
new file mode 100644
index 0000000..4562bb2
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/available_variables_expected.js
@@ -0,0 +1,270 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+[
+  {
+    "variables": [
+      {
+        "type": "Identifier",
+        "name": "__v_0"
+      },
+      {
+        "type": "Identifier",
+        "name": "__v_1"
+      }
+    ],
+    "functions": [
+      {
+        "type": "Identifier",
+        "name": "__f_0"
+      },
+      {
+        "type": "Identifier",
+        "name": "__f_1"
+      }
+    ]
+  },
+  {
+    "variables": [
+      {
+        "type": "Identifier",
+        "name": "__v_2"
+      },
+      {
+        "type": "Identifier",
+        "name": "__v_0"
+      },
+      {
+        "type": "Identifier",
+        "name": "__v_1"
+      }
+    ],
+    "functions": [
+      {
+        "type": "Identifier",
+        "name": "__f_0"
+      },
+      {
+        "type": "Identifier",
+        "name": "__f_1"
+      }
+    ]
+  },
+  {
+    "variables": [
+      {
+        "type": "Identifier",
+        "name": "__v_0"
+      },
+      {
+        "type": "Identifier",
+        "name": "__v_1"
+      },
+      {
+        "type": "Identifier",
+        "name": "__v_3"
+      }
+    ],
+    "functions": [
+      {
+        "type": "Identifier",
+        "name": "__f_0"
+      },
+      {
+        "type": "Identifier",
+        "name": "__f_1"
+      }
+    ]
+  },
+  {
+    "variables": [
+      {
+        "type": "Identifier",
+        "name": "__v_7"
+      },
+      {
+        "type": "Identifier",
+        "name": "__v_4"
+      },
+      {
+        "type": "Identifier",
+        "name": "__v_0"
+      },
+      {
+        "type": "Identifier",
+        "name": "__v_1"
+      },
+      {
+        "type": "Identifier",
+        "name": "__v_3"
+      }
+    ],
+    "functions": [
+      {
+        "type": "Identifier",
+        "name": "__f_0"
+      },
+      {
+        "type": "Identifier",
+        "name": "__f_1"
+      }
+    ]
+  },
+  {
+    "variables": [
+      {
+        "type": "Identifier",
+        "name": "__v_5"
+      },
+      {
+        "type": "Identifier",
+        "name": "__v_7"
+      },
+      {
+        "type": "Identifier",
+        "name": "__v_4"
+      },
+      {
+        "type": "Identifier",
+        "name": "__v_6"
+      },
+      {
+        "type": "Identifier",
+        "name": "__v_0"
+      },
+      {
+        "type": "Identifier",
+        "name": "__v_1"
+      },
+      {
+        "type": "Identifier",
+        "name": "__v_3"
+      }
+    ],
+    "functions": [
+      {
+        "type": "Identifier",
+        "name": "__f_2"
+      },
+      {
+        "type": "Identifier",
+        "name": "__f_0"
+      },
+      {
+        "type": "Identifier",
+        "name": "__f_1"
+      }
+    ]
+  },
+  {
+    "variables": [
+      {
+        "type": "Identifier",
+        "name": "__v_5"
+      },
+      {
+        "type": "Identifier",
+        "name": "__v_7"
+      },
+      {
+        "type": "Identifier",
+        "name": "__v_4"
+      },
+      {
+        "type": "Identifier",
+        "name": "__v_6"
+      },
+      {
+        "type": "Identifier",
+        "name": "__v_0"
+      },
+      {
+        "type": "Identifier",
+        "name": "__v_1"
+      },
+      {
+        "type": "Identifier",
+        "name": "__v_3"
+      }
+    ],
+    "functions": [
+      {
+        "type": "Identifier",
+        "name": "__f_2"
+      },
+      {
+        "type": "Identifier",
+        "name": "__f_0"
+      },
+      {
+        "type": "Identifier",
+        "name": "__f_1"
+      }
+    ]
+  },
+  {
+    "variables": [
+      {
+        "type": "Identifier",
+        "name": "__v_7"
+      },
+      {
+        "type": "Identifier",
+        "name": "__v_4"
+      },
+      {
+        "type": "Identifier",
+        "name": "__v_6"
+      },
+      {
+        "type": "Identifier",
+        "name": "__v_0"
+      },
+      {
+        "type": "Identifier",
+        "name": "__v_1"
+      },
+      {
+        "type": "Identifier",
+        "name": "__v_3"
+      }
+    ],
+    "functions": [
+      {
+        "type": "Identifier",
+        "name": "__f_0"
+      },
+      {
+        "type": "Identifier",
+        "name": "__f_1"
+      }
+    ]
+  },
+  {
+    "variables": [
+      {
+        "type": "Identifier",
+        "name": "__v_0"
+      },
+      {
+        "type": "Identifier",
+        "name": "__v_1"
+      },
+      {
+        "type": "Identifier",
+        "name": "__v_3"
+      }
+    ],
+    "functions": [
+      {
+        "type": "Identifier",
+        "name": "__f_0"
+      },
+      {
+        "type": "Identifier",
+        "name": "__f_1"
+      }
+    ]
+  }
+]
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/chakra/dir/load3.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/chakra/dir/load3.js
new file mode 100644
index 0000000..c836f1b
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/chakra/dir/load3.js
@@ -0,0 +1,6 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+WScript.LoadScriptFile("..\\load2.js", "self");
+console.log('load3');
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/chakra/load.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/chakra/load.js
new file mode 100644
index 0000000..bfddb5d
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/chakra/load.js
@@ -0,0 +1,9 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+if (this.WScript && this.WScript.LoadScriptFile) {
+  WScript.LoadScriptFile("load1.js");
+}
+
+console.log('load.js');
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/chakra/load1.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/chakra/load1.js
new file mode 100644
index 0000000..14f6814
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/chakra/load1.js
@@ -0,0 +1,8 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test case insensitivity.
+WScript.LoadScriptFile("DIR\\LoAd3.js");
+
+console.log('load1.js');
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/chakra/load2.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/chakra/load2.js
new file mode 100644
index 0000000..a8ee5d2
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/chakra/load2.js
@@ -0,0 +1,5 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+console.log('load2.js');
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/chakra/load_expected.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/chakra/load_expected.js
new file mode 100644
index 0000000..9711227
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/chakra/load_expected.js
@@ -0,0 +1,17 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Original: chakra/load2.js
+console.log('load2.js');
+
+// Original: chakra/dir/load3.js
+console.log('load3');
+
+// Original: chakra/load1.js
+console.log('load1.js');
+
+// Original: chakra/load.js
+if (this.WScript && this.WScript.LoadScriptFile) {}
+
+console.log('load.js');
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/differential_fuzz/combined_expected.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/differential_fuzz/combined_expected.js
new file mode 100644
index 0000000..1caeebd
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/differential_fuzz/combined_expected.js
@@ -0,0 +1,59 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Original: differential_fuzz/fake_resource.js
+print("I'm a resource.");
+
+// Original: differential_fuzz/fake_resource.js
+print("I'm a resource.");
+
+// Original: differential_fuzz/fake_resource.js
+print("I'm a resource.");
+
+// Original: differential_fuzz/fake_resource.js
+print("I'm a resource.");
+
+// Original: differential_fuzz/fake_resource.js
+print("I'm a resource.");
+
+/* DifferentialFuzzMutator: Print variables and exceptions from section */
+try {
+  print("Hash: " + __hash);
+  print("Caught: " + __caught);
+} catch (e) {}
+
+print("v8-foozzie source: differential_fuzz/input1.js");
+
+// Original: differential_fuzz/input1.js
+try {
+  var __v_0 = 0;
+} catch (e) {}
+
+try {
+  /* DifferentialFuzzMutator: Pretty printing */
+  __prettyPrintExtra(__v_0);
+} catch (e) {}
+
+/* DifferentialFuzzMutator: Print variables and exceptions from section */
+try {
+  print("Hash: " + __hash);
+  print("Caught: " + __caught);
+
+  __prettyPrint(__v_0);
+} catch (e) {}
+
+print("v8-foozzie source: differential_fuzz/input2.js");
+
+// Original: differential_fuzz/input2.js
+let __v_1 = 1;
+
+/* DifferentialFuzzMutator: Print variables and exceptions from section */
+try {
+  print("Hash: " + __hash);
+  print("Caught: " + __caught);
+
+  __prettyPrint(__v_0);
+
+  __prettyPrint(__v_1);
+} catch (e) {}
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/differential_fuzz/exceptions.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/differential_fuzz/exceptions.js
new file mode 100644
index 0000000..2ff571f
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/differential_fuzz/exceptions.js
@@ -0,0 +1,8 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+try {
+  let __v_0 = boom;
+} catch (e) {}
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/differential_fuzz/exceptions_expected.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/differential_fuzz/exceptions_expected.js
new file mode 100644
index 0000000..b5498a5
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/differential_fuzz/exceptions_expected.js
@@ -0,0 +1,16 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Original: differential_fuzz/exceptions.js
+try {
+  let __v_0 = boom;
+} catch (e) {
+  __caught++;
+}
+
+/* DifferentialFuzzMutator: Print variables and exceptions from section */
+try {
+  print("Hash: " + __hash);
+  print("Caught: " + __caught);
+} catch (e) {}
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/differential_fuzz/fake_resource.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/differential_fuzz/fake_resource.js
new file mode 100644
index 0000000..c417acd
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/differential_fuzz/fake_resource.js
@@ -0,0 +1,7 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+// This file represents anything loaded from the resources directory.
+print("I'm a resource.");
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/differential_fuzz/input1.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/differential_fuzz/input1.js
new file mode 100644
index 0000000..cbf2e85
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/differential_fuzz/input1.js
@@ -0,0 +1,9 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --flag1 --flag2
+// Flags: --flag3
+
+var a = 0;
+print(a);
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/differential_fuzz/input2.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/differential_fuzz/input2.js
new file mode 100644
index 0000000..e22b1cc
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/differential_fuzz/input2.js
@@ -0,0 +1,7 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --flag4
+
+let b = 1;
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/differential_fuzz/mutations.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/differential_fuzz/mutations.js
new file mode 100644
index 0000000..606527b
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/differential_fuzz/mutations.js
@@ -0,0 +1,26 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+// Print after declaration.
+var __v_0 = [1, 2, 3];
+
+// Don't print after declarations or assignments in loops.
+for (let __v_1 = 0; __v_1 < 3; __v_1 += 1) {
+
+  // Print after multiple declarations.
+  let __v_2, __v_3 = 0;
+
+  // Print after assigning to member.
+  __v_0.foo = undefined;
+
+  // Replace with deep printing.
+  print(0);
+
+  // Print exception.
+  try {
+    // Print after assignment.
+    __v_1 += 1;
+  } catch(e) {}
+}
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/differential_fuzz/mutations_expected.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/differential_fuzz/mutations_expected.js
new file mode 100644
index 0000000..5b7ff6f
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/differential_fuzz/mutations_expected.js
@@ -0,0 +1,44 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Original: differential_fuzz/mutations.js
+var __v_0 = [1, 2, 3];
+
+/* DifferentialFuzzMutator: Extra variable printing */
+__prettyPrintExtra(__v_0);
+
+for (let __v_1 = 0; __v_1 < 3; __v_1 += 1) {
+  let __v_2,
+      __v_3 = 0;
+
+  /* DifferentialFuzzMutator: Extra variable printing */
+  __prettyPrintExtra(__v_2);
+
+  __prettyPrintExtra(__v_3);
+
+  __v_0.foo = undefined;
+
+  /* DifferentialFuzzMutator: Extra variable printing */
+  __prettyPrintExtra(__v_0);
+
+  /* DifferentialFuzzMutator: Pretty printing */
+  __prettyPrintExtra(0);
+
+  try {
+    __v_1 += 1;
+
+    /* DifferentialFuzzMutator: Extra variable printing */
+    __prettyPrintExtra(__v_1);
+  } catch (e) {
+    __prettyPrintExtra(e);
+  }
+}
+
+/* DifferentialFuzzMutator: Print variables and exceptions from section */
+try {
+  print("Hash: " + __hash);
+  print("Caught: " + __caught);
+
+  __prettyPrint(__v_0);
+} catch (e) {}
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/differential_fuzz/suppressions.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/differential_fuzz/suppressions.js
new file mode 100644
index 0000000..59be2c3
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/differential_fuzz/suppressions.js
@@ -0,0 +1,15 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// These statements might come from a CrashTest.
+print("v8-foozzie source: some/file/name");
+print('v8-foozzie source: some/file/name');
+
+function foo(__v_0) {
+  // This is an unsupported language feature.
+  return 1 in foo.arguments;
+}
+
+// This leads to precision differences in optimized code.
+print(192 ** -0.5);
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/differential_fuzz/suppressions_expected.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/differential_fuzz/suppressions_expected.js
new file mode 100644
index 0000000..bf78cae
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/differential_fuzz/suppressions_expected.js
@@ -0,0 +1,21 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Original: differential_fuzz/suppressions.js
+print(
+/* DifferentialFuzzSuppressions: Replaced magic string */
+"v***************e: some/file/name");
+print(
+/* DifferentialFuzzSuppressions: Replaced magic string */
+"v***************e: some/file/name");
+
+function foo(__v_0) {
+  return 1 in
+  /* DifferentialFuzzSuppressions: Replaced .arguments */
+  __v_0;
+}
+
+print(
+/* DifferentialFuzzSuppressions: Replaced ** */
+192 + -0.5);
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/differential_fuzz/v8_fuzz_experiments.json b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/differential_fuzz/v8_fuzz_experiments.json
new file mode 100644
index 0000000..9f83ff5
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/differential_fuzz/v8_fuzz_experiments.json
@@ -0,0 +1,3 @@
+[
+  [100, "ignition", "ignition_turbo", "d8"]
+]
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/differential_fuzz/v8_fuzz_flags.json b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/differential_fuzz/v8_fuzz_flags.json
new file mode 100644
index 0000000..cbb498c
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/differential_fuzz/v8_fuzz_flags.json
@@ -0,0 +1,3 @@
+[
+  [1.0, "--foo1 --foo2"]
+]
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/fake_db/index.json b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/fake_db/index.json
new file mode 100644
index 0000000..0967ef4
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/fake_db/index.json
@@ -0,0 +1 @@
+{}
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/mjsunit.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/mjsunit.js
new file mode 100644
index 0000000..1506cd3
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/mjsunit.js
@@ -0,0 +1,5 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var fakeMjsunit = 'fake';
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load.js
new file mode 100644
index 0000000..342c9d8
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load.js
@@ -0,0 +1,7 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var testLoad = 'test_load';
+load('test_data/mjsunit/test_load_1.js');
+load('test_load_0.js');
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_0.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_0.js
new file mode 100644
index 0000000..d0e66e4
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_0.js
@@ -0,0 +1,8 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test_data/mjsunit/test_load_1.js');
+load('test_load_2.js');
+load('test_load_3.js');
+var testLoad0 = 'test_load_0';
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_1.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_1.js
new file mode 100644
index 0000000..03c9166
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_1.js
@@ -0,0 +1,6 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test_load_2.js');
+var testLoad1 = 'test_load_1';
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_2.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_2.js
new file mode 100644
index 0000000..47ef1c7
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_2.js
@@ -0,0 +1,5 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var testLoad2 = 'test_load_2';
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_3.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_3.js
new file mode 100644
index 0000000..c6a1546
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_3.js
@@ -0,0 +1,5 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var testLoad3 = 'test_load_3';
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_expected.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_expected.js
new file mode 100644
index 0000000..e545df8
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_expected.js
@@ -0,0 +1,21 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Original: mjsunit/mjsunit.js
+var fakeMjsunit = 'fake';
+
+// Original: mjsunit/test_load_2.js
+var testLoad2 = 'test_load_2';
+
+// Original: mjsunit/test_load_1.js
+var testLoad1 = 'test_load_1';
+
+// Original: mjsunit/test_load_3.js
+var testLoad3 = 'test_load_3';
+
+// Original: mjsunit/test_load_0.js
+var testLoad0 = 'test_load_0';
+
+// Original: mjsunit/test_load.js
+var testLoad = 'test_load';
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_self.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_self.js
new file mode 100644
index 0000000..31a9f4c
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_self.js
@@ -0,0 +1,5 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load("test_load_self.js");
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_self_expected.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_self_expected.js
new file mode 100644
index 0000000..495ebf9
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_self_expected.js
@@ -0,0 +1,6 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Original: mjsunit/mjsunit.js
+var fakeMjsunit = 'fake';
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit_softskipped/object-literal.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit_softskipped/object-literal.js
new file mode 100644
index 0000000..65ffc18
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit_softskipped/object-literal.js
@@ -0,0 +1,5 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Fake file
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit_softskipped/permitted.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit_softskipped/permitted.js
new file mode 100644
index 0000000..65ffc18
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit_softskipped/permitted.js
@@ -0,0 +1,5 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Fake file
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit_softskipped/regress/binaryen-123.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit_softskipped/regress/binaryen-123.js
new file mode 100644
index 0000000..65ffc18
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit_softskipped/regress/binaryen-123.js
@@ -0,0 +1,5 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Fake file
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_arrays.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_arrays.js
new file mode 100644
index 0000000..8d907ae
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_arrays.js
@@ -0,0 +1,34 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+[];
+[];
+[];
+[];
+[];
+[];
+[];
+[];
+[];
+[];
+[1, 2, 3];
+[1, 2, 3];
+[1, 2, 3];
+[1, 2, 3];
+[1, 2, 3];
+[1, 2, 3];
+[1, 2, 3];
+[1, 2, 3];
+[1, 2, 3];
+[1, 2, 3];
+[1, 2, 3];
+[1, 2, 3];
+[1, 2, 3];
+[1, 2, 3];
+[1, 2, 3];
+[1, 2, 3];
+[1, 2, 3];
+[1, 2, 3];
+[1, 2, 3];
+[1, 2, 3];
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_arrays_expected.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_arrays_expected.js
new file mode 100644
index 0000000..ce53377
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_arrays_expected.js
@@ -0,0 +1,109 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Original: mutate_arrays.js
+
+/* ArrayMutator: Remove elements */
+
+/* ArrayMutator: Insert a hole */
+[];
+[];
+
+/* ArrayMutator: Shuffle array */
+[];
+
+/* ArrayMutator: Insert a random value */
+[""];
+
+/* ArrayMutator: Insert a random value (replaced) */
+[""];
+
+/* ArrayMutator: Insert a hole (replaced) */
+[,];
+[];
+
+/* ArrayMutator: Insert a hole (replaced) */
+[,];
+
+/* ArrayMutator: Remove elements */
+[];
+
+/* ArrayMutator: Remove elements */
+[];
+
+/* ArrayMutator: Duplicate an element */
+[1, 1, 2, 3];
+
+/* ArrayMutator: Insert a random value (replaced) */
+[1, "", 3];
+
+/* ArrayMutator: Remove elements */
+[];
+
+/* ArrayMutator: Duplicate an element */
+[1, 2, 3, 2];
+
+/* ArrayMutator: Remove elements */
+[3];
+
+/* ArrayMutator: Duplicate an element (replaced) */
+[1, 2, 3];
+
+/* ArrayMutator: Insert a hole (replaced) */
+
+/* ArrayMutator: Duplicate an element (replaced) */
+[1, 2,,];
+
+/* ArrayMutator: Remove elements */
+[1, 2];
+
+/* ArrayMutator: Insert a hole (replaced) */
+
+/* ArrayMutator: Duplicate an element */
+[1, 1, 2,,];
+
+/* ArrayMutator: Shuffle array */
+[2, 1, 3];
+
+/* ArrayMutator: Remove elements */
+
+/* ArrayMutator: Remove elements */
+[3];
+
+/* ArrayMutator: Duplicate an element (replaced) */
+[1, 2, 1];
+
+/* ArrayMutator: Duplicate an element (replaced) */
+
+/* ArrayMutator: Duplicate an element (replaced) */
+[1, 2, 2];
+
+/* ArrayMutator: Insert a random value */
+[1, 2, 3, ""];
+
+/* ArrayMutator: Duplicate an element */
+[1, 2, 3, 3];
+
+/* ArrayMutator: Remove elements */
+
+/* ArrayMutator: Duplicate an element */
+[1, 2];
+
+/* ArrayMutator: Insert a random value (replaced) */
+
+/* ArrayMutator: Duplicate an element (replaced) */
+[1, 2, ""];
+
+/* ArrayMutator: Insert a random value (replaced) */
+
+/* ArrayMutator: Insert a random value (replaced) */
+["", 2, 3];
+
+/* ArrayMutator: Duplicate an element */
+
+/* ArrayMutator: Remove elements */
+[1, 1, 3];
+
+/* ArrayMutator: Remove elements */
+[1, 2];
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_expressions.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_expressions.js
new file mode 100644
index 0000000..0ffae11
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_expressions.js
@@ -0,0 +1,8 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+1;
+let foo = undefined;
+2;
+3;
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_expressions_current_expected.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_expressions_current_expected.js
new file mode 100644
index 0000000..f3d4e49
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_expressions_current_expected.js
@@ -0,0 +1,15 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Original: mutate_expressions.js
+1;
+
+/* ExpressionMutator: Cloned sibling */
+2;
+let foo = undefined;
+
+/* ExpressionMutator: Cloned sibling */
+3;
+2;
+3;
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_expressions_previous_expected.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_expressions_previous_expected.js
new file mode 100644
index 0000000..1473183
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_expressions_previous_expected.js
@@ -0,0 +1,12 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Original: mutate_expressions.js
+1;
+let foo = undefined;
+2;
+3;
+
+/* ExpressionMutator: Cloned sibling */
+2;
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_function_call.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_function_call.js
new file mode 100644
index 0000000..2da557d
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_function_call.js
@@ -0,0 +1,7 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+__f_0(1);
+a = __f_0(1);
+foo(1, __f_0());
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_function_call_deopt_expected.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_function_call_deopt_expected.js
new file mode 100644
index 0000000..085322c
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_function_call_deopt_expected.js
@@ -0,0 +1,19 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var _temp, _temp2;
+
+// Original: mutate_function_call.js
+
+/* FunctionCallMutator: Deoptimizing __f_0 */
+__f_0(1);
+
+%DeoptimizeFunction(__f_0);
+
+a = (
+/* FunctionCallMutator: Deoptimizing __f_0 */
+_temp = __f_0(1), %DeoptimizeFunction(__f_0), _temp);
+foo(1, (
+/* FunctionCallMutator: Deoptimizing __f_0 */
+_temp2 = __f_0(), %DeoptimizeFunction(__f_0), _temp2));
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_function_call_expected.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_function_call_expected.js
new file mode 100644
index 0000000..f4fe3cb
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_function_call_expected.js
@@ -0,0 +1,23 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+%PrepareFunctionForOptimization(__f_0);
+
+__f_0(1);
+
+__f_0(1);
+
+%OptimizeFunctionOnNextCall(__f_0);
+
+// Original: mutate_function_call.js
+
+/* FunctionCallMutator: Optimizing __f_0 */
+__f_0(1);
+
+a = (
+/* FunctionCallMutator: Optimizing __f_0 */
+%PrepareFunctionForOptimization(__f_0), __f_0(1), __f_0(1), %OptimizeFunctionOnNextCall(__f_0), __f_0(1));
+foo(1, (
+/* FunctionCallMutator: Optimizing __f_0 */
+%PrepareFunctionForOptimization(__f_0), __f_0(), __f_0(), %OptimizeFunctionOnNextCall(__f_0), __f_0()));
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_numbers.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_numbers.js
new file mode 100644
index 0000000..44bfc4a
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_numbers.js
@@ -0,0 +1,22 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+function foo() {
+  let a = 123;
+  for (let i = 0; i < 456; i++) {
+    a += 1;
+  }
+
+  let b = 0;
+  while (b < 10) {
+    b += 2;
+  }
+
+  a += 1;
+}
+
+var a = {0: "", 1: "", get 1(){}};
+var b = -10;
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_numbers_expected.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_numbers_expected.js
new file mode 100644
index 0000000..cac1c60
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_numbers_expected.js
@@ -0,0 +1,46 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+// Original: mutate_numbers.js
+function foo() {
+  let a =
+  /* NumberMutator: Replaced 123 with -5 */
+  -5;
+
+  for (let i = 0; i < 456; i++) {
+    a +=
+    /* NumberMutator: Replaced 1 with -4 */
+    -4;
+  }
+
+  let b =
+  /* NumberMutator: Replaced 0 with -3 */
+  -3;
+
+  while (b < 10) {
+    b += 2;
+  }
+
+  a +=
+  /* NumberMutator: Replaced 1 with -5 */
+  -5;
+}
+
+var a = {
+  /* NumberMutator: Replaced 0 with 4 */
+  4: "",
+
+  /* NumberMutator: Replaced 1 with 3 */
+  3: "",
+
+  get
+  /* NumberMutator: Replaced 1 with 5 */
+  5() {}
+
+};
+var b =
+/* NumberMutator: Replaced -10 with -4 */
+-4;
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_objects.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_objects.js
new file mode 100644
index 0000000..3f70329
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_objects.js
@@ -0,0 +1,41 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Empty objects are not manipulated.
+a = {};
+a = {};
+a = {};
+a = {};
+a = {};
+a = {};
+
+// Small objects only get some mutations.
+a = {1: 0};
+a = {a: 0};
+a = {"s": 0};
+a = {1: 0};
+a = {a: 0};
+a = {"s": 0};
+
+// Larger objects get all mutations.
+a = {1: "a", 2: "b", 3: "c"};
+a = {1: "a", 2: "b", 3: "c"};
+a = {1: "a", 2: "b", 3: "c"};
+a = {1: "a", 2: "b", 3: "c"};
+a = {1: "a", 2: "b", 3: "c"};
+a = {1: "a", 2: "b", 3: "c"};
+a = {1: "a", 2: "b", 3: "c"};
+a = {1: "a", 2: "b", 3: "c"};
+a = {1: "a", 2: "b", 3: "c"};
+a = {1: "a", 2: "b", 3: "c"};
+
+// Getters and setters are ignored.
+a = {get bar() { return 0 }, 1: 0, set bar(t) {}};
+a = {get bar() { return 0 }, 1: 0, set bar(t) {}};
+a = {get bar() { return 0 }, 1: 0, set bar(t) {}};
+
+// Recursive.
+a = {1: {4: "4", 5: "5", 6: "6"}, 2: {3: "3"}};
+a = {1: {4: "4", 5: "5", 6: "6"}, 2: {3: "3"}};
+a = {1: {4: "4", 5: "5", 6: "6"}, 2: {3: "3"}};
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_objects_expected.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_objects_expected.js
new file mode 100644
index 0000000..6c3e3cf
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_objects_expected.js
@@ -0,0 +1,182 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Original: mutate_objects.js
+a = {};
+a = {};
+a = {};
+a = {};
+a = {};
+a = {};
+a =
+/* ObjectMutator: Insert a random value */
+{
+  1: ""
+};
+a = {
+  a: 0
+};
+a =
+/* ObjectMutator: Insert a random value */
+{
+  "s": ""
+};
+a =
+/* ObjectMutator: Stringify a property key */
+{
+  "1": 0
+};
+a =
+/* ObjectMutator: Remove a property */
+{};
+a = {
+  "s": 0
+};
+a =
+/* ObjectMutator: Swap properties */
+{
+  1: "c",
+  2: "b",
+  3: "a"
+};
+a =
+/* ObjectMutator: Remove a property */
+{
+  2: "b",
+  3: "c"
+};
+a =
+/* ObjectMutator: Insert a random value */
+{
+  1: "a",
+  2: "",
+  3: "c"
+};
+a =
+/* ObjectMutator: Swap properties */
+{
+  1: "b",
+  2: "a",
+  3: "c"
+};
+a =
+/* ObjectMutator: Swap properties */
+{
+  1: "c",
+  2: "b",
+  3: "a"
+};
+a =
+/* ObjectMutator: Stringify a property key */
+{
+  "1": "a",
+  2: "b",
+  3: "c"
+};
+a =
+/* ObjectMutator: Remove a property */
+{
+  2: "b",
+  3: "c"
+};
+a =
+/* ObjectMutator: Swap properties */
+{
+  1: "b",
+  2: "a",
+  3: "c"
+};
+a =
+/* ObjectMutator: Duplicate a property value */
+{
+  1: "c",
+  2: "b",
+  3: "c"
+};
+a =
+/* ObjectMutator: Duplicate a property value */
+{
+  1: "a",
+  2: "b",
+  3: "b"
+};
+a = {
+  get bar() {
+    return 0;
+  },
+
+  1: 0,
+
+  set bar(t) {}
+
+};
+a =
+/* ObjectMutator: Insert a random value */
+{
+  get bar() {
+    return 0;
+  },
+
+  1: "",
+
+  set bar(t) {}
+
+};
+a =
+/* ObjectMutator: Remove a property */
+{
+  get bar() {
+    return 0;
+  },
+
+  set bar(t) {}
+
+};
+a =
+/* ObjectMutator: Duplicate a property value */
+{
+  1:
+  /* ObjectMutator: Remove a property */
+  {},
+  2:
+  /* ObjectMutator: Stringify a property key */
+  {
+    "3": "3"
+  }
+};
+a =
+/* ObjectMutator: Duplicate a property value */
+{
+  1:
+  /* ObjectMutator: Swap properties */
+  {
+    4: "4",
+    5: "6",
+    6: "5"
+  },
+  2:
+  /* ObjectMutator: Remove a property */
+  {
+    5: "5",
+    6: "6"
+  }
+};
+a =
+/* ObjectMutator: Duplicate a property value */
+{
+  1:
+  /* ObjectMutator: Swap properties */
+  {
+    4: "6",
+    5: "5",
+    6: "4"
+  },
+  2:
+  /* ObjectMutator: Stringify a property key */
+  {
+    4: "4",
+    5: "5",
+    "6": "6"
+  }
+};
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_var_or_obj.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_var_or_obj.js
new file mode 100644
index 0000000..d8a6583
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_var_or_obj.js
@@ -0,0 +1,10 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let __v_0 = {};
+Math.pow(1, 2);
+Math.pow(1, 2);
+Math.pow(1, 2);
+Math.pow(1, 2);
+Math.pow(1, 2);
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_var_or_obj_expected.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_var_or_obj_expected.js
new file mode 100644
index 0000000..19a4d5f
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_var_or_obj_expected.js
@@ -0,0 +1,37 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Original: mutate_var_or_obj.js
+let __v_0 = {};
+
+/* VariableOrObjectMutator: Random mutation */
+delete __getRandomObject(123)[__getRandomProperty(__getRandomObject(123), 123)], __callGC();
+__getRandomObject(123)[__getRandomProperty(__getRandomObject(123), 123)], __callGC();
+Math.pow(1, 2);
+
+/* VariableOrObjectMutator: Random mutation */
+__getRandomObject(123)[__getRandomProperty(__getRandomObject(123), 123)] = 0, __callGC();
+Math.pow(1, 2);
+
+/* VariableOrObjectMutator: Random mutation */
+__v_0 = __getRandomObject(123), __callGC();
+Math.pow(1, 2);
+
+/* VariableOrObjectMutator: Random mutation */
+if (__getRandomObject(123) != null && typeof __getRandomObject(123) == "object") Object.defineProperty(__getRandomObject(123), __getRandomProperty(__getRandomObject(123), 123), {
+  value: 0
+});
+Math.pow(1, 2);
+
+/* VariableOrObjectMutator: Random mutation */
+if (__getRandomObject(123) != null && typeof __getRandomObject(123) == "object") Object.defineProperty(__getRandomObject(123), __getRandomProperty(__getRandomObject(123), 123), {
+  get: function () {
+    delete __getRandomObject(123)[__getRandomProperty(__getRandomObject(123), 123)], __callGC();
+    return 0;
+  },
+  set: function (value) {
+    __getRandomObject(123)[__getRandomProperty(__getRandomObject(123), 123)], __callGC();
+  }
+});
+Math.pow(1, 2);
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_variables.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_variables.js
new file mode 100644
index 0000000..93fed1b
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_variables.js
@@ -0,0 +1,30 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function __f_0(__v_10, __v_11) {
+    let __v_4 = 4;
+    let __v_5 = 5;
+    let __v_6 = 6;
+    let __v_7 = 7;
+    console.log(__v_4);
+    console.log(__v_5);
+    console.log(__v_6);
+    console.log(__v_7);
+    for (let __v_9 = 0; __v_9 < 10; __v_9++) {
+        console.log(__v_4);
+    }
+    let __v_8 = 0;
+    while (__v_8 < 10) {
+        __v_8++;
+    }
+}
+let __v_0 = 1;
+let __v_1 = 2;
+let __v_2 = 3;
+let __v_3 = 4;
+console.log(__v_0);
+console.log(__v_1);
+console.log(__v_2);
+console.log(__v_3);
+__f_0();
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_variables_expected.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_variables_expected.js
new file mode 100644
index 0000000..920e740
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/mutate_variables_expected.js
@@ -0,0 +1,54 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Original: mutate_variables.js
+function __f_0(__v_10, __v_11) {
+  let __v_4 = 4;
+  let __v_5 = 5;
+  let __v_6 = 6;
+  let __v_7 = 7;
+  console.log(
+  /* VariableMutator: Replaced __v_4 with REPLACED */
+  REPLACED);
+  console.log(
+  /* VariableMutator: Replaced __v_5 with REPLACED */
+  REPLACED);
+  console.log(
+  /* VariableMutator: Replaced __v_6 with REPLACED */
+  REPLACED);
+  console.log(
+  /* VariableMutator: Replaced __v_7 with REPLACED */
+  REPLACED);
+
+  for (let __v_9 = 0; __v_9 < 10; __v_9++) {
+    console.log(
+    /* VariableMutator: Replaced __v_4 with REPLACED */
+    REPLACED);
+  }
+
+  let __v_8 = 0;
+
+  while (__v_8 < 10) {
+    __v_8++;
+  }
+}
+
+let __v_0 = 1;
+let __v_1 = 2;
+let __v_2 = 3;
+let __v_3 = 4;
+console.log(
+/* VariableMutator: Replaced __v_0 with REPLACED */
+REPLACED);
+console.log(
+/* VariableMutator: Replaced __v_1 with REPLACED */
+REPLACED);
+console.log(
+/* VariableMutator: Replaced __v_2 with REPLACED */
+REPLACED);
+console.log(
+/* VariableMutator: Replaced __v_3 with REPLACED */
+REPLACED);
+
+__f_0();
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/normalize.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/normalize.js
new file mode 100644
index 0000000..ddf5b9f
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/normalize.js
@@ -0,0 +1,23 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+class Class {
+  constructor() {
+    this.abc = 789;
+    this.selfRef = Class;
+  }
+}
+
+function foo() {
+  let a = 123;
+  console.log(a);
+}
+
+foo();
+let a = 456;
+console.log(a);
+let b = new Class();
+console.log(b.abc);
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/normalize_expected_0.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/normalize_expected_0.js
new file mode 100644
index 0000000..5f832f7
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/normalize_expected_0.js
@@ -0,0 +1,28 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+// Original: normalize.js
+class __c_0 {
+  constructor() {
+    this.abc = 789;
+    this.selfRef = __c_0;
+  }
+
+}
+
+function __f_0() {
+  let __v_2 = 123;
+  console.log(__v_2);
+}
+
+__f_0();
+
+let __v_0 = 456;
+console.log(__v_0);
+
+let __v_1 = new __c_0();
+
+console.log(__v_1.abc);
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/normalize_expected_1.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/normalize_expected_1.js
new file mode 100644
index 0000000..672f29e
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/normalize_expected_1.js
@@ -0,0 +1,28 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+// Original: normalize.js
+class __c_1 {
+  constructor() {
+    this.abc = 789;
+    this.selfRef = __c_1;
+  }
+
+}
+
+function __f_1() {
+  let __v_5 = 123;
+  console.log(__v_5);
+}
+
+__f_1();
+
+let __v_3 = 456;
+console.log(__v_3);
+
+let __v_4 = new __c_1();
+
+console.log(__v_4.abc);
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/cross_over_mutator_class_input.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/cross_over_mutator_class_input.js
new file mode 100644
index 0000000..f16fb2f
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/cross_over_mutator_class_input.js
@@ -0,0 +1,11 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+class __C {
+  foo() {
+    let __v_0 = 2;
+    let __v_1 = 2;
+    Math.pow(__v_0, __v_1);
+  }
+}
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/cross_over_mutator_input.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/cross_over_mutator_input.js
new file mode 100644
index 0000000..3d7ed65
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/cross_over_mutator_input.js
@@ -0,0 +1,7 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let x = 2;
+let y = 2;
+Math.pow(x, y);
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/destructuring/input.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/destructuring/input.js
new file mode 100644
index 0000000..fce0782
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/destructuring/input.js
@@ -0,0 +1,7 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+let x, y;
+(function([ x = y = 1 ]) {}([]));
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/this/file.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/this/file.js
new file mode 100644
index 0000000..115616d
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/this/file.js
@@ -0,0 +1,9 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function C() {
+  this.c = "c";
+}
+
+var c = new C();
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/numbers/db/index.json b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/numbers/db/index.json
new file mode 100644
index 0000000..0967ef4
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/numbers/db/index.json
@@ -0,0 +1 @@
+{}
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/numbers/input_indices.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/numbers/input_indices.js
new file mode 100644
index 0000000..685453f
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/numbers/input_indices.js
@@ -0,0 +1,11 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+let a = {
+  0: "",
+  1: "",
+  2: "",
+  3: "",
+  4: "",
+};
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/numbers/input_negative.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/numbers/input_negative.js
new file mode 100644
index 0000000..d6747bb
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/numbers/input_negative.js
@@ -0,0 +1,8 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+foo(-1);
+foo(-1);
+foo(-1);
+foo(-1);
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/spidermonkey/db/index.json b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/spidermonkey/db/index.json
new file mode 100644
index 0000000..0967ef4
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/spidermonkey/db/index.json
@@ -0,0 +1 @@
+{}
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/spidermonkey/input.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/spidermonkey/input.js
new file mode 100644
index 0000000..fc34ae5
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/spidermonkey/input.js
@@ -0,0 +1,5 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+print("Won't see this.");
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/spidermonkey/shell.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/spidermonkey/shell.js
new file mode 100644
index 0000000..4967ffb
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/spidermonkey/shell.js
@@ -0,0 +1,5 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+loadRelativeToScript('PatternAsserts.js');
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/strict/db/index.json b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/strict/db/index.json
new file mode 100644
index 0000000..0967ef4
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/strict/db/index.json
@@ -0,0 +1 @@
+{}
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/strict/input_delete.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/strict/input_delete.js
new file mode 100644
index 0000000..154bb60
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/strict/input_delete.js
@@ -0,0 +1,6 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var x;
+delete x;
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/strict/input_strict.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/strict/input_strict.js
new file mode 100644
index 0000000..779fa08
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/strict/input_strict.js
@@ -0,0 +1,7 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
+
+print("Hello");
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/strict/input_with.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/strict/input_with.js
new file mode 100644
index 0000000..abb0215
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/strict/input_with.js
@@ -0,0 +1,7 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+with (Math) {
+  print(PI);
+}
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/simple_test.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/simple_test.js
new file mode 100644
index 0000000..bb53b40
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/simple_test.js
@@ -0,0 +1,87 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test comment.
+// Flags: --gc-interval = 1
+var abs = Math.abs;
+var v1 = 5, v2; var v3;
+if (v1) {
+  var v4 = 3;
+  for (var v5 = 0; v5 < 4; v5++) {
+    console.log('Value of v5: ' +
+                v5);
+  }
+}
+let v6 = 3;
+const v7 = 5 + \u{0076}6;
+v1 = {['p' + v6]: ''};
+v1 = `test\`
+value is ${ v6 + v7 }` + '\0\400\377'
+v1 = (v8=2, {v9 = eval('v8')},) => { return v8 + v9 + 4; };
+v1 = () => 4 + 5;
+v1 = v10 => { return v10 + 4; }
+v1 = async v11 => v11 + 4;
+v12 = [0, 1, 2,];
+v13 = [3, 4, 5];
+v14 = [...v12, ...v13];
+v15 = ([v16, v17] = [1, 2], {v31: v18} = {v31: v16 + v17}) => v16 + v17 + v18;
+v16 = 170%16/16 + 2**32;
+v17 = 0o1 + 0O1 + 01 + 0b011 + 0B011;
+for (var v18 of [1, 2, 3]) console.log(v18);
+function f1(v19,) {}
+f1();
+%OptimizeFunctionOnNextCall(f1);
+function f2() {
+  var v20 = 5;
+  return v20 + 6;
+}
+(async function f3() {
+  var v21 = await 1;
+  console.log(v21);
+})();
+function* f4(v22=2, ...v23) {
+  yield* [1, 2, 3];
+}
+function* f5() { (yield 3) + (yield); }
+{ function f6() { } }
+v23 = { v6, [v6]: 3, f7() { }, get f8 () { }, *f9 () { }, async f10 () { } }
+var [v24, v25, ...v26] = [10, 20], {v27, v28} = {v27: 10, v28: 20};
+class c1 {
+  f11(v29) {
+    return v29 + 1;
+  }
+  static* f12() {
+    yield 'a' + super.f12();
+  }
+  constructor(v30) {
+    console.log(new.target.name);
+  }
+  [0]() { }
+}
+class c2 extends c1 { }
+do ; while(0);
+v16 **= 4;
+for (const v32 = 1; v32 < 1;);
+for (let v33 = 1; v33 < 5; v33++);
+for (var v34 = 1; v34 < 5; v34++);
+for (const {v35 = 0, v36 = 3} = {}; v36 < 1;);
+for (let {v37 = 0, v38 = 3} = {}; v38 != 0; v38--);
+for (var {v39 = 0, v40 = 3} = {}; v40 != 0; v40--);
+for (const v41 of [1, 2, 3]);
+for (let v42 of [1, 2, 3]);
+for (var v43 of [1, 2, 3]);
+for (const v44 in [1, 2, 3]);
+for (let v45 in [1, 2, 3]);
+for (var v46 in [1, 2, 3]);
+label: function f13() { }
+
+var a = function b() {
+  b();
+};
+
+var c = class C {
+  constructor() {
+    console.log(C.name);
+  }
+};
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/simple_test_expected.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/simple_test_expected.js
new file mode 100644
index 0000000..0405fac
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/simple_test_expected.js
@@ -0,0 +1,177 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Original: simple_test.js
+var __v_0 = Math.abs;
+
+var __v_1 = 5,
+    __v_2;
+
+var __v_3;
+
+if (__v_1) {
+  var __v_4 = 3;
+
+  for (var __v_5 = 0; __v_5 < 4; __v_5++) {
+    console.log('Value of v5: ' + __v_5);
+  }
+}
+
+let __v_6 = 3;
+
+const __v_7 = 5 + __v_6;
+
+__v_1 = {
+  ['p' + __v_6]: ''
+};
+__v_1 = `test\`
+value is ${__v_6 + __v_7}` + '\0\400\377';
+
+__v_1 = (__v_21 = 2, {
+  v9: __v_22 = eval('v8')
+}) => {
+  return __v_21 + __v_22 + 4;
+};
+
+__v_1 = () => 4 + 5;
+
+__v_1 = __v_23 => {
+  return __v_23 + 4;
+};
+
+__v_1 = async __v_24 => __v_24 + 4;
+
+__v_25 = [0, 1, 2];
+__v_26 = [3, 4, 5];
+__v_27 = [...__v_25, ...__v_26];
+
+__v_28 = ([__v_29, __v_30] = [1, 2], {
+  v31: __v_31
+} = {
+  v31: __v_29 + __v_30
+}) => __v_29 + __v_30 + __v_31;
+
+__v_42 = 170 % 16 / 16 + 2 ** 32;
+__v_33 = 0o1 + 0O1 + 01 + 0b011 + 0B011;
+
+for (var __v_8 of [1, 2, 3]) console.log(__v_8);
+
+function __f_0(__v_34) {}
+
+__f_0();
+
+%OptimizeFunctionOnNextCall(__f_0);
+
+function __f_1() {
+  var __v_35 = 5;
+  return __v_35 + 6;
+}
+
+(async function __f_5() {
+  var __v_36 = await 1;
+
+  console.log(__v_36);
+})();
+
+function* __f_2(__v_37 = 2, ...__v_38) {
+  yield* [1, 2, 3];
+}
+
+function* __f_3() {
+  (yield 3) + (yield);
+}
+
+{
+  function __f_6() {}
+}
+__v_39 = {
+  v6: __v_6,
+  [__v_6]: 3,
+
+  f7() {},
+
+  get f8() {},
+
+  *f9() {},
+
+  async f10() {}
+
+};
+var [__v_9, __v_10, ...__v_11] = [10, 20],
+    {
+  v27: __v_12,
+  v28: __v_13
+} = {
+  v27: 10,
+  v28: 20
+};
+
+class __c_0 {
+  f11(__v_40) {
+    return __v_40 + 1;
+  }
+
+  static *f12() {
+    yield 'a' + super.f12();
+  }
+
+  constructor(__v_41) {
+    console.log(new.target.name);
+  }
+
+  [0]() {}
+
+}
+
+class __c_1 extends __c_0 {}
+
+do ; while (0);
+
+__v_42 **= 4;
+
+for (const __v_43 = 1; __v_43 < 1;);
+
+for (let __v_44 = 1; __v_44 < 5; __v_44++);
+
+for (var __v_14 = 1; __v_14 < 5; __v_14++);
+
+for (const {
+  v35: __v_45 = 0,
+  v36: __v_46 = 3
+} = {}; __v_46 < 1;);
+
+for (let {
+  v37: __v_47 = 0,
+  v38: __v_48 = 3
+} = {}; __v_48 != 0; __v_48--);
+
+for (var {
+  v39: __v_15 = 0,
+  v40: __v_16 = 3
+} = {}; __v_16 != 0; __v_16--);
+
+for (const __v_49 of [1, 2, 3]);
+
+for (let __v_50 of [1, 2, 3]);
+
+for (var __v_17 of [1, 2, 3]);
+
+for (const __v_51 in [1, 2, 3]);
+
+for (let __v_52 in [1, 2, 3]);
+
+for (var __v_18 in [1, 2, 3]);
+
+label: function __f_4() {}
+
+var __v_19 = function __f_7() {
+  __f_7();
+};
+
+var __v_20 = class __c_2 {
+  constructor() {
+    console.log(__c_2.name);
+  }
+
+};
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/spidermonkey/load1.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/spidermonkey/load1.js
new file mode 100644
index 0000000..5e3f902
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/spidermonkey/load1.js
@@ -0,0 +1,5 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+console.log('load1.js');
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/spidermonkey/shell.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/spidermonkey/shell.js
new file mode 100644
index 0000000..98e863a
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/spidermonkey/shell.js
@@ -0,0 +1,7 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+console.log('/shell.js');
+if (!ok)
+  throw new Error(`Assertion failed: ${f} did not throw as expected`);
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/spidermonkey/test/load.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/spidermonkey/test/load.js
new file mode 100644
index 0000000..43a776c
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/spidermonkey/test/load.js
@@ -0,0 +1,12 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('load1.js');
+loadRelativeToScript('load2.js');
+console.log('load.js');
+
+if (!ok)
+  throw new Error(`Assertion failed: Some text`);
+
+print("Assertion failed: Some text");
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/spidermonkey/test/load2.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/spidermonkey/test/load2.js
new file mode 100644
index 0000000..a8ee5d2
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/spidermonkey/test/load2.js
@@ -0,0 +1,5 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+console.log('load2.js');
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/spidermonkey/test/load_expected.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/spidermonkey/test/load_expected.js
new file mode 100644
index 0000000..58e7f05
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/spidermonkey/test/load_expected.js
@@ -0,0 +1,21 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Original: spidermonkey/shell.js
+console.log('/shell.js');
+if (!ok) throw new Error(`A****tion failed: ${f} did not throw as expected`);
+
+// Original: spidermonkey/test/shell.js
+console.log('/test/shell.js');
+
+// Original: spidermonkey/load1.js
+console.log('load1.js');
+
+// Original: spidermonkey/test/load2.js
+console.log('load2.js');
+
+// Original: spidermonkey/test/load.js
+console.log('load.js');
+if (!ok) throw new Error(`A****tion failed: Some text`);
+print("A****tion failed: Some text");
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/spidermonkey/test/shell.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/spidermonkey/test/shell.js
new file mode 100644
index 0000000..9c0b357
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/spidermonkey/test/shell.js
@@ -0,0 +1,5 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+console.log('/test/shell.js');
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/try_catch.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/try_catch.js
new file mode 100644
index 0000000..9414a3b
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/try_catch.js
@@ -0,0 +1,41 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function blah() {
+  try {
+    var a = 10;
+    console.log(a);
+  } catch (e) {}
+
+  label: for (var i = 0; i < 100; i++) {
+    var b = 0;
+    while (b < 10) {
+      console.log(b);
+      b += 2;
+      continue label;
+    }
+  }
+}
+
+blah();
+blah();
+
+(function () {1;1;})();
+
+if (true) {
+  2;2;
+} else {
+  3;3;
+}
+
+let a = 0;
+switch (a) {
+  case 1: 1;
+}
+
+with (Math) {
+  cos(PI);
+}
+
+let module = new WebAssembly.Module(builder.toBuffer());
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/try_catch_alternate_expected.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/try_catch_alternate_expected.js
new file mode 100644
index 0000000..31fe283
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/try_catch_alternate_expected.js
@@ -0,0 +1,51 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/* AddTryCatchMutator: Target skip probability 0.9 and toplevel probability 0.9 */
+
+// Original: try_catch.js
+function blah() {
+  try {
+    var a = 10;
+    console.log(a);
+  } catch (e) {}
+
+  label: for (var i = 0; i < 100; i++) {
+    var b = 0;
+
+    while (b < 10) {
+      console.log(b);
+      b += 2;
+      continue label;
+    }
+  }
+}
+
+blah();
+blah();
+
+(function () {
+  1;
+  1;
+})();
+
+if (true) {
+  2;
+  2;
+} else {
+  3;
+  3;
+}
+
+let a = 0;
+
+switch (a) {
+  case 1:
+    1;
+}
+
+with (Math) {
+  cos(PI);
+}
+let module = new WebAssembly.Module(builder.toBuffer());
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/try_catch_expected.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/try_catch_expected.js
new file mode 100644
index 0000000..266662b
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/try_catch_expected.js
@@ -0,0 +1,104 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Original: try_catch.js
+function blah() {
+  try {
+    try {
+      var a = 10;
+    } catch (e) {}
+
+    try {
+      console.log(a);
+    } catch (e) {}
+  } catch (e) {}
+
+  try {
+    label: for (var i = 0; i < 100; i++) {
+      try {
+        var b = 0;
+      } catch (e) {}
+
+      try {
+        while (b < 10) {
+          try {
+            console.log(b);
+          } catch (e) {}
+
+          try {
+            b += 2;
+          } catch (e) {}
+
+          continue label;
+        }
+      } catch (e) {}
+    }
+  } catch (e) {}
+}
+
+try {
+  blah();
+} catch (e) {}
+
+try {
+  blah();
+} catch (e) {}
+
+try {
+  (function () {
+    try {
+      1;
+    } catch (e) {}
+
+    try {
+      1;
+    } catch (e) {}
+  })();
+} catch (e) {}
+
+try {
+  if (true) {
+    try {
+      2;
+    } catch (e) {}
+
+    try {
+      2;
+    } catch (e) {}
+  } else {
+    try {
+      3;
+    } catch (e) {}
+
+    try {
+      3;
+    } catch (e) {}
+  }
+} catch (e) {}
+
+let a = 0;
+
+try {
+  switch (a) {
+    case 1:
+      try {
+        1;
+      } catch (e) {}
+
+  }
+} catch (e) {}
+
+try {
+  with (Math) {
+    try {
+      cos(PI);
+    } catch (e) {}
+  }
+} catch (e) {}
+
+let module = function () {
+  try {
+    return new WebAssembly.Module(builder.toBuffer());
+  } catch (e) {}
+}();
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/try_catch_nothing_expected.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/try_catch_nothing_expected.js
new file mode 100644
index 0000000..b7b86f1
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/try_catch_nothing_expected.js
@@ -0,0 +1,49 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Original: try_catch.js
+function blah() {
+  try {
+    var a = 10;
+    console.log(a);
+  } catch (e) {}
+
+  label: for (var i = 0; i < 100; i++) {
+    var b = 0;
+
+    while (b < 10) {
+      console.log(b);
+      b += 2;
+      continue label;
+    }
+  }
+}
+
+blah();
+blah();
+
+(function () {
+  1;
+  1;
+})();
+
+if (true) {
+  2;
+  2;
+} else {
+  3;
+  3;
+}
+
+let a = 0;
+
+switch (a) {
+  case 1:
+    1;
+}
+
+with (Math) {
+  cos(PI);
+}
+let module = new WebAssembly.Module(builder.toBuffer());
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/try_catch_toplevel_expected.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/try_catch_toplevel_expected.js
new file mode 100644
index 0000000..39ee907
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_data/try_catch_toplevel_expected.js
@@ -0,0 +1,74 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Original: try_catch.js
+function blah() {
+  try {
+    try {
+      var a = 10;
+    } catch (e) {}
+
+    try {
+      console.log(a);
+    } catch (e) {}
+  } catch (e) {}
+
+  try {
+    label: for (var i = 0; i < 100; i++) {
+      var b = 0;
+
+      while (b < 10) {
+        console.log(b);
+        b += 2;
+        continue label;
+      }
+    }
+  } catch (e) {}
+}
+
+try {
+  blah();
+} catch (e) {}
+
+try {
+  blah();
+} catch (e) {}
+
+try {
+  (function () {
+    1;
+    1;
+  })();
+} catch (e) {}
+
+try {
+  if (true) {
+    2;
+    2;
+  } else {
+    3;
+    3;
+  }
+} catch (e) {}
+
+let a = 0;
+
+try {
+  switch (a) {
+    case 1:
+      1;
+  }
+} catch (e) {}
+
+try {
+  with (Math) {
+    cos(PI);
+  }
+} catch (e) {}
+
+let module = function () {
+  try {
+    return new WebAssembly.Module(builder.toBuffer());
+  } catch (e) {}
+}();
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_db.js b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_db.js
new file mode 100644
index 0000000..ff13c38
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/test_db.js
@@ -0,0 +1,67 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Test all expressions in DB.
+ */
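+//
+// Illustrative usage (the DB path is an assumption about your local setup):
+//   node test_db.js -i path/to/db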
+
+const fs = require('fs');
+const fsPath = require('path');
+const program = require('commander');
+const sinon = require('sinon');
+
+const crossOverMutator = require('./mutators/crossover_mutator.js');
+const db = require('./db.js');
+const random = require('./random.js');
+const sourceHelpers = require('./source_helpers.js');
+
+const sandbox = sinon.createSandbox();
+
+function main() {
+  program
+    .version('0.0.1')
+    .option('-i, --input_dir <path>', 'DB directory.')
+    .parse(process.argv);
+
+  if (!program.input_dir) {
+    console.log('Need to specify DB dir.');
+    return;
+  }
+
+  const loader = new sourceHelpers.V8SourceLoader();
+  const mutateDb = new db.MutateDb(program.input_dir);
+  const mutator = new crossOverMutator.CrossOverMutator(
+      { MUTATE_CROSSOVER_INSERT: 1.0, testing: true }, mutateDb);
+
+  let nPass = 0;
+  let nFail = 0;
+  // Iterate over all statements saved in the DB.
+  for (const statementPath of mutateDb.index.all) {
+    const expression = JSON.parse(fs.readFileSync(
+        fsPath.join(program.input_dir, statementPath), 'utf-8'));
+    // Stub out choosing random variables in cross-over mutator.
+    sandbox.stub(random, 'single').callsFake((a) => { return a[0]; });
+    // Ensure we are selecting the statement of the current iteration.
+    sandbox.stub(mutateDb, 'getRandomStatement').callsFake(
+        () => { return expression; });
+    // Use a source that will try to insert one statement, allowing
+    // super.
+    const source = loader.load(
+        __dirname,
+        'test_data/regress/build_db/cross_over_mutator_class_input.js');
+    try {
+      mutator.mutate(source);
+      nPass++;
+    } catch (e) {
+      console.log('******************************************************')
+      console.log(expression);
+      console.log(e.message);
+      nFail++;
+    }
+    sandbox.restore();
+  }
+  console.log(`Result: ${nPass} passed, ${nFail} failed.`)
+}
+
+main();
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/tools/fuzz_one.py b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/tools/fuzz_one.py
new file mode 100644
index 0000000..28646d4
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/tools/fuzz_one.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+# Copyright 2020 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+"""
+Helper script to execute a single-process fuzzing session.
+
+Creates fuzz tests in workdir/output/dir-<dir number>/fuzz-XXX.js.
+Expects the <dir number> as single parameter.
+"""
+
+import os
+import subprocess
+import sys
+import time
+
+BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+APP_DIR = os.path.join(BASE_PATH, 'workdir', 'app_dir')
+FUZZ_EXE = os.path.join(BASE_PATH, 'workdir', 'fuzzer', 'ochang_js_fuzzer')
+INPUT_DIR = os.path.join(BASE_PATH, 'workdir', 'input')
+TEST_CASES = os.path.join(BASE_PATH, 'workdir', 'output')
+
+COUNT = 64
+FUZZ = ('FUZZ_MODE=foozzie APP_NAME=d8 APP_DIR=%s %s -o %%s -n %s -i %s > %%s'
+        % (APP_DIR, FUZZ_EXE, COUNT, INPUT_DIR))
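+# For illustration, with dir number 3 the template above expands roughly to:
+#   FUZZ_MODE=foozzie APP_NAME=d8 APP_DIR=<workdir>/app_dir \
+#     <workdir>/fuzzer/ochang_js_fuzzer -o <output>/dir-3 -n 64 \
+#     -i <workdir>/input > <output>/dir-3/out.log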
+
+assert(len(sys.argv) > 1)
+dir_number = int(sys.argv[1])
+assert(dir_number >= 0)
+
+path = os.path.join(TEST_CASES, 'dir-%d' % dir_number)
+assert not os.path.exists(path), 'Need fresh workdir for fuzzing'
+os.makedirs(path)
+
+start = time.time()
+subprocess.check_call(
+    FUZZ % (path, os.path.join(path, 'out.log')), shell=True)
+duration = int(time.time() - start)
+
+with open(os.path.join(path, 'duration.log'), 'w') as f:
+  f.write(str(duration))
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/tools/minimize.py b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/tools/minimize.py
new file mode 100644
index 0000000..8729b2a
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/tools/minimize.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+# Copyright 2020 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+"""
+Helper script to forge a command line for clusterfuzz' minimizer for
+each failure found during a fuzzing session with workbench.py.
+
+Expects the path to the minimizer tools, e.g. something like:
+path/to/src/python/bot/minimizer
+"""
+
+import json
+from multiprocessing import cpu_count
+import os
+import sys
+
+PROCESSES = cpu_count()
+BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+OUT_PATH = os.path.join(BASE_PATH, 'out.js')
+FAILURES_JSON_PATH = os.path.join(
+    BASE_PATH, 'workdir', 'output', 'failures.json')
+
+assert len(sys.argv) > 1, 'Need to specify minimizer path.'
+minimizer_path = sys.argv[1]
+
+def getcmd(command):
+  parts = command.split(' ')
+  prefix = command[:-(len(parts[-1]) + 1)]
+  return ('python %s/run.py -t%d -mjs -o %s "%s" %s' %
+          (minimizer_path, PROCESSES, OUT_PATH, prefix, parts[-1]))
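+# For illustration, a recorded failure command like
+#   'python v8_foozzie.py --random-seed=1 fuzz-007.js'
+# (hypothetical) is turned into something like
+#   python <minimizer_path>/run.py -t<cpus> -mjs -o <BASE_PATH>/out.js \
+#     "python v8_foozzie.py --random-seed=1" fuzz-007.js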
+
+with open(FAILURES_JSON_PATH) as f:
+  failures = json.load(f)
+
+for failure in failures:
+  print('*********************************************************')
+  print('Source: ' + failure['source'])
+  print('Command:')
+  print(failure['command'])
+  print('Minimize:')
+  print(getcmd(failure['command']))
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/tools/run_one.py b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/tools/run_one.py
new file mode 100644
index 0000000..7762cb2
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/tools/run_one.py
@@ -0,0 +1,103 @@
+#!/usr/bin/env python
+# Copyright 2020 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+"""
+Helper script to execute fuzz tests in a single process.
+
+Expects fuzz tests in workdir/output/dir-<dir number>/fuzz-XXX.js.
+Expects the <dir number> as single parameter.
+"""
+
+import json
+import os
+import random
+import re
+import subprocess
+import sys
+
+BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+FOOZZIE = os.path.join(BASE_PATH, 'workdir', 'app_dir', 'v8_foozzie.py')
+TEST_CASES = os.path.join(BASE_PATH, 'workdir', 'output')
+
+# Output pattern from foozzie.py when it finds a failure.
+FAILURE_RE = re.compile(
+    r'# V8 correctness failure.'
+    r'# V8 correctness configs: (?P<configs>.*).'
+    r'# V8 correctness sources: (?P<source>.*).'
+    r'# V8 correctness suppression:.*', re.S)
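+# For illustration, this matches a failure header of the form (abbreviated):
+#   # V8 correctness failure
+#   # V8 correctness configs: x64,ignition:x64,ignition_turbo
+#   # V8 correctness sources: f60
+#   # V8 correctness suppression: ...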
+
+assert(len(sys.argv) > 1)
+dir_number = int(sys.argv[1])
+assert(dir_number >= 0)
+
+test_dir = os.path.join(TEST_CASES, 'dir-%d' % dir_number)
+assert os.path.exists(test_dir)
+
+def failure_state(command, stdout):
+  return dict(FAILURE_RE.search(stdout).groupdict(), command=command)
+
+def random_seed():
+  """Returns random, non-zero seed."""
+  seed = 0
+  while not seed:
+    seed = random.SystemRandom().randint(-2147483648, 2147483647)
+  return seed
+
+def run(fuzz_file, flag_file):
+  """Executes the differential-fuzzing harness foozzie with one fuzz test."""
+  with open(flag_file) as f:
+    flags = f.read().split(' ')
+  args = [FOOZZIE, '--random-seed=%d' % random_seed()] + flags + [fuzz_file]
+  cmd = ' '.join(args)
+  try:
+    output = subprocess.check_output(cmd, stderr=subprocess.PIPE, shell=True)
+    return (cmd, output)
+  except Exception as e:
+    return (cmd, e.output)
+
+def list_tests():
+  """Iterates all fuzz tests and corresponding flags in the given base dir."""
+  for f in os.listdir(test_dir):
+    if f.startswith('fuzz'):
+      n = int(re.match(r'fuzz-(\d+)\.js', f).group(1))
+      ff = 'flags-%d.js' % n
+      yield (os.path.join(test_dir, f), os.path.join(test_dir, ff))
+
+# Some counters for the statistics.
+count = 0
+count_timeout = 0
+count_crash = 0
+count_failure = 0
+failures = []
+
+# Execute all tests in the given directory. Interpret foozzie's output and add
+# it to the statistics.
+for fuzz_file, flag_file in list_tests():
+  cmd, output = run(fuzz_file, flag_file)
+  count += 1
+  if '# V8 correctness - pass' in output:
+    continue
+  if '# V8 correctness - T-I-M-E-O-U-T' in output:
+    count_timeout += 1
+    continue
+  if '# V8 correctness - C-R-A-S-H' in output:
+    count_crash += 1
+    continue
+  count_failure += 1
+  failures.append(failure_state(cmd, output))
+
+with open(os.path.join(test_dir, 'failures.json'), 'w') as f:
+  json.dump(failures, f)
+
+stats = {
+  'total': count,
+  'timeout': count_timeout,
+  'crash': count_crash,
+  'failure': count_failure,
+}
+
+with open(os.path.join(test_dir, 'stats.json'), 'w') as f:
+  json.dump(stats, f)
diff --git a/src/third_party/v8/tools/clusterfuzz/js_fuzzer/tools/workbench.py b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/tools/workbench.py
new file mode 100644
index 0000000..52a84bf
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/js_fuzzer/tools/workbench.py
@@ -0,0 +1,124 @@
+#!/usr/bin/env python
+# Copyright 2020 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+"""
+Tool to execute multiprocessed fuzzing and testing sessions.
+
+Expects a single parameter with the number of sessions.
+
+Regularly updates stats.json and failures.json during execution. E.g.
+stay up-to-date with:
+cat workdir/output/stats.json | python -m json.tool
+"""
+
+# TODO(machenbach): This is currently tailored for differential fuzzing
+# with foozzie. It could be generalized, but that'd require duplicating
+# clusterfuzz' stack analysis to some degree. E.g. understanding asan
+# or DCHECK failures.
+
+from __future__ import print_function
+
+import json
+import math
+from multiprocessing import Pool, cpu_count
+import os
+import random
+import subprocess
+import sys
+
+PROCESSES = cpu_count()
+BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+TEST_CASES = os.path.join(BASE_PATH, 'workdir', 'output')
+FUZZ_ONE = os.path.join(BASE_PATH, 'tools', 'fuzz_one.py')
+RUN_ONE = os.path.join(BASE_PATH, 'tools', 'run_one.py')
+
+os.chdir(BASE_PATH)
+
+assert not os.path.exists(TEST_CASES)
+os.makedirs(TEST_CASES)
+
+# Use ~40000 for 24 hours of fuzzing on a modern workstation.
+RUNS = 8
+if len(sys.argv) > 1:
+  RUNS = int(sys.argv[1])
+
+def run(n):
+  """Multiprocessed function that executes a single fuzz session and
+  afterwards executes all fuzz tests and collects the statistics.
+
+  Args:
+    n: Subdirectory index of this run.
+  """
+  subprocess.check_call([sys.executable, FUZZ_ONE, str(n)])
+  subprocess.check_call([sys.executable, RUN_ONE, str(n)])
+  test_dir = os.path.join(TEST_CASES, 'dir-%d' % n)
+  with open(os.path.join(test_dir, 'stats.json')) as f:
+    stats = json.load(f)
+  with open(os.path.join(test_dir, 'failures.json')) as f:
+    failures = json.load(f)
+  return (stats, failures)
+
+
+class Stats(object):
+  def __init__(self):
+    self.total = 0
+    self.crash = 0
+    self.timeout = 0
+    self.failure = 0
+    self.dupe = 0
+    self.failures = []
+    self.known_states = set()
+
+  def add(self, stats, failures):
+    # Aggregate common stats.
+    self.total += stats['total']
+    self.crash += stats['crash']
+    self.timeout += stats['timeout']
+
+    # Dedupe failures.
+    for failure in failures:
+      if failure['source'] in self.known_states:
+        self.dupe += 1
+        continue
+
+      self.known_states.add(failure['source'])
+      self.failure += 1
+      self.failures.append(failure)
+
+  @property
+  def stats(self):
+    return {
+      'total': self.total,
+      'crash': self.crash,
+      'failure': self.failure,
+      'dupe': self.dupe,
+      'timeout': self.timeout,
+    }
+
+all_stats = Stats()
+count = 0
+pool = Pool(processes=PROCESSES)
+
+# Iterate over all runs multiprocessed and merge the statistics and
+# failure data of the single runs.
+for stats, failures in pool.imap_unordered(run, range(RUNS)):
+  all_stats.add(stats, failures)
+  count += 1
+  if count % max(1, int(RUNS / 20)) == 0:
+    print('Progress: %d runs (%d%%)' % (count, count * 100 / RUNS))
+
+  # Update overall stats.
+  with open(os.path.join(TEST_CASES, 'stats.json'), 'w') as f:
+    json.dump(all_stats.stats, f)
+  with open(os.path.join(TEST_CASES, 'failures.json'), 'w') as f:
+    json.dump(all_stats.failures, f)
+
+print('Ran %(total)d test cases (%(timeout)d timeouts, '
+      '%(crash)d crashes, %(failure)d failures, %(dupe)d dupes)'
+      % all_stats.stats)
+
+for failure in all_stats.failures:
+  print(failure)
diff --git a/src/third_party/v8/tools/clusterfuzz/testdata/baseline/d8.py b/src/third_party/v8/tools/clusterfuzz/testdata/baseline/d8.py
new file mode 100644
index 0000000..4a3d008
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/testdata/baseline/d8.py
@@ -0,0 +1,17 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+print("""
+1
+v8-foozzie source: name/to/a/file.js
+2
+v8-foozzie source: name/to/file.js
+  weird error
+        ^
+3
+unknown
+""")
diff --git a/src/third_party/v8/tools/clusterfuzz/testdata/baseline/v8_build_config.json b/src/third_party/v8/tools/clusterfuzz/testdata/baseline/v8_build_config.json
new file mode 100644
index 0000000..ea27b1c
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/testdata/baseline/v8_build_config.json
@@ -0,0 +1 @@
+{"v8_current_cpu": "x64"}
diff --git a/src/third_party/v8/tools/clusterfuzz/testdata/build1/d8.py b/src/third_party/v8/tools/clusterfuzz/testdata/build1/d8.py
new file mode 100644
index 0000000..824b222
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/testdata/build1/d8.py
@@ -0,0 +1,17 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+print("""
+1
+v8-foozzie source: name/to/a/file.js
+2
+v8-foozzie source: name/to/file.js
+  weird other error
+^
+3
+unknown
+""")
diff --git a/src/third_party/v8/tools/clusterfuzz/testdata/build1/v8_build_config.json b/src/third_party/v8/tools/clusterfuzz/testdata/build1/v8_build_config.json
new file mode 100644
index 0000000..ea27b1c
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/testdata/build1/v8_build_config.json
@@ -0,0 +1 @@
+{"v8_current_cpu": "x64"}
diff --git a/src/third_party/v8/tools/clusterfuzz/testdata/build2/d8.py b/src/third_party/v8/tools/clusterfuzz/testdata/build2/d8.py
new file mode 100644
index 0000000..0b19a3f
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/testdata/build2/d8.py
@@ -0,0 +1,17 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+print("""
+1
+v8-foozzie source: name/to/a/file.js
+2
+v8-foozzie source: name/to/file.js
+  weird other error
+^
+3
+not unknown
+""")
diff --git a/src/third_party/v8/tools/clusterfuzz/testdata/build2/v8_build_config.json b/src/third_party/v8/tools/clusterfuzz/testdata/build2/v8_build_config.json
new file mode 100644
index 0000000..ea27b1c
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/testdata/build2/v8_build_config.json
@@ -0,0 +1 @@
+{"v8_current_cpu": "x64"}
diff --git a/src/third_party/v8/tools/clusterfuzz/testdata/build3/d8.py b/src/third_party/v8/tools/clusterfuzz/testdata/build3/d8.py
new file mode 100644
index 0000000..824b222
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/testdata/build3/d8.py
@@ -0,0 +1,17 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+print("""
+1
+v8-foozzie source: name/to/a/file.js
+2
+v8-foozzie source: name/to/file.js
+  weird other error
+^
+3
+unknown
+""")
diff --git a/src/third_party/v8/tools/clusterfuzz/testdata/build3/v8_build_config.json b/src/third_party/v8/tools/clusterfuzz/testdata/build3/v8_build_config.json
new file mode 100644
index 0000000..2a9917a
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/testdata/build3/v8_build_config.json
@@ -0,0 +1 @@
+{"v8_current_cpu": "x86"}
diff --git a/src/third_party/v8/tools/clusterfuzz/testdata/failure_output.txt b/src/third_party/v8/tools/clusterfuzz/testdata/failure_output.txt
new file mode 100644
index 0000000..e151153
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/testdata/failure_output.txt
@@ -0,0 +1,50 @@
+#
+# V8 correctness failure
+# V8 correctness configs: x64,ignition:x64,ignition_turbo
+# V8 correctness sources: f60
+# V8 correctness suppression: 
+#
+# CHECK
+#
+# Compared x64,ignition with x64,ignition_turbo
+#
+# Flags of x64,ignition:
+--correctness-fuzzer-suppressions --expose-gc --fuzzing --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --es-staging --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --noopt --liftoff --no-wasm-tier-up --flag1 --flag2=0
+# Flags of x64,ignition_turbo:
+--correctness-fuzzer-suppressions --expose-gc --fuzzing --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --es-staging --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --flag3
+#
+# Difference:
+- unknown
++ not unknown
+#
+# Source file:
+name/to/file.js
+#
+### Start of configuration x64,ignition:
+
+1
+v8-foozzie source: name/to/a/file.js
+2
+v8-foozzie source: name/to/file.js
+  weird error
+        ^
+3
+unknown
+
+
+### End of configuration x64,ignition
+#
+### Start of configuration x64,ignition_turbo:
+
+1
+v8-foozzie source: name/to/a/file.js
+2
+v8-foozzie source: name/to/file.js
+  weird other error
+^
+3
+not unknown
+
+
+### End of configuration x64,ignition_turbo
+
diff --git a/src/third_party/v8/tools/clusterfuzz/testdata/fuzz-123.js b/src/third_party/v8/tools/clusterfuzz/testdata/fuzz-123.js
new file mode 100644
index 0000000..fbde573
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/testdata/fuzz-123.js
@@ -0,0 +1,6 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Empty test dummy.
+print("js-mutation: start generated test case");
diff --git a/src/third_party/v8/tools/clusterfuzz/testdata/sanity_check_output.txt b/src/third_party/v8/tools/clusterfuzz/testdata/sanity_check_output.txt
new file mode 100644
index 0000000..ea6b8a9
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/testdata/sanity_check_output.txt
@@ -0,0 +1,50 @@
+#
+# V8 correctness failure
+# V8 correctness configs: x64,ignition:x64,ignition_turbo
+# V8 correctness sources: sanity check failed
+# V8 correctness suppression: 
+#
+# CHECK
+#
+# Compared x64,ignition with x64,ignition_turbo
+#
+# Flags of x64,ignition:
+--correctness-fuzzer-suppressions --expose-gc --fuzzing --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --es-staging --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --noopt --liftoff --no-wasm-tier-up
+# Flags of x64,ignition_turbo:
+--correctness-fuzzer-suppressions --expose-gc --fuzzing --allow-natives-for-differential-fuzzing --invoke-weak-callbacks --omit-quit --es-staging --wasm-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345
+#
+# Difference:
+- unknown
++ not unknown
+#
+# Source file:
+name/to/file.js
+#
+### Start of configuration x64,ignition:
+
+1
+v8-foozzie source: name/to/a/file.js
+2
+v8-foozzie source: name/to/file.js
+  weird error
+        ^
+3
+unknown
+
+
+### End of configuration x64,ignition
+#
+### Start of configuration x64,ignition_turbo:
+
+1
+v8-foozzie source: name/to/a/file.js
+2
+v8-foozzie source: name/to/file.js
+  weird other error
+^
+3
+not unknown
+
+
+### End of configuration x64,ignition_turbo
+
diff --git a/src/third_party/v8/tools/clusterfuzz/toolchain/BUILD.gn b/src/third_party/v8/tools/clusterfuzz/toolchain/BUILD.gn
new file mode 100644
index 0000000..ddcb4e1
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/toolchain/BUILD.gn
@@ -0,0 +1,15 @@
+# Copyright 2019 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/toolchain/gcc_toolchain.gni")
+
+# Fake toolchain to enable build output for a pointer-compression-comparison
+# build in a nested build sub-directory. We toggle pointer compression when
+# this toolchain is used in v8/BUILD.gn.
+clang_toolchain("clang_x64_pointer_compression") {
+  toolchain_args = {
+    current_cpu = "x64"
+    current_os = "linux"
+  }
+}
diff --git a/src/third_party/v8/tools/clusterfuzz/v8_commands.py b/src/third_party/v8/tools/clusterfuzz/v8_commands.py
new file mode 100644
index 0000000..924acbe
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/v8_commands.py
@@ -0,0 +1,155 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Fork from commands.py and output.py in v8 test driver.
+
+import os
+import signal
+import subprocess
+import sys
+from threading import Event, Timer
+
+import v8_fuzz_config
+
+PYTHON3 = sys.version_info >= (3, 0)
+
+# List of default flags passed to each d8 run.
+DEFAULT_FLAGS = [
+  '--correctness-fuzzer-suppressions',
+  '--expose-gc',
+  '--fuzzing',
+  '--allow-natives-for-differential-fuzzing',
+  '--invoke-weak-callbacks',
+  '--omit-quit',
+  '--es-staging',
+  '--wasm-staging',
+  '--no-wasm-async-compilation',
+  '--suppress-asm-messages',
+]
+
+BASE_PATH = os.path.dirname(os.path.abspath(__file__))
+
+# List of files passed to each d8 run before the testcase.
+DEFAULT_MOCK = os.path.join(BASE_PATH, 'v8_mock.js')
+
+# Suppressions on JavaScript level for known issues.
+JS_SUPPRESSIONS = os.path.join(BASE_PATH, 'v8_suppressions.js')
+
+# Config-specific mock files.
+ARCH_MOCKS = os.path.join(BASE_PATH, 'v8_mock_archs.js')
+WEBASSEMBLY_MOCKS = os.path.join(BASE_PATH, 'v8_mock_webassembly.js')
+
+
+def _startup_files(options):
+  """Default files and optional config-specific mock files."""
+  files = [DEFAULT_MOCK]
+  if not options.skip_suppressions:
+    files.append(JS_SUPPRESSIONS)
+  if options.first.arch != options.second.arch:
+    files.append(ARCH_MOCKS)
+  # Mock out WebAssembly when comparing with jitless mode.
+  if '--jitless' in options.first.flags + options.second.flags:
+    files.append(WEBASSEMBLY_MOCKS)
+  return files
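+# For example, an x64-vs-arm comparison where one side runs --jitless would
+# start d8 with v8_mock.js, v8_suppressions.js, v8_mock_archs.js and
+# v8_mock_webassembly.js, in that order, before the test case.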
+
+
+class BaseException(Exception):
+  """Used to abort the comparison workflow and print the given message."""
+  def __init__(self, message):
+    self.message = message
+
+
+class PassException(BaseException):
+  """Represents an early abort making the overall run pass."""
+  pass
+
+
+class FailException(BaseException):
+  """Represents an early abort making the overall run fail."""
+  pass
+
+
+class Command(object):
+  """Represents a configuration for running V8 multiple times with certain
+  flags and files.
+  """
+  def __init__(self, options, label, executable, config_flags):
+    self.label = label
+    self.executable = executable
+    self.config_flags = config_flags
+    self.common_flags = DEFAULT_FLAGS[:]
+    self.common_flags.extend(['--random-seed', str(options.random_seed)])
+
+    self.files = _startup_files(options)
+
+  def run(self, testcase, timeout, verbose=False):
+    """Run the executable with a specific testcase."""
+    args = [self.executable] + self.flags + self.files + [testcase]
+    if verbose:
+      print('# Command line for %s comparison:' % self.label)
+      print(' '.join(args))
+    if self.executable.endswith('.py'):
+      # Wrap with python in tests.
+      args = [sys.executable] + args
+    return Execute(
+        args,
+        cwd=os.path.dirname(os.path.abspath(testcase)),
+        timeout=timeout,
+    )
+
+  @property
+  def flags(self):
+    return self.common_flags + self.config_flags
+
+
+class Output(object):
+  def __init__(self, exit_code, stdout, pid):
+    self.exit_code = exit_code
+    self.stdout = stdout
+    self.pid = pid
+
+  def HasCrashed(self):
+    return (self.exit_code < 0 and
+            self.exit_code != -signal.SIGABRT)
+
+
+def Execute(args, cwd, timeout=None):
+  popen_args = [c for c in args if c != ""]
+  kwargs = {}
+  if PYTHON3:
+    kwargs['encoding'] = 'utf-8'
+  try:
+    process = subprocess.Popen(
+      args=popen_args,
+      stdout=subprocess.PIPE,
+      stderr=subprocess.PIPE,
+      cwd=cwd,
+      **kwargs
+    )
+  except Exception as e:
+    sys.stderr.write("Error executing: %s\n" % popen_args)
+    raise e
+
+  timeout_event = Event()
+
+  def kill_process():
+    timeout_event.set()
+    try:
+      process.kill()
+    except OSError:
+      sys.stderr.write('Error: Process %s already ended.\n' % process.pid)
+
+  timer = Timer(timeout, kill_process)
+  timer.start()
+  stdout, _ = process.communicate()
+  timer.cancel()
+
+  if timeout_event.is_set():
+    raise PassException('# V8 correctness - T-I-M-E-O-U-T')
+
+  return Output(
+      process.returncode,
+      stdout,
+      process.pid,
+  )
diff --git a/src/third_party/v8/tools/clusterfuzz/v8_foozzie.py b/src/third_party/v8/tools/clusterfuzz/v8_foozzie.py
new file mode 100755
index 0000000..5581459
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/v8_foozzie.py
@@ -0,0 +1,508 @@
+#!/usr/bin/env python
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+V8 correctness fuzzer launcher script.
+"""
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import argparse
+import hashlib
+import itertools
+import json
+import os
+import random
+import re
+import sys
+import traceback
+
+from collections import namedtuple
+
+from v8_commands import Command, FailException, PassException
+import v8_suppressions
+
+PYTHON3 = sys.version_info >= (3, 0)
+
+CONFIGS = dict(
+  default=[],
+  ignition=[
+    '--turbo-filter=~',
+    '--noopt',
+    '--liftoff',
+    '--no-wasm-tier-up',
+  ],
+  ignition_asm=[
+    '--turbo-filter=~',
+    '--noopt',
+    '--validate-asm',
+    '--stress-validate-asm',
+  ],
+  ignition_eager=[
+    '--turbo-filter=~',
+    '--noopt',
+    '--no-lazy',
+    '--no-lazy-inner-functions',
+  ],
+  ignition_no_ic=[
+    '--turbo-filter=~',
+    '--noopt',
+    '--liftoff',
+    '--no-wasm-tier-up',
+    '--no-use-ic',
+    '--no-lazy-feedback-allocation',
+  ],
+  ignition_turbo=[],
+  ignition_turbo_no_ic=[
+    '--no-use-ic',
+  ],
+  ignition_turbo_opt=[
+    '--always-opt',
+    '--no-liftoff',
+  ],
+  ignition_turbo_opt_eager=[
+    '--always-opt',
+    '--no-lazy',
+    '--no-lazy-inner-functions',
+  ],
+  jitless=[
+    '--jitless',
+  ],
+  slow_path=[
+    '--force-slow-path',
+  ],
+  slow_path_opt=[
+    '--always-opt',
+    '--force-slow-path',
+  ],
+  trusted=[
+    '--no-untrusted-code-mitigations',
+  ],
+  trusted_opt=[
+    '--always-opt',
+    '--no-untrusted-code-mitigations',
+  ],
+)
+
+BASELINE_CONFIG = 'ignition'
+DEFAULT_CONFIG = 'ignition_turbo'
+DEFAULT_D8 = 'd8'
+
+# Return codes.
+RETURN_PASS = 0
+RETURN_FAIL = 2
+
+BASE_PATH = os.path.dirname(os.path.abspath(__file__))
+SANITY_CHECKS = os.path.join(BASE_PATH, 'v8_sanity_checks.js')
+
+# Timeout for one d8 run.
+SANITY_CHECK_TIMEOUT_SEC = 1
+TEST_TIMEOUT_SEC = 3
+
+SUPPORTED_ARCHS = ['ia32', 'x64', 'arm', 'arm64']
+
+# Output for suppressed failure case.
+FAILURE_HEADER_TEMPLATE = """#
+# V8 correctness failure
+# V8 correctness configs: %(configs)s
+# V8 correctness sources: %(source_key)s
+# V8 correctness suppression: %(suppression)s
+"""
+
+# Extended output for failure case. The 'CHECK' is for the minimizer.
+FAILURE_TEMPLATE = FAILURE_HEADER_TEMPLATE + """#
+# CHECK
+#
+# Compared %(first_config_label)s with %(second_config_label)s
+#
+# Flags of %(first_config_label)s:
+%(first_config_flags)s
+# Flags of %(second_config_label)s:
+%(second_config_flags)s
+#
+# Difference:
+%(difference)s%(source_file_text)s
+#
+### Start of configuration %(first_config_label)s:
+%(first_config_output)s
+### End of configuration %(first_config_label)s
+#
+### Start of configuration %(second_config_label)s:
+%(second_config_output)s
+### End of configuration %(second_config_label)s
+"""
+
+SOURCE_FILE_TEMPLATE = """
+#
+# Source file:
+%s"""
+
+
+FUZZ_TEST_RE = re.compile(r'.*fuzz(-\d+\.js)')
+SOURCE_RE = re.compile(r'print\("v8-foozzie source: (.*)"\);')
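+# SOURCE_RE picks up marker lines that generated tests print, e.g.
+#   print("v8-foozzie source: CrashTests/path/to/file.js");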
+
+# The number of hex digits used from the hash of the original source file path.
+# Keep the number small to avoid duplicate explosion.
+ORIGINAL_SOURCE_HASH_LENGTH = 3
+
+# Placeholder string if no original source file could be determined.
+ORIGINAL_SOURCE_DEFAULT = 'none'
+
+# Placeholder string for failures from crash tests. If a failure is found with
+# this signature, the matching sources should be moved to the mapping below.
+ORIGINAL_SOURCE_CRASHTESTS = 'placeholder for CrashTests'
+
+# Mapping from relative original source path (e.g. CrashTests/path/to/file.js)
+# to a string key. Map to the same key for duplicate issues. The key should
+# have more than 3 characters to not collide with other existing hashes.
+# If a symptom from a particular original source file is known to map to a
+# known failure, it can be added to this mapping. This should be done for all
+# failures from CrashTests, as those by default map to the placeholder above.
+KNOWN_FAILURES = {
+  # Foo.caller with asm.js: https://crbug.com/1042556
+  'CrashTests/4782147262545920/494.js': '.caller',
+  'CrashTests/5637524389167104/01457.js': '.caller',
+  'CrashTests/5703451898085376/02176.js': '.caller',
+  'CrashTests/4846282433495040/04342.js': '.caller',
+  'CrashTests/5712410200899584/04483.js': '.caller',
+  'v8/test/mjsunit/regress/regress-105.js': '.caller',
+  # Flaky issue that almost never repros.
+  'CrashTests/5694376231632896/1033966.js': 'flaky',
+}
+
+
+def infer_arch(d8):
+  """Infer the V8 architecture from the build configuration next to the
+  executable.
+  """
+  with open(os.path.join(os.path.dirname(d8), 'v8_build_config.json')) as f:
+    arch = json.load(f)['v8_current_cpu']
+  arch = 'ia32' if arch == 'x86' else arch
+  assert arch in SUPPORTED_ARCHS
+  return arch
+
+
+class ExecutionArgumentsConfig(object):
+  def __init__(self, label):
+    self.label = label
+
+  def add_arguments(self, parser, default_config):
+    def add_argument(flag_template, help_template, **kwargs):
+      parser.add_argument(
+          flag_template % self.label,
+          help=help_template % self.label,
+          **kwargs)
+
+    add_argument(
+        '--%s-config',
+        '%s configuration',
+        default=default_config)
+    add_argument(
+        '--%s-config-extra-flags',
+        'additional flags passed to the %s run',
+        action='append',
+        default=[])
+    add_argument(
+        '--%s-d8',
+        'optional path to %s d8 executable, '
+        'default: bundled in the directory of this script',
+        default=DEFAULT_D8)
+
+  def make_options(self, options, default_config=None):
+    def get(name):
+      return getattr(options, '%s_%s' % (self.label, name))
+
+    config = default_config or get('config')
+    assert config in CONFIGS
+
+    d8 = get('d8')
+    if not os.path.isabs(d8):
+      d8 = os.path.join(BASE_PATH, d8)
+    assert os.path.exists(d8)
+
+    flags = CONFIGS[config] + get('config_extra_flags')
+
+    RunOptions = namedtuple('RunOptions', ['arch', 'config', 'd8', 'flags'])
+    return RunOptions(infer_arch(d8), config, d8, flags)
+
+
+class ExecutionConfig(object):
+  def __init__(self, options, label):
+    self.options = options
+    self.label = label
+    self.arch = getattr(options, label).arch
+    self.config = getattr(options, label).config
+    d8 = getattr(options, label).d8
+    flags = getattr(options, label).flags
+    self.command = Command(options, label, d8, flags)
+
+  @property
+  def flags(self):
+    return self.command.flags
+
+
+def parse_args():
+  first_config_arguments = ExecutionArgumentsConfig('first')
+  second_config_arguments = ExecutionArgumentsConfig('second')
+
+  parser = argparse.ArgumentParser()
+  parser.add_argument(
+    '--random-seed', type=int, required=True,
+    help='random seed passed to both runs')
+  parser.add_argument(
+      '--skip-sanity-checks', default=False, action='store_true',
+      help='skip sanity checks for testing purposes')
+  parser.add_argument(
+      '--skip-suppressions', default=False, action='store_true',
+      help='skip suppressions to reproduce known issues')
+
+  # Add arguments for each run configuration.
+  first_config_arguments.add_arguments(parser, BASELINE_CONFIG)
+  second_config_arguments.add_arguments(parser, DEFAULT_CONFIG)
+
+  parser.add_argument('testcase', help='path to test case')
+  options = parser.parse_args()
+
+  # Ensure we have a test case.
+  assert (os.path.exists(options.testcase) and
+          os.path.isfile(options.testcase)), (
+      'Test case %s doesn\'t exist' % options.testcase)
+
+  options.first = first_config_arguments.make_options(options)
+  options.second = second_config_arguments.make_options(options)
+  options.default = second_config_arguments.make_options(
+      options, DEFAULT_CONFIG)
+
+  # Ensure we make a valid comparison.
+  if (options.first.d8 == options.second.d8 and
+      options.first.config == options.second.config):
+    parser.error('Need either executable or config difference.')
+
+  return options
+
+
+def get_meta_data(content):
+  """Extracts original-source-file paths from test case content."""
+  sources = []
+  for line in content.splitlines():
+    match = SOURCE_RE.match(line)
+    if match:
+      sources.append(match.group(1))
+  return {'sources': sources}
+
+
+def content_bailout(content, ignore_fun):
+  """Print failure state and return if ignore_fun matches content."""
+  bug = (ignore_fun(content) or '').strip()
+  if bug:
+    raise FailException(FAILURE_HEADER_TEMPLATE % dict(
+        configs='', source_key='', suppression=bug))
+
+
+def fail_bailout(output, ignore_by_output_fun):
+  """Print failure state and return if ignore_by_output_fun matches output."""
+  bug = (ignore_by_output_fun(output.stdout) or '').strip()
+  if bug:
+    raise FailException(FAILURE_HEADER_TEMPLATE % dict(
+        configs='', source_key='', suppression=bug))
+
+
+def format_difference(
+    source_key, first_config, second_config,
+    first_config_output, second_config_output, difference, source=None):
+  # The first three entries will be parsed by clusterfuzz. Format changes
+  # will require changes on the clusterfuzz side.
+  first_config_label = '%s,%s' % (first_config.arch, first_config.config)
+  second_config_label = '%s,%s' % (second_config.arch, second_config.config)
+  source_file_text = SOURCE_FILE_TEMPLATE % source if source else ''
+
+  if PYTHON3:
+    first_stdout = first_config_output.stdout
+    second_stdout = second_config_output.stdout
+  else:
+    first_stdout = first_config_output.stdout.decode('utf-8', 'replace')
+    second_stdout = second_config_output.stdout.decode('utf-8', 'replace')
+    difference = difference.decode('utf-8', 'replace')
+
+  text = (FAILURE_TEMPLATE % dict(
+      configs='%s:%s' % (first_config_label, second_config_label),
+      source_file_text=source_file_text,
+      source_key=source_key,
+      suppression='', # We can't tie bugs to differences.
+      first_config_label=first_config_label,
+      second_config_label=second_config_label,
+      first_config_flags=' '.join(first_config.flags),
+      second_config_flags=' '.join(second_config.flags),
+      first_config_output=first_stdout,
+      second_config_output=second_stdout,
+      source=source,
+      difference=difference,
+  ))
+  if PYTHON3:
+    return text
+  else:
+    return text.encode('utf-8', 'replace')
+
+
+def cluster_failures(source, known_failures=None):
+  """Returns a string key for clustering duplicate failures.
+
+  Args:
+    source: The original source path where the failure happened.
+    known_failures: Mapping from original source path to failure key.
+  """
+  known_failures = known_failures or KNOWN_FAILURES
+  # No source known. Typical for manually uploaded issues. This
+  # requires also manual issue creation.
+  if not source:
+    return ORIGINAL_SOURCE_DEFAULT
+  # Source is known to produce a particular failure.
+  if source in known_failures:
+    return known_failures[source]
+  # Subsume all other sources from CrashTests under one key. Otherwise
+  # failures lead to new crash tests which in turn lead to new failures.
+  if source.startswith('CrashTests'):
+    return ORIGINAL_SOURCE_CRASHTESTS
+
+  # We map all remaining failures to a short hash of the original source.
+  long_key = hashlib.sha1(source.encode('utf-8')).hexdigest()
+  return long_key[:ORIGINAL_SOURCE_HASH_LENGTH]
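+# For illustration: cluster_failures('') yields 'none',
+# cluster_failures('CrashTests/path/to/file.js') yields the CrashTests
+# placeholder, and any other source maps to the first 3 hex chars of its
+# sha1 hash (e.g. '980' for 'v8/test/mjsunit/apply.js').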
+
+
+def run_comparisons(suppress, execution_configs, test_case, timeout,
+                    verbose=True, ignore_crashes=True, source_key=None):
+  """Runs different configurations and bails out on output difference.
+
+  Args:
+    suppress: The helper object for textual suppressions.
+    execution_configs: Two or more configurations to run. The first one will be
+        used as baseline to compare all others to.
+    test_case: The test case to run.
+    timeout: Timeout in seconds for one run.
+    verbose: Prints the executed commands.
+    ignore_crashes: Typically we ignore crashes during fuzzing as they are
+        frequent. However, when running sanity checks we don't expect crashes,
+        so any crash is immediately flagged as a failure.
+    source_key: A fixed source key. If not given, it will be inferred from the
+        output.
+  """
+  run_test_case = lambda config: config.command.run(
+      test_case, timeout=timeout, verbose=verbose)
+
+  # Run the baseline configuration.
+  baseline_config = execution_configs[0]
+  baseline_output = run_test_case(baseline_config)
+  has_crashed = baseline_output.HasCrashed()
+
+  # Iterate over the remaining configurations, run and compare.
+  for comparison_config in execution_configs[1:]:
+    comparison_output = run_test_case(comparison_config)
+    has_crashed = has_crashed or comparison_output.HasCrashed()
+    difference, source = suppress.diff(baseline_output, comparison_output)
+
+    if difference:
+      # Only bail out due to suppressed output if there was a difference. If a
+      # suppression doesn't show up anymore in the statistics, we might want to
+      # remove it.
+      fail_bailout(baseline_output, suppress.ignore_by_output)
+      fail_bailout(comparison_output, suppress.ignore_by_output)
+
+      source_key = source_key or cluster_failures(source)
+      raise FailException(format_difference(
+          source_key, baseline_config, comparison_config,
+          baseline_output, comparison_output, difference, source))
+
+  if has_crashed:
+    if ignore_crashes:
+      # Show if a crash has happened in one of the runs and no difference was
+      # detected. This is only for the statistics during experiments.
+      raise PassException('# V8 correctness - C-R-A-S-H')
+    else:
+      # Subsume unexpected crashes (e.g. during sanity checks) with one failure
+      # state.
+      raise FailException(FAILURE_HEADER_TEMPLATE % dict(
+          configs='', source_key='', suppression='unexpected crash'))
+
+
+def main():
+  options = parse_args()
+  suppress = v8_suppressions.get_suppression(options.skip_suppressions)
+
+  # Static bailout based on test case content or metadata.
+  kwargs = {}
+  if PYTHON3:
+    kwargs['encoding'] = 'utf-8'
+  with open(options.testcase, 'r', **kwargs) as f:
+    content = f.read()
+  content_bailout(get_meta_data(content), suppress.ignore_by_metadata)
+  content_bailout(content, suppress.ignore_by_content)
+
+  # Prepare the baseline, default and a secondary configuration to compare to.
+  # The default (turbofan) takes precedence as many of the secondary configs
+  # are based on the turbofan config with additional parameters.
+  execution_configs = [
+    ExecutionConfig(options, 'first'),
+    ExecutionConfig(options, 'default'),
+    ExecutionConfig(options, 'second'),
+  ]
+
+  # First, run some fixed smoke tests in all configs to ensure nothing
+  # is fundamentally wrong, in order to prevent bug flooding.
+  if not options.skip_sanity_checks:
+    run_comparisons(
+        suppress, execution_configs,
+        test_case=SANITY_CHECKS,
+        timeout=SANITY_CHECK_TIMEOUT_SEC,
+        verbose=False,
+        # Don't accept crashes during sanity checks. A crash would hint at
+        # a flag that might be incompatible or a broken test file.
+        ignore_crashes=False,
+        # Special source key for sanity checks so that clusterfuzz dedupes all
+        # cases on this in case it's hit.
+        source_key = 'sanity check failed',
+    )
+
+  # Second, run all configs against the fuzz test case.
+  run_comparisons(
+      suppress, execution_configs,
+      test_case=options.testcase,
+      timeout=TEST_TIMEOUT_SEC,
+  )
+
+  # TODO(machenbach): Figure out if we could also return a bug in case
+  # there's no difference, but one of the line suppressions has matched -
+  # and without the match there would be a difference.
+  print('# V8 correctness - pass')
+  return RETURN_PASS
+
+
+if __name__ == "__main__":
+  try:
+    result = main()
+  except FailException as e:
+    print(e.message)
+    result = RETURN_FAIL
+  except PassException as e:
+    print(e.message)
+    result = RETURN_PASS
+  except SystemExit:
+    # Make sure clusterfuzz reports internal errors and wrong usage.
+    # Use one label for all internal and usage errors.
+    print(FAILURE_HEADER_TEMPLATE % dict(
+        configs='', source_key='', suppression='wrong_usage'))
+    result = RETURN_FAIL
+  except MemoryError:
+    # Running out of memory happens occasionally but is not actionable.
+    print('# V8 correctness - pass')
+    result = RETURN_PASS
+  except Exception as e:
+    print(FAILURE_HEADER_TEMPLATE % dict(
+        configs='', source_key='', suppression='internal_error'))
+    print('# Internal error: %s' % e)
+    traceback.print_exc(file=sys.stdout)
+    result = RETURN_FAIL
+
+  sys.exit(result)
diff --git a/src/third_party/v8/tools/clusterfuzz/v8_foozzie_harness_adjust.js b/src/third_party/v8/tools/clusterfuzz/v8_foozzie_harness_adjust.js
new file mode 100644
index 0000000..4a8ed35
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/v8_foozzie_harness_adjust.js
@@ -0,0 +1,96 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Extensions to mjsunit and other test harnesses added between harness and
+// fuzzing code.
+
+try {
+  // Scope for utility functions.
+  (function() {
+    // Same as in mjsunit.js.
+    function classOf(object) {
+      // Argument must not be null or undefined.
+      var string = Object.prototype.toString.call(object);
+      // String has format [object <ClassName>].
+      return string.substring(8, string.length - 1);
+    }
+
+    // Override prettyPrinted with a version that also recursively prints object
+    // properties (with a depth of 3).
+    let origPrettyPrinted = prettyPrinted;
+    prettyPrinted = function prettyPrinted(value, depth=3) {
+      if (depth == 0) {
+        return "...";
+      }
+      switch (typeof value) {
+        case "object":
+          if (value === null) return "null";
+          var objectClass = classOf(value);
+          switch (objectClass) {
+            case "Object":
+              var name = value.constructor.name;
+              if (!name)
+                name = "Object";
+              return name + "{" + Object.keys(value).map(function(key, index) {
+                return (
+                    prettyPrinted(key, depth - 1) +
+                    ": " +
+                    prettyPrinted(value[key], depth - 1)
+                );
+              }).join(",")  + "}";
+          }
+      }
+      // Fall through to original version for all other types.
+      return origPrettyPrinted(value);
+    }
+
+    // We're not interested in stack traces.
+    MjsUnitAssertionError = function MjsUnitAssertionError(message) {}
+    MjsUnitAssertionError.prototype.toString = function () { return ""; };
+
+    // Do more printing in assertions for more correctness coverage.
+    failWithMessage = function failWithMessage(message) {
+      print(prettyPrinted(message))
+    }
+
+    assertSame = function assertSame(expected, found, name_opt) {
+      print(prettyPrinted(found));
+    }
+
+    assertNotSame = function assertNotSame(expected, found, name_opt) {
+      print(prettyPrinted(found));
+    }
+
+    assertEquals = function assertEquals(expected, found, name_opt) {
+      print(prettyPrinted(found));
+    }
+
+    assertNotEquals = function assertNotEquals(expected, found, name_opt) {
+      print(prettyPrinted(found));
+    }
+
+    assertNull = function assertNull(value, name_opt) {
+      print(prettyPrinted(value));
+    }
+
+    assertNotNull = function assertNotNull(value, name_opt) {
+      print(prettyPrinted(value));
+    }
+
+    // Suppress optimization status as it leads to false positives.
+    assertUnoptimized = function assertUnoptimized() {}
+
+    assertOptimized = function assertOptimized() {}
+
+    isNeverOptimize = function isNeverOptimize() {}
+
+    isAlwaysOptimize = function isAlwaysOptimize() {}
+
+    isInterpreted = function isInterpreted() {}
+
+    isOptimized = function isOptimized() {}
+
+    isTurboFanned = function isTurboFanned() {}
+  })();
+} catch(e) { }
diff --git a/src/third_party/v8/tools/clusterfuzz/v8_foozzie_test.py b/src/third_party/v8/tools/clusterfuzz/v8_foozzie_test.py
new file mode 100755
index 0000000..8bb568c
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/v8_foozzie_test.py
@@ -0,0 +1,345 @@
+#!/usr/bin/env python
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import random
+import subprocess
+import sys
+import unittest
+
+import v8_commands
+import v8_foozzie
+import v8_fuzz_config
+import v8_suppressions
+
+try:
+  basestring
+except NameError:
+  basestring = str
+
+PYTHON3 = sys.version_info >= (3, 0)
+
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+FOOZZIE = os.path.join(BASE_DIR, 'v8_foozzie.py')
+TEST_DATA = os.path.join(BASE_DIR, 'testdata')
+
+KNOWN_BUILDS = [
+  'd8',
+  'clang_x86/d8',
+  'clang_x86_v8_arm/d8',
+  'clang_x64_v8_arm64/d8',
+  'clang_x64_pointer_compression/d8',
+]
+
+
+class ConfigTest(unittest.TestCase):
+  def testExperiments(self):
+    """Test integrity of probabilities and configs."""
+    CONFIGS = v8_foozzie.CONFIGS
+    EXPERIMENTS = v8_fuzz_config.FOOZZIE_EXPERIMENTS
+    FLAGS = v8_fuzz_config.ADDITIONAL_FLAGS
+    # Probabilities add up to 100%.
+    first_is_int = lambda x: type(x[0]) == int
+    assert all(map(first_is_int, EXPERIMENTS))
+    assert sum(x[0] for x in EXPERIMENTS) == 100
+    # Configs used in experiments are defined.
+    assert all(map(lambda x: x[1] in CONFIGS, EXPERIMENTS))
+    assert all(map(lambda x: x[2] in CONFIGS, EXPERIMENTS))
+    # The last config item points to a known build configuration.
+    assert all(map(lambda x: x[3] in KNOWN_BUILDS, EXPERIMENTS))
+    # All flags have a probability.
+    first_is_float = lambda x: type(x[0]) == float
+    assert all(map(first_is_float, FLAGS))
+    first_between_0_and_1 = lambda x: x[0] > 0 and x[0] < 1
+    assert all(map(first_between_0_and_1, FLAGS))
+    # Test consistent flags.
+    second_is_string = lambda x: isinstance(x[1], basestring)
+    assert all(map(second_is_string, FLAGS))
+    # We allow spaces to separate more flags. We don't allow spaces in the flag
+    # value.
+    is_flag = lambda x: x.startswith('--')
+    all_parts_are_flags = lambda x: all(map(is_flag, x[1].split()))
+    assert all(map(all_parts_are_flags, FLAGS))
+
+  def testConfig(self):
+    """Smoke test how to choose experiments."""
+    config = v8_fuzz_config.Config('foo', random.Random(42))
+    experiments = [
+      [25, 'ignition', 'jitless', 'd8'],
+      [75, 'ignition', 'ignition', 'clang_x86/d8'],
+    ]
+    flags = [
+      [0.1, '--flag'],
+      [0.3, '--baz'],
+      [0.3, '--foo --bar'],
+    ]
+    self.assertEqual(
+        [
+          '--first-config=ignition',
+          '--second-config=jitless',
+          '--second-d8=d8',
+          '--second-config-extra-flags=--baz',
+          '--second-config-extra-flags=--foo',
+          '--second-config-extra-flags=--bar',
+        ],
+        config.choose_foozzie_flags(experiments, flags),
+    )
+    self.assertEqual(
+        [
+          '--first-config=ignition',
+          '--second-config=jitless',
+          '--second-d8=d8',
+        ],
+        config.choose_foozzie_flags(experiments, flags),
+    )
+
+
+class UnitTest(unittest.TestCase):
+  def testCluster(self):
+    crash_test_example_path = 'CrashTests/path/to/file.js'
+    self.assertEqual(
+        v8_foozzie.ORIGINAL_SOURCE_DEFAULT,
+        v8_foozzie.cluster_failures(''))
+    self.assertEqual(
+        v8_foozzie.ORIGINAL_SOURCE_CRASHTESTS,
+        v8_foozzie.cluster_failures(crash_test_example_path))
+    self.assertEqual(
+        '_o_O_',
+        v8_foozzie.cluster_failures(
+            crash_test_example_path,
+            known_failures={crash_test_example_path: '_o_O_'}))
+    self.assertEqual(
+        '980',
+        v8_foozzie.cluster_failures('v8/test/mjsunit/apply.js'))
+
+  def testDiff(self):
+    def diff_fun(one, two, skip=False):
+      suppress = v8_suppressions.get_suppression(skip)
+      return suppress.diff_lines(one.splitlines(), two.splitlines())
+
+    one = ''
+    two = ''
+    diff = None, None
+    self.assertEqual(diff, diff_fun(one, two))
+
+    one = 'a \n  b\nc();'
+    two = 'a \n  b\nc();'
+    diff = None, None
+    self.assertEqual(diff, diff_fun(one, two))
+
+    # Ignore line before caret and caret position.
+    one = """
+undefined
+weird stuff
+      ^
+somefile.js: TypeError: suppressed message
+  undefined
+"""
+    two = """
+undefined
+other weird stuff
+            ^
+somefile.js: TypeError: suppressed message
+  undefined
+"""
+    diff = None, None
+    self.assertEqual(diff, diff_fun(one, two))
+
+    one = """
+Still equal
+Extra line
+"""
+    two = """
+Still equal
+"""
+    diff = '- Extra line', None
+    self.assertEqual(diff, diff_fun(one, two))
+
+    one = """
+Still equal
+"""
+    two = """
+Still equal
+Extra line
+"""
+    diff = '+ Extra line', None
+    self.assertEqual(diff, diff_fun(one, two))
+
+    one = """
+undefined
+somefile.js: TypeError: undefined is not a constructor
+"""
+    two = """
+undefined
+otherfile.js: TypeError: undefined is not a constructor
+"""
+    diff = """- somefile.js: TypeError: undefined is not a constructor
++ otherfile.js: TypeError: undefined is not a constructor""", None
+    self.assertEqual(diff, diff_fun(one, two))
+
+    # Test that skipping suppressions works.
+    one = """
+v8-foozzie source: foo
+weird stuff
+      ^
+"""
+    two = """
+v8-foozzie source: foo
+other weird stuff
+            ^
+"""
+    self.assertEqual((None, 'foo'), diff_fun(one, two))
+    diff = ('-       ^\n+             ^', 'foo')
+    self.assertEqual(diff, diff_fun(one, two, skip=True))
+
+  def testOutputCapping(self):
+    def output(stdout, is_crash):
+      exit_code = -1 if is_crash else 0
+      return v8_commands.Output(exit_code=exit_code, stdout=stdout, pid=0)
+
+    def check(stdout1, stdout2, is_crash1, is_crash2, capped_lines1,
+              capped_lines2):
+      output1 = output(stdout1, is_crash1)
+      output2 = output(stdout2, is_crash2)
+      self.assertEqual(
+          (capped_lines1, capped_lines2),
+          v8_suppressions.get_output_capped(output1, output2))
+
+    # No capping, already equal.
+    check('1\n2', '1\n2', True, True, '1\n2', '1\n2')
+    # No crash, no capping.
+    check('1\n2', '1\n2\n3', False, False, '1\n2', '1\n2\n3')
+    check('1\n2\n3', '1\n2', False, False, '1\n2\n3', '1\n2')
+    # Cap smallest if all runs crash.
+    check('1\n2', '1\n2\n3', True, True, '1\n2', '1\n2')
+    check('1\n2\n3', '1\n2', True, True, '1\n2', '1\n2')
+    check('1\n2', '1\n23', True, True, '1\n2', '1\n2')
+    check('1\n23', '1\n2', True, True, '1\n2', '1\n2')
+    # Cap the non-crashy run.
+    check('1\n2\n3', '1\n2', False, True, '1\n2', '1\n2')
+    check('1\n2', '1\n2\n3', True, False, '1\n2', '1\n2')
+    check('1\n23', '1\n2', False, True, '1\n2', '1\n2')
+    check('1\n2', '1\n23', True, False, '1\n2', '1\n2')
+    # The crashy run has more output.
+    check('1\n2\n3', '1\n2', True, False, '1\n2\n3', '1\n2')
+    check('1\n2', '1\n2\n3', False, True, '1\n2', '1\n2\n3')
+    check('1\n23', '1\n2', True, False, '1\n23', '1\n2')
+    check('1\n2', '1\n23', False, True, '1\n2', '1\n23')
+    # Keep output difference when capping.
+    check('1\n2', '3\n4\n5', True, True, '1\n2', '3\n4')
+    check('1\n2\n3', '4\n5', True, True, '1\n2', '4\n5')
+    check('12', '345', True, True, '12', '34')
+    check('123', '45', True, True, '12', '45')
+
+
+def cut_verbose_output(stdout, n_comp):
+  # This removes the first lines containing d8 commands of `n_comp` comparison
+  # runs.
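+  # Each comparison run contributes two such lines, hence the `n_comp * 2`
+  # slice below.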
+  return '\n'.join(stdout.split('\n')[n_comp * 2:])
+
+
+def run_foozzie(second_d8_dir, *extra_flags, **kwargs):
+  second_config = 'ignition_turbo'
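+  # Note: only 'jitless' is supported as an alternative second config in these
+  # tests; passing any `second_config` kwarg selects it.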
+  if 'second_config' in kwargs:
+    second_config = 'jitless'
+  kwargs = {}
+  if PYTHON3:
+    kwargs['text'] = True
+  return subprocess.check_output([
+    sys.executable, FOOZZIE,
+    '--random-seed', '12345',
+    '--first-d8', os.path.join(TEST_DATA, 'baseline', 'd8.py'),
+    '--second-d8', os.path.join(TEST_DATA, second_d8_dir, 'd8.py'),
+    '--first-config', 'ignition',
+    '--second-config', second_config,
+    os.path.join(TEST_DATA, 'fuzz-123.js'),
+  ] + list(extra_flags), **kwargs)
+
+class SystemTest(unittest.TestCase):
+  """This tests the whole correctness-fuzzing harness with fake build
+  artifacts.
+
+  Overview of fakes:
+    baseline: Example foozzie output including a syntax error.
+    build1: Difference to baseline is a stack trace difference expected to
+            be suppressed.
+    build2: Difference to baseline is a non-suppressed output difference
+            causing the script to fail.
+    build3: As build1 but with an architecture difference as well.
+  """
+  def testSyntaxErrorDiffPass(self):
+    stdout = run_foozzie('build1', '--skip-sanity-checks')
+    self.assertEqual('# V8 correctness - pass\n',
+                     cut_verbose_output(stdout, 3))
+    # Default comparison includes suppressions.
+    self.assertIn('v8_suppressions.js', stdout)
+    # Default comparison doesn't include any specific mock files.
+    self.assertNotIn('v8_mock_archs.js', stdout)
+    self.assertNotIn('v8_mock_webassembly.js', stdout)
+
+  def testDifferentOutputFail(self):
+    with open(os.path.join(TEST_DATA, 'failure_output.txt')) as f:
+      expected_output = f.read()
+    with self.assertRaises(subprocess.CalledProcessError) as ctx:
+      run_foozzie('build2', '--skip-sanity-checks',
+                  '--first-config-extra-flags=--flag1',
+                  '--first-config-extra-flags=--flag2=0',
+                  '--second-config-extra-flags=--flag3')
+    e = ctx.exception
+    self.assertEqual(v8_foozzie.RETURN_FAIL, e.returncode)
+    self.assertEqual(expected_output, cut_verbose_output(e.output, 2))
+
+  def testSanityCheck(self):
+    with open(os.path.join(TEST_DATA, 'sanity_check_output.txt')) as f:
+      expected_output = f.read()
+    with self.assertRaises(subprocess.CalledProcessError) as ctx:
+      run_foozzie('build2')
+    e = ctx.exception
+    self.assertEqual(v8_foozzie.RETURN_FAIL, e.returncode)
+    self.assertEqual(expected_output, e.output)
+
+  def testDifferentArch(self):
+    """Test that the architecture-specific mocks are passed to both runs when
+    we use executables with different architectures.
+    """
+    # Build 3 simulates x86, while the baseline is x64.
+    stdout = run_foozzie('build3', '--skip-sanity-checks')
+    lines = stdout.split('\n')
+    # TODO(machenbach): Don't depend on the command-lines being printed in
+    # particular lines.
+    self.assertIn('v8_mock_archs.js', lines[1])
+    self.assertIn('v8_mock_archs.js', lines[3])
+
+  def testJitless(self):
+    """Test that webassembly is mocked out when comparing with jitless."""
+    stdout = run_foozzie(
+        'build1', '--skip-sanity-checks', second_config='jitless')
+    lines = stdout.split('\n')
+    # TODO(machenbach): Don't depend on the command-lines being printed in
+    # particular lines.
+    self.assertIn('v8_mock_webassembly.js', lines[1])
+    self.assertIn('v8_mock_webassembly.js', lines[3])
+
+  def testSkipSuppressions(self):
+    """Test that the suppressions file is not passed when skipping
+    suppressions.
+    """
+    # Compare baseline with baseline. This passes as there is no difference.
+    stdout = run_foozzie(
+        'baseline', '--skip-sanity-checks', '--skip-suppressions')
+    self.assertNotIn('v8_suppressions.js', stdout)
+
+    # Compare with a build that usually suppresses a difference. Now we fail
+    # since we skip suppressions.
+    with self.assertRaises(subprocess.CalledProcessError) as ctx:
+      run_foozzie(
+          'build1', '--skip-sanity-checks', '--skip-suppressions')
+    e = ctx.exception
+    self.assertEqual(v8_foozzie.RETURN_FAIL, e.returncode)
+    self.assertNotIn('v8_suppressions.js', e.output)
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/src/third_party/v8/tools/clusterfuzz/v8_fuzz_config.py b/src/third_party/v8/tools/clusterfuzz/v8_fuzz_config.py
new file mode 100644
index 0000000..99439a9
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/v8_fuzz_config.py
@@ -0,0 +1,64 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import os
+import random
+
+THIS_DIR = os.path.dirname(os.path.abspath(__file__))
+
+# List of configuration experiments for correctness fuzzing.
+# List of <probability>, <1st config name>, <2nd config name>, <2nd d8>.
+# Probabilities must add up to 100.
+with open(os.path.join(THIS_DIR, 'v8_fuzz_experiments.json')) as f:
+  FOOZZIE_EXPERIMENTS = json.load(f)
+
+# Additional flag experiments. List of tuples like
+# (<likelihood to use flags in [0,1)>, <flag>).
+with open(os.path.join(THIS_DIR, 'v8_fuzz_flags.json')) as f:
+  ADDITIONAL_FLAGS = json.load(f)
+
+
+class Config(object):
+  def __init__(self, name, rng=None):
+    """
+    Args:
+      name: Name of the fuzzer used.
+      rng: Random number generator for generating experiments.
+    """
+    self.name = name
+    self.rng = rng or random.Random()
+
+  def choose_foozzie_flags(self, foozzie_experiments=None, additional_flags=None):
+    """Randomly chooses a configuration from FOOZZIE_EXPERIMENTS.
+
+    Args:
+      foozzie_experiments: Override experiment config for testing.
+      additional_flags: Override additional flags for testing.
+
+    Returns: List of flags to pass to v8_foozzie.py fuzz harness.
+    """
+    foozzie_experiments = foozzie_experiments or FOOZZIE_EXPERIMENTS
+    additional_flags = additional_flags or ADDITIONAL_FLAGS
+
+    # Add additional flags to second config based on experiment percentages.
+    extra_flags = []
+    for p, flags in additional_flags:
+      if self.rng.random() < p:
+        for flag in flags.split():
+          extra_flags.append('--second-config-extra-flags=%s' % flag)
+
+    # Calculate flags determining the experiment.
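+    # This is a weighted random choice, e.g. with probabilities [15, 10, ...]
+    # and a threshold of 20.0, the cumulative sum first exceeds the threshold
+    # at the second entry, so that experiment is chosen.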
+    acc = 0
+    threshold = self.rng.random() * 100
+    for prob, first_config, second_config, second_d8 in foozzie_experiments:
+      acc += prob
+      if acc > threshold:
+        return [
+          '--first-config=' + first_config,
+          '--second-config=' + second_config,
+          '--second-d8=' + second_d8,
+        ] + extra_flags
+    assert False
diff --git a/src/third_party/v8/tools/clusterfuzz/v8_fuzz_experiments.json b/src/third_party/v8/tools/clusterfuzz/v8_fuzz_experiments.json
new file mode 100644
index 0000000..8c6baa2
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/v8_fuzz_experiments.json
@@ -0,0 +1,15 @@
+[
+  [15, "ignition", "jitless", "d8"],
+  [10, "ignition", "slow_path", "d8"],
+  [10, "ignition_no_ic", "slow_path", "d8"],
+  [5, "ignition", "slow_path_opt", "d8"],
+  [5, "ignition", "ignition_turbo_no_ic", "d8"],
+  [20, "ignition", "ignition_turbo_opt", "d8"],
+  [5, "ignition_no_ic", "ignition_turbo_opt", "d8"],
+  [5, "ignition_turbo", "ignition_turbo_opt", "clang_x64_pointer_compression/d8"],
+  [5, "ignition", "ignition", "clang_x86/d8"],
+  [5, "ignition", "ignition_turbo_opt", "clang_x86/d8"],
+  [5, "ignition", "slow_path", "clang_x86/d8"],
+  [5, "ignition", "ignition_turbo_opt", "clang_x64_v8_arm64/d8"],
+  [5, "ignition", "ignition_turbo_opt", "clang_x86_v8_arm/d8"]
+]
\ No newline at end of file
diff --git a/src/third_party/v8/tools/clusterfuzz/v8_fuzz_flags.json b/src/third_party/v8/tools/clusterfuzz/v8_fuzz_flags.json
new file mode 100644
index 0000000..71ab2c2
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/v8_fuzz_flags.json
@@ -0,0 +1,34 @@
+[
+  [0.05, "--stress-compaction-random"],
+  [0.05, "--stress-delay-tasks"],
+  [0.05, "--stress-marking=100"],
+  [0.05, "--stress-scavenge=100"],
+  [0.05, "--random-gc-interval=2000"],
+  [0.2, "--noanalyze-environment-liveness"],
+  [0.01, "--thread-pool-size=1"],
+  [0.01, "--thread-pool-size=2"],
+  [0.01, "--thread-pool-size=4"],
+  [0.01, "--thread-pool-size=8"],
+  [0.1, "--interrupt-budget=1000"],
+  [0.25, "--future"],
+  [0.2, "--no-regexp-tier-up"],
+  [0.1, "--regexp-interpret-all"],
+  [0.1, "--regexp-tier-up-ticks=10"],
+  [0.1, "--regexp-tier-up-ticks=100"],
+  [0.1, "--turbo-instruction-scheduling"],
+  [0.1, "--turbo-stress-instruction-scheduling"],
+  [0.1, "--no-enable-sse3"],
+  [0.1, "--no-enable-ssse3"],
+  [0.1, "--no-enable-sse4_1"],
+  [0.1, "--no-enable-sse4_2"],
+  [0.1, "--no-enable-sahf"],
+  [0.1, "--no-enable-avx"],
+  [0.1, "--no-enable-fma3"],
+  [0.1, "--no-enable-bmi1"],
+  [0.1, "--no-enable-bmi2"],
+  [0.1, "--no-enable-lzcnt"],
+  [0.1, "--no-enable-popcnt"],
+  [0.25, "--no-lazy-feedback-allocation"],
+  [0.1, "--no-lazy-feedback-allocation --interrupt-budget=100"],
+  [0.05, "--budget-for-feedback-vector-allocation=0"]
+]
\ No newline at end of file
diff --git a/src/third_party/v8/tools/clusterfuzz/v8_mock.js b/src/third_party/v8/tools/clusterfuzz/v8_mock.js
new file mode 100644
index 0000000..be7a40b
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/v8_mock.js
@@ -0,0 +1,206 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is intended for permanent JS behavior changes for mocking out
+// non-deterministic behavior. For temporary suppressions, please refer to
+// v8_suppressions.js.
+// This file is loaded before each correctness test case and won't get
+// minimized.
+
+
+// This will be overridden in the test cases. The override can be minimized.
+var prettyPrinted = function prettyPrinted(msg) { return msg; };
+
+// Mock Math.random.
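+// The sequence below is deterministic, so both comparison runs observe the
+// same pseudo-random values.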
+(function() {
+  let index = 1;
+  Math.random = function() {
+      const x = Math.sin(index++) * 10000;
+      return x - Math.floor(x);
+  }
+})();
+
+// Mock Math.pow. Work around an optimization for -0.5.
+(function() {
+  const origMathPow = Math.pow;
+  Math.pow = function(a, b) {
+    if (b === -0.5) {
+      return 0;
+    } else {
+      return origMathPow(a, b);
+    }
+  }
+})();
+
+
+// Mock Date.
+(function() {
+  let index = 0;
+  let mockDate = 1477662728696;
+  const mockDateNow = function() {
+    index = (index + 1) % 10;
+    mockDate = mockDate + index + 1;
+    return mockDate;
+  }
+
+  const origDate = Date;
+  const construct = Reflect.construct;
+  const constructDate = function(args) {
+    let result;
+    if (args.length) {
+      result = construct(origDate, args);
+    } else {
+      result = new origDate(mockDateNow());
+    }
+    result.constructor = function(...args) { return constructDate(args); }
+    Object.defineProperty(
+        result, "constructor", { configurable: false, writable: false });
+    return result;
+  }
+
+  origDate.prototype.constructor = function(...args) {
+    return constructDate(args);
+  };
+
+  var handler = {
+    apply: function(target, thisArg, args) {
+      return constructDate(args);
+    },
+    construct: function(target, args, newTarget) {
+      return constructDate(args);
+    },
+    get: function(target, property, receiver) {
+      if (property == "now") {
+        return mockDateNow;
+      }
+      if (property == "prototype") {
+        return origDate.prototype;
+      }
+    },
+  }
+
+  Date = new Proxy(Date, handler);
+})();
+
+// Mock performance methods.
+performance.now = function() { return 1.2; };
+performance.measureMemory = function() { return []; };
+
+// Mock readline so that test cases don't hang.
+readline = function() { return "foo"; };
+
+// Mock stack traces.
+Error.prepareStackTrace = function(error, structuredStackTrace) {
+  return "";
+};
+Object.defineProperty(
+    Error, 'prepareStackTrace', { configurable: false, writable: false });
+
+// Mock buffer access in float typed arrays because of varying NaN patterns.
+(function() {
+  const origIsNaN = isNaN;
+  const deNaNify = function(value) { return origIsNaN(value) ? 1 : value; };
+  const mock = function(type) {
+
+    // Remove NaN values from parameters to "set" function.
+    const set = type.prototype.set;
+    type.prototype.set = function(array, offset) {
+      if (Array.isArray(array)) {
+        array = array.map(deNaNify);
+      }
+      set.apply(this, [array, offset]);
+    };
+
+    const handler = {
+      // Remove NaN values from parameters to constructor.
+      construct: function(target, args) {
+        for (let i = 0; i < args.length; i++) {
+          if (args[i] != null &&
+              typeof args[i][Symbol.iterator] === 'function') {
+            // Consume iterators.
+            args[i] = Array.from(args[i]);
+          }
+          if (Array.isArray(args[i])) {
+            args[i] = args[i].map(deNaNify);
+          }
+        }
+
+        const obj = new (
+            Function.prototype.bind.call(type, null, ...args));
+        return new Proxy(obj, {
+          get: function(x, prop) {
+            if (typeof x[prop] == "function")
+              return x[prop].bind(obj);
+            return x[prop];
+          },
+          // Remove NaN values that get assigned.
+          set: function(target, prop, value, receiver) {
+            target[prop] = deNaNify(value);
+            return value;
+          }
+        });
+      },
+    };
+    return new Proxy(type, handler);
+  }
+
+  Float32Array = mock(Float32Array);
+  Float64Array = mock(Float64Array);
+})();
+
+// Mock buffer access via DataViews because of varying NaN patterns.
+(function() {
+  const origIsNaN = isNaN;
+  const deNaNify = function(value) { return origIsNaN(value) ? 1 : value; };
+  const origSetFloat32 = DataView.prototype.setFloat32;
+  DataView.prototype.setFloat32 = function(offset, value, ...rest) {
+    origSetFloat32.call(this, offset, deNaNify(value), ...rest);
+  };
+  const origSetFloat64 = DataView.prototype.setFloat64;
+  DataView.prototype.setFloat64 = function(offset, value, ...rest) {
+    origSetFloat64.call(this, offset, deNaNify(value), ...rest);
+  };
+})();
+
+// Mock Worker.
+(function() {
+  let index = 0;
+  // TODO(machenbach): Randomize this for each test case, but keep stable
+  // during comparison. Also data and random above.
+  const workerMessages = [
+    undefined, 0, -1, "", "foo", 42, [], {}, [0], {"x": 0}
+  ];
+  Worker = function(code){
+    try {
+      print(prettyPrinted(eval(code)));
+    } catch(e) {
+      print(prettyPrinted(e));
+    }
+    this.getMessage = function(){
+      index = (index + 1) % 10;
+      return workerMessages[index];
+    }
+    this.postMessage = function(msg){
+      print(prettyPrinted(msg));
+    }
+  };
+})();
+
+// Mock Realm.
+Realm.eval = function(realm, code) { return eval(code) };
+
+// Mock the nondeterministic parts of WeakRef and FinalizationRegistry.
+WeakRef.prototype.deref = function() { };
+FinalizationRegistry = function(callback) { };
+FinalizationRegistry.prototype.register = function(target, holdings) { };
+FinalizationRegistry.prototype.unregister = function(unregisterToken) { };
+FinalizationRegistry.prototype.cleanupSome = function() { };
+FinalizationRegistry.prototype[Symbol.toStringTag] = "FinalizationRegistry";
+
+// Mock the nondeterministic Atomics.waitAsync.
+Atomics.waitAsync = function() {
+  // Return a mock "Promise" whose "then" function will call the callback
+  // immediately.
+  return {'value': {'then': function (f) { f(); }}};
+}
diff --git a/src/third_party/v8/tools/clusterfuzz/v8_mock_archs.js b/src/third_party/v8/tools/clusterfuzz/v8_mock_archs.js
new file mode 100644
index 0000000..3482e8c
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/v8_mock_archs.js
@@ -0,0 +1,91 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is intended for permanent JS behavior changes for mocking out
+// non-deterministic behavior. For temporary suppressions, please refer to
+// v8_suppressions.js.
+// This mocks only architecture-specific differences. Refer to v8_mock.js
+// for the general case.
+// This file is loaded before each correctness test case and won't get
+// minimized.
+
+// Mock the maximum typed-array buffer size and limit it to 1MiB. Otherwise we
+// might get range errors. We ignore those by crashing, but that reduces
+// coverage; hence, let's reduce the range-error rate.
+(function() {
+  // Math.min might be manipulated in test cases.
+  const min = Math.min;
+  const maxBytes = 1048576;
+  const mock = function(type) {
+    const maxLength = maxBytes / (type.BYTES_PER_ELEMENT || 1);
+    const handler = {
+      construct: function(target, args) {
+        if (args[0] && typeof args[0] != "object") {
+          // Length used as first argument.
+          args[0] = min(maxLength, Number(args[0]));
+        } else if (args[0] instanceof ArrayBuffer && args.length > 1) {
+          // Buffer used as first argument.
+          const buffer = args[0];
+          args[1] = Number(args[1]);
+          // Ensure offset is multiple of bytes per element.
+          args[1] = args[1] - (args[1] % type.BYTES_PER_ELEMENT);
+          // Limit offset to length of buffer.
+          args[1] = min(args[1], buffer.byteLength || 0);
+          if (args.length > 2) {
+            // If also length is given, limit it to the maximum that's possible
+            // given buffer and offset.
+            const maxBytesLeft = buffer.byteLength - args[1];
+            const maxLengthLeft = maxBytesLeft / type.BYTES_PER_ELEMENT;
+            args[2] = min(Number(args[2]), maxLengthLeft);
+          }
+        }
+        return new (Function.prototype.bind.apply(type, [null].concat(args)));
+      },
+    };
+    return new Proxy(type, handler);
+  }
+
+  ArrayBuffer = mock(ArrayBuffer);
+  SharedArrayBuffer = mock(SharedArrayBuffer);
+  Int8Array = mock(Int8Array);
+  Uint8Array = mock(Uint8Array);
+  Uint8ClampedArray = mock(Uint8ClampedArray);
+  Int16Array = mock(Int16Array);
+  Uint16Array = mock(Uint16Array);
+  Int32Array = mock(Int32Array);
+  Uint32Array = mock(Uint32Array);
+  BigInt64Array = mock(BigInt64Array);
+  BigUint64Array = mock(BigUint64Array);
+  Float32Array = mock(Float32Array);
+  Float64Array = mock(Float64Array);
+})();
+
+// Mock typed array set function and cap offset to not throw a range error.
+(function() {
+  // Math.min might be manipulated in test cases.
+  const min = Math.min;
+  const types = [
+    Int8Array,
+    Uint8Array,
+    Uint8ClampedArray,
+    Int16Array,
+    Uint16Array,
+    Int32Array,
+    Uint32Array,
+    BigInt64Array,
+    BigUint64Array,
+    Float32Array,
+    Float64Array,
+  ];
+  for (const type of types) {
+    const set = type.prototype.set;
+    type.prototype.set = function(array, offset) {
+      if (Array.isArray(array)) {
+        offset = Number(offset);
+        offset = min(offset, this.length - array.length);
+      }
+      set.call(this, array, offset);
+    };
+  }
+})();
diff --git a/src/third_party/v8/tools/clusterfuzz/v8_mock_webassembly.js b/src/third_party/v8/tools/clusterfuzz/v8_mock_webassembly.js
new file mode 100644
index 0000000..594e6e7
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/v8_mock_webassembly.js
@@ -0,0 +1,18 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This mocks out the WebAssembly object with a permissive dummy.
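+// Any property access on, or call of, the dummy yields the dummy again, and
+// conversion to a primitive yields undefined, so typical WebAssembly usage
+// doesn't throw.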
+
+(function() {
+  const handler = {
+    get: function(x, prop) {
+      if (prop == Symbol.toPrimitive) {
+        return function() { return undefined; };
+      }
+      return dummy;
+    },
+  };
+  const dummy = new Proxy(function() { return dummy; }, handler);
+  WebAssembly = dummy;
+})();
diff --git a/src/third_party/v8/tools/clusterfuzz/v8_sanity_checks.js b/src/third_party/v8/tools/clusterfuzz/v8_sanity_checks.js
new file mode 100644
index 0000000..c2f0b2a
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/v8_sanity_checks.js
@@ -0,0 +1,45 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is executed separately before the correctness test case. Add
+// checks here for global properties that should never differ in any
+// configuration.
+// A difference found in the prints below will prevent any further correctness
+// comparison for the selected configurations to avoid flooding bugs.
+
+print("https://crbug.com/932656");
+print(Object.getOwnPropertyNames(this));
+
+print("https://crbug.com/935800");
+(function () {
+  function foo() {
+    "use asm";
+    function baz() {}
+    return {bar: baz};
+  }
+  print(Object.getOwnPropertyNames(foo().bar));
+})();
+
+print("https://crbug.com/985154");
+(function () {
+  "use strict";
+  function foo() {
+    "use asm";
+    function baz() {}
+    return {bar: baz};
+  }
+  print(Object.getOwnPropertyNames(foo().bar));
+})();
+
+print("Suppresses sensitive natives");
+(function () {
+  function foo() {}
+  %PrepareFunctionForOptimization(foo);
+  foo();
+  foo();
+  %OptimizeFunctionOnNextCall(foo);
+  foo();
+  print(%GetOptimizationStatus(foo));
+  const fun = new Function("f", "sync", "return %GetOptimizationStatus(f);");
+  print(fun(foo));
+})();
diff --git a/src/third_party/v8/tools/clusterfuzz/v8_suppressions.js b/src/third_party/v8/tools/clusterfuzz/v8_suppressions.js
new file mode 100644
index 0000000..d73ce04
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/v8_suppressions.js
@@ -0,0 +1,8 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is loaded before each correctness test case and after v8_mock.js.
+// You can temporarily change JS behavior here to silence known problems.
+// Please refer to a bug in a comment and remove the suppression once the
+// problem is fixed.
diff --git a/src/third_party/v8/tools/clusterfuzz/v8_suppressions.py b/src/third_party/v8/tools/clusterfuzz/v8_suppressions.py
new file mode 100644
index 0000000..71c69fb
--- /dev/null
+++ b/src/third_party/v8/tools/clusterfuzz/v8_suppressions.py
@@ -0,0 +1,272 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Suppressions for V8 correctness fuzzer failures.
+
+We support three types of suppressions:
+1. Ignore test case by pattern.
+Map a regular expression to a bug entry. A new failure will be reported
+when the pattern matches a JS test case.
+Subsequent matches will be recorded under the first failure.
+
+2. Ignore test run by output pattern:
+Map a regular expression to a bug entry. A new failure will be reported
+when the pattern matches the output of a particular run.
+Subsequent matches will be recorded under the first failure.
+
+3. Relax line-to-line comparisons with expressions of lines to ignore and
+lines to be normalized (i.e. ignore only portions of lines).
+These are not tied to bugs; be careful not to silently switch off this tool!
+
+Alternatively, think about adding a behavior change to v8_suppressions.js
+to silence a particular class of problems.
+"""
+
+import itertools
+import re
+
+try:
+  # Python 3
+  from itertools import zip_longest
+except ImportError:
+  # Python 2
+  from itertools import izip_longest as zip_longest
+
+# Max line length for regular expressions checking for lines to ignore.
+MAX_LINE_LENGTH = 512
+
+# For ignoring lines before carets and to ignore caret positions.
+CARET_RE = re.compile(r'^\s*\^\s*$')
+
+# Ignore by original source files. Map from bug->list of relative file paths,
+# e.g. 'v8/test/mjsunit/d8-performance-now.js'. A test will be suppressed if
+# one of the files below was used to mutate the test.
+IGNORE_SOURCES = {
+}
+
+# Ignore by test case pattern. Map from bug->regexp.
+# Bug is preferred to be a crbug.com/XYZ, but can be any short distinguishable
+# label.
+# Regular expressions are assumed to be compiled. We use regexp.search.
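+# An entry would look like (illustrative only):
+#   'crbug.com/12345': re.compile(r'pattern matching the test case', re.S),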
+IGNORE_TEST_CASES = {
+}
+
+# Ignore by output pattern. Map from bug->regexp like above.
+IGNORE_OUTPUT = {
+  'crbug.com/689877':
+      re.compile(r'^.*SyntaxError: .*Stack overflow$', re.M),
+}
+
+# Lines matching any of the following regular expressions will be ignored
+# if appearing on both sides. The capturing groups need to match exactly.
+# Use uncompiled regular expressions - they'll be compiled later.
+ALLOWED_LINE_DIFFS = [
+  # Ignore caret position in stack traces.
+  r'^\s*\^\s*$',
+]
+
+# Lines matching any of the following regular expressions will be ignored.
+# Use uncompiled regular expressions - they'll be compiled later.
+IGNORE_LINES = [
+  r'^Warning: unknown flag .*$',
+  r'^Warning: .+ is deprecated.*$',
+  r'^Try --help for options$',
+
+  # crbug.com/705962
+  r'^\s\[0x[0-9a-f]+\]$',
+]
+
+
+###############################################################################
+# Implementation - you should not need to change anything below this point.
+
+# Compile regular expressions.
+ALLOWED_LINE_DIFFS = [re.compile(exp) for exp in ALLOWED_LINE_DIFFS]
+IGNORE_LINES = [re.compile(exp) for exp in IGNORE_LINES]
+
+ORIGINAL_SOURCE_PREFIX = 'v8-foozzie source: '
+
+
+def get_output_capped(output1, output2):
+  """Returns a pair of stdout strings.
+
+  The strings are safely capped if at least one run has crashed.
+  """
+
+  # No length difference or no crash -> no capping.
+  if (len(output1.stdout) == len(output2.stdout) or
+      (not output1.HasCrashed() and not output2.HasCrashed())):
+    return output1.stdout, output2.stdout
+
+  # Both runs have crashed, cap by the shorter output.
+  if output1.HasCrashed() and output2.HasCrashed():
+    cap = min(len(output1.stdout), len(output2.stdout))
+  # Only the first run has crashed, cap by its output length.
+  elif output1.HasCrashed():
+    cap = len(output1.stdout)
+  # Similar if only the second run has crashed.
+  else:
+    cap = len(output2.stdout)
+
+  return output1.stdout[0:cap], output2.stdout[0:cap]
+
+
+def line_pairs(lines):
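+  # Pairs each line with its successor, e.g. ['a', 'b'] yields
+  # ('a', 'b'), ('b', None).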
+  return zip_longest(
+      lines, itertools.islice(lines, 1, None), fillvalue=None)
+
+
+def caret_match(line1, line2):
+  if (not line1 or
+      not line2 or
+      len(line1) > MAX_LINE_LENGTH or
+      len(line2) > MAX_LINE_LENGTH):
+    return False
+  return bool(CARET_RE.match(line1) and CARET_RE.match(line2))
+
+
+def short_line_output(line):
+  if len(line) <= MAX_LINE_LENGTH:
+    # Avoid copying.
+    return line
+  return line[0:MAX_LINE_LENGTH] + '...'
+
+
+def ignore_by_regexp(line1, line2, allowed):
+  if len(line1) > MAX_LINE_LENGTH or len(line2) > MAX_LINE_LENGTH:
+    return False
+  for exp in allowed:
+    match1 = exp.match(line1)
+    match2 = exp.match(line2)
+    if match1 and match2:
+      # If there are groups in the regexp, ensure the groups matched the same
+      # things.
+      if match1.groups() == match2.groups():  # tuple comparison
+        return True
+  return False
+
+
+def diff_output(output1, output2, allowed, ignore1, ignore2):
+  """Returns a tuple (difference, source).
+
+  The difference is None if there's no difference, otherwise a string
+  with a readable diff.
+
+  The source is the last source output within the test case, or None if no
+  such output existed.
+  """
+  def useful_line(ignore):
+    def fun(line):
+      return all(not e.match(line) for e in ignore)
+    return fun
+
+  lines1 = list(filter(useful_line(ignore1), output1))
+  lines2 = list(filter(useful_line(ignore2), output2))
+
+  # This keeps track of where we are in the original source file of the fuzz
+  # test case.
+  source = None
+
+  for ((line1, lookahead1), (line2, lookahead2)) in zip_longest(
+      line_pairs(lines1), line_pairs(lines2), fillvalue=(None, None)):
+
+    # Only one of the two iterators should run out.
+    assert not (line1 is None and line2 is None)
+
+    # One iterator ends earlier.
+    if line1 is None:
+      return '+ %s' % short_line_output(line2), source
+    if line2 is None:
+      return '- %s' % short_line_output(line1), source
+
+    # If lines are equal, no further checks are necessary.
+    if line1 == line2:
+      # Instrumented original-source-file output must be equal in both
+      # versions. It only makes sense to update it here when both lines
+      # are equal.
+      if line1.startswith(ORIGINAL_SOURCE_PREFIX):
+        source = line1[len(ORIGINAL_SOURCE_PREFIX):]
+      continue
+
+    # Look ahead. If next line is a caret, ignore this line.
+    if caret_match(lookahead1, lookahead2):
+      continue
+
+    # Check if a regexp allows these lines to be different.
+    if ignore_by_regexp(line1, line2, allowed):
+      continue
+
+    # Lines are different.
+    return (
+        '- %s\n+ %s' % (short_line_output(line1), short_line_output(line2)),
+        source,
+    )
+
+  # No difference found.
+  return None, source
+
+
+def get_suppression(skip=False):
+  return V8Suppression(skip)
+
+
+class V8Suppression(object):
+  def __init__(self, skip):
+    if skip:
+      self.allowed_line_diffs = []
+      self.ignore_output = {}
+      self.ignore_sources = {}
+    else:
+      self.allowed_line_diffs = ALLOWED_LINE_DIFFS
+      self.ignore_output = IGNORE_OUTPUT
+      self.ignore_sources = IGNORE_SOURCES
+
+  def diff(self, output1, output2):
+    # Diff capped lines in the presence of crashes.
+    return self.diff_lines(
+        *map(str.splitlines, get_output_capped(output1, output2)))
+
+  def diff_lines(self, output1_lines, output2_lines):
+    return diff_output(
+        output1_lines,
+        output2_lines,
+        self.allowed_line_diffs,
+        IGNORE_LINES,
+        IGNORE_LINES,
+    )
+
+  def ignore_by_content(self, testcase):
+    # Strip off test case preamble.
+    try:
+      lines = testcase.splitlines()
+      lines = lines[lines.index(
+          'print("js-mutation: start generated test case");'):]
+      content = '\n'.join(lines)
+    except ValueError:
+      # Search the whole test case if preamble can't be found. E.g. older
+      # already minimized test cases might have dropped the delimiter line.
+      content = testcase
+    for bug, exp in IGNORE_TEST_CASES.items():
+      if exp.search(content):
+        return bug
+    return None
+
+  def ignore_by_metadata(self, metadata):
+    for bug, sources in self.ignore_sources.items():
+      for source in sources:
+        if source in metadata['sources']:
+          return bug
+    return None
+
+  def ignore_by_output(self, output):
+    def check(mapping):
+      for bug, exp in mapping.items():
+        if exp.search(output):
+          return bug
+      return None
+    bug = check(self.ignore_output)
+    if bug:
+      return bug
+    return None
diff --git a/src/third_party/v8/tools/codemap.js b/src/third_party/v8/tools/codemap.js
new file mode 100644
index 0000000..71e3e6a
--- /dev/null
+++ b/src/third_party/v8/tools/codemap.js
@@ -0,0 +1,320 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+/**
+ * Constructs a mapper that maps addresses into code entries.
+ *
+ * @constructor
+ */
+function CodeMap() {
+  /**
+   * Dynamic code entries. Used for JIT compiled code.
+   */
+  this.dynamics_ = new SplayTree();
+
+  /**
+   * Name generator for entries having duplicate names.
+   */
+  this.dynamicsNameGen_ = new CodeMap.NameGenerator();
+
+  /**
+   * Static code entries. Used for statically compiled code.
+   */
+  this.statics_ = new SplayTree();
+
+  /**
+   * Libraries entries. Used for the whole static code libraries.
+   */
+  this.libraries_ = new SplayTree();
+
+  /**
+   * Map of memory pages occupied with static code.
+   */
+  this.pages_ = [];
+};
+
+
+/**
+ * The number of alignment bits in a page address.
+ */
+CodeMap.PAGE_ALIGNMENT = 12;
+
+
+/**
+ * Page size in bytes.
+ */
+CodeMap.PAGE_SIZE =
+    1 << CodeMap.PAGE_ALIGNMENT;
+
+
+/**
+ * Adds a dynamic (i.e. moveable and discardable) code entry.
+ *
+ * @param {number} start The starting address.
+ * @param {CodeMap.CodeEntry} codeEntry Code entry object.
+ */
+CodeMap.prototype.addCode = function(start, codeEntry) {
+  this.deleteAllCoveredNodes_(this.dynamics_, start, start + codeEntry.size);
+  this.dynamics_.insert(start, codeEntry);
+};
+
+
+/**
+ * Moves a dynamic code entry. Throws an exception if there is no dynamic
+ * code entry with the specified starting address.
+ *
+ * @param {number} from The starting address of the entry being moved.
+ * @param {number} to The destination address.
+ */
+CodeMap.prototype.moveCode = function(from, to) {
+  var removedNode = this.dynamics_.remove(from);
+  this.deleteAllCoveredNodes_(this.dynamics_, to, to + removedNode.value.size);
+  this.dynamics_.insert(to, removedNode.value);
+};
+
+
+/**
+ * Discards a dynamic code entry. Throws an exception if there is no dynamic
+ * code entry with the specified starting address.
+ *
+ * @param {number} start The starting address of the entry being deleted.
+ */
+CodeMap.prototype.deleteCode = function(start) {
+  var removedNode = this.dynamics_.remove(start);
+};
+
+
+/**
+ * Adds a library entry.
+ *
+ * @param {number} start The starting address.
+ * @param {CodeMap.CodeEntry} codeEntry Code entry object.
+ */
+CodeMap.prototype.addLibrary = function(
+    start, codeEntry) {
+  this.markPages_(start, start + codeEntry.size);
+  this.libraries_.insert(start, codeEntry);
+};
+
+
+/**
+ * Adds a static code entry.
+ *
+ * @param {number} start The starting address.
+ * @param {CodeMap.CodeEntry} codeEntry Code entry object.
+ */
+CodeMap.prototype.addStaticCode = function(
+    start, codeEntry) {
+  this.statics_.insert(start, codeEntry);
+};
+
+
+/**
+ * @private
+ */
+CodeMap.prototype.markPages_ = function(start, end) {
+  for (var addr = start; addr <= end;
+       addr += CodeMap.PAGE_SIZE) {
+    this.pages_[(addr / CodeMap.PAGE_SIZE)|0] = 1;
+  }
+};
+
+
+/**
+ * @private
+ */
+CodeMap.prototype.deleteAllCoveredNodes_ = function(tree, start, end) {
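+  // Removes all entries in `tree` that overlap the range [start, end).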
+  var to_delete = [];
+  var addr = end - 1;
+  while (addr >= start) {
+    var node = tree.findGreatestLessThan(addr);
+    if (!node) break;
+    var start2 = node.key, end2 = start2 + node.value.size;
+    if (start2 < end && start < end2) to_delete.push(start2);
+    addr = start2 - 1;
+  }
+  for (var i = 0, l = to_delete.length; i < l; ++i) tree.remove(to_delete[i]);
+};
+
+
+/**
+ * @private
+ */
+CodeMap.prototype.isAddressBelongsTo_ = function(addr, node) {
+  return addr >= node.key && addr < (node.key + node.value.size);
+};
+
+
+/**
+ * @private
+ */
+CodeMap.prototype.findInTree_ = function(tree, addr) {
+  var node = tree.findGreatestLessThan(addr);
+  return node && this.isAddressBelongsTo_(addr, node) ? node : null;
+};
+
+
+/**
+ * Finds a code entry that contains the specified address. Both static and
+ * dynamic code entries are considered. Returns the code entry and the offset
+ * within the entry.
+ *
+ * @param {number} addr Address.
+ */
+CodeMap.prototype.findAddress = function(addr) {
+  var pageAddr = (addr / CodeMap.PAGE_SIZE)|0;
+  if (pageAddr in this.pages_) {
+    // Static code entries can contain "holes" of unnamed code.
+    // In this case, the whole library is assigned to this address.
+    var result = this.findInTree_(this.statics_, addr);
+    if (!result) {
+      result = this.findInTree_(this.libraries_, addr);
+      if (!result) return null;
+    }
+    return { entry : result.value, offset : addr - result.key };
+  }
+  var min = this.dynamics_.findMin();
+  var max = this.dynamics_.findMax();
+  if (max != null && addr < (max.key + max.value.size) && addr >= min.key) {
+    var dynaEntry = this.findInTree_(this.dynamics_, addr);
+    if (dynaEntry == null) return null;
+    // Dedupe entry name.
+    var entry = dynaEntry.value;
+    if (!entry.nameUpdated_) {
+      entry.name = this.dynamicsNameGen_.getName(entry.name);
+      entry.nameUpdated_ = true;
+    }
+    return { entry : entry, offset : addr - dynaEntry.key };
+  }
+  return null;
+};
+
+
+/**
+ * Finds a code entry that contains the specified address. Both static and
+ * dynamic code entries are considered.
+ *
+ * @param {number} addr Address.
+ */
+CodeMap.prototype.findEntry = function(addr) {
+  var result = this.findAddress(addr);
+  return result ? result.entry : null;
+};
+
+
+/**
+ * Returns a dynamic code entry using its starting address.
+ *
+ * @param {number} addr Address.
+ */
+CodeMap.prototype.findDynamicEntryByStartAddress =
+    function(addr) {
+  var node = this.dynamics_.find(addr);
+  return node ? node.value : null;
+};
+
+
+/**
+ * Returns an array of all dynamic code entries.
+ */
+CodeMap.prototype.getAllDynamicEntries = function() {
+  return this.dynamics_.exportValues();
+};
+
+
+/**
+ * Returns an array of pairs of all dynamic code entries and their addresses.
+ */
+CodeMap.prototype.getAllDynamicEntriesWithAddresses = function() {
+  return this.dynamics_.exportKeysAndValues();
+};
+
+
+/**
+ * Returns an array of all static code entries.
+ */
+CodeMap.prototype.getAllStaticEntries = function() {
+  return this.statics_.exportValues();
+};
+
+
+/**
+ * Returns an array of pairs of all static code entries and their addresses.
+ */
+CodeMap.prototype.getAllStaticEntriesWithAddresses = function() {
+  return this.statics_.exportKeysAndValues();
+};
+
+
+/**
+ * Returns an array of all libraries entries.
+ */
+CodeMap.prototype.getAllLibrariesEntries = function() {
+  return this.libraries_.exportValues();
+};
+
+
+/**
+ * Creates a code entry object.
+ *
+ * @param {number} size Code entry size in bytes.
+ * @param {string} opt_name Code entry name.
+ * @param {string} opt_type Code entry type, e.g. SHARED_LIB, CPP.
+ * @constructor
+ */
+CodeMap.CodeEntry = function(size, opt_name, opt_type) {
+  this.size = size;
+  this.name = opt_name || '';
+  this.type = opt_type || '';
+  this.nameUpdated_ = false;
+};
+
+
+CodeMap.CodeEntry.prototype.getName = function() {
+  return this.name;
+};
+
+
+CodeMap.CodeEntry.prototype.toString = function() {
+  return this.name + ': ' + this.size.toString(16);
+};
+
+
+CodeMap.NameGenerator = function() {
+  this.knownNames_ = {};
+};
+
+
+CodeMap.NameGenerator.prototype.getName = function(name) {
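+  // The first occurrence of a name is returned as-is; duplicates get a
+  // counter suffix, e.g. 'foo', 'foo {1}', 'foo {2}'.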
+  if (!(name in this.knownNames_)) {
+    this.knownNames_[name] = 0;
+    return name;
+  }
+  var count = ++this.knownNames_[name];
+  return name + ' {' + count + '}';
+};
diff --git a/src/third_party/v8/tools/codemap.mjs b/src/third_party/v8/tools/codemap.mjs
new file mode 100644
index 0000000..245b6ba
--- /dev/null
+++ b/src/third_party/v8/tools/codemap.mjs
@@ -0,0 +1,298 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import { SplayTree } from "./splaytree.mjs";
+
+/**
+ * Constructs a mapper that maps addresses into code entries.
+ *
+ * @constructor
+ */
+export class CodeMap {
+  /**
+   * Dynamic code entries. Used for JIT compiled code.
+   */
+  dynamics_ = new SplayTree();
+
+  /**
+   * Name generator for entries having duplicate names.
+   */
+  dynamicsNameGen_ = new NameGenerator();
+
+  /**
+   * Static code entries. Used for statically compiled code.
+   */
+  statics_ = new SplayTree();
+
+  /**
+   * Libraries entries. Used for the whole static code libraries.
+   */
+  libraries_ = new SplayTree();
+
+  /**
+   * Map of memory pages occupied with static code.
+   */
+  pages_ = [];
+
+
+  /**
+   * The number of alignment bits in a page address.
+   */
+  static PAGE_ALIGNMENT = 12;
+
+
+  /**
+   * Page size in bytes.
+   */
+  static PAGE_SIZE =  1 << CodeMap.PAGE_ALIGNMENT;
+
+
+  /**
+   * Adds a dynamic (i.e. moveable and discardable) code entry.
+   *
+   * @param {number} start The starting address.
+   * @param {CodeMap.CodeEntry} codeEntry Code entry object.
+   */
+  addCode(start, codeEntry) {
+    this.deleteAllCoveredNodes_(this.dynamics_, start, start + codeEntry.size);
+    this.dynamics_.insert(start, codeEntry);
+  }
+
+  /**
+   * Moves a dynamic code entry. Throws an exception if there is no dynamic
+   * code entry with the specified starting address.
+   *
+   * @param {number} from The starting address of the entry being moved.
+   * @param {number} to The destination address.
+   */
+  moveCode(from, to) {
+    const removedNode = this.dynamics_.remove(from);
+    this.deleteAllCoveredNodes_(this.dynamics_, to, to + removedNode.value.size);
+    this.dynamics_.insert(to, removedNode.value);
+  }
+
+  /**
+   * Discards a dynamic code entry. Throws an exception if there is no dynamic
+   * code entry with the specified starting address.
+   *
+   * @param {number} start The starting address of the entry being deleted.
+   */
+  deleteCode(start) {
+    const removedNode = this.dynamics_.remove(start);
+  }
+
+  /**
+   * Adds a library entry.
+   *
+   * @param {number} start The starting address.
+   * @param {CodeMap.CodeEntry} codeEntry Code entry object.
+   */
+  addLibrary(start, codeEntry) {
+    this.markPages_(start, start + codeEntry.size);
+    this.libraries_.insert(start, codeEntry);
+  }
+
+  /**
+   * Adds a static code entry.
+   *
+   * @param {number} start The starting address.
+   * @param {CodeMap.CodeEntry} codeEntry Code entry object.
+   */
+  addStaticCode(start, codeEntry) {
+    this.statics_.insert(start, codeEntry);
+  }
+
+  /**
+   * @private
+   */
+  markPages_(start, end) {
+    for (let addr = start; addr <= end;
+        addr += CodeMap.PAGE_SIZE) {
+      this.pages_[(addr / CodeMap.PAGE_SIZE)|0] = 1;
+    }
+  }
+
+  /**
+   * @private
+   */
+  deleteAllCoveredNodes_(tree, start, end) {
+    const to_delete = [];
+    let addr = end - 1;
+    while (addr >= start) {
+      const node = tree.findGreatestLessThan(addr);
+      if (!node) break;
+      const start2 = node.key, end2 = start2 + node.value.size;
+      if (start2 < end && start < end2) to_delete.push(start2);
+      addr = start2 - 1;
+    }
+    for (let i = 0, l = to_delete.length; i < l; ++i) tree.remove(to_delete[i]);
+  }
+
+  /**
+   * @private
+   */
+  isAddressBelongsTo_(addr, node) {
+    return addr >= node.key && addr < (node.key + node.value.size);
+  }
+
+  /**
+   * @private
+   */
+  findInTree_(tree, addr) {
+    const node = tree.findGreatestLessThan(addr);
+    return node && this.isAddressBelongsTo_(addr, node) ? node : null;
+  }
+
+  /**
+   * Finds a code entry that contains the specified address. Both static and
+   * dynamic code entries are considered. Returns the code entry and the offset
+   * within the entry.
+   *
+   * @param {number} addr Address.
+   */
+  findAddress(addr) {
+    const pageAddr = (addr / CodeMap.PAGE_SIZE)|0;
+    if (pageAddr in this.pages_) {
+      // Static code entries can contain "holes" of unnamed code.
+      // In this case, the whole library is assigned to this address.
+      let result = this.findInTree_(this.statics_, addr);
+      if (!result) {
+        result = this.findInTree_(this.libraries_, addr);
+        if (!result) return null;
+      }
+      return { entry : result.value, offset : addr - result.key };
+    }
+    const min = this.dynamics_.findMin();
+    const max = this.dynamics_.findMax();
+    if (max != null && addr < (max.key + max.value.size) && addr >= min.key) {
+      const dynaEntry = this.findInTree_(this.dynamics_, addr);
+      if (dynaEntry == null) return null;
+      // Dedupe entry name.
+      const entry = dynaEntry.value;
+      if (!entry.nameUpdated_) {
+        entry.name = this.dynamicsNameGen_.getName(entry.name);
+        entry.nameUpdated_ = true;
+      }
+      return { entry, offset : addr - dynaEntry.key };
+    }
+    return null;
+  }
+
+  /**
+   * Finds a code entry that contains the specified address. Both static and
+   * dynamic code entries are considered.
+   *
+   * @param {number} addr Address.
+   */
+  findEntry(addr) {
+    const result = this.findAddress(addr);
+    return result ? result.entry : null;
+  }
+
+  /**
+   * Returns a dynamic code entry using its starting address.
+   *
+   * @param {number} addr Address.
+   */
+  findDynamicEntryByStartAddress(addr) {
+    const node = this.dynamics_.find(addr);
+    return node ? node.value : null;
+  }
+
+  /**
+   * Returns an array of all dynamic code entries.
+   */
+  getAllDynamicEntries() {
+    return this.dynamics_.exportValues();
+  }
+
+  /**
+   * Returns an array of pairs of all dynamic code entries and their addresses.
+   */
+  getAllDynamicEntriesWithAddresses() {
+    return this.dynamics_.exportKeysAndValues();
+  }
+
+  /**
+   * Returns an array of all static code entries.
+   */
+  getAllStaticEntries() {
+    return this.statics_.exportValues();
+  }
+
+  /**
+   * Returns an array of pairs of all static code entries and their addresses.
+   */
+  getAllStaticEntriesWithAddresses() {
+    return this.statics_.exportKeysAndValues();
+  }
+
+  /**
+   * Returns an array of all libraries entries.
+   */
+  getAllLibrariesEntries() {
+    return this.libraries_.exportValues();
+  }
+}
+
+
+/**
+ * Creates a code entry object.
+ *
+ * @param {number} size Code entry size in bytes.
+ * @param {string} opt_name Code entry name.
+ * @param {string} opt_type Code entry type, e.g. SHARED_LIB, CPP.
+ * @constructor
+ */
+export class CodeEntry {
+  constructor(size, opt_name, opt_type) {
+    this.size = size;
+    this.name = opt_name || '';
+    this.type = opt_type || '';
+    this.nameUpdated_ = false;
+  }
+
+  getName() {
+    return this.name;
+  }
+
+  toString() {
+    return this.name + ': ' + this.size.toString(16);
+  }
+}
+
+class NameGenerator {
+  knownNames_ = { __proto__:null }
+  getName(name) {
+    if (!(name in this.knownNames_)) {
+      this.knownNames_[name] = 0;
+      return name;
+    }
+    const count = ++this.knownNames_[name];
+    return name + ' {' + count + '}';
+  };
+}
diff --git a/src/third_party/v8/tools/collect_deprecation_stats.sh b/src/third_party/v8/tools/collect_deprecation_stats.sh
new file mode 100755
index 0000000..aa3f413
--- /dev/null
+++ b/src/third_party/v8/tools/collect_deprecation_stats.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+# Collect the number of [[deprecated]] calls detected when compiling V8.
+# Requires "v8_deprecate_get_isolate = true" to be useful.
+
+set -e
+
+if [ -z "$1" ]; then
+  (>&2 echo "Usage: collect_deprecation_stats.sh [<outdir>|<log>]")
+  exit 1
+fi
+
+if [ -d "$1" ]; then
+  OUTDIR=$1
+  FULL_LOG=/tmp/get_isolate_deprecation.log
+  gn clean "$OUTDIR"
+  autoninja -C "$OUTDIR" > $FULL_LOG
+else
+  FULL_LOG=$1
+fi
+
+FILTERED_LOG=/tmp/filtered_isolate_deprecation.log
+UNIQUE_WARNINGS_LOG=/tmp/unique_warnings.log
+
+grep "warning:" "$FULL_LOG" | sed $'
+s|^\.\./\.\./||;
+s/: warning: \'/: /;
+
+# strip everything after deprecated function name (including template param).
+s/\(<.*>\)\\?\'.*//' > $FILTERED_LOG
+
+sort -u $FILTERED_LOG > $UNIQUE_WARNINGS_LOG
+
+echo "Total deprecated calls: $(wc -l < $UNIQUE_WARNINGS_LOG)"
+cut -f2 -d' ' $UNIQUE_WARNINGS_LOG | sort | uniq -c
diff --git a/src/third_party/v8/tools/compare-table-gen.js b/src/third_party/v8/tools/compare-table-gen.js
new file mode 100644
index 0000000..e0c870d
--- /dev/null
+++ b/src/third_party/v8/tools/compare-table-gen.js
@@ -0,0 +1,120 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Generates a comparison table test case.
+// Usage: d8 compare-table-gen.js -- lt|lteq|gt|gteq|eq|ne|eq|sne|min|max
+
+var strings = ["true", "false", "null", "void 0", "0", "0.0", "-0", "\"\"", "-1", "-1.25", "1", "1.25", "-2147483648", "2147483648", "Infinity", "-Infinity", "NaN"];
+var values = new Array(strings.length);
+for (var i = 0; i < strings.length; i++) {
+  values[i] = eval(strings[i]);
+}
+
+function test() {
+  for (var i = 0; i < values.length; i++) {
+    for (var j = 0; j < values.length; j++) {
+      var a = values[i];
+      var b = values[j];
+      var x = expected[i][j];
+      assertEquals(x, func(a,b));
+      assertEquals(x, left_funcs[i](b));
+      assertEquals(x, right_funcs[j](a));
+    }
+  }
+
+  var result = matrix();
+  for (var i = 0; i < values.length; i++) {
+    for (var j = 0; j < values.length; j++) {
+      assertEquals(expected[i][j], result[i][j]);
+    }
+  }
+}
+
+function expr(infix, a, cmp, b) {
+  return infix ? a + " " + cmp + " " + b : cmp + "(" + a + ", " + b + ")";
+}
+
+function SpecialToString(x) {
+  if ((1 / x) == -Infinity) return "-0";
+  return "" + x;
+}
+
+function gen(name, cmp, infix) {
+
+  print("// Copyright 2015 the V8 project authors. All rights reserved.");
+  print("// Use of this source code is governed by a BSD-style license that can be");
+  print("// found in the LICENSE file.");
+  print();
+  print("var values = [" + strings + "];");
+
+  var body = "(function " + name + "(a,b) { return " + expr(infix, "a", cmp, "b") + "; })";
+  var func = eval(body);
+
+  print("var expected = [");
+
+  for (var i = 0; i < values.length; i++) {
+    var line = "  [";
+    for (var j = 0; j < values.length; j++) {
+      if (j > 0) line += ",";
+      line += SpecialToString(func(values[i], values[j]));
+    }
+    line += "]";
+    if (i < (values.length - 1)) line += ",";
+    print(line);
+  }
+  print("];");
+
+  print("var func = " + body + ";");
+  print("var left_funcs = [");
+
+  for (var i = 0; i < values.length; i++) {
+    var value = strings[i];
+    var body = "(function " + name + "_L" + i + "(b) { return " + expr(infix, value, cmp, "b") + "; })";
+    var end = i < (values.length - 1) ? "," : "";
+    print("  " + body + end);
+  }
+  print("];");
+
+  print("var right_funcs = [");
+  for (var i = 0; i < values.length; i++) {
+    var value = strings[i];
+    var body = "(function " + name + "_R" + i + "(a) { return " + expr(infix, "a", cmp, value) + "; })";
+    var end = i < (values.length - 1) ? "," : "";
+    print("  " + body + end);
+  }
+  print("];");
+
+  print("function matrix() {");
+  print("  return [");
+  for (var i = 0; i < values.length; i++) {
+    var line = "    [";
+    for (var j = 0; j < values.length; j++) {
+      if (j > 0) line += ",";
+      line += expr(infix, strings[i], cmp, strings[j]);
+    }
+    line += "]";
+    if (i < (values.length - 1)) line += ",";
+    print(line);
+  }
+  print("  ];");
+  print("}");
+
+
+  print(test.toString());
+  print("test();");
+  print("test();");
+}
+
+switch (arguments[0]) {
+  case "lt":   gen("lt",   "<", true); break;
+  case "lteq": gen("lteq", "<=", true); break;
+  case "gt":   gen("gt",   ">", true); break;
+  case "gteq": gen("gteq", ">=", true); break;
+  case "eq":   gen("eq",   "==", true); break;
+  case "ne":   gen("ne",   "!=", true); break;
+  case "seq":  gen("seq",  "===", true); break;
+  case "sne":  gen("sne",  "!==", true); break;
+  case "min":  gen("min",  "Math.min", false); break;
+  case "max":  gen("max",  "Math.max", false); break;
+}
diff --git a/src/third_party/v8/tools/compare_torque_output.py b/src/third_party/v8/tools/compare_torque_output.py
new file mode 100644
index 0000000..50e93a7
--- /dev/null
+++ b/src/third_party/v8/tools/compare_torque_output.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+# Copyright 2020 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""
+Compare two folders and print any differences between files to both a
+results file and stderr.
+
+Specifically, we use this to compare the output of the Torque generator for
+both x86 and x64 (-m32) toolchains.
+"""
+
+import difflib
+import filecmp
+import itertools
+import os
+import sys
+
+assert len(sys.argv) > 3
+
+folder1 = sys.argv[1]
+folder2 = sys.argv[2]
+results_file_name = sys.argv[3]
+
+with open(results_file_name, "w") as results_file:
+  def write(line):
+    # Print line to both results file and stderr
+    sys.stderr.write(line)
+    results_file.write(line)
+
+  def has_one_sided_diff(dcmp, side, side_list):
+    # Check that we do not have files only on one side of the comparison
+    if side_list:
+      write("Some files exist only in %s\n" % side)
+      for fl in side_list:
+        write(fl)
+    return side_list
+
+  def has_content_diff(dcmp):
+    # Check that we do not have content differences in the common files
+    _, diffs, _ = filecmp.cmpfiles(
+          dcmp.left, dcmp.right,
+          dcmp.common_files, shallow=False)
+    if diffs:
+      write("Found content differences between %s and %s\n" %
+        (dcmp.left, dcmp.right))
+      for name in diffs:
+        write("File diff %s\n" % name)
+        left_file = os.path.join(dcmp.left, name)
+        right_file = os.path.join(dcmp.right, name)
+        with open(left_file) as f1, open(right_file) as f2:
+          diff = difflib.unified_diff(
+              f1.readlines(), f2.readlines(),
+              dcmp.left, dcmp.right)
+          for l in itertools.islice(diff, 100):
+            write(l)
+        write("\n\n")
+    return diffs
+
+  dcmp = filecmp.dircmp(folder1, folder2)
+  has_diffs = has_one_sided_diff(dcmp, dcmp.left, dcmp.left_only) \
+    or has_one_sided_diff(dcmp, dcmp.right, dcmp.right_only) \
+    or has_content_diff(dcmp)
+
+if has_diffs:
+  sys.exit(1)
diff --git a/src/third_party/v8/tools/consarray.js b/src/third_party/v8/tools/consarray.js
new file mode 100644
index 0000000..dbce1de
--- /dev/null
+++ b/src/third_party/v8/tools/consarray.js
@@ -0,0 +1,92 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+/**
+ * Constructs a ConsArray object. It is used mainly for tree traversal.
+ * In this use case we have lots of arrays that we need to iterate
+ * sequentially. The internal Array implementation is horribly slow
+ * when concatenating large (10K-item) arrays due to memory copying.
+ * That's why we avoid copying memory and instead build a linked list
+ * of arrays to iterate through.
+ *
+ * @constructor
+ */
+function ConsArray() {
+  this.tail_ = new ConsArray.Cell(null, null);
+  this.currCell_ = this.tail_;
+  this.currCellPos_ = 0;
+};
+
+
+/**
+ * Concatenates another array for iterating. Empty arrays are ignored.
+ * This operation can be safely performed during ongoing ConsArray
+ * iteration.
+ *
+ * @param {Array} arr Array to concatenate.
+ */
+ConsArray.prototype.concat = function(arr) {
+  if (arr.length > 0) {
+    this.tail_.data = arr;
+    this.tail_ = this.tail_.next = new ConsArray.Cell(null, null);
+  }
+};
+
+
+/**
+ * Whether the end of iteration is reached.
+ */
+ConsArray.prototype.atEnd = function() {
+  return this.currCell_ === null ||
+      this.currCell_.data === null ||
+      this.currCellPos_ >= this.currCell_.data.length;
+};
+
+
+/**
+ * Returns the current item, moves to the next one.
+ */
+ConsArray.prototype.next = function() {
+  var result = this.currCell_.data[this.currCellPos_++];
+  if (this.currCellPos_ >= this.currCell_.data.length) {
+    this.currCell_ = this.currCell_.next;
+    this.currCellPos_ = 0;
+  }
+  return result;
+};
+
+
+/**
+ * A cell object used for constructing a list in ConsArray.
+ *
+ * @constructor
+ */
+ConsArray.Cell = function(data, next) {
+  this.data = data;
+  this.next = next;
+};
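+
+// A minimal usage sketch (illustrative values; `process` is a placeholder for
+// whatever per-item work the caller performs):
+//   const chunks = new ConsArray();
+//   chunks.concat([1, 2, 3]);
+//   chunks.concat([4, 5]);
+//   while (!chunks.atEnd()) process(chunks.next());  // visits 1..5 in order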
diff --git a/src/third_party/v8/tools/consarray.mjs b/src/third_party/v8/tools/consarray.mjs
new file mode 100644
index 0000000..1dc2afe
--- /dev/null
+++ b/src/third_party/v8/tools/consarray.mjs
@@ -0,0 +1,92 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+/**
+ * Constructs a ConsArray object. It is used mainly for tree traversal.
+ * In this use case we have lots of arrays that we need to iterate
+ * sequentially. The internal Array implementation is horribly slow
+ * when concatenating large (10K-item) arrays due to memory copying.
+ * That's why we avoid copying memory and instead build a linked list
+ * of arrays to iterate through.
+ *
+ * @constructor
+ */
+export function ConsArray() {
+  this.tail_ = new ConsArray.Cell(null, null);
+  this.currCell_ = this.tail_;
+  this.currCellPos_ = 0;
+};
+
+
+/**
+ * Concatenates another array for iterating. Empty arrays are ignored.
+ * This operation can be safely performed during ongoing ConsArray
+ * iteration.
+ *
+ * @param {Array} arr Array to concatenate.
+ */
+ConsArray.prototype.concat = function(arr) {
+  if (arr.length > 0) {
+    this.tail_.data = arr;
+    this.tail_ = this.tail_.next = new ConsArray.Cell(null, null);
+  }
+};
+
+
+/**
+ * Whether the end of iteration is reached.
+ */
+ConsArray.prototype.atEnd = function() {
+  return this.currCell_ === null ||
+      this.currCell_.data === null ||
+      this.currCellPos_ >= this.currCell_.data.length;
+};
+
+
+/**
+ * Returns the current item, moves to the next one.
+ */
+ConsArray.prototype.next = function() {
+  const result = this.currCell_.data[this.currCellPos_++];
+  if (this.currCellPos_ >= this.currCell_.data.length) {
+    this.currCell_ = this.currCell_.next;
+    this.currCellPos_ = 0;
+  }
+  return result;
+};
+
+
+/**
+ * A cell object used for constructing a list in ConsArray.
+ *
+ * @constructor
+ */
+ConsArray.Cell = function(data, next) {
+  this.data = data;
+  this.next = next;
+};
diff --git a/src/third_party/v8/tools/cppgc/copy.bara.sky b/src/third_party/v8/tools/cppgc/copy.bara.sky
new file mode 100644
index 0000000..169db0e
--- /dev/null
+++ b/src/third_party/v8/tools/cppgc/copy.bara.sky
@@ -0,0 +1,41 @@
+# Copyright 2020 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+INTERESTING_PATHS = [
+    'AUTHORS',
+    'CODE_OF_CONDUCT.md',
+    'LICENSE',
+    'samples/cppgc/**',
+    'src/base/**',
+    'src/heap/base/**',
+    'src/heap/cppgc/**',
+    'include/cppgc/**',
+    'include/v8config.h',
+    'include/v8-platform.h',
+    'testing/**',
+    'test/unittests/heap/cppgc/**',
+]
+
+origin_url = 'https://chromium.googlesource.com/v8/v8.git'
+# TODO(v8:10724): Add GitHub destination url.
+destination_url = 'TODO'
+
+# This workflow syncs cppgc source inside V8's googlesource repo with GitHub.
+core.workflow(
+    name='default',
+    origin=git.origin(
+        url=origin_url,
+        ref='master',
+    ),
+    destination=git.destination(url=destination_url, ),
+    origin_files=glob(INTERESTING_PATHS,
+                      exclude=['samples/cppgc/cppgc-for-v8-embedders.cc']),
+    destination_files=glob(['**'], exclude=['CMakeLists.txt', 'cmake/**']),
+    mode='SQUASH',
+    authoring=authoring.pass_thru('V8 Team <v8-dev@googlegroups.com>'),
+    transformations=[
+        metadata.squash_notes(prefix='Export of V8 changes from ' +
+                              origin_url + '\n\nList of included changes:\n\n')
+    ]
+)
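+
+# Illustrative manual invocation of the workflow above (export_to_github.sh in
+# this directory is the intended wrapper; the binary path is hypothetical):
+#   copybara copy.bara.sky default --dry-run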
diff --git a/src/third_party/v8/tools/cppgc/export_to_github.sh b/src/third_party/v8/tools/cppgc/export_to_github.sh
new file mode 100755
index 0000000..fe86068
--- /dev/null
+++ b/src/third_party/v8/tools/cppgc/export_to_github.sh
@@ -0,0 +1,121 @@
+#!/bin/sh
+#
+# Copyright 2020 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+source_dir=$(cd "$(dirname "$0")"; pwd -P)
+
+copybara_exe=copybara
+copybara_file="$source_dir/copy.bara.sky"
+init_history=''
+
+for arg in "$@"; do
+  case $arg in
+    --copybara-exe=*)
+      copybara_exe="${arg#*=}"
+      shift
+      ;;
+    --copybara-file=*)
+      copybara_file="${arg#*=}"
+      shift
+      ;;
+    --init-history)
+      init_history='--init-history --force'
+      shift
+      ;;
+    *)
+      echo -e "Usage:$arg"
+      echo -e "    export_to_github.sh [--copybara-exe=<path-to-copybara>]\n" \
+              "                       [--copybara-file=<path-to-copy.bara.sky>]"
+      exit 1
+  esac
+done
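+
+# Illustrative invocations (paths are hypothetical):
+#   ./export_to_github.sh --copybara-exe=/usr/local/bin/copybara
+#   ./export_to_github.sh --copybara-file=tools/cppgc/copy.bara.sky --init-history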
+
+v8_origin="https://chromium.googlesource.com/v8/v8.git"
+v8_ref="master"
+
+NOCOLOR="\033[0m"
+RED="\033[0;31m"
+GREEN="\033[0;32m"
+BLUE="\033[0;34m"
+
+function fail {
+  echo -e "${RED}${1}${NOCOLOR}" > /dev/stderr
+  exit 1
+}
+
+function success {
+  echo -e "${BLUE}${1}${NOCOLOR}" > /dev/stderr
+  exit 0
+}
+
+function message {
+  echo -e "${GREEN}${1}${NOCOLOR}" > /dev/stderr
+}
+
+function cleanup {
+  if [ -d "$git_temp_dir" ]; then
+    rm -rf $git_temp_dir
+  fi
+}
+
+trap "exit 1" HUP INT PIPE QUIT TERM
+trap cleanup EXIT
+
+[ ! -x $copybara_exe ] && fail "$copybara_exe doesn't exist or was not found in PATH!"
+[ ! -f $copybara_file ] && fail "Input $copybara_file doesn't exist!"
+
+git_temp_dir=$(mktemp -d)
+if [[ ! "$git_temp_dir" || ! -d "$git_temp_dir" ]]; then
+  fail "Failed to create temporary dir"
+fi
+
+if [[ $init_history ]]; then
+  read -p "--init-history is only supposed to be used on the first export of \
+cppgc. Is this what is really intended? (y/N)" answer
+  if [ "$answer" != "y" ]; then
+    exit 0
+  fi
+fi
+
+message "Running copybara..."
+$copybara_exe $init_history $copybara_file --dry-run --git-destination-path $git_temp_dir
+result=$?
+if [ "$result" -eq 4 ]; then
+  success "Nothing needs to be done, exiting..."
+elif [ "$result" -ne 0 ]; then
+  fail "Failed to run copybara"
+fi
+
+cd $git_temp_dir
+
+main_gn="BUILD.gn"
+test_gn="test/unittests/BUILD.gn"
+gen_cmake="tools/cppgc/gen_cmake.py"
+
+message "Checking out BUILD.gn files..."
+git remote add v8_origin "$v8_origin"
+git fetch --depth=1 v8_origin $v8_ref
+git checkout v8_origin/master -- "$main_gn" "$test_gn" "$gen_cmake" \
+  || fail "Failed to checkout BUILD.gn from V8 origin"
+
+message "Generating CMakeLists.txt..."
+cmakelists="$git_temp_dir/CMakeLists.txt"
+$gen_cmake --out=$cmakelists --main-gn=$main_gn --test-gn=$test_gn \
+  || fail "CMakeLists.txt generation has failed!"
+
+git rm -f $main_gn $test_gn $gen_cmake > /dev/null
+
+if git status -s | grep -q $(basename $cmakelists); then
+  message "CMakeLists.txt needs to be changed"
+  git add $cmakelists
+  git commit --amend --no-edit > /dev/null
+else
+  message "No changes in CMakeLists.txt need to be done"
+fi
+
+message "Pushing changes to GitHub..."
+git push copybara_remote master
+
+success "CppGC GitHub mirror was successfully updated"
diff --git a/src/third_party/v8/tools/cppgc/gen_cmake.py b/src/third_party/v8/tools/cppgc/gen_cmake.py
new file mode 100755
index 0000000..6fc1bc0
--- /dev/null
+++ b/src/third_party/v8/tools/cppgc/gen_cmake.py
@@ -0,0 +1,518 @@
+#!/usr/bin/env python3
+#
+# Copyright 2020 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+import lark
+import argparse
+from contextlib import suppress
+from collections import namedtuple
+from datetime import datetime
+
+# GN grammar from https://gn.googlesource.com/gn/+/master/src/gn/parser.cc.
+GN_GRAMMAR = """
+    ?file : statement_list
+
+    ?statement     : assignment | call | condition
+    ?lvalue        : IDENTIFIER | array_access | scope_access
+    assignment     : lvalue assign_op expr
+    call           : IDENTIFIER "(" [ expr_list ] ")" [ block ]
+    condition      : "if" "(" expr ")" block [ "else" ( condition | block ) ]
+    ?block         : "{" statement_list "}"
+    statement_list : statement*
+
+    array_access   : IDENTIFIER "[" expr "]"
+    scope_access   : IDENTIFIER "." IDENTIFIER
+    ?primary_expr  : IDENTIFIER | INTEGER | STRING | call
+                   | array_access | scope_access | block
+                   | "(" expr ")" -> par_expr
+                   | array
+    array          : "[" [ expr ( "," expr? )* ] "]"
+    expr_list      : expr ( "," expr )*
+
+    ?assign_op : "="  -> asgn_op
+               | "+=" -> asgn_add_op
+               | "-=" -> asgn_sub_op
+
+    ?expr      : expr1
+    ?expr1     : expr1 "||" expr2 -> or
+               | expr2
+    ?expr2     : expr2 "&&" expr3 -> and
+               | expr3
+    ?expr3     : expr3 "==" expr4 -> eq
+               | expr3 "!=" expr4 -> ne
+               | expr4
+    ?expr4     : expr4 "<=" expr5 -> le
+               | expr4 "<" expr5  -> lt
+               | expr4 ">=" expr5 -> ge
+               | expr4 ">" expr5  -> gt
+               | expr5
+    ?expr5     : expr5 "+" expr6  -> add
+               | expr5 "-" expr6  -> sub
+               | expr6
+    ?expr6     : "!" primary_expr -> neg
+               | primary_expr
+
+    COMMENT : /#.*/
+
+    %import common.ESCAPED_STRING -> STRING
+    %import common.SIGNED_INT -> INTEGER
+    %import common.CNAME -> IDENTIFIER
+    %import common.WS
+    %ignore WS
+    %ignore COMMENT
+"""
+
+V8_TARGET_TYPES = (
+    'v8_component',
+    'v8_source_set',
+    'v8_executable',
+)
+
+OPS = (
+    'neg',
+    'eq',
+    'ne',
+    'le',
+    'lt',
+    'ge',
+    'gt',
+    'and',
+    'or',
+)
+
+
+class UnsupportedOperation(Exception):
+    pass
+
+
+class V8GNTransformer(object):
+    """
+    Traverse GN parse-tree and build resulting object.
+    """
+    def __init__(self, builder, filtered_targets):
+        self.builder = builder
+        self.filtered_targets = filtered_targets
+        self.current_target = None
+
+    def Traverse(self, tree):
+        self.builder.BuildPrologue()
+        self.TraverseTargets(tree)
+        self.builder.BuildEpilogue()
+
+    def TraverseTargets(self, tree):
+        'Traverse top level GN targets and call the builder functions'
+        for stmt in tree.children:
+            if stmt.data != 'call':
+                continue
+            target_type = stmt.children[0]
+            if target_type not in V8_TARGET_TYPES:
+                continue
+            target = stmt.children[1].children[0].strip('\"')
+            if target not in self.filtered_targets:
+                continue
+            self.current_target = target
+            self._Target(target_type, target, stmt.children[2].children)
+
+    def _Target(self, target_type, target, stmts):
+        stmts = self._StatementList(stmts)
+        return self.builder.BuildTarget(target_type, target, stmts)
+
+    def _StatementList(self, stmts):
+        built_stmts = []
+        for stmt in stmts:
+            built_stmts.append(self._Statement(stmt))
+        return [stmt for stmt in built_stmts if stmt]
+
+    def _Statement(self, stmt):
+        # Handle only interesting gn statements.
+        with suppress(KeyError):
+            return self.STATEMENTS[stmt.data](self, *stmt.children)
+
+    def _Assignment(self, left, op, right):
+        return self.ASSIGN_TYPES[op.data](self, left, right)
+
+    def _AssignEq(self, left, right):
+        if left == 'sources':
+            return self.builder.BuildSourcesList(
+                self.current_target, [str(token) for token in right.children])
+
+    def _AssignAdd(self, left, right):
+        if left == 'sources':
+            return self.builder.BuildAppendSources(
+                self.current_target, [str(token) for token in right.children])
+
+    def _AssignSub(self, left, right):
+        if left == 'sources':
+            return self.builder.BuildRemoveSources(
+                self.current_target, [str(token) for token in right.children])
+
+    def _Condition(self, cond_expr, then_stmts, else_stmts=None):
+        'Visit GN condition: if (cond) {then_stmts} else {else_stmts}'
+        cond_expr = self._Expr(cond_expr)
+        then_stmts = self._StatementList(then_stmts.children)
+        if not then_stmts:
+            # Ignore conditions with empty then stmts.
+            return
+        if else_stmts is None:
+            return self.builder.BuildCondition(cond_expr, then_stmts)
+        elif else_stmts.data == 'condition':
+            else_cond = self._Condition(*else_stmts.children)
+            return self.builder.BuildConditionWithElseCond(
+                cond_expr, then_stmts, else_cond)
+        else:
+            assert 'statement_list' == else_stmts.data
+            else_stmts = self._StatementList(else_stmts.children)
+            return self.builder.BuildConditionWithElseStmts(
+                cond_expr, then_stmts, else_stmts)
+
+    def _Expr(self, expr):
+        'Post-order traverse expression trees'
+        if isinstance(expr, lark.Token):
+            if expr.type == 'IDENTIFIER':
+                return self.builder.BuildIdentifier(str(expr))
+            elif expr.type == 'INTEGER':
+                return self.builder.BuildInteger(str(expr))
+            else:
+                return self.builder.BuildString(str(expr))
+        if expr.data == 'par_expr':
+            return self.builder.BuildParenthesizedOperation(
+                self._Expr(*expr.children))
+        if expr.data not in OPS:
+            raise UnsupportedOperation(
+                f'The operator "{expr.data}" is not supported')
+        if len(expr.children) == 1:
+            return self._UnaryExpr(expr.data, *expr.children)
+        if len(expr.children) == 2:
+            return self._BinaryExpr(expr.data, *expr.children)
+        raise UnsupportedOperation(f'Unsupported arity {len(expr.children)}')
+
+    def _UnaryExpr(self, op, right):
+        right = self._Expr(right)
+        return self.builder.BuildUnaryOperation(op, right)
+
+    def _BinaryExpr(self, op, left, right):
+        left = self._Expr(left)
+        right = self._Expr(right)
+        return self.builder.BuildBinaryOperation(left, op, right)
+
+    STATEMENTS = {
+        'assignment': _Assignment,
+        'condition': _Condition,
+    }
+
+    ASSIGN_TYPES = {
+        'asgn_op': _AssignEq,
+        'asgn_add_op': _AssignAdd,
+        'asgn_sub_op': _AssignSub,
+    }
+
+
+TARGETS = {
+    'v8_libbase': 'lib',
+    'v8_cppgc_shared': 'lib',
+    'cppgc_base': 'lib',
+    'cppgc_standalone': 'sample',
+    'cppgc_unittests_sources': 'tests',
+    'cppgc_unittests': 'tests',
+}
+
+
+class CMakeBuilder(object):
+    """
+    Builder that produces the main CMakeLists.txt.
+    """
+    def __init__(self):
+        self.result = []
+        self.source_sets = {}
+
+    def BuildPrologue(self):
+        self.result.append(f"""
+# Copyright {datetime.now().year} the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# This file is automatically generated by {__file__}. Do NOT edit it.
+
+cmake_minimum_required(VERSION 3.11)
+project(cppgc CXX)
+
+set(CMAKE_CXX_STANDARD 14)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+
+option(CPPGC_ENABLE_OBJECT_NAMES "Enable object names in cppgc for debug purposes" OFF)
+option(CPPGC_ENABLE_CAGED_HEAP "Enable heap reservation of size 4GB, only possible for 64bit archs" OFF)
+option(CPPGC_ENABLE_YOUNG_GENERATION "Enable young generation in cppgc" OFF)
+set(CPPGC_TARGET_ARCH "x64" CACHE STRING "Target architecture, possible options: x64, x86, arm, arm64, ppc64, s390x, mipsel, mips64el")
+
+set(IS_POSIX ${{UNIX}})
+set(IS_MAC ${{APPLE}})
+set(IS_WIN ${{WIN32}})
+if("${{CMAKE_SYSTEM_NAME}}" STREQUAL "Linux")
+  set(IS_LINUX 1)
+elseif("${{CMAKE_SYSTEM_NAME}}" STREQUAL "Fuchsia")
+  set(IS_FUCHSIA 1)
+endif()
+
+set(CURRENT_CPU ${{CPPGC_TARGET_ARCH}})
+
+if("${{CPPGC_TARGET_ARCH}}" STREQUAL "x64" OR
+   "${{CPPGC_TARGET_ARCH}}" STREQUAL "arm64" OR
+   "${{CPPGC_TARGET_ARCH}}" STREQUAL "ppc64" OR
+   "${{CPPGC_TARGET_ARCH}}" STREQUAL "mips64el")
+  if(NOT CMAKE_SIZEOF_VOID_P EQUAL 8)
+    message(FATAL_ERROR "64-bit arch specified for 32-bit compiler")
+  endif()
+  set(CPPGC_64_BITS ON)
+endif()
+
+if(CPPGC_ENABLE_CAGED_HEAP AND NOT CPPGC_64_BITS)
+  message(FATAL_ERROR "Caged heap is only supported for 64bit archs")
+endif()
+
+if(CPPGC_64_BITS)
+  # Always enable caged heap for 64bits archs.
+  set(CPPGC_ENABLE_CAGED_HEAP ON CACHE BOOL "Enable caged heap for 64bit" FORCE)
+endif()
+
+if(CPPGC_ENABLE_YOUNG_GENERATION AND NOT CPPGC_ENABLE_CAGED_HEAP)
+  message(FATAL_ERROR "Young generation is only supported for caged heap configuration")
+endif()
+
+if(NOT CPPGC_64_BITS)
+  if(NOT MSVC)
+    set(CMAKE_CXX_FLAGS "${{CMAKE_CXX_FLAGS}} -m32")
+    set(CMAKE_C_FLAGS "${{CMAKE_C_FLAGS}} -m32")
+    set(CMAKE_EXE_LINKER_FLAGS "${{CMAKE_EXE_LINKER_FLAGS}} -m32")
+    set(CMAKE_SHARED_LINKER_FLAGS "${{CMAKE_SHARED_LINKER_FLAGS}} -m32")
+    set(CMAKE_MODULE_LINKER_FLAGS "${{CMAKE_MODULE_LINKER_FLAGS}} -m32")
+  endif()
+endif()
+
+find_package(Threads REQUIRED)
+
+include(FetchContent)
+FetchContent_Declare(
+  googletest
+  GIT_REPOSITORY "https://chromium.googlesource.com/external/github.com/google/googletest.git"
+  GIT_TAG        "4fe018038f87675c083d0cfb6a6b57c274fb1753"
+  SOURCE_DIR     "${{CMAKE_BINARY_DIR}}/third_party/googletest/src"
+)
+
+FetchContent_GetProperties(googletest)
+if(NOT googletest_POPULATED)
+  FetchContent_Populate(googletest)
+  message("Fetched googletest into ${{googletest_SOURCE_DIR}}")
+  add_subdirectory(${{googletest_SOURCE_DIR}} ${{googletest_BINARY_DIR}} EXCLUDE_FROM_ALL)
+  include_directories("${{CMAKE_BINARY_DIR}}")
+endif()
+""")
+
+    def BuildEpilogue(self):
+        self.result.extend(
+            self._GenTargetString(target, sets)
+            for target, sets in self.source_sets.items())
+        self.result.append("\ninstall(TARGETS cppgc)")
+
+    def BuildTarget(self, target_type, target, rules):
+        # Don't generate CMake targets yet; defer that to BuildEpilogue.
+        comment = f"""
+#===============================================================================
+# {self._CMakeTarget(target)} sources.
+#==============================================================================="""
+        self.result.append(comment)
+        self.result.extend(rules)
+        self.source_sets.setdefault(
+            TARGETS[target], []).append('${' + self._SourceVar(target) + '}')
+
+    def BuildSourcesList(self, target, sources):
+        sources = self._ExpandSources(target, sources)
+        return f'set({self._SourceVar(target)} {sources})'
+
+    def BuildAppendSources(self, target, sources):
+        sources = self._ExpandSources(target, sources)
+        return f'list(APPEND {self._SourceVar(target)} {sources})'
+
+    def BuildRemoveSources(self, target, sources):
+        sources = self._ExpandSources(target, sources)
+        return f'list(REMOVE_ITEM {self._SourceVar(target)} {sources})'
+
+    def BuildCondition(self, cond, then_stmts):
+        return f"""
+if({cond})
+  {' '.join(then_stmts)}
+endif()
+        """.strip()
+
+    def BuildConditionWithElseStmts(self, cond, then_stmts, else_stmts):
+        return f"""
+if({cond})
+  {' '.join(then_stmts)}
+{'else() ' + ' '.join(else_stmts)}
+endif()
+        """.strip()
+
+    def BuildConditionWithElseCond(self, cond, then_stmts, else_cond):
+        return f"""
+if({cond})
+  {' '.join(then_stmts)}
+else{else_cond}
+        """.strip()
+
+    def BuildParenthesizedOperation(self, operation):
+        return ''.join(['(', operation, ')'])
+
+    def BuildUnaryOperation(self, op, right):
+        OPS = {
+            'neg': 'NOT',
+        }
+        return ' '.join([OPS[op], right])
+
+    def BuildBinaryOperation(self, left, op, right):
+        if op == 'ne':
+            neg_result = self.BuildBinaryOperation(left, 'eq', right)
+            return self.BuildUnaryOperation('neg', neg_result)
+        OPS = {
+            'eq': 'STREQUAL',
+            'le': 'LESS_EQUAL',
+            'lt': 'LESS',
+            'ge': 'GREATER_EQUAL',
+            'gt': 'GREATER',
+            'and': 'AND',
+            'or': 'OR',
+        }
+        return ' '.join([left, OPS[op], right])
+
+    def BuildIdentifier(self, token):
+        return self._CMakeVarRef(token)
+
+    def BuildInteger(self, integer):
+        return integer
+
+    def BuildString(self, string):
+        return string
+
+    def GetResult(self):
+        return '\n'.join(self.result)
+
+    @staticmethod
+    def _GenTargetString(target_type, source_sets):
+        Target = namedtuple('Target', 'name cmake deps desc')
+        CMAKE_TARGETS = {
+            'lib':
+            Target(name='cppgc',
+                   cmake='add_library',
+                   deps=['Threads::Threads'],
+                   desc='Main library'),
+            'sample':
+            Target(name='cppgc_sample',
+                   cmake='add_executable',
+                   deps=['cppgc'],
+                   desc='Example'),
+            'tests':
+            Target(name='cppgc_unittests',
+                   cmake='add_executable',
+                   deps=['cppgc', 'gtest', 'gmock'],
+                   desc='Unittests')
+        }
+        target = CMAKE_TARGETS[target_type]
+        return f"""
+# {target.desc} target.
+{target.cmake}({target.name} {' '.join(source_sets)})
+
+{'target_link_libraries(' + target.name + ' ' + ' '.join(target.deps) + ')' if target.deps else ''}
+
+target_include_directories({target.name} PRIVATE "${{CMAKE_SOURCE_DIR}}"
+                                         PRIVATE "${{CMAKE_SOURCE_DIR}}/include")
+
+if(CPPGC_ENABLE_OBJECT_NAMES)
+  target_compile_definitions({target.name} PRIVATE "-DCPPGC_SUPPORTS_OBJECT_NAMES")
+endif()
+if(CPPGC_ENABLE_CAGED_HEAP)
+  target_compile_definitions({target.name} PRIVATE "-DCPPGC_CAGED_HEAP")
+endif()
+if(CPPGC_ENABLE_YOUNG_GENERATION)
+  target_compile_definitions({target.name} PRIVATE "-DCPPGC_YOUNG_GENERATION")
+endif()"""
+
+    @staticmethod
+    def _ExpandSources(target, sources):
+        if TARGETS[target] == 'tests':
+            sources = ['\"test/unittests/' + s[1:] for s in sources]
+        return ' '.join(sources)
+
+    @staticmethod
+    def _SourceVar(target):
+        return CMakeBuilder._CMakeVar(target) + '_SOURCES'
+
+    @staticmethod
+    def _CMakeVar(var):
+        return var.replace('v8_', '').upper()
+
+    @staticmethod
+    def _CMakeTarget(var):
+        return var.replace('v8_', '')
+
+    @staticmethod
+    def _CMakeVarRef(var):
+        return '\"${' + CMakeBuilder._CMakeVar(var) + '}"'
+
+
+def FormatCMake(contents):
+    from cmake_format import configuration, lexer, parse, formatter
+    cfg = configuration.Configuration()
+    tokens = lexer.tokenize(contents)
+    parse_tree = parse.parse(tokens)
+    box_tree = formatter.layout_tree(parse_tree, cfg)
+    return formatter.write_tree(box_tree, cfg, contents)
+
+
+def SaveContents(contents, outfile):
+    if outfile == '-':
+        return print(contents)
+    with open(outfile, 'w+') as ofile:
+        ofile.write(contents)
+
+
+def ParseGN(contents):
+    parser = lark.Lark(GN_GRAMMAR, parser='lalr', start='file')
+    return parser.parse(contents)
+
+
+def ParseGNFile(filename):
+    with open(filename, 'r') as file:
+        contents = file.read()
+        return ParseGN(contents)
+
+
+def GenCMake(main_gn, test_gn, outfile):
+    tree = ParseGNFile(main_gn)
+    tree.children.extend(ParseGNFile(test_gn).children)
+    builder = CMakeBuilder()
+    V8GNTransformer(builder, TARGETS.keys()).Traverse(tree)
+    result = FormatCMake(builder.GetResult())
+    SaveContents(result, outfile)
+
+
+def Main():
+    arg_parser = argparse.ArgumentParser(
+        description=
+        'Generate CMake from the main GN file for targets needed to build CppGC.'
+    )
+    arg_parser.add_argument('--out', help='output CMake filename', default='-')
+    arg_parser.add_argument('--main-gn',
+                            help='main BUILD.gn input file',
+                            default='BUILD.gn')
+    arg_parser.add_argument('--test-gn',
+                            help='unittest BUILD.gn input file',
+                            default='test/unittests/BUILD.gn')
+    args = arg_parser.parse_args()
+
+    GenCMake(args.main_gn, args.test_gn, args.out)
+    return 0
+
+
+if __name__ == '__main__':
+    sys.exit(Main())
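+
+# Illustrative invocation from a V8 checkout (the output path is hypothetical):
+#   tools/cppgc/gen_cmake.py --main-gn=BUILD.gn \
+#       --test-gn=test/unittests/BUILD.gn --out=/tmp/CMakeLists.txt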
diff --git a/src/third_party/v8/tools/cppgc/gen_cmake_test.py b/src/third_party/v8/tools/cppgc/gen_cmake_test.py
new file mode 100755
index 0000000..907872c
--- /dev/null
+++ b/src/third_party/v8/tools/cppgc/gen_cmake_test.py
@@ -0,0 +1,125 @@
+#!/usr/bin/env python3
+#
+# Copyright 2020 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from gen_cmake import CMakeBuilder, V8GNTransformer, ParseGN, V8_TARGET_TYPES
+import unittest
+
+
+class CMakeMockBuilder(CMakeBuilder):
+    """
+    Similar to CMakeBuilder but doesn't produce prologues/epilogues.
+    """
+    def BuildPrologue(self):
+        pass
+
+    def BuildEpilogue(self):
+        pass
+
+
+class CMakeGenerationTest(unittest.TestCase):
+    TARGET = 'cppgc_base'
+    CMAKE_TARGET_SOURCES = TARGET.upper() + '_SOURCES'
+
+    def test_source_assignment(self):
+        self._CompileAndCheck(
+            f'set({self.CMAKE_TARGET_SOURCES} "source1.h" "source1.cc")',
+            'sources = [ "source1.h", "source1.cc", ]')
+
+    def test_source_append(self):
+        self._CompileAndCheck(
+            f'list(APPEND {self.CMAKE_TARGET_SOURCES} "source1.h" "source1.cc")',
+            'sources += [ "source1.h", "source1.cc", ]')
+
+    def test_source_remove(self):
+        self._CompileAndCheck(
+            f'list(REMOVE_ITEM {self.CMAKE_TARGET_SOURCES} "source1.h" "source1.cc")',
+            'sources -= [ "source1.h", "source1.cc", ]')
+
+    def test_equal(self):
+        self._CompileExpressionAndCheck('"${CURRENT_CPU}" STREQUAL "x64"',
+                                        'current_cpu == "x64"')
+
+    def test_not_equal(self):
+        self._CompileExpressionAndCheck('NOT "${CURRENT_CPU}" STREQUAL "x86"',
+                                        'current_cpu != "x86"')
+
+    def test_comparison_ops(self):
+        OPS = {
+            '<': 'LESS',
+            '<=': 'LESS_EQUAL',
+            '>': 'GREATER',
+            '>=': 'GREATER_EQUAL',
+        }
+        for gn_op, cmake_op in OPS.items():
+            self._CompileExpressionAndCheck(
+                f'"${{GCC_VERSION}}" {cmake_op} 40802',
+                f'gcc_version {gn_op} 40802')
+
+    def test_parenthesized_expressions(self):
+        self._CompileExpressionAndCheck(
+            '(("${IS_POSIX}" AND NOT "${IS_ANDROID}") OR "${IS_FUCHSIA}") AND NOT "${USING_SANITIZER}"',
+            '((is_posix && !is_android) || is_fuchsia) && !using_sanitizer')
+
+    def test_conditional_statements(self):
+        self._CompileAndCheck(
+            f"""
+if("${{IS_POSIX}}")
+  list(APPEND {self.CMAKE_TARGET_SOURCES} "unistd.h")
+else()
+  list(REMOVE_ITEM {self.CMAKE_TARGET_SOURCES} "unistd.h")
+endif()
+            """, """
+if (is_posix) {
+  sources += ["unistd.h"]
+} else {
+  sources -= ["unistd.h"]
+}
+            """)
+
+    def test_conditional_statements_elseif(self):
+        self._CompileAndCheck(
+            f"""
+if("${{IS_POSIX}}")
+  list(APPEND {self.CMAKE_TARGET_SOURCES} "unistd.h")
+elseif("${{IS_WIN}}")
+  list(REMOVE_ITEM {self.CMAKE_TARGET_SOURCES} "unistd.h")
+endif()
+            """, """
+if (is_posix) {
+  sources += ["unistd.h"]
+} else if (is_win) {
+  sources -= ["unistd.h"]
+}
+            """)
+
+    def _Compile(self, gn_string):
+        gn_code = f'v8_component({self.TARGET}) {{ {gn_string} }}'
+        tree = ParseGN(gn_code)
+        builder = CMakeMockBuilder()
+        V8GNTransformer(builder, [self.TARGET]).Traverse(tree)
+        return builder.GetResult()
+
+    def _CompileAndCheck(self, expected_cmake, gn_string):
+        actual_cmake = self._Compile(gn_string)
+        self.assertIn(self._Canonicalize(expected_cmake),
+                      self._Canonicalize(actual_cmake))
+
+    def _CompileExpressionAndCheck(self, expected_cmake, gn_string):
+        gn_string = f'if ({gn_string}) {{ sources = [ "source.cc" ] }}'
+        expected_cmake = f'if({expected_cmake})'
+        actual_cmake = self._Compile(gn_string)
+        self.assertIn(self._Canonicalize(expected_cmake),
+                      self._Canonicalize(actual_cmake))
+
+    @staticmethod
+    def _Canonicalize(str):
+        return ' '.join(str.split()).strip()
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/src/third_party/v8/tools/cppgc/test_cmake.sh b/src/third_party/v8/tools/cppgc/test_cmake.sh
new file mode 100755
index 0000000..77f551c
--- /dev/null
+++ b/src/third_party/v8/tools/cppgc/test_cmake.sh
@@ -0,0 +1,59 @@
+#!/bin/sh
+#
+# Copyright 2020 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+sourcedir=$(cd "$(dirname "$0")"; pwd -P)
+rootdir=$sourcedir/../../
+testdir=$rootdir/test/unittests/
+
+maingn=$rootdir/BUILD.gn
+testgn=$testdir/BUILD.gn
+
+function fail {
+  echo -e "\033[0;31m$1\033[0m" > /dev/stderr
+  exit 1
+}
+
+function cleanup {
+  rm $cmakelists
+  if [[ -d "$tempdir" ]]; then
+    rm -rf $tempdir
+  fi
+}
+
+trap "exit 1" HUP INT PIPE QUIT TERM
+trap cleanup EXIT
+
+if [[ ! -f "$maingn" || ! -f "$testgn" ]]; then
+  fail "Expected GN files are not present"
+fi
+
+cmakelists=$rootdir/CMakeLists.txt
+
+# Generate CMakeLists.txt in the root project directory.
+$sourcedir/gen_cmake.py --out=$cmakelists --main-gn=$maingn --test-gn=$testgn
+if [ $? -ne 0 ]; then
+  fail "CMakeLists.txt generation has failed"
+fi
+
+# Create a temporary build directory.
+tempdir=$(mktemp -d)
+if [[ ! "$tempdir" || ! -d "$tempdir" ]]; then
+  fail "Failed to create temporary dir"
+fi
+
+# Configure project with cmake.
+cd $tempdir
+cmake -GNinja $rootdir || fail "Failed to execute cmake"
+
+# Build all targets.
+ninja cppgc || fail "Failed to build cppgc"
+ninja cppgc_sample || fail "Failed to build sample"
+ninja cppgc_unittests || fail "Failed to build unittests"
+
+# Run unittests.
+./cppgc_unittests || fail "Failed to run unittests"
+
+echo -e "\033[0;32mThe test has successfully passed\033[0m"
diff --git a/src/third_party/v8/tools/cpu.sh b/src/third_party/v8/tools/cpu.sh
new file mode 100755
index 0000000..5634cac
--- /dev/null
+++ b/src/third_party/v8/tools/cpu.sh
@@ -0,0 +1,81 @@
+#!/bin/bash
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
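+# Illustrative usage (writing to sysfs normally requires root):
+#   sudo tools/cpu.sh performance
+#   sudo tools/cpu.sh limit_cores 4
+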
+CPUPATH=/sys/devices/system/cpu
+
+MAXID=$(cat $CPUPATH/present | awk -F- '{print $NF}')
+
+set_governor() {
+  echo "Setting CPU frequency governor to \"$1\""
+  for (( i=0; i<=$MAXID; i++ )); do
+    echo "$1" > $CPUPATH/cpu$i/cpufreq/scaling_governor
+  done
+}
+
+enable_cores() {
+  # $1: How many cores to enable.
+  for (( i=1; i<=$MAXID; i++ )); do
+    if [ "$i" -lt "$1" ]; then
+      echo 1 > $CPUPATH/cpu$i/online
+    else
+      echo 0 > $CPUPATH/cpu$i/online
+    fi
+  done
+}
+
+dual_core() {
+  echo "Switching to dual-core mode"
+  enable_cores 2
+}
+
+single_core() {
+  echo "Switching to single-core mode"
+  enable_cores 1
+}
+
+
+all_cores() {
+  echo "Reactivating all CPU cores"
+  enable_cores $((MAXID+1))
+}
+
+
+limit_cores() {
+  # $1: How many cores to enable.
+  echo "Limiting to $1 cores"
+  enable_cores $1
+}
+
+case "$1" in
+  fast | performance)
+    set_governor "performance"
+    ;;
+  slow | powersave)
+    set_governor "powersave"
+    ;;
+  default | ondemand)
+    set_governor "ondemand"
+    ;;
+  dualcore | dual)
+    dual_core
+    ;;
+  singlecore | single)
+    single_core
+    ;;
+  allcores | all)
+    all_cores
+    ;;
+  limit_cores)
+    if [ $# -ne 2 ]; then
+      echo "Usage $0 limit_cores <num>"
+      exit 1
+    fi
+    limit_cores $2
+    ;;
+  *)
+    echo "Usage: $0 fast|slow|default|singlecore|dualcore|all|limit_cores"
+    exit 1
+    ;;
+esac
diff --git a/src/third_party/v8/tools/cross_build_gcc.sh b/src/third_party/v8/tools/cross_build_gcc.sh
new file mode 100755
index 0000000..e3603cc
--- /dev/null
+++ b/src/third_party/v8/tools/cross_build_gcc.sh
@@ -0,0 +1,72 @@
+#!/bin/sh
+#
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+if [ "$#" -lt 1 ]; then
+  echo "Usage: tools/cross_build_gcc.sh <GCC prefix> [make arguments ...]"
+  exit 1
+fi
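+
+# Illustrative invocation (the toolchain prefix path is hypothetical); any
+# trailing arguments are forwarded to make:
+#   tools/cross_build_gcc.sh /opt/cross/bin/arm-linux-gnueabihf- -j8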
+
+export CXX=$1g++
+export AR=$1ar
+export RANLIB=$1ranlib
+export CC=$1gcc
+export LD=$1g++
+export LINK=$1g++
+
+OK=1
+if [ ! -x "$CXX" ]; then
+  echo "Error: $CXX does not exist or is not executable."
+  OK=0
+fi
+if [ ! -x "$AR" ]; then
+  echo "Error: $AR does not exist or is not executable."
+  OK=0
+fi
+if [ ! -x "$RANLIB" ]; then
+  echo "Error: $RANLIB does not exist or is not executable."
+  OK=0
+fi
+if [ ! -x "$CC" ]; then
+  echo "Error: $CC does not exist or is not executable."
+  OK=0
+fi
+if [ ! -x "$LD" ]; then
+  echo "Error: $LD does not exist or is not executable."
+  OK=0
+fi
+if [ ! -x "$LINK" ]; then
+  echo "Error: $LINK does not exist or is not executable."
+  OK=0
+fi
+if [ $OK -ne 1 ]; then
+  exit 1
+fi
+
+shift
+make snapshot=off $@
diff --git a/src/third_party/v8/tools/csvparser.js b/src/third_party/v8/tools/csvparser.js
new file mode 100644
index 0000000..a4d0304
--- /dev/null
+++ b/src/third_party/v8/tools/csvparser.js
@@ -0,0 +1,105 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+/**
+ * Creates a CSV lines parser.
+ */
+class CsvParser {
+  /**
+   * Converts \x00 and \u0000 escape sequences in the given string.
+   *
+   * @param {string} string Input field.
+   **/
+  escapeField(string) {
+    let nextPos = string.indexOf("\\");
+    if (nextPos === -1) return string;
+
+    let result = string.substring(0, nextPos);
+    // Escape sequences of the form \x00 and \u0000;
+    let endPos = string.length;
+    let pos = 0;
+    while (nextPos !== -1) {
+      let escapeIdentifier = string.charAt(nextPos + 1);
+      pos = nextPos + 2;
+      if (escapeIdentifier === 'n') {
+        result += '\n';
+        nextPos = pos;
+      } else if (escapeIdentifier === '\\') {
+        result += '\\';
+        nextPos = pos;
+      } else {
+        if (escapeIdentifier === 'x') {
+          // \x00 ascii range escapes consume 2 chars.
+          nextPos = pos + 2;
+        } else {
+          // \u0000 unicode range escapes consume 4 chars.
+          nextPos = pos + 4;
+        }
+        // Convert the selected escape sequence to a single character.
+        let escapeChars = string.substring(pos, nextPos);
+        result += String.fromCharCode(parseInt(escapeChars, 16));
+      }
+
+      // Continue looking for the next escape sequence.
+      pos = nextPos;
+      nextPos = string.indexOf("\\", pos);
+      // If there are no more escape sequences consume the rest of the string.
+      if (nextPos === -1) {
+        result += string.substr(pos);
+      } else if (pos !== nextPos) {
+        result += string.substring(pos, nextPos);
+      }
+    }
+    return result;
+  }
+
+  /**
+   * Parses a line of CSV-encoded values. Returns an array of fields.
+   *
+   * @param {string} line Input line.
+   */
+  parseLine(line) {
+    var pos = 0;
+    var endPos = line.length;
+    var fields = [];
+    if (endPos == 0) return fields;
+    let nextPos = 0;
+    while(nextPos !== -1) {
+      nextPos = line.indexOf(',', pos);
+      let field;
+      if (nextPos === -1) {
+        field = line.substr(pos);
+      } else {
+        field = line.substring(pos, nextPos);
+      }
+      fields.push(this.escapeField(field));
+      pos = nextPos + 1;
+    };
+    return fields
+  }
+}
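+
+// A small usage sketch (values illustrative): the parser splits on ',' and then
+// resolves backslash escapes in each field, so
+//   new CsvParser().parseLine('code-creation,Builtin,2,foo\\x2Cbar')
+// yields ['code-creation', 'Builtin', '2', 'foo,bar'].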
diff --git a/src/third_party/v8/tools/csvparser.mjs b/src/third_party/v8/tools/csvparser.mjs
new file mode 100644
index 0000000..e027d47
--- /dev/null
+++ b/src/third_party/v8/tools/csvparser.mjs
@@ -0,0 +1,105 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+/**
+ * Creates a CSV lines parser.
+ */
+export class CsvParser {
+  /**
+   * Converts \x00 and \u0000 escape sequences in the given string.
+   *
+   * @param {string} string Input field.
+   **/
+  escapeField(string) {
+    let nextPos = string.indexOf("\\");
+    if (nextPos === -1) return string;
+
+    let result = string.substring(0, nextPos);
+    // Escape sequences of the form \x00 and \u0000;
+    let endPos = string.length;
+    let pos = 0;
+    while (nextPos !== -1) {
+      let escapeIdentifier = string.charAt(nextPos + 1);
+      pos = nextPos + 2;
+      if (escapeIdentifier === 'n') {
+        result += '\n';
+        nextPos = pos;
+      } else if (escapeIdentifier === '\\') {
+        result += '\\';
+        nextPos = pos;
+      } else {
+        if (escapeIdentifier === 'x') {
+          // \x00 ascii range escapes consume 2 chars.
+          nextPos = pos + 2;
+        } else {
+          // \u0000 unicode range escapes consume 4 chars.
+          nextPos = pos + 4;
+        }
+        // Convert the selected escape sequence to a single character.
+        let escapeChars = string.substring(pos, nextPos);
+        result += String.fromCharCode(parseInt(escapeChars, 16));
+      }
+
+      // Continue looking for the next escape sequence.
+      pos = nextPos;
+      nextPos = string.indexOf("\\", pos);
+      // If there are no more escape sequences consume the rest of the string.
+      if (nextPos === -1) {
+        result += string.substr(pos);
+      } else if (pos !== nextPos) {
+        result += string.substring(pos, nextPos);
+      }
+    }
+    return result;
+  }
+
+  /**
+   * Parses a line of CSV-encoded values. Returns an array of fields.
+   *
+   * @param {string} line Input line.
+   */
+  parseLine(line) {
+    let pos = 0;
+    const endPos = line.length;
+    const fields = [];
+    if (endPos == 0) return fields;
+    let nextPos = 0;
+    while(nextPos !== -1) {
+      nextPos = line.indexOf(',', pos);
+      let field;
+      if (nextPos === -1) {
+        field = line.substr(pos);
+      } else {
+        field = line.substring(pos, nextPos);
+      }
+      fields.push(this.escapeField(field));
+      pos = nextPos + 1;
+    };
+    return fields
+  }
+}
diff --git a/src/third_party/v8/tools/debug_helper/BUILD.gn b/src/third_party/v8/tools/debug_helper/BUILD.gn
new file mode 100644
index 0000000..064bc32
--- /dev/null
+++ b/src/third_party/v8/tools/debug_helper/BUILD.gn
@@ -0,0 +1,109 @@
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("../../gni/snapshot_toolchain.gni")
+import("../../gni/v8.gni")
+
+config("internal_config") {
+  visibility = [ ":*" ]  # Only targets in this file can depend on this.
+
+  if (is_component_build) {
+    defines = [ "BUILDING_V8_DEBUG_HELPER" ]
+  }
+
+  configs = [ "../..:v8_tracing_config" ]
+
+  include_dirs = [
+    ".",
+    "../..",
+    "$target_gen_dir",
+    "$target_gen_dir/../..",
+  ]
+}
+
+# This config should be applied to code using v8_debug_helper.
+config("external_config") {
+  if (is_component_build) {
+    defines = [ "USING_V8_DEBUG_HELPER" ]
+  }
+
+  configs = [ "../..:external_config" ]
+
+  include_dirs = [ "." ]
+}
+
+action("run_mkgrokdump") {
+  testonly = true
+  visibility = [ ":*" ]
+
+  deps = [
+    "../..:run_mksnapshot_default",
+    "../../test/mkgrokdump:mkgrokdump($v8_generator_toolchain)",
+  ]
+
+  script = "../run.py"
+
+  outputs = [ "$target_gen_dir/v8heapconst.py" ]
+
+  args = [
+    "./" + rebase_path(
+            get_label_info(
+                    "../../test/mkgrokdump:mkgrokdump($v8_generator_toolchain)",
+                    "root_out_dir") + "/mkgrokdump",
+            root_build_dir),
+    "--outfile",
+    rebase_path("$target_gen_dir/v8heapconst.py", root_build_dir),
+  ]
+}
+
+action("gen_heap_constants") {
+  testonly = true
+  visibility = [ ":*" ]
+  deps = [ ":run_mkgrokdump" ]
+  script = "gen-heap-constants.py"
+  outputs = [ "$target_gen_dir/heap-constants-gen.cc" ]
+  args = [
+    rebase_path(target_gen_dir, root_build_dir),
+    rebase_path("$target_gen_dir/heap-constants-gen.cc", root_build_dir),
+  ]
+}
+
+v8_component("v8_debug_helper") {
+  testonly = true
+
+  public = [ "debug-helper.h" ]
+
+  sources = [
+    "$target_gen_dir/../../torque-generated/class-debug-readers.cc",
+    "$target_gen_dir/../../torque-generated/class-debug-readers.h",
+    "$target_gen_dir/../../torque-generated/instance-types.h",
+    "$target_gen_dir/heap-constants-gen.cc",
+    "compiler-types.cc",
+    "debug-helper-internal.cc",
+    "debug-helper-internal.h",
+    "debug-helper.h",
+    "get-object-properties.cc",
+    "heap-constants.cc",
+    "heap-constants.h",
+    "list-object-classes.cc",
+  ]
+
+  deps = [
+    ":gen_heap_constants",
+    "../..:generate_bytecode_builtins_list",
+    "../..:run_torque",
+    "../..:v8_headers",
+    "../..:v8_libbase",
+  ]
+
+  configs = [ ":internal_config" ]
+  if (v8_enable_i18n_support) {
+    configs += [ "//third_party/icu:icu_config" ]
+  }
+
+  remove_configs = [ "//build/config/compiler:no_rtti" ]
+  configs += [ "//build/config/compiler:rtti" ]
+
+  public_configs = [ ":external_config" ]
+}
diff --git a/src/third_party/v8/tools/debug_helper/README.md b/src/third_party/v8/tools/debug_helper/README.md
new file mode 100644
index 0000000..bc99569
--- /dev/null
+++ b/src/third_party/v8/tools/debug_helper/README.md
@@ -0,0 +1,6 @@
+# V8 debug helper
+
+This library is for debugging V8 itself, not debugging JavaScript running within
+V8. It is designed to be called from a debugger extension running within a
+native debugger such as WinDbg or LLDB. It can be used on live processes or
+crash dumps, so it cannot assume that all memory is available in a dump.
diff --git a/src/third_party/v8/tools/debug_helper/compiler-types.cc b/src/third_party/v8/tools/debug_helper/compiler-types.cc
new file mode 100644
index 0000000..1d9f6eb
--- /dev/null
+++ b/src/third_party/v8/tools/debug_helper/compiler-types.cc
@@ -0,0 +1,31 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "debug-helper-internal.h"
+#include "src/compiler/types.h"
+
+namespace ic = v8::internal::compiler;
+
+extern "C" {
+V8_DEBUG_HELPER_EXPORT const char* _v8_debug_helper_BitsetName(
+    uint64_t payload) {
+  // Check if payload is a bitset and return the bitset type.
+  // This line is duplicating the logic from Type::IsBitset.
+  bool is_bit_set = payload & 1;
+  if (!is_bit_set) return nullptr;
+  ic::BitsetType::bitset bits =
+      static_cast<ic::BitsetType::bitset>(payload ^ 1u);
+  switch (bits) {
+#define RETURN_NAMED_TYPE(type, value) \
+  case ic::BitsetType::k##type:        \
+    return #type;
+    PROPER_BITSET_TYPE_LIST(RETURN_NAMED_TYPE)
+    INTERNAL_BITSET_TYPE_LIST(RETURN_NAMED_TYPE)
+#undef RETURN_NAMED_TYPE
+
+    default:
+      return nullptr;
+  }
+}
+}
diff --git a/src/third_party/v8/tools/debug_helper/debug-helper-internal.cc b/src/third_party/v8/tools/debug_helper/debug-helper-internal.cc
new file mode 100644
index 0000000..29af7eb
--- /dev/null
+++ b/src/third_party/v8/tools/debug_helper/debug-helper-internal.cc
@@ -0,0 +1,65 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "debug-helper-internal.h"
+#include "src/common/ptr-compr-inl.h"
+#include "torque-generated/class-debug-readers.h"
+
+namespace i = v8::internal;
+
+namespace v8 {
+namespace internal {
+namespace debug_helper_internal {
+
+bool IsPointerCompressed(uintptr_t address) {
+#if COMPRESS_POINTERS_BOOL
+  return address < i::kPtrComprHeapReservationSize;
+#else
+  return false;
+#endif
+}
+
+uintptr_t EnsureDecompressed(uintptr_t address,
+                             uintptr_t any_uncompressed_ptr) {
+  if (!COMPRESS_POINTERS_BOOL || !IsPointerCompressed(address)) return address;
+  return i::DecompressTaggedAny(any_uncompressed_ptr,
+                                static_cast<i::Tagged_t>(address));
+}
+
+d::PropertyKind GetArrayKind(d::MemoryAccessResult mem_result) {
+  d::PropertyKind indexed_field_kind{};
+  switch (mem_result) {
+    case d::MemoryAccessResult::kOk:
+      indexed_field_kind = d::PropertyKind::kArrayOfKnownSize;
+      break;
+    case d::MemoryAccessResult::kAddressNotValid:
+      indexed_field_kind =
+          d::PropertyKind::kArrayOfUnknownSizeDueToInvalidMemory;
+      break;
+    default:
+      indexed_field_kind =
+          d::PropertyKind::kArrayOfUnknownSizeDueToValidButInaccessibleMemory;
+      break;
+  }
+  return indexed_field_kind;
+}
+
+std::vector<std::unique_ptr<ObjectProperty>> TqObject::GetProperties(
+    d::MemoryAccessor accessor) const {
+  return std::vector<std::unique_ptr<ObjectProperty>>();
+}
+
+const char* TqObject::GetName() const { return "v8::internal::Object"; }
+
+void TqObject::Visit(TqObjectVisitor* visitor) const {
+  visitor->VisitObject(this);
+}
+
+bool TqObject::IsSuperclassOf(const TqObject* other) const {
+  return GetName() != other->GetName();
+}
+
+}  // namespace debug_helper_internal
+}  // namespace internal
+}  // namespace v8
diff --git a/src/third_party/v8/tools/debug_helper/debug-helper-internal.h b/src/third_party/v8/tools/debug_helper/debug-helper-internal.h
new file mode 100644
index 0000000..129d0d3
--- /dev/null
+++ b/src/third_party/v8/tools/debug_helper/debug-helper-internal.h
@@ -0,0 +1,255 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file defines internal versions of the public API structs. These should
+// all be tidy and simple classes which maintain proper ownership (unique_ptr)
+// of each other. Each contains an instance of its corresponding public type,
+// which can be filled out with GetPublicView.
+
+#ifndef V8_TOOLS_DEBUG_HELPER_DEBUG_HELPER_INTERNAL_H_
+#define V8_TOOLS_DEBUG_HELPER_DEBUG_HELPER_INTERNAL_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "debug-helper.h"
+#include "src/objects/instance-type.h"
+
+namespace d = v8::debug_helper;
+
+namespace v8 {
+namespace internal {
+namespace debug_helper_internal {
+
+// A value that was read from the debuggee's memory.
+template <typename TValue>
+struct Value {
+  d::MemoryAccessResult validity;
+  TValue value;
+};
+
+// Internal version of API class v8::debug_helper::PropertyBase.
+class PropertyBase {
+ public:
+  PropertyBase(std::string name, std::string type,
+               std::string decompressed_type)
+      : name_(name), type_(type), decompressed_type_(decompressed_type) {}
+  void SetFieldsOnPublicView(d::PropertyBase* public_view) {
+    public_view->name = name_.c_str();
+    public_view->type = type_.c_str();
+    public_view->decompressed_type = decompressed_type_.c_str();
+  }
+
+ private:
+  std::string name_;
+  std::string type_;
+  std::string decompressed_type_;
+};
+
+// Internal version of API class v8::debug_helper::StructProperty.
+class StructProperty : public PropertyBase {
+ public:
+  StructProperty(std::string name, std::string type,
+                 std::string decompressed_type, size_t offset, uint8_t num_bits,
+                 uint8_t shift_bits)
+      : PropertyBase(std::move(name), std::move(type),
+                     std::move(decompressed_type)),
+        offset_(offset),
+        num_bits_(num_bits),
+        shift_bits_(shift_bits) {}
+
+  d::StructProperty* GetPublicView() {
+    PropertyBase::SetFieldsOnPublicView(&public_view_);
+    public_view_.offset = offset_;
+    public_view_.num_bits = num_bits_;
+    public_view_.shift_bits = shift_bits_;
+    return &public_view_;
+  }
+
+ private:
+  size_t offset_;
+  uint8_t num_bits_;
+  uint8_t shift_bits_;
+
+  d::StructProperty public_view_;
+};
+
+// Internal version of API class v8::debug_helper::ObjectProperty.
+class ObjectProperty : public PropertyBase {
+ public:
+  ObjectProperty(std::string name, std::string type,
+                 std::string decompressed_type, uintptr_t address,
+                 size_t num_values, size_t size,
+                 std::vector<std::unique_ptr<StructProperty>> struct_fields,
+                 d::PropertyKind kind)
+      : PropertyBase(std::move(name), std::move(type),
+                     std::move(decompressed_type)),
+        address_(address),
+        num_values_(num_values),
+        size_(size),
+        struct_fields_(std::move(struct_fields)),
+        kind_(kind) {}
+
+  d::ObjectProperty* GetPublicView() {
+    PropertyBase::SetFieldsOnPublicView(&public_view_);
+    public_view_.address = address_;
+    public_view_.num_values = num_values_;
+    public_view_.size = size_;
+    public_view_.num_struct_fields = struct_fields_.size();
+    struct_fields_raw_.clear();
+    for (const auto& property : struct_fields_) {
+      struct_fields_raw_.push_back(property->GetPublicView());
+    }
+    public_view_.struct_fields = struct_fields_raw_.data();
+    public_view_.kind = kind_;
+    return &public_view_;
+  }
+
+ private:
+  uintptr_t address_;
+  size_t num_values_;
+  size_t size_;
+  std::vector<std::unique_ptr<StructProperty>> struct_fields_;
+  d::PropertyKind kind_;
+
+  d::ObjectProperty public_view_;
+  std::vector<d::StructProperty*> struct_fields_raw_;
+};
+
+class ObjectPropertiesResult;
+struct ObjectPropertiesResultExtended : public d::ObjectPropertiesResult {
+  // Back reference for cleanup.
+  debug_helper_internal::ObjectPropertiesResult* base;
+};
+
+// Internal version of API class v8::debug_helper::ObjectPropertiesResult.
+class ObjectPropertiesResult {
+ public:
+  ObjectPropertiesResult(d::TypeCheckResult type_check_result,
+                         std::string brief, std::string type)
+      : type_check_result_(type_check_result), brief_(brief), type_(type) {}
+  ObjectPropertiesResult(
+      d::TypeCheckResult type_check_result, std::string brief, std::string type,
+      std::vector<std::unique_ptr<ObjectProperty>> properties,
+      std::vector<std::string> guessed_types)
+      : ObjectPropertiesResult(type_check_result, brief, type) {
+    properties_ = std::move(properties);
+    guessed_types_ = std::move(guessed_types);
+  }
+
+  void Prepend(const char* prefix) { brief_ = prefix + brief_; }
+
+  d::ObjectPropertiesResult* GetPublicView() {
+    public_view_.type_check_result = type_check_result_;
+    public_view_.brief = brief_.c_str();
+    public_view_.type = type_.c_str();
+    public_view_.num_properties = properties_.size();
+    properties_raw_.clear();
+    for (const auto& property : properties_) {
+      properties_raw_.push_back(property->GetPublicView());
+    }
+    public_view_.properties = properties_raw_.data();
+    public_view_.num_guessed_types = guessed_types_.size();
+    guessed_types_raw_.clear();
+    for (const auto& guess : guessed_types_) {
+      guessed_types_raw_.push_back(guess.c_str());
+    }
+    public_view_.guessed_types = guessed_types_raw_.data();
+    public_view_.base = this;
+    return &public_view_;
+  }
+
+ private:
+  d::TypeCheckResult type_check_result_;
+  std::string brief_;
+  std::string type_;
+  std::vector<std::unique_ptr<ObjectProperty>> properties_;
+  std::vector<std::string> guessed_types_;
+
+  ObjectPropertiesResultExtended public_view_;
+  std::vector<d::ObjectProperty*> properties_raw_;
+  std::vector<const char*> guessed_types_raw_;
+};
+
+class StackFrameResult;
+struct StackFrameResultExtended : public d::StackFrameResult {
+  // Back reference for cleanup.
+  debug_helper_internal::StackFrameResult* base;
+};
+
+// Internal version of API class v8::debug_helper::StackFrameResult.
+class StackFrameResult {
+ public:
+  StackFrameResult(std::vector<std::unique_ptr<ObjectProperty>> properties) {
+    properties_ = std::move(properties);
+  }
+
+  d::StackFrameResult* GetPublicView() {
+    public_view_.num_properties = properties_.size();
+    properties_raw_.clear();
+    for (const auto& property : properties_) {
+      properties_raw_.push_back(property->GetPublicView());
+    }
+    public_view_.properties = properties_raw_.data();
+    public_view_.base = this;
+    return &public_view_;
+  }
+
+ private:
+  std::vector<std::unique_ptr<ObjectProperty>> properties_;
+
+  StackFrameResultExtended public_view_;
+  std::vector<d::ObjectProperty*> properties_raw_;
+};
+
+class TqObjectVisitor;
+
+// Base class representing a V8 object in the debuggee's address space.
+// Subclasses for specific object types are generated by the Torque compiler.
+class TqObject {
+ public:
+  TqObject(uintptr_t address) : address_(address) {}
+  virtual ~TqObject() = default;
+  virtual std::vector<std::unique_ptr<ObjectProperty>> GetProperties(
+      d::MemoryAccessor accessor) const;
+  virtual const char* GetName() const;
+  virtual void Visit(TqObjectVisitor* visitor) const;
+  virtual bool IsSuperclassOf(const TqObject* other) const;
+
+ protected:
+  uintptr_t address_;
+};
+
+// A helper template that lets generated code verify that a string type name
+// actually resolves to a type, by repeating the name as both the template
+// parameter and the value.
+template <typename T>
+const char* CheckTypeName(const char* name) {
+  return name;
+}
+
+// In ptr-compr builds, returns whether the address looks like a compressed
+// pointer (zero-extended from 32 bits). Otherwise returns false because no
+// pointers can be compressed.
+bool IsPointerCompressed(uintptr_t address);
+
+// If the given address looks like a compressed pointer, returns a decompressed
+// representation of it. Otherwise returns the address unmodified.
+uintptr_t EnsureDecompressed(uintptr_t address,
+                             uintptr_t any_uncompressed_address);
+
+// Converts the MemoryAccessResult from attempting to read an array's length
+// into the corresponding PropertyKind for the array.
+d::PropertyKind GetArrayKind(d::MemoryAccessResult mem_result);
+
+// List of fully-qualified names for every Object subtype, generated based on
+// Torque class definitions.
+extern const d::ClassList kObjectClassList;
+
+}  // namespace debug_helper_internal
+}  // namespace internal
+}  // namespace v8
+
+#endif
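For orientation, the Torque compiler emits one Tq subclass per V8 class into
torque-generated/class-debug-readers.h. The snippet below is only an assumed
sketch of that generated shape, not the real generated code; the GetMapValue
reader matches the way get-object-properties.cc uses it further down, and the
other members override the TqObject interface declared above.

    class TqHeapObject : public TqObject {
     public:
      inline TqHeapObject(uintptr_t address) : TqObject(address) {}
      std::vector<std::unique_ptr<ObjectProperty>> GetProperties(
          d::MemoryAccessor accessor) const override;
      const char* GetName() const override;
      void Visit(TqObjectVisitor* visitor) const override;
      bool IsSuperclassOf(const TqObject* other) const override;
      // Reads the object's Map word out of debuggee memory.
      Value<uintptr_t> GetMapValue(d::MemoryAccessor accessor) const;
    };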
diff --git a/src/third_party/v8/tools/debug_helper/debug-helper.h b/src/third_party/v8/tools/debug_helper/debug-helper.h
new file mode 100644
index 0000000..73f4e66
--- /dev/null
+++ b/src/third_party/v8/tools/debug_helper/debug-helper.h
@@ -0,0 +1,267 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file defines the public interface to v8_debug_helper.
+
+#ifndef V8_TOOLS_DEBUG_HELPER_DEBUG_HELPER_H_
+#define V8_TOOLS_DEBUG_HELPER_DEBUG_HELPER_H_
+
+#include <cstdint>
+#include <memory>
+
+#if defined(_WIN32)
+
+#ifdef BUILDING_V8_DEBUG_HELPER
+#define V8_DEBUG_HELPER_EXPORT __declspec(dllexport)
+#elif USING_V8_DEBUG_HELPER
+#define V8_DEBUG_HELPER_EXPORT __declspec(dllimport)
+#else
+#define V8_DEBUG_HELPER_EXPORT
+#endif
+
+#else  // defined(_WIN32)
+
+#ifdef BUILDING_V8_DEBUG_HELPER
+#define V8_DEBUG_HELPER_EXPORT __attribute__((visibility("default")))
+#else
+#define V8_DEBUG_HELPER_EXPORT
+#endif
+
+#endif  // defined(_WIN32)
+
+namespace v8 {
+namespace debug_helper {
+
+// Possible results when attempting to fetch memory from the debuggee.
+enum class MemoryAccessResult {
+  kOk,
+  kAddressNotValid,
+  kAddressValidButInaccessible,  // Possible in incomplete dump.
+};
+
+// Information about how this tool discovered the type of the object.
+enum class TypeCheckResult {
+  // Success cases:
+  kSmi,
+  kWeakRef,
+  kUsedMap,
+  kKnownMapPointer,
+  kUsedTypeHint,
+
+  // Failure cases:
+  kUnableToDecompress,  // Caller must provide the heap range somehow.
+  kObjectPointerInvalid,
+  kObjectPointerValidButInaccessible,  // Possible in incomplete dump.
+  kMapPointerInvalid,
+  kMapPointerValidButInaccessible,  // Possible in incomplete dump.
+  kUnknownInstanceType,
+  kUnknownTypeHint,
+};
+
+enum class PropertyKind {
+  kSingle,
+  kArrayOfKnownSize,
+  kArrayOfUnknownSizeDueToInvalidMemory,
+  kArrayOfUnknownSizeDueToValidButInaccessibleMemory,
+};
+
+struct PropertyBase {
+  const char* name;
+
+  // Statically-determined type, such as from .tq definition. Can be an empty
+  // string if this property is itself a Torque-defined struct; in that case use
+  // |struct_fields| instead. This type should be treated as if it were used in
+  // the v8::internal namespace; that is, type "X::Y" can mean any of the
+  // following, in order of decreasing preference:
+  // - v8::internal::X::Y
+  // - v8::X::Y
+  // - X::Y
+  const char* type;
+
+  // In some cases, |type| may be a simple type representing a compressed
+  // pointer such as v8::internal::TaggedValue. In those cases,
+  // |decompressed_type| will contain the type of the object when decompressed.
+  // Otherwise, |decompressed_type| will match |type|. In any case, it is safe
+  // to pass the |decompressed_type| value as the type_hint on a subsequent call
+  // to GetObjectProperties.
+  const char* decompressed_type;
+};
+
+struct StructProperty : public PropertyBase {
+  // The offset from the beginning of the struct to this field.
+  size_t offset;
+
+  // The number of bits that are present, if this value is a bitfield. Zero
+  // indicates that this value is not a bitfield (the full value is stored).
+  uint8_t num_bits;
+
+  // The number of bits by which this value has been left-shifted for storage as
+  // a bitfield.
+  uint8_t shift_bits;
+};
+
+struct ObjectProperty : public PropertyBase {
+  // The address where the property value can be found in the debuggee's address
+  // space, or the address of the first value for an array.
+  uintptr_t address;
+
+  // If kind indicates an array of unknown size, num_values will be 0 and debug
+  // tools should display this property as a raw pointer. Note that there is a
+  // semantic difference between num_values=1 and kind=kSingle (normal property)
+  // versus num_values=1 and kind=kArrayOfKnownSize (one-element array).
+  size_t num_values;
+
+  // The number of bytes occupied by a single instance of the value type for
+  // this property. This can also be used as the array stride because arrays are
+  // tightly packed like in C.
+  size_t size;
+
+  // If the property is a struct made up of several pieces of data packed
+  // together, then the |struct_fields| array contains descriptions of those
+  // fields.
+  size_t num_struct_fields;
+  StructProperty** struct_fields;
+
+  PropertyKind kind;
+};
+
+struct ObjectPropertiesResult {
+  TypeCheckResult type_check_result;
+  const char* brief;
+  const char* type;  // Runtime type of the object.
+  size_t num_properties;
+  ObjectProperty** properties;
+
+  // If not all relevant memory is available, GetObjectProperties may respond
+  // with a technically correct but uninteresting type such as HeapObject, and
+  // use other heuristics to make reasonable guesses about what specific type
+  // the object actually is. You may request data about the same object again
+  // using any of these guesses as the type hint, but the results should be
+  // formatted to the user in a way that clearly indicates that they're only
+  // guesses.
+  size_t num_guessed_types;
+  const char** guessed_types;
+};
+
+struct StackFrameResult {
+  size_t num_properties;
+  ObjectProperty** properties;
+};
+
+// Copies byte_count bytes of memory from the given address in the debuggee to
+// the destination buffer.
+typedef MemoryAccessResult (*MemoryAccessor)(uintptr_t address,
+                                             void* destination,
+                                             size_t byte_count);
+
+// Additional data that can help GetObjectProperties to be more accurate. Any
+// fields you don't know can be set to zero and this library will do the best it
+// can with the information available.
+struct HeapAddresses {
+  // Beginning of allocated space for various kinds of data. These can help us
+  // to detect certain common objects that are placed in memory during startup.
+  // These values might be provided via name-value pairs in CrashPad dumps.
+  // Otherwise, they can be obtained as follows:
+  // 1. Get the Isolate pointer for the current thread. It might be somewhere on
+  //    the stack, or it might be accessible from thread-local storage with the
+  //    key stored in v8::internal::Isolate::isolate_key_.
+  // 2. Get isolate->heap_.map_space_->memory_chunk_list_.front_ and similar for
+  //    old_space_ and read_only_space_.
+  uintptr_t map_space_first_page;
+  uintptr_t old_space_first_page;
+  uintptr_t read_only_space_first_page;
+
+  // Any valid heap pointer address. On platforms where pointer compression is
+  // enabled, this can allow us to get data from compressed pointers even if the
+  // other data above is not provided. The Isolate pointer is valid for this
+  // purpose if you have it.
+  uintptr_t any_heap_pointer;
+};
+
+// Result type for ListObjectClasses.
+struct ClassList {
+  size_t num_class_names;
+  const char* const* class_names;  // Fully qualified class names.
+};
+
+}  // namespace debug_helper
+}  // namespace v8
+
+extern "C" {
+// Raw library interface. If possible, use functions in v8::debug_helper
+// namespace instead because they use smart pointers to prevent leaks.
+V8_DEBUG_HELPER_EXPORT v8::debug_helper::ObjectPropertiesResult*
+_v8_debug_helper_GetObjectProperties(
+    uintptr_t object, v8::debug_helper::MemoryAccessor memory_accessor,
+    const v8::debug_helper::HeapAddresses& heap_addresses,
+    const char* type_hint);
+V8_DEBUG_HELPER_EXPORT void _v8_debug_helper_Free_ObjectPropertiesResult(
+    v8::debug_helper::ObjectPropertiesResult* result);
+V8_DEBUG_HELPER_EXPORT v8::debug_helper::StackFrameResult*
+_v8_debug_helper_GetStackFrame(
+    uintptr_t frame_pointer, v8::debug_helper::MemoryAccessor memory_accessor);
+V8_DEBUG_HELPER_EXPORT void _v8_debug_helper_Free_StackFrameResult(
+    v8::debug_helper::StackFrameResult* result);
+V8_DEBUG_HELPER_EXPORT const v8::debug_helper::ClassList*
+_v8_debug_helper_ListObjectClasses();
+V8_DEBUG_HELPER_EXPORT const char* _v8_debug_helper_BitsetName(
+    uint64_t payload);
+}
+
+namespace v8 {
+namespace debug_helper {
+
+struct DebugHelperObjectPropertiesResultDeleter {
+  void operator()(v8::debug_helper::ObjectPropertiesResult* ptr) {
+    _v8_debug_helper_Free_ObjectPropertiesResult(ptr);
+  }
+};
+using ObjectPropertiesResultPtr =
+    std::unique_ptr<ObjectPropertiesResult,
+                    DebugHelperObjectPropertiesResultDeleter>;
+
+// Get information about the given object pointer, which could be:
+// - A tagged pointer, strong or weak
+// - A cleared weak pointer
+// - A compressed tagged pointer, zero-extended to 64 bits
+// - A tagged small integer
+// The type hint is only used if the object's Map is missing or corrupt. It
+// should be the fully-qualified name of a class that inherits from
+// v8::internal::Object.
+inline ObjectPropertiesResultPtr GetObjectProperties(
+    uintptr_t object, v8::debug_helper::MemoryAccessor memory_accessor,
+    const HeapAddresses& heap_addresses, const char* type_hint = nullptr) {
+  return ObjectPropertiesResultPtr(_v8_debug_helper_GetObjectProperties(
+      object, memory_accessor, heap_addresses, type_hint));
+}
+
+// Get a list of all class names deriving from v8::internal::Object.
+inline const ClassList* ListObjectClasses() {
+  return _v8_debug_helper_ListObjectClasses();
+}
+
+// Returns the bitset name for a v8::internal::compiler::Type with the given
+// payload, or null if the payload is not a bitset.
+inline const char* BitsetName(uint64_t payload) {
+  return _v8_debug_helper_BitsetName(payload);
+}
+
+struct DebugHelperStackFrameResultDeleter {
+  void operator()(v8::debug_helper::StackFrameResult* ptr) {
+    _v8_debug_helper_Free_StackFrameResult(ptr);
+  }
+};
+using StackFrameResultPtr =
+    std::unique_ptr<StackFrameResult, DebugHelperStackFrameResultDeleter>;
+
+inline StackFrameResultPtr GetStackFrame(
+    uintptr_t frame_pointer, v8::debug_helper::MemoryAccessor memory_accessor) {
+  return StackFrameResultPtr(
+      _v8_debug_helper_GetStackFrame(frame_pointer, memory_accessor));
+}
+
+}  // namespace debug_helper
+}  // namespace v8
+
+#endif
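To make the interface above concrete, here is a minimal, hedged usage sketch
for a debugger extension. ReadDebuggeeMemory, DescribeObject, and both pointer
arguments are hypothetical placeholders; only MemoryAccessor, HeapAddresses,
ObjectPropertiesResultPtr, and GetObjectProperties come from this header.

    #include "debug-helper.h"

    namespace dh = v8::debug_helper;

    // Hypothetical accessor: a real extension would copy byte_count bytes at
    // the given debuggee address (from a live process or a minidump) here.
    static dh::MemoryAccessResult ReadDebuggeeMemory(uintptr_t address,
                                                     void* destination,
                                                     size_t byte_count) {
      return dh::MemoryAccessResult::kAddressNotValid;
    }

    void DescribeObject(uintptr_t tagged_pointer, uintptr_t known_heap_ptr) {
      dh::HeapAddresses heap_addresses = {};  // Unknown fields may stay zero.
      heap_addresses.any_heap_pointer = known_heap_ptr;
      dh::ObjectPropertiesResultPtr result = dh::GetObjectProperties(
          tagged_pointer, &ReadDebuggeeMemory, heap_addresses);
      // result->brief, result->type, and result->properties can now be shown;
      // the smart pointer frees the result when it goes out of scope.
    }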
diff --git a/src/third_party/v8/tools/debug_helper/gen-heap-constants.py b/src/third_party/v8/tools/debug_helper/gen-heap-constants.py
new file mode 100644
index 0000000..6eb7f37
--- /dev/null
+++ b/src/third_party/v8/tools/debug_helper/gen-heap-constants.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""This program writes a C++ file that can be used to look up whether a given
+address matches known object locations. The first argument is the directory
+containing the file v8heapconst.py; the second argument is the output .cc file.
+"""
+
+import sys
+sys.path.insert(0, sys.argv[1])
+import v8heapconst
+
+out = """
+#include <cstdint>
+#include <string>
+
+#include "src/common/ptr-compr-inl.h"
+#include "tools/debug_helper/debug-helper-internal.h"
+
+namespace v8 {
+namespace internal {
+namespace debug_helper_internal {
+"""
+
+def iterate_objects(target_space, camel_space_name):
+  global out
+  result = []
+  for (space, offset), (instance_type, name) in v8heapconst.KNOWN_MAPS.items():
+    if space == target_space:
+      result.append((offset, name))
+  for (space, offset), name in v8heapconst.KNOWN_OBJECTS.items():
+    if space == target_space:
+      result.append((offset, name))
+  out = out + '\nstd::string FindKnownObjectIn' + camel_space_name \
+      + '(uintptr_t offset) {\n  switch (offset) {\n'
+  for offset, name in result:
+    out = out + '    case ' + str(offset) + ': return "' + name + '";\n'
+  out = out + '    default: return "";\n  }\n}\n'
+
+iterate_objects('map_space', 'MapSpace')
+iterate_objects('read_only_space', 'ReadOnlySpace')
+iterate_objects('old_space', 'OldSpace')
+
+def iterate_maps(target_space, camel_space_name):
+  global out
+  out = out + '\nint FindKnownMapInstanceTypeIn' + camel_space_name \
+      + '(uintptr_t offset) {\n  switch (offset) {\n'
+  for (space, offset), (instance_type, name) in v8heapconst.KNOWN_MAPS.items():
+    if space == target_space:
+      out = out + '    case ' + str(offset) + ': return ' + str(instance_type) \
+          + ';\n'
+  out = out + '    default: return -1;\n  }\n}\n'
+
+iterate_maps('map_space', 'MapSpace')
+iterate_maps('read_only_space', 'ReadOnlySpace')
+
+out = out + '\nvoid FillInUnknownHeapAddresses(' + \
+    'd::HeapAddresses* heap_addresses, uintptr_t any_uncompressed_ptr) {\n'
+if (hasattr(v8heapconst, 'HEAP_FIRST_PAGES')):  # Only exists in ptr-compr builds.
+  out = out + '  if (heap_addresses->any_heap_pointer == 0) {\n'
+  out = out + '    heap_addresses->any_heap_pointer = any_uncompressed_ptr;\n'
+  out = out + '  }\n'
+  expected_spaces = set(['map_space', 'read_only_space', 'old_space'])
+  for offset, space_name in v8heapconst.HEAP_FIRST_PAGES.items():
+    if (space_name in expected_spaces):
+      out = out + '  if (heap_addresses->' + space_name + '_first_page == 0) {\n'
+      out = out + '    heap_addresses->' + space_name + \
+          '_first_page = i::DecompressTaggedPointer(any_uncompressed_ptr, ' + \
+          str(offset) + ');\n'
+      out = out + '  }\n'
+out = out + '}\n'
+
+out = out + '\n}\n}\n}\n'
+
+try:
+  with open(sys.argv[2], "r") as out_file:
+    if out == out_file.read():
+      sys.exit(0)  # No modification needed.
+except IOError:
+  pass  # File probably doesn't exist; write it.
+with open(sys.argv[2], "w") as out_file:
+  out_file.write(out)
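As a rough illustration, the generated .cc file contains one switch-based
lookup per space, along the lines of the assumed sketch below (the offsets,
names, and instance-type value are invented, not taken from any real
v8heapconst.py):

    std::string FindKnownObjectInMapSpace(uintptr_t offset) {
      switch (offset) {
        case 328: return "MetaMap";        // hypothetical entry
        case 456: return "FixedArrayMap";  // hypothetical entry
        default: return "";
      }
    }

    int FindKnownMapInstanceTypeInMapSpace(uintptr_t offset) {
      switch (offset) {
        case 328: return 68;  // hypothetical instance type value
        default: return -1;
      }
    }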
diff --git a/src/third_party/v8/tools/debug_helper/get-object-properties.cc b/src/third_party/v8/tools/debug_helper/get-object-properties.cc
new file mode 100644
index 0000000..181c58d
--- /dev/null
+++ b/src/third_party/v8/tools/debug_helper/get-object-properties.cc
@@ -0,0 +1,696 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <sstream>
+
+#include "debug-helper-internal.h"
+#include "heap-constants.h"
+#include "include/v8-internal.h"
+#include "src/common/external-pointer.h"
+#include "src/execution/frame-constants.h"
+#include "src/execution/frames.h"
+#include "src/execution/isolate-utils.h"
+#include "src/objects/string-inl.h"
+#include "src/strings/unicode-inl.h"
+#include "torque-generated/class-debug-readers.h"
+
+namespace i = v8::internal;
+
+namespace v8 {
+namespace internal {
+namespace debug_helper_internal {
+
+constexpr char kObject[] = "v8::internal::Object";
+constexpr char kTaggedValue[] = "v8::internal::TaggedValue";
+constexpr char kSmi[] = "v8::internal::Smi";
+constexpr char kHeapObject[] = "v8::internal::HeapObject";
+#ifdef V8_COMPRESS_POINTERS
+constexpr char kObjectAsStoredInHeap[] = "v8::internal::TaggedValue";
+#else
+constexpr char kObjectAsStoredInHeap[] = "v8::internal::Object";
+#endif
+
+std::string AppendAddressAndType(const std::string& brief, uintptr_t address,
+                                 const char* type) {
+  std::stringstream brief_stream;
+  brief_stream << "0x" << std::hex << address << " <" << type << ">";
+  return brief.empty() ? brief_stream.str()
+                       : brief + " (" + brief_stream.str() + ")";
+}
+
+std::string JoinWithSpace(const std::string& a, const std::string& b) {
+  return a.empty() || b.empty() ? a + b : a + " " + b;
+}
+
+struct TypedObject {
+  TypedObject(d::TypeCheckResult type_check_result,
+              std::unique_ptr<TqObject> object)
+      : type_check_result(type_check_result), object(std::move(object)) {}
+
+  // How we discovered the object's type, or why we failed to do so.
+  d::TypeCheckResult type_check_result;
+
+  // Pointer to some TqObject subclass, representing the most specific known
+  // type for the object.
+  std::unique_ptr<TqObject> object;
+
+  // Collection of other guesses at more specific types than the one represented
+  // by |object|.
+  std::vector<TypedObject> possible_types;
+};
+
+TypedObject GetTypedObjectByHint(uintptr_t address,
+                                 std::string type_hint_string) {
+#define TYPE_NAME_CASE(ClassName, ...)                   \
+  if (type_hint_string == "v8::internal::" #ClassName) { \
+    return {d::TypeCheckResult::kUsedTypeHint,           \
+            std::make_unique<Tq##ClassName>(address)};   \
+  }
+
+  TORQUE_INSTANCE_CHECKERS_SINGLE_FULLY_DEFINED(TYPE_NAME_CASE)
+  TORQUE_INSTANCE_CHECKERS_RANGE_FULLY_DEFINED(TYPE_NAME_CASE)
+  STRING_CLASS_TYPES(TYPE_NAME_CASE)
+
+#undef TYPE_NAME_CASE
+
+  return {d::TypeCheckResult::kUnknownTypeHint,
+          std::make_unique<TqHeapObject>(address)};
+}
+
+TypedObject GetTypedObjectForString(uintptr_t address, i::InstanceType type,
+                                    d::TypeCheckResult type_source) {
+  class StringGetDispatcher : public i::AllStatic {
+   public:
+#define DEFINE_METHOD(ClassName)                                    \
+  static inline TypedObject Handle##ClassName(                      \
+      uintptr_t address, d::TypeCheckResult type_source) {          \
+    return {type_source, std::make_unique<Tq##ClassName>(address)}; \
+  }
+    STRING_CLASS_TYPES(DEFINE_METHOD)
+#undef DEFINE_METHOD
+    static inline TypedObject HandleInvalidString(
+        uintptr_t address, d::TypeCheckResult type_source) {
+      return {d::TypeCheckResult::kUnknownInstanceType,
+              std::make_unique<TqString>(address)};
+    }
+  };
+
+  return i::StringShape(type)
+      .DispatchToSpecificTypeWithoutCast<StringGetDispatcher, TypedObject>(
+          address, type_source);
+}
+
+TypedObject GetTypedObjectByInstanceType(uintptr_t address,
+                                         i::InstanceType type,
+                                         d::TypeCheckResult type_source) {
+  switch (type) {
+#define INSTANCE_TYPE_CASE(ClassName, INSTANCE_TYPE) \
+  case i::INSTANCE_TYPE:                             \
+    return {type_source, std::make_unique<Tq##ClassName>(address)};
+    TORQUE_INSTANCE_CHECKERS_SINGLE_FULLY_DEFINED(INSTANCE_TYPE_CASE)
+    TORQUE_INSTANCE_CHECKERS_MULTIPLE_FULLY_DEFINED(INSTANCE_TYPE_CASE)
+#undef INSTANCE_TYPE_CASE
+
+    default:
+
+      // Special case: concrete subtypes of String are not included in the
+      // main instance type list because they use the low bits of the instance
+      // type enum as flags.
+      if (type <= i::LAST_STRING_TYPE) {
+        return GetTypedObjectForString(address, type, type_source);
+      }
+
+#define INSTANCE_RANGE_CASE(ClassName, FIRST_TYPE, LAST_TYPE)       \
+  if (type >= i::FIRST_TYPE && type <= i::LAST_TYPE) {              \
+    return {type_source, std::make_unique<Tq##ClassName>(address)}; \
+  }
+      TORQUE_INSTANCE_CHECKERS_RANGE_FULLY_DEFINED(INSTANCE_RANGE_CASE)
+#undef INSTANCE_RANGE_CASE
+
+      return {d::TypeCheckResult::kUnknownInstanceType,
+              std::make_unique<TqHeapObject>(address)};
+  }
+}
+
+TypedObject GetTypedHeapObject(uintptr_t address, d::MemoryAccessor accessor,
+                               const char* type_hint,
+                               const d::HeapAddresses& heap_addresses) {
+  auto heap_object = std::make_unique<TqHeapObject>(address);
+  Value<uintptr_t> map_ptr = heap_object->GetMapValue(accessor);
+
+  if (map_ptr.validity != d::MemoryAccessResult::kOk) {
+    // If we can't read the Map pointer from the object, then we likely can't
+    // read anything else, so there's no point in attempting to use the
+    // type hint. Just return a failure.
+    return {map_ptr.validity == d::MemoryAccessResult::kAddressNotValid
+                ? d::TypeCheckResult::kObjectPointerInvalid
+                : d::TypeCheckResult::kObjectPointerValidButInaccessible,
+            std::move(heap_object)};
+  }
+
+  Value<i::InstanceType> type =
+      TqMap(map_ptr.value).GetInstanceTypeValue(accessor);
+  if (type.validity == d::MemoryAccessResult::kOk) {
+    return GetTypedObjectByInstanceType(address, type.value,
+                                        d::TypeCheckResult::kUsedMap);
+  }
+
+  // We can't read the Map, so check whether it is in the list of known Maps,
+  // as another way to get its instance type.
+  KnownInstanceType known_map_type =
+      FindKnownMapInstanceTypes(map_ptr.value, heap_addresses);
+  if (known_map_type.confidence == KnownInstanceType::Confidence::kHigh) {
+    DCHECK_EQ(known_map_type.types.size(), 1);
+    return GetTypedObjectByInstanceType(address, known_map_type.types[0],
+                                        d::TypeCheckResult::kKnownMapPointer);
+  }
+
+  // Create a basic result that says that the object is a HeapObject and we
+  // couldn't read its Map.
+  TypedObject result = {
+      type.validity == d::MemoryAccessResult::kAddressNotValid
+          ? d::TypeCheckResult::kMapPointerInvalid
+          : d::TypeCheckResult::kMapPointerValidButInaccessible,
+      std::move(heap_object)};
+
+  // If a type hint is available, it may give us something more specific than
+  // HeapObject. However, a type hint of Object would be even less specific, so
+  // we'll only use the type hint if it's a subclass of HeapObject.
+  if (type_hint != nullptr) {
+    TypedObject hint_result = GetTypedObjectByHint(address, type_hint);
+    if (result.object->IsSuperclassOf(hint_result.object.get())) {
+      result = std::move(hint_result);
+    }
+  }
+
+  // If low-confidence results are available from known Maps, include them only
+  // if they don't contradict the primary type and would provide some additional
+  // specificity.
+  for (const i::InstanceType type_guess : known_map_type.types) {
+    TypedObject guess_result = GetTypedObjectByInstanceType(
+        address, type_guess, d::TypeCheckResult::kKnownMapPointer);
+    if (result.object->IsSuperclassOf(guess_result.object.get())) {
+      result.possible_types.push_back(std::move(guess_result));
+    }
+  }
+
+  return result;
+}
+
+// An object visitor that accumulates the first few characters of a string.
+class ReadStringVisitor : public TqObjectVisitor {
+ public:
+  static v8::base::Optional<std::string> Visit(
+      d::MemoryAccessor accessor, const d::HeapAddresses& heap_addresses,
+      const TqString* object) {
+    ReadStringVisitor visitor(accessor, heap_addresses);
+    object->Visit(&visitor);
+    return visitor.GetString();
+  }
+
+  // Returns the result as UTF-8 once visiting is complete.
+  v8::base::Optional<std::string> GetString() {
+    if (failed_) return {};
+    std::vector<char> result(
+        string_.size() * unibrow::Utf16::kMaxExtraUtf8BytesForOneUtf16CodeUnit);
+    unsigned write_index = 0;
+    int prev_char = unibrow::Utf16::kNoPreviousCharacter;
+    for (size_t read_index = 0; read_index < string_.size(); ++read_index) {
+      uint16_t character = string_[read_index];
+      write_index +=
+          unibrow::Utf8::Encode(result.data() + write_index, character,
+                                prev_char, /*replace_invalid=*/true);
+      prev_char = character;
+    }
+    return std::string(result.data(), write_index);
+  }
+
+  template <typename TChar>
+  Value<TChar> ReadCharacter(uintptr_t data_address, int32_t index) {
+    TChar value{};
+    d::MemoryAccessResult validity =
+        accessor_(data_address + index * sizeof(TChar),
+                  reinterpret_cast<uint8_t*>(&value), sizeof(value));
+    return {validity, value};
+  }
+
+  template <typename TChar>
+  void ReadStringCharacters(const TqString* object, uintptr_t data_address) {
+    int32_t length = GetOrFinish(object->GetLengthValue(accessor_));
+    for (; index_ < length && index_ < limit_ && !done_; ++index_) {
+      STATIC_ASSERT(sizeof(TChar) <= sizeof(char16_t));
+      char16_t c = static_cast<char16_t>(
+          GetOrFinish(ReadCharacter<TChar>(data_address, index_)));
+      if (!done_) AddCharacter(c);
+    }
+  }
+
+  template <typename TChar, typename TString>
+  void ReadSeqString(const TString* object) {
+    ReadStringCharacters<TChar>(object, object->GetCharsAddress());
+  }
+
+  void VisitSeqOneByteString(const TqSeqOneByteString* object) override {
+    ReadSeqString<char>(object);
+  }
+
+  void VisitSeqTwoByteString(const TqSeqTwoByteString* object) override {
+    ReadSeqString<char16_t>(object);
+  }
+
+  void VisitConsString(const TqConsString* object) override {
+    uintptr_t first_address = GetOrFinish(object->GetFirstValue(accessor_));
+    if (done_) return;
+    auto first =
+        GetTypedHeapObject(first_address, accessor_, nullptr, heap_addresses_)
+            .object;
+    first->Visit(this);
+    if (done_) return;
+    int32_t first_length = GetOrFinish(
+        static_cast<TqString*>(first.get())->GetLengthValue(accessor_));
+    uintptr_t second = GetOrFinish(object->GetSecondValue(accessor_));
+    if (done_) return;
+    IndexModifier modifier(this, -first_length, -first_length);
+    GetTypedHeapObject(second, accessor_, nullptr, heap_addresses_)
+        .object->Visit(this);
+  }
+
+  void VisitSlicedString(const TqSlicedString* object) override {
+    uintptr_t parent = GetOrFinish(object->GetParentValue(accessor_));
+    int32_t length = GetOrFinish(object->GetLengthValue(accessor_));
+    int32_t offset = i::PlatformSmiTagging::SmiToInt(
+        GetOrFinish(object->GetOffsetValue(accessor_)));
+    if (done_) return;
+    int32_t limit_adjust = offset + length - limit_;
+    IndexModifier modifier(this, offset, limit_adjust < 0 ? limit_adjust : 0);
+    GetTypedHeapObject(parent, accessor_, nullptr, heap_addresses_)
+        .object->Visit(this);
+  }
+
+  void VisitThinString(const TqThinString* object) override {
+    uintptr_t actual = GetOrFinish(object->GetActualValue(accessor_));
+    if (done_) return;
+    GetTypedHeapObject(actual, accessor_, nullptr, heap_addresses_)
+        .object->Visit(this);
+  }
+
+  bool IsExternalStringCached(const TqExternalString* object) {
+    // The safest way to get the instance type is to use known map pointers, in
+    // case the map data is not available.
+    uintptr_t map = GetOrFinish(object->GetMapValue(accessor_));
+    if (done_) return false;
+    auto instance_types = FindKnownMapInstanceTypes(map, heap_addresses_);
+    // Exactly one of the matched instance types should be a string type,
+    // because all maps for string types are in the same space (read-only
+    // space). The "uncached" flag on that instance type tells us whether it's
+    // safe to read the cached data.
+    for (const auto& type : instance_types.types) {
+      if ((type & i::kIsNotStringMask) == i::kStringTag &&
+          (type & i::kStringRepresentationMask) == i::kExternalStringTag) {
+        return (type & i::kUncachedExternalStringMask) !=
+               i::kUncachedExternalStringTag;
+      }
+    }
+
+    // If for some reason we can't find an external string type here (maybe the
+    // caller provided an external string type as the type hint, but it doesn't
+    // actually match the in-memory map pointer), then we can't safely use the
+    // cached data.
+    return false;
+  }
+
+  template <typename TChar>
+  void ReadExternalString(const TqExternalString* object) {
+    // Cached external strings are easy to read; uncached external strings
+    // require knowledge of the embedder. For now, we only read cached external
+    // strings.
+    if (IsExternalStringCached(object)) {
+      ExternalPointer_t resource_data =
+          GetOrFinish(object->GetResourceDataValue(accessor_));
+#ifdef V8_COMPRESS_POINTERS
+      uintptr_t data_address = static_cast<uintptr_t>(
+          DecodeExternalPointer(GetIsolateForPtrComprFromOnHeapAddress(
+                                    heap_addresses_.any_heap_pointer),
+                                resource_data, kExternalStringResourceDataTag));
+#else
+      uintptr_t data_address = static_cast<uintptr_t>(resource_data);
+#endif  // V8_COMPRESS_POINTERS
+      if (done_) return;
+      ReadStringCharacters<TChar>(object, data_address);
+    } else {
+      // TODO(v8:9376): Come up with some way that a caller with full knowledge
+      // of a particular embedder could provide a callback function for getting
+      // uncached string data.
+      AddEllipsisAndFinish();
+    }
+  }
+
+  void VisitExternalOneByteString(
+      const TqExternalOneByteString* object) override {
+    ReadExternalString<char>(object);
+  }
+
+  void VisitExternalTwoByteString(
+      const TqExternalTwoByteString* object) override {
+    ReadExternalString<char16_t>(object);
+  }
+
+  void VisitObject(const TqObject* object) override {
+    // If we fail to find a specific type for a sub-object within a cons string,
+    // sliced string, or thin string, we will end up here.
+    AddEllipsisAndFinish();
+  }
+
+ private:
+  ReadStringVisitor(d::MemoryAccessor accessor,
+                    const d::HeapAddresses& heap_addresses)
+      : accessor_(accessor),
+        heap_addresses_(heap_addresses),
+        index_(0),
+        limit_(INT32_MAX),
+        done_(false),
+        failed_(false) {}
+
+  // Unpacks a value that was fetched from the debuggee. If the fetch failed,
+  // marks this visitor as done so that no further work is attempted.
+  template <typename T>
+  T GetOrFinish(Value<T> value) {
+    if (value.validity != d::MemoryAccessResult::kOk) {
+      AddEllipsisAndFinish();
+    }
+    return value.value;
+  }
+
+  void AddEllipsisAndFinish() {
+    if (!done_) {
+      done_ = true;
+      if (string_.empty()) {
+        failed_ = true;
+      } else {
+        string_ += u"...";
+      }
+    }
+  }
+
+  void AddCharacter(char16_t c) {
+    if (string_.size() >= kMaxCharacters) {
+      AddEllipsisAndFinish();
+    } else {
+      string_.push_back(c);
+    }
+  }
+
+  // Temporarily adds offsets to both index_ and limit_, to handle ConsString
+  // and SlicedString.
+  class IndexModifier {
+   public:
+    IndexModifier(ReadStringVisitor* that, int32_t index_adjust,
+                  int32_t limit_adjust)
+        : that_(that),
+          index_adjust_(index_adjust),
+          limit_adjust_(limit_adjust) {
+      that_->index_ += index_adjust_;
+      that_->limit_ += limit_adjust_;
+    }
+    ~IndexModifier() {
+      that_->index_ -= index_adjust_;
+      that_->limit_ -= limit_adjust_;
+    }
+
+   private:
+    ReadStringVisitor* that_;
+    int32_t index_adjust_;
+    int32_t limit_adjust_;
+    DISALLOW_COPY_AND_ASSIGN(IndexModifier);
+  };
+
+  static constexpr int kMaxCharacters = 80;  // How many characters to print.
+
+  std::u16string string_;  // Result string.
+  d::MemoryAccessor accessor_;
+  const d::HeapAddresses& heap_addresses_;
+  int32_t index_;  // Index of next char to read.
+  int32_t limit_;  // Don't read past this index (set by SlicedString).
+  bool done_;      // Whether to stop further work.
+  bool failed_;    // Whether an error was encountered before any valid data.
+};
+
+// An object visitor that supplies extra information for some types.
+class AddInfoVisitor : public TqObjectVisitor {
+ public:
+  // Returns a descriptive string and a list of properties for the given object.
+  // Both may be empty, and are meant as an addition to, or a replacement
+  // for, the Torque-generated data about the object.
+  static std::pair<std::string, std::vector<std::unique_ptr<ObjectProperty>>>
+  Visit(const TqObject* object, d::MemoryAccessor accessor,
+        const d::HeapAddresses& heap_addresses) {
+    AddInfoVisitor visitor(accessor, heap_addresses);
+    object->Visit(&visitor);
+    return {std::move(visitor.brief_), std::move(visitor.properties_)};
+  }
+
+  void VisitString(const TqString* object) override {
+    auto str = ReadStringVisitor::Visit(accessor_, heap_addresses_, object);
+    if (str.has_value()) {
+      brief_ = "\"" + *str + "\"";
+    }
+  }
+
+  void VisitExternalString(const TqExternalString* object) override {
+    VisitString(object);
+    // Casting the resource field to v8::String::ExternalStringResourceBase*
+    // would add more info.
+    properties_.push_back(std::make_unique<ObjectProperty>(
+        "resource",
+        CheckTypeName<v8::String::ExternalStringResourceBase*>(
+            "v8::String::ExternalStringResourceBase*"),
+        CheckTypeName<v8::String::ExternalStringResourceBase*>(
+            "v8::String::ExternalStringResourceBase*"),
+        object->GetResourceAddress(), 1,
+        sizeof(v8::String::ExternalStringResourceBase*),
+        std::vector<std::unique_ptr<StructProperty>>(),
+        d::PropertyKind::kSingle));
+  }
+
+  void VisitJSObject(const TqJSObject* object) override {
+    // JSObject and its subclasses can be followed directly by an array of
+    // property values. The start and end offsets of those values are described
+    // by a pair of values in its Map.
+    auto map_ptr = object->GetMapValue(accessor_);
+    if (map_ptr.validity != d::MemoryAccessResult::kOk) {
+      return;  // Can't read the JSObject. Nothing useful to do.
+    }
+    TqMap map(map_ptr.value);
+
+    // On JSObject instances, this value is the start of in-object properties.
+    // The constructor function index option is only for primitives.
+    auto start_offset =
+        map.GetInObjectPropertiesStartOrConstructorFunctionIndexValue(
+            accessor_);
+
+    // The total size of the object in memory. This may include over-allocated
+    // expansion space that doesn't correspond to any user-accessible property.
+    auto instance_size = map.GetInstanceSizeInWordsValue(accessor_);
+
+    if (start_offset.validity != d::MemoryAccessResult::kOk ||
+        instance_size.validity != d::MemoryAccessResult::kOk) {
+      return;  // Can't read the Map. Nothing useful to do.
+    }
+    int num_properties = instance_size.value - start_offset.value;
+    if (num_properties > 0) {
+      properties_.push_back(std::make_unique<ObjectProperty>(
+          "in-object properties", kObjectAsStoredInHeap, kObject,
+          object->GetMapAddress() + start_offset.value * i::kTaggedSize,
+          num_properties, i::kTaggedSize,
+          std::vector<std::unique_ptr<StructProperty>>(),
+          d::PropertyKind::kArrayOfKnownSize));
+    }
+  }
+
+ private:
+  AddInfoVisitor(d::MemoryAccessor accessor,
+                 const d::HeapAddresses& heap_addresses)
+      : accessor_(accessor), heap_addresses_(heap_addresses) {}
+
+  // Inputs used by this visitor:
+
+  d::MemoryAccessor accessor_;
+  const d::HeapAddresses& heap_addresses_;
+
+  // Outputs generated by this visitor:
+
+  // A brief description of the object.
+  std::string brief_;
+  // A list of extra properties to append after the automatic ones that are
+  // created for all Torque-defined class fields.
+  std::vector<std::unique_ptr<ObjectProperty>> properties_;
+};
+
+std::unique_ptr<ObjectPropertiesResult> GetHeapObjectPropertiesNotCompressed(
+    uintptr_t address, d::MemoryAccessor accessor, const char* type_hint,
+    const d::HeapAddresses& heap_addresses) {
+  // Regardless of whether we can read the object itself, maybe we can find its
+  // pointer in the list of known objects.
+  std::string brief = FindKnownObject(address, heap_addresses);
+
+  TypedObject typed =
+      GetTypedHeapObject(address, accessor, type_hint, heap_addresses);
+  auto props = typed.object->GetProperties(accessor);
+
+  // Use the AddInfoVisitor to get any extra properties or descriptive text that
+  // can't be directly derived from Torque class definitions.
+  auto extra_info =
+      AddInfoVisitor::Visit(typed.object.get(), accessor, heap_addresses);
+  brief = JoinWithSpace(brief, extra_info.first);
+
+  // Overwrite existing properties if they have the same name.
+  for (size_t i = 0; i < extra_info.second.size(); i++) {
+    bool overwrite = false;
+    for (size_t j = 0; j < props.size(); j++) {
+      if (strcmp(props[j]->GetPublicView()->name,
+                 extra_info.second[i]->GetPublicView()->name) == 0) {
+        props[j] = std::move(extra_info.second[i]);
+        overwrite = true;
+        break;
+      }
+    }
+    if (overwrite) continue;
+    props.push_back(std::move(extra_info.second[i]));
+  }
+
+  brief = AppendAddressAndType(brief, address, typed.object->GetName());
+
+  // Convert the low-confidence guessed types to a list of strings as expected
+  // for the response.
+  std::vector<std::string> guessed_types;
+  for (const auto& guess : typed.possible_types) {
+    guessed_types.push_back(guess.object->GetName());
+  }
+
+  return std::make_unique<ObjectPropertiesResult>(
+      typed.type_check_result, brief, typed.object->GetName(), std::move(props),
+      std::move(guessed_types));
+}
+
+std::unique_ptr<ObjectPropertiesResult> GetHeapObjectPropertiesMaybeCompressed(
+    uintptr_t address, d::MemoryAccessor memory_accessor,
+    d::HeapAddresses heap_addresses, const char* type_hint) {
+  // Try to figure out the heap range, for pointer compression (this is unused
+  // if pointer compression is disabled).
+  uintptr_t any_uncompressed_ptr = 0;
+  if (!IsPointerCompressed(address)) any_uncompressed_ptr = address;
+  if (any_uncompressed_ptr == 0)
+    any_uncompressed_ptr = heap_addresses.any_heap_pointer;
+  if (any_uncompressed_ptr == 0)
+    any_uncompressed_ptr = heap_addresses.map_space_first_page;
+  if (any_uncompressed_ptr == 0)
+    any_uncompressed_ptr = heap_addresses.old_space_first_page;
+  if (any_uncompressed_ptr == 0)
+    any_uncompressed_ptr = heap_addresses.read_only_space_first_page;
+  FillInUnknownHeapAddresses(&heap_addresses, any_uncompressed_ptr);
+  if (any_uncompressed_ptr == 0) {
+    // We can't figure out the heap range. Just check for known objects.
+    std::string brief = FindKnownObject(address, heap_addresses);
+    brief = AppendAddressAndType(brief, address, kTaggedValue);
+    return std::make_unique<ObjectPropertiesResult>(
+        d::TypeCheckResult::kUnableToDecompress, brief, kTaggedValue);
+  }
+
+  address = EnsureDecompressed(address, any_uncompressed_ptr);
+
+  return GetHeapObjectPropertiesNotCompressed(address, memory_accessor,
+                                              type_hint, heap_addresses);
+}
+
+std::unique_ptr<ObjectPropertiesResult> GetObjectProperties(
+    uintptr_t address, d::MemoryAccessor memory_accessor,
+    const d::HeapAddresses& heap_addresses, const char* type_hint) {
+  if (static_cast<uint32_t>(address) == i::kClearedWeakHeapObjectLower32) {
+    return std::make_unique<ObjectPropertiesResult>(
+        d::TypeCheckResult::kWeakRef, "cleared weak ref", kHeapObject);
+  }
+  bool is_weak = (address & i::kHeapObjectTagMask) == i::kWeakHeapObjectTag;
+  if (is_weak) {
+    address &= ~i::kWeakHeapObjectMask;
+  }
+  if (i::Internals::HasHeapObjectTag(address)) {
+    std::unique_ptr<ObjectPropertiesResult> result =
+        GetHeapObjectPropertiesMaybeCompressed(address, memory_accessor,
+                                               heap_addresses, type_hint);
+    if (is_weak) {
+      result->Prepend("weak ref to ");
+    }
+    return result;
+  }
+
+  // For smi values, construct a response with a description representing the
+  // untagged value.
+  int32_t value = i::PlatformSmiTagging::SmiToInt(address);
+  std::stringstream stream;
+  stream << value << " (0x" << std::hex << value << ")";
+  return std::make_unique<ObjectPropertiesResult>(d::TypeCheckResult::kSmi,
+                                                  stream.str(), kSmi);
+}
+
+std::unique_ptr<StackFrameResult> GetStackFrame(
+    uintptr_t frame_pointer, d::MemoryAccessor memory_accessor) {
+  // Read the data at frame_pointer + kContextOrFrameTypeOffset.
+  intptr_t context_or_frame_type = 0;
+  d::MemoryAccessResult validity = memory_accessor(
+      frame_pointer + CommonFrameConstants::kContextOrFrameTypeOffset,
+      reinterpret_cast<void*>(&context_or_frame_type), sizeof(intptr_t));
+  auto props = std::vector<std::unique_ptr<ObjectProperty>>();
+  if (validity == d::MemoryAccessResult::kOk) {
+    // If it holds a context rather than a frame-type marker, add the property
+    // "currently_executing_jsfunction".
+    if (!StackFrame::IsTypeMarker(context_or_frame_type)) {
+      props.push_back(std::make_unique<ObjectProperty>(
+          "currently_executing_jsfunction",
+          CheckTypeName<v8::internal::JSFunction>("v8::internal::JSFunction"),
+          CheckTypeName<v8::internal::JSFunction*>("v8::internal::JSFunction"),
+          frame_pointer + StandardFrameConstants::kFunctionOffset, 1,
+          sizeof(v8::internal::JSFunction),
+          std::vector<std::unique_ptr<StructProperty>>(),
+          d::PropertyKind::kSingle));
+    }
+  }
+
+  return std::make_unique<StackFrameResult>(std::move(props));
+}
+
+}  // namespace debug_helper_internal
+}  // namespace internal
+}  // namespace v8
+
+namespace di = v8::internal::debug_helper_internal;
+
+extern "C" {
+V8_DEBUG_HELPER_EXPORT d::ObjectPropertiesResult*
+_v8_debug_helper_GetObjectProperties(uintptr_t object,
+                                     d::MemoryAccessor memory_accessor,
+                                     const d::HeapAddresses& heap_addresses,
+                                     const char* type_hint) {
+  return di::GetObjectProperties(object, memory_accessor, heap_addresses,
+                                 type_hint)
+      .release()
+      ->GetPublicView();
+}
+V8_DEBUG_HELPER_EXPORT void _v8_debug_helper_Free_ObjectPropertiesResult(
+    d::ObjectPropertiesResult* result) {
+  std::unique_ptr<di::ObjectPropertiesResult> ptr(
+      static_cast<di::ObjectPropertiesResultExtended*>(result)->base);
+}
+
+V8_DEBUG_HELPER_EXPORT d::StackFrameResult* _v8_debug_helper_GetStackFrame(
+    uintptr_t frame_pointer, d::MemoryAccessor memory_accessor) {
+  return di::GetStackFrame(frame_pointer, memory_accessor)
+      .release()
+      ->GetPublicView();
+}
+V8_DEBUG_HELPER_EXPORT void _v8_debug_helper_Free_StackFrameResult(
+    d::StackFrameResult* result) {
+  std::unique_ptr<di::StackFrameResult> ptr(
+      static_cast<di::StackFrameResultExtended*>(result)->base);
+}
+}
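A short, hedged sketch of how the stack-frame path above is consumed through
the wrappers in debug-helper.h; DescribeFrame is a placeholder, and the frame
pointer and accessor would be supplied by the debugger.

    #include "debug-helper.h"

    namespace dh = v8::debug_helper;

    void DescribeFrame(uintptr_t frame_pointer, dh::MemoryAccessor accessor) {
      dh::StackFrameResultPtr frame =
          dh::GetStackFrame(frame_pointer, accessor);
      for (size_t i = 0; i < frame->num_properties; ++i) {
        const dh::ObjectProperty* prop = frame->properties[i];
        // With the implementation above, the only property currently emitted
        // is "currently_executing_jsfunction", present when the slot at
        // kContextOrFrameTypeOffset holds a context rather than a type marker.
        (void)prop;
      }
    }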
diff --git a/src/third_party/v8/tools/debug_helper/heap-constants.cc b/src/third_party/v8/tools/debug_helper/heap-constants.cc
new file mode 100644
index 0000000..f62dd9b
--- /dev/null
+++ b/src/third_party/v8/tools/debug_helper/heap-constants.cc
@@ -0,0 +1,89 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "heap-constants.h"
+#include "src/common/globals.h"
+
+namespace d = v8::debug_helper;
+
+namespace v8 {
+namespace internal {
+namespace debug_helper_internal {
+
+std::string FindKnownObject(uintptr_t address,
+                            const d::HeapAddresses& heap_addresses) {
+  uintptr_t containing_page = address & ~i::kPageAlignmentMask;
+  uintptr_t offset_in_page = address & i::kPageAlignmentMask;
+
+  // If there's a match with a known page, then search only that page.
+  if (containing_page == heap_addresses.map_space_first_page) {
+    return FindKnownObjectInMapSpace(offset_in_page);
+  }
+  if (containing_page == heap_addresses.old_space_first_page) {
+    return FindKnownObjectInOldSpace(offset_in_page);
+  }
+  if (containing_page == heap_addresses.read_only_space_first_page) {
+    return FindKnownObjectInReadOnlySpace(offset_in_page);
+  }
+
+  // For any unknown pages, compile a list of things this object might be.
+  std::string result;
+  if (heap_addresses.map_space_first_page == 0) {
+    std::string sub_result = FindKnownObjectInMapSpace(offset_in_page);
+    if (!sub_result.empty()) {
+      result += "maybe " + sub_result;
+    }
+  }
+  if (heap_addresses.old_space_first_page == 0) {
+    std::string sub_result = FindKnownObjectInOldSpace(offset_in_page);
+    if (!sub_result.empty()) {
+      result = (result.empty() ? "" : result + ", ") + "maybe " + sub_result;
+    }
+  }
+  if (heap_addresses.read_only_space_first_page == 0) {
+    std::string sub_result = FindKnownObjectInReadOnlySpace(offset_in_page);
+    if (!sub_result.empty()) {
+      result = (result.empty() ? "" : result + ", ") + "maybe " + sub_result;
+    }
+  }
+
+  return result;
+}
+
+KnownInstanceType FindKnownMapInstanceTypes(
+    uintptr_t address, const d::HeapAddresses& heap_addresses) {
+  uintptr_t containing_page = address & ~i::kPageAlignmentMask;
+  uintptr_t offset_in_page = address & i::kPageAlignmentMask;
+
+  // If there's a match with a known page, then search only that page.
+  if (containing_page == heap_addresses.map_space_first_page) {
+    return KnownInstanceType(
+        FindKnownMapInstanceTypeInMapSpace(offset_in_page));
+  }
+  if (containing_page == heap_addresses.read_only_space_first_page) {
+    return KnownInstanceType(
+        FindKnownMapInstanceTypeInReadOnlySpace(offset_in_page));
+  }
+
+  // For any unknown pages, compile a list of things this object might be.
+  KnownInstanceType result;
+  if (heap_addresses.map_space_first_page == 0) {
+    int sub_result = FindKnownMapInstanceTypeInMapSpace(offset_in_page);
+    if (sub_result >= 0) {
+      result.types.push_back(static_cast<i::InstanceType>(sub_result));
+    }
+  }
+  if (heap_addresses.read_only_space_first_page == 0) {
+    int sub_result = FindKnownMapInstanceTypeInReadOnlySpace(offset_in_page);
+    if (sub_result >= 0) {
+      result.types.push_back(static_cast<i::InstanceType>(sub_result));
+    }
+  }
+
+  return result;
+}
+
+}  // namespace debug_helper_internal
+}  // namespace internal
+}  // namespace v8
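A small worked example of the page masking above, assuming 256 KiB pages so
that kPageAlignmentMask == 0x3FFFF; the address is made up.

    #include <cstdint>

    constexpr uintptr_t kAssumedPageAlignmentMask = 0x3FFFF;  // 256 KiB - 1
    constexpr uintptr_t kExampleAddress = 0x55aa00042119;
    // containing_page == 0x55aa00040000, offset_in_page == 0x2119.
    constexpr uintptr_t containing_page =
        kExampleAddress & ~kAssumedPageAlignmentMask;
    constexpr uintptr_t offset_in_page =
        kExampleAddress & kAssumedPageAlignmentMask;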
diff --git a/src/third_party/v8/tools/debug_helper/heap-constants.h b/src/third_party/v8/tools/debug_helper/heap-constants.h
new file mode 100644
index 0000000..8962047
--- /dev/null
+++ b/src/third_party/v8/tools/debug_helper/heap-constants.h
@@ -0,0 +1,71 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TOOLS_DEBUG_HELPER_HEAP_CONSTANTS_H_
+#define V8_TOOLS_DEBUG_HELPER_HEAP_CONSTANTS_H_
+
+#include <cstdint>
+#include <string>
+#include <vector>
+
+#include "debug-helper.h"
+#include "src/objects/instance-type.h"
+
+namespace d = v8::debug_helper;
+
+namespace v8 {
+namespace internal {
+namespace debug_helper_internal {
+
+// ===== Functions generated by gen-heap-constants.py: =========================
+
+// Returns the name of a known object, given its offset within the first page of
+// the space, or empty string on failure.
+std::string FindKnownObjectInOldSpace(uintptr_t offset);
+std::string FindKnownObjectInReadOnlySpace(uintptr_t offset);
+std::string FindKnownObjectInMapSpace(uintptr_t offset);
+
+// In builds with pointer compression enabled, sets the *_first_page members in
+// the HeapAddresses object. In other builds, does nothing.
+void FillInUnknownHeapAddresses(d::HeapAddresses* heap_addresses,
+                                uintptr_t any_uncompressed_ptr);
+
+// Returns the instance type for the known Map, given its offset within the
+// first page of the space, or -1 on failure.
+int FindKnownMapInstanceTypeInMapSpace(uintptr_t offset);
+int FindKnownMapInstanceTypeInReadOnlySpace(uintptr_t offset);
+
+// ===== End of generated functions. ===========================================
+
+// Returns a descriptive string if the given address matches a known object, or
+// an empty string otherwise.
+std::string FindKnownObject(uintptr_t address,
+                            const d::HeapAddresses& heap_addresses);
+
+struct KnownInstanceType {
+  enum class Confidence {
+    kLow,
+    kHigh,
+  };
+  KnownInstanceType() : confidence(Confidence::kLow) {}
+  KnownInstanceType(int type) : KnownInstanceType() {
+    if (type >= 0) {
+      confidence = Confidence::kHigh;
+      types.push_back(static_cast<v8::internal::InstanceType>(type));
+    }
+  }
+  Confidence confidence;
+  std::vector<v8::internal::InstanceType> types;
+};
+
+// Returns information about the instance type of the Map at the given address,
+// based on the list of known Maps.
+KnownInstanceType FindKnownMapInstanceTypes(
+    uintptr_t address, const d::HeapAddresses& heap_addresses);
+
+}  // namespace debug_helper_internal
+}  // namespace internal
+}  // namespace v8
+
+#endif
diff --git a/src/third_party/v8/tools/debug_helper/list-object-classes.cc b/src/third_party/v8/tools/debug_helper/list-object-classes.cc
new file mode 100644
index 0000000..0e17dcb
--- /dev/null
+++ b/src/third_party/v8/tools/debug_helper/list-object-classes.cc
@@ -0,0 +1,15 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "debug-helper-internal.h"
+#include "torque-generated/class-debug-readers.h"
+
+namespace di = v8::internal::debug_helper_internal;
+
+extern "C" {
+V8_DEBUG_HELPER_EXPORT const d::ClassList*
+_v8_debug_helper_ListObjectClasses() {
+  return &di::kObjectClassList;
+}
+}
diff --git a/src/third_party/v8/tools/deprecation_stats.py b/src/third_party/v8/tools/deprecation_stats.py
new file mode 100755
index 0000000..628eebc
--- /dev/null
+++ b/src/third_party/v8/tools/deprecation_stats.py
@@ -0,0 +1,108 @@
+#!/usr/bin/env python
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import argparse
+from datetime import datetime
+import re
+import subprocess
+import sys
+
+RE_GITHASH = re.compile(r"^[0-9a-f]{40}")
+RE_AUTHOR_TIME = re.compile(r"^author-time (\d+)$")
+RE_FILENAME = re.compile(r"^filename (.+)$")
+
+def GetBlame(file_path):
+  result = subprocess.check_output(
+      ['git', 'blame', '-t', '--line-porcelain', file_path])
+  line_iter = iter(result.splitlines())
+  blame_list = list()
+  current_blame = None
+  while True:
+    line = next(line_iter, None)
+    if line is None:
+      break
+    if RE_GITHASH.match(line):
+      if current_blame is not None:
+        blame_list.append(current_blame)
+      current_blame = {'time': 0, 'filename': None, 'content': None}
+      continue
+    match = RE_AUTHOR_TIME.match(line)
+    if match:
+      current_blame['time'] = datetime.fromtimestamp(int(match.groups()[0]))
+      continue
+    match = RE_FILENAME.match(line)
+    if match:
+      current_blame['filename'] = match.groups()[0]
+      current_blame['content'] = next(line_iter).strip()
+      continue
+  blame_list.append(current_blame)
+  return blame_list
+
+RE_MACRO_END = re.compile(r"\);")
+RE_DEPRECATE_MACRO = re.compile(r"\(.*?,(.*)\);", re.MULTILINE)
+
+def FilterAndPrint(blame_list, macro, before):
+  index = 0
+  re_macro = re.compile(macro)
+  deprecated = list()
+  while index < len(blame_list):
+    blame = blame_list[index]
+    match = re_macro.search(blame['content'])
+    if match and blame['time'] < before:
+      line = blame['content']
+      time = blame['time']
+      pos = match.end()
+      start = -1
+      parens = 0
+      quotes = 0
+      while True:
+        if pos >= len(line):
+          # extend to next line
+          index = index + 1
+          blame = blame_list[index]
+          if line.endswith(','):
+            # add whitespace when breaking line due to comma
+            line = line + ' '
+          line = line + blame['content']
+        if line[pos] == '(':
+          parens = parens + 1
+        elif line[pos] == ')':
+          parens = parens - 1
+          if parens == 0:
+            break
+        elif line[pos] == '"':
+          quotes = quotes + 1
+        elif line[pos] == ',' and quotes % 2 == 0 and start == -1:
+          start = pos + 1
+        pos = pos + 1
+      deprecated.append([index + 1, time, line[start:pos].strip()])
+    index = index + 1
+  print("Marked as " + macro + ": " + str(len(deprecated)))
+  for linenumber, time, content in deprecated:
+    print(str(linenumber).rjust(8) + " : " + str(time) + " : " + content)
+  return len(deprecated)
+
+def ParseOptions(args):
+  parser = argparse.ArgumentParser(description="Collect deprecation statistics")
+  parser.add_argument("file_path", help="Path to v8.h")
+  parser.add_argument("--before", help="Filter by date")
+  options = parser.parse_args(args)
+  if options.before:
+    options.before = datetime.strptime(options.before, '%Y-%m-%d')
+  else:
+    options.before = datetime.now()
+  return options
+
+def Main(args):
+  options = ParseOptions(args)
+  blame_list = GetBlame(options.file_path)
+  FilterAndPrint(blame_list, "V8_DEPRECATE_SOON", options.before)
+  FilterAndPrint(blame_list, "V8_DEPRECATED", options.before)
+
+if __name__ == "__main__":
+  Main(sys.argv[1:])
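
For reference, a minimal sketch of invoking deprecation_stats.py; the header path and cutoff date below are illustrative assumptions, and the command has to run inside a git checkout because the script shells out to git blame.

    import subprocess

    # Count V8_DEPRECATE_SOON / V8_DEPRECATED annotations older than the cutoff.
    subprocess.check_call([
        "python", "tools/deprecation_stats.py",
        "include/v8.h",            # positional argument: path to v8.h
        "--before", "2020-01-01",  # optional cutoff, parsed as %Y-%m-%d
    ])
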
diff --git a/src/third_party/v8/tools/detect-builtins.js b/src/third_party/v8/tools/detect-builtins.js
new file mode 100644
index 0000000..90bdc08
--- /dev/null
+++ b/src/third_party/v8/tools/detect-builtins.js
@@ -0,0 +1,62 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function(global) {
+
+  var GetProperties = function(this_name, object) {
+    var result = {};
+    try {
+      var names = Object.getOwnPropertyNames(object);
+    } catch(e) {
+      return;
+    }
+    for (var i = 0; i < names.length; ++i) {
+      var name = names[i];
+      if (typeof object === "function") {
+        if (name === "length" ||
+            name === "name" ||
+            name === "arguments" ||
+            name === "caller" ||
+            name === "prototype") {
+          continue;
+        }
+      }
+      // Avoid endless recursion.
+      if (this_name === "prototype" && name === "constructor") continue;
+      // Avoid needless duplication.
+      if (this_name === "__PROTO__" && name === "constructor") continue;
+      // Could get this from the parent, but having it locally is easier.
+      var property = { "name": name };
+      try {
+        var value = object[name];
+      } catch(e) {
+        property.type = "getter";
+        result[name] = property;
+        continue;
+      }
+      var type = typeof value;
+      property.type = type;
+      if (type === "function") {
+        property.length = value.length;
+        property.prototype = GetProperties("prototype", value.prototype);
+      }
+      if (type === "string" || type === "number") {
+        property.value = value;
+      } else {
+        property.properties = GetProperties(name, value);
+      }
+      result[name] = property;
+    }
+    // Print the __proto__ if it's not the default Object prototype.
+    if (typeof object === "object" && object.__proto__ !== null &&
+        !object.__proto__.hasOwnProperty("__proto__")) {
+      result.__PROTO__ = GetProperties("__PROTO__", object.__proto__);
+    }
+    return result;
+  };
+
+  var g = GetProperties("", global, "");
+  print(JSON.stringify(g, undefined, 2));
+
+})(this);  // Must wrap in anonymous closure or it'll detect itself as builtin.
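
A hedged usage note for detect-builtins.js: it relies on a shell-provided print(), so it is typically run under a JavaScript shell such as d8 and its JSON output captured for comparison between builds; the d8 path below is an assumption.

    import subprocess

    # Capture the JSON description of the shell's builtins.
    json_text = subprocess.check_output(
        ["out/x64.release/d8", "tools/detect-builtins.js"])
    print(json_text.decode("utf-8")[:200])  # peek at the beginning
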
diff --git a/src/third_party/v8/tools/dev/gen-tags.py b/src/third_party/v8/tools/dev/gen-tags.py
new file mode 100755
index 0000000..a478ee3
--- /dev/null
+++ b/src/third_party/v8/tools/dev/gen-tags.py
@@ -0,0 +1,93 @@
+#!/usr/bin/env python
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""\
+Convenience script for generating an arch-specific ctags file.
+This script MUST be executed from the top-level V8 directory.
+
+Usage:
+    $ tools/dev/gen-tags.py [<arch>...]
+
+The example usage is as follows:
+    $ tools/dev/gen-tags.py x64
+
+If no <arch> is given, it generates tags file for all arches:
+    $ tools/dev/gen-tags.py
+"""
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import os
+import subprocess
+import sys
+
+# All arches that this script understands.
+ARCHES = ["ia32", "x64", "arm", "arm64", "mips", "mips64", "ppc", "s390"]
+
+def PrintHelpAndExit():
+  print(__doc__)
+  sys.exit(0)
+
+
+def _Call(cmd, silent=False):
+  if not silent: print("# %s" % cmd)
+  return subprocess.call(cmd, shell=True)
+
+
+def ParseArguments(argv):
+  if not "tools/dev" in argv[0]:
+    PrintHelpAndExit()
+  argv = argv[1:]
+
+  # If no argument is given, then generate ctags for all arches.
+  if len(argv) == 0:
+    return ARCHES
+
+  user_arches = []
+  for argstring in argv:
+    if argstring in ("-h", "--help", "help"):
+      PrintHelpAndExit()
+    if argstring not in ARCHES:
+      print("Invalid argument: %s" % argstring)
+      sys.exit(1)
+    user_arches.append(argstring)
+
+  return user_arches
+
+
+def Exclude(fullpath, exclude_arches):
+  for arch in exclude_arches:
+    if ("/%s/" % arch) in fullpath: return True
+  return False
+
+
+def Main(argv):
+  user_arches = []
+
+  user_arches = ParseArguments(argv)
+
+  exclude_arches = list(ARCHES)
+  for user_arch in user_arches:
+    exclude_arches.remove(user_arch)
+
+  paths = ["include", "src", "test"]
+  exts = [".h", ".cc", ".c"]
+
+  gtags_filename = "gtags.files"
+
+  with open(gtags_filename, "w") as gtags:
+    for path in paths:
+      for root, dirs, files in os.walk(path):
+        for file in files:
+          if not file.endswith(tuple(exts)): continue
+          fullpath = os.path.join(root, file)
+          if Exclude(fullpath, exclude_arches): continue
+          gtags.write(fullpath + os.linesep)
+
+  _Call("ctags --fields=+l -L " + gtags_filename)
+
+
+if __name__ == "__main__":
+  sys.exit(Main(sys.argv))
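
As a standalone sketch of the architecture filter used by gen-tags.py (the paths below are illustrative), only files that do not live under an excluded architecture directory end up in gtags.files.

    # Mirrors gen-tags.py's Exclude(): drop paths under excluded arch dirs.
    def exclude(fullpath, exclude_arches):
      return any(("/%s/" % arch) in fullpath for arch in exclude_arches)

    print(exclude("src/codegen/arm/assembler-arm.cc", ["arm", "mips"]))  # True
    print(exclude("src/codegen/x64/assembler-x64.cc", ["arm", "mips"]))  # False
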
diff --git a/src/third_party/v8/tools/dev/gm.py b/src/third_party/v8/tools/dev/gm.py
new file mode 100755
index 0000000..a211a99
--- /dev/null
+++ b/src/third_party/v8/tools/dev/gm.py
@@ -0,0 +1,444 @@
+#!/usr/bin/env python2
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""\
+Convenience wrapper for compiling V8 with gn/ninja and running tests.
+Sets up build output directories if they don't exist.
+Produces simulator builds for non-Intel target architectures.
+Uses Goma by default if it is detected (at output directory setup time).
+Expects to be run from the root of a V8 checkout.
+
+Usage:
+    gm.py [<arch>].[<mode>[-<suffix>]].[<target>] [testname...] [--flag]
+
+All arguments are optional. Most combinations should work, e.g.:
+    gm.py ia32.debug x64.release x64.release-my-custom-opts d8
+    gm.py android_arm.release.check --progress=verbose
+    gm.py x64 mjsunit/foo cctest/test-bar/*
+
+Flags are passed unchanged to the test runner. They must start with -- and must
+not contain spaces.
+"""
+# See HELP below for additional documentation.
+# Note on Python3 compatibility: gm.py itself is Python3 compatible, but
+# run-tests.py, which will be executed by the same binary, is not; hence
+# the hashbang line at the top of this file explicitly requires Python2.
+
+from __future__ import print_function
+import errno
+import os
+import re
+import subprocess
+import sys
+
+USE_PTY = "linux" in sys.platform
+if USE_PTY:
+  import pty
+
+BUILD_TARGETS_TEST = ["d8", "cctest", "inspector-test", "unittests",
+                      "wasm_api_tests"]
+BUILD_TARGETS_ALL = ["all"]
+
+# All arches that this script understands.
+ARCHES = ["ia32", "x64", "arm", "arm64", "mipsel", "mips64el", "ppc", "ppc64",
+          "s390", "s390x", "android_arm", "android_arm64"]
+# Arches that get built/run when you don't specify any.
+DEFAULT_ARCHES = ["ia32", "x64", "arm", "arm64"]
+# Modes that this script understands.
+MODES = ["release", "debug", "optdebug"]
+# Modes that get built/run when you don't specify any.
+DEFAULT_MODES = ["release", "debug"]
+# Build targets that can be manually specified.
+TARGETS = ["d8", "cctest", "unittests", "v8_fuzzers", "wasm_api_tests", "wee8",
+           "mkgrokdump", "generate-bytecode-expectations", "inspector-test"]
+# Build targets that get built when you don't specify any (and specified tests
+# don't imply any other targets).
+DEFAULT_TARGETS = ["d8"]
+# Tests that run-tests.py would run by default and that are covered by the
+# BUILD_TARGETS_TEST binaries.
+DEFAULT_TESTS = ["cctest", "debugger", "intl", "message", "mjsunit",
+                 "unittests"]
+# These can be suffixed to any <arch>.<mode> combo, or used standalone,
+# or used as global modifiers (affecting all <arch>.<mode> combos).
+ACTIONS = {
+  "all": {"targets": BUILD_TARGETS_ALL, "tests": []},
+  "tests": {"targets": BUILD_TARGETS_TEST, "tests": []},
+  "check": {"targets": BUILD_TARGETS_TEST, "tests": DEFAULT_TESTS},
+  "checkall": {"targets": BUILD_TARGETS_ALL, "tests": ["ALL"]},
+}
+
+HELP = """<arch> can be any of: %(arches)s
+<mode> can be any of: %(modes)s
+<target> can be any of:
+ - %(targets)s (build respective binary)
+ - all (build all binaries)
+ - tests (build test binaries)
+ - check (build test binaries, run most tests)
+ - checkall (build all binaries, run more tests)
+""" % {"arches": " ".join(ARCHES),
+       "modes": " ".join(MODES),
+       "targets": ", ".join(TARGETS)}
+
+TESTSUITES_TARGETS = {"benchmarks": "d8",
+              "cctest": "cctest",
+              "debugger": "d8",
+              "fuzzer": "v8_fuzzers",
+              "inspector": "inspector-test",
+              "intl": "d8",
+              "message": "d8",
+              "mjsunit": "d8",
+              "mozilla": "d8",
+              "test262": "d8",
+              "unittests": "unittests",
+              "wasm-api-tests": "wasm_api_tests",
+              "wasm-js": "d8",
+              "wasm-spec-tests": "d8",
+              "webkit": "d8"}
+
+OUTDIR = "out"
+
+def DetectGoma():
+  home_goma = os.path.expanduser("~/goma")
+  if os.path.exists(home_goma):
+    return home_goma
+  if os.environ.get("GOMA_DIR"):
+    return os.environ.get("GOMA_DIR")
+  if os.environ.get("GOMADIR"):
+    return os.environ.get("GOMADIR")
+  return None
+
+GOMADIR = DetectGoma()
+IS_GOMA_MACHINE = GOMADIR is not None
+
+USE_GOMA = "true" if IS_GOMA_MACHINE else "false"
+
+RELEASE_ARGS_TEMPLATE = """\
+is_component_build = false
+is_debug = false
+%s
+use_goma = {GOMA}
+goma_dir = \"{GOMA_DIR}\"
+v8_enable_backtrace = true
+v8_enable_disassembler = true
+v8_enable_object_print = true
+v8_enable_verify_heap = true
+""".replace("{GOMA}", USE_GOMA).replace("{GOMA_DIR}", str(GOMADIR))
+
+DEBUG_ARGS_TEMPLATE = """\
+is_component_build = true
+is_debug = true
+symbol_level = 2
+%s
+use_goma = {GOMA}
+goma_dir = \"{GOMA_DIR}\"
+v8_enable_backtrace = true
+v8_enable_fast_mksnapshot = true
+v8_enable_slow_dchecks = true
+v8_optimized_debug = false
+""".replace("{GOMA}", USE_GOMA).replace("{GOMA_DIR}", str(GOMADIR))
+
+OPTDEBUG_ARGS_TEMPLATE = """\
+is_component_build = true
+is_debug = true
+symbol_level = 1
+%s
+use_goma = {GOMA}
+goma_dir = \"{GOMA_DIR}\"
+v8_enable_backtrace = true
+v8_enable_fast_mksnapshot = true
+v8_enable_verify_heap = true
+v8_optimized_debug = true
+""".replace("{GOMA}", USE_GOMA).replace("{GOMA_DIR}", str(GOMADIR))
+
+ARGS_TEMPLATES = {
+  "release": RELEASE_ARGS_TEMPLATE,
+  "debug": DEBUG_ARGS_TEMPLATE,
+  "optdebug": OPTDEBUG_ARGS_TEMPLATE
+}
+
+def PrintHelpAndExit():
+  print(__doc__)
+  print(HELP)
+  sys.exit(0)
+
+def _Call(cmd, silent=False):
+  if not silent: print("# %s" % cmd)
+  return subprocess.call(cmd, shell=True)
+
+def _CallWithOutputNoTerminal(cmd):
+  return subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
+
+def _CallWithOutput(cmd):
+  print("# %s" % cmd)
+  # The following trickery is required so that the 'cmd' thinks it's running
+  # in a real terminal, while this script gets to intercept its output.
+  parent, child = pty.openpty()
+  p = subprocess.Popen(cmd, shell=True, stdin=child, stdout=child, stderr=child)
+  os.close(child)
+  output = []
+  try:
+    while True:
+      try:
+        data = os.read(parent, 512).decode('utf-8')
+      except OSError as e:
+        if e.errno != errno.EIO: raise
+        break # EIO means EOF on some systems
+      else:
+        if not data: # EOF
+          break
+        print(data, end="")
+        sys.stdout.flush()
+        output.append(data)
+  finally:
+    os.close(parent)
+    p.wait()
+  return p.returncode, "".join(output)
+
+def _Which(cmd):
+  for path in os.environ["PATH"].split(os.pathsep):
+    if os.path.exists(os.path.join(path, cmd)):
+      return os.path.join(path, cmd)
+  return None
+
+def _Write(filename, content):
+  print("# echo > %s << EOF\n%sEOF" % (filename, content))
+  with open(filename, "w") as f:
+    f.write(content)
+
+def _Notify(summary, body):
+  if _Which('notify-send') is not None:
+    _Call("notify-send '{}' '{}'".format(summary, body), silent=True)
+  else:
+    print("{} - {}".format(summary, body))
+
+def GetPath(arch, mode):
+  subdir = "%s.%s" % (arch, mode)
+  return os.path.join(OUTDIR, subdir)
+
+def PrepareMksnapshotCmdline(orig_cmdline, path):
+  result = "gdb --args %s/mksnapshot " % path
+  for w in orig_cmdline.split(" "):
+    if w.startswith("gen/") or w.startswith("snapshot_blob"):
+      result += ("%(path)s%(sep)s%(arg)s " %
+                 {"path": path, "sep": os.sep, "arg": w})
+    else:
+      result += "%s " % w
+  return result
+
+class Config(object):
+  def __init__(self, arch, mode, targets, tests=[], testrunner_args=[]):
+    self.arch = arch
+    self.mode = mode
+    self.targets = set(targets)
+    self.tests = set(tests)
+    self.testrunner_args = testrunner_args
+
+  def Extend(self, targets, tests=[]):
+    self.targets.update(targets)
+    self.tests.update(tests)
+
+  def GetTargetCpu(self):
+    if self.arch == "android_arm": return "target_cpu = \"arm\""
+    if self.arch == "android_arm64": return "target_cpu = \"arm64\""
+    cpu = "x86"
+    if "64" in self.arch or self.arch == "s390x":
+      cpu = "x64"
+    return "target_cpu = \"%s\"" % cpu
+
+  def GetV8TargetCpu(self):
+    if self.arch == "android_arm": return "\nv8_target_cpu = \"arm\""
+    if self.arch == "android_arm64": return "\nv8_target_cpu = \"arm64\""
+    if self.arch in ("arm", "arm64", "mipsel", "mips64el", "ppc", "ppc64",
+                     "s390", "s390x"):
+      return "\nv8_target_cpu = \"%s\"" % self.arch
+    return ""
+
+  def GetTargetOS(self):
+    if self.arch in ("android_arm", "android_arm64"):
+      return "\ntarget_os = \"android\""
+    return ""
+
+  def GetGnArgs(self):
+    # Use only substring before first '-' as the actual mode
+    mode = re.match("([^-]+)", self.mode).group(1)
+    template = ARGS_TEMPLATES[mode]
+    arch_specific = (self.GetTargetCpu() + self.GetV8TargetCpu() +
+                     self.GetTargetOS())
+    return template % arch_specific
+
+  def Build(self):
+    path = GetPath(self.arch, self.mode)
+    args_gn = os.path.join(path, "args.gn")
+    build_ninja = os.path.join(path, "build.ninja")
+    if not os.path.exists(path):
+      print("# mkdir -p %s" % path)
+      os.makedirs(path)
+    if not os.path.exists(args_gn):
+      _Write(args_gn, self.GetGnArgs())
+    if not os.path.exists(build_ninja):
+      code = _Call("gn gen %s" % path)
+      if code != 0: return code
+    targets = " ".join(self.targets)
+    # The implementation of mksnapshot failure detection relies on
+    # the "pty" module and GDB presence, so skip it on non-Linux.
+    if not USE_PTY:
+      return _Call("autoninja -C %s %s" % (path, targets))
+
+    return_code, output = _CallWithOutput("autoninja -C %s %s" %
+                                          (path, targets))
+    if return_code != 0 and "FAILED:" in output and "snapshot_blob" in output:
+      csa_trap = re.compile("Specify option( --csa-trap-on-node=[^ ]*)")
+      match = csa_trap.search(output)
+      extra_opt = match.group(1) if match else ""
+      cmdline = re.compile("python ../../tools/run.py ./mksnapshot (.*)")
+      orig_cmdline = cmdline.search(output).group(1).strip()
+      cmdline = PrepareMksnapshotCmdline(orig_cmdline, path) + extra_opt
+      _Notify("V8 build requires your attention",
+              "Detected mksnapshot failure, re-running in GDB...")
+      _Call(cmdline)
+    return return_code
+
+  def RunTests(self):
+    # Special handling for "mkgrokdump": if it was built, run it.
+    if (self.arch == "x64" and self.mode == "release" and
+        "mkgrokdump" in self.targets):
+      _Call("%s/mkgrokdump > tools/v8heapconst.py" %
+            GetPath(self.arch, self.mode))
+    if not self.tests: return 0
+    if "ALL" in self.tests:
+      tests = ""
+    else:
+      tests = " ".join(self.tests)
+    return _Call('"%s" ' % sys.executable +
+                 os.path.join("tools", "run-tests.py") +
+                 " --outdir=%s %s %s" % (
+                     GetPath(self.arch, self.mode), tests,
+                     " ".join(self.testrunner_args)))
+
+def GetTestBinary(argstring):
+  for suite in TESTSUITES_TARGETS:
+    if argstring.startswith(suite): return TESTSUITES_TARGETS[suite]
+  return None
+
+class ArgumentParser(object):
+  def __init__(self):
+    self.global_targets = set()
+    self.global_tests = set()
+    self.global_actions = set()
+    self.configs = {}
+    self.testrunner_args = []
+
+  def PopulateConfigs(self, arches, modes, targets, tests):
+    for a in arches:
+      for m in modes:
+        path = GetPath(a, m)
+        if path not in self.configs:
+          self.configs[path] = Config(a, m, targets, tests,
+                  self.testrunner_args)
+        else:
+          self.configs[path].Extend(targets, tests)
+
+  def ProcessGlobalActions(self):
+    have_configs = len(self.configs) > 0
+    for action in self.global_actions:
+      impact = ACTIONS[action]
+      if have_configs:
+        for c in self.configs:
+          self.configs[c].Extend(**impact)
+      else:
+        self.PopulateConfigs(DEFAULT_ARCHES, DEFAULT_MODES, **impact)
+
+  def ParseArg(self, argstring):
+    if argstring in ("-h", "--help", "help"):
+      PrintHelpAndExit()
+    arches = []
+    modes = []
+    targets = []
+    actions = []
+    tests = []
+    # Special handling for "mkgrokdump": build it for x64.release.
+    if argstring == "mkgrokdump":
+      self.PopulateConfigs(["x64"], ["release"], ["mkgrokdump"], [])
+      return
+    # Specifying a single unit test looks like "unittests/Foo.Bar", test262
+    # tests have names like "S15.4.4.7_A4_T1", don't split these.
+    if argstring.startswith("unittests/") or argstring.startswith("test262/"):
+      words = [argstring]
+    elif argstring.startswith("--"):
+      # Pass all other flags to test runner.
+      self.testrunner_args.append(argstring)
+      return
+    else:
+      # Assume it's a word like "x64.release" -> split at the dot.
+      words = argstring.split('.')
+    if len(words) == 1:
+      word = words[0]
+      if word in ACTIONS:
+        self.global_actions.add(word)
+        return
+      if word in TARGETS:
+        self.global_targets.add(word)
+        return
+      maybe_target = GetTestBinary(word)
+      if maybe_target is not None:
+        self.global_tests.add(word)
+        self.global_targets.add(maybe_target)
+        return
+    for word in words:
+      if word in ARCHES:
+        arches.append(word)
+      elif word in MODES:
+        modes.append(word)
+      elif word in TARGETS:
+        targets.append(word)
+      elif word in ACTIONS:
+        actions.append(word)
+      elif any(map(lambda x: word.startswith(x + "-"), MODES)):
+        modes.append(word)
+      else:
+        print("Didn't understand: %s" % word)
+        sys.exit(1)
+    # Process actions.
+    for action in actions:
+      impact = ACTIONS[action]
+      targets += impact["targets"]
+      tests += impact["tests"]
+    # Fill in defaults for things that weren't specified.
+    arches = arches or DEFAULT_ARCHES
+    modes = modes or DEFAULT_MODES
+    targets = targets or DEFAULT_TARGETS
+    # Produce configs.
+    self.PopulateConfigs(arches, modes, targets, tests)
+
+  def ParseArguments(self, argv):
+    if len(argv) == 0:
+      PrintHelpAndExit()
+    for argstring in argv:
+      self.ParseArg(argstring)
+    self.ProcessGlobalActions()
+    for c in self.configs:
+      self.configs[c].Extend(self.global_targets, self.global_tests)
+    return self.configs
+
+def Main(argv):
+  parser = ArgumentParser()
+  configs = parser.ParseArguments(argv[1:])
+  return_code = 0
+  # If we have Goma but it is not running, start it.
+  if (GOMADIR is not None and
+      _Call("ps -e | grep compiler_proxy > /dev/null", silent=True) != 0):
+    _Call("%s/goma_ctl.py ensure_start" % GOMADIR)
+  for c in configs:
+    return_code += configs[c].Build()
+  if return_code == 0:
+    for c in configs:
+      return_code += configs[c].RunTests()
+  if return_code == 0:
+    _Notify('Done!', 'V8 compilation finished successfully.')
+  else:
+    _Notify('Error!', 'V8 compilation finished with errors.')
+  return return_code
+
+if __name__ == "__main__":
+  sys.exit(Main(sys.argv))
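
A simplified, standalone sketch of how gm.py classifies its dot-separated arguments; it illustrates ParseArg's matching order only and omits the handling of mode suffixes, test names, and test-runner flags.

    # Subsets of gm.py's tables, for illustration only.
    ARCHES = ["ia32", "x64", "arm", "arm64"]
    MODES = ["release", "debug", "optdebug"]
    TARGETS = ["d8", "cctest", "unittests"]
    ACTIONS = ["all", "tests", "check", "checkall"]

    def classify(argstring):
      buckets = {"arches": [], "modes": [], "targets": [], "actions": []}
      for word in argstring.split("."):
        if word in ARCHES: buckets["arches"].append(word)
        elif word in MODES: buckets["modes"].append(word)
        elif word in TARGETS: buckets["targets"].append(word)
        elif word in ACTIONS: buckets["actions"].append(word)
        else: raise ValueError("Didn't understand: %s" % word)
      return buckets

    print(classify("x64.release.check"))
    # {'arches': ['x64'], 'modes': ['release'], 'targets': [], 'actions': ['check']}
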
diff --git a/src/third_party/v8/tools/dev/update-compile-commands.py b/src/third_party/v8/tools/dev/update-compile-commands.py
new file mode 100755
index 0000000..41c0580
--- /dev/null
+++ b/src/third_party/v8/tools/dev/update-compile-commands.py
@@ -0,0 +1,105 @@
+#!/usr/bin/env python3
+# Copyright 2020 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""\
+Creates a "compile_commands.json" file for V8, for the needs of clangd and
+similar code indexers. Also updates generated C++ sources, and compiles the
+Torque Language Server, for a complete code indexing experience.
+"""
+
+import json
+import os
+import subprocess
+import sys
+
+PYLIB_PATH = 'tools/clang/pylib'
+GM_PATH = 'tools/dev'
+PYLIB_CHECK = os.path.join(PYLIB_PATH, 'clang', 'compile_db.py')
+GM_CHECK = os.path.join(GM_PATH, 'gm.py')
+def CheckRelativeImport(path):
+  if not os.path.exists(path):
+    print("Error: Please run this script from the root of a V8 checkout. %s "
+          "must be a valid relative path." % path)
+    sys.exit(1)
+CheckRelativeImport(PYLIB_CHECK)
+CheckRelativeImport(GM_CHECK)
+
+sys.path.insert(0, PYLIB_PATH)
+from clang import compile_db
+
+sys.path.insert(0, GM_PATH)
+import gm
+
+def _Call(cmd, silent=False):
+  if not silent: print("# %s" % cmd)
+  return subprocess.call(cmd, shell=True)
+
+def _Write(filename, content):
+  with open(filename, "w") as f:
+    f.write(content)
+
+def PrepareBuildDir(arch, mode):
+  build_dir = os.path.join("out", "%s.%s" % (arch, mode))
+  if not os.path.exists(build_dir):
+    print("# mkdir -p %s" % build_dir)
+    os.makedirs(build_dir)
+  args_gn = os.path.join(build_dir, "args.gn")
+  if not os.path.exists(args_gn):
+    conf = gm.Config(arch, mode, [])
+    _Write(args_gn, conf.GetGnArgs())
+  build_ninja = os.path.join(build_dir, "build.ninja")
+  if not os.path.exists(build_ninja):
+    code = _Call("gn gen %s" % build_dir)
+    if code != 0: raise Exception("gn gen failed")
+  else:
+    _Call("ninja -C %s build.ninja" % build_dir)
+  return build_dir
+
+def AddTargetsForArch(arch, combined):
+  build_dir = PrepareBuildDir(arch, "debug")
+  commands = compile_db.ProcessCompileDatabaseIfNeeded(
+                compile_db.GenerateWithNinja(build_dir, ["all"]))
+  added = 0
+  for c in commands:
+    key = c["file"]
+    if key not in combined:
+      combined[key] = c
+      added += 1
+  print("%s: added %d compile commands" % (arch, added))
+
+def UpdateCompileCommands():
+  print(">>> Updating compile_commands.json...")
+  combined = {}
+  AddTargetsForArch("x64", combined)
+  AddTargetsForArch("ia32", combined)
+  AddTargetsForArch("arm", combined)
+  AddTargetsForArch("arm64", combined)
+  commands = []
+  for key in combined:
+    commands.append(combined[key])
+  _Write("compile_commands.json", json.dumps(commands, indent=2))
+
+def CompileLanguageServer():
+  print(">>> Compiling Torque Language Server...")
+  PrepareBuildDir("x64", "release")
+  _Call("autoninja -C out/x64.release torque-language-server")
+
+def GenerateCCFiles():
+  print(">>> Generating generated C++ source files...")
+  # This must be called after UpdateCompileCommands().
+  assert os.path.exists("out/x64.debug/build.ninja")
+  _Call("autoninja -C out/x64.debug v8_generated_cc_files")
+
+def StartGoma():
+  gomadir = gm.DetectGoma()
+  if (gomadir is not None and
+      _Call("ps -e | grep compiler_proxy > /dev/null", silent=True) != 0):
+    _Call("%s/goma_ctl.py ensure_start" % gomadir)
+
+if __name__ == "__main__":
+  StartGoma()
+  CompileLanguageServer()
+  UpdateCompileCommands()
+  GenerateCCFiles()
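
A minimal sketch of the merge strategy in AddTargetsForArch; the per-arch entries below are made-up stand-ins for what compile_db returns. Entries are keyed by their "file" field, so the first architecture to contribute a file wins and later duplicates are dropped.

    combined = {}
    per_arch_commands = [
        [{"file": "src/api/api.cc", "command": "clang++ -DV8_TARGET_ARCH_X64 ..."}],   # x64
        [{"file": "src/api/api.cc", "command": "clang++ -DV8_TARGET_ARCH_IA32 ..."}],  # ia32
    ]
    for commands in per_arch_commands:
      for entry in commands:
        combined.setdefault(entry["file"], entry)  # first arch wins per file

    print(len(combined))  # 1: the ia32 duplicate was skipped
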
diff --git a/src/third_party/v8/tools/dev/update-vscode.sh b/src/third_party/v8/tools/dev/update-vscode.sh
new file mode 100755
index 0000000..5a0fd3d
--- /dev/null
+++ b/src/third_party/v8/tools/dev/update-vscode.sh
@@ -0,0 +1,161 @@
+#!/bin/bash
+# Copyright 2020 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# The purpose of this script is to make it easy to download/update
+# Visual Studio Code on Linux distributions where for whatever reason there
+# is no good way to do so via the package manager.
+
+# Version of this script: 2020.07.04
+
+# Basic checking of arguments: want at least one, and it's not --help.
+VERSION="$1"
+[ -z "$VERSION" -o \
+    "$VERSION" == "-h" -o \
+    "$VERSION" == "--help" -o \
+    "$VERSION" == "help" ] && {
+  echo "Usage: $0 <version>"
+  echo "<version> may be --auto for auto-detecting the latest available."
+  exit 1
+}
+
+die() {
+  echo "Error: $1"
+  exit 1
+}
+
+if [ "$VERSION" == "--auto" -o "$VERSION" == "auto" ]; then
+  echo "Searching online for latest available version..."
+  # Where to find the latest available version (we assume that it's mentioned
+  # in the first 1000 characters, which is true as of 2020-07).
+  AVAILABLE_PACKAGES_URL="https://packages.microsoft.com/repos/vscode/dists/stable/main/binary-amd64/Packages"
+  VERSION=$(curl "$AVAILABLE_PACKAGES_URL" --range 0-1000 --silent \
+            | grep "^Version: " \
+            | sed 's/[^0-9]*\([0-9.]*\).*/\1/')
+  if [ -z "$VERSION" ]; then
+    die "Detecting latest version failed, please specify it manually."
+  else
+    echo "Latest version found: $VERSION"
+  fi
+fi
+
+# Constant definitions for local paths. Edit these to your liking.
+VSCODE_DIR="$HOME/vscode"
+BACKUP_DIR="$HOME/vscode.prev"
+DOWNLOADS_DIR="$HOME/Downloads"
+DOWNLOAD_FILE="$DOWNLOADS_DIR/vscode-$VERSION.tar.gz"
+DESKTOP_FILE_DIR="$HOME/.local/share/applications"
+DESKTOP_FILE="$DESKTOP_FILE_DIR/code.desktop"
+
+# Constant definitions for remote/upstream things. Might need to be updated
+# when upstream changes things.
+# Where to find the version inside VS Code's installation directory.
+PACKAGE_JSON="$VSCODE_DIR/resources/app/package.json"
+ICON="$VSCODE_DIR/resources/app/resources/linux/code.png"
+# Where to download the archive.
+DOWNLOAD_URL="https://update.code.visualstudio.com/$VERSION/linux-x64/stable"
+CODE_BIN="$VSCODE_DIR/bin/code"
+
+# Check for "code" in $PATH; create a symlink if we can find a good place.
+SYMLINK=$(which code)
+if [ -z "$SYMLINK" ]; then
+  IFS=':' read -ra PATH_ARRAY <<< "$PATH"
+  for P in "${PATH_ARRAY[@]}"; do
+    if [ "$P" == "$HOME/bin" -o \
+         "$P" == "$HOME/local/bin" -o \
+         "$P" == "$HOME/.local/bin" ]; then
+      LOCALBIN="$P"
+      break
+    fi
+  done
+  if [ -n "$LOCALBIN" ]; then
+    echo "Adding symlink to $LOCALBIN..."
+    if [ ! -d "$LOCALBIN" ]; then
+      mkdir -p "$LOCALBIN" || die "Failed to create $LOCALBIN."
+    fi
+    ln -s "$CODE_BIN" "$LOCALBIN/code" || die "Failed to create symlink."
+  else
+    echo "Please put a symlink to $CODE_BIN somewhere on your \$PATH."
+  fi
+fi
+
+if [ ! -r "$DESKTOP_FILE" ]; then
+  echo "Creating .desktop file..."
+  mkdir -p "$DESKTOP_FILE_DIR" || die "Failed to create .desktop directory."
+  cat <<EOF > "$DESKTOP_FILE"
+#!/usr/bin/env xdg-open
+[Desktop Entry]
+Name=Visual Studio Code
+Comment=Code Editing. Redefined.
+GenericName=Text Editor
+Exec=$CODE_BIN --unity-launch %F
+Icon=$ICON
+Type=Application
+StartupNotify=false
+StartupWMClass=Code
+Categories=Utility;TextEditor;Development;IDE;
+MimeType=text/plain;inode/directory;
+Actions=new-empty-window;
+Keywords=vscode;
+
+X-Desktop-File-Install-Version=0.24
+
+[Desktop Action new-empty-window]
+Name=New Empty Window
+Exec=$CODE_BIN --new-window %F
+Icon=$ICON
+EOF
+  chmod +x "$DESKTOP_FILE" || die "Failed to make .desktop file executable."
+fi
+
+# Find currently installed version.
+if [ -d "$VSCODE_DIR" ]; then
+  if [ ! -r "$PACKAGE_JSON" ]; then
+    die "$PACKAGE_JSON file not found, this script must be updated."
+  fi
+  INSTALLED=$(grep '"version":' "$PACKAGE_JSON" \
+              | sed 's/[^0-9]*\([0-9.]*\).*/\1/')
+  echo "Detected installed version: $INSTALLED"
+  if [ "$VERSION" == "$INSTALLED" ] ; then
+    echo "You already have that version."
+    exit 0
+  else
+    echo "Updating from $INSTALLED to $VERSION..."
+  fi
+fi
+
+if [ ! -r "$DOWNLOAD_FILE" ]; then
+  echo "Downloading..."
+  if [ ! -d "$DOWNLOADS_DIR" ]; then
+    mkdir -p "$DOWNLOADS_DIR" || die "Failed to create $DOWNLOADS_DIR."
+  fi
+  wget "$DOWNLOAD_URL" -O "$DOWNLOAD_FILE" || die "Downloading failed."
+else
+  echo "$DOWNLOAD_FILE already exists; delete it to force re-download."
+fi
+
+echo "Extracting..."
+TAR_DIR=$(tar -tf "$DOWNLOAD_FILE" | head -1)
+[ -z "$TAR_DIR" ] && die "Couldn't read archive."
+TMP_DIR=$(mktemp -d)
+tar -C "$TMP_DIR" -xf "$DOWNLOAD_FILE" || {
+  rm -rf "$TMP_DIR"
+  die "Extracting failed."
+}
+
+if [ -d "$BACKUP_DIR" ]; then
+  echo "Deleting previous backup..."
+  rm -rf "$BACKUP_DIR"
+fi
+
+if [ -d "$VSCODE_DIR" ]; then
+  echo "Moving previous installation..."
+  mv "$VSCODE_DIR" "$BACKUP_DIR"
+fi
+
+echo "Installing new version..."
+mv "$TMP_DIR/$TAR_DIR" "$VSCODE_DIR"
+rmdir "$TMP_DIR"
+
+echo "All done, enjoy coding!"
diff --git a/src/third_party/v8/tools/dev/v8gen.py b/src/third_party/v8/tools/dev/v8gen.py
new file mode 100755
index 0000000..0b6e1d1
--- /dev/null
+++ b/src/third_party/v8/tools/dev/v8gen.py
@@ -0,0 +1,312 @@
+#!/usr/bin/env python
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Script to generate V8's gn arguments based on common developer defaults
+or builder configurations.
+
+Goma is used by default if detected. The compiler proxy is assumed to run.
+
+This script can be added to the PATH and used on other checkouts. It always
+runs for the checkout that contains the current working directory.
+
+Configurations of this script live in infra/mb/mb_config.pyl.
+
+Available actions are: {gen,list}. Omitting the action defaults to "gen".
+
+-------------------------------------------------------------------------------
+
+Examples:
+
+# Generate the ia32.release config in out.gn/ia32.release.
+v8gen.py ia32.release
+
+# Generate into out.gn/foo without goma auto-detect.
+v8gen.py gen -b ia32.release foo --no-goma
+
+# Pass additional gn arguments after -- (don't use spaces within gn args).
+v8gen.py ia32.optdebug -- v8_enable_slow_dchecks=true
+
+# Generate gn arguments of 'V8 Linux64 - builder' from 'client.v8'. To switch
+# off goma usage here, the args.gn file must be edited manually.
+v8gen.py -m client.v8 -b 'V8 Linux64 - builder'
+
+# Show available configurations.
+v8gen.py list
+
+-------------------------------------------------------------------------------
+"""
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import argparse
+import os
+import re
+import subprocess
+import sys
+
+CONFIG = os.path.join('infra', 'mb', 'mb_config.pyl')
+GOMA_DEFAULT = os.path.join(os.path.expanduser("~"), 'goma')
+OUT_DIR = 'out.gn'
+
+TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+sys.path.append(os.path.join(TOOLS_PATH, 'mb'))
+
+import mb
+
+
+def _sanitize_nonalpha(text):
+  return re.sub(r'[^a-zA-Z0-9.]', '_', text)
+
+
+class GenerateGnArgs(object):
+  def __init__(self, args):
+    # Split args into this script's arguments and gn args passed to the
+    # wrapped gn.
+    index = args.index('--') if '--' in args else len(args)
+    self._options = self._parse_arguments(args[:index])
+    self._gn_args = args[index + 1:]
+
+  def _parse_arguments(self, args):
+    self.parser = argparse.ArgumentParser(
+      description=__doc__,
+      formatter_class=argparse.RawTextHelpFormatter,
+    )
+
+    def add_common_options(p):
+      p.add_argument(
+          '-m', '--master', default='developer_default',
+          help='config group or master from mb_config.pyl - default: '
+               'developer_default')
+      p.add_argument(
+          '-v', '--verbosity', action='count',
+          help='print wrapped commands (use -vv to print output of wrapped '
+               'commands)')
+
+    subps = self.parser.add_subparsers()
+
+    # Command: gen.
+    gen_cmd = subps.add_parser(
+        'gen', help='generate a new set of build files (default)')
+    gen_cmd.set_defaults(func=self.cmd_gen)
+    add_common_options(gen_cmd)
+    gen_cmd.add_argument(
+        'outdir', nargs='?',
+        help='optional gn output directory')
+    gen_cmd.add_argument(
+        '-b', '--builder',
+        help='build configuration or builder name from mb_config.pyl, e.g. '
+             'x64.release')
+    gen_cmd.add_argument(
+        '-p', '--pedantic', action='store_true',
+        help='run gn over command-line gn args to catch errors early')
+
+    goma = gen_cmd.add_mutually_exclusive_group()
+    goma.add_argument(
+        '-g' , '--goma',
+        action='store_true', default=None, dest='goma',
+        help='force using goma')
+    goma.add_argument(
+        '--nogoma', '--no-goma',
+        action='store_false', default=None, dest='goma',
+        help='don\'t use goma auto detection - goma might still be used if '
+             'specified as a gn arg')
+
+    # Command: list.
+    list_cmd = subps.add_parser(
+        'list', help='list available configurations')
+    list_cmd.set_defaults(func=self.cmd_list)
+    add_common_options(list_cmd)
+
+    # Default to "gen" unless global help is requested.
+    if not args or args[0] not in list(subps.choices) + ['-h', '--help']:
+      args = ['gen'] + args
+
+    return self.parser.parse_args(args)
+
+  def cmd_gen(self):
+    if not self._options.outdir and not self._options.builder:
+      self.parser.error('please specify either an output directory or '
+                        'a builder/config name (-b), e.g. x64.release')
+
+    if not self._options.outdir:
+      # Derive output directory from builder name.
+      self._options.outdir = _sanitize_nonalpha(self._options.builder)
+    else:
+      # Note: for this to work on Windows, we might need to use \ where
+      # outdir is used as a path, but / where it is used in a gn context.
+      if self._options.outdir.startswith('/'):
+        self.parser.error(
+            'only output directories relative to %s are supported' % OUT_DIR)
+
+    if not self._options.builder:
+      # Derive builder from output directory.
+      self._options.builder = self._options.outdir
+
+    # Check for builder/config in mb config.
+    if self._options.builder not in self._mbw.masters[self._options.master]:
+      print('%s does not exist in %s for %s' % (
+          self._options.builder, CONFIG, self._options.master))
+      return 1
+
+    # TODO(machenbach): Check if the requested configuration has switched to
+    # gn at all.
+
+    # The directories are separated with slashes in a gn context (platform
+    # independent).
+    gn_outdir = '/'.join([OUT_DIR, self._options.outdir])
+
+    # Call MB to generate the basic configuration.
+    self._call_cmd([
+      sys.executable,
+      '-u', os.path.join('tools', 'mb', 'mb.py'),
+      'gen',
+      '-f', CONFIG,
+      '-m', self._options.master,
+      '-b', self._options.builder,
+      gn_outdir,
+    ])
+
+    # Handle extra gn arguments.
+    gn_args_path = os.path.join(OUT_DIR, self._options.outdir, 'args.gn')
+
+    # Append command-line args.
+    modified = self._append_gn_args(
+        'command-line', gn_args_path, '\n'.join(self._gn_args))
+
+    # Append goma args.
+    # TODO(machenbach): We currently can't remove existing goma args from the
+    # original config. E.g. to build like a bot that uses goma, but switch
+    # goma off.
+    modified |= self._append_gn_args(
+        'goma', gn_args_path, self._goma_args)
+
+    # Regenerate ninja files to check for errors in the additional gn args.
+    if modified and self._options.pedantic:
+      self._call_cmd(['gn', 'gen', gn_outdir])
+    return 0
+
+  def cmd_list(self):
+    print('\n'.join(sorted(self._mbw.masters[self._options.master])))
+    return 0
+
+  def verbose_print_1(self, text):
+    if self._options.verbosity >= 1:
+      print('#' * 80)
+      print(text)
+
+  def verbose_print_2(self, text):
+    if self._options.verbosity >= 2:
+      indent = ' ' * 2
+      for l in text.splitlines():
+        print(indent + l)
+
+  def _call_cmd(self, args):
+    self.verbose_print_1(' '.join(args))
+    try:
+      output = subprocess.check_output(
+        args=args,
+        stderr=subprocess.STDOUT,
+      )
+      self.verbose_print_2(output)
+    except subprocess.CalledProcessError as e:
+      self.verbose_print_2(e.output)
+      raise
+
+  def _find_work_dir(self, path):
+    """Find the closest v8 root to `path`."""
+    if os.path.exists(os.path.join(path, 'tools', 'dev', 'v8gen.py')):
+      # Approximate the v8 root dir by a folder where this script exists
+      # in the expected place.
+      return path
+    elif os.path.dirname(path) == path:
+      raise Exception(
+          'This appears to not be called from a recent v8 checkout')
+    else:
+      return self._find_work_dir(os.path.dirname(path))
+
+  @property
+  def _goma_dir(self):
+    return os.path.normpath(os.environ.get('GOMA_DIR') or GOMA_DEFAULT)
+
+  @property
+  def _need_goma_dir(self):
+    return self._goma_dir != GOMA_DEFAULT
+
+  @property
+  def _use_goma(self):
+    if self._options.goma is None:
+      # Auto-detect.
+      return os.path.exists(self._goma_dir) and os.path.isdir(self._goma_dir)
+    else:
+      return self._options.goma
+
+  @property
+  def _goma_args(self):
+    """Gn args for using goma."""
+    # Specify goma args if we want to use goma and if goma isn't specified
+    # via command line already. The command-line always has precedence over
+    # any other specification.
+    if (self._use_goma and
+        not any(re.match(r'use_goma\s*=.*', x) for x in self._gn_args)):
+      if self._need_goma_dir:
+        return 'use_goma=true\ngoma_dir="%s"' % self._goma_dir
+      else:
+        return 'use_goma=true'
+    else:
+      return ''
+
+  def _append_gn_args(self, type, gn_args_path, more_gn_args):
+    """Append extra gn arguments to the generated args.gn file."""
+    if not more_gn_args:
+      return False
+    self.verbose_print_1('Appending """\n%s\n""" to %s.' % (
+        more_gn_args, os.path.abspath(gn_args_path)))
+    with open(gn_args_path, 'a') as f:
+      f.write('\n# Additional %s args:\n' % type)
+      f.write(more_gn_args)
+      f.write('\n')
+
+    # Artificially increment modification time as our modifications happen too
+    # fast. This makes sure that gn is properly rebuilding the ninja files.
+    mtime = os.path.getmtime(gn_args_path) + 1
+    with open(gn_args_path, 'a'):
+      os.utime(gn_args_path, (mtime, mtime))
+
+    return True
+
+  def main(self):
+    # Always operate relative to the base directory for better relative-path
+    # handling. This script can be used in any v8 checkout.
+    workdir = self._find_work_dir(os.getcwd())
+    if workdir != os.getcwd():
+      self.verbose_print_1('cd ' + workdir)
+      os.chdir(workdir)
+
+    # Initialize MB as a library.
+    self._mbw = mb.MetaBuildWrapper()
+
+    # TODO(machenbach): Factor out common methods independent of mb arguments.
+    self._mbw.ParseArgs(['lookup', '-f', CONFIG])
+    self._mbw.ReadConfigFile()
+
+    if self._options.master not in self._mbw.masters:
+      print('%s not found in %s\n' % (self._options.master, CONFIG))
+      print('Choose one of:\n%s\n' % (
+          '\n'.join(sorted(self._mbw.masters.keys()))))
+      return 1
+
+    return self._options.func()
+
+
+if __name__ == "__main__":
+  gen = GenerateGnArgs(sys.argv[1:])
+  try:
+    sys.exit(gen.main())
+  except Exception:
+    if gen._options.verbosity < 2:
+      print ('\nHint: You can raise verbosity (-vv) to see the output of '
+             'failed commands.\n')
+    raise
diff --git a/src/third_party/v8/tools/disasm.py b/src/third_party/v8/tools/disasm.py
new file mode 100644
index 0000000..a91d0db
--- /dev/null
+++ b/src/third_party/v8/tools/disasm.py
@@ -0,0 +1,98 @@
+#!/usr/bin/env python
+#
+# Copyright 2011 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+import re
+import subprocess
+import tempfile
+
+
+# Avoid using the slow (google-specific) wrapper around objdump.
+OBJDUMP_BIN = "/usr/bin/objdump"
+if not os.path.exists(OBJDUMP_BIN):
+  OBJDUMP_BIN = "objdump"
+
+# -M intel-mnemonic selects Intel syntax.
+# -C demangles.
+# -z disables skipping over sections of zeroes.
+_COMMON_DISASM_OPTIONS = ["-M", "intel-mnemonic", "-C", "-z"]
+
+_DISASM_HEADER_RE = re.compile(r"[a-f0-9]+\s+<.*:$")
+_DISASM_LINE_RE = re.compile(r"\s*([a-f0-9]+):\s*(\S.*)")
+
+# Keys must match constants in Logger::LogCodeInfo.
+_ARCH_MAP = {
+  "ia32": "-m i386",
+  "x64": "-m i386 -M x86-64",
+  "arm": "-m arm",  # Not supported by our objdump build.
+  "mips": "-m mips",  # Not supported by our objdump build.
+  "arm64": "-m aarch64"
+}
+
+
+def GetDisasmLines(filename, offset, size, arch, inplace, arch_flags=""):
+  tmp_name = None
+  if not inplace:
+    # Create a temporary file containing a copy of the code.
+    assert arch in _ARCH_MAP, "Unsupported architecture '%s'" % arch
+    arch_flags = arch_flags + " " +  _ARCH_MAP[arch]
+    tmp_file = tempfile.NamedTemporaryFile(prefix=".v8code", delete=False)
+    tmp_name = tmp_file.name
+    tmp_file.close()
+    command = "dd if=%s of=%s bs=1 count=%d skip=%d && " \
+              "%s %s -D -b binary %s %s" % (
+      filename, tmp_name, size, offset,
+      OBJDUMP_BIN, ' '.join(_COMMON_DISASM_OPTIONS), arch_flags,
+      tmp_name)
+  else:
+    command = "%s %s %s --start-address=%d --stop-address=%d -d %s " % (
+      OBJDUMP_BIN, ' '.join(_COMMON_DISASM_OPTIONS), arch_flags,
+      offset,
+      offset + size,
+      filename)
+  process = subprocess.Popen(command,
+                             shell=True,
+                             stdout=subprocess.PIPE,
+                             stderr=subprocess.STDOUT)
+  out, err = process.communicate()
+  lines = out.split("\n")
+  header_line = 0
+  for i, line in enumerate(lines):
+    if _DISASM_HEADER_RE.match(line):
+      header_line = i
+      break
+  if tmp_name:
+    os.unlink(tmp_name)
+  split_lines = []
+  for line in lines[header_line + 1:]:
+    match = _DISASM_LINE_RE.match(line)
+    if match:
+      line_address = int(match.group(1), 16)
+      split_lines.append((line_address, match.group(2)))
+  return split_lines
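
A hedged usage sketch for GetDisasmLines; the input file, offset, and size are illustrative assumptions, and objdump must be installed.

    import disasm  # this module, assuming tools/ is on sys.path

    # Disassemble 64 bytes of raw x64 machine code starting at file offset 0.
    for address, text in disasm.GetDisasmLines(
        "/tmp/code.bin",  # hypothetical file holding raw machine code
        0,                # byte offset of the code within the file
        64,               # number of bytes to disassemble
        "x64",            # architecture key from _ARCH_MAP
        False):           # inplace=False: copy the bytes to a temp file first
      print(hex(address), text)
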
diff --git a/src/third_party/v8/tools/draw_instruction_graph.sh b/src/third_party/v8/tools/draw_instruction_graph.sh
new file mode 100755
index 0000000..549380b
--- /dev/null
+++ b/src/third_party/v8/tools/draw_instruction_graph.sh
@@ -0,0 +1,130 @@
+#!/bin/bash
+#
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This script reads in CSV-formatted instruction data and draws a stacked
+# graph in png format.
+
+defaultfile=arm64_inst.csv
+defaultout=arm64_inst.png
+gnuplot=/usr/bin/gnuplot
+
+
+# File containing CSV instruction data from simulator.
+file=${1:-$defaultfile}
+
+# Output graph png file.
+out=${2:-$defaultout}
+
+# Check input file exists.
+if [ ! -e $file ]; then
+  echo "Input file not found: $file."
+  echo "Usage: draw_instruction_graph.sh <input csv> <output png>"
+  exit 1
+fi
+
+# Search for an error message, and if found, exit.
+error=`grep -m1 '# Error:' $file`
+if [ -n "$error" ]; then
+  echo "Error message in input file:"
+  echo " $error"
+  exit 2
+fi
+
+# Sample period - the period over which the numbers for each category of
+# instructions are counted.
+sp=`grep -m1 '# sample_period=' $file | cut -d= -f2`
+
+# Get number of counters in the CSV file.
+nc=`grep -m1 '# counters=' $file | cut -d= -f2`
+
+# Find the annotation arrows. They appear as comments in the CSV file, in the
+# format:
+#   # xx @ yyyyy
+# Where xx is a two-character annotation identifier, and yyyyy is the
+# position in the executed instruction stream that generated the annotation.
+# Turn these locations into labelled arrows.
+arrows=`sed '/^[^#]/ d' $file | \
+        perl -pe "s/^# .. @ (\d+)/set arrow from \1, graph 0.9 to \1, $sp/"`;
+labels=`sed '/^[^#]/d' $file | \
+        sed -r 's/^# (..) @ (.+)/set label at \2, graph 0.9 "\1" \
+                center offset 0,0.5 font "FreeSans, 8"/'`;
+
+# Check for gnuplot, and warn if not available.
+if [ ! -e $gnuplot ]; then
+  echo "Can't find gnuplot at $gnuplot."
+  echo "Gnuplot version 4.6.3 or later required."
+  exit 3
+fi
+
+# Initialise gnuplot, and give it the data to draw.
+echo | $gnuplot <<EOF
+$arrows
+$labels
+MAXCOL=$nc
+set term png size 1920, 800 #ffffff
+set output '$out'
+set datafile separator ','
+set xtics font 'FreeSans, 10'
+set xlabel 'Instructions' font 'FreeSans, 10'
+set ytics font 'FreeSans, 10'
+set yrange [0:*]
+set key outside font 'FreeSans, 8'
+
+set style line 2 lc rgb '#800000'
+set style line 3 lc rgb '#d00000'
+set style line 4 lc rgb '#ff6000'
+set style line 5 lc rgb '#ffc000'
+set style line 6 lc rgb '#ffff00'
+
+set style line 7 lc rgb '#ff00ff'
+set style line 8 lc rgb '#ffc0ff'
+
+set style line 9 lc rgb '#004040'
+set style line 10 lc rgb '#008080'
+set style line 11 lc rgb '#40c0c0'
+set style line 12 lc rgb '#c0f0f0'
+
+set style line 13 lc rgb '#004000'
+set style line 14 lc rgb '#008000'
+set style line 15 lc rgb '#40c040'
+set style line 16 lc rgb '#c0f0c0'
+
+set style line 17 lc rgb '#2020f0'
+set style line 18 lc rgb '#6060f0'
+set style line 19 lc rgb '#a0a0f0'
+
+set style line 20 lc rgb '#000000'
+set style line 21 lc rgb '#ffffff'
+
+plot for [i=2:MAXCOL] '$file' using 1:(sum [col=i:MAXCOL] column(col)) \
+title columnheader(i) with filledcurve y1=0 ls i
+EOF
+
+
+
diff --git a/src/third_party/v8/tools/dump-cpp.py b/src/third_party/v8/tools/dump-cpp.py
new file mode 100644
index 0000000..58c08c8
--- /dev/null
+++ b/src/third_party/v8/tools/dump-cpp.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This script executes dumpcpp-driver.mjs, collects all dumped C++ symbols,
+# and merges them back into the v8 log.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import os
+import platform
+import re
+import subprocess
+import sys
+
+def is_file_executable(fPath):
+  return os.path.isfile(fPath) and os.access(fPath, os.X_OK)
+
+if __name__ == '__main__':
+  JS_FILES = ['dumpcpp-driver.mjs']
+  tools_path = os.path.dirname(os.path.realpath(__file__))
+  on_windows = platform.system() == 'Windows'
+  JS_FILES = [os.path.join(tools_path, f) for f in JS_FILES]
+
+  args = []
+  log_file = 'v8.log'
+  debug = False
+  for arg in sys.argv[1:]:
+    if arg == '--debug':
+      debug = True
+      continue
+    args.append(arg)
+    if not arg.startswith('-'):
+      log_file = arg
+
+  if on_windows:
+    args.append('--windows')
+
+  with open(log_file, 'r') as f:
+    lines = f.readlines()
+
+  d8_line = re.search(',\"(.*d8)', ''.join(lines))
+  if d8_line:
+    d8_exec = d8_line.group(1)
+    if not is_file_executable(d8_exec):
+      print('d8 binary path found in {} is not executable.'.format(log_file))
+      sys.exit(-1)
+  else:
+    print('No d8 binary path found in {}.'.format(log_file))
+    sys.exit(-1)
+
+  args = [d8_exec] + ['--module'] + JS_FILES + ['--'] + args
+
+  with open(log_file) as f:
+    sp = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+                          stdin=f)
+    out, err = sp.communicate()
+  if debug:
+    print(err)
+  if sp.returncode != 0:
+    print(out)
+    exit(-1)
+
+  if on_windows and out:
+    out = re.sub('\r+\n', '\n', out)
+
+  is_written = not bool(out)
+  with open(log_file, 'w') as f:
+    for line in lines:
+      if not is_written and line.startswith('tick'):
+        f.write(out)
+        is_written = True
+      f.write(line)
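
An illustrative invocation of dump-cpp.py: "v8.log" is the script's default log name and --debug merely echoes d8's stderr; the log must already contain the path of an executable d8 binary, otherwise the script exits.

    import subprocess

    # Merge dumped C++ symbols back into an existing v8.log in place.
    subprocess.check_call(["python", "tools/dump-cpp.py", "v8.log", "--debug"])
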
diff --git a/src/third_party/v8/tools/dumpcpp-driver.mjs b/src/third_party/v8/tools/dumpcpp-driver.mjs
new file mode 100644
index 0000000..8f575d0
--- /dev/null
+++ b/src/third_party/v8/tools/dumpcpp-driver.mjs
@@ -0,0 +1,52 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import { WebInspector } from "./sourcemap.mjs";
+import {
+    CppProcessor, ArgumentsProcessor, UnixCppEntriesProvider,
+    WindowsCppEntriesProvider, MacCppEntriesProvider
+  } from  "./dumpcpp.mjs";
+
+// Dump C++ symbols of shared library if possible
+
+function processArguments(args) {
+  const processor = new ArgumentsProcessor(args);
+  if (processor.parse()) {
+    return processor.result();
+  } else {
+    processor.printUsageAndExit();
+  }
+}
+
+function initSourceMapSupport() {
+  // Pull dev tools source maps into our name space.
+  SourceMap = WebInspector.SourceMap;
+
+  // Overwrite the load function to load scripts synchronously.
+  SourceMap.load = function(sourceMapURL) {
+    const content = readFile(sourceMapURL);
+    const sourceMapObject = (JSON.parse(content));
+    return new SourceMap(sourceMapURL, sourceMapObject);
+  };
+}
+
+const entriesProviders = {
+  'unix': UnixCppEntriesProvider,
+  'windows': WindowsCppEntriesProvider,
+  'mac': MacCppEntriesProvider
+};
+
+const params = processArguments(arguments);
+let sourceMap = null;
+if (params.sourceMap) {
+  initSourceMapSupport();
+  sourceMap = SourceMap.load(params.sourceMap);
+}
+
+const cppProcessor = new CppProcessor(
+  new (entriesProviders[params.platform])(params.nm, params.targetRootFS,
+                                          params.apkEmbeddedLibrary),
+  params.timedRange, params.pairwiseTimedRange);
+cppProcessor.processLogFile(params.logFileName);
+cppProcessor.dumpCppSymbols();
diff --git a/src/third_party/v8/tools/dumpcpp.mjs b/src/third_party/v8/tools/dumpcpp.mjs
new file mode 100644
index 0000000..be2dd99
--- /dev/null
+++ b/src/third_party/v8/tools/dumpcpp.mjs
@@ -0,0 +1,68 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import { LogReader, parseString } from "./logreader.mjs";
+import { CodeMap, CodeEntry } from "./codemap.mjs";
+export {
+    ArgumentsProcessor, UnixCppEntriesProvider,
+    WindowsCppEntriesProvider, MacCppEntriesProvider,
+  } from "./tickprocessor.mjs";
+import { inherits } from "./tickprocessor.mjs";
+
+
+export class CppProcessor extends LogReader {
+  constructor(cppEntriesProvider, timedRange, pairwiseTimedRange) {
+    super({}, timedRange, pairwiseTimedRange);
+    this.dispatchTable_ = {
+        'shared-library': {
+          parsers: [parseString, parseInt, parseInt, parseInt],
+          processor: this.processSharedLibrary }
+    };
+    this.cppEntriesProvider_ = cppEntriesProvider;
+    this.codeMap_ = new CodeMap();
+    this.lastLogFileName_ = null;
+  }
+
+  /**
+   * @override
+   */
+  printError(str) {
+    print(str);
+  };
+
+  processLogFile(fileName) {
+    this.lastLogFileName_ = fileName;
+    let line;
+    while (line = readline()) {
+      this.processLogLine(line);
+    }
+  };
+
+  processLogFileInTest(fileName) {
+    // Hack file name to avoid dealing with platform specifics.
+    this.lastLogFileName_ = 'v8.log';
+    const contents = readFile(fileName);
+    this.processLogChunk(contents);
+  };
+
+  processSharedLibrary(name, startAddr, endAddr, aslrSlide) {
+    const self = this;
+    const libFuncs = this.cppEntriesProvider_.parseVmSymbols(
+        name, startAddr, endAddr, aslrSlide, function(fName, fStart, fEnd) {
+      const entry = new CodeEntry(fEnd - fStart, fName, 'CPP');
+      self.codeMap_.addStaticCode(fStart, entry);
+    });
+  };
+
+  dumpCppSymbols() {
+    const staticEntries = this.codeMap_.getAllStaticEntriesWithAddresses();
+    const total = staticEntries.length;
+    for (let i = 0; i < total; ++i) {
+      const entry = staticEntries[i];
+      const printValues = ['cpp', `0x${entry[0].toString(16)}`, entry[1].size,
+                        `"${entry[1].name}"`];
+      print(printValues.join(','));
+    }
+  }
+}
diff --git a/src/third_party/v8/tools/eval_gc_nvp.py b/src/third_party/v8/tools/eval_gc_nvp.py
new file mode 100755
index 0000000..222ebef
--- /dev/null
+++ b/src/third_party/v8/tools/eval_gc_nvp.py
@@ -0,0 +1,223 @@
+#!/usr/bin/env python
+#
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""This script is used to analyze GCTracer's NVP output."""
+
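+# It reads NVP lines from stdin, so a typical invocation (the file name and
+# keys below are only illustrative) pipes a --trace-gc-nvp log into it, e.g.
+#
+#   cat v8.gc.log | tools/eval_gc_nvp.py pause mark sweep --rank max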
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+
+from argparse import ArgumentParser
+from copy import deepcopy
+from gc_nvp_common import split_nvp
+from math import ceil, log
+from sys import stdin
+
+
+class LinearBucket:
+  def __init__(self, granularity):
+    self.granularity = granularity
+
+  def value_to_bucket(self, value):
+    return int(value / self.granularity)
+
+  def bucket_to_range(self, bucket):
+    return (bucket * self.granularity, (bucket + 1) * self.granularity)
+
+
+class Log2Bucket:
+  def __init__(self, start):
+    self.start = int(log(start, 2)) - 1
+
+  def value_to_bucket(self, value):
+    index = int(log(value, 2))
+    index -= self.start
+    if index < 0:
+      index = 0
+    return index
+
+  def bucket_to_range(self, bucket):
+    if bucket == 0:
+      return (0, 2 ** (self.start + 1))
+    bucket += self.start
+    return (2 ** bucket, 2 ** (bucket + 1))
+
+
+class Histogram:
+  def __init__(self, bucket_trait, fill_empty):
+    self.histogram = {}
+    self.fill_empty = fill_empty
+    self.bucket_trait = bucket_trait
+
+  def add(self, key):
+    index = self.bucket_trait.value_to_bucket(key)
+    if index not in self.histogram:
+      self.histogram[index] = 0
+    self.histogram[index] += 1
+
+  def __str__(self):
+    ret = []
+    keys = sorted(self.histogram.keys())
+    last = keys[len(keys) - 1]
+    for i in range(0, last + 1):
+      (min_value, max_value) = self.bucket_trait.bucket_to_range(i)
+      if i == keys[0]:
+        keys.pop(0)
+        ret.append("  [{0},{1}[: {2}".format(
+          str(min_value), str(max_value), self.histogram[i]))
+      else:
+        if self.fill_empty:
+          ret.append("  [{0},{1}[: {2}".format(
+            str(min_value), str(max_value), 0))
+    return "\n".join(ret)
+
+
+class Category:
+  def __init__(self, key, histogram, csv, percentiles):
+    self.key = key
+    self.values = []
+    self.histogram = histogram
+    self.csv = csv
+    self.percentiles = percentiles
+
+  def process_entry(self, entry):
+    if self.key in entry:
+      self.values.append(float(entry[self.key]))
+      if self.histogram:
+        self.histogram.add(float(entry[self.key]))
+
+  def min(self):
+    return min(self.values)
+
+  def max(self):
+    return max(self.values)
+
+  def avg(self):
+    if len(self.values) == 0:
+      return 0.0
+    return sum(self.values) / len(self.values)
+
+  def empty(self):
+    return len(self.values) == 0
+
+  def _compute_percentiles(self):
+    ret = []
+    if len(self.values) == 0:
+      return ret
+    sorted_values = sorted(self.values)
+    for percentile in self.percentiles:
+      index = int(ceil((len(self.values) - 1) * percentile / 100))
+      ret.append("  {0}%: {1}".format(percentile, sorted_values[index]))
+    return ret
+
+  def __str__(self):
+    if self.csv:
+      ret = [self.key]
+      ret.append(len(self.values))
+      ret.append(self.min())
+      ret.append(self.max())
+      ret.append(self.avg())
+      ret = [str(x) for x in ret]
+      return ",".join(ret)
+    else:
+      ret = [self.key]
+      ret.append("  len: {0}".format(len(self.values)))
+      if len(self.values) > 0:
+        ret.append("  min: {0}".format(self.min()))
+        ret.append("  max: {0}".format(self.max()))
+        ret.append("  avg: {0}".format(self.avg()))
+        if self.histogram:
+          ret.append(str(self.histogram))
+        if self.percentiles:
+          ret.append("\n".join(self._compute_percentiles()))
+      return "\n".join(ret)
+
+  def __repr__(self):
+    return "<Category: {0}>".format(self.key)
+
+
+def make_key_func(cmp_metric):
+  def key_func(a):
+    return getattr(a, cmp_metric)()
+  return key_func
+
+
+def main():
+  parser = ArgumentParser(description="Process GCTracer's NVP output")
+  parser.add_argument('keys', metavar='KEY', type=str, nargs='+',
+                      help='the keys of NVPs to process')
+  parser.add_argument('--histogram-type', metavar='<linear|log2>',
+                      type=str, nargs='?', default="linear",
+                      help='histogram type to use (default: linear)')
+  linear_group = parser.add_argument_group('linear histogram specific')
+  linear_group.add_argument('--linear-histogram-granularity',
+                            metavar='GRANULARITY', type=int, nargs='?',
+                            default=5,
+                            help='histogram granularity (default: 5)')
+  log2_group = parser.add_argument_group('log2 histogram specific')
+log2_group.add_argument('--log2-histogram-init-bucket', metavar='START',
+                          type=int, nargs='?', default=64,
+                          help='initial bucket size (default: 64)')
+  parser.add_argument('--histogram-omit-empty-buckets',
+                      dest='histogram_omit_empty',
+                      action='store_true',
+                      help='omit empty histogram buckets')
+  parser.add_argument('--no-histogram', dest='histogram',
+                      action='store_false', help='do not print histogram')
+  parser.set_defaults(histogram=True)
+  parser.set_defaults(histogram_omit_empty=False)
+  parser.add_argument('--rank', metavar='<no|min|max|avg>',
+                      type=str, nargs='?',
+                      default="no",
+                      help="rank keys by metric (default: no)")
+  parser.add_argument('--csv', dest='csv',
+                      action='store_true', help='provide output as csv')
+  parser.add_argument('--percentiles', dest='percentiles',
+                      type=str, default="",
+                      help='comma separated list of percentiles')
+  args = parser.parse_args()
+
+  histogram = None
+  if args.histogram:
+    bucket_trait = None
+    if args.histogram_type == "log2":
+      bucket_trait = Log2Bucket(args.log2_histogram_init_bucket)
+    else:
+      bucket_trait = LinearBucket(args.linear_histogram_granularity)
+    histogram = Histogram(bucket_trait, not args.histogram_omit_empty)
+
+  percentiles = []
+  for percentile in args.percentiles.split(','):
+    try:
+      percentiles.append(float(percentile))
+    except ValueError:
+      pass
+
+  categories = [ Category(key, deepcopy(histogram), args.csv, percentiles)
+                 for key in args.keys ]
+
+  while True:
+    line = stdin.readline()
+    if not line:
+      break
+    obj = split_nvp(line)
+    for category in categories:
+      category.process_entry(obj)
+
+  # Filter out empty categories.
+  categories = [x for x in categories if not x.empty()]
+
+  if args.rank != "no":
+    categories = sorted(categories, key=make_key_func(args.rank), reverse=True)
+
+  for category in categories:
+    print(category)
+
+
+if __name__ == '__main__':
+  main()
diff --git a/src/third_party/v8/tools/eval_gc_time.sh b/src/third_party/v8/tools/eval_gc_time.sh
new file mode 100755
index 0000000..f809c35
--- /dev/null
+++ b/src/third_party/v8/tools/eval_gc_time.sh
@@ -0,0 +1,162 @@
+#!/bin/bash
+#
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Convenience Script used to rank GC NVP output.
+
+print_usage_and_die() {
+  echo "Usage: $0 [OPTIONS]"
+  echo ""
+  echo "OPTIONS"
+  echo  "  -r|--rank new-gen-rank|old-gen-rank    GC mode to profile"
+  echo  "                                         (default: old-gen-rank)"
+  echo  "  -s|--sort avg|max                      sorting mode (default: max)"
+  echo  "  -t|--top-level                         include top-level categories"
+  echo  "  -c|--csv                               provide csv output"
+  echo  "  -f|--file FILE                         profile input in a file"
+  echo  "                                         (default: stdin)"
+  echo  "  -p|--percentiles                       comma separated percentiles"
+  exit 1
+}
+
+OP=old-gen-rank
+RANK_MODE=max
+TOP_LEVEL=no
+CSV=""
+LOGFILE=/dev/stdin
+PERCENTILES=""
+
+while [[ $# -ge 1 ]]
+do
+  key="$1"
+  case $key in
+    -r|--rank)
+      case $2 in
+        new-gen-rank|old-gen-rank)
+          OP="$2"
+          ;;
+        *)
+          print_usage_and_die
+      esac
+      shift
+      ;;
+    -s|--sort)
+      case $2 in
+        max|avg)
+          RANK_MODE=$2
+          ;;
+        *)
+          print_usage_and_die
+      esac
+      shift
+      ;;
+    -t|--top-level)
+      TOP_LEVEL=yes
+      ;;
+    -c|--csv)
+      CSV=" --csv "
+      ;;
+    -f|--file)
+      LOGFILE=$2
+      shift
+      ;;
+    -p|--percentiles)
+      PERCENTILES="--percentiles=$2"
+      shift
+      ;;
+    *)
+      break
+      ;;
+  esac
+  shift
+done
+
+if [[ $# -ne 0 ]]; then
+  echo "Unknown option(s): $@"
+  echo ""
+  print_usage_and_die
+fi
+
+INTERESTING_NEW_GEN_KEYS="\
+  scavenge \
+  weak \
+  roots \
+  old_new \
+  semispace \
+"
+
+INTERESTING_OLD_GEN_KEYS="\
+  clear.dependent_code \
+  clear.global_handles \
+  clear.maps \
+  clear.slots_buffer \
+  clear.store_buffer \
+  clear.string_table \
+  clear.weak_collections \
+  clear.weak_lists \
+  evacuate.candidates \
+  evacuate.clean_up \
+  evacuate.copy \
+  evacuate.update_pointers \
+  evacuate.update_pointers.to_evacuated \
+  evacuate.update_pointers.to_new \
+  evacuate.update_pointers.weak \
+  external.mc_prologue \
+  external.mc_epilogue \
+  external.mc_incremental_prologue \
+  external.mc_incremental_epilogue \
+  external.weak_global_handles \
+  mark.finish_incremental \
+  mark.roots \
+  mark.weak_closure \
+  mark.weak_closure.ephemeral \
+  mark.weak_closure.weak_handles \
+  mark.weak_closure.weak_roots \
+  mark.weak_closure.harmony \
+  sweep.code \
+  sweep.map \
+  sweep.old \
+"
+
+if [[ "$TOP_LEVEL" = "yes" ]]; then
+  INTERESTING_OLD_GEN_KEYS="\
+    ${INTERESTING_OLD_GEN_KEYS} \
+    clear \
+    evacuate \
+    finish \
+    incremental_finalize \
+    mark \
+    pause \
+    sweep \
+  "
+  INTERESTING_NEW_GEN_KEYS="\
+    ${INTERESTING_NEW_GEN_KEYS} \
+  "
+fi
+
+BASE_DIR=$(dirname $0)
+
+case $OP in
+  new-gen-rank)
+    cat $LOGFILE | grep "gc=s" \
+      | $BASE_DIR/eval_gc_nvp.py \
+      --no-histogram \
+      --rank $RANK_MODE \
+      $CSV \
+      $PERCENTILES \
+      ${INTERESTING_NEW_GEN_KEYS}
+    ;;
+  old-gen-rank)
+    cat $LOGFILE | grep "gc=ms" \
+      | $BASE_DIR/eval_gc_nvp.py \
+      --no-histogram \
+      --rank $RANK_MODE \
+      $CSV \
+      $PERCENTILES \
+      ${INTERESTING_OLD_GEN_KEYS}
+    ;;
+  *)
+    ;;
+esac
diff --git a/src/third_party/v8/tools/find-commit-for-patch.py b/src/third_party/v8/tools/find-commit-for-patch.py
new file mode 100755
index 0000000..cca1f40
--- /dev/null
+++ b/src/third_party/v8/tools/find-commit-for-patch.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
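+# Finds a commit that a given patch can be applied to, without modifying the
+# checkout. An illustrative invocation (the patch file name is just an
+# example):
+#
+#   tools/find-commit-for-patch.py --branch origin/master my-change.patch
+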
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import argparse
+import subprocess
+import sys
+
+
+def GetArgs():
+  parser = argparse.ArgumentParser(
+      description="Finds a commit that a given patch can be applied to. "
+                  "Does not actually apply the patch or modify your checkout "
+                  "in any way.")
+  parser.add_argument("patch_file", help="Patch file to match")
+  parser.add_argument(
+      "--branch", "-b", default="origin/master", type=str,
+      help="Git tree-ish where to start searching for commits, "
+           "default: %(default)s")
+  parser.add_argument(
+      "--limit", "-l", default=500, type=int,
+      help="Maximum number of commits to search, default: %(default)s")
+  parser.add_argument(
+      "--verbose", "-v", default=False, action="store_true",
+      help="Print verbose output for your entertainment")
+  return parser.parse_args()
+
+
+def FindFilesInPatch(patch_file):
+  files = {}
+  next_file = ""
+  with open(patch_file) as patch:
+    for line in patch:
+      if line.startswith("diff --git "):
+        # diff --git a/src/objects.cc b/src/objects.cc
+        words = line.split()
+        assert words[2].startswith("a/") and len(words[2]) > 2
+        next_file = words[2][2:]
+      elif line.startswith("index "):
+        # index add3e61..d1bbf6a 100644
+        hashes = line.split()[1]
+        old_hash = hashes.split("..")[0]
+        if old_hash.startswith("0000000"): continue  # Ignore new files.
+        files[next_file] = old_hash
+  return files
+
+
+def GetGitCommitHash(treeish):
+  cmd = ["git", "log", "-1", "--format=%H", treeish]
+  return subprocess.check_output(cmd).strip()
+
+
+def CountMatchingFiles(commit, files):
+  matched_files = 0
+  # Calling out to git once and parsing the result Python-side is faster
+  # than calling 'git ls-tree' for every file.
+  cmd = ["git", "ls-tree", "-r", commit] + [f for f in files]
+  output = subprocess.check_output(cmd)
+  for line in output.splitlines():
+    # 100644 blob c6d5daaa7d42e49a653f9861224aad0a0244b944      src/objects.cc
+    _, _, actual_hash, filename = line.split()
+    expected_hash = files[filename]
+    if actual_hash.startswith(expected_hash): matched_files += 1
+  return matched_files
+
+
+def FindFirstMatchingCommit(start, files, limit, verbose):
+  commit = GetGitCommitHash(start)
+  num_files = len(files)
+  if verbose: print(">>> Found %d files modified by patch." % num_files)
+  for _ in range(limit):
+    matched_files = CountMatchingFiles(commit, files)
+    if verbose: print("Commit %s matched %d files" % (commit, matched_files))
+    if matched_files == num_files:
+      return commit
+    commit = GetGitCommitHash("%s^" % commit)
+  print("Sorry, no matching commit found. "
+        "Try running 'git fetch', specifying the correct --branch, "
+        "and/or setting a higher --limit.")
+  sys.exit(1)
+
+
+if __name__ == "__main__":
+  args = GetArgs()
+  files = FindFilesInPatch(args.patch_file)
+  commit = FindFirstMatchingCommit(args.branch, files, args.limit, args.verbose)
+  if args.verbose:
+    print(">>> Matching commit: %s" % commit)
+    print(subprocess.check_output(["git", "log", "-1", commit]))
+    print(">>> Kthxbai.")
+  else:
+    print(commit)
diff --git a/src/third_party/v8/tools/find_depot_tools.py b/src/third_party/v8/tools/find_depot_tools.py
new file mode 100644
index 0000000..db3ffa2
--- /dev/null
+++ b/src/third_party/v8/tools/find_depot_tools.py
@@ -0,0 +1,43 @@
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Small utility function to find depot_tools and add it to the python path.
+"""
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import os
+import sys
+
+
+def directory_really_is_depot_tools(directory):
+  return os.path.isfile(os.path.join(directory, 'gclient.py'))
+
+
+def add_depot_tools_to_path():
+  """Search for depot_tools and add it to sys.path."""
+  # First look if depot_tools is already in PYTHONPATH.
+  for i in sys.path:
+    if i.rstrip(os.sep).endswith('depot_tools'):
+      if directory_really_is_depot_tools(i):
+        return i
+
+  # Then look if depot_tools is in PATH, common case.
+  for i in os.environ['PATH'].split(os.pathsep):
+    if i.rstrip(os.sep).endswith('depot_tools'):
+      if directory_really_is_depot_tools(i):
+        sys.path.insert(0, i.rstrip(os.sep))
+        return i
+  # Rare case, it's not even in PATH, look upward up to root.
+  root_dir = os.path.dirname(os.path.abspath(__file__))
+  previous_dir = os.path.abspath(__file__)
+  while root_dir and root_dir != previous_dir:
+    if directory_really_is_depot_tools(os.path.join(root_dir, 'depot_tools')):
+      i = os.path.join(root_dir, 'depot_tools')
+      sys.path.insert(0, i)
+      return i
+    previous_dir = root_dir
+    root_dir = os.path.dirname(root_dir)
+  print('Failed to find depot_tools', file=sys.stderr)
+  return None
diff --git a/src/third_party/v8/tools/freebsd-tick-processor b/src/third_party/v8/tools/freebsd-tick-processor
new file mode 100755
index 0000000..2bb2618
--- /dev/null
+++ b/src/third_party/v8/tools/freebsd-tick-processor
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+# A wrapper script to call 'linux-tick-processor'.
+
+# Known issues on FreeBSD:
+#  No ticks from C++ code.
+#  You must have d8 built and in your path before calling this.
+
+tools_path=`cd $(dirname "$0");pwd`
+$tools_path/linux-tick-processor "$@"
diff --git a/src/third_party/v8/tools/fuzz-harness.sh b/src/third_party/v8/tools/fuzz-harness.sh
new file mode 100755
index 0000000..01f0353
--- /dev/null
+++ b/src/third_party/v8/tools/fuzz-harness.sh
@@ -0,0 +1,106 @@
+#!/bin/bash
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# A simple harness that downloads and runs 'jsfunfuzz' against d8. This
+# takes a long time because it runs many iterations and is intended for
+# automated usage. The package containing 'jsfunfuzz' can be found as an
+# attachment to this bug:
+# https://bugzilla.mozilla.org/show_bug.cgi?id=jsfunfuzz
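+#
+# Invocation sketch (both arguments are optional and the paths are only
+# illustrative): the first argument is the d8 path relative to the V8 root,
+# the second the name of the results archive, e.g.
+#   tools/fuzz-harness.sh out/x64.release/d8 fuzz-results.tar.bz2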
+
+JSFUNFUZZ_URL="https://bugzilla.mozilla.org/attachment.cgi?id=310631"
+JSFUNFUZZ_MD5="d0e497201c5cd7bffbb1cdc1574f4e32"
+
+v8_root=$(readlink -f $(dirname $BASH_SOURCE)/../)
+jsfunfuzz_dir="$v8_root/tools/jsfunfuzz"
+
+if [ -n "$1" ]; then
+  d8="${v8_root}/$1"
+else
+  d8="${v8_root}/d8"
+fi
+
+if [ ! -f "$d8" ]; then
+  echo "Failed to find d8 binary: $d8"
+  exit 1
+fi
+
+# Deprecated download method. A prepatched archive is downloaded as a hook
+# if jsfunfuzz=1 is specified as a gyp flag. Requires google.com authentication
+# for google storage.
+if [ "$3" == "--download" ]; then
+
+  jsfunfuzz_file="$v8_root/tools/jsfunfuzz.zip"
+  if [ ! -f "$jsfunfuzz_file" ]; then
+    echo "Downloading $jsfunfuzz_file ..."
+    wget -q -O "$jsfunfuzz_file" $JSFUNFUZZ_URL || exit 1
+  fi
+
+  jsfunfuzz_sum=$(md5sum "$jsfunfuzz_file" | awk '{ print $1 }')
+  if [ $jsfunfuzz_sum != $JSFUNFUZZ_MD5 ]; then
+    echo "Failed to verify checksum!"
+    exit 1
+  fi
+
+  if [ ! -d "$jsfunfuzz_dir" ]; then
+    echo "Unpacking into $jsfunfuzz_dir ..."
+    unzip "$jsfunfuzz_file" -d "$jsfunfuzz_dir" || exit 1
+    echo "Patching runner ..."
+    cat << EOF | patch -s -p0 -d "$v8_root"
+--- tools/jsfunfuzz/jsfunfuzz/multi_timed_run.py~
++++ tools/jsfunfuzz/jsfunfuzz/multi_timed_run.py
+@@ -125,7 +125,7 @@
+ 
+ def many_timed_runs():
+     iteration = 0
+-    while True:
++    while iteration < 100:
+         iteration += 1
+         logfilename = "w%d" % iteration
+         one_timed_run(logfilename)
+EOF
+  fi
+
+fi
+
+flags='--expose-gc --verify-gc'
+python -u "$jsfunfuzz_dir/jsfunfuzz/multi_timed_run.py" 300 \
+    "$d8" $flags "$jsfunfuzz_dir/jsfunfuzz/jsfunfuzz.js"
+exit_code=$(cat w* | grep " looking good" -c)
+exit_code=$((100-exit_code))
+
+if [ -n "$2" ]; then
+  archive="$2"
+else
+  archive=fuzz-results-$(date +%Y%m%d%H%M%S).tar.bz2
+fi
+echo "Creating archive $archive"
+tar -cjf $archive err-* w*
+rm -f err-* w*
+
+echo "Total failures: $exit_code"
+exit $exit_code
diff --git a/src/third_party/v8/tools/gc-nvp-to-csv.py b/src/third_party/v8/tools/gc-nvp-to-csv.py
new file mode 100755
index 0000000..b3ad374
--- /dev/null
+++ b/src/third_party/v8/tools/gc-nvp-to-csv.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+#
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+#
+# This is a utility for generating CSV files based on GC traces produced by
+# V8 when run with the flags --trace-gc --trace-gc-nvp.
+#
+# Usage: gc-nvp-to-csv.py <GC-trace-filename>
+#
+
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import sys
+import gc_nvp_common
+
+
+def process_trace(filename):
+  trace = gc_nvp_common.parse_gc_trace(filename)
+  if len(trace):
+    keys = trace[0].keys()
+    print(', '.join(keys))
+    for entry in trace:
+      print(', '.join(map(lambda key: str(entry[key]), keys)))
+
+
+if len(sys.argv) != 2:
+  print("Usage: %s <GC-trace-filename>" % sys.argv[0])
+  sys.exit(1)
+
+process_trace(sys.argv[1])
diff --git a/src/third_party/v8/tools/gc-nvp-trace-processor.py b/src/third_party/v8/tools/gc-nvp-trace-processor.py
new file mode 100755
index 0000000..75d50b1
--- /dev/null
+++ b/src/third_party/v8/tools/gc-nvp-trace-processor.py
@@ -0,0 +1,370 @@
+#!/usr/bin/env python
+#
+# Copyright 2010 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+#
+# This is a utility for plotting charts based on GC traces produced by V8 when
+# run with the flags --trace-gc --trace-gc-nvp. It relies on gnuplot for the
+# actual plotting.
+#
+# Usage: gc-nvp-trace-processor.py <GC-trace-filename>
+#
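+# The output is written next to the trace: a <GC-trace-filename>.html summary
+# plus one <GC-trace-filename>_<n>.png chart per configured plot; gnuplot must
+# be available on PATH.
+#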
+
+
+# for py2/py3 compatibility
+from __future__ import with_statement
+from __future__ import print_function
+from functools import reduce
+
+import sys, types, subprocess, math
+import gc_nvp_common
+
+
+try:
+  long        # Python 2
+except NameError:
+  long = int  # Python 3
+
+
+def flatten(l):
+  flat = []
+  for i in l: flat.extend(i)
+  return flat
+
+def gnuplot(script):
+  gnuplot = subprocess.Popen(["gnuplot"], stdin=subprocess.PIPE)
+  gnuplot.stdin.write(script)
+  gnuplot.stdin.close()
+  gnuplot.wait()
+
+x1y1 = 'x1y1'
+x1y2 = 'x1y2'
+x2y1 = 'x2y1'
+x2y2 = 'x2y2'
+
+class Item(object):
+  def __init__(self, title, field, axis = x1y1, **keywords):
+    self.title = title
+    self.axis = axis
+    self.props = keywords
+    if type(field) is list:
+      self.field = field
+    else:
+      self.field = [field]
+
+  def fieldrefs(self):
+    return self.field
+
+  def to_gnuplot(self, context):
+    args = ['"%s"' % context.datafile,
+            'using %s' % context.format_fieldref(self.field),
+            'title "%s"' % self.title,
+            'axis %s' % self.axis]
+    if 'style' in self.props:
+      args.append('with %s' % self.props['style'])
+    if 'lc' in self.props:
+      args.append('lc rgb "%s"' % self.props['lc'])
+    if 'fs' in self.props:
+      args.append('fs %s' % self.props['fs'])
+    return ' '.join(args)
+
+class Plot(object):
+  def __init__(self, *items):
+    self.items = items
+
+  def fieldrefs(self):
+    return flatten([item.fieldrefs() for item in self.items])
+
+  def to_gnuplot(self, ctx):
+    return 'plot ' + ', '.join([item.to_gnuplot(ctx) for item in self.items])
+
+class Set(object):
+  def __init__(self, value):
+    self.value = value
+
+  def to_gnuplot(self, ctx):
+    return 'set ' + self.value
+
+  def fieldrefs(self):
+    return []
+
+class Context(object):
+  def __init__(self, datafile, field_to_index):
+    self.datafile = datafile
+    self.field_to_index = field_to_index
+
+  def format_fieldref(self, fieldref):
+    return ':'.join([str(self.field_to_index[field]) for field in fieldref])
+
+def collect_fields(plot):
+  field_to_index = {}
+  fields = []
+
+  def add_field(field):
+    if field not in field_to_index:
+      fields.append(field)
+      field_to_index[field] = len(fields)
+
+  for field in flatten([item.fieldrefs() for item in plot]):
+    add_field(field)
+
+  return (fields, field_to_index)
+
+def is_y2_used(plot):
+  for subplot in plot:
+    if isinstance(subplot, Plot):
+      for item in subplot.items:
+        if item.axis == x1y2 or item.axis == x2y2:
+          return True
+  return False
+
+def get_field(trace_line, field):
+  t = type(field)
+  if t is bytes:
+    return trace_line[field]
+  elif t is types.FunctionType:
+    return field(trace_line)
+
+def generate_datafile(datafile_name, trace, fields):
+  with open(datafile_name, 'w') as datafile:
+    for line in trace:
+      data_line = [str(get_field(line, field)) for field in fields]
+      datafile.write('\t'.join(data_line))
+      datafile.write('\n')
+
+def generate_script_and_datafile(plot, trace, datafile, output):
+  (fields, field_to_index) = collect_fields(plot)
+  generate_datafile(datafile, trace, fields)
+  script = [
+      'set terminal png',
+      'set output "%s"' % output,
+      'set autoscale',
+      'set ytics nomirror',
+      'set xtics nomirror',
+      'set key below'
+  ]
+
+  if is_y2_used(plot):
+    script.append('set autoscale y2')
+    script.append('set y2tics')
+
+  context = Context(datafile, field_to_index)
+
+  for item in plot:
+    script.append(item.to_gnuplot(context))
+
+  return '\n'.join(script)
+
+def plot_all(plots, trace, prefix):
+  charts = []
+
+  for plot in plots:
+    outfilename = "%s_%d.png" % (prefix, len(charts))
+    charts.append(outfilename)
+    script = generate_script_and_datafile(plot, trace, '~datafile', outfilename)
+    print('Plotting %s...' % outfilename)
+    gnuplot(script)
+
+  return charts
+
+def reclaimed_bytes(row):
+  return row['total_size_before'] - row['total_size_after']
+
+def other_scope(r):
+  if r['gc'] == 's':
+    # there is no 'other' scope for scavenging collections.
+    return 0
+  return r['pause'] - r['mark'] - r['sweep'] - r['external']
+
+def scavenge_scope(r):
+  if r['gc'] == 's':
+    return r['pause'] - r['external']
+  return 0
+
+
+def real_mutator(r):
+  return r['mutator'] - r['steps_took']
+
+plots = [
+  [
+    Set('style fill solid 0.5 noborder'),
+    Set('style histogram rowstacked'),
+    Set('style data histograms'),
+    Plot(Item('Scavenge', scavenge_scope, lc = 'green'),
+         Item('Marking', 'mark', lc = 'purple'),
+         Item('Sweep', 'sweep', lc = 'blue'),
+         Item('External', 'external', lc = '#489D43'),
+         Item('Other', other_scope, lc = 'grey'),
+         Item('IGC Steps', 'steps_took', lc = '#FF6347'))
+  ],
+  [
+    Set('style fill solid 0.5 noborder'),
+    Set('style histogram rowstacked'),
+    Set('style data histograms'),
+    Plot(Item('Scavenge', scavenge_scope, lc = 'green'),
+         Item('Marking', 'mark', lc = 'purple'),
+         Item('Sweep', 'sweep', lc = 'blue'),
+         Item('External', 'external', lc = '#489D43'),
+         Item('Other', other_scope, lc = '#ADD8E6'),
+         Item('External', 'external', lc = '#D3D3D3'))
+  ],
+
+  [
+    Plot(Item('Mutator', real_mutator, lc = 'black', style = 'lines'))
+  ],
+  [
+    Set('style histogram rowstacked'),
+    Set('style data histograms'),
+    Plot(Item('Heap Size (before GC)', 'total_size_before', x1y2,
+              fs = 'solid 0.4 noborder',
+              lc = 'green'),
+         Item('Total holes (after GC)', 'holes_size_before', x1y2,
+              fs = 'solid 0.4 noborder',
+              lc = 'red'),
+         Item('GC Time', ['i', 'pause'], style = 'lines', lc = 'red'))
+  ],
+  [
+    Set('style histogram rowstacked'),
+    Set('style data histograms'),
+    Plot(Item('Heap Size (after GC)', 'total_size_after', x1y2,
+              fs = 'solid 0.4 noborder',
+              lc = 'green'),
+         Item('Total holes (after GC)', 'holes_size_after', x1y2,
+              fs = 'solid 0.4 noborder',
+              lc = 'red'),
+         Item('GC Time', ['i', 'pause'],
+              style = 'lines',
+              lc = 'red'))
+  ],
+  [
+    Set('style fill solid 0.5 noborder'),
+    Set('style data histograms'),
+    Plot(Item('Allocated', 'allocated'),
+         Item('Reclaimed', reclaimed_bytes),
+         Item('Promoted', 'promoted', style = 'lines', lc = 'black'))
+  ],
+]
+
+def freduce(f, field, trace, init):
+  return reduce(lambda t,r: f(t, r[field]), trace, init)
+
+def calc_total(trace, field):
+  return freduce(lambda t,v: t + long(v), field, trace, long(0))
+
+def calc_max(trace, field):
+  return freduce(lambda t,r: max(t, r), field, trace, 0)
+
+def count_nonzero(trace, field):
+  return freduce(lambda t,r: t if r == 0 else t + 1, field, trace, 0)
+
+
+def process_trace(filename):
+  trace = gc_nvp_common.parse_gc_trace(filename)
+
+  marksweeps = filter(lambda r: r['gc'] == 'ms', trace)
+  scavenges = filter(lambda r: r['gc'] == 's', trace)
+  globalgcs = filter(lambda r: r['gc'] != 's', trace)
+
+
+  charts = plot_all(plots, trace, filename)
+
+  def stats(out, prefix, trace, field):
+    n = len(trace)
+    total = calc_total(trace, field)
+    max = calc_max(trace, field)
+    if n > 0:
+      avg = total / n
+    else:
+      avg = 0
+    if n > 1:
+      dev = math.sqrt(freduce(lambda t,r: t + (r - avg) ** 2, field, trace, 0) /
+                      (n - 1))
+    else:
+      dev = 0
+
+    out.write('<tr><td>%s</td><td>%d</td><td>%d</td>'
+              '<td>%d</td><td>%d [dev %f]</td></tr>' %
+              (prefix, n, total, max, avg, dev))
+
+  def HumanReadable(size):
+    suffixes = ['bytes', 'kB', 'MB', 'GB']
+    power = 1
+    for i in range(len(suffixes)):
+      if size < power*1024:
+        return "%.1f" % (float(size) / power) + " " + suffixes[i]
+      power *= 1024
+
+  def throughput(name, trace):
+    total_live_after = calc_total(trace, 'total_size_after')
+    total_live_before = calc_total(trace, 'total_size_before')
+    total_gc = calc_total(trace, 'pause')
+    if total_gc == 0:
+      return
+    out.write('GC %s Throughput (after): %s / %s ms = %s/ms<br/>' %
+              (name,
+               HumanReadable(total_live_after),
+               total_gc,
+               HumanReadable(total_live_after / total_gc)))
+    out.write('GC %s Throughput (before): %s / %s ms = %s/ms<br/>' %
+              (name,
+               HumanReadable(total_live_before),
+               total_gc,
+               HumanReadable(total_live_before / total_gc)))
+
+
+  with open(filename + '.html', 'w') as out:
+    out.write('<html><body>')
+    out.write('<table>')
+    out.write('<tr><td>Phase</td><td>Count</td><td>Time (ms)</td>')
+    out.write('<td>Max</td><td>Avg</td></tr>')
+    stats(out, 'Total in GC', trace, 'pause')
+    stats(out, 'Scavenge', scavenges, 'pause')
+    stats(out, 'MarkSweep', marksweeps, 'pause')
+    stats(out, 'Mark', filter(lambda r: r['mark'] != 0, trace), 'mark')
+    stats(out, 'Sweep', filter(lambda r: r['sweep'] != 0, trace), 'sweep')
+    stats(out,
+          'External',
+          filter(lambda r: r['external'] != 0, trace),
+          'external')
+    out.write('</table>')
+    throughput('TOTAL', trace)
+    throughput('MS', marksweeps)
+    throughput('OLDSPACE', globalgcs)
+    out.write('<br/>')
+    for chart in charts:
+      out.write('<img src="%s">' % chart)
+    out.write('</body></html>')
+
+  print("%s generated." % (filename + '.html'))
+
+if len(sys.argv) != 2:
+  print("Usage: %s <GC-trace-filename>" % sys.argv[0])
+  sys.exit(1)
+
+process_trace(sys.argv[1])
diff --git a/src/third_party/v8/tools/gc_nvp_common.py b/src/third_party/v8/tools/gc_nvp_common.py
new file mode 100644
index 0000000..3b51731
--- /dev/null
+++ b/src/third_party/v8/tools/gc_nvp_common.py
@@ -0,0 +1,32 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+#
+# Common code for parsing --trace-gc-nvp output.
+#
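+# As an informal example (the field names are only illustrative), split_nvp()
+# turns a line like
+#   pause=12.3 gc=s reduce_memory=0
+# into {'pause': 12.3, 'gc': 's', 'reduce_memory': 0.0}.
+#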
+
+
+from __future__ import with_statement
+import re
+
+def split_nvp(s):
+  t = {}
+  for (name, value) in re.findall(r"([._\w]+)=([-\w]+(?:\.[0-9]+)?)", s):
+    try:
+      t[name] = float(value)
+    except ValueError:
+      t[name] = value
+
+  return t
+
+
+def parse_gc_trace(input):
+  trace = []
+  with open(input) as f:
+    for line in f:
+      info = split_nvp(line)
+      if info and 'pause' in info and info['pause'] > 0:
+        info['i'] = len(trace)
+        trace.append(info)
+  return trace
diff --git a/src/third_party/v8/tools/gcmole/BUILD.gn b/src/third_party/v8/tools/gcmole/BUILD.gn
new file mode 100644
index 0000000..9767acb
--- /dev/null
+++ b/src/third_party/v8/tools/gcmole/BUILD.gn
@@ -0,0 +1,48 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("../../gni/v8.gni")
+
+group("v8_run_gcmole") {
+  testonly = true
+
+  data = [
+    "gccause.lua",
+    "GCMOLE.gn",
+    "gcmole.lua",
+    "gcmole-test.cc",
+    "gcmole-tools/",
+    "parallel.py",
+    "run-gcmole.py",
+    "suspects.whitelist",
+    "ignored_files",
+    "test-expectations.txt",
+
+    # The following contains all relevant source and build files.
+    "../debug_helper/debug-helper.h",
+    "../../BUILD.gn",
+    "../../base/",
+    "../../include/",
+    "../../src/",
+    "../../test/cctest/",
+    "../../test/common/",
+    "../../testing/gtest/include/gtest/gtest_prod.h",
+    "../../third_party/googletest/src/googletest/include/gtest/gtest_prod.h",
+    "../../third_party/icu/source/",
+    "../../third_party/wasm-api/wasm.h",
+    "../../third_party/wasm-api/wasm.hh",
+    "../../third_party/zlib/",
+    "../../third_party/inspector_protocol/",
+    "$target_gen_dir/../../",
+    "$target_gen_dir/../../torque-generated/",
+  ]
+
+  deps = [ "../../:run_torque" ]
+
+  if (v8_gcmole) {
+    # This assumes gcmole tools have been fetched by a hook
+    # into v8/tools/gcmole/gcmole_tools.
+    data += [ "gcmole-tools/" ]
+  }
+}
diff --git a/src/third_party/v8/tools/gcmole/GCMOLE.gn b/src/third_party/v8/tools/gcmole/GCMOLE.gn
new file mode 100644
index 0000000..62da0a0
--- /dev/null
+++ b/src/third_party/v8/tools/gcmole/GCMOLE.gn
@@ -0,0 +1,6 @@
+action("gcmole") {
+  sources = [
+    ### gcmole(all) ###
+    "tools/gcmole/gcmole-test.cc",
+  ]
+}
diff --git a/src/third_party/v8/tools/gcmole/Makefile b/src/third_party/v8/tools/gcmole/Makefile
new file mode 100644
index 0000000..e1bde68
--- /dev/null
+++ b/src/third_party/v8/tools/gcmole/Makefile
@@ -0,0 +1,46 @@
+# Copyright 2011 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This is the Makefile for the clang plugin part of the gcmole tool. See README.
+
+LLVM_SRC_INCLUDE:=$(LLVM_SRC_ROOT)/include
+LLVM_BUILD_INCLUDE:=$(BUILD_ROOT)/include
+CLANG_SRC_INCLUDE:=$(CLANG_SRC_ROOT)/include
+CLANG_BUILD_INCLUDE:=$(BUILD_ROOT)/tools/clang/include
+
+libgcmole.so: gcmole.cc
+	$(CXX) -I$(LLVM_BUILD_INCLUDE) -I$(LLVM_SRC_INCLUDE)                  \
+	-I$(CLANG_BUILD_INCLUDE) -I$(CLANG_SRC_INCLUDE) -I. -D_DEBUG          \
+	-D_GNU_SOURCE -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS         \
+	-D__STDC_LIMIT_MACROS -O3 -fomit-frame-pointer -fno-exceptions        \
+	-fno-rtti -fPIC -Woverloaded-virtual -Wcast-qual -fno-strict-aliasing \
+	-pedantic -Wno-long-long -Wall -W -Wno-unused-parameter               \
+	-Wwrite-strings -static-libstdc++ -std=c++0x -shared -o libgcmole.so  \
+	gcmole.cc
+
+clean:
+	$(RM) libgcmole.so
diff --git a/src/third_party/v8/tools/gcmole/README b/src/third_party/v8/tools/gcmole/README
new file mode 100644
index 0000000..46b4717
--- /dev/null
+++ b/src/third_party/v8/tools/gcmole/README
@@ -0,0 +1,132 @@
+DESCRIPTION -------------------------------------------------------------------
+
+gcmole is a simple static analysis tool used to find possible evaluation order 
+dependent GC-unsafe places in the V8 codebase and "stale" pointers to the heap
+(ones whose addresses got invalidated by the GC).
+
+For example the following code is GC-unsafe:
+
+    Handle<Object> Foo();  // Assume Foo can trigger a GC.
+    void Bar(Object, Object);
+
+    Handle<Object> baz;
+    baz->Qux(*Foo());  // (a)
+    Bar(*Foo(), *baz); // (b)
+
+In both cases (a) and (b) the compiler is free to evaluate call arguments
+(that includes the receiver) in any order. That means it can dereference baz
+before calling Foo and save a raw pointer to a heap object in a register or
+on the stack.
+
+In terms of the AST analysis that gcmole does, it warns about places in the
+code which result in 2 subtrees whose order of execution is undefined by C++,
+where one causes a GC and the other dereferences a Handle to a raw Object
+(or one of its subclasses).
+
+The following code triggers a stale variable warning (assuming that the Foo
+function was detected as potentially allocating, as in the previous example):
+
+    JSObject raw_obj = ...;
+    Foo();
+    raw_obj.Print();
+
+Since Foo can trigger a GC, it might have moved the raw_obj. The solution is
+simply to store it as a Handle.
+
+PREREQUISITES -----------------------------------------------------------------
+
+(1) Install Lua 5.1
+
+    $ sudo apt-get install lua5.1
+
+(2) Get LLVM 8.0 and Clang 8.0 sources and build them.
+
+    Follow the instructions on http://clang.llvm.org/get_started.html.
+
+    Make sure to pass -DCMAKE_BUILD_TYPE=Release to cmake to get a Release
+    build instead of a Debug one.
+
+(3) Build gcmole Clang plugin (libgcmole.so)
+
+    In the tools/gcmole directory execute the following command:
+
+    $ BUILD_ROOT=<path> LLVM_SRC_ROOT=<path> CLANG_SRC_ROOT=<path> make
+
+(*) Note that steps (2) and (3) can also be achieved by just using the included
+    bootstrapping script in this directory:
+
+    $ ./tools/gcmole/bootstrap.sh
+
+    This will use "third_party/llvm+clang-build" as a build directory and
+    check out the required sources in the "third_party" directory.
+
+USING GCMOLE ------------------------------------------------------------------
+
+gcmole consists of a driver script written in Lua and a Clang plugin that does
+C++ AST processing. The plugin (libgcmole.so) is expected to be in the same
+folder as the driver (gcmole.lua).
+
+To start the analysis, cd into the root of the v8 checkout and execute the
+following command:
+
+CLANG_BIN=<path-to-clang-bin-folder> lua tools/gcmole/gcmole.lua [<arch>]
+
+where arch should be one of the architectures supported by V8 (arm, ia32, x64).
+
+The analysis is performed in 2 stages:
+
+- in the first stage the driver parses all files and builds a global callgraph
+approximation to find all functions that might potentially cause a GC; the
+list of these functions is written into the gcsuspects file.
+
+- in the second stage the driver parses all files again and locates all
+callsites that might be GC-unsafe based on the list of functions causing GC.
+Such places are marked with a "Possible problem with evaluation order."
+warning. Messages "Failed to resolve v8::internal::Object" are benign and
+can be ignored.
+
+If any errors were found, the driver exits with a non-zero status.
+
+TESTING -----------------------------------------------------------------------
+
+Tests are automatically run by the main lua runner. Expectations are in
+test-expectations.txt and need to be updated whenever the sources of the tests
+in gcmole-test.cc are modified (line numbers also count).
+
+PACKAGING ---------------------------------------------------------------------
+
+gcmole is deployed on V8's buildbot infrastructure to run it as part of the
+continuous integration. A pre-built package of gcmole together with Clang is
+hosted on Google Cloud Storage for this purpose. To update this package to a
+newer version, use the provided packaging script:
+
+    $ ./tools/gcmole/package.sh
+
+This will create a new "tools/gcmole/gcmole-tools.tar.gz" package with the
+corresponding SHA1 sum suitable to be used for this purpose. It assumes that
+Clang was built in "third_party/llvm+clang-build" (e.g. by the bootstrapping
+script "bootstrap.sh" mentioned above).
+
+TROUBLESHOOTING ---------------------------------------------------------------
+
+gcmole is tightly coupled with the AST structure that Clang produces.
+Therefore, when upgrading to a newer Clang version, it might start producing
+bogus output or stop outputting warnings altogether. In that case, one might
+start the debugging process by checking whether a new AST node type was
+introduced that is currently not supported by gcmole. Insert the following
+code at the end of the FunctionAnalyzer::VisitExpr method to see the
+unsupported AST class(es) and the source position which generates them:
+
+    if (expr) {
+      clang::Stmt::StmtClass stmtClass = expr->getStmtClass();
+      d_.Report(clang::FullSourceLoc(expr->getExprLoc(), sm_),
+        d_.getCustomDiagID(clang::DiagnosticsEngine::Remark, "%0")) << stmtClass;
+    }
+
+For instance, gcmole currently doesn't support AtomicExprClass statements
+introduced for atomic operations.
+
+A convenient way to observe the AST generated by Clang is to pass the following
+flags when invoking clang++
+
+    -Xclang -ast-dump -fsyntax-only
diff --git a/src/third_party/v8/tools/gcmole/bootstrap.sh b/src/third_party/v8/tools/gcmole/bootstrap.sh
new file mode 100755
index 0000000..05ab1cb
--- /dev/null
+++ b/src/third_party/v8/tools/gcmole/bootstrap.sh
@@ -0,0 +1,130 @@
+#!/usr/bin/env bash
+
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This script will build libgcmole.so as well as a corresponding recent
+# version of Clang and LLVM. Clang will be built with the locally installed
+# compiler and statically linked against the local libstdc++ so that the
+# resulting binary is more easily transferable between different environments.
+
+CLANG_RELEASE=8.0
+
+THIS_DIR="$(readlink -f "$(dirname "${0}")")"
+LLVM_DIR="${THIS_DIR}/../../third_party/llvm"
+CLANG_DIR="${THIS_DIR}/../../third_party/clang"
+BUILD_DIR="${THIS_DIR}/../../third_party/llvm+clang-build"
+
+LLVM_REPO_URL=${LLVM_URL:-https://llvm.org/svn/llvm-project}
+
+# Die if any command dies.
+set -e
+
+OS="$(uname -s)"
+
+# Xcode and clang don't get along when predictive compilation is enabled.
+# http://crbug.com/96315
+if [[ "${OS}" = "Darwin" ]] && xcodebuild -version | grep -q 'Xcode 3.2' ; then
+  XCONF=com.apple.Xcode
+  if [[ "${GYP_GENERATORS}" != "make" ]] && \
+     [ "$(defaults read "${XCONF}" EnablePredictiveCompilation)" != "0" ]; then
+    echo
+    echo "          HEARKEN!"
+    echo "You're using Xcode3 and you have 'Predictive Compilation' enabled."
+    echo "This does not work well with clang (http://crbug.com/96315)."
+    echo "Disable it in Preferences->Building (lower right), or run"
+    echo "    defaults write ${XCONF} EnablePredictiveCompilation -boolean NO"
+    echo "while Xcode is not running."
+    echo
+  fi
+
+  SUB_VERSION=$(xcodebuild -version | sed -Ene 's/Xcode 3\.2\.([0-9]+)/\1/p')
+  if [[ "${SUB_VERSION}" < 6 ]]; then
+    echo
+    echo "          YOUR LD IS BUGGY!"
+    echo "Please upgrade Xcode to at least 3.2.6."
+    echo
+  fi
+fi
+
+echo Getting LLVM release "${CLANG_RELEASE}" in "${LLVM_DIR}"
+if ! svn co --force \
+    "${LLVM_REPO_URL}/llvm/branches/release_${CLANG_RELEASE/./}" \
+    "${LLVM_DIR}"; then
+  echo Checkout failed, retrying
+  rm -rf "${LLVM_DIR}"
+  svn co --force \
+      "${LLVM_REPO_URL}/llvm/branches/release_${CLANG_RELEASE/./}" \
+      "${LLVM_DIR}"
+fi
+
+echo Getting clang release "${CLANG_RELEASE}" in "${CLANG_DIR}"
+svn co --force \
+    "${LLVM_REPO_URL}/cfe/branches/release_${CLANG_RELEASE/./}" \
+    "${CLANG_DIR}"
+
+# Echo all commands
+set -x
+
+NUM_JOBS=3
+if [[ "${OS}" = "Linux" ]]; then
+  NUM_JOBS="$(grep -c "^processor" /proc/cpuinfo)"
+elif [ "${OS}" = "Darwin" ]; then
+  NUM_JOBS="$(sysctl -n hw.ncpu)"
+fi
+
+# Build clang.
+if [ ! -e "${BUILD_DIR}" ]; then
+  mkdir "${BUILD_DIR}"
+fi
+cd "${BUILD_DIR}"
+cmake -DCMAKE_CXX_FLAGS="-static-libstdc++" -DLLVM_ENABLE_TERMINFO=OFF \
+    -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_PROJECTS=clang "${LLVM_DIR}"
+MACOSX_DEPLOYMENT_TARGET=10.5 make -j"${NUM_JOBS}"
+
+# Strip the clang binary.
+STRIP_FLAGS=
+if [ "${OS}" = "Darwin" ]; then
+  # See http://crbug.com/256342
+  STRIP_FLAGS=-x
+fi
+strip ${STRIP_FLAGS} bin/clang
+cd -
+
+# Build libgcmole.so
+make -C "${THIS_DIR}" clean
+make -C "${THIS_DIR}" LLVM_SRC_ROOT="${LLVM_DIR}" \
+    CLANG_SRC_ROOT="${CLANG_DIR}" BUILD_ROOT="${BUILD_DIR}" libgcmole.so
+
+set +x
+
+echo
+echo You can now run gcmole using this command:
+echo
+echo CLANG_BIN=\"third_party/llvm+clang-build/bin\" lua tools/gcmole/gcmole.lua
+echo
diff --git a/src/third_party/v8/tools/gcmole/gccause.lua b/src/third_party/v8/tools/gcmole/gccause.lua
new file mode 100644
index 0000000..b989176
--- /dev/null
+++ b/src/third_party/v8/tools/gcmole/gccause.lua
@@ -0,0 +1,62 @@
+-- Copyright 2011 the V8 project authors. All rights reserved.
+-- Redistribution and use in source and binary forms, with or without
+-- modification, are permitted provided that the following conditions are
+-- met:
+--
+--     * Redistributions of source code must retain the above copyright
+--       notice, this list of conditions and the following disclaimer.
+--     * Redistributions in binary form must reproduce the above
+--       copyright notice, this list of conditions and the following
+--       disclaimer in the documentation and/or other materials provided
+--       with the distribution.
+--     * Neither the name of Google Inc. nor the names of its
+--       contributors may be used to endorse or promote products derived
+--       from this software without specific prior written permission.
+--
+-- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+-- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+-- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+-- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+-- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+-- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+-- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+-- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+-- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+-- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+-- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+-- This is an auxiliary tool that reads the gccauses file generated by
+-- gcmole.lua and prints a tree of the calls that can potentially cause a GC
+-- inside a given function.
+--
+-- Usage: lua tools/gcmole/gccause.lua <function-name-pattern>
+--
+
+assert(loadfile "gccauses")()
+
+local P = ...
+
+local T = {}
+
+local function TrackCause(name, lvl)
+   io.write(("  "):rep(lvl or 0), name, "\n")
+   if GC[name] then
+      local causes = GC[name]
+      for i = 1, #causes do
+	 local f = causes[i]
+	 if not T[f] then
+	    T[f] = true
+	    TrackCause(f, (lvl or 0) + 1)
+	 end
+
+	 if f == '<GC>' then break end
+      end
+   end
+end
+
+for name, _ in pairs(GC) do
+   if name:match(P) then
+      T = {}
+      TrackCause(name)
+   end
+end
diff --git a/src/third_party/v8/tools/gcmole/gcmole-test.cc b/src/third_party/v8/tools/gcmole/gcmole-test.cc
new file mode 100644
index 0000000..8512d7a
--- /dev/null
+++ b/src/third_party/v8/tools/gcmole/gcmole-test.cc
@@ -0,0 +1,233 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
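+// Test cases exercised by the gcmole plugin. The "Should cause warning" and
+// "Shouldn't cause warning" comments below document the diagnostics gcmole
+// is expected (or not expected) to emit for the statement that follows.
+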
+#include "src/execution/isolate.h"
+#include "src/handles/handles-inl.h"
+#include "src/handles/handles.h"
+#include "src/objects/foreign-inl.h"
+#include "src/objects/managed.h"
+#include "src/objects/maybe-object.h"
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+// ------- Test simple argument evaluation order problems ---------
+
+Handle<Object> CauseGC(Handle<Object> obj, Isolate* isolate) {
+  isolate->heap()->CollectGarbage(OLD_SPACE, GarbageCollectionReason::kTesting);
+
+  return obj;
+}
+
+Object CauseGCRaw(Object obj, Isolate* isolate) {
+  isolate->heap()->CollectGarbage(OLD_SPACE, GarbageCollectionReason::kTesting);
+
+  return obj;
+}
+
+Managed<Smi> CauseGCManaged(int i, Isolate* isolate) {
+  isolate->heap()->CollectGarbage(OLD_SPACE, GarbageCollectionReason::kTesting);
+
+  return Managed<Smi>::cast(Smi::FromInt(i));
+}
+
+void TwoArgumentsFunction(Object a, Object b) {
+  a.Print();
+  b.Print();
+}
+
+void TestTwoArguments(Isolate* isolate) {
+  Handle<JSObject> obj1 = isolate->factory()->NewJSObjectWithNullProto();
+  Handle<JSObject> obj2 = isolate->factory()->NewJSObjectWithNullProto();
+  // Should cause warning.
+  TwoArgumentsFunction(*CauseGC(obj1, isolate), *CauseGC(obj2, isolate));
+}
+
+void TwoSizeTArgumentsFunction(size_t a, size_t b) {
+  USE(a);
+  USE(b);
+}
+
+void TestTwoSizeTArguments(Isolate* isolate) {
+  Handle<JSObject> obj1 = isolate->factory()->NewJSObjectWithNullProto();
+  Handle<JSObject> obj2 = isolate->factory()->NewJSObjectWithNullProto();
+  // Should cause warning.
+  TwoSizeTArgumentsFunction(sizeof(*CauseGC(obj1, isolate)),
+                            sizeof(*CauseGC(obj2, isolate)));
+}
+
+// --------- Test problems with method arguments ----------
+
+class SomeObject : public Object {
+ public:
+  void Method(Object a) { a.Print(); }
+
+  SomeObject& operator=(const Object& b) {
+    this->Print();
+    return *this;
+  }
+
+  DECL_CAST(SomeObject)
+
+  OBJECT_CONSTRUCTORS(SomeObject, Object);
+};
+
+void TestMethodCall(Isolate* isolate) {
+  SomeObject obj;
+  Handle<SomeObject> so = handle(obj, isolate);
+  Handle<JSObject> obj1 = isolate->factory()->NewJSObjectWithNullProto();
+  // Should cause warning.
+  so->Method(*CauseGC(obj1, isolate));
+  // Should cause warning.
+  so->Method(CauseGCRaw(*obj1, isolate));
+}
+
+void TestOperatorCall(Isolate* isolate) {
+  SomeObject obj;
+  Handle<JSObject> obj1 = isolate->factory()->NewJSObjectWithNullProto();
+  // Should not cause warning.
+  obj = *CauseGC(obj1, isolate);
+}
+
+// --------- Test for templated sub-classes of Object ----------
+
+void TestFollowingTemplates(Isolate* isolate) {
+  // Should cause warning.
+  CauseGCManaged(42, isolate);
+}
+
+// --------- Test for correctly resolving virtual methods ----------
+
+class BaseObject {
+ public:
+  virtual Handle<Object> VirtualCauseGC(Handle<Object> obj, Isolate* isolate) {
+    return obj;
+  }
+};
+
+class DerivedObject : public BaseObject {
+ public:
+  Handle<Object> VirtualCauseGC(Handle<Object> obj, Isolate* isolate) override {
+    isolate->heap()->CollectGarbage(OLD_SPACE,
+                                    GarbageCollectionReason::kTesting);
+
+    return obj;
+  }
+};
+
+void TestFollowingVirtualFunctions(Isolate* isolate) {
+  DerivedObject derived;
+  BaseObject* base = &derived;
+  Handle<JSObject> obj1 = isolate->factory()->NewJSObjectWithNullProto();
+
+  SomeObject so;
+  Handle<SomeObject> so_handle = handle(so, isolate);
+  // Should cause warning.
+  so_handle->Method(*derived.VirtualCauseGC(obj1, isolate));
+  // Should cause warning.
+  so_handle->Method(*base->VirtualCauseGC(obj1, isolate));
+}
+
+// --------- Test for correctly resolving static methods ----------
+
+class SomeClass {
+ public:
+  static Handle<Object> StaticCauseGC(Handle<Object> obj, Isolate* isolate) {
+    isolate->heap()->CollectGarbage(OLD_SPACE,
+                                    GarbageCollectionReason::kTesting);
+
+    return obj;
+  }
+};
+
+void TestFollowingStaticFunctions(Isolate* isolate) {
+  SomeObject so;
+  Handle<SomeObject> so_handle = handle(so, isolate);
+
+  Handle<JSObject> obj1 = isolate->factory()->NewJSObjectWithNullProto();
+  // Should cause warning.
+  so_handle->Method(*SomeClass::StaticCauseGC(obj1, isolate));
+}
+
+// --------- Test basic dead variable analysis ----------
+
+void TestDeadVarAnalysis(Isolate* isolate) {
+  JSObject raw_obj = *isolate->factory()->NewJSObjectWithNullProto();
+  CauseGCRaw(raw_obj, isolate);
+
+  // Should cause warning.
+  raw_obj.Print();
+}
+
+void TestGuardedDeadVarAnalysis(Isolate* isolate) {
+  JSObject raw_obj = *isolate->factory()->NewJSObjectWithNullProto();
+
+  // Note: having DisallowHeapAllocation in the same function as CauseGC
+  // normally doesn't make sense, but we want to test whether the guards
+  // are recognized by GCMole.
+  DisallowHeapAllocation no_gc;
+  CauseGCRaw(raw_obj, isolate);
+
+  // Shouldn't cause warning.
+  raw_obj.Print();
+}
+
+void TestGuardedDeadVarAnalysisNotOnStack(Isolate* isolate) {
+  JSObject raw_obj = *isolate->factory()->NewJSObjectWithNullProto();
+
+  // {DisallowHeapAccess} has a {DisallowHeapAllocation} embedded as a member
+  // field, so both are treated equally by gcmole.
+  DisallowHeapAccess no_gc;
+  CauseGCRaw(raw_obj, isolate);
+
+  // Shouldn't cause warning.
+  raw_obj.Print();
+}
+
+void TestGuardedDeadVarAnalysisNested(JSObject raw_obj, Isolate* isolate) {
+  CauseGCRaw(raw_obj, isolate);
+
+  // Should cause warning.
+  raw_obj.Print();
+}
+
+void TestGuardedDeadVarAnalysisCaller(Isolate* isolate) {
+  DisallowHeapAccess no_gc;
+  JSObject raw_obj = *isolate->factory()->NewJSObjectWithNullProto();
+
+  TestGuardedDeadVarAnalysisNested(raw_obj, isolate);
+
+  // Shouldn't cause warning.
+  raw_obj.Print();
+}
+
+JSObject GuardedAllocation(Isolate* isolate) {
+  DisallowHeapAllocation no_gc;
+  return *isolate->factory()->NewJSObjectWithNullProto();
+}
+
+void TestNestedDeadVarAnalysis(Isolate* isolate) {
+  JSObject raw_obj = GuardedAllocation(isolate);
+  CauseGCRaw(raw_obj, isolate);
+
+  // Should cause warning.
+  raw_obj.Print();
+}
+
+// Test that putting a guard in the middle of the function doesn't
+// mistakenly cover the whole scope of the raw variable.
+void TestGuardedDeadVarAnalysisMidFunction(Isolate* isolate) {
+  JSObject raw_obj = *isolate->factory()->NewJSObjectWithNullProto();
+
+  CauseGCRaw(raw_obj, isolate);
+
+  // Guarding the rest of the function from triggering a GC.
+  DisallowHeapAllocation no_gc;
+  // Should cause warning.
+  raw_obj.Print();
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/third_party/v8/tools/gcmole/gcmole-tools.tar.gz.sha1 b/src/third_party/v8/tools/gcmole/gcmole-tools.tar.gz.sha1
new file mode 100644
index 0000000..84b3657
--- /dev/null
+++ b/src/third_party/v8/tools/gcmole/gcmole-tools.tar.gz.sha1
@@ -0,0 +1 @@
+7e31d257a711b1a77823633e4f19152c3e0718f4
diff --git a/src/third_party/v8/tools/gcmole/gcmole.cc b/src/third_party/v8/tools/gcmole/gcmole.cc
new file mode 100644
index 0000000..7b32f6c
--- /dev/null
+++ b/src/third_party/v8/tools/gcmole/gcmole.cc
@@ -0,0 +1,1699 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This is the clang plugin used by the gcmole tool. See README for more
+// details.
+
+#include "clang/AST/AST.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/Mangle.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Frontend/FrontendPluginRegistry.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include <bitset>
+#include <fstream>
+#include <iostream>
+#include <map>
+#include <set>
+#include <stack>
+
+namespace {
+
+bool g_tracing_enabled = false;
+bool g_dead_vars_analysis = false;
+
+#define TRACE(str)                   \
+  do {                               \
+    if (g_tracing_enabled) {         \
+      std::cout << str << std::endl; \
+    }                                \
+  } while (false)
+
+#define TRACE_LLVM_TYPE(str, type)                                \
+  do {                                                            \
+    if (g_tracing_enabled) {                                      \
+      std::cout << str << " " << type.getAsString() << std::endl; \
+    }                                                             \
+  } while (false)
+
+// Note: The following is used when tracing --dead-vars
+// to provide extra info for the GC suspect.
+#define TRACE_LLVM_DECL(str, decl)                   \
+  do {                                               \
+    if (g_tracing_enabled && g_dead_vars_analysis) { \
+      std::cout << str << std::endl;                 \
+      decl->dump();                                  \
+    }                                                \
+  } while (false)
+
+typedef std::string MangledName;
+typedef std::set<MangledName> CalleesSet;
+typedef std::map<MangledName, MangledName> CalleesMap;
+
+static bool GetMangledName(clang::MangleContext* ctx,
+                           const clang::NamedDecl* decl,
+                           MangledName* result) {
+  if (!llvm::isa<clang::CXXConstructorDecl>(decl) &&
+      !llvm::isa<clang::CXXDestructorDecl>(decl)) {
+    llvm::SmallVector<char, 512> output;
+    llvm::raw_svector_ostream out(output);
+    ctx->mangleName(decl, out);
+    *result = out.str().str();
+    return true;
+  }
+
+  return false;
+}
+
+
+static bool InV8Namespace(const clang::NamedDecl* decl) {
+  return decl->getQualifiedNameAsString().compare(0, 4, "v8::") == 0;
+}
+
+
+static std::string EXTERNAL("EXTERNAL");
+static std::string STATE_TAG("enum v8::internal::StateTag");
+
+static bool IsExternalVMState(const clang::ValueDecl* var) {
+  const clang::EnumConstantDecl* enum_constant =
+      llvm::dyn_cast<clang::EnumConstantDecl>(var);
+  if (enum_constant != NULL && enum_constant->getNameAsString() == EXTERNAL) {
+    clang::QualType type = enum_constant->getType();
+    return (type.getAsString() == STATE_TAG);
+  }
+
+  return false;
+}
+
+
+struct Resolver {
+  explicit Resolver(clang::ASTContext& ctx)
+      : ctx_(ctx), decl_ctx_(ctx.getTranslationUnitDecl()) {
+  }
+
+  Resolver(clang::ASTContext& ctx, clang::DeclContext* decl_ctx)
+      : ctx_(ctx), decl_ctx_(decl_ctx) {
+  }
+
+  clang::DeclarationName ResolveName(const char* n) {
+    clang::IdentifierInfo* ident = &ctx_.Idents.get(n);
+    return ctx_.DeclarationNames.getIdentifier(ident);
+  }
+
+  Resolver ResolveNamespace(const char* n) {
+    return Resolver(ctx_, Resolve<clang::NamespaceDecl>(n));
+  }
+
+  template<typename T>
+  T* Resolve(const char* n) {
+    if (decl_ctx_ == NULL) return NULL;
+
+    clang::DeclContext::lookup_result result =
+        decl_ctx_->lookup(ResolveName(n));
+
+    clang::DeclContext::lookup_iterator end = result.end();
+    for (clang::DeclContext::lookup_iterator i = result.begin(); i != end;
+         i++) {
+      if (llvm::isa<T>(*i)) {
+        return llvm::cast<T>(*i);
+      } else {
+        llvm::errs() << "Didn't match declaration template against "
+                     << (*i)->getNameAsString() << "\n";
+      }
+    }
+
+    return NULL;
+  }
+
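+  // Resolves a type alias that names a template specialization (e.g.
+  // DisallowHeapAllocation) down to the specialization's CXXRecordDecl.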
+  clang::CXXRecordDecl* ResolveTemplate(const char* n) {
+    clang::NamedDecl* initial_template = Resolve<clang::NamedDecl>(n);
+    if (!initial_template) return NULL;
+
+    clang::NamedDecl* underlying_template =
+        initial_template->getUnderlyingDecl();
+    if (!underlying_template) {
+      llvm::errs() << "Couldn't resolve underlying template\n";
+      return NULL;
+    }
+    const clang::TypeAliasDecl* type_alias_decl =
+        llvm::dyn_cast_or_null<clang::TypeAliasDecl>(underlying_template);
+    if (!type_alias_decl) {
+      llvm::errs() << "Couldn't resolve TypeAliasDecl\n";
+      return NULL;
+    }
+    const clang::Type* type = type_alias_decl->getTypeForDecl();
+    if (!type) {
+      llvm::errs() << "Couldn't resolve TypeAliasDecl to Type\n";
+      return NULL;
+    }
+    const clang::TypedefType* typedef_type =
+        llvm::dyn_cast_or_null<clang::TypedefType>(type);
+    if (!typedef_type) {
+      llvm::errs() << "Couldn't resolve TypedefType\n";
+      return NULL;
+    }
+    const clang::TypedefNameDecl* typedef_name_decl = typedef_type->getDecl();
+    if (!typedef_name_decl) {
+      llvm::errs() << "Couldn't resolve TypedefType to TypedefNameDecl\n";
+      return NULL;
+    }
+
+    clang::QualType underlying_type = typedef_name_decl->getUnderlyingType();
+    if (!llvm::isa<clang::TemplateSpecializationType>(underlying_type)) {
+      llvm::errs() << "Couldn't resolve TemplateSpecializationType\n";
+      return NULL;
+    }
+
+    const clang::TemplateSpecializationType* templ_specialization_type =
+        llvm::cast<clang::TemplateSpecializationType>(underlying_type);
+    if (!llvm::isa<clang::RecordType>(templ_specialization_type->desugar())) {
+      llvm::errs() << "Couldn't resolve RecordType\n";
+      return NULL;
+    }
+
+    const clang::RecordType* record_type =
+        llvm::cast<clang::RecordType>(templ_specialization_type->desugar());
+    clang::CXXRecordDecl* record_decl =
+        llvm::dyn_cast_or_null<clang::CXXRecordDecl>(record_type->getDecl());
+    if (!record_decl) {
+      llvm::errs() << "Couldn't resolve CXXRecordDecl\n";
+      return NULL;
+    }
+    return record_decl;
+  }
+
+ private:
+  clang::ASTContext& ctx_;
+  clang::DeclContext* decl_ctx_;
+};
+
+
+class CalleesPrinter : public clang::RecursiveASTVisitor<CalleesPrinter> {
+ public:
+  explicit CalleesPrinter(clang::MangleContext* ctx) : ctx_(ctx) {
+  }
+
+  virtual bool VisitCallExpr(clang::CallExpr* expr) {
+    const clang::FunctionDecl* callee = expr->getDirectCallee();
+    if (callee != NULL) AnalyzeFunction(callee);
+    return true;
+  }
+
+  virtual bool VisitDeclRefExpr(clang::DeclRefExpr* expr) {
+    // If the function mentions the EXTERNAL VMState, add an artificial
+    // garbage collection mark.
+    if (IsExternalVMState(expr->getDecl()))
+      AddCallee("CollectGarbage", "CollectGarbage");
+    return true;
+  }
+
+  void AnalyzeFunction(const clang::FunctionDecl* f) {
+    MangledName name;
+    if (InV8Namespace(f) && GetMangledName(ctx_, f, &name)) {
+      const std::string& function = f->getNameAsString();
+      AddCallee(name, function);
+
+      const clang::FunctionDecl* body = NULL;
+      if (f->hasBody(body) && !Analyzed(name)) {
+        EnterScope(name);
+        TraverseStmt(body->getBody());
+        LeaveScope();
+      }
+    }
+  }
+
+  typedef std::map<MangledName, CalleesSet* > Callgraph;
+
+  bool Analyzed(const MangledName& name) {
+    return callgraph_[name] != NULL;
+  }
+
+  void EnterScope(const MangledName& name) {
+    CalleesSet* callees = callgraph_[name];
+
+    if (callees == NULL) {
+      callgraph_[name] = callees = new CalleesSet();
+    }
+
+    scopes_.push(callees);
+  }
+
+  void LeaveScope() {
+    scopes_.pop();
+  }
+
+  void AddCallee(const MangledName& name, const MangledName& function) {
+    if (!scopes_.empty()) scopes_.top()->insert(name);
+    mangled_to_function_[name] = function;
+  }
+
+  void PrintCallGraph() {
+    for (Callgraph::const_iterator i = callgraph_.begin(), e = callgraph_.end();
+         i != e;
+         ++i) {
+      std::cout << i->first << "," << mangled_to_function_[i->first] << "\n";
+
+      CalleesSet* callees = i->second;
+      for (CalleesSet::const_iterator j = callees->begin(), e = callees->end();
+           j != e;
+           ++j) {
+        std::cout << "\t" << *j << "," << mangled_to_function_[*j] << "\n";
+      }
+    }
+  }
+
+ private:
+  clang::MangleContext* ctx_;
+
+  std::stack<CalleesSet* > scopes_;
+  Callgraph callgraph_;
+  CalleesMap mangled_to_function_;
+};
+
+
+class FunctionDeclarationFinder
+    : public clang::ASTConsumer,
+      public clang::RecursiveASTVisitor<FunctionDeclarationFinder> {
+ public:
+  explicit FunctionDeclarationFinder(clang::DiagnosticsEngine& d,
+                                     clang::SourceManager& sm,
+                                     const std::vector<std::string>& args)
+      : d_(d), sm_(sm) {}
+
+  virtual void HandleTranslationUnit(clang::ASTContext &ctx) {
+    mangle_context_ = clang::ItaniumMangleContext::create(ctx, d_);
+    callees_printer_ = new CalleesPrinter(mangle_context_);
+
+    TraverseDecl(ctx.getTranslationUnitDecl());
+
+    callees_printer_->PrintCallGraph();
+  }
+
+  virtual bool VisitFunctionDecl(clang::FunctionDecl* decl) {
+    callees_printer_->AnalyzeFunction(decl);
+    return true;
+  }
+
+ private:
+  clang::DiagnosticsEngine& d_;
+  clang::SourceManager& sm_;
+  clang::MangleContext* mangle_context_;
+
+  CalleesPrinter* callees_printer_;
+};
+
+static bool gc_suspects_loaded = false;
+static CalleesSet gc_suspects;
+static CalleesSet gc_functions;
+static bool whitelist_loaded = false;
+static CalleesSet suspects_whitelist;
+
+static void LoadGCSuspects() {
+  if (gc_suspects_loaded) return;
+
+  std::ifstream fin("gcsuspects");
+  std::string mangled, function;
+
+  while (!fin.eof()) {
+    std::getline(fin, mangled, ',');
+    gc_suspects.insert(mangled);
+    std::getline(fin, function);
+    gc_functions.insert(function);
+  }
+
+  gc_suspects_loaded = true;
+}
+
+static void LoadSuspectsWhitelist() {
+  if (whitelist_loaded) return;
+
+  std::ifstream fin("tools/gcmole/suspects.whitelist");
+  std::string s;
+
+  while (fin >> s) suspects_whitelist.insert(s);
+
+  whitelist_loaded = true;
+}
+
+// Looks for an exact match of the mangled name.
+static bool KnownToCauseGC(clang::MangleContext* ctx,
+                           const clang::FunctionDecl* decl) {
+  LoadGCSuspects();
+
+  if (!InV8Namespace(decl)) return false;
+
+  if (suspects_whitelist.find(decl->getNameAsString()) !=
+      suspects_whitelist.end()) {
+    return false;
+  }
+
+  MangledName name;
+  if (GetMangledName(ctx, decl, &name)) {
+    return gc_suspects.find(name) != gc_suspects.end();
+  }
+
+  return false;
+}
+
+// Looks for a partial match of only the function name.
+static bool SuspectedToCauseGC(clang::MangleContext* ctx,
+                               const clang::FunctionDecl* decl) {
+  LoadGCSuspects();
+
+  if (!InV8Namespace(decl)) return false;
+
+  LoadSuspectsWhitelist();
+  if (suspects_whitelist.find(decl->getNameAsString()) !=
+      suspects_whitelist.end()) {
+    return false;
+  }
+
+  if (gc_functions.find(decl->getNameAsString()) != gc_functions.end()) {
+    TRACE_LLVM_DECL("Suspected by ", decl);
+    return true;
+  }
+
+  return false;
+}
+
+static const int kNoEffect = 0;
+static const int kCausesGC = 1;
+static const int kRawDef = 2;
+static const int kRawUse = 4;
+static const int kAllEffects = kCausesGC | kRawDef | kRawUse;
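+// ExprEffect (below) packs these effect bits into the low bits of an
+// Environment pointer, so Environment objects are assumed to be allocated
+// with enough alignment to keep those low bits free.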
+
+class Environment;
+
+class ExprEffect {
+ public:
+  bool hasGC() { return (effect_ & kCausesGC) != 0; }
+  void setGC() { effect_ |= kCausesGC; }
+
+  bool hasRawDef() { return (effect_ & kRawDef) != 0; }
+  void setRawDef() { effect_ |= kRawDef; }
+
+  bool hasRawUse() { return (effect_ & kRawUse) != 0; }
+  void setRawUse() { effect_ |= kRawUse; }
+
+  static ExprEffect None() { return ExprEffect(kNoEffect, NULL); }
+  static ExprEffect NoneWithEnv(Environment* env) {
+    return ExprEffect(kNoEffect, env);
+  }
+  static ExprEffect RawUse() { return ExprEffect(kRawUse, NULL); }
+
+  static ExprEffect Merge(ExprEffect a, ExprEffect b);
+  static ExprEffect MergeSeq(ExprEffect a, ExprEffect b);
+  ExprEffect Define(const std::string& name);
+
+  Environment* env() {
+    return reinterpret_cast<Environment*>(effect_ & ~kAllEffects);
+  }
+
+  static ExprEffect GC() {
+    return ExprEffect(kCausesGC, NULL);
+  }
+
+ private:
+  ExprEffect(int effect, Environment* env)
+      : effect_((effect & kAllEffects) |
+                reinterpret_cast<intptr_t>(env)) { }
+
+  intptr_t effect_;
+};
+
+
+const std::string BAD_EXPR_MSG("Possible problem with evaluation order.");
+const std::string DEAD_VAR_MSG("Possibly dead variable.");
+
+
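+// Tracks which raw-pointer variables are still "alive" (i.e. not potentially
+// stale after a GC) at a given program point. Liveness is stored as a bit
+// vector indexed through a process-wide symbol table that maps variable names
+// to small integer codes.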
+class Environment {
+ public:
+  Environment() = default;
+
+  static Environment Unreachable() {
+    Environment env;
+    env.unreachable_ = true;
+    return env;
+  }
+
+  static Environment Merge(const Environment& l,
+                           const Environment& r) {
+    Environment out(l);
+    out &= r;
+    return out;
+  }
+
+  Environment ApplyEffect(ExprEffect effect) const {
+    Environment out = effect.hasGC() ? Environment() : Environment(*this);
+    if (effect.env()) out |= *effect.env();
+    return out;
+  }
+
+  typedef std::map<std::string, int> SymbolTable;
+
+  bool IsAlive(const std::string& name) const {
+    SymbolTable::iterator code = symbol_table_.find(name);
+    if (code == symbol_table_.end()) return false;
+    return is_live(code->second);
+  }
+
+  bool Equal(const Environment& env) {
+    if (unreachable_ && env.unreachable_) return true;
+    size_t size = std::max(live_.size(), env.live_.size());
+    for (size_t i = 0; i < size; ++i) {
+      if (is_live(i) != env.is_live(i)) return false;
+    }
+    return true;
+  }
+
+  Environment Define(const std::string& name) const {
+    return Environment(*this, SymbolToCode(name));
+  }
+
+  void MDefine(const std::string& name) { set_live(SymbolToCode(name)); }
+
+  static int SymbolToCode(const std::string& name) {
+    SymbolTable::iterator code = symbol_table_.find(name);
+
+    if (code == symbol_table_.end()) {
+      int new_code = symbol_table_.size();
+      symbol_table_.insert(std::make_pair(name, new_code));
+      return new_code;
+    }
+
+    return code->second;
+  }
+
+  static void ClearSymbolTable() {
+    for (Environment* e : envs_) delete e;
+    envs_.clear();
+    symbol_table_.clear();
+  }
+
+  void Print() const {
+    bool comma = false;
+    std::cout << "{";
+    for (auto& e : symbol_table_) {
+      if (!is_live(e.second)) continue;
+      if (comma) std::cout << ", ";
+      std::cout << e.first;
+      comma = true;
+    }
+    std::cout << "}" << std::endl;
+  }
+
+  static Environment* Allocate(const Environment& env) {
+    Environment* allocated_env = new Environment(env);
+    envs_.push_back(allocated_env);
+    return allocated_env;
+  }
+
+ private:
+  Environment(const Environment& l, int code)
+      : live_(l.live_) {
+    set_live(code);
+  }
+
+  void set_live(size_t pos) {
+    if (unreachable_) return;
+    if (pos >= live_.size()) live_.resize(pos + 1);
+    live_[pos] = true;
+  }
+
+  bool is_live(size_t pos) const {
+    return unreachable_ || (live_.size() > pos && live_[pos]);
+  }
+
+  Environment& operator|=(const Environment& o) {
+    if (o.unreachable_) {
+      unreachable_ = true;
+      live_.clear();
+    } else if (!unreachable_) {
+      for (size_t i = 0, e = o.live_.size(); i < e; ++i) {
+        if (o.live_[i]) set_live(i);
+      }
+    }
+    return *this;
+  }
+
+  Environment& operator&=(const Environment& o) {
+    if (o.unreachable_) return *this;
+    if (unreachable_) return *this = o;
+
+    // Carry over false bits from the tail of o.live_, and reset all bits that
+    // are not set in o.live_.
+    size_t size = std::max(live_.size(), o.live_.size());
+    if (size > live_.size()) live_.resize(size);
+    for (size_t i = 0; i < size; ++i) {
+      if (live_[i] && (i >= o.live_.size() || !o.live_[i])) live_[i] = false;
+    }
+    return *this;
+  }
+
+  static SymbolTable symbol_table_;
+  static std::vector<Environment*> envs_;
+
+  std::vector<bool> live_;
+  // unreachable_ == true implies live_.empty(), but is_live(i) still returns
+  // true for all i.
+  bool unreachable_ = false;
+
+  friend class ExprEffect;
+  friend class CallProps;
+};
+
+
+class CallProps {
+ public:
+  CallProps() : env_(NULL) { }
+
+  void SetEffect(int arg, ExprEffect in) {
+    if (in.hasGC()) {
+      gc_.set(arg);
+    }
+    if (in.hasRawDef()) raw_def_.set(arg);
+    if (in.hasRawUse()) raw_use_.set(arg);
+    if (in.env() != NULL) {
+      if (env_ == NULL) {
+        env_ = in.env();
+      } else {
+        *env_ |= *in.env();
+      }
+    }
+  }
+
+  ExprEffect ComputeCumulativeEffect(bool result_is_raw) {
+    ExprEffect out = ExprEffect::NoneWithEnv(env_);
+    if (gc_.any()) {
+      out.setGC();
+    }
+    if (raw_use_.any()) out.setRawUse();
+    if (result_is_raw) out.setRawDef();
+    return out;
+  }
+
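+  // A call is treated as safe if no argument can trigger a GC, if no argument
+  // uses or defines a raw pointer, or if the single GC-triggering argument is
+  // also the only raw argument (so a GC started while evaluating one argument
+  // cannot invalidate a raw pointer held by a sibling argument).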
+  bool IsSafe() {
+    if (!gc_.any()) {
+      return true;
+    }
+    std::bitset<kMaxNumberOfArguments> raw = (raw_def_ | raw_use_);
+    if (!raw.any()) {
+      return true;
+    }
+    bool result = gc_.count() == 1 && !((raw ^ gc_).any());
+    return result;
+  }
+
+ private:
+  static const int kMaxNumberOfArguments = 64;
+  std::bitset<kMaxNumberOfArguments> raw_def_;
+  std::bitset<kMaxNumberOfArguments> raw_use_;
+  std::bitset<kMaxNumberOfArguments> gc_;
+  Environment* env_;
+};
+
+
+Environment::SymbolTable Environment::symbol_table_;
+std::vector<Environment*> Environment::envs_;
+
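+// Combines the effects of two alternatively evaluated expressions (e.g. the
+// two arms of a conditional): effect bits are unioned, while a variable only
+// stays defined if both sides define it.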
+ExprEffect ExprEffect::Merge(ExprEffect a, ExprEffect b) {
+  Environment* a_env = a.env();
+  Environment* b_env = b.env();
+  Environment* out = NULL;
+  if (a_env != NULL && b_env != NULL) {
+    out = Environment::Allocate(*a_env);
+    *out &= *b_env;
+  }
+  return ExprEffect(a.effect_ | b.effect_, out);
+}
+
+
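+// Combines the effects of two sequentially evaluated expressions: definitions
+// accumulate across both, unless the second expression may trigger a GC, in
+// which case definitions made by the first are dropped.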
+ExprEffect ExprEffect::MergeSeq(ExprEffect a, ExprEffect b) {
+  Environment* a_env = b.hasGC() ? NULL : a.env();
+  Environment* b_env = b.env();
+  Environment* out = (b_env == NULL) ? a_env : b_env;
+  if (a_env != NULL && b_env != NULL) {
+    out = Environment::Allocate(*b_env);
+    *out |= *a_env;
+  }
+  return ExprEffect(a.effect_ | b.effect_, out);
+}
+
+
+ExprEffect ExprEffect::Define(const std::string& name) {
+  Environment* e = env();
+  if (e == NULL) {
+    e = Environment::Allocate(Environment());
+  }
+  e->MDefine(name);
+  return ExprEffect(effect_, e);
+}
+
+
+static std::string THIS ("this");
+
+
+class FunctionAnalyzer {
+ public:
+  FunctionAnalyzer(clang::MangleContext* ctx, clang::CXXRecordDecl* object_decl,
+                   clang::CXXRecordDecl* maybe_object_decl,
+                   clang::CXXRecordDecl* smi_decl,
+                   clang::CXXRecordDecl* no_gc_decl,
+                   clang::CXXRecordDecl* no_gc_or_safepoint_decl,
+                   clang::CXXRecordDecl* no_heap_access_decl,
+                   clang::DiagnosticsEngine& d, clang::SourceManager& sm)
+      : ctx_(ctx),
+        object_decl_(object_decl),
+        maybe_object_decl_(maybe_object_decl),
+        smi_decl_(smi_decl),
+        no_gc_decl_(no_gc_decl),
+        no_gc_or_safepoint_decl_(no_gc_or_safepoint_decl),
+        no_heap_access_decl_(no_heap_access_decl),
+        d_(d),
+        sm_(sm),
+        block_(NULL) {}
+
+  // --------------------------------------------------------------------------
+  // Expressions
+  // --------------------------------------------------------------------------
+
+  ExprEffect VisitExpr(clang::Expr* expr, const Environment& env) {
+#define VISIT(type)                                                         \
+  do {                                                                      \
+    clang::type* concrete_expr = llvm::dyn_cast_or_null<clang::type>(expr); \
+    if (concrete_expr != NULL) {                                            \
+      return Visit##type(concrete_expr, env);                               \
+    }                                                                       \
+  } while (0);
+
+    VISIT(AbstractConditionalOperator);
+    VISIT(AddrLabelExpr);
+    VISIT(ArraySubscriptExpr);
+    VISIT(BinaryOperator);
+    VISIT(BlockExpr);
+    VISIT(CallExpr);
+    VISIT(CastExpr);
+    VISIT(CharacterLiteral);
+    VISIT(ChooseExpr);
+    VISIT(CompoundLiteralExpr);
+    VISIT(ConstantExpr);
+    VISIT(CXXBindTemporaryExpr);
+    VISIT(CXXBoolLiteralExpr);
+    VISIT(CXXConstructExpr);
+    VISIT(CXXDefaultArgExpr);
+    VISIT(CXXDeleteExpr);
+    VISIT(CXXDependentScopeMemberExpr);
+    VISIT(CXXNewExpr);
+    VISIT(CXXNoexceptExpr);
+    VISIT(CXXNullPtrLiteralExpr);
+    VISIT(CXXPseudoDestructorExpr);
+    VISIT(CXXScalarValueInitExpr);
+    VISIT(CXXThisExpr);
+    VISIT(CXXThrowExpr);
+    VISIT(CXXTypeidExpr);
+    VISIT(CXXUnresolvedConstructExpr);
+    VISIT(CXXUuidofExpr);
+    VISIT(DeclRefExpr);
+    VISIT(DependentScopeDeclRefExpr);
+    VISIT(DesignatedInitExpr);
+    VISIT(ExprWithCleanups);
+    VISIT(ExtVectorElementExpr);
+    VISIT(FloatingLiteral);
+    VISIT(GNUNullExpr);
+    VISIT(ImaginaryLiteral);
+    VISIT(ImplicitCastExpr);
+    VISIT(ImplicitValueInitExpr);
+    VISIT(InitListExpr);
+    VISIT(IntegerLiteral);
+    VISIT(MaterializeTemporaryExpr);
+    VISIT(MemberExpr);
+    VISIT(OffsetOfExpr);
+    VISIT(OpaqueValueExpr);
+    VISIT(OverloadExpr);
+    VISIT(PackExpansionExpr);
+    VISIT(ParenExpr);
+    VISIT(ParenListExpr);
+    VISIT(PredefinedExpr);
+    VISIT(ShuffleVectorExpr);
+    VISIT(SizeOfPackExpr);
+    VISIT(StmtExpr);
+    VISIT(StringLiteral);
+    VISIT(SubstNonTypeTemplateParmPackExpr);
+    VISIT(TypeTraitExpr);
+    VISIT(UnaryOperator);
+    VISIT(UnaryExprOrTypeTraitExpr);
+    VISIT(VAArgExpr);
+#undef VISIT
+
+    return ExprEffect::None();
+  }
+
+#define DECL_VISIT_EXPR(type)                                           \
+  ExprEffect Visit##type (clang::type* expr, const Environment& env)
+
+#define IGNORE_EXPR(type)                                               \
+  ExprEffect Visit##type (clang::type* expr, const Environment& env) {  \
+    return ExprEffect::None();                                          \
+  }
+
+  IGNORE_EXPR(AddrLabelExpr);
+  IGNORE_EXPR(BlockExpr);
+  IGNORE_EXPR(CharacterLiteral);
+  IGNORE_EXPR(ChooseExpr);
+  IGNORE_EXPR(CompoundLiteralExpr);
+  IGNORE_EXPR(CXXBoolLiteralExpr);
+  IGNORE_EXPR(CXXDependentScopeMemberExpr);
+  IGNORE_EXPR(CXXNullPtrLiteralExpr);
+  IGNORE_EXPR(CXXPseudoDestructorExpr);
+  IGNORE_EXPR(CXXScalarValueInitExpr);
+  IGNORE_EXPR(CXXNoexceptExpr);
+  IGNORE_EXPR(CXXTypeidExpr);
+  IGNORE_EXPR(CXXUnresolvedConstructExpr);
+  IGNORE_EXPR(CXXUuidofExpr);
+  IGNORE_EXPR(DependentScopeDeclRefExpr);
+  IGNORE_EXPR(DesignatedInitExpr);
+  IGNORE_EXPR(ExtVectorElementExpr);
+  IGNORE_EXPR(FloatingLiteral);
+  IGNORE_EXPR(ImaginaryLiteral);
+  IGNORE_EXPR(IntegerLiteral);
+  IGNORE_EXPR(OffsetOfExpr);
+  IGNORE_EXPR(ImplicitValueInitExpr);
+  IGNORE_EXPR(PackExpansionExpr);
+  IGNORE_EXPR(PredefinedExpr);
+  IGNORE_EXPR(ShuffleVectorExpr);
+  IGNORE_EXPR(SizeOfPackExpr);
+  IGNORE_EXPR(StmtExpr);
+  IGNORE_EXPR(StringLiteral);
+  IGNORE_EXPR(SubstNonTypeTemplateParmPackExpr);
+  IGNORE_EXPR(TypeTraitExpr);
+  IGNORE_EXPR(VAArgExpr);
+  IGNORE_EXPR(GNUNullExpr);
+  IGNORE_EXPR(OverloadExpr);
+
+  DECL_VISIT_EXPR(CXXThisExpr) {
+    return Use(expr, expr->getType(), THIS, env);
+  }
+
+  DECL_VISIT_EXPR(AbstractConditionalOperator) {
+    Environment after_cond = env.ApplyEffect(VisitExpr(expr->getCond(), env));
+    return ExprEffect::Merge(VisitExpr(expr->getTrueExpr(), after_cond),
+                             VisitExpr(expr->getFalseExpr(), after_cond));
+  }
+
+  DECL_VISIT_EXPR(ArraySubscriptExpr) {
+    clang::Expr* exprs[2] = {expr->getBase(), expr->getIdx()};
+    return Parallel(expr, 2, exprs, env);
+  }
+
+  bool IsRawPointerVar(clang::Expr* expr, std::string* var_name) {
+    if (llvm::isa<clang::DeclRefExpr>(expr)) {
+      *var_name =
+          llvm::cast<clang::DeclRefExpr>(expr)->getDecl()->getNameAsString();
+      return true;
+    }
+
+    return false;
+  }
+
+  DECL_VISIT_EXPR(BinaryOperator) {
+    clang::Expr* lhs = expr->getLHS();
+    clang::Expr* rhs = expr->getRHS();
+    clang::Expr* exprs[2] = {lhs, rhs};
+
+    switch (expr->getOpcode()) {
+      case clang::BO_Comma:
+        return Sequential(expr, 2, exprs, env);
+
+      case clang::BO_LAnd:
+      case clang::BO_LOr:
+        return ExprEffect::Merge(VisitExpr(lhs, env), VisitExpr(rhs, env));
+
+      default:
+        return Parallel(expr, 2, exprs, env);
+    }
+  }
+
+  DECL_VISIT_EXPR(CXXBindTemporaryExpr) {
+    return VisitExpr(expr->getSubExpr(), env);
+  }
+
+  DECL_VISIT_EXPR(MaterializeTemporaryExpr) {
+    return VisitExpr(expr->GetTemporaryExpr(), env);
+  }
+
+  DECL_VISIT_EXPR(CXXConstructExpr) {
+    return VisitArguments<>(expr, env);
+  }
+
+  DECL_VISIT_EXPR(CXXDefaultArgExpr) {
+    return VisitExpr(expr->getExpr(), env);
+  }
+
+  DECL_VISIT_EXPR(CXXDeleteExpr) {
+    return VisitExpr(expr->getArgument(), env);
+  }
+
+  DECL_VISIT_EXPR(CXXNewExpr) { return VisitExpr(expr->getInitializer(), env); }
+
+  DECL_VISIT_EXPR(ExprWithCleanups) {
+    return VisitExpr(expr->getSubExpr(), env);
+  }
+
+  DECL_VISIT_EXPR(CXXThrowExpr) {
+    return VisitExpr(expr->getSubExpr(), env);
+  }
+
+  DECL_VISIT_EXPR(ImplicitCastExpr) {
+    return VisitExpr(expr->getSubExpr(), env);
+  }
+
+  DECL_VISIT_EXPR(ConstantExpr) { return VisitExpr(expr->getSubExpr(), env); }
+
+  DECL_VISIT_EXPR(InitListExpr) {
+    return Sequential(expr, expr->getNumInits(), expr->getInits(), env);
+  }
+
+  DECL_VISIT_EXPR(MemberExpr) {
+    return VisitExpr(expr->getBase(), env);
+  }
+
+  DECL_VISIT_EXPR(OpaqueValueExpr) {
+    return VisitExpr(expr->getSourceExpr(), env);
+  }
+
+  DECL_VISIT_EXPR(ParenExpr) {
+    return VisitExpr(expr->getSubExpr(), env);
+  }
+
+  DECL_VISIT_EXPR(ParenListExpr) {
+    return Parallel(expr, expr->getNumExprs(), expr->getExprs(), env);
+  }
+
+  DECL_VISIT_EXPR(UnaryOperator) {
+    // TODO(gcmole): We are treating all expressions that look like
+    // {&raw_pointer_var} as definitions of {raw_pointer_var}. This should be
+    // changed to recognize a less generic pattern:
+    //
+    //   if (maybe_object->ToObject(&obj)) return maybe_object;
+    //
+    if (expr->getOpcode() == clang::UO_AddrOf) {
+      std::string var_name;
+      if (IsRawPointerVar(expr->getSubExpr(), &var_name)) {
+        return ExprEffect::None().Define(var_name);
+      }
+    }
+    return VisitExpr(expr->getSubExpr(), env);
+  }
+
+  DECL_VISIT_EXPR(UnaryExprOrTypeTraitExpr) {
+    if (expr->isArgumentType()) {
+      return ExprEffect::None();
+    }
+
+    return VisitExpr(expr->getArgumentExpr(), env);
+  }
+
+  DECL_VISIT_EXPR(CastExpr) {
+    return VisitExpr(expr->getSubExpr(), env);
+  }
+
+  DECL_VISIT_EXPR(DeclRefExpr) {
+    return Use(expr, expr->getDecl(), env);
+  }
+
+  // Represents a node in the AST {parent} whose children {exprs} have
+  // undefined order of evaluation, e.g. array subscript or a binary operator.
+  ExprEffect Parallel(clang::Expr* parent, int n, clang::Expr** exprs,
+                      const Environment& env) {
+    CallProps props;
+
+    for (int i = 0; i < n; ++i) {
+      props.SetEffect(i, VisitExpr(exprs[i], env));
+    }
+
+    if (!props.IsSafe()) ReportUnsafe(parent, BAD_EXPR_MSG);
+
+    return props.ComputeCumulativeEffect(
+        RepresentsRawPointerType(parent->getType()));
+  }
+
+  // Represents a node in the AST {parent} whose children {exprs} are
+  // executed in sequence, e.g. a switch statement or an initializer list.
+  ExprEffect Sequential(clang::Stmt* parent, int n, clang::Expr** exprs,
+                        const Environment& env) {
+    ExprEffect out = ExprEffect::None();
+    Environment out_env = env;
+    for (int i = 0; i < n; ++i) {
+      out = ExprEffect::MergeSeq(out, VisitExpr(exprs[i], out_env));
+      out_env = out_env.ApplyEffect(out);
+    }
+    return out;
+  }
+
+  // Represents a node in the AST {parent} which uses the variable {var_name},
+  // e.g. this expression or operator&.
+  // Here we observe the type in {var_type} of a previously declared variable
+  // and if it's a raw heap object type, we do the following:
+  // 1. If it got stale due to GC since its declaration, we report it as such.
+  // 2. Mark its raw usage in the ExprEffect returned by this function.
+  ExprEffect Use(const clang::Expr* parent,
+                 const clang::QualType& var_type,
+                 const std::string& var_name,
+                 const Environment& env) {
+    if (RepresentsRawPointerType(var_type)) {
+      // We currently care only about our internal pointer types and not about
+      // raw C++ pointers, because normally special care is taken when storing
+      // raw pointers to the managed heap. Furthermore, checking for raw
+      // pointers produces too many false positives in the dead variable
+      // analysis.
+      if (IsInternalPointerType(var_type) && !env.IsAlive(var_name) &&
+          !HasActiveGuard() && g_dead_vars_analysis) {
+        ReportUnsafe(parent, DEAD_VAR_MSG);
+      }
+      return ExprEffect::RawUse();
+    }
+    return ExprEffect::None();
+  }
+
+  ExprEffect Use(const clang::Expr* parent,
+                 const clang::ValueDecl* var,
+                 const Environment& env) {
+    if (IsExternalVMState(var)) {
+      return ExprEffect::GC();
+    }
+    return Use(parent, var->getType(), var->getNameAsString(), env);
+  }
+
+
+  template<typename ExprType>
+  ExprEffect VisitArguments(ExprType* call, const Environment& env) {
+    CallProps props;
+    VisitArguments<>(call, &props, env);
+    if (!props.IsSafe()) ReportUnsafe(call, BAD_EXPR_MSG);
+    return props.ComputeCumulativeEffect(
+        RepresentsRawPointerType(call->getType()));
+  }
+
+  template<typename ExprType>
+  void VisitArguments(ExprType* call,
+                      CallProps* props,
+                      const Environment& env) {
+    for (unsigned arg = 0; arg < call->getNumArgs(); arg++) {
+      props->SetEffect(arg + 1, VisitExpr(call->getArg(arg), env));
+    }
+  }
+
+  // After visiting the receiver and the arguments of the {call} node, this
+  // function might report a GC-unsafe usage (due to the undefined evaluation
+  // order of the receiver and the rest of the arguments).
+  ExprEffect VisitCallExpr(clang::CallExpr* call,
+                           const Environment& env) {
+    CallProps props;
+
+    clang::CXXMemberCallExpr* memcall =
+        llvm::dyn_cast_or_null<clang::CXXMemberCallExpr>(call);
+    if (memcall != NULL) {
+      clang::Expr* receiver = memcall->getImplicitObjectArgument();
+      props.SetEffect(0, VisitExpr(receiver, env));
+    }
+
+    std::string var_name;
+    clang::CXXOperatorCallExpr* opcall =
+        llvm::dyn_cast_or_null<clang::CXXOperatorCallExpr>(call);
+    if (opcall != NULL && opcall->isAssignmentOp() &&
+        IsRawPointerVar(opcall->getArg(0), &var_name)) {
+      // TODO(gcmole): We are treating all assignment operator calls whose
+      // left-hand side looks like {raw_pointer_var} as safe, independent
+      // of the concrete assignment operator implementation. This should be
+      // narrowed so that it only applies if the assignment operator of the
+      // base {Object} or {HeapObject} class was used, which we know to be
+      // safe.
+      props.SetEffect(1, VisitExpr(call->getArg(1), env).Define(var_name));
+    } else {
+      VisitArguments<>(call, &props, env);
+    }
+
+    if (!props.IsSafe()) ReportUnsafe(call, BAD_EXPR_MSG);
+
+    ExprEffect out = props.ComputeCumulativeEffect(
+        RepresentsRawPointerType(call->getType()));
+
+    clang::FunctionDecl* callee = call->getDirectCallee();
+    if (callee != NULL) {
+      if (KnownToCauseGC(ctx_, callee)) {
+        out.setGC();
+        scopes_.back().SetGCCauseLocation(
+            clang::FullSourceLoc(call->getExprLoc(), sm_));
+      }
+
+      // Support for virtual methods that might be GC suspects.
+      clang::CXXMethodDecl* method =
+          llvm::dyn_cast_or_null<clang::CXXMethodDecl>(callee);
+      if (method != NULL && method->isVirtual()) {
+        clang::CXXMemberCallExpr* memcall =
+            llvm::dyn_cast_or_null<clang::CXXMemberCallExpr>(call);
+        if (memcall != NULL) {
+          clang::CXXMethodDecl* target = method->getDevirtualizedMethod(
+              memcall->getImplicitObjectArgument(), false);
+          if (target != NULL) {
+            if (KnownToCauseGC(ctx_, target)) {
+              out.setGC();
+              scopes_.back().SetGCCauseLocation(
+                  clang::FullSourceLoc(call->getExprLoc(), sm_));
+            }
+          } else {
+            // According to the documentation, {getDevirtualizedMethod} might
+            // return NULL, in which case we still want to use the partial
+            // match of the {method}'s name against the GC suspects in order
+            // to increase coverage.
+            if (SuspectedToCauseGC(ctx_, method)) {
+              out.setGC();
+              scopes_.back().SetGCCauseLocation(
+                  clang::FullSourceLoc(call->getExprLoc(), sm_));
+            }
+          }
+        }
+      }
+    }
+
+    return out;
+  }
+
+  // --------------------------------------------------------------------------
+  // Statements
+  // --------------------------------------------------------------------------
+
+  Environment VisitStmt(clang::Stmt* stmt, const Environment& env) {
+#define VISIT(type)                                                         \
+  do {                                                                      \
+    clang::type* concrete_stmt = llvm::dyn_cast_or_null<clang::type>(stmt); \
+    if (concrete_stmt != NULL) {                                            \
+      return Visit##type(concrete_stmt, env);                               \
+    }                                                                       \
+  } while (0);
+
+    if (clang::Expr* expr = llvm::dyn_cast_or_null<clang::Expr>(stmt)) {
+      return env.ApplyEffect(VisitExpr(expr, env));
+    }
+
+    VISIT(AsmStmt);
+    VISIT(BreakStmt);
+    VISIT(CompoundStmt);
+    VISIT(ContinueStmt);
+    VISIT(CXXCatchStmt);
+    VISIT(CXXTryStmt);
+    VISIT(DeclStmt);
+    VISIT(DoStmt);
+    VISIT(ForStmt);
+    VISIT(GotoStmt);
+    VISIT(IfStmt);
+    VISIT(IndirectGotoStmt);
+    VISIT(LabelStmt);
+    VISIT(NullStmt);
+    VISIT(ReturnStmt);
+    VISIT(CaseStmt);
+    VISIT(DefaultStmt);
+    VISIT(SwitchStmt);
+    VISIT(WhileStmt);
+#undef VISIT
+
+    return env;
+  }
+
+#define DECL_VISIT_STMT(type)                                           \
+  Environment Visit##type (clang::type* stmt, const Environment& env)
+
+#define IGNORE_STMT(type)                                               \
+  Environment Visit##type (clang::type* stmt, const Environment& env) { \
+    return env;                                                         \
+  }
+
+  IGNORE_STMT(IndirectGotoStmt);
+  IGNORE_STMT(NullStmt);
+  IGNORE_STMT(AsmStmt);
+
+  // We are ignoring control flow for simplicity.
+  IGNORE_STMT(GotoStmt);
+  IGNORE_STMT(LabelStmt);
+
+  // We are ignoring try/catch because V8 does not use them.
+  IGNORE_STMT(CXXCatchStmt);
+  IGNORE_STMT(CXXTryStmt);
+
+  class Block {
+   public:
+    Block(const Environment& in,
+          FunctionAnalyzer* owner)
+        : in_(in),
+          out_(Environment::Unreachable()),
+          changed_(false),
+          owner_(owner) {
+      parent_ = owner_->EnterBlock(this);
+    }
+
+    ~Block() {
+      owner_->LeaveBlock(parent_);
+    }
+
+    void MergeIn(const Environment& env) {
+      Environment old_in = in_;
+      in_ = Environment::Merge(in_, env);
+      changed_ = !old_in.Equal(in_);
+    }
+
+    bool changed() {
+      if (changed_) {
+        changed_ = false;
+        return true;
+      }
+      return false;
+    }
+
+    const Environment& in() {
+      return in_;
+    }
+
+    const Environment& out() {
+      return out_;
+    }
+
+    void MergeOut(const Environment& env) {
+      out_ = Environment::Merge(out_, env);
+    }
+
+    void Sequential(clang::Stmt* a, clang::Stmt* b, clang::Stmt* c) {
+      Environment a_out = owner_->VisitStmt(a, in());
+      Environment b_out = owner_->VisitStmt(b, a_out);
+      Environment c_out = owner_->VisitStmt(c, b_out);
+      MergeOut(c_out);
+    }
+
+    void Sequential(clang::Stmt* a, clang::Stmt* b) {
+      Environment a_out = owner_->VisitStmt(a, in());
+      Environment b_out = owner_->VisitStmt(b, a_out);
+      MergeOut(b_out);
+    }
+
+    void Loop(clang::Stmt* a, clang::Stmt* b, clang::Stmt* c) {
+      Sequential(a, b, c);
+      MergeIn(out());
+    }
+
+    void Loop(clang::Stmt* a, clang::Stmt* b) {
+      Sequential(a, b);
+      MergeIn(out());
+    }
+
+
+   private:
+    Environment in_;
+    Environment out_;
+    bool changed_;
+    FunctionAnalyzer* owner_;
+    Block* parent_;
+  };
+
+
+  DECL_VISIT_STMT(BreakStmt) {
+    block_->MergeOut(env);
+    return Environment::Unreachable();
+  }
+
+  DECL_VISIT_STMT(ContinueStmt) {
+    block_->MergeIn(env);
+    return Environment::Unreachable();
+  }
+
+  DECL_VISIT_STMT(CompoundStmt) {
+    scopes_.push_back(GCScope());
+    Environment out = env;
+    clang::CompoundStmt::body_iterator end = stmt->body_end();
+    for (clang::CompoundStmt::body_iterator s = stmt->body_begin();
+         s != end;
+         ++s) {
+      out = VisitStmt(*s, out);
+    }
+    scopes_.pop_back();
+    return out;
+  }
+
+  DECL_VISIT_STMT(WhileStmt) {
+    Block block (env, this);
+    do {
+      block.Loop(stmt->getCond(), stmt->getBody());
+    } while (block.changed());
+    return block.out();
+  }
+
+  DECL_VISIT_STMT(DoStmt) {
+    Block block (env, this);
+    do {
+      block.Loop(stmt->getBody(), stmt->getCond());
+    } while (block.changed());
+    return block.out();
+  }
+
+  DECL_VISIT_STMT(ForStmt) {
+    Block block (VisitStmt(stmt->getInit(), env), this);
+    do {
+      block.Loop(stmt->getCond(),
+                 stmt->getBody(),
+                 stmt->getInc());
+    } while (block.changed());
+    return block.out();
+  }
+
+  DECL_VISIT_STMT(IfStmt) {
+    Environment cond_out = VisitStmt(stmt->getCond(), env);
+    Environment then_out = VisitStmt(stmt->getThen(), cond_out);
+    Environment else_out = VisitStmt(stmt->getElse(), cond_out);
+    return Environment::Merge(then_out, else_out);
+  }
+
+  DECL_VISIT_STMT(SwitchStmt) {
+    Block block (env, this);
+    block.Sequential(stmt->getCond(), stmt->getBody());
+    return block.out();
+  }
+
+  DECL_VISIT_STMT(CaseStmt) {
+    Environment in = Environment::Merge(env, block_->in());
+    Environment after_lhs = VisitStmt(stmt->getLHS(), in);
+    return VisitStmt(stmt->getSubStmt(), after_lhs);
+  }
+
+  DECL_VISIT_STMT(DefaultStmt) {
+    Environment in = Environment::Merge(env, block_->in());
+    return VisitStmt(stmt->getSubStmt(), in);
+  }
+
+  DECL_VISIT_STMT(ReturnStmt) {
+    VisitExpr(stmt->getRetValue(), env);
+    return Environment::Unreachable();
+  }
+
+  const clang::TagType* ToTagType(const clang::Type* t) {
+    if (t == NULL) {
+      return NULL;
+    } else if (llvm::isa<clang::TagType>(t)) {
+      return llvm::cast<clang::TagType>(t);
+    } else if (llvm::isa<clang::SubstTemplateTypeParmType>(t)) {
+      return ToTagType(llvm::cast<clang::SubstTemplateTypeParmType>(t)
+                           ->getReplacementType()
+                           .getTypePtr());
+    } else {
+      return NULL;
+    }
+  }
+
+  bool IsDerivedFrom(const clang::CXXRecordDecl* record,
+                     const clang::CXXRecordDecl* base) {
+    return (record == base) || record->isDerivedFrom(base);
+  }
+
+  const clang::CXXRecordDecl* GetDefinitionOrNull(
+      const clang::CXXRecordDecl* record) {
+    if (record == NULL) {
+      return NULL;
+    }
+
+    if (!InV8Namespace(record)) return NULL;
+
+    if (!record->hasDefinition()) {
+      return NULL;
+    }
+
+    return record->getDefinition();
+  }
+
+  bool IsDerivedFromInternalPointer(const clang::CXXRecordDecl* record) {
+    const clang::CXXRecordDecl* definition = GetDefinitionOrNull(record);
+    if (!definition) {
+      return false;
+    }
+
+    bool result = (IsDerivedFrom(record, object_decl_) &&
+                   !IsDerivedFrom(record, smi_decl_)) ||
+                  IsDerivedFrom(record, maybe_object_decl_);
+    return result;
+  }
+
+  bool IsRawPointerType(const clang::PointerType* type) {
+    const clang::CXXRecordDecl* record = type->getPointeeCXXRecordDecl();
+    bool result = IsDerivedFromInternalPointer(record);
+    TRACE("is raw " << result << " " << record->getNameAsString());
+    return result;
+  }
+
+  bool IsInternalPointerType(clang::QualType qtype) {
+    const clang::CXXRecordDecl* record = qtype->getAsCXXRecordDecl();
+    bool result = IsDerivedFromInternalPointer(record);
+    TRACE_LLVM_TYPE("is internal " << result, qtype);
+    return result;
+  }
+
+  // Returns whether the given type is a raw pointer or a wrapper around
+  // such. For V8 that means Object and MaybeObject instances.
+  bool RepresentsRawPointerType(clang::QualType qtype) {
+    // Not yet assigned pointers can't get moved by the GC.
+    if (qtype.isNull()) {
+      return false;
+    }
+    // nullptr can't get moved by the GC.
+    if (qtype->isNullPtrType()) {
+      return false;
+    }
+
+    const clang::PointerType* pointer_type =
+        llvm::dyn_cast_or_null<clang::PointerType>(qtype.getTypePtrOrNull());
+    if (pointer_type != NULL) {
+      return IsRawPointerType(pointer_type);
+    } else {
+      return IsInternalPointerType(qtype);
+    }
+  }
+
+  bool IsGCGuard(clang::QualType qtype) {
+    if (qtype.isNull()) {
+      return false;
+    }
+    if (qtype->isNullPtrType()) {
+      return false;
+    }
+
+    const clang::CXXRecordDecl* record = qtype->getAsCXXRecordDecl();
+    const clang::CXXRecordDecl* definition = GetDefinitionOrNull(record);
+
+    if (!definition) {
+      return false;
+    }
+
+    return (no_gc_decl_ && IsDerivedFrom(definition, no_gc_decl_)) ||
+           (no_gc_or_safepoint_decl_ &&
+            IsDerivedFrom(definition, no_gc_or_safepoint_decl_)) ||
+           (no_heap_access_decl_ &&
+            IsDerivedFrom(definition, no_heap_access_decl_));
+  }
+
+  Environment VisitDecl(clang::Decl* decl, Environment& env) {
+    if (clang::VarDecl* var = llvm::dyn_cast<clang::VarDecl>(decl)) {
+      Environment out = var->hasInit() ? VisitStmt(var->getInit(), env) : env;
+
+      if (RepresentsRawPointerType(var->getType())) {
+        out = out.Define(var->getNameAsString());
+      }
+      if (IsGCGuard(var->getType())) {
+        scopes_.back().guard_location =
+            clang::FullSourceLoc(decl->getLocation(), sm_);
+      }
+
+      return out;
+    }
+    // TODO(gcmole): handle other declarations?
+    return env;
+  }
+
+  DECL_VISIT_STMT(DeclStmt) {
+    Environment out = env;
+    clang::DeclStmt::decl_iterator end = stmt->decl_end();
+    for (clang::DeclStmt::decl_iterator decl = stmt->decl_begin();
+         decl != end;
+         ++decl) {
+      out = VisitDecl(*decl, out);
+    }
+    return out;
+  }
+
+
+  void DefineParameters(const clang::FunctionDecl* f,
+                        Environment* env) {
+    env->MDefine(THIS);
+    clang::FunctionDecl::param_const_iterator end = f->param_end();
+    for (clang::FunctionDecl::param_const_iterator p = f->param_begin();
+         p != end;
+         ++p) {
+      env->MDefine((*p)->getNameAsString());
+    }
+  }
+
+
+  void AnalyzeFunction(const clang::FunctionDecl* f) {
+    const clang::FunctionDecl* body = NULL;
+    if (f->hasBody(body)) {
+      Environment env;
+      DefineParameters(body, &env);
+      VisitStmt(body->getBody(), env);
+      Environment::ClearSymbolTable();
+    }
+  }
+
+  Block* EnterBlock(Block* block) {
+    Block* parent = block_;
+    block_ = block;
+    return parent;
+  }
+
+  void LeaveBlock(Block* block) {
+    block_ = block;
+  }
+
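+  // Returns true if any enclosing scope declared a DisallowHeapAllocation-like
+  // guard before the first GC-causing call seen in that scope.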
+  bool HasActiveGuard() {
+    for (auto s : scopes_) {
+      if (s.IsBeforeGCCause()) return true;
+    }
+    return false;
+  }
+
+ private:
+  void ReportUnsafe(const clang::Expr* expr, const std::string& msg) {
+    d_.Report(clang::FullSourceLoc(expr->getExprLoc(), sm_),
+              d_.getCustomDiagID(clang::DiagnosticsEngine::Warning, "%0"))
+        << msg;
+  }
+
+
+  clang::MangleContext* ctx_;
+  clang::CXXRecordDecl* object_decl_;
+  clang::CXXRecordDecl* maybe_object_decl_;
+  clang::CXXRecordDecl* smi_decl_;
+  clang::CXXRecordDecl* no_gc_decl_;
+  clang::CXXRecordDecl* no_gc_or_safepoint_decl_;
+  clang::CXXRecordDecl* no_heap_access_decl_;
+
+  clang::DiagnosticsEngine& d_;
+  clang::SourceManager& sm_;
+
+  Block* block_;
+
+  struct GCScope {
+    clang::FullSourceLoc guard_location;
+    clang::FullSourceLoc gccause_location;
+
+    // We're only interested in guards that are declared before any further
+    // GC-causing calls (see TestGuardedDeadVarAnalysisMidFunction for an
+    // example).
+    bool IsBeforeGCCause() {
+      if (!guard_location.isValid()) return false;
+      if (!gccause_location.isValid()) return true;
+      return guard_location.isBeforeInTranslationUnitThan(gccause_location);
+    }
+
+    // After we set the first GC cause in the scope, we don't need the later
+    // ones.
+    void SetGCCauseLocation(clang::FullSourceLoc gccause_location_) {
+      if (gccause_location.isValid()) return;
+      gccause_location = gccause_location_;
+    }
+  };
+  std::vector<GCScope> scopes_;
+};
+
+class ProblemsFinder : public clang::ASTConsumer,
+                       public clang::RecursiveASTVisitor<ProblemsFinder> {
+ public:
+  ProblemsFinder(clang::DiagnosticsEngine& d, clang::SourceManager& sm,
+                 const std::vector<std::string>& args)
+      : d_(d), sm_(sm) {
+    for (unsigned i = 0; i < args.size(); ++i) {
+      if (args[i] == "--dead-vars") {
+        g_dead_vars_analysis = true;
+      }
+      if (args[i] == "--verbose") {
+        g_tracing_enabled = true;
+      }
+    }
+  }
+
+  bool TranslationUnitIgnored() {
+    if (!ignored_files_loaded_) {
+      std::ifstream fin("tools/gcmole/ignored_files");
+      std::string s;
+      while (fin >> s) ignored_files_.insert(s);
+      ignored_files_loaded_ = true;
+    }
+
+    clang::FileID main_file_id = sm_.getMainFileID();
+    std::string filename = sm_.getFileEntryForID(main_file_id)->getName().str();
+
+    bool result = ignored_files_.find(filename) != ignored_files_.end();
+    if (result) {
+      llvm::outs() << "Ignoring file " << filename << "\n";
+    }
+    return result;
+  }
+
+  virtual void HandleTranslationUnit(clang::ASTContext &ctx) {
+    if (TranslationUnitIgnored()) {
+      return;
+    }
+
+    Resolver r(ctx);
+
+    // It is a valid situation that no_gc_decl == NULL when
+    // DisallowHeapAllocation is not included and can't be resolved.
+    // This is gracefully handled in the FunctionAnalyzer later.
+    clang::CXXRecordDecl* no_gc_decl =
+        r.ResolveNamespace("v8")
+            .ResolveNamespace("internal")
+            .ResolveTemplate("DisallowHeapAllocation");
+
+    clang::CXXRecordDecl* no_gc_or_safepoint_decl =
+        r.ResolveNamespace("v8")
+            .ResolveNamespace("internal")
+            .ResolveTemplate("DisallowGarbageCollection");
+
+    clang::CXXRecordDecl* no_heap_access_decl =
+        r.ResolveNamespace("v8")
+            .ResolveNamespace("internal")
+            .Resolve<clang::CXXRecordDecl>("DisallowHeapAccess");
+
+    clang::CXXRecordDecl* object_decl =
+        r.ResolveNamespace("v8").ResolveNamespace("internal").
+            Resolve<clang::CXXRecordDecl>("Object");
+
+    clang::CXXRecordDecl* maybe_object_decl =
+        r.ResolveNamespace("v8")
+            .ResolveNamespace("internal")
+            .Resolve<clang::CXXRecordDecl>("MaybeObject");
+
+    clang::CXXRecordDecl* smi_decl =
+        r.ResolveNamespace("v8").ResolveNamespace("internal").
+            Resolve<clang::CXXRecordDecl>("Smi");
+
+    if (object_decl != NULL) object_decl = object_decl->getDefinition();
+
+    if (maybe_object_decl != NULL)
+      maybe_object_decl = maybe_object_decl->getDefinition();
+
+    if (smi_decl != NULL) smi_decl = smi_decl->getDefinition();
+
+    if (no_heap_access_decl != NULL)
+      no_heap_access_decl = no_heap_access_decl->getDefinition();
+
+    if (object_decl != NULL && smi_decl != NULL && maybe_object_decl != NULL) {
+      function_analyzer_ = new FunctionAnalyzer(
+          clang::ItaniumMangleContext::create(ctx, d_), object_decl,
+          maybe_object_decl, smi_decl, no_gc_decl, no_gc_or_safepoint_decl,
+          no_heap_access_decl, d_, sm_);
+      TraverseDecl(ctx.getTranslationUnitDecl());
+    } else {
+      if (object_decl == NULL) {
+        llvm::errs() << "Failed to resolve v8::internal::Object\n";
+      }
+      if (maybe_object_decl == NULL) {
+        llvm::errs() << "Failed to resolve v8::internal::MaybeObject\n";
+      }
+      if (smi_decl == NULL) {
+        llvm::errs() << "Failed to resolve v8::internal::Smi\n";
+      }
+    }
+  }
+
+  virtual bool VisitFunctionDecl(clang::FunctionDecl* decl) {
+    // Don't print tracing from includes, otherwise the output is too big.
+    bool tracing = g_tracing_enabled;
+    const auto& fileID = sm_.getFileID(decl->getLocation());
+    if (fileID != sm_.getMainFileID()) {
+      g_tracing_enabled = false;
+    }
+
+    TRACE("Visiting function " << decl->getNameAsString());
+    function_analyzer_->AnalyzeFunction(decl);
+
+    g_tracing_enabled = tracing;
+    return true;
+  }
+
+ private:
+  clang::DiagnosticsEngine& d_;
+  clang::SourceManager& sm_;
+
+  bool ignored_files_loaded_ = false;
+  std::set<std::string> ignored_files_;
+
+  FunctionAnalyzer* function_analyzer_;
+};
+
+
+template<typename ConsumerType>
+class Action : public clang::PluginASTAction {
+ protected:
+  virtual std::unique_ptr<clang::ASTConsumer> CreateASTConsumer(
+      clang::CompilerInstance& CI, llvm::StringRef InFile) {
+    return std::unique_ptr<clang::ASTConsumer>(
+        new ConsumerType(CI.getDiagnostics(), CI.getSourceManager(), args_));
+  }
+
+  bool ParseArgs(const clang::CompilerInstance &CI,
+                 const std::vector<std::string>& args) {
+    args_ = args;
+    return true;
+  }
+
+  void PrintHelp(llvm::raw_ostream& ros) {
+  }
+ private:
+  std::vector<std::string> args_;
+};
+
+
+}
+
+static clang::FrontendPluginRegistry::Add<Action<ProblemsFinder> >
+FindProblems("find-problems", "Find GC-unsafe places.");
+
+static clang::FrontendPluginRegistry::Add<
+  Action<FunctionDeclarationFinder> >
+DumpCallees("dump-callees", "Dump callees for each function.");
+
+#undef TRACE
+#undef TRACE_LLVM_TYPE
+#undef TRACE_LLVM_DECL
+#undef DECL_VISIT_EXPR
+#undef IGNORE_EXPR
+#undef DECL_VISIT_STMT
+#undef IGNORE_STMT
diff --git a/src/third_party/v8/tools/gcmole/gcmole.lua b/src/third_party/v8/tools/gcmole/gcmole.lua
new file mode 100644
index 0000000..9705165
--- /dev/null
+++ b/src/third_party/v8/tools/gcmole/gcmole.lua
@@ -0,0 +1,532 @@
+-- Copyright 2011 the V8 project authors. All rights reserved.
+-- Redistribution and use in source and binary forms, with or without
+-- modification, are permitted provided that the following conditions are
+-- met:
+--
+--     * Redistributions of source code must retain the above copyright
+--       notice, this list of conditions and the following disclaimer.
+--     * Redistributions in binary form must reproduce the above
+--       copyright notice, this list of conditions and the following
+--       disclaimer in the documentation and/or other materials provided
+--       with the distribution.
+--     * Neither the name of Google Inc. nor the names of its
+--       contributors may be used to endorse or promote products derived
+--       from this software without specific prior written permission.
+--
+-- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+-- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+-- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+-- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+-- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+-- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+-- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+-- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+-- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+-- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+-- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+-- This is the main driver for the gcmole tool. See the README for more details.
+-- Usage: CLANG_BIN=clang-bin-dir lua tools/gcmole/gcmole.lua [arm|ia32|x64]
+
+local DIR = arg[0]:match("^(.+)/[^/]+$")
+
+local FLAGS = {
+   -- Do not build the gcsuspects file; reuse the previously generated one.
+   reuse_gcsuspects = false;
+
+   -- Don't use the parallel python runner; process files sequentially.
+   sequential = false;
+
+   -- Print commands to console before executing them.
+   verbose = false;
+
+   -- Perform dead variable analysis.
+   dead_vars = true;
+
+   -- Enable verbose tracing from the plugin itself.
+   verbose_trace = false;
+
+   -- When building gcsuspects, treat whitelisted functions as if they
+   -- cannot cause GC. Currently used to reduce the number of false
+   -- positives in the dead variable analysis. See the TODO next to
+   -- WHITELIST below.
+   whitelist = true;
+}
+local ARGS = {}
+
+for i = 1, #arg do
+   local flag = arg[i]:match "^%-%-([%w_-]+)$"
+   if flag then
+      local no, real_flag = flag:match "^(no)([%w_-]+)$"
+      if real_flag then flag = real_flag end
+
+      flag = flag:gsub("%-", "_")
+      if FLAGS[flag] ~= nil then
+         FLAGS[flag] = (no ~= "no")
+      else
+         error("Unknown flag: " .. flag)
+      end
+   else
+      table.insert(ARGS, arg[i])
+   end
+end
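+
+-- Illustrative note: a flag such as --dead-vars sets FLAGS.dead_vars = true,
+-- its negated form --nodead-vars sets it to false, and dashes in flag names
+-- are translated to underscores before the lookup in FLAGS.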
+
+local ARCHS = ARGS[1] and { ARGS[1] } or { 'ia32', 'arm', 'x64', 'arm64' }
+
+local io = require "io"
+local os = require "os"
+
+function log(...)
+   io.stderr:write(string.format(...))
+   io.stderr:write "\n"
+end
+
+-------------------------------------------------------------------------------
+-- Clang invocation
+
+local CLANG_BIN = os.getenv "CLANG_BIN"
+local CLANG_PLUGINS = os.getenv "CLANG_PLUGINS"
+
+if not CLANG_BIN or CLANG_BIN == "" then
+   error "CLANG_BIN not set"
+end
+
+if not CLANG_PLUGINS or CLANG_PLUGINS == "" then
+   CLANG_PLUGINS = DIR
+end
+
+local function MakeClangCommandLine(
+      plugin, plugin_args, triple, arch_define, arch_options)
+   if plugin_args then
+     for i = 1, #plugin_args do
+        plugin_args[i] = "-Xclang -plugin-arg-" .. plugin
+           .. " -Xclang " .. plugin_args[i]
+     end
+     plugin_args = " " .. table.concat(plugin_args, " ")
+   end
+   return CLANG_BIN .. "/clang++ -std=c++14 -c"
+      .. " -Xclang -load -Xclang " .. CLANG_PLUGINS .. "/libgcmole.so"
+      .. " -Xclang -plugin -Xclang "  .. plugin
+      .. (plugin_args or "")
+      .. " -Xclang -triple -Xclang " .. triple
+      .. " -fno-exceptions"
+      .. " -D" .. arch_define
+      .. " -DENABLE_DEBUGGER_SUPPORT"
+      .. " -DV8_INTL_SUPPORT"
+      .. " -I./"
+      .. " -Iinclude/"
+      .. " -Iout/build/gen"
+      .. " -Ithird_party/icu/source/common"
+      .. " -Ithird_party/icu/source/i18n"
+      .. " " .. arch_options
+end
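+
+-- Illustrative sketch of a resulting command line (paths, plugin name and
+-- arguments depend on the configuration and are assumed here):
+--   $CLANG_BIN/clang++ -std=c++14 -c \
+--     -Xclang -load -Xclang $CLANG_PLUGINS/libgcmole.so \
+--     -Xclang -plugin -Xclang find-problems \
+--     -Xclang -plugin-arg-find-problems -Xclang --dead-vars \
+--     -Xclang -triple -Xclang x86_64-unknown-linux \
+--     -fno-exceptions -DV8_TARGET_ARCH_X64 ... <file.cc>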
+
+local function IterTable(t)
+  return coroutine.wrap(function ()
+    for i, v in ipairs(t) do
+      coroutine.yield(v)
+    end
+  end)
+end
+
+local function SplitResults(lines, func)
+   -- Splits the output of parallel.py and calls func on each result.
+   -- Bails out in case of an error in one of the executions.
+   local current = {}
+   local filename = ""
+   for line in lines do
+      local new_file = line:match "^______________ (.*)$"
+      local code = line:match "^______________ finish (%d+) ______________$"
+      if code then
+         if tonumber(code) > 0 then
+            log(table.concat(current, "\n"))
+            log("Failed to examine " .. filename)
+            return false
+         end
+         log("-- %s", filename)
+         func(filename, IterTable(current))
+      elseif new_file then
+         filename = new_file
+         current = {}
+      else
+         table.insert(current, line)
+      end
+   end
+   return true
+end
+
+function InvokeClangPluginForEachFile(filenames, cfg, func)
+   local cmd_line = MakeClangCommandLine(cfg.plugin,
+                                         cfg.plugin_args,
+                                         cfg.triple,
+                                         cfg.arch_define,
+                                         cfg.arch_options)
+   if FLAGS.sequential then
+      log("** Sequential execution.")
+      for _, filename in ipairs(filenames) do
+         log("-- %s", filename)
+         local action = cmd_line .. " " .. filename .. " 2>&1"
+         if FLAGS.verbose then print('popen ', action) end
+         local pipe = io.popen(action)
+         func(filename, pipe:lines())
+         local success = pipe:close()
+         if not success then error("Failed to run: " .. action) end
+      end
+   else
+      log("** Parallel execution.")
+      local action = "python tools/gcmole/parallel.py \""
+         .. cmd_line .. "\" " .. table.concat(filenames, " ")
+      if FLAGS.verbose then print('popen ', action) end
+      local pipe = io.popen(action)
+      local success = SplitResults(pipe:lines(), func)
+      local closed = pipe:close()
+      if not (success and closed) then error("Failed to run: " .. action) end
+   end
+end
+
+-------------------------------------------------------------------------------
+
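+-- ParseGNFile scans GN files for sections delimited by gcmole markers. The
+-- assumed layout (illustrative sketch only) is roughly:
+--   ### gcmole(arch:x64) ###
+--   sources = [ "src/foo.cc", "src/bar.cc" ]
+-- Everything between the marker and the next closing "]" is searched for
+-- quoted .cc file names, which are recorded under the given condition.
+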
+local function ParseGNFile(for_test)
+   local result = {}
+   local gn_files
+   if for_test then
+      gn_files = {
+         { "tools/gcmole/GCMOLE.gn",             '"([^"]-%.cc)"',      ""         }
+      }
+   else
+      gn_files = {
+         { "BUILD.gn",             '"([^"]-%.cc)"',      ""         },
+         { "test/cctest/BUILD.gn", '"(test-[^"]-%.cc)"', "test/cctest/" }
+      }
+   end
+
+   for i = 1, #gn_files do
+      local filename = gn_files[i][1]
+      local pattern = gn_files[i][2]
+      local prefix = gn_files[i][3]
+      local gn_file = assert(io.open(filename), "failed to open GN file")
+      local gn = gn_file:read('*a')
+      for condition, sources in
+         gn:gmatch "### gcmole%((.-)%) ###(.-)%]" do
+         if result[condition] == nil then result[condition] = {} end
+         for file in sources:gmatch(pattern) do
+            table.insert(result[condition], prefix .. file)
+         end
+      end
+      gn_file:close()
+   end
+
+   return result
+end
+
+local function EvaluateCondition(cond, props)
+   if cond == 'all' then return true end
+
+   local p, v = cond:match "(%w+):(%w+)"
+
+   assert(p and v, "failed to parse condition: " .. cond)
+   assert(props[p] ~= nil, "undefined configuration property: " .. p)
+
+   return props[p] == v
+end
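+
+-- For example, EvaluateCondition("arch:x64", { arch = "x64", os = "linux" })
+-- is true, EvaluateCondition("arch:arm", { arch = "x64" }) is false, and the
+-- special condition "all" always holds.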
+
+local function BuildFileList(sources, props)
+   local list = {}
+   for condition, files in pairs(sources) do
+      if EvaluateCondition(condition, props) then
+         for i = 1, #files do table.insert(list, files[i]) end
+      end
+   end
+   return list
+end
+
+
+local gn_sources = ParseGNFile(false)
+local gn_test_sources = ParseGNFile(true)
+
+local function FilesForArch(arch)
+   return BuildFileList(gn_sources, { os = 'linux',
+                                      arch = arch,
+                                      mode = 'debug',
+                                      simulator = ''})
+end
+
+local function FilesForTest(arch)
+   return BuildFileList(gn_test_sources, { os = 'linux',
+                                           arch = arch,
+                                           mode = 'debug',
+                                           simulator = ''})
+end
+
+local mtConfig = {}
+
+mtConfig.__index = mtConfig
+
+local function config (t) return setmetatable(t, mtConfig) end
+
+function mtConfig:extend(t)
+   local e = {}
+   for k, v in pairs(self) do e[k] = v end
+   for k, v in pairs(t) do e[k] = v end
+   return config(e)
+end
+
+local ARCHITECTURES = {
+   ia32 = config { triple = "i586-unknown-linux",
+                   arch_define = "V8_TARGET_ARCH_IA32",
+                   arch_options = "-m32" },
+   arm = config { triple = "i586-unknown-linux",
+                  arch_define = "V8_TARGET_ARCH_ARM",
+                  arch_options = "-m32" },
+   x64 = config { triple = "x86_64-unknown-linux",
+                  arch_define = "V8_TARGET_ARCH_X64",
+                  arch_options = "" },
+   arm64 = config { triple = "x86_64-unknown-linux",
+                    arch_define = "V8_TARGET_ARCH_ARM64",
+                    arch_options = "" },
+}
+
+-------------------------------------------------------------------------------
+-- GCSuspects Generation
+
+local gc, gc_caused, funcs
+
+-- Note that the gcsuspects file lists functions in the form:
+--  mangled_name,unmangled_function_name
+--
+-- This means that we can match just the function name by matching only
+-- after a comma.
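+--
+-- A hypothetical gcsuspects entry could look like
+--   _ZN2v88internal14MacroAssembler11CallRuntimeE...,CallRuntime
+-- so a pattern such as "MacroAssembler.*,CallRuntime" matches the class in
+-- the mangled part and the plain function name after the comma.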
+local WHITELIST = {
+   -- The following functions call CEntryStub which is always present.
+   "MacroAssembler.*,CallRuntime",
+   "CompileCallLoadPropertyWithInterceptor",
+   "CallIC.*,GenerateMiss",
+
+   -- DirectCEntryStub is a special stub used on ARM.
+   -- It is pinned and always present.
+   "DirectCEntryStub.*,GenerateCall",
+
+   -- TODO GCMole currently is not sensitive enough to understand that certain
+   --      functions only cause GC and return Failure simultaneously.
+   --      Call sites of such functions are safe as long as they properly
+   --      check the return value and propagate the Failure to the caller.
+   --      It should be possible to extend GCMole to understand this.
+   "Heap.*,TryEvacuateObject",
+
+   -- Ignore all StateTag methods.
+   "StateTag",
+
+   -- Ignore printing of elements transition.
+   "PrintElementsTransition",
+
+   -- CodeCreateEvent receives AbstractCode (a raw ptr) as an argument.
+   "CodeCreateEvent",
+   "WriteField",
+};
+
+local function AddCause(name, cause)
+   local t = gc_caused[name]
+   if not t then
+      t = {}
+      gc_caused[name] = t
+   end
+   table.insert(t, cause)
+end
+
+local function resolve(name)
+   local f = funcs[name]
+
+   if not f then
+      f = {}
+      funcs[name] = f
+
+      if name:match ",.*Collect.*Garbage" then
+         gc[name] = true
+         AddCause(name, "<GC>")
+      end
+
+      if FLAGS.whitelist then
+         for i = 1, #WHITELIST do
+            if name:match(WHITELIST[i]) then
+               gc[name] = false
+            end
+         end
+      end
+   end
+
+   return f
+end
+
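+-- parse() consumes the output of the "dump-callees" plugin. The assumed
+-- format (illustrative sketch) lists each function followed by its callees,
+-- each indented with a single tab:
+--   <mangled caller>,Caller
+--   \t<mangled callee>,Callee
+-- For every tab-indented callee the enclosing function is recorded as one of
+-- its callers in funcs[callee].
+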
+local function parse (filename, lines)
+   local scope
+
+   for funcname in lines do
+      if funcname:sub(1, 1) ~= '\t' then
+         resolve(funcname)
+         scope = funcname
+      else
+         local name = funcname:sub(2)
+         resolve(name)[scope] = true
+      end
+   end
+end
+
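+-- Propagation works backwards over the call graph: if Bar is a GC suspect and
+-- Foo calls Bar, then Foo becomes a suspect as well (unless it was explicitly
+-- whitelisted), and Bar is recorded in gccauses as one of Foo's GC causes.
+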
+local function propagate ()
+   log "** Propagating GC information"
+
+   local function mark(from, callers)
+      for caller, _ in pairs(callers) do
+         if gc[caller] == nil then
+            gc[caller] = true
+            mark(caller, funcs[caller])
+         end
+         AddCause(caller, from)
+      end
+   end
+
+   for funcname, callers in pairs(funcs) do
+      if gc[funcname] then mark(funcname, callers) end
+   end
+end
+
+local function GenerateGCSuspects(arch, files, cfg)
+   -- Reset the global state.
+   gc, gc_caused, funcs = {}, {}, {}
+
+   log ("** Building GC Suspects for %s", arch)
+   InvokeClangPluginForEachFile (files,
+                                 cfg:extend { plugin = "dump-callees" },
+                                 parse)
+
+   propagate()
+
+   local out = assert(io.open("gcsuspects", "w"))
+   for name, value in pairs(gc) do if value then out:write (name, '\n') end end
+   out:close()
+
+   local out = assert(io.open("gccauses", "w"))
+   out:write "GC = {"
+   for name, causes in pairs(gc_caused) do
+      out:write("['", name, "'] = {")
+      for i = 1, #causes do out:write ("'", causes[i], "';") end
+      out:write("};\n")
+   end
+   out:write "}"
+   out:close()
+
+   log ("** GCSuspects generated for %s", arch)
+end
+
+--------------------------------------------------------------------------------
+-- Analysis
+
+local function CheckCorrectnessForArch(arch, for_test)
+   local files
+   if for_test then
+      files = FilesForTest(arch)
+   else
+      files = FilesForArch(arch)
+   end
+   local cfg = ARCHITECTURES[arch]
+
+   if not FLAGS.reuse_gcsuspects then
+      GenerateGCSuspects(arch, files, cfg)
+   end
+
+   local processed_files = 0
+   local errors_found = false
+   local output = ""
+   local function SearchForErrors(filename, lines)
+      processed_files = processed_files + 1
+      for l in lines do
+         errors_found = errors_found or
+            l:match "^[^:]+:%d+:%d+:" or
+            l:match "error" or
+            l:match "warning"
+         if for_test then
+            output = output.."\n"..l
+         else
+            print(l)
+         end
+      end
+   end
+
+   log("** Searching for evaluation order problems%s for %s",
+       FLAGS.dead_vars and " and dead variables" or "",
+       arch)
+   local plugin_args = {}
+   if FLAGS.dead_vars then table.insert(plugin_args, "--dead-vars") end
+   if FLAGS.verbose_trace then table.insert(plugin_args, '--verbose') end
+   InvokeClangPluginForEachFile(files,
+                                cfg:extend { plugin = "find-problems",
+                                             plugin_args = plugin_args },
+                                SearchForErrors)
+   log("** Done processing %d files. %s",
+       processed_files,
+       errors_found and "Errors found" or "No errors found")
+
+   return errors_found, output
+end
+
+local function SafeCheckCorrectnessForArch(arch, for_test)
+   local status, errors, output = pcall(CheckCorrectnessForArch, arch, for_test)
+   if not status then
+      print(string.format("There was an error: %s", errors))
+      errors = true
+   end
+   return errors, output
+end
+
+-- Source: https://stackoverflow.com/a/41515925/1540248
+local function StringDifference(str1,str2)
+   for i = 1,#str1 do -- Loop over strings
+         -- If that character is not equal to its counterpart
+         if str1:sub(i,i) ~= str2:sub(i,i) then
+            return i --Return that index
+         end
+   end
+   return #str1+1 -- Return the index after where the shorter one ends as fallback.
+end
+
+local function TestRun()
+   local errors, output = SafeCheckCorrectnessForArch('x64', true)
+   if not errors then
+      log("** Test file should produce errors, but none were found. Output:")
+      log(output)
+      return false
+   end
+
+   local filename = "tools/gcmole/test-expectations.txt"
+   local exp_file = assert(io.open(filename), "failed to open test expectations file")
+   local expectations = exp_file:read('*all')
+
+   if output ~= expectations then
+      log("** Output mismatch from running tests. Please run them manually.")
+      local idx = StringDifference(output, expectations)
+
+      log("Difference at byte "..idx)
+      log("Expected: "..expectations:sub(idx-10,idx+10))
+      log("Actual: "..output:sub(idx-10,idx+10))
+
+      log("--- Full output ---")
+      log(output)
+      log("------")
+
+      return false
+   end
+
+   log("** Tests ran successfully")
+   return true
+end
+
+local errors = not TestRun()
+
+for _, arch in ipairs(ARCHS) do
+   if not ARCHITECTURES[arch] then
+      error("Unknown arch: " .. arch)
+   end
+
+   errors = SafeCheckCorrectnessForArch(arch, false) or errors
+end
+
+os.exit(errors and 1 or 0)
diff --git a/src/third_party/v8/tools/gcmole/ignored_files b/src/third_party/v8/tools/gcmole/ignored_files
new file mode 100644
index 0000000..05fcd7a
--- /dev/null
+++ b/src/third_party/v8/tools/gcmole/ignored_files
@@ -0,0 +1,2 @@
+src/profiler/heap-snapshot-generator.cc
+src/execution/isolate.cc
diff --git a/src/third_party/v8/tools/gcmole/package.sh b/src/third_party/v8/tools/gcmole/package.sh
new file mode 100755
index 0000000..6206e7b
--- /dev/null
+++ b/src/third_party/v8/tools/gcmole/package.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This script will package a built gcmole plugin together with the
+# corresponding clang binary into an archive which can be used on the
+# buildbot infrastructure to be run against V8 checkouts.
+
+THIS_DIR="$(readlink -f "$(dirname "${0}")")"
+
+PACKAGE_DIR="${THIS_DIR}/../../tools/gcmole/gcmole-tools"
+PACKAGE_FILE="${THIS_DIR}/../../tools/gcmole/gcmole-tools.tar.gz"
+PACKAGE_SUM="${THIS_DIR}/../../tools/gcmole/gcmole-tools.tar.gz.sha1"
+BUILD_DIR="${THIS_DIR}/../../third_party/llvm+clang-build"
+
+# Echo all commands
+set -x
+
+# Copy all required files
+mkdir -p "${PACKAGE_DIR}/bin"
+cp "${BUILD_DIR}/bin/clang++" "${PACKAGE_DIR}/bin"
+mkdir -p "${PACKAGE_DIR}/lib"
+cp -r "${BUILD_DIR}/lib/clang" "${PACKAGE_DIR}/lib"
+cp "${THIS_DIR}/libgcmole.so" "${PACKAGE_DIR}"
+
+# Generate the archive
+cd "$(dirname "${PACKAGE_DIR}")"
+tar -c -z -f "${PACKAGE_FILE}" "$(basename "${PACKAGE_DIR}")"
+
+# Generate checksum
+sha1sum "${PACKAGE_FILE}" | awk '{print $1}' > "${PACKAGE_SUM}"
+
+set +x
+
+echo
+echo You can find a packaged version of gcmole here:
+echo
+echo $(readlink -f "${PACKAGE_FILE}")
+echo
+echo You can now run gcmole using this command:
+echo
+echo CLANG_BIN="tools/gcmole/gcmole-tools/bin" lua tools/gcmole/gcmole.lua
+echo
diff --git a/src/third_party/v8/tools/gcmole/parallel.py b/src/third_party/v8/tools/gcmole/parallel.py
new file mode 100755
index 0000000..7ff95cc
--- /dev/null
+++ b/src/third_party/v8/tools/gcmole/parallel.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+This script calls the first argument for each of the following arguments in
+parallel. E.g.
+parallel.py "clang --opt" file1 file2
+calls
+clang --opt file1
+clang --opt file2
+
+The output (stdout and stderr) is concatenated sequentially in the form:
+______________ file1
+<output of clang --opt file1>
+______________ finish <exit code of clang --opt file1> ______________
+______________ file2
+<output of clang --opt file2>
+______________ finish <exit code of clang --opt file2> ______________
+"""
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import multiprocessing
+import subprocess
+import sys
+
+def invoke(cmdline):
+  try:
+    return (subprocess.check_output(
+        cmdline, shell=True, stderr=subprocess.STDOUT), 0)
+  except subprocess.CalledProcessError as e:
+    return (e.output, e.returncode)
+
+if __name__ == '__main__':
+  assert len(sys.argv) > 2
+  processes = multiprocessing.cpu_count()
+  pool = multiprocessing.Pool(processes=processes)
+  cmdlines = ["%s %s" % (sys.argv[1], filename) for filename in sys.argv[2:]]
+  # itertools.izip only exists on Python 2; the builtin zip works on both.
+  for filename, result in zip(
+      sys.argv[2:], pool.imap(invoke, cmdlines)):
+    print("______________ %s" % filename)
+    print(result[0])
+    print("______________ finish %d ______________" % result[1])
diff --git a/src/third_party/v8/tools/gcmole/run-gcmole.py b/src/third_party/v8/tools/gcmole/run-gcmole.py
new file mode 100755
index 0000000..02174b2
--- /dev/null
+++ b/src/third_party/v8/tools/gcmole/run-gcmole.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import os
+import os.path
+import signal
+import subprocess
+import sys
+
+GCMOLE_PATH = os.path.dirname(os.path.abspath(__file__))
+CLANG_BIN = os.path.join(GCMOLE_PATH, 'gcmole-tools', 'bin')
+CLANG_PLUGINS = os.path.join(GCMOLE_PATH, 'gcmole-tools')
+LUA = os.path.join(GCMOLE_PATH, 'gcmole-tools', 'lua52')
+DRIVER = os.path.join(GCMOLE_PATH, 'gcmole.lua')
+BASE_PATH = os.path.dirname(os.path.dirname(GCMOLE_PATH))
+
+assert len(sys.argv) == 2
+
+if not os.path.isfile("out/build/gen/torque-generated/builtin-definitions.h"):
+  print("Expected generated headers in out/build/gen.")
+  print("Either build v8 in out/build or change gcmole.lua:115")
+  sys.exit(-1)
+
+proc = subprocess.Popen(
+    [LUA, DRIVER, sys.argv[1]],
+    env={'CLANG_BIN': CLANG_BIN, 'CLANG_PLUGINS': CLANG_PLUGINS},
+    cwd=BASE_PATH,
+)
+
+def handle_sigterm(*args):
+  try:
+    proc.kill()
+  except OSError:
+    pass
+
+signal.signal(signal.SIGTERM, handle_sigterm)
+
+proc.communicate()
+sys.exit(proc.returncode)
diff --git a/src/third_party/v8/tools/gcmole/suspects.whitelist b/src/third_party/v8/tools/gcmole/suspects.whitelist
new file mode 100644
index 0000000..1ac855f
--- /dev/null
+++ b/src/third_party/v8/tools/gcmole/suspects.whitelist
@@ -0,0 +1,6 @@
+IsConstructor
+IsEval
+IsAsync
+IsPromiseAll
+IsPromiseAny
+VisitRootPointers
diff --git a/src/third_party/v8/tools/gcmole/test-expectations.txt b/src/third_party/v8/tools/gcmole/test-expectations.txt
new file mode 100644
index 0000000..f6c04e4
--- /dev/null
+++ b/src/third_party/v8/tools/gcmole/test-expectations.txt
@@ -0,0 +1,38 @@
+
+tools/gcmole/gcmole-test.cc:27:10: warning: Possibly dead variable.
+  return obj;
+         ^
+tools/gcmole/gcmole-test.cc:45:3: warning: Possible problem with evaluation order.
+  TwoArgumentsFunction(*CauseGC(obj1, isolate), *CauseGC(obj2, isolate));
+  ^
+tools/gcmole/gcmole-test.cc:57:3: warning: Possible problem with evaluation order.
+  TwoSizeTArgumentsFunction(sizeof(*CauseGC(obj1, isolate)),
+  ^
+tools/gcmole/gcmole-test.cc:82:7: warning: Possible problem with evaluation order.
+  so->Method(*CauseGC(obj1, isolate));
+      ^
+tools/gcmole/gcmole-test.cc:84:7: warning: Possible problem with evaluation order.
+  so->Method(CauseGCRaw(*obj1, isolate));
+      ^
+tools/gcmole/gcmole-test.cc:128:14: warning: Possible problem with evaluation order.
+  so_handle->Method(*derived.VirtualCauseGC(obj1, isolate));
+             ^
+tools/gcmole/gcmole-test.cc:130:14: warning: Possible problem with evaluation order.
+  so_handle->Method(*base->VirtualCauseGC(obj1, isolate));
+             ^
+tools/gcmole/gcmole-test.cc:151:14: warning: Possible problem with evaluation order.
+  so_handle->Method(*SomeClass::StaticCauseGC(obj1, isolate));
+             ^
+tools/gcmole/gcmole-test.cc:161:3: warning: Possibly dead variable.
+  raw_obj.Print();
+  ^
+tools/gcmole/gcmole-test.cc:193:3: warning: Possibly dead variable.
+  raw_obj.Print();
+  ^
+tools/gcmole/gcmole-test.cc:216:3: warning: Possibly dead variable.
+  raw_obj.Print();
+  ^
+tools/gcmole/gcmole-test.cc:229:3: warning: Possibly dead variable.
+  raw_obj.Print();
+  ^
+12 warnings generated.
diff --git a/src/third_party/v8/tools/gdb-v8-support.py b/src/third_party/v8/tools/gdb-v8-support.py
new file mode 100644
index 0000000..23c6c17
--- /dev/null
+++ b/src/third_party/v8/tools/gdb-v8-support.py
@@ -0,0 +1,242 @@
+# Copyright 2011 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import re
+import tempfile
+import os
+import subprocess
+import time
+import gdb
+
+kSmiTag = 0
+kSmiTagSize = 1
+kSmiTagMask = (1 << kSmiTagSize) - 1
+
+
+kHeapObjectTag = 1
+kHeapObjectTagSize = 2
+kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1
+
+
+kFailureTag = 3
+kFailureTagSize = 2
+kFailureTagMask = (1 << kFailureTagSize) - 1
+
+
+kSmiShiftSize32 = 0
+kSmiValueSize32 = 31
+kSmiShiftBits32 = kSmiTagSize + kSmiShiftSize32
+
+
+kSmiShiftSize64 = 31
+kSmiValueSize64 = 32
+kSmiShiftBits64 = kSmiTagSize + kSmiShiftSize64
+
+
+kAllBits = 0xFFFFFFFF
+kTopBit32 = 0x80000000
+kTopBit64 = 0x8000000000000000
+
+
+t_u32 = gdb.lookup_type('unsigned int')
+t_u64 = gdb.lookup_type('unsigned long long')
+
+
+def has_smi_tag(v):
+  return v & kSmiTagMask == kSmiTag
+
+
+def has_failure_tag(v):
+  return v & kFailureTagMask == kFailureTag
+
+
+def has_heap_object_tag(v):
+  return v & kHeapObjectTagMask == kHeapObjectTag
+
+
+def raw_heap_object(v):
+  return v - kHeapObjectTag
+
+
+def smi_to_int_32(v):
+  v = v & kAllBits
+  if (v & kTopBit32) == kTopBit32:
+    return ((v & kAllBits) >> kSmiShiftBits32) - 2147483648
+  else:
+    return (v & kAllBits) >> kSmiShiftBits32
+
+
+def smi_to_int_64(v):
+  return (v >> kSmiShiftBits64)
+
+
+def decode_v8_value(v, bitness):
+  base_str = 'v8[%x]' % v
+  if has_smi_tag(v):
+    if bitness == 32:
+      return base_str + (" SMI(%d)" % smi_to_int_32(v))
+    else:
+      return base_str + (" SMI(%d)" % smi_to_int_64(v))
+  elif has_failure_tag(v):
+    return base_str + " (failure)"
+  elif has_heap_object_tag(v):
+    return base_str + (" H(0x%x)" % raw_heap_object(v))
+  else:
+    return base_str
+
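+# Worked examples of the tagging scheme above (values chosen for illustration):
+#   decode_v8_value(0x2a, 32)   -> "v8[2a] SMI(21)"      (low bit 0: Smi, 42 >> 1)
+#   decode_v8_value(0x1001, 32) -> "v8[1001] H(0x1000)"  (low bits 01: heap object)
+#   decode_v8_value(0x1003, 32) -> "v8[1003] (failure)"  (low bits 11: failure tag)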
+
+class V8ValuePrinter(object):
+  "Print a v8value."
+  def __init__(self, val):
+    self.val = val
+  def to_string(self):
+    if self.val.type.sizeof == 4:
+      v_u32 = self.val.cast(t_u32)
+      return decode_v8_value(int(v_u32), 32)
+    elif self.val.type.sizeof == 8:
+      v_u64 = self.val.cast(t_u64)
+      return decode_v8_value(int(v_u64), 64)
+    else:
+      return 'v8value?'
+  def display_hint(self):
+    return 'v8value'
+
+
+def v8_pretty_printers(val):
+  lookup_tag = val.type.tag
+  if lookup_tag == None:
+    return None
+  elif lookup_tag == 'v8value':
+    return V8ValuePrinter(val)
+  return None
+gdb.pretty_printers.append(v8_pretty_printers)
+
+
+def v8_to_int(v):
+  if v.type.sizeof == 4:
+    return int(v.cast(t_u32))
+  elif v.type.sizeof == 8:
+    return int(v.cast(t_u64))
+  else:
+    return '?'
+
+
+def v8_get_value(vstring):
+  v = gdb.parse_and_eval(vstring)
+  return v8_to_int(v)
+
+
+class V8PrintObject (gdb.Command):
+  """Prints a v8 object."""
+  def __init__ (self):
+    super (V8PrintObject, self).__init__ ("v8print", gdb.COMMAND_DATA)
+  def invoke (self, arg, from_tty):
+    v = v8_get_value(arg)
+    gdb.execute('call __gdb_print_v8_object(%d)' % v)
+V8PrintObject()
+
+
+class FindAnywhere (gdb.Command):
+  """Search memory for the given pattern."""
+  MAPPING_RE = re.compile(r"^\s*\[\d+\]\s+0x([0-9A-Fa-f]+)->0x([0-9A-Fa-f]+)")
+  LIVE_MAPPING_RE = re.compile(r"^\s+0x([0-9A-Fa-f]+)\s+0x([0-9A-Fa-f]+)")
+  def __init__ (self):
+    super (FindAnywhere, self).__init__ ("find-anywhere", gdb.COMMAND_DATA)
+  def find (self, startAddr, endAddr, value):
+    try:
+      result = gdb.execute(
+          "find 0x%s, 0x%s, %s" % (startAddr, endAddr, value),
+          to_string = True)
+      if result.find("not found") == -1:
+        print(result)
+    except:
+      pass
+
+  def invoke (self, value, from_tty):
+    for l in gdb.execute("maint info sections", to_string = True).split('\n'):
+      m = FindAnywhere.MAPPING_RE.match(l)
+      if m is None:
+        continue
+      self.find(m.group(1), m.group(2), value)
+    for l in gdb.execute("info proc mappings", to_string = True).split('\n'):
+      m = FindAnywhere.LIVE_MAPPING_RE.match(l)
+      if m is None:
+        continue
+      self.find(m.group(1), m.group(2), value)
+
+FindAnywhere()
+
+
+class Redirect(gdb.Command):
+  """Redirect the subcommand's stdout  to a temporary file.
+
+Usage:   redirect subcommand...
+Example:
+  redirect job 0x123456789
+  redirect x/1024xg 0x12345678
+
+If the GDB_EXTERNAL_EDITOR environment variable is set, the generated
+temporary file is opened directly with that editor.
+  """
+  def __init__(self):
+    super(Redirect, self).__init__("redirect", gdb.COMMAND_USER)
+
+  def invoke(self, subcommand, from_tty):
+    old_stdout = gdb.execute(
+            "p (int)dup(1)", to_string=True).split("=")[-1].strip()
+    try:
+      time_suffix = time.strftime("%Y%m%d-%H%M%S")
+      fd, file = tempfile.mkstemp(suffix="-%s.gdbout" % time_suffix)
+      try:
+        # Temporarily redirect stdout to the created tmp file for the
+        # duration of the subcommand.
+        gdb.execute('p (int)dup2(open("%s", 1), 1)' % file, to_string=True)
+        # Execute the subcommand non-interactively.
+        result = gdb.execute(subcommand, from_tty=False, to_string=True)
+        # Write returned string results to the temporary file as well.
+        with open(file, 'a') as f:
+          f.write(result)
+        # Open generated result.
+        if 'GDB_EXTERNAL_EDITOR' in os.environ:
+          open_cmd = os.environ['GDB_EXTERNAL_EDITOR']
+          print("Opening '%s' with %s" % (file, open_cmd))
+          subprocess.call([open_cmd, file])
+        else:
+          print("Open output:\n  %s '%s'" % (os.environ['EDITOR'], file))
+      finally:
+        # Restore original stdout.
+        gdb.execute("p (int)dup2(%s, 1)" % old_stdout, to_string=True)
+        # Close the temporary file.
+        os.close(fd)
+    finally:
+      # Close the originally duplicated stdout descriptor.
+      gdb.execute("p (int)close(%s)" % old_stdout, to_string=True)
+
+Redirect()
diff --git a/src/third_party/v8/tools/gdbinit b/src/third_party/v8/tools/gdbinit
new file mode 100644
index 0000000..f2d4a26
--- /dev/null
+++ b/src/third_party/v8/tools/gdbinit
@@ -0,0 +1,232 @@
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Print tagged object.
+define job
+call (void) _v8_internal_Print_Object((void*)($arg0))
+end
+document job
+Print a v8 JavaScript object
+Usage: job tagged_ptr
+end
+
+# Print content of v8::internal::Handle.
+define jh
+call (void) _v8_internal_Print_Object(*((v8::internal::Object**)($arg0).location_))
+end
+document jh
+Print content of a v8::internal::Handle
+Usage: jh internal_handle
+end
+
+# Print content of v8::Local handle.
+define jlh
+call (void) _v8_internal_Print_Object(*((v8::internal::Object**)($arg0).val_))
+end
+document jlh
+Print content of a v8::Local handle
+Usage: jlh local_handle
+end
+
+# Print Code objects containing given PC.
+define jco
+call (void) _v8_internal_Print_Code((void*)($arg0))
+end
+document jco
+Print a v8 Code object from an internal code address
+Usage: jco pc
+end
+
+# Print LayoutDescriptor.
+define jld
+call (void) _v8_internal_Print_LayoutDescriptor((void*)($arg0))
+end
+document jld
+Print a v8 LayoutDescriptor object
+Usage: jld tagged_ptr
+end
+
+# Print TransitionTree.
+define jtt
+call (void) _v8_internal_Print_TransitionTree((void*)($arg0))
+end
+document jtt
+Print the complete transition tree of the given v8 Map.
+Usage: jtt tagged_ptr
+end
+
+# Print JavaScript stack trace.
+define jst
+call (void) _v8_internal_Print_StackTrace()
+end
+document jst
+Print the current JavaScript stack trace
+Usage: jst
+end
+
+# Print TurboFan graph node.
+define pn
+call _v8_internal_Node_Print((void*)($arg0))
+end
+document pn
+Print a v8 TurboFan graph node
+Usage: pn node_address
+end
+
+# Skip the JavaScript stack.
+define jss
+set $js_entry_sp=v8::internal::Isolate::Current()->thread_local_top()->js_entry_sp_
+set $rbp=*(void**)$js_entry_sp
+set $rsp=$js_entry_sp + 2*sizeof(void*)
+set $pc=*(void**)($js_entry_sp+sizeof(void*))
+end
+document jss
+Skip the jitted stack on x64 to where we entered JS last.
+Usage: jss
+end
+
+# Print stack trace with assertion scopes.
+define bta
+python
+import re
+frame_re = re.compile(r"^#(\d+)\s*(?:0x[a-f\d]+ in )?(.+) \(.+ at (.+)")
+assert_re = re.compile(r"^\s*(\S+) = .+<v8::internal::Per\w+AssertScope<v8::internal::(\S*), (false|true)>")
+btl = gdb.execute("backtrace full", to_string = True).splitlines()
+for l in btl:
+  match = frame_re.match(l)
+  if match:
+    print("[%-2s] %-60s %-40s" % (match.group(1), match.group(2), match.group(3)))
+  match = assert_re.match(l)
+  if match:
+    if match.group(3) == "false":
+      prefix = "Disallow"
+      color = "\033[91m"
+    else:
+      prefix = "Allow"
+      color = "\033[92m"
+    print("%s -> %s %s (%s)\033[0m" % (color, prefix, match.group(2), match.group(1)))
+end
+end
+document bta
+Print stack trace with assertion scopes
+Usage: bta
+end
+
+# Search for a pointer inside all valid pages.
+define space_find
+  set $space = $arg0
+  set $current_page = $space->first_page()
+  while ($current_page != 0)
+    printf "#   Searching in %p - %p\n", $current_page->area_start(), $current_page->area_end()-1
+    find $current_page->area_start(), $current_page->area_end()-1, $arg1
+    set $current_page = $current_page->next_page()
+  end
+end
+
+define heap_find
+  set $heap = v8::internal::Isolate::Current()->heap()
+  printf "# Searching for %p in old_space  ===============================\n", $arg0
+  space_find $heap->old_space() ($arg0)
+  printf "# Searching for %p in map_space  ===============================\n", $arg0
+  space_find $heap->map_space() $arg0
+  printf "# Searching for %p in code_space ===============================\n", $arg0
+  space_find $heap->code_space() $arg0
+end
+document heap_find
+Find the location of a given address in V8 pages.
+Usage: heap_find address
+end
+
+# The 'disassembly-flavor' command is only available on i386 and x86_64.
+python
+try:
+  gdb.execute("set disassembly-flavor intel")
+except gdb.error:
+  pass
+end
+set disable-randomization off
+
+# Install a handler that runs whenever the debugger stops due to a signal. It
+# walks up the stack looking for V8_Dcheck and selects the frame above it, so
+# the debugger lands directly on the line of code that triggered the DCHECK.
+python
+def dcheck_stop_handler(event):
+  frame = gdb.selected_frame()
+  select_frame = None
+  message = None
+  count = 0
+  # Limit stack scanning since the relevant frames are usually near the top,
+  # and walking a very deep stack (e.g. after a stack overflow) can be slow.
+  while frame is not None and count < 7:
+    count += 1
+    if frame.name() == 'V8_Dcheck':
+      frame_message = gdb.lookup_symbol('message', frame.block())[0]
+      if frame_message:
+        message = frame_message.value(frame).string()
+      select_frame = frame.older()
+      break
+    if frame.name() is not None and frame.name().startswith('V8_Fatal'):
+      select_frame = frame.older()
+    frame = frame.older()
+
+  if select_frame is not None:
+    select_frame.select()
+    gdb.execute('frame')
+    if message:
+      print('DCHECK error: {}'.format(message))
+
+gdb.events.stop.connect(dcheck_stop_handler)
+end
+
+# Code imported from chromium/src/tools/gdb/gdbinit
+python
+
+import os
+import subprocess
+import sys
+
+compile_dirs = set()
+
+
+def get_current_debug_file_directories():
+  dir = gdb.execute("show debug-file-directory", to_string=True)
+  dir = dir[
+      len('The directory where separate debug symbols are searched for is "'
+         ):-len('".') - 1]
+  return set(dir.split(":"))
+
+
+def add_debug_file_directory(dir):
+  # gdb has no command to append a single debug-file-directory, so simulate it
+  # by combining `show debug-file-directory` and `set debug-file-directory`.
+  current_dirs = get_current_debug_file_directories()
+  current_dirs.add(dir)
+  gdb.execute(
+      "set debug-file-directory %s" % ":".join(current_dirs), to_string=True)
+
+
+def newobj_handler(event):
+  global compile_dirs
+  compile_dir = os.path.dirname(event.new_objfile.filename)
+  if not compile_dir:
+    return
+  if compile_dir in compile_dirs:
+    return
+  compile_dirs.add(compile_dir)
+
+  # Add source path
+  gdb.execute("dir %s" % compile_dir)
+
+  # Need to tell the location of .dwo files.
+  # https://sourceware.org/gdb/onlinedocs/gdb/Separate-Debug-Files.html
+  # https://crbug.com/603286#c35
+  add_debug_file_directory(compile_dir)
+
+# Event hook for newly loaded objfiles.
+# https://sourceware.org/gdb/onlinedocs/gdb/Events-In-Python.html
+gdb.events.new_objfile.connect(newobj_handler)
+
+gdb.execute("set environment V8_GDBINIT_SOURCED=1")
+
+end
diff --git a/src/third_party/v8/tools/gen-inlining-tests.py b/src/third_party/v8/tools/gen-inlining-tests.py
new file mode 100644
index 0000000..400386c
--- /dev/null
+++ b/src/third_party/v8/tools/gen-inlining-tests.py
@@ -0,0 +1,568 @@
+#!/usr/bin/env python
+
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+from collections import namedtuple
+import textwrap
+import sys
+
+SHARD_FILENAME_TEMPLATE = "test/mjsunit/compiler/inline-exception-{shard}.js"
+# Generates 2 files. Found by trial and error.
+SHARD_SIZE = 97
+
+PREAMBLE = """
+
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-always-opt
+
+// This test file was generated by tools/gen-inlining-tests.py.
+
+// Global variables
+var deopt = undefined; // either true or false
+var counter = 0;
+
+function resetState() {
+  counter = 0;
+}
+
+function warmUp(f) {
+  try {
+    f();
+  } catch (ex) {
+    // ok
+  }
+  try {
+    f();
+  } catch (ex) {
+    // ok
+  }
+}
+
+function resetOptAndAssertResultEquals(expected, f) {
+  warmUp(f);
+  resetState();
+  // %DebugPrint(f);
+  eval("'dont optimize this function itself please, but do optimize f'");
+  %OptimizeFunctionOnNextCall(f);
+  assertEquals(expected, f());
+}
+
+function resetOptAndAssertThrowsWith(expected, f) {
+  warmUp(f);
+  resetState();
+  // %DebugPrint(f);
+  eval("'dont optimize this function itself please, but do optimize f'");
+  %OptimizeFunctionOnNextCall(f);
+  try {
+    var result = f();
+    fail("resetOptAndAssertThrowsWith",
+        "exception: " + expected,
+        "result: " + result);
+  } catch (ex) {
+    assertEquals(expected, ex);
+  }
+}
+
+function increaseAndReturn15() {
+  if (deopt) %DeoptimizeFunction(f);
+  counter++;
+  return 15;
+}
+
+function increaseAndThrow42() {
+  if (deopt) %DeoptimizeFunction(f);
+  counter++;
+  throw 42;
+}
+
+function increaseAndReturn15_noopt_inner() {
+  if (deopt) %DeoptimizeFunction(f);
+  counter++;
+  return 15;
+}
+
+%NeverOptimizeFunction(increaseAndReturn15_noopt_inner);
+
+function increaseAndThrow42_noopt_inner() {
+  if (deopt) %DeoptimizeFunction(f);
+  counter++;
+  throw 42;
+}
+
+%NeverOptimizeFunction(increaseAndThrow42_noopt_inner);
+
+// Alternative 1
+
+function returnOrThrow(doReturn) {
+  if (doReturn) {
+    return increaseAndReturn15();
+  } else {
+    return increaseAndThrow42();
+  }
+}
+
+// Alternative 2
+
+function increaseAndReturn15_calls_noopt() {
+  return increaseAndReturn15_noopt_inner();
+}
+
+function increaseAndThrow42_calls_noopt() {
+  return increaseAndThrow42_noopt_inner();
+}
+
+// Alternative 3.
+// When passed either {increaseAndReturn15} or {increaseAndThrow42}, it acts
+// as the other one.
+function invertFunctionCall(f) {
+  var result;
+  try {
+    result = f();
+  } catch (ex) {
+    return ex - 27;
+  }
+  throw result + 27;
+}
+
+// Alternative 4: constructor
+function increaseAndStore15Constructor() {
+  if (deopt) %DeoptimizeFunction(f);
+  ++counter;
+  this.x = 15;
+}
+
+function increaseAndThrow42Constructor() {
+  if (deopt) %DeoptimizeFunction(f);
+  ++counter;
+  this.x = 42;
+  throw this.x;
+}
+
+// Alternative 5: property
+var magic = {};
+Object.defineProperty(magic, 'prop', {
+  get: function () {
+    if (deopt) %DeoptimizeFunction(f);
+    return 15 + 0 * ++counter;
+  },
+
+  set: function(x) {
+    // argument should be 37
+    if (deopt) %DeoptimizeFunction(f);
+    counter -= 36 - x; // increments counter
+    throw 42;
+  }
+})
+
+// Generate type feedback.
+
+assertEquals(15, increaseAndReturn15_calls_noopt());
+assertThrowsEquals(function() { return increaseAndThrow42_noopt_inner() }, 42);
+
+assertEquals(15, (new increaseAndStore15Constructor()).x);
+assertThrowsEquals(function() {
+        return (new increaseAndThrow42Constructor()).x;
+    },
+    42);
+
+function runThisShard() {
+
+""".strip()
+
+def booltuples(n):
+  """booltuples(2) yields 4 tuples: (False, False), (False, True),
+  (True, False), (True, True)."""
+
+  assert isinstance(n, int)
+  if n <= 0:
+    yield ()
+  else:
+    for initial in booltuples(n-1):
+      yield initial + (False,)
+      yield initial + (True,)
+
+def fnname(flags):
+    assert len(FLAGLETTERS) == len(flags)
+
+    return "f_" + ''.join(
+          FLAGLETTERS[i] if b else '_'
+          for (i, b) in enumerate(flags))
+
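+# For illustration: with the 18 flags defined near the bottom of this file, a
+# flag tuple that only has tryReturns and doCatch set maps to a name of the
+# form "f_" followed by 18 characters, with an 'r' at the tryReturns position,
+# a 'c' at the doCatch position and '_' everywhere else.
+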
+NUM_TESTS_PRINTED = 0
+NUM_TESTS_IN_SHARD = 0
+
+def printtest(flags):
+  """Print a test case. Takes a couple of boolean flags, on which the
+  printed Javascript code depends."""
+
+  assert all(isinstance(flag, bool) for flag in flags)
+
+  # The alternative flags are in reverse order so that if we take all possible
+  # tuples, ordered lexicographically from false to true, we get first the
+  # default, then alternative 1, then 2, etc.
+  (
+    alternativeFn5,      # use alternative #5 for returning/throwing:
+                         #   return/throw using property
+    alternativeFn4,      # use alternative #4 for returning/throwing:
+                         #   return/throw using constructor
+    alternativeFn3,      # use alternative #3 for returning/throwing:
+                         #   return/throw indirectly, based on function argument
+    alternativeFn2,      # use alternative #2 for returning/throwing:
+                         #   return/throw indirectly in unoptimized code,
+                         #   no branching
+    alternativeFn1,      # use alternative #1 for returning/throwing:
+                         #   return/throw indirectly, based on boolean arg
+    tryThrows,           # in try block, call throwing function
+    tryReturns,          # in try block, call returning function
+    tryFirstReturns,     # in try block, returning goes before throwing
+    tryResultToLocal,    # in try block, result goes to local variable
+    doCatch,             # include catch block
+    catchReturns,        # in catch block, return
+    catchWithLocal,      # in catch block, modify or return the local variable
+    catchThrows,         # in catch block, throw
+    doFinally,           # include finally block
+    finallyReturns,      # in finally block, return local variable
+    finallyThrows,       # in finally block, throw
+    endReturnLocal,      # at very end, return variable local
+    deopt,               # deopt inside inlined function
+  ) = flags
+
+  # BASIC RULES
+
+  # Only one alternative can be applied at any time.
+  if (alternativeFn1 + alternativeFn2 + alternativeFn3 + alternativeFn4
+      + alternativeFn5 > 1):
+    return
+
+  # In try, return or throw, or both.
+  if not (tryReturns or tryThrows): return
+
+  # Either doCatch or doFinally.
+  if not doCatch and not doFinally: return
+
+  # Catch flags only make sense when catching
+  if not doCatch and (catchReturns or catchWithLocal or catchThrows):
+    return
+
+  # Finally flags only make sense when there is a finally block
+  if not doFinally and (finallyReturns or finallyThrows):
+    return
+
+  # tryFirstReturns is only relevant when both tryReturns and tryThrows are
+  # true.
+  if tryFirstReturns and not (tryReturns and tryThrows): return
+
+  # From the try and finally block, we can return or throw, but not both.
+  if catchReturns and catchThrows: return
+  if finallyReturns and finallyThrows: return
+
+  # If at the end we return the local, we need to have touched it.
+  if endReturnLocal and not (tryResultToLocal or catchWithLocal): return
+
+  # PRUNING
+
+  anyAlternative = any([alternativeFn1, alternativeFn2, alternativeFn3,
+      alternativeFn4, alternativeFn5])
+  specificAlternative = any([alternativeFn2, alternativeFn3])
+  rareAlternative = not specificAlternative
+
+  # If try returns and throws, then don't catchWithLocal, endReturnLocal, or
+  # deopt, or do any alternative.
+  if (tryReturns and tryThrows and
+      (catchWithLocal or endReturnLocal or deopt or anyAlternative)):
+    return
+  # We don't do any alternative if we do a finally.
+  if doFinally and anyAlternative: return
+  # We only use the local variable if we do alternative #2 or #3.
+  if ((tryResultToLocal or catchWithLocal or endReturnLocal) and
+      not specificAlternative):
+    return
+  # We don't need to test deopting into a finally.
+  if doFinally and deopt: return
+
+  # We're only interested in alternative #2 if we have endReturnLocal, no
+  # catchReturns, and no catchThrows, and deopt.
+  if (alternativeFn2 and
+      (not endReturnLocal or catchReturns or catchThrows or not deopt)):
+    return
+
+
+  # Flag check succeeded.
+
+  trueFlagNames = [name for (name, value) in flags._asdict().items() if value]
+  flagsMsgLine = "  // Variant flags: [{}]".format(', '.join(trueFlagNames))
+  write(textwrap.fill(flagsMsgLine, subsequent_indent='  //   '))
+  write("")
+
+  if not anyAlternative:
+    fragments = {
+      'increaseAndReturn15': 'increaseAndReturn15()',
+      'increaseAndThrow42': 'increaseAndThrow42()',
+    }
+  elif alternativeFn1:
+    fragments = {
+      'increaseAndReturn15': 'returnOrThrow(true)',
+      'increaseAndThrow42': 'returnOrThrow(false)',
+    }
+  elif alternativeFn2:
+    fragments = {
+      'increaseAndReturn15': 'increaseAndReturn15_calls_noopt()',
+      'increaseAndThrow42': 'increaseAndThrow42_calls_noopt()',
+    }
+  elif alternativeFn3:
+    fragments = {
+      'increaseAndReturn15': 'invertFunctionCall(increaseAndThrow42)',
+      'increaseAndThrow42': 'invertFunctionCall(increaseAndReturn15)',
+    }
+  elif alternativeFn4:
+    fragments = {
+      'increaseAndReturn15': '(new increaseAndStore15Constructor()).x',
+      'increaseAndThrow42': '(new increaseAndThrow42Constructor()).x',
+    }
+  else:
+    assert alternativeFn5
+    fragments = {
+      'increaseAndReturn15': 'magic.prop /* returns 15 */',
+      'increaseAndThrow42': '(magic.prop = 37 /* throws 42 */)',
+    }
+
+  # As we print code, we also maintain what the result should be. Variable
+  # {result} can be one of three things:
+  #
+  # - None, indicating returning JS null
+  # - ("return", n) with n an integer
+  # - ("throw", n), with n an integer
+
+  result = None
+  # We also maintain what the counter should be at the end.
+  # The counter is reset just before f is called.
+  counter = 0
+
+  write(    "  f = function {} () {{".format(fnname(flags)))
+  write(    "    var local = 888;")
+  write(    "    deopt = {};".format("true" if deopt else "false"))
+  local = 888
+  write(    "    try {")
+  write(    "      counter++;")
+  counter += 1
+  resultTo = "local +=" if tryResultToLocal else "return"
+  if tryReturns and not (tryThrows and not tryFirstReturns):
+    write(  "      {} 4 + {increaseAndReturn15};".format(resultTo, **fragments))
+    if result == None:
+      counter += 1
+      if tryResultToLocal:
+        local += 19
+      else:
+        result = ("return", 19)
+  if tryThrows:
+    write(  "      {} 4 + {increaseAndThrow42};".format(resultTo, **fragments))
+    if result == None:
+      counter += 1
+      result = ("throw", 42)
+  if tryReturns and tryThrows and not tryFirstReturns:
+    write(  "      {} 4 + {increaseAndReturn15};".format(resultTo, **fragments))
+    if result == None:
+      counter += 1
+      if tryResultToLocal:
+        local += 19
+      else:
+        result = ("return", 19)
+  write(    "      counter++;")
+  if result == None:
+    counter += 1
+
+  if doCatch:
+    write(  "    } catch (ex) {")
+    write(  "      counter++;")
+    if isinstance(result, tuple) and result[0] == 'throw':
+      counter += 1
+    if catchThrows:
+      write("      throw 2 + ex;")
+      if isinstance(result, tuple) and result[0] == "throw":
+        result = ('throw', 2 + result[1])
+    elif catchReturns and catchWithLocal:
+      write("      return 2 + local;")
+      if isinstance(result, tuple) and result[0] == "throw":
+        result = ('return', 2 + local)
+    elif catchReturns and not catchWithLocal:
+      write("      return 2 + ex;");
+      if isinstance(result, tuple) and result[0] == "throw":
+        result = ('return', 2 + result[1])
+    elif catchWithLocal:
+      write("      local += ex;");
+      if isinstance(result, tuple) and result[0] == "throw":
+        local += result[1]
+        result = None
+        counter += 1
+    else:
+      if isinstance(result, tuple) and result[0] == "throw":
+        result = None
+        counter += 1
+    write(  "      counter++;")
+
+  if doFinally:
+    write(  "    } finally {")
+    write(  "      counter++;")
+    counter += 1
+    if finallyThrows:
+      write("      throw 25;")
+      result = ('throw', 25)
+    elif finallyReturns:
+      write("      return 3 + local;")
+      result = ('return', 3 + local)
+    elif not finallyReturns and not finallyThrows:
+      write("      local += 2;")
+      local += 2
+      counter += 1
+    else: assert False # unreachable
+    write(  "      counter++;")
+
+  write(    "    }")
+  write(    "    counter++;")
+  if result == None:
+    counter += 1
+  if endReturnLocal:
+    write(  "    return 5 + local;")
+    if result == None:
+      result = ('return', 5 + local)
+  write(    "  }")
+
+  if result == None:
+    write(  "  resetOptAndAssertResultEquals(undefined, f);")
+  else:
+    tag, value = result
+    if tag == "return":
+      write(  "  resetOptAndAssertResultEquals({}, f);".format(value))
+    else:
+      assert tag == "throw"
+      write(  "  resetOptAndAssertThrowsWith({}, f);".format(value))
+
+  write(  "  assertEquals({}, counter);".format(counter))
+  write(  "")
+
+  global NUM_TESTS_PRINTED, NUM_TESTS_IN_SHARD
+  NUM_TESTS_PRINTED += 1
+  NUM_TESTS_IN_SHARD += 1
+
+FILE = None # to be initialised to an open file
+SHARD_NUM = 1
+
+def write(*args):
+  return print(*args, file=FILE)
+
+
+
+def rotateshard():
+  global FILE, NUM_TESTS_IN_SHARD, SHARD_SIZE
+  if MODE != 'shard':
+    return
+  if FILE != None and NUM_TESTS_IN_SHARD < SHARD_SIZE:
+    return
+  if FILE != None:
+    finishshard()
+    assert FILE == None
+  FILE = open(SHARD_FILENAME_TEMPLATE.format(shard=SHARD_NUM), 'w')
+  write_shard_header()
+  NUM_TESTS_IN_SHARD = 0
+
+def finishshard():
+  global FILE, SHARD_NUM, MODE
+  assert FILE
+  write_shard_footer()
+  if MODE == 'shard':
+    print("Wrote shard {}.".format(SHARD_NUM))
+    FILE.close()
+    FILE = None
+    SHARD_NUM += 1
+
+
+def write_shard_header():
+  if MODE == 'shard':
+    write("// Shard {}.".format(SHARD_NUM))
+    write("")
+  write(PREAMBLE)
+  write("")
+
+def write_shard_footer():
+  write("}")
+  write("%NeverOptimizeFunction(runThisShard);")
+  write("")
+  write("// {} tests in this shard.".format(NUM_TESTS_IN_SHARD))
+  write("// {} tests up to here.".format(NUM_TESTS_PRINTED))
+  write("")
+  write("runThisShard();")
+
+FLAGLETTERS="54321trflcrltfrtld"
+
+flagtuple = namedtuple('flagtuple', (
+  "alternativeFn5",
+  "alternativeFn4",
+  "alternativeFn3",
+  "alternativeFn2",
+  "alternativeFn1",
+  "tryThrows",
+  "tryReturns",
+  "tryFirstReturns",
+  "tryResultToLocal",
+  "doCatch",
+  "catchReturns",
+  "catchWithLocal",
+  "catchThrows",
+  "doFinally",
+  "finallyReturns",
+  "finallyThrows",
+  "endReturnLocal",
+  "deopt"
+  ))
+
+emptyflags = flagtuple(*((False,) * len(flagtuple._fields)))
+f1 = emptyflags._replace(tryReturns=True, doCatch=True)
+
+# You can test function printtest with f1.
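+# For example, a quick manual check (assuming the module-level counters defined
+# earlier in this file are already initialised) would be:
+#
+#   printtest(f1)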
+
+allFlagCombinations = [
+    flagtuple(*bools)
+    for bools in booltuples(len(flagtuple._fields))
+]
+
+if __name__ == '__main__':
+  global MODE
+  if sys.argv[1:] == []:
+    MODE = 'stdout'
+    print("// Printing all shards together to stdout.")
+    print("")
+    FILE = sys.stdout
+    write_shard_header()
+  elif sys.argv[1:] == ['--shard-and-overwrite']:
+    MODE = 'shard'
+  else:
+    print("Usage:")
+    print("")
+    print("  python {}".format(sys.argv[0]))
+    print("      print all tests to standard output")
+    print("  python {} --shard-and-overwrite".format(sys.argv[0]))
+    print("      print all tests to {}".format(SHARD_FILENAME_TEMPLATE))
+
+    print("")
+    print(sys.argv[1:])
+    print("")
+    sys.exit(1)
+
+  rotateshard()
+
+  for flags in allFlagCombinations:
+    printtest(flags)
+    rotateshard()
+
+  finishshard()
+
+  if MODE == 'shard':
+    print("Total: {} tests.".format(NUM_TESTS_PRINTED))
diff --git a/src/third_party/v8/tools/gen-keywords-gen-h.py b/src/third_party/v8/tools/gen-keywords-gen-h.py
new file mode 100755
index 0000000..02750dc
--- /dev/null
+++ b/src/third_party/v8/tools/gen-keywords-gen-h.py
@@ -0,0 +1,252 @@
+#!/usr/bin/env python
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+import subprocess
+import re
+import math
+
+INPUT_PATH = "src/parsing/keywords.txt"
+OUTPUT_PATH = "src/parsing/keywords-gen.h"
+
+# TODO(leszeks): Trimming seems to regress performance, investigate.
+TRIM_CHAR_TABLE = False
+
+
+def next_power_of_2(x):
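+  # e.g. next_power_of_2(0) == 1, next_power_of_2(5) == 8, next_power_of_2(37) == 64.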
+  return 1 if x == 0 else 2**int(math.ceil(math.log(x, 2)))
+
+
+def call_with_input(cmd, input_string=""):
+  p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+  stdout, _ = p.communicate(input_string)
+  retcode = p.wait()
+  if retcode != 0:
+    raise subprocess.CalledProcessError(retcode, cmd)
+  return stdout
+
+
+def checked_sub(pattern, sub, out, count=1, flags=0):
+  out, n = re.subn(pattern, sub, out, flags=flags)
+  if n != count:
+    raise Exception("Didn't get exactly %d replacement(s) for pattern: %s" %
+                    (count, pattern))
+  return out
+
+
+def change_sizet_to_int(out):
+  # Literal buffer lengths are given as ints, not size_t
+  return checked_sub(r'\bsize_t\b', 'int', out, count=4)
+
+
+def drop_line_directives(out):
+  # #line directives cause gcov issues, so drop them
+  return re.sub(r'^#\s*line .*$\n', '', out, flags=re.MULTILINE)
+
+
+def trim_and_dcheck_char_table(out):
+  # Potential keyword strings are known to be lowercase ascii, so chop off the
+  # rest of the table and mask out the char
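+  # e.g. each read of asso_values[static_cast<unsigned char>(str[2])] in the
+  # gperf output gets a "DCHECK_LT(str[2], 128);" inserted ahead of the table.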
+
+  reads_re = re.compile(
+      r'asso_values\[static_cast<unsigned char>\(str\[(\d+)\]\)\]')
+
+  dchecks = []
+  for str_read in reads_re.finditer(out):
+    dchecks.append("DCHECK_LT(str[%d], 128);" % int(str_read.group(1)))
+
+  if TRIM_CHAR_TABLE:
+    out = checked_sub(
+        r'static const unsigned char asso_values\[\]\s*=\s*\{(\s*\d+\s*,){96}',
+        "".join(dchecks) + r'static const unsigned char asso_values[32] = {',
+        out,
+        flags=re.MULTILINE)
+    out = checked_sub(
+        reads_re.pattern,
+        r'asso_values[static_cast<unsigned char>(str[(\1)]&31)]',
+        out,
+        count=len(dchecks),
+        flags=re.MULTILINE)
+  else:
+    out = checked_sub(
+        r'static const unsigned char asso_values\[\]\s*=\s*\{',
+        "".join(dchecks) + r'static const unsigned char asso_values[128] = {',
+        out,
+        flags=re.MULTILINE)
+
+  return out
+
+
+def use_isinrange(out):
+  # Our IsInRange method is more efficient than checking for min/max length
+  return checked_sub(r'if \(len <= MAX_WORD_LENGTH && len >= MIN_WORD_LENGTH\)',
+                     r'if (IsInRange(len, MIN_WORD_LENGTH, MAX_WORD_LENGTH))',
+                     out)
+
+
+def pad_tables(out):
+  # We don't want to compare against the max hash value, so pad the tables up
+  # to a power of two and mask the hash.
+
+  # First get the new size
+  max_hash_value = int(re.search(r'MAX_HASH_VALUE\s*=\s*(\d+)', out).group(1))
+  old_table_length = max_hash_value + 1
+  new_table_length = next_power_of_2(old_table_length)
+  table_padding_len = new_table_length - old_table_length
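+  # For example, if gperf reports MAX_HASH_VALUE = 57, the tables grow from 58
+  # to 64 entries and the hash below is masked with 0x3f (= 64 - 1).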
+
+  # Pad the length table.
+  single_lengthtable_entry = r'\d+'
+  out = checked_sub(
+      r"""
+      static\ const\ unsigned\ char\ kPerfectKeywordLengthTable\[\]\s*=\s*\{
+        (
+          \s*%(single_lengthtable_entry)s\s*
+          (?:,\s*%(single_lengthtable_entry)s\s*)*
+        )
+      \}
+    """ % {'single_lengthtable_entry': single_lengthtable_entry},
+      r'static const unsigned char kPerfectKeywordLengthTable[%d] = { \1 %s }'
+      % (new_table_length, "".join([',0'] * table_padding_len)),
+      out,
+      flags=re.MULTILINE | re.VERBOSE)
+
+  # Pad the word list.
+  single_wordlist_entry = r"""
+      (?:\#line\ \d+\ ".*"$\s*)?
+      \{\s*"[a-z]*"\s*,\s*Token::[A-Z_]+\}
+    """
+  out = checked_sub(
+      r"""
+      static\ const\ struct\ PerfectKeywordHashTableEntry\ kPerfectKeywordHashTable\[\]\s*=\s*\{
+        (
+          \s*%(single_wordlist_entry)s\s*
+          (?:,\s*%(single_wordlist_entry)s\s*)*
+        )
+      \}
+    """ % {'single_wordlist_entry': single_wordlist_entry},
+      r'static const struct PerfectKeywordHashTableEntry kPerfectKeywordHashTable[%d] = {\1 %s }'
+      % (new_table_length, "".join(
+          [',{"",Token::IDENTIFIER}'] * table_padding_len)),
+      out,
+      flags=re.MULTILINE | re.VERBOSE)
+
+  # Mask the hash and replace the range check with DCHECKs.
+  out = checked_sub(r'Hash\s*\(\s*str,\s*len\s*\)',
+                    r'Hash(str, len)&0x%x' % (new_table_length - 1), out)
+  out = checked_sub(
+      r'if \(key <= MAX_HASH_VALUE\)',
+      r'DCHECK_LT(key, arraysize(kPerfectKeywordLengthTable));DCHECK_LT(key, arraysize(kPerfectKeywordHashTable));',
+      out)
+
+  return out
+
+
+def return_token(out):
+  # We want to return the actual token rather than the table entry.
+
+  # Change the return type of the function. Make it inline too.
+  out = checked_sub(
+      r'const\s*struct\s*PerfectKeywordHashTableEntry\s*\*\s*((?:PerfectKeywordHash::)?GetToken)',
+      r'inline Token::Value \1',
+      out,
+      count=2)
+
+  # Change the return value when the keyword is found
+  out = checked_sub(r'return &kPerfectKeywordHashTable\[key\];',
+                    r'return kPerfectKeywordHashTable[key].value;', out)
+
+  # Change the return value when the keyword is not found
+  out = checked_sub(r'return 0;', r'return Token::IDENTIFIER;', out)
+
+  return out
+
+
+def memcmp_to_while(out):
+  # It's faster to loop over the keyword with a while loop than calling memcmp.
+  # Careful: this replacement matches the generated code verbatim, which makes
+  # it quite flaky, but a more tolerant regex would be unreadable.
+  return checked_sub(
+      re.escape("if (*str == *s && !memcmp (str + 1, s + 1, len - 1))") + r"\s*"
+      + re.escape("return kPerfectKeywordHashTable[key].value;"),
+      """
+      while(*s!=0) {
+        if (*s++ != *str++) return Token::IDENTIFIER;
+      }
+      return kPerfectKeywordHashTable[key].value;
+      """,
+      out,
+      flags=re.MULTILINE)
+
+
+def wrap_namespace(out):
+  return """// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is automatically generated by gen-keywords-gen-h.py and should not
+// be modified manually.
+
+#ifndef V8_PARSING_KEYWORDS_GEN_H_
+#define V8_PARSING_KEYWORDS_GEN_H_
+
+#include "src/parsing/token.h"
+
+namespace v8 {
+namespace internal {
+
+%s
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_PARSING_KEYWORDS_GEN_H_
+""" % (out)
+
+
+def trim_character_set_warning(out):
+  # gperf generates an error message that is too large, trim it
+
+  return out.replace(
+      '"gperf generated tables don\'t work with this execution character set. Please report a bug to <bug-gperf@gnu.org>."',
+      '"gperf generated tables don\'t work with this execution character set."\\\n// If you see this error, please report a bug to <bug-gperf@gnu.org>.'
+  )
+
+
+def main():
+  try:
+    script_dir = os.path.dirname(sys.argv[0])
+    root_dir = os.path.join(script_dir, '..')
+
+    out = subprocess.check_output(["gperf", "-m100", INPUT_PATH], cwd=root_dir)
+
+    # And now some munging of the generated file.
+    out = change_sizet_to_int(out)
+    out = drop_line_directives(out)
+    out = trim_and_dcheck_char_table(out)
+    out = use_isinrange(out)
+    out = pad_tables(out)
+    out = return_token(out)
+    out = memcmp_to_while(out)
+    out = wrap_namespace(out)
+    out = trim_character_set_warning(out)
+
+    # Final formatting.
+    clang_format_path = os.path.join(root_dir,
+                                     'third_party/depot_tools/clang-format')
+    out = call_with_input([clang_format_path], out)
+
+    with open(os.path.join(root_dir, OUTPUT_PATH), 'w') as f:
+      f.write(out)
+
+    return 0
+
+  except subprocess.CalledProcessError as e:
+    sys.stderr.write("Error calling '{}'\n".format(" ".join(e.cmd)))
+    return e.returncode
+
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/src/third_party/v8/tools/gen-postmortem-metadata.py b/src/third_party/v8/tools/gen-postmortem-metadata.py
new file mode 100644
index 0000000..4e9facd
--- /dev/null
+++ b/src/third_party/v8/tools/gen-postmortem-metadata.py
@@ -0,0 +1,745 @@
+#!/usr/bin/env python
+
+#
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+#
+# Emits a C++ file to be compiled and linked into libv8 to support postmortem
+# debugging tools.  Most importantly, this tool emits constants describing V8
+# internals:
+#
+#    v8dbg_type_CLASS__TYPE = VALUE             Describes class type values
+#    v8dbg_class_CLASS__FIELD__TYPE = OFFSET    Describes class fields
+#    v8dbg_parent_CLASS__PARENT                 Describes class hierarchy
+#    v8dbg_frametype_NAME = VALUE               Describes stack frame values
+#    v8dbg_off_fp_NAME = OFFSET                 Frame pointer offsets
+#    v8dbg_prop_NAME = OFFSET                   Object property offsets
+#    v8dbg_NAME = VALUE                         Miscellaneous values
+#
+# These constants are declared as global integers so that they'll be present in
+# the generated libv8 binary.
+#
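+# For example, each entry below is ultimately emitted by emit_set() as a single
+# line of C++ of the form:
+#
+#    V8_EXPORT int v8dbg_<name> = <value>;
+#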
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import io
+import re
+import sys
+
+#
+# Miscellaneous constants such as tags and masks used for object identification,
+# enumeration values used as indexes in internal tables, etc.
+#
+consts_misc = [
+    { 'name': 'FirstNonstringType',     'value': 'FIRST_NONSTRING_TYPE' },
+    { 'name': 'APIObjectType',          'value': 'JS_API_OBJECT_TYPE' },
+    { 'name': 'SpecialAPIObjectType',   'value': 'JS_SPECIAL_API_OBJECT_TYPE' },
+
+    { 'name': 'FirstContextType',     'value': 'FIRST_CONTEXT_TYPE' },
+    { 'name': 'LastContextType',     'value': 'LAST_CONTEXT_TYPE' },
+
+    { 'name': 'IsNotStringMask',        'value': 'kIsNotStringMask' },
+    { 'name': 'StringTag',              'value': 'kStringTag' },
+
+    { 'name': 'StringEncodingMask',     'value': 'kStringEncodingMask' },
+    { 'name': 'TwoByteStringTag',       'value': 'kTwoByteStringTag' },
+    { 'name': 'OneByteStringTag',       'value': 'kOneByteStringTag' },
+
+    { 'name': 'StringRepresentationMask',
+        'value': 'kStringRepresentationMask' },
+    { 'name': 'SeqStringTag',           'value': 'kSeqStringTag' },
+    { 'name': 'ConsStringTag',          'value': 'kConsStringTag' },
+    { 'name': 'ExternalStringTag',      'value': 'kExternalStringTag' },
+    { 'name': 'SlicedStringTag',        'value': 'kSlicedStringTag' },
+    { 'name': 'ThinStringTag',          'value': 'kThinStringTag' },
+
+    { 'name': 'HeapObjectTag',          'value': 'kHeapObjectTag' },
+    { 'name': 'HeapObjectTagMask',      'value': 'kHeapObjectTagMask' },
+    { 'name': 'SmiTag',                 'value': 'kSmiTag' },
+    { 'name': 'SmiTagMask',             'value': 'kSmiTagMask' },
+    { 'name': 'SmiValueShift',          'value': 'kSmiTagSize' },
+    { 'name': 'SmiShiftSize',           'value': 'kSmiShiftSize' },
+    { 'name': 'SystemPointerSize',      'value': 'kSystemPointerSize' },
+    { 'name': 'SystemPointerSizeLog2',  'value': 'kSystemPointerSizeLog2' },
+    { 'name': 'TaggedSize',             'value': 'kTaggedSize' },
+    { 'name': 'TaggedSizeLog2',         'value': 'kTaggedSizeLog2' },
+
+    { 'name': 'OddballFalse',           'value': 'Oddball::kFalse' },
+    { 'name': 'OddballTrue',            'value': 'Oddball::kTrue' },
+    { 'name': 'OddballTheHole',         'value': 'Oddball::kTheHole' },
+    { 'name': 'OddballNull',            'value': 'Oddball::kNull' },
+    { 'name': 'OddballArgumentsMarker', 'value': 'Oddball::kArgumentsMarker' },
+    { 'name': 'OddballUndefined',       'value': 'Oddball::kUndefined' },
+    { 'name': 'OddballUninitialized',   'value': 'Oddball::kUninitialized' },
+    { 'name': 'OddballOther',           'value': 'Oddball::kOther' },
+    { 'name': 'OddballException',       'value': 'Oddball::kException' },
+
+    { 'name': 'ContextRegister',        'value': 'kContextRegister.code()' },
+    { 'name': 'ReturnRegister0',        'value': 'kReturnRegister0.code()' },
+    { 'name': 'JSFunctionRegister',     'value': 'kJSFunctionRegister.code()' },
+    { 'name': 'InterpreterBytecodeOffsetRegister',
+      'value': 'kInterpreterBytecodeOffsetRegister.code()' },
+    { 'name': 'InterpreterBytecodeArrayRegister',
+      'value': 'kInterpreterBytecodeArrayRegister.code()' },
+    { 'name': 'RuntimeCallFunctionRegister',
+      'value': 'kRuntimeCallFunctionRegister.code()' },
+
+    { 'name': 'prop_kind_Data',
+        'value': 'kData' },
+    { 'name': 'prop_kind_Accessor',
+        'value': 'kAccessor' },
+    { 'name': 'prop_kind_mask',
+        'value': 'PropertyDetails::KindField::kMask' },
+    { 'name': 'prop_location_Descriptor',
+        'value': 'kDescriptor' },
+    { 'name': 'prop_location_Field',
+        'value': 'kField' },
+    { 'name': 'prop_location_mask',
+        'value': 'PropertyDetails::LocationField::kMask' },
+    { 'name': 'prop_location_shift',
+        'value': 'PropertyDetails::LocationField::kShift' },
+    { 'name': 'prop_attributes_NONE', 'value': 'NONE' },
+    { 'name': 'prop_attributes_READ_ONLY', 'value': 'READ_ONLY' },
+    { 'name': 'prop_attributes_DONT_ENUM', 'value': 'DONT_ENUM' },
+    { 'name': 'prop_attributes_DONT_DELETE', 'value': 'DONT_DELETE' },
+    { 'name': 'prop_attributes_mask',
+        'value': 'PropertyDetails::AttributesField::kMask' },
+    { 'name': 'prop_attributes_shift',
+        'value': 'PropertyDetails::AttributesField::kShift' },
+    { 'name': 'prop_index_mask',
+        'value': 'PropertyDetails::FieldIndexField::kMask' },
+    { 'name': 'prop_index_shift',
+        'value': 'PropertyDetails::FieldIndexField::kShift' },
+    { 'name': 'prop_representation_mask',
+        'value': 'PropertyDetails::RepresentationField::kMask' },
+    { 'name': 'prop_representation_shift',
+        'value': 'PropertyDetails::RepresentationField::kShift' },
+    { 'name': 'prop_representation_smi',
+        'value': 'Representation::Kind::kSmi' },
+    { 'name': 'prop_representation_double',
+        'value': 'Representation::Kind::kDouble' },
+    { 'name': 'prop_representation_heapobject',
+        'value': 'Representation::Kind::kHeapObject' },
+    { 'name': 'prop_representation_tagged',
+        'value': 'Representation::Kind::kTagged' },
+
+    { 'name': 'prop_desc_key',
+        'value': 'DescriptorArray::kEntryKeyIndex' },
+    { 'name': 'prop_desc_details',
+        'value': 'DescriptorArray::kEntryDetailsIndex' },
+    { 'name': 'prop_desc_value',
+        'value': 'DescriptorArray::kEntryValueIndex' },
+    { 'name': 'prop_desc_size',
+        'value': 'DescriptorArray::kEntrySize' },
+
+    { 'name': 'elements_fast_holey_elements',
+        'value': 'HOLEY_ELEMENTS' },
+    { 'name': 'elements_fast_elements',
+        'value': 'PACKED_ELEMENTS' },
+    { 'name': 'elements_dictionary_elements',
+        'value': 'DICTIONARY_ELEMENTS' },
+
+    { 'name': 'bit_field2_elements_kind_mask',
+        'value': 'Map::Bits2::ElementsKindBits::kMask' },
+    { 'name': 'bit_field2_elements_kind_shift',
+        'value': 'Map::Bits2::ElementsKindBits::kShift' },
+    { 'name': 'bit_field3_is_dictionary_map_shift',
+        'value': 'Map::Bits3::IsDictionaryMapBit::kShift' },
+    { 'name': 'bit_field3_number_of_own_descriptors_mask',
+        'value': 'Map::Bits3::NumberOfOwnDescriptorsBits::kMask' },
+    { 'name': 'bit_field3_number_of_own_descriptors_shift',
+        'value': 'Map::Bits3::NumberOfOwnDescriptorsBits::kShift' },
+    { 'name': 'class_Map__instance_descriptors_offset',
+        'value': 'Map::kInstanceDescriptorsOffset' },
+
+    { 'name': 'off_fp_context_or_frame_type',
+        'value': 'CommonFrameConstants::kContextOrFrameTypeOffset'},
+    { 'name': 'off_fp_context',
+        'value': 'StandardFrameConstants::kContextOffset' },
+    { 'name': 'off_fp_constant_pool',
+        'value': 'StandardFrameConstants::kConstantPoolOffset' },
+    { 'name': 'off_fp_function',
+        'value': 'StandardFrameConstants::kFunctionOffset' },
+    { 'name': 'off_fp_args',
+        'value': 'StandardFrameConstants::kFixedFrameSizeAboveFp' },
+
+    { 'name': 'scopeinfo_idx_nparams',
+        'value': 'ScopeInfo::kParameterCount' },
+    { 'name': 'scopeinfo_idx_ncontextlocals',
+        'value': 'ScopeInfo::kContextLocalCount' },
+    { 'name': 'scopeinfo_idx_first_vars',
+        'value': 'ScopeInfo::kVariablePartIndex' },
+
+    { 'name': 'jsarray_buffer_was_detached_mask',
+        'value': 'JSArrayBuffer::WasDetachedBit::kMask' },
+    { 'name': 'jsarray_buffer_was_detached_shift',
+        'value': 'JSArrayBuffer::WasDetachedBit::kShift' },
+
+    { 'name': 'context_idx_scope_info',
+        'value': 'Context::SCOPE_INFO_INDEX' },
+    { 'name': 'context_idx_prev',
+        'value': 'Context::PREVIOUS_INDEX' },
+    { 'name': 'context_min_slots',
+        'value': 'Context::MIN_CONTEXT_SLOTS' },
+    { 'name': 'native_context_embedder_data_offset',
+        'value': 'Internals::kNativeContextEmbedderDataOffset' },
+
+
+    { 'name': 'namedictionaryshape_prefix_size',
+        'value': 'NameDictionaryShape::kPrefixSize' },
+    { 'name': 'namedictionaryshape_entry_size',
+        'value': 'NameDictionaryShape::kEntrySize' },
+    { 'name': 'globaldictionaryshape_entry_size',
+        'value': 'GlobalDictionaryShape::kEntrySize' },
+
+    { 'name': 'namedictionary_prefix_start_index',
+        'value': 'NameDictionary::kPrefixStartIndex' },
+
+    { 'name': 'numberdictionaryshape_prefix_size',
+        'value': 'NumberDictionaryShape::kPrefixSize' },
+    { 'name': 'numberdictionaryshape_entry_size',
+        'value': 'NumberDictionaryShape::kEntrySize' },
+
+    { 'name': 'simplenumberdictionaryshape_prefix_size',
+        'value': 'SimpleNumberDictionaryShape::kPrefixSize' },
+    { 'name': 'simplenumberdictionaryshape_entry_size',
+        'value': 'SimpleNumberDictionaryShape::kEntrySize' },
+
+    { 'name': 'type_JSError__JS_ERROR_TYPE', 'value': 'JS_ERROR_TYPE' },
+];
+
+#
+# The following useful fields are missing accessors, so we define fake ones.
+# Please note that extra accessors should _only_ be added to expose offsets that
+# can be used to access actual V8 objects' properties. They should not be added
+# for exposing other values. For instance, enumeration values or class
+# constants should be exposed by adding an entry in the "consts_misc" table, not
+# in this "extras_accessors" table.
+#
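+# As an illustration, the first entry below, 'JSFunction, context, Context,
+# kContextOffset', is wrapped in ACCESSORS(...) by load_fields() and parsed by
+# parse_field() into the constant
+#
+#    v8dbg_class_JSFunction__context__Context = JSFunction::kContextOffset
+#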
+extras_accessors = [
+    'JSFunction, context, Context, kContextOffset',
+    'JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset',
+    'HeapObject, map, Map, kMapOffset',
+    'JSObject, elements, Object, kElementsOffset',
+    'JSObject, internal_fields, uintptr_t, kHeaderSize',
+    'FixedArray, data, uintptr_t, kHeaderSize',
+    'JSArrayBuffer, backing_store, uintptr_t, kBackingStoreOffset',
+    'JSArrayBuffer, byte_length, size_t, kByteLengthOffset',
+    'JSArrayBufferView, byte_length, size_t, kByteLengthOffset',
+    'JSArrayBufferView, byte_offset, size_t, kByteOffsetOffset',
+    'JSDate, value, Object, kValueOffset',
+    'JSRegExp, source, Object, kSourceOffset',
+    'JSTypedArray, external_pointer, uintptr_t, kExternalPointerOffset',
+    'JSTypedArray, length, Object, kLengthOffset',
+    'Map, instance_size_in_words, char, kInstanceSizeInWordsOffset',
+    'Map, inobject_properties_start_or_constructor_function_index, char, kInObjectPropertiesStartOrConstructorFunctionIndexOffset',
+    'Map, instance_type, uint16_t, kInstanceTypeOffset',
+    'Map, bit_field, char, kBitFieldOffset',
+    'Map, bit_field2, char, kBitField2Offset',
+    'Map, bit_field3, int, kBitField3Offset',
+    'Map, prototype, Object, kPrototypeOffset',
+    'Oddball, kind_offset, int, kKindOffset',
+    'HeapNumber, value, double, kValueOffset',
+    'ExternalString, resource, Object, kResourceOffset',
+    'SeqOneByteString, chars, char, kHeaderSize',
+    'SeqTwoByteString, chars, char, kHeaderSize',
+    'UncompiledData, inferred_name, String, kInferredNameOffset',
+    'UncompiledData, start_position, int32_t, kStartPositionOffset',
+    'UncompiledData, end_position, int32_t, kEndPositionOffset',
+    'SharedFunctionInfo, raw_function_token_offset, int16_t, kFunctionTokenOffsetOffset',
+    'SharedFunctionInfo, internal_formal_parameter_count, uint16_t, kFormalParameterCountOffset',
+    'SharedFunctionInfo, flags, int, kFlagsOffset',
+    'SharedFunctionInfo, length, uint16_t, kLengthOffset',
+    'SlicedString, parent, String, kParentOffset',
+    'Code, instruction_start, uintptr_t, kHeaderSize',
+    'Code, instruction_size, int, kInstructionSizeOffset',
+    'String, length, int32_t, kLengthOffset',
+    'DescriptorArray, header_size, uintptr_t, kHeaderSize',
+    'ConsString, first, String, kFirstOffset',
+    'ConsString, second, String, kSecondOffset',
+    'SlicedString, offset, SMI, kOffsetOffset',
+    'ThinString, actual, String, kActualOffset',
+    'Symbol, name, Object, kDescriptionOffset',
+];
+
+#
+# The following is a whitelist of classes we expect to find when scanning the
+# source code. This list is not exhaustive, but it's still useful to identify
+# when this script gets out of sync with the source. See load_objects().
+#
+expected_classes = [
+    'ConsString', 'FixedArray', 'HeapNumber', 'JSArray', 'JSFunction',
+    'JSObject', 'JSRegExp', 'JSPrimitiveWrapper', 'Map', 'Oddball', 'Script',
+    'SeqOneByteString', 'SharedFunctionInfo', 'ScopeInfo', 'JSPromise',
+    'DescriptorArray'
+];
+
+
+#
+# The following structures store high-level representations of the structures
+# for which we're going to emit descriptive constants.
+#
+types = {};             # set of all type names
+typeclasses = {};       # maps type names to corresponding class names
+klasses = {};           # known classes, including parents
+fields = [];            # field declarations
+
+header = '''
+/*
+ * This file is generated by %s.  Do not edit directly.
+ */
+
+#include "src/init/v8.h"
+#include "src/codegen/register-arch.h"
+#include "src/execution/frames.h"
+#include "src/execution/frames-inl.h" /* for architecture-specific frame constants */
+#include "src/objects/contexts.h"
+#include "src/objects/objects.h"
+#include "src/objects/data-handler.h"
+#include "src/objects/js-promise.h"
+#include "src/objects/js-regexp-string-iterator.h"
+
+namespace v8 {
+namespace internal {
+
+extern "C" {
+
+/* stack frame constants */
+#define FRAME_CONST(value, klass)       \
+    V8_EXPORT int v8dbg_frametype_##klass = StackFrame::value;
+
+STACK_FRAME_TYPE_LIST(FRAME_CONST)
+
+#undef FRAME_CONST
+
+''' % sys.argv[0];
+
+footer = '''
+}
+
+}
+}
+'''
+
+#
+# Get the base class
+#
+def get_base_class(klass):
+        if (klass == 'Object'):
+                return klass;
+
+        if (not (klass in klasses)):
+                return None;
+
+        k = klasses[klass];
+
+        return get_base_class(k['parent']);
+
+#
+# Loads class hierarchy and type information from "objects.h" etc.
+#
+def load_objects():
+        #
+        # Construct a dictionary for the classes we're sure should be present.
+        #
+        checktypes = {};
+        for klass in expected_classes:
+                checktypes[klass] = True;
+
+
+        for filename in sys.argv[2:]:
+                if not filename.endswith("-inl.h"):
+                        load_objects_from_file(filename, checktypes)
+
+        if (len(checktypes) > 0):
+                for klass in checktypes:
+                        print('error: expected class \"%s\" not found' % klass);
+
+                sys.exit(1);
+
+
+def load_objects_from_file(objfilename, checktypes):
+        objfile = io.open(objfilename, 'r', encoding='utf-8');
+        in_insttype = False;
+        in_torque_insttype = False
+        in_torque_fulldef = False
+
+        typestr = '';
+        torque_typestr = ''
+        torque_fulldefstr = ''
+        uncommented_file = ''
+
+        #
+        # Iterate the header file line-by-line to collect type and class
+        # information. For types, we accumulate a string representing the entire
+        # InstanceType enum definition and parse it later because it's easier to
+        # do so without the embedded newlines.
+        #
+        for line in objfile:
+                if (line.startswith('enum InstanceType : uint16_t {')):
+                        in_insttype = True;
+                        continue;
+
+                if (line.startswith('#define TORQUE_ASSIGNED_INSTANCE_TYPE_LIST')):
+                        in_torque_insttype = True
+                        continue
+
+                if (line.startswith('#define TORQUE_INSTANCE_CHECKERS_SINGLE_FULLY_DEFINED')):
+                        in_torque_fulldef = True
+                        continue
+
+                if (in_insttype and line.startswith('};')):
+                        in_insttype = False;
+                        continue;
+
+                if (in_torque_insttype and (not line or line.isspace())):
+                        in_torque_insttype = False
+                        continue
+
+                if (in_torque_fulldef and (not line or line.isspace())):
+                        in_torque_fulldef = False
+                        continue
+
+                line = re.sub('//.*', '', line.strip());
+
+                if (in_insttype):
+                        typestr += line;
+                        continue;
+
+                if (in_torque_insttype):
+                        torque_typestr += line
+                        continue
+
+                if (in_torque_fulldef):
+                        torque_fulldefstr += line
+                        continue
+
+                uncommented_file += '\n' + line
+
+        for match in re.finditer(r'\nclass(?:\s+V8_EXPORT(?:_PRIVATE)?)?'
+                                 r'\s+(\w[^:;]*)'
+                                 r'(?:: public (\w[^{]*))?\s*{\s*',
+                                 uncommented_file):
+                klass = match.group(1).strip();
+                pklass = match.group(2);
+                if (pklass):
+                        # Check for generated Torque class.
+                        gen_match = re.match(
+                            r'TorqueGenerated\w+\s*<\s*\w+,\s*(\w+)\s*>',
+                            pklass)
+                        if (gen_match):
+                                pklass = gen_match.group(1)
+                        # Strip potential template arguments from parent
+                        # class.
+                        match = re.match(r'(\w+)(<.*>)?', pklass.strip());
+                        pklass = match.group(1).strip();
+                klasses[klass] = { 'parent': pklass };
+
+        #
+        # Process the instance type declaration.
+        #
+        entries = typestr.split(',');
+        for entry in entries:
+                types[re.sub('\s*=.*', '', entry).lstrip()] = True;
+        entries = torque_typestr.split('\\')
+        for entry in entries:
+                types[re.sub(r' *V\(|\) *', '', entry)] = True
+        entries = torque_fulldefstr.split('\\')
+        for entry in entries:
+                entry = entry.strip()
+                if not entry:
+                    continue
+                idx = entry.find('(');
+                rest = entry[idx + 1: len(entry) - 1];
+                args = re.split('\s*,\s*', rest);
+                typename = args[0]
+                typeconst = args[1]
+                types[typeconst] = True
+                typeclasses[typeconst] = typename
+        #
+        # Infer class names for each type based on a systematic transformation.
+        # For example, "JS_FUNCTION_TYPE" becomes "JSFunction".  We find the
+        # class for each type rather than the other way around because it is
+        # rarer for one type to map to more than one class than vice versa.
+        #
+        for type in types:
+                usetype = type
+
+                #
+                # Remove the "_TYPE" suffix and then convert to camel case,
+                # except that a "JS" prefix remains uppercase (as in
+                # "JS_FUNCTION_TYPE" => "JSFunction").
+                #
+                if (not usetype.endswith('_TYPE')):
+                        continue;
+
+                usetype = usetype[0:len(usetype) - len('_TYPE')];
+                parts = usetype.split('_');
+                cctype = '';
+
+                if (parts[0] == 'JS'):
+                        cctype = 'JS';
+                        start = 1;
+                else:
+                        cctype = '';
+                        start = 0;
+
+                for ii in range(start, len(parts)):
+                        part = parts[ii];
+                        cctype += part[0].upper() + part[1:].lower();
+
+                #
+                # Mapping string types is more complicated.  Both types and
+                # class names for Strings specify a representation (e.g., Seq,
+                # Cons, External, or Sliced) and an encoding (TwoByte/OneByte).
+                # In the simplest case, both of these are explicit in both
+                # names, as in:
+                #
+                #       EXTERNAL_ONE_BYTE_STRING_TYPE => ExternalOneByteString
+                #
+                # However, either the representation or encoding can be omitted
+                # from the type name, in which case "Seq" and "TwoByte" are
+                # assumed, as in:
+                #
+                #       STRING_TYPE => SeqTwoByteString
+                #
+                # Additionally, sometimes the type name has more information
+                # than the class, as in:
+                #
+                #       CONS_ONE_BYTE_STRING_TYPE => ConsString
+                #
+                # To figure this out dynamically, we first check for a
+                # representation and encoding and add them if they're not
+                # present.  If that doesn't yield a valid class name, then we
+                # strip out the representation.
+                #
+                if (cctype.endswith('String')):
+                        if (cctype.find('Cons') == -1 and
+                            cctype.find('External') == -1 and
+                            cctype.find('Sliced') == -1):
+                                if (cctype.find('OneByte') != -1):
+                                        cctype = re.sub('OneByteString$',
+                                            'SeqOneByteString', cctype);
+                                else:
+                                        cctype = re.sub('String$',
+                                            'SeqString', cctype);
+
+                        if (cctype.find('OneByte') == -1):
+                                cctype = re.sub('String$', 'TwoByteString',
+                                    cctype);
+
+                        if (not (cctype in klasses)):
+                                cctype = re.sub('OneByte', '', cctype);
+                                cctype = re.sub('TwoByte', '', cctype);
+
+                #
+                # Despite all that, some types have no corresponding class.
+                #
+                if (cctype in klasses):
+                        typeclasses[type] = cctype;
+                        if (cctype in checktypes):
+                                del checktypes[cctype];
+
+#
+# For a given macro call, pick apart the arguments and return an object
+# describing the corresponding output constant.  See load_fields().
+#
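+# For example (a hypothetical invocation, chosen to show the SMI special case):
+#
+#    parse_field('SMI_ACCESSORS(JSArray, length, kLengthOffset)')
+#      => { 'name': 'class_JSArray__length__SMI',
+#           'value': 'JSArray::kLengthOffset' }
+#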
+def parse_field(call):
+        # Replace newlines with spaces (strings are immutable, so rebuild the
+        # whole string rather than assigning to individual characters).
+        call = call.replace('\n', ' ');
+
+        idx = call.find('(');
+        kind = call[0:idx];
+        rest = call[idx + 1: len(call) - 1];
+        args = re.split('\s*,\s*', rest);
+
+        consts = [];
+
+        klass = args[0];
+        field = args[1];
+        dtype = None
+        offset = None
+        if kind.startswith('WEAK_ACCESSORS'):
+                dtype = 'weak'
+                offset = args[2];
+        elif not (kind.startswith('SMI_ACCESSORS') or kind.startswith('ACCESSORS_TO_SMI')):
+                dtype = args[2].replace('<', '_').replace('>', '_')
+                offset = args[3];
+        else:
+                offset = args[2];
+                dtype = 'SMI'
+
+
+        assert(offset is not None and dtype is not None);
+        return ({
+            'name': 'class_%s__%s__%s' % (klass, field, dtype),
+            'value': '%s::%s' % (klass, offset)
+        });
+
+#
+# Load field offset information from objects-inl.h etc.
+#
+def load_fields():
+        for filename in sys.argv[2:]:
+                if filename.endswith("-inl.h"):
+                        load_fields_from_file(filename)
+
+        for body in extras_accessors:
+                fields.append(parse_field('ACCESSORS(%s)' % body));
+
+
+def load_fields_from_file(filename):
+        inlfile = io.open(filename, 'r', encoding='utf-8');
+
+        #
+        # Each class's fields and the corresponding offsets are described in the
+        # source by calls to macros like "ACCESSORS" (and friends).  All we do
+        # here is extract these macro invocations, taking into account that they
+        # may span multiple lines and may contain nested parentheses.  We also
+        # call parse_field() to pick apart the invocation.
+        #
+        prefixes = [ 'ACCESSORS', 'ACCESSORS2', 'ACCESSORS_GCSAFE',
+                     'SMI_ACCESSORS', 'ACCESSORS_TO_SMI',
+                     'RELEASE_ACQUIRE_ACCESSORS', 'WEAK_ACCESSORS' ];
+        prefixes += ([ prefix + "_CHECKED" for prefix in prefixes ] +
+                     [ prefix + "_CHECKED2" for prefix in prefixes ])
+        current = '';
+        opens = 0;
+
+        for line in inlfile:
+                if (opens > 0):
+                        # Continuation line
+                        for ii in range(0, len(line)):
+                                if (line[ii] == '('):
+                                        opens += 1;
+                                elif (line[ii] == ')'):
+                                        opens -= 1;
+
+                                if (opens == 0):
+                                        break;
+
+                        current += line[0:ii + 1];
+                        continue;
+
+                for prefix in prefixes:
+                        if (not line.startswith(prefix + '(')):
+                                continue;
+
+                        if (len(current) > 0):
+                                fields.append(parse_field(current));
+                                current = '';
+
+                        for ii in range(len(prefix), len(line)):
+                                if (line[ii] == '('):
+                                        opens += 1;
+                                elif (line[ii] == ')'):
+                                        opens -= 1;
+
+                                if (opens == 0):
+                                        break;
+
+                        current += line[0:ii + 1];
+
+        if (len(current) > 0):
+                fields.append(parse_field(current));
+                current = '';
+
+#
+# Emit a block of constants.
+#
+def emit_set(out, consts):
+        lines = set()  # To remove duplicates.
+
+        # Fix up overzealous parses.  This could be done inside the
+        # parsers but as there are several, it's easiest to do it here.
+        ws = re.compile('\s+')
+        for const in consts:
+                name = ws.sub('', const['name'])
+                value = ws.sub('', str(const['value']))  # Can be a number.
+                lines.add('V8_EXPORT int v8dbg_%s = %s;\n' % (name, value))
+
+        for line in lines:
+                out.write(line);
+        out.write('\n');
+
+#
+# Emit the whole output file.
+#
+def emit_config():
+        out = open(sys.argv[1], 'w');
+
+        out.write(header);
+
+        out.write('/* miscellaneous constants */\n');
+        emit_set(out, consts_misc);
+
+        out.write('/* class type information */\n');
+        consts = [];
+        for typename in sorted(typeclasses):
+                klass = typeclasses[typename];
+                consts.append({
+                    'name': 'type_%s__%s' % (klass, typename),
+                    'value': typename
+                });
+
+        emit_set(out, consts);
+
+        out.write('/* class hierarchy information */\n');
+        consts = [];
+        for klassname in sorted(klasses):
+                pklass = klasses[klassname]['parent'];
+                bklass = get_base_class(klassname);
+                if (bklass != 'Object'):
+                        continue;
+                if (pklass == None):
+                        continue;
+
+                consts.append({
+                    'name': 'parent_%s__%s' % (klassname, pklass),
+                    'value': 0
+                });
+
+        emit_set(out, consts);
+
+        out.write('/* field information */\n');
+        emit_set(out, fields);
+
+        out.write(footer);
+
+if (len(sys.argv) < 4):
+        print('usage: %s output.cc objects.h objects-inl.h' % sys.argv[0]);
+        sys.exit(2);
+
+load_objects();
+load_fields();
+emit_config();
diff --git a/src/third_party/v8/tools/generate-builtins-tests.py b/src/third_party/v8/tools/generate-builtins-tests.py
new file mode 100755
index 0000000..3abe750
--- /dev/null
+++ b/src/third_party/v8/tools/generate-builtins-tests.py
@@ -0,0 +1,161 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import json
+import optparse
+import os
+import random
+import shutil
+import subprocess
+import sys
+
+
+SKIPLIST = [
+  # Skip special d8 functions.
+  "load", "os", "print", "read", "readline", "quit"
+]
+
+
+def GetRandomObject():
+  return random.choice([
+    "0", "1", "2.5", "0x1000", "\"string\"", "{foo: \"bar\"}", "[1, 2, 3]",
+    "function() { return 0; }"
+  ])
+
+
+g_var_index = 0
+
+
+def GetVars(result, num, first = []):
+  global g_var_index
+  variables = []
+  for i in range(num):
+    variables.append("__v_%d" % g_var_index)
+    g_var_index += 1
+  for var in variables:
+    result.append("var %s = %s;" % (var, GetRandomObject()))
+  return ", ".join(first + variables)
+
+
+# Wraps |string| in try..catch.
+def TryCatch(result, string, exception_behavior = ""):
+  result.append("try { %s } catch(e) { %s }" % (string, exception_behavior))
+
+
+def BuildTests(function, full_name, options):
+  assert function["type"] == "function"
+  global g_var_index
+  g_var_index = 0
+  result = ["// AUTO-GENERATED BY tools/generate-builtins-tests.py.\n"]
+  result.append("// Function call test:")
+  length = function["length"]
+  TryCatch(result, "%s(%s);" % (full_name, GetVars(result, length)))
+
+  if "prototype" in function:
+    proto = function["prototype"]
+    result.append("\n// Constructor test:")
+    TryCatch(result,
+             "var recv = new %s(%s);" % (full_name, GetVars(result, length)),
+             "var recv = new Object();")
+
+    getters = []
+    methods = []
+    for prop in proto:
+      proto_property = proto[prop]
+      proto_property_type = proto_property["type"]
+      if proto_property_type == "getter":
+        getters.append(proto_property)
+        result.append("recv.__defineGetter__(\"%s\", "
+                      "function() { return %s; });" %
+                      (proto_property["name"], GetVars(result, 1)))
+      if proto_property_type == "number":
+        result.append("recv.__defineGetter__(\"%s\", "
+                      "function() { return %s; });" %
+                      (proto_property["name"], GetVars(result, 1)))
+      if proto_property_type == "function":
+        methods.append(proto_property)
+    if getters:
+      result.append("\n// Getter tests:")
+      for getter in getters:
+        result.append("print(recv.%s);" % getter["name"])
+    if methods:
+      result.append("\n// Method tests:")
+      for method in methods:
+        args = GetVars(result, method["length"], ["recv"])
+        call = "%s.prototype.%s.call(%s)" % (full_name, method["name"], args)
+        TryCatch(result, call)
+
+  filename = os.path.join(options.outdir, "%s.js" % (full_name))
+  with open(filename, "w") as f:
+    f.write("\n".join(result))
+    f.write("\n")
+
+
+def VisitObject(obj, path, options):
+  obj_type = obj["type"]
+  obj_name = "%s%s" % (path, obj["name"])
+  if obj_type == "function":
+    BuildTests(obj, obj_name, options)
+  if "properties" in obj:
+    for prop_name in obj["properties"]:
+      prop = obj["properties"][prop_name]
+      VisitObject(prop, "%s." % (obj_name), options)
+
+
+def ClearGeneratedFiles(options):
+  if os.path.exists(options.outdir):
+    shutil.rmtree(options.outdir)
+
+
+def GenerateTests(options):
+  ClearGeneratedFiles(options)  # Re-generate everything.
+  output = subprocess.check_output(
+      "%s %s" % (options.d8, options.script), shell=True).strip()
+  objects = json.loads(output)
+
+  os.makedirs(options.outdir)
+  for obj_name in objects:
+    if obj_name in SKIPLIST: continue
+    obj = objects[obj_name]
+    VisitObject(obj, "", options)
+
+
+def BuildOptions():
+  result = optparse.OptionParser()
+  result.add_option("--d8", help="d8 binary to use",
+                    default="out/ia32.release/d8")
+  result.add_option("--outdir", help="directory where to place generated tests",
+                    default="test/mjsunit/builtins-gen")
+  result.add_option("--script", help="builtins detector script to run in d8",
+                    default="tools/detect-builtins.js")
+  return result
+
+
+def Main():
+  parser = BuildOptions()
+  (options, args) = parser.parse_args()
+  if len(args) != 1 or args[0] == "help":
+    parser.print_help()
+    return 1
+  action = args[0]
+
+  if action == "generate":
+    GenerateTests(options)
+    return 0
+
+  if action == "clear":
+    ClearGeneratedFiles(options)
+    return 0
+
+  print("Unknown action: %s" % action)
+  parser.print_help()
+  return 1
+
+
+if __name__ == "__main__":
+  sys.exit(Main())
diff --git a/src/third_party/v8/tools/generate-header-include-checks.py b/src/third_party/v8/tools/generate-header-include-checks.py
new file mode 100755
index 0000000..909dafe
--- /dev/null
+++ b/src/third_party/v8/tools/generate-header-include-checks.py
@@ -0,0 +1,148 @@
+#!/usr/bin/env python
+# vim:fenc=utf-8:shiftwidth=2
+
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Check that each header can be included in isolation.
+
+For each header we generate one .cc file which only includes this one header.
+All these .cc files are then added to a sources.gni file which is included in
+BUILD.gn. Just compile to check whether there are any violations of the rule
+that each header must be includable in isolation.
+"""
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import argparse
+import os
+import os.path
+import re
+import sys
+
+# TODO(clemensb): Extend to tests.
+DEFAULT_INPUT = ['base', 'src']
+DEFAULT_GN_FILE = 'BUILD.gn'
+MY_DIR = os.path.dirname(os.path.realpath(__file__))
+V8_DIR = os.path.dirname(MY_DIR)
+OUT_DIR = os.path.join(V8_DIR, 'check-header-includes')
+AUTO_EXCLUDE = [
+  # flag-definitions.h needs a mode set for being included.
+  'src/flags/flag-definitions.h',
+]
+AUTO_EXCLUDE_PATTERNS = [
+  'src/base/atomicops_internals_.*',
+  # TODO(petermarshall): Enable once Perfetto is built by default.
+  'src/libplatform/tracing/perfetto*',
+] + [
+  # platform-specific headers
+  '\\b{}\\b'.format(p) for p in
+    ('win', 'win32', 'ia32', 'x64', 'arm', 'arm64', 'mips', 'mips64', 's390',
+     'ppc')]
+
+args = None
+def parse_args():
+  global args
+  parser = argparse.ArgumentParser()
+  parser.add_argument('-i', '--input', type=str, action='append',
+                      help='Headers or directories to check (directories '
+                           'are scanned for headers recursively); default: ' +
+                           ','.join(DEFAULT_INPUT))
+  parser.add_argument('-x', '--exclude', type=str, action='append',
+                      help='Add an exclude pattern (regex)')
+  parser.add_argument('-v', '--verbose', action='store_true',
+                      help='Be verbose')
+  args = parser.parse_args()
+  args.exclude = (args.exclude or []) + AUTO_EXCLUDE_PATTERNS
+  args.exclude += ['^' + re.escape(x) + '$' for x in AUTO_EXCLUDE]
+  if not args.input:
+    args.input=DEFAULT_INPUT
+
+
+def printv(line):
+  if args.verbose:
+    print(line)
+
+
+def find_all_headers():
+  printv('Searching for headers...')
+  header_files = []
+  exclude_patterns = [re.compile(x) for x in args.exclude]
+  def add_recursively(filename):
+    full_name = os.path.join(V8_DIR, filename)
+    if not os.path.exists(full_name):
+      sys.exit('File does not exist: {}'.format(full_name))
+    if os.path.isdir(full_name):
+      for subfile in os.listdir(full_name):
+        full_name = os.path.join(filename, subfile)
+        printv('Scanning {}'.format(full_name))
+        add_recursively(full_name)
+    elif filename.endswith('.h'):
+      printv('--> Found header file {}'.format(filename))
+      for p in exclude_patterns:
+        if p.search(filename):
+          printv('--> EXCLUDED (matches {})'.format(p.pattern))
+          return
+      header_files.append(filename)
+
+  for filename in args.input:
+    add_recursively(filename)
+
+  return header_files
+
+
+def get_cc_file_name(header):
+  split = os.path.split(header)
+  header_dir = os.path.relpath(split[0], V8_DIR)
+  # Prefix with the directory name, to avoid collisions in the object files.
+  prefix = header_dir.replace(os.path.sep, '-')
+  cc_file_name = 'test-include-' + prefix + '-' + split[1][:-1] + 'cc'
+  return os.path.join(OUT_DIR, cc_file_name)
+
+
+def create_including_cc_files(header_files):
+  comment = 'check including this header in isolation'
+  for header in header_files:
+    cc_file_name = get_cc_file_name(header)
+    rel_cc_file_name = os.path.relpath(cc_file_name, V8_DIR)
+    content = '#include "{}"  // {}\n'.format(header, comment)
+    if os.path.exists(cc_file_name):
+      with open(cc_file_name) as cc_file:
+        if cc_file.read() == content:
+          printv('File {} is up to date'.format(rel_cc_file_name))
+          continue
+    printv('Creating file {}'.format(rel_cc_file_name))
+    with open(cc_file_name, 'w') as cc_file:
+      cc_file.write(content)
+
+
+def generate_gni(header_files):
+  gni_file = os.path.join(OUT_DIR, 'sources.gni')
+  printv('Generating file "{}"'.format(os.path.relpath(gni_file, V8_DIR)))
+  with open(gni_file, 'w') as gn:
+    gn.write("""\
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This list is filled automatically by tools/check_header_includes.py.
+check_header_includes_sources = [
+""");
+    for header in header_files:
+      cc_file_name = get_cc_file_name(header)
+      gn.write('    "{}",\n'.format(os.path.relpath(cc_file_name, V8_DIR)))
+    gn.write(']\n')
+
+
+def main():
+  parse_args()
+  header_files = find_all_headers()
+  if not os.path.exists(OUT_DIR):
+    os.mkdir(OUT_DIR)
+  create_including_cc_files(header_files)
+  generate_gni(header_files)
+
+if __name__ == '__main__':
+  main()
diff --git a/src/third_party/v8/tools/generate-runtime-call-stats.py b/src/third_party/v8/tools/generate-runtime-call-stats.py
new file mode 100755
index 0000000..ba10b79
--- /dev/null
+++ b/src/third_party/v8/tools/generate-runtime-call-stats.py
@@ -0,0 +1,490 @@
+#!/usr/bin/python3
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Runs chromium/src/run_benchmark for a given story and extracts the generated
+# runtime call stats.
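+#
+# A typical invocation might look like this (the story name is only an example):
+#
+#   ./generate-runtime-call-stats.py -r 5 --group --format table \
+#       --chromium-dir ~/chromium browse:news:example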
+
+import argparse
+import csv
+import json
+import glob
+import os
+import pathlib
+import re
+import tabulate
+import shutil
+import statistics
+import subprocess
+import sys
+import tempfile
+
+from callstats_groups import RUNTIME_CALL_STATS_GROUPS
+
+
+JSON_FILE_EXTENSION=".pb_converted.json"
+
+def parse_args():
+  parser = argparse.ArgumentParser(
+      description="Run story and collect runtime call stats.")
+  parser.add_argument("story", metavar="story", nargs=1, help="story to run")
+  parser.add_argument(
+      "--group",
+      dest="group",
+      action="store_true",
+      help="group common stats together into buckets")
+  parser.add_argument(
+      "-r",
+      "--repeats",
+      dest="repeats",
+      metavar="N",
+      action="store",
+      type=int,
+      default=1,
+      help="number of times to run the story")
+  parser.add_argument(
+      "-v",
+      "--verbose",
+      dest="verbose",
+      action="store_true",
+      help="output benchmark runs to stdout")
+  parser.add_argument(
+      "--device",
+      dest="device",
+      action="store",
+      help="device to run the test on. Passed directly to run_benchmark")
+  parser.add_argument(
+      "-d",
+      "--dir",
+      dest="dir",
+      action="store",
+      help=("directory to look for already generated output in. This must "
+            "already exists and it won't re-run the benchmark"))
+  parser.add_argument(
+      "-f",
+      "--format",
+      dest="format",
+      action="store",
+      choices=["csv", "table"],
+      help="output as CSV")
+  parser.add_argument(
+      "-o",
+      "--output",
+      metavar="FILE",
+      dest="out_file",
+      action="store",
+      help="write table to FILE rather stdout")
+  parser.add_argument(
+      "--browser",
+      dest="browser",
+      metavar="BROWSER_TYPE",
+      action="store",
+      default="release",
+      help=("Passed directly to --browser option of run_benchmark. Ignored if "
+            "-executable is used"))
+  parser.add_argument(
+      "-e",
+      "--executable",
+      dest="executable",
+      metavar="EXECUTABLE",
+      action="store",
+      help=("path to executable to run. If not given it will pass '--browser "
+            "release' to run_benchmark"))
+  parser.add_argument(
+      "--chromium-dir",
+      dest="chromium_dir",
+      metavar="DIR",
+      action="store",
+      default=".",
+      help=("path to chromium directory. If not given, the script must be run "
+            "inside the chromium/src directory"))
+  parser.add_argument(
+      "--js-flags", dest="js_flags", action="store", help="flags to pass to v8")
+  parser.add_argument(
+      "--extra-browser-args",
+      dest="browser_args",
+      action="store",
+      help="flags to pass to chrome")
+  parser.add_argument(
+      "--benchmark",
+      dest="benchmark",
+      action="store",
+      default="v8.browsing_desktop",
+      help="benchmark to run")
+  parser.add_argument(
+      "--stdev",
+      dest="stdev",
+      action="store_true",
+      help="adds columns for the standard deviation")
+  parser.add_argument(
+      "--filter",
+      dest="filter",
+      action="append",
+      help="useable with --group to only show buckets specified by filter")
+  parser.add_argument(
+      "--retain",
+      dest="retain",
+      action="store",
+      default="json",
+      choices=["none", "json", "all"],
+      help=("controls artifacts to be retained after run. With none, all files "
+            "are deleted; only the json.gz file is retained for each run; and "
+            "all keep all files"))
+
+  return parser.parse_args()
+
+
+def process_trace(trace_file):
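+  # The converted trace (a JSON_FILE_EXTENSION file) is expected to hold a
+  # "traceEvents" list whose entries carry, under args["runtime-call-stats"],
+  # a mapping from counter name to a [count, duration] pair, e.g.
+  # (illustrative values only):
+  #
+  #   {"args": {"runtime-call-stats": {"JS_Execution": [1, 2345]}}}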
+  text_string = pathlib.Path(trace_file).read_text()
+  result = json.loads(text_string)
+
+  output = {}
+  result = result["traceEvents"]
+  for o in result:
+    o = o["args"]
+    if "runtime-call-stats" in o:
+      r = o["runtime-call-stats"]
+      for name in r:
+        count = r[name][0]
+        duration = r[name][1]
+        if name in output:
+          output[name]["count"] += count
+          output[name]["duration"] += duration
+        else:
+          output[name] = {"count": count, "duration": duration}
+
+  return output
+
+
+def run_benchmark(story,
+                  repeats=1,
+                  output_dir=".",
+                  verbose=False,
+                  js_flags=None,
+                  browser_args=None,
+                  chromium_dir=".",
+                  executable=None,
+                  benchmark="v8.browsing_desktop",
+                  device=None,
+                  browser="release"):
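+  """Run `benchmark` for `story` through testing/xvfb.py in the checkout.
+
+  Builds the run_benchmark command line from the given options, streams its
+  output (status lines always, everything when verbose), and exits the
+  script if the run fails.
+  """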
+
+  orig_chromium_dir = chromium_dir
+  xvfb = os.path.join(chromium_dir, "testing", "xvfb.py")
+  if not os.path.isfile(xvfb):
+    chromium_dir = os.path.join(chromium_dir, "src")
+    xvfb = os.path.join(chromium_dir, "testing", "xvfb.py")
+    if not os.path.isfile(xvfb):
+      print(("chromium_dir does not point to a valid chromium checkout: " +
+             orig_chromium_dir))
+      sys.exit(1)
+
+  command = [
+      xvfb,
+      os.path.join(chromium_dir, "tools", "perf", "run_benchmark"),
+      "run",
+      "--story",
+      story,
+      "--pageset-repeat",
+      str(repeats),
+      "--output-dir",
+      output_dir,
+      "--intermediate-dir",
+      os.path.join(output_dir, "artifacts"),
+      benchmark,
+  ]
+
+  if executable:
+    command += ["--browser-executable", executable]
+  else:
+    command += ["--browser", browser]
+
+  if device:
+    command += ["--device", device]
+  if browser_args:
+    command += ["--extra-browser-args", browser_args]
+  if js_flags:
+    command += ["--js-flags", js_flags]
+
+  if not benchmark.startswith("v8."):
+    # Most benchmarks don't collect runtime call stats by default, so enable
+    # them manually.
+    categories = [
+        "v8",
+        "disabled-by-default-v8.runtime_stats",
+    ]
+
+    command += ["--extra-chrome-categories", ",".join(categories)]
+
+  print("Output directory: %s" % output_dir)
+  stdout = ""
+  print(f"Running: {' '.join(command)}\n")
+  proc = subprocess.Popen(
+      command,
+      stdout=subprocess.PIPE,
+      stderr=subprocess.PIPE,
+      universal_newlines=True)
+  proc.stderr.close()
+  status_matcher = re.compile(r"\[ +(\w+) +\]")
+  for line in iter(proc.stdout.readline, ""):
+    stdout += line
+    match = status_matcher.match(line)
+    if verbose or match:
+      print(line, end="")
+
+  proc.stdout.close()
+
+  if proc.wait() != 0:
+    print("\nrun_benchmark failed:")
+    # If verbose then everything has already been printed.
+    if not verbose:
+      print(stdout)
+    sys.exit(1)
+
+  print("\nrun_benchmark completed")
+
+
+def write_output(f, table, headers, run_count, format="table"):
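+  """Write the table to f as CSV or as a tabulate-formatted table."""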
+  if format == "csv":
+    # strip new lines from CSV output
+    headers = [h.replace("\n", " ") for h in headers]
+    writer = csv.writer(f)
+    writer.writerow(headers)
+    writer.writerows(table)
+  else:
+    # The first column is the name; the remaining columns alternate between
+    # counts and durations, followed by the summary (mean/stdev) columns.
+    summary_count = len(headers) - 2 * run_count - 1
+    floatfmt = ("",) + (".0f", ".2f") * run_count + (".2f",) * summary_count
+    f.write(tabulate.tabulate(table, headers=headers, floatfmt=floatfmt))
+    f.write("\n")
+
+
+class Row:
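+  """Per-run counts and durations for a single runtime call stat entry."""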
+
+  def __init__(self, name, run_count):
+    self.name = name
+    self.durations = [0] * run_count
+    self.counts = [0] * run_count
+    self.mean_duration = None
+    self.mean_count = None
+    self.stdev_duration = None
+    self.stdev_count = None
+
+  def __repr__(self):
+    data_str = ", ".join(
+        str((c, d)) for (c, d) in zip(self.counts, self.durations))
+    return (f"{self.name}: {data_str}, mean_count: {self.mean_count}, " +
+            f"mean_duration: {self.mean_duration}")
+
+  def add_data(self, counts, durations):
+    self.counts = counts
+    self.durations = durations
+
+  def add_data_point(self, run, count, duration):
+    self.counts[run] = count
+    self.durations[run] = duration
+
+  def prepare(self, stdev=False):
+    if len(self.durations) > 1:
+      self.mean_duration = statistics.mean(self.durations)
+      self.mean_count = statistics.mean(self.counts)
+      if stdev:
+        self.stdev_duration = statistics.stdev(self.durations)
+        self.stdev_count = statistics.stdev(self.counts)
+
+  def as_list(self):
+    l = [self.name]
+    for (c, d) in zip(self.counts, self.durations):
+      l += [c, d]
+    if self.mean_duration is not None:
+      l += [self.mean_count]
+      if self.stdev_count is not None:
+        l += [self.stdev_count]
+      l += [self.mean_duration]
+      if self.stdev_duration is not None:
+        l += [self.stdev_duration]
+    return l
+
+  def key(self):
+    if self.mean_duration is not None:
+      return self.mean_duration
+    else:
+      return self.durations[0]
+
+
+class Bucket:
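+  """A named group of Rows, with a computed "Total" row after prepare()."""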
+
+  def __init__(self, name, run_count):
+    self.name = name
+    self.run_count = run_count
+    self.data = {}
+    self.table = None
+    self.total_row = None
+
+  def __repr__(self):
+    s = "Bucket: " + self.name + " {\n"
+    if self.table:
+      s += "\n  ".join(str(row) for row in self.table) + "\n"
+    elif self.data:
+      s += "\n  ".join(str(row) for row in self.data.values()) + "\n"
+    if self.total_row:
+      s += "  " + str(self.total_row) + "\n"
+    return s + "}"
+
+  def add_data_point(self, name, run, count, duration):
+    if name not in self.data:
+      self.data[name] = Row(name, self.run_count)
+
+    self.data[name].add_data_point(run, count, duration)
+
+  def prepare(self, stdev=False):
+    if self.data:
+      for row in self.data.values():
+        row.prepare(stdev)
+
+      self.table = sorted(self.data.values(), key=Row.key)
+      self.total_row = Row("Total", self.run_count)
+      self.total_row.add_data([
+          sum(r.counts[i]
+              for r in self.data.values())
+          for i in range(0, self.run_count)
+      ], [
+          sum(r.durations[i]
+              for r in self.data.values())
+          for i in range(0, self.run_count)
+      ])
+      self.total_row.prepare(stdev)
+
+  def as_list(self, add_bucket_titles=True, filter=None):
+    t = []
+    if filter is None or self.name in filter:
+      if add_bucket_titles:
+        t += [["\n"], [self.name]]
+      t += [r.as_list() for r in self.table]
+      t += [self.total_row.as_list()]
+    return t
+
+
+def collect_buckets(story, group=True, repeats=1, output_dir="."):
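+  """Parse the trace from each repeat and group the stats into Buckets."""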
+  if group:
+    groups = RUNTIME_CALL_STATS_GROUPS
+  else:
+    groups = []
+
+  buckets = {}
+
+  for i in range(0, repeats):
+    story_dir = f"{story.replace(':', '_')}_{i + 1}"
+    trace_dir = os.path.join(output_dir, "artifacts", story_dir, "trace",
+                             "traceEvents")
+
+    # run_benchmark now dumps two files: a .pb.gz file and a .pb_converted.json
+    # file. We only need the latter.
+    trace_file_glob = os.path.join(trace_dir, "*" + JSON_FILE_EXTENSION)
+    trace_files = glob.glob(trace_file_glob)
+    if not trace_files:
+      print("Could not find *%s file in %s" % (JSON_FILE_EXTENSION, trace_dir))
+      sys.exit(1)
+    if len(trace_files) > 1:
+      print("Expecting one file but got: %s" % trace_files)
+      sys.exit(1)
+
+    trace_file = trace_files[0]
+
+    output = process_trace(trace_file)
+    for name in output:
+      bucket_name = "Other"
+      for group in groups:
+        if group[1].match(name):
+          bucket_name = group[0]
+          break
+
+      value = output[name]
+      if bucket_name not in buckets:
+        bucket = Bucket(bucket_name, repeats)
+        buckets[bucket_name] = bucket
+      else:
+        bucket = buckets[bucket_name]
+
+      bucket.add_data_point(name, i, value["count"], value["duration"] / 1000.0)
+  return buckets
+
+
+def create_table(buckets, record_bucket_names=True, filter=None):
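+  """Flatten all buckets into one table (list of rows) for output."""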
+  table = []
+  for bucket in buckets.values():
+    table += bucket.as_list(
+        add_bucket_titles=record_bucket_names, filter=filter)
+  return table
+
+
+def main():
+  args = parse_args()
+  story = args.story[0]
+
+  retain = args.retain
+  if args.dir is not None:
+    output_dir = args.dir
+    if not os.path.isdir(output_dir):
+      print("Specified output directory does not exist: " % output_dir)
+      sys.exit(1)
+  else:
+    output_dir = tempfile.mkdtemp(prefix="runtime_call_stats_")
+    run_benchmark(
+        story,
+        repeats=args.repeats,
+        output_dir=output_dir,
+        verbose=args.verbose,
+        js_flags=args.js_flags,
+        browser_args=args.browser_args,
+        chromium_dir=args.chromium_dir,
+        benchmark=args.benchmark,
+        executable=args.executable,
+        browser=args.browser,
+        device=args.device)
+
+  try:
+    buckets = collect_buckets(
+        story, group=args.group, repeats=args.repeats, output_dir=output_dir)
+
+    for b in buckets.values():
+      b.prepare(args.stdev)
+
+    table = create_table(
+        buckets, record_bucket_names=args.group, filter=args.filter)
+
+    headers = [""] + ["Count", "Duration\n(ms)"] * args.repeats
+    if args.repeats > 1:
+      if args.stdev:
+        headers += [
+            "Count\nMean", "Count\nStdev", "Duration\nMean (ms)",
+            "Duration\nStdev (ms)"
+        ]
+      else:
+        headers += ["Count\nMean", "Duration\nMean (ms)"]
+
+    if args.out_file:
+      with open(args.out_file, "w", newline="") as f:
+        write_output(f, table, headers, args.repeats, args.format)
+    else:
+      write_output(sys.stdout, table, headers, args.repeats, args.format)
+  finally:
+    if retain == "none":
+      shutil.rmtree(output_dir)
+    elif retain == "json":
+      # Delete all files bottom up except ones ending in JSON_FILE_EXTENSION and
+      # attempt to delete subdirectories (ignoring errors).
+      for dir_name, subdir_list, file_list in os.walk(
+          output_dir, topdown=False):
+        for file_name in file_list:
+          if not file_name.endswith(JSON_FILE_EXTENSION):
+            os.remove(os.path.join(dir_name, file_name))
+        for subdir in subdir_list:
+          try:
+            os.rmdir(os.path.join(dir_name, subdir))
+          except OSError:
+            pass
+
+
+if __name__ == "__main__":
+  sys.exit(main())
diff --git a/src/third_party/v8/tools/generate-ten-powers.scm b/src/third_party/v8/tools/generate-ten-powers.scm
new file mode 100644
index 0000000..eaeb7f4
--- /dev/null
+++ b/src/third_party/v8/tools/generate-ten-powers.scm
@@ -0,0 +1,286 @@
+;; Copyright 2010 the V8 project authors. All rights reserved.
+;; Redistribution and use in source and binary forms, with or without
+;; modification, are permitted provided that the following conditions are
+;; met:
+;;
+;;     * Redistributions of source code must retain the above copyright
+;;       notice, this list of conditions and the following disclaimer.
+;;     * Redistributions in binary form must reproduce the above
+;;       copyright notice, this list of conditions and the following
+;;       disclaimer in the documentation and/or other materials provided
+;;       with the distribution.
+;;     * Neither the name of Google Inc. nor the names of its
+;;       contributors may be used to endorse or promote products derived
+;;       from this software without specific prior written permission.
+;;
+;; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+;; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+;; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+;; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+;; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+;; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+;; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+;; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+;; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+;; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+;; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+;; This is a Scheme script for the Bigloo compiler. Bigloo must be compiled with
+;; support for bignums. The compilation of the script can be done as follows:
+;;   bigloo -static-bigloo -o generate-ten-powers generate-ten-powers.scm
+;;  
+;; Generate approximations of 10^k.
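+;;
+;; Example invocation (argument values are illustrative only):
+;;   ./generate-ten-powers --from -308 --to 340 --mantissa-size 64 \
+;;     --round round -o powers-ten.h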
+
+(module gen-ten-powers
+   (static (class Cached-Fast
+	      v::bignum
+	      e::bint
+	      exact?::bool))
+   (main my-main))
+
+
+;;----------------bignum shifts -----------------------------------------------
+(define (bit-lshbx::bignum x::bignum by::bint)
+   (if (<fx by 0)
+       #z0
+       (*bx x (exptbx #z2 (fixnum->bignum by)))))
+
+(define (bit-rshbx::bignum x::bignum by::bint)
+   (if (<fx by 0)
+       #z0
+       (/bx x (exptbx #z2 (fixnum->bignum by)))))
+
+;;----------------the actual power generation -------------------------------
+
+;; e is only an estimate; it might be too small.
+(define (round-n-cut n e nb-bits)
+   (define max-container (- (bit-lshbx #z1 nb-bits) 1))
+   (define (round n)
+      (case *round*
+	 ((down) n)
+	 ((up)
+	  (+bx n
+	       ;; with the -1 it will only round up if the cut off part is
+	       ;; non-zero
+	       (-bx (bit-lshbx #z1
+			       (-fx (+fx e nb-bits) 1))
+		    #z1)))
+	 ((round)
+	  (+bx n
+	       (bit-lshbx #z1
+			  (-fx (+fx e nb-bits) 2))))))
+   (let* ((shift (-fx (+fx e nb-bits) 1))
+	  (cut (bit-rshbx (round n) shift))
+	  (exact? (=bx n (bit-lshbx cut shift))))
+      (if (<=bx cut max-container)
+	  (values cut e exact?)
+	  (round-n-cut n (+fx e 1) nb-bits))))
+
+(define (rounded-/bx x y)
+   (case *round*
+      ((down)  (/bx x y))
+      ((up)    (+bx (/bx x y) #z1))
+      ((round) (let ((tmp (/bx (*bx #z2 x) y)))
+		  (if (zerobx? (remainderbx tmp #z2))
+		      (/bx tmp #z2)
+		      (+bx (/bx tmp #z2) #z1))))))
+
+(define (generate-powers from to mantissa-size)
+   (let* ((nb-bits mantissa-size)
+	  (offset (- from))
+	  (nb-elements (+ (- from) to 1))
+	  (vec (make-vector nb-elements))
+	  (max-container (- (bit-lshbx #z1 nb-bits) 1)))
+      ;; the negative ones. 10^-1, 10^-2, etc.
+      ;; We already know, that we can't be exact, so exact? will always be #f.
+      ;; Basically we will have a ten^i that we will *10 at each iteration. We
+      ;; want to create the mantissa of 1/ten^i. However, the mantissa must be
+      ;; normalized (start with a 1). -> we have to shift the number.
+      ;; We shift by multiplying with two^e. -> We encode two^e*(1/ten^i) ==
+      ;;  two^e/ten^i.
+      (let loop ((i 1)
+		 (ten^i #z10)
+		 (two^e #z1)
+		 (e 0))
+	 (unless (< (- i) from)
+	    (if (>bx (/bx (*bx #z2 two^e) ten^i) max-container)
+		;; another shift would make the number too big. We are
+		;; hence normalized now.
+		(begin
+		   (vector-set! vec (-fx offset i)
+				(instantiate::Cached-Fast
+				   (v (rounded-/bx two^e ten^i))
+				   (e (negfx e))
+				   (exact? #f)))
+		   (loop (+fx i 1) (*bx ten^i #z10) two^e e))
+		(loop i ten^i (bit-lshbx two^e 1) (+fx e 1)))))
+      ;; the positive ones 10^0, 10^1, etc.
+      ;; start with 1.0. mantissa: 10...0 (1 followed by nb-bits-1 bits)
+      ;;      -> e = -(nb-bits-1)
+      ;; exact? is true when the container can still hold the complete 10^i
+      (let loop ((i 0)
+		 (n (bit-lshbx #z1 (-fx nb-bits 1)))
+		 (e (-fx 1 nb-bits)))
+	 (when (<= i to)
+	    (receive (cut e exact?)
+	       (round-n-cut n e nb-bits)
+	       (vector-set! vec (+fx i offset)
+			    (instantiate::Cached-Fast
+			       (v cut)
+			       (e e)
+			       (exact? exact?)))
+	       (loop (+fx i 1) (*bx n #z10) e))))
+      vec))
+
+(define (print-c powers from to struct-type
+		 cache-name max-distance-name offset-name macro64)
+   (define (display-power power k)
+      (with-access::Cached-Fast power (v e exact?)
+	 (let ((tmp-p (open-output-string)))
+	    ;; really hackish way of getting the digits
+	    (display (format "~x" v) tmp-p)
+	    (let ((str (close-output-port tmp-p)))
+	       (printf "  {~a(0x~a, ~a), ~a, ~a},\n"
+		       macro64
+		       (substring str 0 8)
+		       (substring str 8 16)
+		       e
+		       k)))))
+   (define (print-powers-reduced n)
+      (print "static const " struct-type " " cache-name
+	     "(" n ")"
+	     "[] = {")
+      (let loop ((i 0)
+		 (nb-elements 0)
+		 (last-e 0)
+		 (max-distance 0))
+	 (cond
+	    ((>= i (vector-length powers))
+	     (print "  };")
+	     (print "static const int " max-distance-name "(" n ") = "
+		 max-distance ";")
+	     (print "// nb elements (" n "): " nb-elements))
+	    (else
+	     (let* ((power (vector-ref powers i))
+		    (e (Cached-Fast-e power)))
+	     (display-power power (+ i from))
+	     (loop (+ i n)
+		   (+ nb-elements 1)
+		   e
+		   (cond
+		      ((=fx i 0) max-distance)
+		      ((> (- e last-e) max-distance) (- e last-e))
+		      (else max-distance))))))))
+   (print "// Copyright 2010 the V8 project authors. All rights reserved.")
+   (print "// ------------ GENERATED FILE ----------------")
+   (print "// command used:")
+   (print "// "
+	  (apply string-append (map (lambda (str)
+				       (string-append " " str))
+				    *main-args*))
+	  "  // NOLINT")
+   (print)
+   (print
+    "// This file is intended to be included inside another .h or .cc files\n"
+    "// with the following defines set:\n"
+    "//  GRISU_CACHE_STRUCT: should expand to the name of a struct that will\n"
+    "//   hold the cached powers of ten. Each entry will hold a 64-bit\n"
+    "//   significand, a 16-bit signed binary exponent, and a 16-bit\n"
+    "//   signed decimal exponent. Each entry will be constructed as follows:\n"
+    "//      { significand, binary_exponent, decimal_exponent }.\n"
+    "//  GRISU_CACHE_NAME(i): generates the name for the different caches.\n"
+    "//   The parameter i will be a number in the range 1-20. A cache will\n"
+    "//   hold every i'th element of a full cache. GRISU_CACHE_NAME(1) will\n"
+    "//   thus hold all elements. The higher i the fewer elements it has.\n"
+    "//   Ideally the user should only reference one cache and let the\n"
+    "//   compiler remove the unused ones.\n"
+    "//  GRISU_CACHE_MAX_DISTANCE(i): generates the name for the maximum\n"
+    "//   binary exponent distance between all elements of a given cache.\n"
+    "//  GRISU_CACHE_OFFSET: is used as variable name for the decimal\n"
+    "//   exponent offset. It is equal to -cache[0].decimal_exponent.\n"
+    "//  GRISU_UINT64_C: used to construct 64-bit values in a platform\n"
+    "//   independent way. In order to encode 0x123456789ABCDEF0 the macro\n"
+    "//   will be invoked as follows: GRISU_UINT64_C(0x12345678,9ABCDEF0).\n")
+   (print)
+   (print-powers-reduced 1)
+   (print-powers-reduced 2)
+   (print-powers-reduced 3)
+   (print-powers-reduced 4)
+   (print-powers-reduced 5)
+   (print-powers-reduced 6)
+   (print-powers-reduced 7)
+   (print-powers-reduced 8)
+   (print-powers-reduced 9)
+   (print-powers-reduced 10)
+   (print-powers-reduced 11)
+   (print-powers-reduced 12)
+   (print-powers-reduced 13)
+   (print-powers-reduced 14)
+   (print-powers-reduced 15)
+   (print-powers-reduced 16)
+   (print-powers-reduced 17)
+   (print-powers-reduced 18)
+   (print-powers-reduced 19)
+   (print-powers-reduced 20)
+   (print "static const int GRISU_CACHE_OFFSET = " (- from) ";"))
+
+;;----------------main --------------------------------------------------------
+(define *main-args* #f)
+(define *mantissa-size* #f)
+(define *dest* #f)
+(define *round* #f)
+(define *from* #f)
+(define *to* #f)
+
+(define (my-main args)
+   (set! *main-args* args)
+   (args-parse (cdr args)
+      (section "Help")
+      (("?") (args-parse-usage #f))
+      ((("-h" "--help") (help "?, -h, --help" "This help message"))
+       (args-parse-usage #f))
+      (section "Misc")
+      (("-o" ?file (help "The output file"))
+       (set! *dest* file))
+      (("--mantissa-size" ?size (help "Container-size in bits"))
+       (set! *mantissa-size* (string->number size)))
+      (("--round" ?direction (help "Round bignums (down, round or up)"))
+       (set! *round* (string->symbol direction)))
+      (("--from" ?from (help "start at 10^from"))
+       (set! *from* (string->number from)))
+      (("--to" ?to (help "go up to 10^to"))
+       (set! *to* (string->number to)))
+      (else
+       (print "Illegal argument `" else "'. Usage:")
+       (args-parse-usage #f)))
+   (when (not *from*)
+      (error "generate-ten-powers"
+	     "Missing from"
+	     #f))
+   (when (not *to*)
+      (error "generate-ten-powers"
+	     "Missing to"
+	     #f))
+   (when (not *mantissa-size*)
+      (error "generate-ten-powers"
+	     "Missing mantissa size"
+	     #f))
+   (when (not (memv *round* '(up down round)))
+      (error "generate-ten-powers"
+	     "Missing round-method"
+	     *round*))
+
+   (let ((dividers (generate-powers *from* *to* *mantissa-size*))
+	 (p (if (not *dest*)
+		(current-output-port)
+		(open-output-file *dest*))))
+      (unwind-protect
+	 (with-output-to-port p
+	    (lambda ()
+	       (print-c dividers *from* *to*
+			"GRISU_CACHE_STRUCT" "GRISU_CACHE_NAME"
+			"GRISU_CACHE_MAX_DISTANCE" "GRISU_CACHE_OFFSET"
+			"GRISU_UINT64_C"
+			)))
+	 (if *dest*
+	     (close-output-port p)))))
diff --git a/src/third_party/v8/tools/generate_shim_headers/generate_shim_headers.py b/src/third_party/v8/tools/generate_shim_headers/generate_shim_headers.py
new file mode 100755
index 0000000..d0e6d06
--- /dev/null
+++ b/src/third_party/v8/tools/generate_shim_headers/generate_shim_headers.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python
+#
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""
+Generates shim headers that mirror the directory structure of bundled headers,
+but just forward to the system ones.
+
+This allows seamless compilation against system headers with no changes
+to our source code.
+"""
+
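+# A hypothetical invocation (paths and header names are illustrative only):
+#   generate_shim_headers.py --headers-root=third_party/zlib \
+#       --output-directory=out/shim --generate zlib.h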
+
+import optparse
+import os.path
+import sys
+
+
+def GeneratorMain(argv):
+  parser = optparse.OptionParser()
+  parser.add_option('--headers-root', action='append')
+  parser.add_option('--define', action='append')
+  parser.add_option('--output-directory')
+  parser.add_option('--prefix', default='')
+  parser.add_option('--use-include-next', action='store_true')
+  parser.add_option('--outputs', action='store_true')
+  parser.add_option('--generate', action='store_true')
+
+  options, args = parser.parse_args(argv)
+
+  if not options.headers_root:
+    parser.error('Missing --headers-root parameter.')
+  if not options.output_directory:
+    parser.error('Missing --output-directory parameter.')
+  if not args:
+    parser.error('Missing arguments - header file names.')
+
+  source_tree_root = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '..', '..'))
+
+  for root in options.headers_root:
+    target_directory = os.path.join(
+      options.output_directory,
+      os.path.relpath(root, source_tree_root))
+    if options.generate and not os.path.exists(target_directory):
+      os.makedirs(target_directory)
+
+    for header_spec in args:
+      if ';' in header_spec:
+        (header_filename,
+         include_before,
+         include_after) = header_spec.split(';', 2)
+      else:
+        header_filename = header_spec
+        include_before = ''
+        include_after = ''
+      if options.outputs:
+        yield os.path.join(target_directory, header_filename)
+      if options.generate:
+        with open(os.path.join(target_directory, header_filename), 'w') as f:
+          if options.define:
+            for define in options.define:
+              key, value = define.split('=', 1)
+              # This non-standard push_macro extension is supported
+              # by compilers we support (GCC, clang).
+              f.write('#pragma push_macro("%s")\n' % key)
+              f.write('#undef %s\n' % key)
+              f.write('#define %s %s\n' % (key, value))
+
+          if include_before:
+            for header in include_before.split(':'):
+              f.write('#include %s\n' % header)
+
+          include_target = options.prefix + header_filename
+          if options.use_include_next:
+            f.write('#include_next <%s>\n' % include_target)
+          else:
+            f.write('#include <%s>\n' % include_target)
+
+          if include_after:
+            for header in include_after.split(':'):
+              f.write('#include %s\n' % header)
+
+          if options.define:
+            for define in options.define:
+              key, value = define.split('=', 1)
+              # This non-standard pop_macro extension is supported
+              # by compilers we support (GCC, clang).
+              f.write('#pragma pop_macro("%s")\n' % key)
+
+
+def DoMain(argv):
+  return '\n'.join(GeneratorMain(argv))
+
+
+if __name__ == '__main__':
+  DoMain(sys.argv[1:])
diff --git a/src/third_party/v8/tools/get_landmines.py b/src/third_party/v8/tools/get_landmines.py
new file mode 100755
index 0000000..bf8efa5
--- /dev/null
+++ b/src/third_party/v8/tools/get_landmines.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+This file emits the list of reasons why a particular build needs to be clobbered
+(or a list of 'landmines').
+"""
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import os
+import sys
+
+sys.path.insert(0, os.path.abspath(
+  os.path.join(os.path.dirname(__file__), '..', 'build')))
+
+import get_landmines as build_get_landmines
+
+
+def print_landmines():  # pylint: disable=invalid-name
+  """
+  ALL LANDMINES ARE EMITTED FROM HERE.
+  """
+  # DO NOT add landmines as part of a regular CL. Landmines are a last-effort
+  # bandaid fix if a CL that got landed has a build dependency bug and all bots
+  # need to be cleaned up. If you're writing a new CL that causes build
+  # dependency problems, fix the dependency problems instead of adding a
+  # landmine.
+  # See the Chromium version in src/build/get_landmines.py for usage examples.
+  print('Need to clobber after ICU52 roll.')
+  print('Landmines test.')
+  print('Activating MSVS 2013.')
+  print('Revert activation of MSVS 2013.')
+  print('Activating MSVS 2013 again.')
+  print('Clobber after ICU roll.')
+  print('Moar clobbering...')
+  print('Remove build/android.gypi')
+  print('Cleanup after windows ninja switch attempt.')
+  print('Switching to pinned msvs toolchain.')
+  print('Clobbering to hopefully resolve problem with mksnapshot')
+  print('Clobber after ICU roll.')
+  print('Clobber after Android NDK update.')
+  print('Clober to fix windows build problems.')
+  print('Clober again to fix windows build problems.')
+  print('Clobber to possibly resolve failure on win-32 bot.')
+  print('Clobber for http://crbug.com/668958.')
+  print('Clobber to possibly resolve build failure on Misc V8 Linux gcc.')
+  build_get_landmines.print_landmines()
+  return 0
+
+
+def main():
+  print_landmines()
+  return 0
+
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/src/third_party/v8/tools/grokdump.py b/src/third_party/v8/tools/grokdump.py
new file mode 100755
index 0000000..1edfd35
--- /dev/null
+++ b/src/third_party/v8/tools/grokdump.py
@@ -0,0 +1,3960 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# flake8: noqa  # https://bugs.chromium.org/p/v8/issues/detail?id=8784
+
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import BaseHTTPServer
+import bisect
+import cgi
+import cmd
+import codecs
+import ctypes
+import datetime
+import disasm
+import inspect
+import mmap
+import optparse
+import os
+import re
+import StringIO
+import sys
+import types
+import urllib
+import urlparse
+import v8heapconst
+import webbrowser
+
+PORT_NUMBER = 8081
+
+
+USAGE="""usage: %prog [OPTIONS] [DUMP-FILE]
+
+Minidump analyzer.
+
+Shows the processor state at the point of exception including the
+stack of the active thread and the referenced objects in the V8
+heap. Code objects are disassembled and the addresses linked from the
+stack (e.g. pushed return addresses) are marked with "=>".
+
+Examples:
+  $ %prog 12345678-1234-1234-1234-123456789abcd-full.dmp"""
+
+
+DEBUG=False
+
+
+def DebugPrint(s):
+  if not DEBUG: return
+  print(s)
+
+
+class Descriptor(object):
+  """Descriptor of a structure in a memory."""
+
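+  # A field type may be a ctypes type or a function that receives the
+  # partially-parsed struct and returns the concrete type (or None to omit
+  # the field). The latter makes the descriptor "flexible": its size depends
+  # on the data being read.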
+  def __init__(self, fields):
+    self.fields = fields
+    self.is_flexible = False
+    for _, type_or_func in fields:
+      if isinstance(type_or_func, types.FunctionType):
+        self.is_flexible = True
+        break
+    if not self.is_flexible:
+      self.ctype = Descriptor._GetCtype(fields)
+      self.size = ctypes.sizeof(self.ctype)
+
+  def Read(self, memory, offset):
+    if self.is_flexible:
+      fields_copy = self.fields[:]
+      last = 0
+      for name, type_or_func in fields_copy:
+        if isinstance(type_or_func, types.FunctionType):
+          partial_ctype = Descriptor._GetCtype(fields_copy[:last])
+          partial_object = partial_ctype.from_buffer(memory, offset)
+          type = type_or_func(partial_object)
+          if type is not None:
+            fields_copy[last] = (name, type)
+            last += 1
+        else:
+          last += 1
+      complete_ctype = Descriptor._GetCtype(fields_copy[:last])
+    else:
+      complete_ctype = self.ctype
+    return complete_ctype.from_buffer(memory, offset)
+
+  @staticmethod
+  def _GetCtype(fields):
+    class Raw(ctypes.Structure):
+      _fields_ = fields
+      _pack_ = 1
+
+      def __str__(self):
+        return "{" + ", ".join("%s: %s" % (field, self.__getattribute__(field))
+                               for field, _ in Raw._fields_) + "}"
+    return Raw
+
+
+def FullDump(reader, heap):
+  """Dump all available memory regions."""
+  def dump_region(reader, start, size, location):
+    print()
+    while start & 3 != 0:
+      start += 1
+      size -= 1
+      location += 1
+    is_executable = reader.IsProbableExecutableRegion(location, size)
+    is_ascii = reader.IsProbableASCIIRegion(location, size)
+
+    if is_executable is not False:
+      lines = reader.GetDisasmLines(start, size)
+      for line in lines:
+        print(FormatDisasmLine(start, heap, line))
+      print()
+
+    if is_ascii is not False:
+      # Output in the same format as the Unix hd command
+      addr = start
+      for i in range(0, size, 16):
+        slot = i + location
+        hex_line = ""
+        asc_line = ""
+        for i in range(16):
+          if slot + i < location + size:
+            byte = ctypes.c_uint8.from_buffer(reader.minidump, slot + i).value
+            if byte >= 0x20 and byte < 0x7f:
+              asc_line += chr(byte)
+            else:
+              asc_line += "."
+            hex_line += " %02x" % (byte)
+          else:
+            hex_line += "   "
+          if i == 7:
+            hex_line += " "
+        print("%s  %s |%s|" % (reader.FormatIntPtr(addr),
+                               hex_line,
+                               asc_line))
+        addr += 16
+
+    if is_executable is not True and is_ascii is not True:
+      print("%s - %s" % (reader.FormatIntPtr(start),
+                         reader.FormatIntPtr(start + size)))
+      print(start + size + 1)
+      for i in range(0, size, reader.PointerSize()):
+        slot = start + i
+        maybe_address = reader.ReadUIntPtr(slot)
+        heap_object = heap.FindObject(maybe_address)
+        print("%s: %s" % (reader.FormatIntPtr(slot),
+                          reader.FormatIntPtr(maybe_address)))
+        if heap_object:
+          heap_object.Print(Printer())
+          print()
+
+  reader.ForEachMemoryRegion(dump_region)
+
+# Heap constants generated by 'make grokdump' in v8heapconst module.
+INSTANCE_TYPES = v8heapconst.INSTANCE_TYPES
+KNOWN_MAPS = v8heapconst.KNOWN_MAPS
+KNOWN_OBJECTS = v8heapconst.KNOWN_OBJECTS
+FRAME_MARKERS = v8heapconst.FRAME_MARKERS
+
+# Markers pushed on the stack by PushStackTraceAndDie
+MAGIC_MARKER_PAIRS = (
+    (0xbbbbbbbb, 0xbbbbbbbb),
+    (0xfefefefe, 0xfefefeff),
+)
+# See StackTraceFailureMessage in isolate.h
+STACK_TRACE_MARKER = 0xdecade30
+# See FailureMessage in logging.cc
+ERROR_MESSAGE_MARKER = 0xdecade10
+
+# Set of structures and constants that describe the layout of minidump
+# files. Based on MSDN and Google Breakpad.
+
+MINIDUMP_HEADER = Descriptor([
+  ("signature", ctypes.c_uint32),
+  ("version", ctypes.c_uint32),
+  ("stream_count", ctypes.c_uint32),
+  ("stream_directories_rva", ctypes.c_uint32),
+  ("checksum", ctypes.c_uint32),
+  ("time_date_stampt", ctypes.c_uint32),
+  ("flags", ctypes.c_uint64)
+])
+
+MINIDUMP_LOCATION_DESCRIPTOR = Descriptor([
+  ("data_size", ctypes.c_uint32),
+  ("rva", ctypes.c_uint32)
+])
+
+MINIDUMP_STRING = Descriptor([
+  ("length", ctypes.c_uint32),
+  ("buffer", lambda t: ctypes.c_uint8 * (t.length + 2))
+])
+
+MINIDUMP_DIRECTORY = Descriptor([
+  ("stream_type", ctypes.c_uint32),
+  ("location", MINIDUMP_LOCATION_DESCRIPTOR.ctype)
+])
+
+MD_EXCEPTION_MAXIMUM_PARAMETERS = 15
+
+MINIDUMP_EXCEPTION = Descriptor([
+  ("code", ctypes.c_uint32),
+  ("flags", ctypes.c_uint32),
+  ("record", ctypes.c_uint64),
+  ("address", ctypes.c_uint64),
+  ("parameter_count", ctypes.c_uint32),
+  ("unused_alignment", ctypes.c_uint32),
+  ("information", ctypes.c_uint64 * MD_EXCEPTION_MAXIMUM_PARAMETERS)
+])
+
+MINIDUMP_EXCEPTION_STREAM = Descriptor([
+  ("thread_id", ctypes.c_uint32),
+  ("unused_alignment", ctypes.c_uint32),
+  ("exception", MINIDUMP_EXCEPTION.ctype),
+  ("thread_context", MINIDUMP_LOCATION_DESCRIPTOR.ctype)
+])
+
+# Stream types.
+MD_UNUSED_STREAM = 0
+MD_RESERVED_STREAM_0 = 1
+MD_RESERVED_STREAM_1 = 2
+MD_THREAD_LIST_STREAM = 3
+MD_MODULE_LIST_STREAM = 4
+MD_MEMORY_LIST_STREAM = 5
+MD_EXCEPTION_STREAM = 6
+MD_SYSTEM_INFO_STREAM = 7
+MD_THREAD_EX_LIST_STREAM = 8
+MD_MEMORY_64_LIST_STREAM = 9
+MD_COMMENT_STREAM_A = 10
+MD_COMMENT_STREAM_W = 11
+MD_HANDLE_DATA_STREAM = 12
+MD_FUNCTION_TABLE_STREAM = 13
+MD_UNLOADED_MODULE_LIST_STREAM = 14
+MD_MISC_INFO_STREAM = 15
+MD_MEMORY_INFO_LIST_STREAM = 16
+MD_THREAD_INFO_LIST_STREAM = 17
+MD_HANDLE_OPERATION_LIST_STREAM = 18
+
+MD_FLOATINGSAVEAREA_X86_REGISTERAREA_SIZE = 80
+
+MINIDUMP_FLOATING_SAVE_AREA_X86 = Descriptor([
+  ("control_word", ctypes.c_uint32),
+  ("status_word", ctypes.c_uint32),
+  ("tag_word", ctypes.c_uint32),
+  ("error_offset", ctypes.c_uint32),
+  ("error_selector", ctypes.c_uint32),
+  ("data_offset", ctypes.c_uint32),
+  ("data_selector", ctypes.c_uint32),
+  ("register_area", ctypes.c_uint8 * MD_FLOATINGSAVEAREA_X86_REGISTERAREA_SIZE),
+  ("cr0_npx_state", ctypes.c_uint32)
+])
+
+MD_CONTEXT_X86_EXTENDED_REGISTERS_SIZE = 512
+
+# Context flags.
+MD_CONTEXT_X86 = 0x00010000
+MD_CONTEXT_X86_CONTROL = (MD_CONTEXT_X86 | 0x00000001)
+MD_CONTEXT_X86_INTEGER = (MD_CONTEXT_X86 | 0x00000002)
+MD_CONTEXT_X86_SEGMENTS = (MD_CONTEXT_X86 | 0x00000004)
+MD_CONTEXT_X86_FLOATING_POINT = (MD_CONTEXT_X86 | 0x00000008)
+MD_CONTEXT_X86_DEBUG_REGISTERS = (MD_CONTEXT_X86 | 0x00000010)
+MD_CONTEXT_X86_EXTENDED_REGISTERS = (MD_CONTEXT_X86 | 0x00000020)
+
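+# Field-type helper for Descriptor: the field uses `type` only when `flag` is
+# set in the struct's context_flags; otherwise the field is omitted (None).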
+def EnableOnFlag(type, flag):
+  return lambda o: [None, type][int((o.context_flags & flag) != 0)]
+
+MINIDUMP_CONTEXT_X86 = Descriptor([
+  ("context_flags", ctypes.c_uint32),
+  # MD_CONTEXT_X86_DEBUG_REGISTERS.
+  ("dr0", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_DEBUG_REGISTERS)),
+  ("dr1", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_DEBUG_REGISTERS)),
+  ("dr2", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_DEBUG_REGISTERS)),
+  ("dr3", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_DEBUG_REGISTERS)),
+  ("dr6", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_DEBUG_REGISTERS)),
+  ("dr7", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_DEBUG_REGISTERS)),
+  # MD_CONTEXT_X86_FLOATING_POINT.
+  ("float_save", EnableOnFlag(MINIDUMP_FLOATING_SAVE_AREA_X86.ctype,
+                              MD_CONTEXT_X86_FLOATING_POINT)),
+  # MD_CONTEXT_X86_SEGMENTS.
+  ("gs", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_SEGMENTS)),
+  ("fs", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_SEGMENTS)),
+  ("es", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_SEGMENTS)),
+  ("ds", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_SEGMENTS)),
+  # MD_CONTEXT_X86_INTEGER.
+  ("edi", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_INTEGER)),
+  ("esi", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_INTEGER)),
+  ("ebx", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_INTEGER)),
+  ("edx", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_INTEGER)),
+  ("ecx", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_INTEGER)),
+  ("eax", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_INTEGER)),
+  # MD_CONTEXT_X86_CONTROL.
+  ("ebp", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_CONTROL)),
+  ("eip", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_CONTROL)),
+  ("cs", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_CONTROL)),
+  ("eflags", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_CONTROL)),
+  ("esp", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_CONTROL)),
+  ("ss", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_X86_CONTROL)),
+  # MD_CONTEXT_X86_EXTENDED_REGISTERS.
+  ("extended_registers",
+   EnableOnFlag(ctypes.c_uint8 * MD_CONTEXT_X86_EXTENDED_REGISTERS_SIZE,
+                MD_CONTEXT_X86_EXTENDED_REGISTERS))
+])
+
+MD_CONTEXT_ARM = 0x40000000
+MD_CONTEXT_ARM_INTEGER = (MD_CONTEXT_ARM | 0x00000002)
+MD_CONTEXT_ARM_FLOATING_POINT = (MD_CONTEXT_ARM | 0x00000004)
+MD_FLOATINGSAVEAREA_ARM_FPR_COUNT = 32
+MD_FLOATINGSAVEAREA_ARM_FPEXTRA_COUNT = 8
+
+MINIDUMP_FLOATING_SAVE_AREA_ARM = Descriptor([
+  ("fpscr", ctypes.c_uint64),
+  ("regs", ctypes.c_uint64 * MD_FLOATINGSAVEAREA_ARM_FPR_COUNT),
+  ("extra", ctypes.c_uint64 * MD_FLOATINGSAVEAREA_ARM_FPEXTRA_COUNT)
+])
+
+MINIDUMP_CONTEXT_ARM = Descriptor([
+  ("context_flags", ctypes.c_uint32),
+  # MD_CONTEXT_ARM_INTEGER.
+  ("r0", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
+  ("r1", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
+  ("r2", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
+  ("r3", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
+  ("r4", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
+  ("r5", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
+  ("r6", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
+  ("r7", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
+  ("r8", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
+  ("r9", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
+  ("r10", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
+  ("r11", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
+  ("r12", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
+  ("sp", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
+  ("lr", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
+  ("pc", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_ARM_INTEGER)),
+  ("cpsr", ctypes.c_uint32),
+  ("float_save", EnableOnFlag(MINIDUMP_FLOATING_SAVE_AREA_ARM.ctype,
+                              MD_CONTEXT_ARM_FLOATING_POINT))
+])
+
+
+MD_CONTEXT_ARM64 =  0x80000000
+MD_CONTEXT_ARM64_INTEGER = (MD_CONTEXT_ARM64 | 0x00000002)
+MD_CONTEXT_ARM64_FLOATING_POINT = (MD_CONTEXT_ARM64 | 0x00000004)
+MD_FLOATINGSAVEAREA_ARM64_FPR_COUNT = 64
+
+MINIDUMP_FLOATING_SAVE_AREA_ARM = Descriptor([
+  ("fpscr", ctypes.c_uint64),
+  ("regs", ctypes.c_uint64 * MD_FLOATINGSAVEAREA_ARM64_FPR_COUNT),
+])
+
+MINIDUMP_CONTEXT_ARM64 = Descriptor([
+  ("context_flags", ctypes.c_uint64),
+  # MD_CONTEXT_ARM64_INTEGER.
+  ("r0", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+  ("r1", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+  ("r2", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+  ("r3", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+  ("r4", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+  ("r5", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+  ("r6", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+  ("r7", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+  ("r8", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+  ("r9", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+  ("r10", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+  ("r11", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+  ("r12", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+  ("r13", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+  ("r14", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+  ("r15", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+  ("r16", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+  ("r17", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+  ("r18", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+  ("r19", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+  ("r20", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+  ("r21", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+  ("r22", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+  ("r23", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+  ("r24", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+  ("r25", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+  ("r26", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+  ("r27", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+  ("r28", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+  ("fp", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+  ("lr", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+  ("sp", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+  ("pc", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_ARM64_INTEGER)),
+  ("cpsr", ctypes.c_uint32),
+  ("float_save", EnableOnFlag(MINIDUMP_FLOATING_SAVE_AREA_ARM.ctype,
+                              MD_CONTEXT_ARM64_FLOATING_POINT))
+])
+
+
+MD_CONTEXT_AMD64 = 0x00100000
+MD_CONTEXT_AMD64_CONTROL = (MD_CONTEXT_AMD64 | 0x00000001)
+MD_CONTEXT_AMD64_INTEGER = (MD_CONTEXT_AMD64 | 0x00000002)
+MD_CONTEXT_AMD64_SEGMENTS = (MD_CONTEXT_AMD64 | 0x00000004)
+MD_CONTEXT_AMD64_FLOATING_POINT = (MD_CONTEXT_AMD64 | 0x00000008)
+MD_CONTEXT_AMD64_DEBUG_REGISTERS = (MD_CONTEXT_AMD64 | 0x00000010)
+
+MINIDUMP_CONTEXT_AMD64 = Descriptor([
+  ("p1_home", ctypes.c_uint64),
+  ("p2_home", ctypes.c_uint64),
+  ("p3_home", ctypes.c_uint64),
+  ("p4_home", ctypes.c_uint64),
+  ("p5_home", ctypes.c_uint64),
+  ("p6_home", ctypes.c_uint64),
+  ("context_flags", ctypes.c_uint32),
+  ("mx_csr", ctypes.c_uint32),
+  # MD_CONTEXT_AMD64_CONTROL.
+  ("cs", EnableOnFlag(ctypes.c_uint16, MD_CONTEXT_AMD64_CONTROL)),
+  # MD_CONTEXT_AMD64_SEGMENTS
+  ("ds", EnableOnFlag(ctypes.c_uint16, MD_CONTEXT_AMD64_SEGMENTS)),
+  ("es", EnableOnFlag(ctypes.c_uint16, MD_CONTEXT_AMD64_SEGMENTS)),
+  ("fs", EnableOnFlag(ctypes.c_uint16, MD_CONTEXT_AMD64_SEGMENTS)),
+  ("gs", EnableOnFlag(ctypes.c_uint16, MD_CONTEXT_AMD64_SEGMENTS)),
+  # MD_CONTEXT_AMD64_CONTROL.
+  ("ss", EnableOnFlag(ctypes.c_uint16, MD_CONTEXT_AMD64_CONTROL)),
+  ("eflags", EnableOnFlag(ctypes.c_uint32, MD_CONTEXT_AMD64_CONTROL)),
+  # MD_CONTEXT_AMD64_DEBUG_REGISTERS.
+  ("dr0", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_DEBUG_REGISTERS)),
+  ("dr1", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_DEBUG_REGISTERS)),
+  ("dr2", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_DEBUG_REGISTERS)),
+  ("dr3", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_DEBUG_REGISTERS)),
+  ("dr6", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_DEBUG_REGISTERS)),
+  ("dr7", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_DEBUG_REGISTERS)),
+  # MD_CONTEXT_AMD64_INTEGER.
+  ("rax", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
+  ("rcx", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
+  ("rdx", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
+  ("rbx", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
+  # MD_CONTEXT_AMD64_CONTROL.
+  ("rsp", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_CONTROL)),
+  # MD_CONTEXT_AMD64_INTEGER.
+  ("rbp", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
+  ("rsi", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
+  ("rdi", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
+  ("r8", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
+  ("r9", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
+  ("r10", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
+  ("r11", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
+  ("r12", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
+  ("r13", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
+  ("r14", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
+  ("r15", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_INTEGER)),
+  # MD_CONTEXT_AMD64_CONTROL.
+  ("rip", EnableOnFlag(ctypes.c_uint64, MD_CONTEXT_AMD64_CONTROL)),
+  # MD_CONTEXT_AMD64_FLOATING_POINT
+  ("sse_registers", EnableOnFlag(ctypes.c_uint8 * (16 * 26),
+                                 MD_CONTEXT_AMD64_FLOATING_POINT)),
+  ("vector_registers", EnableOnFlag(ctypes.c_uint8 * (16 * 26),
+                                    MD_CONTEXT_AMD64_FLOATING_POINT)),
+  ("vector_control", EnableOnFlag(ctypes.c_uint64,
+                                  MD_CONTEXT_AMD64_FLOATING_POINT)),
+  # MD_CONTEXT_AMD64_DEBUG_REGISTERS.
+  ("debug_control", EnableOnFlag(ctypes.c_uint64,
+                                 MD_CONTEXT_AMD64_DEBUG_REGISTERS)),
+  ("last_branch_to_rip", EnableOnFlag(ctypes.c_uint64,
+                                      MD_CONTEXT_AMD64_DEBUG_REGISTERS)),
+  ("last_branch_from_rip", EnableOnFlag(ctypes.c_uint64,
+                                        MD_CONTEXT_AMD64_DEBUG_REGISTERS)),
+  ("last_exception_to_rip", EnableOnFlag(ctypes.c_uint64,
+                                         MD_CONTEXT_AMD64_DEBUG_REGISTERS)),
+  ("last_exception_from_rip", EnableOnFlag(ctypes.c_uint64,
+                                           MD_CONTEXT_AMD64_DEBUG_REGISTERS))
+])
+
+MINIDUMP_MEMORY_DESCRIPTOR = Descriptor([
+  ("start", ctypes.c_uint64),
+  ("memory", MINIDUMP_LOCATION_DESCRIPTOR.ctype)
+])
+
+MINIDUMP_MEMORY_DESCRIPTOR64 = Descriptor([
+  ("start", ctypes.c_uint64),
+  ("size", ctypes.c_uint64)
+])
+
+MINIDUMP_MEMORY_LIST = Descriptor([
+  ("range_count", ctypes.c_uint32),
+  ("ranges", lambda m: MINIDUMP_MEMORY_DESCRIPTOR.ctype * m.range_count)
+])
+
+MINIDUMP_MEMORY_LIST_Mac = Descriptor([
+  ("range_count", ctypes.c_uint32),
+  ("junk", ctypes.c_uint32),
+  ("ranges", lambda m: MINIDUMP_MEMORY_DESCRIPTOR.ctype * m.range_count)
+])
+
+MINIDUMP_MEMORY_LIST64 = Descriptor([
+  ("range_count", ctypes.c_uint64),
+  ("base_rva", ctypes.c_uint64),
+  ("ranges", lambda m: MINIDUMP_MEMORY_DESCRIPTOR64.ctype * m.range_count)
+])
+
+MINIDUMP_THREAD = Descriptor([
+  ("id", ctypes.c_uint32),
+  ("suspend_count", ctypes.c_uint32),
+  ("priority_class", ctypes.c_uint32),
+  ("priority", ctypes.c_uint32),
+  ("ted", ctypes.c_uint64),
+  ("stack", MINIDUMP_MEMORY_DESCRIPTOR.ctype),
+  ("context", MINIDUMP_LOCATION_DESCRIPTOR.ctype)
+])
+
+MINIDUMP_THREAD_LIST = Descriptor([
+  ("thread_count", ctypes.c_uint32),
+  ("threads", lambda t: MINIDUMP_THREAD.ctype * t.thread_count)
+])
+
+MINIDUMP_THREAD_LIST_Mac = Descriptor([
+  ("thread_count", ctypes.c_uint32),
+  ("junk", ctypes.c_uint32),
+  ("threads", lambda t: MINIDUMP_THREAD.ctype * t.thread_count)
+])
+
+MINIDUMP_VS_FIXEDFILEINFO = Descriptor([
+  ("dwSignature", ctypes.c_uint32),
+  ("dwStrucVersion", ctypes.c_uint32),
+  ("dwFileVersionMS", ctypes.c_uint32),
+  ("dwFileVersionLS", ctypes.c_uint32),
+  ("dwProductVersionMS", ctypes.c_uint32),
+  ("dwProductVersionLS", ctypes.c_uint32),
+  ("dwFileFlagsMask", ctypes.c_uint32),
+  ("dwFileFlags", ctypes.c_uint32),
+  ("dwFileOS", ctypes.c_uint32),
+  ("dwFileType", ctypes.c_uint32),
+  ("dwFileSubtype", ctypes.c_uint32),
+  ("dwFileDateMS", ctypes.c_uint32),
+  ("dwFileDateLS", ctypes.c_uint32)
+])
+
+MINIDUMP_RAW_MODULE = Descriptor([
+  ("base_of_image", ctypes.c_uint64),
+  ("size_of_image", ctypes.c_uint32),
+  ("checksum", ctypes.c_uint32),
+  ("time_date_stamp", ctypes.c_uint32),
+  ("module_name_rva", ctypes.c_uint32),
+  ("version_info", MINIDUMP_VS_FIXEDFILEINFO.ctype),
+  ("cv_record", MINIDUMP_LOCATION_DESCRIPTOR.ctype),
+  ("misc_record", MINIDUMP_LOCATION_DESCRIPTOR.ctype),
+  ("reserved0", ctypes.c_uint32 * 2),
+  ("reserved1", ctypes.c_uint32 * 2)
+])
+
+MINIDUMP_MODULE_LIST = Descriptor([
+  ("number_of_modules", ctypes.c_uint32),
+  ("modules", lambda t: MINIDUMP_RAW_MODULE.ctype * t.number_of_modules)
+])
+
+MINIDUMP_MODULE_LIST_Mac = Descriptor([
+  ("number_of_modules", ctypes.c_uint32),
+  ("junk", ctypes.c_uint32),
+  ("modules", lambda t: MINIDUMP_RAW_MODULE.ctype * t.number_of_modules)
+])
+
+MINIDUMP_RAW_SYSTEM_INFO = Descriptor([
+  ("processor_architecture", ctypes.c_uint16)
+])
+
+MD_CPU_ARCHITECTURE_X86 = 0
+MD_CPU_ARCHITECTURE_ARM = 5
+# Breakpad used a custom value of 0x8003 here; Crashpad uses the new
+# standardized value 12.
+MD_CPU_ARCHITECTURE_ARM64 = 12
+MD_CPU_ARCHITECTURE_ARM64_BREAKPAD_LEGACY = 0x8003
+MD_CPU_ARCHITECTURE_AMD64 = 9
+
+OBJDUMP_BIN = None
+DEFAULT_OBJDUMP_BIN = '/usr/bin/objdump'
+
+class FuncSymbol:
+  def __init__(self, start, size, name):
+    self.start = start
+    self.end = self.start + size
+    self.name = name
+
+  def __cmp__(self, other):
+    if isinstance(other, FuncSymbol):
+      return self.start - other.start
+    return self.start - other
+
+  def Covers(self, addr):
+    return (self.start <= addr) and (addr < self.end)
+
+class MinidumpReader(object):
+  """Minidump (.dmp) reader."""
+
+  _HEADER_MAGIC = 0x504d444d
+
+  def __init__(self, options, minidump_name):
+    self.minidump_name = minidump_name
+    if sys.platform == 'win32':
+      self.minidump_file = open(minidump_name, "a+")
+      self.minidump = mmap.mmap(self.minidump_file.fileno(), 0)
+    else:
+      self.minidump_file = open(minidump_name, "r")
+      self.minidump = mmap.mmap(self.minidump_file.fileno(), 0, mmap.MAP_PRIVATE)
+    self.header = MINIDUMP_HEADER.Read(self.minidump, 0)
+    if self.header.signature != MinidumpReader._HEADER_MAGIC:
+      print("Warning: Unsupported minidump header magic!", file=sys.stderr)
+    DebugPrint(self.header)
+    directories = []
+    offset = self.header.stream_directories_rva
+    for _ in range(self.header.stream_count):
+      directories.append(MINIDUMP_DIRECTORY.Read(self.minidump, offset))
+      offset += MINIDUMP_DIRECTORY.size
+    self.arch = None
+    self.exception = None
+    self.exception_context = None
+    self.memory_list = None
+    self.memory_list64 = None
+    self.module_list = None
+    self.thread_map = {}
+
+    self.symdir = options.symdir
+    self.modules_with_symbols = []
+    self.symbols = []
+
+    self._ReadArchitecture(directories)
+    self._ReadDirectories(directories)
+    self._FindObjdump(options)
+
+  def _ReadArchitecture(self, directories):
+    # Find MDRawSystemInfo stream and determine arch.
+    for d in directories:
+      if d.stream_type == MD_SYSTEM_INFO_STREAM:
+        system_info = MINIDUMP_RAW_SYSTEM_INFO.Read(
+            self.minidump, d.location.rva)
+        self.arch = system_info.processor_architecture
+        if self.arch == MD_CPU_ARCHITECTURE_ARM64_BREAKPAD_LEGACY:
+          self.arch = MD_CPU_ARCHITECTURE_ARM64
+        assert self.arch in [MD_CPU_ARCHITECTURE_AMD64,
+                             MD_CPU_ARCHITECTURE_ARM,
+                             MD_CPU_ARCHITECTURE_ARM64,
+                             MD_CPU_ARCHITECTURE_X86]
+    assert self.arch is not None
+
+  def _ReadDirectories(self, directories):
+    for d in directories:
+      DebugPrint(d)
+      if d.stream_type == MD_EXCEPTION_STREAM:
+        self.exception = MINIDUMP_EXCEPTION_STREAM.Read(
+          self.minidump, d.location.rva)
+        DebugPrint(self.exception)
+        self.exception_context = self.ContextDescriptor().Read(
+            self.minidump, self.exception.thread_context.rva)
+        DebugPrint(self.exception_context)
+      elif d.stream_type == MD_THREAD_LIST_STREAM:
+        thread_list = MINIDUMP_THREAD_LIST.Read(self.minidump, d.location.rva)
+        if ctypes.sizeof(thread_list) + 4 == d.location.data_size:
+          thread_list = MINIDUMP_THREAD_LIST_Mac.Read(
+              self.minidump, d.location.rva)
+        assert ctypes.sizeof(thread_list) == d.location.data_size
+        DebugPrint(thread_list)
+        for thread in thread_list.threads:
+          DebugPrint(thread)
+          self.thread_map[thread.id] = thread
+      elif d.stream_type == MD_MODULE_LIST_STREAM:
+        assert self.module_list is None
+        self.module_list = MINIDUMP_MODULE_LIST.Read(
+          self.minidump, d.location.rva)
+        if ctypes.sizeof(self.module_list) + 4 == d.location.data_size:
+          self.module_list = MINIDUMP_MODULE_LIST_Mac.Read(
+              self.minidump, d.location.rva)
+        assert ctypes.sizeof(self.module_list) == d.location.data_size
+        DebugPrint(self.module_list)
+      elif d.stream_type == MD_MEMORY_LIST_STREAM:
+        print("Warning: This is not a full minidump!", file=sys.stderr)
+        assert self.memory_list is None
+        self.memory_list = MINIDUMP_MEMORY_LIST.Read(
+          self.minidump, d.location.rva)
+        if ctypes.sizeof(self.memory_list) + 4 == d.location.data_size:
+          self.memory_list = MINIDUMP_MEMORY_LIST_Mac.Read(
+              self.minidump, d.location.rva)
+        assert ctypes.sizeof(self.memory_list) == d.location.data_size
+        DebugPrint(self.memory_list)
+      elif d.stream_type == MD_MEMORY_64_LIST_STREAM:
+        assert self.memory_list64 is None
+        self.memory_list64 = MINIDUMP_MEMORY_LIST64.Read(
+          self.minidump, d.location.rva)
+        assert ctypes.sizeof(self.memory_list64) == d.location.data_size
+        DebugPrint(self.memory_list64)
+
+  def _FindObjdump(self, options):
+    if options.objdump:
+      objdump_bin = options.objdump
+    else:
+      objdump_bin = self._FindThirdPartyObjdump()
+    if not objdump_bin or not os.path.exists(objdump_bin):
+      print("# Cannot find '%s', falling back to default objdump '%s'" % (
+          objdump_bin, DEFAULT_OBJDUMP_BIN))
+      objdump_bin = DEFAULT_OBJDUMP_BIN
+    global OBJDUMP_BIN
+    OBJDUMP_BIN = objdump_bin
+    disasm.OBJDUMP_BIN = objdump_bin
+
+  def _FindThirdPartyObjdump(self):
+    # Try to find the platform specific objdump
+    third_party_dir = os.path.join(
+        os.path.dirname(os.path.dirname(__file__)), 'third_party')
+    objdumps = []
+    for root, dirs, files in os.walk(third_party_dir):
+      for file in files:
+        if file.endswith("objdump"):
+          objdumps.append(os.path.join(root, file))
+    if self.arch == MD_CPU_ARCHITECTURE_ARM:
+      platform_filter = 'arm-linux'
+    elif self.arch == MD_CPU_ARCHITECTURE_ARM64:
+      platform_filter = 'aarch64'
+    else:
+      # use default otherwise
+      return None
+    print(("# Looking for platform specific (%s) objdump in "
+           "third_party directory.") % platform_filter)
+    # Materialize a list (filter() is lazy in Python 3) and keep only paths
+    # that contain the platform filter.
+    objdumps = [path for path in objdumps if platform_filter in path]
+    if len(objdumps) == 0:
+      print("# Could not find platform specific objdump in third_party.")
+      print("# Make sure you installed the correct SDK.")
+      return None
+    return objdumps[0]
+
+  def ContextDescriptor(self):
+    if self.arch == MD_CPU_ARCHITECTURE_X86:
+      return MINIDUMP_CONTEXT_X86
+    elif self.arch == MD_CPU_ARCHITECTURE_AMD64:
+      return MINIDUMP_CONTEXT_AMD64
+    elif self.arch == MD_CPU_ARCHITECTURE_ARM:
+      return MINIDUMP_CONTEXT_ARM
+    elif self.arch == MD_CPU_ARCHITECTURE_ARM64:
+      return MINIDUMP_CONTEXT_ARM64
+    else:
+      return None
+
+  def IsValidAlignedAddress(self, address):
+    return self.IsAlignedAddress(address) and self.IsValidAddress(address)
+
+  def IsValidAddress(self, address):
+    return self.FindLocation(address) is not None
+
+  def IsAlignedAddress(self, address):
+    return (address % self.PointerSize()) == 0
+
+  def IsExceptionStackAddress(self, address):
+    if not self.IsAlignedAddress(address): return False
+    return self.IsAnyExceptionStackAddress(address)
+
+  def IsAnyExceptionStackAddress(self, address):
+    return self.StackTop() <= address <= self.StackBottom()
+
+  def IsValidExceptionStackAddress(self, address):
+    if not self.IsValidAddress(address): return False
+    return self.IsExceptionStackAddress(address)
+
+  def IsModuleAddress(self, address):
+    return self.GetModuleForAddress(address) is not None
+
+  def GetModuleForAddress(self, address):
+    for module in self.module_list.modules:
+      start = module.base_of_image
+      end = start + module.size_of_image
+      if start <= address < end: return module
+    return None
+
+  def ReadU8(self, address):
+    location = self.FindLocation(address)
+    return ctypes.c_uint8.from_buffer(self.minidump, location).value
+
+  def ReadU32(self, address):
+    location = self.FindLocation(address)
+    return ctypes.c_uint32.from_buffer(self.minidump, location).value
+
+  def ReadU64(self, address):
+    location = self.FindLocation(address)
+    return ctypes.c_uint64.from_buffer(self.minidump, location).value
+
+  def Is64(self):
+    return (self.arch == MD_CPU_ARCHITECTURE_ARM64 or
+            self.arch == MD_CPU_ARCHITECTURE_AMD64)
+
+  def ReadUIntPtr(self, address):
+    if self.Is64():
+      return self.ReadU64(address)
+    return self.ReadU32(address)
+
+  def ReadBytes(self, address, size):
+    location = self.FindLocation(address)
+    return self.minidump[location:location + size]
+
+  def _ReadWord(self, location):
+    if self.Is64():
+      return ctypes.c_uint64.from_buffer(self.minidump, location).value
+    return ctypes.c_uint32.from_buffer(self.minidump, location).value
+
+  def ReadAsciiPtr(self, address):
+    # ReadBytes() returns a bytes object, so iterate over integer byte values.
+    ascii_content = [chr(c) if 0x20 <= c < 0x7f else '.'
+                     for c in self.ReadBytes(address, self.PointerSize())]
+    return ''.join(ascii_content)
+
+  def ReadAsciiString(self, address):
+    string = ""
+    while self.IsValidAddress(address):
+      code = self.ReadU8(address)
+      if 0 < code < 128:
+        string += chr(code)
+      else:
+        break
+      address += 1
+    return string
+
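+  # The two heuristics below classify a memory region and return a tri-state
+  # result: True (likely), None (maybe) or False (unlikely).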
+  def IsProbableASCIIRegion(self, location, length):
+    ascii_bytes = 0
+    non_ascii_bytes = 0
+    for i in range(length):
+      loc = location + i
+      byte = ctypes.c_uint8.from_buffer(self.minidump, loc).value
+      if byte >= 0x7f:
+        non_ascii_bytes += 1
+      if byte < 0x20 and byte != 0:
+        non_ascii_bytes += 1
+      if byte < 0x7f and byte >= 0x20:
+        ascii_bytes += 1
+      if byte == 0xa:  # newline
+        ascii_bytes += 1
+    if ascii_bytes * 10 <= length:
+      return False
+    if length > 0 and ascii_bytes > non_ascii_bytes * 7:
+      return True
+    if ascii_bytes > non_ascii_bytes * 3:
+      return None  # Maybe
+    return False
+
+  def IsProbableExecutableRegion(self, location, length):
+    opcode_bytes = 0
+    sixty_four = self.Is64()
+    for i in range(length):
+      loc = location + i
+      byte = ctypes.c_uint8.from_buffer(self.minidump, loc).value
+      if (byte == 0x8b or           # mov
+          byte == 0x89 or           # mov reg-reg
+          (byte & 0xf0) == 0x50 or  # push/pop
+          (sixty_four and (byte & 0xf0) == 0x40) or  # rex prefix
+          byte == 0xc3 or           # return
+          byte == 0x74 or           # jeq
+          byte == 0x84 or           # jeq far
+          byte == 0x75 or           # jne
+          byte == 0x85 or           # jne far
+          byte == 0xe8 or           # call
+          byte == 0xe9 or           # jmp far
+          byte == 0xeb):            # jmp near
+        opcode_bytes += 1
+    opcode_percent = (opcode_bytes * 100) / length
+    threshold = 20
+    if opcode_percent > threshold + 2:
+      return True
+    if opcode_percent > threshold - 2:
+      return None  # Maybe
+    return False
+
+  def FindRegion(self, addr):
+    answer = [-1, -1]
+    def is_in(reader, start, size, location):
+      if addr >= start and addr < start + size:
+        answer[0] = start
+        answer[1] = size
+    self.ForEachMemoryRegion(is_in)
+    if answer[0] == -1:
+      return None
+    return answer
+
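+  # A MINIDUMP_MEMORY_LIST stores an rva per range, whereas the contents of a
+  # MINIDUMP_MEMORY_LIST64 are packed back-to-back starting at base_rva, so a
+  # running offset is needed for the 64-bit variant (see also FindLocation).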
+  def ForEachMemoryRegion(self, cb):
+    if self.memory_list64 is not None:
+      offset = 0
+      for r in self.memory_list64.ranges:
+        location = self.memory_list64.base_rva + offset
+        cb(self, r.start, r.size, location)
+        offset += r.size
+
+    if self.memory_list is not None:
+      for r in self.memory_list.ranges:
+        cb(self, r.start, r.memory.data_size, r.memory.rva)
+
+  def FindWord(self, word, alignment=0):
+    def search_inside_region(reader, start, size, location):
+      location = (location + alignment) & ~alignment
+      for i in range(size - self.PointerSize()):
+        loc = location + i
+        if reader._ReadWord(loc) == word:
+          slot = start + (loc - location)
+          print("%s: %s" % (reader.FormatIntPtr(slot),
+                            reader.FormatIntPtr(word)))
+    self.ForEachMemoryRegion(search_inside_region)
+
+  def FindWordList(self, word):
+    aligned_res = []
+    unaligned_res = []
+    def search_inside_region(reader, start, size, location):
+      for i in range(size - self.PointerSize()):
+        loc = location + i
+        if reader._ReadWord(loc) == word:
+          slot = start + (loc - location)
+          if self.IsAlignedAddress(slot):
+            aligned_res.append(slot)
+          else:
+            unaligned_res.append(slot)
+    self.ForEachMemoryRegion(search_inside_region)
+    return (aligned_res, unaligned_res)
+
+  def FindLocation(self, address):
+    offset = 0
+    if self.memory_list64 is not None:
+      for r in self.memory_list64.ranges:
+        if r.start <= address < r.start + r.size:
+          return self.memory_list64.base_rva + offset + address - r.start
+        offset += r.size
+    if self.memory_list is not None:
+      for r in self.memory_list.ranges:
+        if r.start <= address < r.start + r.memory.data_size:
+          return r.memory.rva + address - r.start
+    return None
+
+  def GetDisasmLines(self, address, size):
+    def CountUndefinedInstructions(lines):
+      pattern = "<UNDEFINED>"
+      return sum([line.count(pattern) for (ignore, line) in lines])
+
+    location = self.FindLocation(address)
+    if location is None: return []
+    arch = None
+    possible_objdump_flags = [""]
+    if self.arch == MD_CPU_ARCHITECTURE_X86:
+      arch = "ia32"
+    elif self.arch == MD_CPU_ARCHITECTURE_ARM:
+      arch = "arm"
+      possible_objdump_flags = ["", "--disassembler-options=force-thumb"]
+    elif self.arch == MD_CPU_ARCHITECTURE_ARM64:
+      arch = "arm64"
+      possible_objdump_flags = ["", "--disassembler-options=force-thumb"]
+    elif self.arch == MD_CPU_ARCHITECTURE_AMD64:
+      arch = "x64"
+    results = [ disasm.GetDisasmLines(self.minidump_name,
+                                     location,
+                                     size,
+                                     arch,
+                                     False,
+                                     objdump_flags)
+                for objdump_flags in possible_objdump_flags ]
+    return min(results, key=CountUndefinedInstructions)
+
+
+  def Dispose(self):
+    self.minidump.close()
+    self.minidump_file.close()
+
+  def ExceptionIP(self):
+    if self.arch == MD_CPU_ARCHITECTURE_AMD64:
+      return self.exception_context.rip
+    elif self.arch == MD_CPU_ARCHITECTURE_ARM:
+      return self.exception_context.pc
+    elif self.arch == MD_CPU_ARCHITECTURE_ARM64:
+      return self.exception_context.pc
+    elif self.arch == MD_CPU_ARCHITECTURE_X86:
+      return self.exception_context.eip
+
+  def ExceptionSP(self):
+    if self.arch == MD_CPU_ARCHITECTURE_AMD64:
+      return self.exception_context.rsp
+    elif self.arch == MD_CPU_ARCHITECTURE_ARM:
+      return self.exception_context.sp
+    elif self.arch == MD_CPU_ARCHITECTURE_ARM64:
+      return self.exception_context.sp
+    elif self.arch == MD_CPU_ARCHITECTURE_X86:
+      return self.exception_context.esp
+
+  def ExceptionFP(self):
+    if self.arch == MD_CPU_ARCHITECTURE_AMD64:
+      return self.exception_context.rbp
+    elif self.arch == MD_CPU_ARCHITECTURE_ARM:
+      return None
+    elif self.arch == MD_CPU_ARCHITECTURE_ARM64:
+      return self.exception_context.fp
+    elif self.arch == MD_CPU_ARCHITECTURE_X86:
+      return self.exception_context.ebp
+
+  def ExceptionThread(self):
+    return self.thread_map[self.exception.thread_id]
+
+  def StackTop(self):
+    return self.ExceptionSP()
+
+  def StackBottom(self):
+    exception_thread = self.ExceptionThread()
+    return exception_thread.stack.start + \
+        exception_thread.stack.memory.data_size
+
+  def FormatIntPtr(self, value):
+    if self.Is64():
+      return "%016x" % value
+    return "%08x" % value
+
+  def PointerSize(self):
+    if self.Is64():
+      return 8
+    return 4
+
+  def Register(self, name):
+    return self.exception_context.__getattribute__(name)
+
+  def ReadMinidumpString(self, rva):
+    string = bytearray(MINIDUMP_STRING.Read(self.minidump, rva).buffer)
+    string = string.decode("utf16")
+    return string[0:len(string) - 1]
+
+  # Load FUNC records from a BreakPad symbol file
+  #
+  #    http://code.google.com/p/google-breakpad/wiki/SymbolFiles
+  #
+  def _LoadSymbolsFrom(self, symfile, baseaddr):
+    print("Loading symbols from %s" % (symfile))
+    funcs = []
+    with open(symfile) as f:
+      for line in f:
+        result = re.match(
+            r"^FUNC ([a-f0-9]+) ([a-f0-9]+) ([a-f0-9]+) (.*)$", line)
+        if result is not None:
+          start = int(result.group(1), 16)
+          size = int(result.group(2), 16)
+          name = result.group(4).rstrip()
+          bisect.insort_left(self.symbols,
+                             FuncSymbol(baseaddr + start, size, name))
+    print(" ... done")
+
+  def TryLoadSymbolsFor(self, modulename, module):
+    try:
+      symfile = os.path.join(self.symdir,
+                             modulename.replace('.', '_') + ".pdb.sym")
+      if os.path.isfile(symfile):
+        self._LoadSymbolsFrom(symfile, module.base_of_image)
+        self.modules_with_symbols.append(module)
+    except Exception as e:
+      print("  ... failure (%s)" % (e))
+
+  # Returns true if address is covered by some module that has loaded symbols.
+  def _IsInModuleWithSymbols(self, addr):
+    for module in self.modules_with_symbols:
+      start = module.base_of_image
+      end = start + module.size_of_image
+      if (start <= addr) and (addr < end):
+        return True
+    return False
+
+  # Find symbol covering the given address and return its name in format
+  #     <symbol name>+<offset from the start>
+  def FindSymbol(self, addr):
+    if not self._IsInModuleWithSymbols(addr):
+      return None
+
+    i = bisect.bisect_left(self.symbols, addr)
+    symbol = None
+    if (0 < i) and self.symbols[i - 1].Covers(addr):
+      symbol = self.symbols[i - 1]
+    elif (i < len(self.symbols)) and self.symbols[i].Covers(addr):
+      symbol = self.symbols[i]
+    else:
+      return None
+    diff = addr - symbol.start
+    return "%s+0x%x" % (symbol.name, diff)
+
+
+class Printer(object):
+  """Printer with indentation support."""
+
+  def __init__(self):
+    self.indent = 0
+
+  def Indent(self):
+    self.indent += 2
+
+  def Dedent(self):
+    self.indent -= 2
+
+  def Print(self, string):
+    print("%s%s" % (self._IndentString(), string))
+
+  def PrintLines(self, lines):
+    indent = self._IndentString()
+    print("\n".join("%s%s" % (indent, line) for line in lines))
+
+  def _IndentString(self):
+    return self.indent * " "
+
+
+ADDRESS_RE = re.compile(r"0x[0-9a-fA-F]+")
+
+
+def FormatDisasmLine(start, heap, line):
+  line_address = start + line[0]
+  stack_slot = heap.stack_map.get(line_address)
+  marker = "  "
+  if stack_slot:
+    marker = "=>"
+  code = AnnotateAddresses(heap, line[1])
+
+  # Compute the actual call target which the disassembler is too stupid
+  # to figure out (it adds the call offset to the disassembly offset rather
+  # than the absolute instruction address).
+  if heap.reader.arch == MD_CPU_ARCHITECTURE_X86:
+    if code.startswith("e8"):
+      words = code.split()
+      if len(words) > 6 and words[5] == "call":
+        offset = int(words[4] + words[3] + words[2] + words[1], 16)
+        target = (line_address + offset + 5) & 0xFFFFFFFF
+        code = code.replace(words[6], "0x%08x" % target)
+  # TODO(jkummerow): port this hack to ARM and x64.
+
+  return "%s%08x %08x: %s" % (marker, line_address, line[0], code)
+
+
+def AnnotateAddresses(heap, line):
+  extra = []
+  for m in ADDRESS_RE.finditer(line):
+    maybe_address = int(m.group(0), 16)
+    object = heap.FindObject(maybe_address)
+    if not object: continue
+    extra.append(str(object))
+  if len(extra) == 0: return line
+  return "%s  ;; %s" % (line, ", ".join(extra))
+
+
+class HeapObject(object):
+  def __init__(self, heap, map, address):
+    self.heap = heap
+    self.map = map
+    self.address = address
+
+  def Is(self, cls):
+    return isinstance(self, cls)
+
+  def Print(self, p):
+    p.Print(str(self))
+
+  def __str__(self):
+    instance_type = "???"
+    if self.map is not None:
+      instance_type = INSTANCE_TYPES[self.map.instance_type]
+    return "%s(%s, %s)" % (self.__class__.__name__,
+                           self.heap.reader.FormatIntPtr(self.address),
+                           instance_type)
+
+  def ObjectField(self, offset):
+    field_value = self.heap.reader.ReadUIntPtr(self.address + offset)
+    return self.heap.FindObjectOrSmi(field_value)
+
+  def SmiField(self, offset):
+    field_value = self.heap.reader.ReadUIntPtr(self.address + offset)
+    if self.heap.IsSmi(field_value):
+      return self.heap.SmiUntag(field_value)
+    return None
+
+
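+# The incremental field offsets below are meant to mirror the Map layout of
+# the V8 version this tool ships with (see objects.h); they are best-effort
+# and may drift if the heap layout changes.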
+class Map(HeapObject):
+  def Decode(self, offset, size, value):
+    return (value >> offset) & ((1 << size) - 1)
+
+  # Instance Sizes
+  def InstanceSizesOffset(self):
+    return self.heap.PointerSize()
+
+  def InstanceSizeOffset(self):
+    return self.InstanceSizesOffset()
+
+  def InObjectProperties(self):
+    return self.InstanceSizeOffset() + 1
+
+  def UnusedByte(self):
+    return self.InObjectProperties() + 1
+
+  def VisitorId(self):
+    return self.UnusedByte() + 1
+
+  # Instance Attributes
+  def InstanceAttributesOffset(self):
+    return self.InstanceSizesOffset() + self.heap.IntSize()
+
+  def InstanceTypeOffset(self):
+    return self.InstanceAttributesOffset()
+
+  def BitFieldOffset(self):
+    return self.InstanceTypeOffset() + 1
+
+  def BitField2Offset(self):
+    return self.BitFieldOffset() + 1
+
+  def UnusedPropertyFieldsOffset(self):
+    return self.BitField2Offset() + 1
+
+  # Other fields
+  def BitField3Offset(self):
+    return self.InstanceAttributesOffset() + self.heap.IntSize()
+
+  def PrototypeOffset(self):
+    return self.BitField3Offset() + self.heap.PointerSize()
+
+  def ConstructorOrBackPointerOffset(self):
+    return self.PrototypeOffset() + self.heap.PointerSize()
+
+  def TransitionsOrPrototypeInfoOffset(self):
+    return self.ConstructorOrBackPointerOffset() + self.heap.PointerSize()
+
+  def DescriptorsOffset(self):
+    return self.TransitionsOrPrototypeInfoOffset() + self.heap.PointerSize()
+
+  def LayoutDescriptorOffset(self):
+    return self.DescriptorsOffset() + self.heap.PointerSize()
+
+  def CodeCacheOffset(self):
+    if (self.heap.reader.Is64()):
+      return self.LayoutDescriptorOffset() + self.heap.PointerSize()
+    return self.DescriptorsOffset() + self.heap.PointerSize()
+
+  def DependentCodeOffset(self):
+    return self.CodeCacheOffset() + self.heap.PointerSize()
+
+  def ReadByte(self, offset):
+    return self.heap.reader.ReadU8(self.address + offset)
+
+  def ReadWord(self, offset):
+    return self.heap.reader.ReadUIntPtr(self.address + offset)
+
+  def Print(self, p):
+    p.Print("Map(%08x)" % (self.address))
+    p.Print("  - size: %d, inobject: %d, (unused: %d), visitor: %d" % (
+        self.ReadByte(self.InstanceSizeOffset()),
+        self.ReadByte(self.InObjectProperties()),
+        self.ReadByte(self.UnusedByte()),
+        self.VisitorId()))
+
+    instance_type = INSTANCE_TYPES[self.ReadByte(self.InstanceTypeOffset())]
+    bitfield = self.ReadByte(self.BitFieldOffset())
+    bitfield2 = self.ReadByte(self.BitField2Offset())
+    unused = self.ReadByte(self.UnusedPropertyFieldsOffset())
+    p.Print("  - %s, bf: %d, bf2: %d, unused: %d" % (
+        instance_type, bitfield, bitfield2, unused))
+
+    p.Print("  - kind: %s" % (self.Decode(3, 5, bitfield2)))
+
+    bitfield3 = self.ReadWord(self.BitField3Offset())
+
+    p.Print(
+        "  - EnumLength: %d NumberOfOwnDescriptors: %d OwnsDescriptors: %s" % (
+            self.Decode(0, 10, bitfield3),
+            self.Decode(10, 10, bitfield3),
+            self.Decode(21, 1, bitfield3)))
+    p.Print("  - DictionaryMap: %s" % (self.Decode(20, 1, bitfield3)))
+    p.Print("  - Deprecated: %s" % (self.Decode(23, 1, bitfield3)))
+    p.Print("  - IsUnstable: %s" % (self.Decode(24, 1, bitfield3)))
+    p.Print("  - NewTargetIsBase: %s" % (self.Decode(27, 1, bitfield3)))
+
+    descriptors = self.ObjectField(self.DescriptorsOffset())
+    if descriptors.__class__ == FixedArray:
+      DescriptorArray(descriptors).Print(p)
+    else:
+      p.Print("  - Descriptors: %s" % (descriptors))
+
+    transitions = self.ObjectField(self.TransitionsOrPrototypeInfoOffset())
+    if transitions.__class__ == FixedArray:
+      TransitionArray(transitions).Print(p)
+    else:
+      p.Print("  - TransitionsOrPrototypeInfo: %s" % (transitions))
+
+    p.Print("  - Prototype: %s" % self.ObjectField(self.PrototypeOffset()))
+
+  def __init__(self, heap, map, address):
+    HeapObject.__init__(self, heap, map, address)
+    self.instance_type = \
+        heap.reader.ReadU8(self.address + self.InstanceTypeOffset())
+
+
+class String(HeapObject):
+  def LengthOffset(self):
+    # First word after the map is the hash, the second is the length.
+    return self.heap.PointerSize() * 2
+
+  def __init__(self, heap, map, address):
+    HeapObject.__init__(self, heap, map, address)
+    self.length = self.SmiField(self.LengthOffset())
+
+  def GetChars(self):
+    return "?string?"
+
+  def Print(self, p):
+    p.Print(str(self))
+
+  def __str__(self):
+    return "\"%s\"" % self.GetChars()
+
+
+class SeqString(String):
+  def CharsOffset(self):
+    return self.heap.PointerSize() * 3
+
+  def __init__(self, heap, map, address):
+    String.__init__(self, heap, map, address)
+    self.chars = heap.reader.ReadBytes(self.address + self.CharsOffset(),
+                                       self.length)
+
+  def GetChars(self):
+    return self.chars
+
+
+class ExternalString(String):
+  # TODO(vegorov) fix ExternalString for X64 architecture
+  RESOURCE_OFFSET = 12
+
+  WEBKIT_RESOUCE_STRING_IMPL_OFFSET = 4
+  WEBKIT_STRING_IMPL_CHARS_OFFSET = 8
+
+  def __init__(self, heap, map, address):
+    String.__init__(self, heap, map, address)
+    reader = heap.reader
+    self.resource = \
+        reader.ReadU32(self.address + ExternalString.RESOURCE_OFFSET)
+    self.chars = "?external string?"
+    if not reader.IsValidAddress(self.resource): return
+    string_impl_address = self.resource + \
+        ExternalString.WEBKIT_RESOUCE_STRING_IMPL_OFFSET
+    if not reader.IsValidAddress(string_impl_address): return
+    string_impl = reader.ReadU32(string_impl_address)
+    chars_ptr_address = string_impl + \
+        ExternalString.WEBKIT_STRING_IMPL_CHARS_OFFSET
+    if not reader.IsValidAddress(chars_ptr_address): return
+    chars_ptr = reader.ReadU32(chars_ptr_address)
+    if not reader.IsValidAddress(chars_ptr): return
+    raw_chars = reader.ReadBytes(chars_ptr, 2 * self.length)
+    self.chars = codecs.getdecoder("utf16")(raw_chars)[0]
+
+  def GetChars(self):
+    return self.chars
+
+
+class ConsString(String):
+  def LeftOffset(self):
+    return self.heap.PointerSize() * 3
+
+  def RightOffset(self):
+    return self.heap.PointerSize() * 4
+
+  def __init__(self, heap, map, address):
+    String.__init__(self, heap, map, address)
+    self.left = self.ObjectField(self.LeftOffset())
+    self.right = self.ObjectField(self.RightOffset())
+
+  def GetChars(self):
+    try:
+      return self.left.GetChars() + self.right.GetChars()
+    except:
+      return "***CAUGHT EXCEPTION IN GROKDUMP***"
+
+
+class Oddball(HeapObject):
+  # Should match declarations in objects.h
+  KINDS = [
+    "False",
+    "True",
+    "TheHole",
+    "Null",
+    "ArgumentMarker",
+    "Undefined",
+    "Other"
+  ]
+
+  def ToStringOffset(self):
+    return self.heap.PointerSize()
+
+  def ToNumberOffset(self):
+    return self.ToStringOffset() + self.heap.PointerSize()
+
+  def KindOffset(self):
+    return self.ToNumberOffset() + self.heap.PointerSize()
+
+  def __init__(self, heap, map, address):
+    HeapObject.__init__(self, heap, map, address)
+    self.to_string = self.ObjectField(self.ToStringOffset())
+    self.kind = self.SmiField(self.KindOffset())
+
+  def Print(self, p):
+    p.Print(str(self))
+
+  def __str__(self):
+    if self.to_string:
+      return "Oddball(%08x, <%s>)" % (self.address, str(self.to_string))
+    else:
+      kind = "???"
+      if 0 <= self.kind < len(Oddball.KINDS):
+        kind = Oddball.KINDS[self.kind]
+      return "Oddball(%08x, kind=%s)" % (self.address, kind)
+
+
+class FixedArray(HeapObject):
+  def LengthOffset(self):
+    return self.heap.PointerSize()
+
+  def ElementsOffset(self):
+    return self.heap.PointerSize() * 2
+
+  def MemberOffset(self, i):
+    return self.ElementsOffset() + self.heap.PointerSize() * i
+
+  def Get(self, i):
+    return self.ObjectField(self.MemberOffset(i))
+
+  def __init__(self, heap, map, address):
+    HeapObject.__init__(self, heap, map, address)
+    self.length = self.SmiField(self.LengthOffset())
+
+  def Print(self, p):
+    p.Print("FixedArray(%s) {" % self.heap.reader.FormatIntPtr(self.address))
+    p.Indent()
+    p.Print("length: %d" % self.length)
+    base_offset = self.ElementsOffset()
+    for i in range(self.length):
+      offset = base_offset + 4 * i
+      try:
+        p.Print("[%08d] = %s" % (i, self.ObjectField(offset)))
+      except TypeError:
+        p.Dedent()
+        p.Print("...")
+        p.Print("}")
+        return
+    p.Dedent()
+    p.Print("}")
+
+  def __str__(self):
+    return "FixedArray(%08x, length=%d)" % (self.address, self.length)
+
+
+class DescriptorArray(object):
+  def __init__(self, array):
+    self.array = array
+
+  def Length(self):
+    return self.array.Get(0)
+
+  def Decode(self, offset, size, value):
+    return (value >> offset) & ((1 << size) - 1)
+
+  TYPES = [
+      "normal",
+      "field",
+      "function",
+      "callbacks"
+  ]
+
+  def Type(self, value):
+    return DescriptorArray.TYPES[self.Decode(0, 3, value)]
+
+  def Attributes(self, value):
+    attributes = self.Decode(3, 3, value)
+    result = []
+    if (attributes & 1): result += ["ReadOnly"]
+    if (attributes & 2): result += ["DontEnum"]
+    if (attributes & 4): result += ["DontDelete"]
+    return "[" + (",".join(result)) + "]"
+
+  def Deleted(self, value):
+    return self.Decode(6, 1, value) == 1
+
+  def FieldIndex(self, value):
+    return self.Decode(20, 11, value)
+
+  def Pointer(self, value):
+    return self.Decode(6, 11, value)
+
+  def Details(self, di, value):
+    return (
+        di,
+        self.Type(value),
+        self.Attributes(value),
+        self.FieldIndex(value),
+        self.Pointer(value)
+    )
+
+
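+  # Layout assumed by Print(): slot 0 holds the number of descriptors, slot 1
+  # the enum cache, followed by one (key, details, value) triple per
+  # descriptor.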
+  def Print(self, p):
+    length = self.Length()
+    array = self.array
+
+    p.Print("Descriptors(%08x, length=%d)" % (array.address, length))
+    p.Print("[et] %s" % (array.Get(1)))
+
+    for di in range(length):
+      i = 2 + di * 3
+      p.Print("0x%x" % (array.address + array.MemberOffset(i)))
+      p.Print("[%i] name:    %s" % (di, array.Get(i + 0)))
+      p.Print("[%i] details: %s %s field-index %i pointer %i" % \
+              self.Details(di, array.Get(i + 1)))
+      p.Print("[%i] value:   %s" % (di, array.Get(i + 2)))
+
+    end = self.array.length // 3
+    if length != end:
+      p.Print("[%i-%i] slack descriptors" % (length, end))
+
+
+class TransitionArray(object):
+  def __init__(self, array):
+    self.array = array
+
+  def IsSimpleTransition(self):
+    return self.array.length <= 2
+
+  def Length(self):
+    # SimpleTransition cases
+    if self.IsSimpleTransition():
+      return self.array.length - 1
+    return (self.array.length - 3) // 2
+
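+  # Layout assumed by Print(): a full transition array stores the back
+  # pointer, an elements entry, a prototype-info entry and then
+  # (symbol, target) pairs; arrays with at most two entries are treated as a
+  # single "simple" transition.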
+  def Print(self, p):
+    length = self.Length()
+    array = self.array
+
+    p.Print("Transitions(%08x, length=%d)" % (array.address, length))
+    p.Print("[backpointer] %s" % (array.Get(0)))
+    if self.IsSimpleTransition():
+      if length == 1:
+        p.Print("[simple target] %s" % (array.Get(1)))
+      return
+
+    elements = array.Get(1)
+    if elements is not None:
+      p.Print("[elements   ] %s" % (elements))
+
+    prototype = array.Get(2)
+    if prototype is not None:
+      p.Print("[prototype  ] %s" % (prototype))
+
+    for di in range(length):
+      i = 3 + di * 2
+      p.Print("[%i] symbol: %s" % (di, array.Get(i + 0)))
+      p.Print("[%i] target: %s" % (di, array.Get(i + 1)))
+
+
+class JSFunction(HeapObject):
+  def CodeEntryOffset(self):
+    return 3 * self.heap.PointerSize()
+
+  def SharedOffset(self):
+    return 5 * self.heap.PointerSize()
+
+  def __init__(self, heap, map, address):
+    HeapObject.__init__(self, heap, map, address)
+    code_entry = \
+        heap.reader.ReadU32(self.address + self.CodeEntryOffset())
+    self.code = heap.FindObject(code_entry - Code.HeaderSize(heap) + 1)
+    self.shared = self.ObjectField(self.SharedOffset())
+
+  def Print(self, p):
+    source = "\n".join("  %s" % line for line in self._GetSource().split("\n"))
+    p.Print("JSFunction(%s) {" % self.heap.reader.FormatIntPtr(self.address))
+    p.Indent()
+    p.Print("inferred name: %s" % self.shared.inferred_name)
+    if self.shared.script.Is(Script) and self.shared.script.name.Is(String):
+      p.Print("script name: %s" % self.shared.script.name)
+    p.Print("source:")
+    p.PrintLines(self._GetSource().split("\n"))
+    p.Print("code:")
+    self.code.Print(p)
+    if self.code != self.shared.code:
+      p.Print("unoptimized code:")
+      self.shared.code.Print(p)
+    p.Dedent()
+    p.Print("}")
+
+  def __str__(self):
+    inferred_name = ""
+    if self.shared is not None and self.shared.Is(SharedFunctionInfo):
+      inferred_name = self.shared.inferred_name
+    return "JSFunction(%s, %s) " % \
+          (self.heap.reader.FormatIntPtr(self.address), inferred_name)
+
+  def _GetSource(self):
+    source = "?source?"
+    start = self.shared.start_position
+    end = self.shared.end_position
+    if not self.shared.script.Is(Script): return source
+    script_source = self.shared.script.source
+    if not script_source.Is(String): return source
+    if start and end:
+      source = script_source.GetChars()[start:end]
+    return source
+
+
+class SharedFunctionInfo(HeapObject):
+  def CodeOffset(self):
+    return 2 * self.heap.PointerSize()
+
+  def ScriptOffset(self):
+    return 7 * self.heap.PointerSize()
+
+  def InferredNameOffset(self):
+    return 9 * self.heap.PointerSize()
+
+  def EndPositionOffset(self):
+    return 12 * self.heap.PointerSize() + 4 * self.heap.IntSize()
+
+  def StartPositionAndTypeOffset(self):
+    return 12 * self.heap.PointerSize() + 5 * self.heap.IntSize()
+
+  def __init__(self, heap, map, address):
+    HeapObject.__init__(self, heap, map, address)
+    try:
+      self.code = self.ObjectField(self.CodeOffset())
+      self.script = self.ObjectField(self.ScriptOffset())
+      self.inferred_name = self.ObjectField(self.InferredNameOffset())
+      if heap.PointerSize() == 8:
+        start_position_and_type = heap.reader.ReadU32(
+            self.address + self.StartPositionAndTypeOffset())
+        self.start_position = start_position_and_type >> 2
+        pseudo_smi_end_position = heap.reader.ReadU32(
+            self.address + self.EndPositionOffset())
+        self.end_position = pseudo_smi_end_position >> 2
+      else:
+        start_position_and_type = \
+            self.SmiField(self.StartPositionAndTypeOffset())
+        if start_position_and_type:
+          self.start_position = start_position_and_type >> 2
+        else:
+          self.start_position = None
+        self.end_position = \
+            self.SmiField(self.EndPositionOffset())
+    except:
+      print("*** Error while reading SharedFunctionInfo")
+
+
+class Script(HeapObject):
+  def SourceOffset(self):
+    return self.heap.PointerSize()
+
+  def NameOffset(self):
+    return self.SourceOffset() + self.heap.PointerSize()
+
+  def __init__(self, heap, map, address):
+    HeapObject.__init__(self, heap, map, address)
+    self.source = self.ObjectField(self.SourceOffset())
+    self.name = self.ObjectField(self.NameOffset())
+
+
+class CodeCache(HeapObject):
+  def DefaultCacheOffset(self):
+    return self.heap.PointerSize()
+
+  def NormalTypeCacheOffset(self):
+    return self.DefaultCacheOffset() + self.heap.PointerSize()
+
+  def __init__(self, heap, map, address):
+    HeapObject.__init__(self, heap, map, address)
+    self.default_cache = self.ObjectField(self.DefaultCacheOffset())
+    self.normal_type_cache = self.ObjectField(self.NormalTypeCacheOffset())
+
+  def Print(self, p):
+    p.Print("CodeCache(%s) {" % self.heap.reader.FormatIntPtr(self.address))
+    p.Indent()
+    p.Print("default cache: %s" % self.default_cache)
+    p.Print("normal type cache: %s" % self.normal_type_cache)
+    p.Dedent()
+    p.Print("}")
+
+
+class Code(HeapObject):
+  CODE_ALIGNMENT_MASK = (1 << 5) - 1
+
+  def InstructionSizeOffset(self):
+    return self.heap.PointerSize()
+
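+  # HeaderSize() rounds the raw header size up to the 32-byte code alignment;
+  # e.g. on 64-bit: 8 + 4 + 4*8 + 3*4 = 56 bytes, rounded up to 64.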
+  @staticmethod
+  def HeaderSize(heap):
+    return (heap.PointerSize() + heap.IntSize() + \
+        4 * heap.PointerSize() + 3 * heap.IntSize() + \
+        Code.CODE_ALIGNMENT_MASK) & ~Code.CODE_ALIGNMENT_MASK
+
+  def __init__(self, heap, map, address):
+    HeapObject.__init__(self, heap, map, address)
+    self.entry = self.address + Code.HeaderSize(heap)
+    self.instruction_size = \
+        heap.reader.ReadU32(self.address + self.InstructionSizeOffset())
+
+  def Print(self, p):
+    lines = self.heap.reader.GetDisasmLines(self.entry, self.instruction_size)
+    p.Print("Code(%s) {" % self.heap.reader.FormatIntPtr(self.address))
+    p.Indent()
+    p.Print("instruction_size: %d" % self.instruction_size)
+    p.PrintLines(self._FormatLine(line) for line in lines)
+    p.Dedent()
+    p.Print("}")
+
+  def _FormatLine(self, line):
+    return FormatDisasmLine(self.entry, self.heap, line)
+
+
+class V8Heap(object):
+  CLASS_MAP = {
+    "SYMBOL_TYPE": SeqString,
+    "ONE_BYTE_SYMBOL_TYPE": SeqString,
+    "CONS_SYMBOL_TYPE": ConsString,
+    "CONS_ONE_BYTE_SYMBOL_TYPE": ConsString,
+    "EXTERNAL_SYMBOL_TYPE": ExternalString,
+    "EXTERNAL_SYMBOL_WITH_ONE_BYTE_DATA_TYPE": ExternalString,
+    "EXTERNAL_ONE_BYTE_SYMBOL_TYPE": ExternalString,
+    "UNCACHED_EXTERNAL_SYMBOL_TYPE": ExternalString,
+    "UNCACHED_EXTERNAL_SYMBOL_WITH_ONE_BYTE_DATA_TYPE": ExternalString,
+    "UNCACHED_EXTERNAL_ONE_BYTE_SYMBOL_TYPE": ExternalString,
+    "STRING_TYPE": SeqString,
+    "ONE_BYTE_STRING_TYPE": SeqString,
+    "CONS_STRING_TYPE": ConsString,
+    "CONS_ONE_BYTE_STRING_TYPE": ConsString,
+    "EXTERNAL_STRING_TYPE": ExternalString,
+    "EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE": ExternalString,
+    "EXTERNAL_ONE_BYTE_STRING_TYPE": ExternalString,
+    "MAP_TYPE": Map,
+    "ODDBALL_TYPE": Oddball,
+    "FIXED_ARRAY_TYPE": FixedArray,
+    "HASH_TABLE_TYPE": FixedArray,
+    "OBJECT_BOILERPLATE_DESCRIPTION_TYPE": FixedArray,
+    "SCOPE_INFO_TYPE": FixedArray,
+    "JS_FUNCTION_TYPE": JSFunction,
+    "SHARED_FUNCTION_INFO_TYPE": SharedFunctionInfo,
+    "SCRIPT_TYPE": Script,
+    "CODE_CACHE_TYPE": CodeCache,
+    "CODE_TYPE": Code,
+  }
+
+  def __init__(self, reader, stack_map):
+    self.reader = reader
+    self.stack_map = stack_map
+    self.objects = {}
+
+  def FindObjectOrSmi(self, tagged_address):
+    if self.IsSmi(tagged_address): return self.SmiUntag(tagged_address)
+    return self.FindObject(tagged_address)
+
+  def FindObject(self, tagged_address):
+    if tagged_address in self.objects:
+      return self.objects[tagged_address]
+    if not self.IsTaggedObjectAddress(tagged_address): return None
+    address = tagged_address - 1
+    if not self.reader.IsValidAddress(address): return None
+    map_tagged_address = self.reader.ReadUIntPtr(address)
+    if tagged_address == map_tagged_address:
+      # Meta map?
+      meta_map = Map(self, None, address)
+      instance_type_name = INSTANCE_TYPES.get(meta_map.instance_type)
+      if instance_type_name != "MAP_TYPE": return None
+      meta_map.map = meta_map
+      object = meta_map
+    else:
+      map = self.FindMap(map_tagged_address)
+      if map is None: return None
+      instance_type_name = INSTANCE_TYPES.get(map.instance_type)
+      if instance_type_name is None: return None
+      cls = V8Heap.CLASS_MAP.get(instance_type_name, HeapObject)
+      object = cls(self, map, address)
+    self.objects[tagged_address] = object
+    return object
+
+  def FindMap(self, tagged_address):
+    address = self.FindMapAddress(tagged_address)
+    if not address: return None
+    object = Map(self, None, address)
+    return object
+
+  def FindMapAddress(self, tagged_address):
+    if not self.IsTaggedMapAddress(tagged_address): return None
+    address = tagged_address - 1
+    if not self.reader.IsValidAddress(address): return None
+    return address
+
+  def IntSize(self):
+    return 4
+
+  def PointerSize(self):
+    return self.reader.PointerSize()
+
+  def ObjectAlignmentMask(self):
+    return self.PointerSize() - 1
+
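+  # Heap object pointers are tagged by setting the least significant bit, so
+  # a tagged address satisfies (address & alignment_mask) == 1.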
+  def IsTaggedObjectAddress(self, address):
+    return (address & self.ObjectAlignmentMask()) == 1
+
+  def IsValidTaggedObjectAddress(self, address):
+    if not self.IsTaggedObjectAddress(address): return False
+    return self.reader.IsValidAddress(address)
+
+  def IsTaggedMapAddress(self, address):
+    return (address & self.MapAlignmentMask()) == 1
+
+  def MapAlignmentMask(self):
+    if self.reader.arch == MD_CPU_ARCHITECTURE_AMD64:
+      return (1 << 4) - 1
+    elif self.reader.arch == MD_CPU_ARCHITECTURE_ARM:
+      return (1 << 4) - 1
+    elif self.reader.arch == MD_CPU_ARCHITECTURE_ARM64:
+      return (1 << 4) - 1
+    elif self.reader.arch == MD_CPU_ARCHITECTURE_X86:
+      return (1 << 5) - 1
+
+  def PageAlignmentMask(self):
+    return (1 << 19) - 1
+
+  def IsTaggedAddress(self, address):
+    return (address & self.ObjectAlignmentMask()) == 1
+
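+  # Smi encoding: on 64-bit targets the value lives in the upper 32 bits and
+  # the lower half is zero; on 32-bit targets the value is shifted left by
+  # one and the low (tag) bit is clear.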
+  def IsSmi(self, tagged_address):
+    if self.reader.Is64():
+      return (tagged_address & 0xFFFFFFFF) == 0
+    return not self.IsTaggedAddress(tagged_address)
+
+  def SmiUntag(self, tagged_address):
+    if self.reader.Is64(): return tagged_address >> 32
+    return tagged_address >> 1
+
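+  # Single-character address classification used when annotating dumps:
+  # " " invalid, "S" exception stack, "C" inside a loaded module, "T" tagged
+  # pointer, "s" tagged value pointing into the stack, "*" other valid
+  # address.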
+  def AddressTypeMarker(self, address):
+    if not self.reader.IsValidAddress(address): return " "
+    if self.reader.IsExceptionStackAddress(address): return "S"
+    if self.reader.IsModuleAddress(address): return "C"
+    if self.IsTaggedAddress(address):
+      # Cannot have a tagged pointer into the stack
+      if self.reader.IsAnyExceptionStackAddress(address): return "s"
+      return "T"
+    return "*"
+
+  def FormatIntPtr(self, address):
+    marker = self.AddressTypeMarker(address)
+    address = self.reader.FormatIntPtr(address)
+    if marker == " ": return address
+    return "%s %s" % (address, marker)
+
+  def RelativeOffset(self, slot, address):
+    if not self.reader.IsValidAlignedAddress(slot): return None
+    if self.IsTaggedObjectAddress(address):
+      address -= 1
+    if not self.reader.IsValidAlignedAddress(address): return None
+    offset = (address - slot) // self.PointerSize()
+
+    lower_limit = -32
+    upper_limit = 128
+    if self.reader.IsExceptionStackAddress(address):
+      upper_limit = 0xFFFFFF
+
+    if offset < lower_limit or upper_limit < offset: return None
+    target_address = self.reader.ReadUIntPtr(address)
+    return "[%+02d]=%s %s" % (offset, self.reader.FormatIntPtr(target_address),
+                             self.AddressTypeMarker(target_address))
+
+  def FindObjectPointers(self, start=0, end=0):
+    objects = set()
+    def find_object_in_region(reader, start, size, location):
+      for slot in range(start, start+size, self.reader.PointerSize()):
+        if not self.reader.IsValidAddress(slot): break
+        # Collect only tagged pointers (object) to tagged pointers (map)
+        tagged_address = self.reader.ReadUIntPtr(slot)
+        if not self.IsValidTaggedObjectAddress(tagged_address): continue
+        map_address = self.reader.ReadUIntPtr(tagged_address - 1)
+        if not self.IsTaggedMapAddress(map_address): continue
+        objects.add(tagged_address)
+
+    if not start and not end:
+      self.reader.ForEachMemoryRegion(find_object_in_region)
+    else:
+      find_object_in_region(self.reader, start, end-start, None)
+
+    return objects
+
+class KnownObject(HeapObject):
+  def __init__(self, heap, known_name):
+    HeapObject.__init__(self, heap, None, None)
+    self.known_name = known_name
+
+  def __str__(self):
+    return "<%s>" % self.known_name
+
+
+class KnownMap(HeapObject):
+  def __init__(self, heap, known_name, instance_type):
+    HeapObject.__init__(self, heap, None, None)
+    self.instance_type = instance_type
+    self.known_name = known_name
+
+  def __str__(self):
+    return "<%s>" % self.known_name
+
+
+COMMENT_RE = re.compile(r"^C (0x[0-9a-fA-F]+) (.*)$")
+PAGEADDRESS_RE = re.compile(
+    r"^P (mappage|oldpage) (0x[0-9a-fA-F]+)$")
+
+
+class InspectionInfo(object):
+  def __init__(self, minidump_name, reader):
+    self.comment_file = minidump_name + ".comments"
+    self.address_comments = {}
+    self.page_address = {}
+    if os.path.exists(self.comment_file):
+      with open(self.comment_file, "r") as f:
+        lines = f.readlines()
+        f.close()
+
+        for l in lines:
+          m = COMMENT_RE.match(l)
+          if m:
+            self.address_comments[int(m.group(1), 0)] = m.group(2)
+          m = PAGEADDRESS_RE.match(l)
+          if m:
+            self.page_address[m.group(1)] = int(m.group(2), 0)
+    self.reader = reader
+    self.styles = {}
+    self.color_addresses()
+    return
+
+  def get_page_address(self, page_kind):
+    return self.page_address.get(page_kind, 0)
+
+  def save_page_address(self, page_kind, address):
+    with open(self.comment_file, "a") as f:
+      f.write("P %s 0x%x\n" % (page_kind, address))
+      f.close()
+
+  def color_addresses(self):
+    # Color all stack addresses.
+    exception_thread = self.reader.thread_map[self.reader.exception.thread_id]
+    stack_top = self.reader.ExceptionSP()
+    stack_bottom = exception_thread.stack.start + \
+        exception_thread.stack.memory.data_size
+    frame_pointer = self.reader.ExceptionFP()
+    self.styles[frame_pointer] = "frame"
+    for slot in range(stack_top, stack_bottom, self.reader.PointerSize()):
+      # stack address
+      self.styles[slot] = "sa"
+    for slot in range(stack_top, stack_bottom, self.reader.PointerSize()):
+      maybe_address = self.reader.ReadUIntPtr(slot)
+      # stack value
+      self.styles[maybe_address] = "sv"
+      if slot == frame_pointer:
+        self.styles[slot] = "frame"
+        frame_pointer = maybe_address
+    self.styles[self.reader.ExceptionIP()] = "pc"
+
+  def get_style_class(self, address):
+    return self.styles.get(address, None)
+
+  def get_style_class_string(self, address):
+    style = self.get_style_class(address)
+    if style is not None:
+      return " class=%s " % style
+    else:
+      return ""
+
+  def set_comment(self, address, comment):
+    self.address_comments[address] = comment
+    with open(self.comment_file, "a") as f:
+      f.write("C 0x%x %s\n" % (address, comment))
+      f.close()
+
+  def get_comment(self, address):
+    return self.address_comments.get(address, "")
+
+
+class InspectionPadawan(object):
+  """The padawan can improve annotations by sensing well-known objects."""
+  def __init__(self, reader, heap):
+    self.reader = reader
+    self.heap = heap
+    self.known_first_map_page = 0
+    self.known_first_old_page = 0
+    self.context = None
+
+  def __getattr__(self, name):
+    """An InspectionPadawan can be used instead of V8Heap, even though
+       it does not inherit from V8Heap (aka. mixin)."""
+    return getattr(self.heap, name)
+
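+  # known_first_map_page and known_first_old_page are filled in elsewhere in
+  # the tool; they allow page-relative offsets to be looked up in KNOWN_MAPS
+  # and KNOWN_OBJECTS without access to a live heap.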
+  def GetPageOffset(self, tagged_address):
+    return tagged_address & self.heap.PageAlignmentMask()
+
+  def IsInKnownMapSpace(self, tagged_address):
+    page_address = tagged_address & ~self.heap.PageAlignmentMask()
+    return page_address == self.known_first_map_page
+
+  def IsInKnownOldSpace(self, tagged_address):
+    page_address = tagged_address & ~self.heap.PageAlignmentMask()
+    return page_address == self.known_first_old_page
+
+  def ContainingKnownOldSpaceName(self, tagged_address):
+    page_address = tagged_address & ~self.heap.PageAlignmentMask()
+    if page_address == self.known_first_old_page: return "OLD_SPACE"
+    return None
+
+  def FrameMarkerName(self, value):
+    # The frame marker is Smi-tagged but not Smi encoded and 0 is not a valid
+    # frame type.
+    value = (value >> 1) - 1
+    if 0 <= value < len(FRAME_MARKERS):
+      return "Possibly %s frame marker" % FRAME_MARKERS[value]
+    return None
+
+  def IsFrameMarker(self, slot, address):
+    if not slot: return False
+    # Frame markers only occur directly after a frame pointer and only on the
+    # stack.
+    if not self.reader.IsExceptionStackAddress(slot): return False
+    next_slot = slot + self.reader.PointerSize()
+    if not self.reader.IsValidAddress(next_slot): return False
+    next_address = self.reader.ReadUIntPtr(next_slot)
+    return self.reader.IsExceptionStackAddress(next_address)
+
+  def FormatSmi(self, address):
+    value = self.heap.SmiUntag(address)
+    # On 32-bit systems almost everything looks like a Smi.
+    if not self.reader.Is64() or value == 0: return None
+    return "Smi(%d)" % value
+
+  def SenseObject(self, address, slot=None):
+    if self.IsFrameMarker(slot, address):
+      return self.FrameMarkerName(address)
+    if self.heap.IsSmi(address):
+      return self.FormatSmi(address)
+    if not self.heap.IsTaggedAddress(address): return None
+    tagged_address = address
+    if self.IsInKnownOldSpace(tagged_address):
+      offset = self.GetPageOffset(tagged_address)
+      lookup_key = (self.ContainingKnownOldSpaceName(tagged_address), offset)
+      known_obj_name = KNOWN_OBJECTS.get(lookup_key)
+      if known_obj_name:
+        return KnownObject(self, known_obj_name)
+    if self.IsInKnownMapSpace(tagged_address):
+      known_map = self.SenseMap(tagged_address)
+      if known_map:
+        return known_map
+    found_obj = self.heap.FindObject(tagged_address)
+    if found_obj: return found_obj
+    address = tagged_address - 1
+    if self.reader.IsValidAddress(address):
+      map_tagged_address = self.reader.ReadUIntPtr(address)
+      map = self.SenseMap(map_tagged_address)
+      if map is None: return None
+      instance_type_name = INSTANCE_TYPES.get(map.instance_type)
+      if instance_type_name is None: return None
+      cls = V8Heap.CLASS_MAP.get(instance_type_name, HeapObject)
+      return cls(self, map, address)
+    return None
+
+  def SenseMap(self, tagged_address):
+    if self.IsInKnownMapSpace(tagged_address):
+      offset = self.GetPageOffset(tagged_address)
+      lookup_key = ("MAP_SPACE", offset)
+      known_map_info = KNOWN_MAPS.get(lookup_key)
+      if known_map_info:
+        known_map_type, known_map_name = known_map_info
+        return KnownMap(self, known_map_name, known_map_type)
+    found_map = self.heap.FindMap(tagged_address)
+    if found_map: return found_map
+    return None
+
+  def FindObjectOrSmi(self, tagged_address):
+    """When used as a mixin in place of V8Heap."""
+    found_obj = self.SenseObject(tagged_address)
+    if found_obj: return found_obj
+    if self.IsSmi(tagged_address):
+      return self.FormatSmi(tagged_address)
+    else:
+      return "Unknown(%s)" % self.reader.FormatIntPtr(tagged_address)
+
+  def FindObject(self, tagged_address):
+    """When used as a mixin in place of V8Heap."""
+    raise NotImplementedError
+
+  def FindMap(self, tagged_address):
+    """When used as a mixin in place of V8Heap."""
+    raise NotImplementedError
+
+  def PrintKnowledge(self):
+    print("  known_first_map_page = %s\n"\
+          "  known_first_old_page = %s" % (
+          self.reader.FormatIntPtr(self.known_first_map_page),
+          self.reader.FormatIntPtr(self.known_first_old_page)))
+
+  def FindFirstAsciiString(self, start, end=None, min_length=32):
+    """ Walk the memory until we find a large string """
+    if not end: end = start + 64
+    for slot in range(start, end):
+      if not self.reader.IsValidAddress(slot): break
+      message = self.reader.ReadAsciiString(slot)
+      if len(message) > min_length:
+        return (slot, message)
+    return (None,None)
+
+  def PrintStackTraceMessage(self, start=None, print_message=True):
+    """
+    Try to print a possible message from PushStackTraceAndDie.
+    Returns the first address where the normal stack starts again.
+    """
+    # Only look at the first 4096 words on the stack
+    ptr_size = self.reader.PointerSize()
+    if start is None: start = self.reader.ExceptionSP()
+    if not self.reader.IsValidAddress(start): return start
+    end = start + ptr_size * 1024 * 4
+    magic1 = None
+    for slot in range(start, end, ptr_size):
+      if not self.reader.IsValidAddress(slot + ptr_size): break
+      magic1 = self.reader.ReadUIntPtr(slot)
+      magic2 = self.reader.ReadUIntPtr(slot + ptr_size)
+      pair = (magic1 & 0xFFFFFFFF, magic2 & 0xFFFFFFFF)
+      if pair in MAGIC_MARKER_PAIRS:
+        return self.TryExtractOldStyleStackTrace(slot, start, end,
+                                                 print_message)
+      if pair[0] == STACK_TRACE_MARKER:
+        return self.TryExtractStackTrace(slot, start, end, print_message)
+      elif pair[0] == ERROR_MESSAGE_MARKER:
+        return self.TryExtractErrorMessage(slot, start, end, print_message)
+    # Simple fallback in case not stack trace object was found
+    return self.TryExtractOldStyleStackTrace(0, start, end,
+                                             print_message)
+
+  def TryExtractStackTrace(self, slot, start, end, print_message):
+    ptr_size = self.reader.PointerSize()
+    assert self.reader.ReadUIntPtr(slot) & 0xFFFFFFFF == STACK_TRACE_MARKER
+    end_marker = STACK_TRACE_MARKER + 1
+    header_size = 10
+    # Look for the end marker after the fields and the message buffer.
+    end_search = start + (32 * 1024) + (header_size * ptr_size)
+    end_slot = self.FindPtr(end_marker, end_search, end_search + ptr_size * 512)
+    if not end_slot: return start
+    print("Stack Message (start=%s):" % self.heap.FormatIntPtr(slot))
+    slot += ptr_size
+    for name in ("isolate","ptr1", "ptr2", "ptr3", "ptr4", "codeObject1",
+                 "codeObject2", "codeObject3", "codeObject4"):
+      value = self.reader.ReadUIntPtr(slot)
+      print(" %s: %s" % (name.rjust(14), self.heap.FormatIntPtr(value)))
+      slot += ptr_size
+    print("  message start: %s" % self.heap.FormatIntPtr(slot))
+    stack_start = end_slot + ptr_size
+    print("  stack_start:   %s" % self.heap.FormatIntPtr(stack_start))
+    (message_start, message) = self.FindFirstAsciiString(slot)
+    self.FormatStackTrace(message, print_message)
+    return stack_start
+
+  def FindPtr(self, expected_value, start, end):
+    ptr_size = self.reader.PointerSize()
+    for slot in range(start, end, ptr_size):
+      if not self.reader.IsValidAddress(slot): return None
+      value = self.reader.ReadUIntPtr(slot)
+      if value == expected_value: return slot
+    return None
+
+  def TryExtractErrorMessage(self, slot, start, end, print_message):
+    ptr_size = self.reader.PointerSize()
+    end_marker = ERROR_MESSAGE_MARKER + 1
+    header_size = 1
+    end_search = start + 1024 + (header_size * ptr_size)
+    end_slot = self.FindPtr(end_marker, end_search, end_search + ptr_size * 512)
+    if not end_slot: return start
+    print("Error Message (start=%s):" % self.heap.FormatIntPtr(slot))
+    slot += ptr_size
+    (message_start, message) = self.FindFirstAsciiString(slot)
+    self.FormatStackTrace(message, print_message)
+    stack_start = end_slot + ptr_size
+    return stack_start
+
+  def TryExtractOldStyleStackTrace(self, message_slot, start, end,
+                                   print_message):
+    ptr_size = self.reader.PointerSize()
+    if message_slot == 0:
+      """
+      On Mac we don't always get proper magic markers, so just try printing
+      the first long ascii string found on the stack.
+      """
+      magic1 = None
+      magic2 = None
+      message_start, message = self.FindFirstAsciiString(start, end, 128)
+      if message_start is None: return start
+    else:
+      # The magic markers sit at the start of the message object (see the
+      # caller), followed by two pointers and the message start address.
+      magic1 = self.reader.ReadUIntPtr(message_slot)
+      magic2 = self.reader.ReadUIntPtr(message_slot + ptr_size)
+      message_start = self.reader.ReadUIntPtr(message_slot + ptr_size * 4)
+      message = self.reader.ReadAsciiString(message_start)
+    stack_start = message_start + len(message) + 1
+    # Make sure the address is word aligned
+    stack_start = stack_start - (stack_start % ptr_size)
+    if magic1 is None:
+      print("Stack Message:")
+      print("  message start: %s" % self.heap.FormatIntPtr(message_start))
+      print("  stack_start:   %s" % self.heap.FormatIntPtr(stack_start ))
+    else:
+      ptr1 = self.reader.ReadUIntPtr(message_slot + ptr_size * 2)
+      ptr2 = self.reader.ReadUIntPtr(message_slot + ptr_size * 3)
+      print("Stack Message:")
+      print("  magic1:        %s" % self.heap.FormatIntPtr(magic1))
+      print("  magic2:        %s" % self.heap.FormatIntPtr(magic2))
+      print("  ptr1:          %s" % self.heap.FormatIntPtr(ptr1))
+      print("  ptr2:          %s" % self.heap.FormatIntPtr(ptr2))
+      print("  message start: %s" % self.heap.FormatIntPtr(message_start))
+      print("  stack_start:   %s" % self.heap.FormatIntPtr(stack_start ))
+      print("")
+    self.FormatStackTrace(message, print_message)
+    return stack_start
+
+  def FormatStackTrace(self, message, print_message):
+    if not print_message:
+      print("  Use `dsa` to print the message with annotated addresses.")
+      print("")
+      return
+    ptr_size = self.reader.PointerSize()
+    # Annotate all addresses in the dumped message
+    prog = re.compile("[0-9a-fA-F]{%d}" % (ptr_size * 2))
+    addresses = list(set(prog.findall(message)))
+    for i in range(len(addresses)):
+      address_org = addresses[i]
+      address = self.heap.FormatIntPtr(int(address_org, 16))
+      if address_org != address:
+        message = message.replace(address_org, address)
+    print("Message:")
+    print("="*80)
+    print(message)
+    print("="*80)
+    print("")
+
+
+  def TryInferFramePointer(self, slot, address):
+    """ Assume we have a framepointer if we find 4 consecutive links """
+    for i in range(0, 4):
+      if not self.reader.IsExceptionStackAddress(address): return 0
+      next_address = self.reader.ReadUIntPtr(address)
+      if next_address == address: return 0
+      address = next_address
+    return slot
+
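+  # Heuristic: walk the saved frame-pointer chain and inspect the word just
+  # below each frame pointer (where V8 frames typically keep the context);
+  # the most frequently seen tagged value is assumed to be the context.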
+  def TryInferContext(self, address):
+    if self.context: return
+    ptr_size = self.reader.PointerSize()
+    possible_context = dict()
+    count = 0
+    while self.reader.IsExceptionStackAddress(address):
+      prev_addr = self.reader.ReadUIntPtr(address-ptr_size)
+      if self.heap.IsTaggedObjectAddress(prev_addr):
+        if prev_addr in possible_context:
+          possible_context[prev_addr] += 1
+        else:
+          possible_context[prev_addr] = 1
+      address = self.reader.ReadUIntPtr(address)
+      count += 1
+    if count <= 5 or len(possible_context) == 0: return
+    # Find entry with highest count
+    possible_context = sorted(possible_context.items(),
+                              key=lambda pair: pair[1])
+    address, count = possible_context[-1]
+    if count <= 4: return
+    self.context = address
+
+  def InterpretMemory(self, start, end):
+    # On 64 bit we omit frame pointers, so we have to do some more guesswork.
+    frame_pointer = 0
+    if not self.reader.Is64():
+      frame_pointer = self.reader.ExceptionFP()
+      # Follow the framepointer into the address range
+      while frame_pointer and frame_pointer < start:
+        frame_pointer = self.reader.ReadUIntPtr(frame_pointer)
+        if not self.reader.IsExceptionStackAddress(frame_pointer) or \
+            not frame_pointer:
+          frame_pointer = 0
+          break
+    in_oom_dump_area = False
+    is_stack = self.reader.IsExceptionStackAddress(start)
+    free_space_end = 0
+    ptr_size = self.reader.PointerSize()
+
+    for slot in range(start, end, ptr_size):
+      if not self.reader.IsValidAddress(slot):
+        print("%s: Address is not contained within the minidump!" % slot)
+        return
+      maybe_address = self.reader.ReadUIntPtr(slot)
+      address_info = []
+      # Mark continuous free space objects
+      if slot == free_space_end:
+        address_info.append("+")
+      elif slot <= free_space_end:
+        address_info.append("|")
+      else:
+        free_space_end = 0
+
+      heap_object = self.SenseObject(maybe_address, slot)
+      if heap_object:
+        # Detect Free-space ranges
+        if isinstance(heap_object, KnownMap) and \
+            heap_object.known_name == "FreeSpaceMap":
+          # The free-space length is stored as a Smi in the next slot.
+          length = self.reader.ReadUIntPtr(slot + ptr_size)
+          if self.heap.IsSmi(length):
+            length = self.heap.SmiUntag(length)
+            free_space_end = slot + length - ptr_size
+        address_info.append(str(heap_object))
+      relative_offset = self.heap.RelativeOffset(slot, maybe_address)
+      if relative_offset:
+        address_info.append(relative_offset)
+      if maybe_address == self.context:
+        address_info.append("CONTEXT")
+
+      maybe_address_contents = None
+      if is_stack:
+        if self.reader.IsExceptionStackAddress(maybe_address):
+          maybe_address_contents = \
+              self.reader.ReadUIntPtr(maybe_address) & 0xFFFFFFFF
+          if maybe_address_contents == 0xdecade00:
+            in_oom_dump_area = True
+          if frame_pointer == 0:
+            frame_pointer = self.TryInferFramePointer(slot, maybe_address)
+            if frame_pointer != 0:
+              self.TryInferContext(slot)
+        maybe_symbol = self.reader.FindSymbol(maybe_address)
+        if in_oom_dump_area:
+          if maybe_address_contents == 0xdecade00:
+            address_info = ["<==== HeapStats start marker"]
+          elif maybe_address_contents == 0xdecade01:
+            address_info = ["<==== HeapStats end marker"]
+          elif maybe_address_contents is not None:
+            address_info = [" %d (%d Mbytes)" % (maybe_address_contents,
+                                                 maybe_address_contents >> 20)]
+        if slot == frame_pointer:
+          if not self.reader.IsExceptionStackAddress(maybe_address):
+            address_info.append("<==== BAD frame pointer")
+            frame_pointer = 0
+          else:
+            address_info.append("<==== Frame pointer")
+          frame_pointer = maybe_address
+      address_type_marker = self.heap.AddressTypeMarker(maybe_address)
+      string_value = self.reader.ReadAsciiPtr(slot)
+      print("%s: %s %s %s %s" % (self.reader.FormatIntPtr(slot),
+                           self.reader.FormatIntPtr(maybe_address),
+                           address_type_marker,
+                           string_value,
+                           ' | '.join(address_info)))
+      if maybe_address_contents == 0xdecade01:
+        in_oom_dump_area = False
+      heap_object = self.heap.FindObject(maybe_address)
+      if heap_object:
+        heap_object.Print(Printer())
+        print("")
+
+WEB_HEADER = """
+<!DOCTYPE html>
+<html>
+<head>
+<meta content="text/html; charset=utf-8" http-equiv="content-type">
+<style media="screen" type="text/css">
+
+.code {
+  font-family: monospace;
+}
+
+.dmptable {
+  border-collapse : collapse;
+  border-spacing : 0px;
+  table-layout: fixed;
+}
+
+.codedump {
+  border-collapse : collapse;
+  border-spacing : 0px;
+  table-layout: fixed;
+}
+
+.addrcomments {
+  border : 0px;
+}
+
+.register {
+  padding-right : 1em;
+}
+
+.header {
+  clear : both;
+}
+
+.header .navigation {
+  float : left;
+}
+
+.header .dumpname {
+  float : right;
+}
+
+tr.highlight-line {
+  background-color : yellow;
+}
+
+.highlight {
+  background-color : magenta;
+}
+
+tr.inexact-highlight-line {
+  background-color : pink;
+}
+
+input {
+  background-color: inherit;
+  border: 1px solid LightGray;
+}
+
+.dumpcomments {
+  border : 1px solid LightGray;
+  width : 32em;
+}
+
+.regions td {
+  padding:0 15px 0 15px;
+}
+
+.stackframe td {
+  background-color : cyan;
+}
+
+.stackaddress, .sa {
+  background-color : LightGray;
+}
+
+.stackval, .sv {
+  background-color : LightCyan;
+}
+
+.frame {
+  background-color : cyan;
+}
+
+.commentinput, .ci {
+  width : 20em;
+}
+
+/* a.nodump */
+a.nd:visited {
+  color : black;
+  text-decoration : none;
+}
+
+a.nd:link {
+  color : black;
+  text-decoration : none;
+}
+
+a:visited {
+  color : blueviolet;
+}
+
+a:link {
+  color : blue;
+}
+
+.disasmcomment {
+  color : DarkGreen;
+}
+
+</style>
+
+<script type="application/javascript">
+
+var address_str = "address-";
+var address_len = address_str.length;
+
+function comment() {
+  var s = event.srcElement.id;
+  var index = s.indexOf(address_str);
+  if (index >= 0) {
+    send_comment(s.substring(index + address_len), event.srcElement.value);
+  }
+}
+var c = comment;
+
+function send_comment(address, comment) {
+  xmlhttp = new XMLHttpRequest();
+  address = encodeURIComponent(address)
+  comment = encodeURIComponent(comment)
+  xmlhttp.open("GET",
+      "setcomment?%(query_dump)s&address=" + address +
+      "&comment=" + comment, true);
+  xmlhttp.send();
+}
+
+var dump_str = "dump-";
+var dump_len = dump_str.length;
+
+function dump_comment() {
+  var s = event.srcElement.id;
+  var index = s.indexOf(dump_str);
+  if (index >= 0) {
+    send_dump_desc(s.substring(index + dump_len), event.srcElement.value);
+  }
+}
+
+function send_dump_desc(name, desc) {
+  xmlhttp = new XMLHttpRequest();
+  name = encodeURIComponent(name)
+  desc = encodeURIComponent(desc)
+  xmlhttp.open("GET",
+      "setdumpdesc?dump=" + name +
+      "&description=" + desc, true);
+  xmlhttp.send();
+}
+
+function onpage(kind, address) {
+  xmlhttp = new XMLHttpRequest();
+  kind = encodeURIComponent(kind)
+  address = encodeURIComponent(address)
+  xmlhttp.onreadystatechange = function() {
+    if (xmlhttp.readyState==4 && xmlhttp.status==200) {
+      location.reload(true)
+    }
+  };
+  xmlhttp.open("GET",
+      "setpageaddress?%(query_dump)s&kind=" + kind +
+      "&address=" + address);
+  xmlhttp.send();
+}
+
+</script>
+
+<title>Dump %(dump_name)s</title>
+</head>
+
+<body>
+  <div class="header">
+    <form class="navigation" action="search.html">
+      <a href="summary.html?%(query_dump)s">Context info</a>&nbsp;&nbsp;&nbsp;
+      <a href="info.html?%(query_dump)s">Dump info</a>&nbsp;&nbsp;&nbsp;
+      <a href="modules.html?%(query_dump)s">Modules</a>&nbsp;&nbsp;&nbsp;
+      &nbsp;
+      <input type="search" name="val">
+      <input type="submit" name="search" value="Search">
+      <input type="hidden" name="dump" value="%(dump_name)s">
+    </form>
+    <form class="navigation" action="disasm.html#highlight">
+      &nbsp;
+      &nbsp;
+      &nbsp;
+      <input type="search" name="val">
+      <input type="submit" name="disasm" value="Disasm">
+      &nbsp;
+      &nbsp;
+      &nbsp;
+      <a href="dumps.html">Dumps...</a>
+    </form>
+  </div>
+  <br>
+  <hr>
+"""
+
+
+WEB_FOOTER = """
+</body>
+</html>
+"""
+
+
+class WebParameterError(Exception):
+  def __init__(self, message):
+    Exception.__init__(self, message)
+
+
+class InspectionWebHandler(BaseHTTPServer.BaseHTTPRequestHandler):
+  def formatter(self, query_components):
+    name = query_components.get("dump", [None])[0]
+    return self.server.get_dump_formatter(name)
+
+  def send_success_html_headers(self):
+    self.send_response(200)
+    self.send_header("Cache-Control", "no-cache, no-store, must-revalidate")
+    self.send_header("Pragma", "no-cache")
+    self.send_header("Expires", "0")
+    self.send_header('Content-type','text/html')
+    self.end_headers()
+    return
+
+  def do_GET(self):
+    try:
+      parsedurl = urlparse.urlparse(self.path)
+      query_components = urlparse.parse_qs(parsedurl.query)
+      if parsedurl.path == "/dumps.html":
+        self.send_success_html_headers()
+        out_buffer = StringIO.StringIO()
+        self.server.output_dumps(out_buffer)
+        self.wfile.write(out_buffer.getvalue())
+      elif parsedurl.path == "/summary.html":
+        self.send_success_html_headers()
+        out_buffer = StringIO.StringIO()
+        self.formatter(query_components).output_summary(out_buffer)
+        self.wfile.write(out_buffer.getvalue())
+      elif parsedurl.path == "/info.html":
+        self.send_success_html_headers()
+        out_buffer = StringIO.StringIO()
+        self.formatter(query_components).output_info(out_buffer)
+        self.wfile.write(out_buffer.getvalue())
+      elif parsedurl.path == "/modules.html":
+        self.send_success_html_headers()
+        out_buffer = StringIO.StringIO()
+        self.formatter(query_components).output_modules(out_buffer)
+        self.wfile.write(out_buffer.getvalue())
+      elif parsedurl.path == "/search.html" or parsedurl.path == "/s":
+        address = query_components.get("val", [])
+        if len(address) != 1:
+          self.send_error(404, "Invalid params")
+          return
+        self.send_success_html_headers()
+        out_buffer = StringIO.StringIO()
+        self.formatter(query_components).output_search_res(
+            out_buffer, address[0])
+        self.wfile.write(out_buffer.getvalue())
+      elif parsedurl.path == "/disasm.html":
+        address = query_components.get("val", [])
+        exact = query_components.get("exact", ["on"])
+        if len(address) != 1:
+          self.send_error(404, "Invalid params")
+          return
+        self.send_success_html_headers()
+        out_buffer = StringIO.StringIO()
+        self.formatter(query_components).output_disasm(
+            out_buffer, address[0], exact[0])
+        self.wfile.write(out_buffer.getvalue())
+      elif parsedurl.path == "/data.html":
+        address = query_components.get("val", [])
+        datakind = query_components.get("type", ["address"])
+        if len(address) == 1 and len(datakind) == 1:
+          self.send_success_html_headers()
+          out_buffer = StringIO.StringIO()
+          self.formatter(query_components).output_data(
+              out_buffer, address[0], datakind[0])
+          self.wfile.write(out_buffer.getvalue())
+        else:
+          self.send_error(404,'Invalid params')
+      elif parsedurl.path == "/setdumpdesc":
+        name = query_components.get("dump", [""])
+        description = query_components.get("description", [""])
+        if len(name) == 1 and len(description) == 1:
+          name = name[0]
+          description = description[0]
+          if self.server.set_dump_desc(name, description):
+            self.send_success_html_headers()
+            self.wfile.write("OK")
+            return
+        self.send_error(404,'Invalid params')
+      elif parsedurl.path == "/setcomment":
+        address = query_components.get("address", [])
+        comment = query_components.get("comment", [""])
+        if len(address) == 1 and len(comment) == 1:
+          address = address[0]
+          comment = comment[0]
+          self.formatter(query_components).set_comment(address, comment)
+          self.send_success_html_headers()
+          self.wfile.write("OK")
+        else:
+          self.send_error(404,'Invalid params')
+      elif parsedurl.path == "/setpageaddress":
+        kind = query_components.get("kind", [])
+        address = query_components.get("address", [""])
+        if len(kind) == 1 and len(address) == 1:
+          kind = kind[0]
+          address = address[0]
+          self.formatter(query_components).set_page_address(kind, address)
+          self.send_success_html_headers()
+          self.wfile.write("OK")
+        else:
+          self.send_error(404,'Invalid params')
+      else:
+        self.send_error(404,'File Not Found: %s' % self.path)
+
+    except IOError:
+      self.send_error(404,'File Not Found: %s' % self.path)
+
+    except WebParameterError as e:
+      self.send_error(404, 'Web parameter error: %s' % e.message)
+
+
+HTML_REG_FORMAT = "<span class=\"register\"><b>%s</b>:&nbsp;%s</span><br/>\n"
+
+
+class InspectionWebFormatter(object):
+  CONTEXT_FULL = 0
+  CONTEXT_SHORT = 1
+
+  def __init__(self, switches, minidump_name, http_server):
+    self.dumpfilename = os.path.split(minidump_name)[1]
+    self.encfilename = urllib.urlencode({ 'dump' : self.dumpfilename })
+    self.reader = MinidumpReader(switches, minidump_name)
+    self.server = http_server
+
+    # Set up the heap
+    exception_thread = self.reader.thread_map[self.reader.exception.thread_id]
+    stack_top = self.reader.ExceptionSP()
+    stack_bottom = exception_thread.stack.start + \
+        exception_thread.stack.memory.data_size
+    stack_map = {self.reader.ExceptionIP(): -1}
+    for slot in range(stack_top, stack_bottom, self.reader.PointerSize()):
+      maybe_address = self.reader.ReadUIntPtr(slot)
+      if not maybe_address in stack_map:
+        stack_map[maybe_address] = slot
+    self.heap = V8Heap(self.reader, stack_map)
+
+    self.padawan = InspectionPadawan(self.reader, self.heap)
+    self.comments = InspectionInfo(minidump_name, self.reader)
+    self.padawan.known_first_old_page = (
+        self.comments.get_page_address("oldpage"))
+    self.padawan.known_first_map_page = (
+        self.comments.get_page_address("mappage"))
+
+  def set_comment(self, straddress, comment):
+    try:
+      address = int(straddress, 0)
+      self.comments.set_comment(address, comment)
+    except ValueError:
+      print("Invalid address")
+
+  def set_page_address(self, kind, straddress):
+    try:
+      address = int(straddress, 0)
+      if kind == "oldpage":
+        self.padawan.known_first_old_page = address
+      elif kind == "mappage":
+        self.padawan.known_first_map_page = address
+      self.comments.save_page_address(kind, address)
+    except ValueError:
+      print("Invalid address")
+
+  def td_from_address(self, f, address):
+    f.write("<td %s>" % self.comments.get_style_class_string(address))
+
+  def format_address(self, maybeaddress, straddress = None):
+    if maybeaddress is None:
+      return "not in dump"
+    else:
+      if straddress is None:
+        straddress = "0x" + self.reader.FormatIntPtr(maybeaddress)
+      style_class = ""
+      if not self.reader.IsValidAddress(maybeaddress):
+        style_class = "class=nd"
+      return ("<a %s href=s?%s&amp;val=%s>%s</a>" %
+              (style_class, self.encfilename, straddress, straddress))
+
+  def output_header(self, f):
+    f.write(WEB_HEADER %
+        { "query_dump" : self.encfilename,
+          "dump_name"  : cgi.escape(self.dumpfilename) })
+
+  def output_footer(self, f):
+    f.write(WEB_FOOTER)
+
+  MAX_CONTEXT_STACK = 2048
+
+  def output_summary(self, f):
+    self.output_header(f)
+    f.write('<div class="code">')
+    self.output_context(f, InspectionWebFormatter.CONTEXT_SHORT)
+    self.output_disasm_pc(f)
+
+    # Output stack
+    exception_thread = self.reader.thread_map[self.reader.exception.thread_id]
+    stack_top = self.reader.ExceptionSP()
+    stack_bottom = min(exception_thread.stack.start + \
+        exception_thread.stack.memory.data_size,
+        stack_top + self.MAX_CONTEXT_STACK)
+    self.output_words(f, stack_top - 16, stack_bottom, stack_top, "Stack")
+
+    f.write('</div>')
+    self.output_footer(f)
+    return
+
+  def output_info(self, f):
+    self.output_header(f)
+    f.write("<h3>Dump info</h3>")
+    f.write("Description: ")
+    self.server.output_dump_desc_field(f, self.dumpfilename)
+    f.write("<br>")
+    f.write("Filename: ")
+    f.write("<span class=\"code\">%s</span><br>" % (self.dumpfilename))
+    dt = datetime.datetime.fromtimestamp(self.reader.header.time_date_stampt)
+    f.write("Timestamp: %s<br>" % dt.strftime('%Y-%m-%d %H:%M:%S'))
+    self.output_context(f, InspectionWebFormatter.CONTEXT_FULL)
+    self.output_address_ranges(f)
+    self.output_footer(f)
+    return
+
+  def output_address_ranges(self, f):
+    regions = {}
+    def print_region(_reader, start, size, _location):
+      regions[start] = size
+    self.reader.ForEachMemoryRegion(print_region)
+    f.write("<h3>Available memory regions</h3>")
+    f.write('<div class="code">')
+    f.write("<table class=\"regions\">")
+    f.write("<thead><tr>")
+    f.write("<th>Start address</th>")
+    f.write("<th>End address</th>")
+    f.write("<th>Number of bytes</th>")
+    f.write("</tr></thead>")
+    for start in sorted(regions):
+      size = regions[start]
+      f.write("<tr>")
+      f.write("<td>%s</td>" % self.format_address(start))
+      f.write("<td>&nbsp;%s</td>" % self.format_address(start + size))
+      f.write("<td>&nbsp;%d</td>" % size)
+      f.write("</tr>")
+    f.write("</table>")
+    f.write('</div>')
+    return
+
+  def output_module_details(self, f, module):
+    f.write("<b>%s</b>" % GetModuleName(self.reader, module))
+    file_version = GetVersionString(module.version_info.dwFileVersionMS,
+                                    module.version_info.dwFileVersionLS)
+    product_version = GetVersionString(module.version_info.dwProductVersionMS,
+                                       module.version_info.dwProductVersionLS)
+    f.write("<br>&nbsp;&nbsp;")
+    f.write("base: %s" % self.reader.FormatIntPtr(module.base_of_image))
+    f.write("<br>&nbsp;&nbsp;")
+    f.write("  end: %s" % self.reader.FormatIntPtr(module.base_of_image +
+                                            module.size_of_image))
+    f.write("<br>&nbsp;&nbsp;")
+    f.write("  file version: %s" % file_version)
+    f.write("<br>&nbsp;&nbsp;")
+    f.write("  product version: %s" % product_version)
+    f.write("<br>&nbsp;&nbsp;")
+    time_date_stamp = datetime.datetime.fromtimestamp(module.time_date_stamp)
+    f.write("  timestamp: %s" % time_date_stamp)
+    f.write("<br>");
+
+  def output_modules(self, f):
+    self.output_header(f)
+    f.write('<div class="code">')
+    for module in self.reader.module_list.modules:
+      self.output_module_details(f, module)
+    f.write("</div>")
+    self.output_footer(f)
+    return
+
+  def output_context(self, f, details):
+    exception_thread = self.reader.thread_map[self.reader.exception.thread_id]
+    f.write("<h3>Exception context</h3>")
+    f.write('<div class="code">')
+    f.write("Thread id: %d" % exception_thread.id)
+    f.write("&nbsp;&nbsp; Exception code: %08X<br/>" %
+            self.reader.exception.exception.code)
+    if details == InspectionWebFormatter.CONTEXT_FULL:
+      if self.reader.exception.exception.parameter_count > 0:
+        f.write("&nbsp;&nbsp; Exception parameters: ")
+        for i in range(0, self.reader.exception.exception.parameter_count):
+          f.write("%08x" % self.reader.exception.exception.information[i])
+        f.write("<br><br>")
+
+    for r in CONTEXT_FOR_ARCH[self.reader.arch]:
+      f.write(HTML_REG_FORMAT %
+              (r, self.format_address(self.reader.Register(r))))
+    # TODO(vitalyr): decode eflags.
+    if self.reader.arch in [MD_CPU_ARCHITECTURE_ARM, MD_CPU_ARCHITECTURE_ARM64]:
+      f.write("<b>cpsr</b>: %s" % bin(self.reader.exception_context.cpsr)[2:])
+    else:
+      f.write("<b>eflags</b>: %s" %
+              bin(self.reader.exception_context.eflags)[2:])
+    f.write('</div>')
+    return
+
+  def align_down(self, a, size):
+    alignment_correction = a % size
+    return a - alignment_correction
+
+  def align_up(self, a, size):
+    alignment_correction = (size - 1) - ((a + size - 1) % size)
+    return a + alignment_correction
+
+  def format_object(self, address):
+    heap_object = self.padawan.SenseObject(address)
+    return cgi.escape(str(heap_object or ""))
+
+  def output_data(self, f, straddress, datakind):
+    try:
+      self.output_header(f)
+      address = int(straddress, 0)
+      if not self.reader.IsValidAddress(address):
+        f.write("<h3>Address 0x%x not found in the dump.</h3>" % address)
+        return
+      region = self.reader.FindRegion(address)
+      if datakind == "address":
+        self.output_words(f, region[0], region[0] + region[1], address, "Dump")
+      elif datakind == "ascii":
+        self.output_ascii(f, region[0], region[0] + region[1], address)
+      self.output_footer(f)
+
+    except ValueError:
+      f.write("<h3>Unrecognized address format \"%s\".</h3>" % straddress)
+    return
+
+  def output_words(self, f, start_address, end_address,
+                   highlight_address, desc):
+    region = self.reader.FindRegion(highlight_address)
+    if region is None:
+      f.write("<h3>Address 0x%x not found in the dump.</h3>" %
+              (highlight_address))
+      return
+    size = self.heap.PointerSize()
+    start_address = self.align_down(start_address, size)
+    low = self.align_down(region[0], size)
+    high = self.align_up(region[0] + region[1], size)
+    if start_address < low:
+      start_address = low
+    end_address = self.align_up(end_address, size)
+    if end_address > high:
+      end_address = high
+
+    expand = ""
+    if start_address != low or end_address != high:
+      expand = ("(<a href=\"data.html?%s&amp;val=0x%x#highlight\">"
+                " more..."
+                " </a>)" %
+                (self.encfilename, highlight_address))
+
+    f.write("<h3>%s 0x%x - 0x%x, "
+            "highlighting <a href=\"#highlight\">0x%x</a> %s</h3>" %
+            (desc, start_address, end_address, highlight_address, expand))
+    f.write('<div class="code">')
+    f.write("<table class=codedump>")
+
+    for j in range(0, end_address - start_address, size):
+      slot = start_address + j
+      heap_object = ""
+      maybe_address = None
+      end_region = region[0] + region[1]
+      if slot < region[0] or slot + size > end_region:
+        straddress = "0x"
+        for i in range(end_region, slot + size):
+          straddress += "??"
+        for i in reversed(
+            range(max(slot, region[0]), min(slot + size, end_region))):
+          straddress += "%02x" % self.reader.ReadU8(i)
+        for i in range(slot, region[0]):
+          straddress += "??"
+      else:
+        maybe_address = self.reader.ReadUIntPtr(slot)
+        straddress = self.format_address(maybe_address)
+        if maybe_address:
+          heap_object = self.format_object(maybe_address)
+
+      address_fmt = "%s&nbsp;</td>"
+      if slot == highlight_address:
+        f.write("<tr class=highlight-line>")
+        address_fmt = "<a id=highlight></a>%s&nbsp;</td>"
+      elif slot < highlight_address and highlight_address < slot + size:
+        f.write("<tr class=inexact-highlight-line>")
+        address_fmt = "<a id=highlight></a>%s&nbsp;</td>"
+      else:
+        f.write("<tr>")
+
+      f.write("<td>")
+      self.output_comment_box(f, "da-", slot)
+      f.write("</td>")
+      self.td_from_address(f, slot)
+      f.write(address_fmt % self.format_address(slot))
+      self.td_from_address(f, maybe_address)
+      f.write(":&nbsp;%s&nbsp;</td>" % straddress)
+      f.write("<td>")
+      if maybe_address != None:
+        self.output_comment_box(
+            f, "sv-" + self.reader.FormatIntPtr(slot), maybe_address)
+      f.write("</td>")
+      f.write("<td>%s</td>" % (heap_object or ''))
+      f.write("</tr>")
+    f.write("</table>")
+    f.write("</div>")
+    return
+
+  def output_ascii(self, f, start_address, end_address, highlight_address):
+    region = self.reader.FindRegion(highlight_address)
+    if region is None:
+      f.write("<h3>Address %x not found in the dump.</h3>" %
+          highlight_address)
+      return
+    if start_address < region[0]:
+      start_address = region[0]
+    if end_address > region[0] + region[1]:
+      end_address = region[0] + region[1]
+
+    expand = ""
+    if start_address != region[0] or end_address != region[0] + region[1]:
+      link = ("data.html?%s&amp;val=0x%x&amp;type=ascii#highlight" %
+              (self.encfilename, highlight_address))
+      expand = "(<a href=\"%s\">more...</a>)" % link
+
+    f.write("<h3>ASCII dump 0x%x - 0x%x, highlighting 0x%x %s</h3>" %
+            (start_address, end_address, highlight_address, expand))
+
+    line_width = 64
+
+    f.write('<div class="code">')
+
+    start = self.align_down(start_address, line_width)
+
+    for i in range(end_address - start):
+      address = start + i
+      if address % 64 == 0:
+        if address != start:
+          f.write("<br>")
+        f.write("0x%08x:&nbsp;" % address)
+      if address < start_address:
+        f.write("&nbsp;")
+      else:
+        if address == highlight_address:
+          f.write("<span class=\"highlight\">")
+        code = self.reader.ReadU8(address)
+        if code < 127 and code >= 32:
+          f.write("&#")
+          f.write(str(code))
+          f.write(";")
+        else:
+          f.write("&middot;")
+        if address == highlight_address:
+          f.write("</span>")
+    f.write("</div>")
+    return
+
+  def output_disasm(self, f, straddress, strexact):
+    try:
+      self.output_header(f)
+      address = int(straddress, 0)
+      if not self.reader.IsValidAddress(address):
+        f.write("<h3>Address 0x%x not found in the dump.</h3>" % address)
+        return
+      region = self.reader.FindRegion(address)
+      self.output_disasm_range(
+          f, region[0], region[0] + region[1], address, strexact == "on")
+      self.output_footer(f)
+    except ValueError:
+      f.write("<h3>Unrecognized address format \"%s\".</h3>" % straddress)
+    return
+
+  def output_disasm_range(
+      self, f, start_address, end_address, highlight_address, exact):
+    region = self.reader.FindRegion(highlight_address)
+    if start_address < region[0]:
+      start_address = region[0]
+    if end_address > region[0] + region[1]:
+      end_address = region[0] + region[1]
+    count = end_address - start_address
+    lines = self.reader.GetDisasmLines(start_address, count)
+    found = False
+    if exact:
+      for line in lines:
+        if line[0] + start_address == highlight_address:
+          found = True
+          break
+      if not found:
+        start_address = highlight_address
+        count = end_address - start_address
+        lines = self.reader.GetDisasmLines(highlight_address, count)
+    expand = ""
+    if start_address != region[0] or end_address != region[0] + region[1]:
+      exactness = ""
+      if exact and not found and end_address == region[0] + region[1]:
+        exactness = "&amp;exact=off"
+      expand = ("(<a href=\"disasm.html?%s%s"
+                "&amp;val=0x%x#highlight\">more...</a>)" %
+                (self.encfilename, exactness, highlight_address))
+
+    f.write("<h3>Disassembling 0x%x - 0x%x, highlighting 0x%x %s</h3>" %
+            (start_address, end_address, highlight_address, expand))
+    f.write('<div class="code">')
+    f.write("<table class=\"codedump\">");
+    for i in range(len(lines)):
+      line = lines[i]
+      next_address = count
+      if i + 1 < len(lines):
+        next_line = lines[i + 1]
+        next_address = next_line[0]
+      self.format_disasm_line(
+          f, start_address, line, next_address, highlight_address)
+    f.write("</table>")
+    f.write("</div>")
+    return
+
+  def annotate_disasm_addresses(self, line):
+    extra = []
+    for m in ADDRESS_RE.finditer(line):
+      maybe_address = int(m.group(0), 16)
+      formatted_address = self.format_address(maybe_address, m.group(0))
+      line = line.replace(m.group(0), formatted_address)
+      object_info = self.padawan.SenseObject(maybe_address)
+      if not object_info:
+        continue
+      extra.append(cgi.escape(str(object_info)))
+    if len(extra) == 0:
+      return line
+    return ("%s <span class=disasmcomment>;; %s</span>" %
+            (line, ", ".join(extra)))
+
+  def format_disasm_line(
+      self, f, start, line, next_address, highlight_address):
+    line_address = start + line[0]
+    address_fmt = "  <td>%s</td>"
+    if line_address == highlight_address:
+      f.write("<tr class=highlight-line>")
+      address_fmt = "  <td><a id=highlight>%s</a></td>"
+    elif (line_address < highlight_address and
+          highlight_address < next_address + start):
+      f.write("<tr class=inexact-highlight-line>")
+      address_fmt = "  <td><a id=highlight>%s</a></td>"
+    else:
+      f.write("<tr>")
+    num_bytes = next_address - line[0]
+    stack_slot = self.heap.stack_map.get(line_address)
+    marker = ""
+    if stack_slot:
+      marker = "=>"
+
+    code = line[1]
+
+    # Some disassemblers insert spaces between each byte,
+    # while some do not.
+    if code[2] == " ":
+        op_offset = 3 * num_bytes - 1
+    else:
+        op_offset = 2 * num_bytes
+
+    # Compute the actual call target which the disassembler is too stupid
+    # to figure out (it adds the call offset to the disassembly offset rather
+    # than the absolute instruction address).
+    if self.heap.reader.arch == MD_CPU_ARCHITECTURE_X86:
+      if code.startswith("e8"):
+        words = code.split()
+        if len(words) > 6 and words[5] == "call":
+          offset = int(words[4] + words[3] + words[2] + words[1], 16)
+          target = (line_address + offset + 5) & 0xFFFFFFFF
+          code = code.replace(words[6], "0x%08x" % target)
+    # TODO(jkummerow): port this hack to ARM and x64.
+
+    opcodes = code[:op_offset]
+    code = self.annotate_disasm_addresses(code[op_offset:])
+    f.write("  <td>")
+    self.output_comment_box(f, "codel-", line_address)
+    f.write("</td>")
+    f.write(address_fmt % marker)
+    f.write("  ")
+    self.td_from_address(f, line_address)
+    f.write(self.format_address(line_address))
+    f.write(" (+0x%x)</td>" % line[0])
+    f.write("<td>:&nbsp;%s&nbsp;</td>" % opcodes)
+    f.write("<td>%s</td>" % code)
+    f.write("</tr>")
+
+  def output_comment_box(self, f, prefix, address):
+    comment = self.comments.get_comment(address)
+    value = ""
+    if comment:
+      value = " value=\"%s\"" % cgi.escape(comment)
+    f.write("<input type=text class=ci "
+            "id=%s-address-0x%s onchange=c()%s>" %
+            (prefix,
+             self.reader.FormatIntPtr(address),
+             value))
+
+  MAX_FOUND_RESULTS = 100
+
+  def output_find_results(self, f, results):
+    f.write("Addresses")
+    toomany = len(results) > self.MAX_FOUND_RESULTS
+    if toomany:
+      f.write("(found %i results, displaying only first %i)" %
+              (len(results), self.MAX_FOUND_RESULTS))
+    f.write(": ")
+    results = sorted(results)
+    results = results[:min(len(results), self.MAX_FOUND_RESULTS)]
+    for address in results:
+      f.write("<span %s>%s</span>" %
+              (self.comments.get_style_class_string(address),
+               self.format_address(address)))
+    if toomany:
+      f.write("...")
+
+
+  def output_page_info(self, f, page_kind, page_address, my_page_address):
+    if my_page_address == page_address and page_address != 0:
+      f.write("Marked first %s page." % page_kind)
+    else:
+      f.write("<span id=\"%spage\" style=\"display:none\">" % page_kind)
+      f.write("Marked first %s page." % page_kind)
+      f.write("</span>\n")
+      f.write("<button onclick=\"onpage('%spage', '0x%x')\">" %
+              (page_kind, my_page_address))
+      f.write("Mark as first %s page</button>" % page_kind)
+    return
+
+  def output_search_res(self, f, straddress):
+    try:
+      self.output_header(f)
+      f.write("<h3>Search results for %s</h3>" % straddress)
+
+      address = int(straddress, 0)
+
+      f.write("Comment: ")
+      self.output_comment_box(f, "search-", address)
+      f.write("<br>")
+
+      page_address = address & ~self.heap.PageAlignmentMask()
+
+      f.write("Page info: ")
+      self.output_page_info(f, "old", self.padawan.known_first_old_page, \
+                            page_address)
+      self.output_page_info(f, "map", self.padawan.known_first_map_page, \
+                            page_address)
+
+      if not self.reader.IsValidAddress(address):
+        f.write("<h3>The contents at address %s not found in the dump.</h3>" % \
+                straddress)
+      else:
+        # Print as words
+        self.output_words(f, address - 8, address + 32, address, "Dump")
+
+        # Print as ASCII
+        f.write("<hr>")
+        self.output_ascii(f, address, address + 256, address)
+
+        # Print as code
+        f.write("<hr>")
+        self.output_disasm_range(f, address - 16, address + 16, address, True)
+
+      aligned_res, unaligned_res = self.reader.FindWordList(address)
+
+      if len(aligned_res) > 0:
+        f.write("<h3>Occurrences of 0x%x at aligned addresses</h3>" %
+                address)
+        self.output_find_results(f, aligned_res)
+
+      if len(unaligned_res) > 0:
+        f.write("<h3>Occurrences of 0x%x at unaligned addresses</h3>" % \
+                address)
+        self.output_find_results(f, unaligned_res)
+
+      if len(aligned_res) + len(unaligned_res) == 0:
+        f.write("<h3>No occurrences of 0x%x found in the dump</h3>" % address)
+
+      self.output_footer(f)
+
+    except ValueError:
+      f.write("<h3>Unrecognized address format \"%s\".</h3>" % straddress)
+    return
+
+  def output_disasm_pc(self, f):
+    address = self.reader.ExceptionIP()
+    if not self.reader.IsValidAddress(address):
+      return
+    self.output_disasm_range(f, address - 16, address + 16, address, True)
+
+
+WEB_DUMPS_HEADER = """
+<!DOCTYPE html>
+<html>
+<head>
+<meta content="text/html; charset=utf-8" http-equiv="content-type">
+<style media="screen" type="text/css">
+
+.dumplist {
+  border-collapse : collapse;
+  border-spacing : 0px;
+  font-family: monospace;
+}
+
+.dumpcomments {
+  border : 1px solid LightGray;
+  width : 32em;
+}
+
+</style>
+
+<script type="application/javascript">
+
+var dump_str = "dump-";
+var dump_len = dump_str.length;
+
+function dump_comment() {
+  var s = event.srcElement.id;
+  var index = s.indexOf(dump_str);
+  if (index >= 0) {
+    send_dump_desc(s.substring(index + dump_len), event.srcElement.value);
+  }
+}
+
+function send_dump_desc(name, desc) {
+  xmlhttp = new XMLHttpRequest();
+  name = encodeURIComponent(name)
+  desc = encodeURIComponent(desc)
+  xmlhttp.open("GET",
+      "setdumpdesc?dump=" + name +
+      "&description=" + desc, true);
+  xmlhttp.send();
+}
+
+</script>
+
+<title>Dump list</title>
+</head>
+
+<body>
+"""
+
+WEB_DUMPS_FOOTER = """
+</body>
+</html>
+"""
+
+DUMP_FILE_RE = re.compile(r"[-_0-9a-zA-Z][-\._0-9a-zA-Z]*\.dmp$")
+
+
+class InspectionWebServer(BaseHTTPServer.HTTPServer):
+  def __init__(self, port_number, switches, minidump_name):
+    BaseHTTPServer.HTTPServer.__init__(
+        self, ('localhost', port_number), InspectionWebHandler)
+    splitpath = os.path.split(minidump_name)
+    self.dumppath = splitpath[0]
+    self.dumpfilename = splitpath[1]
+    self.default_formatter = InspectionWebFormatter(
+        switches, minidump_name, self)
+    self.formatters = { self.dumpfilename : self.default_formatter }
+    self.switches = switches
+
+  def output_dump_desc_field(self, f, name):
+    try:
+      descfile = open(os.path.join(self.dumppath, name + ".desc"), "r")
+      desc = descfile.readline()
+      descfile.close()
+    except IOError:
+      desc = ""
+    f.write("<input type=\"text\" class=\"dumpcomments\" "
+            "id=\"dump-%s\" onchange=\"dump_comment()\" value=\"%s\">\n" %
+            (cgi.escape(name), desc))
+
+  def set_dump_desc(self, name, description):
+    if not DUMP_FILE_RE.match(name):
+      return False
+    fname = os.path.join(self.dumppath, name)
+    if not os.path.isfile(fname):
+      return False
+    fname = fname + ".desc"
+    descfile = open(fname, "w")
+    descfile.write(description)
+    descfile.close()
+    return True
+
+  def get_dump_formatter(self, name):
+    if name is None:
+      return self.default_formatter
+    else:
+      if not DUMP_FILE_RE.match(name):
+        raise WebParameterError("Invalid name '%s'" % name)
+      formatter = self.formatters.get(name, None)
+      if formatter is None:
+        try:
+          formatter = InspectionWebFormatter(
+              self.switches, os.path.join(self.dumppath, name), self)
+          self.formatters[name] = formatter
+        except IOError:
+          raise WebParameterError("Could not open dump '%s'" % name)
+      return formatter
+
+  def output_dumps(self, f):
+    f.write(WEB_DUMPS_HEADER)
+    f.write("<h3>List of available dumps</h3>")
+    f.write("<table class=\"dumplist\">\n")
+    f.write("<thead><tr>")
+    f.write("<th>Name</th>")
+    f.write("<th>File time</th>")
+    f.write("<th>Comment</th>")
+    f.write("</tr></thead>")
+    dumps_by_time = {}
+    for fname in os.listdir(self.dumppath):
+      if DUMP_FILE_RE.match(fname):
+        mtime = os.stat(os.path.join(self.dumppath, fname)).st_mtime
+        fnames = dumps_by_time.get(mtime, [])
+        fnames.append(fname)
+        dumps_by_time[mtime] = fnames
+
+    for mtime in sorted(dumps_by_time, reverse=True):
+      fnames = dumps_by_time[mtime]
+      for fname in fnames:
+        f.write("<tr>\n")
+        f.write("<td><a href=\"summary.html?%s\">%s</a></td>\n" % (
+            (urllib.urlencode({ 'dump' : fname }), fname)))
+        f.write("<td>&nbsp;&nbsp;&nbsp;")
+        f.write(datetime.datetime.fromtimestamp(mtime))
+        f.write("</td>")
+        f.write("<td>&nbsp;&nbsp;&nbsp;")
+        self.output_dump_desc_field(f, fname)
+        f.write("</td>")
+        f.write("</tr>\n")
+    f.write("</table>\n")
+    f.write(WEB_DUMPS_FOOTER)
+    return
+
+class InspectionShell(cmd.Cmd):
+  def __init__(self, reader, heap):
+    cmd.Cmd.__init__(self)
+    self.reader = reader
+    self.heap = heap
+    self.padawan = InspectionPadawan(reader, heap)
+    self.prompt = "(grok) "
+
+    self.dd_start = 0
+    self.dd_num = 0x10
+    self.u_start = 0
+    self.u_num = 0
+    self.u_size = 0x20  # Default disassembly size (bytes) used by do_disassemble.
+
+  def EvalExpression(self, expr):
+    # Auto convert hex numbers to a python compatible format
+    if expr[:2] == "00":
+      expr = "0x"+expr
+    result = None
+    try:
+      # Ugly hack to patch in register values.
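+      # For example, on an x64 dump "$rsp + 0x10" becomes "<rsp value> + 0x10"
+      # before being eval'd below.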
+      registers = [register
+                   for register,value in self.reader.ContextDescriptor().fields]
+      registers.sort(key=lambda r: len(r))
+      registers.reverse()
+      for register in registers:
+        expr = expr.replace("$"+register, str(self.reader.Register(register)))
+      result = eval(expr)
+    except Exception as e:
+      print("**** Could not evaluate '%s': %s" % (expr, e))
+      raise e
+    return result
+
+  def ParseAddressExpr(self, expr):
+    address = 0
+    try:
+      result = self.EvalExpression(expr)
+    except:
+      return 0
+    try:
+      address = int(result)
+    except Exception as e:
+      print("**** Could not convert '%s' => %s to valid address: %s" % (
+          expr, result , e))
+    return address
+
+  def do_help(self, cmd=None):
+    if len(cmd) == 0:
+      print("Available commands")
+      print("=" * 79)
+      prefix = "do_"
+      methods = inspect.getmembers(InspectionShell, predicate=inspect.ismethod)
+      for name,method in methods:
+        if not name.startswith(prefix): continue
+        doc = inspect.getdoc(method)
+        if not doc: continue
+        name = prefix.join(name.split(prefix)[1:])
+        description = doc.splitlines()[0]
+        print((name + ": ").ljust(16) + description)
+      print("=" * 79)
+    else:
+      return super(InspectionShell, self).do_help(cmd)
+
+  def do_p(self, cmd):
+    """ see print """
+    return self.do_print(cmd)
+
+  def do_print(self, cmd):
+    """
+    Evaluate an arbitrary python command.
+    """
+    try:
+      print(self.EvalExpression(cmd))
+    except:
+      pass
+
+  def do_da(self, address):
+    """ see display_ascii"""
+    return self.do_display_ascii(address)
+
+  def do_display_ascii(self, address):
+    """
+     Print ASCII string starting at specified address.
+    """
+    address = self.ParseAddressExpr(address)
+    string = self.reader.ReadAsciiString(address)
+    if string == "":
+      print("Not an ASCII string at %s" % self.reader.FormatIntPtr(address))
+    else:
+      print("%s\n" % string)
+
+  def do_dsa(self, address):
+    """ see display_stack_ascii"""
+    return self.do_display_stack_ascii(address)
+
+  def do_display_stack_ascii(self, address):
+    """
+    Print ASCII stack error message.
+    """
+    if self.reader.exception is None:
+      print("Minidump has no exception info")
+      return
+    if len(address) == 0:
+      address = None
+    else:
+      address = self.ParseAddressExpr(address)
+    self.padawan.PrintStackTraceMessage(address)
+
+  def do_dd(self, args):
+    """
+     Interpret memory in the given region [address, address + num * word_size)
+     (if available) as a sequence of words. Automatic alignment is not performed.
+
+     If num is not specified, a default value of 16 words is used.
+     If no address is given, dd continues printing at the next word.
+
+     Synopsis: dd 0x<address>|$register [0x<num>]
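+     Example (hypothetical address): dd 0x2cba0000 0x4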
+    """
+    if len(args) != 0:
+      args = args.split(' ')
+      self.dd_start = self.ParseAddressExpr(args[0])
+      self.dd_num = int(args[1], 16) if len(args) > 1 else 0x10
+    else:
+      self.dd_start += self.dd_num * self.reader.PointerSize()
+    if not self.reader.IsAlignedAddress(self.dd_start):
+      print("Warning: Dumping un-aligned memory, is this what you had in mind?")
+    end = self.dd_start + self.reader.PointerSize() * self.dd_num
+    self.padawan.InterpretMemory(self.dd_start, end)
+
+  def do_do(self, address):
+    """ see display_object """
+    return self.do_display_object(address)
+
+  def do_display_object(self, address):
+    """
+     Interpret memory at the given address as a V8 object.
+
+     Automatic alignment makes sure that you can pass tagged as well as
+     un-tagged addresses.
+    """
+    address = self.ParseAddressExpr(address)
+    if self.reader.IsAlignedAddress(address):
+      address = address + 1
+    elif not self.heap.IsTaggedObjectAddress(address):
+      print("Address doesn't look like a valid pointer!")
+      return
+    heap_object = self.padawan.SenseObject(address)
+    if heap_object:
+      heap_object.Print(Printer())
+    else:
+      print("Address cannot be interpreted as object!")
+
+  def do_dso(self, args):
+    """ see display_stack_objects """
+    return self.do_display_stack_objects(args)
+
+  def do_display_stack_objects(self, args):
+    """
+    Find and print object pointers in the given range.
+
+    Print all possible object pointers that are on the stack or in the given
+    address range.
+
+    Usage: dso [START_ADDR [END_ADDR]]
+    """
+    start = self.reader.StackTop()
+    end = self.reader.StackBottom()
+    if len(args) != 0:
+      args = args.split(' ')
+      start = self.ParseAddressExpr(args[0])
+      end = self.ParseAddressExpr(args[1]) if len(args) > 1 else end
+    objects = self.heap.FindObjectPointers(start, end)
+    for address in objects:
+      heap_object = self.padawan.SenseObject(address)
+      info = ""
+      if heap_object:
+        info = str(heap_object)
+      print("%s %s" % (self.padawan.FormatIntPtr(address), info))
+
+  def do_do_desc(self, address):
+    """
+      Print a descriptor array in a readable format.
+    """
+    start = self.ParseAddressExpr(address)
+    if ((start & 1) == 1): start = start - 1
+    DescriptorArray(FixedArray(self.heap, None, start)).Print(Printer())
+
+  def do_do_map(self, address):
+    """
+      Print a Map in a readable format.
+    """
+    start = self.ParseAddressExpr(address)
+    if ((start & 1) == 1): start = start - 1
+    Map(self.heap, None, start).Print(Printer())
+
+  def do_do_trans(self, address):
+    """
+      Print a transition array in a readable format.
+    """
+    start = self.ParseAddressExpr(address)
+    if ((start & 1) == 1): start = start - 1
+    TransitionArray(FixedArray(self.heap, None, start)).Print(Printer())
+
+  def do_dp(self, address):
+    """ see display_page """
+    return self.do_display_page(address)
+
+  def do_display_page(self, address):
+    """
+     Prints details about the V8 heap page of the given address.
+
+     Interpret memory at the given address as being on a V8 heap page
+     and print information about the page header (if available).
+    """
+    address = self.ParseAddressExpr(address)
+    page_address = address & ~self.heap.PageAlignmentMask()
+    if self.reader.IsValidAddress(page_address):
+      print("**** Not Implemented")
+      return
+    else:
+      print("Page header is not available!")
+
+  def do_k(self, arguments):
+    """
+     Teach V8 heap layout information to the inspector.
+
+     This increases the amount of annotations the inspector can produce while
+     dumping data. The first page of each heap space is of particular interest
+     because it contains known objects that do not move.
+    """
+    self.padawan.PrintKnowledge()
+
+  def do_ko(self, address):
+    """ see known_oldspace """
+    return self.do_known_oldspace(address)
+
+  def do_known_oldspace(self, address):
+    """
+     Teach V8 heap layout information to the inspector.
+
+     Set the first old space page by passing any pointer into that page.
+    """
+    address = self.ParseAddressExpr(address)
+    page_address = address & ~self.heap.PageAlignmentMask()
+    self.padawan.known_first_old_page = page_address
+
+  def do_km(self, address):
+    """ see known_map """
+    return self.do_known_map(address)
+
+  def do_known_map(self, address):
+    """
+     Teach V8 heap layout information to the inspector.
+
+     Set the first map-space page by passing any pointer into that page.
+    """
+    address = self.ParseAddressExpr(address)
+    page_address = address & ~self.heap.PageAlignmentMask()
+    self.padawan.known_first_map_page = page_address
+
+  def do_list(self, smth):
+    """
+     List all available memory regions.
+    """
+    def print_region(reader, start, size, location):
+      print("  %s - %s (%d bytes)" % (reader.FormatIntPtr(start),
+                                      reader.FormatIntPtr(start + size),
+                                      size))
+    print("Available memory regions:")
+    self.reader.ForEachMemoryRegion(print_region)
+
+  def do_lm(self, arg):
+    """ see list_modules """
+    return self.do_list_modules(arg)
+
+  def do_list_modules(self, arg):
+    """
+     List details for all loaded modules in the minidump.
+
+     An argument can be passed to limit the output to only those modules that
+     contain the argument as a substring (case insensitive match).
+    """
+    for module in self.reader.module_list.modules:
+      if arg:
+        name = GetModuleName(self.reader, module).lower()
+        if name.find(arg.lower()) >= 0:
+          PrintModuleDetails(self.reader, module)
+      else:
+        PrintModuleDetails(self.reader, module)
+    print()
+
+  def do_s(self, word):
+    """ see search """
+    return self.do_search(word)
+
+  def do_search(self, word):
+    """
+     Search for a given word in available memory regions.
+
+     The given word is expanded to full pointer size and searched at aligned
+     as well as un-aligned memory locations. Use 'sa' to search aligned locations
+     only.
+    """
+    try:
+      word = self.ParseAddressExpr(word)
+    except ValueError:
+      print("Malformed word, prefix with '0x' to use hexadecimal format.")
+      return
+    print(
+      "Searching for word %d/0x%s:" % (word, self.reader.FormatIntPtr(word)))
+    self.reader.FindWord(word)
+
+  def do_sh(self, none):
+    """
+     Search for the V8 Heap object in all available memory regions.
+
+     You might get lucky and find this rare treasure full of invaluable
+     information.
+    """
+    print("**** Not Implemented")
+
+  def do_u(self, args):
+    """ see disassemble """
+    return self.do_disassemble(args)
+
+  def do_disassemble(self, args):
+    """
+     Unassemble memory in the region [address, address + size).
+
+     If the size is not specified, a default value of 32 bytes is used.
+     Synopsis: u 0x<address> 0x<size>
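+     Example (hypothetical address): u 0x2cba0000 0x20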
+    """
+    if len(args) != 0:
+      args = args.split(' ')
+      self.u_start = self.ParseAddressExpr(args[0])
+      self.u_size = self.ParseAddressExpr(args[1]) if len(args) > 1 else 0x20
+      skip = False
+    else:
+      # Skip the first instruction if we reuse the last address.
+      skip = True
+
+    if not self.reader.IsValidAddress(self.u_start):
+      print("Address %s is not contained within the minidump!" % (
+          self.reader.FormatIntPtr(self.u_start)))
+      return
+    lines = self.reader.GetDisasmLines(self.u_start, self.u_size)
+    if len(lines) == 0:
+      print("Address %s could not be disassembled!" % (
+          self.reader.FormatIntPtr(self.u_start)))
+      print("    Could not disassemble using %s." % OBJDUMP_BIN)
+      print("    Pass path to architecture specific objdump via --objdump?")
+      return
+    for line in lines:
+      if skip:
+        skip = False
+        continue
+      print(FormatDisasmLine(self.u_start, self.heap, line))
+    # Continue the next disassembly at the address of the last line printed.
+    self.u_start += lines[-1][0]
+    print()
+
+  def do_EOF(self, none):
+    raise KeyboardInterrupt
+
+EIP_PROXIMITY = 64
+
+CONTEXT_FOR_ARCH = {
+    MD_CPU_ARCHITECTURE_AMD64:
+      ['rax', 'rbx', 'rcx', 'rdx', 'rdi', 'rsi', 'rbp', 'rsp', 'rip',
+       'r8', 'r9', 'r10', 'r11', 'r12', 'r13', 'r14', 'r15'],
+    MD_CPU_ARCHITECTURE_ARM:
+      ['r0', 'r1', 'r2', 'r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9',
+       'r10', 'r11', 'r12', 'sp', 'lr', 'pc'],
+    MD_CPU_ARCHITECTURE_ARM64:
+      ['r0', 'r1', 'r2', 'r3', 'r4', 'r5', 'r6', 'r7', 'r8', 'r9',
+       'r10', 'r11', 'r12', 'r13', 'r14', 'r15', 'r16', 'r17', 'r18', 'r19',
+       'r20', 'r21', 'r22', 'r23', 'r24', 'r25', 'r26', 'r27', 'r28',
+       'fp', 'lr', 'sp', 'pc'],
+    MD_CPU_ARCHITECTURE_X86:
+      ['eax', 'ebx', 'ecx', 'edx', 'edi', 'esi', 'ebp', 'esp', 'eip']
+}
+
+KNOWN_MODULES = {'chrome.exe', 'chrome.dll'}
+
+def GetVersionString(ms, ls):
+  return "%d.%d.%d.%d" % (ms >> 16, ms & 0xffff, ls >> 16, ls & 0xffff)
+
+
+def GetModuleName(reader, module):
+  name = reader.ReadMinidumpString(module.module_name_rva)
+  # simplify for path manipulation
+  name = name.encode('utf-8')
+  return str(os.path.basename(str(name).replace("\\", "/")))
+
+
+def PrintModuleDetails(reader, module):
+  print("%s" % GetModuleName(reader, module))
+  file_version = GetVersionString(module.version_info.dwFileVersionMS,
+                                  module.version_info.dwFileVersionLS)
+  product_version = GetVersionString(module.version_info.dwProductVersionMS,
+                                     module.version_info.dwProductVersionLS)
+  print("  base: %s" % reader.FormatIntPtr(module.base_of_image))
+  print("  end: %s" % reader.FormatIntPtr(module.base_of_image +
+                                          module.size_of_image))
+  print("  file version: %s" % file_version)
+  print("  product version: %s" % product_version)
+  time_date_stamp = datetime.datetime.fromtimestamp(module.time_date_stamp)
+  print("  timestamp: %s" % time_date_stamp)
+
+
+def AnalyzeMinidump(options, minidump_name):
+  reader = MinidumpReader(options, minidump_name)
+  heap = None
+
+  stack_top = reader.ExceptionSP()
+  stack_bottom = reader.StackBottom()
+  stack_map = {reader.ExceptionIP(): -1}
+  for slot in range(stack_top, stack_bottom, reader.PointerSize()):
+    maybe_address = reader.ReadUIntPtr(slot)
+    if not maybe_address in stack_map:
+      stack_map[maybe_address] = slot
+
+  heap = V8Heap(reader, stack_map)
+  padawan = InspectionPadawan(reader, heap)
+
+  DebugPrint("========================================")
+  if reader.exception is None:
+    print("Minidump has no exception info")
+  else:
+    print("Address markers:")
+    print("  T = valid tagged pointer in the minidump")
+    print("  S = address on the exception stack")
+    print("  C = address in loaded C/C++ module")
+    print("  * = address in the minidump")
+    print("")
+    print("Exception info:")
+    exception_thread = reader.ExceptionThread()
+    print("  thread id: %d" % exception_thread.id)
+    print("  code:      %08X" % reader.exception.exception.code)
+    print("  context:")
+    context = CONTEXT_FOR_ARCH[reader.arch]
+    maxWidth = max(map(lambda s: len(s), context))
+    for r in context:
+      register_value = reader.Register(r)
+      print("    %s: %s" % (r.rjust(maxWidth),
+                            heap.FormatIntPtr(register_value)))
+    # TODO(vitalyr): decode eflags.
+    if reader.arch in [MD_CPU_ARCHITECTURE_ARM, MD_CPU_ARCHITECTURE_ARM64]:
+      print("    cpsr: %s" % bin(reader.exception_context.cpsr)[2:])
+    else:
+      print("    eflags: %s" % bin(reader.exception_context.eflags)[2:])
+
+    print()
+    print("  modules:")
+    for module in reader.module_list.modules:
+      name = GetModuleName(reader, module)
+      if name in KNOWN_MODULES:
+        print("    %s at %08X" % (name, module.base_of_image))
+        reader.TryLoadSymbolsFor(name, module)
+    print()
+
+    print("  stack-top:    %s" % heap.FormatIntPtr(reader.StackTop()))
+    print("  stack-bottom: %s" % heap.FormatIntPtr(reader.StackBottom()))
+    print("")
+
+    if options.shell:
+      padawan.PrintStackTraceMessage(print_message=False)
+
+    print("Disassembly around exception.eip:")
+    eip_symbol = reader.FindSymbol(reader.ExceptionIP())
+    if eip_symbol is not None:
+      print(eip_symbol)
+    disasm_start = reader.ExceptionIP() - EIP_PROXIMITY
+    disasm_bytes = 2 * EIP_PROXIMITY
+    if (options.full):
+      full_range = reader.FindRegion(reader.ExceptionIP())
+      if full_range is not None:
+        disasm_start = full_range[0]
+        disasm_bytes = full_range[1]
+
+    lines = reader.GetDisasmLines(disasm_start, disasm_bytes)
+
+    if not lines:
+      print("Could not disassemble using %s." % OBJDUMP_BIN)
+      print("Pass path to architecture specific objdump via --objdump?")
+
+    for line in lines:
+      print(FormatDisasmLine(disasm_start, heap, line))
+    print()
+
+  if heap is None:
+    heap = V8Heap(reader, None)
+
+  if options.full:
+    FullDump(reader, heap)
+
+  if options.command:
+    InspectionShell(reader, heap).onecmd(options.command)
+
+  if options.shell:
+    try:
+      InspectionShell(reader, heap).cmdloop("type help to get help")
+    except KeyboardInterrupt:
+      print("Kthxbye.")
+  elif not options.command:
+    if reader.exception is not None:
+      print("Annotated stack (from exception.esp to bottom):")
+      stack_start = padawan.PrintStackTraceMessage()
+      padawan.InterpretMemory(stack_start, stack_bottom)
+  reader.Dispose()
+
+
+if __name__ == "__main__":
+  parser = optparse.OptionParser(USAGE)
+  parser.add_option("-s", "--shell", dest="shell", action="store_true",
+                    help="start an interactive inspector shell")
+  parser.add_option("-w", "--web", dest="web", action="store_true",
+                    help="start a web server on localhost:%i" % PORT_NUMBER)
+  parser.add_option("-c", "--command", dest="command", default="",
+                    help="run an interactive inspector shell command and exit")
+  parser.add_option("-f", "--full", dest="full", action="store_true",
+                    help="dump all information contained in the minidump")
+  parser.add_option("--symdir", dest="symdir", default=".",
+                    help="directory containing *.pdb.sym file with symbols")
+  parser.add_option("--objdump", default="",
+                    help="objdump tool to use [default: %s]" % (
+                        DEFAULT_OBJDUMP_BIN))
+  options, args = parser.parse_args()
+  if len(args) != 1:
+    parser.print_help()
+    sys.exit(1)
+  if options.web:
+    try:
+      server = InspectionWebServer(PORT_NUMBER, options, args[0])
+      print('Started HTTP server on port', PORT_NUMBER)
+      webbrowser.open('http://localhost:%i/summary.html' % PORT_NUMBER)
+      server.serve_forever()
+    except KeyboardInterrupt:
+      print('^C received, shutting down the web server')
+      server.socket.close()
+  else:
+    AnalyzeMinidump(options, args[0])
diff --git a/src/third_party/v8/tools/heap-stats/README.md b/src/third_party/v8/tools/heap-stats/README.md
new file mode 100644
index 0000000..9cf6e56
--- /dev/null
+++ b/src/third_party/v8/tools/heap-stats/README.md
@@ -0,0 +1,25 @@
+# Heap Stats
+
+Heap stats is an HTML-based tool for visualizing V8-internal object statistics.
+For example, the tool can be used to visualize how much heap memory is used for
+maintaining internal state versus how much is actually allocated by the user.
+
+The tool consumes either log files produced by d8 (or Chromium) with
+`--trace-gc-object-stats`, or a trace captured using Chrome's tracing
+infrastructure. Chrome trace files can be processed either as gzip or as raw
+text files.
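+
+A log file can be produced with d8 along these lines (the script name is just a
+placeholder):
+
+    d8 --trace-gc-object-stats my_script.js > trace.log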
+
+
+Hosting requires a web server, e.g.:
+
+    cd tools/heap-stats
+    python -m SimpleHTTPServer 8000
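+
+With Python 3, the equivalent built-in server is:
+
+    python3 -m http.server 8000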
diff --git a/src/third_party/v8/tools/heap-stats/categories.js b/src/third_party/v8/tools/heap-stats/categories.js
new file mode 100644
index 0000000..2bd08fa
--- /dev/null
+++ b/src/third_party/v8/tools/heap-stats/categories.js
@@ -0,0 +1,213 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Categories for instance types.
+export const CATEGORIES = new Map([
+  [
+    'user', new Set([
+      'CONS_ONE_BYTE_STRING_TYPE',
+      'CONS_STRING_TYPE',
+      'DESCRIPTOR_ARRAY_TYPE',
+      'ELEMENTS_TYPE',
+      'EXTERNAL_INTERNALIZED_STRING_TYPE',
+      'EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE',
+      'EXTERNAL_ONE_BYTE_STRING_TYPE',
+      'EXTERNAL_STRING_TYPE',
+      'EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE',
+      'FIXED_BIGINT64_ARRAY_TYPE',
+      'FIXED_BIGUINT64_ARRAY_TYPE',
+      'FIXED_DOUBLE_ARRAY_TYPE',
+      'FIXED_FLOAT32_ARRAY_TYPE',
+      'FIXED_FLOAT64_ARRAY_TYPE',
+      'FIXED_INT16_ARRAY_TYPE',
+      'FIXED_INT32_ARRAY_TYPE',
+      'FIXED_INT8_ARRAY_TYPE',
+      'FIXED_UINT16_ARRAY_TYPE',
+      'FIXED_UINT32_ARRAY_TYPE',
+      'FIXED_UINT8_ARRAY_TYPE',
+      'FIXED_UINT8_CLAMPED_ARRAY_TYPE',
+      'FUNCTION_CONTEXT_TYPE',
+      'GLOBAL_ELEMENTS_TYPE',
+      'GLOBAL_PROPERTIES_TYPE',
+      'HEAP_NUMBER_TYPE',
+      'INTERNALIZED_STRING_TYPE',
+      'JS_ARGUMENTS_OBJECT_TYPE',
+      'JS_ARRAY_BUFFER_TYPE',
+      'JS_ARRAY_ITERATOR_TYPE',
+      'JS_ARRAY_TYPE',
+      'JS_BOUND_FUNCTION_TYPE',
+      'JS_DATE_TYPE',
+      'JS_ERROR_TYPE',
+      'JS_FAST_ARRAY_KEY_ITERATOR_TYPE',
+      'JS_FAST_ARRAY_VALUE_ITERATOR_TYPE',
+      'JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE',
+      'JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE',
+      'JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE',
+      'JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE',
+      'JS_FUNCTION_TYPE',
+      'JS_GENERATOR_OBJECT_TYPE',
+      'JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE',
+      'JS_GLOBAL_OBJECT_TYPE',
+      'JS_GLOBAL_PROXY_TYPE',
+      'JS_COLLATOR_TYPE',
+      'JS_DATE_TIME_FORMAT_TYPE',
+      'JS_DISPLAY_NAMES_TYPE',
+      'JS_LIST_FORMAT_TYPE',
+      'JS_LOCALE_TYPE',
+      'JS_NUMBER_FORMAT_TYPE',
+      'JS_PLURAL_RULES_TYPE',
+      'JS_RELATIVE_TIME_FORMAT_TYPE',
+      'JS_SEGMENT_ITERATOR_TYPE',
+      'JS_SEGMENTER_TYPE',
+      'JS_SEGMENTS_TYPE',
+      'JS_V8_BREAK_ITERATOR_TYPE',
+      'JS_MAP_KEY_ITERATOR_TYPE',
+      'JS_MAP_KEY_VALUE_ITERATOR_TYPE',
+      'JS_MAP_TYPE',
+      'JS_MAP_VALUE_ITERATOR_TYPE',
+      'JS_MESSAGE_OBJECT_TYPE',
+      'JS_OBJECT_TYPE',
+      'JS_PRIMITIVE_WRAPPER_TYPE',
+      'JS_PROMISE_TYPE',
+      'JS_PROXY_TYPE',
+      'JS_REG_EXP_TYPE',
+      'JS_SET_KEY_VALUE_ITERATOR_TYPE',
+      'JS_SET_TYPE',
+      'JS_SET_VALUE_ITERATOR_TYPE',
+      'JS_STRING_ITERATOR_TYPE',
+      'JS_TO_WASM_FUNCTION',
+      'JS_TYPED_ARRAY_TYPE',
+      'JS_WEAK_MAP_TYPE',
+      'NATIVE_CONTEXT_TYPE',
+      'OBJECT_PROPERTY_DICTIONARY_TYPE',
+      'ONE_BYTE_INTERNALIZED_STRING_TYPE',
+      'ONE_BYTE_STRING_TYPE',
+      'OTHER_CONTEXT_TYPE',
+      'PROPERTY_ARRAY_TYPE',
+      'SLICED_ONE_BYTE_STRING_TYPE',
+      'SLICED_STRING_TYPE',
+      'STRING_EXTERNAL_RESOURCE_ONE_BYTE_TYPE',
+      'STRING_EXTERNAL_RESOURCE_TWO_BYTE_TYPE',
+      'STRING_TYPE',
+      'SYMBOL_TYPE',
+      'THIN_ONE_BYTE_STRING_TYPE',
+      'THIN_STRING_TYPE',
+      'UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE',
+      'UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE',
+      'UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE',
+      'UNCACHED_EXTERNAL_STRING_TYPE',
+      'WASM_INSTANCE_OBJECT_TYPE',
+      'WASM_MEMORY_OBJECT_TYPE',
+      'WASM_MODULE_OBJECT_TYPE',
+    ])
+  ],
+  [
+    'system', new Set([
+      'ACCESS_CHECK_INFO_TYPE',
+      'ACCESSOR_INFO_TYPE',
+      'ACCESSOR_PAIR_TYPE',
+      'ALLOCATION_MEMENTO_TYPE',
+      'ALLOCATION_SITE_TYPE',
+      'ARRAY_BOILERPLATE_DESCRIPTION_TYPE',
+      'ARRAY_BOILERPLATE_DESCRIPTION_ELEMENTS_TYPE',
+      'BOILERPLATE_ELEMENTS_TYPE',
+      'BOILERPLATE_PROPERTY_ARRAY_TYPE',
+      'BOILERPLATE_PROPERTY_DICTIONARY_TYPE',
+      'BYTE_ARRAY_TYPE',
+      'CALL_HANDLER_INFO_TYPE',
+      'CELL_TYPE',
+      'CODE_STUBS_TABLE_TYPE',
+      'CONTEXT_EXTENSION_TYPE',
+      'ENUM_CACHE_TYPE',
+      'ENUM_INDICES_CACHE_TYPE',
+      'FOREIGN_TYPE',
+      'FUNCTION_TEMPLATE_INFO_ENTRIES_TYPE',
+      'FUNCTION_TEMPLATE_INFO_TYPE',
+      'INTERCEPTOR_INFO_TYPE',
+      'JS_API_OBJECT_TYPE',
+      'JS_ARRAY_BOILERPLATE_TYPE',
+      'JS_OBJECT_BOILERPLATE_TYPE',
+      'JS_SPECIAL_API_OBJECT_TYPE',
+      'MAP_TYPE',
+      'NUMBER_STRING_CACHE_TYPE',
+      'OBJECT_BOILERPLATE_DESCRIPTION_TYPE',
+      'OBJECT_TEMPLATE_INFO_TYPE',
+      'OBJECT_TO_CODE_TYPE',
+      'ODDBALL_TYPE',
+      'PROMISE_REACTION_JOB_INFO_TYPE',
+      'PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE',
+      'PROPERTY_CELL_TYPE',
+      'PROTOTYPE_INFO_TYPE',
+      'PROTOTYPE_USERS_TYPE',
+      'REGEXP_MULTIPLE_CACHE_TYPE',
+      'RETAINED_MAPS_TYPE',
+      'SCOPE_INFO_TYPE',
+      'SCRIPT_LIST_TYPE',
+      'SCRIPT_SHARED_FUNCTION_INFOS_TYPE',
+      'SERIALIZED_OBJECTS_TYPE',
+      'SINGLE_CHARACTER_STRING_CACHE_TYPE',
+      'STACK_FRAME_INFO_TYPE',
+      'STRING_SPLIT_CACHE_TYPE',
+      'STRING_TABLE_TYPE',
+      'TRANSITION_ARRAY_TYPE',
+      'WEAK_NEW_SPACE_OBJECT_TO_CODE_TYPE',
+    ])
+  ],
+  [
+    'code', new Set([
+      'BUILTIN',
+      'BYTECODE_ARRAY_CONSTANT_POOL_TYPE',
+      'BYTECODE_ARRAY_HANDLER_TABLE_TYPE',
+      'BYTECODE_ARRAY_TYPE',
+      'BYTECODE_HANDLER',
+      'CODE_DATA_CONTAINER_TYPE',
+      'DEOPTIMIZATION_DATA_TYPE',
+      'EMBEDDED_OBJECT_TYPE',
+      'FEEDBACK_CELL_TYPE',
+      'FEEDBACK_METADATA_TYPE',
+      'FEEDBACK_VECTOR_ENTRY_TYPE',
+      'FEEDBACK_VECTOR_HEADER_TYPE',
+      'FEEDBACK_VECTOR_SLOT_CALL_TYPE',
+      'FEEDBACK_VECTOR_SLOT_CALL_UNUSED_TYPE',
+      'FEEDBACK_VECTOR_SLOT_ENUM_TYPE',
+      'FEEDBACK_VECTOR_SLOT_LOAD_TYPE',
+      'FEEDBACK_VECTOR_SLOT_LOAD_UNUSED_TYPE',
+      'FEEDBACK_VECTOR_SLOT_OTHER_TYPE',
+      'FEEDBACK_VECTOR_SLOT_STORE_TYPE',
+      'FEEDBACK_VECTOR_SLOT_STORE_UNUSED_TYPE',
+      'FEEDBACK_VECTOR_TYPE',
+      'LOAD_HANDLER_TYPE',
+      'NOSCRIPT_SHARED_FUNCTION_INFOS_TYPE',
+      'OPTIMIZED_CODE_LITERALS_TYPE',
+      'OPTIMIZED_FUNCTION',
+      'PREPARSE_DATA_TYPE',
+      'REGEXP',
+      'RELOC_INFO_TYPE',
+      'SCRIPT_SOURCE_EXTERNAL_ONE_BYTE_TYPE',
+      'SCRIPT_SOURCE_EXTERNAL_TWO_BYTE_TYPE',
+      'SCRIPT_SOURCE_EXTERNAL_TYPE',
+      'SCRIPT_SOURCE_NON_EXTERNAL_ONE_BYTE_TYPE',
+      'SCRIPT_SOURCE_NON_EXTERNAL_TWO_BYTE_TYPE',
+      'SCRIPT_TYPE',
+      'SHARED_FUNCTION_INFO_TYPE',
+      'SOURCE_POSITION_TABLE_TYPE',
+      'STORE_HANDLER_TYPE',
+      'STUB',
+      'UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE',
+      'UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE',
+      'UNCOMPILED_JS_FUNCTION_TYPE',
+      'UNCOMPILED_SHARED_FUNCTION_INFO_TYPE'
+    ])
+  ],
+  ['unclassified', new Set()],
+]);
+
+// Maps category to description text that is shown in html.
+export const CATEGORY_NAMES = new Map([
+  ['user', 'JS'],
+  ['system', 'Metadata'],
+  ['code', 'Code'],
+  ['unclassified', 'Unclassified'],
+]);
diff --git a/src/third_party/v8/tools/heap-stats/details-selection-template.html b/src/third_party/v8/tools/heap-stats/details-selection-template.html
new file mode 100644
index 0000000..9f12bde
--- /dev/null
+++ b/src/third_party/v8/tools/heap-stats/details-selection-template.html
@@ -0,0 +1,151 @@
+<!-- Copyright 2018 the V8 project authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file. -->
+<style>
+#dataSelectionSection {
+  display: none;
+}
+
+.box {
+  border-left: dashed 1px #666666;
+  border-right: dashed 1px #666666;
+  border-bottom: dashed 1px #666666;
+  padding: 10px;
+  overflow: hidden;
+  position: relative;
+}
+
+.box:nth-of-type(1) {
+  border-top: dashed 1px #666666;
+  border-radius: 5px 5px 0px 0px;
+}
+
+.box:last-of-type {
+  border-radius: 0px 0px 5px 5px;
+}
+
+.box > ul {
+  margin: 0px;
+  padding: 0px;
+}
+
+.box > ul > li {
+  display: inline-block;
+}
+
+.box > ul > li:not(:first-child) {
+  margin-left: 10px;
+}
+
+.box > ul > li:first-child {
+  font-weight: bold;
+}
+
+.instanceTypeSelectBox {
+  position: relative;
+  overflow: hidden;
+  float: left;
+  padding: 0px 5px 2px 0px;
+  margin: 3px;
+  border-radius: 3px;
+}
+
+.instanceTypeSelectBox > label {
+  font-size: xx-small;
+}
+
+.instanceTypeSelectBox > input {
+  vertical-align: middle;
+}
+
+.percentBackground {
+  position: absolute;
+  width: 200%;
+  height: 100%;
+  left: 0%;
+  top: 0px;
+  margin-left: -100%;
+  transition: all 1s ease-in-out;
+}
+
+.instanceTypeSelectBox > .percentBackground  {
+  background: linear-gradient(90deg, #68b0f7 50%, #b3d9ff 50%);
+  z-index: -1;
+}
+.box > .percentBackground  {
+  background: linear-gradient(90deg, #e0edfe 50%, #fff 50%);
+  z-index: -2;
+}
+
+#categories {
+  margin-top: 10px;
+}
+
+#category-filter {
+  text-align: right;
+  width: 50px;
+}
+
+.categorySelectionButtons {
+  float: right;
+}
+.categoryLabels {
+  float: left;
+  min-width: 200px;
+}
+.categoryContent {
+  clear: both;
+}
+
+</style>
+<section id="dataSelectionSection">
+  <h2>Data selection</h2>
+  <ul>
+    <li>
+      <label for="isolate-select">
+        Isolate
+      </label>
+      <select id="isolate-select">
+        <option>No data</option>
+      </select>
+    </li>
+    <li>
+      <label for="data-view-select">
+        Data view
+      </label>
+      <select id="data-view-select">
+        <option>No data</option>
+      </select>
+    </li>
+    <li>
+      <label for="dataset-select">
+        Data set
+      </label>
+      <select id="dataset-select">
+        <option>No data</option>
+      </select>
+    </li>
+    <li>
+      <label for="gc-select">
+        Garbage collection (at a specific time in ms)
+      </label>
+      <select id="gc-select">
+        <option>No data</option>
+      </select>
+    </li>
+    <li>
+      <input id="category-filter" type="text" value="0" disabled="disabled" />KB
+      <button id="category-filter-btn" disabled="disabled">
+        Filter categories with less memory
+      </button>
+      <button id="category-auto-filter-btn" disabled="disabled">
+        Show top 20 categories only
+      </button>
+    </li>
+    <li>
+      <button id="csv-export-btn" disabled="disabled">Export selection as CSV</button>
+    </li>
+  </ul>
+
+  <div id="categories"></div>
+</section>
diff --git a/src/third_party/v8/tools/heap-stats/details-selection.js b/src/third_party/v8/tools/heap-stats/details-selection.js
new file mode 100644
index 0000000..7130e19
--- /dev/null
+++ b/src/third_party/v8/tools/heap-stats/details-selection.js
@@ -0,0 +1,447 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+import {CATEGORIES, CATEGORY_NAMES} from './categories.js';
+
+export const VIEW_BY_INSTANCE_TYPE = 'by-instance-type';
+export const VIEW_BY_INSTANCE_CATEGORY = 'by-instance-category';
+export const VIEW_BY_FIELD_TYPE = 'by-field-type';
+
+defineCustomElement('details-selection', (templateText) =>
+ class DetailsSelection extends HTMLElement {
+  constructor() {
+    super();
+    const shadowRoot = this.attachShadow({mode: 'open'});
+    shadowRoot.innerHTML = templateText;
+    this.isolateSelect.addEventListener(
+        'change', e => this.handleIsolateChange(e));
+    this.dataViewSelect.addEventListener(
+        'change', e => this.notifySelectionChanged(e));
+    this.datasetSelect.addEventListener(
+        'change', e => this.notifySelectionChanged(e));
+    this.gcSelect.addEventListener(
+      'change', e => this.notifySelectionChanged(e));
+    this.$('#csv-export-btn')
+        .addEventListener('click', e => this.exportCurrentSelection(e));
+    this.$('#category-filter-btn')
+        .addEventListener('click', e => this.filterCurrentSelection(e));
+    this.$('#category-auto-filter-btn')
+        .addEventListener('click', e => this.filterTop20Categories(e));
+    this._data = undefined;
+    this.selection = undefined;
+  }
+
+  connectedCallback() {
+    for (let category of CATEGORIES.keys()) {
+      this.$('#categories').appendChild(this.buildCategory(category));
+    }
+  }
+
+  dataChanged() {
+    this.selection = {categories: {}};
+    this.resetUI(true);
+    this.populateIsolateSelect();
+    this.handleIsolateChange();
+    this.$('#dataSelectionSection').style.display = 'block';
+  }
+
+  set data(value) {
+    this._data = value;
+    this.dataChanged();
+  }
+
+  get data() {
+    return this._data;
+  }
+
+  get selectedIsolate() {
+    return this._data[this.selection.isolate];
+  }
+
+  get selectedData() {
+    console.assert(this.data, 'invalid data');
+    console.assert(this.selection, 'invalid selection');
+    return this.selectedIsolate.gcs[this.selection.gc][this.selection.data_set];
+  }
+
+  $(id) {
+    return this.shadowRoot.querySelector(id);
+  }
+
+  querySelectorAll(query) {
+    return this.shadowRoot.querySelectorAll(query);
+  }
+
+  get dataViewSelect() {
+    return this.$('#data-view-select');
+  }
+
+  get datasetSelect() {
+    return this.$('#dataset-select');
+  }
+
+  get isolateSelect() {
+    return this.$('#isolate-select');
+  }
+
+  get gcSelect() {
+    return this.$('#gc-select');
+  }
+
+  buildCategory(name) {
+    const div = document.createElement('div');
+    div.id = name;
+    div.classList.add('box');
+
+    let ul = document.createElement('ul');
+    ul.className = 'categoryLabels';
+    {
+      const name_li = document.createElement('li');
+      name_li.textContent = CATEGORY_NAMES.get(name);
+      ul.appendChild(name_li);
+
+      const percent_li = document.createElement('li');
+      percent_li.textContent = '0%';
+      percent_li.id = name + 'PercentContent';
+      ul.appendChild(percent_li);
+    }
+    div.appendChild(ul);
+
+    ul = document.createElement('ul');
+    ul.className = 'categorySelectionButtons';
+    {
+      const all_li = document.createElement('li');
+      const all_button = document.createElement('button');
+      all_button.textContent = 'All';
+      all_button.addEventListener('click', e => this.selectCategory(name));
+      all_li.appendChild(all_button);
+      ul.appendChild(all_li);
+
+      const top_li = document.createElement('li');
+      const top_button = document.createElement('button');
+      top_button.textContent = 'Top 10';
+      top_button.addEventListener(
+          'click', e => this.selectCategoryTopEntries(name));
+      top_li.appendChild(top_button);
+      ul.appendChild(top_li);
+
+      const none_li = document.createElement('li');
+      const none_button = document.createElement('button');
+      none_button.textContent = 'None';
+      none_button.addEventListener('click', e => this.unselectCategory(name));
+      none_li.appendChild(none_button);
+      ul.appendChild(none_li);
+    }
+    div.appendChild(ul);
+
+    const innerDiv = document.createElement('div');
+    innerDiv.id = name + 'Content';
+    innerDiv.className = 'categoryContent';
+    div.appendChild(innerDiv);
+
+    const percentDiv = document.createElement('div');
+    percentDiv.className = 'percentBackground';
+    percentDiv.id = name + 'PercentBackground';
+    div.appendChild(percentDiv);
+    return div;
+  }
+
+  populateIsolateSelect() {
+    let isolates = Object.entries(this.data);
+    // Sort by peak heap memory consumption.
+    isolates.sort((a, b) => b[1].peakMemory - a[1].peakMemory);
+    this.populateSelect(
+        '#isolate-select', isolates, (key, isolate) => isolate.getLabel());
+  }
+
+  resetUI(resetIsolateSelect) {
+    if (resetIsolateSelect) removeAllChildren(this.isolateSelect);
+
+    removeAllChildren(this.dataViewSelect);
+    removeAllChildren(this.datasetSelect);
+    removeAllChildren(this.gcSelect);
+    this.clearCategories();
+    this.setButtonState('disabled');
+  }
+
+  setButtonState(disabled) {
+    this.$('#csv-export-btn').disabled = disabled;
+    this.$('#category-filter').disabled = disabled;
+    this.$('#category-filter-btn').disabled = disabled;
+    this.$('#category-auto-filter-btn').disabled = disabled;
+  }
+
+  handleIsolateChange(e) {
+    this.selection.isolate = this.isolateSelect.value;
+    if (this.selection.isolate.length === 0) {
+      this.selection.isolate = null;
+      return;
+    }
+    this.resetUI(false);
+    this.populateSelect(
+        '#data-view-select', [
+          [VIEW_BY_INSTANCE_TYPE, 'Selected instance types'],
+          [VIEW_BY_INSTANCE_CATEGORY, 'Selected type categories'],
+          [VIEW_BY_FIELD_TYPE, 'Field type statistics']
+        ],
+        (key, label) => label, VIEW_BY_INSTANCE_TYPE);
+    this.populateSelect(
+        '#dataset-select', this.selectedIsolate.data_sets.entries(), null,
+        'live');
+    this.populateSelect(
+        '#gc-select',
+        Object.keys(this.selectedIsolate.gcs)
+            .map(id => [id, this.selectedIsolate.gcs[id].time]),
+        (key, time, index) => {
+          return (index + ': ').padStart(4, '0') +
+              formatSeconds(time).padStart(6, '0') + ' ' +
+              formatBytes(this.selectedIsolate.gcs[key].live.overall)
+                  .padStart(9, '0');
+        });
+    this.populateCategories();
+    this.notifySelectionChanged();
+  }
+
+  notifySelectionChanged(e) {
+    if (!this.selection.isolate) return;
+
+    this.selection.data_view = this.dataViewSelect.value;
+    this.selection.categories = {};
+    if (this.selection.data_view === VIEW_BY_FIELD_TYPE) {
+      this.$('#categories').style.display = 'none';
+    } else {
+      for (let category of CATEGORIES.keys()) {
+        const selected = this.selectedInCategory(category);
+        if (selected.length > 0) this.selection.categories[category] = selected;
+      }
+      this.$('#categories').style.display = 'block';
+    }
+    this.selection.category_names = CATEGORY_NAMES;
+    this.selection.data_set = this.datasetSelect.value;
+    this.selection.gc = this.gcSelect.value;
+    this.setButtonState(false);
+    this.updatePercentagesInCategory();
+    this.updatePercentagesInInstanceTypes();
+    this.dispatchEvent(new CustomEvent(
+        'change', {bubbles: true, composed: true, detail: this.selection}));
+  }
+
+  filterCurrentSelection(e) {
+    const minSize = this.$('#category-filter').value * KB;
+    this.filterCurrentSelectionWithThreshold(minSize);
+  }
+
+  filterTop20Categories(e) {
+    // Limit to show top 20 categories only.
+    let minSize = 0;
+    let count = 0;
+    let sizes = this.selectedIsolate.instanceTypePeakMemory;
+    for (let key in sizes) {
+      if (count == 20) break;
+      minSize = sizes[key];
+      count++;
+    }
+    this.filterCurrentSelectionWithThreshold(minSize);
+  }
+
+  filterCurrentSelectionWithThreshold(minSize) {
+    if (minSize === 0) return;
+
+    this.selection.category_names.forEach((_, category) => {
+      for (let checkbox of this.querySelectorAll(
+               'input[name=' + category + 'Checkbox]')) {
+        checkbox.checked =
+            this.selectedData.instance_type_data[checkbox.instance_type]
+                .overall > minSize;
+        console.log(
+            checkbox.instance_type, checkbox.checked,
+            this.selectedData.instance_type_data[checkbox.instance_type]
+                .overall);
+      }
+    });
+    this.notifySelectionChanged();
+  }
+
+  updatePercentagesInCategory() {
+    const overalls = {};
+    let overall = 0;
+    // Reset all categories.
+    this.selection.category_names.forEach((_, category) => {
+      overalls[category] = 0;
+    });
+    // Only update categories that have selections.
+    Object.entries(this.selection.categories).forEach(([category, value]) => {
+      overalls[category] =
+          Object.values(value).reduce(
+              (accu, current) =>
+                  accu + this.selectedData.instance_type_data[current].overall,
+              0) /
+          KB;
+      overall += overalls[category];
+    });
+    Object.entries(overalls).forEach(([category, category_overall]) => {
+      let percents = category_overall / overall * 100;
+      this.$(`#${category}PercentContent`).textContent =
+          `${percents.toFixed(1)}%`;
+      this.$('#' + category + 'PercentBackground').style.left = percents + '%';
+    });
+  }
+
+  updatePercentagesInInstanceTypes() {
+    const instanceTypeData = this.selectedData.instance_type_data;
+    const maxInstanceType = this.selectedData.singleInstancePeakMemory;
+    this.querySelectorAll('.instanceTypeSelectBox  input').forEach(checkbox => {
+      let instanceType = checkbox.value;
+      let instanceTypeSize = instanceTypeData[instanceType].overall;
+      let percents = instanceTypeSize / maxInstanceType;
+      let percentDiv = checkbox.parentNode.querySelector('.percentBackground');
+      percentDiv.style.left = (percents * 100) + '%';
+
+    });
+  }
+
+  selectedInCategory(category) {
+    let tmp = [];
+    this.querySelectorAll('input[name=' + category + 'Checkbox]:checked')
+        .forEach(checkbox => tmp.push(checkbox.value));
+    return tmp;
+  }
+
+  categoryForType(instance_type) {
+    for (let [key, value] of CATEGORIES.entries()) {
+      if (value.has(instance_type)) return key;
+    }
+    return 'unclassified';
+  }
+
+  createOption(value, text) {
+    const option = document.createElement('option');
+    option.value = value;
+    option.text = text;
+    return option;
+  }
+
+  populateSelect(id, iterable, labelFn = null, autoselect = null) {
+    if (labelFn == null) labelFn = e => e;
+    let index = 0;
+    for (let [key, value] of iterable) {
+      index++;
+      const label = labelFn(key, value, index);
+      const option = this.createOption(key, label);
+      if (autoselect === key) {
+        option.selected = 'selected';
+      }
+      this.$(id).appendChild(option);
+    }
+  }
+
+  clearCategories() {
+    for (const category of CATEGORIES.keys()) {
+      let f = this.$('#' + category + 'Content');
+      while (f.firstChild) {
+        f.removeChild(f.firstChild);
+      }
+    }
+  }
+
+  populateCategories() {
+    this.clearCategories();
+    const categories = {__proto__:null};
+    for (let cat of CATEGORIES.keys()) {
+      categories[cat] = [];
+    }
+
+    for (let instance_type of this.selectedIsolate.non_empty_instance_types) {
+      const category = this.categoryForType(instance_type);
+      categories[category].push(instance_type);
+    }
+    for (let category of Object.keys(categories)) {
+      categories[category].sort();
+      for (let instance_type of categories[category]) {
+        this.$('#' + category + 'Content')
+            .appendChild(this.createCheckBox(instance_type, category));
+      }
+    }
+  }
+
+  unselectCategory(category) {
+    this.querySelectorAll('input[name=' + category + 'Checkbox]')
+        .forEach(checkbox => checkbox.checked = false);
+    this.notifySelectionChanged();
+  }
+
+  selectCategory(category) {
+    this.querySelectorAll('input[name=' + category + 'Checkbox]')
+        .forEach(checkbox => checkbox.checked = true);
+    this.notifySelectionChanged();
+  }
+
+  selectCategoryTopEntries(category) {
+    // Unselect all checkboxes in this category.
+    this.querySelectorAll('input[name=' + category + 'Checkbox]')
+        .forEach(checkbox => checkbox.checked = false);
+    const data = this.selectedData.instance_type_data;
+
+    // Select the 10 largest instance types in this category.
+    const categoryInstanceTypes = Array.from(CATEGORIES.get(category));
+    categoryInstanceTypes.filter(each => each in data)
+      .sort((a, b) => {
+        return data[b].overall - data[a].overall;
+      }).slice(0, 10).forEach((instance_type) => {
+        this.$('#' + instance_type + 'Checkbox').checked = true;
+      });
+    this.notifySelectionChanged();
+  }
+
+  createCheckBox(instance_type, category) {
+    const div = document.createElement('div');
+    div.classList.add('instanceTypeSelectBox');
+    const input = document.createElement('input');
+    div.appendChild(input);
+    input.type = 'checkbox';
+    input.name = category + 'Checkbox';
+    input.checked = 'checked';
+    input.id = instance_type + 'Checkbox';
+    input.instance_type = instance_type;
+    input.value = instance_type;
+    input.addEventListener('change', e => this.notifySelectionChanged(e));
+    const label = document.createElement('label');
+    div.appendChild(label);
+    label.innerText = instance_type;
+    label.htmlFor = instance_type + 'Checkbox';
+    const percentDiv = document.createElement('div');
+    percentDiv.className = 'percentBackground';
+    div.appendChild(percentDiv);
+    return div;
+  }
+
+  exportCurrentSelection(e) {
+    const data = [];
+    const selected_data =
+        this.selectedIsolate.gcs[this.selection.gc][this.selection.data_set]
+            .instance_type_data;
+    Object.values(this.selection.categories).forEach(instance_types => {
+      instance_types.forEach(instance_type => {
+        data.push([instance_type, selected_data[instance_type].overall / KB]);
+      });
+    });
+    const createInlineContent = arrayOfRows => {
+      const content = arrayOfRows.reduce(
+          (accu, rowAsArray) => {return accu + `${rowAsArray.join(',')}\n`},
+          '');
+      return `data:text/csv;charset=utf-8,${content}`;
+    };
+    const encodedUri = encodeURI(createInlineContent(data));
+    const link = document.createElement('a');
+    link.setAttribute('href', encodedUri);
+    link.setAttribute(
+        'download',
+        `heap_objects_data_${this.selection.isolate}_${this.selection.gc}.csv`);
+    this.shadowRoot.appendChild(link);
+    link.click();
+    this.shadowRoot.removeChild(link);
+  }
+});
diff --git a/src/third_party/v8/tools/heap-stats/global-timeline-template.html b/src/third_party/v8/tools/heap-stats/global-timeline-template.html
new file mode 100644
index 0000000..bb11b44
--- /dev/null
+++ b/src/third_party/v8/tools/heap-stats/global-timeline-template.html
@@ -0,0 +1,13 @@
+<!-- Copyright 2018 the V8 project authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file. -->
+<style>
+#chart {
+  width: 100%;
+  height: 500px;
+}
+</style>
+<div id="container" style="display: none;">
+  <h2>Timeline</h2>
+  <div id="chart"></div>
+</div>
diff --git a/src/third_party/v8/tools/heap-stats/global-timeline.js b/src/third_party/v8/tools/heap-stats/global-timeline.js
new file mode 100644
index 0000000..05c69f5
--- /dev/null
+++ b/src/third_party/v8/tools/heap-stats/global-timeline.js
@@ -0,0 +1,227 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+import {
+  VIEW_BY_INSTANCE_TYPE,
+  VIEW_BY_INSTANCE_CATEGORY,
+  VIEW_BY_FIELD_TYPE
+} from './details-selection.js';
+
+defineCustomElement('global-timeline', (templateText) =>
+ class GlobalTimeline extends HTMLElement {
+  constructor() {
+    super();
+    const shadowRoot = this.attachShadow({mode: 'open'});
+    shadowRoot.innerHTML = templateText;
+  }
+
+  $(id) {
+    return this.shadowRoot.querySelector(id);
+  }
+
+  set data(value) {
+    this._data = value;
+    this.stateChanged();
+  }
+
+  get data() {
+    return this._data;
+  }
+
+  set selection(value) {
+    this._selection = value;
+    this.stateChanged();
+  }
+
+  get selection() {
+    return this._selection;
+  }
+
+  isValid() {
+    return this.data && this.selection;
+  }
+
+  hide() {
+    this.$('#container').style.display = 'none';
+  }
+
+  show() {
+    this.$('#container').style.display = 'block';
+  }
+
+  stateChanged() {
+    if (this.isValid()) {
+      this.drawChart();
+    } else {
+      this.hide();
+    }
+  }
+
+  getFieldData() {
+    const labels = [
+      {type: 'number', label: 'Time'},
+      {type: 'number', label: 'Ptr compression benefit'},
+      {type: 'string', role: 'tooltip'},
+      {type: 'number', label: 'Embedder fields'},
+      {type: 'number', label: 'Tagged fields (excl. in-object Smis)'},
+      {type: 'number', label: 'In-object Smi-only fields'},
+      {type: 'number', label: 'Other raw fields'},
+      {type: 'number', label: 'Unboxed doubles'},
+      {type: 'number', label: 'Boxed doubles'},
+      {type: 'number', label: 'String data'}
+    ];
+    const chart_data = [labels];
+    const isolate_data = this.data[this.selection.isolate];
+    let sum_total = 0;
+    let sum_ptr_compr_benefit_perc = 0;
+    let count = 0;
+    Object.keys(isolate_data.gcs).forEach(gc_key => {
+      const gc_data = isolate_data.gcs[gc_key];
+      const data_set = gc_data[this.selection.data_set].field_data;
+      const data = [];
+      data.push(gc_data.time * kMillis2Seconds);
+      const total = data_set.tagged_fields +
+                    data_set.inobject_smi_fields +
+                    data_set.embedder_fields +
+                    data_set.other_raw_fields +
+                    data_set.unboxed_double_fields +
+                    data_set.boxed_double_fields +
+                    data_set.string_data;
+      const ptr_compr_benefit =
+          (data_set.inobject_smi_fields + data_set.tagged_fields) / 2;
+      const ptr_compr_benefit_perc = ptr_compr_benefit / total * 100;
+      sum_total += total;
+      sum_ptr_compr_benefit_perc += ptr_compr_benefit_perc;
+      count++;
+      const tooltip = "Ptr compression benefit: " +
+                      (ptr_compr_benefit / KB).toFixed(2) + "KB " +
+                      " (" + ptr_compr_benefit_perc.toFixed(2) + "%)";
+      data.push(ptr_compr_benefit / KB);
+      data.push(tooltip);
+      data.push(data_set.embedder_fields / KB);
+      data.push(data_set.tagged_fields / KB);
+      data.push(data_set.inobject_smi_fields / KB);
+      data.push(data_set.other_raw_fields / KB);
+      data.push(data_set.unboxed_double_fields / KB);
+      data.push(data_set.boxed_double_fields / KB);
+      data.push(data_set.string_data / KB);
+      chart_data.push(data);
+    });
+    const avg_ptr_compr_benefit_perc =
+        count ? sum_ptr_compr_benefit_perc / count : 0;
+    console.log("==================================================");
+    console.log("= Average ptr compression benefit is " +
+                avg_ptr_compr_benefit_perc.toFixed(2) + "%");
+    console.log("= Average V8 heap size " +
+                (sum_total / count / KB).toFixed(2) + " KB");
+    console.log("==================================================");
+    return chart_data;
+  }
+
+  getCategoryData() {
+    const categories = Object.keys(this.selection.categories)
+                           .map(k => this.selection.category_names.get(k));
+    const labels = ['Time', ...categories];
+    const chart_data = [labels];
+    const isolate_data = this.data[this.selection.isolate];
+    Object.keys(isolate_data.gcs).forEach(gc_key => {
+      const gc_data = isolate_data.gcs[gc_key];
+      const data_set = gc_data[this.selection.data_set].instance_type_data;
+      const data = [];
+      data.push(gc_data.time * kMillis2Seconds);
+      Object.values(this.selection.categories).forEach(instance_types => {
+        data.push(
+            instance_types
+                .map(instance_type => {
+                  return data_set[instance_type].overall;
+                })
+                .reduce((accu, current) => accu + current, 0) /
+            KB);
+      });
+      chart_data.push(data);
+    });
+    return chart_data;
+  }
+
+  getInstanceTypeData() {
+    const instance_types =
+        Object.values(this.selection.categories)
+            .reduce((accu, current) => accu.concat(current), []);
+    const labels = ['Time', ...instance_types];
+    const chart_data = [labels];
+    const isolate_data = this.data[this.selection.isolate];
+    Object.keys(isolate_data.gcs).forEach(gc_key => {
+      const gc_data = isolate_data.gcs[gc_key];
+      const data_set = gc_data[this.selection.data_set].instance_type_data;
+      const data = [];
+      data.push(gc_data.time * kMillis2Seconds);
+      instance_types.forEach(instance_type => {
+        data.push(data_set[instance_type].overall / KB);
+      });
+      chart_data.push(data);
+    });
+    return chart_data;
+  }
+
+  getChartData() {
+    switch (this.selection.data_view) {
+      case VIEW_BY_FIELD_TYPE:
+        return this.getFieldData();
+      case VIEW_BY_INSTANCE_CATEGORY:
+        return this.getCategoryData();
+      case VIEW_BY_INSTANCE_TYPE:
+      default:
+        return this.getInstanceTypeData();
+    }
+  }
+
+  getChartOptions() {
+    const options = {
+      isStacked: true,
+      hAxis: {
+        format: '###.##s',
+        title: 'Time [s]',
+      },
+      vAxis: {
+        format: '#,###KB',
+        title: 'Memory consumption [KBytes]'
+      },
+      chartArea: {left:100, width: '85%', height: '70%'},
+      legend: {position: 'top', maxLines: '1'},
+      pointsVisible: true,
+      pointSize: 5,
+      explorer: {},
+    };
+    switch (this.selection.data_view) {
+      case VIEW_BY_FIELD_TYPE:
+        // Overlay pointer compression benefit on top of the graph
+        return Object.assign(options, {
+          series: {0: {type: 'line', lineDashStyle: [13, 13]}},
+        });
+      case VIEW_BY_INSTANCE_CATEGORY:
+      case VIEW_BY_INSTANCE_TYPE:
+      default:
+        return options;
+    }
+  }
+
+  drawChart() {
+    setTimeout(() => this._drawChart(), 10);
+  }
+
+  _drawChart() {
+    console.assert(this.data, 'invalid data');
+    console.assert(this.selection, 'invalid selection');
+
+    const chart_data = this.getChartData();
+
+    const data = google.visualization.arrayToDataTable(chart_data);
+    const options = this.getChartOptions();
+    const chart = new google.visualization.AreaChart(this.$('#chart'));
+    this.show();
+    chart.draw(data, google.charts.Line.convertOptions(options));
+  }
+});
diff --git a/src/third_party/v8/tools/heap-stats/helper.js b/src/third_party/v8/tools/heap-stats/helper.js
new file mode 100644
index 0000000..8416407
--- /dev/null
+++ b/src/third_party/v8/tools/heap-stats/helper.js
@@ -0,0 +1,30 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const KB = 1024;
+const MB = KB * KB;
+const GB = MB * KB;
+const kMillis2Seconds = 1 / 1000;
+
+function formatBytes(bytes) {
+  const units = ['B', 'KiB', 'MiB', 'GiB'];
+  const divisor = 1024;
+  let index = 0;
+  while (index < units.length - 1 && bytes >= divisor) {
+    index++;
+    bytes /= divisor;
+  }
+  return bytes.toFixed(2) + units[index];
+}
+
+function formatSeconds(millis) {
+  return (millis * kMillis2Seconds).toFixed(2) + 's';
+}
+
+function defineCustomElement(name, generator) {
+  let htmlTemplatePath = name + '-template.html';
+  fetch(htmlTemplatePath)
+    .then(stream => stream.text())
+    .then(templateText => customElements.define(name, generator(templateText)));
+}
diff --git a/src/third_party/v8/tools/heap-stats/histogram-viewer-template.html b/src/third_party/v8/tools/heap-stats/histogram-viewer-template.html
new file mode 100644
index 0000000..42c8e70
--- /dev/null
+++ b/src/third_party/v8/tools/heap-stats/histogram-viewer-template.html
@@ -0,0 +1,16 @@
+<!-- Copyright 2018 the V8 project authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file. -->
+<style>
+#chart {
+  width: 100%;
+  height: 800px;
+}
+</style>
+<div id="container" style="display: none;">
+  <h2>Details</h2>
+  <ul>
+    <li><span id="overall"></span></li>
+  </ul>
+  <div id="chart"></div>
+</div>
diff --git a/src/third_party/v8/tools/heap-stats/histogram-viewer.js b/src/third_party/v8/tools/heap-stats/histogram-viewer.js
new file mode 100644
index 0000000..4f1e02f
--- /dev/null
+++ b/src/third_party/v8/tools/heap-stats/histogram-viewer.js
@@ -0,0 +1,191 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+import {
+  VIEW_BY_INSTANCE_TYPE,
+  VIEW_BY_INSTANCE_CATEGORY,
+  VIEW_BY_FIELD_TYPE
+} from './details-selection.js';
+
+defineCustomElement('histogram-viewer', (templateText) =>
+ class HistogramViewer extends HTMLElement {
+  constructor() {
+    super();
+    const shadowRoot = this.attachShadow({mode: 'open'});
+    shadowRoot.innerHTML = templateText;
+  }
+
+  $(id) {
+    return this.shadowRoot.querySelector(id);
+  }
+
+  set data(value) {
+    this._data = value;
+    this.stateChanged();
+  }
+
+  get data() {
+    return this._data;
+  }
+
+  set selection(value) {
+    this._selection = value;
+    this.stateChanged();
+  }
+
+  get selection() {
+    return this._selection;
+  }
+
+  isValid() {
+    return this.data && this.selection &&
+           (this.selection.data_view === VIEW_BY_INSTANCE_CATEGORY ||
+            this.selection.data_view === VIEW_BY_INSTANCE_TYPE);
+  }
+
+  hide() {
+    this.$('#container').style.display = 'none';
+  }
+
+  show() {
+    this.$('#container').style.display = 'block';
+  }
+
+  getOverallValue() {
+    switch (this.selection.data_view) {
+      case VIEW_BY_FIELD_TYPE:
+        return NaN;
+      case VIEW_BY_INSTANCE_CATEGORY:
+        return this.getPropertyForCategory('overall');
+      case VIEW_BY_INSTANCE_TYPE:
+      default:
+        return this.getPropertyForInstanceTypes('overall');
+    }
+  }
+
+  stateChanged() {
+    if (this.isValid()) {
+      const overall_bytes = this.getOverallValue();
+      this.$('#overall').innerHTML = `Overall: ${overall_bytes / KB} KB`;
+      this.drawChart();
+    } else {
+      this.hide();
+    }
+  }
+
+  get selectedData() {
+    console.assert(this.data, 'invalid data');
+    console.assert(this.selection, 'invalid selection');
+    return this.data[this.selection.isolate]
+        .gcs[this.selection.gc][this.selection.data_set];
+  }
+
+  get selectedInstanceTypes() {
+    console.assert(this.selection, 'invalid selection');
+    return Object.values(this.selection.categories)
+        .reduce((accu, current) => accu.concat(current), []);
+  }
+
+  getPropertyForCategory(property) {
+    return Object.values(this.selection.categories)
+        .reduce(
+            (outer_accu, instance_types) => outer_accu +
+                instance_types.reduce(
+                    (inner_accu, instance_type) => inner_accu +
+                        this.selectedData
+                            .instance_type_data[instance_type][property],
+                    0),
+            0);
+  }
+
+  getPropertyForInstanceTypes(property) {
+    return this.selectedInstanceTypes.reduce(
+        (accu, instance_type) => accu +
+            this.selectedData.instance_type_data[instance_type][property],
+        0);
+  }
+
+  formatBytes(bytes) {
+    const units = ['B', 'KiB', 'MiB'];
+    const divisor = 1024;
+    let index = 0;
+    while (index < units.length - 1 && bytes >= divisor) {
+      index++;
+      bytes /= divisor;
+    }
+    return bytes + units[index];
+  }
+
+  getCategoryData() {
+    const labels = [
+      'Bucket',
+      ...Object.keys(this.selection.categories)
+          .map(k => this.selection.category_names.get(k))
+    ];
+    const data = this.selectedData.bucket_sizes.map(
+        (bucket_size, index) =>
+            [`<${this.formatBytes(bucket_size)}`,
+             ...Object.values(this.selection.categories)
+                 .map(
+                     instance_types =>
+                         instance_types
+                             .map(
+                                 instance_type =>
+                                     this.selectedData
+                                         .instance_type_data[instance_type]
+                                         .histogram[index])
+                             .reduce((accu, current) => accu + current, 0))]);
+    // Adjust last histogram bucket label.
+    data[data.length - 1][0] = 'rest';
+    return [labels, ...data];
+  }
+
+  getInstanceTypeData() {
+    const instance_types = this.selectedInstanceTypes;
+    const labels = ['Bucket', ...instance_types];
+    const data = this.selectedData.bucket_sizes.map(
+        (bucket_size, index) =>
+            [`<${bucket_size}`,
+             ...instance_types.map(
+                 instance_type =>
+                     this.selectedData.instance_type_data[instance_type]
+                         .histogram[index])]);
+    // Adjust last histogram bucket label.
+    data[data.length - 1][0] = 'rest';
+    return [labels, ...data];
+  }
+
+  getChartData() {
+    switch (this.selection.data_view) {
+      case VIEW_BY_FIELD_TYPE:
+        return this.getFieldData();
+      case VIEW_BY_INSTANCE_CATEGORY:
+        return this.getCategoryData();
+      case VIEW_BY_INSTANCE_TYPE:
+      default:
+        return this.getInstanceTypeData();
+    }
+  }
+
+  drawChart() {
+    const chart_data = this.getChartData();
+    const data = google.visualization.arrayToDataTable(chart_data);
+    const options = {
+      legend: {position: 'top', maxLines: '1'},
+      chartArea: {width: '85%', height: '85%'},
+      bar: {groupWidth: '80%'},
+      hAxis: {
+        title: 'Count',
+        minValue: 0
+      },
+      explorer: {},
+    };
+    const chart = new google.visualization.BarChart(this.$('#chart'));
+    this.show();
+    chart.draw(data, options);
+  }
+});
diff --git a/src/third_party/v8/tools/heap-stats/index.html b/src/third_party/v8/tools/heap-stats/index.html
new file mode 100644
index 0000000..efb74af
--- /dev/null
+++ b/src/third_party/v8/tools/heap-stats/index.html
@@ -0,0 +1,102 @@
+<!DOCTYPE html>
+<!-- Copyright 2018 the V8 project authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file. -->
+
+<html lang="en">
+
+<head>
+  <meta charset="UTF-8">
+  <title>V8 Heap Statistics</title>
+  <link href='https://fonts.googleapis.com/css?family=Roboto' rel='stylesheet'>
+  <script
+          src="https://www.gstatic.com/charts/loader.js"></script>
+  <script
+          src="https://cdnjs.cloudflare.com/ajax/libs/pako/1.0.6/pako_inflate.min.js"
+          integrity="sha256-N1z6ddQzX83fjw8v7uSNe7/MgOmMKdwFUv1+AJMDqNM="
+          crossorigin="anonymous"></script>
+
+  <script src="https://cdnjs.cloudflare.com/ajax/libs/oboe.js/2.1.5/oboe-browser.min.js"
+          crossorigin="anonymous"></script>
+  <script src="helper.js"></script>
+
+  <script type="module" src="details-selection.js"></script>
+  <script type="module" src="global-timeline.js"></script>
+  <script type="module" src="histogram-viewer.js"></script>
+  <script type="module" src="trace-file-reader.js"></script>
+
+  <style>
+body {
+  font-family: 'Roboto', sans-serif;
+  margin-left: 5%;
+  margin-right: 5%;
+}
+
+  </style>
+  <script>
+'use strict';
+
+google.charts.load('current', {'packages':['line', 'corechart', 'bar']});
+
+function $(id) { return document.querySelector(id); }
+
+function removeAllChildren(node) {
+  while (node.firstChild) {
+    node.removeChild(node.firstChild);
+  }
+}
+
+let state = Object.create(null);
+
+function globalDataChanged(e) {
+  state.data = e.detail;
+  // Emit one entry with the whole model for debugging purposes.
+  console.log(state.data);
+  state.selection = null;
+  $('#global-timeline').selection = state.selection;
+  $('#global-timeline').data = state.data;
+  $('#histogram-viewer').selection = state.selection;
+  $('#histogram-viewer').data = state.data;
+  $('#details-selection').data = state.data;
+}
+
+function globalSelectionChangedA(e) {
+  state.selection = e.detail;
+  console.log(state.selection);
+  $('#global-timeline').selection = state.selection;
+  $('#histogram-viewer').selection = state.selection;
+}
+
+  </script>
+</head>
+
+<body>
+  <h1>V8 Heap Statistics</h1>
+  <trace-file-reader onchange="globalDataChanged(event)"></trace-file-reader>
+
+  <details-selection id="details-selection" onchange="globalSelectionChangedA(event)"></details-selection>
+  <global-timeline id="global-timeline"></global-timeline>
+  <histogram-viewer id="histogram-viewer"></histogram-viewer>
+
+  <p>Visualize object statistics that have been gathered using</p>
+  <ul>
+    <li><code>--trace-gc-object-stats</code> on V8</li>
+    <li>
+      <a
+        href="https://www.chromium.org/developers/how-tos/trace-event-profiling-tool">Chrome's
+        tracing infrastructure</a> collecting data for the category
+      <code>v8.gc_stats</code>.
+    </li>
+  </ul>
+  <p>
+    Note that a data point is only recorded on major GCs. You can force major
+    GCs by using the <code>--gc-global</code> flag.
+  </p>
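+  <p>
+    For example, a log with one data point per major GC could be captured like
+    this (a sketch; <code>my_script.js</code> is a placeholder and a local d8
+    binary is assumed):
+  </p>
+  <pre>d8 --trace-gc-object-stats --gc-global my_script.js &gt; trace.log</pre>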
+  <p>
+    Note that the visualizer needs to run on a web server due to HTML imports
+    requiring <a
+         href="https://en.wikipedia.org/wiki/Cross-origin_resource_sharing">CORS</a>.
+  </p>
+</body>
+
+</html>
diff --git a/src/third_party/v8/tools/heap-stats/model.js b/src/third_party/v8/tools/heap-stats/model.js
new file mode 100644
index 0000000..d284d9b
--- /dev/null
+++ b/src/third_party/v8/tools/heap-stats/model.js
@@ -0,0 +1,105 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+export class Isolate {
+  constructor(address) {
+    this.address = address;
+    this.start = null;
+    this.end = null;
+    this.samples = Object.create(null);
+    this.non_empty_instance_types = new Set();
+    this.gcs = Object.create(null);
+    this.zonetags = [];
+    this.samples = {zone: {}};
+    this.data_sets = new Set();
+    this.peakMemory = 0;
+    // Maps instance_types to their max memory consumption over all gcs.
+    this.instanceTypePeakMemory = Object.create(null);
+    // Peak memory consumed by any single instance type.
+    this.singleInstanceTypePeakMemory = 0;
+  }
+
+  finalize() {
+    Object.values(this.gcs).forEach(gc => this.finalizeGC(gc));
+    this.sortInstanceTypePeakMemory();
+  }
+
+  getLabel() {
+    let label = `${this.address}: gc=#${Object.keys(this.gcs).length}`;
+    label += ` peak=${formatBytes(this.peakMemory)}`;
+    return label;
+  }
+
+  finalizeGC(gc_data) {
+    this.data_sets.forEach(key => this.finalizeDataSet(gc_data[key]));
+    if (!('live' in gc_data)) return;
+    let liveData = gc_data.live;
+    this.peakMemory = Math.max(this.peakMemory, liveData.overall);
+    let data = liveData.instance_type_data;
+    for (let name in data) {
+      let prev = this.instanceTypePeakMemory[name] || 0;
+      this.instanceTypePeakMemory[name] = Math.max(prev, data[name].overall);
+    }
+  }
+
+  finalizeDataSet(data_set) {
+    // Create a ranked instance type array that sorts instance types by
+    // memory size (overall).
+    let data = data_set.instance_type_data;
+    let ranked_instance_types =
+        [...data_set.non_empty_instance_types].sort((a, b) => {
+          return data[a].overall - data[b].overall;
+        });
+    // Reassemble the instance_type list sorted by size.
+    let sorted_data = Object.create(null);
+    let max = 0;
+    ranked_instance_types.forEach((name) => {
+      let entry = sorted_data[name] = data[name];
+      max = Math.max(max, entry.overall);
+    });
+    data_set.instance_type_data = data;
+    data_set.singleInstancePeakMemory = max;
+
+    Object.entries(data_set.instance_type_data).forEach(([name, entry]) => {
+      this.checkHistogram(
+          name, entry, data_set.bucket_sizes, 'histogram', ' overall');
+      this.checkHistogram(
+          name, entry, data_set.bucket_sizes, 'over_allocated_histogram',
+          ' over_allocated');
+    });
+  }
+
+  // Check that a lower bound for histogram memory does not exceed the
+  // overall counter.
+  checkHistogram(type, entry, bucket_sizes, histogram, overallProperty) {
+    let sum = 0;
+    for (let i = 1; i < entry[histogram].length; i++) {
+      sum += entry[histogram][i] * bucket_sizes[i - 1];
+    }
+    const overall = entry[overallProperty];
+    if (sum >= overall) {
+      console.error(
+          `${type}: sum('${histogram}') > overall (${sum} > ${overall})`);
+    }
+  }
+
+  sortInstanceTypePeakMemory() {
+    let entries = Object.entries(this.instanceTypePeakMemory);
+    entries.sort((a, b) => {return b[1] - a[1]});
+    this.instanceTypePeakMemory = Object.create(null);
+    let max = 0;
+    for (let [key, value] of entries) {
+      this.instanceTypePeakMemory[key] = value;
+      max = Math.max(max, value);
+    }
+    this.singleInstanceTypePeakMemory = max;
+  }
+
+  getInstanceTypePeakMemory(type) {
+    if (!(type in this.instanceTypePeakMemory)) return 0;
+    return this.instanceTypePeakMemory[type];
+  }
+}
diff --git a/src/third_party/v8/tools/heap-stats/trace-file-reader-template.html b/src/third_party/v8/tools/heap-stats/trace-file-reader-template.html
new file mode 100644
index 0000000..c0fadbf
--- /dev/null
+++ b/src/third_party/v8/tools/heap-stats/trace-file-reader-template.html
@@ -0,0 +1,81 @@
+<!-- Copyright 2018 the V8 project authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file. -->
+<style>
+#fileReader {
+  width: 100%;
+  height: 100px;
+  line-height: 100px;
+  text-align: center;
+  border: solid 1px #000000;
+  border-radius: 5px;
+  cursor: pointer;
+  transition: all 0.5s ease-in-out;
+}
+
+#fileReader.done {
+  height: 20px;
+  line-height: 20px;
+}
+
+#fileReader:hover {
+  background-color: #e0edfe;
+}
+
+.loading #fileReader {
+  cursor: wait;
+}
+
+#fileReader > input {
+  display: none;
+}
+
+
+#loader {
+  display: none;
+}
+
+.loading #loader {
+  display: block;
+  position: fixed;
+  top: 0px;
+  left: 0px;
+  width: 100%;
+  height: 100%;
+  background-color: rgba(255, 255, 255, 0.5);
+}
+
+#spinner {
+  position: absolute;
+  width: 100px;
+  height: 100px;
+  top: 40%;
+  left: 50%;
+  margin-left: -50px;
+  border: 30px solid #000;
+  border-top: 30px solid #36E;
+  border-radius: 50%;
+  animation: spin 1s ease-in-out infinite;
+}
+
+@keyframes spin {
+ 0% {
+    transform: rotate(0deg);
+ }
+ 100% {
+    transform: rotate(360deg);
+ }
+}
+</style>
+
+<section id="fileReaderSection">
+  <div id="fileReader" tabindex=1 >
+    <span id="label">
+      Drag and drop a trace file into this area, or click to choose from disk.
+     </span>
+    <input id="file" type="file" name="file" />
+  </div>
+  <div id="loader">
+    <div id="spinner"></div>
+  </div>
+</section>
diff --git a/src/third_party/v8/tools/heap-stats/trace-file-reader.js b/src/third_party/v8/tools/heap-stats/trace-file-reader.js
new file mode 100644
index 0000000..e297723
--- /dev/null
+++ b/src/third_party/v8/tools/heap-stats/trace-file-reader.js
@@ -0,0 +1,307 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+import {Isolate} from './model.js';
+
+defineCustomElement('trace-file-reader', (templateText) =>
+ class TraceFileReader extends HTMLElement {
+  constructor() {
+    super();
+    const shadowRoot = this.attachShadow({mode: 'open'});
+    shadowRoot.innerHTML = templateText;
+    this.addEventListener('click', e => this.handleClick(e));
+    this.addEventListener('dragover', e => this.handleDragOver(e));
+    this.addEventListener('drop', e => this.handleChange(e));
+    this.$('#file').addEventListener('change', e => this.handleChange(e));
+    this.$('#fileReader').addEventListener('keydown', e => this.handleKeyEvent(e));
+  }
+
+  $(id) {
+    return this.shadowRoot.querySelector(id);
+  }
+
+  get section() {
+    return this.$('#fileReaderSection');
+  }
+
+  updateLabel(text) {
+    this.$('#label').innerText = text;
+  }
+
+  handleKeyEvent(event) {
+    if (event.key == "Enter") this.handleClick(event);
+  }
+
+  handleClick(event) {
+    this.$('#file').click();
+  }
+
+  handleChange(event) {
+    // Used for drop and file change.
+    event.preventDefault();
+    var host = event.dataTransfer ? event.dataTransfer : event.target;
+    this.readFile(host.files[0]);
+  }
+
+  handleDragOver(event) {
+    event.preventDefault();
+  }
+
+  connectedCallback() {
+    this.$('#fileReader').focus();
+  }
+
+  readFile(file) {
+    if (!file) {
+      this.updateLabel('Failed to load file.');
+      return;
+    }
+    this.$('#fileReader').blur();
+
+    this.section.className = 'loading';
+    const reader = new FileReader();
+
+    if (['application/gzip', 'application/x-gzip'].includes(file.type)) {
+      reader.onload = (e) => {
+        try {
+          const textResult = pako.inflate(e.target.result, {to: 'string'});
+          this.processRawText(file, textResult);
+          this.section.className = 'success';
+          this.$('#fileReader').classList.add('done');
+        } catch (err) {
+          console.error(err);
+          this.section.className = 'failure';
+        }
+      };
+      // Delay the loading a bit to allow for CSS animations to happen.
+      setTimeout(() => reader.readAsArrayBuffer(file), 0);
+    } else {
+      reader.onload = (e) => {
+        try {
+          this.processRawText(file, e.target.result);
+          this.section.className = 'success';
+          this.$('#fileReader').classList.add('done');
+        } catch (err) {
+          console.error(err);
+          this.section.className = 'failure';
+        }
+      };
+      // Delay the loading a bit to allow for CSS animations to happen.
+      setTimeout(() => reader.readAsText(file), 0);
+    }
+  }
+
+  processRawText(file, result) {
+    let return_data;
+    if (result.includes('V8.GC_Objects_Stats')) {
+      return_data = this.createModelFromChromeTraceFile(result);
+    } else {
+      let contents = result.split('\n');
+      return_data = this.createModelFromV8TraceFile(contents);
+    }
+    this.extendAndSanitizeModel(return_data);
+    this.updateLabel('Finished loading \'' + file.name + '\'.');
+    this.dispatchEvent(new CustomEvent(
+        'change', {bubbles: true, composed: true, detail: return_data}));
+  }
+
+  createOrUpdateEntryIfNeeded(data, entry) {
+    console.assert(entry.isolate, 'entry should have an isolate');
+    if (!(entry.isolate in data)) {
+      data[entry.isolate] = new Isolate(entry.isolate);
+    }
+    const data_object = data[entry.isolate];
+    if (('id' in entry) && !(entry.id in data_object.gcs)) {
+      data_object.gcs[entry.id] = {non_empty_instance_types: new Set()};
+    }
+    if ('time' in entry) {
+      if (data_object.end === null || data_object.end < entry.time) {
+        data_object.end = entry.time;
+      }
+      if (data_object.start === null || data_object.start > entry.time) {
+        data_object.start = entry.time;
+      }
+    }
+  }
+
+  createDatasetIfNeeded(data, entry, data_set) {
+    if (!(data_set in data[entry.isolate].gcs[entry.id])) {
+      data[entry.isolate].gcs[entry.id][data_set] = {
+        instance_type_data: {},
+        non_empty_instance_types: new Set(),
+        overall: 0
+      };
+      data[entry.isolate].data_sets.add(data_set);
+    }
+  }
+
+  addFieldTypeData(data, isolate, gc_id, data_set, tagged_fields,
+                   inobject_smi_fields, embedder_fields, unboxed_double_fields,
+                   boxed_double_fields, string_data, other_raw_fields) {
+    data[isolate].gcs[gc_id][data_set].field_data = {
+      tagged_fields,
+      inobject_smi_fields,
+      embedder_fields,
+      unboxed_double_fields,
+      boxed_double_fields,
+      string_data,
+      other_raw_fields
+    };
+  }
+
+  addInstanceTypeData(data, isolate, gc_id, data_set, instance_type, entry) {
+    data[isolate].gcs[gc_id][data_set].instance_type_data[instance_type] = {
+      overall: entry.overall,
+      count: entry.count,
+      histogram: entry.histogram,
+      over_allocated: entry.over_allocated,
+      over_allocated_histogram: entry.over_allocated_histogram
+    };
+    data[isolate].gcs[gc_id][data_set].overall += entry.overall;
+    if (entry.overall !== 0) {
+      data[isolate].gcs[gc_id][data_set].non_empty_instance_types.add(
+          instance_type);
+      data[isolate].gcs[gc_id].non_empty_instance_types.add(instance_type);
+      data[isolate].non_empty_instance_types.add(instance_type);
+    }
+  }
+
+  extendAndSanitizeModel(data) {
+    const checkNonNegativeProperty = (obj, property) => {
+      console.assert(obj[property] >= 0, 'negative property', obj, property);
+    };
+
+    Object.values(data).forEach(isolate => isolate.finalize());
+  }
+
+  createModelFromChromeTraceFile(contents) {
+    const data = Object.create(null);  // Final data container.
+    const parseOneGCEvent = (actual_data) => {
+      Object.keys(actual_data).forEach(data_set => {
+        const string_entry = actual_data[data_set];
+        try {
+          const entry = JSON.parse(string_entry);
+          this.createOrUpdateEntryIfNeeded(data, entry);
+          this.createDatasetIfNeeded(data, entry, data_set);
+          const isolate = entry.isolate;
+          const time = entry.time;
+          const gc_id = entry.id;
+          data[isolate].gcs[gc_id].time = time;
+
+          const field_data = entry.field_data;
+          this.addFieldTypeData(data, isolate, gc_id, data_set,
+            field_data.tagged_fields,
+            field_data.inobject_smi_fields,
+            field_data.embedder_fields,
+            field_data.unboxed_double_fields,
+            field_data.boxed_double_fields,
+            field_data.string_data,
+            field_data.other_raw_fields);
+
+          data[isolate].gcs[gc_id][data_set].bucket_sizes =
+              entry.bucket_sizes;
+          for (let [instance_type, value] of Object.entries(
+                   entry.type_data)) {
+            // Trace file format uses markers that do not have actual
+            // properties.
+            if (!('overall' in value)) continue;
+            this.addInstanceTypeData(
+                data, isolate, gc_id, data_set, instance_type, value);
+          }
+        } catch (e) {
+          console.error('Unable to parse data set entry', e);
+        }
+      });
+    };
+    console.log(`Processing log as chrome trace file.`);
+    try {
+      let gc_events_filter = (event) => {
+        if (event.name == 'V8.GC_Objects_Stats') {
+          parseOneGCEvent(event.args);
+        }
+        return oboe.drop;
+      };
+
+      let oboe_stream = oboe();
+      // Trace files support two formats.
+      oboe_stream
+          // 1) {traceEvents: [ data ]}
+          .node('traceEvents.*', gc_events_filter)
+          // 2) [ data ]
+          .node('!.*', gc_events_filter)
+          .fail(() => { throw new Error("Trace data parse failed!"); });
+      oboe_stream.emit('data', contents);
+    } catch (e) {
+      console.error('Unable to parse chrome trace file.', e);
+    }
+    return data;
+  }
+
+  createModelFromV8TraceFile(contents) {
+    console.log('Processing log as V8 trace file.');
+    contents = contents.map(function(line) {
+      try {
+        // Strip away a potentially present adb logcat prefix.
+        line = line.replace(/^I\/v8\s*\(\d+\):\s+/g, '');
+        return JSON.parse(line);
+      } catch (e) {
+        console.log('Unable to parse line: \'' + line + '\' (' + e + ')');
+      }
+      return null;
+    });
+
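+    // Each surviving line is a JSON object dispatched on its 'type' field:
+    // 'zone', 'zonecreation'/'zonedestruction', 'gc_descriptor',
+    // 'field_data', 'instance_type_data' and 'bucket_sizes'.
+    // Unknown types are logged and skipped.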
+    const data = Object.create(null);  // Final data container.
+    for (var entry of contents) {
+      if (entry === null || entry.type === undefined) {
+        continue;
+      }
+      if (entry.type === 'zone') {
+        this.createOrUpdateEntryIfNeeded(data, entry);
+        const stacktrace = ('stacktrace' in entry) ? entry.stacktrace : [];
+        data[entry.isolate].samples.zone[entry.time] = {
+          allocated: entry.allocated,
+          pooled: entry.pooled,
+          stacktrace: stacktrace
+        };
+      } else if (
+          entry.type === 'zonecreation' || entry.type === 'zonedestruction') {
+        this.createOrUpdateEntryIfNeeded(data, entry);
+        data[entry.isolate].zonetags.push(
+            Object.assign({opening: entry.type === 'zonecreation'}, entry));
+      } else if (entry.type === 'gc_descriptor') {
+        this.createOrUpdateEntryIfNeeded(data, entry);
+        data[entry.isolate].gcs[entry.id].time = entry.time;
+        if ('zone' in entry)
+          data[entry.isolate].gcs[entry.id].malloced = entry.zone;
+      } else if (entry.type === 'field_data') {
+        this.createOrUpdateEntryIfNeeded(data, entry);
+        this.createDatasetIfNeeded(data, entry, entry.key);
+        // Keep the argument order in sync with addFieldTypeData's signature
+        // (inobject_smi_fields comes before embedder_fields).
+        this.addFieldTypeData(data, entry.isolate, entry.id, entry.key,
+          entry.tagged_fields, entry.inobject_smi_fields, entry.embedder_fields,
+          entry.unboxed_double_fields, entry.boxed_double_fields,
+          entry.string_data, entry.other_raw_fields);
+      } else if (entry.type === 'instance_type_data') {
+        if (entry.id in data[entry.isolate].gcs) {
+          this.createOrUpdateEntryIfNeeded(data, entry);
+          this.createDatasetIfNeeded(data, entry, entry.key);
+          this.addInstanceTypeData(
+              data, entry.isolate, entry.id, entry.key,
+              entry.instance_type_name, entry);
+        }
+      } else if (entry.type === 'bucket_sizes') {
+        if (entry.id in data[entry.isolate].gcs) {
+          this.createOrUpdateEntryIfNeeded(data, entry);
+          this.createDatasetIfNeeded(data, entry, entry.key);
+          data[entry.isolate].gcs[entry.id][entry.key].bucket_sizes =
+              entry.sizes;
+        }
+      } else {
+        console.log('Unknown entry type: ' + entry.type);
+      }
+    }
+    return data;
+  }
+});
diff --git a/src/third_party/v8/tools/ic-processor b/src/third_party/v8/tools/ic-processor
new file mode 100755
index 0000000..7b886b8
--- /dev/null
+++ b/src/third_party/v8/tools/ic-processor
@@ -0,0 +1,37 @@
+#!/bin/sh
+
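+# Runs the IC processor driver (ic-processor-driver.mjs) in d8 over a V8 log
+# file (default: v8.log). The last argument that does not start with a dash is
+# used as the log file name; all arguments are forwarded to the driver.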
+# Find the name of the log file to process; it must not start with a dash.
+log_file="v8.log"
+for arg in "$@"
+do
+  if ! expr "X${arg}" : "^X-" > /dev/null; then
+    log_file=${arg}
+  fi
+done
+
+tools_path=`cd $(dirname "$0");pwd`
+if [ ! "$D8_PATH" ]; then
+  d8_public=`which d8`
+  if [ -x "$d8_public" ]; then D8_PATH=$(dirname "$d8_public"); fi
+fi
+[ -n "$D8_PATH" ] || D8_PATH=$tools_path/..
+d8_exec=$D8_PATH/d8
+
+if [ ! -x "$d8_exec" ]; then
+  D8_PATH=`pwd`/out/native
+  d8_exec=$D8_PATH/d8
+fi
+
+if [ ! -x "$d8_exec" ]; then
+  d8_exec=`grep -m 1 -o '".*/d8"' $log_file | sed 's/"//g'`
+fi
+
+if [ ! -x "$d8_exec" ]; then
+  echo "d8 shell not found in $D8_PATH"
+  echo "To build, execute 'make native' from the V8 directory"
+  exit 1
+fi
+
+# nm spits out 'no symbols found' messages to stderr.
+cat $log_file | $d8_exec \
+  --module $tools_path/ic-processor-driver.mjs -- $@ 2>/dev/null
diff --git a/src/third_party/v8/tools/ic-processor-driver.mjs b/src/third_party/v8/tools/ic-processor-driver.mjs
new file mode 100644
index 0000000..ef6d83e
--- /dev/null
+++ b/src/third_party/v8/tools/ic-processor-driver.mjs
@@ -0,0 +1,94 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import { Processor } from "./system-analyzer/processor.mjs";
+import { WebInspector } from "./sourcemap.mjs";
+import { BaseArgumentsProcessor } from "./arguments.mjs";
+
+function processArguments(args) {
+  const processor = new ArgumentsProcessor(args);
+  if (processor.parse()) {
+    return processor.result();
+  } else {
+    processor.printUsageAndExit();
+  }
+}
+
+/**
+ * A thin wrapper around shell's 'read' function showing a file name on error.
+ */
+export function readFile(fileName) {
+  try {
+    return read(fileName);
+  } catch (e) {
+    print(fileName + ': ' + (e.message || e));
+    throw e;
+  }
+}
+
+function initSourceMapSupport() {
+  // Pull dev tools source maps into our name space.
+  SourceMap = WebInspector.SourceMap;
+
+  // Overwrite the load function to load scripts synchronously.
+  SourceMap.load = function(sourceMapURL) {
+    const content = readFile(sourceMapURL);
+    const sourceMapObject = (JSON.parse(content));
+    return new SourceMap(sourceMapURL, sourceMapObject);
+  };
+}
+
+class ArgumentsProcessor extends BaseArgumentsProcessor {
+  getArgsDispatch() {
+    return {
+      '--range': ['range', 'auto,auto',
+          'Specify the range limit as [start],[end]'],
+      '--source-map': ['sourceMap', null,
+          'Specify the source map that should be used for output']
+    };
+  }
+  getDefaultResults() {
+   return {
+      logFileName: 'v8.log',
+      range: 'auto,auto',
+    };
+  }
+}
+
+const params = processArguments(arguments);
+let sourceMap = null;
+if (params.sourceMap) {
+  initSourceMapSupport();
+  sourceMap = SourceMap.load(params.sourceMap);
+}
+const processor = new Processor();
+processor.processLogFile(params.logFileName);
+
+const typeAccumulator = new Map();
+
+const accumulator = {
+  __proto__: null, 
+  LoadGlobalIC: 0,
+  StoreGlobalIC: 0,
+  LoadIC: 0,
+  StoreIC: 0,
+  KeyedLoadIC: 0,
+  KeyedStoreIC: 0,
+  StoreInArrayLiteralIC: 0, 
+}
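+// For every IC event in the processed log, print one line with its type,
+// state transition, source position, key, map, optional reason and time, and
+// count it in the accumulator; a per-type summary is printed at the end.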
+for (const ic of processor.icTimeline.all) {
+  print(
+      ic.type + ' (' + ic.oldState + '->' + ic.newState + ic.modifier + ') at ' +
+      ic.filePosition + ' ' + ic.key +
+      ' (map 0x' + ic.map.toString(16) + ')' +
+      (ic.reason ? ` ${ic.reason}` : '') + ' time: ' + ic.time);
+  accumulator[ic.type]++;
+}
+
+print("========================================");
+for (const key of Object.keys(accumulator)) {
+  print(key + ": " + accumulator[key]);
+}
+
+
diff --git a/src/third_party/v8/tools/ignition/bytecode_dispatches_report.py b/src/third_party/v8/tools/ignition/bytecode_dispatches_report.py
new file mode 100755
index 0000000..aa5a9c9
--- /dev/null
+++ b/src/third_party/v8/tools/ignition/bytecode_dispatches_report.py
@@ -0,0 +1,284 @@
+#! /usr/bin/python
+#
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import argparse
+import heapq
+import json
+from matplotlib import colors
+from matplotlib import pyplot
+import numpy
+import struct
+import sys
+
+
+__DESCRIPTION = """
+Process v8.ignition_dispatches_table.json and list top counters,
+or plot a dispatch heatmap.
+
+Please note that handlers that do not or can never dispatch
+(e.g. Return or Throw) do not show up in the results.
+"""
+
+
+__HELP_EPILOGUE = """
+examples:
+  # Print the hottest bytecodes in descending order, reading from
+  # default filename v8.ignition_dispatches_table.json (default mode)
+  $ tools/ignition/bytecode_dispatches_report.py
+
+  # Print the hottest 15 bytecode dispatch pairs reading from data.json
+  $ tools/ignition/bytecode_dispatches_report.py -t -n 15 data.json
+
+  # Save heatmap to default filename v8.ignition_dispatches_table.svg
+  $ tools/ignition/bytecode_dispatches_report.py -p
+
+  # Save heatmap to filename data.svg
+  $ tools/ignition/bytecode_dispatches_report.py -p -o data.svg
+
+  # Open the heatmap in an interactive viewer
+  $ tools/ignition/bytecode_dispatches_report.py -p -i
+
+  # Display the top 5 sources and destinations of dispatches to/from LdaZero
+  $ tools/ignition/bytecode_dispatches_report.py -f LdaZero -n 5
+"""
+
+__COUNTER_BITS = struct.calcsize("P") * 8  # Size in bits of a pointer
+__COUNTER_MAX = 2**__COUNTER_BITS - 1
+
+
+def warn_if_counter_may_have_saturated(dispatches_table):
+  for source, counters_from_source in iteritems(dispatches_table):
+    for destination, counter in iteritems(counters_from_source):
+      if counter == __COUNTER_MAX:
+        print("WARNING: {} -> {} may have saturated.".format(source,
+                                                             destination))
+
+
+def find_top_bytecode_dispatch_pairs(dispatches_table, top_count):
+  def flattened_counters_generator():
+    for source, counters_from_source in iteritems(dispatches_table):
+      for destination, counter in iteritems(counters_from_source):
+        yield source, destination, counter
+
+  return heapq.nlargest(top_count, flattened_counters_generator(),
+                        key=lambda x: x[2])
+
+
+def print_top_bytecode_dispatch_pairs(dispatches_table, top_count):
+  top_bytecode_dispatch_pairs = (
+    find_top_bytecode_dispatch_pairs(dispatches_table, top_count))
+  print("Top {} bytecode dispatch pairs:".format(top_count))
+  for source, destination, counter in top_bytecode_dispatch_pairs:
+    print("{:>12d}\t{} -> {}".format(counter, source, destination))
+
+
+def find_top_bytecodes(dispatches_table):
+  top_bytecodes = []
+  for bytecode, counters_from_bytecode in iteritems(dispatches_table):
+    top_bytecodes.append((bytecode, sum(itervalues(counters_from_bytecode))))
+
+  top_bytecodes.sort(key=lambda x: x[1], reverse=True)
+  return top_bytecodes
+
+
+def print_top_bytecodes(dispatches_table):
+  top_bytecodes = find_top_bytecodes(dispatches_table)
+  print("Top bytecodes:")
+  for bytecode, counter in top_bytecodes:
+    print("{:>12d}\t{}".format(counter, bytecode))
+
+
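+# Returns two lists of (bytecode, count, ratio) tuples: the top sources that
+# dispatch to `bytecode` and the top destinations `bytecode` dispatches to.
+# `ratio` is the count divided by the source's (respectively this bytecode's)
+# total number of outgoing dispatches.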
+def find_top_dispatch_sources_and_destinations(
+    dispatches_table, bytecode, top_count, sort_source_relative):
+  sources = []
+  for source, destinations in iteritems(dispatches_table):
+    total = float(sum(itervalues(destinations)))
+    if bytecode in destinations:
+      count = destinations[bytecode]
+      sources.append((source, count, count / total))
+
+  destinations = []
+  bytecode_destinations = dispatches_table[bytecode]
+  bytecode_total = float(sum(itervalues(bytecode_destinations)))
+  for destination, count in iteritems(bytecode_destinations):
+    destinations.append((destination, count, count / bytecode_total))
+
+  return (heapq.nlargest(top_count, sources,
+                         key=lambda x: x[2 if sort_source_relative else 1]),
+          heapq.nlargest(top_count, destinations, key=lambda x: x[1]))
+
+
+def print_top_dispatch_sources_and_destinations(dispatches_table, bytecode,
+                                                top_count, sort_relative):
+  top_sources, top_destinations = find_top_dispatch_sources_and_destinations(
+      dispatches_table, bytecode, top_count, sort_relative)
+  print("Top sources of dispatches to {}:".format(bytecode))
+  for source_name, counter, ratio in top_sources:
+    print("{:>12d}\t{:>5.1f}%\t{}".format(counter, ratio * 100, source_name))
+
+  print("\nTop destinations of dispatches from {}:".format(bytecode))
+  for destination_name, counter, ratio in top_destinations:
+    print("{:>12d}\t{:>5.1f}%\t{}".format(counter, ratio * 100, destination_name))
+
+
+def build_counters_matrix(dispatches_table):
+  labels = sorted(dispatches_table.keys())
+
+  counters_matrix = numpy.empty([len(labels), len(labels)], dtype=int)
+  for from_index, from_name in enumerate(labels):
+    current_row = dispatches_table[from_name]
+    for to_index, to_name in enumerate(labels):
+      counters_matrix[from_index, to_index] = current_row.get(to_name, 0)
+
+  # Reverse y axis for a nicer appearance
+  xlabels = labels
+  ylabels = list(reversed(xlabels))
+  counters_matrix = numpy.flipud(counters_matrix)
+
+  return counters_matrix, xlabels, ylabels
+
+
+def plot_dispatches_table(dispatches_table, figure, axis):
+  counters_matrix, xlabels, ylabels = build_counters_matrix(dispatches_table)
+
+  image = axis.pcolor(
+    counters_matrix,
+    cmap="jet",
+    norm=colors.LogNorm(),
+    edgecolor="grey",
+    linestyle="dotted",
+    linewidth=0.5
+  )
+
+  axis.xaxis.set(
+    ticks=numpy.arange(0.5, len(xlabels)),
+    label="From bytecode handler"
+  )
+  axis.xaxis.tick_top()
+  axis.set_xlim(0, len(xlabels))
+  axis.set_xticklabels(xlabels, rotation="vertical")
+
+  axis.yaxis.set(
+    ticks=numpy.arange(0.5, len(ylabels)),
+    label="To bytecode handler",
+    ticklabels=ylabels
+  )
+  axis.set_ylim(0, len(ylabels))
+
+  figure.colorbar(
+    image,
+    ax=axis,
+    fraction=0.01,
+    pad=0.01
+  )
+
+
+def parse_command_line():
+  command_line_parser = argparse.ArgumentParser(
+    formatter_class=argparse.RawDescriptionHelpFormatter,
+    description=__DESCRIPTION,
+    epilog=__HELP_EPILOGUE
+  )
+  command_line_parser.add_argument(
+    "--plot-size", "-s",
+    metavar="N",
+    type=int,
+    default=30,
+    help="shorter side in inches of the output plot (default 30)"
+  )
+  command_line_parser.add_argument(
+    "--plot", "-p",
+    action="store_true",
+    help="plot dispatch pairs heatmap"
+  )
+  command_line_parser.add_argument(
+    "--interactive", "-i",
+    action="store_true",
+    help="open the heatmap in an interactive viewer, instead of writing to file"
+  )
+  command_line_parser.add_argument(
+    "--top-bytecode-dispatch-pairs", "-t",
+    action="store_true",
+    help="print the top bytecode dispatch pairs"
+  )
+  command_line_parser.add_argument(
+    "--top-entries-count", "-n",
+    metavar="N",
+    type=int,
+    default=10,
+    help="print N top entries when running with -t or -f (default 10)"
+  )
+  command_line_parser.add_argument(
+    "--top-dispatches-for-bytecode", "-f",
+    metavar="<bytecode name>",
+    help="print top dispatch sources and destinations to the specified bytecode"
+  )
+  command_line_parser.add_argument(
+    "--output-filename", "-o",
+    metavar="<output filename>",
+    default="v8.ignition_dispatches_table.svg",
+    help=("file to save the plot file to. File type is deduced from the "
+          "extension. PDF, SVG, PNG supported")
+  )
+  command_line_parser.add_argument(
+    "--sort-sources-relative", "-r",
+    action="store_true",
+    help=("print top sources in order of how often they dispatch to the "
+          "specified bytecode, only applied when using -f")
+  )
+  command_line_parser.add_argument(
+    "input_filename",
+    metavar="<input filename>",
+    default="v8.ignition_dispatches_table.json",
+    nargs='?',
+    help="Ignition counters JSON file"
+  )
+
+  return command_line_parser.parse_args()
+
+
+def itervalues(d):
+  return d.values() if sys.version_info[0] > 2 else d.itervalues()
+
+
+def iteritems(d):
+  return d.items() if sys.version_info[0] > 2 else d.iteritems()
+
+
+def main():
+  program_options = parse_command_line()
+
+  with open(program_options.input_filename) as stream:
+    dispatches_table = json.load(stream)
+
+  warn_if_counter_may_have_saturated(dispatches_table)
+
+  if program_options.plot:
+    figure, axis = pyplot.subplots()
+    plot_dispatches_table(dispatches_table, figure, axis)
+
+    if program_options.interactive:
+      pyplot.show()
+    else:
+      figure.set_size_inches(program_options.plot_size,
+                             program_options.plot_size)
+      pyplot.savefig(program_options.output_filename)
+  elif program_options.top_bytecode_dispatch_pairs:
+    print_top_bytecode_dispatch_pairs(
+      dispatches_table, program_options.top_entries_count)
+  elif program_options.top_dispatches_for_bytecode:
+    print_top_dispatch_sources_and_destinations(
+      dispatches_table, program_options.top_dispatches_for_bytecode,
+      program_options.top_entries_count, program_options.sort_sources_relative)
+  else:
+    print_top_bytecodes(dispatches_table)
+
+
+if __name__ == "__main__":
+  main()
diff --git a/src/third_party/v8/tools/ignition/bytecode_dispatches_report_test.py b/src/third_party/v8/tools/ignition/bytecode_dispatches_report_test.py
new file mode 100644
index 0000000..9be19e7
--- /dev/null
+++ b/src/third_party/v8/tools/ignition/bytecode_dispatches_report_test.py
@@ -0,0 +1,62 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import bytecode_dispatches_report as bdr
+import unittest
+
+
+class BytecodeDispatchesReportTest(unittest.TestCase):
+  def test_find_top_counters(self):
+    top_counters = bdr.find_top_bytecode_dispatch_pairs({
+      "a": {"a": 10, "b": 8, "c": 99},
+      "b": {"a":  1, "b": 4, "c":  1},
+      "c": {"a": 42, "b": 3, "c":  7}}, 5)
+    self.assertListEqual(top_counters, [
+      ('a', 'c', 99),
+      ('c', 'a', 42),
+      ('a', 'a', 10),
+      ('a', 'b',  8),
+      ('c', 'c',  7)])
+
+  def test_build_counters_matrix(self):
+    counters_matrix, xlabels, ylabels = bdr.build_counters_matrix({
+      "a": {"a": 10, "b":  8, "c":  7},
+      "b": {"a":  1, "c":  4},
+      "c": {"a": 42, "b": 12, "c": 99}})
+    self.assertTrue((counters_matrix == [[42, 12, 99],
+                                         [ 1,  0,  4],
+                                         [10,  8,  7]]).all())
+    self.assertListEqual(xlabels, ['a', 'b', 'c'])
+    self.assertListEqual(ylabels, ['c', 'b', 'a'])
+
+  def test_find_top_bytecodes(self):
+    top_dispatch_sources = bdr.find_top_bytecodes({
+      "a": {"a": 10, "b":  8, "c":  7},
+      "b": {"a":  1, "c":  4},
+      "c": {"a": 42, "b": 12, "c": 99}
+    })
+    self.assertListEqual(top_dispatch_sources, [
+      ('c', 153),
+      ('a',  25),
+      ('b',   5)
+    ])
+
+  def test_find_top_dispatch_sources_and_destinations(self):
+    d = {
+      "a": {"a":  4, "b":  2, "c":  4},
+      "b": {"a":  1, "c":  4},
+      "c": {"a": 40, "b": 10, "c": 50}
+    }
+    top_sources, top_dests = bdr.find_top_dispatch_sources_and_destinations(
+      d, "b", 10, False)
+    self.assertListEqual(top_sources, [
+      ("c", 10, 0.1),
+      ("a", 2, 0.2)
+    ])
+    top_sources, top_dests = bdr.find_top_dispatch_sources_and_destinations(
+      d, "b", 10, True)
+    self.assertListEqual(top_sources, [
+      ("a", 2, 0.2),
+      ("c", 10, 0.1)
+    ])
diff --git a/src/third_party/v8/tools/ignition/linux_perf_bytecode_annotate.py b/src/third_party/v8/tools/ignition/linux_perf_bytecode_annotate.py
new file mode 100755
index 0000000..96cb0ee
--- /dev/null
+++ b/src/third_party/v8/tools/ignition/linux_perf_bytecode_annotate.py
@@ -0,0 +1,177 @@
+#! /usr/bin/python
+#
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import argparse
+import collections
+import os
+import subprocess
+import sys
+
+
+__DESCRIPTION = """
+Processes a perf.data sample file and annotates the hottest instructions in a
+given bytecode handler.
+"""
+
+
+__HELP_EPILOGUE = """
+Note:
+  This tool uses the disassembly of interpreter's bytecode handler codegen
+  from out/<arch>.debug/d8. You should ensure that this binary is in sync with
+  the version used to generate the perf profile.
+
+  Also, the tool depends on the symbol offsets from perf samples being accurate.
+  As such, you should use the ":pp" suffix for events.
+
+Examples:
+  EVENT_TYPE=cycles:pp tools/run-perf.sh out/x64.release/d8
+  tools/ignition/linux_perf_bytecode_annotate.py Add
+"""
+
+
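+# Yields the instruction offsets (parsed from the "+0x..." suffix) of perf
+# samples that hit the requested bytecode handler. Only the first
+# BytecodeHandler frame encountered in each callchain is considered; the rest
+# of the chain is skipped.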
+def bytecode_offset_generator(perf_stream, bytecode_name):
+  skip_until_end_of_chain = False
+  bytecode_symbol = "BytecodeHandler:" + bytecode_name
+
+  for line in perf_stream:
+    # Lines starting with a "#" are comments, skip them.
+    if line[0] == "#":
+      continue
+    line = line.strip()
+
+    # Empty line signals the end of the callchain.
+    if not line:
+      skip_until_end_of_chain = False
+      continue
+
+    if skip_until_end_of_chain:
+      continue
+
+    symbol_and_offset = line.split(" ", 1)[1]
+
+    if symbol_and_offset.startswith("BytecodeHandler:"):
+      skip_until_end_of_chain = True
+
+      if symbol_and_offset.startswith(bytecode_symbol):
+        yield int(symbol_and_offset.split("+", 1)[1], 16)
+
+
+def bytecode_offset_counts(bytecode_offsets):
+  offset_counts = collections.defaultdict(int)
+  for offset in bytecode_offsets:
+    offset_counts[offset] += 1
+  return offset_counts
+
+
+def bytecode_disassembly_generator(ignition_codegen, bytecode_name):
+  name_string = "name = " + bytecode_name
+  for line in ignition_codegen:
+    if line.startswith(name_string):
+      break
+
+  # Found the bytecode disassembly.
+  for line in ignition_codegen:
+    line = line.strip()
+    # Blank line marks the end of the bytecode's disassembly.
+    if not line:
+      return
+
+    # Only yield disassembly output.
+    if not line.startswith("0x"):
+      continue
+
+    yield line
+
+
+def print_disassembly_annotation(offset_counts, bytecode_disassembly):
+  total = sum(offset_counts.values())
+  offsets = sorted(offset_counts, reverse=True)
+  def next_offset():
+    return offsets.pop() if offsets else -1
+
+  current_offset = next_offset()
+  print(current_offset)
+
+  for line in bytecode_disassembly:
+    disassembly_offset = int(line.split()[1])
+    if disassembly_offset == current_offset:
+      count = offset_counts[current_offset]
+      percentage = 100.0 * count / total
+      print("{:>8d} ({:>5.1f}%) ".format(count, percentage), end=' ')
+      current_offset = next_offset()
+    else:
+      print("                ", end=' ')
+    print(line)
+
+  if offsets:
+    print ("WARNING: Offsets not empty. Output is most likely invalid due to "
+           "a mismatch between perf output and debug d8 binary.")
+
+
+def parse_command_line():
+  command_line_parser = argparse.ArgumentParser(
+      formatter_class=argparse.RawDescriptionHelpFormatter,
+      description=__DESCRIPTION,
+      epilog=__HELP_EPILOGUE)
+
+  command_line_parser.add_argument(
+      "--arch", "-a",
+      help="The architecture (default: x64)",
+      default="x64",
+  )
+  command_line_parser.add_argument(
+      "--input", "-i",
+      help="perf sample file to process (default: perf.data)",
+      default="perf.data",
+      metavar="<perf filename>",
+      dest="perf_filename"
+  )
+  command_line_parser.add_argument(
+      "--output", "-o",
+      help="output file name (stdout if omitted)",
+      type=argparse.FileType("wt"),
+      default=sys.stdout,
+      metavar="<output filename>",
+      dest="output_stream"
+  )
+  command_line_parser.add_argument(
+      "bytecode_name",
+      metavar="<bytecode name>",
+      nargs="?",
+      help="The bytecode handler to annotate"
+  )
+
+  return command_line_parser.parse_args()
+
+
+def main():
+  program_options = parse_command_line()
+  perf = subprocess.Popen(["perf", "script", "-f", "ip,sym,symoff",
+                           "-i", program_options.perf_filename],
+                          stdout=subprocess.PIPE)
+
+  v8_root_path = os.path.dirname(__file__) + "/../../"
+  d8_path = "{}/out/{}.debug/d8".format(v8_root_path, program_options.arch)
+  d8_codegen = subprocess.Popen([d8_path, "--trace-ignition-codegen",
+                                 "-e", "1"],
+                                stdout=subprocess.PIPE)
+
+  bytecode_offsets = bytecode_offset_generator(
+      perf.stdout, program_options.bytecode_name)
+  offset_counts = bytecode_offset_counts(bytecode_offsets)
+
+  bytecode_disassembly = bytecode_disassembly_generator(
+      d8_codegen.stdout, program_options.bytecode_name)
+
+  print_disassembly_annotation(offset_counts, bytecode_disassembly)
+
+
+if __name__ == "__main__":
+  main()
diff --git a/src/third_party/v8/tools/ignition/linux_perf_bytecode_annotate_test.py b/src/third_party/v8/tools/ignition/linux_perf_bytecode_annotate_test.py
new file mode 100644
index 0000000..15abbed
--- /dev/null
+++ b/src/third_party/v8/tools/ignition/linux_perf_bytecode_annotate_test.py
@@ -0,0 +1,85 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import StringIO
+import unittest
+import linux_perf_bytecode_annotate as bytecode_annotate
+
+
+PERF_SCRIPT_OUTPUT = """
+# This line is a comment
+# This should be ignored too
+#
+#    cdefab01  aRandomSymbol::Name(to, be, ignored)
+
+   00000000 firstSymbol
+   00000123 secondSymbol
+
+   01234567 foo
+   abcdef76 BytecodeHandler:bar+0x12
+   76543210 baz
+   abcdef76 BytecodeHandler:bar+0x16
+   76543210 baz
+
+   01234567 foo
+   abcdef76 BytecodeHandler:foo+0x1
+   76543210 baz
+   abcdef76 BytecodeHandler:bar+0x2
+   76543210 bar
+
+   abcdef76 BytecodeHandler:bar+0x19
+
+   abcdef76 BytecodeHandler:bar+0x12
+
+   abcdef76 BytecodeHandler:bar+0x12
+"""
+
+
+D8_CODEGEN_OUTPUT = """
+kind = BYTECODE_HANDLER
+name = foo
+compiler = turbofan
+Instructions (size = 3)
+0x3101394a3c0     0  55             push rbp
+0x3101394a3c1     1  ffe3           jmp rbx
+
+kind = BYTECODE_HANDLER
+name = bar
+compiler = turbofan
+Instructions (size = 5)
+0x3101394b3c0     0  55             push rbp
+0x3101394b3c1     1  4883c428       REX.W addq rsp,0x28
+# Unexpected comment
+0x3101394b3c5     5  ffe3           jmp rbx
+
+kind = BYTECODE_HANDLER
+name = baz
+compiler = turbofan
+Instructions (size = 5)
+0x3101394c3c0     0  55             push rbp
+0x3101394c3c1     1  4883c428       REX.W addq rsp,0x28
+0x3101394c3c5     5  ffe3           jmp rbx
+"""
+
+
+class LinuxPerfBytecodeAnnotateTest(unittest.TestCase):
+
+  def test_bytecode_offset_generator(self):
+    perf_stream = StringIO.StringIO(PERF_SCRIPT_OUTPUT)
+    offsets = list(
+        bytecode_annotate.bytecode_offset_generator(perf_stream, "bar"))
+    self.assertListEqual(offsets, [18, 25, 18, 18])
+
+  def test_bytecode_disassembly_generator(self):
+    codegen_stream = StringIO.StringIO(D8_CODEGEN_OUTPUT)
+    disassembly = list(
+        bytecode_annotate.bytecode_disassembly_generator(codegen_stream, "bar"))
+    self.assertListEqual(disassembly, [
+        "0x3101394b3c0     0  55             push rbp",
+        "0x3101394b3c1     1  4883c428       REX.W addq rsp,0x28",
+        "0x3101394b3c5     5  ffe3           jmp rbx"])
+
+
+if __name__ == "__main__":
+  unittest.main()
diff --git a/src/third_party/v8/tools/ignition/linux_perf_report.py b/src/third_party/v8/tools/ignition/linux_perf_report.py
new file mode 100755
index 0000000..268bed5
--- /dev/null
+++ b/src/third_party/v8/tools/ignition/linux_perf_report.py
@@ -0,0 +1,260 @@
+#! /usr/bin/python2
+#
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+
+import argparse
+import collections
+import re
+import subprocess
+import sys
+
+
+__DESCRIPTION = """
+Processes a perf.data sample file and reports the hottest Ignition bytecodes,
+or writes an input file for flamegraph.pl.
+"""
+
+
+__HELP_EPILOGUE = """
+examples:
+  # Get a flamegraph for Ignition bytecode handlers on Octane benchmark,
+  # without considering the time spent compiling JS code, entry trampoline
+  # samples and other non-Ignition samples.
+  #
+  $ tools/run-perf.sh out/x64.release/d8 --noopt run.js
+  $ tools/ignition/linux_perf_report.py --flamegraph -o out.collapsed
+  $ flamegraph.pl --colors js out.collapsed > out.svg
+
+  # Same as above, but show all samples, including time spent compiling JS code,
+  # entry trampoline samples and other samples.
+  $ # ...
+  $ tools/ignition/linux_perf_report.py \\
+      --flamegraph --show-all -o out.collapsed
+  $ # ...
+
+  # Same as above, but show full function signatures in the flamegraph.
+  $ # ...
+  $ tools/ignition/linux_perf_report.py \\
+      --flamegraph --show-full-signatures -o out.collapsed
+  $ # ...
+
+  # See the hottest bytecodes on Octane benchmark, by number of samples.
+  #
+  $ tools/run-perf.sh out/x64.release/d8 --noopt octane/run.js
+  $ tools/ignition/linux_perf_report.py
+"""
+
+
+COMPILER_SYMBOLS_RE = re.compile(
+  r"v8::internal::(?:\(anonymous namespace\)::)?Compile|v8::internal::Parser")
+JIT_CODE_SYMBOLS_RE = re.compile(
+  r"(LazyCompile|Compile|Eval|Script):(\*|~)")
+GC_SYMBOLS_RE = re.compile(
+  r"v8::internal::Heap::CollectGarbage")
+
+
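+# Strips a trailing parameter list from a symbol by scanning backwards to the
+# matching '(' character, e.g. "foo(bar)" -> "foo". Parentheses nested inside
+# template arguments are balanced, so the template part of a symbol such as
+# "bar<thing(with, parens)>(baz)" is preserved.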
+def strip_function_parameters(symbol):
+  if symbol[-1] != ')': return symbol
+  pos = 1
+  parenthesis_count = 0
+  for c in reversed(symbol):
+    if c == ')':
+      parenthesis_count += 1
+    elif c == '(':
+      parenthesis_count -= 1
+    if parenthesis_count == 0:
+      break
+    else:
+      pos += 1
+  return symbol[:-pos]
+
+
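+# Folds perf callchains into collapsed stacks, tagging each yielded chain with
+# a trailing bucket: [interpreter], [jit], [gc], [compiler], [entry trampoline],
+# [misattributed] or [other]. The hide_* flags drop the corresponding buckets.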
+def collapsed_callchains_generator(perf_stream, hide_other=False,
+                                   hide_compiler=False, hide_jit=False,
+                                   hide_gc=False, show_full_signatures=False):
+  current_chain = []
+  skip_until_end_of_chain = False
+  compiler_symbol_in_chain = False
+
+  for line in perf_stream:
+    # Lines starting with a "#" are comments, skip them.
+    if line[0] == "#":
+      continue
+
+    line = line.strip()
+
+    # Empty line signals the end of the callchain.
+    if not line:
+      if (not skip_until_end_of_chain and current_chain
+          and not hide_other):
+        current_chain.append("[other]")
+        yield current_chain
+      # Reset parser status.
+      current_chain = []
+      skip_until_end_of_chain = False
+      compiler_symbol_in_chain = False
+      continue
+
+    if skip_until_end_of_chain:
+      continue
+
+    # Trim the leading address and the trailing +offset, if present.
+    symbol = line.split(" ", 1)[1].split("+", 1)[0]
+    if not show_full_signatures:
+      symbol = strip_function_parameters(symbol)
+
+    # Avoid chains of [unknown]
+    if (symbol == "[unknown]" and current_chain and
+        current_chain[-1] == "[unknown]"):
+      continue
+
+    current_chain.append(symbol)
+
+    if symbol.startswith("BytecodeHandler:"):
+      current_chain.append("[interpreter]")
+      yield current_chain
+      skip_until_end_of_chain = True
+    elif JIT_CODE_SYMBOLS_RE.match(symbol):
+      if not hide_jit:
+        current_chain.append("[jit]")
+        yield current_chain
+        skip_until_end_of_chain = True
+    elif GC_SYMBOLS_RE.match(symbol):
+      if not hide_gc:
+        current_chain.append("[gc]")
+        yield current_chain
+        skip_until_end_of_chain = True
+    elif symbol == "Stub:CEntryStub" and compiler_symbol_in_chain:
+      if not hide_compiler:
+        current_chain.append("[compiler]")
+        yield current_chain
+      skip_until_end_of_chain = True
+    elif COMPILER_SYMBOLS_RE.match(symbol):
+      compiler_symbol_in_chain = True
+    elif symbol == "Builtin:InterpreterEntryTrampoline":
+      if len(current_chain) == 1:
+        yield ["[entry trampoline]"]
+      else:
+        # If we see an InterpreterEntryTrampoline which is not at the top of the
+        # chain and doesn't have a BytecodeHandler above it, then we have
+        # skipped the top BytecodeHandler due to the top-level stub not building
+        # a frame. File the chain in the [misattributed] bucket.
+        current_chain[-1] = "[misattributed]"
+        yield current_chain
+      skip_until_end_of_chain = True
+
+
+def calculate_samples_count_per_callchain(callchains):
+  chain_counters = collections.defaultdict(int)
+  for callchain in callchains:
+    key = ";".join(reversed(callchain))
+    chain_counters[key] += 1
+  return chain_counters.items()
+
+
+def calculate_samples_count_per_handler(callchains):
+  def strip_handler_prefix_if_any(handler):
+    return handler if handler[0] == "[" else handler.split(":", 1)[1]
+
+  handler_counters = collections.defaultdict(int)
+  for callchain in callchains:
+    handler = strip_handler_prefix_if_any(callchain[-1])
+    handler_counters[handler] += 1
+  return handler_counters.items()
+
+
+def write_flamegraph_input_file(output_stream, callchains):
+  for callchain, count in calculate_samples_count_per_callchain(callchains):
+    output_stream.write("{}; {}\n".format(callchain, count))
+
+
+def write_handlers_report(output_stream, callchains):
+  handler_counters = calculate_samples_count_per_handler(callchains)
+  samples_num = sum(counter for _, counter in handler_counters)
+  # Sort by decreasing number of samples
+  handler_counters.sort(key=lambda entry: entry[1], reverse=True)
+  for bytecode_name, count in handler_counters:
+    output_stream.write(
+      "{}\t{}\t{:.3f}%\n".format(bytecode_name, count,
+                                 100. * count / samples_num))
+
+
+def parse_command_line():
+  command_line_parser = argparse.ArgumentParser(
+    formatter_class=argparse.RawDescriptionHelpFormatter,
+    description=__DESCRIPTION,
+    epilog=__HELP_EPILOGUE)
+
+  command_line_parser.add_argument(
+    "perf_filename",
+    help="perf sample file to process (default: perf.data)",
+    nargs="?",
+    default="perf.data",
+    metavar="<perf filename>"
+  )
+  command_line_parser.add_argument(
+    "--flamegraph", "-f",
+    help="output an input file for flamegraph.pl, not a report",
+    action="store_true",
+    dest="output_flamegraph"
+  )
+  command_line_parser.add_argument(
+    "--hide-other",
+    help="Hide other samples",
+    action="store_true"
+  )
+  command_line_parser.add_argument(
+    "--hide-compiler",
+    help="Hide samples during compilation",
+    action="store_true"
+  )
+  command_line_parser.add_argument(
+    "--hide-jit",
+    help="Hide samples from JIT code execution",
+    action="store_true"
+  )
+  command_line_parser.add_argument(
+    "--hide-gc",
+    help="Hide samples from garbage collection",
+    action="store_true"
+  )
+  command_line_parser.add_argument(
+    "--show-full-signatures", "-s",
+    help="show full signatures instead of function names",
+    action="store_true"
+  )
+  command_line_parser.add_argument(
+    "--output", "-o",
+    help="output file name (stdout if omitted)",
+    type=argparse.FileType('wt'),
+    default=sys.stdout,
+    metavar="<output filename>",
+    dest="output_stream"
+  )
+
+  return command_line_parser.parse_args()
+
+
+def main():
+  program_options = parse_command_line()
+
+  perf = subprocess.Popen(["perf", "script", "--fields", "ip,sym",
+                           "-i", program_options.perf_filename],
+                          stdout=subprocess.PIPE)
+
+  callchains = collapsed_callchains_generator(
+    perf.stdout, program_options.hide_other, program_options.hide_compiler,
+    program_options.hide_jit, program_options.hide_gc,
+    program_options.show_full_signatures)
+
+  if program_options.output_flamegraph:
+    write_flamegraph_input_file(program_options.output_stream, callchains)
+  else:
+    write_handlers_report(program_options.output_stream, callchains)
+
+
+if __name__ == "__main__":
+  main()
diff --git a/src/third_party/v8/tools/ignition/linux_perf_report_test.py b/src/third_party/v8/tools/ignition/linux_perf_report_test.py
new file mode 100644
index 0000000..9d163c8
--- /dev/null
+++ b/src/third_party/v8/tools/ignition/linux_perf_report_test.py
@@ -0,0 +1,166 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import linux_perf_report as ipr
+import StringIO
+import unittest
+
+
+PERF_SCRIPT_OUTPUT = """
+# This line is a comment
+# This should be ignored too
+#
+#    cdefab01  aRandomSymbol::Name(to, be, ignored)
+
+   00000000 firstSymbol
+   00000123 secondSymbol
+
+   01234567 foo
+   abcdef76 BytecodeHandler:bar
+   76543210 baz
+
+# Indentation shouldn't matter (neither should this line)
+
+    01234567 foo
+      abcdef76 BytecodeHandler:bar
+        76543210 baz
+
+      01234567 beep
+   abcdef76 BytecodeHandler:bar
+    76543210 baz
+
+   01234567 hello
+   abcdef76 v8::internal::Compiler
+   00000000 Stub:CEntryStub
+   76543210 world
+   11111111 BytecodeHandler:nope
+
+   00000000 Lost
+   11111111 Builtin:InterpreterEntryTrampoline
+   22222222 bar
+
+   00000000 hello
+   11111111 LazyCompile:~Foo
+
+   11111111 Builtin:InterpreterEntryTrampoline
+   22222222 bar
+"""
+
+
+class LinuxPerfReportTest(unittest.TestCase):
+  def test_collapsed_callchains_generator(self):
+    perf_stream = StringIO.StringIO(PERF_SCRIPT_OUTPUT)
+    callchains = list(ipr.collapsed_callchains_generator(perf_stream))
+    self.assertListEqual(callchains, [
+      ['firstSymbol', 'secondSymbol', '[other]'],
+      ["foo", "BytecodeHandler:bar", "[interpreter]"],
+      ["foo", "BytecodeHandler:bar", "[interpreter]"],
+      ["beep", "BytecodeHandler:bar", "[interpreter]"],
+      ["hello", "v8::internal::Compiler", "Stub:CEntryStub", "[compiler]"],
+      ["Lost", "[misattributed]"],
+      ["hello", "LazyCompile:~Foo", "[jit]"],
+      ["[entry trampoline]"],
+    ])
+
+  def test_collapsed_callchains_generator_hide_other(self):
+    perf_stream = StringIO.StringIO(PERF_SCRIPT_OUTPUT)
+    callchains = list(ipr.collapsed_callchains_generator(perf_stream,
+                                                         hide_other=True,
+                                                         hide_compiler=True,
+                                                         hide_jit=True))
+    self.assertListEqual(callchains, [
+      ["foo", "BytecodeHandler:bar", "[interpreter]"],
+      ["foo", "BytecodeHandler:bar", "[interpreter]"],
+      ["beep", "BytecodeHandler:bar", "[interpreter]"],
+      ["Lost", "[misattributed]"],
+      ["[entry trampoline]"],
+    ])
+
+  def test_calculate_samples_count_per_callchain(self):
+    counters = ipr.calculate_samples_count_per_callchain([
+      ["foo", "BytecodeHandler:bar"],
+      ["foo", "BytecodeHandler:bar"],
+      ["beep", "BytecodeHandler:bar"],
+      ["hello", "v8::internal::Compiler", "[compiler]"],
+    ])
+    self.assertItemsEqual(counters, [
+      ('BytecodeHandler:bar;foo', 2),
+      ('BytecodeHandler:bar;beep', 1),
+      ('[compiler];v8::internal::Compiler;hello', 1),
+    ])
+
+  def test_calculate_samples_count_per_callchain_no_compiler(self):
+    counters = ipr.calculate_samples_count_per_callchain([
+      ["foo", "BytecodeHandler:bar"],
+      ["foo", "BytecodeHandler:bar"],
+      ["beep", "BytecodeHandler:bar"],
+    ])
+    self.assertItemsEqual(counters, [
+      ('BytecodeHandler:bar;foo', 2),
+      ('BytecodeHandler:bar;beep', 1),
+    ])
+
+  def test_calculate_samples_count_per_handler_show_compile(self):
+    counters = ipr.calculate_samples_count_per_handler([
+      ["foo", "BytecodeHandler:bar"],
+      ["foo", "BytecodeHandler:bar"],
+      ["beep", "BytecodeHandler:bar"],
+      ["hello", "v8::internal::Compiler", "[compiler]"],
+    ])
+    self.assertItemsEqual(counters, [
+      ("bar", 3),
+      ("[compiler]", 1)
+    ])
+
+  def test_calculate_samples_count_per_handler(self):
+    counters = ipr.calculate_samples_count_per_handler([
+      ["foo", "BytecodeHandler:bar"],
+      ["foo", "BytecodeHandler:bar"],
+      ["beep", "BytecodeHandler:bar"],
+    ])
+    self.assertItemsEqual(counters, [("bar", 3)])
+
+  def test_multiple_handlers(self):
+    perf_stream = StringIO.StringIO("""
+        0000 foo(bar)
+        1234 BytecodeHandler:first
+        5678 a::random::call<to>(something, else)
+        9abc BytecodeHandler:second
+        def0 otherIrrelevant(stuff)
+        1111 entrypoint
+    """)
+    callchains = list(ipr.collapsed_callchains_generator(perf_stream, False))
+    self.assertListEqual(callchains, [
+      ["foo", "BytecodeHandler:first", "[interpreter]"],
+    ])
+
+  def test_compiler_symbols_regex(self):
+    compiler_symbols = [
+      "v8::internal::Parser",
+      "v8::internal::(anonymous namespace)::Compile",
+      "v8::internal::Compiler::foo",
+    ]
+    for compiler_symbol in compiler_symbols:
+      self.assertTrue(ipr.COMPILER_SYMBOLS_RE.match(compiler_symbol))
+
+  def test_jit_code_symbols_regex(self):
+    jit_code_symbols = [
+      "LazyCompile:~Foo blah.js",
+      "Eval:*",
+      "Script:*Bar tmp.js",
+    ]
+    for jit_code_symbol in jit_code_symbols:
+      self.assertTrue(ipr.JIT_CODE_SYMBOLS_RE.match(jit_code_symbol))
+
+  def test_strip_function_parameters(self):
+    def should_match(signature, name):
+      self.assertEqual(ipr.strip_function_parameters(signature), name)
+
+    should_match("foo(bar)", "foo")
+    should_match("Foo(foomatic::(anonymous)::bar(baz))", "Foo")
+    should_match("v8::(anonymous ns)::bar<thing(with, parentheses)>(baz, poe)",
+       "v8::(anonymous ns)::bar<thing(with, parentheses)>")
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/src/third_party/v8/tools/index.html b/src/third_party/v8/tools/index.html
new file mode 100644
index 0000000..93155df
--- /dev/null
+++ b/src/third_party/v8/tools/index.html
@@ -0,0 +1,110 @@
+<!DOCTYPE html>
+<!-- Copyright 2020 the V8 project authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file. -->
+
+<html lang="en">
+
+<head>
+  <meta charset="UTF-8">
+  <title>V8 Tools Landing Page</title>
+<style>
+body {
+  font-family: sans-serif;
+  color: white;
+  margin-left: 5%;
+  margin-right: 5%;
+  background-color: #000000;
+  text-align: center;
+}
+.content{
+  background-color: #000000;
+  padding: 10px 5px 5px 10px ;
+  margin: auto;
+  max-width: 80%;
+}
+a:link, a:visited {
+  background-color: #BB86FC;
+  color: black;
+  padding: 15px 25px;
+  text-align: center;
+  text-decoration: none;
+  display: inline-block;
+}
+a:hover, a:active {
+  background-color: white;
+  color:black;
+}
+.grid-container {
+  display: grid;
+  grid-template-columns: auto auto auto;
+  padding: auto;
+  background-color: #000000;
+  grid-gap: 15px;
+}
+.card {
+  text-align: center;
+  padding: 10px 50px 10px 50px ;
+  box-shadow: 0 4px 8px 0 rgba(0,0,0,0.2);
+  background-color: #121212;
+  width: auto;
+}
+.card:hover {
+  box-shadow: 0 8px 16px 0 rgba(0,0,0,0.2);
+}
+dd, dt {
+  padding: 10px 10px 10px 10px;
+  margin: auto;
+}
+</style>
+</head>
+  <body>
+    <div class="content">
+      <h1>Welcome to the V8 Tools Landing Page</h1>
+      <p>Browse this page to learn about the V8 tools for debugging, tracing and analyzing log files.</p>
+        <dl class="grid-container">
+          <div class="card">
+            <dt><a href="./system-analyzer/index.html">System Analyzer</a></dt>
+            <dd>A unified web interface to trace, debug and analyse patterns of how Maps/ICs are created in real-world applications.</dd>
+          </div>
+          <div class="card">
+            <dt><a href="./callstats.html">Callstats</a></dt>
+            <dd>Visualize and compare runtime call stats.</dd>
+          </div>
+          <div class="card">
+            <dt><a href="./heap-stats/index.html">Heap Stats</a></dt>
+            <dd>Visualize heap memory usage.</dd>
+          </div>
+          <div class="card">
+            <dt><a href="./parse-processor.html">Parse Processor</a></dt>
+            <dd>Analyse parse, compile and first-execution times.</dd>
+          </div>
+          <div class="card">
+            <dt><a href="./profview/index.html">Profview</a></dt>
+            <dd>Fancy sampling profile viewer.</dd>
+          </div>
+          <div class="card">
+            <dt><a href="./tick-processor.html">Tick Processor</a></dt>
+            <dd>Simple sampling profile viewer.</dd>
+          </div>
+          <div class="card">
+            <dt><a href="./turbolizer/index.html">Turbolizer</a></dt>
+            <dd>Visualise the sea of nodes graph generated by TurboFan.</dd>
+          </div>
+          <div class="card">
+            <dt><a href="./zone-stats/index.html">Zone Stats</a></dt>
+            <dd>Analyse zone memory usage.</dd>
+          </div>
+          <div class="card">
+            <dt><a href="https://v8.dev/tools">Other V8 Versions</a></dt>
+            <dd>Check out the V8 website for available tool versions.</dd>
+          </div>
+          <div class="card">
+            <dt><a href="https://v8.dev">V8.Dev</a></dt>
+            <dd>Check out the V8 website for more information.</dd>
+          </div>
+        </dl>
+    </div>
+  </body>
+</html>
diff --git a/src/third_party/v8/tools/inspect-d8.js b/src/third_party/v8/tools/inspect-d8.js
new file mode 100644
index 0000000..b87a758
--- /dev/null
+++ b/src/third_party/v8/tools/inspect-d8.js
@@ -0,0 +1,30 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This helper allows you to debug d8 using Chrome DevTools.
+//
+// It runs a simple REPL for inspector messages and relies on
+// websocketd (https://github.com/joewalnes/websocketd) for the WebSocket
+// communication.
+//
+// You can start a session with a debug build of d8 like:
+//
+// $ websocketd out/x64.debug/d8 YOUR_SCRIPT.js tools/inspect-d8.js
+//
+// After that, copy the URL from the console and pass it as the `ws=` parameter to
+// the Chrome DevTools frontend like:
+//
+// chrome-devtools://devtools/bundled/js_app.html?ws=localhost:80
+
+function receive(msg) {
+  print(msg);
+}
+
+function handleInspectorMessage() {
+  send(readline());
+}
+
+while (true) {
+  handleInspectorMessage();
+}
diff --git a/src/third_party/v8/tools/js2c.py b/src/third_party/v8/tools/js2c.py
new file mode 100755
index 0000000..cc050d5
--- /dev/null
+++ b/src/third_party/v8/tools/js2c.py
@@ -0,0 +1,367 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This is a utility for converting JavaScript source code into C-style
+# char arrays. It is used for embedded JavaScript code in the V8
+# library.
+
+# for py2/py3 compatibility
+from functools import reduce
+
+import os, re
+import optparse
+import textwrap
+
+
+class Error(Exception):
+  def __init__(self, msg):
+    Exception.__init__(self, msg)
+
+
+def ToCArray(byte_sequence):
+  result = []
+  for chr in byte_sequence:
+    result.append(str(ord(chr)))
+  joined = ", ".join(result)
+  return textwrap.fill(joined, 80)
+
+
+def RemoveCommentsEmptyLinesAndWhitespace(lines):
+  lines = re.sub(r'\n+', '\n', lines) # empty lines
+  lines = re.sub(r'//.*\n', '\n', lines) # end-of-line comments
+  lines = re.sub(re.compile(r'/\*.*?\*/', re.DOTALL), '', lines) # comments.
+  lines = re.sub(r'\s+\n', '\n', lines) # trailing whitespace
+  lines = re.sub(r'\n\s+', '\n', lines) # initial whitespace
+  return lines
+
+
+def ReadFile(filename):
+  file = open(filename, "rt")
+  try:
+    lines = file.read()
+  finally:
+    file.close()
+  return lines
+
+
+EVAL_PATTERN = re.compile(r'\beval\s*\(')
+WITH_PATTERN = re.compile(r'\bwith\s*\(')
+INVALID_ERROR_MESSAGE_PATTERN = re.compile(
+    r'Make(?!Generic)\w*Error\(([kA-Z]\w+)')
+NEW_ERROR_PATTERN = re.compile(r'new \$\w*Error\((?!\))')
+
+def Validate(lines):
+  # Because of simplified context setup, eval and with is not
+  # allowed in the natives files.
+  if EVAL_PATTERN.search(lines):
+    raise Error("Eval disallowed in natives.")
+  if WITH_PATTERN.search(lines):
+    raise Error("With statements disallowed in natives.")
+  invalid_error = INVALID_ERROR_MESSAGE_PATTERN.search(lines)
+  if invalid_error:
+    raise Error("Unknown error message template '%s'" % invalid_error.group(1))
+  if NEW_ERROR_PATTERN.search(lines):
+    raise Error("Error constructed without message template.")
+  # Pass lines through unchanged.
+  return lines
+
+
+def ExpandConstants(lines, constants):
+  for key, value in constants:
+    lines = key.sub(str(value), lines)
+  return lines
+
+
+HEADER_TEMPLATE = """\
+// Copyright 2011 Google Inc. All Rights Reserved.
+
+// This file was generated from .js source files by GYP.  If you
+// want to make changes to this file you should either change the
+// javascript source files or the GYP script.
+
+#include "src/init/v8.h"
+#include "src/snapshot/natives.h"
+#include "src/utils/utils.h"
+
+namespace v8 {
+namespace internal {
+
+%(sources_declaration)s\
+
+  template <>
+  int NativesCollection<%(type)s>::GetBuiltinsCount() {
+    return %(builtin_count)i;
+  }
+
+  template <>
+  int NativesCollection<%(type)s>::GetIndex(const char* name) {
+%(get_index_cases)s\
+    return -1;
+  }
+
+  template <>
+  Vector<const char> NativesCollection<%(type)s>::GetScriptSource(int index) {
+%(get_script_source_cases)s\
+    return Vector<const char>("", 0);
+  }
+
+  template <>
+  Vector<const char> NativesCollection<%(type)s>::GetScriptName(int index) {
+%(get_script_name_cases)s\
+    return Vector<const char>("", 0);
+  }
+
+  template <>
+  Vector<const char> NativesCollection<%(type)s>::GetScriptsSource() {
+    return Vector<const char>(sources, %(total_length)i);
+  }
+}  // internal
+}  // v8
+"""
+
+SOURCES_DECLARATION = """\
+  static const char sources[] = { %s };
+"""
+
+
+GET_INDEX_CASE = """\
+    if (strcmp(name, "%(id)s") == 0) return %(i)i;
+"""
+
+
+GET_SCRIPT_SOURCE_CASE = """\
+    if (index == %(i)i) return Vector<const char>(sources + %(offset)i, %(source_length)i);
+"""
+
+
+GET_SCRIPT_NAME_CASE = """\
+    if (index == %(i)i) return Vector<const char>("%(name)s", %(length)i);
+"""
+
+
+def BuildFilterChain():
+  """Build the chain of filter functions to be applied to the sources.
+
+  Returns:
+    A function (string -> string) that processes a source file.
+  """
+  filter_chain = [
+    RemoveCommentsEmptyLinesAndWhitespace,
+    Validate,
+  ]
+
+  def chain(f1, f2):
+    return lambda x: f2(f1(x))
+
+  return reduce(chain, filter_chain)
+
+def BuildExtraFilterChain():
+  return lambda x: RemoveCommentsEmptyLinesAndWhitespace(Validate(x))
+
+class Sources:
+  def __init__(self):
+    self.names = []
+    self.modules = []
+
+def PrepareSources(source_files, native_type, emit_js):
+  """Read, prepare and assemble the list of source files.
+
+  Args:
+    source_files: List of JavaScript-ish source files.
+    native_type: String corresponding to a NativeType enum value, allowing us
+        to treat different types of sources differently.
+    emit_js: True if we should skip the byte conversion and just leave the
+        sources as JS strings.
+
+  Returns:
+    An instance of Sources.
+  """
+  result = Sources()
+  filters = BuildFilterChain()
+
+  source_files_and_contents = [(f, ReadFile(f)) for f in source_files]
+
+  for (source, contents) in source_files_and_contents:
+    try:
+      lines = filters(contents)
+    except Error as e:
+      raise Error("In file %s:\n%s" % (source, str(e)))
+
+    result.modules.append(lines)
+
+    name = os.path.basename(source)[:-3]
+    result.names.append(name)
+
+  return result
+
+
+def BuildMetadata(sources, source_bytes, native_type):
+  """Build the metadata required to generate a libraries file.
+
+  Args:
+    sources: A Sources instance with the prepared sources.
+    source_bytes: A list of source bytes.
+        (The concatenation of all sources; might be compressed.)
+    native_type: The parameter for the NativesCollection template.
+
+  Returns:
+    A dictionary for use with HEADER_TEMPLATE.
+  """
+  total_length = len(source_bytes)
+  raw_sources = "".join(sources.modules)
+
+  # The sources are expected to be ASCII-only.
+  try:
+    raw_sources.encode('ascii')
+  except UnicodeEncodeError:
+    assert False
+
+  # Loop over modules and build up indices into the source blob:
+  get_index_cases = []
+  get_script_name_cases = []
+  get_script_source_cases = []
+  offset = 0
+  for i in range(len(sources.modules)):
+    native_name = "native %s.js" % sources.names[i]
+    d = {
+        "i": i,
+        "id": sources.names[i],
+        "name": native_name,
+        "length": len(native_name),
+        "offset": offset,
+        "source_length": len(sources.modules[i]),
+    }
+    get_index_cases.append(GET_INDEX_CASE % d)
+    get_script_name_cases.append(GET_SCRIPT_NAME_CASE % d)
+    get_script_source_cases.append(GET_SCRIPT_SOURCE_CASE % d)
+    offset += len(sources.modules[i])
+  assert offset == len(raw_sources)
+
+  metadata = {
+    "builtin_count": len(sources.modules),
+    "sources_declaration": SOURCES_DECLARATION % ToCArray(source_bytes),
+    "total_length": total_length,
+    "get_index_cases": "".join(get_index_cases),
+    "get_script_source_cases": "".join(get_script_source_cases),
+    "get_script_name_cases": "".join(get_script_name_cases),
+    "type": native_type,
+  }
+  return metadata
+
+
+def PutInt(blob_file, value):
+  assert(value >= 0 and value < (1 << 28))
+  if (value < 1 << 6):
+    size = 1
+  elif (value < 1 << 14):
+    size = 2
+  elif (value < 1 << 22):
+    size = 3
+  else:
+    size = 4
+  value_with_length = (value << 2) | (size - 1)
+
+  byte_sequence = bytearray()
+  for i in range(size):
+    byte_sequence.append(value_with_length & 255)
+    value_with_length >>= 8
+  blob_file.write(byte_sequence)
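+
+# Example of the length-prefixed encoding above: PutInt(f, 300) stores
+# (300 << 2) | 1 == 1201, i.e. the two bytes 0xB1 0x04 (low byte first), since
+# 300 fits in 14 bits and therefore uses the 2-byte encoding.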
+
+
+def PutStr(blob_file, value):
+  PutInt(blob_file, len(value.encode()))
+  blob_file.write(value.encode())
+
+
+def WriteStartupBlob(sources, startup_blob):
+  """Write a startup blob, as expected by V8 Initialize ...
+    TODO(vogelheim): Add proper method name.
+
+  Args:
+    sources: A Sources instance with the prepared sources.
+    startup_blob: Name of the file to write the blob to.
+  """
+  output = open(startup_blob, "wb")
+
+  PutInt(output, len(sources.names))
+  for i in range(len(sources.names)):
+    PutStr(output, sources.names[i])
+    PutStr(output, sources.modules[i])
+
+  output.close()
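+
+# The resulting blob layout is simply a module count followed by
+# length-prefixed (name, source) string pairs, using the PutInt/PutStr
+# encoding above.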
+
+
+def JS2C(sources, target, native_type, raw_file, startup_blob, emit_js):
+  prepared_sources = PrepareSources(sources, native_type, emit_js)
+  sources_output = "".join(prepared_sources.modules)
+  metadata = BuildMetadata(prepared_sources, sources_output, native_type)
+
+  # Optionally emit raw file.
+  if raw_file:
+    output = open(raw_file, "w")
+    output.write(sources_output)
+    output.close()
+
+  if startup_blob:
+    WriteStartupBlob(prepared_sources, startup_blob)
+
+  # Emit resulting source file.
+  output = open(target, "w")
+  if emit_js:
+    output.write(sources_output)
+  else:
+    output.write(HEADER_TEMPLATE % metadata)
+  output.close()
+
+
+def main():
+  parser = optparse.OptionParser()
+  parser.add_option("--raw",
+                    help="file to write the processed sources array to.")
+  parser.add_option("--startup_blob",
+                    help="file to write the startup blob to.")
+  parser.add_option("--js",
+                    help="writes a JS file output instead of a C file",
+                    action="store_true", default=False, dest='js')
+  parser.add_option("--nojs", action="store_false", default=False, dest='js')
+  parser.set_usage("""js2c out.cc type sources.js ...
+        out.cc: C code to be generated.
+        type: type parameter for NativesCollection template.
+        sources.js: JS internal sources.""")
+  (options, args) = parser.parse_args()
+  JS2C(args[2:],
+       args[0],
+       args[1],
+       options.raw,
+       options.startup_blob,
+       options.js)
+
+
+if __name__ == "__main__":
+  main()
diff --git a/src/third_party/v8/tools/jsfunfuzz/BUILD.gn b/src/third_party/v8/tools/jsfunfuzz/BUILD.gn
new file mode 100644
index 0000000..3c40460
--- /dev/null
+++ b/src/third_party/v8/tools/jsfunfuzz/BUILD.gn
@@ -0,0 +1,19 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("../../gni/v8.gni")
+
+group("v8_jsfunfuzz") {
+  testonly = true
+
+  data_deps = [
+    "../..:d8",
+  ]
+
+  data = [
+    # Grab current directory. This avoids adding logic for checking the
+    # existence of the jsfunfuzz subdirectory.
+    "./",
+  ]
+}
diff --git a/src/third_party/v8/tools/jsfunfuzz/fuzz-harness.sh b/src/third_party/v8/tools/jsfunfuzz/fuzz-harness.sh
new file mode 100755
index 0000000..fa4f9d9
--- /dev/null
+++ b/src/third_party/v8/tools/jsfunfuzz/fuzz-harness.sh
@@ -0,0 +1,99 @@
+#!/bin/bash
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# A simple harness that downloads and runs 'jsfunfuzz' against d8. This
+# takes a long time because it runs many iterations and is intended for
+# automated usage. The package containing 'jsfunfuzz' can be found as an
+# attachment to this bug:
+# https://bugzilla.mozilla.org/show_bug.cgi?id=jsfunfuzz
+
+JSFUNFUZZ_URL="https://bugzilla.mozilla.org/attachment.cgi?id=310631"
+JSFUNFUZZ_MD5="d0e497201c5cd7bffbb1cdc1574f4e32"
+
+v8_root=$(readlink -f $(dirname $BASH_SOURCE)/../../)
+jsfunfuzz_dir="$v8_root/tools/jsfunfuzz"
+cd "$jsfunfuzz_dir"
+
+if [ -n "$1" ]; then
+  d8="${v8_root}/$1"
+else
+  d8="${v8_root}/d8"
+fi
+
+if [ ! -f "$d8" ]; then
+  echo "Failed to find d8 binary: $d8"
+  exit 1
+fi
+
+# Deprecated download method. A prepatched archive is downloaded as a hook
+# if jsfunfuzz=1 is specified as a gyp flag. Requires google.com authentication
+# for Google Storage.
+if [ "$3" == "--download" ]; then
+
+  jsfunfuzz_file="$v8_root/tools/jsfunfuzz.zip"
+  if [ ! -f "$jsfunfuzz_file" ]; then
+    echo "Downloading $jsfunfuzz_file ..."
+    wget -q -O "$jsfunfuzz_file" $JSFUNFUZZ_URL || exit 1
+  fi
+
+  jsfunfuzz_sum=$(md5sum "$jsfunfuzz_file" | awk '{ print $1 }')
+  if [ "$jsfunfuzz_sum" != "$JSFUNFUZZ_MD5" ]; then
+    echo "Failed to verify checksum!"
+    exit 1
+  fi
+
+  if [ ! -d "$jsfunfuzz_dir" ]; then
+    echo "Unpacking into $jsfunfuzz_dir ..."
+    unzip "$jsfunfuzz_file" -d "$jsfunfuzz_dir" || exit 1
+    echo "Patching runner ..."
+    cat << EOF | patch -s -p0 -d "$v8_root"
+--- tools/jsfunfuzz/jsfunfuzz/multi_timed_run.py~
++++ tools/jsfunfuzz/jsfunfuzz/multi_timed_run.py
+@@ -118,19 +118,19 @@
+-def showtail(logfilename):
++def showtail(logfilename, method="tail"):
+-   cmd = "tail -n 20 %s" % logfilename
++   cmd = "%s -n 20 %s" % (method, logfilename)
+    print cmd
+    print ""
+    os.system(cmd)
+    print ""
+    print ""
+
+ def many_timed_runs():
+     iteration = 0
+-    while True:
++    while iteration < 100:
+         iteration += 1
+         logfilename = "w%d" % iteration
+         one_timed_run(logfilename)
+         if not succeeded(logfilename):
+             showtail(logfilename)
+-            showtail("err-" + logfilename)
++            showtail("err-" + logfilename, method="head")
+
+             many_timed_runs()
+EOF
+  fi
+
+fi
+
+flags='--expose-gc --verify-gc'
+python -u "$jsfunfuzz_dir/jsfunfuzz/multi_timed_run.py" 300 \
+    "$d8" $flags "$jsfunfuzz_dir/jsfunfuzz/jsfunfuzz.js"
+exit_code=$(cat w* | grep " looking good" -c)
+exit_code=$((100-exit_code))
+
+if [ -n "$2" ]; then
+  archive="$2"
+else
+  archive=fuzz-results-$(date +%Y%m%d%H%M%S).tar.bz2
+fi
+echo "Creating archive $archive"
+tar -cjf $archive err-* w*
+rm -f err-* w*
+
+echo "Total failures: $exit_code"
+exit $exit_code
diff --git a/src/third_party/v8/tools/jsfunfuzz/jsfunfuzz.tar.gz.sha1 b/src/third_party/v8/tools/jsfunfuzz/jsfunfuzz.tar.gz.sha1
new file mode 100644
index 0000000..d12877e
--- /dev/null
+++ b/src/third_party/v8/tools/jsfunfuzz/jsfunfuzz.tar.gz.sha1
@@ -0,0 +1 @@
+936f3baf5a24313da5eb98195d5e01d76fe602fb
\ No newline at end of file
diff --git a/src/third_party/v8/tools/link_clicker.extension/README.txt b/src/third_party/v8/tools/link_clicker.extension/README.txt
new file mode 100644
index 0000000..35e88b1
--- /dev/null
+++ b/src/third_party/v8/tools/link_clicker.extension/README.txt
@@ -0,0 +1,12 @@
+This extension can be used to reproduce infrequent crashes on an unknown set of
+URLs within a given domain. It follows a random link that matches a predefined
+pattern, imitating real user interaction on a page.
+
+Usage:
+1. Open chrome://extensions
+2. Enable developer mode
+3. Click "Load unpacked extension"
+4. Click the orange link-clicker extension button in the toolbar
+5. Set the parameters and click "Enable" to start following links on all tabs
+   open in the current window. Beware, this extension will follow arbitrary
+   links. You probably don't want to be logged in with any important account.
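+
+As an illustration, a link pattern of "https://example\.com/articles/.*" would
+restrict clicking to article links on example.com; the pattern is interpreted
+as a JavaScript regular expression and matched against each link's href.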
diff --git a/src/third_party/v8/tools/link_clicker.extension/background.js b/src/third_party/v8/tools/link_clicker.extension/background.js
new file mode 100644
index 0000000..43470cb
--- /dev/null
+++ b/src/third_party/v8/tools/link_clicker.extension/background.js
@@ -0,0 +1,74 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function linkClickerBackgroundScript() {
+
+  // time in ms.
+  let minInterval = 1*1000;
+  let maxInterval = 20*1000;
+  let pattern = /.*/;
+  let enabled = false;
+
+  let animateIconIntervalId;
+
+  // ===========================================================================
+
+  chrome.runtime.onMessage.addListener(function(msg, sender, response) {
+    let result;
+    if (msg.type == 'update') result = updateFromMessage(msg);
+    if (msg.type == 'get') result = getValues();
+    response(result);
+  });
+
+  // ===========================================================================
+  function updateFromMessage(msg) {
+    console.log(msg);
+    minInterval = Number(msg.minInterval);
+    maxInterval = Number(msg.maxInterval);
+    if (maxInterval < minInterval) {
+      let tmpMin = Math.min(minInterval, maxInterval);
+      maxInterval = Math.max(minInterval, maxInterval);
+      minInterval = tmpMin;
+    }
+    pattern = new RegExp(msg.pattern);
+    enabled = Boolean(msg.enabled);
+    updateTabs();
+    scheduleIconAnimation();
+    return getValues();
+  }
+
+  function getValues() {
+    return {
+      type: 'update',
+      minInterval: minInterval,
+      maxInterval: maxInterval,
+      pattern: pattern.source,
+      enabled: enabled
+    }
+  }
+
+  function updateTabs() {
+    chrome.tabs.query({active: true, currentWindow: true}, function(tabs) {
+      let message = getValues();
+      for (let i = 0; i < tabs.length; ++i) {
+        chrome.tabs.sendMessage(tabs[i].id, message);
+      }
+    });
+  }
+
+  let animationIndex = 0;
+  function animateIcon() {
+    animationIndex = (animationIndex + 1) % 4;
+    chrome.browserAction.setBadgeText( { text: ".".repeat(animationIndex) } );
+  }
+
+  function scheduleIconAnimation() {
+    chrome.browserAction.setBadgeText( { text: "" } );
+    clearInterval(animateIconIntervalId);
+    if (enabled) {
+      animateIconIntervalId = setInterval(animateIcon, 500);
+    }
+  }
+
+})();
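+
+// Message protocol (shared with content.js and popup.js): {type: 'get'}
+// requests the current settings, and {type: 'update', minInterval,
+// maxInterval, pattern, enabled} pushes new settings; both are answered with
+// the current values from getValues().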
diff --git a/src/third_party/v8/tools/link_clicker.extension/content.js b/src/third_party/v8/tools/link_clicker.extension/content.js
new file mode 100644
index 0000000..4ab825e
--- /dev/null
+++ b/src/third_party/v8/tools/link_clicker.extension/content.js
@@ -0,0 +1,66 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function linkClickerContentScript() {
+  // time in ms
+  let minInterval;
+  let maxInterval;
+  let pattern;
+  let enabled;
+  let timeoutId;
+
+  // Initialize variables.
+  chrome.runtime.sendMessage({type:'get'}, function(msg) {
+    if (msg.type == 'update') updateFromMessage(msg);
+  });
+
+  chrome.runtime.onMessage.addListener(
+    function(msg, sender, sendResponse) {
+      if (msg.type == 'update') updateFromMessage(msg);
+    });
+
+  function findAllLinks() {
+    let links = document.links;
+    let results = new Set();
+    for (let i = 0; i < links.length; i++) {
+      let href = links[i].href;
+      if (!href) continue;
+      if (href && href.match(pattern)) results.add(href);
+    }
+    return Array.from(results);
+  }
+
+  function updateFromMessage(msg) {
+    console.log(msg);
+    minInterval = Number(msg.minInterval);
+    maxInterval = Number(msg.maxInterval);
+    pattern = new RegExp(msg.pattern);
+    enabled = Boolean(msg.enabled);
+    if (enabled) schedule();
+  }
+
+  function followLink() {
+    if (!enabled) return;
+    let links = findAllLinks();
+    if (links.length <= 5) {
+      // Navigate back if the page does not have enough links.
+      window.history.back();
+      console.log("navigate back");
+    } else {
+      let link = links[Math.round(Math.random() * (links.length-1))];
+      console.log(link);
+      window.location.href = link;
+      // Schedule in case we just followed an anchor.
+      schedule();
+    }
+  }
+
+  function schedule() {
+    clearTimeout(timeoutId);
+    let delta = maxInterval - minInterval;
+    let duration = minInterval + (Math.random() * delta);
+    console.log(duration);
+    timeoutId = setTimeout(followLink, duration);
+  }
+})();
diff --git a/src/third_party/v8/tools/link_clicker.extension/icon.png b/src/third_party/v8/tools/link_clicker.extension/icon.png
new file mode 100644
index 0000000..1ce0ca3
--- /dev/null
+++ b/src/third_party/v8/tools/link_clicker.extension/icon.png
Binary files differ
diff --git a/src/third_party/v8/tools/link_clicker.extension/manifest.json b/src/third_party/v8/tools/link_clicker.extension/manifest.json
new file mode 100644
index 0000000..8ca8579
--- /dev/null
+++ b/src/third_party/v8/tools/link_clicker.extension/manifest.json
@@ -0,0 +1,21 @@
+{
+  "name": "A browser action with a popup that automatically clicks links matching a regexp",
+  "description": "Follow links",
+  "version": "1.0",
+  "permissions": [
+    "tabs", "http://*/*", "https://*/*"
+  ],
+  "background": { "scripts": ["background.js"] },
+  "browser_action": {
+      "default_title": "Follow links.",
+      "default_icon": "icon.png",
+      "default_popup": "popup.html"
+  },
+  "content_scripts": [
+    {
+      "matches": ["http://*/*", "https://*/*"],
+      "js": ["content.js"]
+    }
+  ],
+  "manifest_version": 2
+}
diff --git a/src/third_party/v8/tools/link_clicker.extension/popup.html b/src/third_party/v8/tools/link_clicker.extension/popup.html
new file mode 100644
index 0000000..cce9566
--- /dev/null
+++ b/src/third_party/v8/tools/link_clicker.extension/popup.html
@@ -0,0 +1,50 @@
+<!doctype html>
+<!--
+Copyright 2017 the V8 project authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file.
+-->
+<html>
+  <head>
+    <style>
+    body {
+      overflow: hidden;
+      padding: 5px;
+      width: 310px;
+    }
+    input, textarea, select, button {
+      width : 300px;
+      margin: 0;
+      box-sizing: border-box;
+    }
+    label {
+      clear: both;
+    }
+    </style>
+    <script src="popup.js"></script>
+  </head>
+  <body>
+    <form>
+      <p>
+        <label>Min click-interval <span id="minIntervalValue"></span>:
+          <input type="range" id="minInterval" min="1000" max="60000">
+        </label>
+      </p>
+      <p>
+        <label> Max click-interval <span id="maxIntervalValue"></span>:
+          <input type="range" id="maxInterval" min="1000" max="60000">
+        </label>
+      </p>
+      <p>
+        <label>Link regexp:
+          <input type="input" id="pattern" >
+        </label>
+      </p>
+      <p>
+        <label>Enable:
+          <input type="checkbox" id="enabled" >
+        </label>
+      </p>
+    </form>
+  </body>
+</html>
diff --git a/src/third_party/v8/tools/link_clicker.extension/popup.js b/src/third_party/v8/tools/link_clicker.extension/popup.js
new file mode 100644
index 0000000..865a948
--- /dev/null
+++ b/src/third_party/v8/tools/link_clicker.extension/popup.js
@@ -0,0 +1,53 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function $(id) {
+  return document.querySelector(id);
+}
+
+// ===========================================================================
+document.addEventListener('DOMContentLoaded', function () {
+  installFormChangeHandler()
+});
+
+
+function installFormChangeHandler() {
+  initForm();
+  let inputs = document.getElementsByTagName("input");
+  for (let i = 0; i < inputs.length; i++){
+     inputs[i].onchange = onFormChange;
+  }
+}
+
+function initForm() {
+  chrome.runtime.sendMessage({type:'get'}, function(response) {
+    updateFromMessage(response);
+  });
+}
+// ===========================================================================
+
+function updateFromMessage(msg) {
+  $("#minInterval").value = msg.minInterval;
+  $("#maxInterval").value = msg.maxInterval;
+  $("#pattern").value = msg.pattern;
+  $("#enabled").checked = msg.enabled;
+  $("#minIntervalValue").innerText = msg.minInterval+"ms";
+  $("#maxIntervalValue").innerText = msg.maxInterval+"ms";
+}
+
+function onFormChange() {
+  let minInterval = $("#minInterval").value;
+  let maxInterval = $("#maxInterval").value;
+
+  let message = {
+    type: 'update',
+    minInterval: minInterval,
+    maxInterval: maxInterval,
+    pattern: $("#pattern").value,
+    enabled: $("#enabled").checked
+  }
+  chrome.runtime.sendMessage(message, function(response) {
+    updateFromMessage(response);
+  });
+}
diff --git a/src/third_party/v8/tools/linux-tick-processor b/src/third_party/v8/tools/linux-tick-processor
new file mode 100755
index 0000000..a2ae2b5
--- /dev/null
+++ b/src/third_party/v8/tools/linux-tick-processor
@@ -0,0 +1,37 @@
+#!/bin/sh
+
+# Find the name of the log file to process; it must not start with a dash.
+log_file="v8.log"
+for arg in "$@"
+do
+  if ! expr "X${arg}" : "^X-" > /dev/null; then
+    log_file=${arg}
+  fi
+done
+
+tools_path=`cd $(dirname "$0");pwd`
+if [ ! "$D8_PATH" ]; then
+  d8_public=`which d8`
+  if [ -x "$d8_public" ]; then D8_PATH=$(dirname "$d8_public"); fi
+fi
+[ -n "$D8_PATH" ] || D8_PATH=$tools_path/..
+d8_exec=$D8_PATH/d8
+
+if [ ! -x "$d8_exec" ]; then
+  D8_PATH=`pwd`/out/native
+  d8_exec=$D8_PATH/d8
+fi
+
+if [ ! -x "$d8_exec" ]; then
+  d8_exec=`grep -m 1 -o '".*/d8"' $log_file | sed 's/"//g'`
+fi
+
+if [ ! -x "$d8_exec" ]; then
+  echo "d8 shell not found in $D8_PATH" >&2
+  echo "Please provide path to d8 as env var in D8_PATH" >&2
+  exit 1
+fi
+
+# nm spits out 'no symbols found' messages to stderr.
+cat "$log_file" | "$d8_exec" --enable-os-system \
+  --module "$tools_path/tickprocessor-driver.mjs" -- "$@"
diff --git a/src/third_party/v8/tools/ll_prof.py b/src/third_party/v8/tools/ll_prof.py
new file mode 100755
index 0000000..4499874
--- /dev/null
+++ b/src/third_party/v8/tools/ll_prof.py
@@ -0,0 +1,992 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import bisect
+import collections
+import ctypes
+import disasm
+import mmap
+import optparse
+import os
+import re
+import subprocess
+import sys
+import time
+
+
+USAGE="""usage: %prog [OPTION]...
+
+Analyses V8 and perf logs to produce profiles.
+
+Perf logs can be collected using a command like:
+  $ perf record -R -e cycles -c 10000 -f -i ./d8 bench.js --ll-prof
+  # -R: collect all data
+  # -e cycles: use cpu-cycles event (run "perf list" for details)
+  # -c 10000: write a sample after each 10000 events
+  # -f: force output file overwrite
+  # -i: limit profiling to our process and the kernel
+  # --ll-prof shell flag enables the right V8 logs
+This will produce a binary trace file (perf.data) that %prog can analyse.
+
+IMPORTANT:
+  The kernel has an internal maximum for events per second; it is 100K by
+  default. That's not enough for "-c 10000". Set it to some higher value:
+  $ echo 10000000 | sudo tee /proc/sys/kernel/perf_event_max_sample_rate
+  You can also make the warning about kernel address maps go away:
+  $ echo 0 | sudo tee /proc/sys/kernel/kptr_restrict
+
+We have a convenience script that handles all of the above for you:
+  $ tools/run-llprof.sh ./d8 bench.js
+
+Examples:
+  # Print flat profile with annotated disassembly for the 10 top
+  # symbols. Use default log names.
+  $ %prog --disasm-top=10
+
+  # Print flat profile with annotated disassembly for all used symbols.
+  # Use default log names and include kernel symbols into analysis.
+  $ %prog --disasm-all --kernel
+
+  # Print flat profile. Use custom log names.
+  $ %prog --log=foo.log --trace=foo.data
+"""
+
+
+JS_ORIGIN = "js"
+
+
+class Code(object):
+  """Code object."""
+
+  _id = 0
+  UNKNOWN = 0
+  V8INTERNAL = 1
+  FULL_CODEGEN = 2
+  OPTIMIZED = 3
+
+  def __init__(self, name, start_address, end_address, origin, origin_offset):
+    self.id = Code._id
+    Code._id += 1
+    self.name = name
+    self.other_names = None
+    self.start_address = start_address
+    self.end_address = end_address
+    self.origin = origin
+    self.origin_offset = origin_offset
+    self.self_ticks = 0
+    self.self_ticks_map = None
+    self.callee_ticks = None
+    if name.startswith("LazyCompile:*"):
+      self.codetype = Code.OPTIMIZED
+    elif name.startswith("LazyCompile:"):
+      self.codetype = Code.FULL_CODEGEN
+    elif name.startswith("v8::internal::"):
+      self.codetype = Code.V8INTERNAL
+    else:
+      self.codetype = Code.UNKNOWN
+
+  def AddName(self, name):
+    assert self.name != name
+    if self.other_names is None:
+      self.other_names = [name]
+      return
+    if name not in self.other_names:
+      self.other_names.append(name)
+
+  def FullName(self):
+    if self.other_names is None:
+      return self.name
+    self.other_names.sort()
+    return "%s (aka %s)" % (self.name, ", ".join(self.other_names))
+
+  def IsUsed(self):
+    return self.self_ticks > 0 or self.callee_ticks is not None
+
+  def Tick(self, pc):
+    self.self_ticks += 1
+    if self.self_ticks_map is None:
+      self.self_ticks_map = collections.defaultdict(lambda: 0)
+    offset = pc - self.start_address
+    self.self_ticks_map[offset] += 1
+
+  def CalleeTick(self, callee):
+    if self.callee_ticks is None:
+      self.callee_ticks = collections.defaultdict(lambda: 0)
+    self.callee_ticks[callee] += 1
+
+  def PrintAnnotated(self, arch, options):
+    if self.self_ticks_map is None:
+      ticks_map = []
+    else:
+      ticks_map = self.self_ticks_map.items()
+    # Convert the ticks map to offsets and counts arrays so that later
+    # we can do binary search in the offsets array.
+    ticks_map.sort(key=lambda t: t[0])
+    ticks_offsets = [t[0] for t in ticks_map]
+    ticks_counts = [t[1] for t in ticks_map]
+    # Get a list of disassembled lines and their addresses.
+    lines = self._GetDisasmLines(arch, options)
+    if len(lines) == 0:
+      return
+    # Print annotated lines.
+    address = lines[0][0]
+    total_count = 0
+    for i in range(len(lines)):
+      start_offset = lines[i][0] - address
+      if i == len(lines) - 1:
+        end_offset = self.end_address - self.start_address
+      else:
+        end_offset = lines[i + 1][0] - address
+      # Ticks (reported pc values) are not always precise, i.e. they do not
+      # necessarily point at instruction starts. So we have to search
+      # for ticks that touch the current instruction line.
+      j = bisect.bisect_left(ticks_offsets, end_offset)
+      count = 0
+      for offset, cnt in reversed(zip(ticks_offsets[:j], ticks_counts[:j])):
+        if offset < start_offset:
+          break
+        count += cnt
+      total_count += count
+      percent = 100.0 * count / self.self_ticks
+      offset = lines[i][0]
+      if percent >= 0.01:
+        # 5 spaces for tick count
+        # 1 space following
+        # 1 for '|'
+        # 1 space following
+        # 6 for the percentage number, incl. the '.'
+        # 1 for the '%' sign
+        # => 15
+        print("%5d | %6.2f%% %x(%d): %s" % (count, percent, offset, offset, lines[i][1]))
+      else:
+        print("%s %x(%d): %s" % (" " * 15, offset, offset, lines[i][1]))
+    print()
+    assert total_count == self.self_ticks, \
+        "Lost ticks (%d != %d) in %s" % (total_count, self.self_ticks, self)
+
+  def __str__(self):
+    return "%s [0x%x, 0x%x) size: %d origin: %s" % (
+      self.name,
+      self.start_address,
+      self.end_address,
+      self.end_address - self.start_address,
+      self.origin)
+
+  def _GetDisasmLines(self, arch, options):
+    if self.origin == JS_ORIGIN:
+      inplace = False
+      filename = options.log + ".ll"
+    else:
+      inplace = True
+      filename = self.origin
+    return disasm.GetDisasmLines(filename,
+                                 self.origin_offset,
+                                 self.end_address - self.start_address,
+                                 arch,
+                                 inplace)
+
+
+class CodePage(object):
+  """Group of adjacent code objects."""
+
+  SHIFT = 20  # 1M pages
+  SIZE = (1 << SHIFT)
+  MASK = ~(SIZE - 1)
+
+  @staticmethod
+  def PageAddress(address):
+    return address & CodePage.MASK
+
+  @staticmethod
+  def PageId(address):
+    return address >> CodePage.SHIFT
+
+  @staticmethod
+  def PageAddressFromId(id):
+    return id << CodePage.SHIFT
+
+  def __init__(self, address):
+    self.address = address
+    self.code_objects = []
+
+  def Add(self, code):
+    self.code_objects.append(code)
+
+  def Remove(self, code):
+    self.code_objects.remove(code)
+
+  def Find(self, pc):
+    code_objects = self.code_objects
+    for i, code in enumerate(code_objects):
+      if code.start_address <= pc < code.end_address:
+        code_objects[0], code_objects[i] = code, code_objects[0]
+        return code
+    return None
+
+  def __iter__(self):
+    return self.code_objects.__iter__()
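+
+# CodeMap below buckets Code objects into these 1 MiB pages: an address is
+# mapped to its page via "pc >> CodePage.SHIFT", so Find() only has to scan
+# the code objects registered for that one page.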
+
+
+class CodeMap(object):
+  """Code object map."""
+
+  def __init__(self):
+    self.pages = {}
+    self.min_address = 1 << 64
+    self.max_address = -1
+
+  def Add(self, code, max_pages=-1):
+    page_id = CodePage.PageId(code.start_address)
+    limit_id = CodePage.PageId(code.end_address + CodePage.SIZE - 1)
+    pages = 0
+    while page_id < limit_id:
+      if max_pages >= 0 and pages > max_pages:
+        print("Warning: page limit (%d) reached for %s [%s]" % (
+            max_pages, code.name, code.origin), file=sys.stderr)
+        break
+      if page_id in self.pages:
+        page = self.pages[page_id]
+      else:
+        page = CodePage(CodePage.PageAddressFromId(page_id))
+        self.pages[page_id] = page
+      page.Add(code)
+      page_id += 1
+      pages += 1
+    self.min_address = min(self.min_address, code.start_address)
+    self.max_address = max(self.max_address, code.end_address)
+
+  def Remove(self, code):
+    page_id = CodePage.PageId(code.start_address)
+    limit_id = CodePage.PageId(code.end_address + CodePage.SIZE - 1)
+    removed = False
+    while page_id < limit_id:
+      if page_id not in self.pages:
+        page_id += 1
+        continue
+      page = self.pages[page_id]
+      page.Remove(code)
+      removed = True
+      page_id += 1
+    return removed
+
+  def AllCode(self):
+    for page in self.pages.itervalues():
+      for code in page:
+        if CodePage.PageAddress(code.start_address) == page.address:
+          yield code
+
+  def UsedCode(self):
+    for code in self.AllCode():
+      if code.IsUsed():
+        yield code
+
+  def Print(self):
+    for code in self.AllCode():
+      print(code)
+
+  def Find(self, pc):
+    if pc < self.min_address or pc >= self.max_address:
+      return None
+    page_id = CodePage.PageId(pc)
+    if page_id not in self.pages:
+      return None
+    return self.pages[page_id].Find(pc)
+
+
+class CodeInfo(object):
+  """Generic info about generated code objects."""
+
+  def __init__(self, arch, header_size):
+    self.arch = arch
+    self.header_size = header_size
+
+
+class LogReader(object):
+  """V8 low-level (binary) log reader."""
+
+  _ARCH_TO_POINTER_TYPE_MAP = {
+    "ia32": ctypes.c_uint32,
+    "arm": ctypes.c_uint32,
+    "mips": ctypes.c_uint32,
+    "x64": ctypes.c_uint64,
+    "arm64": ctypes.c_uint64
+  }
+
+  _CODE_CREATE_TAG = "C"
+  _CODE_MOVE_TAG = "M"
+  _CODE_MOVING_GC_TAG = "G"
+
+  def __init__(self, log_name, code_map):
+    self.log_file = open(log_name, "r")
+    self.log = mmap.mmap(self.log_file.fileno(), 0, mmap.MAP_PRIVATE)
+    self.log_pos = 0
+    self.code_map = code_map
+
+    self.arch = self.log[:self.log.find("\0")]
+    self.log_pos += len(self.arch) + 1
+    assert self.arch in LogReader._ARCH_TO_POINTER_TYPE_MAP, \
+        "Unsupported architecture %s" % self.arch
+    pointer_type = LogReader._ARCH_TO_POINTER_TYPE_MAP[self.arch]
+
+    self.code_create_struct = LogReader._DefineStruct([
+        ("name_size", ctypes.c_int32),
+        ("code_address", pointer_type),
+        ("code_size", ctypes.c_int32)])
+
+    self.code_move_struct = LogReader._DefineStruct([
+        ("from_address", pointer_type),
+        ("to_address", pointer_type)])
+
+    self.code_delete_struct = LogReader._DefineStruct([
+        ("address", pointer_type)])
+
+  def ReadUpToGC(self):
+    while self.log_pos < self.log.size():
+      tag = self.log[self.log_pos]
+      self.log_pos += 1
+
+      if tag == LogReader._CODE_MOVING_GC_TAG:
+        return
+
+      if tag == LogReader._CODE_CREATE_TAG:
+        event = self.code_create_struct.from_buffer(self.log, self.log_pos)
+        self.log_pos += ctypes.sizeof(event)
+        start_address = event.code_address
+        end_address = start_address + event.code_size
+        name = self.log[self.log_pos:self.log_pos + event.name_size]
+        origin = JS_ORIGIN
+        self.log_pos += event.name_size
+        origin_offset = self.log_pos
+        self.log_pos += event.code_size
+        code = Code(name, start_address, end_address, origin, origin_offset)
+        conflicting_code = self.code_map.Find(start_address)
+        if conflicting_code:
+          if not (conflicting_code.start_address == code.start_address and
+                  conflicting_code.end_address == code.end_address):
+            self.code_map.Remove(conflicting_code)
+          else:
+            LogReader._HandleCodeConflict(conflicting_code, code)
+            # TODO(vitalyr): this warning is too noisy because of our
+            # attempts to reconstruct code log from the snapshot.
+            # print >>sys.stderr, \
+            #     "Warning: Skipping duplicate code log entry %s" % code
+            continue
+        self.code_map.Add(code)
+        continue
+
+      if tag == LogReader._CODE_MOVE_TAG:
+        event = self.code_move_struct.from_buffer(self.log, self.log_pos)
+        self.log_pos += ctypes.sizeof(event)
+        old_start_address = event.from_address
+        new_start_address = event.to_address
+        if old_start_address == new_start_address:
+          # Skip useless code move entries.
+          continue
+        code = self.code_map.Find(old_start_address)
+        if not code:
+          print("Warning: Not found %x" % old_start_address, file=sys.stderr)
+          continue
+        assert code.start_address == old_start_address, \
+            "Inexact move address %x for %s" % (old_start_address, code)
+        self.code_map.Remove(code)
+        size = code.end_address - code.start_address
+        code.start_address = new_start_address
+        code.end_address = new_start_address + size
+        self.code_map.Add(code)
+        continue
+
+      assert False, "Unknown tag %s" % tag
+
+  def Dispose(self):
+    self.log.close()
+    self.log_file.close()
+
+  @staticmethod
+  def _DefineStruct(fields):
+    class Struct(ctypes.Structure):
+      _fields_ = fields
+    return Struct
+
+  @staticmethod
+  def _HandleCodeConflict(old_code, new_code):
+    assert (old_code.start_address == new_code.start_address and
+            old_code.end_address == new_code.end_address), \
+        "Conficting code log entries %s and %s" % (old_code, new_code)
+    if old_code.name == new_code.name:
+      return
+    # Code object may be shared by a few functions. Collect the full
+    # set of names.
+    old_code.AddName(new_code.name)
+
+
+class Descriptor(object):
+  """Descriptor of a structure in the binary trace log."""
+
+  CTYPE_MAP = {
+    "u16": ctypes.c_uint16,
+    "u32": ctypes.c_uint32,
+    "u64": ctypes.c_uint64
+  }
+
+  def __init__(self, fields):
+    class TraceItem(ctypes.Structure):
+      _fields_ = Descriptor.CtypesFields(fields)
+
+      def __str__(self):
+        return ", ".join("%s: %s" % (field, self.__getattribute__(field))
+                         for field, _ in TraceItem._fields_)
+
+    self.ctype = TraceItem
+
+  def Read(self, trace, offset):
+    return self.ctype.from_buffer(trace, offset)
+
+  @staticmethod
+  def CtypesFields(fields):
+    return [(field, Descriptor.CTYPE_MAP[format]) for (field, format) in fields]
+
+
+# Please see http://git.kernel.org/?p=linux/kernel/git/torvalds/linux-2.6.git;a=tree;f=tools/perf
+# for the gory details.
+
+
+# Reference: struct perf_file_header in kernel/tools/perf/util/header.h
+TRACE_HEADER_DESC = Descriptor([
+  ("magic", "u64"),
+  ("size", "u64"),
+  ("attr_size", "u64"),
+  ("attrs_offset", "u64"),
+  ("attrs_size", "u64"),
+  ("data_offset", "u64"),
+  ("data_size", "u64"),
+  ("event_types_offset", "u64"),
+  ("event_types_size", "u64")
+])
+
+
+# Reference: /usr/include/linux/perf_event.h
+PERF_EVENT_ATTR_DESC = Descriptor([
+  ("type", "u32"),
+  ("size", "u32"),
+  ("config", "u64"),
+  ("sample_period_or_freq", "u64"),
+  ("sample_type", "u64"),
+  ("read_format", "u64"),
+  ("flags", "u64"),
+  ("wakeup_events_or_watermark", "u32"),
+  ("bp_type", "u32"),
+  ("bp_addr", "u64"),
+  ("bp_len", "u64")
+])
+
+
+# Reference: /usr/include/linux/perf_event.h
+PERF_EVENT_HEADER_DESC = Descriptor([
+  ("type", "u32"),
+  ("misc", "u16"),
+  ("size", "u16")
+])
+
+
+# Reference: kernel/tools/perf/util/event.h
+PERF_MMAP_EVENT_BODY_DESC = Descriptor([
+  ("pid", "u32"),
+  ("tid", "u32"),
+  ("addr", "u64"),
+  ("len", "u64"),
+  ("pgoff", "u64")
+])
+
+# Reference: kernel/tools/perf/util/event.h
+PERF_MMAP2_EVENT_BODY_DESC = Descriptor([
+  ("pid", "u32"),
+  ("tid", "u32"),
+  ("addr", "u64"),
+  ("len", "u64"),
+  ("pgoff", "u64"),
+  ("maj", "u32"),
+  ("min", "u32"),
+  ("ino", "u64"),
+  ("ino_generation", "u64"),
+  ("prot", "u32"),
+  ("flags","u32")
+])
+
+# perf_event_attr.sample_type bits control the set of
+# perf_sample_event fields.
+PERF_SAMPLE_IP = 1 << 0
+PERF_SAMPLE_TID = 1 << 1
+PERF_SAMPLE_TIME = 1 << 2
+PERF_SAMPLE_ADDR = 1 << 3
+PERF_SAMPLE_READ = 1 << 4
+PERF_SAMPLE_CALLCHAIN = 1 << 5
+PERF_SAMPLE_ID = 1 << 6
+PERF_SAMPLE_CPU = 1 << 7
+PERF_SAMPLE_PERIOD = 1 << 8
+PERF_SAMPLE_STREAM_ID = 1 << 9
+PERF_SAMPLE_RAW = 1 << 10
+
+
+# Reference: /usr/include/perf_event.h, the comment for PERF_RECORD_SAMPLE.
+PERF_SAMPLE_EVENT_BODY_FIELDS = [
+  ("ip", "u64", PERF_SAMPLE_IP),
+  ("pid", "u32", PERF_SAMPLE_TID),
+  ("tid", "u32", PERF_SAMPLE_TID),
+  ("time", "u64", PERF_SAMPLE_TIME),
+  ("addr", "u64", PERF_SAMPLE_ADDR),
+  ("id", "u64", PERF_SAMPLE_ID),
+  ("stream_id", "u64", PERF_SAMPLE_STREAM_ID),
+  ("cpu", "u32", PERF_SAMPLE_CPU),
+  ("res", "u32", PERF_SAMPLE_CPU),
+  ("period", "u64", PERF_SAMPLE_PERIOD),
+  # Don't want to handle read format that comes after the period and
+  # before the callchain and has variable size.
+  ("nr", "u64", PERF_SAMPLE_CALLCHAIN)
+  # Raw data follows the callchain and is ignored.
+]
+
+
+PERF_SAMPLE_EVENT_IP_FORMAT = "u64"
+
+
+PERF_RECORD_MMAP = 1
+PERF_RECORD_MMAP2 = 10
+PERF_RECORD_SAMPLE = 9
+
+
+class TraceReader(object):
+  """Perf (linux-2.6/tools/perf) trace file reader."""
+
+  _TRACE_HEADER_MAGIC = 4993446653023372624
+
+  def __init__(self, trace_name):
+    self.trace_file = open(trace_name, "r")
+    self.trace = mmap.mmap(self.trace_file.fileno(), 0, mmap.MAP_PRIVATE)
+    self.trace_header = TRACE_HEADER_DESC.Read(self.trace, 0)
+    if self.trace_header.magic != TraceReader._TRACE_HEADER_MAGIC:
+      print("Warning: unsupported trace header magic", file=sys.stderr)
+    self.offset = self.trace_header.data_offset
+    self.limit = self.trace_header.data_offset + self.trace_header.data_size
+    assert self.limit <= self.trace.size(), \
+        "Trace data limit exceeds trace file size"
+    self.header_size = ctypes.sizeof(PERF_EVENT_HEADER_DESC.ctype)
+    assert self.trace_header.attrs_size != 0, \
+        "No perf event attributes found in the trace"
+    perf_event_attr = PERF_EVENT_ATTR_DESC.Read(self.trace,
+                                                self.trace_header.attrs_offset)
+    self.sample_event_body_desc = self._SampleEventBodyDesc(
+        perf_event_attr.sample_type)
+    self.callchain_supported = \
+        (perf_event_attr.sample_type & PERF_SAMPLE_CALLCHAIN) != 0
+    if self.callchain_supported:
+      self.ip_struct = Descriptor.CTYPE_MAP[PERF_SAMPLE_EVENT_IP_FORMAT]
+      self.ip_size = ctypes.sizeof(self.ip_struct)
+
+  def ReadEventHeader(self):
+    if self.offset >= self.limit:
+      return None, 0
+    offset = self.offset
+    header = PERF_EVENT_HEADER_DESC.Read(self.trace, self.offset)
+    self.offset += header.size
+    return header, offset
+
+  def ReadMmap(self, header, offset):
+    mmap_info = PERF_MMAP_EVENT_BODY_DESC.Read(self.trace,
+                                               offset + self.header_size)
+    # Read null-terminated filename.
+    filename = self.trace[offset + self.header_size + ctypes.sizeof(mmap_info):
+                          offset + header.size]
+    mmap_info.filename = HOST_ROOT + filename[:filename.find(chr(0))]
+    return mmap_info
+
+  def ReadMmap2(self, header, offset):
+    mmap_info = PERF_MMAP2_EVENT_BODY_DESC.Read(self.trace,
+                                                offset + self.header_size)
+    # Read null-terminated filename.
+    filename = self.trace[offset + self.header_size + ctypes.sizeof(mmap_info):
+                          offset + header.size]
+    mmap_info.filename = HOST_ROOT + filename[:filename.find(chr(0))]
+    return mmap_info
+
+  def ReadSample(self, header, offset):
+    sample = self.sample_event_body_desc.Read(self.trace,
+                                              offset + self.header_size)
+    if not self.callchain_supported:
+      return sample
+    sample.ips = []
+    offset += self.header_size + ctypes.sizeof(sample)
+    for _ in range(sample.nr):
+      sample.ips.append(
+        self.ip_struct.from_buffer(self.trace, offset).value)
+      offset += self.ip_size
+    return sample
+
+  def Dispose(self):
+    self.trace.close()
+    self.trace_file.close()
+
+  def _SampleEventBodyDesc(self, sample_type):
+    assert (sample_type & PERF_SAMPLE_READ) == 0, \
+           "Can't hande read format in samples"
+    fields = [(field, format)
+              for (field, format, bit) in PERF_SAMPLE_EVENT_BODY_FIELDS
+              if (bit & sample_type) != 0]
+    return Descriptor(fields)
+
+
+OBJDUMP_SECTION_HEADER_RE = re.compile(
+  r"^\s*\d+\s(\.\S+)\s+[a-f0-9]")
+OBJDUMP_SYMBOL_LINE_RE = re.compile(
+  r"^([a-f0-9]+)\s(.{7})\s(\S+)\s+([a-f0-9]+)\s+(?:\.hidden\s+)?(.*)$")
+OBJDUMP_DYNAMIC_SYMBOLS_START_RE = re.compile(
+  r"^DYNAMIC SYMBOL TABLE")
+OBJDUMP_SKIP_RE = re.compile(
+  r"^.*ld\.so\.cache$")
+KERNEL_ALLSYMS_FILE = "/proc/kallsyms"
+PERF_KERNEL_ALLSYMS_RE = re.compile(
+  r".*kallsyms.*")
+KERNEL_ALLSYMS_LINE_RE = re.compile(
+  r"^([a-f0-9]+)\s(?:t|T)\s(\S+)$")
+
+
+class LibraryRepo(object):
+  def __init__(self):
+    self.infos = []
+    self.names = set()
+    self.ticks = {}
+
+
+  def HasDynamicSymbols(self, filename):
+    if filename.endswith(".ko"): return False
+    process = subprocess.Popen(
+      "%s -h %s" % (OBJDUMP_BIN, filename),
+      shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+    pipe = process.stdout
+    try:
+      for line in pipe:
+        match = OBJDUMP_SECTION_HEADER_RE.match(line)
+        if match and match.group(1) == '.dynsym': return True
+    finally:
+      pipe.close()
+    assert process.wait() == 0, "Failed to objdump -h %s" % filename
+    return False
+
+
+  def Load(self, mmap_info, code_map, options):
+    # Skip kernel mmaps when requested, using the fact that their tid
+    # is 0.
+    if mmap_info.tid == 0 and not options.kernel:
+      return True
+    if OBJDUMP_SKIP_RE.match(mmap_info.filename):
+      return True
+    if PERF_KERNEL_ALLSYMS_RE.match(mmap_info.filename):
+      return self._LoadKernelSymbols(code_map)
+    self.infos.append(mmap_info)
+    mmap_info.ticks = 0
+    mmap_info.unique_name = self._UniqueMmapName(mmap_info)
+    if not os.path.exists(mmap_info.filename):
+      return True
+    # Request section headers (-h), symbols (-t), and dynamic symbols
+    # (-T) from objdump.
+    # Unfortunately, section headers span two lines, so we have to
+    # keep the just seen section name (from the first line in each
+    # section header) in the after_section variable.
+    if self.HasDynamicSymbols(mmap_info.filename):
+      dynamic_symbols = "-T"
+    else:
+      dynamic_symbols = ""
+    process = subprocess.Popen(
+      "%s -h -t %s -C %s" % (OBJDUMP_BIN, dynamic_symbols, mmap_info.filename),
+      shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+    pipe = process.stdout
+    after_section = None
+    code_sections = set()
+    reloc_sections = set()
+    dynamic = False
+    try:
+      for line in pipe:
+        if after_section:
+          if line.find("CODE") != -1:
+            code_sections.add(after_section)
+          if line.find("RELOC") != -1:
+            reloc_sections.add(after_section)
+          after_section = None
+          continue
+
+        match = OBJDUMP_SECTION_HEADER_RE.match(line)
+        if match:
+          after_section = match.group(1)
+          continue
+
+        if OBJDUMP_DYNAMIC_SYMBOLS_START_RE.match(line):
+          dynamic = True
+          continue
+
+        match = OBJDUMP_SYMBOL_LINE_RE.match(line)
+        if match:
+          start_address = int(match.group(1), 16)
+          origin_offset = start_address
+          flags = match.group(2)
+          section = match.group(3)
+          if section in code_sections:
+            if dynamic or section in reloc_sections:
+              start_address += mmap_info.addr
+            size = int(match.group(4), 16)
+            name = match.group(5)
+            origin = mmap_info.filename
+            code_map.Add(Code(name, start_address, start_address + size,
+                              origin, origin_offset))
+    finally:
+      pipe.close()
+    assert process.wait() == 0, "Failed to objdump %s" % mmap_info.filename
+
+  def Tick(self, pc):
+    for i, mmap_info in enumerate(self.infos):
+      if mmap_info.addr <= pc < (mmap_info.addr + mmap_info.len):
+        mmap_info.ticks += 1
+        self.infos[0], self.infos[i] = mmap_info, self.infos[0]
+        return True
+    return False
+
+  def _UniqueMmapName(self, mmap_info):
+    name = mmap_info.filename
+    index = 1
+    while name in self.names:
+      name = "%s-%d" % (mmap_info.filename, index)
+      index += 1
+    self.names.add(name)
+    return name
+
+  def _LoadKernelSymbols(self, code_map):
+    if not os.path.exists(KERNEL_ALLSYMS_FILE):
+      print("Warning: %s not found" % KERNEL_ALLSYMS_FILE, file=sys.stderr)
+      return False
+    kallsyms = open(KERNEL_ALLSYMS_FILE, "r")
+    code = None
+    for line in kallsyms:
+      match = KERNEL_ALLSYMS_LINE_RE.match(line)
+      if match:
+        start_address = int(match.group(1), 16)
+        end_address = start_address
+        name = match.group(2)
+        if code:
+          code.end_address = start_address
+          code_map.Add(code, 16)
+        code = Code(name, start_address, end_address, "kernel", 0)
+    return True
+
+
+def PrintReport(code_map, library_repo, arch, ticks, options):
+  print("Ticks per symbol:")
+  used_code = [code for code in code_map.UsedCode()]
+  used_code.sort(key=lambda x: x.self_ticks, reverse=True)
+  for i, code in enumerate(used_code):
+    code_ticks = code.self_ticks
+    print("%10d %5.1f%% %s [%s]" % (code_ticks, 100. * code_ticks / ticks,
+                                    code.FullName(), code.origin))
+    if options.disasm_all or i < options.disasm_top:
+      code.PrintAnnotated(arch, options)
+  print()
+  print("Ticks per library:")
+  mmap_infos = [m for m in library_repo.infos if m.ticks > 0]
+  mmap_infos.sort(key=lambda m: m.ticks, reverse=True)
+  for mmap_info in mmap_infos:
+    mmap_ticks = mmap_info.ticks
+    print("%10d %5.1f%% %s" % (mmap_ticks, 100. * mmap_ticks / ticks,
+                               mmap_info.unique_name))
+
+
+def PrintDot(code_map, options):
+  print("digraph G {")
+  for code in code_map.UsedCode():
+    if code.self_ticks < 10:
+      continue
+    print("n%d [shape=box,label=\"%s\"];" % (code.id, code.name))
+    if code.callee_ticks:
+      for callee, ticks in code.callee_ticks.iteritems():
+        print("n%d -> n%d [label=\"%d\"];" % (code.id, callee.id, ticks))
+  print("}")
+
+
+if __name__ == "__main__":
+  parser = optparse.OptionParser(USAGE)
+  parser.add_option("--log",
+                    default="v8.log",
+                    help="V8 log file name [default: %default]")
+  parser.add_option("--trace",
+                    default="perf.data",
+                    help="perf trace file name [default: %default]")
+  parser.add_option("--kernel",
+                    default=False,
+                    action="store_true",
+                    help="process kernel entries [default: %default]")
+  parser.add_option("--disasm-top",
+                    default=0,
+                    type="int",
+                    help=("number of top symbols to disassemble and annotate "
+                          "[default: %default]"))
+  parser.add_option("--disasm-all",
+                    default=False,
+                    action="store_true",
+                    help=("disassemble and annotate all used symbols "
+                          "[default: %default]"))
+  parser.add_option("--dot",
+                    default=False,
+                    action="store_true",
+                    help="produce dot output (WIP) [default: %default]")
+  parser.add_option("--quiet", "-q",
+                    default=False,
+                    action="store_true",
+                    help="no auxiliary messages [default: %default]")
+  parser.add_option("--gc-fake-mmap",
+                    default="/tmp/__v8_gc__",
+                    help="gc fake mmap file [default: %default]")
+  parser.add_option("--objdump",
+                    default="/usr/bin/objdump",
+                    help="objdump tool to use [default: %default]")
+  parser.add_option("--host-root",
+                    default="",
+                    help="Path to the host root [default: %default]")
+  options, args = parser.parse_args()
+
+  if not options.quiet:
+    print("V8 log: %s, %s.ll" % (options.log, options.log))
+    print("Perf trace file: %s" % options.trace)
+
+  V8_GC_FAKE_MMAP = options.gc_fake_mmap
+  HOST_ROOT = options.host_root
+  if os.path.exists(options.objdump):
+    disasm.OBJDUMP_BIN = options.objdump
+    OBJDUMP_BIN = options.objdump
+  else:
+    print("Cannot find %s, falling back to default objdump" % options.objdump)
+
+  # Stats.
+  events = 0
+  ticks = 0
+  missed_ticks = 0
+  really_missed_ticks = 0
+  optimized_ticks = 0
+  generated_ticks = 0
+  v8_internal_ticks = 0
+  mmap_time = 0
+  sample_time = 0
+
+  # Initialize the log reader.
+  code_map = CodeMap()
+  log_reader = LogReader(log_name=options.log + ".ll",
+                         code_map=code_map)
+  if not options.quiet:
+    print("Generated code architecture: %s" % log_reader.arch)
+    print()
+    sys.stdout.flush()
+
+  # Process the code and trace logs.
+  library_repo = LibraryRepo()
+  log_reader.ReadUpToGC()
+  trace_reader = TraceReader(options.trace)
+  while True:
+    header, offset = trace_reader.ReadEventHeader()
+    if not header:
+      break
+    events += 1
+    if header.type == PERF_RECORD_MMAP:
+      start = time.time()
+      mmap_info = trace_reader.ReadMmap(header, offset)
+      if mmap_info.filename == HOST_ROOT + V8_GC_FAKE_MMAP:
+        log_reader.ReadUpToGC()
+      else:
+        library_repo.Load(mmap_info, code_map, options)
+      mmap_time += time.time() - start
+    elif header.type == PERF_RECORD_MMAP2:
+      start = time.time()
+      mmap_info = trace_reader.ReadMmap2(header, offset)
+      if mmap_info.filename == HOST_ROOT + V8_GC_FAKE_MMAP:
+        log_reader.ReadUpToGC()
+      else:
+        library_repo.Load(mmap_info, code_map, options)
+      mmap_time += time.time() - start
+    elif header.type == PERF_RECORD_SAMPLE:
+      ticks += 1
+      start = time.time()
+      sample = trace_reader.ReadSample(header, offset)
+      code = code_map.Find(sample.ip)
+      if code:
+        code.Tick(sample.ip)
+        if code.codetype == Code.OPTIMIZED:
+          optimized_ticks += 1
+        elif code.codetype == Code.FULL_CODEGEN:
+          generated_ticks += 1
+        elif code.codetype == Code.V8INTERNAL:
+          v8_internal_ticks += 1
+      else:
+        missed_ticks += 1
+      if not library_repo.Tick(sample.ip) and not code:
+        really_missed_ticks += 1
+      if trace_reader.callchain_supported:
+        for ip in sample.ips:
+          caller_code = code_map.Find(ip)
+          if caller_code:
+            if code:
+              caller_code.CalleeTick(code)
+            code = caller_code
+      sample_time += time.time() - start
+
+  if options.dot:
+    PrintDot(code_map, options)
+  else:
+    PrintReport(code_map, library_repo, log_reader.arch, ticks, options)
+
+    if not options.quiet:
+      def PrintTicks(number, total, description):
+        print("%10d %5.1f%% ticks in %s" %
+              (number, 100.0*number/total, description))
+      print()
+      print("Stats:")
+      print("%10d total trace events" % events)
+      print("%10d total ticks" % ticks)
+      print("%10d ticks not in symbols" % missed_ticks)
+      unaccounted = "unaccounted ticks"
+      if really_missed_ticks > 0:
+        unaccounted += " (probably in the kernel, try --kernel)"
+      PrintTicks(really_missed_ticks, ticks, unaccounted)
+      PrintTicks(optimized_ticks, ticks, "ticks in optimized code")
+      PrintTicks(generated_ticks, ticks, "ticks in other lazily compiled code")
+      PrintTicks(v8_internal_ticks, ticks, "ticks in v8::internal::*")
+      print("%10d total symbols" % len([c for c in code_map.AllCode()]))
+      print("%10d used symbols" % len([c for c in code_map.UsedCode()]))
+      print("%9.2fs library processing time" % mmap_time)
+      print("%9.2fs tick processing time" % sample_time)
+
+  log_reader.Dispose()
+  trace_reader.Dispose()
diff --git a/src/third_party/v8/tools/lldb_commands.py b/src/third_party/v8/tools/lldb_commands.py
new file mode 100644
index 0000000..dc96e57
--- /dev/null
+++ b/src/third_party/v8/tools/lldb_commands.py
@@ -0,0 +1,124 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Load this file by adding this to your ~/.lldbinit:
+# command script import <this_dir>/lldb_commands.py
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import lldb
+import re
+
+#####################
+# Helper functions. #
+#####################
+def current_thread(debugger):
+  return debugger.GetSelectedTarget().GetProcess().GetSelectedThread()
+
+def current_frame(debugger):
+  return current_thread(debugger).GetSelectedFrame()
+
+def no_arg_cmd(debugger, cmd):
+  evaluate_result = current_frame(debugger).EvaluateExpression(cmd)
+  # When a void function is called, the return value type is 0x1001, which
+  # is specified in http://tiny.cc/bigskz. This does not indicate
+  # an error, so we check for that value below.
+  kNoResult = 0x1001
+  error = evaluate_result.GetError()
+  if error.fail and error.value != kNoResult:
+    print("Failed to evaluate command {}:".format(cmd))
+    print(error.description)
+  else:
+    print("")
+
+def ptr_arg_cmd(debugger, name, param, cmd):
+  if not param:
+    print("'{}' requires an argument".format(name))
+    return
+  param = '(void*)({})'.format(param)
+  no_arg_cmd(debugger, cmd.format(param))
+
+#####################
+# lldb commands.    #
+#####################
+def job(debugger, param, *args):
+  """Print a v8 heap object"""
+  ptr_arg_cmd(debugger, 'job', param, "_v8_internal_Print_Object({})")
+
+def jlh(debugger, param, *args):
+  """Print v8::Local handle value"""
+  ptr_arg_cmd(debugger, 'jlh', param,
+              "_v8_internal_Print_Object(*(v8::internal::Object**)({}.val_))")
+
+def jco(debugger, param, *args):
+  """Print the code object at the given pc (default: current pc)"""
+  if not param:
+    param = str(current_frame(debugger).FindRegister("pc").value)
+  ptr_arg_cmd(debugger, 'jco', param, "_v8_internal_Print_Code({})")
+
+def jld(debugger, param, *args):
+  """Print a v8 LayoutDescriptor object"""
+  ptr_arg_cmd(debugger, 'jld', param,
+              "_v8_internal_Print_LayoutDescriptor({})")
+
+def jtt(debugger, param, *args):
+  """Print the transition tree of a v8 Map"""
+  ptr_arg_cmd(debugger, 'jtt', param, "_v8_internal_Print_TransitionTree({})")
+
+def jst(debugger, *args):
+  """Print the current JavaScript stack trace"""
+  no_arg_cmd(debugger, "_v8_internal_Print_StackTrace()")
+
+def jss(debugger, *args):
+  """Skip the jitted stack on x64 to where we entered JS last"""
+  frame = current_frame(debugger)
+  js_entry_sp = frame.EvaluateExpression(
+      "v8::internal::Isolate::Current()->thread_local_top()->js_entry_sp_;") \
+       .GetValue()
+  sizeof_void = frame.EvaluateExpression("sizeof(void*)").GetValue()
+  rbp = frame.FindRegister("rbp")
+  rsp = frame.FindRegister("rsp")
+  pc = frame.FindRegister("pc")
+  rbp = js_entry_sp
+  rsp = js_entry_sp + 2 *sizeof_void
+  pc.value = js_entry_sp + sizeof_void
+
+def bta(debugger, *args):
+  """Print stack trace with assertion scopes"""
+  func_name_re = re.compile("([^(<]+)(?:\(.+\))?")
+  assert_re = re.compile(
+      "^v8::internal::Per\w+AssertType::(\w+)_ASSERT, (false|true)>")
+  thread = current_thread(debugger)
+  for frame in thread:
+    functionSignature = frame.GetDisplayFunctionName()
+    if functionSignature is None:
+      continue
+    functionName = func_name_re.match(functionSignature)
+    line = frame.GetLineEntry().GetLine()
+    sourceFile = frame.GetLineEntry().GetFileSpec().GetFilename()
+    if line:
+      sourceFile = sourceFile + ":" + str(line)
+
+    if sourceFile is None:
+      sourceFile = ""
+    print("[%-2s] %-60s %-40s" % (frame.GetFrameID(),
+                                  functionName.group(1),
+                                  sourceFile))
+    match = assert_re.match(str(functionSignature))
+    if match:
+      if match.group(3) == "false":
+        prefix = "Disallow"
+        color = "\033[91m"
+      else:
+        prefix = "Allow"
+        color = "\033[92m"
+      print("%s -> %s %s (%s)\033[0m" % (
+          color, prefix, match.group(2), match.group(1)))
+
+def __lldb_init_module(debugger, dict):
+  debugger.HandleCommand('settings set target.x86-disassembly-flavor intel')
+  for cmd in ('job', 'jlh', 'jco', 'jld', 'jtt', 'jst', 'jss', 'bta'):
+    debugger.HandleCommand(
+      'command script add -f lldb_commands.{} {}'.format(cmd, cmd))
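+
+# Example session (hypothetical addresses; assumes this file has been loaded
+# via ~/.lldbinit as described at the top):
+#   (lldb) job 0x1a2b3c4d5e6d
+#   (lldb) jco
+#   (lldb) jst
+#   (lldb) bta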
diff --git a/src/third_party/v8/tools/locs.py b/src/third_party/v8/tools/locs.py
new file mode 100755
index 0000000..bd1609d
--- /dev/null
+++ b/src/third_party/v8/tools/locs.py
@@ -0,0 +1,458 @@
+#!/usr/bin/env python
+
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+""" locs.py - Count lines of code before and after preprocessor expansion
+  Consult --help for more information.
+"""
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import argparse
+import json
+import multiprocessing
+import os
+import re
+import subprocess
+import sys
+import tempfile
+import time
+from collections import defaultdict
+from concurrent.futures import ThreadPoolExecutor
+from pathlib import Path
+
+# for py2/py3 compatibility
+try:
+  FileNotFoundError
+except NameError:
+  FileNotFoundError = IOError
+
+ARGPARSE = argparse.ArgumentParser(
+    description=("A script that computes LoC for a build dir"),
+    epilog="""Examples:
+ Count with default settings for build in out/Default:
+   locs.py --build-dir out/Default
+ Count only a custom group of files for a build in out/Default:
+   tools/locs.py --build-dir out/Default
+                 --group src-compiler '\.\./\.\./src/compiler'
+                 --only src-compiler
+ Report the 10 files with the worst expansion:
+   tools/locs.py --build-dir out/Default --worst 10
+ Report the 10 files with the worst expansion in src/compiler:
+   tools/locs.py --build-dir out/Default --worst 10
+                 --group src-compiler '\.\./\.\./src/compiler'
+                 --only src-compiler
+ Report the 10 largest files after preprocessing:
+   tools/locs.py --build-dir out/Default --largest 10
+ Report the 10 smallest input files:
+   tools/locs.py --build-dir out/Default --smallest 10""",
+    formatter_class=argparse.RawTextHelpFormatter
+)
+
+ARGPARSE.add_argument(
+    '--json',
+    action='store_true',
+    default=False,
+    help="output json instead of short summary")
+ARGPARSE.add_argument(
+    '--build-dir',
+    type=str,
+    help="Use specified build dir and generate necessary files",
+    required=True)
+ARGPARSE.add_argument(
+    '--echocmd',
+    action='store_true',
+    default=False,
+    help="output command used to compute LoC")
+ARGPARSE.add_argument(
+    '--only',
+    action='append',
+    default=[],
+    help="Restrict counting to report group (can be passed multiple times)")
+ARGPARSE.add_argument(
+    '--not',
+    action='append',
+    default=[],
+    help="Exclude specific group (can be passed multiple times)")
+ARGPARSE.add_argument(
+    '--list-groups',
+    action='store_true',
+    default=False,
+    help="List groups and associated regular expressions")
+ARGPARSE.add_argument(
+    '--group',
+    nargs=2,
+    action='append',
+    default=[],
+    help="Add a report group (can be passed multiple times)")
+ARGPARSE.add_argument(
+    '--largest',
+    type=int,
+    nargs='?',
+    default=0,
+    const=3,
+    help="Output the n largest files after preprocessing")
+ARGPARSE.add_argument(
+    '--worst',
+    type=int,
+    nargs='?',
+    default=0,
+    const=3,
+    help="Output the n files with worst expansion by preprocessing")
+ARGPARSE.add_argument(
+    '--smallest',
+    type=int,
+    nargs='?',
+    default=0,
+    const=3,
+    help="Output the n smallest input files")
+ARGPARSE.add_argument(
+    '--files',
+    type=int,
+    nargs='?',
+    default=0,
+    const=3,
+    help="Output results for each file separately")
+ARGPARSE.add_argument(
+    '--jobs',
+    type=int,
+    default=multiprocessing.cpu_count(),
+    help="Process specified number of files concurrently")
+
+ARGS = vars(ARGPARSE.parse_args())
+
+
+def MaxWidth(strings):
+  max_width = 0
+  for s in strings:
+    max_width = max(max_width, len(s))
+  return max_width
+
+
+def GenerateCompileCommandsAndBuild(build_dir, out):
+  if not os.path.isdir(build_dir):
+    print("Error: Specified build dir {} is not a directory.".format(
+        build_dir), file=sys.stderr)
+    exit(1)
+
+  autoninja = "autoninja -C {}".format(build_dir)
+  if subprocess.call(autoninja, shell=True, stdout=out) != 0:
+    print("Error: Building {} failed.".format(build_dir), file=sys.stderr)
+    exit(1)
+
+  compile_commands_file = "{}/compile_commands.json".format(build_dir)
+  print("Generating compile commands in {}.".format(
+      compile_commands_file), file=out)
+  ninja = "ninja -C {} -t compdb cxx cc > {}".format(
+      build_dir, compile_commands_file)
+  if subprocess.call(ninja, shell=True, stdout=out) != 0:
+    print("Error: Cound not generate {} for {}.".format(
+        compile_commands_file, build_dir), file=sys.stderr)
+    exit(1)
+
+  ninja_deps_file = "{}/ninja-deps.txt".format(build_dir)
+  print("Generating ninja dependencies in {}.".format(
+      ninja_deps_file), file=out)
+  ninja = "ninja -C {} -t deps > {}".format(
+      build_dir, ninja_deps_file)
+  if subprocess.call(ninja, shell=True, stdout=out) != 0:
+    print("Error: Cound not generate {} for {}.".format(
+        ninja_deps_file, build_dir), file=sys.stderr)
+    exit(1)
+
+  return compile_commands_file, ninja_deps_file
+
+
+def fmt_bytes(num_bytes):
+  if num_bytes > 1024*1024*1024:
+    return int(num_bytes / (1024*1024)), "MB"
+  elif num_bytes > 1024*1024:
+    return int(num_bytes / (1024)), "kB"
+  return int(num_bytes), " B"
+
+
+class CompilationData:
+  def __init__(self, loc, in_bytes, expanded, expanded_bytes):
+    self.loc = loc
+    self.in_bytes = in_bytes
+    self.expanded = expanded
+    self.expanded_bytes = expanded_bytes
+
+  def ratio(self):
+    return self.expanded / (self.loc+1)
+
+  def to_string(self):
+    exp_bytes, exp_unit = fmt_bytes(self.expanded_bytes)
+    in_bytes, in_unit = fmt_bytes(self.in_bytes)
+    return "{:>9,} LoC ({:>7,} {}) to {:>12,} LoC ({:>7,} {}) ({:>5.0f}x)".format(
+        self.loc, in_bytes, in_unit, self.expanded, exp_bytes, exp_unit, self.ratio())
+
+
+class File(CompilationData):
+  def __init__(self, file, target, loc, in_bytes, expanded, expanded_bytes):
+    super().__init__(loc, in_bytes, expanded, expanded_bytes)
+    self.file = file
+    self.target = target
+
+  def to_string(self):
+    return "{} {} {}".format(super().to_string(), self.file, self.target)
+
+
+class Group(CompilationData):
+  def __init__(self, name, regexp_string):
+    super().__init__(0, 0, 0, 0)
+    self.name = name
+    self.count = 0
+    self.regexp = re.compile(regexp_string)
+
+  def account(self, unit):
+    if (self.regexp.match(unit.file)):
+      self.loc += unit.loc
+      self.in_bytes += unit.in_bytes
+      self.expanded += unit.expanded
+      self.expanded_bytes += unit.expanded_bytes
+      self.count += 1
+
+  def to_string(self, name_width):
+    return "{:<{}} ({:>5} files): {}".format(
+        self.name, name_width, self.count, super().to_string())
+
+
+def SetupReportGroups():
+  default_report_groups = {"total": '.*',
+                           "src": '\\.\\./\\.\\./src',
+                           "test": '\\.\\./\\.\\./test',
+                           "third_party": '\\.\\./\\.\\./third_party',
+                           "gen": 'gen'}
+
+  report_groups = default_report_groups.copy()
+  report_groups.update(dict(ARGS['group']))
+
+  if ARGS['only']:
+    for only_arg in ARGS['only']:
+      if only_arg not in report_groups:
+        print("Error: specified report group '{}' is not defined.".format(
+            only_arg))
+        exit(1)
+      else:
+        report_groups = {
+            k: v for (k, v) in report_groups.items() if k in ARGS['only']}
+
+  if ARGS['not']:
+    report_groups = {
+        k: v for (k, v) in report_groups.items() if k not in ARGS['not']}
+
+  if ARGS['list_groups']:
+    print_cat_max_width = MaxWidth(list(report_groups.keys()) + ["Category"])
+    print("  {:<{}}  {}".format("Category",
+                                print_cat_max_width, "Regular expression"))
+    for cat, regexp_string in report_groups.items():
+      print("  {:<{}}: {}".format(
+          cat, print_cat_max_width, regexp_string))
+
+  report_groups = {k: Group(k, v) for (k, v) in report_groups.items()}
+
+  return report_groups
+
+
+class Results:
+  def __init__(self):
+    self.groups = SetupReportGroups()
+    self.units = {}
+    self.source_dependencies = {}
+    self.header_dependents = {}
+
+  def track(self, filename):
+    is_tracked = False
+    for group in self.groups.values():
+      if group.regexp.match(filename):
+        is_tracked = True
+    return is_tracked
+
+  def recordFile(self, filename, targetname, loc, in_bytes, expanded, expanded_bytes):
+    unit = File(filename, targetname, loc, in_bytes, expanded, expanded_bytes)
+    self.units[filename] = unit
+    for group in self.groups.values():
+      group.account(unit)
+
+  def maxGroupWidth(self):
+    return MaxWidth([v.name for v in self.groups.values()])
+
+  def printGroupResults(self, file):
+    for key in sorted(self.groups.keys()):
+      print(self.groups[key].to_string(self.maxGroupWidth()), file=file)
+
+  def printSorted(self, key, count, reverse, out):
+    for unit in sorted(list(self.units.values()), key=key, reverse=reverse)[:count]:
+      print(unit.to_string(), file=out)
+
+  def addHeaderDeps(self, source_dependencies, header_dependents):
+    self.source_dependencies = source_dependencies
+    self.header_dependents = header_dependents
+
+
+class LocsEncoder(json.JSONEncoder):
+  def default(self, o):
+    if isinstance(o, File):
+      return {"file": o.file, "target": o.target, "loc": o.loc, "in_bytes": o.in_bytes,
+              "expanded": o.expanded, "expanded_bytes": o.expanded_bytes}
+    if isinstance(o, Group):
+      return {"name": o.name, "loc": o.loc, "in_bytes": o.in_bytes,
+              "expanded": o.expanded, "expanded_bytes": o.expanded_bytes}
+    if isinstance(o, Results):
+      return {"groups": o.groups, "units": o.units,
+              "source_dependencies": o.source_dependencies,
+              "header_dependents": o.header_dependents}
+    return json.JSONEncoder.default(self, o)
+
+
+class StatusLine:
+  def __init__(self):
+    self.max_width = 0
+
+  def print(self, statusline, end="\r", file=sys.stdout):
+    self.max_width = max(self.max_width, len(statusline))
+    print("{0:<{1}}".format(statusline, self.max_width),
+          end=end, file=file, flush=True)
+
+
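+# A compile_commands.json entry's "command" field roughly looks like the
+# following (illustrative paths only):
+#   .../clang++ <flags> -c ../../src/objects.cc -o obj/src/objects.o
+# CommandSplitter pulls the clang invocation, the input file and the output
+# file out of such a line.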
+class CommandSplitter:
+  def __init__(self):
+    self.cmd_pattern = re.compile(
+        "([^\\s]*\\s+)?(?P<clangcmd>[^\\s]*clang.*)"
+        " -c (?P<infile>.*) -o (?P<outfile>.*)")
+
+  def process(self, compilation_unit):
+    cmd = self.cmd_pattern.match(compilation_unit['command'])
+    outfilename = cmd.group('outfile')
+    infilename = cmd.group('infile')
+    infile = Path(compilation_unit['directory']).joinpath(infilename)
+    return (cmd.group('clangcmd'), infilename, infile, outfilename)
+
+
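+# `ninja -C <build_dir> -t deps` output, as parsed below, roughly looks like
+# this (target and file names are illustrative):
+#   obj/src/objects.o: #deps 120, deps mtime 1585000000 (VALID)
+#       ../../src/objects.cc
+#       ../../src/objects.h
+#
+# Targets are unindented and terminated by a blank line; each dependency is
+# indented by exactly four spaces.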
+def parse_ninja_deps(ninja_deps):
+  source_dependencies = {}
+  header_dependents = defaultdict(int)
+  current_target = None
+  for line in ninja_deps:
+    line = line.rstrip()
+    # Ignore empty lines
+    if not line:
+      current_target = None
+      continue
+    if line[0] == ' ':
+      # New dependency
+      if len(line) < 5 or line[0:4] != '    ' or line[5] == ' ':
+        sys.exit('Lines must have no indentation or exactly four ' +
+                 'spaces.')
+      dep = line[4:]
+      if not re.search(r"\.(h|hpp)$", dep):
+        continue
+      header_dependents[dep] += 1
+      continue
+    # New target
+    colon_pos = line.find(':')
+    if colon_pos < 0:
+      sys.exit('Unindented line must have a colon')
+    if current_target is not None:
+      sys.exit('Missing empty line before new target')
+    current_target = line[0:colon_pos]
+    match = re.search(r"#deps (\d+)", line)
+    deps_number = match.group(1)
+    source_dependencies[current_target] = int(deps_number)
+
+  return (source_dependencies, header_dependents)
+
+
+def Main():
+  out = sys.stdout
+  if ARGS['json']:
+    out = sys.stderr
+
+  compile_commands_file, ninja_deps_file = GenerateCompileCommandsAndBuild(
+      ARGS['build_dir'], out)
+
+  result = Results()
+  status = StatusLine()
+
+  try:
+    with open(compile_commands_file) as file:
+      compile_commands = json.load(file)
+    with open(ninja_deps_file) as file:
+      source_dependencies, header_dependents = parse_ninja_deps(file)
+      result.addHeaderDeps(source_dependencies, header_dependents)
+  except FileNotFoundError:
+    print("Error: Cannot read '{}'. Consult --help to get started.".format(
+        ninja_deps_file))
+    exit(1)
+
+  cmd_splitter = CommandSplitter()
+
+  def count_lines_of_unit(ikey):
+    i, key = ikey
+    if not result.track(key['file']):
+      return
+    message = "[{}/{}] Counting LoCs of {}".format(
+        i, len(compile_commands), key['file'])
+    status.print(message, file=out)
+    clangcmd, infilename, infile, outfilename = cmd_splitter.process(key)
+    if not infile.is_file():
+      return
+
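+    # Run two pipelines per translation unit: clangcmd preprocesses the file
+    # (-E -P) and counts non-blank lines/bytes of the expansion, while loccmd
+    # counts non-comment, non-blank lines/bytes of the raw input. Both end in
+    # `wc -lc`, so the combined output parses into four integers below.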
+    clangcmd = clangcmd + " -E -P " + \
+        str(infile) + " -o /dev/stdout | sed '/^\\s*$/d' | wc -lc"
+    loccmd = ("cat {}  | sed '\\;^\\s*//;d' | sed '\\;^/\\*;d'"
+              " | sed '/^\\*/d' | sed '/^\\s*$/d' | wc -lc")
+    loccmd = loccmd.format(infile)
+    runcmd = " {} ; {}".format(clangcmd, loccmd)
+    if ARGS['echocmd']:
+      print(runcmd)
+    process = subprocess.Popen(
+        runcmd, shell=True, cwd=key['directory'], stdout=subprocess.PIPE)
+    p = {'process': process, 'infile': infilename, 'outfile': outfilename}
+    output, _ = p['process'].communicate()
+    expanded, expanded_bytes, loc, in_bytes = list(map(int, output.split()))
+    result.recordFile(p['infile'], p['outfile'], loc,
+                      in_bytes, expanded, expanded_bytes)
+
+  with tempfile.TemporaryDirectory(dir='/tmp/', prefix="locs.") as temp:
+    start = time.time()
+
+    with ThreadPoolExecutor(max_workers=ARGS['jobs']) as executor:
+      list(executor.map(count_lines_of_unit, enumerate(compile_commands)))
+
+    end = time.time()
+    if ARGS['json']:
+      print(json.dumps(result, ensure_ascii=False, cls=LocsEncoder))
+    status.print("Processed {:,} files in {:,.2f} sec.".format(
+        len(compile_commands), end-start), end="\n", file=out)
+    result.printGroupResults(file=out)
+
+    if ARGS['largest']:
+      print("Largest {} files after expansion:".format(ARGS['largest']))
+      result.printSorted(
+          lambda v: v.expanded, ARGS['largest'], reverse=True, out=out)
+
+    if ARGS['worst']:
+      print("Worst expansion ({} files):".format(ARGS['worst']))
+      result.printSorted(
+          lambda v: v.ratio(), ARGS['worst'], reverse=True, out=out)
+
+    if ARGS['smallest']:
+      print("Smallest {} input files:".format(ARGS['smallest']))
+      result.printSorted(
+          lambda v: v.loc, ARGS['smallest'], reverse=False, out=out)
+
+    if ARGS['files']:
+      print("List of input files:")
+      result.printSorted(
+          lambda v: v.file, ARGS['files'], reverse=False, out=out)
+
+  return 0
+
+
+if __name__ == '__main__':
+  sys.exit(Main())
diff --git a/src/third_party/v8/tools/logreader.js b/src/third_party/v8/tools/logreader.js
new file mode 100644
index 0000000..ff0a71a
--- /dev/null
+++ b/src/third_party/v8/tools/logreader.js
@@ -0,0 +1,247 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/**
+ * @fileoverview Log Reader is used to process log file produced by V8.
+ */
+
+
+/**
+ * Base class for processing log files.
+ *
+ * @param {Array.<Object>} dispatchTable A table used for parsing and processing
+ *     log records.
+ * @param {boolean} timedRange Ignore ticks outside timed range.
+ * @param {boolean} pairwiseTimedRange Ignore ticks outside pairs of timer
+ *     markers.
+ * @constructor
+ */
+function LogReader(dispatchTable, timedRange, pairwiseTimedRange) {
+  /**
+   * @type {Array.<Object>}
+   */
+  this.dispatchTable_ = dispatchTable;
+
+  /**
+   * @type {boolean}
+   */
+  this.timedRange_ = timedRange;
+
+  /**
+   * @type {boolean}
+   */
+  this.pairwiseTimedRange_ = pairwiseTimedRange;
+  if (pairwiseTimedRange) {
+    this.timedRange_ = true;
+  }
+
+  /**
+   * Current line.
+   * @type {number}
+   */
+  this.lineNum_ = 0;
+
+  /**
+   * CSV lines parser.
+   * @type {CsvParser}
+   */
+  this.csvParser_ = new CsvParser();
+
+  /**
+   * Keeps track of whether we've seen a "current-time" tick yet.
+   * @type {boolean}
+   */
+  this.hasSeenTimerMarker_ = false;
+
+  /**
+   * List of log lines seen since last "current-time" tick.
+   * @type {Array.<String>}
+   */
+  this.logLinesSinceLastTimerMarker_ = [];
+};
+
+
+/**
+ * Used for printing error messages.
+ *
+ * @param {string} str Error message.
+ */
+LogReader.prototype.printError = function(str) {
+  // Do nothing.
+};
+
+
+/**
+ * Processes a portion of V8 profiler event log.
+ *
+ * @param {string} chunk A portion of log.
+ */
+LogReader.prototype.processLogChunk = function(chunk) {
+  this.processLog_(chunk.split('\n'));
+};
+
+
+/**
+ * Processes a line of V8 profiler event log.
+ *
+ * @param {string} line A line of log.
+ */
+LogReader.prototype.processLogLine = function(line) {
+  if (!this.timedRange_) {
+    this.processLogLine_(line);
+    return;
+  }
+  if (line.startsWith("current-time")) {
+    if (this.hasSeenTimerMarker_) {
+      this.processLog_(this.logLinesSinceLastTimerMarker_);
+      this.logLinesSinceLastTimerMarker_ = [];
+      // In pairwise mode, a "current-time" line ends the timed range.
+      if (this.pairwiseTimedRange_) {
+        this.hasSeenTimerMarker_ = false;
+      }
+    } else {
+      this.hasSeenTimerMarker_ = true;
+    }
+  } else {
+    if (this.hasSeenTimerMarker_) {
+      this.logLinesSinceLastTimerMarker_.push(line);
+    } else if (!line.startsWith("tick")) {
+      this.processLogLine_(line);
+    }
+  }
+};
+
+
+/**
+ * Processes stack record.
+ *
+ * @param {number} pc Program counter.
+ * @param {number} func JS Function.
+ * @param {Array.<string>} stack String representation of a stack.
+ * @return {Array.<number>} Processed stack.
+ */
+LogReader.prototype.processStack = function(pc, func, stack) {
+  var fullStack = func ? [pc, func] : [pc];
+  var prevFrame = pc;
+  for (var i = 0, n = stack.length; i < n; ++i) {
+    var frame = stack[i];
+    var firstChar = frame.charAt(0);
+    if (firstChar == '+' || firstChar == '-') {
+      // An offset from the previous frame.
+      prevFrame += parseInt(frame, 16);
+      fullStack.push(prevFrame);
+    // Filter out possible 'overflow' string.
+    } else if (firstChar != 'o') {
+      fullStack.push(parseInt(frame, 16));
+    } else {
+      this.printError("dropping: " + frame);
+    }
+  }
+  return fullStack;
+};
+
+
+/**
+ * Returns whether a particular dispatch must be skipped.
+ *
+ * @param {!Object} dispatch Dispatch record.
+ * @return {boolean} True if dispatch must be skipped.
+ */
+LogReader.prototype.skipDispatch = function(dispatch) {
+  return false;
+};
+
+// Parser dummy values, named for readability.
+const parseString = 'parse-string';
+const parseVarArgs = 'parse-var-args';
+
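+// A dispatchTable_ entry, as consumed by dispatchLogRow_ below, roughly looks
+// like this (the event name and processor are illustrative only):
+//
+//   'code-creation': {
+//     parsers: [parseString, parseInt, parseVarArgs],
+//     processor: this.processCodeCreation
+//   }
+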
+/**
+ * Does a dispatch of a log record.
+ *
+ * @param {Array.<string>} fields Log record.
+ * @private
+ */
+LogReader.prototype.dispatchLogRow_ = function(fields) {
+  // Obtain the dispatch.
+  var command = fields[0];
+  var dispatch = this.dispatchTable_[command];
+  if (dispatch === undefined) return;
+  if (dispatch === null || this.skipDispatch(dispatch)) {
+    return;
+  }
+
+  // Parse fields.
+  var parsedFields = [];
+  for (var i = 0; i < dispatch.parsers.length; ++i) {
+    var parser = dispatch.parsers[i];
+    if (parser === parseString) {
+      parsedFields.push(fields[1 + i]);
+    } else if (typeof parser == 'function') {
+      parsedFields.push(parser(fields[1 + i]));
+    } else if (parser === parseVarArgs) {
+      // var-args
+      parsedFields.push(fields.slice(1 + i));
+      break;
+    } else {
+      throw new Error("Invalid log field parser: " + parser);
+    }
+  }
+
+  // Run the processor.
+  dispatch.processor.apply(this, parsedFields);
+};
+
+
+/**
+ * Processes log lines.
+ *
+ * @param {Array.<string>} lines Log lines.
+ * @private
+ */
+LogReader.prototype.processLog_ = function(lines) {
+  for (var i = 0, n = lines.length; i < n; ++i) {
+    this.processLogLine_(lines[i]);
+  }
+}
+
+/**
+ * Processes a single log line.
+ *
+ * @param {string} line A log line.
+ * @private
+ */
+LogReader.prototype.processLogLine_ = function(line) {
+  if (line.length > 0) {
+    try {
+      var fields = this.csvParser_.parseLine(line);
+      this.dispatchLogRow_(fields);
+    } catch (e) {
+      this.printError('line ' + (this.lineNum_ + 1) + ': ' + (e.message || e) + '\n' + e.stack);
+    }
+  }
+  this.lineNum_++;
+};
diff --git a/src/third_party/v8/tools/logreader.mjs b/src/third_party/v8/tools/logreader.mjs
new file mode 100644
index 0000000..1bd9a4b
--- /dev/null
+++ b/src/third_party/v8/tools/logreader.mjs
@@ -0,0 +1,246 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/**
+ * @fileoverview Log Reader is used to process log file produced by V8.
+ */
+
+import { CsvParser } from "./csvparser.mjs";
+
+/**
+ * Base class for processing log files.
+ *
+ * @param {Array.<Object>} dispatchTable A table used for parsing and processing
+ *     log records.
+ * @param {boolean} timedRange Ignore ticks outside timed range.
+ * @param {boolean} pairwiseTimedRange Ignore ticks outside pairs of timer
+ *     markers.
+ * @constructor
+ */
+export function LogReader(dispatchTable, timedRange, pairwiseTimedRange) {
+  /**
+   * @type {Array.<Object>}
+   */
+  this.dispatchTable_ = dispatchTable;
+
+  /**
+   * @type {boolean}
+   */
+  this.timedRange_ = timedRange;
+
+  /**
+   * @type {boolean}
+   */
+  this.pairwiseTimedRange_ = pairwiseTimedRange;
+  if (pairwiseTimedRange) {
+    this.timedRange_ = true;
+  }
+
+  /**
+   * Current line.
+   * @type {number}
+   */
+  this.lineNum_ = 0;
+
+  /**
+   * CSV lines parser.
+   * @type {CsvParser}
+   */
+  this.csvParser_ = new CsvParser();
+
+  /**
+   * Keeps track of whether we've seen a "current-time" tick yet.
+   * @type {boolean}
+   */
+  this.hasSeenTimerMarker_ = false;
+
+  /**
+   * List of log lines seen since last "current-time" tick.
+   * @type {Array.<String>}
+   */
+  this.logLinesSinceLastTimerMarker_ = [];
+};
+
+
+/**
+ * Used for printing error messages.
+ *
+ * @param {string} str Error message.
+ */
+LogReader.prototype.printError = function(str) {
+  // Do nothing.
+};
+
+
+/**
+ * Processes a portion of V8 profiler event log.
+ *
+ * @param {string} chunk A portion of log.
+ */
+LogReader.prototype.processLogChunk = function(chunk) {
+  this.processLog_(chunk.split('\n'));
+};
+
+
+/**
+ * Processes a line of V8 profiler event log.
+ *
+ * @param {string} line A line of log.
+ */
+LogReader.prototype.processLogLine = function(line) {
+  if (!this.timedRange_) {
+    this.processLogLine_(line);
+    return;
+  }
+  if (line.startsWith("current-time")) {
+    if (this.hasSeenTimerMarker_) {
+      this.processLog_(this.logLinesSinceLastTimerMarker_);
+      this.logLinesSinceLastTimerMarker_ = [];
+      // In pairwise mode, a "current-time" line ends the timed range.
+      if (this.pairwiseTimedRange_) {
+        this.hasSeenTimerMarker_ = false;
+      }
+    } else {
+      this.hasSeenTimerMarker_ = true;
+    }
+  } else {
+    if (this.hasSeenTimerMarker_) {
+      this.logLinesSinceLastTimerMarker_.push(line);
+    } else if (!line.startsWith("tick")) {
+      this.processLogLine_(line);
+    }
+  }
+};
+
+
+/**
+ * Processes stack record.
+ *
+ * @param {number} pc Program counter.
+ * @param {number} func JS Function.
+ * @param {Array.<string>} stack String representation of a stack.
+ * @return {Array.<number>} Processed stack.
+ */
+LogReader.prototype.processStack = function(pc, func, stack) {
+  const fullStack = func ? [pc, func] : [pc];
+  let prevFrame = pc;
+  for (let i = 0, n = stack.length; i < n; ++i) {
+    const frame = stack[i];
+    const firstChar = frame.charAt(0);
+    if (firstChar == '+' || firstChar == '-') {
+      // An offset from the previous frame.
+      prevFrame += parseInt(frame, 16);
+      fullStack.push(prevFrame);
+    // Filter out possible 'overflow' string.
+    } else if (firstChar != 'o') {
+      fullStack.push(parseInt(frame, 16));
+    } else {
+      this.printError(`dropping: ${frame}`);
+    }
+  }
+  return fullStack;
+};
+
+
+/**
+ * Returns whether a particular dispatch must be skipped.
+ *
+ * @param {!Object} dispatch Dispatch record.
+ * @return {boolean} True if dispatch must be skipped.
+ */
+LogReader.prototype.skipDispatch = dispatch => false;
+
+// Parser dummy values, named for readability.
+export const parseString = 'parse-string';
+export const parseVarArgs = 'parse-var-args';
+
+/**
+ * Does a dispatch of a log record.
+ *
+ * @param {Array.<string>} fields Log record.
+ * @private
+ */
+LogReader.prototype.dispatchLogRow_ = function(fields) {
+  // Obtain the dispatch.
+  const command = fields[0];
+  const dispatch = this.dispatchTable_[command];
+  if (dispatch === undefined) return;
+  if (dispatch === null || this.skipDispatch(dispatch)) {
+    return;
+  }
+
+  // Parse fields.
+  const parsedFields = [];
+  for (let i = 0; i < dispatch.parsers.length; ++i) {
+    const parser = dispatch.parsers[i];
+    if (parser === parseString) {
+      parsedFields.push(fields[1 + i]);
+    } else if (typeof parser == 'function') {
+      parsedFields.push(parser(fields[1 + i]));
+    } else if (parser === parseVarArgs) {
+      // var-args
+      parsedFields.push(fields.slice(1 + i));
+      break;
+    } else {
+      throw new Error(`Invalid log field parser: ${parser}`);
+    }
+  }
+
+  // Run the processor.
+  dispatch.processor.apply(this, parsedFields);
+};
+
+
+/**
+ * Processes log lines.
+ *
+ * @param {Array.<string>} lines Log lines.
+ * @private
+ */
+LogReader.prototype.processLog_ = function(lines) {
+  for (let i = 0, n = lines.length; i < n; ++i) {
+    this.processLogLine_(lines[i]);
+  }
+}
+
+/**
+ * Processes a single log line.
+ *
+ * @param {string} line A log line.
+ * @private
+ */
+LogReader.prototype.processLogLine_ = function(line) {
+  if (line.length > 0) {
+    try {
+      const fields = this.csvParser_.parseLine(line);
+      this.dispatchLogRow_(fields);
+    } catch (e) {
+      this.printError(`line ${this.lineNum_ + 1}: ${e.message || e}\n${e.stack}`);
+    }
+  }
+  this.lineNum_++;
+};
diff --git a/src/third_party/v8/tools/mac-nm b/src/third_party/v8/tools/mac-nm
new file mode 100755
index 0000000..cc5f6c7
--- /dev/null
+++ b/src/third_party/v8/tools/mac-nm
@@ -0,0 +1,19 @@
+#!/bin/sh
+
+# This script is a wrapper for the OS X nm(1) tool. nm(1) doesn't demangle C++
+# function names, so we're piping its output to c++filt(1), which does. But
+# c++filt(1) comes with Xcode (as a part of GNU binutils), so it isn't
+# guaranteed to exist on a system.
+#
+# An alternative approach is to perform demangling in tick processor, but
+# for GNU C++ ABI this is a complex process (see cp-demangle.c sources), and
+# can't be done partially, because term boundaries are plain text symbols, such
+# as 'N', 'E', so one can't just do a search through a function name, it really
+# needs to be parsed, which requires a lot of knowledge to be coded in.
+
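+# For reference, a line kept by the sed filter below looks roughly like this
+# (address and symbol are illustrative):
+#   0000000100003f50 T __ZN2v88internal7Isolate7CurrentEv
+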
+if [ "`which c++filt`" == "" ]; then
+  nm "$@"
+else
+  nm "$@" | sed -n "s/\([0-9a-fA-F]\{8,16\}\) [iItT] \(.*\)/\\1 \\2/p"\
+    | c++filt -p -i
+fi
diff --git a/src/third_party/v8/tools/mac-tick-processor b/src/third_party/v8/tools/mac-tick-processor
new file mode 100755
index 0000000..5fba622
--- /dev/null
+++ b/src/third_party/v8/tools/mac-tick-processor
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+# A wrapper script to call 'linux-tick-processor' with Mac-specific settings.
+
+tools_path=`cd $(dirname "$0");pwd`
+$tools_path/linux-tick-processor --mac --nm=$tools_path/mac-nm "$@"
diff --git a/src/third_party/v8/tools/mb/README.md b/src/third_party/v8/tools/mb/README.md
new file mode 100644
index 0000000..4e73a8e
--- /dev/null
+++ b/src/third_party/v8/tools/mb/README.md
@@ -0,0 +1,22 @@
+# MB - The Meta-Build wrapper
+
+MB is a simple wrapper intended to provide a uniform interface to either
+GYP or GN, such that users and bots can call one script and not need to
+worry about whether a given bot is meant to use GN or GYP.
+
+It supports two main functions:
+
+1. "gen" - the main `gyp_chromium` / `gn gen` invocation that generates the
+   Ninja files needed for the build.
+
+2. "analyze" - the step that takes a list of modified files and a list of
+   desired targets and reports which targets will need to be rebuilt.
+
+We also use MB as a forcing function to collect all of the different 
+build configurations that we actually support for Chromium builds into
+one place, in `//tools/mb/mb_config.pyl`.
+
+For more information, see:
+
+* [The User Guide](docs/user_guide.md)
+* [The Design Spec](docs/design_spec.md)
diff --git a/src/third_party/v8/tools/mb/docs/README.md b/src/third_party/v8/tools/mb/docs/README.md
new file mode 100644
index 0000000..f29007d
--- /dev/null
+++ b/src/third_party/v8/tools/mb/docs/README.md
@@ -0,0 +1,4 @@
+# The MB (Meta-Build wrapper) documentation
+
+* The [User Guide](user_guide.md)
+* The [Design Spec](design_spec.md)
diff --git a/src/third_party/v8/tools/mb/docs/design_spec.md b/src/third_party/v8/tools/mb/docs/design_spec.md
new file mode 100644
index 0000000..c119e65
--- /dev/null
+++ b/src/third_party/v8/tools/mb/docs/design_spec.md
@@ -0,0 +1,426 @@
+# The MB (Meta-Build wrapper) design spec
+
+[TOC]
+
+## Intro
+
+MB is intended to address two major aspects of the GYP -> GN transition
+for Chromium:
+
+1. "bot toggling" - make it so that we can easily flip a given bot
+   back and forth between GN and GYP.
+
+2. "bot configuration" - provide a single source of truth for all of
+   the different configurations (os/arch/`gyp_define` combinations) of
+   Chromium that are supported.
+
+MB must handle at least the `gen` and `analyze` steps on the bots, i.e.,
+we need to wrap both the `gyp_chromium` invocation to generate the
+Ninja files, and the `analyze` step that takes a list of modified files
+and a list of targets to build and returns which targets are affected by
+the files.
+
+For more information on how to actually use MB, see
+[the user guide](user_guide.md).
+
+## Design
+
+MB is intended to be as simple as possible, and to defer as much work as
+possible to GN or GYP. It should live as a very simple Python wrapper
+that offers little in the way of surprises.
+
+### Command line
+
+It is structured as a single binary that supports a list of subcommands:
+
+* `mb gen -c linux_rel_bot //out/Release`
+* `mb analyze -m tryserver.chromium.linux -b linux_rel /tmp/input.json /tmp/output.json`
+
+### Configurations
+
+`mb` will first look for a bot config file in a set of different locations
+(initially just in //ios/build/bots). Bot config files are JSON files that
+contain keys for 'GYP_DEFINES' (a list of strings that will be joined together
+with spaces and passed to GYP, or a dict that will be similarly converted),
+'gn_args' (a list of strings that will be joined together), and an
+'mb_type' field that says whether to use GN or GYP. Bot config files
+require the full list of settings to be given explicitly.
+
+If no matching bot config file is found, `mb` looks in the
+`//tools/mb/mb_config.pyl` config file to determine whether to use GYP or GN
+for a particular build directory, and what set of flags (`GYP_DEFINES` or `gn
+args`) to use.
+
+A config can either be specified directly (useful for testing) or by specifying
+the master name and builder name (useful on the bots so that they do not need
+to specify a config directly and can be hidden from the details).
+
+See the [user guide](user_guide.md#mb_config.pyl) for details.
+
+### Handling the analyze step
+
+The interface to `mb analyze` is described in the
+[user\_guide](user_guide.md#mb_analyze).
+
+The way analyze works can be subtle and complicated (see below).
+
+Since the interface basically mirrors the way the "analyze" step on the bots
+invokes `gyp_chromium` today, when the config is found to be a gyp config,
+the arguments are passed straight through.
+
+It implements the equivalent functionality in GN by calling `gn refs
+[list of files] --type=executable --all --as=output` and filtering the
+output to match the list of targets.
+
+## Analyze
+
+The goal of the `analyze` step is to speed up the cycle time of the try servers
+by only building and running the tests affected by the files in a patch, rather
+than everything that might be out of date. Doing this ends up being tricky.
+
+We start with the following requirements and observations:
+
+* In an ideal (un-resource-constrained) world, we would build and test
+  everything that a patch affected on every patch. This does not
+  necessarily mean that we would build 'all' on every patch (see below).
+
+* In the real world, however, we do not have an infinite number of machines,
+  and try jobs are not infinitely fast, so we need to balance the desire
+  to get maximum test coverage against the desire to have reasonable cycle
+  times, given the number of machines we have.
+
+* Also, since we run most try jobs against tip-of-tree Chromium, by
+  the time one job completes on the bot, new patches have probably landed,
+  rendering the build out of date.
+
+* This means that the next try job may have to do a build that is out of
+  date due to a combination of files affected by a given patch, and files
+  affected for unrelated reasons. We want to rebuild and test only the
+  targets affected by the patch, so that we don't blame or punish the
+  patch author for unrelated changes.
+
+So:
+
+1. We need a way to indicate which changed files we care about and which
+   we don't (the affected files of a patch).
+
+2. We need to know which tests we might potentially want to run, and how
+   those are mapped onto build targets. For some kinds of tests (like
+   GTest-based tests), the mapping is 1:1 - if you want to run base_unittests,
+   you need to build base_unittests. For others (like the telemetry and
+   layout tests), you might need to build several executables in order to
+   run the tests, and that mapping might best be captured by a *meta*
+   target (a GN group or a GYP 'none' target like `webkit_tests`) that
+   depends on the right list of files. Because the GN and GYP files know
+   nothing about test steps, we have to have some way of mapping back
+   and forth between test steps and build targets. That mapping
+   is *not* currently available to MB (or GN or GYP), and so we have to return
+   enough information to make it possible for the caller to do the mapping.
+
+3. We might also want to know when test targets are affected by data files
+   that aren't compiled (python scripts, or the layout tests themselves).
+   There's no good way to do this in GYP, but GN supports this.
+
+4. We also want to ensure that particular targets still compile even if they
+   are not actually tested; consider testing the installers themselves, or
+   targets that don't yet have good test coverage. We might want to use meta
+   targets for this purpose as well.
+
+5. However, for some meta targets, we don't necessarily want to rebuild the
+   meta target itself, perhaps just the dependencies of the meta target that
+   are affected by the patch. For example, a meta target like `blink_tests`
+   might depend on ten different test binaries. If a patch
+   only affects one of them (say `wtf_unittests`), you don't want to
+   build `blink_tests`, because that might actually also build the other nine
+   targets.  In other words, some meta targets are *prunable*.
+
+6. As noted above, in the ideal case we actually have enough resources and
+   things are fast enough that we can afford to build everything affected by a
+   patch, but listing every possible target explicitly would be painful. The
+   GYP and GN Ninja generators provide an 'all' target that captures (nearly,
+   see [crbug.com/503241](crbug.com/503241)) everything, but unfortunately
+   neither GN nor GYP actually represents 'all' as a meta target in the build
+   graph, so we will need to write code to handle that specially.
+
+7. In some cases, we will not be able to correctly analyze the build graph to
+   determine the impact of a patch, and need to bail out (e.g., if you change a
+   build file itself, it may not be easy to tell how that affects the graph).
+   In that case we should simply build and run everything.
+
+The interaction between 2) and 5) means that we need to treat meta targets
+two different ways, and so we need to know which targets should be
+pruned in the sense of 5) and which targets should be returned unchanged
+so that we can map them back to the appropriate tests.
+
+So, we need three things as input:
+
+* `files`: the list of files in the patch
+* `test_targets`: the list of ninja targets which, if affected by a patch,
+  should be reported back so that we can map them back to the appropriate
+  tests to run. Any meta targets in this list should *not* be pruned.
+* `additional_compile_targets`: the list of ninja targets we wish to compile
+  *in addition to* the list in `test_targets`. Any meta targets
+  present in this list should be pruned (we don't need to return the
+  meta targets because they aren't mapped back to tests, and we don't want
+  to build them because we might build too much).
+
+We can then return two lists as output:
+
+* `compile_targets`, which is a list of pruned targets to be
+  passed to Ninja to build. It is acceptable to replace a list of
+  pruned targets by a meta target if it turns out that all of the
+  dependencies of the target are affected by the patch (i.e.,
+  all ten binaries that blink_tests depends on), but doing so is
+  not required.
+* `test_targets`, which is a list of unpruned targets to be mapped
+  back to determine which tests to run.
+
+There may be substantial overlap between the two lists, but there is
+no guarantee that one is a subset of the other and the two cannot be
+used interchangeably or merged together without losing information and
+causing the wrong thing to happen.
+
+The implementation is responsible for recognizing 'all' as a magic string
+and mapping it onto the list of all root nodes in the build graph.
+
+There may be files listed in the input that don't actually exist in the build
+graph: this could be either the result of an error (the file should be in the
+build graph, but isn't), or perfectly fine (the file doesn't affect the build
+graph at all). We can't tell these two apart, so we should ignore missing
+files.
+
+There may be targets listed in the input that don't exist in the build
+graph; unlike missing files, this can only indicate a configuration error,
+and so we should return which targets are missing so the caller can
+treat this as an error, if so desired.
+
+Any of the three inputs may be an empty list:
+
+* It normally doesn't make sense to call analyze at all if no files
+  were modified, but in rare cases we can hit a race where we try to
+  test a patch after it has already been committed, in which case
+  the list of modified files is empty. We should return 'no dependency'
+  in that case.
+
+* Passing an empty list for one or the other of test_targets and
+  additional_compile_targets is perfectly sensible: in the former case,
+  it can indicate that you don't want to run any tests, and in the latter,
+  it can indicate that you don't want to build anything else in
+  addition to the test targets.
+
+* It doesn't make sense to call analyze if you don't want to compile
+  anything at all, so passing [] for both test_targets and 
+  additional_compile_targets should probably return an error.
+
+In the output case, an empty list indicates that there was nothing to
+build, or that there were no affected test targets as appropriate.
+
+Note that passing no arguments to Ninja is equivalent to passing
+`all` to Ninja (at least given how GN and GYP work); however, we
+don't want to take advantage of this in most cases because we don't
+actually want to build every out of date target, only the targets
+potentially affected by the files. One could try to indicate
+to analyze that we wanted to use no arguments instead of an empty
+list, but using the existing fields for this seems fragile and/or
+confusing, and adding a new field for this seems unwarranted at this time.
+
+There is an "error" field in case something goes wrong (like the
+empty file list case, above, or an internal error in MB/GYP/GN). The
+analyze code should also return an error code to the shell if appropriate
+to indicate that the command failed.
+
+In the case where build files themselves are modified and analyze may
+not be able to determine a correct answer (point 7 above, where we return
+"Found dependency (all)"), we should also return the `test_targets` unmodified
+and return the union of `test_targets` and `additional_compile_targets` for
+`compile_targets`, to avoid confusion.
+
+### Examples
+
+Continuing the example given above, suppose we have the following build
+graph:
+
+* `blink_tests` is a meta target that depends on `webkit_unit_tests`,
+  `wtf_unittests`, and `webkit_tests` and represents all of the targets
+  needed to fully test Blink. Each of those is a separate test step.
+* `webkit_tests` is also a meta target; it depends on `content_shell`
+  and `image_diff`.
+* `base_unittests` is a separate test binary.
+* `wtf_unittests` depends on `Assertions.cpp` and `AssertionsTest.cpp`.
+* `webkit_unit_tests` depends on `WebNode.cpp` and `WebNodeTest.cpp`.
+* `content_shell` depends on `WebNode.cpp` and `Assertions.cpp`.
+* `base_unittests` depends on `logging.cc` and `logging_unittest.cc`.
+
+#### Example 1
+
+We wish to run 'wtf_unittests' and 'webkit_tests' on a bot, but not
+compile any additional targets.
+
+If a patch touches WebNode.cpp, then analyze gets as input:
+
+    {
+      "files": ["WebNode.cpp"],
+      "test_targets": ["wtf_unittests", "webkit_tests"],
+      "additional_compile_targets": []
+    }
+
+and should return as output:
+
+    {
+      "status": "Found dependency",
+      "compile_targets": ["webkit_unit_tests"],
+      "test_targets": ["webkit_tests"]
+    }
+
+Note how `webkit_tests` was pruned in compile_targets but not in test_targets.
+
+#### Example 2
+
+Using the same patch as Example 1, assume we wish to run only `wtf_unittests`,
+but additionally build everything needed to test Blink (`blink_tests`):
+
+We pass as input:
+
+    {
+      "files": ["WebNode.cpp"],
+      "test_targets": ["wtf_unittests"],
+      "additional_compile_targets": ["blink_tests"]
+    }
+
+And should get as output:
+
+    {
+      "status": "Found dependency",
+      "compile_targets": ["webkit_unit_tests"],
+      "test_targets": []
+    }
+
+Here `blink_tests` was pruned in the output compile_targets, and
+test_targets was empty, since blink_tests was not listed in the input
+test_targets.
+
+#### Example 3
+
+Build everything, but do not run any tests.
+
+Input:
+
+    {
+      "files": ["WebNode.cpp"],
+      "test_targets": [],
+      "additional_compile_targets": ["all"]
+    }
+
+Output:
+
+    {
+      "status": "Found dependency",
+      "compile_targets": ["webkit_unit_tests", "content_shell"],
+      "test_targets": []
+    }
+
+#### Example 4
+
+Same as Example 2, but a build file was modified instead of a source file.
+
+Input:
+
+    {
+      "files": ["BUILD.gn"],
+      "test_targets": ["wtf_unittests"],
+      "additional_compile_targets": ["blink_tests"]
+    }
+
+Output:
+
+    {
+      "status": "Found dependency (all)",
+      "compile_targets": ["webkit_unit_tests", "wtf_unittests"],
+      "test_targets": ["wtf_unittests"]
+    }
+
+test_targets was returned unchanged, compile_targets was pruned.
+
+## Random Requirements and Rationale
+
+This section is collection of semi-organized notes on why MB is the way
+it is ...
+
+### in-tree or out-of-tree
+
+The first issue is whether or not this should exist as a script in
+Chromium at all; an alternative would be to simply change the bot
+configurations to know whether to use GYP or GN, and which flags to
+pass.
+
+That would certainly work, but experience over the past two years
+suggests a few things:
+
+  * we should push as much logic as we can into the source repositories
+    so that they can be versioned and changed atomically with changes to
+    the product code; having to coordinate changes between src/ and
+    build/ is at best annoying and can lead to weird errors.
+  * the infra team would really like to move to providing
+    product-independent services (i.e., not have to do one thing for
+    Chromium, another for NaCl, a third for V8, etc.).
+  * we found that during the SVN->GIT migration the ability to flip bot
+    configurations between the two via changes to a file in chromium
+    was very useful.
+
+All of this suggests that the interface between bots and Chromium should
+be a simple one, hiding as much of the chromium logic as possible.
+
+### Why not have MB be smarter about de-duping flags?
+
+This just adds complexity to the MB implementation, and duplicates logic
+that GYP and GN already have to support anyway; in particular, it might
+require MB to know how to parse GYP and GN values. The belief is that
+if MB does *not* do this, it will lead to fewer surprises.
+
+It will not be hard to change this if need be.
+
+### Integration w/ gclient runhooks
+
+On the bots, we will disable `gyp_chromium` as part of runhooks (using
+`GYP_CHROMIUM_NO_ACTION=1`), so that mb shows up as a separate step.
+
+At the moment, we expect most developers to either continue to use
+`gyp_chromium` in runhooks or to disable it as above if they have no
+use for GYP at all. We may revisit how this works once we encourage more
+people to use GN full-time (i.e., we might take `gyp_chromium` out of
+runhooks altogether).
+
+### Config per flag set or config per (os/arch/flag set)?
+
+Currently, mb_config.pyl does not specify the host_os, target_os, host_cpu, or
+target_cpu values for every config that Chromium runs on, it only specifies
+them for when the values need to be explicitly set on the command line.
+
+Instead, we have one config per unique combination of flags only.
+
+In other words, rather than having `linux_rel_bot`, `win_rel_bot`, and
+`mac_rel_bot`, we just have `rel_bot`.
+
+This design allows us to determine easily all of the different sets
+of flags that we need to support, but *not* which flags are used on which
+host/target combinations.
+
+It may be that we should really track the latter. Doing so is just a
+config file change, however.
+
+### Non-goals
+
+* MB is not intended to replace direct invocation of GN or GYP for
+  complicated build scenarios (a.k.a. Chrome OS), where multiple flags need
+  to be set to user-defined paths for specific toolchains (e.g., where
+  Chrome OS needs to specify specific board types and compilers).
+
+* MB is not intended at this time to be something developers use frequently,
+  or to add a lot of features to. We hope to be able to get rid of it once
+  the GYP->GN migration is done, and so we should not add things for
+  developers that can't easily be added to GN itself.
+
+* MB is not intended to replace the
+  [CR tool](https://code.google.com/p/chromium/wiki/CRUserManual). Not
+  only is it only intended to replace the gyp\_chromium part of `'gclient
+  runhooks'`, it is not really meant as a developer-facing tool.
diff --git a/src/third_party/v8/tools/mb/docs/user_guide.md b/src/third_party/v8/tools/mb/docs/user_guide.md
new file mode 100644
index 0000000..75c195a
--- /dev/null
+++ b/src/third_party/v8/tools/mb/docs/user_guide.md
@@ -0,0 +1,312 @@
+# The MB (Meta-Build wrapper) user guide
+
+[TOC]
+
+## Introduction
+
+`mb` is a simple python wrapper around the GYP and GN meta-build tools to
+be used as part of the GYP->GN migration.
+
+It is intended to be used by bots to make it easier to manage the configuration
+each bot builds (i.e., the configurations can be changed from chromium
+commits), and to consolidate the list of all of the various configurations
+that Chromium is built in.
+
+Ideally this tool will no longer be needed after the migration is complete.
+
+For more discussion of MB, see also [the design spec](design_spec.md).
+
+## MB subcommands
+
+### `mb analyze`
+
+`mb analyze` is responsible for determining what targets are affected by
+a list of files (e.g., the list of files in a patch on a trybot):
+
+```
+mb analyze -c chromium_linux_rel //out/Release input.json output.json
+```
+
+Either the `-c/--config` flag or the `-m/--master` and `-b/--builder` flags
+must be specified so that `mb` can figure out which config to use.
+
+The first positional argument must be a GN-style "source-absolute" path
+to the build directory.
+
+The second positional argument is a (normal) path to a JSON file containing
+a single object with the following fields:
+
+  * `files`: an array of the modified filenames to check (as paths relative to
+    the checkout root).
+  * `test_targets`: an array of (ninja) build targets that are needed to run the
+    tests we wish to run. An empty array will be treated as if there are
+    no tests that will be run.
+  * `additional_compile_targets`: an array of (ninja) build targets that
+    reflect the stuff we might want to build *in addition to* the list
+    passed in `test_targets`. Targets in this list will be treated 
+    specially, in the following way: if a given target is a "meta"
+    (GN: group, GYP: none) target like 'blink_tests' or even the
+    ninja-specific 'all' target, then only the *dependencies* of the
+    target that are affected by the modified files will be rebuilt
+    (not the target itself, which might also cause unaffected dependencies
+    to be rebuilt). An empty list will be treated as if there are no additional
+    targets to build.
+    Empty lists for both `test_targets` and `additional_compile_targets`
+    would cause no work to be done, so will result in an error.
+  * `targets`: a legacy field that resembles a union of `compile_targets`
+    and `test_targets`. Support for this field will be removed once the
+    bots have been updated to use compile_targets and test_targets instead.
+
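+For illustration, a minimal input file covering these fields (file and target
+names are made up) might look like:
+
+```
+{
+  "files": ["base/logging.cc"],
+  "test_targets": ["base_unittests"],
+  "additional_compile_targets": ["all"]
+}
+```
+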
+The third positional argument is a (normal) path to where mb will write
+the result, also as a JSON object. This object may contain the following
+fields:
+
+  * `error`: this should only be present if something failed.
+  * `compile_targets`: the list of ninja targets that should be passed
+    directly to the corresponding ninja / compile.py invocation. This
+    list may contain entries that are *not* listed in the input (see
+    the description of `additional_compile_targets` above and 
+    [the design spec](design_spec.md) for how this works).
+  * `invalid_targets`: a list of any targets that were passed in
+    either of the input lists that weren't actually found in the graph.
+  * `test_targets`: the subset of the input `test_targets` that are
+    potentially out of date, indicating that the matching test steps
+    should be re-run.
+  * `targets`: a legacy field that indicates the subset of the input `targets`
+    that depend on the input `files`.
+  * `build_targets`: a legacy field that indicates the minimal subset of
+    targets needed to build all of `targets` that were affected.
+  * `status`: a field containing one of three strings:
+
+    * `"Found dependency"` (build the `compile_targets`)
+    * `"No dependency"` (i.e., no build needed)
+    * `"Found dependency (all)"` (`test_targets` is returned as-is;
+       `compile_targets` should contain the union of `test_targets` and
+       `additional_compile_targets`. In this case the targets do not
+       need to be pruned).
+
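+For the illustrative input above, a plausible output (assuming the patch only
+affects `base_unittests`) would be:
+
+```
+{
+  "status": "Found dependency",
+  "compile_targets": ["base_unittests"],
+  "test_targets": ["base_unittests"]
+}
+```
+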
+See [the design spec](design_spec.md) for more details and examples; the
+differences can be subtle.  We won't even go into how the `targets` and
+`build_targets` differ from each other or from `compile_targets` and
+`test_targets`.
+
+The `-b/--builder`, `-c/--config`, `-f/--config-file`, `-m/--master`,
+`-q/--quiet`, and `-v/--verbose` flags work as documented for `mb gen`.
+
+### `mb audit`
+
+`mb audit` is used to track the progress of the GYP->GN migration. You can
+use it to check a single master, or all the masters we care about. See
+`mb help audit` for more details (most people are not expected to care about
+this).
+
+### `mb gen`
+
+`mb gen` is responsible for generating the Ninja files by invoking either GYP
+or GN as appropriate. It takes arguments to specify a build config and
+a directory, then runs GYP or GN as appropriate:
+
+```
+% mb gen -m tryserver.chromium.linux -b linux_rel //out/Release
+% mb gen -c linux_rel_trybot //out/Release
+```
+
+Either the `-c/--config` flag or the `-m/--master` and `-b/--builder` flags
+must be specified so that `mb` can figure out which config to use. The
+`--phase` flag must also be used with builders that have multiple
+build/compile steps (and only with those builders).
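+
+For example (the master, builder, and phase names here are hypothetical):
+
+```
+% mb gen -m fake_master -b fake_multiphase_builder --phase phase_1 //out/Release
+```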
+
+By default, MB will look for a bot config file under `//ios/build/bots` (see
+[the design spec](design_spec.md) for details of how the bot config files
+work). If no matching one is found, MB will then look up the config
+information in `//tools/mb/mb_config.pyl`; you can specify a custom config
+file using the `-f/--config-file` flag.
+
+The path must be a GN-style "source-absolute" path (as above).
+
+You can pass the `-n/--dryrun` flag to mb gen to see what will happen without
+actually writing anything.
+
+You can pass the `-q/--quiet` flag to get mb to be silent unless there is an
+error, and pass the `-v/--verbose` flag to get mb to log all of the files
+that are read and written, and all the commands that are run.
+
+If the build config will use the Goma distributed-build system, you can pass
+the path to your Goma client in the `-g/--goma-dir` flag, and it will be
+incorporated into the appropriate flags for GYP or GN as needed.
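+
+For example (assuming your Goma client is installed under `$HOME/goma`):
+
+```
+% mb gen -c linux_rel_trybot -g $HOME/goma //out/Release
+```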
+
+If gen ends up using GYP, the path must have a valid GYP configuration as the
+last component of the path (i.e., specify `//out/Release_x64`, not `//out`).
+The gyp script defaults to `//build/gyp_chromium`, but can be overridden with
+the `--gyp-script` flag, e.g. `--gyp-script=gypfiles/gyp_v8`.
+
+### `mb help`
+
+Produces help output on the other subcommands.
+
+### `mb lookup`
+
+Prints what command will be run by `mb gen` (like `mb gen -n` but does
+not require you to specify a path).
+
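+For example, to see what `mb gen` would do for the builder from the earlier
+example:
+
+```
+% mb lookup -m tryserver.chromium.linux -b linux_rel
+```
+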
+The `-b/--builder`, `-c/--config`, `-f/--config-file`, `-m/--master`,
+`--phase`, `-q/--quiet`, and `-v/--verbose` flags work as documented for
+`mb gen`.
+
+### `mb validate`
+
+Does internal checking to make sure the config file is syntactically
+valid and that all of the entries are used properly. It does not validate
+that the flags make sense, or that the builder names are legal or
+comprehensive, but it does complain about configs and mixins that aren't
+used.
+
+The `-f/--config-file` and `-q/--quiet` flags work as documented for
+`mb gen`.
+
+This is mostly useful as a presubmit check and for verifying changes to
+the config file.
+
+### `mb gerrit-buildbucket-config`
+
+Generates a gerrit buildbucket configuration file and prints it to
+stdout. This file contains the list of trybots shown in gerrit's UI.
+
+The master copy of the buildbucket.config file lives
+in a separate branch of the chromium repository. Run `mb
+gerrit-buildbucket-config > buildbucket.config.new && git fetch origin
+refs/meta/config:refs/remotes/origin/meta/config && git checkout
+-t -b meta_config origin/meta/config && mv buildbucket.config.new
+buildbucket.config` to update the file.
+
+Note that after committing, `git cl upload` will not work. Instead, use `git
+push origin HEAD:refs/for/refs/meta/config` to upload the CL for review.
+
+## Isolates and Swarming
+
+`mb gen` is also responsible for generating the `.isolate` and
+`.isolated.gen.json` files needed to run test executables through swarming
+in a GN build (in a GYP build, this is done as part of the compile step).
+
+If you wish to generate the isolate files, pass `mb gen` the
+`--swarming-targets-file` command line argument; that arg should be a path
+to a file containing a list of ninja build targets to compute the runtime
+dependencies for (on Windows, use the ninja target name, not the file, so
+`base_unittests`, not `base_unittests.exe`).
+
+MB will take this file, translate each build target to the matching GN
+label (e.g., `base_unittests` -> `//base:base_unittests`), write that list
+to a file called `runtime_deps` in the build directory, and pass that to
+`gn gen $BUILD ... --runtime-deps-list-file=$BUILD/runtime_deps`.
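+
+As a sketch (the target name is illustrative), if the file passed via
+`--swarming-targets-file` contains the single line `base_unittests`, MB will
+write `//base:base_unittests` to `$BUILD/runtime_deps` and run roughly:
+
+```
+% gn gen //out/Release --check --runtime-deps-list-file=//out/Release/runtime_deps
+```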
+
+Once GN has computed the lists of runtime dependencies, MB will then
+look up the command line for each target (currently this is hard-coded
+in [mb.py](https://code.google.com/p/chromium/codesearch?q=mb.py#chromium/src/tools/mb/mb.py&q=mb.py%20GetIsolateCommand&sq=package:chromium&type=cs)), and write out the
+matching `.isolate` and `.isolated.gen.json` files.
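+
+The generated `.isolate` file is just a dictionary listing the runtime
+dependencies; as a rough sketch (the entries shown are illustrative), it
+might look like:
+
+```
+{
+  'variables': {
+    'files': [
+      '../../testing/test_env.py',
+      'base_unittests',
+    ],
+  },
+}
+```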
+
+## The `mb_config.pyl` config file
+
+The `mb_config.pyl` config file is intended to enumerate all of the
+supported build configurations for Chromium. Generally speaking, you
+should never need to (or want to) build a configuration that isn't
+listed here, and so by using the configs in this file you can avoid
+having to juggle long lists of GYP_DEFINES and gn args by hand.
+
+`mb_config.pyl` is structured as a file containing a single PYthon Literal
+expression: a dictionary with three main keys, `masters`, `configs` and
+`mixins`.
+
+The `masters` key contains a nested series of dicts containing mappings
+of master -> builder -> config. This allows us to isolate the buildbot
+recipes from the actual details of the configs. The config should either
+be a single string value representing a key in the `configs` dictionary,
+or a list of strings, each of which is a key in the `configs` dictionary;
+the latter case is for builders that do multiple compiles with different
+arguments in a single build, and must *only* be used for such builders
+(where a --phase argument must be supplied in each lookup or gen call).
+
+The `configs` key points to a dictionary of named build configurations.
+
+There should be a key in this dict for every supported configuration
+of Chromium, meaning every configuration we have a bot for, and every
+configuration commonly used by developers but that we may not have a bot
+for.
+
+The value of each key is a list of "mixins" that will define what that
+build_config does. Each item in the list must be an entry in the dictionary
+value of the `mixins` key.
+
+Each mixin value is itself a dictionary that contains one or more of the
+following keys:
+
+  * `gyp_crosscompile`: a boolean; if true, GYP_CROSSCOMPILE=1 is set in
+    the environment and passed to GYP.
+  * `gyp_defines`: a string containing a list of GYP_DEFINES.
+  * `gn_args`: a string containing a list of values passed to gn --args.
+  * `mixins`: a list of other mixins that should be included.
+  * `type`: a string with either the value `gyp` or `gn`;
+    setting this indicates which meta-build tool to use.
+
+When `mb gen` or `mb analyze` executes, it takes a config name, looks it
+up in the 'configs' dict, and then does a left-to-right expansion of the
+mixins; gyp_defines and gn_args values are concatenated, and the type values
+override each other.
+
+For example, if you had:
+
+```
+{
+  'configs': {
+    'linux_release_trybot': ['gyp_release', 'trybot'],
+    'gn_shared_debug': None,
+  },
+  'mixins': {
+    'bot': {
+      'gyp_defines': 'use_goma=1 dcheck_always_on=0',
+      'gn_args': 'use_goma=true dcheck_always_on=false',
+    },
+    'debug': {
+      'gn_args': 'is_debug=true',
+    },
+    'gn': {'type': 'gn'},
+    'gyp_release': {
+      'mixins': ['bot', 'release'],
+      'type': 'gyp',
+    },
+    'release': {
+      'gn_args': 'is_debug=false',
+    },
+    'shared': {
+      'gn_args': 'is_component_build=true',
+      'gyp_defines': 'component=shared_library',
+    },
+    'trybot': {
+      'gyp_defines': 'dcheck_always_on=1',
+      'gn_args': 'dcheck_always_on=true',
+    }
+  }
+}
+```
+
+and you ran `mb gen -c linux_release_trybot //out/Release`, it would
+translate into a call to `gyp_chromium -G Release` with `GYP_DEFINES` set to
+`"use_goma=1 dcheck_always_on=0 dcheck_always_on=1"`.
+
+(From that you can see that mb is intentionally dumb and does not
+attempt to de-dup the flags; it lets gyp do that.)
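+
+Similarly, a GN-based config list like `['gn', 'debug', 'shared']`
+(hypothetical; it is not spelled out in the example above) would flatten
+left-to-right into `type` = `gn` with gn args
+`is_debug=true is_component_build=true`, which `mb gen` would hand to GN
+roughly as:
+
+```
+% gn gen //out/Debug --args='is_debug=true is_component_build=true'
+```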
+
+## Debugging MB
+
+By design, MB should be simple enough that very little can go wrong.
+
+The most obvious issue is that you might see different commands being
+run than you expect; running `mb -v` will print what it's doing and
+run the commands; `mb -n` will print what it will do but *not* run
+the commands.
+
+If you hit weirder things than that, add some print statements to the
+python script, send a question to gn-dev@chromium.org, or
+[file a bug](https://crbug.com/new) with the label
+'mb' and cc: dpranke@chromium.org.
+
+
diff --git a/src/third_party/v8/tools/mb/mb b/src/third_party/v8/tools/mb/mb
new file mode 100755
index 0000000..d3a0cdf
--- /dev/null
+++ b/src/third_party/v8/tools/mb/mb
@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+base_dir=$(dirname "$0")
+
+PYTHONDONTWRITEBYTECODE=1 exec python "$base_dir/mb.py" "$@"
diff --git a/src/third_party/v8/tools/mb/mb.bat b/src/third_party/v8/tools/mb/mb.bat
new file mode 100755
index 0000000..a82770e
--- /dev/null
+++ b/src/third_party/v8/tools/mb/mb.bat
@@ -0,0 +1,6 @@
+@echo off
+setlocal
+:: This is required with cygwin only.
+PATH=%~dp0;%PATH%
+set PYTHONDONTWRITEBYTECODE=1
+call python "%~dp0mb.py" %*
diff --git a/src/third_party/v8/tools/mb/mb.py b/src/third_party/v8/tools/mb/mb.py
new file mode 100755
index 0000000..f3e4615
--- /dev/null
+++ b/src/third_party/v8/tools/mb/mb.py
@@ -0,0 +1,1290 @@
+#!/usr/bin/env python
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""MB - the Meta-Build wrapper around GN.
+
+MB is a wrapper script for GN that can be used to generate build files
+for sets of canned configurations and analyze them.
+"""
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import argparse
+import ast
+import errno
+import json
+import os
+import pipes
+import platform
+import pprint
+import re
+import shutil
+import sys
+import subprocess
+import tempfile
+import traceback
+import urllib2
+
+from collections import OrderedDict
+
+CHROMIUM_SRC_DIR = os.path.dirname(os.path.dirname(os.path.dirname(
+    os.path.abspath(__file__))))
+sys.path = [os.path.join(CHROMIUM_SRC_DIR, 'build')] + sys.path
+
+import gn_helpers
+
+try:
+  cmp              # Python 2
+except NameError:  # Python 3
+  def cmp(x, y):   # pylint: disable=redefined-builtin
+    return (x > y) - (x < y)
+
+
+def main(args):
+  mbw = MetaBuildWrapper()
+  return mbw.Main(args)
+
+
+class MetaBuildWrapper(object):
+  def __init__(self):
+    self.chromium_src_dir = CHROMIUM_SRC_DIR
+    self.default_config = os.path.join(self.chromium_src_dir, 'infra', 'mb',
+                                       'mb_config.pyl')
+    self.default_isolate_map = os.path.join(self.chromium_src_dir, 'infra',
+                                            'mb', 'gn_isolate_map.pyl')
+    self.executable = sys.executable
+    self.platform = sys.platform
+    self.sep = os.sep
+    self.args = argparse.Namespace()
+    self.configs = {}
+    self.luci_tryservers = {}
+    self.masters = {}
+    self.mixins = {}
+    self.isolate_exe = 'isolate.exe' if self.platform.startswith(
+        'win') else 'isolate'
+
+  def Main(self, args):
+    self.ParseArgs(args)
+    try:
+      ret = self.args.func()
+      if ret:
+        self.DumpInputFiles()
+      return ret
+    except KeyboardInterrupt:
+      self.Print('interrupted, exiting')
+      return 130
+    except Exception:
+      self.DumpInputFiles()
+      s = traceback.format_exc()
+      for l in s.splitlines():
+        self.Print(l)
+      return 1
+
+  def ParseArgs(self, argv):
+    def AddCommonOptions(subp):
+      subp.add_argument('-b', '--builder',
+                        help='builder name to look up config from')
+      subp.add_argument('-m', '--master',
+                        help='master name to look up config from')
+      subp.add_argument('-c', '--config',
+                        help='configuration to analyze')
+      subp.add_argument('--phase',
+                        help='optional phase name (used when builders '
+                             'do multiple compiles with different '
+                             'arguments in a single build)')
+      subp.add_argument('-f', '--config-file', metavar='PATH',
+                        default=self.default_config,
+                        help='path to config file '
+                             '(default is %(default)s)')
+      subp.add_argument('-i', '--isolate-map-file', metavar='PATH',
+                        help='path to isolate map file '
+                             '(default is %(default)s)',
+                        default=[],
+                        action='append',
+                        dest='isolate_map_files')
+      subp.add_argument('-g', '--goma-dir',
+                        help='path to goma directory')
+      subp.add_argument('--android-version-code',
+                        help='Sets GN arg android_default_version_code')
+      subp.add_argument('--android-version-name',
+                        help='Sets GN arg android_default_version_name')
+      subp.add_argument('-n', '--dryrun', action='store_true',
+                        help='Do a dry run (i.e., do nothing, just print '
+                             'the commands that will run)')
+      subp.add_argument('-v', '--verbose', action='store_true',
+                        help='verbose logging')
+
+    parser = argparse.ArgumentParser(prog='mb')
+    subps = parser.add_subparsers()
+
+    subp = subps.add_parser('analyze',
+                            help='analyze whether changes to a set of files '
+                                 'will cause a set of binaries to be rebuilt.')
+    AddCommonOptions(subp)
+    subp.add_argument('path', nargs=1,
+                      help='path build was generated into.')
+    subp.add_argument('input_path', nargs=1,
+                      help='path to a file containing the input arguments '
+                           'as a JSON object.')
+    subp.add_argument('output_path', nargs=1,
+                      help='path to a file containing the output arguments '
+                           'as a JSON object.')
+    subp.add_argument('--json-output',
+                      help='Write errors to json.output')
+    subp.set_defaults(func=self.CmdAnalyze)
+
+    subp = subps.add_parser('export',
+                            help='print out the expanded configuration for '
+                                 'each builder as a JSON object')
+    subp.add_argument('-f', '--config-file', metavar='PATH',
+                      default=self.default_config,
+                      help='path to config file (default is %(default)s)')
+    subp.add_argument('-g', '--goma-dir',
+                      help='path to goma directory')
+    subp.set_defaults(func=self.CmdExport)
+
+    subp = subps.add_parser('gen',
+                            help='generate a new set of build files')
+    AddCommonOptions(subp)
+    subp.add_argument('--swarming-targets-file',
+                      help='save runtime dependencies for targets listed '
+                           'in file.')
+    subp.add_argument('--json-output',
+                      help='Write errors to json.output')
+    subp.add_argument('path', nargs=1,
+                      help='path to generate build into')
+    subp.set_defaults(func=self.CmdGen)
+
+    subp = subps.add_parser('isolate',
+                            help='generate the .isolate files for a given '
+                                 'binary')
+    AddCommonOptions(subp)
+    subp.add_argument('path', nargs=1,
+                      help='path build was generated into')
+    subp.add_argument('target', nargs=1,
+                      help='ninja target to generate the isolate for')
+    subp.set_defaults(func=self.CmdIsolate)
+
+    subp = subps.add_parser('lookup',
+                            help='look up the command for a given config or '
+                                 'builder')
+    AddCommonOptions(subp)
+    subp.add_argument('--quiet', default=False, action='store_true',
+                      help='Print out just the arguments, '
+                           'do not emulate the output of the gen subcommand.')
+    subp.add_argument('--recursive', default=False, action='store_true',
+                      help='Lookup arguments from imported files, '
+                           'implies --quiet')
+    subp.set_defaults(func=self.CmdLookup)
+
+    subp = subps.add_parser(
+        'run',
+        help='build and run the isolated version of a '
+             'binary',
+        formatter_class=argparse.RawDescriptionHelpFormatter)
+    subp.description = (
+        'Build, isolate, and run the given binary with the command line\n'
+        'listed in the isolate. You may pass extra arguments after the\n'
+        'target; use "--" if the extra arguments need to include switches.\n'
+        '\n'
+        'Examples:\n'
+        '\n'
+        '  % tools/mb/mb.py run -m chromium.linux -b "Linux Builder" \\\n'
+        '    //out/Default content_browsertests\n'
+        '\n'
+        '  % tools/mb/mb.py run out/Default content_browsertests\n'
+        '\n'
+        '  % tools/mb/mb.py run out/Default content_browsertests -- \\\n'
+        '    --test-launcher-retry-limit=0'
+        '\n'
+    )
+    AddCommonOptions(subp)
+    subp.add_argument('-j', '--jobs', dest='jobs', type=int,
+                      help='Number of jobs to pass to ninja')
+    subp.add_argument('--no-build', dest='build', default=True,
+                      action='store_false',
+                      help='Do not build, just isolate and run')
+    subp.add_argument('path', nargs=1,
+                      help=('path to generate build into (or use).'
+                            ' This can be either a regular path or a '
+                            'GN-style source-relative path like '
+                            '//out/Default.'))
+    subp.add_argument('-s', '--swarmed', action='store_true',
+                      help='Run under swarming with the default dimensions')
+    subp.add_argument('-d', '--dimension', default=[], action='append', nargs=2,
+                      dest='dimensions', metavar='FOO bar',
+                      help='dimension to filter on')
+    subp.add_argument('--no-default-dimensions', action='store_false',
+                      dest='default_dimensions', default=True,
+                      help='Do not automatically add dimensions to the task')
+    subp.add_argument('target', nargs=1,
+                      help='ninja target to build and run')
+    subp.add_argument('extra_args', nargs='*',
+                      help=('extra args to pass to the isolate to run. Use '
+                            '"--" as the first arg if you need to pass '
+                            'switches'))
+    subp.set_defaults(func=self.CmdRun)
+
+    subp = subps.add_parser('validate',
+                            help='validate the config file')
+    subp.add_argument('-f', '--config-file', metavar='PATH',
+                      default=self.default_config,
+                      help='path to config file (default is %(default)s)')
+    subp.set_defaults(func=self.CmdValidate)
+
+    subp = subps.add_parser('gerrit-buildbucket-config',
+                            help='Print buildbucket.config for gerrit '
+                            '(see MB user guide)')
+    subp.add_argument('-f', '--config-file', metavar='PATH',
+                      default=self.default_config,
+                      help='path to config file (default is %(default)s)')
+    subp.set_defaults(func=self.CmdBuildbucket)
+
+    subp = subps.add_parser('help',
+                            help='Get help on a subcommand.')
+    subp.add_argument(nargs='?', action='store', dest='subcommand',
+                      help='The command to get help for.')
+    subp.set_defaults(func=self.CmdHelp)
+
+    self.args = parser.parse_args(argv)
+
+  def DumpInputFiles(self):
+
+    def DumpContentsOfFilePassedTo(arg_name, path):
+      if path and self.Exists(path):
+        self.Print("\n# To recreate the file passed to %s:" % arg_name)
+        self.Print("%% cat > %s <<EOF" % path)
+        contents = self.ReadFile(path)
+        self.Print(contents)
+        self.Print("EOF\n%\n")
+
+    if getattr(self.args, 'input_path', None):
+      DumpContentsOfFilePassedTo(
+          'argv[0] (input_path)', self.args.input_path[0])
+    if getattr(self.args, 'swarming_targets_file', None):
+      DumpContentsOfFilePassedTo(
+          '--swarming-targets-file', self.args.swarming_targets_file)
+
+  def CmdAnalyze(self):
+    vals = self.Lookup()
+    return self.RunGNAnalyze(vals)
+
+  def CmdExport(self):
+    self.ReadConfigFile()
+    obj = {}
+    for master, builders in self.masters.items():
+      obj[master] = {}
+      for builder in builders:
+        config = self.masters[master][builder]
+        if not config:
+          continue
+
+        if isinstance(config, dict):
+          args = {k: self.FlattenConfig(v)['gn_args']
+                  for k, v in config.items()}
+        elif config.startswith('//'):
+          args = config
+        else:
+          args = self.FlattenConfig(config)['gn_args']
+          if 'error' in args:
+            continue
+
+        obj[master][builder] = args
+
+    # Dump object and trim trailing whitespace.
+    s = '\n'.join(l.rstrip() for l in
+                  json.dumps(obj, sort_keys=True, indent=2).splitlines())
+    self.Print(s)
+    return 0
+
+  def CmdGen(self):
+    vals = self.Lookup()
+    return self.RunGNGen(vals)
+
+  def CmdHelp(self):
+    if self.args.subcommand:
+      self.ParseArgs([self.args.subcommand, '--help'])
+    else:
+      self.ParseArgs(['--help'])
+
+  def CmdIsolate(self):
+    vals = self.GetConfig()
+    if not vals:
+      return 1
+    return self.RunGNIsolate()
+
+  def CmdLookup(self):
+    vals = self.Lookup()
+    gn_args = self.GNArgs(vals, expand_imports=self.args.recursive)
+    if self.args.quiet or self.args.recursive:
+      self.Print(gn_args, end='')
+    else:
+      cmd = self.GNCmd('gen', '_path_')
+      self.Print('\nWriting """\\\n%s""" to _path_/args.gn.\n' % gn_args)
+      env = None
+
+      self.PrintCmd(cmd, env)
+    return 0
+
+  def CmdRun(self):
+    vals = self.GetConfig()
+    if not vals:
+      return 1
+
+    build_dir = self.args.path[0]
+    target = self.args.target[0]
+
+    if self.args.build:
+      ret = self.Build(target)
+      if ret:
+        return ret
+    ret = self.RunGNIsolate()
+    if ret:
+      return ret
+
+    if self.args.swarmed:
+      return self._RunUnderSwarming(build_dir, target)
+    else:
+      return self._RunLocallyIsolated(build_dir, target)
+
+  def _RunUnderSwarming(self, build_dir, target):
+    # TODO(dpranke): Look up the information for the target in
+    # the //testing/buildbot.json file, if possible, so that we
+    # can determine the isolate target, command line, and additional
+    # swarming parameters, if possible.
+    #
+    # TODO(dpranke): Also, add support for sharding and merging results.
+    dimensions = []
+    for k, v in self._DefaultDimensions() + self.args.dimensions:
+      dimensions += ['-d', k, v]
+
+    archive_json_path = self.ToSrcRelPath(
+        '%s/%s.archive.json' % (build_dir, target))
+    cmd = [
+        self.PathJoin(self.chromium_src_dir, 'tools', 'luci-go',
+                      self.isolate_exe),
+        'archive',
+        '-i',
+        self.ToSrcRelPath('%s/%s.isolate' % (build_dir, target)),
+        '-s',
+        self.ToSrcRelPath('%s/%s.isolated' % (build_dir, target)),
+        '-I', 'isolateserver.appspot.com',
+        '-dump-json',
+        archive_json_path,
+      ]
+    ret, _, _ = self.Run(cmd, force_verbose=False)
+    if ret:
+      return ret
+
+    try:
+      archive_hashes = json.loads(self.ReadFile(archive_json_path))
+    except Exception:
+      self.Print(
+          'Failed to read JSON file "%s"' % archive_json_path, file=sys.stderr)
+      return 1
+    try:
+      isolated_hash = archive_hashes[target]
+    except Exception:
+      self.Print(
+          'Cannot find hash for "%s" in "%s", file content: %s' %
+          (target, archive_json_path, archive_hashes),
+          file=sys.stderr)
+      return 1
+
+    cmd = [
+        self.executable,
+        self.PathJoin('tools', 'swarming_client', 'swarming.py'),
+          'run',
+          '-s', isolated_hash,
+          '-I', 'isolateserver.appspot.com',
+          '-S', 'chromium-swarm.appspot.com',
+      ] + dimensions
+    if self.args.extra_args:
+      cmd += ['--'] + self.args.extra_args
+    ret, _, _ = self.Run(cmd, force_verbose=True, buffer_output=False)
+    return ret
+
+  def _RunLocallyIsolated(self, build_dir, target):
+    cmd = [
+        self.PathJoin(self.chromium_src_dir, 'tools', 'luci-go',
+                      self.isolate_exe),
+        'run',
+        '-i',
+        self.ToSrcRelPath('%s/%s.isolate' % (build_dir, target)),
+      ]
+    if self.args.extra_args:
+      cmd += ['--'] + self.args.extra_args
+    ret, _, _ = self.Run(cmd, force_verbose=True, buffer_output=False)
+    return ret
+
+  def _DefaultDimensions(self):
+    if not self.args.default_dimensions:
+      return []
+
+    # This code is naive and just picks reasonable defaults per platform.
+    if self.platform == 'darwin':
+      os_dim = ('os', 'Mac-10.12')
+    elif self.platform.startswith('linux'):
+      os_dim = ('os', 'Ubuntu-16.04')
+    elif self.platform == 'win32':
+      os_dim = ('os', 'Windows-10')
+    else:
+      raise MBErr('unrecognized platform string "%s"' % self.platform)
+
+    return [('pool', 'Chrome'),
+            ('cpu', 'x86-64'),
+            os_dim]
+
+  def CmdBuildbucket(self):
+    self.ReadConfigFile()
+
+    self.Print('# This file was generated using '
+               '"tools/mb/mb.py gerrit-buildbucket-config".')
+
+    for luci_tryserver in sorted(self.luci_tryservers):
+      self.Print('[bucket "luci.%s"]' % luci_tryserver)
+      for bot in sorted(self.luci_tryservers[luci_tryserver]):
+        self.Print('\tbuilder = %s' % bot)
+
+    for master in sorted(self.masters):
+      if master.startswith('tryserver.'):
+        self.Print('[bucket "master.%s"]' % master)
+        for bot in sorted(self.masters[master]):
+          self.Print('\tbuilder = %s' % bot)
+
+    return 0
+
+  def CmdValidate(self, print_ok=True):
+    errs = []
+
+    # Read the file to make sure it parses.
+    self.ReadConfigFile()
+
+    # Build a list of all of the configs referenced by builders.
+    all_configs = {}
+    for master in self.masters:
+      for config in self.masters[master].values():
+        if isinstance(config, dict):
+          for c in config.values():
+            all_configs[c] = master
+        else:
+          all_configs[config] = master
+
+    # Check that every referenced args file or config actually exists.
+    for config, loc in all_configs.items():
+      if config.startswith('//'):
+        if not self.Exists(self.ToAbsPath(config)):
+          errs.append('Unknown args file "%s" referenced from "%s".' %
+                      (config, loc))
+      elif not config in self.configs:
+        errs.append('Unknown config "%s" referenced from "%s".' %
+                    (config, loc))
+
+    # Check that every actual config is actually referenced.
+    for config in self.configs:
+      if not config in all_configs:
+        errs.append('Unused config "%s".' % config)
+
+    # Figure out the whole list of mixins, and check that every mixin
+    # listed by a config or another mixin actually exists.
+    referenced_mixins = set()
+    for config, mixins in self.configs.items():
+      for mixin in mixins:
+        if not mixin in self.mixins:
+          errs.append('Unknown mixin "%s" referenced by config "%s".' %
+                      (mixin, config))
+        referenced_mixins.add(mixin)
+
+    for mixin in self.mixins:
+      for sub_mixin in self.mixins[mixin].get('mixins', []):
+        if not sub_mixin in self.mixins:
+          errs.append('Unknown mixin "%s" referenced by mixin "%s".' %
+                      (sub_mixin, mixin))
+        referenced_mixins.add(sub_mixin)
+
+    # Check that every mixin defined is actually referenced somewhere.
+    for mixin in self.mixins:
+      if not mixin in referenced_mixins:
+        errs.append('Unreferenced mixin "%s".' % mixin)
+
+    if errs:
+      raise MBErr(('mb config file %s has problems:' % self.args.config_file) +
+                    '\n  ' + '\n  '.join(errs))
+
+    if print_ok:
+      self.Print('mb config file %s looks ok.' % self.args.config_file)
+    return 0
+
+  def GetConfig(self):
+    build_dir = self.args.path[0]
+
+    vals = self.DefaultVals()
+    if self.args.builder or self.args.master or self.args.config:
+      vals = self.Lookup()
+      # Re-run gn gen in order to ensure the config is consistent with the
+      # build dir.
+      self.RunGNGen(vals)
+      return vals
+
+    toolchain_path = self.PathJoin(self.ToAbsPath(build_dir),
+                                   'toolchain.ninja')
+    if not self.Exists(toolchain_path):
+      self.Print('Must either specify a path to an existing GN build dir '
+                 'or pass in a -m/-b pair or a -c flag to specify the '
+                 'configuration')
+      return {}
+
+    vals['gn_args'] = self.GNArgsFromDir(build_dir)
+    return vals
+
+  def GNArgsFromDir(self, build_dir):
+    args_contents = ""
+    gn_args_path = self.PathJoin(self.ToAbsPath(build_dir), 'args.gn')
+    if self.Exists(gn_args_path):
+      args_contents = self.ReadFile(gn_args_path)
+    gn_args = []
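+    # Each line of args.gn has the form "name = value"; fields[1] is the '='
+    # separator, so rebuild each arg as "name=value".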
+    for l in args_contents.splitlines():
+      fields = l.split(' ')
+      name = fields[0]
+      val = ' '.join(fields[2:])
+      gn_args.append('%s=%s' % (name, val))
+
+    return ' '.join(gn_args)
+
+  def Lookup(self):
+    vals = self.ReadIOSBotConfig()
+    if not vals:
+      self.ReadConfigFile()
+      config = self.ConfigFromArgs()
+      if config.startswith('//'):
+        if not self.Exists(self.ToAbsPath(config)):
+          raise MBErr('args file "%s" not found' % config)
+        vals = self.DefaultVals()
+        vals['args_file'] = config
+      else:
+        if not config in self.configs:
+          raise MBErr('Config "%s" not found in %s' %
+                      (config, self.args.config_file))
+        vals = self.FlattenConfig(config)
+    return vals
+
+  def ReadIOSBotConfig(self):
+    if not self.args.master or not self.args.builder:
+      return {}
+    path = self.PathJoin(self.chromium_src_dir, 'ios', 'build', 'bots',
+                         self.args.master, self.args.builder + '.json')
+    if not self.Exists(path):
+      return {}
+
+    contents = json.loads(self.ReadFile(path))
+    gn_args = ' '.join(contents.get('gn_args', []))
+
+    vals = self.DefaultVals()
+    vals['gn_args'] = gn_args
+    return vals
+
+  def ReadConfigFile(self):
+    if not self.Exists(self.args.config_file):
+      raise MBErr('config file not found at %s' % self.args.config_file)
+
+    try:
+      contents = ast.literal_eval(self.ReadFile(self.args.config_file))
+    except SyntaxError as e:
+      raise MBErr('Failed to parse config file "%s": %s' %
+                 (self.args.config_file, e))
+
+    self.configs = contents['configs']
+    self.luci_tryservers = contents.get('luci_tryservers', {})
+    self.masters = contents['masters']
+    self.mixins = contents['mixins']
+
+  def ReadIsolateMap(self):
+    if not self.args.isolate_map_files:
+      self.args.isolate_map_files = [self.default_isolate_map]
+
+    for f in self.args.isolate_map_files:
+      if not self.Exists(f):
+        raise MBErr('isolate map file not found at %s' % f)
+    isolate_maps = {}
+    for isolate_map in self.args.isolate_map_files:
+      try:
+        isolate_map = ast.literal_eval(self.ReadFile(isolate_map))
+        duplicates = set(isolate_map).intersection(isolate_maps)
+        if duplicates:
+          raise MBErr(
+              'Duplicate targets in isolate map files: %s.' %
+              ', '.join(duplicates))
+        isolate_maps.update(isolate_map)
+      except SyntaxError as e:
+        raise MBErr(
+            'Failed to parse isolate map file "%s": %s' % (isolate_map, e))
+    return isolate_maps
+
+  def ConfigFromArgs(self):
+    if self.args.config:
+      if self.args.master or self.args.builder:
+        raise MBErr('Cannot specify both -c/--config and -m/--master or '
+                    '-b/--builder')
+
+      return self.args.config
+
+    if not self.args.master or not self.args.builder:
+      raise MBErr('Must specify either -c/--config or '
+                  '(-m/--master and -b/--builder)')
+
+    if not self.args.master in self.masters:
+      raise MBErr('Master name "%s" not found in "%s"' %
+                  (self.args.master, self.args.config_file))
+
+    if not self.args.builder in self.masters[self.args.master]:
+      raise MBErr('Builder name "%s" not found under masters[%s] in "%s"' %
+                  (self.args.builder, self.args.master, self.args.config_file))
+
+    config = self.masters[self.args.master][self.args.builder]
+    if isinstance(config, dict):
+      if self.args.phase is None:
+        raise MBErr('Must specify a build --phase for %s on %s' %
+                    (self.args.builder, self.args.master))
+      phase = str(self.args.phase)
+      if phase not in config:
+        raise MBErr('Phase %s doesn\'t exist for %s on %s' %
+                    (phase, self.args.builder, self.args.master))
+      return config[phase]
+
+    if self.args.phase is not None:
+      raise MBErr('Must not specify a build --phase for %s on %s' %
+                  (self.args.builder, self.args.master))
+    return config
+
+  def FlattenConfig(self, config):
+    mixins = self.configs[config]
+    vals = self.DefaultVals()
+
+    visited = []
+    self.FlattenMixins(mixins, vals, visited)
+    return vals
+
+  def DefaultVals(self):
+    return {
+      'args_file': '',
+      'cros_passthrough': False,
+      'gn_args': '',
+    }
+
+  def FlattenMixins(self, mixins, vals, visited):
+    for m in mixins:
+      if m not in self.mixins:
+        raise MBErr('Unknown mixin "%s"' % m)
+
+      visited.append(m)
+
+      mixin_vals = self.mixins[m]
+
+      if 'cros_passthrough' in mixin_vals:
+        vals['cros_passthrough'] = mixin_vals['cros_passthrough']
+      if 'args_file' in mixin_vals:
+        if vals['args_file']:
+            raise MBErr('args_file specified multiple times in mixins '
+                        'for %s on %s' % (self.args.builder, self.args.master))
+        vals['args_file'] = mixin_vals['args_file']
+      if 'gn_args' in mixin_vals:
+        if vals['gn_args']:
+          vals['gn_args'] += ' ' + mixin_vals['gn_args']
+        else:
+          vals['gn_args'] = mixin_vals['gn_args']
+
+      if 'mixins' in mixin_vals:
+        self.FlattenMixins(mixin_vals['mixins'], vals, visited)
+    return vals
+
+  def RunGNGen(self, vals, compute_grit_inputs_for_analyze=False):
+    build_dir = self.args.path[0]
+
+    cmd = self.GNCmd('gen', build_dir, '--check')
+    gn_args = self.GNArgs(vals)
+    if compute_grit_inputs_for_analyze:
+      gn_args += ' compute_grit_inputs_for_analyze=true'
+
+    # Since GN hasn't run yet, the build directory may not even exist.
+    self.MaybeMakeDirectory(self.ToAbsPath(build_dir))
+
+    gn_args_path = self.ToAbsPath(build_dir, 'args.gn')
+    self.WriteFile(gn_args_path, gn_args, force_verbose=True)
+
+    swarming_targets = []
+    if getattr(self.args, 'swarming_targets_file', None):
+      # We need GN to generate the list of runtime dependencies for
+      # the compile targets listed (one per line) in the file so
+      # we can run them via swarming. We use gn_isolate_map.pyl to convert
+      # the compile targets to the matching GN labels.
+      path = self.args.swarming_targets_file
+      if not self.Exists(path):
+        self.WriteFailureAndRaise('"%s" does not exist' % path,
+                                  output_path=None)
+      contents = self.ReadFile(path)
+      swarming_targets = set(contents.splitlines())
+
+      isolate_map = self.ReadIsolateMap()
+      err, labels = self.MapTargetsToLabels(isolate_map, swarming_targets)
+      if err:
+          raise MBErr(err)
+
+      gn_runtime_deps_path = self.ToAbsPath(build_dir, 'runtime_deps')
+      self.WriteFile(gn_runtime_deps_path, '\n'.join(labels) + '\n')
+      cmd.append('--runtime-deps-list-file=%s' % gn_runtime_deps_path)
+
+    ret, output, _ = self.Run(cmd)
+    if ret:
+        if self.args.json_output:
+          # write errors to json.output
+          self.WriteJSON({'output': output}, self.args.json_output)
+        # If `gn gen` failed, we should exit early rather than trying to
+        # generate isolates. Run() will have already logged any error output.
+        self.Print('GN gen failed: %d' % ret)
+        return ret
+
+    android = 'target_os="android"' in vals['gn_args']
+    for target in swarming_targets:
+      if android:
+        # Android targets may be either android_apk or executable. The former
+        # will result in runtime_deps associated with the stamp file, while the
+        # latter will result in runtime_deps associated with the executable.
+        label = isolate_map[target]['label']
+        runtime_deps_targets = [
+            target + '.runtime_deps',
+            'obj/%s.stamp.runtime_deps' % label.replace(':', '/')]
+      elif (isolate_map[target]['type'] == 'script' or
+            isolate_map[target].get('label_type') == 'group'):
+        # For script targets, the build target is usually a group,
+        # for which gn generates the runtime_deps next to the stamp file
+        # for the label, which lives under the obj/ directory, but it may
+        # also be an executable.
+        label = isolate_map[target]['label']
+        runtime_deps_targets = [
+            'obj/%s.stamp.runtime_deps' % label.replace(':', '/')]
+        if self.platform == 'win32':
+          runtime_deps_targets += [ target + '.exe.runtime_deps' ]
+        else:
+          runtime_deps_targets += [ target + '.runtime_deps' ]
+      elif self.platform == 'win32':
+        runtime_deps_targets = [target + '.exe.runtime_deps']
+      else:
+        runtime_deps_targets = [target + '.runtime_deps']
+
+      for r in runtime_deps_targets:
+        runtime_deps_path = self.ToAbsPath(build_dir, r)
+        if self.Exists(runtime_deps_path):
+          break
+      else:
+        raise MBErr('did not generate any of %s' %
+                    ', '.join(runtime_deps_targets))
+
+      runtime_deps = self.ReadFile(runtime_deps_path).splitlines()
+
+      self.WriteIsolateFiles(build_dir, target, runtime_deps)
+
+    return 0
+
+  def RunGNIsolate(self):
+    target = self.args.target[0]
+    isolate_map = self.ReadIsolateMap()
+    err, labels = self.MapTargetsToLabels(isolate_map, [target])
+    if err:
+      raise MBErr(err)
+    label = labels[0]
+
+    build_dir = self.args.path[0]
+
+    cmd = self.GNCmd('desc', build_dir, label, 'runtime_deps')
+    ret, out, _ = self.Call(cmd)
+    if ret:
+      if out:
+        self.Print(out)
+      return ret
+
+    runtime_deps = out.splitlines()
+
+    self.WriteIsolateFiles(build_dir, target, runtime_deps)
+
+    ret, _, _ = self.Run([
+        self.PathJoin(self.chromium_src_dir, 'tools', 'luci-go',
+                      self.isolate_exe),
+        'check',
+        '-i',
+        self.ToSrcRelPath('%s/%s.isolate' % (build_dir, target))],
+        buffer_output=False)
+
+    return ret
+
+  def WriteIsolateFiles(self, build_dir, target, runtime_deps):
+    isolate_path = self.ToAbsPath(build_dir, target + '.isolate')
+    self.WriteFile(isolate_path,
+      pprint.pformat({
+        'variables': {
+          'files': sorted(runtime_deps),
+        }
+      }) + '\n')
+
+    self.WriteJSON(
+      {
+        'args': [
+          '--isolated',
+          self.ToSrcRelPath('%s/%s.isolated' % (build_dir, target)),
+          '--isolate',
+          self.ToSrcRelPath('%s/%s.isolate' % (build_dir, target)),
+        ],
+        'dir': self.chromium_src_dir,
+        'version': 1,
+      },
+      isolate_path + 'd.gen.json',
+    )
+
+  def MapTargetsToLabels(self, isolate_map, targets):
+    labels = []
+    err = ''
+
+    for target in targets:
+      if target == 'all':
+        labels.append(target)
+      elif target.startswith('//'):
+        labels.append(target)
+      else:
+        if target in isolate_map:
+          if isolate_map[target]['type'] == 'unknown':
+            err += ('test target "%s" type is unknown\n' % target)
+          else:
+            labels.append(isolate_map[target]['label'])
+        else:
+          err += ('target "%s" not found in '
+                  '//infra/mb/gn_isolate_map.pyl\n' % target)
+
+    return err, labels
+
+  def GNCmd(self, subcommand, path, *args):
+    if self.platform == 'linux2':
+      subdir, exe = 'linux64', 'gn'
+    elif self.platform == 'darwin':
+      subdir, exe = 'mac', 'gn'
+    else:
+      subdir, exe = 'win', 'gn.exe'
+
+    arch = platform.machine()
+    if (arch.startswith('s390') or arch.startswith('ppc') or
+        self.platform.startswith('aix')):
+      # use gn in PATH
+      gn_path = 'gn'
+    else:
+      gn_path = self.PathJoin(self.chromium_src_dir, 'buildtools', subdir, exe)
+    return [gn_path, subcommand, path] + list(args)
+
+
+  def GNArgs(self, vals, expand_imports=False):
+    if vals['cros_passthrough']:
+      if not 'GN_ARGS' in os.environ:
+        raise MBErr('MB is expecting GN_ARGS to be in the environment')
+      gn_args = os.environ['GN_ARGS']
+      if not re.search('target_os.*=.*"chromeos"', gn_args):
+        raise MBErr('GN_ARGS is missing target_os = "chromeos": (GN_ARGS=%s)' %
+                    gn_args)
+    else:
+      gn_args = vals['gn_args']
+
+    if self.args.goma_dir:
+      gn_args += ' goma_dir="%s"' % self.args.goma_dir
+
+    android_version_code = self.args.android_version_code
+    if android_version_code:
+      gn_args += ' android_default_version_code="%s"' % android_version_code
+
+    android_version_name = self.args.android_version_name
+    if android_version_name:
+      gn_args += ' android_default_version_name="%s"' % android_version_name
+
+    args_gn_lines = []
+    parsed_gn_args = {}
+
+    args_file = vals.get('args_file', None)
+    if args_file:
+      if expand_imports:
+        content = self.ReadFile(self.ToAbsPath(args_file))
+        parsed_gn_args = gn_helpers.FromGNArgs(content)
+      else:
+        args_gn_lines.append('import("%s")' % args_file)
+
+    # Canonicalize the arg string into a sorted, newline-separated list
+    # of key-value pairs, and de-dup the keys if need be so that only
+    # the last instance of each arg is listed.
+    parsed_gn_args.update(gn_helpers.FromGNArgs(gn_args))
+    args_gn_lines.append(gn_helpers.ToGNString(parsed_gn_args))
+
+    return '\n'.join(args_gn_lines)
+
+  def ToAbsPath(self, build_path, *comps):
+    return self.PathJoin(self.chromium_src_dir,
+                         self.ToSrcRelPath(build_path),
+                         *comps)
+
+  def ToSrcRelPath(self, path):
+    """Returns a relative path from the top of the repo."""
+    if path.startswith('//'):
+      return path[2:].replace('/', self.sep)
+    return self.RelPath(path, self.chromium_src_dir)
+
+  def RunGNAnalyze(self, vals):
+    # Analyze runs before 'gn gen' now, so we need to run gn gen
+    # in order to ensure that we have a build directory.
+    ret = self.RunGNGen(vals, compute_grit_inputs_for_analyze=True)
+    if ret:
+      return ret
+
+    build_path = self.args.path[0]
+    input_path = self.args.input_path[0]
+    gn_input_path = input_path + '.gn'
+    output_path = self.args.output_path[0]
+    gn_output_path = output_path + '.gn'
+
+    inp = self.ReadInputJSON(['files', 'test_targets',
+                              'additional_compile_targets'])
+    if self.args.verbose:
+      self.Print()
+      self.Print('analyze input:')
+      self.PrintJSON(inp)
+      self.Print()
+
+
+    # This shouldn't normally happen, but could due to unusual race conditions,
+    # like a try job that gets scheduled before a patch lands but runs after
+    # the patch has landed.
+    if not inp['files']:
+      self.Print('Warning: No files modified in patch, bailing out early.')
+      self.WriteJSON({
+            'status': 'No dependency',
+            'compile_targets': [],
+            'test_targets': [],
+          }, output_path)
+      return 0
+
+    gn_inp = {}
+    gn_inp['files'] = ['//' + f for f in inp['files'] if not f.startswith('//')]
+
+    isolate_map = self.ReadIsolateMap()
+    err, gn_inp['additional_compile_targets'] = self.MapTargetsToLabels(
+        isolate_map, inp['additional_compile_targets'])
+    if err:
+      raise MBErr(err)
+
+    err, gn_inp['test_targets'] = self.MapTargetsToLabels(
+        isolate_map, inp['test_targets'])
+    if err:
+      raise MBErr(err)
+    labels_to_targets = {}
+    for i, label in enumerate(gn_inp['test_targets']):
+      labels_to_targets[label] = inp['test_targets'][i]
+
+    try:
+      self.WriteJSON(gn_inp, gn_input_path)
+      cmd = self.GNCmd('analyze', build_path, gn_input_path, gn_output_path)
+      ret, output, _ = self.Run(cmd, force_verbose=True)
+      if ret:
+        if self.args.json_output:
+          # write errors to json.output
+          self.WriteJSON({'output': output}, self.args.json_output)
+        return ret
+
+      gn_outp_str = self.ReadFile(gn_output_path)
+      try:
+        gn_outp = json.loads(gn_outp_str)
+      except Exception as e:
+        self.Print("Failed to parse the JSON string GN returned: %s\n%s"
+                   % (repr(gn_outp_str), str(e)))
+        raise
+
+      outp = {}
+      if 'status' in gn_outp:
+        outp['status'] = gn_outp['status']
+      if 'error' in gn_outp:
+        outp['error'] = gn_outp['error']
+      if 'invalid_targets' in gn_outp:
+        outp['invalid_targets'] = gn_outp['invalid_targets']
+      if 'compile_targets' in gn_outp:
+        all_input_compile_targets = sorted(
+            set(inp['test_targets'] + inp['additional_compile_targets']))
+
+        # If we're building 'all', we can throw away the rest of the targets
+        # since they're redundant.
+        if 'all' in gn_outp['compile_targets']:
+          outp['compile_targets'] = ['all']
+        else:
+          outp['compile_targets'] = gn_outp['compile_targets']
+
+        # crbug.com/736215: When GN returns targets back, for targets in
+        # the default toolchain, GN will have generated a phony ninja
+        # target matching the label, and so we can safely (and easily)
+        # transform any GN label into the matching ninja target. For
+        # targets in other toolchains, though, GN doesn't generate the
+        # phony targets, and we don't know how to turn the labels into
+        # compile targets. In this case, we also conservatively give up
+        # and build everything. Probably the right thing to do here is
+        # to have GN return the compile targets directly.
+        if any("(" in target for target in outp['compile_targets']):
+          self.Print('WARNING: targets with non-default toolchains were '
+                     'found, building everything instead.')
+          outp['compile_targets'] = all_input_compile_targets
+        else:
+          outp['compile_targets'] = [
+              label.replace('//', '') for label in outp['compile_targets']]
+
+        # Windows has a maximum command line length of 8k; even Linux
+        # maxes out at 128k; if analyze returns a *really long* list of
+        # targets, we just give up and conservatively build everything instead.
+        # Probably the right thing here is for ninja to support response
+        # files as input on the command line
+        # (see https://github.com/ninja-build/ninja/issues/1355).
+        if len(' '.join(outp['compile_targets'])) > 7*1024:
+          self.Print('WARNING: Too many compile targets were affected.')
+          self.Print('WARNING: Building everything instead to avoid '
+                     'command-line length issues.')
+          outp['compile_targets'] = all_input_compile_targets
+
+
+      if 'test_targets' in gn_outp:
+        outp['test_targets'] = [
+          labels_to_targets[label] for label in gn_outp['test_targets']]
+
+      if self.args.verbose:
+        self.Print()
+        self.Print('analyze output:')
+        self.PrintJSON(outp)
+        self.Print()
+
+      self.WriteJSON(outp, output_path)
+
+    finally:
+      if self.Exists(gn_input_path):
+        self.RemoveFile(gn_input_path)
+      if self.Exists(gn_output_path):
+        self.RemoveFile(gn_output_path)
+
+    return 0
+
+  def ReadInputJSON(self, required_keys):
+    path = self.args.input_path[0]
+    output_path = self.args.output_path[0]
+    if not self.Exists(path):
+      self.WriteFailureAndRaise('"%s" does not exist' % path, output_path)
+
+    try:
+      inp = json.loads(self.ReadFile(path))
+    except Exception as e:
+      self.WriteFailureAndRaise('Failed to read JSON input from "%s": %s' %
+                                (path, e), output_path)
+
+    for k in required_keys:
+      if not k in inp:
+        self.WriteFailureAndRaise('input file is missing a "%s" key' % k,
+                                  output_path)
+
+    return inp
+
+  def WriteFailureAndRaise(self, msg, output_path):
+    if output_path:
+      self.WriteJSON({'error': msg}, output_path, force_verbose=True)
+    raise MBErr(msg)
+
+  def WriteJSON(self, obj, path, force_verbose=False):
+    try:
+      self.WriteFile(path, json.dumps(obj, indent=2, sort_keys=True) + '\n',
+                     force_verbose=force_verbose)
+    except Exception as e:
+      raise MBErr('Error %s writing to the output path "%s"' %
+                 (e, path))
+
+  def CheckCompile(self, master, builder):
+    url_template = self.args.url_template + '/{builder}/builds/_all?as_text=1'
+    url = urllib2.quote(url_template.format(master=master, builder=builder),
+                        safe=':/()?=')
+    try:
+      builds = json.loads(self.Fetch(url))
+    except Exception as e:
+      return str(e)
+    successes = sorted(
+        [int(x) for x in builds.keys() if "text" in builds[x] and
+          cmp(builds[x]["text"][:2], ["build", "successful"]) == 0],
+        reverse=True)
+    if not successes:
+      return "no successful builds"
+    build = builds[str(successes[0])]
+    step_names = set([step["name"] for step in build["steps"]])
+    compile_indicators = set(["compile", "compile (with patch)", "analyze"])
+    if compile_indicators & step_names:
+      return "compiles"
+    return "does not compile"
+
+  def PrintCmd(self, cmd, env):
+    if self.platform == 'win32':
+      env_prefix = 'set '
+      env_quoter = QuoteForSet
+      shell_quoter = QuoteForCmd
+    else:
+      env_prefix = ''
+      env_quoter = pipes.quote
+      shell_quoter = pipes.quote
+
+    def print_env(var):
+      if env and var in env:
+        self.Print('%s%s=%s' % (env_prefix, var, env_quoter(env[var])))
+
+    print_env('LLVM_FORCE_HEAD_REVISION')
+
+    if cmd[0] == self.executable:
+      cmd = ['python'] + cmd[1:]
+    self.Print(*[shell_quoter(arg) for arg in cmd])
+
+  def PrintJSON(self, obj):
+    self.Print(json.dumps(obj, indent=2, sort_keys=True))
+
+  def Build(self, target):
+    build_dir = self.ToSrcRelPath(self.args.path[0])
+    ninja_cmd = ['ninja', '-C', build_dir]
+    if self.args.jobs:
+      ninja_cmd.extend(['-j', '%d' % self.args.jobs])
+    ninja_cmd.append(target)
+    ret, _, _ = self.Run(ninja_cmd, force_verbose=False, buffer_output=False)
+    return ret
+
+  def Run(self, cmd, env=None, force_verbose=True, buffer_output=True):
+    # This function largely exists so it can be overridden for testing.
+    if self.args.dryrun or self.args.verbose or force_verbose:
+      self.PrintCmd(cmd, env)
+    if self.args.dryrun:
+      return 0, '', ''
+
+    ret, out, err = self.Call(cmd, env=env, buffer_output=buffer_output)
+    if self.args.verbose or force_verbose:
+      if ret:
+        self.Print('  -> returned %d' % ret)
+      if out:
+        self.Print(out, end='')
+      if err:
+        self.Print(err, end='', file=sys.stderr)
+    return ret, out, err
+
+  def Call(self, cmd, env=None, buffer_output=True):
+    if buffer_output:
+      p = subprocess.Popen(cmd, shell=False, cwd=self.chromium_src_dir,
+                           stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+                           env=env)
+      out, err = p.communicate()
+    else:
+      p = subprocess.Popen(cmd, shell=False, cwd=self.chromium_src_dir,
+                           env=env)
+      p.wait()
+      out = err = ''
+    return p.returncode, out, err
+
+  def ExpandUser(self, path):
+    # This function largely exists so it can be overridden for testing.
+    return os.path.expanduser(path)
+
+  def Exists(self, path):
+    # This function largely exists so it can be overridden for testing.
+    return os.path.exists(path)
+
+  def Fetch(self, url):
+    # This function largely exists so it can be overridden for testing.
+    f = urllib2.urlopen(url)
+    contents = f.read()
+    f.close()
+    return contents
+
+  def MaybeMakeDirectory(self, path):
+    try:
+      os.makedirs(path)
+    except OSError as e:
+      if e.errno != errno.EEXIST:
+        raise
+
+  def PathJoin(self, *comps):
+    # This function largely exists so it can be overridden for testing.
+    return os.path.join(*comps)
+
+  def Print(self, *args, **kwargs):
+    # This function largely exists so it can be overridden for testing.
+    print(*args, **kwargs)
+    if kwargs.get('file', sys.stdout) == sys.stdout:
+      sys.stdout.flush()
+
+  def ReadFile(self, path):
+    # This function largely exists so it can be overridden for testing.
+    with open(path) as fp:
+      return fp.read()
+
+  def RelPath(self, path, start='.'):
+    # This function largely exists so it can be overridden for testing.
+    return os.path.relpath(path, start)
+
+  def RemoveFile(self, path):
+    # This function largely exists so it can be overridden for testing.
+    os.remove(path)
+
+  def RemoveDirectory(self, abs_path):
+    if self.platform == 'win32':
+      # In other places in chromium, we often have to retry this command
+      # because we're worried about other processes still holding on to
+      # file handles, but when MB is invoked, it will be early enough in the
+      # build that there should be no other processes to interfere. We
+      # can change this if need be.
+      self.Run(['cmd.exe', '/c', 'rmdir', '/q', '/s', abs_path])
+    else:
+      shutil.rmtree(abs_path, ignore_errors=True)
+
+  def TempFile(self, mode='w'):
+    # This function largely exists so it can be overridden for testing.
+    return tempfile.NamedTemporaryFile(mode=mode, delete=False)
+
+  def WriteFile(self, path, contents, force_verbose=False):
+    # This function largely exists so it can be overridden for testing.
+    if self.args.dryrun or self.args.verbose or force_verbose:
+      self.Print('\nWriting """\\\n%s""" to %s.\n' % (contents, path))
+    with open(path, 'w') as fp:
+      return fp.write(contents)
+
+
+class MBErr(Exception):
+  pass
+
+
+# See http://goo.gl/l5NPDW and http://goo.gl/4Diozm for the painful
+# details of this next section, which handles escaping command lines
+# so that they can be copied and pasted into a cmd window.
+UNSAFE_FOR_SET = set('^<>&|')
+UNSAFE_FOR_CMD = UNSAFE_FOR_SET.union(set('()%'))
+ALL_META_CHARS = UNSAFE_FOR_CMD.union(set('"'))
+
+
+def QuoteForSet(arg):
+  if any(a in UNSAFE_FOR_SET for a in arg):
+    arg = ''.join('^' + a if a in UNSAFE_FOR_SET else a for a in arg)
+  return arg
+
+
+def QuoteForCmd(arg):
+  # First, escape the arg so that CommandLineToArgvW will parse it properly.
+  if arg == '' or ' ' in arg or '"' in arg:
+    quote_re = re.compile(r'(\\*)"')
+    arg = '"%s"' % (quote_re.sub(lambda mo: 2 * mo.group(1) + '\\"', arg))
+
+  # Then check to see if the arg contains any metacharacters other than
+  # double quotes; if it does, quote everything (including the double
+  # quotes) for safety.
+  if any(a in UNSAFE_FOR_CMD for a in arg):
+    arg = ''.join('^' + a if a in ALL_META_CHARS else a for a in arg)
+  return arg
+
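+# Illustrative, hand-traced examples of the quoting rules above; the values are
+# assumptions from reading the code, not authoritative output:
+#   QuoteForSet('a&b')   -> 'a^&b'
+#   QuoteForCmd('100%')  -> '100^%'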
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv[1:]))
diff --git a/src/third_party/v8/tools/mb/mb_unittest.py b/src/third_party/v8/tools/mb/mb_unittest.py
new file mode 100755
index 0000000..765cacb
--- /dev/null
+++ b/src/third_party/v8/tools/mb/mb_unittest.py
@@ -0,0 +1,643 @@
+#!/usr/bin/python
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Tests for mb.py."""
+
+import json
+import StringIO
+import os
+import sys
+import unittest
+
+import mb
+
+
+class FakeMBW(mb.MetaBuildWrapper):
+  def __init__(self, win32=False):
+    super(FakeMBW, self).__init__()
+
+    # Override vars for test portability.
+    if win32:
+      self.chromium_src_dir = 'c:\\fake_src'
+      self.default_config = 'c:\\fake_src\\tools\\mb\\mb_config.pyl'
+      self.default_isolate_map = ('c:\\fake_src\\testing\\buildbot\\'
+                                  'gn_isolate_map.pyl')
+      self.platform = 'win32'
+      self.executable = 'c:\\python\\python.exe'
+      self.sep = '\\'
+    else:
+      self.chromium_src_dir = '/fake_src'
+      self.default_config = '/fake_src/tools/mb/mb_config.pyl'
+      self.default_isolate_map = '/fake_src/testing/buildbot/gn_isolate_map.pyl'
+      self.executable = '/usr/bin/python'
+      self.platform = 'linux2'
+      self.sep = '/'
+
+    self.files = {}
+    self.calls = []
+    self.cmds = []
+    self.cross_compile = None
+    self.out = ''
+    self.err = ''
+    self.rmdirs = []
+
+  def ExpandUser(self, path):
+    return '$HOME/%s' % path
+
+  def Exists(self, path):
+    return self.files.get(path) is not None
+
+  def MaybeMakeDirectory(self, path):
+    self.files[path] = True
+
+  def PathJoin(self, *comps):
+    return self.sep.join(comps)
+
+  def ReadFile(self, path):
+    return self.files[path]
+
+  def WriteFile(self, path, contents, force_verbose=False):
+    if self.args.dryrun or self.args.verbose or force_verbose:
+      self.Print('\nWriting """\\\n%s""" to %s.\n' % (contents, path))
+    self.files[path] = contents
+
+  def Call(self, cmd, env=None, buffer_output=True):
+    self.calls.append(cmd)
+    if self.cmds:
+      return self.cmds.pop(0)
+    return 0, '', ''
+
+  def Print(self, *args, **kwargs):
+    sep = kwargs.get('sep', ' ')
+    end = kwargs.get('end', '\n')
+    f = kwargs.get('file', sys.stdout)
+    if f == sys.stderr:
+      self.err += sep.join(args) + end
+    else:
+      self.out += sep.join(args) + end
+
+  def TempFile(self, mode='w'):
+    return FakeFile(self.files)
+
+  def RemoveFile(self, path):
+    del self.files[path]
+
+  def RemoveDirectory(self, path):
+    self.rmdirs.append(path)
+    files_to_delete = [f for f in self.files if f.startswith(path)]
+    for f in files_to_delete:
+      self.files[f] = None
+
+
+class FakeFile(object):
+  def __init__(self, files):
+    self.name = '/tmp/file'
+    self.buf = ''
+    self.files = files
+
+  def write(self, contents):
+    self.buf += contents
+
+  def close(self):
+     self.files[self.name] = self.buf
+
+
+TEST_CONFIG = """\
+{
+  'masters': {
+    'chromium': {},
+    'fake_master': {
+      'fake_builder': 'rel_bot',
+      'fake_debug_builder': 'debug_goma',
+      'fake_args_bot': '//build/args/bots/fake_master/fake_args_bot.gn',
+      'fake_multi_phase': { 'phase_1': 'phase_1', 'phase_2': 'phase_2'},
+      'fake_args_file': 'args_file_goma',
+      'fake_args_file_twice': 'args_file_twice',
+    },
+  },
+  'configs': {
+    'args_file_goma': ['args_file', 'goma'],
+    'args_file_twice': ['args_file', 'args_file'],
+    'rel_bot': ['rel', 'goma', 'fake_feature1'],
+    'debug_goma': ['debug', 'goma'],
+    'phase_1': ['phase_1'],
+    'phase_2': ['phase_2'],
+  },
+  'mixins': {
+    'fake_feature1': {
+      'gn_args': 'enable_doom_melon=true',
+    },
+    'goma': {
+      'gn_args': 'use_goma=true',
+    },
+    'args_file': {
+      'args_file': '//build/args/fake.gn',
+    },
+    'phase_1': {
+      'gn_args': 'phase=1',
+    },
+    'phase_2': {
+      'gn_args': 'phase=2',
+    },
+    'rel': {
+      'gn_args': 'is_debug=false',
+    },
+    'debug': {
+      'gn_args': 'is_debug=true',
+    },
+  },
+}
+"""
+
+
+TRYSERVER_CONFIG = """\
+{
+  'masters': {
+    'not_a_tryserver': {
+      'fake_builder': 'fake_config',
+    },
+    'tryserver.chromium.linux': {
+      'try_builder': 'fake_config',
+    },
+    'tryserver.chromium.mac': {
+      'try_builder2': 'fake_config',
+    },
+  },
+  'luci_tryservers': {
+    'luci_tryserver1': ['luci_builder1'],
+    'luci_tryserver2': ['luci_builder2'],
+  },
+  'configs': {},
+  'mixins': {},
+}
+"""
+
+
+class UnitTest(unittest.TestCase):
+  def fake_mbw(self, files=None, win32=False):
+    mbw = FakeMBW(win32=win32)
+    mbw.files.setdefault(mbw.default_config, TEST_CONFIG)
+    mbw.files.setdefault(
+      mbw.ToAbsPath('//testing/buildbot/gn_isolate_map.pyl'),
+      '''{
+        "foo_unittests": {
+          "label": "//foo:foo_unittests",
+          "type": "console_test_launcher",
+          "args": [],
+        },
+      }''')
+    mbw.files.setdefault(
+        mbw.ToAbsPath('//build/args/bots/fake_master/fake_args_bot.gn'),
+        'is_debug = false\n')
+    if files:
+      for path, contents in files.items():
+        mbw.files[path] = contents
+    return mbw
+
+  def check(self, args, mbw=None, files=None, out=None, err=None, ret=None):
+    if not mbw:
+      mbw = self.fake_mbw(files)
+
+    actual_ret = mbw.Main(args)
+
+    self.assertEqual(actual_ret, ret)
+    if out is not None:
+      self.assertEqual(mbw.out, out)
+    if err is not None:
+      self.assertEqual(mbw.err, err)
+    return mbw
+
+  def test_analyze(self):
+    files = {'/tmp/in.json': '''{\
+               "files": ["foo/foo_unittest.cc"],
+               "test_targets": ["foo_unittests"],
+               "additional_compile_targets": ["all"]
+             }''',
+             '/tmp/out.json.gn': '''{\
+               "status": "Found dependency",
+               "compile_targets": ["//foo:foo_unittests"],
+               "test_targets": ["//foo:foo_unittests"]
+             }'''}
+
+    mbw = self.fake_mbw(files)
+    mbw.Call = lambda cmd, env=None, buffer_output=True: (0, '', '')
+
+    self.check(['analyze', '-c', 'debug_goma', '//out/Default',
+                '/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0)
+    out = json.loads(mbw.files['/tmp/out.json'])
+    self.assertEqual(out, {
+      'status': 'Found dependency',
+      'compile_targets': ['foo:foo_unittests'],
+      'test_targets': ['foo_unittests']
+    })
+
+  def test_analyze_optimizes_compile_for_all(self):
+    files = {'/tmp/in.json': '''{\
+               "files": ["foo/foo_unittest.cc"],
+               "test_targets": ["foo_unittests"],
+               "additional_compile_targets": ["all"]
+             }''',
+             '/tmp/out.json.gn': '''{\
+               "status": "Found dependency",
+               "compile_targets": ["//foo:foo_unittests", "all"],
+               "test_targets": ["//foo:foo_unittests"]
+             }'''}
+
+    mbw = self.fake_mbw(files)
+    mbw.Call = lambda cmd, env=None, buffer_output=True: (0, '', '')
+
+    self.check(['analyze', '-c', 'debug_goma', '//out/Default',
+                '/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0)
+    out = json.loads(mbw.files['/tmp/out.json'])
+
+    # check that 'foo_unittests' is not in the compile_targets
+    self.assertEqual(['all'], out['compile_targets'])
+
+  def test_analyze_handles_other_toolchains(self):
+    files = {'/tmp/in.json': '''{\
+               "files": ["foo/foo_unittest.cc"],
+               "test_targets": ["foo_unittests"],
+               "additional_compile_targets": ["all"]
+             }''',
+             '/tmp/out.json.gn': '''{\
+               "status": "Found dependency",
+               "compile_targets": ["//foo:foo_unittests",
+                                   "//foo:foo_unittests(bar)"],
+               "test_targets": ["//foo:foo_unittests"]
+             }'''}
+
+    mbw = self.fake_mbw(files)
+    mbw.Call = lambda cmd, env=None, buffer_output=True: (0, '', '')
+
+    self.check(['analyze', '-c', 'debug_goma', '//out/Default',
+                '/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0)
+    out = json.loads(mbw.files['/tmp/out.json'])
+
+    # crbug.com/736215: If GN returns a label containing a toolchain,
+    # MB (and Ninja) don't know how to handle it; to work around this,
+    # we give up and just build everything we were asked to build. The
+    # output compile_targets should include all of the input test_targets and
+    # additional_compile_targets.
+    self.assertEqual(['all', 'foo_unittests'], out['compile_targets'])
+
+  def test_analyze_handles_way_too_many_results(self):
+    too_many_files = ', '.join(['"//foo:foo%d"' % i for i in range(4 * 1024)])
+    files = {'/tmp/in.json': '''{\
+               "files": ["foo/foo_unittest.cc"],
+               "test_targets": ["foo_unittests"],
+               "additional_compile_targets": ["all"]
+             }''',
+             '/tmp/out.json.gn': '''{\
+               "status": "Found dependency",
+               "compile_targets": [''' + too_many_files + '''],
+               "test_targets": ["//foo:foo_unittests"]
+             }'''}
+
+    mbw = self.fake_mbw(files)
+    mbw.Call = lambda cmd, env=None, buffer_output=True: (0, '', '')
+
+    self.check(['analyze', '-c', 'debug_goma', '//out/Default',
+                '/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0)
+    out = json.loads(mbw.files['/tmp/out.json'])
+
+    # If GN returns so many compile targets that we might have command-line
+    # issues, we should give up and just build everything we were asked to
+    # build. The output compile_targets should include all of the input
+    # test_targets and additional_compile_targets.
+    self.assertEqual(['all', 'foo_unittests'], out['compile_targets'])
+
+  def test_gen(self):
+    mbw = self.fake_mbw()
+    self.check(['gen', '-c', 'debug_goma', '//out/Default', '-g', '/goma'],
+               mbw=mbw, ret=0)
+    self.assertMultiLineEqual(mbw.files['/fake_src/out/Default/args.gn'],
+                              ('goma_dir = "/goma"\n'
+                               'is_debug = true\n'
+                               'use_goma = true\n'))
+
+    # Make sure we log both what is written to args.gn and the command line.
+    self.assertIn('Writing """', mbw.out)
+    self.assertIn('/fake_src/buildtools/linux64/gn gen //out/Default --check',
+                  mbw.out)
+
+    mbw = self.fake_mbw(win32=True)
+    self.check(['gen', '-c', 'debug_goma', '-g', 'c:\\goma', '//out/Debug'],
+               mbw=mbw, ret=0)
+    self.assertMultiLineEqual(mbw.files['c:\\fake_src\\out\\Debug\\args.gn'],
+                              ('goma_dir = "c:\\\\goma"\n'
+                               'is_debug = true\n'
+                               'use_goma = true\n'))
+    self.assertIn('c:\\fake_src\\buildtools\\win\\gn.exe gen //out/Debug '
+                  '--check\n', mbw.out)
+
+    mbw = self.fake_mbw()
+    self.check(['gen', '-m', 'fake_master', '-b', 'fake_args_bot',
+                '//out/Debug'],
+               mbw=mbw, ret=0)
+    # TODO(almuthanna): disable test temporarily to
+    #   solve this issue https://crbug.com/v8/11102
+    # self.assertEqual(
+    #     mbw.files['/fake_src/out/Debug/args.gn'],
+    #     'import("//build/args/bots/fake_master/fake_args_bot.gn")\n')
+
+  def test_gen_args_file_mixins(self):
+    mbw = self.fake_mbw()
+    self.check(['gen', '-m', 'fake_master', '-b', 'fake_args_file',
+                '//out/Debug'], mbw=mbw, ret=0)
+
+    self.assertEqual(
+        mbw.files['/fake_src/out/Debug/args.gn'],
+        ('import("//build/args/fake.gn")\n'
+         'use_goma = true\n'))
+
+    mbw = self.fake_mbw()
+    self.check(['gen', '-m', 'fake_master', '-b', 'fake_args_file_twice',
+                '//out/Debug'], mbw=mbw, ret=1)
+
+  def test_gen_fails(self):
+    mbw = self.fake_mbw()
+    mbw.Call = lambda cmd, env=None, buffer_output=True: (1, '', '')
+    self.check(['gen', '-c', 'debug_goma', '//out/Default'], mbw=mbw, ret=1)
+
+  def test_gen_swarming(self):
+    files = {
+      '/tmp/swarming_targets': 'base_unittests\n',
+      '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
+          "{'base_unittests': {"
+          "  'label': '//base:base_unittests',"
+          "  'type': 'raw',"
+          "  'args': [],"
+          "}}\n"
+      ),
+      '/fake_src/out/Default/base_unittests.runtime_deps': (
+          "base_unittests\n"
+      ),
+    }
+    mbw = self.fake_mbw(files)
+    self.check(['gen',
+                '-c', 'debug_goma',
+                '--swarming-targets-file', '/tmp/swarming_targets',
+                '//out/Default'], mbw=mbw, ret=0)
+    self.assertIn('/fake_src/out/Default/base_unittests.isolate',
+                  mbw.files)
+    self.assertIn('/fake_src/out/Default/base_unittests.isolated.gen.json',
+                  mbw.files)
+
+  def test_gen_swarming_script(self):
+    files = {
+      '/tmp/swarming_targets': 'cc_perftests\n',
+      '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
+          "{'cc_perftests': {"
+          "  'label': '//cc:cc_perftests',"
+          "  'type': 'script',"
+          "  'script': '/fake_src/out/Default/test_script.py',"
+          "  'args': [],"
+          "}}\n"
+      ),
+      'c:\\fake_src\out\Default\cc_perftests.exe.runtime_deps': (
+          "cc_perftests\n"
+      ),
+    }
+    mbw = self.fake_mbw(files=files, win32=True)
+    self.check(['gen',
+                '-c', 'debug_goma',
+                '--swarming-targets-file', '/tmp/swarming_targets',
+                '--isolate-map-file',
+                '/fake_src/testing/buildbot/gn_isolate_map.pyl',
+                '//out/Default'], mbw=mbw, ret=0)
+    self.assertIn('c:\\fake_src\\out\\Default\\cc_perftests.isolate',
+                  mbw.files)
+    self.assertIn('c:\\fake_src\\out\\Default\\cc_perftests.isolated.gen.json',
+                  mbw.files)
+
+
+  def test_multiple_isolate_maps(self):
+    files = {
+      '/tmp/swarming_targets': 'cc_perftests\n',
+      '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
+          "{'cc_perftests': {"
+          "  'label': '//cc:cc_perftests',"
+          "  'type': 'raw',"
+          "  'args': [],"
+          "}}\n"
+      ),
+      '/fake_src/testing/buildbot/gn_isolate_map2.pyl': (
+          "{'cc_perftests2': {"
+          "  'label': '//cc:cc_perftests',"
+          "  'type': 'raw',"
+          "  'args': [],"
+          "}}\n"
+      ),
+      'c:\\fake_src\out\Default\cc_perftests.exe.runtime_deps': (
+          "cc_perftests\n"
+      ),
+    }
+    mbw = self.fake_mbw(files=files, win32=True)
+    self.check(['gen',
+                '-c', 'debug_goma',
+                '--swarming-targets-file', '/tmp/swarming_targets',
+                '--isolate-map-file',
+                '/fake_src/testing/buildbot/gn_isolate_map.pyl',
+                '--isolate-map-file',
+                '/fake_src/testing/buildbot/gn_isolate_map2.pyl',
+                '//out/Default'], mbw=mbw, ret=0)
+    self.assertIn('c:\\fake_src\\out\\Default\\cc_perftests.isolate',
+                  mbw.files)
+    self.assertIn('c:\\fake_src\\out\\Default\\cc_perftests.isolated.gen.json',
+                  mbw.files)
+
+
+  def test_duplicate_isolate_maps(self):
+    files = {
+      '/tmp/swarming_targets': 'cc_perftests\n',
+      '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
+          "{'cc_perftests': {"
+          "  'label': '//cc:cc_perftests',"
+          "  'type': 'raw',"
+          "  'args': [],"
+          "}}\n"
+      ),
+      '/fake_src/testing/buildbot/gn_isolate_map2.pyl': (
+          "{'cc_perftests': {"
+          "  'label': '//cc:cc_perftests',"
+          "  'type': 'raw',"
+          "  'args': [],"
+          "}}\n"
+      ),
+      'c:\\fake_src\out\Default\cc_perftests.exe.runtime_deps': (
+          "cc_perftests\n"
+      ),
+    }
+    mbw = self.fake_mbw(files=files, win32=True)
+    # Check that passing duplicate targets into mb fails.
+    self.check(['gen',
+                '-c', 'debug_goma',
+                '--swarming-targets-file', '/tmp/swarming_targets',
+                '--isolate-map-file',
+                '/fake_src/testing/buildbot/gn_isolate_map.pyl',
+                '--isolate-map-file',
+                '/fake_src/testing/buildbot/gn_isolate_map2.pyl',
+                '//out/Default'], mbw=mbw, ret=1)
+
+  def test_isolate(self):
+    files = {
+      '/fake_src/out/Default/toolchain.ninja': "",
+      '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
+          "{'base_unittests': {"
+          "  'label': '//base:base_unittests',"
+          "  'type': 'raw',"
+          "  'args': [],"
+          "}}\n"
+      ),
+      '/fake_src/out/Default/base_unittests.runtime_deps': (
+          "base_unittests\n"
+      ),
+    }
+    self.check(['isolate', '-c', 'debug_goma', '//out/Default',
+                'base_unittests'], files=files, ret=0)
+
+    # test running isolate on an existing build_dir
+    files['/fake_src/out/Default/args.gn'] = 'is_debug = True\n'
+    self.check(['isolate', '//out/Default', 'base_unittests'],
+               files=files, ret=0)
+
+    self.check(['isolate', '//out/Default', 'base_unittests'],
+               files=files, ret=0)
+
+  def test_run(self):
+    files = {
+      '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
+          "{'base_unittests': {"
+          "  'label': '//base:base_unittests',"
+          "  'type': 'raw',"
+          "  'args': [],"
+          "}}\n"
+      ),
+      '/fake_src/out/Default/base_unittests.runtime_deps': (
+          "base_unittests\n"
+      ),
+    }
+    self.check(['run', '-c', 'debug_goma', '//out/Default',
+                'base_unittests'], files=files, ret=0)
+
+  def test_run_swarmed(self):
+    files = {
+      '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
+          "{'base_unittests': {"
+          "  'label': '//base:base_unittests',"
+          "  'type': 'raw',"
+          "  'args': [],"
+          "}}\n"
+      ),
+      '/fake_src/out/Default/base_unittests.runtime_deps': (
+          "base_unittests\n"
+      ),
+      'out/Default/base_unittests.archive.json':
+        ("{\"base_unittests\":\"fake_hash\"}"),
+    }
+
+    mbw = self.fake_mbw(files=files)
+    self.check(['run', '-s', '-c', 'debug_goma', '//out/Default',
+                'base_unittests'], mbw=mbw, ret=0)
+    self.check(['run', '-s', '-c', 'debug_goma', '-d', 'os', 'Win7',
+                '//out/Default', 'base_unittests'], mbw=mbw, ret=0)
+
+  def test_lookup(self):
+    self.check(['lookup', '-c', 'debug_goma'], ret=0,
+               out=('\n'
+                    'Writing """\\\n'
+                    'is_debug = true\n'
+                    'use_goma = true\n'
+                    '""" to _path_/args.gn.\n\n'
+                    '/fake_src/buildtools/linux64/gn gen _path_\n'))
+
+  def test_quiet_lookup(self):
+    self.check(['lookup', '-c', 'debug_goma', '--quiet'], ret=0,
+               out=('is_debug = true\n'
+                    'use_goma = true\n'))
+
+  def test_lookup_goma_dir_expansion(self):
+    self.check(['lookup', '-c', 'rel_bot', '-g', '/foo'], ret=0,
+               out=('\n'
+                    'Writing """\\\n'
+                    'enable_doom_melon = true\n'
+                    'goma_dir = "/foo"\n'
+                    'is_debug = false\n'
+                    'use_goma = true\n'
+                    '""" to _path_/args.gn.\n\n'
+                    '/fake_src/buildtools/linux64/gn gen _path_\n'))
+
+  def test_help(self):
+    orig_stdout = sys.stdout
+    try:
+      sys.stdout = StringIO.StringIO()
+      self.assertRaises(SystemExit, self.check, ['-h'])
+      self.assertRaises(SystemExit, self.check, ['help'])
+      self.assertRaises(SystemExit, self.check, ['help', 'gen'])
+    finally:
+      sys.stdout = orig_stdout
+
+  def test_multiple_phases(self):
+    # Check that not passing a --phase to a multi-phase builder fails.
+    mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase'],
+                     ret=1)
+    self.assertIn('Must specify a build --phase', mbw.out)
+
+    # Check that passing a --phase to a single-phase builder fails.
+    mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_builder',
+                      '--phase', 'phase_1'], ret=1)
+    self.assertIn('Must not specify a build --phase', mbw.out)
+
+    # Check that passing a wrong phase key to a multi-phase builder fails.
+    mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase',
+                      '--phase', 'wrong_phase'], ret=1)
+    self.assertIn('Phase wrong_phase doesn\'t exist', mbw.out)
+
+    # Check that passing a correct phase key to a multi-phase builder passes.
+    mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase',
+                      '--phase', 'phase_1'], ret=0)
+    self.assertIn('phase = 1', mbw.out)
+
+    mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_multi_phase',
+                      '--phase', 'phase_2'], ret=0)
+    self.assertIn('phase = 2', mbw.out)
+
+  def test_recursive_lookup(self):
+    files = {
+        '/fake_src/build/args/fake.gn': (
+          'enable_doom_melon = true\n'
+          'enable_antidoom_banana = true\n'
+        )
+    }
+    self.check(['lookup', '-m', 'fake_master', '-b', 'fake_args_file',
+                '--recursive'], files=files, ret=0,
+               out=('enable_antidoom_banana = true\n'
+                    'enable_doom_melon = true\n'
+                    'use_goma = true\n'))
+
+  def test_validate(self):
+    mbw = self.fake_mbw()
+    self.check(['validate'], mbw=mbw, ret=0)
+
+  def test_buildbucket(self):
+    mbw = self.fake_mbw()
+    mbw.files[mbw.default_config] = TRYSERVER_CONFIG
+    self.check(['gerrit-buildbucket-config'], mbw=mbw,
+               ret=0,
+               out=('# This file was generated using '
+                    '"tools/mb/mb.py gerrit-buildbucket-config".\n'
+                    '[bucket "luci.luci_tryserver1"]\n'
+                    '\tbuilder = luci_builder1\n'
+                    '[bucket "luci.luci_tryserver2"]\n'
+                    '\tbuilder = luci_builder2\n'
+                    '[bucket "master.tryserver.chromium.linux"]\n'
+                    '\tbuilder = try_builder\n'
+                    '[bucket "master.tryserver.chromium.mac"]\n'
+                    '\tbuilder = try_builder2\n'))
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/src/third_party/v8/tools/memory/asan/blacklist.txt b/src/third_party/v8/tools/memory/asan/blacklist.txt
new file mode 100644
index 0000000..2bb1aa9
--- /dev/null
+++ b/src/third_party/v8/tools/memory/asan/blacklist.txt
@@ -0,0 +1,4 @@
+# The rules in this file are only applied at compile time. If you can modify the
+# source in question, consider function attributes to disable instrumentation.
+#
+# Please think twice before you add or remove these rules.
\ No newline at end of file
diff --git a/src/third_party/v8/tools/memory/asan/blacklist_win.txt b/src/third_party/v8/tools/memory/asan/blacklist_win.txt
new file mode 100644
index 0000000..2bb1aa9
--- /dev/null
+++ b/src/third_party/v8/tools/memory/asan/blacklist_win.txt
@@ -0,0 +1,4 @@
+# The rules in this file are only applied at compile time. If you can modify the
+# source in question, consider function attributes to disable instrumentation.
+#
+# Please think twice before you add or remove these rules.
\ No newline at end of file
diff --git a/src/third_party/v8/tools/memory/tsan_v2/ignores.txt b/src/third_party/v8/tools/memory/tsan_v2/ignores.txt
new file mode 100644
index 0000000..80babf4
--- /dev/null
+++ b/src/third_party/v8/tools/memory/tsan_v2/ignores.txt
@@ -0,0 +1,5 @@
+# The rules in this file are only applied at compile time. If you can modify the
+# source in question, consider function attributes to disable instrumentation.
+#
+# Please think twice before you add or remove these rules.
+# Data races should typically go to suppressions.txt.
\ No newline at end of file
diff --git a/src/third_party/v8/tools/msan/blacklist.txt b/src/third_party/v8/tools/msan/blacklist.txt
new file mode 100644
index 0000000..2bb1aa9
--- /dev/null
+++ b/src/third_party/v8/tools/msan/blacklist.txt
@@ -0,0 +1,4 @@
+# The rules in this file are only applied at compile time. If you can modify the
+# source in question, consider function attributes to disable instrumentation.
+#
+# Please think twice before you add or remove these rules.
\ No newline at end of file
diff --git a/src/third_party/v8/tools/ninja/ninja_output.py b/src/third_party/v8/tools/ninja/ninja_output.py
new file mode 100644
index 0000000..ec4d27e
--- /dev/null
+++ b/src/third_party/v8/tools/ninja/ninja_output.py
@@ -0,0 +1,44 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import os
+import os.path
+
+
+def GetNinjaOutputDirectory(v8_root, configuration=None):
+  """Returns <v8_root>/<output_dir>/(Release|Debug).
+
+  The configuration chosen is the one most recently generated/built, but can be
+  overridden via the <configuration> parameter. Detects a custom output_dir
+  specified by GYP_GENERATOR_FLAGS."""
+
+  output_dir = 'out'
+  generator_flags = os.getenv('GYP_GENERATOR_FLAGS', '').split(' ')
+  for flag in generator_flags:
+    name_value = flag.split('=', 1)
+    if len(name_value) == 2 and name_value[0] == 'output_dir':
+      output_dir = name_value[1]
+
+  root = os.path.join(v8_root, output_dir)
+  if configuration:
+    return os.path.join(root, configuration)
+
+  debug_path = os.path.join(root, 'Debug')
+  release_path = os.path.join(root, 'Release')
+
+  def is_release_newer(test_path):
+    try:
+      debug_mtime = os.path.getmtime(os.path.join(debug_path, test_path))
+    except os.error:
+      debug_mtime = 0
+    try:
+      rel_mtime = os.path.getmtime(os.path.join(release_path, test_path))
+    except os.error:
+      rel_mtime = 0
+    return rel_mtime >= debug_mtime
+
+  if is_release_newer('.ninja_log') or is_release_newer('.ninja_deps'):
+    return release_path
+  return debug_path
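+
+
+# Illustrative usage (hypothetical path; behaviour as described in the
+# docstring above):
+#   GetNinjaOutputDirectory('/path/to/v8') returns /path/to/v8/out/Release or
+#   /path/to/v8/out/Debug, whichever was generated/built more recently, unless
+#   GYP_GENERATOR_FLAGS specifies a custom output_dir.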
diff --git a/src/third_party/v8/tools/node/README.md b/src/third_party/v8/tools/node/README.md
new file mode 100644
index 0000000..dc16c91
--- /dev/null
+++ b/src/third_party/v8/tools/node/README.md
@@ -0,0 +1,12 @@
+# Node.js Backports
+
+We no longer maintain our own backport script.
+
+For backporting V8 changes to Node.js, there is a useful script in
+[node-core-utils][1]. You can use the `git node v8 backport` command, which will
+bump the necessary V8 version numbers depending on the specific branch.
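+
+A typical invocation looks like `git node v8 backport <sha>` (an illustrative
+example; see the node-core-utils documentation for the exact flags).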
+
+See the [Node.js documentation][2] on V8 backports for a guide.
+
+[1]: https://github.com/nodejs/node-core-utils
+[2]: https://github.com/nodejs/node/blob/master/doc/guides/maintaining-V8.md
diff --git a/src/third_party/v8/tools/node/fetch_deps.py b/src/third_party/v8/tools/node/fetch_deps.py
new file mode 100755
index 0000000..ee5b629
--- /dev/null
+++ b/src/third_party/v8/tools/node/fetch_deps.py
@@ -0,0 +1,101 @@
+#!/usr/bin/env python
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Use this script to fetch all dependencies for V8 to run build_gn.py.
+
+Usage: fetch_deps.py <v8-path>
+"""
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import os
+import subprocess
+import sys
+
+import node_common
+
+GCLIENT_SOLUTION = [
+  { "name"        : "v8",
+    "url"         : "https://chromium.googlesource.com/v8/v8.git",
+    "deps_file"   : "DEPS",
+    "managed"     : False,
+    "custom_deps" : {
+      # These deps are already part of Node.js.
+      "v8/base/trace_event/common"            : None,
+      "v8/third_party/googletest/src"         : None,
+      # These deps are unnecessary for building.
+      "v8/test/benchmarks/data"               : None,
+      "v8/testing/gmock"                      : None,
+      "v8/test/mozilla/data"                  : None,
+      "v8/test/test262/data"                  : None,
+      "v8/test/test262/harness"               : None,
+      "v8/third_party/android_ndk"            : None,
+      "v8/third_party/android_sdk"            : None,
+      "v8/third_party/catapult"               : None,
+      "v8/third_party/colorama/src"           : None,
+      "v8/third_party/fuchsia-sdk"            : None,
+      "v8/third_party/instrumented_libraries" : None,
+      "v8/tools/luci-go"                      : None,
+      "v8/tools/swarming_client"              : None,
+      "v8/third_party/qemu-linux-x64"         : None,
+    },
+  },
+]
+
+def EnsureGit(v8_path):
+  def git(args):
+    # shell=True needed on Windows to resolve git.bat.
+    return subprocess.check_output(
+        "git " + args, cwd=v8_path, shell=True).strip()
+
+  expected_git_dir = os.path.join(v8_path, ".git")
+  actual_git_dir = git("rev-parse --absolute-git-dir")
+  if expected_git_dir == actual_git_dir:
+    print("V8 is tracked stand-alone by git.")
+    return False
+  print("Initializing temporary git repository in v8.")
+  git("init")
+  git("config user.name \"Ada Lovelace\"")
+  git("config user.email ada@lovela.ce")
+  git("commit --allow-empty -m init")
+  return True
+
+def FetchDeps(v8_path):
+  # Verify path.
+  v8_path = os.path.abspath(v8_path)
+  assert os.path.isdir(v8_path)
+
+  # Check out depot_tools if necessary.
+  depot_tools = node_common.EnsureDepotTools(v8_path, True)
+
+  temporary_git = EnsureGit(v8_path)
+  try:
+    print("Fetching dependencies.")
+    env = os.environ.copy()
+    # gclient needs to have depot_tools in the PATH.
+    env["PATH"] = depot_tools + os.pathsep + env["PATH"]
+    gclient = os.path.join(depot_tools, "gclient.py")
+    spec = "solutions = %s" % GCLIENT_SOLUTION
+    subprocess.check_call([sys.executable, gclient, "sync", "--spec", spec],
+                           cwd=os.path.join(v8_path, os.path.pardir),
+                           env=env)
+  except:
+    raise
+  finally:
+    if temporary_git:
+      node_common.UninitGit(v8_path)
+    # Clean up .gclient_entries file.
+    gclient_entries = os.path.normpath(
+        os.path.join(v8_path, os.pardir, ".gclient_entries"))
+    if os.path.isfile(gclient_entries):
+      os.remove(gclient_entries)
+
+  return depot_tools
+
+
+if __name__ == "__main__":
+  FetchDeps(sys.argv[1])
diff --git a/src/third_party/v8/tools/node/node_common.py b/src/third_party/v8/tools/node/node_common.py
new file mode 100755
index 0000000..2efb218
--- /dev/null
+++ b/src/third_party/v8/tools/node/node_common.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import os
+import pipes
+import shutil
+import stat
+import subprocess
+import sys
+
+DEPOT_TOOLS_URL = \
+  "https://chromium.googlesource.com/chromium/tools/depot_tools.git"
+
+def EnsureDepotTools(v8_path, fetch_if_not_exist):
+  def _Get(v8_path):
+    depot_tools = os.path.join(v8_path, "_depot_tools")
+    try:
+      gclient_path = os.path.join(depot_tools, "gclient.py")
+      if os.path.isfile(gclient_path):
+        return depot_tools
+    except:
+      pass
+    if fetch_if_not_exist:
+      print("Checking out depot_tools.")
+      # shell=True needed on Windows to resolve git.bat.
+      subprocess.check_call("git clone {} {}".format(
+          pipes.quote(DEPOT_TOOLS_URL),
+          pipes.quote(depot_tools)), shell=True)
+      # Using check_output to hide warning messages.
+      subprocess.check_output(
+          [sys.executable, gclient_path, "metrics", "--opt-out"],
+          cwd=depot_tools)
+      return depot_tools
+    return None
+  depot_tools = _Get(v8_path)
+  assert depot_tools is not None
+  print("Using depot tools in %s" % depot_tools)
+  return depot_tools
+
+def UninitGit(v8_path):
+  print("Uninitializing temporary git repository")
+  target = os.path.join(v8_path, ".git")
+  if os.path.isdir(target):
+    print(">> Cleaning up %s" % target)
+    def OnRmError(func, path, exec_info):
+      # This might happen on Windows
+      os.chmod(path, stat.S_IWRITE)
+      os.unlink(path)
+    shutil.rmtree(target, onerror=OnRmError)
diff --git a/src/third_party/v8/tools/objdump-v8 b/src/third_party/v8/tools/objdump-v8
new file mode 100755
index 0000000..dc7310a
--- /dev/null
+++ b/src/third_party/v8/tools/objdump-v8
@@ -0,0 +1,86 @@
+#!/usr/bin/env python
+#
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os.path
+import re
+import subprocess
+import sys
+
+
+def get_address_bounds():
+  start = -1
+  end = -1
+  for arg in sys.argv:
+    if arg.startswith("--start-address="):
+      start = int(arg[-12:], 16)
+    if arg.startswith("--stop-address="):
+      end = int(arg[-12:], 16)
+  return start, end
+
+
+def format_line(line):
+  pieces = line.split(None, 3)
+  return " " + pieces[0][2:] + ":\t" + pieces[3]
+
+
+def is_comment(line):
+  stripped = line.strip()
+  return stripped.startswith("--") or stripped.startswith(";;")
+
+def main():
+  filename = sys.argv[-1]
+  match = re.match(r"/tmp/perf-(.*)\.map", filename)
+  if match:
+    start, end = get_address_bounds()
+    process_codefile = "code-" + match.group(1) + "-1.asm"
+    if os.path.exists(process_codefile):
+      codefile = open(process_codefile, "r")
+    else:
+      codefile = open("code.asm", "r")
+    with codefile:
+      printing = False
+      for line in codefile:
+        if line.startswith("0x"):
+          addr = int(line.split()[0], 0)
+          if start <= addr <= end:
+            printing = True
+            sys.stdout.write(format_line(line))
+          elif printing:
+            break
+        elif printing:
+          if not is_comment(line):
+            break
+          else:
+            sys.stdout.write(line)
+  else:
+    sys.argv[0] = "objdump"
+    sys.exit(subprocess.call(sys.argv))
+
+if __name__ == "__main__":
+  main()
diff --git a/src/third_party/v8/tools/parse-processor b/src/third_party/v8/tools/parse-processor
new file mode 100755
index 0000000..b601dda
--- /dev/null
+++ b/src/third_party/v8/tools/parse-processor
@@ -0,0 +1,37 @@
+#!/bin/sh
+
+# Find the name of the log file to process; it must not start with a dash.
+log_file="v8.log"
+for arg in "$@"
+do
+  if ! expr "X${arg}" : "^X-" > /dev/null; then
+    log_file=${arg}
+  fi
+done
+
+tools_path=`cd $(dirname "$0");pwd`
+if [ ! "$D8_PATH" ]; then
+  d8_public=`which d8`
+  if [ -x "$d8_public" ]; then D8_PATH=$(dirname "$d8_public"); fi
+fi
+[ -n "$D8_PATH" ] || D8_PATH=$tools_path/..
+d8_exec=$D8_PATH/d8
+
+if [ ! -x "$d8_exec" ]; then
+  D8_PATH=`pwd`/out.gn/optdebug
+  d8_exec=$D8_PATH/d8
+fi
+
+if [ ! -x "$d8_exec" ]; then
+  d8_exec=`grep -m 1 -o '".*/d8"' $log_file | sed 's/"//g'`
+fi
+
+if [ ! -x "$d8_exec" ]; then
+  echo "d8 shell not found in $D8_PATH"
+  echo "Please provide path to d8 as env var in D8_PATH"
+  exit 1
+fi
+
+# nm spits out 'no symbols found' messages to stderr.
+cat $log_file | $d8_exec --allow-natives-syntax \
+  --module $tools_path/parse-processor-driver.mjs -- $@ 2>/dev/null
diff --git a/src/third_party/v8/tools/parse-processor-driver.mjs b/src/third_party/v8/tools/parse-processor-driver.mjs
new file mode 100644
index 0000000..bec5b78
--- /dev/null
+++ b/src/third_party/v8/tools/parse-processor-driver.mjs
@@ -0,0 +1,38 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import { WebInspector } from "./sourcemap.mjs";
+import {
+    ParseProcessor, ArgumentsProcessor, readFile,
+  } from "./parse-processor.mjs";
+
+function processArguments(args) {
+  const processor = new ArgumentsProcessor(args);
+  if (processor.parse()) {
+    return processor.result();
+  } else {
+    processor.printUsageAndExit();
+  }
+}
+
+function initSourceMapSupport() {
+  // Pull dev tools source maps into our namespace.
+  SourceMap = WebInspector.SourceMap;
+
+  // Overwrite the load function to load scripts synchronously.
+  SourceMap.load = function(sourceMapURL) {
+    const content = readFile(sourceMapURL);
+    const sourceMapObject = (JSON.parse(content));
+    return new SourceMap(sourceMapURL, sourceMapObject);
+  };
+}
+
+const params = processArguments(arguments);
+let sourceMap = null;
+if (params.sourceMap) {
+  initSourceMapSupport();
+  sourceMap = SourceMap.load(params.sourceMap);
+}
+const parseProcessor = new ParseProcessor();
+parseProcessor.processLogFile(params.logFileName);
diff --git a/src/third_party/v8/tools/parse-processor.html b/src/third_party/v8/tools/parse-processor.html
new file mode 100644
index 0000000..9d78bbf
--- /dev/null
+++ b/src/third_party/v8/tools/parse-processor.html
@@ -0,0 +1,412 @@
+<!DOCTYPE html>
+<html>
+<!--
+Copyright 2016 the V8 project authors. All rights reserved.  Use of this source
+code is governed by a BSD-style license that can be found in the LICENSE file.
+-->
+
+<head>
+<meta charset="utf-8">
+<title>V8 Parse Processor</title>
+<style>
+  html {
+    font-family: monospace;
+  }
+
+  .parse {
+    background-color: red;
+    border: 1px red solid;
+  }
+
+  .preparse {
+    background-color: orange;
+    border: 1px orange solid;
+  }
+
+  .resolution {
+    background-color: green;
+    border: 1px green solid;
+  }
+
+  .execution {
+    background-color: black;
+    border-left: 2px black solid;
+    z-index: -1;
+  }
+
+  .script {
+    margin-top: 1em;
+    overflow: visible;
+    clear: both;
+    border-top: 2px black dotted;
+  }
+  .script h3 {
+    height: 20px;
+    margin-bottom: 0.5em;
+    white-space: nowrap;
+  }
+
+  .script-details {
+    float: left;
+  }
+
+  .chart {
+    float: left;
+    margin-right: 2em;
+  }
+
+  .funktion-list {
+    float: left;
+    height: 400px;
+  }
+
+  .funktion-list > ul {
+    height: 80%;
+    overflow-y: scroll;
+  }
+
+  .funktion {
+  }
+
+  .script-size {
+    display: inline-flex;
+    background-color: #505050;
+    border-radius: 3px;
+    padding: 3px;
+    margin: 2px;
+    white-space: nowrap;
+    overflow: hidden;
+    text-decoration: none;
+    color: white;
+  }
+  .script-size.eval {
+    background-color: #ee6300fc;
+  }
+  .script-size.streaming {
+    background-color: #008aff;
+  }
+  .script-size.deserialized {
+    background-color: #1fad00fc;
+  }
+
+  .script-details {
+    padding-right: 5px;
+    margin-right: 4px;
+  }
+  /* all but the last need a border  */
+  .script-details:nth-last-child(n+2) {
+    border-right: 1px white solid;
+  }
+
+  .script-details.id {
+    min-width: 2em;
+    text-align: right;
+  }
+</style>
+<script src="https://www.gstatic.com/charts/loader.js"></script>
+<script type="module">
+
+import { ParseProcessor, kSecondsToMillis } from "./parse-processor.mjs";
+
+google.charts.load('current', {packages: ['corechart']});
+
+function $(query) {
+  return document.querySelector(query);
+}
+
+window.addEventListener('DOMContentLoaded', (event) => {
+  $("#uploadInput").focus();
+});
+
+document.loadFile = function() {
+  let files = $('#uploadInput').files;
+
+  let file = files[0];
+  let reader = new FileReader();
+
+  reader.onload = function(evt) {
+    const kTimerName = 'parse log file';
+    console.time(kTimerName);
+    let parseProcessor = new ParseProcessor();
+    parseProcessor.processString(this.result);
+    console.timeEnd(kTimerName);
+    renderParseResults(parseProcessor);
+    document.parseProcessor = parseProcessor;
+  }
+  reader.readAsText(file);
+}
+
+function createNode(tag, classNames) {
+  let node = document.createElement(tag);
+  if (classNames) {
+    if (Array.isArray(classNames)) {
+      node.classList.add(...classNames);
+    } else {
+      node.className = classNames;
+    }
+  }
+  return node;
+}
+
+function div(...args) {
+  return createNode('div', ...args);
+}
+
+function h1(string) {
+  let node = createNode('h1');
+  node.appendChild(text(string));
+  return node;
+}
+
+function h3(string, ...args) {
+  let node = createNode('h3', ...args);
+  if (string) node.appendChild(text(string));
+  return node;
+}
+
+function a(href, string, ...args) {
+  let link = createNode('a', ...args);
+  if (href.length) link.href = href;
+  if (string) link.appendChild(text(string));
+  return link;
+}
+
+function text(string) {
+  return document.createTextNode(string);
+}
+
+function delay(t) {
+  return new Promise(resolve => setTimeout(resolve, t));
+}
+
+function renderParseResults(parseProcessor) {
+  let result = $('#result');
+  // clear out all existing result pages;
+  result.innerHTML = '';
+  const start = parseProcessor.firstEventTimestamp;
+  const end = parseProcessor.lastEventTimestamp;
+  renderScript(result, parseProcessor.totalScript, start, end);
+  // Build up the graphs lazily to keep the page responsive.
+  parseProcessor.scripts.forEach(
+      script => renderScript(result, script, start, end));
+  renderScriptSizes(parseProcessor);
+  // Install an intersection observer to lazily load the graphs when the script
+  // div becomes visible for the first time.
+  var io = new IntersectionObserver((entries, observer) => {
+    entries.forEach(entry => {
+      if (entry.intersectionRatio == 0) return;
+      console.assert(!entry.target.querySelector('.graph'));
+      let target = entry.target;
+      appendGraph(target.script, target, start, end);
+      observer.unobserve(entry.target);
+    });
+  }, {rootMargin: '400px'});
+  document.querySelectorAll('.script').forEach(div => io.observe(div));
+}
+
+const kTimeFactor = 10;
+const kHeight = 20;
+const kFunktionTopOffset = 50;
+
+function renderScript(result, script, start, end) {
+  // Filter out empty scripts.
+  if (script.isEmpty() || script.lastParseEvent == 0) return;
+
+  let scriptDiv = div('script');
+  scriptDiv.script = script;
+
+  let scriptTitle = h3();
+  let anchor = a("", 'Script #' + script.id);
+  anchor.name = "script" + script.id;
+  scriptTitle.appendChild(anchor);
+  scriptDiv.appendChild(scriptTitle);
+  if (script.file) scriptTitle.appendChild(a(script.file, script.file));
+  let summary = createNode('pre', 'script-details');
+  summary.appendChild(text(script.summary));
+  scriptDiv.appendChild(summary);
+  result.appendChild(scriptDiv);
+}
+
+function renderScriptSizes(parseProcessor) {
+  let scriptsDiv = $('#scripts');
+  parseProcessor.scripts.forEach(
+    script => {
+      let scriptDiv = a('#script'+script.id, '', 'script-size');
+      let scriptId = div('script-details');
+      scriptId.classList.add('id');
+      scriptId.innerText = script.id;
+      scriptDiv.appendChild(scriptId);
+      let scriptSize = div('script-details');
+      scriptSize.innerText = BYTES(script.bytesTotal);
+      scriptDiv.appendChild(scriptSize);
+      let scriptUrl = div('script-details');
+      if (script.isEval) {
+        scriptUrl.innerText = "eval";
+        scriptDiv.classList.add('eval');
+      } else {
+        scriptUrl.innerText = script.file.split("/").pop();
+      }
+      if (script.isStreamingCompiled) {
+        scriptDiv.classList.add('streaming');
+      } else if (script.deserializationTimestamp > 0) {
+        scriptDiv.classList.add('deserialized');
+      }
+      scriptDiv.appendChild(scriptUrl);
+      scriptDiv.style.width = script.bytesTotal * 0.001;
+      scriptsDiv.appendChild(scriptDiv);
+    });
+}
+
+const kMaxTime = 120 * kSecondsToMillis;
+// Resolution of the graphs
+const kTimeIncrement = 1;
+const kSelectionTimespan = 2;
+// TODO(cbruni): support compilation cache hit.
+const series = [
+    ['firstParseEvent', 'Any Parse', 'area'],
+    ['execution', '1st Exec', 'area'],
+    ['firstCompileEvent', 'Any Compile', 'area'],
+    ['compile', 'Eager Compile'],
+    ['lazyCompile', 'Lazy Compile'],
+    ['parse', 'Parsing'],
+    ['preparse', 'Preparse'],
+    ['resolution', 'Preparse with Var. Resolution'],
+    ['deserialization', 'Deserialization'],
+    ['optimization', 'Optimize'],
+];
+const metricNames = series.map(each => each[0]);
+// Display cumulative values (useful for bytes).
+const kCumulative = true;
+// Include durations in the graphs.
+const kUseDuration = false;
+
+
+function appendGraph(script, parentNode, start, end) {
+  const timerLabel = 'graph script=' + script.id;
+  // TODO(cbruni): add support for network events
+
+  console.time(timerLabel);
+  let data = new google.visualization.DataTable();
+  data.addColumn('number', 'Duration');
+  // The series interleave bytes processed and time spent, and thus use two
+  // different vAxes.
+  let seriesOptions = [];
+  let colors = ['#4D4D4D', '#fff700', '#5DA5DA', '#FAA43A', '#60BD68',
+      '#F17CB0', '#B2912F', '#B276B2', '#DECF3F', '#F15854'];
+  series.forEach(([metric, description, type]) => {
+    let color = colors.shift();
+    // Add the bytes column.
+    data.addColumn('number', description);
+    let options = {targetAxisIndex: 0, color: color};
+    if (type == 'area') options.type = 'area';
+    seriesOptions.push(options);
+    // Add the time column.
+    if (kUseDuration) {
+      data.addColumn('number', description + ' Duration');
+      seriesOptions.push(
+          {targetAxisIndex: 1, color: color, lineDashStyle: [3, 2]});
+    }
+  });
+
+  const maxTime = Math.min(kMaxTime, end);
+  console.time('metrics');
+  let metricValues =
+    script.getAccumulatedTimeMetrics(metricNames, 0, maxTime, kTimeIncrement,
+        kCumulative, kUseDuration);
+  console.timeEnd('metrics');
+  // Make sure that the series added to the graph matches the returned values.
+  console.assert(metricValues[0].length == seriesOptions.length + 1);
+  data.addRows(metricValues);
+
+  let options = {
+    explorer: {
+      actions: ['dragToZoom', 'rightClickToReset'],
+      maxZoomIn: 0.01
+    },
+    hAxis: {
+      format: '#,###.##s'
+    },
+    vAxes: {
+      0: {title: 'Bytes Touched', format: 'short'},
+      1: {title: 'Duration', format: '#,###ms'}
+    },
+    height: 400,
+    width: 1000,
+    chartArea: {left: 70, top: 0, right: 160, height: "90%"},
+    // The first series should be an area chart (total bytes touched),
+    series: seriesOptions,
+    // everything else is a line.
+    seriesType: 'line'
+  };
+  let graphNode = createNode('div', 'chart');
+  let listNode = createNode('div', 'funktion-list');
+  parentNode.appendChild(graphNode);
+  parentNode.appendChild(listNode);
+  let chart = new google.visualization.ComboChart(graphNode);
+  google.visualization.events.addListener(chart, 'select',
+      () => selectGraphPointHandler(chart, data, script, parentNode));
+  chart.draw(data, options);
+  // Add event listeners
+  console.timeEnd(timerLabel);
+}
+
+function selectGraphPointHandler(chart, data, script, parentNode) {
+  let selection = chart.getSelection();
+  if (selection.length <= 0) return;
+  // Display a list of funktions with events at the given time.
+  let {row, column} = selection[0];
+  if (row === null || column === null) return;
+  const kEntrySize = kUseDuration ? 2 : 1;
+  let [metric, description] = series[((column-1)/ kEntrySize) | 0];
+  let time = data.getValue(row, 0);
+  let funktions = script.getFunktionsAtTime(
+        time * kSecondsToMillis, kSelectionTimespan, metric);
+  let oldList = parentNode.querySelector('.funktion-list');
+  parentNode.replaceChild(
+      createFunktionList(metric, description, time, funktions), oldList);
+}
+
+function createFunktionList(metric, description, time, funktions) {
+  let container = createNode('div', 'funktion-list');
+  container.appendChild(h3('Changes of "' + description + '" at ' +
+        time + 's: ' + funktions.length));
+  let listNode = createNode('ul');
+  funktions.forEach(funktion => {
+    let node = createNode('li', 'funktion');
+    node.funktion = funktion;
+    node.appendChild(text(funktion.toString(false) + " "));
+    let script = funktion.script;
+    if (script) {
+      node.appendChild(a("#script" + script.id, "in script " + script.id));
+    }
+    listNode.appendChild(node);
+  });
+  container.appendChild(listNode);
+  return container;
+}
+</script>
+</head>
+
+<body>
+  <h1>BEHOLD, THIS IS PARSEROR!</h1>
+
+  <h2>Usage</h2>
+  Run your script with <code>--log-function-events</code> and upload <code>v8.log</code> on this page:<br/>
+  <code>/path/to/d8 --log-function-events your_script.js</code>
+
+  <h2>Data</h2>
+  <form name="fileForm">
+    <p>
+      <input id="uploadInput" type="file" name="files" onchange="loadFile();" accept=".log"> trace entries: <span id="count">0</span>
+    </p>
+  </form>
+
+
+  <h2>Scripts</h2>
+  <div id="scripts"></div>
+
+  <h2>Result</h2>
+  <div id="result"></div>
+</body>
+
+</html>
diff --git a/src/third_party/v8/tools/parse-processor.mjs b/src/third_party/v8/tools/parse-processor.mjs
new file mode 100644
index 0000000..f78c4c0
--- /dev/null
+++ b/src/third_party/v8/tools/parse-processor.mjs
@@ -0,0 +1,1144 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+import { LogReader, parseString } from "./logreader.mjs";
+import { BaseArgumentsProcessor } from "./arguments.mjs";
+
+/**
+ * A thin wrapper around shell's 'read' function showing a file name on error.
+ */
+export function readFile(fileName) {
+  try {
+    return read(fileName);
+  } catch (e) {
+    console.log(fileName + ': ' + (e.message || e));
+    throw e;
+  }
+}
+
+// ===========================================================================
+
+// Why this particular formatting? For an international audience, the
+// confusion between the decimal and the thousands separator is significant
+// (comma "," vs. dot "."). The Swiss formatting uses "'" as a thousands
+// separator, dropping most of that confusion.
+const numberFormat = new Intl.NumberFormat('de-CH', {
+  maximumFractionDigits: 2,
+  minimumFractionDigits: 2,
+});
+
+function formatNumber(value) {
+  return numberFormat.format(value);
+}
+
+function BYTES(bytes, total) {
+  let units = ['B ', 'kB', 'mB', 'gB'];
+  let unitIndex = 0;
+  let value = bytes;
+  while (value > 1000 && unitIndex < units.length) {
+    value /= 1000;
+    unitIndex++;
+  }
+  let result = formatNumber(value).padStart(10) + ' ' + units[unitIndex];
+  if (total !== void 0 && total != 0) {
+    result += PERCENT(bytes, total).padStart(5);
+  }
+  return result;
+}
+
+function PERCENT(value, total) {
+  return Math.round(value / total * 100) + "%";
+}
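+
+// Rough examples of the helpers above (hand-traced assumptions, not logged
+// output; exact strings depend on the de-CH locale and the width-10 padding):
+//   BYTES(2500)   => roughly "      2.50 kB"
+//   PERCENT(1, 3) => "33%"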
+
+// ===========================================================================
+const kNoTimeMetrics = {
+  __proto__: null,
+  executionDuration: 0,
+  firstEventTimestamp: 0,
+  firstParseEventTimestamp: 0,
+  lastParseEventTimestamp: 0,
+  lastEventTimestamp: 0
+};
+
+class CompilationUnit {
+  constructor() {
+    this.isEval = false;
+
+    // Lazily computed properties.
+    this.firstEventTimestamp = -1;
+    this.firstParseEventTimestamp = -1;
+    this.firstCompileEventTimestamp = -1;
+    this.lastParseEventTimestamp = -1;
+    this.lastEventTimestamp = -1;
+    this.deserializationTimestamp = -1;
+
+    this.preparseTimestamp = -1;
+    this.parseTimestamp = -1;
+    this.parse2Timestamp = -1;
+    this.resolutionTimestamp = -1;
+    this.compileTimestamp = -1;
+    this.lazyCompileTimestamp = -1;
+    this.executionTimestamp = -1;
+    this.optimizationTimestamp = -1;
+
+    this.deserializationDuration = -0.0;
+    this.preparseDuration = -0.0;
+    this.parseDuration = -0.0;
+    this.parse2Duration = -0.0;
+    this.resolutionDuration = -0.0;
+    this.scopeResolutionDuration = -0.0;
+    this.lazyCompileDuration = -0.0;
+    this.compileDuration = -0.0;
+    this.optimizeDuration = -0.0;
+
+    this.ownBytes = -1;
+    this.compilationCacheHits = [];
+  }
+
+  finalize() {
+    this.firstEventTimestamp = this.timestampMin(
+        this.deserializationTimestamp, this.parseTimestamp,
+        this.preparseTimestamp, this.resolutionTimestamp,
+        this.executionTimestamp);
+
+    this.firstParseEventTimestamp = this.timestampMin(
+        this.deserializationTimestamp, this.parseTimestamp,
+        this.preparseTimestamp, this.resolutionTimestamp);
+
+    this.firstCompileEventTimestamp = this.rawTimestampMin(
+        this.deserializationTimestamp, this.compileTimestamp,
+        this.lazyCompileTimestamp);
+    // Any executed script needs to be compiled.
+    if (this.hasBeenExecuted() &&
+        (this.firstCompileEventTimestamp <= 0 ||
+         this.executionTimestamp < this.firstCompileEventTimestamp)) {
+      console.error('Compile < execution timestamp', this);
+    }
+
+    if (this.ownBytes < 0) console.error(this, 'Own bytes must be positive');
+  }
+
+  hasBeenExecuted() {
+    return this.executionTimestamp > 0;
+  }
+
+  addCompilationCacheHit(timestamp) {
+    this.compilationCacheHits.push(timestamp);
+  }
+
+  // Returns the smallest timestamp from the given list, ignoring
+  // uninitialized (-1) values.
+  rawTimestampMin(...timestamps) {
+    timestamps = timestamps.length == 1 ? timestamps[0] : timestamps;
+    let result = timestamps.reduce((min, item) => {
+      return item == -1 ? min : (min == -1 ? item : Math.min(min, item));
+    }, -1);
+    return result;
+  }
+  timestampMin(...timestamps) {
+    let result = this.rawTimestampMin(...timestamps);
+    if (Number.isNaN(result) || result < 0) {
+      console.error(
+          'Invalid timestamp min:', {result, timestamps, script: this});
+      return 0;
+    }
+    return result;
+  }
+
+  timestampMax(...timestamps) {
+    timestamps = timestamps.length == 1 ? timestamps[0] : timestamps;
+    let result = Math.max(...timestamps);
+    if (Number.isNaN(result) || result < 0) {
+      console.error(
+          'Invalid timestamp max:', {result, timestamps, script: this});
+      return 0;
+    }
+    return result;
+  }
+}
+
+// ===========================================================================
+class Script extends CompilationUnit {
+  constructor(id) {
+    super();
+    if (id === void 0 || id <= 0) {
+      throw new Error(`Invalid id=${id} for script`);
+    }
+    this.file = '';
+    this.id = id;
+
+    this.isNative = false;
+    this.isBackgroundCompiled = false;
+    this.isStreamingCompiled = false;
+
+    this.funktions = [];
+    this.metrics = new Map();
+    this.maxNestingLevel = 0;
+
+    this.width = 0;
+    this.bytesTotal = -1;
+    this.finalized = false;
+    this.summary = '';
+    this.source = '';
+  }
+
+  setFile(name) {
+    this.file = name;
+    this.isNative = name.startsWith('native ');
+  }
+
+  isEmpty() {
+    return this.funktions.length === 0;
+  }
+
+  getFunktionAtStartPosition(start) {
+    if (!this.isEval && start === 0) {
+      throw 'position 0 is reserved for the script';
+    }
+    if (this.finalized) {
+      return this.funktions.find(funktion => funktion.start == start);
+    }
+    return this.funktions[start];
+  }
+
+  // Return the innermost function at the given source position.
+  getFunktionForPosition(position) {
+    if (!this.finalized) throw 'Incomplete script';
+    for (let i = this.funktions.length - 1; i >= 0; i--) {
+      let funktion = this.funktions[i];
+      if (funktion.containsPosition(position)) return funktion;
+    }
+    return undefined;
+  }
+
+  addMissingFunktions(list) {
+    if (this.finalized) throw 'script is finalized!';
+    list.forEach(fn => {
+      if (this.funktions[fn.start] === void 0) {
+        this.addFunktion(fn);
+      }
+    });
+  }
+
+  addFunktion(fn) {
+    if (this.finalized) throw 'script is finalized!';
+    if (fn.start === void 0) throw "Funktion has no start position";
+    if (this.funktions[fn.start] !== void 0) {
+      fn.print();
+      throw "adding same function twice to script";
+    }
+    this.funktions[fn.start] = fn;
+  }
+
+  finalize() {
+    this.finalized = true;
+    // Compact funktions as we no longer need access via start byte position.
+    this.funktions = this.funktions.filter(each => true);
+    let parent = null;
+    let maxNesting = 0;
+    // Iterate over the Funktions in byte position order.
+    this.funktions.forEach(fn => {
+      fn.isEval = this.isEval;
+      if (parent === null) {
+        parent = fn;
+      } else {
+        // Walk up the nested chain of Funktions to find the parent.
+        while (parent !== null && !fn.isNestedIn(parent)) {
+          parent = parent.parent;
+        }
+        fn.parent = parent;
+        if (parent) {
+          maxNesting = Math.max(maxNesting, parent.addNestedFunktion(fn));
+        }
+        parent = fn;
+      }
+    });
+    // Sanity checks to ensure that the script is executed and parsed before
+    // any of its funktions.
+    let funktionFirstParseEventTimestamp = -1;
+    // Second iteration step to finalize the funktions once the proper
+    // hierarchy has been set up.
+    this.funktions.forEach(fn => {
+      fn.finalize();
+
+      funktionFirstParseEventTimestamp = this.timestampMin(
+          funktionFirstParseEventTimestamp, fn.firstParseEventTimestamp);
+
+      this.lastParseEventTimestamp = this.timestampMax(
+          this.lastParseEventTimestamp, fn.lastParseEventTimestamp);
+
+      this.lastEventTimestamp =
+          this.timestampMax(this.lastEventTimestamp, fn.lastEventTimestamp);
+    });
+    this.maxNestingLevel = maxNesting;
+
+    // Initialize sizes.
+    if (this.ownBytes !== -1) throw 'Invalid state';
+    if (this.funktions.length == 0) {
+      this.bytesTotal = this.ownBytes = 0;
+      return;
+    }
+    let toplevelFunktionBytes = this.funktions.reduce(
+        (bytes, each) => bytes + (each.isToplevel() ? each.getBytes() : 0), 0);
+    if (this.isDeserialized || this.isEval || this.isStreamingCompiled) {
+      if (this.getBytes() === -1) {
+        this.bytesTotal = toplevelFunktionBytes;
+      }
+    }
+    this.ownBytes = this.bytesTotal - toplevelFunktionBytes;
+    // Initialize common properties.
+    super.finalize();
+    // Sanity checks after the minimum timestamps have been computed.
+    if (funktionFirstParseEventTimestamp < this.firstParseEventTimestamp) {
+      console.error(
+          'invalid firstParseEventTimestamp', this,
+          funktionFirstParseEventTimestamp, this.firstParseEventTimestamp);
+    }
+  }
+
+  print() {
+    console.log(this.toString());
+  }
+
+  toString() {
+    let str = `SCRIPT id=${this.id} file=${this.file}\n` +
+      `functions[${this.funktions.length}]:`;
+    this.funktions.forEach(fn => str += fn.toString());
+    return str;
+  }
+
+  getBytes() {
+    return this.bytesTotal;
+  }
+
+  getOwnBytes() {
+    return this.ownBytes;
+  }
+
+  // Also see Funktion.prototype.getMetricBytes
+  getMetricBytes(name) {
+    if (name == 'lazyCompileTimestamp') return this.getOwnBytes();
+    return this.getOwnBytes();
+  }
+
+  getMetricDuration(name) {
+    return this[name];
+  }
+
+  forEach(fn) {
+    fn(this);
+    this.funktions.forEach(fn);
+  }
+
+  // Container helper for TotalScript / Script.
+  getScripts() {
+    return [this];
+  }
+
+  calculateMetrics(printSummary) {
+    let log = (str) => this.summary += str + '\n';
+    log(`SCRIPT: ${this.id}`);
+    let all = this.funktions;
+    if (all.length === 0) return;
+
+    let nofFunktions = all.length;
+    let ownBytesSum = list => {
+      return list.reduce((bytes, each) => bytes + each.getOwnBytes(), 0)
+    };
+
+    let info = (name, funktions) => {
+      let ownBytes = ownBytesSum(funktions);
+      let nofPercent = Math.round(funktions.length / nofFunktions * 100);
+      let value = (funktions.length + "").padStart(6) +
+        (nofPercent + "%").padStart(5) +
+        BYTES(ownBytes, this.bytesTotal).padStart(10);
+      log((`  - ${name}`).padEnd(20) + value);
+      this.metrics.set(name + "-bytes", ownBytes);
+      this.metrics.set(name + "-count", funktions.length);
+      this.metrics.set(name + "-count-percent", nofPercent);
+      this.metrics.set(name + "-bytes-percent",
+        Math.round(ownBytes / this.bytesTotal * 100));
+    };
+
+    log(`  - file:         ${this.file}`);
+    log('  - details:      ' +
+        'isEval=' + this.isEval + ' deserialized=' + this.isDeserialized +
+        ' streamed=' + this.isStreamingCompiled);
+    info("scripts", this.getScripts());
+    info("functions", all);
+    info("toplevel fn", all.filter(each => each.isToplevel()));
+    info('preparsed', all.filter(each => each.preparseDuration > 0));
+
+    info('fully parsed', all.filter(each => each.parseDuration > 0));
+    // info("fn parsed", all.filter(each => each.parse2Duration > 0));
+    // info("resolved", all.filter(each => each.resolutionDuration > 0));
+    info("executed", all.filter(each => each.executionTimestamp > 0));
+    info('forEval', all.filter(each => each.isEval));
+    info("lazy compiled", all.filter(each => each.lazyCompileTimestamp > 0));
+    info("eager compiled", all.filter(each => each.compileTimestamp > 0));
+
+    let parsingCost =
+        new ExecutionCost('parse', all, each => each.parseDuration);
+    parsingCost.setMetrics(this.metrics);
+    log(parsingCost.toString());
+
+    let preParsingCost =
+        new ExecutionCost('preparse', all, each => each.preparseDuration);
+    preParsingCost.setMetrics(this.metrics);
+    log(preParsingCost.toString());
+
+    let resolutionCost =
+        new ExecutionCost('resolution', all, each => each.resolutionDuration);
+    resolutionCost.setMetrics(this.metrics);
+    log(resolutionCost.toString());
+
+    let nesting = new NestingDistribution(all);
+    nesting.setMetrics(this.metrics);
+    log(nesting.toString());
+
+    if (printSummary) console.log(this.summary);
+  }
+
+  getAccumulatedTimeMetrics(
+      metrics, start, end, delta, cumulative = true, useDuration = false) {
+    // Returns an array of the following format:
+    // [ [start,         acc(metric0, start, start), acc(metric1, ...), ...],
+    //   [start+delta,   acc(metric0, start, start+delta), ...],
+    //   [start+delta*2, acc(metric0, start, start+delta*2), ...],
+    //   ...
+    // ]
+    if (end <= start) throw `Invalid ranges [${start},${end}]`;
+    const timespan = end - start;
+    const kSteps = Math.ceil(timespan / delta);
+    // To reduce the time spent iterating over the funktions of this script
+    // we iterate once over all funktions and add the metric changes to each
+    // timepoint:
+    // [ [0, 300, ...], [1,  15, ...], [2, 100, ...], [3,   0, ...] ... ]
+    // In a second step we accumulate all values:
+    // [ [0, 300, ...], [1, 315, ...], [2, 415, ...], [3, 415, ...] ... ]
+    //
+    // To limit the number of data points required in the resulting graphs,
+    // only the rows for entries with actual changes are created.
+
+    const metricProperties = ["time"];
+    metrics.forEach(each => {
+      metricProperties.push(each + 'Timestamp');
+      if (useDuration) metricProperties.push(each + 'Duration');
+    });
+    // Create a packed {rowTemplate} which is copied later-on.
+    let indexToTime = (t) => (start + t * delta) / kSecondsToMillis;
+    let rowTemplate = [indexToTime(0)];
+    for (let i = 1; i < metricProperties.length; i++) rowTemplate.push(0.0);
+    // Create rows with 0-time entry.
+    let rows = [rowTemplate.slice()];
+    for (let t = 1; t <= kSteps; t++) rows.push(null);
+    // Create the real metric's property name on the Funktion object.
+    // Add the increments of each Funktion's metric to the result.
+    this.forEach(funktionOrScript => {
+      // Iterate over the Funktion's metric names, skipping position 0 which
+      // is the time.
+      const kMetricIncrement = useDuration ? 2 : 1;
+      for (let i = 1; i < metricProperties.length; i += kMetricIncrement) {
+        let timestampPropertyName = metricProperties[i];
+        let timestamp = funktionOrScript[timestampPropertyName];
+        if (timestamp === void 0) continue;
+        if (timestamp < start || end < timestamp) continue;
+        timestamp -= start;
+        let index = Math.floor(timestamp / delta);
+        let row = rows[index];
+        if (row === null) {
+          // Add a new row if it didn't exist yet ...
+          row = rows[index] = rowTemplate.slice();
+          // ... and add the time offset.
+          row[0] = indexToTime(index);
+        }
+        // Add the metric value.
+        row[i] += funktionOrScript.getMetricBytes(timestampPropertyName);
+        if (!useDuration) continue;
+        let durationPropertyName = metricProperties[i + 1];
+        row[i + 1] += funktionOrScript.getMetricDuration(durationPropertyName);
+      }
+    });
+    // Create a packed array again with only the valid entries.
+    // Accumulate the incremental results by adding the metric values from
+    // the previous time window.
+    let previous = rows[0];
+    let result = [previous];
+    for (let t = 1; t < rows.length; t++) {
+      let current = rows[t];
+      if (current === null) {
+        // Ensure a zero data-point after each non-zero point.
+        if (!cumulative && rows[t - 1] !== null) {
+          let duplicate = rowTemplate.slice();
+          duplicate[0] = indexToTime(t);
+          result.push(duplicate);
+        }
+        continue;
+      }
+      if (cumulative) {
+        // Skip i==0 where the corresponding time value in seconds is.
+        for (let i = 1; i < metricProperties.length; i++) {
+          current[i] += previous[i];
+        }
+      }
+      // Make sure we have a data-point in time right before the current one.
+      if (rows[t - 1] === null) {
+        let duplicate = (!cumulative ? rowTemplate : previous).slice();
+        duplicate[0] = indexToTime(t - 1);
+        result.push(duplicate);
+      }
+      previous = current;
+      result.push(current);
+    }
+    // Make sure there is an entry at the last position to make sure all graphs
+    // have the same width.
+    const lastIndex = rows.length - 1;
+    if (rows[lastIndex] === null) {
+      let duplicate = previous.slice();
+      duplicate[0] = indexToTime(lastIndex);
+      result.push(duplicate);
+    }
+    return result;
+  }
+
+  getFunktionsAtTime(time, delta, metric) {
+    // Returns a list of Funktions whose metric changed in the
+    // [time-delta, time+delta] range.
+    return this.funktions.filter(
+      funktion => funktion.didMetricChange(time, delta, metric));
+  }
+}
+
+
+class TotalScript extends Script {
+  constructor() {
+    super('all files');
+    this.scripts = [];
+  }
+
+  addAllFunktions(script) {
+    // funktions is indexed by byte offset and as such not packed. Add every
+    // Funktion one by one to keep this.funktions packed.
+    script.funktions.forEach(fn => this.funktions.push(fn));
+    this.scripts.push(script);
+    this.bytesTotal += script.bytesTotal;
+  }
+
+  // Iterate over all Scripts and nested Funktions.
+  forEach(fn) {
+    this.scripts.forEach(script => script.forEach(fn));
+  }
+
+  getScripts() {
+    return this.scripts;
+  }
+}
+
+
+// ===========================================================================
+
+class NestingDistribution {
+  constructor(funktions) {
+    // Stores the nof bytes per function nesting level.
+    this.accumulator = [0, 0, 0, 0, 0];
+    // Max nof bytes encountered at any nesting level.
+    this.max = 0;
+    // avg bytes per nesting level.
+    this.avg = 0;
+    this.totalBytes = 0;
+
+    funktions.forEach(each => each.accumulateNestingLevel(this.accumulator));
+    this.max = this.accumulator.reduce((max, each) => Math.max(max, each), 0);
+    this.totalBytes = this.accumulator.reduce((sum, each) => sum + each, 0);
+    for (let i = 0; i < this.accumulator.length; i++) {
+      this.avg += this.accumulator[i] * i;
+    }
+    this.avg /= this.totalBytes;
+  }
+
+  print() {
+    console.log(this.toString())
+  }
+
+  toString() {
+    let ticks = " ▁▂▃▄▅▆▇█";
+    let accString = this.accumulator.reduce((str, each) => {
+      let index = Math.round(each / this.max * (ticks.length - 1));
+      return str + ticks[index];
+    }, '');
+    let percent0 = this.accumulator[0]
+    let percent1 = this.accumulator[1];
+    let percent2plus = this.accumulator.slice(2)
+      .reduce((sum, each) => sum + each, 0);
+    return "  - nesting level:      " +
+      ' avg=' + formatNumber(this.avg) +
+      ' l0=' + PERCENT(percent0, this.totalBytes) +
+      ' l1=' + PERCENT(percent1, this.totalBytes) +
+      ' l2+=' + PERCENT(percent2plus, this.totalBytes) +
+      ' distribution=[' + accString + ']';
+
+  }
+
+  setMetrics(dict) {}
+}
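+// Worked example with hypothetical byte counts: accumulator = [800, 200, 0,
+// 0, 0] yields totalBytes = 1000, max = 800 and avg = (800*0 + 200*1) / 1000
+// = 0.2, i.e. the average nesting level weighted by bytes.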
+
+class ExecutionCost {
+  constructor(prefix, funktions, time_fn) {
+    this.prefix = prefix;
+    // Time spent on executed functions.
+    this.executedCost = 0;
+    // Time spent on not executed functions.
+    this.nonExecutedCost = 0;
+
+    this.executedCost = funktions.reduce((sum, each) => {
+      return sum + (each.hasBeenExecuted() ? time_fn(each) : 0)
+    }, 0);
+    this.nonExecutedCost = funktions.reduce((sum, each) => {
+      return sum + (each.hasBeenExecuted() ? 0 : time_fn(each))
+    }, 0);
+
+  }
+
+  print() {
+    console.log(this.toString())
+  }
+
+  toString() {
+    return (`  - ${this.prefix}-time:`).padEnd(24) +
+      (` executed=${formatNumber(this.executedCost)}ms`).padEnd(20) +
+      " non-executed=" + formatNumber(this.nonExecutedCost) + 'ms';
+  }
+
+  setMetrics(dict) {
+    dict.set('parseMetric', this.executedCost);
+    dict.set('parseMetricNegative', this.nonExecutedCost);
+  }
+}
+
+// ===========================================================================
+
+class Funktion extends CompilationUnit {
+  constructor(name, start, end, script) {
+    super();
+    if (start < 0) throw `invalid start position: ${start}`;
+    if (script.isEval) {
+      if (end < start) throw 'invalid start end positions';
+    } else {
+      if (end <= 0) throw `invalid end position: ${end}`;
+      if (end <= start) throw 'invalid start end positions';
+    }
+
+    this.name = name;
+    this.start = start;
+    this.end = end;
+    this.script = script;
+    this.parent = null;
+    this.nested = [];
+    this.nestingLevel = 0;
+
+    if (script) this.script.addFunktion(this);
+  }
+
+  finalize() {
+    this.lastParseEventTimestamp = Math.max(
+        this.preparseTimestamp + this.preparseDuration,
+        this.parseTimestamp + this.parseDuration,
+        this.resolutionTimestamp + this.resolutionDuration);
+    if (!(this.lastParseEventTimestamp > 0)) this.lastParseEventTimestamp = 0;
+
+    this.lastEventTimestamp =
+        Math.max(this.lastParseEventTimestamp, this.executionTimestamp);
+    if (!(this.lastEventTimestamp > 0)) this.lastEventTimestamp = 0;
+
+    this.ownBytes = this.nested.reduce(
+        (bytes, each) => bytes - each.getBytes(), this.getBytes());
+
+    super.finalize();
+  }
+
+  getMetricBytes(name) {
+    if (name == 'lazyCompileTimestamp') return this.getOwnBytes();
+    return this.getOwnBytes();
+  }
+
+  getMetricDuration(name) {
+    if (name in kNoTimeMetrics) return 0;
+    return this[name];
+  }
+
+  isNestedIn(funktion) {
+    if (this.script != funktion.script) throw "Incompatible script";
+    return funktion.start < this.start && this.end <= funktion.end;
+  }
+
+  isToplevel() {
+    return this.parent === null;
+  }
+
+  containsPosition(position) {
+    return this.start <= position && position <= this.end;
+  }
+
+  accumulateNestingLevel(accumulator) {
+    let value = accumulator[this.nestingLevel] || 0;
+    accumulator[this.nestingLevel] = value + this.getOwnBytes();
+  }
+
+  addNestedFunktion(child) {
+    if (this.script != child.script) throw "Incompatible script";
+    if (child == null) throw "Nesting non child";
+    this.nested.push(child);
+    if (this.nested.length > 1) {
+      // Make sure the nested children don't overlap and have been inserted in
+      // byte start position order.
+      let last = this.nested[this.nested.length - 2];
+      if (last.end > child.start || last.start > child.start ||
+        last.end > child.end || last.start > child.end) {
+        throw "Wrongly nested child added";
+      }
+    }
+    child.nestingLevel = this.nestingLevel + 1;
+    return child.nestingLevel;
+  }
+
+  getBytes() {
+    return this.end - this.start;
+  }
+
+  getOwnBytes() {
+    return this.ownBytes;
+  }
+
+  didMetricChange(time, delta, name) {
+    let value = this[name + 'Timestamp'];
+    return (time - delta) <= value && value <= (time + delta);
+  }
+
+  print() {
+    console.log(this.toString());
+  }
+
+  toString(details = true) {
+    let result = `function${this.name ? ` ${this.name}` : ''}` +
+        `() range=${this.start}-${this.end}`;
+    if (details) result += ` script=${this.script ? this.script.id : 'X'}`;
+    return result;
+  }
+}
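+// Rough example of the size bookkeeping above (hypothetical positions): a
+// Funktion spanning bytes 10-100 with one nested Funktion spanning bytes
+// 20-40 has getBytes() == 90 and, after finalize(), getOwnBytes() == 70.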
+
+
+// ===========================================================================
+
+export const kTimestampFactor = 1000;
+export const kSecondsToMillis = 1000;
+
+function toTimestamp(microseconds) {
+  return microseconds / kTimestampFactor
+}
+
+function startOf(timestamp, time) {
+  let result = toTimestamp(timestamp) - time;
+  if (result < 0) throw "start timestamp cannot be negative";
+  return result;
+}
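+// Example: with kTimestampFactor == 1000 the microsecond log timestamps are
+// converted to milliseconds, so toTimestamp(2500) == 2.5 and
+// startOf(2500, 1.0) == 1.5, i.e. an event that ended at 2.5ms and took
+// 1.0ms started at 1.5ms.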
+
+
+export class ParseProcessor extends LogReader {
+  constructor() {
+    super();
+    this.dispatchTable_ = {
+      // Avoid accidental leaking of __proto__ properties and force this object
+      // to be in dictionary-mode.
+      __proto__: null,
+      // "function",{event type},
+      // {script id},{start position},{end position},{time},{timestamp},
+      // {function name}
+      'function': {
+        parsers: [
+          parseString, parseInt, parseInt, parseInt, parseFloat, parseInt,
+          parseString
+        ],
+        processor: this.processFunctionEvent
+      },
+      // "compilation-cache","hit"|"put",{type},{scriptid},{start position},
+      // {end position},{timestamp}
+      'compilation-cache': {
+        parsers:
+            [parseString, parseString, parseInt, parseInt, parseInt, parseInt],
+        processor: this.processCompilationCacheEvent
+      },
+      'script': {
+        parsers: [parseString, parseInt, parseInt],
+        processor: this.processScriptEvent
+      },
+      // "script-details", {script_id}, {file}, {line}, {column}, {size}
+      'script-details': {
+        parsers: [parseInt, parseString, parseInt, parseInt, parseInt],
+        processor: this.processScriptDetails
+      },
+      'script-source': {
+        parsers: [parseInt, parseString, parseString],
+        processor: this.processScriptSource
+      },
+    };
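+    // Illustrative log lines matching the formats documented above (all
+    // values and quoting are made up):
+    //   function,parse-function,42,10,250,1.25,2500,foo
+    //   compilation-cache,hit,script,42,0,250,2600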
+    this.functionEventDispatchTable_ = {
+      // Avoid accidental leaking of __proto__ properties and force this object
+      // to be in dictionary-mode.
+      __proto__: null,
+      'full-parse': this.processFull.bind(this),
+      'parse-function': this.processParseFunction.bind(this),
+      // TODO(cbruni): make sure arrow functions emit a normal parse-function
+      // event.
+      'parse': this.processParseFunction.bind(this),
+      'parse-script': this.processParseScript.bind(this),
+      'parse-eval': this.processParseEval.bind(this),
+      'preparse-no-resolution': this.processPreparseNoResolution.bind(this),
+      'preparse-resolution': this.processPreparseResolution.bind(this),
+      'first-execution': this.processFirstExecution.bind(this),
+      'compile-lazy': this.processCompileLazy.bind(this),
+      'compile': this.processCompile.bind(this),
+      'compile-eval': this.processCompileEval.bind(this),
+      'optimize-lazy': this.processOptimizeLazy.bind(this),
+      'deserialize': this.processDeserialize.bind(this),
+    };
+
+    this.idToScript = new Map();
+    this.fileToScript = new Map();
+    this.nameToFunction = new Map();
+    this.scripts = [];
+    this.totalScript = new TotalScript();
+    this.firstEventTimestamp = -1;
+    this.lastParseEventTimestamp = -1;
+    this.lastEventTimestamp = -1;
+  }
+
+  print() {
+    console.log("scripts:");
+    this.idToScript.forEach(script => script.print());
+  }
+
+  processString(string) {
+    let end = string.length;
+    let current = 0;
+    let next = 0;
+    let line;
+    let i = 0;
+    let entry;
+    while (current < end) {
+      next = string.indexOf("\n", current);
+      if (next === -1) break;
+      i++;
+      line = string.substring(current, next);
+      current = next + 1;
+      this.processLogLine(line);
+    }
+    this.postProcess();
+  }
+
+  processLogFile(fileName) {
+    this.collectEntries = true;
+    this.lastLogFileName_ = fileName;
+    let line;
+    while (line = readline()) {
+      this.processLogLine(line);
+    }
+    this.postProcess();
+  }
+
+  postProcess() {
+    this.scripts = Array.from(this.idToScript.values())
+      .filter(each => !each.isNative);
+
+    this.scripts.forEach(script => {
+      script.finalize();
+      script.calculateMetrics(false)
+    });
+
+    this.scripts.forEach(script => this.totalScript.addAllFunktions(script));
+    this.totalScript.calculateMetrics(true);
+
+    this.firstEventTimestamp = this.totalScript.timestampMin(
+        this.scripts.map(each => each.firstEventTimestamp));
+    this.lastParseEventTimestamp = this.totalScript.timestampMax(
+        this.scripts.map(each => each.lastParseEventTimestamp));
+    this.lastEventTimestamp = this.totalScript.timestampMax(
+        this.scripts.map(each => each.lastEventTimestamp));
+
+    const series = {
+      firstParseEvent: 'Any Parse Event',
+      parse: 'Parsing',
+      preparse: 'Preparsing',
+      resolution: 'Preparsing with Var. Resolution',
+      lazyCompile: 'Lazy Compilation',
+      compile: 'Eager Compilation',
+      execution: 'First Execution',
+    };
+    let metrics = Object.keys(series);
+    this.totalScript.getAccumulatedTimeMetrics(
+        metrics, 0, this.lastEventTimestamp, 10);
+  }
+
+  processFunctionEvent(
+      eventName, scriptId, startPosition, endPosition, duration, timestamp,
+      functionName) {
+    let handlerFn = this.functionEventDispatchTable_[eventName];
+    if (handlerFn === undefined) {
+      console.error(`Couldn't find handler for function event:${eventName}`);
+      return;
+    }
+    handlerFn(
+        scriptId, startPosition, endPosition, duration, timestamp,
+        functionName);
+  }
+
+  addEntry(entry) {
+    this.entries.push(entry);
+  }
+
+  lookupScript(id) {
+    return this.idToScript.get(id);
+  }
+
+  getOrCreateFunction(
+      scriptId, startPosition, endPosition, duration, timestamp, functionName) {
+    if (scriptId == -1) {
+      return this.lookupFunktionByRange(startPosition, endPosition);
+    }
+    let script = this.lookupScript(scriptId);
+    let funktion = script.getFunktionAtStartPosition(startPosition);
+    if (funktion === void 0) {
+      funktion = new Funktion(functionName, startPosition, endPosition, script);
+    }
+    return funktion;
+  }
+
+  // Iterates over all functions and tries to find matching ones.
+  lookupFunktionsByRange(start, end) {
+    let results = [];
+    this.idToScript.forEach(script => {
+      script.forEach(funktion => {
+        if (funktion.start == start && funktion.end == end) {
+          results.push(funktion);
+        }
+      });
+    });
+    return results;
+  }
+  lookupFunktionByRange(start, end) {
+    let results = this.lookupFunktionsByRange(start, end);
+    if (results.length != 1) throw "Could not find unique function by range";
+    return results[0];
+  }
+
+  processScriptEvent(eventName, scriptId, timestamp) {
+    let script = this.idToScript.get(scriptId);
+    switch (eventName) {
+      case 'create':
+      case 'reserve-id':
+      case 'deserialize': {
+        if (script !== undefined) return;
+        script = new Script(scriptId);
+        this.idToScript.set(scriptId, script);
+        if (eventName == 'deserialize') {
+          script.deserializationTimestamp = toTimestamp(timestamp);
+        }
+        return;
+      }
+      case 'background-compile':
+        if (script.isBackgroundCompiled) {
+          throw 'Cannot background-compile twice';
+        }
+        script.isBackgroundCompiled = true;
+        // TODO(cbruni): remove once backwards compatibility is no longer needed.
+        script.isStreamingCompiled = true;
+        // TODO(cbruni): fix parse events for background compilation scripts
+        script.preparseTimestamp = toTimestamp(timestamp);
+        return;
+      case 'streaming-compile':
+        if (script.isStreamingCompiled) throw 'Cannot stream-compile twice';
+        // TODO(cbruni): remove once backwards compatibility is no longer needed.
+        script.isBackgroundCompiled = true;
+        script.isStreamingCompiled = true;
+        // TODO(cbruni): fix parse events for background compilation scripts
+        script.preparseTimestamp = toTimestamp(timestamp);
+        return;
+      default:
+        console.error(`Unhandled script event: ${eventName}`);
+    }
+  }
+
+  processScriptDetails(scriptId, file, startLine, startColumn, size) {
+    let script = this.lookupScript(scriptId);
+    script.setFile(file);
+  }
+
+  processScriptSource(scriptId, url, source) {
+    let script = this.lookupScript(scriptId);
+    script.source = source;
+  }
+
+  processParseEval(
+      scriptId, startPosition, endPosition, duration, timestamp, functionName) {
+    if (startPosition != 0 && startPosition != -1) {
+      console.error('Invalid start position for parse-eval', arguments);
+    }
+    let script = this.processParseScript(...arguments);
+    script.isEval = true;
+  }
+
+  processFull(
+      scriptId, startPosition, endPosition, duration, timestamp, functionName) {
+    if (startPosition == 0) {
+      // This should only happen for eval.
+      let script = this.lookupScript(scriptId);
+      script.isEval = true;
+      return;
+    }
+    let funktion = this.getOrCreateFunction(...arguments);
+    // TODO(cbruni): this should never happen, emit a different event from the
+    // parser.
+    if (funktion.parseTimestamp > 0) return;
+    funktion.parseTimestamp = startOf(timestamp, duration);
+    funktion.parseDuration = duration;
+  }
+
+  processParseFunction(
+      scriptId, startPosition, endPosition, duration, timestamp, functionName) {
+    let funktion = this.getOrCreateFunction(...arguments);
+    funktion.parseTimestamp = startOf(timestamp, duration);
+    funktion.parseDuration = duration;
+  }
+
+  processParseScript(
+      scriptId, startPosition, endPosition, duration, timestamp, functionName) {
+    // TODO timestamp and duration
+    let script = this.lookupScript(scriptId);
+    let ts = startOf(timestamp, duration);
+    script.parseTimestamp = ts;
+    script.parseDuration = duration;
+    return script;
+  }
+
+  processPreparseResolution(
+      scriptId, startPosition, endPosition, duration, timestamp, functionName) {
+    let funktion = this.getOrCreateFunction(...arguments);
+    // TODO(cbruni): this should never happen, emit different event from the
+    // parser.
+    if (funktion.resolutionTimestamp > 0) return;
+    funktion.resolutionTimestamp = startOf(timestamp, duration);
+    funktion.resolutionDuration = duration;
+  }
+
+  processPreparseNoResolution(
+      scriptId, startPosition, endPosition, duration, timestamp, functionName) {
+    let funktion = this.getOrCreateFunction(...arguments);
+    funktion.preparseTimestamp = startOf(timestamp, duration);
+    funktion.preparseDuration = duration;
+  }
+
+  processFirstExecution(
+      scriptId, startPosition, endPosition, duration, timestamp, functionName) {
+    let script = this.lookupScript(scriptId);
+    if (startPosition === 0) {
+      // undefined = eval fn execution
+      if (script) {
+        script.executionTimestamp = toTimestamp(timestamp);
+      }
+    } else {
+      let funktion = script.getFunktionAtStartPosition(startPosition);
+      if (funktion) {
+        funktion.executionTimestamp = toTimestamp(timestamp);
+      } else {
+        // TODO(cbruni): handle funktions from compilation-cache hits.
+      }
+    }
+  }
+
+  processCompileLazy(
+      scriptId, startPosition, endPosition, duration, timestamp, functionName) {
+    let funktion = this.getOrCreateFunction(...arguments);
+    funktion.lazyCompileTimestamp = startOf(timestamp, duration);
+    funktion.lazyCompileDuration = duration;
+  }
+
+  processCompile(
+      scriptId, startPosition, endPosition, duration, timestamp, functionName) {
+    let script = this.lookupScript(scriptId);
+    if (startPosition === 0) {
+      script.compileTimestamp = startOf(timestamp, duration);
+      script.compileDuration = duration;
+      script.bytesTotal = endPosition;
+      return script;
+    } else {
+      let funktion = script.getFunktionAtStartPosition(startPosition);
+      if (funktion === undefined) {
+        // This should not happen since any funktion has to be parsed first.
+        console.error('processCompile funktion not found', ...arguments);
+        return;
+      }
+      funktion.compileTimestamp = startOf(timestamp, duration);
+      funktion.compileDuration = duration;
+      return funktion;
+    }
+  }
+
+  processCompileEval(
+      scriptId, startPosition, endPosition, duration, timestamp, functionName) {
+    let compilationUnit = this.processCompile(...arguments);
+    compilationUnit.isEval = true;
+  }
+
+  processOptimizeLazy(
+      scriptId, startPosition, endPosition, duration, timestamp, functionName) {
+    let compilationUnit = this.lookupScript(scriptId);
+    if (startPosition > 0) {
+      compilationUnit =
+          compilationUnit.getFunktionAtStartPosition(startPosition);
+      if (compilationUnit === undefined) {
+        // This should not happen since any funktion has to be parsed first.
+        console.error('processOptimizeLazy funktion not found', ...arguments);
+        return;
+      }
+    }
+    compilationUnit.optimizationTimestamp = startOf(timestamp, duration);
+    compilationUnit.optimizationDuration = duration;
+  }
+
+  processDeserialize(
+      scriptId, startPosition, endPosition, duration, timestamp, functionName) {
+    let compilationUnit = this.lookupScript(scriptId);
+    if (startPosition === 0) {
+      compilationUnit.bytesTotal = endPosition;
+    } else {
+      compilationUnit = this.getOrCreateFunction(...arguments);
+    }
+    compilationUnit.deserializationTimestamp = startOf(timestamp, duration);
+    compilationUnit.deserializationDuration = duration;
+  }
+
+  processCompilationCacheEvent(
+      eventType, cacheType, scriptId, startPosition, endPosition, timestamp) {
+    if (eventType !== 'hit') return;
+    let compilationUnit = this.lookupScript(scriptId);
+    if (startPosition > 0) {
+      compilationUnit =
+          compilationUnit.getFunktionAtStartPosition(startPosition);
+    }
+    compilationUnit.addCompilationCacheHit(toTimestamp(timestamp));
+  }
+
+}
+
+
+export class ArgumentsProcessor extends BaseArgumentsProcessor {
+  getArgsDispatch() {
+    return {};
+  }
+
+  getDefaultResults() {
+    return {
+      logFileName: 'v8.log',
+      range: 'auto,auto',
+    };
+  }
+}
diff --git a/src/third_party/v8/tools/perf-compare.py b/src/third_party/v8/tools/perf-compare.py
new file mode 100755
index 0000000..744f6aa
--- /dev/null
+++ b/src/third_party/v8/tools/perf-compare.py
@@ -0,0 +1,486 @@
+#!/usr/bin/env python
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+'''
+python %prog
+
+Compare perf trybot JSON files and output the results into a pleasing HTML page.
+Examples:
+  %prog -t "ia32 results" Result,../result.json Master,/path-to/master.json -o results.html
+  %prog -t "x64 results" ../result.json master.json -o results.html
+'''
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+from collections import OrderedDict
+import json
+import math
+from argparse import ArgumentParser
+import os
+import shutil
+import sys
+import tempfile
+
+PERCENT_CONSIDERED_SIGNIFICANT = 0.5
+PROBABILITY_CONSIDERED_SIGNIFICANT = 0.02
+PROBABILITY_CONSIDERED_MEANINGLESS = 0.05
+
+class Statistics:
+  @staticmethod
+  def Mean(values):
+    return float(sum(values)) / len(values)
+
+  @staticmethod
+  def Variance(values, average):
+    # Return a list (not a lazy map object) so the result also works under Python 3.
+    return [(x - average) ** 2 for x in values]
+
+  @staticmethod
+  def StandardDeviation(values, average):
+    return math.sqrt(Statistics.Mean(Statistics.Variance(values, average)))
+
+  @staticmethod
+  def ComputeZ(baseline_avg, baseline_sigma, mean, n):
+    if baseline_sigma == 0:
+      return 1000.0;
+    return abs((mean - baseline_avg) / (baseline_sigma / math.sqrt(n)))
+
+  # Values from http://www.fourmilab.ch/rpkp/experiments/analysis/zCalc.html
+  @staticmethod
+  def ComputeProbability(z):
+    if z > 2.575829: # p 0.005: two sided < 0.01
+      return 0
+    if z > 2.326348: # p 0.010
+      return 0.01
+    if z > 2.170091: # p 0.015
+      return 0.02
+    if z > 2.053749: # p 0.020
+      return 0.03
+    if z > 1.959964: # p 0.025: two sided < 0.05
+      return 0.04
+    if z > 1.880793: # p 0.030
+      return 0.05
+    if z > 1.811910: # p 0.035
+      return 0.06
+    if z > 1.750686: # p 0.040
+      return 0.07
+    if z > 1.695397: # p 0.045
+      return 0.08
+    if z > 1.644853: # p 0.050: two sided < 0.10
+      return 0.09
+    if z > 1.281551: # p 0.100: two sided < 0.20
+      return 0.10
+    return 0.20 # two sided p >= 0.20
+
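+# Example of how the helpers above combine (numbers are illustrative): for a
+# baseline mean of 1000 with sigma 10 and a new mean of 1015 over n=9 runs,
+# ComputeZ(1000, 10, 1015, 9) == 4.5 and ComputeProbability(4.5) returns 0,
+# i.e. the difference is treated as highly significant.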
+
+class ResultsDiff:
+  def __init__(self, significant, notable, percentage_string):
+    self.significant_ = significant
+    self.notable_ = notable
+    self.percentage_string_ = percentage_string
+
+  def percentage_string(self):
+    return self.percentage_string_;
+
+  def isSignificant(self):
+    return self.significant_
+
+  def isNotablyPositive(self):
+    return self.notable_ > 0
+
+  def isNotablyNegative(self):
+    return self.notable_ < 0
+
+
+class BenchmarkResult:
+  def __init__(self, units, count, result, sigma):
+    self.units_ = units
+    self.count_ = float(count)
+    self.result_ = float(result)
+    self.sigma_ = float(sigma)
+
+  def Compare(self, other):
+    if self.units_ != other.units_:
+      print ("Incompatible units: %s and %s" % (self.units_, other.units_))
+      sys.exit(1)
+
+    significant = False
+    notable = 0
+    percentage_string = ""
+    # compute notability and significance.
+    if self.units_ == "score":
+      compare_num = 100*self.result_/other.result_ - 100
+    else:
+      compare_num = 100*other.result_/self.result_ - 100
+    if abs(compare_num) > 0.1:
+      percentage_string = "%3.1f" % (compare_num)
+      z = Statistics.ComputeZ(other.result_, other.sigma_,
+                              self.result_, self.count_)
+      p = Statistics.ComputeProbability(z)
+      if p < PROBABILITY_CONSIDERED_SIGNIFICANT:
+        significant = True
+      if compare_num >= PERCENT_CONSIDERED_SIGNIFICANT:
+        notable = 1
+      elif compare_num <= -PERCENT_CONSIDERED_SIGNIFICANT:
+        notable = -1
+    return ResultsDiff(significant, notable, percentage_string)
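+  # Example (hypothetical scores): with units == "score", self.result_ == 110
+  # vs. other.result_ == 100 gives compare_num == +10.0, a 10% improvement;
+  # for time-based units the ratio is inverted so that a smaller (faster)
+  # result still shows up as a positive percentage.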
+
+  def result(self):
+    return self.result_
+
+  def sigma(self):
+    return self.sigma_
+
+
+class Benchmark:
+  def __init__(self, name):
+    self.name_ = name
+    self.runs_ = {}
+
+  def name(self):
+    return self.name_
+
+  def getResult(self, run_name):
+    return self.runs_.get(run_name)
+
+  def appendResult(self, run_name, trace):
+    values = list(map(float, trace['results']))
+    count = len(values)
+    mean = Statistics.Mean(values)
+    stddev = float(trace.get('stddev') or
+                   Statistics.StandardDeviation(values, mean))
+    units = trace["units"]
+    # print run_name, units, count, mean, stddev
+    self.runs_[run_name] = BenchmarkResult(units, count, mean, stddev)
+
+
+class BenchmarkSuite:
+  def __init__(self, name):
+    self.name_ = name
+    self.benchmarks_ = {}
+
+  def SortedTestKeys(self):
+    keys = sorted(self.benchmarks_.keys())
+    t = "Total"
+    if t in keys:
+      keys.remove(t)
+      keys.append(t)
+    return keys
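+  # Example: keys ["Richards", "Total", "DeltaBlue"] sort to ["DeltaBlue",
+  # "Richards"] with "Total" re-appended at the end, so the "Total" row is
+  # always rendered last within a suite.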
+
+  def name(self):
+    return self.name_
+
+  def getBenchmark(self, benchmark_name):
+    benchmark_object = self.benchmarks_.get(benchmark_name)
+    if benchmark_object == None:
+      benchmark_object = Benchmark(benchmark_name)
+      self.benchmarks_[benchmark_name] = benchmark_object
+    return benchmark_object
+
+
+class ResultTableRenderer:
+  def __init__(self, output_file):
+    self.benchmarks_ = []
+    self.print_output_ = []
+    self.output_file_ = output_file
+
+  def Print(self, str_data):
+    self.print_output_.append(str_data)
+
+  def FlushOutput(self):
+    string_data = "\n".join(self.print_output_)
+    print_output = []
+    if self.output_file_:
+      # create a file
+      with open(self.output_file_, "w") as text_file:
+        text_file.write(string_data)
+    else:
+      print(string_data)
+
+  def bold(self, data):
+    return "<b>%s</b>" % data
+
+  def red(self, data):
+    return "<font color=\"red\">%s</font>" % data
+
+
+  def green(self, data):
+    return "<font color=\"green\">%s</font>" % data
+
+  def PrintHeader(self):
+    data = """<html>
+<head>
+<title>Output</title>
+<style type="text/css">
+/*
+Style inspired by Andy Ferra's gist at https://gist.github.com/andyferra/2554919
+*/
+body {
+  font-family: Helvetica, arial, sans-serif;
+  font-size: 14px;
+  line-height: 1.6;
+  padding-top: 10px;
+  padding-bottom: 10px;
+  background-color: white;
+  padding: 30px;
+}
+h1, h2, h3, h4, h5, h6 {
+  margin: 20px 0 10px;
+  padding: 0;
+  font-weight: bold;
+  -webkit-font-smoothing: antialiased;
+  cursor: text;
+  position: relative;
+}
+h1 {
+  font-size: 28px;
+  color: black;
+}
+
+h2 {
+  font-size: 24px;
+  border-bottom: 1px solid #cccccc;
+  color: black;
+}
+
+h3 {
+  font-size: 18px;
+}
+
+h4 {
+  font-size: 16px;
+}
+
+h5 {
+  font-size: 14px;
+}
+
+h6 {
+  color: #777777;
+  font-size: 14px;
+}
+
+p, blockquote, ul, ol, dl, li, table, pre {
+  margin: 15px 0;
+}
+
+li p.first {
+  display: inline-block;
+}
+
+ul, ol {
+  padding-left: 30px;
+}
+
+ul :first-child, ol :first-child {
+  margin-top: 0;
+}
+
+ul :last-child, ol :last-child {
+  margin-bottom: 0;
+}
+
+table {
+  padding: 0;
+}
+
+table tr {
+  border-top: 1px solid #cccccc;
+  background-color: white;
+  margin: 0;
+  padding: 0;
+}
+
+table tr:nth-child(2n) {
+  background-color: #f8f8f8;
+}
+
+table tr th {
+  font-weight: bold;
+  border: 1px solid #cccccc;
+  text-align: left;
+  margin: 0;
+  padding: 6px 13px;
+}
+table tr td {
+  border: 1px solid #cccccc;
+  text-align: right;
+  margin: 0;
+  padding: 6px 13px;
+}
+table tr td.name-column {
+  text-align: left;
+}
+table tr th :first-child, table tr td :first-child {
+  margin-top: 0;
+}
+table tr th :last-child, table tr td :last-child {
+  margin-bottom: 0;
+}
+</style>
+</head>
+<body>
+"""
+    self.Print(data)
+
+  def StartSuite(self, suite_name, run_names):
+    self.Print("<h2>")
+    self.Print("<a name=\"%s\">%s</a> <a href=\"#top\">(top)</a>" %
+               (suite_name, suite_name))
+    self.Print("</h2>");
+    self.Print("<table class=\"benchmark\">")
+    self.Print("<thead>")
+    self.Print("  <th>Test</th>")
+    main_run = None
+    for run_name in run_names:
+      self.Print("  <th>%s</th>" % run_name)
+      if main_run == None:
+        main_run = run_name
+      else:
+        self.Print("  <th>%</th>")
+    self.Print("</thead>")
+    self.Print("<tbody>")
+
+
+  def FinishSuite(self):
+    self.Print("</tbody>")
+    self.Print("</table>")
+
+
+  def StartBenchmark(self, benchmark_name):
+    self.Print("  <tr>")
+    self.Print("    <td class=\"name-column\">%s</td>" % benchmark_name)
+
+  def FinishBenchmark(self):
+    self.Print("  </tr>")
+
+
+  def PrintResult(self, run):
+    if run == None:
+      self.PrintEmptyCell()
+      return
+    self.Print("    <td>%3.1f</td>" % run.result())
+
+
+  def PrintComparison(self, run, main_run):
+    if run == None or main_run == None:
+      self.PrintEmptyCell()
+      return
+    diff = run.Compare(main_run)
+    res = diff.percentage_string()
+    if diff.isSignificant():
+      res = self.bold(res)
+    if diff.isNotablyPositive():
+      res = self.green(res)
+    elif diff.isNotablyNegative():
+      res = self.red(res)
+    self.Print("    <td>%s</td>" % res)
+
+
+  def PrintEmptyCell(self):
+    self.Print("    <td></td>")
+
+
+  def StartTOC(self, title):
+    self.Print("<h1>%s</h1>" % title)
+    self.Print("<ul>")
+
+  def FinishTOC(self):
+    self.Print("</ul>")
+
+  def PrintBenchmarkLink(self, benchmark):
+    self.Print("<li><a href=\"#" + benchmark + "\">" + benchmark + "</a></li>")
+
+  def PrintFooter(self):
+    data = """</body>
+</html>
+"""
+    self.Print(data)
+
+
+def Render(args):
+  benchmark_suites = {}
+  run_names = OrderedDict()
+
+  for json_file_list in args.json_file_list:
+    run_name = json_file_list[0]
+    if run_name.endswith(".json"):
+      # The first item in the list is also a file name
+      run_name = os.path.splitext(run_name)[0]
+      filenames = json_file_list
+    else:
+      filenames = json_file_list[1:]
+
+    for filename in filenames:
+      print ("Processing result set \"%s\", file: %s" % (run_name, filename))
+      with open(filename) as json_data:
+        data = json.load(json_data)
+
+      run_names[run_name] = 0
+
+      for error in data["errors"]:
+        print("Error:", error)
+
+      for trace in data["traces"]:
+        suite_name = trace["graphs"][0]
+        benchmark_name = "/".join(trace["graphs"][1:])
+
+        benchmark_suite_object = benchmark_suites.get(suite_name)
+        if benchmark_suite_object == None:
+          benchmark_suite_object = BenchmarkSuite(suite_name)
+          benchmark_suites[suite_name] = benchmark_suite_object
+
+        benchmark_object = benchmark_suite_object.getBenchmark(benchmark_name)
+        benchmark_object.appendResult(run_name, trace);
+
+
+  renderer = ResultTableRenderer(args.output)
+  renderer.PrintHeader()
+
+  title = args.title or "Benchmark results"
+  renderer.StartTOC(title)
+  for suite_name, benchmark_suite_object in sorted(benchmark_suites.items()):
+    renderer.PrintBenchmarkLink(suite_name)
+  renderer.FinishTOC()
+
+  for suite_name, benchmark_suite_object in sorted(benchmark_suites.items()):
+    renderer.StartSuite(suite_name, run_names)
+    for benchmark_name in benchmark_suite_object.SortedTestKeys():
+      benchmark_object = benchmark_suite_object.getBenchmark(benchmark_name)
+      # print suite_name, benchmark_object.name()
+
+      renderer.StartBenchmark(benchmark_name)
+      main_run = None
+      main_result = None
+      for run_name in run_names:
+        result = benchmark_object.getResult(run_name)
+        renderer.PrintResult(result)
+        if main_run == None:
+          main_run = run_name
+          main_result = result
+        else:
+          renderer.PrintComparison(result, main_result)
+      renderer.FinishBenchmark()
+    renderer.FinishSuite()
+
+  renderer.PrintFooter()
+  renderer.FlushOutput()
+
+def CommaSeparatedList(arg):
+  return [x for x in arg.split(',')]
+
+if __name__ == '__main__':
+  parser = ArgumentParser(description="Compare perf trybot JSON files and " +
+                          "output the results into a pleasing HTML page.")
+  parser.add_argument("-t", "--title", dest="title",
+                      help="Optional title of the web page")
+  parser.add_argument("-o", "--output", dest="output",
+                      help="Write html output to this file rather than stdout")
+  parser.add_argument("json_file_list", nargs="+", type=CommaSeparatedList,
+                      help="[column name,]./path-to/result.json - a comma-separated" +
+                      " list of optional column name and paths to json files")
+
+  args = parser.parse_args()
+  Render(args)
diff --git a/src/third_party/v8/tools/perf/statistics-for-json.R b/src/third_party/v8/tools/perf/statistics-for-json.R
new file mode 100644
index 0000000..b731ccc
--- /dev/null
+++ b/src/third_party/v8/tools/perf/statistics-for-json.R
@@ -0,0 +1,113 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Do statistical tests on benchmark results
+# This script requires the libraries rjson, R.utils, ggplot2 and data.table
+# Install them prior to running
+
+# To use the script, first get some benchmark results, for example via
+# tools/run_perf.py ../v8-perf/benchmarks/Octane2.1/Octane2.1-TF.json
+#  --outdir=out/x64.release-on --outdir-secondary=out/x64.release-off
+# --json-test-results=results-on.json
+# --json-test-results-secondary=results-off.json
+# then run this script
+# Rscript statistics-for-json.R results-on.json results-off.json ~/SVG
+# to produce graphs (and get stdio output of statistical tests).
+
+
+suppressMessages(library("rjson"))       # for fromJson
+suppressMessages(library("R.utils"))     # for printf
+suppressMessages(library("ggplot2"))     # for plotting
+suppressMessages(library("data.table"))  # less broken than data.frame
+
+# Clear all variables from environment
+rm(list=ls())
+
+args <- commandArgs(TRUE)
+if (length(args) != 3) {
+  printf(paste("usage: Rscript %%this_script patched-results.json",
+               "unpatched-results.json\n"))
+} else {
+  patch <- fromJSON(file=args[1])
+  nopatch <- fromJSON(file=args[2])
+  outputPath <- args[3]
+  df <- data.table(L = numeric(), R = numeric(), E = numeric(), 
+                   p.value = numeric(), yL = character(), 
+                   p.value.sig = logical())
+  
+  for (i in seq(1, length(patch$traces))) {
+    testName <- patch$traces[[i]]$graphs[[2]]
+    printf("%s\n", testName)
+    
+    nopatch_res <- as.integer(nopatch$traces[[i]]$results)
+    patch_res <- as.integer(patch$traces[[i]]$results)
+    if (length(nopatch_res) > 0) {
+      patch_norm <- shapiro.test(patch_res);
+      nopatch_norm <- shapiro.test(nopatch_res);
+
+      # The Shapiro-Wilk test indicates whether the data is unlikely to
+      # come from a normal distribution. The p-value is the probability of
+      # obtaining such a sample under the assumption of normality; the
+      # smaller p, the more likely the sample was not drawn from a normal
+      # distribution. See [wikipedia:Shapiro-Wilk-Test].
+      printf("  Patched scores look %s distributed (W=%.4f, p=%.4f)\n", 
+             ifelse(patch_norm$p.value < 0.05, "not normally", "normally"), 
+             patch_norm$statistic, patch_norm$p.value);
+      printf("  Unpatched scores look %s distributed (W=%.4f, p=%.4f)\n", 
+             ifelse(nopatch_norm$p.value < 0.05, "not normally", "normally"), 
+             nopatch_norm$statistic, nopatch_norm$p.value);
+      
+      hist <- ggplot(data=data.frame(x=as.integer(patch_res)), aes(x)) +
+        theme_bw() + 
+        geom_histogram(bins=50) +
+        ylab("Points") +
+        xlab(patch$traces[[i]]$graphs[[2]])
+      ggsave(filename=sprintf("%s/%s.svg", outputPath, testName), 
+             plot=hist, width=7, height=7)
+      
+      hist <- ggplot(data=data.frame(x=as.integer(nopatch_res)), aes(x)) +
+        theme_bw() + 
+        geom_histogram(bins=50) +
+        ylab("Points") +
+        xlab(patch$traces[[i]]$graphs[[2]])
+      ggsave(filename=sprintf("%s/%s-before.svg", outputPath, testName), 
+             plot=hist, width=7, height=7)
+      
+      # The Wilcoxon rank-sum test 
+      mww <- wilcox.test(patch_res, nopatch_res, conf.int = TRUE, exact=TRUE)
+      printf(paste("  Wilcoxon U-test W=%.4f, p=%.4f,",
+                   "confidence interval [%.1f, %.1f],",
+                   "est. effect size %.1f \n"),
+                   mww$statistic, mww$p.value,
+                   mww$conf.int[1], mww$conf.int[2], mww$estimate);
+      df <-rbind(df, list(mww$conf.int[1], mww$conf.int[2], 
+                          unname(mww$estimate), unname(mww$p.value),
+                          testName, ifelse(mww$p.value < 0.05, TRUE, FALSE)))
+      # t-test
+      t <- t.test(patch_res, nopatch_res, paired=FALSE)
+      printf(paste("  Welch t-test t=%.4f, df = %.2f, p=%.4f,",
+                   "confidence interval [%.1f, %.1f], mean diff %.1f \n"),
+             t$statistic, t$parameter, t$p.value, 
+             t$conf.int[1], t$conf.int[2], t$estimate[1]-t$estimate[2]);
+    }
+  }
+  df2 <- cbind(x=1:nrow(df), df[order(E),])
+  speedup <- ggplot(df2, aes(x = x, y = E, colour=p.value.sig)) +
+    geom_errorbar(aes(ymax = L, ymin = R), colour="black") +
+    geom_point(size = 4) +
+    scale_x_discrete(limits=df2$yL,
+                       name=paste("Benchmark, n=", length(patch_res))) +
+    theme_bw() +
+    geom_hline(yintercept = 0) +
+    ylab("Est. Effect Size in Points") +
+    theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust=0.5)) +
+    theme(legend.position = "bottom") +
+    scale_colour_manual(name="Statistical Significance (MWW, p < 0.05)",
+                          values=c("red", "green"),
+                          labels=c("not significant", "significant")) +
+    theme(legend.justification=c(0,1), legend.position=c(0,1))
+  print(speedup)
+  ggsave(filename=sprintf("%s/speedup-estimates.svg", outputPath), 
+         plot=speedup, width=7, height=7)
+}
diff --git a/src/third_party/v8/tools/predictable_wrapper.py b/src/third_party/v8/tools/predictable_wrapper.py
new file mode 100644
index 0000000..ad5adf7
--- /dev/null
+++ b/src/third_party/v8/tools/predictable_wrapper.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Wrapper script for verify-predictable mode. D8 is expected to be compiled with
+v8_enable_verify_predictable.
+
+The actual test command is expected to be passed to this wrapper as is. E.g.:
+predictable_wrapper.py path/to/d8 --test --predictable --flag1 --flag2
+
+The command is run up to three times and the printed allocation hash is
+compared. Differences are reported as errors.
+"""
+
+
+# for py2/py3 compatibility
+from __future__ import absolute_import
+from __future__ import print_function
+
+import sys
+
+from testrunner.local import command
+from testrunner.local import utils
+
+
+MAX_TRIES = 3
+TIMEOUT = 120
+
+# Predictable mode works only when run on the host os.
+command.setup(utils.GuessOS(), None)
+
+def main(args):
+  def allocation_str(stdout):
+    for line in reversed((stdout or '').splitlines()):
+      if line.startswith('### Allocations = '):
+        return line
+    return None
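+  # Only this allocation summary line is compared across runs; everything
+  # after the '### Allocations = ' prefix is treated as an opaque string, so
+  # the exact counters printed by d8 do not matter here.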
+
+  cmd = command.Command(
+      args[0], args[1:], timeout=TIMEOUT, handle_sigterm=True)
+
+  previous_allocations = None
+  for run in range(1, MAX_TRIES + 1):
+    print('### Predictable run #%d' % run)
+    output = cmd.execute()
+    if output.stdout:
+      print('### Stdout:')
+      print(output.stdout)
+    if output.stderr:
+      print('### Stderr:')
+      print(output.stderr)
+    print('### Return code: %s' % output.exit_code)
+    if output.HasTimedOut():
+      # If we get a timeout in any run, we are in an unpredictable state. Just
+      # report it as a failure and don't rerun.
+      print('### Test timed out')
+      return 1
+    allocations = allocation_str(output.stdout)
+    if not allocations:
+      print ('### Test had no allocation output. Ensure this is built '
+             'with v8_enable_verify_predictable and that '
+             '--verify-predictable is passed at the cmd line.')
+      return 2
+    if previous_allocations and previous_allocations != allocations:
+      print('### Allocations differ')
+      return 3
+    if run >= MAX_TRIES:
+      # No difference on the last run -> report a success.
+      return 0
+    previous_allocations = allocations
+  # Unreachable.
+  assert False
+
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv[1:]))
diff --git a/src/third_party/v8/tools/profile.js b/src/third_party/v8/tools/profile.js
new file mode 100644
index 0000000..5007682
--- /dev/null
+++ b/src/third_party/v8/tools/profile.js
@@ -0,0 +1,1172 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// TODO: move to separate modules
+class SourcePosition {
+  constructor(script, line, column) {
+    this.script = script;
+    this.line = line;
+    this.column = column;
+    this.entries = [];
+  }
+  addEntry(entry) {
+    this.entries.push(entry);
+  }
+}
+
+class Script {
+
+  constructor(id, name, source) {
+    this.id = id;
+    this.name = name;
+    this.source = source;
+    this.sourcePositions = [];
+    // Map<line, Map<column, SourcePosition>>
+    this.lineToColumn = new Map();
+  }
+
+  addSourcePosition(line, column, entry) {
+    let sourcePosition = this.lineToColumn.get(line)?.get(column);
+    if (sourcePosition === undefined) {
+      sourcePosition = new SourcePosition(this, line, column);
+      this.#addSourcePosition(line, column, sourcePosition);
+    }
+    sourcePosition.addEntry(entry);
+    return sourcePosition;
+  }
+
+  #addSourcePosition(line, column, sourcePosition) {
+    let columnToSourcePosition;
+    if (this.lineToColumn.has(line)) {
+      columnToSourcePosition = this.lineToColumn.get(line);
+    } else {
+      columnToSourcePosition = new Map();
+      this.lineToColumn.set(line, columnToSourcePosition);
+    }
+    this.sourcePositions.push(sourcePosition);
+    columnToSourcePosition.set(column, sourcePosition);
+  }
+}
+
+/**
+ * Creates a profile object for processing profiling-related events
+ * and calculating function execution times.
+ *
+ * @constructor
+ */
+function Profile() {
+  this.codeMap_ = new CodeMap();
+  this.topDownTree_ = new CallTree();
+  this.bottomUpTree_ = new CallTree();
+  this.c_entries_ = {};
+  this.ticks_ = [];
+  this.scripts_ = [];
+  this.urlToScript_ = new Map();
+};
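+
+// A minimal usage sketch (addresses and names below are illustrative only):
+//   var profile = new Profile();
+//   profile.addFuncCode('LazyCompile', 'foo', 0, 0x1000, 0x80, 0x2000,
+//                       Profile.CodeState.COMPILED);
+//   profile.recordTick(0, 0, [0x1010]);
+//   var flat = profile.getFlatProfile();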
+
+
+/**
+ * Returns whether a function with the specified name must be skipped.
+ * Should be overridden by subclasses.
+ *
+ * @param {string} name Function name.
+ */
+Profile.prototype.skipThisFunction = function (name) {
+  return false;
+};
+
+
+/**
+ * Enum for profiler operations that involve looking up existing
+ * code entries.
+ *
+ * @enum {number}
+ */
+Profile.Operation = {
+  MOVE: 0,
+  DELETE: 1,
+  TICK: 2
+};
+
+
+/**
+ * Enum for code state regarding its dynamic optimization.
+ *
+ * @enum {number}
+ */
+Profile.CodeState = {
+  COMPILED: 0,
+  OPTIMIZABLE: 1,
+  OPTIMIZED: 2
+};
+
+
+/**
+ * Called whenever the specified operation has failed finding a function
+ * containing the specified address. Should be overridden by subclasses.
+ * See the Profile.Operation enum for the list of
+ * possible operations.
+ *
+ * @param {number} operation Operation.
+ * @param {number} addr Address of the unknown code.
+ * @param {number} opt_stackPos If an unknown address is encountered
+ *     during stack trace processing, specifies a position of the frame
+ *     containing the address.
+ */
+Profile.prototype.handleUnknownCode = function (
+  operation, addr, opt_stackPos) {
+};
+
+
+/**
+ * Registers a library.
+ *
+ * @param {string} name Code entry name.
+ * @param {number} startAddr Starting address.
+ * @param {number} endAddr Ending address.
+ */
+Profile.prototype.addLibrary = function (
+  name, startAddr, endAddr) {
+  var entry = new CodeMap.CodeEntry(
+    endAddr - startAddr, name, 'SHARED_LIB');
+  this.codeMap_.addLibrary(startAddr, entry);
+  return entry;
+};
+
+
+/**
+ * Registers statically compiled code entry.
+ *
+ * @param {string} name Code entry name.
+ * @param {number} startAddr Starting address.
+ * @param {number} endAddr Ending address.
+ */
+Profile.prototype.addStaticCode = function (
+  name, startAddr, endAddr) {
+  var entry = new CodeMap.CodeEntry(
+    endAddr - startAddr, name, 'CPP');
+  this.codeMap_.addStaticCode(startAddr, entry);
+  return entry;
+};
+
+
+/**
+ * Registers dynamic (JIT-compiled) code entry.
+ *
+ * @param {string} type Code entry type.
+ * @param {string} name Code entry name.
+ * @param {number} start Starting address.
+ * @param {number} size Code entry size.
+ */
+Profile.prototype.addCode = function (
+  type, name, timestamp, start, size) {
+  var entry = new Profile.DynamicCodeEntry(size, type, name);
+  this.codeMap_.addCode(start, entry);
+  return entry;
+};
+
+
+/**
+ * Registers dynamic (JIT-compiled) code entry.
+ *
+ * @param {string} type Code entry type.
+ * @param {string} name Code entry name.
+ * @param {number} start Starting address.
+ * @param {number} size Code entry size.
+ * @param {number} funcAddr Shared function object address.
+ * @param {Profile.CodeState} state Optimization state.
+ */
+Profile.prototype.addFuncCode = function (
+  type, name, timestamp, start, size, funcAddr, state) {
+  // As code and functions are in the same address space,
+  // it is safe to put them in a single code map.
+  var func = this.codeMap_.findDynamicEntryByStartAddress(funcAddr);
+  if (!func) {
+    func = new Profile.FunctionEntry(name);
+    this.codeMap_.addCode(funcAddr, func);
+  } else if (func.name !== name) {
+    // Function object has been overwritten with a new one.
+    func.name = name;
+  }
+  var entry = this.codeMap_.findDynamicEntryByStartAddress(start);
+  if (entry) {
+    if (entry.size === size && entry.func === func) {
+      // Entry state has changed.
+      entry.state = state;
+    } else {
+      this.codeMap_.deleteCode(start);
+      entry = null;
+    }
+  }
+  if (!entry) {
+    entry = new Profile.DynamicFuncCodeEntry(size, type, func, state);
+    this.codeMap_.addCode(start, entry);
+  }
+  return entry;
+};
+
+
+/**
+ * Reports about moving of a dynamic code entry.
+ *
+ * @param {number} from Current code entry address.
+ * @param {number} to New code entry address.
+ */
+Profile.prototype.moveCode = function (from, to) {
+  try {
+    this.codeMap_.moveCode(from, to);
+  } catch (e) {
+    this.handleUnknownCode(Profile.Operation.MOVE, from);
+  }
+};
+
+Profile.prototype.deoptCode = function (
+  timestamp, code, inliningId, scriptOffset, bailoutType,
+  sourcePositionText, deoptReasonText) {
+};
+
+/**
+ * Reports about deletion of a dynamic code entry.
+ *
+ * @param {number} start Starting address.
+ */
+Profile.prototype.deleteCode = function (start) {
+  try {
+    this.codeMap_.deleteCode(start);
+  } catch (e) {
+    this.handleUnknownCode(Profile.Operation.DELETE, start);
+  }
+};
+
+/**
+ * Adds source positions for given code.
+ */
+Profile.prototype.addSourcePositions = function (
+  start, script, startPos, endPos, sourcePositions, inliningPositions,
+  inlinedFunctions) {
+  // CLI does not need source code => ignore.
+};
+
+/**
+ * Adds script source code.
+ */
+Profile.prototype.addScriptSource = function (id, url, source) {
+  const script = new Script(id, url, source);
+  this.scripts_[id] = script;
+  this.urlToScript_.set(url, script);
+};
+
+
+/**
+ * Returns the script registered for the given URL.
+ */
+Profile.prototype.getScript = function (url) {
+  return this.urlToScript_.get(url);
+};
+
+/**
+ * Reports about moving of a dynamic code entry.
+ *
+ * @param {number} from Current code entry address.
+ * @param {number} to New code entry address.
+ */
+Profile.prototype.moveFunc = function (from, to) {
+  if (this.codeMap_.findDynamicEntryByStartAddress(from)) {
+    this.codeMap_.moveCode(from, to);
+  }
+};
+
+
+/**
+ * Retrieves a code entry by an address.
+ *
+ * @param {number} addr Entry address.
+ */
+Profile.prototype.findEntry = function (addr) {
+  return this.codeMap_.findEntry(addr);
+};
+
+
+/**
+ * Records a tick event. Stack must contain a sequence of
+ * addresses starting with the program counter value.
+ *
+ * @param {Array<number>} stack Stack sample.
+ */
+Profile.prototype.recordTick = function (time_ns, vmState, stack) {
+  var processedStack = this.resolveAndFilterFuncs_(stack);
+  this.bottomUpTree_.addPath(processedStack);
+  processedStack.reverse();
+  this.topDownTree_.addPath(processedStack);
+};
+
+
+/**
+ * Translates addresses into function names and filters unneeded
+ * functions.
+ *
+ * @param {Array<number>} stack Stack sample.
+ */
+Profile.prototype.resolveAndFilterFuncs_ = function (stack) {
+  var result = [];
+  var last_seen_c_function = '';
+  var look_for_first_c_function = false;
+  for (var i = 0; i < stack.length; ++i) {
+    var entry = this.codeMap_.findEntry(stack[i]);
+    if (entry) {
+      var name = entry.getName();
+      if (i === 0 && (entry.type === 'CPP' || entry.type === 'SHARED_LIB')) {
+        look_for_first_c_function = true;
+      }
+      if (look_for_first_c_function && entry.type === 'CPP') {
+        last_seen_c_function = name;
+      }
+      if (!this.skipThisFunction(name)) {
+        result.push(name);
+      }
+    } else {
+      this.handleUnknownCode(Profile.Operation.TICK, stack[i], i);
+      if (i === 0) result.push("UNKNOWN");
+    }
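+    // Once the stack transitions from the initial C++/shared-library frames
+    // to anything else, attribute this tick to the last C++ function seen
+    // (tallied in c_entries_).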
+    if (look_for_first_c_function &&
+      i > 0 &&
+      (!entry || entry.type !== 'CPP') &&
+      last_seen_c_function !== '') {
+      if (this.c_entries_[last_seen_c_function] === undefined) {
+        this.c_entries_[last_seen_c_function] = 0;
+      }
+      this.c_entries_[last_seen_c_function]++;
+      look_for_first_c_function = false;  // Found it, we're done.
+    }
+  }
+  return result;
+};
+
+
+/**
+ * Performs a breadth-first (BF) traversal of the top down call graph.
+ *
+ * @param {function(CallTree.Node)} f Visitor function.
+ */
+Profile.prototype.traverseTopDownTree = function (f) {
+  this.topDownTree_.traverse(f);
+};
+
+
+/**
+ * Performs a breadth-first (BF) traversal of the bottom up call graph.
+ *
+ * @param {function(CallTree.Node)} f Visitor function.
+ */
+Profile.prototype.traverseBottomUpTree = function (f) {
+  this.bottomUpTree_.traverse(f);
+};
+
+
+/**
+ * Calculates a top down profile for a node with the specified label.
+ * If no name specified, returns the whole top down calls tree.
+ *
+ * @param {string} opt_label Node label.
+ */
+Profile.prototype.getTopDownProfile = function (opt_label) {
+  return this.getTreeProfile_(this.topDownTree_, opt_label);
+};
+
+
+/**
+ * Calculates a bottom up profile for a node with the specified label.
+ * If no name specified, returns the whole bottom up calls tree.
+ *
+ * @param {string} opt_label Node label.
+ */
+Profile.prototype.getBottomUpProfile = function (opt_label) {
+  return this.getTreeProfile_(this.bottomUpTree_, opt_label);
+};
+
+
+/**
+ * Helper function for calculating a tree profile.
+ *
+ * @param {Profile.CallTree} tree Call tree.
+ * @param {string} opt_label Node label.
+ */
+Profile.prototype.getTreeProfile_ = function (tree, opt_label) {
+  if (!opt_label) {
+    tree.computeTotalWeights();
+    return tree;
+  } else {
+    var subTree = tree.cloneSubtree(opt_label);
+    subTree.computeTotalWeights();
+    return subTree;
+  }
+};
+
+
+/**
+ * Calculates a flat profile of callees starting from a node with
+ * the specified label. If no name specified, starts from the root.
+ *
+ * @param {string} opt_label Starting node label.
+ */
+Profile.prototype.getFlatProfile = function (opt_label) {
+  var counters = new CallTree();
+  var rootLabel = opt_label || CallTree.ROOT_NODE_LABEL;
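+  // precs[label] tracks how many frames with that label are currently on the
+  // traversal path; it keeps recursive calls from inflating total weights.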
+  var precs = {};
+  precs[rootLabel] = 0;
+  var root = counters.findOrAddChild(rootLabel);
+
+  this.topDownTree_.computeTotalWeights();
+  this.topDownTree_.traverseInDepth(
+    function onEnter(node) {
+      if (!(node.label in precs)) {
+        precs[node.label] = 0;
+      }
+      var nodeLabelIsRootLabel = node.label == rootLabel;
+      if (nodeLabelIsRootLabel || precs[rootLabel] > 0) {
+        if (precs[rootLabel] == 0) {
+          root.selfWeight += node.selfWeight;
+          root.totalWeight += node.totalWeight;
+        } else {
+          var rec = root.findOrAddChild(node.label);
+          rec.selfWeight += node.selfWeight;
+          if (nodeLabelIsRootLabel || precs[node.label] == 0) {
+            rec.totalWeight += node.totalWeight;
+          }
+        }
+        precs[node.label]++;
+      }
+    },
+    function onExit(node) {
+      if (node.label == rootLabel || precs[rootLabel] > 0) {
+        precs[node.label]--;
+      }
+    },
+    null);
+
+  if (!opt_label) {
+    // If we have created a flat profile for the whole program, we don't
+    // need an explicit root in it. Thus, replace the counters tree
+    // root with the node corresponding to the whole program.
+    counters.root_ = root;
+  } else {
+    // Propagate weights so percents can be calculated correctly.
+    counters.getRoot().selfWeight = root.selfWeight;
+    counters.getRoot().totalWeight = root.totalWeight;
+  }
+  return counters;
+};
+
+
+Profile.CEntryNode = function (name, ticks) {
+  this.name = name;
+  this.ticks = ticks;
+};
+
+
+Profile.prototype.getCEntryProfile = function () {
+  var result = [new Profile.CEntryNode("TOTAL", 0)];
+  var total_ticks = 0;
+  for (var f in this.c_entries_) {
+    var ticks = this.c_entries_[f];
+    total_ticks += ticks;
+    result.push(new Profile.CEntryNode(f, ticks));
+  }
+  result[0].ticks = total_ticks;  // Sorting will keep this at index 0.
+  result.sort(function (n1, n2) {
+    return n2.ticks - n1.ticks || (n2.name < n1.name ? -1 : 1)
+  });
+  return result;
+};
+
+
+/**
+ * Cleans up function entries that are not referenced by code entries.
+ */
+Profile.prototype.cleanUpFuncEntries = function () {
+  var referencedFuncEntries = [];
+  var entries = this.codeMap_.getAllDynamicEntriesWithAddresses();
+  for (var i = 0, l = entries.length; i < l; ++i) {
+    if (entries[i][1].constructor === Profile.FunctionEntry) {
+      entries[i][1].used = false;
+    }
+  }
+  for (var i = 0, l = entries.length; i < l; ++i) {
+    if ("func" in entries[i][1]) {
+      entries[i][1].func.used = true;
+    }
+  }
+  for (var i = 0, l = entries.length; i < l; ++i) {
+    if (entries[i][1].constructor === Profile.FunctionEntry &&
+      !entries[i][1].used) {
+      this.codeMap_.deleteCode(entries[i][0]);
+    }
+  }
+};
+
+
+/**
+ * Creates a dynamic code entry.
+ *
+ * @param {number} size Code size.
+ * @param {string} type Code type.
+ * @param {string} name Function name.
+ * @constructor
+ */
+Profile.DynamicCodeEntry = function (size, type, name) {
+  CodeMap.CodeEntry.call(this, size, name, type);
+};
+
+
+/**
+ * Returns node name.
+ */
+Profile.DynamicCodeEntry.prototype.getName = function () {
+  return this.type + ': ' + this.name;
+};
+
+
+/**
+ * Returns raw node name (without type decoration).
+ */
+Profile.DynamicCodeEntry.prototype.getRawName = function () {
+  return this.name;
+};
+
+
+Profile.DynamicCodeEntry.prototype.isJSFunction = function () {
+  return false;
+};
+
+
+Profile.DynamicCodeEntry.prototype.toString = function () {
+  return this.getName() + ': ' + this.size.toString(16);
+};
+
+
+/**
+ * Creates a dynamic code entry.
+ *
+ * @param {number} size Code size.
+ * @param {string} type Code type.
+ * @param {Profile.FunctionEntry} func Shared function entry.
+ * @param {Profile.CodeState} state Code optimization state.
+ * @constructor
+ */
+Profile.DynamicFuncCodeEntry = function (size, type, func, state) {
+  CodeMap.CodeEntry.call(this, size, '', type);
+  this.func = func;
+  this.state = state;
+};
+
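+// Indexed by Profile.CodeState: COMPILED (no prefix), OPTIMIZABLE ("~"),
+// OPTIMIZED ("*").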
+Profile.DynamicFuncCodeEntry.STATE_PREFIX = ["", "~", "*"];
+
+/**
+ * Returns state.
+ */
+Profile.DynamicFuncCodeEntry.prototype.getState = function () {
+  return Profile.DynamicFuncCodeEntry.STATE_PREFIX[this.state];
+};
+
+/**
+ * Returns node name.
+ */
+Profile.DynamicFuncCodeEntry.prototype.getName = function () {
+  var name = this.func.getName();
+  return this.type + ': ' + this.getState() + name;
+};
+
+
+/**
+ * Returns raw node name (without type decoration).
+ */
+Profile.DynamicFuncCodeEntry.prototype.getRawName = function () {
+  return this.func.getName();
+};
+
+
+Profile.DynamicFuncCodeEntry.prototype.isJSFunction = function () {
+  return true;
+};
+
+
+Profile.DynamicFuncCodeEntry.prototype.toString = function () {
+  return this.getName() + ': ' + this.size.toString(16);
+};
+
+
+/**
+ * Creates a shared function object entry.
+ *
+ * @param {string} name Function name.
+ * @constructor
+ */
+Profile.FunctionEntry = function (name) {
+  CodeMap.CodeEntry.call(this, 0, name);
+};
+
+
+/**
+ * Returns node name.
+ */
+Profile.FunctionEntry.prototype.getName = function () {
+  var name = this.name;
+  if (name.length == 0) {
+    name = '<anonymous>';
+  } else if (name.charAt(0) == ' ') {
+    // An anonymous function with location: " aaa.js:10".
+    name = '<anonymous>' + name;
+  }
+  return name;
+};
+
+Profile.FunctionEntry.prototype.toString = CodeMap.CodeEntry.prototype.toString;
+
+/**
+ * Constructs a call graph.
+ *
+ * @constructor
+ */
+function CallTree() {
+  this.root_ = new CallTree.Node(
+    CallTree.ROOT_NODE_LABEL);
+};
+
+
+/**
+ * The label of the root node.
+ */
+CallTree.ROOT_NODE_LABEL = '';
+
+
+/**
+ * @private
+ */
+CallTree.prototype.totalsComputed_ = false;
+
+
+/**
+ * Returns the tree root.
+ */
+CallTree.prototype.getRoot = function () {
+  return this.root_;
+};
+
+
+/**
+ * Adds the specified call path, constructing nodes as necessary.
+ *
+ * @param {Array<string>} path Call path.
+ */
+CallTree.prototype.addPath = function (path) {
+  if (path.length == 0) {
+    return;
+  }
+  var curr = this.root_;
+  for (var i = 0; i < path.length; ++i) {
+    curr = curr.findOrAddChild(path[i]);
+  }
+  curr.selfWeight++;
+  this.totalsComputed_ = false;
+};
+
+
+/**
+ * Finds an immediate child of the specified parent with the specified
+ * label, creates a child node if necessary. If a parent node isn't
+ * specified, uses tree root.
+ *
+ * @param {string} label Child node label.
+ */
+CallTree.prototype.findOrAddChild = function (label) {
+  return this.root_.findOrAddChild(label);
+};
+
+
+/**
+ * Creates a subtree by cloning and merging all subtrees rooted at nodes
+ * with a given label. E.g. cloning the following call tree on label 'A'
+ * will give the following result:
+ *
+ *           <A>--<B>                                     <B>
+ *          /                                            /
+ *     <root>             == clone on 'A' ==>  <root>--<A>
+ *          \                                            \
+ *           <C>--<A>--<D>                                <D>
+ *
+ * And <A>'s selfWeight will be the sum of selfWeights of <A>'s from the
+ * source call tree.
+ *
+ * @param {string} label The label of the new root node.
+ */
+CallTree.prototype.cloneSubtree = function (label) {
+  var subTree = new CallTree();
+  this.traverse(function (node, parent) {
+    if (!parent && node.label != label) {
+      return null;
+    }
+    var child = (parent ? parent : subTree).findOrAddChild(node.label);
+    child.selfWeight += node.selfWeight;
+    return child;
+  });
+  return subTree;
+};
+
+
+/**
+ * Computes total weights in the call graph.
+ */
+CallTree.prototype.computeTotalWeights = function () {
+  if (this.totalsComputed_) {
+    return;
+  }
+  this.root_.computeTotalWeight();
+  this.totalsComputed_ = true;
+};
+
+
+/**
+ * Traverses the call graph in preorder. This function can be used for
+ * building optionally modified tree clones. This is the boilerplate code
+ * for this scenario:
+ *
+ * callTree.traverse(function(node, parentClone) {
+ *   var nodeClone = cloneNode(node);
+ *   if (parentClone)
+ *     parentClone.addChild(nodeClone);
+ *   return nodeClone;
+ * });
+ *
+ * @param {function(CallTree.Node, *)} f Visitor function.
+ *    The second parameter is the result of calling 'f' on the parent node.
+ */
+CallTree.prototype.traverse = function (f) {
+  var pairsToProcess = new ConsArray();
+  pairsToProcess.concat([{ node: this.root_, param: null }]);
+  while (!pairsToProcess.atEnd()) {
+    var pair = pairsToProcess.next();
+    var node = pair.node;
+    var newParam = f(node, pair.param);
+    var morePairsToProcess = [];
+    node.forEachChild(function (child) {
+      morePairsToProcess.push({ node: child, param: newParam });
+    });
+    pairsToProcess.concat(morePairsToProcess);
+  }
+};
+
+
+/**
+ * Performs an in-depth (depth-first) call graph traversal.
+ *
+ * @param {function(CallTree.Node)} enter A function called
+ *     prior to visiting node's children.
+ * @param {function(CallTree.Node)} exit A function called
+ *     after visiting node's children.
+ */
+CallTree.prototype.traverseInDepth = function (enter, exit) {
+  function traverse(node) {
+    enter(node);
+    node.forEachChild(traverse);
+    exit(node);
+  }
+  traverse(this.root_);
+};
+
+
+/**
+ * Constructs a call graph node.
+ *
+ * @param {string} label Node label.
+ * @param {CallTree.Node} opt_parent Node parent.
+ */
+CallTree.Node = function (label, opt_parent) {
+  this.label = label;
+  this.parent = opt_parent;
+  this.children = {};
+};
+
+
+/**
+ * Node self weight (how many times this node was the last node in
+ * a call path).
+ * @type {number}
+ */
+CallTree.Node.prototype.selfWeight = 0;
+
+
+/**
+ * Node total weight (includes weights of all children).
+ * @type {number}
+ */
+CallTree.Node.prototype.totalWeight = 0;
+
+
+/**
+ * Adds a child node.
+ *
+ * @param {string} label Child node label.
+ */
+CallTree.Node.prototype.addChild = function (label) {
+  var child = new CallTree.Node(label, this);
+  this.children[label] = child;
+  return child;
+};
+
+
+/**
+ * Computes node's total weight.
+ */
+CallTree.Node.prototype.computeTotalWeight =
+  function () {
+    var totalWeight = this.selfWeight;
+    this.forEachChild(function (child) {
+      totalWeight += child.computeTotalWeight();
+    });
+    return this.totalWeight = totalWeight;
+  };
+
+
+/**
+ * Returns all node's children as an array.
+ */
+CallTree.Node.prototype.exportChildren = function () {
+  var result = [];
+  this.forEachChild(function (node) { result.push(node); });
+  return result;
+};
+
+
+/**
+ * Finds an immediate child with the specified label.
+ *
+ * @param {string} label Child node label.
+ */
+CallTree.Node.prototype.findChild = function (label) {
+  return this.children[label] || null;
+};
+
+
+/**
+ * Finds an immediate child with the specified label, creates a child
+ * node if necessary.
+ *
+ * @param {string} label Child node label.
+ */
+CallTree.Node.prototype.findOrAddChild = function (label) {
+  return this.findChild(label) || this.addChild(label);
+};
+
+
+/**
+ * Calls the specified function for every child.
+ *
+ * @param {function(CallTree.Node)} f Visitor function.
+ */
+CallTree.Node.prototype.forEachChild = function (f) {
+  for (var c in this.children) {
+    f(this.children[c]);
+  }
+};
+
+
+/**
+ * Walks up from the current node up to the call tree root.
+ *
+ * @param {function(CallTree.Node)} f Visitor function.
+ */
+CallTree.Node.prototype.walkUpToRoot = function (f) {
+  for (var curr = this; curr != null; curr = curr.parent) {
+    f(curr);
+  }
+};
+
+
+/**
+ * Tries to find a node with the specified path.
+ *
+ * @param {Array<string>} labels The path.
+ * @param {function(CallTree.Node)} opt_f Visitor function.
+ */
+CallTree.Node.prototype.descendToChild = function (
+  labels, opt_f) {
+  for (var pos = 0, curr = this; pos < labels.length && curr != null; pos++) {
+    var child = curr.findChild(labels[pos]);
+    if (opt_f) {
+      opt_f(child, pos);
+    }
+    curr = child;
+  }
+  return curr;
+};
+
+function JsonProfile() {
+  this.codeMap_ = new CodeMap();
+  this.codeEntries_ = [];
+  this.functionEntries_ = [];
+  this.ticks_ = [];
+  this.scripts_ = [];
+}
+
+JsonProfile.prototype.addLibrary = function (
+  name, startAddr, endAddr) {
+  var entry = new CodeMap.CodeEntry(
+    endAddr - startAddr, name, 'SHARED_LIB');
+  this.codeMap_.addLibrary(startAddr, entry);
+
+  entry.codeId = this.codeEntries_.length;
+  this.codeEntries_.push({ name: entry.name, type: entry.type });
+  return entry;
+};
+
+JsonProfile.prototype.addStaticCode = function (
+  name, startAddr, endAddr) {
+  var entry = new CodeMap.CodeEntry(
+    endAddr - startAddr, name, 'CPP');
+  this.codeMap_.addStaticCode(startAddr, entry);
+
+  entry.codeId = this.codeEntries_.length;
+  this.codeEntries_.push({ name: entry.name, type: entry.type });
+  return entry;
+};
+
+JsonProfile.prototype.addCode = function (
+  kind, name, timestamp, start, size) {
+  let codeId = this.codeEntries_.length;
+  // Find out if we have a static code entry for the code. If yes, we will
+  // make sure it is written to the JSON file just once.
+  let staticEntry = this.codeMap_.findAddress(start);
+  if (staticEntry && staticEntry.entry.type === 'CPP') {
+    codeId = staticEntry.entry.codeId;
+  }
+
+  var entry = new CodeMap.CodeEntry(size, name, 'CODE');
+  this.codeMap_.addCode(start, entry);
+
+  entry.codeId = codeId;
+  this.codeEntries_[codeId] = {
+    name: entry.name,
+    timestamp: timestamp,
+    type: entry.type,
+    kind: kind
+  };
+
+  return entry;
+};
+
+JsonProfile.prototype.addFuncCode = function (
+  kind, name, timestamp, start, size, funcAddr, state) {
+  // As code and functions are in the same address space,
+  // it is safe to put them in a single code map.
+  var func = this.codeMap_.findDynamicEntryByStartAddress(funcAddr);
+  if (!func) {
+    func = new CodeMap.CodeEntry(0, name, 'SFI');
+    this.codeMap_.addCode(funcAddr, func);
+
+    func.funcId = this.functionEntries_.length;
+    this.functionEntries_.push({ name: name, codes: [] });
+  } else if (func.name !== name) {
+    // Function object has been overwritten with a new one.
+    func.name = name;
+
+    func.funcId = this.functionEntries_.length;
+    this.functionEntries_.push({ name: name, codes: [] });
+  }
+  // TODO(jarin): Insert the code object into the SFI's code list.
+  var entry = this.codeMap_.findDynamicEntryByStartAddress(start);
+  if (entry) {
+    if (entry.size === size && entry.func === func) {
+      // Entry state has changed.
+      entry.state = state;
+    } else {
+      this.codeMap_.deleteCode(start);
+      entry = null;
+    }
+  }
+  if (!entry) {
+    entry = new CodeMap.CodeEntry(size, name, 'JS');
+    this.codeMap_.addCode(start, entry);
+
+    entry.codeId = this.codeEntries_.length;
+
+    this.functionEntries_[func.funcId].codes.push(entry.codeId);
+
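+    // Map the numeric optimization state onto the JSON "kind" label.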
+    if (state === 0) {
+      kind = "Builtin";
+    } else if (state === 1) {
+      kind = "Unopt";
+    } else if (state === 2) {
+      kind = "Opt";
+    }
+
+    this.codeEntries_.push({
+      name: entry.name,
+      type: entry.type,
+      kind: kind,
+      func: func.funcId,
+      tm: timestamp
+    });
+  }
+  return entry;
+};
+
+JsonProfile.prototype.moveCode = function (from, to) {
+  try {
+    this.codeMap_.moveCode(from, to);
+  } catch (e) {
+    printErr("Move: unknown source " + from);
+  }
+};
+
+JsonProfile.prototype.addSourcePositions = function (
+  start, script, startPos, endPos, sourcePositions, inliningPositions,
+  inlinedFunctions) {
+  var entry = this.codeMap_.findDynamicEntryByStartAddress(start);
+  if (!entry) return;
+  var codeId = entry.codeId;
+
+  // Resolve the inlined functions list.
+  if (inlinedFunctions.length > 0) {
+    inlinedFunctions = inlinedFunctions.substring(1).split("S");
+    for (var i = 0; i < inlinedFunctions.length; i++) {
+      var funcAddr = parseInt(inlinedFunctions[i]);
+      var func = this.codeMap_.findDynamicEntryByStartAddress(funcAddr);
+      if (!func || func.funcId === undefined) {
+        printErr("Could not find function " + inlinedFunctions[i]);
+        inlinedFunctions[i] = null;
+      } else {
+        inlinedFunctions[i] = func.funcId;
+      }
+    }
+  } else {
+    inlinedFunctions = [];
+  }
+
+  this.codeEntries_[entry.codeId].source = {
+    script: script,
+    start: startPos,
+    end: endPos,
+    positions: sourcePositions,
+    inlined: inliningPositions,
+    fns: inlinedFunctions
+  };
+};
+
+JsonProfile.prototype.addScriptSource = function (id, url, source) {
+  this.scripts_[id] = new Script(id, url, source);
+};
+
+
+JsonProfile.prototype.deoptCode = function (
+  timestamp, code, inliningId, scriptOffset, bailoutType,
+  sourcePositionText, deoptReasonText) {
+  let entry = this.codeMap_.findDynamicEntryByStartAddress(code);
+  if (entry) {
+    let codeId = entry.codeId;
+    if (!this.codeEntries_[codeId].deopt) {
+      // Only add the deopt if there was no deopt before.
+      // The subsequent deoptimizations should be lazy deopts for
+      // other on-stack activations.
+      this.codeEntries_[codeId].deopt = {
+        tm: timestamp,
+        inliningId: inliningId,
+        scriptOffset: scriptOffset,
+        posText: sourcePositionText,
+        reason: deoptReasonText,
+        bailoutType: bailoutType
+      };
+    }
+  }
+};
+
+JsonProfile.prototype.deleteCode = function (start) {
+  try {
+    this.codeMap_.deleteCode(start);
+  } catch (e) {
+    printErr("Delete: unknown address " + start);
+  }
+};
+
+JsonProfile.prototype.moveFunc = function (from, to) {
+  if (this.codeMap_.findDynamicEntryByStartAddress(from)) {
+    this.codeMap_.moveCode(from, to);
+  }
+};
+
+JsonProfile.prototype.findEntry = function (addr) {
+  return this.codeMap_.findEntry(addr);
+};
+
+JsonProfile.prototype.recordTick = function (time_ns, vmState, stack) {
+  // TODO(jarin) Resolve the frame-less case (when top of stack is
+  // known code).
+  var processedStack = [];
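+  // The stack is flattened into alternating (codeId, offset) pairs;
+  // unresolved addresses are stored as (-1, rawAddress).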
+  for (var i = 0; i < stack.length; i++) {
+    var resolved = this.codeMap_.findAddress(stack[i]);
+    if (resolved) {
+      processedStack.push(resolved.entry.codeId, resolved.offset);
+    } else {
+      processedStack.push(-1, stack[i]);
+    }
+  }
+  this.ticks_.push({ tm: time_ns, vm: vmState, s: processedStack });
+};
+
+function writeJson(s) {
+  write(JSON.stringify(s, null, 2));
+}
+
+JsonProfile.prototype.writeJson = function () {
+  // Write out the JSON in a partially manual way to avoid creating too-large
+  // strings in one JSON.stringify call when there are a lot of ticks.
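+  // Resulting shape: {"code": [...], "functions": [...], "ticks": [...],
+  // "scripts": [...]}.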
+  write('{\n');
+
+  write('  "code": ');
+  writeJson(this.codeEntries_);
+  write(',\n');
+
+  write('  "functions": ');
+  writeJson(this.functionEntries_);
+  write(',\n');
+
+  write('  "ticks": [\n');
+  for (var i = 0; i < this.ticks_.length; i++) {
+    write('    ');
+    writeJson(this.ticks_[i]);
+    if (i < this.ticks_.length - 1) {
+      write(',\n');
+    } else {
+      write('\n');
+    }
+  }
+  write('  ],\n');
+
+  write('  "scripts": ');
+  writeJson(this.scripts_);
+
+  write('}\n');
+};
diff --git a/src/third_party/v8/tools/profile.mjs b/src/third_party/v8/tools/profile.mjs
new file mode 100644
index 0000000..b2e953f
--- /dev/null
+++ b/src/third_party/v8/tools/profile.mjs
@@ -0,0 +1,1103 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import { CodeMap, CodeEntry } from "./codemap.mjs";
+import { ConsArray } from "./consarray.mjs";
+
+// TODO: move to separate modules
+export class SourcePosition {
+  constructor(script, line, column) {
+    this.script = script;
+    this.line = line;
+    this.column = column;
+    this.entries = [];
+  }
+  addEntry(entry) {
+    this.entries.push(entry);
+  }
+}
+
+export class Script {
+  constructor(id, name, source) {
+    this.id = id;
+    this.name = name;
+    this.source = source;
+    this.sourcePositions = [];
+    // Map<line, Map<column, SourcePosition>>
+    this.lineToColumn = new Map();
+  }
+
+  addSourcePosition(line, column, entry) {
+    let sourcePosition = this.lineToColumn.get(line)?.get(column);
+    if (sourcePosition === undefined) {
+      sourcePosition = new SourcePosition(this, line, column);
+      this._addSourcePosition(line, column, sourcePosition);
+    }
+    sourcePosition.addEntry(entry);
+    return sourcePosition;
+  }
+
+  _addSourcePosition(line, column, sourcePosition) {
+    let columnToSourcePosition;
+    if (this.lineToColumn.has(line)) {
+      columnToSourcePosition = this.lineToColumn.get(line);
+    } else {
+      columnToSourcePosition = new Map();
+      this.lineToColumn.set(line, columnToSourcePosition);
+    }
+    this.sourcePositions.push(sourcePosition);
+    columnToSourcePosition.set(column, sourcePosition);
+  }
+}
+
+/**
+ * Creates a profile object for processing profiling-related events
+ * and calculating function execution times.
+ *
+ * @constructor
+ */
+export class Profile {
+  codeMap_ = new CodeMap();
+  topDownTree_ = new CallTree();
+  bottomUpTree_ = new CallTree();
+  c_entries_ = {};
+  ticks_ = [];
+  scripts_ = [];
+  urlToScript_ = new Map();
+
+  /**
+   * Returns whether a function with the specified name must be skipped.
+   * Should be overridden by subclasses.
+   *
+   * @param {string} name Function name.
+   */
+  skipThisFunction(name) {
+    return false;
+  }
+
+  /**
+   * Enum for profiler operations that involve looking up existing
+   * code entries.
+   *
+   * @enum {number}
+   */
+  static Operation = {
+    MOVE: 0,
+    DELETE: 1,
+    TICK: 2
+  }
+
+  /**
+   * Enum for code state regarding its dynamic optimization.
+   *
+   * @enum {number}
+   */
+  static CodeState = {
+    COMPILED: 0,
+    OPTIMIZABLE: 1,
+    OPTIMIZED: 2
+  }
+
+  /**
+   * Called whenever the specified operation has failed finding a function
+   * containing the specified address. Should be overridden by subclasses.
+   * See the Profile.Operation enum for the list of
+   * possible operations.
+   *
+   * @param {number} operation Operation.
+   * @param {number} addr Address of the unknown code.
+   * @param {number} opt_stackPos If an unknown address is encountered
+   *     during stack trace processing, specifies a position of the frame
+   *     containing the address.
+   */
+  handleUnknownCode(operation, addr, opt_stackPos) {}
+
+  /**
+   * Registers a library.
+   *
+   * @param {string} name Code entry name.
+   * @param {number} startAddr Starting address.
+   * @param {number} endAddr Ending address.
+   */
+  addLibrary(name, startAddr, endAddr) {
+    const entry = new CodeEntry(endAddr - startAddr, name, 'SHARED_LIB');
+    this.codeMap_.addLibrary(startAddr, entry);
+    return entry;
+  }
+
+  /**
+   * Registers statically compiled code entry.
+   *
+   * @param {string} name Code entry name.
+   * @param {number} startAddr Starting address.
+   * @param {number} endAddr Ending address.
+   */
+  addStaticCode(name, startAddr, endAddr) {
+    const entry = new CodeEntry(endAddr - startAddr, name, 'CPP');
+    this.codeMap_.addStaticCode(startAddr, entry);
+    return entry;
+  }
+
+  /**
+   * Registers dynamic (JIT-compiled) code entry.
+   *
+   * @param {string} type Code entry type.
+   * @param {string} name Code entry name.
+   * @param {number} start Starting address.
+   * @param {number} size Code entry size.
+   */
+  addCode(type, name, timestamp, start, size) {
+    const entry = new DynamicCodeEntry(size, type, name);
+    this.codeMap_.addCode(start, entry);
+    return entry;
+  }
+
+  /**
+   * Registers dynamic (JIT-compiled) code entry.
+   *
+   * @param {string} type Code entry type.
+   * @param {string} name Code entry name.
+   * @param {number} start Starting address.
+   * @param {number} size Code entry size.
+   * @param {number} funcAddr Shared function object address.
+   * @param {Profile.CodeState} state Optimization state.
+   */
+  addFuncCode(type, name, timestamp, start, size, funcAddr, state) {
+    // As code and functions are in the same address space,
+    // it is safe to put them in a single code map.
+    let func = this.codeMap_.findDynamicEntryByStartAddress(funcAddr);
+    if (!func) {
+      func = new FunctionEntry(name);
+      this.codeMap_.addCode(funcAddr, func);
+    } else if (func.name !== name) {
+      // Function object has been overwritten with a new one.
+      func.name = name;
+    }
+    let entry = this.codeMap_.findDynamicEntryByStartAddress(start);
+    if (entry) {
+      if (entry.size === size && entry.func === func) {
+        // Entry state has changed.
+        entry.state = state;
+      } else {
+        this.codeMap_.deleteCode(start);
+        entry = null;
+      }
+    }
+    if (!entry) {
+      entry = new DynamicFuncCodeEntry(size, type, func, state);
+      this.codeMap_.addCode(start, entry);
+    }
+    return entry;
+  }
+
+  /**
+   * Reports about moving of a dynamic code entry.
+   *
+   * @param {number} from Current code entry address.
+   * @param {number} to New code entry address.
+   */
+  moveCode(from, to) {
+    try {
+      this.codeMap_.moveCode(from, to);
+    } catch (e) {
+      this.handleUnknownCode(Profile.Operation.MOVE, from);
+    }
+  }
+
+  deoptCode(timestamp, code, inliningId, scriptOffset, bailoutType,
+    sourcePositionText, deoptReasonText) {
+  }
+
+  /**
+   * Reports about deletion of a dynamic code entry.
+   *
+   * @param {number} start Starting address.
+   */
+  deleteCode(start) {
+    try {
+      this.codeMap_.deleteCode(start);
+    } catch (e) {
+      this.handleUnknownCode(Profile.Operation.DELETE, start);
+    }
+  }
+
+  /**
+   * Adds source positions for given code.
+   */
+  addSourcePositions(start, script, startPos, endPos, sourcePositions,
+        inliningPositions, inlinedFunctions) {
+    // CLI does not need source code => ignore.
+  }
+
+  /**
+   * Adds script source code.
+   */
+  addScriptSource(id, url, source) {
+    const script = new Script(id, url, source);
+    this.scripts_[id] = script;
+    this.urlToScript_.set(url, script);
+  }
+
+  /**
+   * Returns the script registered for the given URL.
+   */
+  getScript(url) {
+    return this.urlToScript_.get(url);
+  }
+
+  /**
+   * Reports about moving of a dynamic code entry.
+   *
+   * @param {number} from Current code entry address.
+   * @param {number} to New code entry address.
+   */
+  moveFunc(from, to) {
+    if (this.codeMap_.findDynamicEntryByStartAddress(from)) {
+      this.codeMap_.moveCode(from, to);
+    }
+  }
+
+  /**
+   * Retrieves a code entry by an address.
+   *
+   * @param {number} addr Entry address.
+   */
+  findEntry(addr) {
+    return this.codeMap_.findEntry(addr);
+  }
+
+  /**
+   * Records a tick event. Stack must contain a sequence of
+   * addresses starting with the program counter value.
+   *
+   * @param {Array<number>} stack Stack sample.
+   */
+  recordTick(time_ns, vmState, stack) {
+    const processedStack = this.resolveAndFilterFuncs_(stack);
+    this.bottomUpTree_.addPath(processedStack);
+    processedStack.reverse();
+    this.topDownTree_.addPath(processedStack);
+  }
+
+  /**
+   * Translates addresses into function names and filters unneeded
+   * functions.
+   *
+   * @param {Array<number>} stack Stack sample.
+   */
+  resolveAndFilterFuncs_(stack) {
+    const result = [];
+    let last_seen_c_function = '';
+    let look_for_first_c_function = false;
+    for (let i = 0; i < stack.length; ++i) {
+      const entry = this.codeMap_.findEntry(stack[i]);
+      if (entry) {
+        const name = entry.getName();
+        if (i === 0 && (entry.type === 'CPP' || entry.type === 'SHARED_LIB')) {
+          look_for_first_c_function = true;
+        }
+        if (look_for_first_c_function && entry.type === 'CPP') {
+          last_seen_c_function = name;
+        }
+        if (!this.skipThisFunction(name)) {
+          result.push(name);
+        }
+      } else {
+        this.handleUnknownCode(Profile.Operation.TICK, stack[i], i);
+        if (i === 0) result.push("UNKNOWN");
+      }
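+      // Once the stack transitions from the initial C++/shared-library frames
+      // to anything else, attribute this tick to the last C++ function seen
+      // (tallied in c_entries_).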
+      if (look_for_first_c_function &&
+        i > 0 &&
+        (!entry || entry.type !== 'CPP') &&
+        last_seen_c_function !== '') {
+        if (this.c_entries_[last_seen_c_function] === undefined) {
+          this.c_entries_[last_seen_c_function] = 0;
+        }
+        this.c_entries_[last_seen_c_function]++;
+        look_for_first_c_function = false;  // Found it, we're done.
+      }
+    }
+    return result;
+  }
+
+  /**
+   * Performs a breadth-first (BF) traversal of the top down call graph.
+   *
+   * @param {function(CallTreeNode)} f Visitor function.
+   */
+  traverseTopDownTree(f) {
+    this.topDownTree_.traverse(f);
+  }
+
+  /**
+   * Performs a breadth-first (BF) traversal of the bottom up call graph.
+   *
+   * @param {function(CallTreeNode)} f Visitor function.
+   */
+  traverseBottomUpTree(f) {
+    this.bottomUpTree_.traverse(f);
+  }
+
+  /**
+   * Calculates a top down profile for a node with the specified label.
+   * If no name specified, returns the whole top down calls tree.
+   *
+   * @param {string} opt_label Node label.
+   */
+  getTopDownProfile(opt_label) {
+    return this.getTreeProfile_(this.topDownTree_, opt_label);
+  }
+
+  /**
+   * Calculates a bottom up profile for a node with the specified label.
+   * If no name specified, returns the whole bottom up calls tree.
+   *
+   * @param {string} opt_label Node label.
+   */
+  getBottomUpProfile(opt_label) {
+    return this.getTreeProfile_(this.bottomUpTree_, opt_label);
+  }
+
+  /**
+   * Helper function for calculating a tree profile.
+   *
+   * @param {Profile.CallTree} tree Call tree.
+   * @param {string} opt_label Node label.
+   */
+  getTreeProfile_(tree, opt_label) {
+    if (!opt_label) {
+      tree.computeTotalWeights();
+      return tree;
+    } else {
+      const subTree = tree.cloneSubtree(opt_label);
+      subTree.computeTotalWeights();
+      return subTree;
+    }
+  }
+
+  /**
+   * Calculates a flat profile of callees starting from a node with
+   * the specified label. If no name specified, starts from the root.
+   *
+   * @param {string} opt_label Starting node label.
+   */
+  getFlatProfile(opt_label) {
+    const counters = new CallTree();
+    const rootLabel = opt_label || CallTree.ROOT_NODE_LABEL;
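+    // precs[label] tracks how many frames with that label are currently on
+    // the traversal path; it keeps recursion from inflating total weights.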
+    const precs = {};
+    precs[rootLabel] = 0;
+    const root = counters.findOrAddChild(rootLabel);
+
+    this.topDownTree_.computeTotalWeights();
+    this.topDownTree_.traverseInDepth(
+      function onEnter(node) {
+        if (!(node.label in precs)) {
+          precs[node.label] = 0;
+        }
+        const nodeLabelIsRootLabel = node.label == rootLabel;
+        if (nodeLabelIsRootLabel || precs[rootLabel] > 0) {
+          if (precs[rootLabel] == 0) {
+            root.selfWeight += node.selfWeight;
+            root.totalWeight += node.totalWeight;
+          } else {
+            const rec = root.findOrAddChild(node.label);
+            rec.selfWeight += node.selfWeight;
+            if (nodeLabelIsRootLabel || precs[node.label] == 0) {
+              rec.totalWeight += node.totalWeight;
+            }
+          }
+          precs[node.label]++;
+        }
+      },
+      function onExit(node) {
+        if (node.label == rootLabel || precs[rootLabel] > 0) {
+          precs[node.label]--;
+        }
+      },
+      null);
+
+    if (!opt_label) {
+      // If we have created a flat profile for the whole program, we don't
+      // need an explicit root in it. Thus, replace the counters tree
+      // root with the node corresponding to the whole program.
+      counters.root_ = root;
+    } else {
+      // Propagate weights so percents can be calculated correctly.
+      counters.getRoot().selfWeight = root.selfWeight;
+      counters.getRoot().totalWeight = root.totalWeight;
+    }
+    return counters;
+  }
+
+  getCEntryProfile() {
+    const result = [new CEntryNode("TOTAL", 0)];
+    let total_ticks = 0;
+    for (let f in this.c_entries_) {
+      const ticks = this.c_entries_[f];
+      total_ticks += ticks;
+      result.push(new CEntryNode(f, ticks));
+    }
+    result[0].ticks = total_ticks;  // Sorting will keep this at index 0.
+    result.sort((n1, n2) => n2.ticks - n1.ticks || (n2.name < n1.name ? -1 : 1));
+    return result;
+  }
+
+
+  /**
+   * Cleans up function entries that are not referenced by code entries.
+   */
+  cleanUpFuncEntries() {
+    const referencedFuncEntries = [];
+    const entries = this.codeMap_.getAllDynamicEntriesWithAddresses();
+    for (let i = 0, l = entries.length; i < l; ++i) {
+      if (entries[i][1].constructor === FunctionEntry) {
+        entries[i][1].used = false;
+      }
+    }
+    for (let i = 0, l = entries.length; i < l; ++i) {
+      if ("func" in entries[i][1]) {
+        entries[i][1].func.used = true;
+      }
+    }
+    for (let i = 0, l = entries.length; i < l; ++i) {
+      if (entries[i][1].constructor === FunctionEntry &&
+        !entries[i][1].used) {
+        this.codeMap_.deleteCode(entries[i][0]);
+      }
+    }
+  }
+}
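+
+// A minimal usage sketch (addresses and names below are illustrative only):
+//   import { Profile } from './profile.mjs';
+//   const profile = new Profile();
+//   profile.addFuncCode('LazyCompile', 'foo', 0, 0x1000, 0x80, 0x2000,
+//                       Profile.CodeState.COMPILED);
+//   profile.recordTick(0, 0, [0x1010]);
+//   const flat = profile.getFlatProfile();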
+
+class CEntryNode {
+  constructor(name, ticks) {
+    this.name = name;
+    this.ticks = ticks;
+  }
+}
+
+
+/**
+ * Creates a dynamic code entry.
+ *
+ * @param {number} size Code size.
+ * @param {string} type Code type.
+ * @param {string} name Function name.
+ * @constructor
+ */
+class DynamicCodeEntry extends CodeEntry {
+  constructor(size, type, name) {
+    super(size, name, type);
+  }
+
+  /**
+   * Returns node name.
+   */
+  getName() {
+    return this.type + ': ' + this.name;
+  }
+
+  /**
+   * Returns raw node name (without type decoration).
+   */
+  getRawName() {
+    return this.name;
+  }
+
+  isJSFunction() {
+    return false;
+  }
+
+  toString() {
+    return this.getName() + ': ' + this.size.toString(16);
+  }
+}
+
+
+/**
+ * Creates a dynamic code entry.
+ *
+ * @param {number} size Code size.
+ * @param {string} type Code type.
+ * @param {FunctionEntry} func Shared function entry.
+ * @param {Profile.CodeState} state Code optimization state.
+ * @constructor
+ */
+class DynamicFuncCodeEntry extends CodeEntry {
+  constructor(size, type, func, state) {
+    super(size, '', type);
+    this.func = func;
+    this.state = state;
+  }
+
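+  // Indexed by Profile.CodeState: COMPILED (no prefix), OPTIMIZABLE ("~"),
+  // OPTIMIZED ("*").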
+  static STATE_PREFIX = ["", "~", "*"];
+  getState() {
+    return DynamicFuncCodeEntry.STATE_PREFIX[this.state];
+  }
+
+  getName() {
+    const name = this.func.getName();
+    return this.type + ': ' + this.getState() + name;
+  }
+
+  /**
+   * Returns raw node name (without type decoration).
+   */
+  getRawName() {
+    return this.func.getName();
+  }
+
+  isJSFunction() {
+    return true;
+  }
+
+  toString() {
+    return this.getName() + ': ' + this.size.toString(16);
+  }
+}
+
+/**
+ * Creates a shared function object entry.
+ *
+ * @param {string} name Function name.
+ * @constructor
+ */
+class FunctionEntry extends CodeEntry {
+  constructor(name) {
+    super(0, name);
+  }
+
+  /**
+   * Returns node name.
+   */
+  getName() {
+    let name = this.name;
+    if (name.length == 0) {
+      name = '<anonymous>';
+    } else if (name.charAt(0) == ' ') {
+      // An anonymous function with location: " aaa.js:10".
+      name = `<anonymous>${name}`;
+    }
+    return name;
+  }
+}
+
+/**
+ * Constructs a call graph.
+ *
+ * @constructor
+ */
+class CallTree {
+  root_ = new CallTreeNode(CallTree.ROOT_NODE_LABEL);
+  totalsComputed_ = false;
+
+  /**
+   * The label of the root node.
+   */
+  static ROOT_NODE_LABEL = '';
+
+  /**
+   * Returns the tree root.
+   */
+  getRoot() {
+    return this.root_;
+  }
+
+  /**
+   * Adds the specified call path, constructing nodes as necessary.
+   *
+   * @param {Array<string>} path Call path.
+   */
+  addPath(path) {
+    if (path.length == 0) {
+      return;
+    }
+    let curr = this.root_;
+    for (let i = 0; i < path.length; ++i) {
+      curr = curr.findOrAddChild(path[i]);
+    }
+    curr.selfWeight++;
+    this.totalsComputed_ = false;
+  }
+
+  /**
+   * Finds an immediate child of the specified parent with the specified
+   * label, creates a child node if necessary. If a parent node isn't
+   * specified, uses tree root.
+   *
+   * @param {string} label Child node label.
+   */
+  findOrAddChild(label) {
+    return this.root_.findOrAddChild(label);
+  }
+
+  /**
+   * Creates a subtree by cloning and merging all subtrees rooted at nodes
+   * with a given label. E.g. cloning the following call tree on label 'A'
+   * will give the following result:
+   *
+   *           <A>--<B>                                     <B>
+   *          /                                            /
+   *     <root>             == clone on 'A' ==>  <root>--<A>
+   *          \                                            \
+   *           <C>--<A>--<D>                                <D>
+   *
+   * And <A>'s selfWeight will be the sum of selfWeights of <A>'s from the
+   * source call tree.
+   *
+   * @param {string} label The label of the new root node.
+   */
+  cloneSubtree(label) {
+    const subTree = new CallTree();
+    this.traverse((node, parent) => {
+      if (!parent && node.label != label) {
+        return null;
+      }
+      const child = (parent ? parent : subTree).findOrAddChild(node.label);
+      child.selfWeight += node.selfWeight;
+      return child;
+    });
+    return subTree;
+  }
+
+  /**
+   * Computes total weights in the call graph.
+   */
+  computeTotalWeights() {
+    if (this.totalsComputed_) return;
+    this.root_.computeTotalWeight();
+    this.totalsComputed_ = true;
+  }
+
+  /**
+   * Traverses the call graph in preorder. This function can be used for
+   * building optionally modified tree clones. This is the boilerplate code
+   * for this scenario:
+   *
+   * callTree.traverse(function(node, parentClone) {
+   *   var nodeClone = cloneNode(node);
+   *   if (parentClone)
+   *     parentClone.addChild(nodeClone);
+   *   return nodeClone;
+   * });
+   *
+   * @param {function(CallTreeNode, *)} f Visitor function.
+   *    The second parameter is the result of calling 'f' on the parent node.
+   */
+  traverse(f) {
+    const pairsToProcess = new ConsArray();
+    pairsToProcess.concat([{ node: this.root_, param: null }]);
+    while (!pairsToProcess.atEnd()) {
+      const pair = pairsToProcess.next();
+      const node = pair.node;
+      const newParam = f(node, pair.param);
+      const morePairsToProcess = [];
+      node.forEachChild((child) => {
+        morePairsToProcess.push({ node: child, param: newParam });
+      });
+      pairsToProcess.concat(morePairsToProcess);
+    }
+  }
+
+  /**
+   * Performs an in-depth (depth-first) call graph traversal.
+   *
+   * @param {function(CallTreeNode)} enter A function called
+   *     prior to visiting node's children.
+   * @param {function(CallTreeNode)} exit A function called
+   *     after visiting node's children.
+   */
+  traverseInDepth(enter, exit) {
+    function traverse(node) {
+      enter(node);
+      node.forEachChild(traverse);
+      exit(node);
+    }
+    traverse(this.root_);
+  }
+}
+
+
+/**
+ * Constructs a call graph node.
+ *
+ * @param {string} label Node label.
+ * @param {CallTreeNode} opt_parent Node parent.
+ */
+class CallTreeNode {
+  /**
+   * Node self weight (how many times this node was the last node in
+   * a call path).
+   * @type {number}
+   */
+  selfWeight = 0;
+
+  /**
+   * Node total weight (includes weights of all children).
+   * @type {number}
+   */
+  totalWeight = 0;
+  children = {};
+
+  constructor(label, opt_parent) {
+    this.label = label;
+    this.parent = opt_parent;
+  }
+
+
+  /**
+   * Adds a child node.
+   *
+   * @param {string} label Child node label.
+   */
+  addChild(label) {
+    const child = new CallTreeNode(label, this);
+    this.children[label] = child;
+    return child;
+  }
+
+  /**
+   * Computes node's total weight.
+   */
+  computeTotalWeight() {
+    let totalWeight = this.selfWeight;
+    this.forEachChild(function (child) {
+      totalWeight += child.computeTotalWeight();
+    });
+    return this.totalWeight = totalWeight;
+  }
+
+  /**
+   * Returns all node's children as an array.
+   */
+  exportChildren() {
+    const result = [];
+    this.forEachChild(function (node) { result.push(node); });
+    return result;
+  }
+
+  /**
+   * Finds an immediate child with the specified label.
+   *
+   * @param {string} label Child node label.
+   */
+  findChild(label) {
+    return this.children[label] || null;
+  }
+
+  /**
+   * Finds an immediate child with the specified label, creates a child
+   * node if necessary.
+   *
+   * @param {string} label Child node label.
+   */
+  findOrAddChild(label) {
+    return this.findChild(label) || this.addChild(label);
+  }
+
+  /**
+   * Calls the specified function for every child.
+   *
+   * @param {function(CallTreeNode)} f Visitor function.
+   */
+  forEachChild(f) {
+    for (let c in this.children) {
+      f(this.children[c]);
+    }
+  }
+
+  /**
+   * Walks up from the current node up to the call tree root.
+   *
+   * @param {function(CallTreeNode)} f Visitor function.
+   */
+  walkUpToRoot(f) {
+    for (let curr = this; curr != null; curr = curr.parent) {
+      f(curr);
+    }
+  }
+
+  /**
+   * Tries to find a node with the specified path.
+   *
+   * @param {Array<string>} labels The path.
+   * @param {function(CallTreeNode)} opt_f Visitor function.
+   */
+  descendToChild(labels, opt_f) {
+    let curr = this;
+    for (let pos = 0; pos < labels.length && curr != null; pos++) {
+      const child = curr.findChild(labels[pos]);
+      if (opt_f) {
+        opt_f(child, pos);
+      }
+      curr = child;
+    }
+    return curr;
+  }
+}
+
+export function JsonProfile() {
+  this.codeMap_ = new CodeMap();
+  this.codeEntries_ = [];
+  this.functionEntries_ = [];
+  this.ticks_ = [];
+  this.scripts_ = [];
+}
+
+JsonProfile.prototype.addLibrary = function (
+  name, startAddr, endAddr) {
+  const entry = new CodeEntry(
+    endAddr - startAddr, name, 'SHARED_LIB');
+  this.codeMap_.addLibrary(startAddr, entry);
+
+  entry.codeId = this.codeEntries_.length;
+  this.codeEntries_.push({ name: entry.name, type: entry.type });
+  return entry;
+};
+
+JsonProfile.prototype.addStaticCode = function (
+  name, startAddr, endAddr) {
+  const entry = new CodeEntry(
+    endAddr - startAddr, name, 'CPP');
+  this.codeMap_.addStaticCode(startAddr, entry);
+
+  entry.codeId = this.codeEntries_.length;
+  this.codeEntries_.push({ name: entry.name, type: entry.type });
+  return entry;
+};
+
+JsonProfile.prototype.addCode = function (
+  kind, name, timestamp, start, size) {
+  let codeId = this.codeEntries_.length;
+  // Find out if we have a static code entry for the code. If yes, we will
+  // make sure it is written to the JSON file just once.
+  let staticEntry = this.codeMap_.findAddress(start);
+  if (staticEntry && staticEntry.entry.type === 'CPP') {
+    codeId = staticEntry.entry.codeId;
+  }
+
+  const entry = new CodeEntry(size, name, 'CODE');
+  this.codeMap_.addCode(start, entry);
+
+  entry.codeId = codeId;
+  this.codeEntries_[codeId] = {
+    name: entry.name,
+    timestamp: timestamp,
+    type: entry.type,
+    kind: kind,
+  };
+
+  return entry;
+};
+
+JsonProfile.prototype.addFuncCode = function (
+  kind, name, timestamp, start, size, funcAddr, state) {
+  // As code and functions are in the same address space,
+  // it is safe to put them in a single code map.
+  let func = this.codeMap_.findDynamicEntryByStartAddress(funcAddr);
+  if (!func) {
+    func = new CodeEntry(0, name, 'SFI');
+    this.codeMap_.addCode(funcAddr, func);
+
+    func.funcId = this.functionEntries_.length;
+    this.functionEntries_.push({ name, codes: [] });
+  } else if (func.name !== name) {
+    // Function object has been overwritten with a new one.
+    func.name = name;
+
+    func.funcId = this.functionEntries_.length;
+    this.functionEntries_.push({ name, codes: [] });
+  }
+  // TODO(jarin): Insert the code object into the SFI's code list.
+  let entry = this.codeMap_.findDynamicEntryByStartAddress(start);
+  if (entry) {
+    if (entry.size === size && entry.func === func) {
+      // Entry state has changed.
+      entry.state = state;
+    } else {
+      this.codeMap_.deleteCode(start);
+      entry = null;
+    }
+  }
+  if (!entry) {
+    entry = new CodeEntry(size, name, 'JS');
+    this.codeMap_.addCode(start, entry);
+
+    entry.codeId = this.codeEntries_.length;
+
+    this.functionEntries_[func.funcId].codes.push(entry.codeId);
+
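+    // Map the numeric optimization state onto the JSON "kind" label.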
+    if (state === 0) {
+      kind = "Builtin";
+    } else if (state === 1) {
+      kind = "Unopt";
+    } else if (state === 2) {
+      kind = "Opt";
+    }
+
+    this.codeEntries_.push({
+      name: entry.name,
+      type: entry.type,
+      kind: kind,
+      func: func.funcId,
+      tm: timestamp,
+    });
+  }
+  return entry;
+};
+
+JsonProfile.prototype.moveCode = function (from, to) {
+  try {
+    this.codeMap_.moveCode(from, to);
+  } catch (e) {
+    printErr(`Move: unknown source ${from}`);
+  }
+};
+
+JsonProfile.prototype.addSourcePositions = function (
+  start, script, startPos, endPos, sourcePositions, inliningPositions,
+  inlinedFunctions) {
+  const entry = this.codeMap_.findDynamicEntryByStartAddress(start);
+  if (!entry) return;
+  const codeId = entry.codeId;
+
+  // Resolve the inlined functions list.
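+  // The list arrives as a single string of the form "S<addr>S<addr>...";
+  // dropping the leading "S" and splitting on "S" yields the SFI start
+  // addresses, which are then mapped to function ids below.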
+  if (inlinedFunctions.length > 0) {
+    inlinedFunctions = inlinedFunctions.substring(1).split("S");
+    for (let i = 0; i < inlinedFunctions.length; i++) {
+      const funcAddr = parseInt(inlinedFunctions[i]);
+      const func = this.codeMap_.findDynamicEntryByStartAddress(funcAddr);
+      if (!func || func.funcId === undefined) {
+        printErr(`Could not find function ${inlinedFunctions[i]}`);
+        inlinedFunctions[i] = null;
+      } else {
+        inlinedFunctions[i] = func.funcId;
+      }
+    }
+  } else {
+    inlinedFunctions = [];
+  }
+
+  this.codeEntries_[entry.codeId].source = {
+    script: script,
+    start: startPos,
+    end: endPos,
+    positions: sourcePositions,
+    inlined: inliningPositions,
+    fns: inlinedFunctions
+  };
+};
+
+JsonProfile.prototype.addScriptSource = function (id, url, source) {
+  this.scripts_[id] = new Script(id, url, source);
+};
+
+JsonProfile.prototype.deoptCode = function (
+  timestamp, code, inliningId, scriptOffset, bailoutType,
+  sourcePositionText, deoptReasonText) {
+  let entry = this.codeMap_.findDynamicEntryByStartAddress(code);
+  if (entry) {
+    let codeId = entry.codeId;
+    if (!this.codeEntries_[codeId].deopt) {
+      // Only add the deopt if there was no deopt before.
+      // The subsequent deoptimizations should be lazy deopts for
+      // other on-stack activations.
+      this.codeEntries_[codeId].deopt = {
+        tm: timestamp,
+        inliningId: inliningId,
+        scriptOffset: scriptOffset,
+        posText: sourcePositionText,
+        reason: deoptReasonText,
+        bailoutType: bailoutType,
+      };
+    }
+  }
+};
+
+JsonProfile.prototype.deleteCode = function (start) {
+  try {
+    this.codeMap_.deleteCode(start);
+  } catch (e) {
+    printErr(`Delete: unknown address ${start}`);
+  }
+};
+
+JsonProfile.prototype.moveFunc = function (from, to) {
+  if (this.codeMap_.findDynamicEntryByStartAddress(from)) {
+    this.codeMap_.moveCode(from, to);
+  }
+};
+
+JsonProfile.prototype.findEntry = function (addr) {
+  return this.codeMap_.findEntry(addr);
+};
+
+JsonProfile.prototype.recordTick = function (time_ns, vmState, stack) {
+  // TODO(jarin) Resolve the frame-less case (when top of stack is
+  // known code).
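+  // Each frame is stored as a pair: (code id, offset into the code object)
+  // when the address resolves, or (-1, raw address) when it does not.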
+  const processedStack = [];
+  for (let i = 0; i < stack.length; i++) {
+    const resolved = this.codeMap_.findAddress(stack[i]);
+    if (resolved) {
+      processedStack.push(resolved.entry.codeId, resolved.offset);
+    } else {
+      processedStack.push(-1, stack[i]);
+    }
+  }
+  this.ticks_.push({ tm: time_ns, vm: vmState, s: processedStack });
+};
+
+function writeJson(s) {
+  write(JSON.stringify(s, null, 2));
+}
+
+JsonProfile.prototype.writeJson = function () {
+  // Write out the JSON in a partially manual way to avoid creating too-large
+  // strings in one JSON.stringify call when there are a lot of ticks.
+  write('{\n');
+
+  write('  "code": ');
+  writeJson(this.codeEntries_);
+  write(',\n');
+
+  write('  "functions": ');
+  writeJson(this.functionEntries_);
+  write(',\n');
+
+  write('  "ticks": [\n');
+  for (let i = 0; i < this.ticks_.length; i++) {
+    write('    ');
+    writeJson(this.ticks_[i]);
+    if (i < this.ticks_.length - 1) {
+      write(',\n');
+    } else {
+      write('\n');
+    }
+  }
+  write('  ],\n');
+
+  write('  "scripts": ');
+  writeJson(this.scripts_);
+
+  write('}\n');
+};
diff --git a/src/third_party/v8/tools/profile_view.js b/src/third_party/v8/tools/profile_view.js
new file mode 100644
index 0000000..d1545ac
--- /dev/null
+++ b/src/third_party/v8/tools/profile_view.js
@@ -0,0 +1,201 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+/**
+ * Creates a Profile View builder object.
+ *
+ * @param {number} samplingRate Number of ms between profiler ticks.
+ * @constructor
+ */
+function ViewBuilder(samplingRate) {
+  this.samplingRate = samplingRate;
+};
+
+
+/**
+ * Builds a profile view for the specified call tree.
+ *
+ * @param {CallTree} callTree A call tree.
+ * @param {boolean} opt_bottomUpViewWeights Whether remapping
+ *     of self weights for a bottom up view is needed.
+ */
+ViewBuilder.prototype.buildView = function(
+    callTree, opt_bottomUpViewWeights) {
+  var head;
+  var samplingRate = this.samplingRate;
+  var createViewNode = this.createViewNode;
+  callTree.traverse(function(node, viewParent) {
+    var totalWeight = node.totalWeight * samplingRate;
+    var selfWeight = node.selfWeight * samplingRate;
+    if (opt_bottomUpViewWeights === true) {
+      if (viewParent === head) {
+        selfWeight = totalWeight;
+      } else {
+        selfWeight = 0;
+      }
+    }
+    var viewNode = createViewNode(node.label, totalWeight, selfWeight, head);
+    if (viewParent) {
+      viewParent.addChild(viewNode);
+    } else {
+      head = viewNode;
+    }
+    return viewNode;
+  });
+  var view = this.createView(head);
+  return view;
+};
+
+
+/**
+ * Factory method for a profile view.
+ *
+ * @param {ProfileView.Node} head View head node.
+ * @return {ProfileView} Profile view.
+ */
+ViewBuilder.prototype.createView = function(head) {
+  return new ProfileView(head);
+};
+
+
+/**
+ * Factory method for a profile view node.
+ *
+ * @param {string} funcName A fully qualified function name.
+ * @param {number} totalTime Amount of time the application spent in the
+ *     corresponding function and its descendants (note that depending on
+ *     the profile they can be either callees or callers).
+ * @param {number} selfTime Amount of time the application spent in the
+ *     corresponding function only.
+ * @param {ProfileView.Node} head Profile view head.
+ * @return {ProfileView.Node} Profile view node.
+ */
+ViewBuilder.prototype.createViewNode = function(
+    funcName, totalTime, selfTime, head) {
+  return new ProfileView.Node(
+      funcName, totalTime, selfTime, head);
+};
+
+
+/**
+ * Creates a Profile View object. It allows performing sorting
+ * and filtering actions on the profile.
+ *
+ * @param {ProfileView.Node} head Head (root) node.
+ * @constructor
+ */
+function ProfileView(head) {
+  this.head = head;
+};
+
+
+/**
+ * Sorts the profile view using the specified sort function.
+ *
+ * @param {function(ProfileView.Node,
+ *     ProfileView.Node):number} sortFunc A sorting
+ *     function. Must comply with Array.sort sorting function requirements.
+ */
+ProfileView.prototype.sort = function(sortFunc) {
+  this.traverse(function (node) {
+    node.sortChildren(sortFunc);
+  });
+};
+
+
+/**
+ * Traverses profile view nodes in preorder.
+ *
+ * @param {function(ProfileView.Node)} f Visitor function.
+ */
+ProfileView.prototype.traverse = function(f) {
+  var nodesToTraverse = new ConsArray();
+  nodesToTraverse.concat([this.head]);
+  while (!nodesToTraverse.atEnd()) {
+    var node = nodesToTraverse.next();
+    f(node);
+    nodesToTraverse.concat(node.children);
+  }
+};
+
+
+/**
+ * Constructs a Profile View node object. Each node object corresponds to
+ * a function call.
+ *
+ * @param {string} internalFuncName A fully qualified function name.
+ * @param {number} totalTime Amount of time the application spent in the
+ *     corresponding function and its descendants (note that depending on
+ *     the profile they can be either callees or callers).
+ * @param {number} selfTime Amount of time the application spent in the
+ *     corresponding function only.
+ * @param {ProfileView.Node} head Profile view head.
+ * @constructor
+ */
+ProfileView.Node = function(
+    internalFuncName, totalTime, selfTime, head) {
+  this.internalFuncName = internalFuncName;
+  this.totalTime = totalTime;
+  this.selfTime = selfTime;
+  this.head = head;
+  this.parent = null;
+  this.children = [];
+};
+
+
+/**
+ * Returns the function's total time as a percentage of its parent's total time.
+ */
+ProfileView.Node.prototype.__defineGetter__(
+    'parentTotalPercent',
+    function() { return this.totalTime /
+      (this.parent ? this.parent.totalTime : this.totalTime) * 100.0; });
+
+
+/**
+ * Adds a child to the node.
+ *
+ * @param {ProfileView.Node} node Child node.
+ */
+ProfileView.Node.prototype.addChild = function(node) {
+  node.parent = this;
+  this.children.push(node);
+};
+
+
+/**
+ * Sorts all the node's children recursively.
+ *
+ * @param {function(ProfileView.Node,
+ *     ProfileView.Node):number} sortFunc A sorting
+ *     function. Must comply with Array.sort sorting function requirements.
+ */
+ProfileView.Node.prototype.sortChildren = function(
+    sortFunc) {
+  this.children.sort(sortFunc);
+};
diff --git a/src/third_party/v8/tools/profile_view.mjs b/src/third_party/v8/tools/profile_view.mjs
new file mode 100644
index 0000000..9ee687d
--- /dev/null
+++ b/src/third_party/v8/tools/profile_view.mjs
@@ -0,0 +1,200 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import { ConsArray } from "./consarray.mjs";
+
+/**
+ * Creates a Profile View builder object.
+ *
+ * @param {number} samplingRate Number of ms between profiler ticks.
+ * @constructor
+ */
+export function ViewBuilder(samplingRate) {
+  this.samplingRate = samplingRate;
+};
+
+
+/**
+ * Builds a profile view for the specified call tree.
+ *
+ * @param {CallTree} callTree A call tree.
+ * @param {boolean} opt_bottomUpViewWeights Whether remapping
+ *     of self weights for a bottom up view is needed.
+ */
+ViewBuilder.prototype.buildView = function(
+    callTree, opt_bottomUpViewWeights) {
+  let head;
+  const samplingRate = this.samplingRate;
+  const createViewNode = this.createViewNode;
+  callTree.traverse(function(node, viewParent) {
+    const totalWeight = node.totalWeight * samplingRate;
+    let selfWeight = node.selfWeight * samplingRate;
+    if (opt_bottomUpViewWeights === true) {
+      if (viewParent === head) {
+        selfWeight = totalWeight;
+      } else {
+        selfWeight = 0;
+      }
+    }
+    const viewNode = createViewNode(node.label, totalWeight, selfWeight, head);
+    if (viewParent) {
+      viewParent.addChild(viewNode);
+    } else {
+      head = viewNode;
+    }
+    return viewNode;
+  });
+  const view = this.createView(head);
+  return view;
+};
+
+
+/**
+ * Factory method for a profile view.
+ *
+ * @param {ProfileView.Node} head View head node.
+ * @return {ProfileView} Profile view.
+ */
+ViewBuilder.prototype.createView = head => new ProfileView(head);
+
+
+/**
+ * Factory method for a profile view node.
+ *
+ * @param {string} funcName A fully qualified function name.
+ * @param {number} totalTime Amount of time the application spent in the
+ *     corresponding function and its descendants (note that depending on
+ *     the profile they can be either callees or callers).
+ * @param {number} selfTime Amount of time the application spent in the
+ *     corresponding function only.
+ * @param {ProfileView.Node} head Profile view head.
+ * @return {ProfileView.Node} Profile view node.
+ */
+ViewBuilder.prototype.createViewNode = (
+    funcName, totalTime, selfTime, head) =>
+  new ProfileView.Node(
+      funcName, totalTime, selfTime, head);
+
+
+/**
+ * Creates a Profile View object. It allows performing sorting
+ * and filtering actions on the profile.
+ *
+ * @param {ProfileView.Node} head Head (root) node.
+ * @constructor
+ */
+export function ProfileView(head) {
+  this.head = head;
+};
+
+
+/**
+ * Sorts the profile view using the specified sort function.
+ *
+ * @param {function(ProfileView.Node,
+ *     ProfileView.Node):number} sortFunc A sorting
+ *     function. Must comply with Array.sort sorting function requirements.
+ */
+ProfileView.prototype.sort = function(sortFunc) {
+  this.traverse(function (node) {
+    node.sortChildren(sortFunc);
+  });
+};
+
+
+/**
+ * Traverses profile view nodes in preorder.
+ *
+ * @param {function(ProfileView.Node)} f Visitor function.
+ */
+ProfileView.prototype.traverse = function(f) {
+  const nodesToTraverse = new ConsArray();
+  nodesToTraverse.concat([this.head]);
+  while (!nodesToTraverse.atEnd()) {
+    const node = nodesToTraverse.next();
+    f(node);
+    nodesToTraverse.concat(node.children);
+  }
+};
+
+
+/**
+ * Constructs a Profile View node object. Each node object corresponds to
+ * a function call.
+ *
+ * @param {string} internalFuncName A fully qualified function name.
+ * @param {number} totalTime Amount of time the application spent in the
+ *     corresponding function and its descendants (note that depending on
+ *     the profile they can be either callees or callers).
+ * @param {number} selfTime Amount of time the application spent in the
+ *     corresponding function only.
+ * @param {ProfileView.Node} head Profile view head.
+ * @constructor
+ */
+ProfileView.Node = function(
+    internalFuncName, totalTime, selfTime, head) {
+  this.internalFuncName = internalFuncName;
+  this.totalTime = totalTime;
+  this.selfTime = selfTime;
+  this.head = head;
+  this.parent = null;
+  this.children = [];
+};
+
+
+/**
+ * Returns the function's total time as a percentage of its parent's total time.
+ */
+ProfileView.Node.prototype.__defineGetter__(
+    'parentTotalPercent',
+    function() { return this.totalTime /
+      (this.parent ? this.parent.totalTime : this.totalTime) * 100.0; });
+
+
+/**
+ * Adds a child to the node.
+ *
+ * @param {ProfileView.Node} node Child node.
+ */
+ProfileView.Node.prototype.addChild = function(node) {
+  node.parent = this;
+  this.children.push(node);
+};
+
+
+/**
+ * Sorts all the node's children recursively.
+ *
+ * @param {function(ProfileView.Node,
+ *     ProfileView.Node):number} sortFunc A sorting
+ *     function. Must comply with Array.sort sorting function requirements.
+ */
+ProfileView.Node.prototype.sortChildren = function(
+    sortFunc) {
+  this.children.sort(sortFunc);
+};
diff --git a/src/third_party/v8/tools/profview/index.html b/src/third_party/v8/tools/profview/index.html
new file mode 100644
index 0000000..8695a41
--- /dev/null
+++ b/src/third_party/v8/tools/profview/index.html
@@ -0,0 +1,119 @@
+<!DOCTYPE html>
+<!-- Copyright 2017 the V8 project authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file. -->
+
+<html lang="en">
+
+<head>
+  <meta charset="utf-8"/>
+  <title>V8 Tick Processor</title>
+  <link rel="stylesheet" href="profview.css">
+  <link href="https://fonts.googleapis.com/css?family=Roboto" rel="stylesheet">
+  <link href="https://fonts.googleapis.com/icon?family=Material+Icons"
+      rel="stylesheet">
+
+  <script src="profview.js"></script>
+  <script src="profile-utils.js"></script>
+</head>
+
+<body onLoad="main.onLoad();" onResize="main.onResize();">
+<h3 style="margin-top: 2px;">
+  Chrome V8 profiling log processor
+</h3>
+
+<input type="file" id="fileinput" /><div id="source-status"></div>
+<br>
+<hr>
+
+<div id="help">
+  Usage:
+
+  <br>
+  <br>
+
+  Record the profile:
+  <pre>
+  d8 --prof your-file.js
+  </pre>
+
+  Then process the file (this resolves C++ symbols and produces
+  a JSON file with the profile data):
+
+  <pre>
+  &lt;v8-dir>/tools/linux-tick-processor --preprocess v8.log > v8.json
+  </pre>
+
+  To view the profile, click the <i>Choose file</i> button above and choose
+  the file in the dialog box.
+
+</div>
+
+<div id="timeline" style="display : none">
+  <div>
+  <canvas id="timeline-canvas"></canvas>
+  </div>
+  <table>
+    <tr id="timeline-legend">
+    </tr>
+  </table>
+  <div>
+    Current code object: <span id="timeline-currentCode"></span>
+    <button id="source-viewer-hide-button">Hide source</button>
+  </div>
+  <div>
+    <table id="source-viewer"> </table>
+  </div>
+</div>
+
+<br>
+
+<div id="mode-bar" style="display : none">
+</div>
+
+<div id="calltree" style="display : none">
+  <br>
+  Attribution:
+  <select id="calltree-attribution">
+  </select>
+  Top-level tree buckets:
+  <select id="calltree-categories">
+  </select>
+  Sort by:
+  <select id="calltree-sort">
+  </select>
+
+  <br>
+  <br>
+
+  <table id="calltree-table" class="calltree">
+    <thead>
+      <tr>
+        <th class="numeric">Time (incl)</th>
+        <th class="numeric">% of parent</th>
+        <th id="calltree-table-own-time-header" class="numeric">Own time</th>
+        <th>Function/category</th>
+        <th class="numeric">Ticks</th>
+        <th id="calltree-table-own-ticks-header" class="numeric">Own ticks</th>
+      </tr>
+    </thead>
+    <tbody>
+    </tbody>
+  </table>
+</div>
+
+<div id="summary" style="display : none">
+</div>
+
+<div id="function-details" style="display : none">
+</div>
+
+<p style="font-style:italic;">
+<br>
+<br>
+<br>
+Copyright the V8 Authors - Last change to this page: 2018/08/13
+</p>
+
+</body>
+</html>
diff --git a/src/third_party/v8/tools/profview/profile-utils.js b/src/third_party/v8/tools/profview/profile-utils.js
new file mode 100644
index 0000000..4be5589
--- /dev/null
+++ b/src/third_party/v8/tools/profview/profile-utils.js
@@ -0,0 +1,611 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
+
+let codeKinds = [
+    "UNKNOWN",
+    "CPPPARSE",
+    "CPPCOMPBC",
+    "CPPCOMP",
+    "CPPGC",
+    "CPPEXT",
+    "CPP",
+    "LIB",
+    "IC",
+    "BC",
+    "STUB",
+    "BUILTIN",
+    "REGEXP",
+    "JSOPT",
+    "JSUNOPT"
+];
+
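+// Maps a code entry from the profile JSON to one of the codeKinds above.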
+function resolveCodeKind(code) {
+  if (!code || !code.type) {
+    return "UNKNOWN";
+  } else if (code.type === "CPP") {
+    return "CPP";
+  } else if (code.type === "SHARED_LIB") {
+    return "LIB";
+  } else if (code.type === "CODE") {
+    if (code.kind === "LoadIC" ||
+        code.kind === "StoreIC" ||
+        code.kind === "KeyedStoreIC" ||
+        code.kind === "KeyedLoadIC" ||
+        code.kind === "LoadGlobalIC" ||
+        code.kind === "Handler") {
+      return "IC";
+    } else if (code.kind === "BytecodeHandler") {
+      return "BC";
+    } else if (code.kind === "Stub") {
+      return "STUB";
+    } else if (code.kind === "Builtin") {
+      return "BUILTIN";
+    } else if (code.kind === "RegExp") {
+      return "REGEXP";
+    }
+    console.log("Unknown CODE: '" + code.kind + "'.");
+    return "CODE";
+  } else if (code.type === "JS") {
+    if (code.kind === "Builtin") {
+      return "JSUNOPT";
+    } else if (code.kind === "Opt") {
+      return "JSOPT";
+    } else if (code.kind === "Unopt") {
+      return "JSUNOPT";
+    }
+  }
+  console.log("Unknown code type '" + type + "'.");
+}
+
+function resolveCodeKindAndVmState(code, vmState) {
+  let kind = resolveCodeKind(code);
+  if (kind === "CPP") {
+    if (vmState === 1) {
+      kind = "CPPGC";
+    } else if (vmState === 2) {
+      kind = "CPPPARSE";
+    } else if (vmState === 3) {
+      kind = "CPPCOMPBC";
+    } else if (vmState === 4) {
+      kind = "CPPCOMP";
+    } else if (vmState === 6) {
+      kind = "CPPEXT";
+    }
+  }
+  return kind;
+}
+
+function codeEquals(code1, code2, allowDifferentKinds = false) {
+  if (!code1 || !code2) return false;
+  if (code1.name !== code2.name || code1.type !== code2.type) return false;
+
+  if (code1.type === 'CODE') {
+    if (!allowDifferentKinds && code1.kind !== code2.kind) return false;
+  } else if (code1.type === 'JS') {
+    if (!allowDifferentKinds && code1.kind !== code2.kind) return false;
+    if (code1.func !== code2.func) return false;
+  }
+  return true;
+}
+
+function createNodeFromStackEntry(code, codeId, vmState) {
+  let name = code ? code.name : "UNKNOWN";
+  let node = createEmptyNode(name);
+  node.codeId = codeId;
+  node.type = resolveCodeKindAndVmState(code, vmState);
+  return node;
+}
+
+function childIdFromCode(codeId, code) {
+  // For a JavaScript function, pretend there is one instance of the optimized
+  // function and one instance of the unoptimized function per SFI.
+  // Otherwise, just compute the id from code id.
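+  // E.g. with func == 7 the optimized and unoptimized instances map to child
+  // ids 29 and 30, while a non-JS code object with codeId == 5 maps to 20;
+  // using multiples of 4 keeps the two id spaces from colliding.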
+  let type = resolveCodeKind(code);
+  if (type === "JSOPT") {
+    return code.func * 4 + 1;
+  } else if (type === "JSUNOPT") {
+    return code.func * 4 + 2;
+  } else {
+    return codeId * 4;
+  }
+}
+
+// We store the list of ticks and the positions within each tick's stack by
+// storing flattened triplets of { tickIndex, depth, count }.
+// Triplet { 123, 2, 3 } encodes positions in ticks 123, 124, 125,
+// all of them at depth 2. The flattened array is used to encode
+// position within the call-tree.
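+// For example, the flattened array [10, 1, 2, 57, 3, 1] stands for positions
+// at depth 1 in ticks 10 and 11, plus a position at depth 3 in tick 57.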
+
+// The following function helps to encode such triplets.
+function addFrameToFrameList(paths, pathIndex, depth) {
+  // Try to combine with the previous code run.
+  if (paths.length > 0 &&
+      paths[paths.length - 3] + 1 === pathIndex &&
+      paths[paths.length - 2] === depth) {
+    paths[paths.length - 1]++;
+  } else {
+    paths.push(pathIndex, depth, 1);
+  }
+}
+
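+// Walks a tick's stack (pairs of code id and offset) starting at stackPos and
+// moving by `step` (+2 or -2); returns the position of the first frame
+// accepted by `filter`, or -1 if none is found.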
+function findNextFrame(file, stack, stackPos, step, filter) {
+  let codeId = -1;
+  let code = null;
+  while (stackPos >= 0 && stackPos < stack.length) {
+    codeId = stack[stackPos];
+    code = codeId >= 0 ? file.code[codeId] : undefined;
+
+    if (filter) {
+      let type = code ? code.type : undefined;
+      let kind = code ? code.kind : undefined;
+      if (filter(type, kind)) return stackPos;
+    }
+    stackPos += step;
+  }
+  return -1;
+}
+
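+// Accounts one tick to the child of `parent` found at position `stackPos` of
+// the given tick's stack, creating the child node on first use and recording
+// the position for later lazy expansion of that child's own subtree.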
+function addOrUpdateChildNode(parent, file, stackIndex, stackPos, ascending) {
+  if (stackPos === -1) {
+    // We reached the end without finding the next step.
+    // If we are doing top-down call tree, update own ticks.
+    if (!ascending) {
+      parent.ownTicks++;
+    }
+    return;
+  }
+
+  let stack = file.ticks[stackIndex].s;
+  console.assert(stackPos >= 0 && stackPos < stack.length);
+  let codeId = stack[stackPos];
+  let code = codeId >= 0 ? file.code[codeId] : undefined;
+  // We found a child node.
+  let childId = childIdFromCode(codeId, code);
+  let child = parent.children[childId];
+  if (!child) {
+    let vmState = file.ticks[stackIndex].vm;
+    child = createNodeFromStackEntry(code, codeId, vmState);
+    child.delayedExpansion = { frameList : [], ascending };
+    parent.children[childId] = child;
+  }
+  child.ticks++;
+  addFrameToFrameList(child.delayedExpansion.frameList, stackIndex, stackPos);
+}
+
+// This expands a tree node (direct children only).
+function expandTreeNode(file, node, filter) {
+  let { frameList, ascending } = node.delayedExpansion;
+
+  let step = ascending ? 2 : -2;
+
+  for (let i = 0; i < frameList.length; i += 3) {
+    let firstStackIndex = frameList[i];
+    let depth = frameList[i + 1];
+    let count = frameList[i + 2];
+    for (let j = 0; j < count; j++) {
+      let stackIndex = firstStackIndex + j;
+      let stack = file.ticks[stackIndex].s;
+
+      // Get to the next frame that has not been filtered out.
+      let stackPos = findNextFrame(file, stack, depth + step, step, filter);
+      addOrUpdateChildNode(node, file, stackIndex, stackPos, ascending);
+    }
+  }
+  node.delayedExpansion = null;
+}
+
+function createEmptyNode(name) {
+  return {
+      name : name,
+      codeId: -1,
+      type : "CAT",
+      children : [],
+      ownTicks : 0,
+      ticks : 0
+  };
+}
+
+class RuntimeCallTreeProcessor {
+  constructor() {
+    this.tree = createEmptyNode("root");
+    this.tree.delayedExpansion = { frameList : [], ascending : false };
+  }
+
+  addStack(file, tickIndex) {
+    this.tree.ticks++;
+
+    let stack = file.ticks[tickIndex].s;
+    let i;
+    for (i = 0; i < stack.length; i += 2) {
+      let codeId = stack[i];
+      if (codeId < 0) return;
+      let code = file.code[codeId];
+      if (code.type !== "CPP" && code.type !== "SHARED_LIB") {
+        i -= 2;
+        break;
+      }
+    }
+    if (i < 0 || i >= stack.length) return;
+    addOrUpdateChildNode(this.tree, file, tickIndex, i, false);
+  }
+}
+
+class PlainCallTreeProcessor {
+  constructor(filter, isBottomUp) {
+    this.filter = filter;
+    this.tree = createEmptyNode("root");
+    this.tree.delayedExpansion = { frameList : [], ascending : isBottomUp };
+    this.isBottomUp = isBottomUp;
+  }
+
+  addStack(file, tickIndex) {
+    let stack = file.ticks[tickIndex].s;
+    let step = this.isBottomUp ? 2 : -2;
+    let start = this.isBottomUp ? 0 : stack.length - 2;
+
+    let stackPos = findNextFrame(file, stack, start, step, this.filter);
+    addOrUpdateChildNode(this.tree, file, tickIndex, stackPos, this.isBottomUp);
+
+    this.tree.ticks++;
+  }
+}
+
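+// Builds the fixed top-level category nodes and a lookup table from code kind
+// (e.g. "JSOPT", "CPPGC") to its category node.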
+function buildCategoryTreeAndLookup() {
+  let root = createEmptyNode("root");
+  let categories = {};
+  function addCategory(name, types) {
+    let n = createEmptyNode(name);
+    for (let i = 0; i < types.length; i++) {
+      categories[types[i]] = n;
+    }
+    root.children.push(n);
+  }
+  addCategory("JS Optimized", [ "JSOPT" ]);
+  addCategory("JS Unoptimized", [ "JSUNOPT", "BC" ]);
+  addCategory("IC", [ "IC" ]);
+  addCategory("RegExp", [ "REGEXP" ]);
+  addCategory("Other generated", [ "STUB", "BUILTIN" ]);
+  addCategory("C++", [ "CPP", "LIB" ]);
+  addCategory("C++/GC", [ "CPPGC" ]);
+  addCategory("C++/Parser", [ "CPPPARSE" ]);
+  addCategory("C++/Bytecode compiler", [ "CPPCOMPBC" ]);
+  addCategory("C++/Compiler", [ "CPPCOMP" ]);
+  addCategory("C++/External", [ "CPPEXT" ]);
+  addCategory("Unknown", [ "UNKNOWN" ]);
+
+  return { categories, root };
+}
+
+class CategorizedCallTreeProcessor {
+  constructor(filter, isBottomUp) {
+    this.filter = filter;
+    let { categories, root } = buildCategoryTreeAndLookup();
+
+    this.tree = root;
+    this.categories = categories;
+    this.isBottomUp = isBottomUp;
+  }
+
+  addStack(file, tickIndex) {
+    let stack = file.ticks[tickIndex].s;
+    let vmState = file.ticks[tickIndex].vm;
+    if (stack.length === 0) return;
+    let codeId = stack[0];
+    let code = codeId >= 0 ? file.code[codeId] : undefined;
+    let kind = resolveCodeKindAndVmState(code, vmState);
+    let node = this.categories[kind];
+
+    this.tree.ticks++;
+    node.ticks++;
+
+    let step = this.isBottomUp ? 2 : -2;
+    let start = this.isBottomUp ? 0 : stack.length - 2;
+
+    let stackPos = findNextFrame(file, stack, start, step, this.filter);
+    addOrUpdateChildNode(node, file, tickIndex, stackPos, this.isBottomUp);
+  }
+}
+
+class FunctionListTree {
+  constructor(filter, withCategories) {
+    if (withCategories) {
+      let { categories, root } = buildCategoryTreeAndLookup();
+      this.tree = root;
+      this.categories = categories;
+    } else {
+      this.tree = createEmptyNode("root");
+      this.categories = null;
+    }
+
+    this.codeVisited = [];
+    this.filter = filter;
+  }
+
+  addStack(file, tickIndex) {
+    let stack = file.ticks[tickIndex].s;
+    let vmState = file.ticks[tickIndex].vm;
+
+    this.tree.ticks++;
+    let child = null;
+    let tree = null;
+    for (let i = stack.length - 2; i >= 0; i -= 2) {
+      let codeId = stack[i];
+      if (codeId < 0 || this.codeVisited[codeId]) continue;
+
+      let code = file.code[codeId];
+      if (this.filter) {
+        let type = code ? code.type : undefined;
+        let kind = code ? code.kind : undefined;
+        if (!this.filter(type, kind)) continue;
+      }
+      let childId = childIdFromCode(codeId, code);
+      if (this.categories) {
+        let kind = resolveCodeKindAndVmState(code, vmState);
+        tree = this.categories[kind];
+      } else {
+        tree = this.tree;
+      }
+      child = tree.children[childId];
+      if (!child) {
+        child = createNodeFromStackEntry(code, codeId, vmState);
+        child.children[0] = createEmptyNode("Top-down tree");
+        child.children[0].delayedExpansion =
+          { frameList : [], ascending : false };
+        child.children[1] = createEmptyNode("Bottom-up tree");
+        child.children[1].delayedExpansion =
+          { frameList : [], ascending : true };
+        tree.children[childId] = child;
+      }
+      child.ticks++;
+      child.children[0].ticks++;
+      addFrameToFrameList(
+          child.children[0].delayedExpansion.frameList, tickIndex, i);
+      child.children[1].ticks++;
+      addFrameToFrameList(
+          child.children[1].delayedExpansion.frameList, tickIndex, i);
+      this.codeVisited[codeId] = true;
+    }
+    if (child) {
+      child.ownTicks++;
+      console.assert(tree !== null);
+      tree.ticks++;
+      console.assert(tree.type === "CAT");
+    }
+
+    for (let i = 0; i < stack.length; i += 2) {
+      let codeId = stack[i];
+      if (codeId >= 0) this.codeVisited[codeId] = false;
+    }
+  }
+}
+
+
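+// Splits the profiled time range into `bucketCount` equal buckets and counts,
+// per bucket, how many ticks fall into each code kind.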
+class CategorySampler {
+  constructor(file, bucketCount) {
+    this.bucketCount = bucketCount;
+
+    this.firstTime = file.ticks[0].tm;
+    let lastTime = file.ticks[file.ticks.length - 1].tm;
+    this.step = (lastTime - this.firstTime) / bucketCount;
+
+    this.buckets = [];
+    let bucket = {};
+    for (let i = 0; i < codeKinds.length; i++) {
+      bucket[codeKinds[i]] = 0;
+    }
+    for (let i = 0; i < bucketCount; i++) {
+      this.buckets.push(Object.assign({ total : 0 }, bucket));
+    }
+  }
+
+  addStack(file, tickIndex) {
+    let { tm : timestamp, vm : vmState, s : stack } = file.ticks[tickIndex];
+
+    let i = Math.floor((timestamp - this.firstTime) / this.step);
+    if (i === this.buckets.length) i--;
+    console.assert(i >= 0 && i < this.buckets.length);
+
+    let bucket = this.buckets[i];
+    bucket.total++;
+
+    let codeId = (stack.length > 0) ? stack[0] : -1;
+    let code = codeId >= 0 ? file.code[codeId] : undefined;
+    let kind = resolveCodeKindAndVmState(code, vmState);
+    bucket[kind]++;
+  }
+}
+
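+// Collects contiguous time blocks during which the code object identified by
+// `functionCodeId` (or another code object of the same function) appears on
+// the sampled stack, noting whether it was at the top of the stack.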
+class FunctionTimelineProcessor {
+  constructor(functionCodeId, filter) {
+    this.functionCodeId = functionCodeId;
+    this.filter = filter;
+    this.blocks = [];
+    this.currentBlock = null;
+  }
+
+  addStack(file, tickIndex) {
+    if (!this.functionCodeId) return;
+
+    let { tm : timestamp, vm : vmState, s : stack } = file.ticks[tickIndex];
+    let functionCode = file.code[this.functionCodeId];
+
+    // Find if the function is on the stack, and its position on the stack,
+    // ignoring any filtered entries.
+    let stackCode = undefined;
+    let functionPosInStack = -1;
+    let filteredI = 0;
+    for (let i = 0; i < stack.length - 1; i += 2) {
+      let codeId = stack[i];
+      let code = codeId >= 0 ? file.code[codeId] : undefined;
+      let type = code ? code.type : undefined;
+      let kind = code ? code.kind : undefined;
+      if (!this.filter(type, kind)) continue;
+
+      // Match other instances of the same function (e.g. unoptimised, various
+      // different optimised versions).
+      if (codeEquals(code, functionCode, true)) {
+        functionPosInStack = filteredI;
+        stackCode = code;
+        break;
+      }
+      filteredI++;
+    }
+
+    if (functionPosInStack >= 0) {
+      let stackKind = resolveCodeKindAndVmState(stackCode, vmState);
+
+      let codeIsTopOfStack = (functionPosInStack === 0);
+
+      if (this.currentBlock !== null) {
+        this.currentBlock.end = timestamp;
+
+        if (codeIsTopOfStack === this.currentBlock.topOfStack
+          && stackKind === this.currentBlock.kind) {
+          // If we haven't changed the stack top or the function kind, then
+          // we're happy just extending the current block and not starting
+          // a new one.
+          return;
+        }
+      }
+
+      // Start a new block at the current timestamp.
+      this.currentBlock = {
+        start: timestamp,
+        end: timestamp,
+        code: stackCode,
+        kind: stackKind,
+        topOfStack: codeIsTopOfStack
+      };
+      this.blocks.push(this.currentBlock);
+    } else {
+      this.currentBlock = null;
+    }
+  }
+}
+
+// Generates a tree out of a ticks sequence.
+// {file} is the JSON file with the ticks and code objects.
+// {startTime} and {endTime} define the interval.
+// {tree} is the processor of stacks.
+function generateTree(
+    file, startTime, endTime, tree) {
+  let ticks = file.ticks;
+  let i = 0;
+  while (i < ticks.length && ticks[i].tm < startTime) {
+    i++;
+  }
+
+  let tickCount = 0;
+  while (i < ticks.length && ticks[i].tm < endTime) {
+    tree.addStack(file, i);
+    i++;
+    tickCount++;
+  }
+
+  return tickCount;
+}
+
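+// Scans all JS functions in the profile and collects, for the given time
+// interval, per-function lists of optimizations and of eager/lazy/soft
+// deoptimizations, together with overall function counts.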
+function computeOptimizationStats(file,
+    timeStart = -Infinity, timeEnd = Infinity) {
+  function newCollection() {
+    return { count : 0, functions : [], functionTable : [] };
+  }
+  function addToCollection(collection, code) {
+    collection.count++;
+    let funcData = collection.functionTable[code.func];
+    if (!funcData) {
+      funcData = { f : file.functions[code.func], instances : [] };
+      collection.functionTable[code.func] = funcData;
+      collection.functions.push(funcData);
+    }
+    funcData.instances.push(code);
+  }
+
+  let functionCount = 0;
+  let optimizedFunctionCount = 0;
+  let deoptimizedFunctionCount = 0;
+  let optimizations = newCollection();
+  let eagerDeoptimizations = newCollection();
+  let softDeoptimizations = newCollection();
+  let lazyDeoptimizations = newCollection();
+
+  for (let i = 0; i < file.functions.length; i++) {
+    let f = file.functions[i];
+
+    // Skip special SFIs that do not correspond to JS functions.
+    if (f.codes.length === 0) continue;
+    if (file.code[f.codes[0]].type !== "JS") continue;
+
+    functionCount++;
+    let optimized = false;
+    let deoptimized = false;
+
+    for (let j = 0; j < f.codes.length; j++) {
+      let code = file.code[f.codes[j]];
+      console.assert(code.type === "JS");
+      if (code.kind === "Opt") {
+        optimized = true;
+        if (code.tm >= timeStart && code.tm <= timeEnd) {
+          addToCollection(optimizations, code);
+        }
+      }
+      if (code.deopt) {
+        deoptimized = true;
+        if (code.deopt.tm >= timeStart && code.deopt.tm <= timeEnd) {
+          switch (code.deopt.bailoutType) {
+            case "lazy":
+              addToCollection(lazyDeoptimizations, code);
+              break;
+            case "eager":
+              addToCollection(eagerDeoptimizations, code);
+              break;
+            case "soft":
+              addToCollection(softDeoptimizations, code);
+              break;
+          }
+        }
+      }
+    }
+    if (optimized) {
+      optimizedFunctionCount++;
+    }
+    if (deoptimized) {
+      deoptimizedFunctionCount++;
+    }
+  }
+
+  function sortCollection(collection) {
+    collection.functions.sort(
+        (a, b) => a.instances.length - b.instances.length);
+  }
+
+  sortCollection(eagerDeoptimizations);
+  sortCollection(lazyDeoptimizations);
+  sortCollection(softDeoptimizations);
+  sortCollection(optimizations);
+
+  return {
+    functionCount,
+    optimizedFunctionCount,
+    deoptimizedFunctionCount,
+    optimizations,
+    eagerDeoptimizations,
+    lazyDeoptimizations,
+    softDeoptimizations,
+  };
+}
+
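+// Strips the common leading whitespace from every line in `lines`, in place.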
+function normalizeLeadingWhitespace(lines) {
+  let regex = /^\s*/;
+  let minimumLeadingWhitespaceChars = Infinity;
+  for (let line of lines) {
+    minimumLeadingWhitespaceChars =
+        Math.min(minimumLeadingWhitespaceChars, regex.exec(line)[0].length);
+  }
+  for (let i = 0; i < lines.length; i++) {
+    lines[i] = lines[i].substring(minimumLeadingWhitespaceChars);
+  }
+}
diff --git a/src/third_party/v8/tools/profview/profview.css b/src/third_party/v8/tools/profview/profview.css
new file mode 100644
index 0000000..ca39745
--- /dev/null
+++ b/src/third_party/v8/tools/profview/profview.css
@@ -0,0 +1,118 @@
+table.calltree {
+  width : 100%;
+}
+
+td {
+  padding-top: 0.1em;
+  padding-bottom: 0.1em;
+}
+
+.numeric {
+  width : 12ex;
+}
+
+.numeric-hidden {
+  display : none;
+}
+
+body {
+  font-family: 'Roboto', sans-serif;
+}
+
+#source-status {
+  display: inline-block;
+}
+
+.tree-row-arrow {
+  margin-right: 0.2em;
+  text-align: right;
+}
+
+.code-type-chip {
+  border-radius : 1em;
+  padding : 0.2em;
+  background-color : #4040c0;
+  color: #ffffff;
+  font-size : small;
+  box-shadow: 0 2px 5px 0 rgba(0, 0, 0, 0.16), 0 2px 10px 0 rgba(0, 0, 0, 0.12);
+}
+
+.tree-row-name {
+  margin-left: 0.2em;
+  margin-right: 0.2em;
+}
+
+.codeid-link {
+  text-decoration: underline;
+  cursor: pointer;
+}
+
+.view-source-link {
+  text-decoration: underline;
+  cursor: pointer;
+  font-size: 10pt;
+  margin-left: 0.6em;
+  color: #555555;
+}
+
+#source-viewer {
+  border: 1px solid black;
+  padding: 0.2em;
+  font-family: 'Roboto Mono', monospace;
+  white-space: pre;
+  margin-top: 1em;
+  margin-bottom: 1em;
+}
+
+#source-viewer td.line-none {
+  background-color: white;
+}
+
+#source-viewer td.line-cold {
+  background-color: #e1f5fe;
+}
+
+#source-viewer td.line-mediumcold {
+  background-color: #b2ebf2;
+}
+
+#source-viewer td.line-mediumhot {
+  background-color: #c5e1a5;
+}
+
+#source-viewer td.line-hot {
+  background-color: #dce775;
+}
+
+#source-viewer td.line-superhot {
+  background-color: #ffee58;
+}
+
+#source-viewer .source-line-number {
+  padding-left: 0.2em;
+  padding-right: 0.2em;
+  color: #003c8f;
+  background-color: #eceff1;
+}
+
+div.mode-button {
+  padding: 1em 3em;
+  display: inline-block;
+  background-color: #6070ff;
+  color : #ffffff;
+  margin: 0 0.2em 2em 0;
+  box-shadow: 3px 3px 2px #d0d0ff;
+}
+
+div.mode-button:hover {
+  background-color: #4858ff;
+}
+
+div.active-mode-button {
+  background-color: #0000ff;
+  box-shadow: 3px 3px 2px #a0a0ff;
+}
+
+div.active-mode-button:hover {
+  background-color: #0000ff;
+}
diff --git a/src/third_party/v8/tools/profview/profview.js b/src/third_party/v8/tools/profview/profview.js
new file mode 100644
index 0000000..210cec7
--- /dev/null
+++ b/src/third_party/v8/tools/profview/profview.js
@@ -0,0 +1,1512 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict"
+
+function $(id) {
+  return document.getElementById(id);
+}
+
+function removeAllChildren(element) {
+  while (element.firstChild) {
+    element.removeChild(element.firstChild);
+  }
+}
+
+let components;
+function createViews() {
+  components = [
+    new CallTreeView(),
+    new TimelineView(),
+    new HelpView(),
+    new SummaryView(),
+    new ModeBarView(),
+    new ScriptSourceView(),
+  ];
+}
+
+function emptyState() {
+  return {
+    file : null,
+    mode : null,
+    currentCodeId : null,
+    viewingSource: false,
+    start : 0,
+    end : Infinity,
+    timelineSize : {
+      width : 0,
+      height : 0
+    },
+    callTree : {
+      attribution : "js-exclude-bc",
+      categories : "code-type",
+      sort : "time"
+    },
+    sourceData: null
+  };
+}
+
+function setCallTreeState(state, callTreeState) {
+  state = Object.assign({}, state);
+  state.callTree = callTreeState;
+  return state;
+}
+
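+// Central UI state holder: state changes copy `currentState` rather than
+// mutating it, and delayRender() coalesces re-renders so that all views are
+// rendered at most once per animation frame.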
+let main = {
+  currentState : emptyState(),
+  renderPending : false,
+
+  setMode(mode) {
+    if (mode !== main.currentState.mode) {
+
+      function setCallTreeModifiers(attribution, categories, sort) {
+        let callTreeState = Object.assign({}, main.currentState.callTree);
+        callTreeState.attribution = attribution;
+        callTreeState.categories = categories;
+        callTreeState.sort = sort;
+        return callTreeState;
+      }
+
+      let state = Object.assign({}, main.currentState);
+
+      switch (mode) {
+        case "bottom-up":
+          state.callTree =
+              setCallTreeModifiers("js-exclude-bc", "code-type", "time");
+          break;
+        case "top-down":
+          state.callTree =
+              setCallTreeModifiers("js-exclude-bc", "none", "time");
+          break;
+        case "function-list":
+          state.callTree =
+              setCallTreeModifiers("js-exclude-bc", "code-type", "own-time");
+          break;
+      }
+
+      state.mode = mode;
+
+      main.currentState = state;
+      main.delayRender();
+    }
+  },
+
+  setCallTreeAttribution(attribution) {
+    if (attribution !== main.currentState.callTree.attribution) {
+      let callTreeState = Object.assign({}, main.currentState.callTree);
+      callTreeState.attribution = attribution;
+      main.currentState = setCallTreeState(main.currentState,  callTreeState);
+      main.delayRender();
+    }
+  },
+
+  setCallTreeSort(sort) {
+    if (sort !== main.currentState.callTree.sort) {
+      let callTreeState = Object.assign({}, main.currentState.callTree);
+      callTreeState.sort = sort;
+      main.currentState = setCallTreeState(main.currentState,  callTreeState);
+      main.delayRender();
+    }
+  },
+
+  setCallTreeCategories(categories) {
+    if (categories !== main.currentState.callTree.categories) {
+      let callTreeState = Object.assign({}, main.currentState.callTree);
+      callTreeState.categories = categories;
+      main.currentState = setCallTreeState(main.currentState,  callTreeState);
+      main.delayRender();
+    }
+  },
+
+  setViewInterval(start, end) {
+    if (start !== main.currentState.start ||
+        end !== main.currentState.end) {
+      main.currentState = Object.assign({}, main.currentState);
+      main.currentState.start = start;
+      main.currentState.end = end;
+      main.delayRender();
+    }
+  },
+
+  updateSources(file) {
+    let statusDiv = $("source-status");
+    if (!file) {
+      statusDiv.textContent = "";
+      return;
+    }
+    if (!file.scripts || file.scripts.length === 0) {
+      statusDiv.textContent =
+          "Script source not available. Run profiler with --log-source-code.";
+      return;
+    }
+    statusDiv.textContent = "Script source is available.";
+    main.currentState.sourceData = new SourceData(file);
+  },
+
+  setFile(file) {
+    if (file !== main.currentState.file) {
+      let lastMode = main.currentState.mode || "summary";
+      main.currentState = emptyState();
+      main.currentState.file = file;
+      main.updateSources(file);
+      main.setMode(lastMode);
+      main.delayRender();
+    }
+  },
+
+  setCurrentCode(codeId) {
+    if (codeId !== main.currentState.currentCodeId) {
+      main.currentState = Object.assign({}, main.currentState);
+      main.currentState.currentCodeId = codeId;
+      main.delayRender();
+    }
+  },
+
+  setViewingSource(value) {
+    if (main.currentState.viewingSource !== value) {
+      main.currentState = Object.assign({}, main.currentState);
+      main.currentState.viewingSource = value;
+      main.delayRender();
+    }
+  },
+
+  onResize() {
+    main.delayRender();
+  },
+
+  onLoad() {
+    function loadHandler(evt) {
+      let f = evt.target.files[0];
+      if (f) {
+        let reader = new FileReader();
+        reader.onload = function(event) {
+          main.setFile(JSON.parse(event.target.result));
+        };
+        reader.onerror = function(event) {
+          console.error(
+              "File could not be read! Code " + event.target.error.code);
+        };
+        reader.readAsText(f);
+      } else {
+        main.setFile(null);
+      }
+    }
+    $("fileinput").addEventListener(
+        "change", loadHandler, false);
+    createViews();
+  },
+
+  delayRender()  {
+    if (main.renderPending) return;
+    main.renderPending = true;
+
+    window.requestAnimationFrame(() => {
+      main.renderPending = false;
+      for (let c of components) {
+        c.render(main.currentState);
+      }
+    });
+  }
+};
+
+const CATEGORY_COLOR = "#f5f5f5";
+const bucketDescriptors =
+    [ { kinds : [ "JSOPT" ],
+        color : "#64dd17",
+        backgroundColor : "#80e27e",
+        text : "JS Optimized" },
+      { kinds : [ "JSUNOPT", "BC" ],
+        color : "#dd2c00",
+        backgroundColor : "#ff9e80",
+        text : "JS Unoptimized" },
+      { kinds : [ "IC" ],
+        color : "#ff6d00",
+        backgroundColor : "#ffab40",
+        text : "IC" },
+      { kinds : [ "STUB", "BUILTIN", "REGEXP" ],
+        color : "#ffd600",
+        backgroundColor : "#ffea00",
+        text : "Other generated" },
+      { kinds : [ "CPP", "LIB" ],
+        color : "#304ffe",
+        backgroundColor : "#6ab7ff",
+        text : "C++" },
+      { kinds : [ "CPPEXT" ],
+        color : "#003c8f",
+        backgroundColor : "#c0cfff",
+        text : "C++/external" },
+      { kinds : [ "CPPPARSE" ],
+        color : "#aa00ff",
+        backgroundColor : "#ffb2ff",
+        text : "C++/Parser" },
+      { kinds : [ "CPPCOMPBC" ],
+        color : "#43a047",
+        backgroundColor : "#88c399",
+        text : "C++/Bytecode compiler" },
+      { kinds : [ "CPPCOMP" ],
+        color : "#00e5ff",
+        backgroundColor : "#6effff",
+        text : "C++/Compiler" },
+      { kinds : [ "CPPGC" ],
+        color : "#6200ea",
+        backgroundColor : "#e1bee7",
+        text : "C++/GC" },
+      { kinds : [ "UNKNOWN" ],
+        color : "#bdbdbd",
+        backgroundColor : "#efefef",
+        text : "Unknown" }
+    ];
+
+let kindToBucketDescriptor = {};
+for (let i = 0; i < bucketDescriptors.length; i++) {
+  let bucket = bucketDescriptors[i];
+  for (let j = 0; j < bucket.kinds.length; j++) {
+    kindToBucketDescriptor[bucket.kinds[j]] = bucket;
+  }
+}
+
+function bucketFromKind(kind) {
+  for (let i = 0; i < bucketDescriptors.length; i++) {
+    let bucket = bucketDescriptors[i];
+    for (let j = 0; j < bucket.kinds.length; j++) {
+      if (bucket.kinds[j] === kind) {
+        return bucket;
+      }
+    }
+  }
+  return null;
+}
+
+function codeTypeToText(type) {
+  switch (type) {
+    case "UNKNOWN":
+      return "Unknown";
+    case "CPPPARSE":
+      return "C++ Parser";
+    case "CPPCOMPBC":
+      return "C++ Bytecode Compiler)";
+    case "CPPCOMP":
+      return "C++ Compiler";
+    case "CPPGC":
+      return "C++ GC";
+    case "CPPEXT":
+      return "C++ External";
+    case "CPP":
+      return "C++";
+    case "LIB":
+      return "Library";
+    case "IC":
+      return "IC";
+    case "BC":
+      return "Bytecode";
+    case "STUB":
+      return "Stub";
+    case "BUILTIN":
+      return "Builtin";
+    case "REGEXP":
+      return "RegExp";
+    case "JSOPT":
+      return "JS opt";
+    case "JSUNOPT":
+      return "JS unopt";
+  }
+  console.error("Unknown type: " + type);
+}
+
+function createTypeNode(type) {
+  if (type === "CAT") {
+    return document.createTextNode("");
+  }
+  let span = document.createElement("span");
+  span.classList.add("code-type-chip");
+  span.textContent = codeTypeToText(type);
+
+  return span;
+}
+
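+// Maps the attribution <select> value to a predicate over (type, kind) that
+// decides which stack frames are kept when building the call tree.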
+function filterFromFilterId(id) {
+  switch (id) {
+    case "full-tree":
+      return (type, kind) => true;
+    case "js-funs":
+      return (type, kind) => type !== 'CODE';
+    case "js-exclude-bc":
+      return (type, kind) =>
+          type !== 'CODE' || kind !== "BytecodeHandler";
+  }
+}
+
+function createIndentNode(indent) {
+  let div = document.createElement("div");
+  div.style.display = "inline-block";
+  div.style.width = (indent + 0.5) + "em";
+  return div;
+}
+
+function createArrowNode() {
+  let span = document.createElement("span");
+  span.classList.add("tree-row-arrow");
+  return span;
+}
+
+function createFunctionNode(name, codeId) {
+  let nameElement = document.createElement("span");
+  nameElement.appendChild(document.createTextNode(name));
+  nameElement.classList.add("tree-row-name");
+  if (codeId !== -1) {
+    nameElement.classList.add("codeid-link");
+    nameElement.onclick = (event) => {
+      main.setCurrentCode(codeId);
+      // Prevent the click from bubbling to the row and causing it to
+      // collapse/expand.
+      event.stopPropagation();
+    };
+  }
+  return nameElement;
+}
+
+function createViewSourceNode(codeId) {
+  let linkElement = document.createElement("span");
+  linkElement.appendChild(document.createTextNode("View source"));
+  linkElement.classList.add("view-source-link");
+  linkElement.onclick = (event) => {
+    main.setCurrentCode(codeId);
+    main.setViewingSource(true);
+    // Prevent the click from bubbling to the row and causing it to
+    // collapse/expand.
+    event.stopPropagation();
+  };
+  return linkElement;
+}
+
+const COLLAPSED_ARROW = "\u25B6";
+const EXPANDED_ARROW = "\u25BC";
+
+class CallTreeView {
+  constructor() {
+    this.element = $("calltree");
+    this.treeElement = $("calltree-table");
+    this.selectAttribution = $("calltree-attribution");
+    this.selectCategories = $("calltree-categories");
+    this.selectSort = $("calltree-sort");
+
+    this.selectAttribution.onchange = () => {
+      main.setCallTreeAttribution(this.selectAttribution.value);
+    };
+
+    this.selectCategories.onchange = () => {
+      main.setCallTreeCategories(this.selectCategories.value);
+    };
+
+    this.selectSort.onchange = () => {
+      main.setCallTreeSort(this.selectSort.value);
+    };
+
+    this.currentState = null;
+  }
+
+  sortFromId(id) {
+    switch (id) {
+      case "time":
+        return (c1, c2) => {
+          if (c1.ticks < c2.ticks) return 1;
+          else if (c1.ticks > c2.ticks) return -1;
+          return c2.ownTicks - c1.ownTicks;
+        };
+      case "own-time":
+        return (c1, c2) => {
+          if (c1.ownTicks < c2.ownTicks) return 1;
+          else if (c1.ownTicks > c2.ownTicks) return -1;
+          return c2.ticks - c1.ticks;
+        };
+      case "category-time":
+        return (c1, c2) => {
+          if (c1.type === c2.type) return c2.ticks - c1.ticks;
+          if (c1.type < c2.type) return 1;
+          return -1;
+        };
+      case "category-own-time":
+        return (c1, c2) => {
+          if (c1.type === c2.type) return c2.ownTicks - c1.ownTicks;
+          if (c1.type < c2.type) return 1;
+          return -1;
+        };
+    }
+  }
+
+  expandTree(tree, indent) {
+    let index = 0;
+    let id = "R/";
+    let row = tree.row;
+
+    if (row) {
+      index = row.rowIndex;
+      id = row.id;
+
+      tree.arrow.textContent = EXPANDED_ARROW;
+      // Collapse the children when the row is clicked again.
+      let expandHandler = row.onclick;
+      row.onclick = () => {
+        this.collapseRow(tree, expandHandler);
+      }
+    }
+
+    // Collect the children, and sort them by ticks.
+    let children = [];
+    let filter =
+        filterFromFilterId(this.currentState.callTree.attribution);
+    for (let childId in tree.children) {
+      let child = tree.children[childId];
+      if (child.ticks > 0) {
+        children.push(child);
+        if (child.delayedExpansion) {
+          expandTreeNode(this.currentState.file, child, filter);
+        }
+      }
+    }
+    children.sort(this.sortFromId(this.currentState.callTree.sort));
+
+    for (let i = 0; i < children.length; i++) {
+      let node = children[i];
+      let row = this.rows.insertRow(index);
+      row.id = id + i + "/";
+
+      if (node.type === "CAT") {
+        row.style.backgroundColor = CATEGORY_COLOR;
+      } else {
+        row.style.backgroundColor = bucketFromKind(node.type).backgroundColor;
+      }
+
+      // Inclusive time % cell.
+      let c = row.insertCell();
+      c.textContent = (node.ticks * 100 / this.tickCount).toFixed(2) + "%";
+      c.style.textAlign = "right";
+      // Percent-of-parent cell.
+      c = row.insertCell();
+      c.textContent = (node.ticks * 100 / tree.ticks).toFixed(2) + "%";
+      c.style.textAlign = "right";
+      // Exclusive time % cell.
+      if (this.currentState.mode !== "bottom-up") {
+        c = row.insertCell(-1);
+        c.textContent = (node.ownTicks * 100 / this.tickCount).toFixed(2) + "%";
+        c.style.textAlign = "right";
+      }
+
+      // Create the name cell.
+      let nameCell = row.insertCell();
+      nameCell.appendChild(createIndentNode(indent + 1));
+      let arrow = createArrowNode();
+      nameCell.appendChild(arrow);
+      nameCell.appendChild(createTypeNode(node.type));
+      nameCell.appendChild(createFunctionNode(node.name, node.codeId));
+      if (main.currentState.sourceData &&
+          node.codeId >= 0 &&
+          main.currentState.sourceData.hasSource(
+              this.currentState.file.code[node.codeId].func)) {
+        nameCell.appendChild(createViewSourceNode(node.codeId));
+      }
+
+      // Inclusive ticks cell.
+      c = row.insertCell();
+      c.textContent = node.ticks;
+      c.style.textAlign = "right";
+      if (this.currentState.mode !== "bottom-up") {
+        // Exclusive ticks cell.
+        c = row.insertCell(-1);
+        c.textContent = node.ownTicks;
+        c.style.textAlign = "right";
+      }
+      if (node.children.length > 0) {
+        arrow.textContent = COLLAPSED_ARROW;
+        row.onclick = () => { this.expandTree(node, indent + 1); };
+      }
+
+      node.row = row;
+      node.arrow = arrow;
+
+      index++;
+    }
+  }
+
+  collapseRow(tree, expandHandler) {
+    let row = tree.row;
+    let id = row.id;
+    let index = row.rowIndex;
+    while (row.rowIndex < this.rows.rows.length &&
+        this.rows.rows[index].id.startsWith(id)) {
+      this.rows.deleteRow(index);
+    }
+
+    tree.arrow.textContent = COLLAPSED_ARROW;
+    row.onclick = expandHandler;
+  }
+
+  fillSelects(mode, calltree) {
+    function addOptions(e, values, current) {
+      while (e.options.length > 0) {
+        e.remove(0);
+      }
+      for (let i = 0; i < values.length; i++) {
+        let option = document.createElement("option");
+        option.value = values[i].value;
+        option.textContent = values[i].text;
+        e.appendChild(option);
+      }
+      e.value = current;
+    }
+
+    let attributions = [
+        { value : "js-exclude-bc",
+          text : "Attribute bytecode handlers to caller" },
+        { value : "full-tree",
+          text : "Count each code object separately" },
+        { value : "js-funs",
+          text : "Attribute non-functions to JS functions"  }
+    ];
+
+    switch (mode) {
+      case "bottom-up":
+        addOptions(this.selectAttribution, attributions, calltree.attribution);
+        addOptions(this.selectCategories, [
+            { value : "code-type", text : "Code type" },
+            { value : "none", text : "None" }
+        ], calltree.categories);
+        addOptions(this.selectSort, [
+            { value : "time", text : "Time (including children)" },
+            { value : "category-time", text : "Code category, time" },
+        ], calltree.sort);
+        return;
+      case "top-down":
+        addOptions(this.selectAttribution, attributions, calltree.attribution);
+        addOptions(this.selectCategories, [
+            { value : "none", text : "None" },
+            { value : "rt-entry", text : "Runtime entries" }
+        ], calltree.categories);
+        addOptions(this.selectSort, [
+            { value : "time", text : "Time (including children)" },
+            { value : "own-time", text : "Own time" },
+            { value : "category-time", text : "Code category, time" },
+            { value : "category-own-time", text : "Code category, own time"}
+        ], calltree.sort);
+        return;
+      case "function-list":
+        addOptions(this.selectAttribution, attributions, calltree.attribution);
+        addOptions(this.selectCategories, [
+            { value : "code-type", text : "Code type" },
+            { value : "none", text : "None" }
+        ], calltree.categories);
+        addOptions(this.selectSort, [
+            { value : "own-time", text : "Own time" },
+            { value : "time", text : "Time (including children)" },
+            { value : "category-own-time", text : "Code category, own time"},
+            { value : "category-time", text : "Code category, time" },
+        ], calltree.sort);
+        return;
+    }
+    console.error("Unexpected mode");
+  }
+
+  static isCallTreeMode(mode) {
+    switch (mode) {
+      case "bottom-up":
+      case "top-down":
+      case "function-list":
+        return true;
+      default:
+        return false;
+    }
+  }
+
+  render(newState) {
+    let oldState = this.currentState;
+    if (!newState.file || !CallTreeView.isCallTreeMode(newState.mode)) {
+      this.element.style.display = "none";
+      this.currentState = null;
+      return;
+    }
+
+    this.currentState = newState;
+    if (oldState) {
+      if (newState.file === oldState.file &&
+          newState.start === oldState.start &&
+          newState.end === oldState.end &&
+          newState.mode === oldState.mode &&
+          newState.callTree.attribution === oldState.callTree.attribution &&
+          newState.callTree.categories === oldState.callTree.categories &&
+          newState.callTree.sort === oldState.callTree.sort) {
+        // No change => just return.
+        return;
+      }
+    }
+
+    this.element.style.display = "inherit";
+
+    let mode = this.currentState.mode;
+    if (!oldState || mode !== oldState.mode) {
+      // Technically, we should also call this if attribution, categories or
+      // sort change, but the selection is already highlighted by the combobox
+      // itself, so we do not need to do anything here.
+      this.fillSelects(newState.mode, newState.callTree);
+    }
+
+    let ownTimeClass = (mode === "bottom-up") ? "numeric-hidden" : "numeric";
+    let ownTimeTh = $(this.treeElement.id + "-own-time-header");
+    ownTimeTh.classList = ownTimeClass;
+    let ownTicksTh = $(this.treeElement.id + "-own-ticks-header");
+    ownTicksTh.classList = ownTimeClass;
+
+    // Build the tree.
+    let stackProcessor;
+    let filter = filterFromFilterId(this.currentState.callTree.attribution);
+    if (mode === "top-down") {
+      if (this.currentState.callTree.categories === "rt-entry") {
+        stackProcessor =
+            new RuntimeCallTreeProcessor();
+      } else {
+        stackProcessor =
+            new PlainCallTreeProcessor(filter, false);
+      }
+    } else if (mode === "function-list") {
+      stackProcessor = new FunctionListTree(
+          filter, this.currentState.callTree.categories === "code-type");
+
+    } else {
+      console.assert(mode === "bottom-up");
+      if (this.currentState.callTree.categories === "none") {
+        stackProcessor =
+            new PlainCallTreeProcessor(filter, true);
+      } else {
+        console.assert(this.currentState.callTree.categories === "code-type");
+        stackProcessor =
+            new CategorizedCallTreeProcessor(filter, true);
+      }
+    }
+    this.tickCount =
+        generateTree(this.currentState.file,
+                     this.currentState.start,
+                     this.currentState.end,
+                     stackProcessor);
+    // TODO(jarin) Handle the case when tick count is negative.
+
+    this.tree = stackProcessor.tree;
+
+    // Remove old content of the table, replace with new one.
+    let oldRows = this.treeElement.getElementsByTagName("tbody");
+    let newRows = document.createElement("tbody");
+    this.rows = newRows;
+
+    // Populate the table.
+    this.expandTree(this.tree, 0);
+
+    // Swap in the new rows.
+    this.treeElement.replaceChild(newRows, oldRows[0]);
+  }
+}
+
+class TimelineView {
+  constructor() {
+    this.element = $("timeline");
+    this.canvas = $("timeline-canvas");
+    this.legend = $("timeline-legend");
+    this.currentCode = $("timeline-currentCode");
+
+    this.canvas.onmousedown = this.onMouseDown.bind(this);
+    this.canvas.onmouseup = this.onMouseUp.bind(this);
+    this.canvas.onmousemove = this.onMouseMove.bind(this);
+
+    this.selectionStart = null;
+    this.selectionEnd = null;
+    this.selecting = false;
+
+    this.fontSize = 12;
+    this.imageOffset = Math.round(this.fontSize * 1.2);
+    this.functionTimelineHeight = 24;
+    this.functionTimelineTickHeight = 16;
+
+    this.currentState = null;
+  }
+
+  onMouseDown(e) {
+    this.selectionStart =
+        e.clientX - this.canvas.getBoundingClientRect().left;
+    this.selectionEnd = this.selectionStart + 1;
+    this.selecting = true;
+  }
+
+  onMouseMove(e) {
+    if (this.selecting) {
+      this.selectionEnd =
+          e.clientX - this.canvas.getBoundingClientRect().left;
+      this.drawSelection();
+    }
+  }
+
+  onMouseUp(e) {
+    if (this.selectionStart !== null) {
+      let x = e.clientX - this.canvas.getBoundingClientRect().left;
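+      // A movement of less than 10 pixels is treated as a plain click and
+      // clears the current selection.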
+      if (Math.abs(x - this.selectionStart) < 10) {
+        this.selectionStart = null;
+        this.selectionEnd = null;
+        let ctx = this.canvas.getContext("2d");
+        ctx.drawImage(this.buffer, 0, this.imageOffset);
+      } else {
+        this.selectionEnd = x;
+        this.drawSelection();
+      }
+      let file = this.currentState.file;
+      if (file) {
+        let start = this.selectionStart === null ? 0 : this.selectionStart;
+        let end = this.selectionEnd === null ? Infinity : this.selectionEnd;
+        let firstTime = file.ticks[0].tm;
+        let lastTime = file.ticks[file.ticks.length - 1].tm;
+
+        let width = this.buffer.width;
+
+        start = (start / width) * (lastTime - firstTime) + firstTime;
+        end = (end / width) * (lastTime - firstTime) + firstTime;
+
+        if (end < start) {
+          let temp = start;
+          start = end;
+          end = temp;
+        }
+
+        main.setViewInterval(start, end);
+      }
+    }
+    this.selecting = false;
+  }
+
+  drawSelection() {
+    let ctx = this.canvas.getContext("2d");
+
+    // Draw the timeline image.
+    ctx.drawImage(this.buffer, 0, this.imageOffset);
+
+    // Draw the current interval highlight.
+    let left;
+    let right;
+    if (this.selectionStart !== null && this.selectionEnd !== null) {
+      ctx.fillStyle = "rgba(0, 0, 0, 0.3)";
+      left = Math.min(this.selectionStart, this.selectionEnd);
+      right = Math.max(this.selectionStart, this.selectionEnd);
+      let height = this.buffer.height - this.functionTimelineHeight;
+      ctx.fillRect(0, this.imageOffset, left, height);
+      ctx.fillRect(right, this.imageOffset, this.buffer.width - right, height);
+    } else {
+      left = 0;
+      right = this.buffer.width;
+    }
+
+    // Draw the scale text.
+    let file = this.currentState.file;
+    ctx.fillStyle = "white";
+    ctx.fillRect(0, 0, this.canvas.width, this.imageOffset);
+    if (file && file.ticks.length > 0) {
+      let firstTime = file.ticks[0].tm;
+      let lastTime = file.ticks[file.ticks.length - 1].tm;
+
+      let leftTime =
+          firstTime + left / this.canvas.width * (lastTime - firstTime);
+      let rightTime =
+          firstTime + right / this.canvas.width * (lastTime - firstTime);
+
+      let leftText = (leftTime / 1000000).toFixed(3) + "s";
+      let rightText = (rightTime / 1000000).toFixed(3) + "s";
+
+      ctx.textBaseline = 'top';
+      ctx.font = this.fontSize + "px Arial";
+      ctx.fillStyle = "black";
+
+      let leftWidth = ctx.measureText(leftText).width;
+      let rightWidth = ctx.measureText(rightText).width;
+
+      let leftStart = left - leftWidth / 2;
+      let rightStart = right - rightWidth / 2;
+
+      if (leftStart < 0) leftStart = 0;
+      if (rightStart + rightWidth > this.canvas.width) {
+        rightStart = this.canvas.width - rightWidth;
+      }
+      if (leftStart + leftWidth > rightStart) {
+        if (leftStart > this.canvas.width - (rightStart - rightWidth)) {
+          rightStart = leftStart + leftWidth;
+
+        } else {
+          leftStart = rightStart - leftWidth;
+        }
+      }
+
+      ctx.fillText(leftText, leftStart, 0);
+      ctx.fillText(rightText, rightStart, 0);
+    }
+  }
+
+  render(newState) {
+    let oldState = this.currentState;
+
+    if (!newState.file) {
+      this.element.style.display = "none";
+      return;
+    }
+
+    let width = Math.round(document.documentElement.clientWidth - 20);
+    let height = Math.round(document.documentElement.clientHeight / 5);
+
+    if (oldState) {
+      if (width === oldState.timelineSize.width &&
+          height === oldState.timelineSize.height &&
+          newState.file === oldState.file &&
+          newState.currentCodeId === oldState.currentCodeId &&
+          newState.start === oldState.start &&
+          newState.end === oldState.end) {
+        // No change, nothing to do.
+        return;
+      }
+    }
+    this.currentState = newState;
+    this.currentState.timelineSize.width = width;
+    this.currentState.timelineSize.height = height;
+
+    this.element.style.display = "inherit";
+
+    let file = this.currentState.file;
+
+    const minPixelsPerBucket = 10;
+    const minTicksPerBucket = 8;
+    let maxBuckets = Math.round(file.ticks.length / minTicksPerBucket);
+    let bucketCount = Math.min(
+        Math.round(width / minPixelsPerBucket), maxBuckets);
+
+    // Make sure the canvas has the right dimensions.
+    this.canvas.width = width;
+    this.canvas.height = height;
+
+    // Make space for the selection text.
+    height -= this.imageOffset;
+
+    let currentCodeId = this.currentState.currentCodeId;
+
+    let firstTime = file.ticks[0].tm;
+    let lastTime = file.ticks[file.ticks.length - 1].tm;
+    let start = Math.max(this.currentState.start, firstTime);
+    let end = Math.min(this.currentState.end, lastTime);
+
+    this.selectionStart = (start - firstTime) / (lastTime - firstTime) * width;
+    this.selectionEnd = (end - firstTime) / (lastTime - firstTime) * width;
+
+    let stackProcessor = new CategorySampler(file, bucketCount);
+    generateTree(file, 0, Infinity, stackProcessor);
+    let codeIdProcessor = new FunctionTimelineProcessor(
+      currentCodeId,
+      filterFromFilterId(this.currentState.callTree.attribution));
+    generateTree(file, 0, Infinity, codeIdProcessor);
+
+    let buffer = document.createElement("canvas");
+
+    buffer.width = width;
+    buffer.height = height;
+
+    // Calculate the bar heights for each bucket.
+    let graphHeight = height - this.functionTimelineHeight;
+    let buckets = stackProcessor.buckets;
+    let bucketsGraph = [];
+    for (let i = 0; i < buckets.length; i++) {
+      let sum = 0;
+      let bucketData = [];
+      let total = buckets[i].total;
+      if (total > 0) {
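+        // "sum" is cumulative across descriptors, so each bucketData entry is
+        // the stacked height up to and including that category.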
+        for (let j = 0; j < bucketDescriptors.length; j++) {
+          let desc = bucketDescriptors[j];
+          for (let k = 0; k < desc.kinds.length; k++) {
+            sum += buckets[i][desc.kinds[k]];
+          }
+          bucketData.push(Math.round(graphHeight * sum / total));
+        }
+      } else {
+        // No ticks fell into this bucket. Fill with "Unknown."
+        for (let j = 0; j < bucketDescriptors.length; j++) {
+          let desc = bucketDescriptors[j];
+          bucketData.push(desc.text === "Unknown" ? graphHeight : 0);
+        }
+      }
+      bucketsGraph.push(bucketData);
+    }
+
+    // Draw the category graph into the buffer.
+    let bucketWidth = width / (bucketsGraph.length - 1);
+    let ctx = buffer.getContext('2d');
+    for (let i = 0; i < bucketsGraph.length - 1; i++) {
+      let bucketData = bucketsGraph[i];
+      let nextBucketData = bucketsGraph[i + 1];
+      let x1 = Math.round(i * bucketWidth);
+      let x2 = Math.round((i + 1) * bucketWidth);
+      for (let j = 0; j < bucketData.length; j++) {
+        ctx.beginPath();
+        ctx.moveTo(x1, j > 0 ? bucketData[j - 1] : 0);
+        ctx.lineTo(x2, j > 0 ? nextBucketData[j - 1] : 0);
+        ctx.lineTo(x2, nextBucketData[j]);
+        ctx.lineTo(x1, bucketData[j]);
+        ctx.closePath();
+        ctx.fillStyle = bucketDescriptors[j].color;
+        ctx.fill();
+      }
+    }
+
+    // Draw the function ticks.
+    let functionTimelineYOffset = graphHeight;
+    let functionTimelineTickHeight = this.functionTimelineTickHeight;
+    let functionTimelineHalfHeight =
+        Math.round(functionTimelineTickHeight / 2);
+    let timestampScaler = width / (lastTime - firstTime);
+    let timestampToX = (t) => Math.round((t - firstTime) * timestampScaler);
+    ctx.fillStyle = "white";
+    ctx.fillRect(
+      0,
+      functionTimelineYOffset,
+      buffer.width,
+      this.functionTimelineHeight);
+    for (let i = 0; i < codeIdProcessor.blocks.length; i++) {
+      let block = codeIdProcessor.blocks[i];
+      let bucket = kindToBucketDescriptor[block.kind];
+      ctx.fillStyle = bucket.color;
+      ctx.fillRect(
+        timestampToX(block.start),
+        functionTimelineYOffset,
+        Math.max(1, Math.round((block.end - block.start) * timestampScaler)),
+        block.topOfStack ?
+            functionTimelineTickHeight : functionTimelineHalfHeight);
+    }
+    ctx.strokeStyle = "black";
+    ctx.lineWidth = "1";
+    ctx.beginPath();
+    ctx.moveTo(0, functionTimelineYOffset + 0.5);
+    ctx.lineTo(buffer.width, functionTimelineYOffset + 0.5);
+    ctx.stroke();
+    ctx.strokeStyle = "rgba(0,0,0,0.2)";
+    ctx.lineWidth = "1";
+    ctx.beginPath();
+    ctx.moveTo(0, functionTimelineYOffset + functionTimelineHalfHeight - 0.5);
+    ctx.lineTo(buffer.width,
+        functionTimelineYOffset + functionTimelineHalfHeight - 0.5);
+    ctx.stroke();
+
+    // Draw marks for optimizations and deoptimizations in the function
+    // timeline.
+    if (currentCodeId && currentCodeId >= 0 &&
+        file.code[currentCodeId].func) {
+      let y = Math.round(functionTimelineYOffset + functionTimelineTickHeight +
+          (this.functionTimelineHeight - functionTimelineTickHeight) / 2);
+      let func = file.functions[file.code[currentCodeId].func];
+      for (let i = 0; i < func.codes.length; i++) {
+        let code = file.code[func.codes[i]];
+        if (code.kind === "Opt") {
+          if (code.deopt) {
+            // Draw deoptimization mark.
+            let x = timestampToX(code.deopt.tm);
+            ctx.lineWidth = 0.7;
+            ctx.strokeStyle = "red";
+            ctx.beginPath();
+            ctx.moveTo(x - 3, y - 3);
+            ctx.lineTo(x + 3, y + 3);
+            ctx.stroke();
+            ctx.beginPath();
+            ctx.moveTo(x - 3, y + 3);
+            ctx.lineTo(x + 3, y - 3);
+            ctx.stroke();
+          }
+          // Draw optimization mark.
+          let x = timestampToX(code.tm);
+          ctx.lineWidth = 0.7;
+          ctx.strokeStyle = "blue";
+          ctx.beginPath();
+          ctx.moveTo(x - 3, y - 3);
+          ctx.lineTo(x, y);
+          ctx.stroke();
+          ctx.beginPath();
+          ctx.moveTo(x - 3, y + 3);
+          ctx.lineTo(x, y);
+          ctx.stroke();
+        } else {
+          // Draw code creation mark.
+          let x = Math.round(timestampToX(code.tm));
+          ctx.beginPath();
+          ctx.fillStyle = "black";
+          ctx.arc(x, y, 3, 0, 2 * Math.PI);
+          ctx.fill();
+        }
+      }
+    }
+
+    // Remember stuff for later.
+    this.buffer = buffer;
+
+    // Draw the buffer.
+    this.drawSelection();
+
+    // (Re-)Populate the graph legend.
+    while (this.legend.cells.length > 0) {
+      this.legend.deleteCell(0);
+    }
+    let cell = this.legend.insertCell(-1);
+    cell.textContent = "Legend: ";
+    cell.style.padding = "1ex";
+    for (let i = 0; i < bucketDescriptors.length; i++) {
+      let cell = this.legend.insertCell(-1);
+      cell.style.padding = "1ex";
+      let desc = bucketDescriptors[i];
+      let div = document.createElement("div");
+      div.style.display = "inline-block";
+      div.style.width = "0.6em";
+      div.style.height = "1.2ex";
+      div.style.backgroundColor = desc.color;
+      div.style.borderStyle = "solid";
+      div.style.borderWidth = "1px";
+      div.style.borderColor = "Black";
+      cell.appendChild(div);
+      cell.appendChild(document.createTextNode(" " + desc.text));
+    }
+
+    removeAllChildren(this.currentCode);
+    if (currentCodeId) {
+      let currentCode = file.code[currentCodeId];
+      this.currentCode.appendChild(document.createTextNode(currentCode.name));
+    } else {
+      this.currentCode.appendChild(document.createTextNode("<none>"));
+    }
+  }
+}
+
+class ModeBarView {
+  constructor() {
+    let modeBar = this.element = $("mode-bar");
+
+    function addMode(id, text, active) {
+      let div = document.createElement("div");
+      div.classList = "mode-button" + (active ? " active-mode-button" : "");
+      div.id = "mode-" + id;
+      div.textContent = text;
+      div.onclick = () => {
+        if (main.currentState.mode === id) return;
+        let old = $("mode-" + main.currentState.mode);
+        old.classList = "mode-button";
+        div.classList = "mode-button active-mode-button";
+        main.setMode(id);
+      };
+      modeBar.appendChild(div);
+    }
+
+    addMode("summary", "Summary", true);
+    addMode("bottom-up", "Bottom up");
+    addMode("top-down", "Top down");
+    addMode("function-list", "Functions");
+  }
+
+  render(newState) {
+    if (!newState.file) {
+      this.element.style.display = "none";
+      return;
+    }
+
+    this.element.style.display = "inherit";
+  }
+}
+
+class SummaryView {
+  constructor() {
+    this.element = $("summary");
+    this.currentState = null;
+  }
+
+  render(newState) {
+    let oldState = this.currentState;
+
+    if (!newState.file || newState.mode !== "summary") {
+      this.element.style.display = "none";
+      this.currentState = null;
+      return;
+    }
+
+    this.currentState = newState;
+    if (oldState) {
+      if (newState.file === oldState.file &&
+          newState.start === oldState.start &&
+          newState.end === oldState.end) {
+        // No change, nothing to do.
+        return;
+      }
+    }
+
+    this.element.style.display = "inherit";
+    removeAllChildren(this.element);
+
+    let stats = computeOptimizationStats(
+        this.currentState.file, newState.start, newState.end);
+
+    let table = document.createElement("table");
+    let rows = document.createElement("tbody");
+
+    function addRow(text, number, indent) {
+      let row = rows.insertRow(-1);
+      let textCell = row.insertCell(-1);
+      textCell.textContent = text;
+      let numberCell = row.insertCell(-1);
+      numberCell.textContent = number;
+      if (indent) {
+        textCell.style.textIndent = indent + "em";
+        numberCell.style.textIndent = indent + "em";
+      }
+      return row;
+    }
+
+    function makeCollapsible(row, arrow) {
+      arrow.textContent = EXPANDED_ARROW;
+      let expandHandler = row.onclick;
+      row.onclick = () => {
+        let id = row.id;
+        let index = row.rowIndex + 1;
+        while (index < rows.rows.length &&
+          rows.rows[index].id.startsWith(id)) {
+          rows.deleteRow(index);
+        }
+        arrow.textContent = COLLAPSED_ARROW;
+        row.onclick = expandHandler;
+      }
+    }
+
+    function expandDeoptInstances(row, arrow, instances, indent, kind) {
+      let index = row.rowIndex;
+      for (let i = 0; i < instances.length; i++) {
+        let childRow = rows.insertRow(index + 1);
+        childRow.id = row.id + i + "/";
+
+        let deopt = instances[i].deopt;
+
+        let textCell = childRow.insertCell(-1);
+        textCell.appendChild(document.createTextNode(deopt.posText));
+        textCell.style.textIndent = indent + "em";
+        let reasonCell = childRow.insertCell(-1);
+        reasonCell.appendChild(
+            document.createTextNode("Reason: " + deopt.reason));
+        reasonCell.style.textIndent = indent + "em";
+      }
+      makeCollapsible(row, arrow);
+    }
+
+    function expandDeoptFunctionList(row, arrow, list, indent, kind) {
+      let index = row.rowIndex;
+      for (let i = 0; i < list.length; i++) {
+        let childRow = rows.insertRow(index + 1);
+        childRow.id = row.id + i + "/";
+
+        let textCell = childRow.insertCell(-1);
+        textCell.appendChild(createIndentNode(indent));
+        let childArrow = createArrowNode();
+        textCell.appendChild(childArrow);
+        textCell.appendChild(
+            createFunctionNode(list[i].f.name, list[i].f.codes[0]));
+
+        let numberCell = childRow.insertCell(-1);
+        numberCell.textContent = list[i].instances.length;
+        numberCell.style.textIndent = indent + "em";
+
+        childArrow.textContent = COLLAPSED_ARROW;
+        childRow.onclick = () => {
+          expandDeoptInstances(
+              childRow, childArrow, list[i].instances, indent + 1);
+        };
+      }
+      makeCollapsible(row, arrow);
+    }
+
+    function expandOptimizedFunctionList(row, arrow, list, indent, kind) {
+      let index = row.rowIndex;
+      for (let i = 0; i < list.length; i++) {
+        let childRow = rows.insertRow(index + 1);
+        childRow.id = row.id + i + "/";
+
+        let textCell = childRow.insertCell(-1);
+        textCell.appendChild(
+            createFunctionNode(list[i].f.name, list[i].f.codes[0]));
+        textCell.style.textIndent = indent + "em";
+
+        let numberCell = childRow.insertCell(-1);
+        numberCell.textContent = list[i].instances.length;
+        numberCell.style.textIndent = indent + "em";
+      }
+      makeCollapsible(row, arrow);
+    }
+
+    function addExpandableRow(text, list, indent, kind) {
+      let row = rows.insertRow(-1);
+
+      row.id = "opt-table/" + kind + "/";
+      row.style.backgroundColor = CATEGORY_COLOR;
+
+      let textCell = row.insertCell(-1);
+      textCell.appendChild(createIndentNode(indent));
+      let arrow = createArrowNode();
+      textCell.appendChild(arrow);
+      textCell.appendChild(document.createTextNode(text));
+
+      let numberCell = row.insertCell(-1);
+      numberCell.textContent = list.count;
+      if (indent) {
+        numberCell.style.textIndent = indent + "em";
+      }
+
+      if (list.count > 0) {
+        arrow.textContent = COLLAPSED_ARROW;
+        if (kind === "opt") {
+          row.onclick = () => {
+            expandOptimizedFunctionList(
+                row, arrow, list.functions, indent + 1, kind);
+          };
+        } else {
+          row.onclick = () => {
+            expandDeoptFunctionList(
+                row, arrow, list.functions, indent + 1, kind);
+          };
+        }
+      }
+      return row;
+    }
+
+    addRow("Total function count:", stats.functionCount);
+    addRow("Optimized function count:", stats.optimizedFunctionCount, 1);
+    addRow("Deoptimized function count:", stats.deoptimizedFunctionCount, 2);
+
+    addExpandableRow("Optimization count:", stats.optimizations, 0, "opt");
+    let deoptCount = stats.eagerDeoptimizations.count +
+        stats.softDeoptimizations.count + stats.lazyDeoptimizations.count;
+    addRow("Deoptimization count:", deoptCount);
+    addExpandableRow("Eager:", stats.eagerDeoptimizations, 1, "eager");
+    addExpandableRow("Lazy:", stats.lazyDeoptimizations, 1, "lazy");
+    addExpandableRow("Soft:", stats.softDeoptimizations, 1, "soft");
+
+    table.appendChild(rows);
+    this.element.appendChild(table);
+  }
+}
+
+class ScriptSourceView {
+  constructor() {
+    this.table = $("source-viewer");
+    this.hideButton = $("source-viewer-hide-button");
+    this.hideButton.onclick = () => {
+      main.setViewingSource(false);
+    };
+  }
+
+  render(newState) {
+    let oldState = this.currentState;
+    if (!newState.file || !newState.viewingSource) {
+      this.table.style.display = "none";
+      this.hideButton.style.display = "none";
+      this.currentState = null;
+      return;
+    }
+    if (oldState) {
+      if (newState.file === oldState.file &&
+          newState.currentCodeId === oldState.currentCodeId &&
+          newState.viewingSource === oldState.viewingSource) {
+        // No change, nothing to do.
+        return;
+      }
+    }
+    this.currentState = newState;
+
+    this.table.style.display = "inline-block";
+    this.hideButton.style.display = "inline";
+    removeAllChildren(this.table);
+
+    let functionId =
+        this.currentState.file.code[this.currentState.currentCodeId].func;
+    let sourceView =
+        this.currentState.sourceData.generateSourceView(functionId);
+    for (let i = 0; i < sourceView.source.length; i++) {
+      let sampleCount = sourceView.lineSampleCounts[i] || 0;
+      let sampleProportion = sourceView.samplesTotal > 0 ?
+                             sampleCount / sourceView.samplesTotal : 0;
+      let heatBucket;
+      if (sampleProportion === 0) {
+        heatBucket = "line-none";
+      } else if (sampleProportion < 0.2) {
+        heatBucket = "line-cold";
+      } else if (sampleProportion < 0.4) {
+        heatBucket = "line-mediumcold";
+      } else if (sampleProportion < 0.6) {
+        heatBucket = "line-mediumhot";
+      } else if (sampleProportion < 0.8) {
+        heatBucket = "line-hot";
+      } else {
+        heatBucket = "line-superhot";
+      }
+
+      let row = this.table.insertRow(-1);
+
+      let lineNumberCell = row.insertCell(-1);
+      lineNumberCell.classList.add("source-line-number");
+      lineNumberCell.textContent = i + sourceView.firstLineNumber;
+
+      let sampleCountCell = row.insertCell(-1);
+      sampleCountCell.classList.add(heatBucket);
+      sampleCountCell.textContent = sampleCount;
+
+      let sourceLineCell = row.insertCell(-1);
+      sourceLineCell.classList.add(heatBucket);
+      sourceLineCell.textContent = sourceView.source[i];
+    }
+
+    $("timeline-currentCode").scrollIntoView();
+  }
+}
+
+class SourceData {
+  constructor(file) {
+    this.scripts = new Map();
+    for (let i = 0; i < file.scripts.length; i++) {
+      const scriptBlock = file.scripts[i];
+      if (scriptBlock === null) continue; // Array may be sparse.
+      let source = scriptBlock.source.split("\n");
+      this.scripts.set(i, source);
+    }
+
+    this.functions = new Map();
+    for (let codeId = 0; codeId < file.code.length; ++codeId) {
+      let codeBlock = file.code[codeId];
+      if (codeBlock.source && codeBlock.func !== undefined) {
+        let data = this.functions.get(codeBlock.func);
+        if (!data) {
+          data = new FunctionSourceData(codeBlock.source.script,
+                                        codeBlock.source.start,
+                                        codeBlock.source.end);
+          this.functions.set(codeBlock.func, data);
+        }
+        data.addSourceBlock(codeId, codeBlock.source);
+      }
+    }
+
+    for (let tick of file.ticks) {
+      let stack = tick.s;
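+      // Each stack is a flat array of alternating (codeId, codeOffset) pairs.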
+      for (let i = 0; i < stack.length; i += 2) {
+        let codeId = stack[i];
+        if (codeId < 0) continue;
+        let functionId = file.code[codeId].func;
+        if (this.functions.has(functionId)) {
+          let codeOffset = stack[i + 1];
+          this.functions.get(functionId).addOffsetSample(codeId, codeOffset);
+        }
+      }
+    }
+  }
+
+  getScript(scriptId) {
+    return this.scripts.get(scriptId);
+  }
+
+  getLineForScriptOffset(script, scriptOffset) {
+    let line = 0;
+    let charsConsumed = 0;
+    for (; line < script.length; ++line) {
+      charsConsumed += script[line].length + 1; // Add 1 for newline.
+      if (charsConsumed > scriptOffset) break;
+    }
+    return line;
+  }
+
+  hasSource(functionId) {
+    return this.functions.has(functionId);
+  }
+
+  generateSourceView(functionId) {
+    console.assert(this.hasSource(functionId));
+    let data = this.functions.get(functionId);
+    let scriptId = data.scriptId;
+    let script = this.getScript(scriptId);
+    let firstLineNumber =
+        this.getLineForScriptOffset(script, data.startScriptOffset);
+    let lastLineNumber =
+        this.getLineForScriptOffset(script, data.endScriptOffset);
+    let lines = script.slice(firstLineNumber, lastLineNumber + 1);
+    normalizeLeadingWhitespace(lines);
+
+    let samplesTotal = 0;
+    let lineSampleCounts = [];
+    for (let [codeId, block] of data.codes) {
+      block.offsets.forEach((sampleCount, codeOffset) => {
+        let sourceOffset = block.positionTable.getScriptOffset(codeOffset);
+        let lineNumber =
+            this.getLineForScriptOffset(script, sourceOffset) - firstLineNumber;
+        samplesTotal += sampleCount;
+        lineSampleCounts[lineNumber] =
+            (lineSampleCounts[lineNumber] || 0) + sampleCount;
+      });
+    }
+
+    return {
+      source: lines,
+      lineSampleCounts: lineSampleCounts,
+      samplesTotal: samplesTotal,
+      firstLineNumber: firstLineNumber + 1  // Source code is 1-indexed.
+    };
+  }
+}
+
+class FunctionSourceData {
+  constructor(scriptId, startScriptOffset, endScriptOffset) {
+    this.scriptId = scriptId;
+    this.startScriptOffset = startScriptOffset;
+    this.endScriptOffset = endScriptOffset;
+
+    this.codes = new Map();
+  }
+
+  addSourceBlock(codeId, source) {
+    this.codes.set(codeId, {
+      positionTable: new SourcePositionTable(source.positions),
+      offsets: []
+    });
+  }
+
+  addOffsetSample(codeId, codeOffset) {
+    let codeIdOffsets = this.codes.get(codeId).offsets;
+    codeIdOffsets[codeOffset] = (codeIdOffsets[codeOffset] || 0) + 1;
+  }
+}
+
+class SourcePositionTable {
+  constructor(encodedTable) {
+    this.offsetTable = [];
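+    // The encoded table is a sequence of "C<codeOffset>O<scriptOffset>"
+    // pairs, e.g. "C0O10C12O42".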
+    let offsetPairRegex = /C([0-9]+)O([0-9]+)/g;
+    while (true) {
+      let regexResult = offsetPairRegex.exec(encodedTable);
+      if (!regexResult) break;
+      let codeOffset = parseInt(regexResult[1]);
+      let scriptOffset = parseInt(regexResult[2]);
+      if (isNaN(codeOffset) || isNaN(scriptOffset)) continue;
+      this.offsetTable.push(codeOffset, scriptOffset);
+    }
+  }
+
+  getScriptOffset(codeOffset) {
+    console.assert(codeOffset >= 0);
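+    // Scan the table backwards for the last entry whose code offset does not
+    // exceed the requested offset; fall back to the first script offset.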
+    for (let i = this.offsetTable.length - 2; i >= 0; i -= 2) {
+      if (this.offsetTable[i] <= codeOffset) {
+        return this.offsetTable[i + 1];
+      }
+    }
+    return this.offsetTable[1];
+  }
+}
+
+class HelpView {
+  constructor() {
+    this.element = $("help");
+  }
+
+  render(newState) {
+    this.element.style.display = newState.file ? "none" : "inherit";
+  }
+}
diff --git a/src/third_party/v8/tools/regexp-sequences.py b/src/third_party/v8/tools/regexp-sequences.py
new file mode 100755
index 0000000..9f920dd
--- /dev/null
+++ b/src/third_party/v8/tools/regexp-sequences.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+python %prog trace-file
+
+Parses output generated by v8 with flag --trace-regexp-bytecodes and generates
+a list of the most common sequences.
+"""
+
+from __future__ import print_function
+
+import sys
+import re
+import collections
+
+def parse(file, seqlen):
+  # example:
+  # pc = 00, sp = 0, curpos = 0, curchar = 0000000a ..., bc = PUSH_BT, 02, 00, 00, 00, e8, 00, 00, 00 .......
+  rx = re.compile(r'pc = (?P<pc>[0-9a-f]+), sp = (?P<sp>\d+), '
+                  r'curpos = (?P<curpos>\d+), curchar = (?P<char_hex>[0-9a-f]+) '
+                  r'(?:\.|\()(?P<char>\.|\w)(?:\.|\)), bc = (?P<bc>\w+), .*')
+  total = 0
+  bc_cnt = [None] * seqlen
+  for i in range(seqlen):
+    bc_cnt[i] = {}
+  last = [None] * seqlen
+  with open(file) as f:
+    l = f.readline()
+    while l:
+      l = l.strip()
+      if l.startswith("Start bytecode interpreter"):
+        for i in range(seqlen):
+          last[i] = collections.deque(maxlen=i+1)
+
+      match = rx.search(l)
+      if match:
+        total += 1
+        bc = match.group('bc')
+        for i in range(seqlen):
+          last[i].append(bc)
+          key = ' --> '.join(last[i])
+          bc_cnt[i][key] = bc_cnt[i].get(key,0) + 1
+
+      l = f.readline()
+  return bc_cnt, total
+
+def print_most_common(d, seqlen, total):
+  sorted_d = sorted(d.items(), key=lambda kv: kv[1], reverse=True)
+  for (k,v) in sorted_d:
+    if v*100/total < 1.0:
+      return
+    print("{}: {} ({} %)".format(k,v,(v*100/total)))
+
+def main(argv):
+  max_seq = 7
+  bc_cnt, total = parse(argv[1],max_seq)
+  for i in range(max_seq):
+    print()
+    print("Most common of length {}".format(i+1))
+    print()
+    print_most_common(bc_cnt[i], i, total)
+
+if __name__ == '__main__':
+  main(sys.argv)
diff --git a/src/third_party/v8/tools/release/auto_push.py b/src/third_party/v8/tools/release/auto_push.py
new file mode 100755
index 0000000..4cb9687
--- /dev/null
+++ b/src/third_party/v8/tools/release/auto_push.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import argparse
+import json
+import os
+import re
+import sys
+import urllib
+
+from common_includes import *
+import create_release
+
+
+class Preparation(Step):
+  MESSAGE = "Preparation."
+
+  def RunStep(self):
+    # Fetch unfetched revisions.
+    self.vc.Fetch()
+
+
+class FetchCandidate(Step):
+  MESSAGE = "Fetching V8 lkgr ref."
+
+  def RunStep(self):
+    # The roll ref points to the candidate to be rolled.
+    self.Git("fetch origin +refs/heads/lkgr:refs/heads/lkgr")
+    self["candidate"] = self.Git("show-ref -s refs/heads/lkgr").strip()
+
+
+class LastReleaseBailout(Step):
+  MESSAGE = "Checking last V8 release base."
+
+  def RunStep(self):
+    last_release = self.GetLatestReleaseBase()
+    commits = self.GitLog(
+        format="%H", git_hash="%s..%s" % (last_release, self["candidate"]))
+
+    if not commits:
+      print("Already pushed current candidate %s" % self["candidate"])
+      return True
+
+
+class CreateRelease(Step):
+  MESSAGE = "Creating release if specified."
+
+  def RunStep(self):
+    print("Creating release for %s." % self["candidate"])
+
+    args = [
+      "--author", self._options.author,
+      "--reviewer", self._options.reviewer,
+      "--revision", self["candidate"],
+      "--force",
+    ]
+
+    if self._options.work_dir:
+      args.extend(["--work-dir", self._options.work_dir])
+
+    if self._options.push:
+      self._side_effect_handler.Call(
+          create_release.CreateRelease().Run, args)
+
+
+class AutoPush(ScriptsBase):
+  def _PrepareOptions(self, parser):
+    parser.add_argument("-p", "--push",
+                        help="Create release. Dry run if unspecified.",
+                        default=False, action="store_true")
+
+  def _ProcessOptions(self, options):
+    if not options.author or not options.reviewer:  # pragma: no cover
+      print("You need to specify author and reviewer.")
+      return False
+    options.requires_editor = False
+    return True
+
+  def _Config(self):
+    return {
+      "PERSISTFILE_BASENAME": "/tmp/v8-auto-push-tempfile",
+    }
+
+  def _Steps(self):
+    return [
+      Preparation,
+      FetchCandidate,
+      LastReleaseBailout,
+      CreateRelease,
+    ]
+
+
+if __name__ == "__main__":  # pragma: no cover
+  sys.exit(AutoPush().Run())
diff --git a/src/third_party/v8/tools/release/auto_roll.py b/src/third_party/v8/tools/release/auto_roll.py
new file mode 100755
index 0000000..27ba3e4
--- /dev/null
+++ b/src/third_party/v8/tools/release/auto_roll.py
@@ -0,0 +1,238 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import argparse
+import os
+import sys
+
+from common_includes import *
+
+ROLL_SUMMARY = ("Summary of changes available at:\n"
+                "https://chromium.googlesource.com/v8/v8/+log/%s..%s")
+
+ISSUE_MSG = (
+"""Please follow these instructions for assigning/CC'ing issues:
+https://v8.dev/docs/triage-issues
+
+Please close rolling in case of a roll revert:
+https://v8-roll.appspot.com/
+This only works with a Google account.
+
+CQ_INCLUDE_TRYBOTS=luci.chromium.try:linux-blink-rel
+CQ_INCLUDE_TRYBOTS=luci.chromium.try:linux_optional_gpu_tests_rel
+CQ_INCLUDE_TRYBOTS=luci.chromium.try:mac_optional_gpu_tests_rel
+CQ_INCLUDE_TRYBOTS=luci.chromium.try:win_optional_gpu_tests_rel
+CQ_INCLUDE_TRYBOTS=luci.chromium.try:android_optional_gpu_tests_rel""")
+
+class Preparation(Step):
+  MESSAGE = "Preparation."
+
+  def RunStep(self):
+    self['json_output']['monitoring_state'] = 'preparation'
+    # Update v8 remote tracking branches.
+    self.GitFetchOrigin()
+    self.Git("fetch origin +refs/tags/*:refs/tags/*")
+
+
+class DetectLastRoll(Step):
+  MESSAGE = "Detect commit ID of the last Chromium roll."
+
+  def RunStep(self):
+    self['json_output']['monitoring_state'] = 'detect_last_roll'
+    self["last_roll"] = self._options.last_roll
+    if not self["last_roll"]:
+      # Get last-rolled v8 revision from Chromium's DEPS file.
+      self["last_roll"] = self.Command(
+          "gclient", "getdep -r src/v8", cwd=self._options.chromium).strip()
+
+    self["last_version"] = self.GetVersionTag(self["last_roll"])
+    assert self["last_version"], "The last rolled v8 revision is not tagged."
+
+
+class DetectRevisionToRoll(Step):
+  MESSAGE = "Detect commit ID of the V8 revision to roll."
+
+  def RunStep(self):
+    self['json_output']['monitoring_state'] = 'detect_revision'
+    self["roll"] = self._options.revision
+    if self["roll"]:
+      # If the revision was passed on the cmd line, continue script execution
+      # in the next step.
+      return False
+
+    # The revision that should be rolled. Check for the latest of the most
+    # recent releases based on commit timestamp.
+    revisions = self.GetRecentReleases(
+        max_age=self._options.max_age * DAY_IN_SECONDS)
+    assert revisions, "Didn't find any recent release."
+
+    # There must be some progress between the last roll and the new candidate
+    # revision (i.e. we don't go backwards). The revisions are ordered newest
+    # to oldest. It is possible that the newest timestamp has no progress
+    # compared to the last roll, i.e. if the newest release is a cherry-pick
+    # on a release branch. Then we look further.
+    for revision in revisions:
+      version = self.GetVersionTag(revision)
+      assert version, "Internal error. All recent releases should have a tag"
+
+      if SortingKey(self["last_version"]) < SortingKey(version):
+        self["roll"] = revision
+        break
+    else:
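+      # for/else: this branch runs only if the loop above found no release
+      # newer than the last roll.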
+      print("There is no newer v8 revision than the one in Chromium (%s)."
+            % self["last_roll"])
+      self['json_output']['monitoring_state'] = 'up_to_date'
+      return True
+
+
+class PrepareRollCandidate(Step):
+  MESSAGE = "Robustness checks of the roll candidate."
+
+  def RunStep(self):
+    self['json_output']['monitoring_state'] = 'prepare_candidate'
+    self["roll_title"] = self.GitLog(n=1, format="%s",
+                                     git_hash=self["roll"])
+
+    # Make sure the last roll and the roll candidate are releases.
+    version = self.GetVersionTag(self["roll"])
+    assert version, "The revision to roll is not tagged."
+    version = self.GetVersionTag(self["last_roll"])
+    assert version, "The revision used as last roll is not tagged."
+
+
+class SwitchChromium(Step):
+  MESSAGE = "Switch to Chromium checkout."
+
+  def RunStep(self):
+    self['json_output']['monitoring_state'] = 'switch_chromium'
+    cwd = self._options.chromium
+    self.InitialEnvironmentChecks(cwd)
+    # Check for a clean workdir.
+    if not self.GitIsWorkdirClean(cwd=cwd):  # pragma: no cover
+      self.Die("Workspace is not clean. Please commit or undo your changes.")
+    # Assert that the DEPS file is there.
+    if not os.path.exists(os.path.join(cwd, "DEPS")):  # pragma: no cover
+      self.Die("DEPS file not present.")
+
+
+class UpdateChromiumCheckout(Step):
+  MESSAGE = "Update the checkout and create a new branch."
+
+  def RunStep(self):
+    self['json_output']['monitoring_state'] = 'update_chromium'
+    cwd = self._options.chromium
+    self.GitCheckout("master", cwd=cwd)
+    self.DeleteBranch("work-branch", cwd=cwd)
+    self.GitPull(cwd=cwd)
+
+    # Update v8 remotes.
+    self.GitFetchOrigin()
+
+    self.GitCreateBranch("work-branch", cwd=cwd)
+
+
+class UploadCL(Step):
+  MESSAGE = "Create and upload CL."
+
+  def RunStep(self):
+    self['json_output']['monitoring_state'] = 'upload'
+    cwd = self._options.chromium
+    # Patch DEPS file.
+    if self.Command("gclient", "setdep -r src/v8@%s" %
+                    self["roll"], cwd=cwd) is None:
+      self.Die("Failed to create deps for %s" % self["roll"])
+
+    message = []
+    message.append("Update V8 to %s." % self["roll_title"].lower())
+
+    message.append(
+        ROLL_SUMMARY % (self["last_roll"][:8], self["roll"][:8]))
+
+    message.append(ISSUE_MSG)
+
+    message.append("TBR=%s" % self._options.reviewer)
+    self.GitCommit("\n\n".join(message), author=self._options.author, cwd=cwd)
+    if not self._options.dry_run:
+      self.GitUpload(force=True,
+                     bypass_hooks=True,
+                     cq=self._options.use_commit_queue,
+                     cq_dry_run=self._options.use_dry_run,
+                     cwd=cwd)
+      print("CL uploaded.")
+    else:
+      print("Dry run - don't upload.")
+
+    self.GitCheckout("master", cwd=cwd)
+    self.GitDeleteBranch("work-branch", cwd=cwd)
+
+class CleanUp(Step):
+  MESSAGE = "Done!"
+
+  def RunStep(self):
+    self['json_output']['monitoring_state'] = 'success'
+    print("Congratulations, you have successfully rolled %s into "
+          "Chromium."
+          % self["roll"])
+
+    # Clean up all temporary files.
+    Command("rm", "-f %s*" % self._config["PERSISTFILE_BASENAME"])
+
+
+class AutoRoll(ScriptsBase):
+  def _PrepareOptions(self, parser):
+    parser.add_argument("-c", "--chromium", required=True,
+                        help=("The path to your Chromium src/ "
+                              "directory to automate the V8 roll."))
+    parser.add_argument("--last-roll",
+                        help="The git commit ID of the last rolled version. "
+                             "Auto-detected if not specified.")
+    parser.add_argument("--max-age", default=7, type=int,
+                        help="Maximum age in days of the latest release.")
+    parser.add_argument("--revision",
+                        help="Revision to roll. Auto-detected if not "
+                             "specified.")
+    parser.add_argument("--roll", help="Deprecated.",
+                        default=True, action="store_true")
+    group = parser.add_mutually_exclusive_group()
+    group.add_argument("--use-commit-queue",
+                       help="Trigger the CQ full run on upload.",
+                       default=False, action="store_true")
+    group.add_argument("--use-dry-run",
+                       help="Trigger the CQ dry run on upload.",
+                       default=True, action="store_true")
+
+  def _ProcessOptions(self, options):  # pragma: no cover
+    if not options.author or not options.reviewer:
+      print("A reviewer (-r) and an author (-a) are required.")
+      return False
+
+    options.requires_editor = False
+    options.force = True
+    options.manual = False
+    return True
+
+  def _Config(self):
+    return {
+      "PERSISTFILE_BASENAME": "/tmp/v8-chromium-roll-tempfile",
+    }
+
+  def _Steps(self):
+    return [
+      Preparation,
+      DetectLastRoll,
+      DetectRevisionToRoll,
+      PrepareRollCandidate,
+      SwitchChromium,
+      UpdateChromiumCheckout,
+      UploadCL,
+      CleanUp,
+    ]
+
+
+if __name__ == "__main__":  # pragma: no cover
+  sys.exit(AutoRoll().Run())
diff --git a/src/third_party/v8/tools/release/auto_tag.py b/src/third_party/v8/tools/release/auto_tag.py
new file mode 100755
index 0000000..fddefed
--- /dev/null
+++ b/src/third_party/v8/tools/release/auto_tag.py
@@ -0,0 +1,204 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import argparse
+import sys
+
+from common_includes import *
+
+
+class Preparation(Step):
+  MESSAGE = "Preparation."
+
+  def RunStep(self):
+    # TODO(machenbach): Remove after the git switch.
+    if self.Config("PERSISTFILE_BASENAME") == "/tmp/v8-auto-tag-tempfile":
+      print("This script is disabled until after the v8 git migration.")
+      return True
+
+    self.CommonPrepare()
+    self.PrepareBranch()
+    self.GitCheckout("master")
+    self.vc.Pull()
+
+
+class GetTags(Step):
+  MESSAGE = "Get all V8 tags."
+
+  def RunStep(self):
+    self.GitCreateBranch(self._config["BRANCHNAME"])
+    self["tags"] = self.vc.GetTags()
+
+
+class GetOldestUntaggedVersion(Step):
+  MESSAGE = "Check if there's a version on bleeding edge without a tag."
+
+  def RunStep(self):
+    tags = set(self["tags"])
+    self["candidate"] = None
+    self["candidate_version"] = None
+    self["next"] = None
+    self["next_version"] = None
+
+    # Iterate backwards through all automatic version updates.
+    for git_hash in self.GitLog(
+        format="%H", grep="\\[Auto\\-roll\\] Bump up version to").splitlines():
+
+      # Get the version.
+      if not self.GitCheckoutFileSafe(VERSION_FILE, git_hash):
+        continue
+
+      self.ReadAndPersistVersion()
+      version = self.ArrayToVersion("")
+
+      # Strip off trailing patch level (tags don't include patch level 0).
+      if version.endswith(".0"):
+        version = version[:-2]
+
+      # Clean up checked-out version file.
+      self.GitCheckoutFileSafe(VERSION_FILE, "HEAD")
+
+      if version in tags:
+        if self["candidate"]:
+          # Revision "git_hash" is tagged already and "candidate" was the next
+          # newer revision without a tag.
+          break
+        else:
+          print("Stop as %s is the latest version and it has been tagged." %
+                version)
+          self.CommonCleanup()
+          return True
+      else:
+        # This is the second oldest version without a tag.
+        self["next"] = self["candidate"]
+        self["next_version"] = self["candidate_version"]
+
+        # This is the oldest version without a tag.
+        self["candidate"] = git_hash
+        self["candidate_version"] = version
+
+    if not self["candidate"] or not self["candidate_version"]:
+      print("Nothing found to tag.")
+      self.CommonCleanup()
+      return True
+
+    print("Candidate for tagging is %s with version %s" %
+          (self["candidate"], self["candidate_version"]))
+
+
+class GetLKGRs(Step):
+  MESSAGE = "Get the last lkgrs."
+
+  def RunStep(self):
+    revision_url = "https://v8-status.appspot.com/revisions?format=json"
+    status_json = self.ReadURL(revision_url, wait_plan=[5, 20])
+    self["lkgrs"] = [entry["revision"]
+                     for entry in json.loads(status_json) if entry["status"]]
+
+
+class CalculateTagRevision(Step):
+  MESSAGE = "Calculate the revision to tag."
+
+  def LastLKGR(self, min_rev, max_rev):
+    """Finds the newest lkgr between min_rev (inclusive) and max_rev
+    (exclusive).
+    """
+    for lkgr in self["lkgrs"]:
+      # LKGRs are reverse sorted.
+      if int(min_rev) <= int(lkgr) and int(lkgr) < int(max_rev):
+        return lkgr
+    return None
+
+  def RunStep(self):
+    # Get the lkgr after the tag candidate and before the next tag candidate.
+    candidate_svn = self.vc.GitSvn(self["candidate"])
+    if self["next"]:
+      next_svn = self.vc.GitSvn(self["next"])
+    else:
+      # Don't include the version change commit itself if there is no upper
+      # limit yet.
+      candidate_svn = str(int(candidate_svn) + 1)
+      next_svn = sys.maxsize
+    lkgr_svn = self.LastLKGR(candidate_svn, next_svn)
+
+    if not lkgr_svn:
+      print("There is no lkgr since the candidate version yet.")
+      self.CommonCleanup()
+      return True
+
+    # Let's check if the lkgr is at least three hours old.
+    self["lkgr"] = self.vc.SvnGit(lkgr_svn)
+    if not self["lkgr"]:
+      print("Couldn't find git hash for lkgr %s" % lkgr_svn)
+      self.CommonCleanup()
+      return True
+
+    lkgr_utc_time = int(self.GitLog(n=1, format="%at", git_hash=self["lkgr"]))
+    current_utc_time = self._side_effect_handler.GetUTCStamp()
+
+    if current_utc_time < lkgr_utc_time + 10800:
+      print("Candidate lkgr %s is too recent for tagging." % lkgr_svn)
+      self.CommonCleanup()
+      return True
+
+    print("Tagging revision %s with %s" % (lkgr_svn, self["candidate_version"]))
+
+
+class MakeTag(Step):
+  MESSAGE = "Tag the version."
+
+  def RunStep(self):
+    if not self._options.dry_run:
+      self.GitReset(self["lkgr"])
+      # FIXME(machenbach): Make this work with the git repo.
+      self.vc.Tag(self["candidate_version"],
+                  "svn/bleeding_edge",
+                  "This won't work!")
+
+
+class CleanUp(Step):
+  MESSAGE = "Clean up."
+
+  def RunStep(self):
+    self.CommonCleanup()
+
+
+class AutoTag(ScriptsBase):
+  def _PrepareOptions(self, parser):
+    parser.add_argument("--dry_run", help="Don't tag the new version.",
+                        default=False, action="store_true")
+
+  def _ProcessOptions(self, options):  # pragma: no cover
+    if not options.dry_run and not options.author:
+      print("Specify your chromium.org email with -a")
+      return False
+    options.wait_for_lgtm = False
+    options.force_readline_defaults = True
+    options.force_upload = True
+    return True
+
+  def _Config(self):
+    return {
+      "BRANCHNAME": "auto-tag-v8",
+      "PERSISTFILE_BASENAME": "/tmp/v8-auto-tag-tempfile",
+    }
+
+  def _Steps(self):
+    return [
+      Preparation,
+      GetTags,
+      GetOldestUntaggedVersion,
+      GetLKGRs,
+      CalculateTagRevision,
+      MakeTag,
+      CleanUp,
+    ]
+
+
+if __name__ == "__main__":  # pragma: no cover
+  sys.exit(AutoTag().Run())
diff --git a/src/third_party/v8/tools/release/check_clusterfuzz.py b/src/third_party/v8/tools/release/check_clusterfuzz.py
new file mode 100755
index 0000000..021cd55
--- /dev/null
+++ b/src/third_party/v8/tools/release/check_clusterfuzz.py
@@ -0,0 +1,231 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Script to check for new clusterfuzz issues since the last rolled v8 revision.
+
+Returns a json list with test case IDs if any.
+
+Security considerations: The security key and request data must never be
+written to public logs. Public automated callers of this script should
+suppress stdout and stderr and only process contents of the results_file.
+"""
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import argparse
+import httplib
+import json
+import os
+import re
+import sys
+import urllib
+import urllib2
+
+
+# Constants to git repos.
+BASE_URL = "https://chromium.googlesource.com"
+DEPS_LOG = BASE_URL + "/chromium/src/+log/master/DEPS?format=JSON"
+
+# Constants for retrieving v8 rolls.
+CRREV = "https://cr-rev.appspot.com/_ah/api/crrev/v1/commit/%s"
+V8_COMMIT_RE = re.compile(
+    r"^Update V8 to version \d+\.\d+\.\d+ \(based on ([a-fA-F0-9]+)\)\..*")
+
+# Constants for the clusterfuzz backend.
+HOSTNAME = "backend-dot-cluster-fuzz.appspot.com"
+
+# Crash patterns.
+V8_INTERNAL_RE = re.compile(r"^v8::internal.*")
+ANY_RE = re.compile(r".*")
+
+# List of all api requests.
+BUG_SPECS = [
+  {
+    "args": {
+      "job_type": "linux_asan_chrome_v8",
+      "reproducible": "True",
+      "open": "True",
+      "bug_information": "",
+    },
+    "crash_state": V8_INTERNAL_RE,
+  },
+  {
+    "args": {
+      "job_type": "linux_asan_d8",
+      "reproducible": "True",
+      "open": "True",
+      "bug_information": "",
+    },
+    "crash_state": ANY_RE,
+  },
+  {
+    "args": {
+      "job_type": "linux_asan_d8_dbg",
+      "reproducible": "True",
+      "open": "True",
+      "bug_information": "",
+    },
+    "crash_state": ANY_RE,
+  },
+  {
+    "args": {
+      "job_type": "linux_asan_d8_ignition_dbg",
+      "reproducible": "True",
+      "open": "True",
+      "bug_information": "",
+    },
+    "crash_state": ANY_RE,
+  },
+  {
+    "args": {
+      "job_type": "linux_asan_d8_v8_arm_dbg",
+      "reproducible": "True",
+      "open": "True",
+      "bug_information": "",
+    },
+    "crash_state": ANY_RE,
+  },
+  {
+    "args": {
+      "job_type": "linux_asan_d8_ignition_v8_arm_dbg",
+      "reproducible": "True",
+      "open": "True",
+      "bug_information": "",
+    },
+    "crash_state": ANY_RE,
+  },
+  {
+    "args": {
+      "job_type": "linux_asan_d8_v8_arm64_dbg",
+      "reproducible": "True",
+      "open": "True",
+      "bug_information": "",
+    },
+    "crash_state": ANY_RE,
+  },
+  {
+    "args": {
+      "job_type": "linux_asan_d8_v8_mipsel_dbg",
+      "reproducible": "True",
+      "open": "True",
+      "bug_information": "",
+    },
+    "crash_state": ANY_RE,
+  },
+]
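+# Note: each spec above results in one clusterfuzz API query; the associated
+# "crash_state" regexp is then applied client-side to filter the returned
+# issues (see Main below).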
+
+
+def GetRequest(url):
+  url_fh = urllib2.urlopen(url, None, 60)
+  try:
+    return url_fh.read()
+  finally:
+    url_fh.close()
+
+
+def GetLatestV8InChromium():
+  """Returns the commit position number of the latest v8 roll in chromium."""
+
+  # Check currently rolled v8 revision.
+  result = GetRequest(DEPS_LOG)
+  if not result:
+    return None
+
+  # Strip the anti-XSSI security prefix (")]}'" plus a newline) and load json.
+  commits = json.loads(result[5:])
+
+  git_revision = None
+  for commit in commits["log"]:
+    # Get latest commit that matches the v8 roll pattern. Ignore cherry-picks.
+    match = re.match(V8_COMMIT_RE, commit["message"])
+    if match:
+      git_revision = match.group(1)
+      break
+  else:
+    return None
+
+  # Get commit position number for v8 revision.
+  result = GetRequest(CRREV % git_revision)
+  if not result:
+    return None
+
+  commit = json.loads(result)
+  assert commit["repo"] == "v8/v8"
+  return commit["number"]
+
+
+def APIRequest(key, **params):
+  """Send a request to the clusterfuzz api.
+
+  Returns a json dict of the response.
+  """
+
+  params["api_key"] = key
+  params = urllib.urlencode(params)
+
+  headers = {"Content-type": "application/x-www-form-urlencoded"}
+
+  try:
+    conn = httplib.HTTPSConnection(HOSTNAME)
+    conn.request("POST", "/_api/", params, headers)
+
+    response = conn.getresponse()
+
+    # Never leak "data" into public logs.
+    data = response.read()
+  except Exception:
+    raise Exception("ERROR: Connection problem.")
+
+  try:
+    return json.loads(data)
+  except ValueError:
+    raise Exception("ERROR: Could not read response. Is your key valid?")
+
+
+def Main():
+  parser = argparse.ArgumentParser()
+  parser.add_argument("-k", "--key-file", required=True,
+                      help="A file with the clusterfuzz api key.")
+  parser.add_argument("-r", "--results-file",
+                      help="A file to write the results to.")
+  options = parser.parse_args()
+
+  # Get api key. The key's content must never be logged.
+  assert options.key_file
+  with open(options.key_file) as f:
+    key = f.read().strip()
+  assert key
+
+  revision_number = GetLatestV8InChromium()
+
+  results = []
+  for spec in BUG_SPECS:
+    args = dict(spec["args"])
+    # Use the incremented revision as we're interested in all revisions greater
+    # than what's currently rolled into chromium.
+    if revision_number:
+      args["revision_greater_or_equal"] = str(int(revision_number) + 1)
+
+    # Never print issue details in public logs.
+    issues = APIRequest(key, **args)
+    assert issues is not None
+    for issue in issues:
+      if (re.match(spec["crash_state"], issue["crash_state"]) and
+          not issue.get('has_bug_flag')):
+        results.append(issue["id"])
+
+  if options.results_file:
+    with open(options.results_file, "w") as f:
+      f.write(json.dumps(results))
+  else:
+    print(results)
+
+
+if __name__ == "__main__":
+  sys.exit(Main())
diff --git a/src/third_party/v8/tools/release/common_includes.py b/src/third_party/v8/tools/release/common_includes.py
new file mode 100644
index 0000000..fd69075
--- /dev/null
+++ b/src/third_party/v8/tools/release/common_includes.py
@@ -0,0 +1,817 @@
+#!/usr/bin/env python
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import argparse
+import datetime
+import httplib
+import glob
+import imp
+import json
+import os
+import re
+import shutil
+import subprocess
+import sys
+import textwrap
+import time
+import urllib
+import urllib2
+
+from git_recipes import GitRecipesMixin
+from git_recipes import GitFailedException
+
+DAY_IN_SECONDS = 24 * 60 * 60
+PUSH_MSG_GIT_RE = re.compile(r".* \(based on (?P<git_rev>[a-fA-F0-9]+)\)$")
+PUSH_MSG_NEW_RE = re.compile(r"^Version \d+\.\d+\.\d+$")
+VERSION_FILE = os.path.join("include", "v8-version.h")
+WATCHLISTS_FILE = "WATCHLISTS"
+RELEASE_WORKDIR = "/tmp/v8-release-scripts-work-dir/"
+
+# V8 base directory.
+V8_BASE = os.path.dirname(
+    os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+# Add our copy of depot_tools to the PATH as many scripts use tools from there,
+# e.g. git-cl, fetch, git-new-branch, etc., and we cannot depend on depot_tools
+# being in the PATH on the LUCI bots.
+path_to_depot_tools = os.path.join(V8_BASE, 'third_party', 'depot_tools')
+new_path = path_to_depot_tools + os.pathsep + os.environ.get('PATH', '')
+os.environ['PATH'] = new_path
+
+
+def TextToFile(text, file_name):
+  with open(file_name, "w") as f:
+    f.write(text)
+
+
+def AppendToFile(text, file_name):
+  with open(file_name, "a") as f:
+    f.write(text)
+
+
+def LinesInFile(file_name):
+  with open(file_name) as f:
+    for line in f:
+      yield line
+
+
+def FileToText(file_name):
+  with open(file_name) as f:
+    return f.read()
+
+
+def MSub(rexp, replacement, text):
+  return re.sub(rexp, replacement, text, flags=re.MULTILINE)
+
+
+def SortingKey(version):
+  """Key for sorting version number strings: '3.11' > '3.2.1.1'"""
+  version_keys = map(int, version.split("."))
+  # Fill up to full version numbers to normalize comparison.
+  while len(version_keys) < 4:  # pragma: no cover
+    version_keys.append(0)
+  # Fill digits.
+  return ".".join(map("{0:04d}".format, version_keys))
+
+
+# Some commands don't like the pipe, e.g. calling vi from within the script or
+# from subscripts like git cl upload.
+def Command(cmd, args="", prefix="", pipe=True, cwd=None):
+  cwd = cwd or os.getcwd()
+  # TODO(machenbach): Use timeout.
+  cmd_line = "%s %s %s" % (prefix, cmd, args)
+  print("Command: %s" % cmd_line)
+  print("in %s" % cwd)
+  sys.stdout.flush()
+  try:
+    if pipe:
+      return subprocess.check_output(cmd_line, shell=True, cwd=cwd)
+    else:
+      return subprocess.check_call(cmd_line, shell=True, cwd=cwd)
+  except subprocess.CalledProcessError:
+    return None
+  finally:
+    sys.stdout.flush()
+    sys.stderr.flush()
+
+
+def SanitizeVersionTag(tag):
+  version_without_prefix = re.compile(r"^\d+\.\d+\.\d+(?:\.\d+)?$")
+  version_with_prefix = re.compile(r"^tags/\d+\.\d+\.\d+(?:\.\d+)?$")
+
+  if version_without_prefix.match(tag):
+    return tag
+  elif version_with_prefix.match(tag):
+    return tag[len("tags/"):]
+  else:
+    return None
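+# For example, "tags/6.3.292.1" is sanitized to "6.3.292.1", while a
+# non-version ref such as "candidates" yields None.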
+
+
+def NormalizeVersionTags(version_tags):
+  normalized_version_tags = []
+
+  # Remove tags/ prefix because of packed refs.
+  for current_tag in version_tags:
+    version_tag = SanitizeVersionTag(current_tag)
+    if version_tag is not None:
+      normalized_version_tags.append(version_tag)
+
+  return normalized_version_tags
+
+
+# Wrapper for side effects.
+class SideEffectHandler(object):  # pragma: no cover
+  def Call(self, fun, *args, **kwargs):
+    return fun(*args, **kwargs)
+
+  def Command(self, cmd, args="", prefix="", pipe=True, cwd=None):
+    return Command(cmd, args, prefix, pipe, cwd=cwd)
+
+  def ReadLine(self):
+    return sys.stdin.readline().strip()
+
+  def ReadURL(self, url, params=None):
+    # pylint: disable=E1121
+    url_fh = urllib2.urlopen(url, params, 60)
+    try:
+      return url_fh.read()
+    finally:
+      url_fh.close()
+
+  def ReadClusterFuzzAPI(self, api_key, **params):
+    params["api_key"] = api_key.strip()
+    params = urllib.urlencode(params)
+
+    headers = {"Content-type": "application/x-www-form-urlencoded"}
+
+    conn = httplib.HTTPSConnection("backend-dot-cluster-fuzz.appspot.com")
+    conn.request("POST", "/_api/", params, headers)
+
+    response = conn.getresponse()
+    data = response.read()
+
+    try:
+      return json.loads(data)
+    except Exception:
+      print(data)
+      print("ERROR: Could not read response. Is your key valid?")
+      raise
+
+  def Sleep(self, seconds):
+    time.sleep(seconds)
+
+  def GetUTCStamp(self):
+    return time.mktime(datetime.datetime.utcnow().timetuple())
+
+DEFAULT_SIDE_EFFECT_HANDLER = SideEffectHandler()
+
+
+class NoRetryException(Exception):
+  pass
+
+
+class VCInterface(object):
+  def InjectStep(self, step):
+    self.step = step
+
+  def Pull(self):
+    raise NotImplementedError()
+
+  def Fetch(self):
+    raise NotImplementedError()
+
+  def GetTags(self):
+    raise NotImplementedError()
+
+  def GetBranches(self):
+    raise NotImplementedError()
+
+  def MasterBranch(self):
+    raise NotImplementedError()
+
+  def CandidateBranch(self):
+    raise NotImplementedError()
+
+  def RemoteMasterBranch(self):
+    raise NotImplementedError()
+
+  def RemoteCandidateBranch(self):
+    raise NotImplementedError()
+
+  def RemoteBranch(self, name):
+    raise NotImplementedError()
+
+  def CLLand(self):
+    raise NotImplementedError()
+
+  def Tag(self, tag, remote, message):
+    """Sets a tag for the current commit.
+
+    Assumptions: The commit already landed and the commit message is unique.
+    """
+    raise NotImplementedError()
+
+
+class GitInterface(VCInterface):
+  def Pull(self):
+    self.step.GitPull()
+
+  def Fetch(self):
+    self.step.Git("fetch")
+
+  def GetTags(self):
+    return self.step.Git("tag").strip().splitlines()
+
+  def GetBranches(self):
+    # Get relevant remote branches, e.g. "branch-heads/3.25".
+    branches = filter(
+        lambda s: re.match(r"^branch\-heads/\d+\.\d+$", s),
+        self.step.GitRemotes())
+    # Remove 'branch-heads/' prefix.
+    return map(lambda s: s[13:], branches)
+
+  def MasterBranch(self):
+    return "master"
+
+  def CandidateBranch(self):
+    return "candidates"
+
+  def RemoteMasterBranch(self):
+    return "origin/master"
+
+  def RemoteCandidateBranch(self):
+    return "origin/candidates"
+
+  def RemoteBranch(self, name):
+    # Assume that if someone "fully qualified" the ref, they know what they
+    # want.
+    if name.startswith('refs/'):
+      return name
+    if name in ["candidates", "master"]:
+      return "refs/remotes/origin/%s" % name
+    try:
+      # Check if branch is in heads.
+      if self.step.Git("show-ref refs/remotes/origin/%s" % name).strip():
+        return "refs/remotes/origin/%s" % name
+    except GitFailedException:
+      pass
+    try:
+      # Check if branch is in branch-heads.
+      if self.step.Git("show-ref refs/remotes/branch-heads/%s" % name).strip():
+        return "refs/remotes/branch-heads/%s" % name
+    except GitFailedException:
+      pass
+    self.Die("Can't find remote of %s" % name)
+
+  def Tag(self, tag, remote, message):
+    # Wait for the commit to appear. Assumes unique commit message titles (this
+    # is the case for all automated merge and push commits, and no title is a
+    # prefix of another title).
+    commit = None
+    for wait_interval in [10, 30, 60, 60, 60, 60, 60]:
+      self.step.Git("fetch")
+      commit = self.step.GitLog(n=1, format="%H", grep=message, branch=remote)
+      if commit:
+        break
+      print("The commit has not replicated to git. Waiting for %s seconds." %
+            wait_interval)
+      self.step._side_effect_handler.Sleep(wait_interval)
+    else:
+      self.step.Die("Couldn't determine commit for setting the tag. Maybe the "
+                    "git updater is lagging behind?")
+
+    self.step.Git("tag %s %s" % (tag, commit))
+    self.step.Git("push origin refs/tags/%s:refs/tags/%s" % (tag, tag))
+
+  def CLLand(self):
+    self.step.GitCLLand()
+
+
+class Step(GitRecipesMixin):
+  def __init__(self, text, number, config, state, options, handler):
+    self._text = text
+    self._number = number
+    self._config = config
+    self._state = state
+    self._options = options
+    self._side_effect_handler = handler
+    self.vc = GitInterface()
+    self.vc.InjectStep(self)
+
+    # The testing configuration might set a different default cwd.
+    self.default_cwd = (self._config.get("DEFAULT_CWD") or
+                        os.path.join(self._options.work_dir, "v8"))
+
+    assert self._number >= 0
+    assert self._config is not None
+    assert self._state is not None
+    assert self._side_effect_handler is not None
+
+  def __getitem__(self, key):
+    # Convenience method to allow direct [] access on step classes for
+    # manipulating the backing state dict.
+    return self._state.get(key)
+
+  def __setitem__(self, key, value):
+    # Convenience method to allow direct [] access on step classes for
+    # manipulating the backing state dict.
+    self._state[key] = value
+
+  def Config(self, key):
+    return self._config[key]
+
+  def Run(self):
+    # Restore state.
+    state_file = "%s-state.json" % self._config["PERSISTFILE_BASENAME"]
+    if not self._state and os.path.exists(state_file):
+      self._state.update(json.loads(FileToText(state_file)))
+
+    print(">>> Step %d: %s" % (self._number, self._text))
+    try:
+      return self.RunStep()
+    finally:
+      # Persist state.
+      TextToFile(json.dumps(self._state), state_file)
+
+  def RunStep(self):  # pragma: no cover
+    raise NotImplementedError
+
+  def Retry(self, cb, retry_on=None, wait_plan=None):
+    """ Retry a function.
+    Params:
+      cb: The function to retry.
+      retry_on: A callback that takes the result of the function and returns
+                True if the function should be retried. A function throwing an
+                exception is always retried.
+      wait_plan: A list of waiting delays between retries in seconds. The
+                 maximum number of retries is len(wait_plan).
+    """
+    retry_on = retry_on or (lambda x: False)
+    wait_plan = list(wait_plan or [])
+    wait_plan.reverse()
+    while True:
+      got_exception = False
+      try:
+        result = cb()
+      except NoRetryException as e:
+        raise e
+      except Exception as e:
+        got_exception = e
+      if got_exception or retry_on(result):
+        if not wait_plan:  # pragma: no cover
+          raise Exception("Retried too often. Giving up. Reason: %s" %
+                          str(got_exception))
+        wait_time = wait_plan.pop()
+        print("Waiting for %f seconds." % wait_time)
+        self._side_effect_handler.Sleep(wait_time)
+        print("Retrying...")
+      else:
+        return result
+
+  def ReadLine(self, default=None):
+    # Don't prompt in forced mode.
+    if self._options.force_readline_defaults and default is not None:
+      print("%s (forced)" % default)
+      return default
+    else:
+      return self._side_effect_handler.ReadLine()
+
+  def Command(self, name, args, cwd=None):
+    cmd = lambda: self._side_effect_handler.Command(
+        name, args, "", True, cwd=cwd or self.default_cwd)
+    return self.Retry(cmd, None, [5])
+
+  def Git(self, args="", prefix="", pipe=True, retry_on=None, cwd=None):
+    cmd = lambda: self._side_effect_handler.Command(
+        "git", args, prefix, pipe, cwd=cwd or self.default_cwd)
+    result = self.Retry(cmd, retry_on, [5, 30])
+    if result is None:
+      raise GitFailedException("'git %s' failed." % args)
+    return result
+
+  def Editor(self, args):
+    if self._options.requires_editor:
+      return self._side_effect_handler.Command(
+          os.environ["EDITOR"],
+          args,
+          pipe=False,
+          cwd=self.default_cwd)
+
+  def ReadURL(self, url, params=None, retry_on=None, wait_plan=None):
+    wait_plan = wait_plan or [3, 60, 600]
+    cmd = lambda: self._side_effect_handler.ReadURL(url, params)
+    return self.Retry(cmd, retry_on, wait_plan)
+
+  def Die(self, msg=""):
+    if msg != "":
+      print("Error: %s" % msg)
+    print("Exiting")
+    raise Exception(msg)
+
+  def DieNoManualMode(self, msg=""):
+    if not self._options.manual:  # pragma: no cover
+      msg = msg or "Only available in manual mode."
+      self.Die(msg)
+
+  def Confirm(self, msg):
+    print("%s [Y/n] " % msg, end=' ')
+    answer = self.ReadLine(default="Y")
+    return answer == "" or answer == "Y" or answer == "y"
+
+  def DeleteBranch(self, name, cwd=None):
+    for line in self.GitBranch(cwd=cwd).splitlines():
+      if re.match(r"\*?\s*%s$" % re.escape(name), line):
+        msg = "Branch %s exists, do you want to delete it?" % name
+        if self.Confirm(msg):
+          self.GitDeleteBranch(name, cwd=cwd)
+          print("Branch %s deleted." % name)
+        else:
+          msg = "Can't continue. Please delete branch %s and try again." % name
+          self.Die(msg)
+
+  def InitialEnvironmentChecks(self, cwd):
+    # Cancel if this is not a git checkout.
+    if not os.path.exists(os.path.join(cwd, ".git")):  # pragma: no cover
+      self.Die("%s is not a git checkout. If you know what you're doing, try "
+               "deleting it and rerunning this script." % cwd)
+
+    # Cancel if EDITOR is unset or not executable.
+    if (self._options.requires_editor and (not os.environ.get("EDITOR") or
+        self.Command(
+            "which", os.environ["EDITOR"]) is None)):  # pragma: no cover
+      self.Die("Please set your EDITOR environment variable, you'll need it.")
+
+  def CommonPrepare(self):
+    # Check for a clean workdir.
+    if not self.GitIsWorkdirClean():  # pragma: no cover
+      self.Die("Workspace is not clean. Please commit or undo your changes.")
+
+    # Checkout master in case the script was left on a work branch.
+    self.GitCheckout('origin/master')
+
+    # Fetch unfetched revisions.
+    self.vc.Fetch()
+
+  def PrepareBranch(self):
+    # Delete the branch that will be created later if it exists already.
+    self.DeleteBranch(self._config["BRANCHNAME"])
+
+  def CommonCleanup(self):
+    self.GitCheckout('origin/master')
+    self.GitDeleteBranch(self._config["BRANCHNAME"])
+
+    # Clean up all temporary files.
+    for f in glob.iglob("%s*" % self._config["PERSISTFILE_BASENAME"]):
+      if os.path.isfile(f):
+        os.remove(f)
+      if os.path.isdir(f):
+        shutil.rmtree(f)
+
+  def ReadAndPersistVersion(self, prefix=""):
+    def ReadAndPersist(var_name, def_name):
+      match = re.match(r"^#define %s\s+(\d*)" % def_name, line)
+      if match:
+        value = match.group(1)
+        self["%s%s" % (prefix, var_name)] = value
+    for line in LinesInFile(os.path.join(self.default_cwd, VERSION_FILE)):
+      for (var_name, def_name) in [("major", "V8_MAJOR_VERSION"),
+                                   ("minor", "V8_MINOR_VERSION"),
+                                   ("build", "V8_BUILD_NUMBER"),
+                                   ("patch", "V8_PATCH_LEVEL")]:
+        ReadAndPersist(var_name, def_name)
+
+  def WaitForLGTM(self):
+    print ("Please wait for an LGTM, then type \"LGTM<Return>\" to commit "
+           "your change. (If you need to iterate on the patch or double check "
+           "that it's sensible, do so in another shell, but remember to not "
+           "change the headline of the uploaded CL.")
+    answer = ""
+    while answer != "LGTM":
+      print("> ", end=' ')
+      answer = self.ReadLine(None if self._options.wait_for_lgtm else "LGTM")
+      if answer != "LGTM":
+        print("That was not 'LGTM'.")
+
+  def WaitForResolvingConflicts(self, patch_file):
+    print("Applying the patch \"%s\" failed. Either type \"ABORT<Return>\", "
+          "or resolve the conflicts, stage *all* touched files with "
+          "'git add', and type \"RESOLVED<Return>\"" % (patch_file))
+    self.DieNoManualMode()
+    answer = ""
+    while answer != "RESOLVED":
+      if answer == "ABORT":
+        self.Die("Applying the patch failed.")
+      if answer != "":
+        print("That was not 'RESOLVED' or 'ABORT'.")
+      print("> ", end=' ')
+      answer = self.ReadLine()
+
+  # Takes a file containing the patch to apply as first argument.
+  def ApplyPatch(self, patch_file, revert=False):
+    try:
+      self.GitApplyPatch(patch_file, revert)
+    except GitFailedException:
+      self.WaitForResolvingConflicts(patch_file)
+
+  def GetVersionTag(self, revision):
+    tag = self.Git("describe --tags %s" % revision).strip()
+    return SanitizeVersionTag(tag)
+
+  def GetRecentReleases(self, max_age):
+    # Make sure tags are fetched.
+    self.Git("fetch origin +refs/tags/*:refs/tags/*")
+
+    # Current timestamp.
+    time_now = int(self._side_effect_handler.GetUTCStamp())
+
+    # List every tag from a given period.
+    revisions = self.Git("rev-list --max-age=%d --tags" %
+                         int(time_now - max_age)).strip()
+
+    # Filter out revisions whose tag is off by one or more commits.
+    return filter(lambda r: self.GetVersionTag(r), revisions.splitlines())
+
+  def GetLatestVersion(self):
+    # Use cached version if available.
+    if self["latest_version"]:
+      return self["latest_version"]
+
+    # Make sure tags are fetched.
+    self.Git("fetch origin +refs/tags/*:refs/tags/*")
+
+    all_tags = self.vc.GetTags()
+    only_version_tags = NormalizeVersionTags(all_tags)
+
+    version = sorted(only_version_tags,
+                     key=SortingKey, reverse=True)[0]
+    self["latest_version"] = version
+    return version
+
+  def GetLatestRelease(self):
+    """The latest release is the git hash of the latest tagged version.
+
+    This revision should be rolled into chromium.
+    """
+    latest_version = self.GetLatestVersion()
+
+    # The latest release.
+    latest_hash = self.GitLog(n=1, format="%H", branch=latest_version)
+    assert latest_hash
+    return latest_hash
+
+  def GetLatestReleaseBase(self, version=None):
+    """The latest release base is the latest revision that is covered in the
+    last change log file. It doesn't include cherry-picked patches.
+    """
+    latest_version = version or self.GetLatestVersion()
+
+    # Strip patch level if it exists.
+    latest_version = ".".join(latest_version.split(".")[:3])
+
+    # The latest release base.
+    latest_hash = self.GitLog(n=1, format="%H", branch=latest_version)
+    assert latest_hash
+
+    title = self.GitLog(n=1, format="%s", git_hash=latest_hash)
+    match = PUSH_MSG_GIT_RE.match(title)
+    if match:
+      # Legacy: In the old process there's one level of indirection. The
+      # version is on the candidates branch and points to the real release
+      # base on master through the commit message.
+      return match.group("git_rev")
+    match = PUSH_MSG_NEW_RE.match(title)
+    if match:
+      # This is a new-style v8 version branched from master. The commit
+      # "latest_hash" is the version-file change. Its parent is the release
+      # base on master.
+      return self.GitLog(n=1, format="%H", git_hash="%s^" % latest_hash)
+
+    self.Die("Unknown latest release: %s" % latest_hash)
+
+  def ArrayToVersion(self, prefix):
+    return ".".join([self[prefix + "major"],
+                     self[prefix + "minor"],
+                     self[prefix + "build"],
+                     self[prefix + "patch"]])
+
+  def StoreVersion(self, version, prefix):
+    version_parts = version.split(".")
+    if len(version_parts) == 3:
+      version_parts.append("0")
+    major, minor, build, patch = version_parts
+    self[prefix + "major"] = major
+    self[prefix + "minor"] = minor
+    self[prefix + "build"] = build
+    self[prefix + "patch"] = patch
+
+  def SetVersion(self, version_file, prefix):
+    output = ""
+    for line in FileToText(version_file).splitlines():
+      if line.startswith("#define V8_MAJOR_VERSION"):
+        line = re.sub(r"\d+$", self[prefix + "major"], line)
+      elif line.startswith("#define V8_MINOR_VERSION"):
+        line = re.sub(r"\d+$", self[prefix + "minor"], line)
+      elif line.startswith("#define V8_BUILD_NUMBER"):
+        line = re.sub(r"\d+$", self[prefix + "build"], line)
+      elif line.startswith("#define V8_PATCH_LEVEL"):
+        line = re.sub(r"\d+$", self[prefix + "patch"], line)
+      elif (self[prefix + "candidate"] and
+            line.startswith("#define V8_IS_CANDIDATE_VERSION")):
+        line = re.sub(r"\d+$", self[prefix + "candidate"], line)
+      output += "%s\n" % line
+    TextToFile(output, version_file)
+
+
+class BootstrapStep(Step):
+  MESSAGE = "Bootstrapping checkout and state."
+
+  def RunStep(self):
+    # Reserve state entry for json output.
+    self['json_output'] = {}
+
+    if os.path.realpath(self.default_cwd) == os.path.realpath(V8_BASE):
+      self.Die("Can't use v8 checkout with calling script as work checkout.")
+    # Directory containing the working v8 checkout.
+    if not os.path.exists(self._options.work_dir):
+      os.makedirs(self._options.work_dir)
+    if not os.path.exists(self.default_cwd):
+      self.Command("fetch", "v8", cwd=self._options.work_dir)
+
+
+class UploadStep(Step):
+  MESSAGE = "Upload for code review."
+
+  def RunStep(self):
+    reviewer = None
+    if self._options.reviewer:
+      print("Using account %s for review." % self._options.reviewer)
+      reviewer = self._options.reviewer
+
+    tbr_reviewer = None
+    if self._options.tbr_reviewer:
+      print("Using account %s for TBR review." % self._options.tbr_reviewer)
+      tbr_reviewer = self._options.tbr_reviewer
+
+    if not reviewer and not tbr_reviewer:
+      print(
+        "Please enter the email address of a V8 reviewer for your patch: ",
+        end=' ')
+      self.DieNoManualMode("A reviewer must be specified in forced mode.")
+      reviewer = self.ReadLine()
+
+    self.GitUpload(reviewer, self._options.force_upload,
+                   bypass_hooks=self._options.bypass_upload_hooks,
+                   cc=self._options.cc, tbr_reviewer=tbr_reviewer)
+
+
+def MakeStep(step_class=Step, number=0, state=None, config=None,
+             options=None, side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER):
+    # Allow passing in empty dictionaries.
+    state = state if state is not None else {}
+    config = config if config is not None else {}
+
+    try:
+      message = step_class.MESSAGE
+    except AttributeError:
+      message = step_class.__name__
+
+    return step_class(message, number=number, config=config,
+                      state=state, options=options,
+                      handler=side_effect_handler)
+
+
+class ScriptsBase(object):
+  def __init__(self,
+               config=None,
+               side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER,
+               state=None):
+    self._config = config or self._Config()
+    self._side_effect_handler = side_effect_handler
+    self._state = state if state is not None else {}
+
+  def _Description(self):
+    return None
+
+  def _PrepareOptions(self, parser):
+    pass
+
+  def _ProcessOptions(self, options):
+    return True
+
+  def _Steps(self):  # pragma: no cover
+    raise Exception("Not implemented.")
+
+  def _Config(self):
+    return {}
+
+  def MakeOptions(self, args=None):
+    parser = argparse.ArgumentParser(description=self._Description())
+    parser.add_argument("-a", "--author", default="",
+                        help="The author email used for code review.")
+    parser.add_argument("--dry-run", default=False, action="store_true",
+                        help="Perform only read-only actions.")
+    parser.add_argument("--json-output",
+                        help="File to write results summary to.")
+    parser.add_argument("-r", "--reviewer", default="",
+                        help="The account name to be used for reviews.")
+    parser.add_argument("--tbr-reviewer", "--tbr", default="",
+                        help="The account name to be used for TBR reviews.")
+    parser.add_argument("-s", "--step",
+        help="Specify the step where to start work. Default: 0.",
+        default=0, type=int)
+    parser.add_argument("--work-dir",
+                        help=("Location where to bootstrap a working v8 "
+                              "checkout."))
+    self._PrepareOptions(parser)
+
+    if args is None:  # pragma: no cover
+      options = parser.parse_args()
+    else:
+      options = parser.parse_args(args)
+
+    # Process common options.
+    if options.step < 0:  # pragma: no cover
+      print("Bad step number %d" % options.step)
+      parser.print_help()
+      return None
+
+    # Defaults for options, common to all scripts.
+    options.manual = getattr(options, "manual", True)
+    options.force = getattr(options, "force", False)
+    options.bypass_upload_hooks = False
+
+    # Derived options.
+    options.requires_editor = not options.force
+    options.wait_for_lgtm = not options.force
+    options.force_readline_defaults = not options.manual
+    options.force_upload = not options.manual
+
+    # Process script specific options.
+    if not self._ProcessOptions(options):
+      parser.print_help()
+      return None
+
+    if not options.work_dir:
+      options.work_dir = "/tmp/v8-release-scripts-work-dir"
+    return options
+
+  def RunSteps(self, step_classes, args=None):
+    options = self.MakeOptions(args)
+    if not options:
+      return 1
+
+    # Ensure temp dir exists for state files.
+    state_dir = os.path.dirname(self._config["PERSISTFILE_BASENAME"])
+    if not os.path.exists(state_dir):
+      os.makedirs(state_dir)
+
+    state_file = "%s-state.json" % self._config["PERSISTFILE_BASENAME"]
+    if options.step == 0 and os.path.exists(state_file):
+      os.remove(state_file)
+
+    steps = []
+    for (number, step_class) in enumerate([BootstrapStep] + step_classes):
+      steps.append(MakeStep(step_class, number, self._state, self._config,
+                            options, self._side_effect_handler))
+
+    try:
+      for step in steps[options.step:]:
+        if step.Run():
+          return 0
+    finally:
+      if options.json_output:
+        with open(options.json_output, "w") as f:
+          json.dump(self._state['json_output'], f)
+
+    return 0
+
+  def Run(self, args=None):
+    return self.RunSteps(self._Steps(), args)
diff --git a/src/third_party/v8/tools/release/create_release.py b/src/third_party/v8/tools/release/create_release.py
new file mode 100755
index 0000000..c323542
--- /dev/null
+++ b/src/third_party/v8/tools/release/create_release.py
@@ -0,0 +1,234 @@
+#!/usr/bin/env python
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import argparse
+import os
+import sys
+import tempfile
+import urllib2
+
+from common_includes import *
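+
+# Illustrative invocation (the addresses are hypothetical):
+#   create_release.py -a author@chromium.org -r reviewer@chromium.org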
+
+class Preparation(Step):
+  MESSAGE = "Preparation."
+
+  def RunStep(self):
+    self.Git("fetch origin +refs/heads/*:refs/heads/*")
+    self.GitCheckout("origin/master")
+    self.DeleteBranch("work-branch")
+
+
+class PrepareBranchRevision(Step):
+  MESSAGE = "Check from which revision to branch off."
+
+  def RunStep(self):
+    self["push_hash"] = (self._options.revision or
+                         self.GitLog(n=1, format="%H", branch="origin/master"))
+    assert self["push_hash"]
+    print("Release revision %s" % self["push_hash"])
+
+
+class IncrementVersion(Step):
+  MESSAGE = "Increment version number."
+
+  def RunStep(self):
+    latest_version = self.GetLatestVersion()
+
+    # The version file on master can be used to bump up major/minor at
+    # branch time.
+    self.GitCheckoutFile(VERSION_FILE, self.vc.RemoteMasterBranch())
+    self.ReadAndPersistVersion("master_")
+    master_version = self.ArrayToVersion("master_")
+
+    # Use the highest version from master or from tags to determine the new
+    # version.
+    authoritative_version = sorted(
+        [master_version, latest_version], key=SortingKey)[1]
+    self.StoreVersion(authoritative_version, "authoritative_")
+
+    # Variables prefixed with 'new_' contain the new version numbers for the
+    # ongoing candidates push.
+    self["new_major"] = self["authoritative_major"]
+    self["new_minor"] = self["authoritative_minor"]
+    self["new_build"] = str(int(self["authoritative_build"]) + 1)
+
+    # Make sure patch level is 0 in a new push.
+    self["new_patch"] = "0"
+
+    # The new version is not a candidate.
+    self["new_candidate"] = "0"
+
+    self["version"] = "%s.%s.%s" % (self["new_major"],
+                                    self["new_minor"],
+                                    self["new_build"])
+
+    print ("Incremented version to %s" % self["version"])
+
+
+class DetectLastRelease(Step):
+  MESSAGE = "Detect commit ID of last release base."
+
+  def RunStep(self):
+    self["last_push_master"] = self.GetLatestReleaseBase()
+
+
+class DeleteBranchRef(Step):
+  MESSAGE = "Delete branch ref."
+
+  def RunStep(self):
+    cmd = "push origin :refs/heads/%s" % self["version"]
+    if self._options.dry_run:
+      print("Dry run. Command:\ngit %s" % cmd)
+    else:
+      try:
+        self.Git(cmd)
+      except Exception:
+        # Be forgiving if branch ref does not exist.
+        pass
+
+
+class PushBranchRef(Step):
+  MESSAGE = "Create branch ref."
+
+  def RunStep(self):
+    cmd = "push origin %s:refs/heads/%s" % (self["push_hash"], self["version"])
+    if self._options.dry_run:
+      print("Dry run. Command:\ngit %s" % cmd)
+    else:
+      self.Git(cmd)
+
+
+class MakeBranch(Step):
+  MESSAGE = "Create the branch."
+
+  def RunStep(self):
+    self.Git("reset --hard origin/master")
+    self.Git("new-branch work-branch --upstream origin/%s" % self["version"])
+    self.GitCheckoutFile(VERSION_FILE, self["latest_version"])
+
+
+class SetVersion(Step):
+  MESSAGE = "Set correct version for candidates."
+
+  def RunStep(self):
+    self.SetVersion(os.path.join(self.default_cwd, VERSION_FILE), "new_")
+
+
+class EnableMergeWatchlist(Step):
+  MESSAGE = "Enable watchlist entry for merge notifications."
+
+  def RunStep(self):
+    old_watchlist_content = FileToText(os.path.join(self.default_cwd,
+                                                    WATCHLISTS_FILE))
+    new_watchlist_content = re.sub(r"(# 'v8-merges@googlegroups\.com',)",
+                                   "'v8-merges@googlegroups.com',",
+                                   old_watchlist_content)
+    TextToFile(new_watchlist_content, os.path.join(self.default_cwd,
+                                                   WATCHLISTS_FILE))
+
+
+class CommitBranch(Step):
+  MESSAGE = "Commit version to new branch."
+
+  def RunStep(self):
+    self["commit_title"] = "Version %s" % self["version"]
+    text = "%s\n\nTBR=%s" % (self["commit_title"], self._options.reviewer)
+    TextToFile(text, self.Config("COMMITMSG_FILE"))
+
+    self.GitCommit(file_name=self.Config("COMMITMSG_FILE"))
+
+
+class LandBranch(Step):
+  MESSAGE = "Upload and land changes."
+
+  def RunStep(self):
+    if self._options.dry_run:
+      print("Dry run - upload CL.")
+    else:
+      self.GitUpload(force=True,
+                     bypass_hooks=True,
+                     no_autocc=True,
+                     message_file=self.Config("COMMITMSG_FILE"))
+    cmd = "cl land --bypass-hooks -f"
+    if self._options.dry_run:
+      print("Dry run. Command:\ngit %s" % cmd)
+    else:
+      self.Git(cmd)
+
+    os.remove(self.Config("COMMITMSG_FILE"))
+
+
+class TagRevision(Step):
+  MESSAGE = "Tag the new revision."
+
+  def RunStep(self):
+    if self._options.dry_run:
+      print ("Dry run. Tagging \"%s\" with %s" %
+             (self["commit_title"], self["version"]))
+    else:
+      self.vc.Tag(self["version"],
+                  "origin/%s" % self["version"],
+                  self["commit_title"])
+
+
+class CleanUp(Step):
+  MESSAGE = "Done!"
+
+  def RunStep(self):
+    print("Congratulations, you have successfully created version %s."
+          % self["version"])
+
+    self.GitCheckout("origin/master")
+    self.DeleteBranch("work-branch")
+    self.Git("gc")
+
+
+class CreateRelease(ScriptsBase):
+  def _PrepareOptions(self, parser):
+    group = parser.add_mutually_exclusive_group()
+    group.add_argument("-f", "--force",
+                      help="Don't prompt the user.",
+                      default=True, action="store_true")
+    group.add_argument("-m", "--manual",
+                      help="Prompt the user at every important step.",
+                      default=False, action="store_true")
+    parser.add_argument("-R", "--revision",
+                        help="The git commit ID to push (defaults to HEAD).")
+
+  def _ProcessOptions(self, options):  # pragma: no cover
+    if not options.author or not options.reviewer:
+      print("Reviewer (-r) and author (-a) are required.")
+      return False
+    return True
+
+  def _Config(self):
+    return {
+      "PERSISTFILE_BASENAME": "/tmp/create-releases-tempfile",
+      "COMMITMSG_FILE": "/tmp/v8-create-releases-tempfile-commitmsg",
+    }
+
+  def _Steps(self):
+    return [
+      Preparation,
+      PrepareBranchRevision,
+      IncrementVersion,
+      DetectLastRelease,
+      DeleteBranchRef,
+      PushBranchRef,
+      MakeBranch,
+      SetVersion,
+      EnableMergeWatchlist,
+      CommitBranch,
+      LandBranch,
+      TagRevision,
+      CleanUp,
+    ]
+
+
+if __name__ == "__main__":  # pragma: no cover
+  sys.exit(CreateRelease().Run())
diff --git a/src/third_party/v8/tools/release/filter_build_files.py b/src/third_party/v8/tools/release/filter_build_files.py
new file mode 100755
index 0000000..65312b4
--- /dev/null
+++ b/src/third_party/v8/tools/release/filter_build_files.py
@@ -0,0 +1,108 @@
+#!/usr/bin/env python
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Enumerates relevant build files for each platform.
+
+This can be used to filter the build directory before making an official
+archive. The archive should only contain files required for running or
+static linking, e.g. executables, startup files, libraries.
+
+The script is limited to release builds and assumes GN.
+"""
+
+import argparse
+import glob
+import itertools
+import json
+import os
+import re
+import sys
+
+EXECUTABLE_FILES = [
+  'd8',
+]
+
+# Additional executable files added only to ref archive type.
+REFBUILD_EXECUTABLE_FILES = [
+  'cctest',
+]
+
+SUPPLEMENTARY_FILES = [
+  'icudtl.dat',
+  'snapshot_blob.bin',
+  'v8_build_config.json',
+]
+
+LIBRARY_FILES = {
+  'android': ['*.a', '*.so'],
+  'linux': ['*.a', '*.so'],
+  'mac': ['*.a', '*.so', '*.dylib'],
+  'win': ['*.lib', '*.dll'],
+}
+
+
+def main(argv):
+  parser = argparse.ArgumentParser(description=__doc__)
+
+  parser.add_argument('-d', '--dir', required=True,
+                      help='Path to the build directory.')
+  parser.add_argument('-p', '--platform', required=True,
+                      help='Target platform name: win|mac|linux.')
+  parser.add_argument('-o', '--json-output', required=True,
+                      help='Path to an output file. The files will '
+                           'be stored in json list with absolute paths.')
+  parser.add_argument('-t', '--type',
+                      choices=['all', 'exe', 'lib', 'ref'], default='all',
+                      help='Specifies the archive type.')
+  args = parser.parse_args()
+
+  if not os.path.isdir(args.dir):
+    parser.error('%s is not an existing directory.' % args.dir)
+
+  args.dir = os.path.abspath(args.dir)
+
+  # Skip libraries for exe and ref archive types.
+  if args.type in ('exe', 'ref'):
+    library_files = []
+  else:
+    library_files = LIBRARY_FILES[args.platform]
+
+  # Skip executables for lib archive type.
+  if args.type == 'lib':
+    executable_files = []
+  else:
+    executable_files = EXECUTABLE_FILES
+
+  if args.type == 'ref':
+    executable_files.extend(REFBUILD_EXECUTABLE_FILES)
+
+  list_of_files = []
+  def add_files_from_globs(globs):
+    list_of_files.extend(itertools.chain(*map(glob.iglob, globs)))
+
+  # Add toplevel executables, supplementary files and libraries.
+  extended_executable_files = [
+    f + '.exe' if args.platform == 'win' else f
+    for f in executable_files]
+  add_files_from_globs(
+      os.path.join(args.dir, f)
+      for f in extended_executable_files +
+               SUPPLEMENTARY_FILES +
+               library_files
+  )
+
+  # Add libraries recursively from obj directory.
+  for root, _, __ in os.walk(os.path.join(args.dir, 'obj'), followlinks=True):
+    add_files_from_globs(os.path.join(root, g) for g in library_files)
+
+  with open(args.json_output, 'w') as f:
+    json.dump(list_of_files, f)
+
+  return 0
+
+
+if __name__ == '__main__':
+  sys.exit(main(sys.argv))
diff --git a/src/third_party/v8/tools/release/git_recipes.py b/src/third_party/v8/tools/release/git_recipes.py
new file mode 100644
index 0000000..716d146
--- /dev/null
+++ b/src/third_party/v8/tools/release/git_recipes.py
@@ -0,0 +1,292 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import re
+
+SHA1_RE = re.compile('^[a-fA-F0-9]{40}$')
+ROLL_DEPS_GIT_SVN_ID_RE = re.compile('^git-svn-id: .*@([0-9]+) .*$')
+
+# Regular expression that matches a single commit footer line.
+COMMIT_FOOTER_ENTRY_RE = re.compile(r'([^:]+):\s*(.*)')
+
+# Footer metadata key for commit position.
+COMMIT_POSITION_FOOTER_KEY = 'Cr-Commit-Position'
+
+# Regular expression to parse a commit position
+COMMIT_POSITION_RE = re.compile(r'(.+)@\{#(\d+)\}')
+
+# Key for the 'git-svn' ID metadata commit footer entry.
+GIT_SVN_ID_FOOTER_KEY = 'git-svn-id'
+
+# e.g., git-svn-id: https://v8.googlecode.com/svn/trunk@23117
+#     ce2b1a6d-e550-0410-aec6-3dcde31c8c00
+GIT_SVN_ID_RE = re.compile(r'[^@]+@(\d+)\s+(?:[a-zA-Z0-9\-]+)')
+
+
+# Copied from bot_update.py.
+def GetCommitMessageFooterMap(message):
+  """Returns: (dict) A dictionary of commit message footer entries.
+  """
+  footers = {}
+
+  # Extract the lines in the footer block.
+  lines = []
+  for line in message.strip().splitlines():
+    line = line.strip()
+    if len(line) == 0:
+      del(lines[:])
+      continue
+    lines.append(line)
+
+  # Parse the footer
+  for line in lines:
+    m = COMMIT_FOOTER_ENTRY_RE.match(line)
+    if not m:
+      # If any single line isn't valid, continue anyway for compatibility with
+      # Gerrit (which itself uses JGit for this).
+      continue
+    footers[m.group(1)] = m.group(2).strip()
+  return footers
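+# For instance, a commit message whose last paragraph is the single line
+# "Cr-Commit-Position: refs/heads/master@{#12345}" maps to
+# {'Cr-Commit-Position': 'refs/heads/master@{#12345}'}.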
+
+
+class GitFailedException(Exception):
+  pass
+
+
+def Strip(f):
+  def new_f(*args, **kwargs):
+    result = f(*args, **kwargs)
+    if result is None:
+      return result
+    else:
+      return result.strip()
+  return new_f
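+# Strip is used below so that git command output comes back without the
+# trailing newline, e.g. GitBranch() returns the bare "git branch" output.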
+
+
+def MakeArgs(l):
+  """['-a', '', 'abc', ''] -> '-a abc'"""
+  return " ".join(filter(None, l))
+
+
+def Quoted(s):
+  return "\"%s\"" % s
+
+
+class GitRecipesMixin(object):
+  def GitIsWorkdirClean(self, **kwargs):
+    return self.Git("status -s -uno", **kwargs).strip() == ""
+
+  @Strip
+  def GitBranch(self, **kwargs):
+    return self.Git("branch", **kwargs)
+
+  def GitCreateBranch(self, name, remote="", **kwargs):
+    assert name
+    remote_args = ["--upstream", remote] if remote else []
+    self.Git(MakeArgs(["new-branch", name] + remote_args), **kwargs)
+
+  def GitDeleteBranch(self, name, **kwargs):
+    assert name
+    self.Git(MakeArgs(["branch -D", name]), **kwargs)
+
+  def GitReset(self, name, **kwargs):
+    assert name
+    self.Git(MakeArgs(["reset --hard", name]), **kwargs)
+
+  def GitStash(self, **kwargs):
+    self.Git(MakeArgs(["stash"]), **kwargs)
+
+  def GitRemotes(self, **kwargs):
+    return map(str.strip,
+               self.Git(MakeArgs(["branch -r"]), **kwargs).splitlines())
+
+  def GitCheckout(self, name, **kwargs):
+    assert name
+    self.Git(MakeArgs(["checkout -f", name]), **kwargs)
+
+  def GitCheckoutFile(self, name, branch_or_hash, **kwargs):
+    assert name
+    assert branch_or_hash
+    self.Git(MakeArgs(["checkout -f", branch_or_hash, "--", name]), **kwargs)
+
+  def GitCheckoutFileSafe(self, name, branch_or_hash, **kwargs):
+    try:
+      self.GitCheckoutFile(name, branch_or_hash, **kwargs)
+    except GitFailedException:  # pragma: no cover
+      # The file doesn't exist in that revision.
+      return False
+    return True
+
+  def GitChangedFiles(self, git_hash, **kwargs):
+    assert git_hash
+    try:
+      files = self.Git(MakeArgs(["diff --name-only",
+                                 git_hash,
+                                 "%s^" % git_hash]), **kwargs)
+      return map(str.strip, files.splitlines())
+    except GitFailedException:  # pragma: no cover
+      # Git fails using "^" at branch roots.
+      return []
+
+
+  @Strip
+  def GitCurrentBranch(self, **kwargs):
+    for line in self.Git("status -s -b -uno", **kwargs).strip().splitlines():
+      match = re.match(r"^## (.+)", line)
+      if match: return match.group(1)
+    raise Exception("Couldn't find curent branch.")  # pragma: no cover
+
+  @Strip
+  def GitLog(self, n=0, format="", grep="", git_hash="", parent_hash="",
+             branch="", path=None, reverse=False, **kwargs):
+    assert not (git_hash and parent_hash)
+    args = ["log"]
+    if n > 0:
+      args.append("-%d" % n)
+    if format:
+      args.append("--format=%s" % format)
+    if grep:
+      args.append("--grep=\"%s\"" % grep.replace("\"", "\\\""))
+    if reverse:
+      args.append("--reverse")
+    if git_hash:
+      args.append(git_hash)
+    if parent_hash:
+      args.append("%s^" % parent_hash)
+    args.append(branch)
+    if path:
+      args.extend(["--", path])
+    return self.Git(MakeArgs(args), **kwargs)
+
+  def GitShowFile(self, refspec, path, **kwargs):
+    assert refspec
+    assert path
+    return self.Git(MakeArgs(["show", "%s:%s" % (refspec, path)]), **kwargs)
+
+  def GitGetPatch(self, git_hash, **kwargs):
+    assert git_hash
+    return self.Git(MakeArgs(["log", "-1", "-p", git_hash]), **kwargs)
+
+  # TODO(machenbach): Unused? Remove.
+  def GitAdd(self, name, **kwargs):
+    assert name
+    self.Git(MakeArgs(["add", Quoted(name)]), **kwargs)
+
+  def GitApplyPatch(self, patch_file, reverse=False, **kwargs):
+    assert patch_file
+    args = ["apply --index --reject"]
+    if reverse:
+      args.append("--reverse")
+    args.append(Quoted(patch_file))
+    self.Git(MakeArgs(args), **kwargs)
+
+  def GitUpload(self, reviewer="", force=False, cq=False,
+                cq_dry_run=False, bypass_hooks=False, cc="", tbr_reviewer="",
+                no_autocc=False, message_file=None, **kwargs):
+    args = ["cl upload --send-mail"]
+    if reviewer:
+      args += ["-r", Quoted(reviewer)]
+    if tbr_reviewer:
+      args += ["--tbrs", Quoted(tbr_reviewer)]
+    if force:
+      args.append("-f")
+    if cq:
+      args.append("--use-commit-queue")
+    if cq_dry_run:
+      args.append("--cq-dry-run")
+    if bypass_hooks:
+      args.append("--bypass-hooks")
+    if no_autocc:
+      args.append("--no-autocc")
+    if cc:
+      args += ["--cc", Quoted(cc)]
+    if message_file:
+      args += ["--message-file", Quoted(message_file)]
+    # TODO(machenbach): Check output in forced mode. Verify that all required
+    # base files were uploaded, if not retry.
+    self.Git(MakeArgs(args), pipe=False, **kwargs)
+
+  def GitCommit(self, message="", file_name="", author=None, **kwargs):
+    assert message or file_name
+    args = ["commit"]
+    if file_name:
+      args += ["-aF", Quoted(file_name)]
+    if message:
+      args += ["-am", Quoted(message)]
+    if author:
+      args += ["--author", "\"%s <%s>\"" % (author, author)]
+    self.Git(MakeArgs(args), **kwargs)
+
+  def GitPresubmit(self, **kwargs):
+    self.Git("cl presubmit", "PRESUBMIT_TREE_CHECK=\"skip\"", **kwargs)
+
+  def GitCLLand(self, **kwargs):
+    self.Git(
+        "cl land -f --bypass-hooks", retry_on=lambda x: x is None, **kwargs)
+
+  def GitDiff(self, loc1, loc2, **kwargs):
+    return self.Git(MakeArgs(["diff", loc1, loc2]), **kwargs)
+
+  def GitPull(self, **kwargs):
+    self.Git("pull", **kwargs)
+
+  def GitFetchOrigin(self, *refspecs, **kwargs):
+    self.Git(MakeArgs(["fetch", "origin"] + list(refspecs)), **kwargs)
+
+  @Strip
+  # Copied from bot_update.py and modified for svn-like numbers only.
+  def GetCommitPositionNumber(self, git_hash, **kwargs):
+    """Dumps the 'git' log for a specific revision and parses out the commit
+    position number.
+
+    If a commit position metadata key is found, its number will be returned.
+
+    Otherwise, we will search for a 'git-svn' metadata entry. If one is found,
+    its SVN revision value is returned.
+    """
+    git_log = self.GitLog(format='%B', n=1, git_hash=git_hash, **kwargs)
+    footer_map = GetCommitMessageFooterMap(git_log)
+
+    # Search for commit position metadata
+    value = footer_map.get(COMMIT_POSITION_FOOTER_KEY)
+    if value:
+      match = COMMIT_POSITION_RE.match(value)
+      if match:
+        return match.group(2)
+
+    # Extract the svn revision from 'git-svn' metadata
+    value = footer_map.get(GIT_SVN_ID_FOOTER_KEY)
+    if value:
+      match = GIT_SVN_ID_RE.match(value)
+      if match:
+        return match.group(1)
+    raise GitFailedException("Couldn't determine commit position for %s" %
+                             git_hash)
+
+  def GitGetHashOfTag(self, tag_name, **kwargs):
+    return self.Git("rev-list -1 " + tag_name).strip().encode("ascii", "ignore")
diff --git a/src/third_party/v8/tools/release/merge_to_branch.py b/src/third_party/v8/tools/release/merge_to_branch.py
new file mode 100755
index 0000000..ab69672
--- /dev/null
+++ b/src/third_party/v8/tools/release/merge_to_branch.py
@@ -0,0 +1,270 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import argparse
+from collections import OrderedDict
+import sys
+
+from common_includes import *
+from git_recipes import GetCommitMessageFooterMap
+
+def IsSvnNumber(rev):
+  return rev.isdigit() and len(rev) < 8
+
+class Preparation(Step):
+  MESSAGE = "Preparation."
+
+  def RunStep(self):
+    if os.path.exists(self.Config("ALREADY_MERGING_SENTINEL_FILE")):
+      if self._options.force:
+        os.remove(self.Config("ALREADY_MERGING_SENTINEL_FILE"))
+      elif self._options.step == 0:  # pragma: no cover
+        self.Die("A merge is already in progress. Use -f to continue")
+    open(self.Config("ALREADY_MERGING_SENTINEL_FILE"), "a").close()
+
+    self.InitialEnvironmentChecks(self.default_cwd)
+
+    self["merge_to_branch"] = self._options.branch
+
+    self.CommonPrepare()
+    self.PrepareBranch()
+
+
+class CreateBranch(Step):
+  MESSAGE = "Create a fresh branch for the patch."
+
+  def RunStep(self):
+    self.GitCreateBranch(self.Config("BRANCHNAME"),
+                         self.vc.RemoteBranch(self["merge_to_branch"]))
+
+
+class SearchArchitecturePorts(Step):
+  MESSAGE = "Search for corresponding architecture ports."
+
+  def RunStep(self):
+    self["full_revision_list"] = list(OrderedDict.fromkeys(
+        self._options.revisions))
+    port_revision_list = []
+    for revision in self["full_revision_list"]:
+      # Search for commits which match the "Port XXX" pattern.
+      git_hashes = self.GitLog(reverse=True, format="%H",
+                               grep="^[Pp]ort %s" % revision,
+                               branch=self.vc.RemoteMasterBranch())
+      for git_hash in git_hashes.splitlines():
+        revision_title = self.GitLog(n=1, format="%s", git_hash=git_hash)
+
+        # Is this revision included in the original revision list?
+        if git_hash in self["full_revision_list"]:
+          print("Found port of %s -> %s (already included): %s"
+                % (revision, git_hash, revision_title))
+        else:
+          print("Found port of %s -> %s: %s"
+                % (revision, git_hash, revision_title))
+          port_revision_list.append(git_hash)
+
+    # Did we find any ports?
+    if len(port_revision_list) > 0:
+      if self.Confirm("Automatically add corresponding ports (%s)?"
+                      % ", ".join(port_revision_list)):
+        # 'y': Add ports to the revision list.
+        self["full_revision_list"].extend(port_revision_list)
+
+
+class CreateCommitMessage(Step):
+  MESSAGE = "Create commit message."
+
+  def _create_commit_description(self, commit_hash):
+    patch_merge_desc = self.GitLog(n=1, format="%s", git_hash=commit_hash)
+    description = "Merged: " + patch_merge_desc + "\n"
+    description += "Revision: " + commit_hash + "\n\n"
+    return description
+
+  def RunStep(self):
+
+    # Stringify: ["abcde", "12345"] -> "abcde, 12345"
+    self["revision_list"] = ", ".join(self["full_revision_list"])
+
+    if not self["revision_list"]:  # pragma: no cover
+      self.Die("Revision list is empty.")
+
+    msg_pieces = []
+
+    if len(self["full_revision_list"]) > 1:
+      self["commit_title"] = "Merged: Squashed multiple commits."
+      for commit_hash in self["full_revision_list"]:
+        msg_pieces.append(self._create_commit_description(commit_hash))
+    else:
+      commit_hash = self["full_revision_list"][0]
+      full_description = self._create_commit_description(commit_hash).split("\n")
+
+      # Truncate the title because of length limits in the code review tool.
+      title = full_description[0]
+      if len(title) > 100:
+        title = title[:96] + " ..."
+
+      self["commit_title"] = title
+      msg_pieces.append(full_description[1] + "\n\n")
+
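+    # Collect bug references from legacy "BUG=" lines and Gerrit "Bug:"
+    # footers of all merged commits and aggregate them into one BUG= line.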
+    bugs = []
+    for commit_hash in self["full_revision_list"]:
+      msg = self.GitLog(n=1, git_hash=commit_hash)
+      for bug in re.findall(r"^[ \t]*BUG[ \t]*=[ \t]*(.*?)[ \t]*$", msg, re.M):
+        bugs.extend(s.strip() for s in bug.split(","))
+      gerrit_bug = GetCommitMessageFooterMap(msg).get('Bug', '')
+      bugs.extend(s.strip() for s in gerrit_bug.split(","))
+    bug_aggregate = ",".join(
+        sorted(filter(lambda s: s and s != "none", set(bugs))))
+    if bug_aggregate:
+      # TODO(machenbach): Use proper gerrit footer for bug after switch to
+      # gerrit. Keep BUG= for now for backwards-compatibility.
+      msg_pieces.append("BUG=%s\n" % bug_aggregate)
+
+    msg_pieces.append("NOTRY=true\nNOPRESUBMIT=true\nNOTREECHECKS=true\n")
+
+    self["new_commit_msg"] = "".join(msg_pieces)
+
+
+class ApplyPatches(Step):
+  MESSAGE = "Apply patches for selected revisions."
+
+  def RunStep(self):
+    for commit_hash in self["full_revision_list"]:
+      print("Applying patch for %s to %s..."
+            % (commit_hash, self["merge_to_branch"]))
+      patch = self.GitGetPatch(commit_hash)
+      TextToFile(patch, self.Config("TEMPORARY_PATCH_FILE"))
+      self.ApplyPatch(self.Config("TEMPORARY_PATCH_FILE"))
+    if self._options.patch:
+      self.ApplyPatch(self._options.patch)
+
+class CommitLocal(Step):
+  MESSAGE = "Commit to local branch."
+
+  def RunStep(self):
+    # Add a commit message title.
+    self["new_commit_msg"] = "%s\n\n%s" % (self["commit_title"],
+                                           self["new_commit_msg"])
+    TextToFile(self["new_commit_msg"], self.Config("COMMITMSG_FILE"))
+    self.GitCommit(file_name=self.Config("COMMITMSG_FILE"))
+
+class CommitRepository(Step):
+  MESSAGE = "Commit to the repository."
+
+  def RunStep(self):
+    self.GitCheckout(self.Config("BRANCHNAME"))
+    self.WaitForLGTM()
+    self.GitPresubmit()
+    self.vc.CLLand()
+
+class CleanUp(Step):
+  MESSAGE = "Cleanup."
+
+  def RunStep(self):
+    self.CommonCleanup()
+    print("*** SUMMARY ***")
+    print("branch: %s" % self["merge_to_branch"])
+    if self["revision_list"]:
+      print("patches: %s" % self["revision_list"])
+
+
+class MergeToBranch(ScriptsBase):
+  def _Description(self):
+    return ("Performs the necessary steps to merge revisions from "
+            "master to release branches like 4.5. This script does not "
+            "version the commit. See http://goo.gl/9ke2Vw for more "
+            "information.")
+
+  def _PrepareOptions(self, parser):
+    group = parser.add_mutually_exclusive_group(required=True)
+    group.add_argument("--branch", help="The branch to merge to.")
+    parser.add_argument("revisions", nargs="*",
+                        help="The revisions to merge.")
+    parser.add_argument("-f", "--force",
+                        help="Delete sentinel file.",
+                        default=False, action="store_true")
+    parser.add_argument("-m", "--message",
+                        help="A commit message for the patch.")
+    parser.add_argument("-p", "--patch",
+                        help="A patch file to apply as part of the merge.")
+
+  def _ProcessOptions(self, options):
+    if len(options.revisions) < 1:
+      if not options.patch:
+        print("Either a patch file or revision numbers must be specified")
+        return False
+      if not options.message:
+        print("You must specify a merge comment if no patches are specified")
+        return False
+    options.bypass_upload_hooks = True
+    # CC ulan to make sure that fixes are merged to Google3.
+    options.cc = "ulan@chromium.org"
+
+    if len(options.branch.split('.')) > 2:
+      print ("This script does not support merging to roll branches. "
+             "Please use tools/release/roll_merge.py for this use case.")
+      return False
+
+    # Make sure to use git hashes in the new workflows.
+    for revision in options.revisions:
+      if (IsSvnNumber(revision) or
+          (revision[0:1] == "r" and IsSvnNumber(revision[1:]))):
+        print("Please provide full git hashes of the patches to merge.")
+        print("Got: %s" % revision)
+        return False
+    return True
+
+  def _Config(self):
+    return {
+      "BRANCHNAME": "prepare-merge",
+      "PERSISTFILE_BASENAME": RELEASE_WORKDIR + "v8-merge-to-branch-tempfile",
+      "ALREADY_MERGING_SENTINEL_FILE":
+          RELEASE_WORKDIR + "v8-merge-to-branch-tempfile-already-merging",
+      "TEMPORARY_PATCH_FILE":
+          RELEASE_WORKDIR + "v8-prepare-merge-tempfile-temporary-patch",
+      "COMMITMSG_FILE": RELEASE_WORKDIR + "v8-prepare-merge-tempfile-commitmsg",
+    }
+
+  def _Steps(self):
+    return [
+      Preparation,
+      CreateBranch,
+      SearchArchitecturePorts,
+      CreateCommitMessage,
+      ApplyPatches,
+      CommitLocal,
+      UploadStep,
+      CommitRepository,
+      CleanUp,
+    ]
+
+
+if __name__ == "__main__":  # pragma: no cover
+  sys.exit(MergeToBranch().Run())
diff --git a/src/third_party/v8/tools/release/mergeinfo.py b/src/third_party/v8/tools/release/mergeinfo.py
new file mode 100755
index 0000000..bed7441
--- /dev/null
+++ b/src/third_party/v8/tools/release/mergeinfo.py
@@ -0,0 +1,124 @@
+#!/usr/bin/env python
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import argparse
+import os
+import sys
+import re
+
+from search_related_commits import git_execute
+
+GIT_OPTION_HASH_ONLY = '--pretty=format:%H'
+GIT_OPTION_NO_DIFF = '--quiet'
+GIT_OPTION_ONELINE = '--oneline'
+
+def describe_commit(git_working_dir, hash_to_search, one_line=False):
+  if one_line:
+    return git_execute(git_working_dir, ['show',
+                                         GIT_OPTION_NO_DIFF,
+                                         GIT_OPTION_ONELINE,
+                                         hash_to_search]).strip()
+  return git_execute(git_working_dir, ['show',
+                                       GIT_OPTION_NO_DIFF,
+                                       hash_to_search]).strip()
+
+
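+# Follow-up commits (reverts and ports) mention the original hash in their
+# commit message, so grep master's history for it.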
+def get_followup_commits(git_working_dir, hash_to_search):
+  cmd = ['log', '--grep=' + hash_to_search, GIT_OPTION_HASH_ONLY,
+         'remotes/origin/master'];
+  return git_execute(git_working_dir, cmd).strip().splitlines()
+
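+# A commit counts as merged when another commit outside master references its
+# hash, excluding commits that only mention it in a "Cr-Branched-From:" footer.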
+def get_merge_commits(git_working_dir, hash_to_search):
+  merges = get_related_commits_not_on_master(git_working_dir, hash_to_search)
+  false_merges = get_related_commits_not_on_master(
+    git_working_dir, 'Cr-Branched-From: ' + hash_to_search)
+  false_merges = set(false_merges)
+  return ([merge_commit for merge_commit in merges
+      if merge_commit not in false_merges])
+
+def get_related_commits_not_on_master(git_working_dir, grep_command):
+  commits = git_execute(git_working_dir, ['log',
+                                          '--all',
+                                          '--grep=' + grep_command,
+                                          GIT_OPTION_ONELINE,
+                                          '--decorate',
+                                          '--not',
+                                          'remotes/origin/master',
+                                          GIT_OPTION_HASH_ONLY])
+  return commits.splitlines()
+
+def get_branches_for_commit(git_working_dir, hash_to_search):
+  branches = git_execute(git_working_dir, ['branch',
+                                           '--contains',
+                                           hash_to_search,
+                                           '-a']).strip()
+  branches = branches.splitlines()
+  return map(str.strip, branches)
+
+def is_lkgr(branches):
+  return 'remotes/origin/lkgr' in branches
+
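+# Returns the lexicographically first Chromium canary branch containing the
+# commit, or 'No Canary coverage' if the commit has not reached any canary.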
+def get_first_canary(branches):
+  canaries = ([currentBranch for currentBranch in branches if
+    currentBranch.startswith('remotes/origin/chromium/')])
+  canaries.sort()
+  if len(canaries) == 0:
+    return 'No Canary coverage'
+  return canaries[0].split('/')[-1]
+
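+# Returns the first V8 release branch (e.g. "5.7.1") that contains the commit,
+# or "--" if no release branch does.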
+def get_first_v8_version(branches):
+  version_re = re.compile("remotes/origin/[0-9]+\.[0-9]+\.[0-9]+")
+  versions = filter(lambda branch: version_re.match(branch), branches)
+  if len(versions) == 0:
+    return "--"
+  version = versions[0].split("/")[-1]
+  return version
+
+def print_analysis(git_working_dir, hash_to_search):
+  print('1.) Searching for "' + hash_to_search + '"')
+  print('=====================ORIGINAL COMMIT START===================')
+  print(describe_commit(git_working_dir, hash_to_search))
+  print('=====================ORIGINAL COMMIT END=====================')
+  print('2.) General information:')
+  branches = get_branches_for_commit(git_working_dir, hash_to_search)
+  print('Is LKGR:         ' + str(is_lkgr(branches)))
+  print('Is on Canary:    ' + str(get_first_canary(branches)))
+  print('First V8 branch: ' + str(get_first_v8_version(branches)) + \
+      ' (Might not be the rolled version)')
+  print('3.) Found follow-up commits, reverts and ports:')
+  followups = get_followup_commits(git_working_dir, hash_to_search)
+  for followup in followups:
+    print(describe_commit(git_working_dir, followup, True))
+
+  print('4.) Found merges:')
+  merges = get_merge_commits(git_working_dir, hash_to_search)
+  for currentMerge in merges:
+    print(describe_commit(git_working_dir, currentMerge, True))
+    print('---Merged to:')
+    mergeOutput = git_execute(git_working_dir, ['branch',
+                                                '--contains',
+                                                currentMerge,
+                                                '-r']).strip()
+    print(mergeOutput)
+  print('Finished successfully')
+
+if __name__ == '__main__':  # pragma: no cover
+  parser = argparse.ArgumentParser('Tool to check where a git commit was'
+ ' merged and reverted.')
+
+  parser.add_argument('-g', '--git-dir', required=False, default='.',
+                        help='The path to your git working directory.')
+
+  parser.add_argument('hash',
+                      nargs=1,
+                      help='Hash of the commit to be searched.')
+
+  args = sys.argv[1:]
+  options = parser.parse_args(args)
+
+  sys.exit(print_analysis(options.git_dir, options.hash[0]))
diff --git a/src/third_party/v8/tools/release/roll_merge.py b/src/third_party/v8/tools/release/roll_merge.py
new file mode 100755
index 0000000..064ba73
--- /dev/null
+++ b/src/third_party/v8/tools/release/roll_merge.py
@@ -0,0 +1,295 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import argparse
+from collections import OrderedDict
+import sys
+
+from common_includes import *
+
+def IsSvnNumber(rev):
+  return rev.isdigit() and len(rev) < 8
+
+class Preparation(Step):
+  MESSAGE = "Preparation."
+
+  def RunStep(self):
+    if os.path.exists(self.Config("ALREADY_MERGING_SENTINEL_FILE")):
+      if self._options.force:
+        os.remove(self.Config("ALREADY_MERGING_SENTINEL_FILE"))
+      elif self._options.step == 0:  # pragma: no cover
+        self.Die("A merge is already in progress")
+    open(self.Config("ALREADY_MERGING_SENTINEL_FILE"), "a").close()
+
+    self.InitialEnvironmentChecks(self.default_cwd)
+    if self._options.branch:
+      self["merge_to_branch"] = self._options.branch
+    else:  # pragma: no cover
+      self.Die("Please specify a branch to merge to")
+
+    self.CommonPrepare()
+    self.PrepareBranch()
+
+
+class CreateBranch(Step):
+  MESSAGE = "Create a fresh branch for the patch."
+
+  def RunStep(self):
+    self.GitCreateBranch(self.Config("BRANCHNAME"),
+                         self.vc.RemoteBranch(self["merge_to_branch"]))
+
+
+class SearchArchitecturePorts(Step):
+  MESSAGE = "Search for corresponding architecture ports."
+
+  def RunStep(self):
+    self["full_revision_list"] = list(OrderedDict.fromkeys(
+        self._options.revisions))
+    port_revision_list = []
+    for revision in self["full_revision_list"]:
+      # Search for commits which match the "Port XXX" pattern.
+      git_hashes = self.GitLog(reverse=True, format="%H",
+                               grep="Port %s" % revision,
+                               branch=self.vc.RemoteMasterBranch())
+      for git_hash in git_hashes.splitlines():
+        revision_title = self.GitLog(n=1, format="%s", git_hash=git_hash)
+
+        # Is this revision included in the original revision list?
+        if git_hash in self["full_revision_list"]:
+          print("Found port of %s -> %s (already included): %s"
+                % (revision, git_hash, revision_title))
+        else:
+          print("Found port of %s -> %s: %s"
+                % (revision, git_hash, revision_title))
+          port_revision_list.append(git_hash)
+
+    # Did we find any ports?
+    if len(port_revision_list) > 0:
+      if self.Confirm("Automatically add corresponding ports (%s)?"
+                      % ", ".join(port_revision_list)):
+        #: 'y': Add ports to revision list.
+        self["full_revision_list"].extend(port_revision_list)
+
+
+class CreateCommitMessage(Step):
+  MESSAGE = "Create commit message."
+
+  def RunStep(self):
+
+    # Stringify: ["abcde", "12345"] -> "abcde, 12345"
+    self["revision_list"] = ", ".join(self["full_revision_list"])
+
+    if not self["revision_list"]:  # pragma: no cover
+      self.Die("Revision list is empty.")
+
+    action_text = "Merged %s"
+
+    # The commit message title is added below after the version is specified.
+    msg_pieces = [
+      "\n".join(action_text % s for s in self["full_revision_list"]),
+    ]
+    msg_pieces.append("\n\n")
+
+    for commit_hash in self["full_revision_list"]:
+      patch_merge_desc = self.GitLog(n=1, format="%s", git_hash=commit_hash)
+      msg_pieces.append("%s\n\n" % patch_merge_desc)
+
+    bugs = []
+    for commit_hash in self["full_revision_list"]:
+      msg = self.GitLog(n=1, git_hash=commit_hash)
+      for bug in re.findall(r"^[ \t]*BUG[ \t]*=[ \t]*(.*?)[ \t]*$", msg, re.M):
+        bugs.extend(s.strip() for s in bug.split(","))
+    bug_aggregate = ",".join(sorted(filter(lambda s: s and s != "none", bugs)))
+    if bug_aggregate:
+      msg_pieces.append("BUG=%s\n" % bug_aggregate)
+
+    self["new_commit_msg"] = "".join(msg_pieces)
+
+
+class ApplyPatches(Step):
+  MESSAGE = "Apply patches for selected revisions."
+
+  def RunStep(self):
+    for commit_hash in self["full_revision_list"]:
+      print("Applying patch for %s to %s..."
+            % (commit_hash, self["merge_to_branch"]))
+      patch = self.GitGetPatch(commit_hash)
+      TextToFile(patch, self.Config("TEMPORARY_PATCH_FILE"))
+      self.ApplyPatch(self.Config("TEMPORARY_PATCH_FILE"))
+    if self._options.patch:
+      self.ApplyPatch(self._options.patch)
+
+
+class PrepareVersion(Step):
+  MESSAGE = "Prepare version file."
+
+  def RunStep(self):
+    # This is used to calculate the patch level increment.
+    self.ReadAndPersistVersion()
+
+
+class IncrementVersion(Step):
+  MESSAGE = "Increment version number."
+
+  def RunStep(self):
+    new_patch = str(int(self["patch"]) + 1)
+    if self.Confirm("Automatically increment V8_PATCH_LEVEL? (Saying 'n' will "
+                    "fire up your EDITOR on %s so you can make arbitrary "
+                    "changes. When you're done, save the file and exit your "
+                    "EDITOR.)" % VERSION_FILE):
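+      # Bump only the digits following "#define V8_PATCH_LEVEL", keeping the
+      # original whitespace intact.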
+      text = FileToText(os.path.join(self.default_cwd, VERSION_FILE))
+      text = MSub(r"(?<=#define V8_PATCH_LEVEL)(?P<space>\s+)\d*$",
+                  r"\g<space>%s" % new_patch,
+                  text)
+      TextToFile(text, os.path.join(self.default_cwd, VERSION_FILE))
+    else:
+      self.Editor(os.path.join(self.default_cwd, VERSION_FILE))
+    self.ReadAndPersistVersion("new_")
+    self["version"] = "%s.%s.%s.%s" % (self["new_major"],
+                                       self["new_minor"],
+                                       self["new_build"],
+                                       self["new_patch"])
+
+
+class CommitLocal(Step):
+  MESSAGE = "Commit to local branch."
+
+  def RunStep(self):
+    # Add a commit message title.
+    self["commit_title"] = "Version %s (cherry-pick)" % self["version"]
+    self["new_commit_msg"] = "%s\n\n%s" % (self["commit_title"],
+                                           self["new_commit_msg"])
+    TextToFile(self["new_commit_msg"], self.Config("COMMITMSG_FILE"))
+    self.GitCommit(file_name=self.Config("COMMITMSG_FILE"))
+
+
+class CommitRepository(Step):
+  MESSAGE = "Commit to the repository."
+
+  def RunStep(self):
+    self.GitCheckout(self.Config("BRANCHNAME"))
+    self.WaitForLGTM()
+    self.GitPresubmit()
+    self.vc.CLLand()
+
+
+class TagRevision(Step):
+  MESSAGE = "Create the tag."
+
+  def RunStep(self):
+    print("Creating tag %s" % self["version"])
+    self.vc.Tag(self["version"],
+                self.vc.RemoteBranch(self["merge_to_branch"]),
+                self["commit_title"])
+
+
+class CleanUp(Step):
+  MESSAGE = "Cleanup."
+
+  def RunStep(self):
+    self.CommonCleanup()
+    print("*** SUMMARY ***")
+    print("version: %s" % self["version"])
+    print("branch: %s" % self["merge_to_branch"])
+    if self["revision_list"]:
+      print("patches: %s" % self["revision_list"])
+
+
+class RollMerge(ScriptsBase):
+  def _Description(self):
+    return ("Performs the necessary steps to merge revisions from "
+            "master to other branches, including candidates and roll branches.")
+
+  def _PrepareOptions(self, parser):
+    group = parser.add_mutually_exclusive_group(required=True)
+    group.add_argument("--branch", help="The branch to merge to.")
+    parser.add_argument("revisions", nargs="*",
+                        help="The revisions to merge.")
+    parser.add_argument("-f", "--force",
+                        help="Delete sentinel file.",
+                        default=False, action="store_true")
+    parser.add_argument("-m", "--message",
+                        help="A commit message for the patch.")
+    parser.add_argument("-p", "--patch",
+                        help="A patch file to apply as part of the merge.")
+
+  def _ProcessOptions(self, options):
+    if len(options.revisions) < 1:
+      if not options.patch:
+        print("Either a patch file or revision numbers must be specified")
+        return False
+      if not options.message:
+        print("You must specify a merge comment if no patches are specified")
+        return False
+    options.bypass_upload_hooks = True
+    # CC ulan to make sure that fixes are merged to Google3.
+    options.cc = "ulan@chromium.org"
+
+    # Make sure to use git hashes in the new workflows.
+    for revision in options.revisions:
+      if (IsSvnNumber(revision) or
+          (revision[0:1] == "r" and IsSvnNumber(revision[1:]))):
+        print("Please provide full git hashes of the patches to merge.")
+        print("Got: %s" % revision)
+        return False
+    return True
+
+  def _Config(self):
+    return {
+      "BRANCHNAME": "prepare-merge",
+      "PERSISTFILE_BASENAME":
+          RELEASE_WORKDIR + "v8-merge-to-branch-tempfile",
+      "ALREADY_MERGING_SENTINEL_FILE":
+          RELEASE_WORKDIR + "v8-merge-to-branch-tempfile-already-merging",
+      "TEMPORARY_PATCH_FILE":
+          RELEASE_WORKDIR + "v8-prepare-merge-tempfile-temporary-patch",
+      "COMMITMSG_FILE": RELEASE_WORKDIR + "v8-prepare-merge-tempfile-commitmsg",
+    }
+
+  def _Steps(self):
+    return [
+      Preparation,
+      CreateBranch,
+      SearchArchitecturePorts,
+      CreateCommitMessage,
+      ApplyPatches,
+      PrepareVersion,
+      IncrementVersion,
+      CommitLocal,
+      UploadStep,
+      CommitRepository,
+      TagRevision,
+      CleanUp,
+    ]
+
+
+if __name__ == "__main__":  # pragma: no cover
+  sys.exit(RollMerge().Run())
diff --git a/src/third_party/v8/tools/release/script_test.py b/src/third_party/v8/tools/release/script_test.py
new file mode 100755
index 0000000..0f345b7
--- /dev/null
+++ b/src/third_party/v8/tools/release/script_test.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Wraps test execution with coverage analysis. For best speed, the native
+# Python coverage package (version >= 3.7.1) should be installed.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import coverage
+import os
+import unittest
+import sys
+
+
+def Main(argv):
+  script_path = os.path.dirname(os.path.abspath(__file__))
+  cov = coverage.coverage(include=([os.path.join(script_path, '*.py')]))
+  cov.start()
+  import test_scripts
+  alltests = map(unittest.TestLoader().loadTestsFromTestCase, [
+    test_scripts.ToplevelTest,
+    test_scripts.ScriptTest,
+  ])
+  unittest.TextTestRunner(verbosity=2).run(unittest.TestSuite(alltests))
+  cov.stop()
+  print(cov.report())
+
+
+if __name__ == '__main__':
+  sys.exit(Main(sys.argv))
diff --git a/src/third_party/v8/tools/release/search_related_commits.py b/src/third_party/v8/tools/release/search_related_commits.py
new file mode 100755
index 0000000..e6e52d2
--- /dev/null
+++ b/src/third_party/v8/tools/release/search_related_commits.py
@@ -0,0 +1,221 @@
+#!/usr/bin/env python
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import argparse
+import operator
+import os
+import re
+from sets import Set
+from subprocess import Popen, PIPE
+import sys
+
+def search_all_related_commits(
+    git_working_dir, start_hash, until, separator, verbose=False):
+
+  all_commits_raw = _find_commits_inbetween(
+      start_hash, until, git_working_dir, verbose)
+  if verbose:
+    print("All commits between <of> and <until>: " + all_commits_raw)
+
+  # Add the start hash as well.
+  all_commits = [start_hash]
+  all_commits.extend(all_commits_raw.splitlines())
+  all_related_commits = {}
+  already_treated_commits = Set([])
+  for commit in all_commits:
+    if commit in already_treated_commits:
+      continue
+
+    related_commits = _search_related_commits(
+        git_working_dir, commit, until, separator, verbose)
+    if len(related_commits) > 0:
+      all_related_commits[commit] = related_commits
+      already_treated_commits.update(related_commits)
+
+    already_treated_commits.update(commit)
+
+  return all_related_commits
+
+def _search_related_commits(
+    git_working_dir, start_hash, until, separator, verbose=False):
+
+  if separator:
+    commits_between = _find_commits_inbetween(
+        start_hash, separator, git_working_dir, verbose)
+    if commits_between == "":
+      return []
+
+  # Extract commit position
+  original_message = git_execute(
+      git_working_dir,
+      ["show", "-s", "--format=%B", start_hash],
+      verbose)
+  title = original_message.splitlines()[0]
+
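+  # The commit position is embedded in the message as
+  # "Cr-Commit-Position: refs/heads/master@{#12345}"; grab the number.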
+  matches = re.search("(\{#)([0-9]*)(\})", original_message)
+
+  if not matches:
+    return []
+
+  commit_position = matches.group(2)
+  if verbose:
+    print("1.) Commit position to look for: " + commit_position)
+
+  search_range = start_hash + ".." + until
+
+  def git_args(grep_pattern):
+    return [
+      "log",
+      "--reverse",
+      "--grep=" + grep_pattern,
+      "--format=%H",
+      search_range,
+    ]
+
+  found_by_hash = git_execute(
+      git_working_dir, git_args(start_hash), verbose).strip()
+
+  if verbose:
+    print("2.) Found by hash: " + found_by_hash)
+
+  found_by_commit_pos = git_execute(
+      git_working_dir, git_args(commit_position), verbose).strip()
+
+  if verbose:
+    print("3.) Found by commit position: " + found_by_commit_pos)
+
+  # Replace brackets or else they are wrongly interpreted by --grep
+  title = title.replace("[", "\\[")
+  title = title.replace("]", "\\]")
+
+  found_by_title = git_execute(
+      git_working_dir, git_args(title), verbose).strip()
+
+  if verbose:
+    print("4.) Found by title: " + found_by_title)
+
+  hits = (
+      _convert_to_array(found_by_hash) +
+      _convert_to_array(found_by_commit_pos) +
+      _convert_to_array(found_by_title))
+  hits = _remove_duplicates(hits)
+
+  if separator:
+    for current_hit in hits:
+      commits_between = _find_commits_inbetween(
+          separator, current_hit, git_working_dir, verbose)
+      if commits_between != "":
+        return hits
+    return []
+
+  return hits
+
+def _find_commits_inbetween(start_hash, end_hash, git_working_dir, verbose):
+  commits_between = git_execute(
+        git_working_dir,
+        ["rev-list", "--reverse", start_hash + ".." + end_hash],
+        verbose)
+  return commits_between.strip()
+
+def _convert_to_array(string_of_hashes):
+  return string_of_hashes.splitlines()
+
+def _remove_duplicates(array):
+   no_duplicates = []
+   for current in array:
+    if not current in no_duplicates:
+      no_duplicates.append(current)
+   return no_duplicates
+
+def git_execute(working_dir, args, verbose=False):
+  command = ["git", "-C", working_dir] + args
+  if verbose:
+    print("Git working dir: " + working_dir)
+    print("Executing git command:" + str(command))
+  p = Popen(args=command, stdin=PIPE,
+            stdout=PIPE, stderr=PIPE)
+  output, err = p.communicate()
+  rc = p.returncode
+  if rc != 0:
+    raise Exception(err)
+  if verbose:
+    print("Git return value: " + output)
+  return output
+
+def _pretty_print_entry(hash, git_dir, pre_text, verbose):
+  text_to_print = git_execute(
+      git_dir,
+      ["show",
+       "--quiet",
+       "--date=iso",
+       hash,
+       "--format=%ad # %H # %s"],
+      verbose)
+  return pre_text + text_to_print.strip()
+
+def main(options):
+    all_related_commits = search_all_related_commits(
+        options.git_dir,
+        options.of[0],
+        options.until[0],
+        options.separator,
+        options.verbose)
+
+    sort_key = lambda x: (
+        git_execute(
+            options.git_dir,
+            ["show", "--quiet", "--date=iso", x, "--format=%ad"],
+            options.verbose)).strip()
+
+    high_level_commits = sorted(all_related_commits.keys(), key=sort_key)
+
+    for current_key in high_level_commits:
+      if options.prettyprint:
+        yield _pretty_print_entry(
+            current_key,
+            options.git_dir,
+            "+",
+            options.verbose)
+      else:
+        yield "+" + current_key
+
+      found_commits = all_related_commits[current_key]
+      for current_commit in found_commits:
+        if options.prettyprint:
+          yield _pretty_print_entry(
+              current_commit,
+              options.git_dir,
+              "| ",
+              options.verbose)
+        else:
+          yield "| " + current_commit
+
+if __name__ == "__main__":  # pragma: no cover
+  parser = argparse.ArgumentParser(
+      "This tool analyzes the commit range between <of> and <until>. "
+      "It finds commits which belong together e.g. Implement/Revert pairs and "
+      "Implement/Port/Revert triples. All supplied hashes need to be "
+      "from the same branch e.g. master.")
+  parser.add_argument("-g", "--git-dir", required=False, default=".",
+                        help="The path to your git working directory.")
+  parser.add_argument("--verbose", action="store_true",
+      help="Enables a very verbose output")
+  parser.add_argument("of", nargs=1,
+      help="Hash of the commit to be searched.")
+  parser.add_argument("until", nargs=1,
+      help="Commit when searching should stop")
+  parser.add_argument("--separator", required=False,
+      help="The script will only list related commits "
+            "which are separated by hash <--separator>.")
+  parser.add_argument("--prettyprint", action="store_true",
+      help="Pretty prints the output")
+
+  args = sys.argv[1:]
+  options = parser.parse_args(args)
+  for current_line in main(options):
+    print(current_line)
diff --git a/src/third_party/v8/tools/release/test_mergeinfo.py b/src/third_party/v8/tools/release/test_mergeinfo.py
new file mode 100755
index 0000000..f8619bb
--- /dev/null
+++ b/src/third_party/v8/tools/release/test_mergeinfo.py
@@ -0,0 +1,209 @@
+#!/usr/bin/env python
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import mergeinfo
+import shutil
+import unittest
+
+from collections import namedtuple
+from os import path
+from subprocess import Popen, PIPE, check_call
+
+TEST_CONFIG = {
+  "GIT_REPO": "/tmp/test-v8-search-related-commits",
+}
+
+class TestMergeInfo(unittest.TestCase):
+
+  base_dir = TEST_CONFIG["GIT_REPO"]
+
+  def _execute_git(self, git_args):
+
+    fullCommand = ["git", "-C", self.base_dir] + git_args
+    p = Popen(args=fullCommand, stdin=PIPE,
+        stdout=PIPE, stderr=PIPE)
+    output, err = p.communicate()
+    rc = p.returncode
+    if rc != 0:
+      raise Exception(err)
+    return output
+
+  def _update_origin(self):
+    # Fetch from origin to get/update the origin/master branch
+    self._execute_git(['fetch', 'origin'])
+
+  def setUp(self):
+    if path.exists(self.base_dir):
+      shutil.rmtree(self.base_dir)
+
+    check_call(["git", "init", self.base_dir])
+
+    # Add fake remote with name 'origin'
+    self._execute_git(['remote', 'add', 'origin', self.base_dir])
+
+    # Initial commit
+    message = '''Initial commit'''
+
+    self._make_empty_commit(message)
+
+  def tearDown(self):
+    if path.exists(self.base_dir):
+      shutil.rmtree(self.base_dir)
+
+  def _assert_correct_standard_result(
+      self, result, all_commits, hash_of_first_commit):
+    self.assertEqual(len(result), 1, "Master commit not found")
+    self.assertTrue(
+        result.get(hash_of_first_commit),
+        "Master commit is wrong")
+
+    self.assertEqual(
+        len(result[hash_of_first_commit]),
+        1,
+        "Child commit not found")
+    self.assertEqual(
+        all_commits[2],
+        result[hash_of_first_commit][0],
+        "Child commit wrong")
+
+  def _get_commits(self):
+    commits = self._execute_git(
+        ["log", "--format=%H", "--reverse"]).splitlines()
+    return commits
+
+  def _get_branches(self, hash):
+    return mergeinfo.get_branches_for_commit(self.base_dir, hash)
+
+  def _make_empty_commit(self, message):
+    self._execute_git(["commit", "--allow-empty", "-m", message])
+    self._update_origin()
+    return self._get_commits()[-1]
+
+  def testCanDescribeCommit(self):
+    commits = self._get_commits()
+    hash_of_first_commit = commits[0]
+
+    result = mergeinfo.describe_commit(
+        self.base_dir,
+        hash_of_first_commit).splitlines()
+
+    self.assertEqual(
+        result[0],
+        'commit ' + hash_of_first_commit)
+
+  def testCanDescribeCommitSingleLine(self):
+    commits = self._get_commits()
+    hash_of_first_commit = commits[0]
+
+    result = mergeinfo.describe_commit(
+        self.base_dir,
+        hash_of_first_commit, True).splitlines()
+
+    self.assertEqual(
+        str(result[0]),
+        str(hash_of_first_commit[0:7]) + ' Initial commit')
+
+  def testSearchFollowUpCommits(self):
+    commits = self._get_commits()
+    hash_of_first_commit = commits[0]
+
+    message = 'Follow-up commit of '  + hash_of_first_commit
+    self._make_empty_commit(message)
+    self._make_empty_commit(message)
+    self._make_empty_commit(message)
+    commits = self._get_commits()
+    message = 'Not related commit'
+    self._make_empty_commit(message)
+
+    followups = mergeinfo.get_followup_commits(
+        self.base_dir,
+        hash_of_first_commit)
+    self.assertEqual(set(followups), set(commits[1:]))
+
+  def testSearchMerges(self):
+    self._execute_git(['branch', 'test'])
+    self._execute_git(['checkout', 'master'])
+    message = 'real initial commit'
+    self._make_empty_commit(message)
+    commits = self._get_commits()
+    hash_of_first_commit = commits[0]
+
+    self._execute_git(['checkout', 'test'])
+    message = 'Not related commit'
+    self._make_empty_commit(message)
+
+    # This should be found
+    message = 'Merge '  + hash_of_first_commit
+    hash_of_hit = self._make_empty_commit(message)
+
+    # This should be ignored
+    message = 'Cr-Branched-From: '  + hash_of_first_commit
+    hash_of_ignored = self._make_empty_commit(message)
+
+    self._execute_git(['checkout', 'master'])
+
+    followups = mergeinfo.get_followup_commits(
+        self.base_dir,
+        hash_of_first_commit)
+
+    # Check that follow-ups and merges do not overlap.
+    self.assertEqual(len(followups), 0)
+
+    message = 'Follow-up commit of '  + hash_of_first_commit
+    hash_of_followup = self._make_empty_commit(message)
+
+    merges = mergeinfo.get_merge_commits(self.base_dir, hash_of_first_commit)
+    # Check that the follow-up is ignored.
+    self.assertTrue(hash_of_followup not in merges)
+
+    # Check for proper return of merges
+    self.assertTrue(hash_of_hit in merges)
+    self.assertTrue(hash_of_ignored not in merges)
+
+  def testIsLkgr(self):
+    commits = self._get_commits()
+    hash_of_first_commit = commits[0]
+    self._make_empty_commit('This one is the lkgr head')
+    self._execute_git(['branch', 'remotes/origin/lkgr'])
+    hash_of_not_lkgr = self._make_empty_commit('This one is not yet lkgr')
+
+    branches = self._get_branches(hash_of_first_commit);
+    self.assertTrue(mergeinfo.is_lkgr(branches))
+    branches = self._get_branches(hash_of_not_lkgr);
+    self.assertFalse(mergeinfo.is_lkgr(branches))
+
+  def testShowFirstCanary(self):
+    commits = self._get_commits()
+    hash_of_first_commit = commits[0]
+
+    branches = self._get_branches(hash_of_first_commit);
+    self.assertEqual(mergeinfo.get_first_canary(branches), 'No Canary coverage')
+
+    self._execute_git(['branch', 'remotes/origin/chromium/2345'])
+    self._execute_git(['branch', 'remotes/origin/chromium/2346'])
+
+    branches = self._get_branches(hash_of_first_commit);
+    self.assertEqual(mergeinfo.get_first_canary(branches), '2345')
+
+  def testFirstV8Version(self):
+    commits = self._get_commits()
+    hash_of_first_commit = commits[0]
+
+    self._execute_git(['branch', 'remotes/origin/chromium/2345'])
+    self._execute_git(['branch', 'remotes/origin/chromium/2346'])
+    branches = self._get_branches(hash_of_first_commit);
+    self.assertEqual(mergeinfo.get_first_v8_version(branches), '--')
+
+    self._execute_git(['branch', 'remotes/origin/5.7.1'])
+    self._execute_git(['branch', 'remotes/origin/5.8.1'])
+    branches = self._get_branches(hash_of_first_commit);
+    self.assertEqual(mergeinfo.get_first_v8_version(branches), '5.7.1')
+
+    self._execute_git(['branch', 'remotes/origin/5.6.1'])
+    branches = self._get_branches(hash_of_first_commit);
+    self.assertEqual(mergeinfo.get_first_v8_version(branches), '5.6.1')
+
+if __name__ == "__main__":
+   unittest.main()
diff --git a/src/third_party/v8/tools/release/test_scripts.py b/src/third_party/v8/tools/release/test_scripts.py
new file mode 100755
index 0000000..cf86efb
--- /dev/null
+++ b/src/third_party/v8/tools/release/test_scripts.py
@@ -0,0 +1,939 @@
+#!/usr/bin/env python
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import os
+import shutil
+import tempfile
+import traceback
+import unittest
+
+import auto_push
+from auto_push import LastReleaseBailout
+import auto_roll
+import common_includes
+from common_includes import *
+import create_release
+from create_release import *
+import merge_to_branch
+from merge_to_branch import MergeToBranch
+from auto_tag import AutoTag
+import roll_merge
+from roll_merge import RollMerge
+
+TEST_CONFIG = {
+  "DEFAULT_CWD": None,
+  "BRANCHNAME": "test-prepare-push",
+  "PERSISTFILE_BASENAME": "/tmp/test-create-releases-tempfile",
+  "PATCH_FILE": "/tmp/test-v8-create-releases-tempfile-tempfile-patch",
+  "COMMITMSG_FILE": "/tmp/test-v8-create-releases-tempfile-commitmsg",
+  "CHROMIUM": "/tmp/test-create-releases-tempfile-chromium",
+  "SETTINGS_LOCATION": None,
+  "ALREADY_MERGING_SENTINEL_FILE":
+      "/tmp/test-merge-to-branch-tempfile-already-merging",
+  "TEMPORARY_PATCH_FILE": "/tmp/test-merge-to-branch-tempfile-temporary-patch",
+}
+
+
+AUTO_PUSH_ARGS = [
+  "-a", "author@chromium.org",
+  "-r", "reviewer@chromium.org",
+]
+
+
+class ToplevelTest(unittest.TestCase):
+  def testSanitizeVersionTags(self):
+    self.assertEquals("4.8.230", SanitizeVersionTag("4.8.230"))
+    self.assertEquals("4.8.230", SanitizeVersionTag("tags/4.8.230"))
+    self.assertEquals(None, SanitizeVersionTag("candidate"))
+
+  def testNormalizeVersionTags(self):
+    input = ["4.8.230",
+              "tags/4.8.230",
+              "tags/4.8.224.1",
+              "4.8.224.1",
+              "4.8.223.1",
+              "tags/4.8.223",
+              "tags/4.8.231",
+              "candidates"]
+    expected = ["4.8.230",
+                "4.8.230",
+                "4.8.224.1",
+                "4.8.224.1",
+                "4.8.223.1",
+                "4.8.223",
+                "4.8.231",
+                ]
+    self.assertEquals(expected, NormalizeVersionTags(input))
+
+
+def Cmd(*args, **kwargs):
+  """Convenience function returning a shell command test expectation."""
+  return {
+    "name": "command",
+    "args": args,
+    "ret": args[-1],
+    "cb": kwargs.get("cb"),
+    "cwd": kwargs.get("cwd", TEST_CONFIG["DEFAULT_CWD"]),
+  }
+
+
+def RL(text, cb=None):
+  """Convenience function returning a readline test expectation."""
+  return {
+    "name": "readline",
+    "args": [],
+    "ret": text,
+    "cb": cb,
+    "cwd": None,
+  }
+
+
+def URL(*args, **kwargs):
+  """Convenience function returning a readurl test expectation."""
+  return {
+    "name": "readurl",
+    "args": args[:-1],
+    "ret": args[-1],
+    "cb": kwargs.get("cb"),
+    "cwd": None,
+  }
+
+
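+# Replays a scripted sequence of expected calls ("recipe") with canned return
+# values and fails the test when actual calls deviate from the expectations.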
+class SimpleMock(object):
+  def __init__(self):
+    self._recipe = []
+    self._index = -1
+
+  def Expect(self, recipe):
+    self._recipe = recipe
+
+  def Call(self, name, *args, **kwargs):  # pragma: no cover
+    self._index += 1
+
+    try:
+      expected_call = self._recipe[self._index]
+    except IndexError:
+      raise NoRetryException("Calling %s %s" % (name, " ".join(args)))
+
+    if not isinstance(expected_call, dict):
+      raise NoRetryException("Found wrong expectation type for %s %s" %
+                             (name, " ".join(args)))
+
+    if expected_call["name"] != name:
+      raise NoRetryException("Expected action: %s %s - Actual: %s" %
+          (expected_call["name"], expected_call["args"], name))
+
+    # Check if the given working directory matches the expected one.
+    if expected_call["cwd"] != kwargs.get("cwd"):
+      raise NoRetryException("Expected cwd: %s in %s %s - Actual: %s" %
+          (expected_call["cwd"],
+           expected_call["name"],
+           expected_call["args"],
+           kwargs.get("cwd")))
+
+    # The number of arguments in the expectation must match the actual
+    # arguments.
+    if len(args) > len(expected_call['args']):
+      raise NoRetryException("When calling %s with arguments, the "
+          "expectations must consist of at least as many arguments." %
+          name)
+
+    # Compare expected and actual arguments.
+    for (expected_arg, actual_arg) in zip(expected_call['args'], args):
+      if expected_arg != actual_arg:
+        raise NoRetryException("Expected: %s - Actual: %s" %
+                               (expected_arg, actual_arg))
+
+    # The expected call contains an optional callback for checking the context
+    # at the time of the call.
+    if expected_call['cb']:
+      try:
+        expected_call['cb']()
+      except:
+        tb = traceback.format_exc()
+        raise NoRetryException("Caught exception from callback: %s" % tb)
+
+    # If the return value is an exception, raise it instead of returning.
+    if isinstance(expected_call['ret'], Exception):
+      raise expected_call['ret']
+    return expected_call['ret']
+
+  def AssertFinished(self):  # pragma: no cover
+    if self._index < len(self._recipe) -1:
+      raise NoRetryException("Called mock too seldom: %d vs. %d" %
+                             (self._index, len(self._recipe)))
+
+
+class ScriptTest(unittest.TestCase):
+  def MakeEmptyTempFile(self):
+    handle, name = tempfile.mkstemp()
+    os.close(handle)
+    self._tmp_files.append(name)
+    return name
+
+  def MakeEmptyTempDirectory(self):
+    name = tempfile.mkdtemp()
+    self._tmp_files.append(name)
+    return name
+
+
+  def WriteFakeVersionFile(self, major=3, minor=22, build=4, patch=0):
+    version_file = os.path.join(TEST_CONFIG["DEFAULT_CWD"], VERSION_FILE)
+    if not os.path.exists(os.path.dirname(version_file)):
+      os.makedirs(os.path.dirname(version_file))
+    with open(version_file, "w") as f:
+      f.write("  // Some line...\n")
+      f.write("\n")
+      f.write("#define V8_MAJOR_VERSION    %s\n" % major)
+      f.write("#define V8_MINOR_VERSION    %s\n" % minor)
+      f.write("#define V8_BUILD_NUMBER     %s\n" % build)
+      f.write("#define V8_PATCH_LEVEL      %s\n" % patch)
+      f.write("  // Some line...\n")
+      f.write("#define V8_IS_CANDIDATE_VERSION 0\n")
+
+  def WriteFakeWatchlistsFile(self):
+    watchlists_file = os.path.join(TEST_CONFIG["DEFAULT_CWD"], WATCHLISTS_FILE)
+    if not os.path.exists(os.path.dirname(watchlists_file)):
+      os.makedirs(os.path.dirname(watchlists_file))
+    with open(watchlists_file, "w") as f:
+
+      content = """
+    'merges': [
+      # Only enabled on branches created with tools/release/create_release.py
+      # 'v8-merges@googlegroups.com',
+    ],
+"""
+      f.write(content)
+
+  def MakeStep(self):
+    """Convenience wrapper."""
+    options = ScriptsBase(TEST_CONFIG, self, self._state).MakeOptions([])
+    return MakeStep(step_class=Step, state=self._state,
+                    config=TEST_CONFIG, side_effect_handler=self,
+                    options=options)
+
+  def RunStep(self, script=CreateRelease, step_class=Step, args=None):
+    """Convenience wrapper."""
+    args = args if args is not None else ["-m", "-a=author", "-r=reviewer", ]
+    return script(TEST_CONFIG, self, self._state).RunSteps([step_class], args)
+
+  def Call(self, fun, *args, **kwargs):
+    print("Calling %s with %s and %s" % (str(fun), str(args), str(kwargs)))
+
+  def Command(self, cmd, args="", prefix="", pipe=True, cwd=None):
+    print("%s %s" % (cmd, args))
+    print("in %s" % cwd)
+    return self._mock.Call("command", cmd + " " + args, cwd=cwd)
+
+  def ReadLine(self):
+    return self._mock.Call("readline")
+
+  def ReadURL(self, url, params):
+    if params is not None:
+      return self._mock.Call("readurl", url, params)
+    else:
+      return self._mock.Call("readurl", url)
+
+  def Sleep(self, seconds):
+    pass
+
+  def GetUTCStamp(self):
+    return "1000000"
+
+  def Expect(self, *args):
+    """Convenience wrapper."""
+    self._mock.Expect(*args)
+
+  def setUp(self):
+    self._mock = SimpleMock()
+    self._tmp_files = []
+    self._state = {}
+    TEST_CONFIG["DEFAULT_CWD"] = self.MakeEmptyTempDirectory()
+
+  def tearDown(self):
+    if os.path.exists(TEST_CONFIG["PERSISTFILE_BASENAME"]):
+      shutil.rmtree(TEST_CONFIG["PERSISTFILE_BASENAME"])
+
+    # Clean up temps. Doesn't work automatically.
+    for name in self._tmp_files:
+      if os.path.isfile(name):
+        os.remove(name)
+      if os.path.isdir(name):
+        shutil.rmtree(name)
+
+    self._mock.AssertFinished()
+
+  def testGitMock(self):
+    self.Expect([Cmd("git --version", "git version 1.2.3"),
+                 Cmd("git dummy", "")])
+    self.assertEquals("git version 1.2.3", self.MakeStep().Git("--version"))
+    self.assertEquals("", self.MakeStep().Git("dummy"))
+
+  def testCommonPrepareDefault(self):
+    self.Expect([
+      Cmd("git status -s -uno", ""),
+      Cmd("git checkout -f origin/master", ""),
+      Cmd("git fetch", ""),
+      Cmd("git branch", "  branch1\n* %s" % TEST_CONFIG["BRANCHNAME"]),
+      RL("Y"),
+      Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
+    ])
+    self.MakeStep().CommonPrepare()
+    self.MakeStep().PrepareBranch()
+
+  def testCommonPrepareNoConfirm(self):
+    self.Expect([
+      Cmd("git status -s -uno", ""),
+      Cmd("git checkout -f origin/master", ""),
+      Cmd("git fetch", ""),
+      Cmd("git branch", "  branch1\n* %s" % TEST_CONFIG["BRANCHNAME"]),
+      RL("n"),
+    ])
+    self.MakeStep().CommonPrepare()
+    self.assertRaises(Exception, self.MakeStep().PrepareBranch)
+
+  def testCommonPrepareDeleteBranchFailure(self):
+    self.Expect([
+      Cmd("git status -s -uno", ""),
+      Cmd("git checkout -f origin/master", ""),
+      Cmd("git fetch", ""),
+      Cmd("git branch", "  branch1\n* %s" % TEST_CONFIG["BRANCHNAME"]),
+      RL("Y"),
+      Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], None),
+    ])
+    self.MakeStep().CommonPrepare()
+    self.assertRaises(Exception, self.MakeStep().PrepareBranch)
+
+  def testInitialEnvironmentChecks(self):
+    TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
+    os.environ["EDITOR"] = "vi"
+    self.Expect([
+      Cmd("which vi", "/usr/bin/vi"),
+    ])
+    self.MakeStep().InitialEnvironmentChecks(TEST_CONFIG["DEFAULT_CWD"])
+
+  def testTagTimeout(self):
+    self.Expect([
+      Cmd("git fetch", ""),
+      Cmd("git log -1 --format=%H --grep=\"Title\" origin/tag_name", ""),
+      Cmd("git fetch", ""),
+      Cmd("git log -1 --format=%H --grep=\"Title\" origin/tag_name", ""),
+      Cmd("git fetch", ""),
+      Cmd("git log -1 --format=%H --grep=\"Title\" origin/tag_name", ""),
+      Cmd("git fetch", ""),
+      Cmd("git log -1 --format=%H --grep=\"Title\" origin/tag_name", ""),
+    ])
+    args = ["--branch", "candidates", "ab12345"]
+    self._state["version"] = "tag_name"
+    self._state["commit_title"] = "Title"
+    self.assertRaises(Exception,
+        lambda: self.RunStep(RollMerge, TagRevision, args))
+
+  def testReadAndPersistVersion(self):
+    self.WriteFakeVersionFile(build=5)
+    step = self.MakeStep()
+    step.ReadAndPersistVersion()
+    self.assertEquals("3", step["major"])
+    self.assertEquals("22", step["minor"])
+    self.assertEquals("5", step["build"])
+    self.assertEquals("0", step["patch"])
+
+  def testRegex(self):
+    self.assertEqual("(issue 321)",
+                     re.sub(r"BUG=v8:(.*)$", r"(issue \1)", "BUG=v8:321"))
+    self.assertEqual("(Chromium issue 321)",
+                     re.sub(r"BUG=(.*)$", r"(Chromium issue \1)", "BUG=321"))
+
+    cl = "  too little\n\ttab\ttab\n         too much\n        trailing  "
+    cl = MSub(r"\t", r"        ", cl)
+    cl = MSub(r"^ {1,7}([^ ])", r"        \1", cl)
+    cl = MSub(r"^ {9,80}([^ ])", r"        \1", cl)
+    cl = MSub(r" +$", r"", cl)
+    self.assertEqual("        too little\n"
+                     "        tab        tab\n"
+                     "        too much\n"
+                     "        trailing", cl)
+
+    self.assertEqual("//\n#define V8_BUILD_NUMBER  3\n",
+                     MSub(r"(?<=#define V8_BUILD_NUMBER)(?P<space>\s+)\d*$",
+                          r"\g<space>3",
+                          "//\n#define V8_BUILD_NUMBER  321\n"))
+
+  TAGS = """
+4425.0
+0.0.0.0
+3.9.6
+3.22.4
+test_tag
+"""
+
+  # Version as tag: 3.22.4.0. Version on master: 3.22.6.
+  # Make sure that the latest version is 3.22.6.0.
+  def testIncrementVersion(self):
+    self.Expect([
+      Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
+      Cmd("git tag", self.TAGS),
+      Cmd("git checkout -f origin/master -- include/v8-version.h",
+          "", cb=lambda: self.WriteFakeVersionFile(3, 22, 6)),
+    ])
+
+    self.RunStep(CreateRelease, IncrementVersion)
+
+    self.assertEquals("3", self._state["new_major"])
+    self.assertEquals("22", self._state["new_minor"])
+    self.assertEquals("7", self._state["new_build"])
+    self.assertEquals("0", self._state["new_patch"])
+
+  def testBootstrapper(self):
+    work_dir = self.MakeEmptyTempDirectory()
+    class FakeScript(ScriptsBase):
+      def _Steps(self):
+        return []
+
+    # Use the test configuration without the fake testing default work dir.
+    fake_config = dict(TEST_CONFIG)
+    del(fake_config["DEFAULT_CWD"])
+
+    self.Expect([
+      Cmd("fetch v8", "", cwd=work_dir),
+    ])
+    FakeScript(fake_config, self).Run(["--work-dir", work_dir])
+
+  def testCreateRelease(self):
+    TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
+
+    # The version file on master has build level 5.
+    self.WriteFakeVersionFile(build=5)
+
+    commit_msg = """Version 3.22.5
+
+TBR=reviewer@chromium.org"""
+
+    def CheckVersionCommit():
+      commit = FileToText(TEST_CONFIG["COMMITMSG_FILE"])
+      self.assertEquals(commit_msg, commit)
+      version = FileToText(
+          os.path.join(TEST_CONFIG["DEFAULT_CWD"], VERSION_FILE))
+      self.assertTrue(re.search(r"#define V8_MINOR_VERSION\s+22", version))
+      self.assertTrue(re.search(r"#define V8_BUILD_NUMBER\s+5", version))
+      self.assertFalse(re.search(r"#define V8_BUILD_NUMBER\s+6", version))
+      self.assertTrue(re.search(r"#define V8_PATCH_LEVEL\s+0", version))
+      self.assertTrue(
+          re.search(r"#define V8_IS_CANDIDATE_VERSION\s+0", version))
+
+    expectations = [
+      Cmd("git fetch origin +refs/heads/*:refs/heads/*", ""),
+      Cmd("git checkout -f origin/master", "", cb=self.WriteFakeWatchlistsFile),
+      Cmd("git branch", ""),
+      Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
+      Cmd("git tag", self.TAGS),
+      Cmd("git checkout -f origin/master -- include/v8-version.h",
+          "", cb=self.WriteFakeVersionFile),
+      Cmd("git log -1 --format=%H 3.22.4", "release_hash\n"),
+      Cmd("git log -1 --format=%s release_hash", "Version 3.22.4\n"),
+      Cmd("git log -1 --format=%H release_hash^", "abc3\n"),
+      Cmd("git log --format=%H abc3..push_hash", "rev1\n"),
+      Cmd("git push origin push_hash:refs/heads/3.22.5", ""),
+      Cmd("git reset --hard origin/master", ""),
+      Cmd("git new-branch work-branch --upstream origin/3.22.5", ""),
+      Cmd("git checkout -f 3.22.4 -- include/v8-version.h", "",
+          cb=self.WriteFakeVersionFile),
+      Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], "",
+          cb=CheckVersionCommit),
+      Cmd("git cl upload --send-mail "
+          "-f --bypass-hooks --no-autocc --message-file "
+          "\"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], ""),
+      Cmd("git cl land --bypass-hooks -f", ""),
+      Cmd("git fetch", ""),
+      Cmd("git log -1 --format=%H --grep="
+          "\"Version 3.22.5\" origin/3.22.5", "hsh_to_tag"),
+      Cmd("git tag 3.22.5 hsh_to_tag", ""),
+      Cmd("git push origin refs/tags/3.22.5:refs/tags/3.22.5", ""),
+      Cmd("git checkout -f origin/master", ""),
+      Cmd("git branch", "* master\n  work-branch\n"),
+      Cmd("git branch -D work-branch", ""),
+      Cmd("git gc", ""),
+    ]
+    self.Expect(expectations)
+
+    args = ["-a", "author@chromium.org",
+            "-r", "reviewer@chromium.org",
+            "--revision", "push_hash"]
+    CreateRelease(TEST_CONFIG, self).Run(args)
+
+    # Note: The version file is on build number 5 again at the end of this test
+    # since the git command that merges to master is mocked out.
+
+    # Check for correct content of the WATCHLISTS file
+
+    watchlists_content = FileToText(os.path.join(TEST_CONFIG["DEFAULT_CWD"],
+                                          WATCHLISTS_FILE))
+    expected_watchlists_content = """
+    'merges': [
+      # Only enabled on branches created with tools/release/create_release.py
+      'v8-merges@googlegroups.com',
+    ],
+"""
+    self.assertEqual(watchlists_content, expected_watchlists_content)
+
+  C_V8_22624_LOG = """V8 CL.
+
+git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22624 123
+
+"""
+
+  C_V8_123455_LOG = """V8 CL.
+
+git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@123455 123
+
+"""
+
+  C_V8_123456_LOG = """V8 CL.
+
+git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@123456 123
+
+"""
+
+  ROLL_COMMIT_MSG = """Update V8 to version 3.22.4.
+
+Summary of changes available at:
+https://chromium.googlesource.com/v8/v8/+log/last_rol..roll_hsh
+
+Please follow these instructions for assigning/CC'ing issues:
+https://v8.dev/docs/triage-issues
+
+Please close rolling in case of a roll revert:
+https://v8-roll.appspot.com/
+This only works with a Google account.
+
+CQ_INCLUDE_TRYBOTS=luci.chromium.try:linux-blink-rel
+CQ_INCLUDE_TRYBOTS=luci.chromium.try:linux_optional_gpu_tests_rel
+CQ_INCLUDE_TRYBOTS=luci.chromium.try:mac_optional_gpu_tests_rel
+CQ_INCLUDE_TRYBOTS=luci.chromium.try:win_optional_gpu_tests_rel
+CQ_INCLUDE_TRYBOTS=luci.chromium.try:android_optional_gpu_tests_rel
+
+TBR=reviewer@chromium.org"""
+
+  # Snippet from the original DEPS file.
+  FAKE_DEPS = """
+vars = {
+  "v8_revision": "last_roll_hsh",
+}
+deps = {
+  "src/v8":
+    (Var("googlecode_url") % "v8") + "/" + Var("v8_branch") + "@" +
+    Var("v8_revision"),
+}
+"""
+
+  def testChromiumRollUpToDate(self):
+    TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory()
+    json_output_file = os.path.join(TEST_CONFIG["CHROMIUM"], "out.json")
+    TextToFile(self.FAKE_DEPS, os.path.join(TEST_CONFIG["CHROMIUM"], "DEPS"))
+    chrome_dir = TEST_CONFIG["CHROMIUM"]
+    self.Expect([
+      Cmd("git fetch origin", ""),
+      Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
+      Cmd("gclient getdep -r src/v8", "last_roll_hsh", cwd=chrome_dir),
+      Cmd("git describe --tags last_roll_hsh", "3.22.4"),
+      Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
+      Cmd("git rev-list --max-age=395200 --tags",
+          "bad_tag\nroll_hsh\nhash_123"),
+      Cmd("git describe --tags bad_tag", ""),
+      Cmd("git describe --tags roll_hsh", "3.22.4"),
+      Cmd("git describe --tags hash_123", "3.22.3"),
+      Cmd("git describe --tags roll_hsh", "3.22.4"),
+      Cmd("git describe --tags hash_123", "3.22.3"),
+    ])
+
+    result = auto_roll.AutoRoll(TEST_CONFIG, self).Run(
+        AUTO_PUSH_ARGS + [
+          "-c", TEST_CONFIG["CHROMIUM"],
+          "--json-output", json_output_file])
+    self.assertEquals(0, result)
+    json_output = json.loads(FileToText(json_output_file))
+    self.assertEquals("up_to_date", json_output["monitoring_state"])
+
+
+  def testChromiumRoll(self):
+    # Setup fake directory structures.
+    TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory()
+    json_output_file = os.path.join(TEST_CONFIG["CHROMIUM"], "out.json")
+    TextToFile(self.FAKE_DEPS, os.path.join(TEST_CONFIG["CHROMIUM"], "DEPS"))
+    TextToFile("", os.path.join(TEST_CONFIG["CHROMIUM"], ".git"))
+    chrome_dir = TEST_CONFIG["CHROMIUM"]
+    os.makedirs(os.path.join(chrome_dir, "v8"))
+
+    def WriteDeps():
+      TextToFile("Some line\n   \"v8_revision\": \"22624\",\n  some line",
+                 os.path.join(chrome_dir, "DEPS"))
+
+    expectations = [
+      Cmd("git fetch origin", ""),
+      Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
+      Cmd("gclient getdep -r src/v8", "last_roll_hsh", cwd=chrome_dir),
+      Cmd("git describe --tags last_roll_hsh", "3.22.3.1"),
+      Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
+      Cmd("git rev-list --max-age=395200 --tags",
+          "bad_tag\nroll_hsh\nhash_123"),
+      Cmd("git describe --tags bad_tag", ""),
+      Cmd("git describe --tags roll_hsh", "3.22.4"),
+      Cmd("git describe --tags hash_123", "3.22.3"),
+      Cmd("git describe --tags roll_hsh", "3.22.4"),
+      Cmd("git log -1 --format=%s roll_hsh", "Version 3.22.4\n"),
+      Cmd("git describe --tags roll_hsh", "3.22.4"),
+      Cmd("git describe --tags last_roll_hsh", "3.22.2.1"),
+      Cmd("git status -s -uno", "", cwd=chrome_dir),
+      Cmd("git checkout -f master", "", cwd=chrome_dir),
+      Cmd("git branch", "", cwd=chrome_dir),
+      Cmd("git pull", "", cwd=chrome_dir),
+      Cmd("git fetch origin", ""),
+      Cmd("git new-branch work-branch", "", cwd=chrome_dir),
+      Cmd("gclient setdep -r src/v8@roll_hsh", "", cb=WriteDeps,
+          cwd=chrome_dir),
+      Cmd(("git commit -am \"%s\" "
+           "--author \"author@chromium.org <author@chromium.org>\"" %
+           self.ROLL_COMMIT_MSG),
+          "", cwd=chrome_dir),
+      Cmd("git cl upload --send-mail -f "
+          "--cq-dry-run --bypass-hooks", "",
+          cwd=chrome_dir),
+      Cmd("git checkout -f master", "", cwd=chrome_dir),
+      Cmd("git branch -D work-branch", "", cwd=chrome_dir),
+    ]
+    self.Expect(expectations)
+
+    args = ["-a", "author@chromium.org", "-c", chrome_dir,
+            "-r", "reviewer@chromium.org", "--json-output", json_output_file]
+    auto_roll.AutoRoll(TEST_CONFIG, self).Run(args)
+
+    deps = FileToText(os.path.join(chrome_dir, "DEPS"))
+    self.assertTrue(re.search("\"v8_revision\": \"22624\"", deps))
+
+    json_output = json.loads(FileToText(json_output_file))
+    self.assertEquals("success", json_output["monitoring_state"])
+
+  def testCheckLastPushRecently(self):
+    self.Expect([
+      Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
+      Cmd("git tag", self.TAGS),
+      Cmd("git log -1 --format=%H 3.22.4", "release_hash\n"),
+      Cmd("git log -1 --format=%s release_hash",
+          "Version 3.22.4 (based on abc3)\n"),
+      Cmd("git log --format=%H abc3..abc123", "\n"),
+    ])
+
+    self._state["candidate"] = "abc123"
+    self.assertEquals(0, self.RunStep(
+        auto_push.AutoPush, LastReleaseBailout, AUTO_PUSH_ARGS))
+
+  def testAutoPush(self):
+    self.Expect([
+      Cmd("git fetch", ""),
+      Cmd("git fetch origin +refs/heads/lkgr:refs/heads/lkgr", ""),
+      Cmd("git show-ref -s refs/heads/lkgr", "abc123\n"),
+      Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
+      Cmd("git tag", self.TAGS),
+      Cmd("git log -1 --format=%H 3.22.4", "release_hash\n"),
+      Cmd("git log -1 --format=%s release_hash",
+          "Version 3.22.4 (based on abc3)\n"),
+      Cmd("git log --format=%H abc3..abc123", "some_stuff\n"),
+    ])
+
+    auto_push.AutoPush(TEST_CONFIG, self).Run(AUTO_PUSH_ARGS + ["--push"])
+
+    state = json.loads(FileToText("%s-state.json"
+                                  % TEST_CONFIG["PERSISTFILE_BASENAME"]))
+
+    self.assertEquals("abc123", state["candidate"])
+
+  def testRollMerge(self):
+    TEST_CONFIG["ALREADY_MERGING_SENTINEL_FILE"] = self.MakeEmptyTempFile()
+    TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
+    self.WriteFakeVersionFile(build=5)
+    os.environ["EDITOR"] = "vi"
+    extra_patch = self.MakeEmptyTempFile()
+
+    def VerifyPatch(patch):
+      return lambda: self.assertEquals(patch,
+          FileToText(TEST_CONFIG["TEMPORARY_PATCH_FILE"]))
+
+    msg = """Version 3.22.5.1 (cherry-pick)
+
+Merged ab12345
+Merged ab23456
+Merged ab34567
+Merged ab45678
+Merged ab56789
+
+Title4
+
+Title2
+
+Title3
+
+Title1
+
+Revert "Something"
+
+BUG=123,234,345,456,567,v8:123
+"""
+
+    def VerifyLand():
+      commit = FileToText(TEST_CONFIG["COMMITMSG_FILE"])
+      self.assertEquals(msg, commit)
+      version = FileToText(
+          os.path.join(TEST_CONFIG["DEFAULT_CWD"], VERSION_FILE))
+      self.assertTrue(re.search(r"#define V8_MINOR_VERSION\s+22", version))
+      self.assertTrue(re.search(r"#define V8_BUILD_NUMBER\s+5", version))
+      self.assertTrue(re.search(r"#define V8_PATCH_LEVEL\s+1", version))
+      self.assertTrue(
+          re.search(r"#define V8_IS_CANDIDATE_VERSION\s+0", version))
+
+    self.Expect([
+      Cmd("git status -s -uno", ""),
+      Cmd("git checkout -f origin/master", ""),
+      Cmd("git fetch", ""),
+      Cmd("git branch", "  branch1\n* branch2\n"),
+      Cmd("git new-branch %s --upstream refs/remotes/origin/candidates" %
+          TEST_CONFIG["BRANCHNAME"], ""),
+      Cmd(("git log --format=%H --grep=\"Port ab12345\" "
+           "--reverse origin/master"),
+          "ab45678\nab23456"),
+      Cmd("git log -1 --format=%s ab45678", "Title1"),
+      Cmd("git log -1 --format=%s ab23456", "Title2"),
+      Cmd(("git log --format=%H --grep=\"Port ab23456\" "
+           "--reverse origin/master"),
+          ""),
+      Cmd(("git log --format=%H --grep=\"Port ab34567\" "
+           "--reverse origin/master"),
+          "ab56789"),
+      Cmd("git log -1 --format=%s ab56789", "Title3"),
+      RL("Y"),  # Automatically add corresponding ports (ab34567, ab56789)?
+      # Simulate git being down which stops the script.
+      Cmd("git log -1 --format=%s ab12345", None),
+      # Restart script in the failing step.
+      Cmd("git log -1 --format=%s ab12345", "Title4"),
+      Cmd("git log -1 --format=%s ab23456", "Title2"),
+      Cmd("git log -1 --format=%s ab34567", "Title3"),
+      Cmd("git log -1 --format=%s ab45678", "Title1"),
+      Cmd("git log -1 --format=%s ab56789", "Revert \"Something\""),
+      Cmd("git log -1 ab12345", "Title4\nBUG=123\nBUG=234"),
+      Cmd("git log -1 ab23456", "Title2\n BUG = v8:123,345"),
+      Cmd("git log -1 ab34567", "Title3\nBUG=567, 456"),
+      Cmd("git log -1 ab45678", "Title1\nBUG="),
+      Cmd("git log -1 ab56789", "Revert \"Something\"\nBUG=none"),
+      Cmd("git log -1 -p ab12345", "patch4"),
+      Cmd(("git apply --index --reject \"%s\"" %
+           TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
+          "", cb=VerifyPatch("patch4")),
+      Cmd("git log -1 -p ab23456", "patch2"),
+      Cmd(("git apply --index --reject \"%s\"" %
+           TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
+          "", cb=VerifyPatch("patch2")),
+      Cmd("git log -1 -p ab34567", "patch3"),
+      Cmd(("git apply --index --reject \"%s\"" %
+           TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
+          "", cb=VerifyPatch("patch3")),
+      Cmd("git log -1 -p ab45678", "patch1"),
+      Cmd(("git apply --index --reject \"%s\"" %
+           TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
+          "", cb=VerifyPatch("patch1")),
+      Cmd("git log -1 -p ab56789", "patch5\n"),
+      Cmd(("git apply --index --reject \"%s\"" %
+           TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
+          "", cb=VerifyPatch("patch5\n")),
+      Cmd("git apply --index --reject \"%s\"" % extra_patch, ""),
+      RL("Y"),  # Automatically increment patch level?
+      Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], ""),
+      RL("reviewer@chromium.org"),  # V8 reviewer.
+      Cmd("git cl upload --send-mail -r \"reviewer@chromium.org\" "
+          "--bypass-hooks --cc \"ulan@chromium.org\"", ""),
+      Cmd("git checkout -f %s" % TEST_CONFIG["BRANCHNAME"], ""),
+      RL("LGTM"),  # Enter LGTM for V8 CL.
+      Cmd("git cl presubmit", "Presubmit successfull\n"),
+      Cmd("git cl land -f --bypass-hooks", "Closing issue\n",
+          cb=VerifyLand),
+      Cmd("git fetch", ""),
+      Cmd("git log -1 --format=%H --grep=\""
+          "Version 3.22.5.1 (cherry-pick)"
+          "\" refs/remotes/origin/candidates",
+          ""),
+      Cmd("git fetch", ""),
+      Cmd("git log -1 --format=%H --grep=\""
+          "Version 3.22.5.1 (cherry-pick)"
+          "\" refs/remotes/origin/candidates",
+          "hsh_to_tag"),
+      Cmd("git tag 3.22.5.1 hsh_to_tag", ""),
+      Cmd("git push origin refs/tags/3.22.5.1:refs/tags/3.22.5.1", ""),
+      Cmd("git checkout -f origin/master", ""),
+      Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
+    ])
+
+    # ab12345 and ab34567 are patches. ab23456 (included) and ab45678 are the
+    # MIPS ports of ab12345. ab56789 is the MIPS port of ab34567.
+    args = ["-f", "-p", extra_patch, "--branch", "candidates",
+            "ab12345", "ab23456", "ab34567"]
+
+    # The first run of the script stops because of git being down.
+    self.assertRaises(GitFailedException,
+        lambda: RollMerge(TEST_CONFIG, self).Run(args))
+
+    # Test that state recovery after restarting the script works.
+    args += ["-s", "4"]
+    RollMerge(TEST_CONFIG, self).Run(args)
+
+  def testMergeToBranch(self):
+    TEST_CONFIG["ALREADY_MERGING_SENTINEL_FILE"] = self.MakeEmptyTempFile()
+    TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
+    self.WriteFakeVersionFile(build=5)
+    os.environ["EDITOR"] = "vi"
+    extra_patch = self.MakeEmptyTempFile()
+
+
+    def VerifyPatch(patch):
+      return lambda: self.assertEquals(patch,
+          FileToText(TEST_CONFIG["TEMPORARY_PATCH_FILE"]))
+
+    info_msg = ("NOTE: This script will no longer automatically "
+     "update include/v8-version.h "
+     "and create a tag. This is done automatically by the autotag bot. "
+     "Please call the merge_to_branch.py with --help for more information.")
+
+    msg = """Merged: Squashed multiple commits.
+
+Merged: Title4
+Revision: ab12345
+
+Merged: Title2
+Revision: ab23456
+
+Merged: Title3
+Revision: ab34567
+
+Merged: Title1
+Revision: ab45678
+
+Merged: Revert \"Something\"
+Revision: ab56789
+
+BUG=123,234,345,456,567,v8:123
+NOTRY=true
+NOPRESUBMIT=true
+NOTREECHECKS=true
+"""
+
+    def VerifyLand():
+      commit = FileToText(TEST_CONFIG["COMMITMSG_FILE"])
+      self.assertEquals(msg, commit)
+
+    self.Expect([
+      Cmd("git status -s -uno", ""),
+      Cmd("git checkout -f origin/master", ""),
+      Cmd("git fetch", ""),
+      Cmd("git branch", "  branch1\n* branch2\n"),
+      Cmd("git new-branch %s --upstream refs/remotes/origin/candidates" %
+          TEST_CONFIG["BRANCHNAME"], ""),
+      Cmd(("git log --format=%H --grep=\"^[Pp]ort ab12345\" "
+           "--reverse origin/master"),
+          "ab45678\nab23456"),
+      Cmd("git log -1 --format=%s ab45678", "Title1"),
+      Cmd("git log -1 --format=%s ab23456", "Title2"),
+      Cmd(("git log --format=%H --grep=\"^[Pp]ort ab23456\" "
+           "--reverse origin/master"),
+          ""),
+      Cmd(("git log --format=%H --grep=\"^[Pp]ort ab34567\" "
+           "--reverse origin/master"),
+          "ab56789"),
+      Cmd("git log -1 --format=%s ab56789", "Title3"),
+      RL("Y"),  # Automatically add corresponding ports (ab34567, ab56789)?
+      # Simulate git being down which stops the script.
+      Cmd("git log -1 --format=%s ab12345", None),
+      # Restart script in the failing step.
+      Cmd("git log -1 --format=%s ab12345", "Title4"),
+      Cmd("git log -1 --format=%s ab23456", "Title2"),
+      Cmd("git log -1 --format=%s ab34567", "Title3"),
+      Cmd("git log -1 --format=%s ab45678", "Title1"),
+      Cmd("git log -1 --format=%s ab56789", "Revert \"Something\""),
+      Cmd("git log -1 ab12345", "Title4\nBUG=123\nBUG=234"),
+      Cmd("git log -1 ab23456", "Title2\n BUG = v8:123,345"),
+      Cmd("git log -1 ab34567", "Title3\nBug: 567, 456,345"),
+      Cmd("git log -1 ab45678", "Title1\nBug:"),
+      Cmd("git log -1 ab56789", "Revert \"Something\"\nBUG=none"),
+      Cmd("git log -1 -p ab12345", "patch4"),
+      Cmd(("git apply --index --reject \"%s\"" %
+           TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
+          "", cb=VerifyPatch("patch4")),
+      Cmd("git log -1 -p ab23456", "patch2"),
+      Cmd(("git apply --index --reject \"%s\"" %
+           TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
+          "", cb=VerifyPatch("patch2")),
+      Cmd("git log -1 -p ab34567", "patch3"),
+      Cmd(("git apply --index --reject \"%s\"" %
+           TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
+          "", cb=VerifyPatch("patch3")),
+      Cmd("git log -1 -p ab45678", "patch1"),
+      Cmd(("git apply --index --reject \"%s\"" %
+           TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
+          "", cb=VerifyPatch("patch1")),
+      Cmd("git log -1 -p ab56789", "patch5\n"),
+      Cmd(("git apply --index --reject \"%s\"" %
+           TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
+          "", cb=VerifyPatch("patch5\n")),
+      Cmd("git apply --index --reject \"%s\"" % extra_patch, ""),
+      Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], ""),
+      RL("reviewer@chromium.org"),  # V8 reviewer.
+      Cmd("git cl upload --send-mail -r \"reviewer@chromium.org\" "
+          "--bypass-hooks --cc \"ulan@chromium.org\"", ""),
+      Cmd("git checkout -f %s" % TEST_CONFIG["BRANCHNAME"], ""),
+      RL("LGTM"),  # Enter LGTM for V8 CL.
+      Cmd("git cl presubmit", "Presubmit successfull\n"),
+      Cmd("git cl land -f --bypass-hooks", "Closing issue\n",
+          cb=VerifyLand),
+      Cmd("git checkout -f origin/master", ""),
+      Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
+    ])
+
+    # ab12345 and ab34567 are patches. ab23456 (included) and ab45678 are the
+    # MIPS ports of ab12345. ab56789 is the MIPS port of ab34567.
+    args = ["-f", "-p", extra_patch, "--branch", "candidates",
+            "ab12345", "ab23456", "ab34567"]
+
+    # The first run of the script stops because of git being down.
+    self.assertRaises(GitFailedException,
+        lambda: MergeToBranch(TEST_CONFIG, self).Run(args))
+
+    # Test that state recovery after restarting the script works.
+    args += ["-s", "4"]
+    MergeToBranch(TEST_CONFIG, self).Run(args)
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/src/third_party/v8/tools/release/test_search_related_commits.py b/src/third_party/v8/tools/release/test_search_related_commits.py
new file mode 100755
index 0000000..cf61236
--- /dev/null
+++ b/src/third_party/v8/tools/release/test_search_related_commits.py
@@ -0,0 +1,274 @@
+#!/usr/bin/env python
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from collections import namedtuple
+from os import path
+import search_related_commits
+import shutil
+from subprocess import Popen, PIPE, check_call
+import unittest
+
+
+TEST_CONFIG = {
+  "GIT_REPO": "/tmp/test-v8-search-related-commits",
+}
+
+class TestSearchRelatedCommits(unittest.TestCase):
+
+  base_dir = TEST_CONFIG["GIT_REPO"]
+
+  def _execute_git(self, git_args):
+    full_command = ["git", "-C", self.base_dir] + git_args
+    p = Popen(args=full_command, stdin=PIPE,
+        stdout=PIPE, stderr=PIPE)
+    output, err = p.communicate()
+    rc = p.returncode
+    if rc != 0:
+      raise Exception(err)
+    return output
+
+  def setUp(self):
+    if path.exists(self.base_dir):
+      shutil.rmtree(self.base_dir)
+
+    check_call(["git", "init", self.base_dir])
+
+    # Initial commit
+    message = """[turbofan] Sanitize language mode for javascript operators.
+
+    R=mstarzinger@chromium.org
+
+    Review URL: https://codereview.chromium.org/1084243005
+
+    Cr-Commit-Position: refs/heads/master@{#28059}"""
+    self._make_empty_commit(message)
+
+    message = """[crankshaft] Do some stuff
+
+    R=hablich@chromium.org
+
+    Review URL: https://codereview.chromium.org/1084243007
+
+    Cr-Commit-Position: refs/heads/master@{#28030}"""
+
+    self._make_empty_commit(message)
+
+  def tearDown(self):
+    if path.exists(self.base_dir):
+      shutil.rmtree(self.base_dir)
+
+  def _assert_correct_standard_result(
+      self, result, all_commits, hash_of_first_commit):
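+    # The oldest commit is expected to be the only key in the result, with the
+    # third commit (the one created by the calling test) as its single child.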
+    self.assertEqual(len(result), 1, "Master commit not found")
+    self.assertTrue(
+        result.get(hash_of_first_commit),
+        "Master commit is wrong")
+
+    self.assertEqual(
+        len(result[hash_of_first_commit]),
+        1,
+        "Child commit not found")
+    self.assertEqual(
+        all_commits[2],
+        result[hash_of_first_commit][0],
+        "Child commit wrong")
+
+  def _get_commits(self):
+    commits = self._execute_git(
+        ["log", "--format=%H", "--reverse"]).splitlines()
+    return commits
+
+  def _make_empty_commit(self, message):
+    self._execute_git(["commit", "--allow-empty", "-m", message])
+
+  def testSearchByCommitPosition(self):
+    message = """Revert of some stuff.
+    > Cr-Commit-Position: refs/heads/master@{#28059}
+    R=mstarzinger@chromium.org
+
+    Review URL: https://codereview.chromium.org/1084243005
+
+    Cr-Commit-Position: refs/heads/master@{#28088}"""
+
+    self._make_empty_commit(message)
+
+    commits = self._get_commits()
+    hash_of_first_commit = commits[0]
+
+    result = search_related_commits.search_all_related_commits(
+        self.base_dir, hash_of_first_commit, "HEAD", None)
+
+    self._assert_correct_standard_result(result, commits, hash_of_first_commit)
+
+  def testSearchByTitle(self):
+    message = """Revert of some stuff.
+    > [turbofan] Sanitize language mode for javascript operators.
+    > Cr-Commit-Position: refs/heads/master@{#289}
+    R=mstarzinger@chromium.org
+
+    Review URL: https://codereview.chromium.org/1084243005
+
+    Cr-Commit-Position: refs/heads/master@{#28088}"""
+
+    self._make_empty_commit(message)
+
+    commits = self._get_commits()
+    hash_of_first_commit = commits[0]
+
+    result = search_related_commits.search_all_related_commits(
+        self.base_dir, hash_of_first_commit, "HEAD", None)
+
+    self._assert_correct_standard_result(result, commits, hash_of_first_commit)
+
+  def testSearchByHash(self):
+    commits = self._get_commits()
+    hash_of_first_commit = commits[0]
+
+    message = """Revert of some stuff.
+    > [turbofan] Sanitize language mode for javascript operators.
+    > Reverting """ + hash_of_first_commit + """
+    > R=mstarzinger@chromium.org
+
+    Review URL: https://codereview.chromium.org/1084243005
+
+    Cr-Commit-Position: refs/heads/master@{#28088}"""
+
+    self._make_empty_commit(message)
+
+    # Fetch again for an update.
+    commits = self._get_commits()
+    hash_of_first_commit = commits[0]
+
+    result = search_related_commits.search_all_related_commits(
+        self.base_dir,
+        hash_of_first_commit,
+        "HEAD",
+        None)
+
+    self._assert_correct_standard_result(result, commits, hash_of_first_commit)
+
+  def testConsiderSeparator(self):
+    commits = self._get_commits()
+    hash_of_first_commit = commits[0]
+
+    # The related commits happen before the separator, so this is not a hit.
+    message = """Revert of some stuff: Not a hit
+    > [turbofan] Sanitize language mode for javascript operators.
+    > Reverting """ + hash_of_first_commit + """
+    > R=mstarzinger@chromium.org
+
+    Review URL: https://codereview.chromium.org/1084243005
+
+    Cr-Commit-Position: refs/heads/master@{#28088}"""
+    self._make_empty_commit(message)
+
+    # Related commits happen before and after the separator, so this is a hit.
+    commit_pos_of_master = "27088"
+    message = """Implement awesome feature: Master commit
+
+    Review URL: https://codereview.chromium.org/1084243235
+
+    Cr-Commit-Position: refs/heads/master@{#""" + commit_pos_of_master + "}"
+    self._make_empty_commit(message)
+
+    # Separator commit
+    message = """Commit which is the origin of the branch
+
+    Review URL: https://codereview.chromium.org/1084243456
+
+    Cr-Commit-Position: refs/heads/master@{#28173}"""
+    self._make_empty_commit(message)
+
+    # Filler commit
+    message = "Some unrelated commit: Not a hit"
+    self._make_empty_commit(message)
+
+    # Related commit after separator: a hit
+    message = "Patch r" + commit_pos_of_master +""" done
+
+    Review URL: https://codereview.chromium.org/1084243235
+
+    Cr-Commit-Position: refs/heads/master@{#29567}"""
+    self._make_empty_commit(message)
+
+    # Fetch again for an update.
+    commits = self._get_commits()
+    hash_of_first_commit = commits[0]
+    hash_of_hit = commits[3]
+    hash_of_separator = commits[4]
+    hash_of_child_hit = commits[6]
+
+    result = search_related_commits.search_all_related_commits(
+        self.base_dir,
+        hash_of_first_commit,
+        "HEAD",
+        hash_of_separator)
+
+    self.assertTrue(result.get(hash_of_hit), "Hit not found")
+    self.assertEqual(len(result), 1, "More than one hit found")
+    self.assertEqual(
+        len(result.get(hash_of_hit)),
+        1,
+        "More than one child hit found")
+    self.assertEqual(
+        result.get(hash_of_hit)[0],
+        hash_of_child_hit,
+        "Wrong commit found")
+
+  def testPrettyPrint(self):
+    message = """Revert of some stuff.
+    > [turbofan] Sanitize language mode for javascript operators.
+    > Cr-Commit-Position: refs/heads/master@{#289}
+    R=mstarzinger@chromium.org
+
+    Review URL: https://codereview.chromium.org/1084243005
+
+    Cr-Commit-Position: refs/heads/master@{#28088}"""
+
+    self._make_empty_commit(message)
+
+    commits = self._get_commits()
+    hash_of_first_commit = commits[0]
+    OptionsStruct = namedtuple(
+        "OptionsStruct",
+        "git_dir of until all prettyprint separator verbose")
+    options = OptionsStruct(
+        git_dir=self.base_dir,
+        of=[hash_of_first_commit],
+        until=[commits[2]],
+        all=True,
+        prettyprint=True,
+        separator=None,
+        verbose=False)
+    output = []
+    for current_line in search_related_commits.main(options):
+      output.append(current_line)
+
+    self.assertEqual(len(output), 2, "Not exactly two entries written")
+    self.assertTrue(output[0].startswith("+"), "Master entry not marked with +")
+    self.assertTrue(output[1].startswith("| "), "Child entry not marked with |")
+
+  def testNothingFound(self):
+    commits = self._get_commits()
+
+    self._execute_git(["commit", "--allow-empty", "-m", "A"])
+    self._execute_git(["commit", "--allow-empty", "-m", "B"])
+    self._execute_git(["commit", "--allow-empty", "-m", "C"])
+    self._execute_git(["commit", "--allow-empty", "-m", "D"])
+
+    hash_of_first_commit = commits[0]
+    result = search_related_commits.search_all_related_commits(
+        self.base_dir,
+        hash_of_first_commit,
+        "HEAD",
+        None)
+
+    self.assertEqual(len(result), 0, "Results found where none should be.")
+
+
+if __name__ == "__main__":
+  # import sys; sys.argv = ['', 'Test.testName']
+  unittest.main()
diff --git a/src/third_party/v8/tools/release/testdata/v8/third_party/googletest/src/googletest/include/gtest/baz/gtest_new b/src/third_party/v8/tools/release/testdata/v8/third_party/googletest/src/googletest/include/gtest/baz/gtest_new
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/third_party/v8/tools/release/testdata/v8/third_party/googletest/src/googletest/include/gtest/baz/gtest_new
diff --git a/src/third_party/v8/tools/release/testdata/v8/third_party/googletest/src/googletest/include/gtest/gtest_new b/src/third_party/v8/tools/release/testdata/v8/third_party/googletest/src/googletest/include/gtest/gtest_new
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/third_party/v8/tools/release/testdata/v8/third_party/googletest/src/googletest/include/gtest/gtest_new
diff --git a/src/third_party/v8/tools/release/testdata/v8/third_party/googletest/src/googletest/include/gtest/gtest_prod.h b/src/third_party/v8/tools/release/testdata/v8/third_party/googletest/src/googletest/include/gtest/gtest_prod.h
new file mode 100644
index 0000000..847c8bc
--- /dev/null
+++ b/src/third_party/v8/tools/release/testdata/v8/third_party/googletest/src/googletest/include/gtest/gtest_prod.h
@@ -0,0 +1 @@
+gtest_prod
diff --git a/src/third_party/v8/tools/release/testdata/v8/third_party/googletest/src/googletest/include/gtest/new/gtest_new b/src/third_party/v8/tools/release/testdata/v8/third_party/googletest/src/googletest/include/gtest/new/gtest_new
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/third_party/v8/tools/release/testdata/v8/third_party/googletest/src/googletest/include/gtest/new/gtest_new
diff --git a/src/third_party/v8/tools/run-clang-tidy.py b/src/third_party/v8/tools/run-clang-tidy.py
new file mode 100755
index 0000000..aee1b40
--- /dev/null
+++ b/src/third_party/v8/tools/run-clang-tidy.py
@@ -0,0 +1,424 @@
+#!/usr/bin/env python
+#
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import json
+import multiprocessing
+import optparse
+import os
+import re
+import subprocess
+import sys
+
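+# Matches clang-tidy warning lines such as
+# "/abs/path/file.cc:1:2: warning: ... [check-name]", capturing the file path
+# and the check name in brackets.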
+CLANG_TIDY_WARNING = re.compile(r'(\/.*?)\ .*\[(.*)\]$')
+CLANG_TIDY_CMDLINE_OUT = re.compile(r'^clang-tidy.*\ .*|^\./\.\*')
+FILE_REGEXS = ['../src/*', '../test/*']
+HEADER_REGEX = ['\.\.\/src\/.*|\.\.\/include\/.*|\.\.\/test\/.*']
+
+THREADS = multiprocessing.cpu_count()
+
+
+class ClangTidyWarning(object):
+  """
+  Wraps up a clang-tidy warning to present aggregated information.
+  """
+
+  def __init__(self, warning_type):
+    self.warning_type = warning_type
+    self.occurrences = set()
+
+  def add_occurrence(self, file_path):
+    self.occurrences.add(file_path.lstrip())
+
+  def __hash__(self):
+    return hash(self.warning_type)
+
+  def to_string(self, file_loc):
+    s = '[%s] #%d\n' % (self.warning_type, len(self.occurrences))
+    if file_loc:
+      s += ' ' + '\n  '.join(self.occurrences)
+      s += '\n'
+    return s
+
+  def __str__(self):
+    return self.to_string(False)
+
+  def __lt__(self, other):
+    return len(self.occurrences) < len(other.occurrences)
+
+
+def GenerateCompileCommands(build_folder):
+  """
+  Generate a compilation database.
+
+  Currently clang-tidy-4 does not understand all flags that are passed
+  by the build system; therefore, we remove them from the generated file.
+  """
+  ninja_ps = subprocess.Popen(
+    ['ninja', '-t', 'compdb', 'cxx', 'cc'],
+    stdout=subprocess.PIPE,
+    cwd=build_folder)
+
+  out_filepath = os.path.join(build_folder, 'compile_commands.json')
+  with open(out_filepath, 'w') as cc_file:
+    while True:
+      line = ninja_ps.stdout.readline()
+
+      if line == '':
+        break
+
+      line = line.replace('-fcomplete-member-pointers', '')
+      line = line.replace('-Wno-enum-compare-switch', '')
+      line = line.replace('-Wno-ignored-pragma-optimize', '')
+      line = line.replace('-Wno-null-pointer-arithmetic', '')
+      line = line.replace('-Wno-unused-lambda-capture', '')
+      line = line.replace('-Wno-defaulted-function-deleted', '')
+      cc_file.write(line)
+
+
+def skip_line(line):
+  """
+  Check if a clang-tidy output line should be skipped.
+  """
+  return bool(CLANG_TIDY_CMDLINE_OUT.search(line))
+
+
+def ClangTidyRunFull(build_folder, skip_output_filter, checks, auto_fix):
+  """
+  Run clang-tidy on the full codebase and print warnings.
+  """
+  extra_args = []
+  if auto_fix:
+    extra_args.append('-fix')
+
+  if checks is not None:
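+    # '-*' first disables all default checks so that only the requested
+    # checks are run.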
+    extra_args.append('-checks')
+    extra_args.append('-*, ' + checks)
+
+  with open(os.devnull, 'w') as DEVNULL:
+    ct_process = subprocess.Popen(
+      ['run-clang-tidy', '-j' + str(THREADS), '-p', '.']
+       + ['-header-filter'] + HEADER_REGEX + extra_args
+       + FILE_REGEXS,
+      cwd=build_folder,
+      stdout=subprocess.PIPE,
+      stderr=DEVNULL)
+  removing_check_header = False
+  empty_lines = 0
+
+  while True:
+    line = ct_process.stdout.readline()
+    if line == '':
+      break
+
+    # Skip all lines after "Enabled checks" and before two newlines,
+    # i.e., skip the clang-tidy check list.
+    if line.startswith('Enabled checks'):
+      removing_check_header = True
+    if removing_check_header and not skip_output_filter:
+      if line == '\n':
+        empty_lines += 1
+      if empty_lines == 2:
+        removing_check_header = False
+      continue
+
+    # Different lines get removed to ease output reading.
+    if not skip_output_filter and skip_line(line):
+      continue
+
+    # Print line, because no filter was matched.
+    if line != '\n':
+      sys.stdout.write(line)
+
+
+def ClangTidyRunAggregate(build_folder, print_files):
+  """
+  Run clang-tidy on the full codebase and aggregate warnings into categories.
+  """
+  with open(os.devnull, 'w') as DEVNULL:
+    ct_process = subprocess.Popen(
+      ['run-clang-tidy', '-j' + str(THREADS), '-p', '.'] +
+        ['-header-filter'] + HEADER_REGEX +
+        FILE_REGEXS,
+      cwd=build_folder,
+      stdout=subprocess.PIPE,
+      stderr=DEVNULL)
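+  # Maps each check name to a ClangTidyWarning aggregating its occurrences.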
+  warnings = dict()
+  while True:
+    line = ct_process.stdout.readline()
+    if line == '':
+      break
+
+    res = CLANG_TIDY_WARNING.search(line)
+    if res is not None:
+      warnings.setdefault(
+          res.group(2),
+          ClangTidyWarning(res.group(2))).add_occurrence(res.group(1))
+
+  for warning in sorted(warnings.values(), reverse=True):
+    sys.stdout.write(warning.to_string(print_files))
+
+
+def ClangTidyRunDiff(build_folder, diff_branch, auto_fix):
+  """
+  Run clang-tidy on the diff between current and the diff_branch.
+  """
+  if diff_branch is None:
+    diff_branch = subprocess.check_output(['git', 'merge-base',
+                                           'HEAD', 'origin/master']).strip()
+
+  git_ps = subprocess.Popen(
+    ['git', 'diff', '-U0', diff_branch], stdout=subprocess.PIPE)
+
+  extra_args = []
+  if auto_fix:
+    extra_args.append('-fix')
+
+  with open(os.devnull, 'w') as DEVNULL:
+    """
+    The script `clang-tidy-diff` does not support adding header filters.
+    To still analyze headers, we use the build path option `-path` to
+    inject our header-filter option. This works because the script simply
+    appends the passed path string to the clang-tidy command line.
+    """
+    modified_build_folder = build_folder
+    modified_build_folder += ' -header-filter='
+    modified_build_folder += '\'' + ''.join(HEADER_REGEX) + '\''
+
+    ct_ps = subprocess.Popen(
+      ['clang-tidy-diff.py', '-path', modified_build_folder, '-p1'] +
+        extra_args,
+      stdin=git_ps.stdout,
+      stdout=subprocess.PIPE,
+      stderr=DEVNULL)
+  git_ps.wait()
+  while True:
+    line = ct_ps.stdout.readline()
+    if line == '':
+      break
+
+    if skip_line(line):
+      continue
+
+    sys.stdout.write(line)
+
+
+def rm_prefix(string, prefix):
+  """
+  Removes prefix from a string until the new string
+  no longer starts with the prefix.
+  """
+  while string.startswith(prefix):
+    string = string[len(prefix):]
+  return string
+
+
+def ClangTidyRunSingleFile(build_folder, filename_to_check, auto_fix,
+                           line_ranges=[]):
+  """
+  Run clang-tidy on a single file.
+  """
+  files_with_relative_path = []
+
+  compdb_filepath = os.path.join(build_folder, 'compile_commands.json')
+  with open(compdb_filepath) as raw_json_file:
+    compdb = json.load(raw_json_file)
+
+  for db_entry in compdb:
+    if db_entry['file'].endswith(filename_to_check):
+      files_with_relative_path.append(db_entry['file'])
+
+  with open(os.devnull, 'w') as DEVNULL:
+    for file_with_relative_path in files_with_relative_path:
+      line_filter = None
+      if len(line_ranges) != 0:
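+        # clang-tidy's -line-filter option takes a JSON array like
+        # [{"lines": [[1, 10]], "name": "src/foo.cc"}]; assemble it manually.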
+        line_filter = '['
+        line_filter += '{ \"lines\":[' + ', '.join(line_ranges)
+        line_filter += '], \"name\":\"'
+        line_filter += rm_prefix(file_with_relative_path,
+                                 '../') + '\"}'
+        line_filter += ']'
+
+      extra_args = ['-line-filter=' + line_filter] if line_filter else []
+
+      if auto_fix:
+        extra_args.append('-fix')
+
+      subprocess.call(['clang-tidy', '-p', '.'] +
+                      extra_args +
+                      [file_with_relative_path],
+                      cwd=build_folder,
+                      stderr=DEVNULL)
+
+
+def CheckClangTidy():
+  """
+  Checks if a clang-tidy binary exists.
+  """
+  with open(os.devnull, 'w') as DEVNULL:
+    return subprocess.call(['which', 'clang-tidy'], stdout=DEVNULL) == 0
+
+
+def CheckCompDB(build_folder):
+  """
+  Checks if a compilation database exists in the build_folder.
+  """
+  return os.path.isfile(os.path.join(build_folder, 'compile_commands.json'))
+
+
+def DetectBuildFolder():
+  """
+  Tries to auto detect the last used build folder in out/
+  """
+  outdirs_folder = 'out/'
+  last_used = None
+  last_timestamp = -1
+  for outdir in [outdirs_folder + folder_name
+                 for folder_name in os.listdir(outdirs_folder)
+                 if os.path.isdir(outdirs_folder + folder_name)]:
+    outdir_modified_timestamp = os.path.getmtime(outdir)
+    if outdir_modified_timestamp > last_timestamp:
+      last_timestamp = outdir_modified_timestamp
+      last_used = outdir
+
+  return last_used
+
+
+def GetOptions():
+  """
+  Generate the option parser for this script.
+  """
+  result = optparse.OptionParser()
+  result.add_option(
+    '-b',
+    '--build-folder',
+    help='Set V8 build folder',
+    dest='build_folder',
+    default=None)
+  result.add_option(
+    '-j',
+    help='Set the amount of threads that should be used',
+    dest='threads',
+    default=None)
+  result.add_option(
+    '--gen-compdb',
+    help='Generate a compilation database for clang-tidy',
+    default=False,
+    action='store_true')
+  result.add_option(
+    '--no-output-filter',
+    help='Do not use any output filtering',
+    default=False,
+    action='store_true')
+  result.add_option(
+    '--fix',
+    help='Fix auto fixable issues',
+    default=False,
+    dest='auto_fix',
+    action='store_true'
+  )
+
+  # Full clang-tidy.
+  full_run_g = optparse.OptionGroup(result, 'Clang-tidy full', '')
+  full_run_g.add_option(
+    '--full',
+    help='Run clang-tidy on the whole codebase',
+    default=False,
+    action='store_true')
+  full_run_g.add_option('--checks',
+                        help='Clang-tidy checks to use.',
+                        default=None)
+  result.add_option_group(full_run_g)
+
+  # Aggregate clang-tidy.
+  agg_run_g = optparse.OptionGroup(result, 'Clang-tidy aggregate', '')
+  agg_run_g.add_option('--aggregate', help='Run clang-tidy on the whole '\
+             'codebase and aggregate the warnings',
+             default=False, action='store_true')
+  agg_run_g.add_option('--show-loc', help='Show file locations when running '\
+             'in aggregate mode', default=False,
+             action='store_true')
+  result.add_option_group(agg_run_g)
+
+  # Diff clang-tidy.
+  diff_run_g = optparse.OptionGroup(result, 'Clang-tidy diff', '')
+  diff_run_g.add_option('--branch', help='Run clang-tidy on the diff '\
+             'between HEAD and the merge-base between HEAD '\
+             'and DIFF_BRANCH (origin/master by default).',
+             default=None, dest='diff_branch')
+  result.add_option_group(diff_run_g)
+
+  # Single clang-tidy.
+  single_run_g = optparse.OptionGroup(result, 'Clang-tidy single', '')
+  single_run_g.add_option(
+    '--single', help='', default=False, action='store_true')
+  single_run_g.add_option(
+    '--file', help='File name to check', default=None, dest='file_name')
+  single_run_g.add_option('--lines', help='Limit checks to a line range. '\
+              'For example: --lines="[2,4], [5,6]"',
+              default=[], dest='line_ranges')
+
+  result.add_option_group(single_run_g)
+  return result
+
+
+def main():
+  parser = GetOptions()
+  (options, _) = parser.parse_args()
+
+  if options.threads is not None:
+    global THREADS
+    THREADS = options.threads
+
+  if options.build_folder is None:
+    options.build_folder = DetectBuildFolder()
+
+  if not CheckClangTidy():
+    print('Could not find clang-tidy')
+  elif options.build_folder is None or not os.path.isdir(options.build_folder):
+    print('Please provide a build folder with -b')
+  elif options.gen_compdb:
+    GenerateCompileCommands(options.build_folder)
+  elif not CheckCompDB(options.build_folder):
+    print('Could not find compilation database, ' \
+      'please generate it with --gen-compdb')
+  else:
+    print('Using build folder:', options.build_folder)
+    if options.full:
+      print('Running clang-tidy - full')
+      ClangTidyRunFull(options.build_folder,
+                       options.no_output_filter,
+                       options.checks,
+                       options.auto_fix)
+    elif options.aggregate:
+      print('Running clang-tidy - aggregating warnings')
+      if options.auto_fix:
+        print('Auto fix is not supported in aggregate mode; running without it.')
+      ClangTidyRunAggregate(options.build_folder, options.show_loc)
+    elif options.single:
+      if options.file_name is not None:
+        print('Running clang-tidy - single on ' + options.file_name)
+        line_ranges = []
+        for match in re.findall(r'(\[.*?\])', options.line_ranges):
+          if match:
+            line_ranges.append(match)
+        ClangTidyRunSingleFile(options.build_folder,
+                               options.file_name,
+                               options.auto_fix,
+                               line_ranges)
+      else:
+        print('No filename provided, please specify one with --file')
+    else:
+      print('Running clang-tidy')
+      ClangTidyRunDiff(options.build_folder,
+                       options.diff_branch,
+                       options.auto_fix)
+
+
+if __name__ == '__main__':
+  main()
diff --git a/src/third_party/v8/tools/run-llprof.sh b/src/third_party/v8/tools/run-llprof.sh
new file mode 100755
index 0000000..54a3881
--- /dev/null
+++ b/src/third_party/v8/tools/run-llprof.sh
@@ -0,0 +1,69 @@
+#!/bin/sh
+#
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+########## Global variable definitions
+
+# Ensure that <your CPU clock> / $SAMPLE_EVERY_N_CYCLES < $MAXIMUM_SAMPLE_RATE.
+MAXIMUM_SAMPLE_RATE=10000000
+SAMPLE_EVERY_N_CYCLES=10000
+SAMPLE_RATE_CONFIG_FILE="/proc/sys/kernel/perf_event_max_sample_rate"
+KERNEL_MAP_CONFIG_FILE="/proc/sys/kernel/kptr_restrict"
+
+########## Usage
+
+usage() {
+cat << EOF
+usage: $0 <benchmark_command>
+
+Executes <benchmark_command> under observation by the kernel's "perf" \
+framework, then calls the low level tick processor to analyze the results.
+EOF
+}
+
+if [ $# -eq 0 ] || [ "$1" = "-h" ]  || [ "$1" = "--help" ] ; then
+  usage
+  exit 1
+fi
+
+########## Actual script execution
+
+ACTUAL_SAMPLE_RATE=$(cat $SAMPLE_RATE_CONFIG_FILE)
+if [ "$ACTUAL_SAMPLE_RATE" -lt "$MAXIMUM_SAMPLE_RATE" ] ; then
+  echo "Setting appropriate maximum sample rate..."
+  echo $MAXIMUM_SAMPLE_RATE | sudo tee $SAMPLE_RATE_CONFIG_FILE
+fi
+
+ACTUAL_KERNEL_MAP_RESTRICTION=$(cat $KERNEL_MAP_CONFIG_FILE)
+if [ "$ACTUAL_KERNEL_MAP_RESTRICTION" -ne "0" ] ; then
+  echo "Disabling kernel address map restriction..."
+  echo 0 | sudo tee $KERNEL_MAP_CONFIG_FILE
+fi
+
+echo "Running benchmark..."
+perf record -R -e cycles -c $SAMPLE_EVERY_N_CYCLES -i $@ --ll-prof
diff --git a/src/third_party/v8/tools/run-num-fuzzer.py b/src/third_party/v8/tools/run-num-fuzzer.py
new file mode 100755
index 0000000..49e4329
--- /dev/null
+++ b/src/third_party/v8/tools/run-num-fuzzer.py
@@ -0,0 +1,15 @@
+#!/usr/bin/env python
+#
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+from __future__ import absolute_import
+import sys
+
+from testrunner import num_fuzzer
+
+
+if __name__ == "__main__":
+  sys.exit(num_fuzzer.NumFuzzer().execute())
diff --git a/src/third_party/v8/tools/run-perf.sh b/src/third_party/v8/tools/run-perf.sh
new file mode 100755
index 0000000..0317a9a
--- /dev/null
+++ b/src/third_party/v8/tools/run-perf.sh
@@ -0,0 +1,58 @@
+#! /bin/sh
+#
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+
+########## Global variable definitions
+
+# Ensure that <your CPU clock> / $SAMPLE_EVERY_N_CYCLES < $MAXIMUM_SAMPLE_RATE.
+MAXIMUM_SAMPLE_RATE=10000000
+SAMPLE_EVERY_N_CYCLES=10000
+SAMPLE_RATE_CONFIG_FILE="/proc/sys/kernel/perf_event_max_sample_rate"
+KERNEL_MAP_CONFIG_FILE="/proc/sys/kernel/kptr_restrict"
+CALL_GRAPH_METHOD="fp"  # dwarf does not play nice with JITted objects.
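+# The sampled perf event can be overridden via the EVENT_TYPE environment
+# variable; it defaults to user-space cycles.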
+EVENT_TYPE=${EVENT_TYPE:=cycles:u}
+
+########## Usage
+
+usage() {
+cat << EOF
+usage: $0 <benchmark_command>
+
+Executes <benchmark_command> under observation by Linux perf.
+Sampling event is cycles in user space, call graphs are recorded.
+EOF
+}
+
+if [ $# -eq 0 ] || [ "$1" = "-h" ]  || [ "$1" = "--help" ] ; then
+  usage
+  exit 1
+fi
+
+########## Actual script execution
+
+ACTUAL_SAMPLE_RATE=$(cat $SAMPLE_RATE_CONFIG_FILE)
+if [ "$ACTUAL_SAMPLE_RATE" -lt "$MAXIMUM_SAMPLE_RATE" ] ; then
+  echo "Setting appropriate maximum sample rate..."
+  echo $MAXIMUM_SAMPLE_RATE | sudo tee $SAMPLE_RATE_CONFIG_FILE
+fi
+
+ACTUAL_KERNEL_MAP_RESTRICTION=$(cat $KERNEL_MAP_CONFIG_FILE)
+if [ "$ACTUAL_KERNEL_MAP_RESTRICTION" -ne "0" ] ; then
+  echo "Disabling kernel address map restriction..."
+  echo 0 | sudo tee $KERNEL_MAP_CONFIG_FILE
+fi
+
+# Extract the command being perfed, so that we can prepend arguments to the
+# arguments that the user supplied.
+COMMAND=$1
+shift 1
+
+echo "Running..."
+perf record -R \
+  -e $EVENT_TYPE \
+  -c $SAMPLE_EVERY_N_CYCLES \
+  --call-graph $CALL_GRAPH_METHOD \
+  -i "$COMMAND" --perf-basic-prof "$@"
diff --git a/src/third_party/v8/tools/run-tests.py b/src/third_party/v8/tools/run-tests.py
new file mode 100755
index 0000000..d22c730
--- /dev/null
+++ b/src/third_party/v8/tools/run-tests.py
@@ -0,0 +1,15 @@
+#!/usr/bin/env python
+#
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+from __future__ import absolute_import
+import sys
+
+from testrunner import standard_runner
+
+
+if __name__ == "__main__":
+  sys.exit(standard_runner.StandardTestRunner().execute())
diff --git a/src/third_party/v8/tools/run-wasm-api-tests.py b/src/third_party/v8/tools/run-wasm-api-tests.py
new file mode 100755
index 0000000..ff37c8a
--- /dev/null
+++ b/src/third_party/v8/tools/run-wasm-api-tests.py
@@ -0,0 +1,168 @@
+#!/usr/bin/env python
+#
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""\
+Helper script for compiling and running the Wasm C/C++ API examples.
+
+Usage: tools/run-wasm-api-tests.py outdir tempdir [filters...]
+
+"outdir" is the build output directory containing libwee8, e.g. out/x64.release
+"tempdir" is a temporary dir where this script may put its artifacts. It is
+the caller's responsibility to clean it up afterwards.
+
+By default, this script builds and runs all examples, both the respective
+C and C++ versions, both with GCC ("gcc" and "g++" binaries found in $PATH)
+and V8's bundled Clang in third_party/llvm-build/. You can use any number
+of "filters" arguments to run only a subset:
+ - "c": run C versions of examples
+ - "cc": run C++ versions of examples
+ - "gcc": compile with GCC
+ - "clang": compile with Clang
+ - "hello" etc.: run "hello" example
+"""
+
+from __future__ import print_function
+
+import os
+import shutil
+import subprocess
+import sys
+
+CFLAGS = "-DDEBUG -Wall -Werror -O0 -ggdb -fsanitize=address"
+
+CHECKOUT_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+WASM_PATH = os.path.join(CHECKOUT_PATH, "third_party", "wasm-api")
+CLANG_PATH = os.path.join(CHECKOUT_PATH, "third_party", "llvm-build",
+                          "Release+Asserts", "bin")
+
+EXAMPLES = ["hello", "callback", "trap", "reflect", "global", "table",
+            "memory", "finalize", "serialize", "threads", "hostref", "multi",
+            "start"]
+
+CLANG = {
+  "name": "Clang",
+  "c": os.path.join(CLANG_PATH, "clang"),
+  "cc": os.path.join(CLANG_PATH, "clang++"),
+  "ldflags": "-fsanitize-memory-track-origins -fsanitize-memory-use-after-dtor",
+}
+GCC = {
+  "name": "GCC",
+  "c": "gcc",
+  "cc": "g++",
+  "ldflags": "",
+}
+
+C = {
+  "name": "C",
+  "suffix": "c",
+  "cflags": "",
+}
+CXX = {
+  "name": "C++",
+  "suffix": "cc",
+  "cflags": "-std=c++11",
+}
+
+MIN_ARGS = 3  # Script, outdir, tempdir
+
+def _Call(cmd_list, silent=False):
+  cmd = " ".join(cmd_list)
+  if not silent: print("# %s" % cmd)
+  return subprocess.call(cmd, shell=True)
+
+class Runner(object):
+  def __init__(self, name, outdir, tempdir):
+    self.name = name
+    self.outdir = outdir
+    self.tempdir = tempdir
+    self.src_file_basename = os.path.join(WASM_PATH, "example", name)
+    self.dst_file_basename = os.path.join(tempdir, name)
+    self.lib_file = os.path.join(outdir, "obj", "libwee8.a")
+    if not os.path.exists(self.lib_file):
+      print("libwee8 library not found, make sure to pass the outdir as "
+            "first argument; see --help")
+      sys.exit(1)
+    src_wasm_file = self.src_file_basename + ".wasm"
+    dst_wasm_file = self.dst_file_basename + ".wasm"
+    shutil.copyfile(src_wasm_file, dst_wasm_file)
+
+  def _Error(self, step, lang, compiler, code):
+    print("Error: %s failed. To repro: tools/run-wasm-api-tests.py "
+          "%s %s %s %s %s" %
+          (step, self.outdir, self.tempdir, self.name, lang,
+           compiler["name"].lower()))
+    return code
+
+  def CompileAndRun(self, compiler, language):
+    print("==== %s %s/%s ====" %
+          (self.name, language["name"], compiler["name"]))
+    lang = language["suffix"]
+    src_file = self.src_file_basename + "." + lang
+    exe_file = self.dst_file_basename + "-" + lang
+    obj_file = exe_file  + ".o"
+    # Compile.
+    c = _Call([compiler[lang], "-c", language["cflags"], CFLAGS,
+               "-I", WASM_PATH, "-o", obj_file, src_file])
+    if c: return self._Error("compilation", lang, compiler, c)
+    # Link.
+    c = _Call([compiler["cc"], CFLAGS, compiler["ldflags"], obj_file,
+               "-o", exe_file, self.lib_file, "-ldl -pthread"])
+    if c: return self._Error("linking", lang, compiler, c)
+    # Execute.
+    exe_file = "./%s-%s" % (self.name, lang)
+    c = _Call(["cd", self.tempdir, ";", exe_file])
+    if c: return self._Error("execution", lang, compiler, c)
+    return 0
+
+def Main(args):
+  if (len(args) < MIN_ARGS or args[1] in ("-h", "--help", "help")):
+    print(__doc__)
+    return 1
+
+  outdir = args[1]
+  tempdir = args[2]
+  result = 0
+  examples = EXAMPLES
+  compilers = (GCC, CLANG)
+  languages = (C, CXX)
+  if len(args) > MIN_ARGS:
+    custom_compilers = []
+    custom_languages = []
+    custom_examples = []
+    for i in range(MIN_ARGS, len(args)):
+      arg = args[i]
+      if arg == "c" and C not in custom_languages:
+        custom_languages.append(C)
+      elif arg in ("cc", "cpp", "cxx", "c++") and CXX not in custom_languages:
+        custom_languages.append(CXX)
+      elif arg in ("gcc", "g++") and GCC not in custom_compilers:
+        custom_compilers.append(GCC)
+      elif arg in ("clang", "clang++") and CLANG not in custom_compilers:
+        custom_compilers.append(CLANG)
+      elif arg in EXAMPLES and arg not in custom_examples:
+        custom_examples.append(arg)
+      else:
+        print("Didn't understand '%s'" % arg)
+        return 1
+    if custom_compilers:
+      compilers = custom_compilers
+    if custom_languages:
+      languages = custom_languages
+    if custom_examples:
+      examples = custom_examples
+  for example in examples:
+    runner = Runner(example, outdir, tempdir)
+    for compiler in compilers:
+      for language in languages:
+        c = runner.CompileAndRun(compiler, language)
+        if c: result = c
+  if result:
+    print("\nFinished with errors.")
+  else:
+    print("\nFinished successfully.")
+  return result
+
+if __name__ == "__main__":
+  sys.exit(Main(sys.argv))
diff --git a/src/third_party/v8/tools/run.py b/src/third_party/v8/tools/run.py
new file mode 100755
index 0000000..59b3c15
--- /dev/null
+++ b/src/third_party/v8/tools/run.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""This program wraps an arbitrary command since gn currently can only execute
+scripts."""
+
+from __future__ import print_function
+
+import subprocess
+import sys
+
+result = subprocess.call(sys.argv[1:])
+if result != 0:
+  # Windows error codes such as 0xC0000005 and 0xC0000409 are much easier
+  # to recognize and differentiate in hex.
+  if result < -100:
+    # Print negative hex numbers as positive by adding 2^32.
+    print('Return code is %08X' % (result + 2**32))
+  else:
+    print('Return code is %d' % result)
+sys.exit(result)
diff --git a/src/third_party/v8/tools/run_perf.py b/src/third_party/v8/tools/run_perf.py
new file mode 100644
index 0000000..80ea1f9
--- /dev/null
+++ b/src/third_party/v8/tools/run_perf.py
@@ -0,0 +1,1153 @@
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Performance runner for d8.
+
+Call e.g. with tools/run-perf.py --arch ia32 some_suite.json
+
+The suite json format is expected to be:
+{
+  "path": <relative path chunks to perf resources and main file>,
+  "owners": [<list of email addresses of benchmark owners (required)>],
+  "name": <optional suite name, file name is default>,
+  "archs": [<architecture name for which this suite is run>, ...],
+  "binary": <name of binary to run, default "d8">,
+  "flags": [<flag to d8>, ...],
+  "test_flags": [<flag to the test file>, ...],
+  "run_count": <how often will this suite run (optional)>,
+  "run_count_XXX": <how often will this suite run for arch XXX (optional)>,
+  "timeout": <how long test is allowed to run>,
+  "timeout_XXX": <how long test is allowed run run for arch XXX>,
+  "retry_count": <how many times to retry failures (in addition to first try)",
+  "retry_count_XXX": <how many times to retry failures for arch XXX>
+  "resources": [<js file to be moved to android device>, ...]
+  "main": <main js perf runner file>,
+  "results_regexp": <optional regexp>,
+  "results_processor": <optional python results processor script>,
+  "units": <the unit specification for the performance dashboard>,
+  "process_size": <flag - collect maximum memory used by the process>,
+  "tests": [
+    {
+      "name": <name of the trace>,
+      "results_regexp": <optional more specific regexp>,
+      "results_processor": <optional python results processor script>,
+      "units": <the unit specification for the performance dashboard>,
+      "process_size": <flag - collect maximum memory used by the process>,
+    }, ...
+  ]
+}
+
+The tests field can also nest other suites in arbitrary depth. A suite
+with a "main" file is a leaf suite that can contain one more level of
+tests.
+
+A suite's results_regexp is expected to have one string placeholder
+"%s" for the trace name. A trace's results_regexp overrides the suite
+default.
+
+A suite's results_processor may point to an optional python script. If
+specified, it is called after running the tests (with a path relative to the
+suite level's path). It is expected to read the measurement's output text
+on stdin and print the processed output to stdout.
+
+The results_regexp will be applied to the processed output.
+
+A suite without "tests" is considered a performance test itself.
+
+Full example (suite with one runner):
+{
+  "path": ["."],
+  "owners": ["username@chromium.org"],
+  "flags": ["--expose-gc"],
+  "test_flags": ["5"],
+  "archs": ["ia32", "x64"],
+  "run_count": 5,
+  "run_count_ia32": 3,
+  "main": "run.js",
+  "results_regexp": "^%s: (.+)$",
+  "units": "score",
+  "tests": [
+    {"name": "Richards"},
+    {"name": "DeltaBlue"},
+    {"name": "NavierStokes",
+     "results_regexp": "^NavierStokes: (.+)$"}
+  ]
+}
+
+Full example (suite with several runners):
+{
+  "path": ["."],
+  "owners": ["username@chromium.org", "otherowner@google.com"],
+  "flags": ["--expose-gc"],
+  "archs": ["ia32", "x64"],
+  "run_count": 5,
+  "units": "score",
+  "tests": [
+    {"name": "Richards",
+     "path": ["richards"],
+     "main": "run.js",
+     "run_count": 3,
+     "results_regexp": "^Richards: (.+)$"},
+    {"name": "NavierStokes",
+     "path": ["navier_stokes"],
+     "main": "run.js",
+     "results_regexp": "^NavierStokes: (.+)$"}
+  ]
+}
+
+Path pieces are concatenated. D8 is always run with the suite's path as cwd.
+
+The test flags are passed to the js test file after '--'.
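+
+For instance, with the first full example above, d8 would be invoked roughly as
+"d8 --expose-gc run.js -- 5", with the suite's path as cwd.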
+"""
+
+# for py2/py3 compatibility
+from __future__ import print_function
+from functools import reduce
+
+from collections import OrderedDict
+import copy
+import json
+import logging
+import math
+import argparse
+import os
+import re
+import subprocess
+import sys
+import time
+import traceback
+
+import numpy
+
+from testrunner.local import android
+from testrunner.local import command
+from testrunner.local import utils
+from testrunner.objects.output import Output, NULL_OUTPUT
+
+try:
+  basestring       # Python 2
+except NameError:  # Python 3
+  basestring = str
+
+SUPPORTED_ARCHS = ['arm',
+                   'ia32',
+                   'mips',
+                   'mipsel',
+                   'x64',
+                   'arm64']
+
+GENERIC_RESULTS_RE = re.compile(r'^RESULT ([^:]+): ([^=]+)= ([^ ]+) ([^ ]*)$')
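+# GENERIC_RESULTS_RE would match an output line such as
+# "RESULT graph: trace= 1.5 ms" (hypothetical example values).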
+RESULT_STDDEV_RE = re.compile(r'^\{([^\}]+)\}$')
+RESULT_LIST_RE = re.compile(r'^\[([^\]]+)\]$')
+TOOLS_BASE = os.path.abspath(os.path.dirname(__file__))
+INFRA_FAILURE_RETCODE = 87
+MIN_RUNS_FOR_CONFIDENCE = 10
+
+
+def GeometricMean(values):
+  """Returns the geometric mean of a list of values.
+
+  The mean is calculated using log to avoid overflow.
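+  For example, GeometricMean([1, 4, 16]) returns 4.0.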
+  """
+  values = list(map(float, values))
+  return math.exp(sum(map(math.log, values)) / len(values))
+
+
+class ResultTracker(object):
+  """Class that tracks trace/runnable results and produces script output.
+
+  The output is structured like this:
+  {
+    "traces": [
+      {
+        "graphs": ["path", "to", "trace", "config"],
+        "units": <string describing units, e.g. "ms" or "KB">,
+        "results": [<list of values measured over several runs>],
+        "stddev": <stddev of the value if measure by script or ''>
+      },
+      ...
+    ],
+    "runnables": [
+      {
+        "graphs": ["path", "to", "runnable", "config"],
+        "durations": [<list of durations of each runnable run in seconds>],
+        "timeout": <timeout configured for runnable in seconds>,
+      },
+      ...
+    ],
+    "errors": [<list of strings describing errors>],
+  }
+  """
+  def __init__(self):
+    self.traces = {}
+    self.errors = []
+    self.runnables = {}
+
+  def AddTraceResult(self, trace, result, stddev):
+    if trace.name not in self.traces:
+      self.traces[trace.name] = {
+        'graphs': trace.graphs,
+        'units': trace.units,
+        'results': [result],
+        'stddev': stddev or '',
+      }
+    else:
+      existing_entry = self.traces[trace.name]
+      assert trace.graphs == existing_entry['graphs']
+      assert trace.units == existing_entry['units']
+      if stddev:
+        existing_entry['stddev'] = stddev
+      existing_entry['results'].append(result)
+
+  def TraceHasStdDev(self, trace):
+    return trace.name in self.traces and self.traces[trace.name]['stddev'] != ''
+
+  def AddError(self, error):
+    self.errors.append(error)
+
+  def AddRunnableDuration(self, runnable, duration):
+    """Records a duration of a specific run of the runnable."""
+    if runnable.name not in self.runnables:
+      self.runnables[runnable.name] = {
+        'graphs': runnable.graphs,
+        'durations': [duration],
+        'timeout': runnable.timeout,
+      }
+    else:
+      existing_entry = self.runnables[runnable.name]
+      assert runnable.timeout == existing_entry['timeout']
+      assert runnable.graphs == existing_entry['graphs']
+      existing_entry['durations'].append(duration)
+
+  def ToDict(self):
+    return {
+        'traces': self.traces.values(),
+        'errors': self.errors,
+        'runnables': self.runnables.values(),
+    }
+
+  def WriteToFile(self, file_name):
+    with open(file_name, 'w') as f:
+      f.write(json.dumps(self.ToDict()))
+
+  def HasEnoughRuns(self, graph_config, confidence_level):
+    """Checks if the mean of the results for a given trace config is within
+    0.1% of the true value with the specified confidence level.
+
+    This assumes a Gaussian distribution of the noise and is based on
+    https://en.wikipedia.org/wiki/68%E2%80%9395%E2%80%9399.7_rule.
+
+    Args:
+      graph_config: An instance of GraphConfig.
+      confidence_level: Number of standard deviations from the mean that all
+          values must lie within. Typical values are 1, 2 and 3 and correspond
+          to 68%, 95% and 99.7% probability that the measured value is within
+          0.1% of the true value.
+
+    Returns:
+      True if the specified confidence level has been achieved.
+    """
+    if not isinstance(graph_config, TraceConfig):
+      return all(self.HasEnoughRuns(child, confidence_level)
+                 for child in graph_config.children)
+
+    trace = self.traces.get(graph_config.name, {})
+    results = trace.get('results', [])
+    logging.debug('HasEnoughRuns for %s', graph_config.name)
+
+    if len(results) < MIN_RUNS_FOR_CONFIDENCE:
+      logging.debug('  Ran %d times, need at least %d',
+                    len(results), MIN_RUNS_FOR_CONFIDENCE)
+      return False
+
+    logging.debug('  Results: %d entries', len(results))
+    mean = numpy.mean(results)
+    mean_stderr = numpy.std(results) / numpy.sqrt(len(results))
+    logging.debug('  Mean: %.2f, mean_stderr: %.2f', mean, mean_stderr)
+    logging.info('>>> Confidence level is %.2f', mean / (1000.0 * mean_stderr))
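+    # Enough runs once confidence_level standard errors of the mean fit within
+    # 0.1% of the mean, i.e. the interval half-width is below mean / 1000.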
+    return confidence_level * mean_stderr < mean / 1000.0
+
+  def __str__(self):  # pragma: no cover
+    return json.dumps(self.ToDict(), indent=2, separators=(',', ': '))
+
+
+def RunResultsProcessor(results_processor, output, count):
+  # Dummy pass through for null-runs.
+  if output.stdout is None:
+    return output
+
+  # We assume the results processor is relative to the suite.
+  assert os.path.exists(results_processor)
+  p = subprocess.Popen(
+      [sys.executable, results_processor],
+      stdin=subprocess.PIPE,
+      stdout=subprocess.PIPE,
+      stderr=subprocess.PIPE,
+  )
+  new_output = copy.copy(output)
+  new_output.stdout, _ = p.communicate(input=output.stdout)
+  logging.info('>>> Processed stdout (#%d):\n%s', count, output.stdout)
+  return new_output
+
+
+class Node(object):
+  """Represents a node in the suite tree structure."""
+  def __init__(self, *args):
+    self._children = []
+
+  def AppendChild(self, child):
+    self._children.append(child)
+
+  @property
+  def children(self):
+    return self._children
+
+
+class DefaultSentinel(Node):
+  """Fake parent node with all default values."""
+  def __init__(self, binary = 'd8'):
+    super(DefaultSentinel, self).__init__()
+    self.binary = binary
+    self.run_count = 10
+    self.timeout = 60
+    self.retry_count = 4
+    self.path = []
+    self.graphs = []
+    self.flags = []
+    self.test_flags = []
+    self.process_size = False
+    self.resources = []
+    self.results_processor = None
+    self.results_regexp = None
+    self.stddev_regexp = None
+    self.units = 'score'
+    self.total = False
+    self.owners = []
+
+
+class GraphConfig(Node):
+  """Represents a suite definition.
+
+  Can either be a leaf or an inner node that provides default values.
+  """
+  def __init__(self, suite, parent, arch):
+    super(GraphConfig, self).__init__()
+    self._suite = suite
+
+    assert isinstance(suite.get('path', []), list)
+    assert isinstance(suite.get('owners', []), list)
+    assert isinstance(suite['name'], basestring)
+    assert isinstance(suite.get('flags', []), list)
+    assert isinstance(suite.get('test_flags', []), list)
+    assert isinstance(suite.get('resources', []), list)
+
+    # Accumulated values.
+    self.path = parent.path[:] + suite.get('path', [])
+    self.graphs = parent.graphs[:] + [suite['name']]
+    self.flags = parent.flags[:] + suite.get('flags', [])
+    self.test_flags = parent.test_flags[:] + suite.get('test_flags', [])
+    self.owners = parent.owners[:] + suite.get('owners', [])
+
+    # Values independent of parent node.
+    self.resources = suite.get('resources', [])
+
+    # Discrete values (with parent defaults).
+    self.binary = suite.get('binary', parent.binary)
+    self.run_count = suite.get('run_count', parent.run_count)
+    self.run_count = suite.get('run_count_%s' % arch, self.run_count)
+    self.retry_count = suite.get('retry_count', parent.retry_count)
+    self.retry_count = suite.get('retry_count_%s' % arch, self.retry_count)
+    self.timeout = suite.get('timeout', parent.timeout)
+    self.timeout = suite.get('timeout_%s' % arch, self.timeout)
+    self.units = suite.get('units', parent.units)
+    self.total = suite.get('total', parent.total)
+    self.results_processor = suite.get(
+        'results_processor', parent.results_processor)
+    self.process_size = suite.get('process_size', parent.process_size)
+
+    # A regular expression for results. If the parent graph provides a
+    # regexp and the current suite has none, a string placeholder for the
+    # suite name is expected.
+    # TODO(machenbach): Currently this only makes sense at the leaf level.
+    # Multiple placeholders for multiple levels are not supported.
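+    # For example, a parent results_regexp of "^%s: (.+)$" becomes
+    # "^Richards: (.+)$" for a child suite named "Richards".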
+    if parent.results_regexp:
+      regexp_default = parent.results_regexp % re.escape(suite['name'])
+    else:
+      regexp_default = None
+    self.results_regexp = suite.get('results_regexp', regexp_default)
+
+    # A similar regular expression for the standard deviation (optional).
+    if parent.stddev_regexp:
+      stddev_default = parent.stddev_regexp % re.escape(suite['name'])
+    else:
+      stddev_default = None
+    self.stddev_regexp = suite.get('stddev_regexp', stddev_default)
+
+  @property
+  def name(self):
+    return '/'.join(self.graphs)
+
+
+class TraceConfig(GraphConfig):
+  """Represents a leaf in the suite tree structure."""
+  def __init__(self, suite, parent, arch):
+    super(TraceConfig, self).__init__(suite, parent, arch)
+    assert self.results_regexp
+    assert self.owners
+
+  def ConsumeOutput(self, output, result_tracker):
+    """Extracts trace results from the output.
+
+    Args:
+      output: Output object from the test run.
+      result_tracker: Result tracker to be updated.
+
+    Returns:
+      The raw extracted result value or None if an error occurred.
+    """
+    result = None
+    stddev = None
+
+    try:
+      result = float(
+        re.search(self.results_regexp, output.stdout, re.M).group(1))
+    except ValueError:
+      result_tracker.AddError(
+          'Regexp "%s" returned a non-numeric for test %s.' %
+          (self.results_regexp, self.name))
+    except:
+      result_tracker.AddError(
+          'Regexp "%s" did not match for test %s.' %
+          (self.results_regexp, self.name))
+
+    try:
+      if self.stddev_regexp:
+        if result_tracker.TraceHasStdDev(self):
+          result_tracker.AddError(
+              'Test %s should only run once since a stddev is provided by the '
+              'test.' % self.name)
+        stddev = re.search(self.stddev_regexp, output.stdout, re.M).group(1)
+    except:
+      result_tracker.AddError(
+          'Regexp "%s" did not match for test %s.' %
+          (self.stddev_regexp, self.name))
+
+    if result:
+      result_tracker.AddTraceResult(self, result, stddev)
+    return result
+
+
+class RunnableConfig(GraphConfig):
+  """Represents a runnable suite definition (i.e. has a main file).
+  """
+  def __init__(self, suite, parent, arch):
+    super(RunnableConfig, self).__init__(suite, parent, arch)
+    self.arch = arch
+
+  @property
+  def main(self):
+    return self._suite.get('main', '')
+
+  def ChangeCWD(self, suite_path):
+    """Changes the cwd to to path defined in the current graph.
+
+    The tests are supposed to be relative to the suite configuration.
+    """
+    suite_dir = os.path.abspath(os.path.dirname(suite_path))
+    bench_dir = os.path.normpath(os.path.join(*self.path))
+    os.chdir(os.path.join(suite_dir, bench_dir))
+
+  def GetCommandFlags(self, extra_flags=None):
+    suffix = ['--'] + self.test_flags if self.test_flags else []
+    return self.flags + (extra_flags or []) + [self.main] + suffix
+
+  def GetCommand(self, cmd_prefix, shell_dir, extra_flags=None):
+    # TODO(machenbach): This requires +.exe if run on windows.
+    extra_flags = extra_flags or []
+    if self.binary != 'd8' and '--prof' in extra_flags:
+      logging.info('Profiler supported only on a benchmark run with d8')
+
+    if self.process_size:
+      cmd_prefix = ['/usr/bin/time', '--format=MaxMemory: %MKB'] + cmd_prefix
+    if self.binary.endswith('.py'):
+      # Copy cmd_prefix instead of update (+=).
+      cmd_prefix = cmd_prefix + [sys.executable]
+
+    return command.Command(
+        cmd_prefix=cmd_prefix,
+        shell=os.path.join(shell_dir, self.binary),
+        args=self.GetCommandFlags(extra_flags=extra_flags),
+        timeout=self.timeout or 60,
+        handle_sigterm=True)
+
+  def ProcessOutput(self, output, result_tracker, count):
+    """Processes test run output and updates result tracker.
+
+    Args:
+      output: Output object from the test run.
+      result_tracker: ResultTracker object to be updated.
+      count: Index of the test run (used for better logging).
+    """
+    if self.results_processor:
+      output = RunResultsProcessor(self.results_processor, output, count)
+
+    results_for_total = []
+    for trace in self.children:
+      result = trace.ConsumeOutput(output, result_tracker)
+      if result:
+        results_for_total.append(result)
+
+    if self.total:
+      # Produce total metric only when all traces have produced results.
+      if len(self.children) != len(results_for_total):
+        result_tracker.AddError(
+            'Not all traces have produced results. Can not compute total for '
+            '%s.' % self.name)
+        return
+
+      # Calculate the total as the geometric mean of results from all traces.
+      total_trace = TraceConfig(
+          {'name': 'Total', 'units': self.children[0].units}, self, self.arch)
+      result_tracker.AddTraceResult(
+          total_trace, GeometricMean(results_for_total), '')
+
+
+class RunnableTraceConfig(TraceConfig, RunnableConfig):
+  """Represents a runnable suite definition that is a leaf."""
+  def __init__(self, suite, parent, arch):
+    super(RunnableTraceConfig, self).__init__(suite, parent, arch)
+
+  def ProcessOutput(self, output, result_tracker, count):
+    result_tracker.AddRunnableDuration(self, output.duration)
+    self.ConsumeOutput(output, result_tracker)
+
+
+def MakeGraphConfig(suite, arch, parent):
+  """Factory method for making graph configuration objects."""
+  if isinstance(parent, RunnableConfig):
+    # Below a runnable can only be traces.
+    return TraceConfig(suite, parent, arch)
+  elif suite.get('main') is not None:
+    # A main file makes this graph runnable. Empty strings are accepted.
+    if suite.get('tests'):
+      # This graph has subgraphs (traces).
+      return RunnableConfig(suite, parent, arch)
+    else:
+      # This graph has no subgraphs, it's a leaf.
+      return RunnableTraceConfig(suite, parent, arch)
+  elif suite.get('tests'):
+    # This is neither a leaf nor a runnable.
+    return GraphConfig(suite, parent, arch)
+  else:  # pragma: no cover
+    raise Exception('Invalid suite configuration.')
+
+
+def BuildGraphConfigs(suite, arch, parent):
+  """Builds a tree structure of graph objects that corresponds to the suite
+  configuration.
+  """
+
+  # TODO(machenbach): Implement notion of cpu type?
+  if arch not in suite.get('archs', SUPPORTED_ARCHS):
+    return None
+
+  graph = MakeGraphConfig(suite, arch, parent)
+  for subsuite in suite.get('tests', []):
+    BuildGraphConfigs(subsuite, arch, graph)
+  parent.AppendChild(graph)
+  return graph
+
+
+def FlattenRunnables(node, node_cb):
+  """Generator that traverses the tree structure and iterates over all
+  runnables.
+  """
+  node_cb(node)
+  if isinstance(node, RunnableConfig):
+    yield node
+  elif isinstance(node, Node):
+    for child in node._children:
+      for result in FlattenRunnables(child, node_cb):
+        yield result
+  else:  # pragma: no cover
+    raise Exception('Invalid suite configuration.')
+
+
+def find_build_directory(base_path, arch):
+  """Returns the location of d8 or node in the build output directory.
+
+  This supports a seamless transition between legacy build location
+  (out/Release) and new build location (out/build).
+  """
+  def is_build(path):
+    # We support d8 or node as executables. We don't support testing on
+    # Windows.
+    return (os.path.isfile(os.path.join(path, 'd8')) or
+            os.path.isfile(os.path.join(path, 'node')))
+  possible_paths = [
+    # Location the developer wrapper scripts use.
+    '%s.release' % arch,
+    # Current build location on bots.
+    'build',
+    # Legacy build location on bots.
+    'Release',
+  ]
+  possible_paths = [os.path.join(base_path, p) for p in possible_paths]
+  actual_paths = list(filter(is_build, possible_paths))
+  assert actual_paths, 'No build directory found.'
+  assert len(actual_paths) == 1, 'Found ambiguous build directories.'
+  return actual_paths[0]
+
+
+class Platform(object):
+  def __init__(self, args):
+    self.shell_dir = args.shell_dir
+    self.shell_dir_secondary = args.shell_dir_secondary
+    self.extra_flags = args.extra_flags.split()
+    self.args = args
+
+  @staticmethod
+  def ReadBuildConfig(args):
+    config_path = os.path.join(args.shell_dir, 'v8_build_config.json')
+    if not os.path.isfile(config_path):
+      return {}
+    with open(config_path) as f:
+      return json.load(f)
+
+  @staticmethod
+  def GetPlatform(args):
+    if Platform.ReadBuildConfig(args).get('is_android', False):
+      return AndroidPlatform(args)
+    else:
+      return DesktopPlatform(args)
+
+  def _Run(self, runnable, count, secondary=False):
+    raise NotImplementedError()  # pragma: no cover
+
+  def _LoggedRun(self, runnable, count, secondary=False):
+    suffix = ' - secondary' if secondary else ''
+    title = '>>> %%s (#%d)%s:' % ((count + 1), suffix)
+    try:
+      output = self._Run(runnable, count, secondary)
+    except OSError:
+      logging.exception(title % 'OSError')
+      raise
+    if output.stdout:
+      logging.info(title % 'Stdout' + '\n%s', output.stdout)
+    if output.stderr:  # pragma: no cover
+      # Print stderr for debugging.
+      logging.info(title % 'Stderr' + '\n%s', output.stderr)
+    if output.timed_out:
+      logging.warning('>>> Test timed out after %ss.', runnable.timeout)
+    if output.exit_code != 0:
+      logging.warning('>>> Test crashed with exit code %d.', output.exit_code)
+    return output
+
+  def Run(self, runnable, count, secondary):
+    """Execute the benchmark's main file.
+
+    Args:
+      runnable: A Runnable benchmark instance.
+      count: The number of this (repeated) run.
+      secondary: True if secondary run should be executed.
+
+    Returns:
+      A tuple with the two benchmark outputs. The latter will be NULL_OUTPUT if
+      secondary is False.
+    """
+    output = self._LoggedRun(runnable, count, secondary=False)
+    if secondary:
+      return output, self._LoggedRun(runnable, count, secondary=True)
+    else:
+      return output, NULL_OUTPUT
+
+
+class DesktopPlatform(Platform):
+  def __init__(self, args):
+    super(DesktopPlatform, self).__init__(args)
+    self.command_prefix = []
+
+    # Set up the command class for the OS-specific version.
+    command.setup(utils.GuessOS(), args.device)
+
+    if args.prioritize or args.affinitize != None:
+      self.command_prefix = ['schedtool']
+      if args.prioritize:
+        self.command_prefix += ['-n', '-20']
+      if args.affinitize != None:
+        # schedtool expects a bit pattern when setting affinity, where each
+        # bit set to '1' corresponds to a core the process may run on.
+        # The first bit corresponds to CPU 0. Since the 'affinitize' parameter
+        # is a core number, we need to map it to said bit pattern.
+        cpu = int(args.affinitize)
+        core = 1 << cpu
+        self.command_prefix += ['-a', ('0x%x' % core)]
+      self.command_prefix += ['-e']
+
+  def PreExecution(self):
+    pass
+
+  def PostExecution(self):
+    pass
+
+  def PreTests(self, node, path):
+    if isinstance(node, RunnableConfig):
+      node.ChangeCWD(path)
+
+  def _Run(self, runnable, count, secondary=False):
+    shell_dir = self.shell_dir_secondary if secondary else self.shell_dir
+    cmd = runnable.GetCommand(self.command_prefix, shell_dir, self.extra_flags)
+    output = cmd.execute()
+
+    if output.IsSuccess() and '--prof' in self.extra_flags:
+      os_prefix = {'linux': 'linux', 'macos': 'mac'}.get(utils.GuessOS())
+      if os_prefix:
+        tick_tools = os.path.join(TOOLS_BASE, '%s-tick-processor' % os_prefix)
+        subprocess.check_call(tick_tools + ' --only-summary', shell=True)
+      else:  # pragma: no cover
+        logging.warning(
+            'Profiler option currently supported on Linux and Mac OS.')
+
+    # /usr/bin/time outputs to stderr
+    if runnable.process_size:
+      output.stdout += output.stderr
+    return output
+
+
+class AndroidPlatform(Platform):  # pragma: no cover
+
+  def __init__(self, args):
+    super(AndroidPlatform, self).__init__(args)
+    self.driver = android.android_driver(args.device)
+
+  def PreExecution(self):
+    self.driver.set_high_perf_mode()
+
+  def PostExecution(self):
+    self.driver.set_default_perf_mode()
+    self.driver.tear_down()
+
+  def PreTests(self, node, path):
+    if isinstance(node, RunnableConfig):
+      node.ChangeCWD(path)
+    suite_dir = os.path.abspath(os.path.dirname(path))
+    if node.path:
+      bench_rel = os.path.normpath(os.path.join(*node.path))
+      bench_abs = os.path.join(suite_dir, bench_rel)
+    else:
+      bench_rel = '.'
+      bench_abs = suite_dir
+
+    self.driver.push_executable(self.shell_dir, 'bin', node.binary)
+    if self.shell_dir_secondary:
+      self.driver.push_executable(
+          self.shell_dir_secondary, 'bin_secondary', node.binary)
+
+    if isinstance(node, RunnableConfig):
+      self.driver.push_file(bench_abs, node.main, bench_rel)
+    for resource in node.resources:
+      self.driver.push_file(bench_abs, resource, bench_rel)
+
+  def _Run(self, runnable, count, secondary=False):
+    target_dir = 'bin_secondary' if secondary else 'bin'
+    self.driver.drop_ram_caches()
+
+    # Relative path to benchmark directory.
+    if runnable.path:
+      bench_rel = os.path.normpath(os.path.join(*runnable.path))
+    else:
+      bench_rel = '.'
+
+    logcat_file = None
+    if self.args.dump_logcats_to:
+      runnable_name = '-'.join(runnable.graphs)
+      logcat_file = os.path.join(
+          self.args.dump_logcats_to, 'logcat-%s-#%d%s.log' % (
+            runnable_name, count + 1, '-secondary' if secondary else ''))
+      logging.debug('Dumping logcat into %s', logcat_file)
+
+    output = Output()
+    start = time.time()
+    try:
+      output.stdout = self.driver.run(
+          target_dir=target_dir,
+          binary=runnable.binary,
+          args=runnable.GetCommandFlags(self.extra_flags),
+          rel_path=bench_rel,
+          timeout=runnable.timeout,
+          logcat_file=logcat_file,
+      )
+    except android.CommandFailedException as e:
+      output.stdout = e.output
+      output.exit_code = e.status
+    except android.TimeoutException as e:
+      output.stdout = e.output
+      output.timed_out = True
+    if runnable.process_size:
+      output.stdout += 'MaxMemory: Unsupported'
+    output.duration = time.time() - start
+    return output
+
+
+class CustomMachineConfiguration:
+  def __init__(self, disable_aslr = False, governor = None):
+    self.aslr_backup = None
+    self.governor_backup = None
+    self.disable_aslr = disable_aslr
+    self.governor = governor
+
+  def __enter__(self):
+    if self.disable_aslr:
+      self.aslr_backup = CustomMachineConfiguration.GetASLR()
+      CustomMachineConfiguration.SetASLR(0)
+    if self.governor != None:
+      self.governor_backup = CustomMachineConfiguration.GetCPUGovernor()
+      CustomMachineConfiguration.SetCPUGovernor(self.governor)
+    return self
+
+  def __exit__(self, type, value, traceback):
+    if self.aslr_backup != None:
+      CustomMachineConfiguration.SetASLR(self.aslr_backup)
+    if self.governor_backup != None:
+      CustomMachineConfiguration.SetCPUGovernor(self.governor_backup)
+
+  @staticmethod
+  def GetASLR():
+    try:
+      with open('/proc/sys/kernel/randomize_va_space', 'r') as f:
+        return int(f.readline().strip())
+    except Exception:
+      logging.exception('Failed to get current ASLR settings.')
+      raise
+
+  @staticmethod
+  def SetASLR(value):
+    try:
+      with open('/proc/sys/kernel/randomize_va_space', 'w') as f:
+        f.write(str(value))
+    except Exception:
+      logging.exception(
+          'Failed to update ASLR to %s. Are we running under sudo?', value)
+      raise
+
+    new_value = CustomMachineConfiguration.GetASLR()
+    if value != new_value:
+      raise Exception('Present value is %s' % new_value)
+
+  @staticmethod
+  def GetCPUCoresRange():
+    try:
+      with open('/sys/devices/system/cpu/present', 'r') as f:
+        indexes = f.readline()
+        r = list(map(int, indexes.split('-')))
+        if len(r) == 1:
+          return range(r[0], r[0] + 1)
+        return range(r[0], r[1] + 1)
+    except Exception:
+      logging.exception('Failed to retrieve number of CPUs.')
+      raise
+
+  @staticmethod
+  def GetCPUPathForId(cpu_index):
+    ret = '/sys/devices/system/cpu/cpu'
+    ret += str(cpu_index)
+    ret += '/cpufreq/scaling_governor'
+    return ret
+
+  @staticmethod
+  def GetCPUGovernor():
+    try:
+      cpu_indices = CustomMachineConfiguration.GetCPUCoresRange()
+      ret = None
+      for cpu_index in cpu_indices:
+        cpu_device = CustomMachineConfiguration.GetCPUPathForId(cpu_index)
+        with open(cpu_device, 'r') as f:
+          # We assume the governors of all CPUs are set to the same value
+          val = f.readline().strip()
+          if ret == None:
+            ret = val
+          elif ret != val:
+            raise Exception('CPU cores have differing governor settings')
+      return ret
+    except Exception:
+      logging.exception('Failed to get the current CPU governor. Is the CPU '
+                        'governor disabled? Check BIOS.')
+      raise
+
+  @staticmethod
+  def SetCPUGovernor(value):
+    try:
+      cpu_indices = CustomMachineConfiguration.GetCPUCoresRange()
+      for cpu_index in cpu_indices:
+        cpu_device = CustomMachineConfiguration.GetCPUPathForId(cpu_index)
+        with open(cpu_device, 'w') as f:
+          f.write(value)
+
+    except Exception:
+      logging.exception('Failed to change CPU governor to %s. Are we '
+                        'running under sudo?', value)
+      raise
+
+    cur_value = CustomMachineConfiguration.GetCPUGovernor()
+    if cur_value != value:
+      raise Exception('Could not set CPU governor. Present value is %s'
+                      % cur_value )
+
+
+class MaxTotalDurationReachedError(Exception):
+  """Exception used to stop running tests when max total duration is reached."""
+  pass
+
+
+def Main(argv):
+  parser = argparse.ArgumentParser()
+  parser.add_argument('--arch',
+                      help='The architecture to run tests for. Pass "auto" '
+                      'to auto-detect.', default='x64',
+                      choices=SUPPORTED_ARCHS + ['auto'])
+  parser.add_argument('--buildbot',
+                      help='Deprecated',
+                      default=False, action='store_true')
+  parser.add_argument('-d', '--device',
+                      help='The device ID to run Android tests on. If not '
+                      'given it will be autodetected.')
+  parser.add_argument('--extra-flags',
+                      help='Additional flags to pass to the test executable',
+                      default='')
+  parser.add_argument('--json-test-results',
+                      help='Path to a file for storing json results.')
+  parser.add_argument('--json-test-results-secondary',
+                      help='Path to a file for storing json results from run '
+                      'without patch or for reference build run.')
+  parser.add_argument('--outdir', help='Base directory with compile output',
+                      default='out')
+  parser.add_argument('--outdir-secondary',
+                      help='Base directory with compile output without patch '
+                      'or for reference build')
+  parser.add_argument('--binary-override-path',
+                      help='JavaScript engine binary. By default, d8 under '
+                      'architecture-specific build dir. '
+                      'Not supported in conjunction with outdir-secondary.')
+  parser.add_argument('--prioritize',
+                      help='Raise the priority to nice -20 for the '
+                      'benchmarking process. Requires Linux, schedtool, and '
+                      'sudo privileges.', default=False, action='store_true')
+  parser.add_argument('--affinitize',
+                      help='Run benchmarking process on the specified core. '
+                      'For example: --affinitize=0 will run the benchmark '
+                      'process on core 0. --affinitize=3 will run the '
+                      'benchmark process on core 3. Requires Linux, schedtool, '
+                      'and sudo privileges.', default=None)
+  parser.add_argument('--noaslr',
+                      help='Disable ASLR for the duration of the benchmarked '
+                      'process. Requires Linux and sudo privileges.',
+                      default=False, action='store_true')
+  parser.add_argument('--cpu-governor',
+                      help='Set cpu governor to specified policy for the '
+                      'duration of the benchmarked process. Typical options: '
+                      '"powersave" for more stable results, or "performance" '
+                      'for shorter completion time of suite, with potentially '
+                      'more noise in results.')
+  parser.add_argument('--filter',
+                      help='Only run the benchmarks beginning with this '
+                      'string. For example: '
+                      '--filter=JSTests/TypedArrays/ will run only TypedArray '
+                      'benchmarks from the JSTests suite.',
+                      default='')
+  parser.add_argument('--confidence-level', type=float,
+                      help='Repeatedly runs each benchmark until specified '
+                      'confidence level is reached. The value is interpreted '
+                      'as the number of standard deviations from the mean that '
+                      'all values must lie within. Typical values are 1, 2 and '
+                      '3 and correspond to 68%%, 95%% and 99.7%% probability '
+                      'that the measured value is within 0.1%% of the true '
+                      'value. Larger values result in more retries and thus '
+                      'longer runtime, but also provide more reliable results. '
+                      'Also see --max-total-duration flag.')
+  parser.add_argument('--max-total-duration', type=int, default=7140,  # 1h 59m
+                      help='Max total duration in seconds allowed for retries '
+                      'across all tests. This is especially useful in '
+                      'combination with the --confidence-level flag.')
+  parser.add_argument('--dump-logcats-to',
+                      help='Writes logcat output from each test into specified '
+                      'directory. Only supported for android targets.')
+  parser.add_argument('--run-count', type=int, default=0,
+                      help='Override the run count specified by the test '
+                      'suite. The default 0 uses the suite\'s config.')
+  parser.add_argument('-v', '--verbose', default=False, action='store_true',
+                      help='Be verbose and print debug output.')
+  parser.add_argument('suite', nargs='+', help='Path to the suite config file.')
+
+  try:
+    args = parser.parse_args(argv)
+  except SystemExit:
+    return INFRA_FAILURE_RETCODE
+
+  logging.basicConfig(
+      level=logging.DEBUG if args.verbose else logging.INFO,
+      format='%(asctime)s %(levelname)-8s  %(message)s')
+
+  if args.arch == 'auto':  # pragma: no cover
+    args.arch = utils.DefaultArch()
+    if args.arch not in SUPPORTED_ARCHS:
+      logging.error(
+          'Auto-detected architecture "%s" is not supported.', args.arch)
+      return INFRA_FAILURE_RETCODE
+
+  if (args.json_test_results_secondary and
+      not args.outdir_secondary):  # pragma: no cover
+    logging.error('For writing secondary json test results, a secondary outdir '
+                  'path must be specified.')
+    return INFRA_FAILURE_RETCODE
+
+  workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
+
+  if args.binary_override_path == None:
+    args.shell_dir = find_build_directory(
+        os.path.join(workspace, args.outdir), args.arch)
+    default_binary_name = 'd8'
+  else:
+    if not os.path.isfile(args.binary_override_path):
+      logging.error('binary-override-path must be a file name')
+      return INFRA_FAILURE_RETCODE
+    if args.outdir_secondary:
+      logging.error('specify either binary-override-path or outdir-secondary')
+      return INFRA_FAILURE_RETCODE
+    args.shell_dir = os.path.abspath(
+        os.path.dirname(args.binary_override_path))
+    default_binary_name = os.path.basename(args.binary_override_path)
+
+  if args.outdir_secondary:
+    args.shell_dir_secondary = find_build_directory(
+        os.path.join(workspace, args.outdir_secondary), args.arch)
+  else:
+    args.shell_dir_secondary = None
+
+  if args.json_test_results:
+    args.json_test_results = os.path.abspath(args.json_test_results)
+
+  if args.json_test_results_secondary:
+    args.json_test_results_secondary = os.path.abspath(
+        args.json_test_results_secondary)
+
+  # Ensure all arguments have absolute path before we start changing current
+  # directory.
+  args.suite = map(os.path.abspath, args.suite)
+
+  prev_aslr = None
+  prev_cpu_gov = None
+  platform = Platform.GetPlatform(args)
+
+  result_tracker = ResultTracker()
+  result_tracker_secondary = ResultTracker()
+  have_failed_tests = False
+  with CustomMachineConfiguration(governor = args.cpu_governor,
+                                  disable_aslr = args.noaslr) as conf:
+    for path in args.suite:
+      if not os.path.exists(path):  # pragma: no cover
+        result_tracker.AddError('Configuration file %s does not exist.' % path)
+        continue
+
+      with open(path) as f:
+        suite = json.loads(f.read())
+
+      # If no name is given, default to the file name without .json.
+      suite.setdefault('name', os.path.splitext(os.path.basename(path))[0])
+
+      # Setup things common to one test suite.
+      platform.PreExecution()
+
+      # Build the graph/trace tree structure.
+      default_parent = DefaultSentinel(default_binary_name)
+      root = BuildGraphConfigs(suite, args.arch, default_parent)
+
+      # Callback to be called on each node on traversal.
+      def NodeCB(node):
+        platform.PreTests(node, path)
+
+      # Traverse graph/trace tree and iterate over all runnables.
+      start = time.time()
+      try:
+        for runnable in FlattenRunnables(root, NodeCB):
+          runnable_name = '/'.join(runnable.graphs)
+          if (not runnable_name.startswith(args.filter) and
+              runnable_name + '/' != args.filter):
+            continue
+          logging.info('>>> Running suite: %s', runnable_name)
+
+          def RunGenerator(runnable):
+            if args.confidence_level:
+              counter = 0
+              while not result_tracker.HasEnoughRuns(
+                  runnable, args.confidence_level):
+                yield counter
+                counter += 1
+            else:
+              for i in range(0, max(1, args.run_count or runnable.run_count)):
+                yield i
+
+          for i in RunGenerator(runnable):
+            attempts_left = runnable.retry_count + 1
+            while attempts_left:
+              total_duration = time.time() - start
+              if total_duration > args.max_total_duration:
+                logging.info(
+                    '>>> Stopping now since running for too long (%ds > %ds)',
+                    total_duration, args.max_total_duration)
+                raise MaxTotalDurationReachedError()
+
+              output, output_secondary = platform.Run(
+                  runnable, i, secondary=args.shell_dir_secondary)
+              result_tracker.AddRunnableDuration(runnable, output.duration)
+              result_tracker_secondary.AddRunnableDuration(
+                  runnable, output_secondary.duration)
+
+              if output.IsSuccess() and output_secondary.IsSuccess():
+                runnable.ProcessOutput(output, result_tracker, i)
+                if output_secondary is not NULL_OUTPUT:
+                  runnable.ProcessOutput(
+                      output_secondary, result_tracker_secondary, i)
+                break
+
+              attempts_left -= 1
+              if not attempts_left:
+                logging.info('>>> Suite %s failed after %d retries',
+                             runnable_name, runnable.retry_count + 1)
+                have_failed_tests = True
+              else:
+                logging.info('>>> Retrying suite: %s', runnable_name)
+      except MaxTotalDurationReachedError:
+        have_failed_tests = True
+
+      platform.PostExecution()
+
+    if args.json_test_results:
+      result_tracker.WriteToFile(args.json_test_results)
+    else:  # pragma: no cover
+      print('Primary results:', result_tracker)
+
+  if args.shell_dir_secondary:
+    if args.json_test_results_secondary:
+      result_tracker_secondary.WriteToFile(args.json_test_results_secondary)
+    else:  # pragma: no cover
+      print('Secondary results:', result_tracker_secondary)
+
+  if (result_tracker.errors or result_tracker_secondary.errors or
+      have_failed_tests):
+    return 1
+
+  return 0
+
+
+def MainWrapper():
+  try:
+    return Main(sys.argv[1:])
+  except:
+    # Log uncaptured exceptions and report infra failure to the caller.
+    traceback.print_exc()
+    return INFRA_FAILURE_RETCODE
+
+
+if __name__ == '__main__':  # pragma: no cover
+  sys.exit(MainWrapper())
diff --git a/src/third_party/v8/tools/sanitizers/sancov_formatter.py b/src/third_party/v8/tools/sanitizers/sancov_formatter.py
new file mode 100755
index 0000000..c95ff82
--- /dev/null
+++ b/src/third_party/v8/tools/sanitizers/sancov_formatter.py
@@ -0,0 +1,459 @@
+#!/usr/bin/env python
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Script to transform and merge sancov files into human readable json-format.
+
+The script supports three actions:
+all: Writes a json file with all instrumented lines of all executables.
+merge: Merges sancov files with coverage output into an existing json file.
+split: Split json file into separate files per covered source file.
+
+The json data is structured as follows:
+{
+  "version": 1,
+  "tests": ["executable1", "executable2", ...],
+  "files": {
+    "file1": [[<instr line 1>, <bit_mask>], [<instr line 2>, <bit_mask>], ...],
+    "file2": [...],
+    ...
+  }
+}
+
+The executables are sorted and determine the test bit mask. Their index+1 is
+the bit, e.g. executable1 = 1, executable3 = 4, etc. Hence, a line covered by
+executable1 and executable3 will have bit_mask == 5 == 0b101. The number of
+tests is restricted to 52 in version 1, to allow javascript JSON parsing of
+the bitsets encoded as numbers. JS max safe int is (1 << 53) - 1.
+
+The line-number-bit_mask pairs are sorted by line number and don't contain
+duplicates.
+
+Split json data preserves the same format, but only contains one file per
+json file.
+
+The sancov tool is expected to be in the llvm compiler-rt third-party
+directory. It's not checked out by default and must be added as a custom deps:
+'v8/third_party/llvm/projects/compiler-rt':
+    'https://chromium.googlesource.com/external/llvm.org/compiler-rt.git'
+"""
+
+# for py2/py3 compatibility
+from __future__ import print_function
+from functools import reduce
+
+import argparse
+import json
+import logging
+import os
+import re
+import subprocess
+import sys
+
+from multiprocessing import Pool, cpu_count
+
+
+logging.basicConfig(level=logging.INFO)
+
+# Files to exclude from coverage. Dropping their data early adds more speed.
+# The contained cc files are already excluded from instrumentation, but inlined
+# data is referenced through v8's object files.
+EXCLUSIONS = [
+  'buildtools',
+  'src/third_party',
+  'third_party',
+  'test',
+  'testing',
+]
+
+# Executables found in the build output for which no coverage is generated.
+# Exclude them from the coverage data file.
+EXE_EXCLUSIONS = [
+  'generate-bytecode-expectations',
+  'hello-world',
+  'mksnapshot',
+  'parser-shell',
+  'process',
+  'shell',
+]
+
+# V8 checkout directory.
+BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(
+    os.path.abspath(__file__))))
+
+# The sancov tool location.
+SANCOV_TOOL = os.path.join(
+    BASE_DIR, 'third_party', 'llvm', 'projects', 'compiler-rt',
+    'lib', 'sanitizer_common', 'scripts', 'sancov.py')
+
+# Simple script to sanitize the PCs from objdump.
+SANITIZE_PCS = os.path.join(BASE_DIR, 'tools', 'sanitizers', 'sanitize_pcs.py')
+
+# The llvm symbolizer location.
+SYMBOLIZER = os.path.join(
+    BASE_DIR, 'third_party', 'llvm-build', 'Release+Asserts', 'bin',
+    'llvm-symbolizer')
+
+# Number of cpus.
+CPUS = cpu_count()
+
+# Regexp to find sancov files as output by sancov_merger.py. Also grabs the
+# executable name in group 1.
+SANCOV_FILE_RE = re.compile(r'^(.*)\.result.sancov$')
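+# For example, "d8.result.sancov" yields the executable name "d8".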
+
+
+def executables(build_dir):
+  """Iterates over executable files in the build directory."""
+  for f in os.listdir(build_dir):
+    file_path = os.path.join(build_dir, f)
+    if (os.path.isfile(file_path) and
+        os.access(file_path, os.X_OK) and
+        f not in EXE_EXCLUSIONS):
+      yield file_path
+
+
+def process_symbolizer_output(output, build_dir):
+  """Post-process llvm symbolizer output.
+
+  Excludes files outside the v8 checkout or given in exclusion list above
+  from further processing. Drops the character index in each line.
+
+  Returns: A mapping of file names to lists of line numbers. The file names
+           have relative paths to the v8 base directory. The lists of line
+           numbers don't contain duplicate lines and are sorted.
+  """
+  # Path prefix added by the llvm symbolizer including trailing slash.
+  output_path_prefix = os.path.join(build_dir, '..', '..', '')
+
+  # Drop path prefix when iterating lines. The path is redundant and takes
+  # too much space. Drop files outside that path, e.g. generated files in
+  # the build dir and absolute paths to c++ library headers.
+  def iter_lines():
+    for line in output.strip().splitlines():
+      if line.startswith(output_path_prefix):
+        yield line[len(output_path_prefix):]
+
+  # Map file names to sets of instrumented line numbers.
+  file_map = {}
+  for line in iter_lines():
+    # Drop character number, we only care for line numbers. Each line has the
+    # form: <file name>:<line number>:<character number>.
+    file_name, number, _ = line.split(':')
+    file_map.setdefault(file_name, set([])).add(int(number))
+
+  # Remove exclusion patterns from file map. It's cheaper to do it after the
+  # mapping, as there are few excluded files and we don't want to do this
+  # check for numerous lines in ordinary files.
+  def keep(file_name):
+    for e in EXCLUSIONS:
+      if file_name.startswith(e):
+        return False
+    return True
+
+  # Return in serializable form and filter.
+  return {k: sorted(file_map[k]) for k in file_map if keep(k)}
+
+
+def get_instrumented_lines(executable):
+  """Return the instrumented lines of an executable.
+
+  Called through a multiprocessing pool.
+
+  Returns: Post-processed llvm output as returned by process_symbolizer_output.
+  """
+  # The first two pipes are from llvm's tool sancov.py with 0x added to the hex
+  # numbers. The results are piped into the llvm symbolizer, which outputs for
+  # each PC: <file name with abs path>:<line number>:<character number>.
+  # We don't call the sancov tool itself here, to gain more speed.
+  process = subprocess.Popen(
+      'objdump -d %s | '
+      'grep \'^\s\+[0-9a-f]\+:.*\scall\(q\|\)\s\+[0-9a-f]\+ '
+      '<__sanitizer_cov\(_with_check\|\|_trace_pc_guard\)\(@plt\|\)>\' | '
+      'grep \'^\s\+[0-9a-f]\+\' -o | '
+      '%s | '
+      '%s --obj %s -functions=none' %
+          (executable, SANITIZE_PCS, SYMBOLIZER, executable),
+      stdout=subprocess.PIPE,
+      stderr=subprocess.PIPE,
+      stdin=subprocess.PIPE,
+      cwd=BASE_DIR,
+      shell=True,
+  )
+  output, _ = process.communicate()
+  assert process.returncode == 0
+  return process_symbolizer_output(output, os.path.dirname(executable))
+
+
+def merge_instrumented_line_results(exe_list, results):
+  """Merge multiprocessing results for all instrumented lines.
+
+  Args:
+    exe_list: List of all executable names with absolute paths.
+    results: List of results as returned by get_instrumented_lines.
+
+  Returns: Dict to be used as json data as specified on the top of this page.
+           The dictionary contains all instrumented lines of all files
+           referenced by all executables.
+  """
+  def merge_files(x, y):
+    for file_name, lines in y.iteritems():
+      x.setdefault(file_name, set([])).update(lines)
+    return x
+  result = reduce(merge_files, results, {})
+
+  # Return data as file->lines mapping. The lines are saved as lists
+  # with (line number, test bits (as int)). The test bits are initialized with
+  # 0, meaning instrumented, but no coverage.
+  # The order of the test bits is given with key 'tests'. For now, these are
+  # the executable names. We use a _list_ with two items instead of a tuple to
+  # ease merging by allowing mutation of the second item.
+  return {
+    'version': 1,
+    'tests': sorted(map(os.path.basename, exe_list)),
+    'files': {f: map(lambda l: [l, 0], sorted(result[f])) for f in result},
+  }
+
+
+def write_instrumented(options):
+  """Implements the 'all' action of this tool."""
+  exe_list = list(executables(options.build_dir))
+  logging.info('Reading instrumented lines from %d executables.',
+               len(exe_list))
+  pool = Pool(CPUS)
+  try:
+    results = pool.imap_unordered(get_instrumented_lines, exe_list)
+  finally:
+    pool.close()
+
+  # Merge multiprocessing results and prepare output data.
+  data = merge_instrumented_line_results(exe_list, results)
+
+  logging.info('Read data from %d executables, which covers %d files.',
+               len(data['tests']), len(data['files']))
+  logging.info('Writing results to %s', options.json_output)
+
+  # Write json output.
+  with open(options.json_output, 'w') as f:
+    json.dump(data, f, sort_keys=True)
+
+
+def get_covered_lines(args):
+  """Return the covered lines of an executable.
+
+  Called through a multiprocessing pool. The args are expected to unpack to:
+    cov_dir: Folder with sancov files merged by sancov_merger.py.
+    executable: Absolute path to the executable that was called to produce the
+                given coverage data.
+    sancov_file: The merged sancov file with coverage data.
+
+  Returns: A tuple of post-processed llvm output as returned by
+           process_symbolizer_output and the executable name.
+  """
+  cov_dir, executable, sancov_file = args
+
+  # Let the sancov tool print the covered PCs and pipe them through the llvm
+  # symbolizer.
+  process = subprocess.Popen(
+      '%s print %s 2> /dev/null | '
+      '%s --obj %s -functions=none' %
+          (SANCOV_TOOL,
+           os.path.join(cov_dir, sancov_file),
+           SYMBOLIZER,
+           executable),
+      stdout=subprocess.PIPE,
+      stderr=subprocess.PIPE,
+      stdin=subprocess.PIPE,
+      cwd=BASE_DIR,
+      shell=True,
+  )
+  output, _ = process.communicate()
+  assert process.returncode == 0
+  return (
+      process_symbolizer_output(output, os.path.dirname(executable)),
+      os.path.basename(executable),
+  )
+
+
+def merge_covered_line_results(data, results):
+  """Merge multiprocessing results for covered lines.
+
+  The data is mutated, the results are merged into it in place.
+
+  Args:
+    data: Existing coverage data from json file containing all instrumented
+          lines.
+    results: List of results as returned by get_covered_lines.
+  """
+
+  # List of executables and mapping to the test bit mask. The number of
+  # tests is restricted to 52, to allow javascript JSON parsing of
+  # the bitsets encoded as numbers. JS max safe int is (1 << 53) - 1.
+  exe_list = data['tests']
+  assert len(exe_list) <= 52, 'Max 52 different tests are supported.'
+  test_bit_masks = {exe:1<<i for i, exe in enumerate(exe_list)}
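+  # E.g. with tests ["cctest", "d8", "unittests"], cctest gets bit 1, d8 bit 2
+  # and unittests bit 4; a line covered by cctest and unittests has mask 5.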
+
+  def merge_lines(old_lines, new_lines, mask):
+    """Merge the coverage data of a list of lines.
+
+    Args:
+      old_lines: Lines as list of pairs with line number and test bit mask.
+                 The new lines will be merged into the list in place.
+      new_lines: List of new (covered) lines (sorted).
+      mask: The bit to be set for covered lines. The bit index is the test
+            index of the executable that covered the line.
+    """
+    i = 0
+    # Iterate over old and new lines, both are sorted.
+    for l in new_lines:
+      while old_lines[i][0] < l:
+        # Forward instrumented lines not present in this coverage data.
+        i += 1
+        # TODO: Add more context to the assert message.
+        assert i < len(old_lines), 'Covered line %d not in input file.' % l
+      assert old_lines[i][0] == l, 'Covered line %d not in input file.' % l
+
+      # Add coverage information to the line.
+      old_lines[i][1] |= mask
+
+  def merge_files(data, result):
+    """Merge result into data.
+
+    The data is mutated in place.
+
+    Args:
+      data: Merged coverage data from the previous reduce step.
+      result: New result to be merged in. The type is as returned by
+              get_covered_lines.
+    """
+    file_map, executable = result
+    files = data['files']
+    for file_name, lines in file_map.iteritems():
+      merge_lines(files[file_name], lines, test_bit_masks[executable])
+    return data
+
+  reduce(merge_files, results, data)
+
+
+def merge(options):
+  """Implements the 'merge' action of this tool."""
+
+  # Check if folder with coverage output exists.
+  assert (os.path.exists(options.coverage_dir) and
+          os.path.isdir(options.coverage_dir))
+
+  # Inputs for multiprocessing. List of tuples of:
+  # Coverage dir, absolute path to executable, sancov file name.
+  inputs = []
+  for sancov_file in os.listdir(options.coverage_dir):
+    match = SANCOV_FILE_RE.match(sancov_file)
+    if match:
+      inputs.append((
+          options.coverage_dir,
+          os.path.join(options.build_dir, match.group(1)),
+          sancov_file,
+      ))
+
+  logging.info('Merging %d sancov files into %s',
+               len(inputs), options.json_input)
+
+  # Post-process covered lines in parallel.
+  pool = Pool(CPUS)
+  try:
+    results = pool.imap_unordered(get_covered_lines, inputs)
+  finally:
+    pool.close()
+
+  # Load existing json data file for merging the results.
+  with open(options.json_input, 'r') as f:
+    data = json.load(f)
+
+  # Merge multiprocessing results. Mutates data.
+  merge_covered_line_results(data, results)
+
+  logging.info('Merged data from %d executables, which covers %d files.',
+               len(data['tests']), len(data['files']))
+  logging.info('Writing results to %s', options.json_output)
+
+  # Write merged results to file.
+  with open(options.json_output, 'w') as f:
+    json.dump(data, f, sort_keys=True)
+
+
+def split(options):
+  """Implements the 'split' action of this tool."""
+  # Load existing json data file for splitting.
+  with open(options.json_input, 'r') as f:
+    data = json.load(f)
+
+  logging.info('Splitting off %d coverage files from %s',
+               len(data['files']), options.json_input)
+
+  for file_name, coverage in data['files'].iteritems():
+    # Preserve relative directories that are part of the file name.
+    file_path = os.path.join(options.output_dir, file_name + '.json')
+    try:
+      os.makedirs(os.path.dirname(file_path))
+    except OSError:
+      # Ignore existing directories.
+      pass
+
+    with open(file_path, 'w') as f:
+      # Flat-copy the old dict.
+      new_data = dict(data)
+
+      # Update current file.
+      new_data['files'] = {file_name: coverage}
+
+      # Write json data.
+      json.dump(new_data, f, sort_keys=True)
+
+
+def main(args=None):
+  parser = argparse.ArgumentParser()
+  # TODO(machenbach): Make this required and deprecate the default.
+  parser.add_argument('--build-dir',
+                      default=os.path.join(BASE_DIR, 'out', 'Release'),
+                      help='Path to the build output directory.')
+  parser.add_argument('--coverage-dir',
+                      help='Path to the sancov output files.')
+  parser.add_argument('--json-input',
+                      help='Path to an existing json file with coverage data.')
+  parser.add_argument('--json-output',
+                      help='Path to a file to write json output to.')
+  parser.add_argument('--output-dir',
+                      help='Directory to write the split output files to.')
+  parser.add_argument('action', choices=['all', 'merge', 'split'],
+                      help='Action to perform.')
+
+  options = parser.parse_args(args)
+  options.build_dir = os.path.abspath(options.build_dir)
+  if options.action.lower() == 'all':
+    if not options.json_output:
+      print('--json-output is required')
+      return 1
+    write_instrumented(options)
+  elif options.action.lower() == 'merge':
+    if not options.coverage_dir:
+      print('--coverage-dir is required')
+      return 1
+    if not options.json_input:
+      print('--json-input is required')
+      return 1
+    if not options.json_output:
+      print('--json-output is required')
+      return 1
+    merge(options)
+  elif options.action.lower() == 'split':
+    if not options.json_input:
+      print('--json-input is required')
+      return 1
+    if not options.output_dir:
+      print('--output-dir is required')
+      return 1
+    split(options)
+  return 0
+
+
+if __name__ == '__main__':
+  sys.exit(main())
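+
+# Illustrative invocations (file names are placeholders):
+#   python sancov_formatter.py all --json-output=instrumented.json
+#   python sancov_formatter.py merge --coverage-dir=cov \
+#       --json-input=instrumented.json --json-output=merged.json
+#   python sancov_formatter.py split --json-input=merged.json --output-dir=out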
diff --git a/src/third_party/v8/tools/sanitizers/sancov_formatter_test.py b/src/third_party/v8/tools/sanitizers/sancov_formatter_test.py
new file mode 100644
index 0000000..008151d
--- /dev/null
+++ b/src/third_party/v8/tools/sanitizers/sancov_formatter_test.py
@@ -0,0 +1,223 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Requires python-coverage. Native python coverage version >= 3.7.1 should
+# be installed to get the best speed.
+
+import copy
+import coverage
+import logging
+import json
+import os
+import shutil
+import sys
+import tempfile
+import unittest
+
+
+# Directory of this file.
+LOCATION = os.path.dirname(os.path.abspath(__file__))
+
+# V8 checkout directory.
+BASE_DIR = os.path.dirname(os.path.dirname(LOCATION))
+
+# Executable location.
+BUILD_DIR = os.path.join(BASE_DIR, 'out', 'Release')
+
+def abs_line(line):
+  """Absolute paths as output by the llvm symbolizer."""
+  return '%s/%s' % (BUILD_DIR, line)
+
+
+#------------------------------------------------------------------------------
+
+# Data for test_process_symbolizer_output. This simulates output from the
+# llvm symbolizer. The paths are not normalized.
+SYMBOLIZER_OUTPUT = (
+  abs_line('../../src/foo.cc:87:7\n') +
+  abs_line('../../src/foo.cc:92:0\n') + # Test sorting.
+  abs_line('../../src/baz/bar.h:1234567:0\n') + # Test large line numbers.
+  abs_line('../../src/foo.cc:92:0\n') + # Test duplicates.
+  abs_line('../../src/baz/bar.h:0:0\n') + # Test subdirs.
+  '/usr/include/cool_stuff.h:14:2\n' + # Test dropping absolute paths.
+  abs_line('../../src/foo.cc:87:10\n') + # Test dropping character indexes.
+  abs_line('../../third_party/icu.cc:0:0\n') + # Test dropping excluded dirs.
+  abs_line('../../src/baz/bar.h:11:0\n')
+)
+
+# The expected post-processed output maps relative file names to line numbers.
+# The numbers are sorted and unique.
+EXPECTED_PROCESSED_OUTPUT = {
+  'src/baz/bar.h': [0, 11, 1234567],
+  'src/foo.cc': [87, 92],
+}
+
+
+#------------------------------------------------------------------------------
+
+# Data for test_merge_instrumented_line_results. A list of absolute paths to
+# all executables.
+EXE_LIST = [
+  '/path/to/d8',
+  '/path/to/cctest',
+  '/path/to/unittests',
+]
+
+# Post-processed llvm symbolizer output as returned by
+# process_symbolizer_output. These are lists of this output for merging.
+INSTRUMENTED_LINE_RESULTS = [
+  {
+    'src/baz/bar.h': [0, 3, 7],
+    'src/foo.cc': [11],
+  },
+  {
+    'src/baz/bar.h': [3, 7, 8],
+    'src/baz.cc': [2],
+    'src/foo.cc': [1, 92],
+  },
+  {
+    'src/baz.cc': [1],
+    'src/foo.cc': [92, 93],
+  },
+]
+
+# This shows initial instrumentation. No lines are covered, hence,
+# the coverage mask is 0 for all lines. The line tuples remain sorted by
+# line number and contain no duplicates.
+EXPECTED_INSTRUMENTED_LINES_DATA = {
+  'version': 1,
+  'tests': ['cctest', 'd8', 'unittests'],
+  'files': {
+    'src/baz/bar.h': [[0, 0], [3, 0], [7, 0], [8, 0]],
+    'src/baz.cc': [[1, 0], [2, 0]],
+    'src/foo.cc': [[1, 0], [11, 0], [92, 0], [93, 0]],
+  },
+}
+
+
+#------------------------------------------------------------------------------
+
+# Data for test_merge_covered_line_results. List of post-processed
+# llvm-symbolizer output as a tuple including the executable name of each data
+# set.
+COVERED_LINE_RESULTS = [
+  ({
+     'src/baz/bar.h': [3, 7],
+     'src/foo.cc': [11],
+   }, 'd8'),
+  ({
+     'src/baz/bar.h': [3, 7],
+     'src/baz.cc': [2],
+     'src/foo.cc': [1],
+   }, 'cctest'),
+  ({
+     'src/foo.cc': [92],
+     'src/baz.cc': [2],
+   }, 'unittests'),
+]
+
+# This shows initial instrumentation + coverage. The mask bits are:
+# cctest: 1, d8: 2, unittests: 4. So a line covered by cctest and unittests
+# has a coverage mask of 0b101, e.g. line 2 in src/baz.cc.
+EXPECTED_COVERED_LINES_DATA = {
+  'version': 1,
+  'tests': ['cctest', 'd8', 'unittests'],
+  'files': {
+    'src/baz/bar.h': [[0, 0b0], [3, 0b11], [7, 0b11], [8, 0b0]],
+    'src/baz.cc': [[1, 0b0], [2, 0b101]],
+    'src/foo.cc': [[1, 0b1], [11, 0b10], [92, 0b100], [93, 0b0]],
+  },
+}
+
+
+#------------------------------------------------------------------------------
+
+# Data for test_split.
+
+EXPECTED_SPLIT_FILES = [
+  (
+    os.path.join('src', 'baz', 'bar.h.json'),
+    {
+      'version': 1,
+      'tests': ['cctest', 'd8', 'unittests'],
+      'files': {
+        'src/baz/bar.h': [[0, 0b0], [3, 0b11], [7, 0b11], [8, 0b0]],
+      },
+    },
+  ),
+  (
+    os.path.join('src', 'baz.cc.json'),
+    {
+      'version': 1,
+      'tests': ['cctest', 'd8', 'unittests'],
+      'files': {
+        'src/baz.cc': [[1, 0b0], [2, 0b101]],
+      },
+    },
+  ),
+  (
+    os.path.join('src', 'foo.cc.json'),
+    {
+      'version': 1,
+      'tests': ['cctest', 'd8', 'unittests'],
+      'files': {
+        'src/foo.cc': [[1, 0b1], [11, 0b10], [92, 0b100], [93, 0b0]],
+      },
+    },
+  ),
+]
+
+
+class FormatterTests(unittest.TestCase):
+  @classmethod
+  def setUpClass(cls):
+    sys.path.append(LOCATION)
+    cls._cov = coverage.coverage(
+        include=([os.path.join(LOCATION, 'sancov_formatter.py')]))
+    cls._cov.start()
+    import sancov_formatter
+    global sancov_formatter
+
+  @classmethod
+  def tearDownClass(cls):
+    cls._cov.stop()
+    cls._cov.report()
+
+  def test_process_symbolizer_output(self):
+    result = sancov_formatter.process_symbolizer_output(
+        SYMBOLIZER_OUTPUT, BUILD_DIR)
+    self.assertEquals(EXPECTED_PROCESSED_OUTPUT, result)
+
+  def test_merge_instrumented_line_results(self):
+    result = sancov_formatter.merge_instrumented_line_results(
+      EXE_LIST, INSTRUMENTED_LINE_RESULTS)
+    self.assertEquals(EXPECTED_INSTRUMENTED_LINES_DATA, result)
+
+  def test_merge_covered_line_results(self):
+    data = copy.deepcopy(EXPECTED_INSTRUMENTED_LINES_DATA)
+    sancov_formatter.merge_covered_line_results(
+      data, COVERED_LINE_RESULTS)
+    self.assertEquals(EXPECTED_COVERED_LINES_DATA, data)
+
+  def test_split(self):
+    _, json_input = tempfile.mkstemp(prefix='tmp_coverage_test_split')
+    with open(json_input, 'w') as f:
+      json.dump(EXPECTED_COVERED_LINES_DATA, f)
+    output_dir = tempfile.mkdtemp(prefix='tmp_coverage_test_split')
+
+    try:
+      sancov_formatter.main([
+        'split',
+        '--json-input', json_input,
+        '--output-dir', output_dir,
+      ])
+
+      for file_name, expected_data in EXPECTED_SPLIT_FILES:
+        full_path = os.path.join(output_dir, file_name)
+        self.assertTrue(os.path.exists(full_path))
+        with open(full_path) as f:
+          self.assertEquals(expected_data, json.load(f))
+    finally:
+      os.remove(json_input)
+      shutil.rmtree(output_dir)
diff --git a/src/third_party/v8/tools/sanitizers/sancov_merger.py b/src/third_party/v8/tools/sanitizers/sancov_merger.py
new file mode 100755
index 0000000..6fd2eb2
--- /dev/null
+++ b/src/third_party/v8/tools/sanitizers/sancov_merger.py
@@ -0,0 +1,229 @@
+#!/usr/bin/env python
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Script for merging sancov files in parallel.
+
+When merging test runner output, the sancov files are expected
+to be located in one directory with the file-name pattern:
+<executable name>.test.<id>.<attempt>.sancov
+
+For each executable, this script writes a new file:
+<executable name>.result.sancov
+
+When --swarming-output-dir is specified, this script will merge the result
+files found there into the coverage folder.
+
+The sancov tool is expected to be in the llvm compiler-rt third-party
+directory. It's not checked out by default and must be added as a custom deps:
+'v8/third_party/llvm/projects/compiler-rt':
+    'https://chromium.googlesource.com/external/llvm.org/compiler-rt.git'
+"""
+
+import argparse
+import logging
+import math
+import os
+import re
+import subprocess
+import sys
+
+from multiprocessing import Pool, cpu_count
+
+
+logging.basicConfig(level=logging.INFO)
+
+# V8 checkout directory.
+BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(
+    os.path.abspath(__file__))))
+
+# The sancov tool location.
+SANCOV_TOOL = os.path.join(
+    BASE_DIR, 'third_party', 'llvm', 'projects', 'compiler-rt',
+    'lib', 'sanitizer_common', 'scripts', 'sancov.py')
+
+# Number of cpus.
+CPUS = cpu_count()
+
+# Regexp to find sancov file as output by the v8 test runner. Also grabs the
+# executable name in group 1.
+SANCOV_FILE_RE = re.compile(r'^(.*)\.test\.\d+\.\d+\.sancov$')
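+# Illustrative match (hypothetical file name): 'd8.test.123.1.sancov' matches
+# with group(1) == 'd8'.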
+
+# Regexp to find sancov result files as returned from swarming.
+SANCOV_RESULTS_FILE_RE = re.compile(r'^.*\.result\.sancov$')
+
+
+def merge(args):
+  """Merge several sancov files into one.
+
+  Called through a multiprocessing pool. The args are expected to unpack to:
+    keep: Whether source and intermediate sancov files should be kept.
+    coverage_dir: Folder where to find the sancov files.
+    executable: Name of the executable whose sancov files should be merged.
+    index: A number to be put into the intermediate result file name.
+           If None, this is a final result.
+    bucket: The list of sancov files to be merged.
+  Returns: A tuple with the executable name and the result file name.
+  """
+  keep, coverage_dir, executable, index, bucket = args
+  process = subprocess.Popen(
+      [SANCOV_TOOL, 'merge'] + bucket,
+      stdout=subprocess.PIPE,
+      stderr=subprocess.PIPE,
+      cwd=coverage_dir,
+  )
+  output, _ = process.communicate()
+  assert process.returncode == 0
+  if index is not None:
+    # This is an intermediate result, add the bucket index to the file name.
+    result_file_name = '%s.result.%d.sancov' % (executable, index)
+  else:
+    # This is the final result without bucket index.
+    result_file_name = '%s.result.sancov' % executable
+  with open(os.path.join(coverage_dir, result_file_name), "wb") as f:
+    f.write(output)
+  if not keep:
+    for f in bucket:
+      os.remove(os.path.join(coverage_dir, f))
+  return executable, result_file_name
+
+
+def generate_inputs(keep, coverage_dir, file_map, cpus):
+  """Generate inputs for multiprocessed merging.
+
+  Splits the sancov files into several buckets, so that each bucket can be
+  merged in a separate process. We typically have only a few executables in
+  total, each with many associated files. In the general case, with many
+  executables, we might need to avoid splitting the buckets of executables
+  that have only a few files.
+
+  Returns: List of args as expected by merge above.
+  """
+  inputs = []
+  for executable, files in file_map.iteritems():
+    # What's the bucket size for distributing files for merging? E.g. with
+    # 2 cpus and 9 files we want bucket size 5.
+    n = max(2, int(math.ceil(len(files) / float(cpus))))
+
+    # Chop files into buckets.
+    buckets = [files[i:i+n] for i in range(0, len(files), n)]
+
+    # Inputs for multiprocessing. List of tuples containing:
+    # Keep-files option, base path, executable name, index of bucket,
+    # list of files.
+    inputs.extend([(keep, coverage_dir, executable, i, b)
+                   for i, b in enumerate(buckets)])
+  return inputs
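+
+# Illustrative example of generate_inputs (values assumed): with keep=False,
+# coverage_dir='/some/path', 2 cpus and a 'd8' executable that produced seven
+# files d8.test.1.1.sancov ... d8.test.6.1.sancov, the bucket size is
+# ceil(7 / 2.0) = 4, yielding buckets of 4 and 3 files and tuples like
+# (False, '/some/path', 'd8', 0, ['d8.test.1.1.sancov', ...]).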
+
+
+def merge_parallel(inputs, merge_fun=merge):
+  """Process several merge jobs in parallel."""
+  pool = Pool(CPUS)
+  try:
+    return pool.map(merge_fun, inputs)
+  finally:
+    pool.close()
+
+
+def merge_test_runner_output(options):
+  # Map executable names to their respective sancov files.
+  file_map = {}
+  for f in os.listdir(options.coverage_dir):
+    match = SANCOV_FILE_RE.match(f)
+    if match:
+      file_map.setdefault(match.group(1), []).append(f)
+
+  inputs = generate_inputs(
+      options.keep, options.coverage_dir, file_map, CPUS)
+
+  logging.info('Executing %d merge jobs in parallel for %d executables.' %
+               (len(inputs), len(file_map)))
+
+  results = merge_parallel(inputs)
+
+  # Map executable names to intermediate bucket result files.
+  file_map = {}
+  for executable, f in results:
+    file_map.setdefault(executable, []).append(f)
+
+  # Merge the bucket results for each executable.
+  # The final result has index None, so no index will appear in the
+  # file name.
+  inputs = [(options.keep, options.coverage_dir, executable, None, files)
+             for executable, files in file_map.iteritems()]
+
+  logging.info('Merging %d intermediate results.' % len(inputs))
+
+  merge_parallel(inputs)
+
+
+def merge_two(args):
+  """Merge two sancov files.
+
+  Called through a multiprocessing pool. The args are expected to unpack to:
+    swarming_output_dir: Folder where to find the new file.
+    coverage_dir: Folder where to find the existing file.
+    f: File name of the file to be merged.
+  """
+  swarming_output_dir, coverage_dir, f = args
+  input_file = os.path.join(swarming_output_dir, f)
+  output_file = os.path.join(coverage_dir, f)
+  process = subprocess.Popen(
+      [SANCOV_TOOL, 'merge', input_file, output_file],
+      stdout=subprocess.PIPE,
+      stderr=subprocess.PIPE,
+  )
+  output, _ = process.communicate()
+  assert process.returncode == 0
+  with open(output_file, "wb") as f:
+    f.write(output)
+
+
+def merge_swarming_output(options):
+  # Iterate sancov files from swarming.
+  files = []
+  for f in os.listdir(options.swarming_output_dir):
+    match = SANCOV_RESULTS_FILE_RE.match(f)
+    if match:
+      if os.path.exists(os.path.join(options.coverage_dir, f)):
+        # If the same file already exists, we'll merge the data.
+        files.append(f)
+      else:
+        # No file yet? Just move it.
+        os.rename(os.path.join(options.swarming_output_dir, f),
+                  os.path.join(options.coverage_dir, f))
+
+  inputs = [(options.swarming_output_dir, options.coverage_dir, f)
+            for f in files]
+
+  logging.info('Executing %d merge jobs in parallel.' % len(inputs))
+  merge_parallel(inputs, merge_two)
+
+
+def main():
+  parser = argparse.ArgumentParser()
+  parser.add_argument('--coverage-dir', required=True,
+                      help='Path to the sancov output files.')
+  parser.add_argument('--keep', default=False, action='store_true',
+                      help='Keep sancov output files after merging.')
+  parser.add_argument('--swarming-output-dir',
+                      help='Folder containing a results shard from swarming.')
+  options = parser.parse_args()
+
+  # Check if folder with coverage output exists.
+  assert (os.path.exists(options.coverage_dir) and
+          os.path.isdir(options.coverage_dir))
+
+  if options.swarming_output_dir:
+    # Check if folder with swarming output exists.
+    assert (os.path.exists(options.swarming_output_dir) and
+            os.path.isdir(options.swarming_output_dir))
+    merge_swarming_output(options)
+  else:
+    merge_test_runner_output(options)
+
+  return 0
+
+
+if __name__ == '__main__':
+  sys.exit(main())
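+
+# Illustrative invocations (paths are placeholders):
+#   python sancov_merger.py --coverage-dir=/path/to/coverage
+#   python sancov_merger.py --coverage-dir=/path/to/coverage \
+#       --swarming-output-dir=/path/to/swarming/shard --keep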
diff --git a/src/third_party/v8/tools/sanitizers/sancov_merger_test.py b/src/third_party/v8/tools/sanitizers/sancov_merger_test.py
new file mode 100644
index 0000000..899c716
--- /dev/null
+++ b/src/third_party/v8/tools/sanitizers/sancov_merger_test.py
@@ -0,0 +1,82 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import unittest
+
+import sancov_merger
+
+
+# Files on disk after test runner completes. The files are mapped by
+# executable name -> file list.
+FILE_MAP = {
+  'd8': [
+    'd8.test.1.1.sancov',
+    'd8.test.2.1.sancov',
+    'd8.test.3.1.sancov',
+    'd8.test.4.1.sancov',
+    'd8.test.5.1.sancov',
+    'd8.test.5.2.sancov',
+    'd8.test.6.1.sancov',
+  ],
+  'cctest': [
+    'cctest.test.1.1.sancov',
+    'cctest.test.2.1.sancov',
+    'cctest.test.3.1.sancov',
+    'cctest.test.4.1.sancov',
+  ],
+}
+
+
+# Inputs for merge process with 2 cpus. The tuples contain:
+# (flag, path, executable name, intermediate result index, file list).
+EXPECTED_INPUTS_2 = [
+  (False, '/some/path', 'cctest', 0, [
+    'cctest.test.1.1.sancov',
+    'cctest.test.2.1.sancov']),
+  (False, '/some/path', 'cctest', 1, [
+    'cctest.test.3.1.sancov',
+    'cctest.test.4.1.sancov']),
+  (False, '/some/path', 'd8', 0, [
+    'd8.test.1.1.sancov',
+    'd8.test.2.1.sancov',
+    'd8.test.3.1.sancov',
+    'd8.test.4.1.sancov']),
+  (False, '/some/path', 'd8', 1, [
+    'd8.test.5.1.sancov',
+    'd8.test.5.2.sancov',
+    'd8.test.6.1.sancov']),
+]
+
+
+# The same for 4 cpus.
+EXPECTED_INPUTS_4 = [
+  (True, '/some/path', 'cctest', 0, [
+    'cctest.test.1.1.sancov',
+    'cctest.test.2.1.sancov']),
+  (True, '/some/path', 'cctest', 1, [
+    'cctest.test.3.1.sancov',
+    'cctest.test.4.1.sancov']),
+  (True, '/some/path', 'd8', 0, [
+    'd8.test.1.1.sancov',
+    'd8.test.2.1.sancov']),
+  (True, '/some/path', 'd8', 1, [
+    'd8.test.3.1.sancov',
+    'd8.test.4.1.sancov']),
+  (True, '/some/path', 'd8', 2, [
+    'd8.test.5.1.sancov',
+    'd8.test.5.2.sancov']),
+  (True, '/some/path', 'd8', 3, [
+    'd8.test.6.1.sancov'])]
+
+
+class MergerTests(unittest.TestCase):
+  def test_generate_inputs_2_cpu(self):
+    inputs = sancov_merger.generate_inputs(
+        False, '/some/path', FILE_MAP, 2)
+    self.assertEquals(EXPECTED_INPUTS_2, inputs)
+
+  def test_generate_inputs_4_cpu(self):
+    inputs = sancov_merger.generate_inputs(
+        True, '/some/path', FILE_MAP, 4)
+    self.assertEquals(EXPECTED_INPUTS_4, inputs)
diff --git a/src/third_party/v8/tools/sanitizers/sanitize_pcs.py b/src/third_party/v8/tools/sanitizers/sanitize_pcs.py
new file mode 100755
index 0000000..a1e3a1d
--- /dev/null
+++ b/src/third_party/v8/tools/sanitizers/sanitize_pcs.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Corrects objdump output. The logic is from sancov.py, see comments there."""
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import sys
+
+for line in sys.stdin:
+  print('0x%x' % (int(line.strip(), 16) + 4))
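+
+# Illustrative example (hypothetical input): the stdin line
+# '0x00000000004005d0' is printed as '0x4005d4'.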
diff --git a/src/third_party/v8/tools/sanitizers/tsan_suppressions.txt b/src/third_party/v8/tools/sanitizers/tsan_suppressions.txt
new file mode 100644
index 0000000..270340e
--- /dev/null
+++ b/src/third_party/v8/tools/sanitizers/tsan_suppressions.txt
@@ -0,0 +1,6 @@
+# Suppressions for TSan v2
+# https://code.google.com/p/thread-sanitizer/wiki/Suppressions
+
+# Incorrectly detected lock cycles in test-lockers
+# https://code.google.com/p/thread-sanitizer/issues/detail?id=81
+deadlock:LockAndUnlockDifferentIsolatesThread::Run
diff --git a/src/third_party/v8/tools/shell-utils.h b/src/third_party/v8/tools/shell-utils.h
new file mode 100644
index 0000000..b41d327
--- /dev/null
+++ b/src/third_party/v8/tools/shell-utils.h
@@ -0,0 +1,70 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Utility functions used by parser-shell.
+
+#include "src/common/globals.h"
+
+#include <stdio.h>
+
+namespace v8 {
+namespace internal {
+
+enum Encoding {
+  LATIN1,
+  UTF8,
+  UTF16
+};
+
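+// Reads the file |name| and returns its contents repeated |repeat| times as a
+// NUL-terminated buffer allocated with new[]. *size is set to the total
+// length (excluding the terminating NUL). Returns NULL if the file cannot be
+// opened.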
+const byte* ReadFileAndRepeat(const char* name, int* size, int repeat) {
+  FILE* file = fopen(name, "rb");
+  *size = 0;
+  if (file == NULL) return NULL;
+
+  fseek(file, 0, SEEK_END);
+  int file_size = static_cast<int>(ftell(file));
+  rewind(file);
+
+  *size = file_size * repeat;
+
+  byte* chars = new byte[*size + 1];
+  for (int i = 0; i < file_size;) {
+    int read = static_cast<int>(fread(&chars[i], 1, file_size - i, file));
+    i += read;
+  }
+  fclose(file);
+
+  for (int i = file_size; i < *size; i++) {
+    chars[i] = chars[i - file_size];
+  }
+  chars[*size] = 0;
+
+  return chars;
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/third_party/v8/tools/snapshot/asm_to_inline_asm.py b/src/third_party/v8/tools/snapshot/asm_to_inline_asm.py
new file mode 100644
index 0000000..e49c961
--- /dev/null
+++ b/src/third_party/v8/tools/snapshot/asm_to_inline_asm.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+'''
+Converts a given file in clang assembly syntax to a corresponding
+representation in inline assembly. Specifically, this is used to convert
+embedded.S to embedded.cc for Windows clang builds.
+'''
+
+import argparse
+import sys
+
+def asm_to_inl_asm(in_filename, out_filename):
+  with open(in_filename, 'r') as infile, open(out_filename, 'wb') as outfile:
+    outfile.write(b'__asm__(\n')
+    for line in infile:
+      # Escape " in the .S file before outputting it to the inline asm file.
+      line = line.replace('"', '\\"')
+      outfile.write(b'  "%s\\n"\n' % line.rstrip().encode('utf8'))
+    outfile.write(b');\n')
+  return 0
+
+if __name__ == '__main__':
+  parser = argparse.ArgumentParser(description=__doc__)
+  parser.add_argument('input', help='Name of the input assembly file')
+  parser.add_argument('output', help='Name of the target CC file')
+  args = parser.parse_args()
+  sys.exit(asm_to_inl_asm(args.input, args.output))
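+
+# Illustrative example (hypothetical input): the assembly line `.byte 7`
+# becomes the line `  ".byte 7\n"` in the generated C++ file, inside a single
+# __asm__( ... ); block.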
diff --git a/src/third_party/v8/tools/sourcemap.mjs b/src/third_party/v8/tools/sourcemap.mjs
new file mode 100644
index 0000000..8ddab13
--- /dev/null
+++ b/src/third_party/v8/tools/sourcemap.mjs
@@ -0,0 +1,384 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This is a copy from blink dev tools, see:
+// http://src.chromium.org/viewvc/blink/trunk/Source/devtools/front_end/SourceMap.js
+// revision: 153407
+
+// Added to make the file work without dev tools
+export const WebInspector = {};
+WebInspector.ParsedURL = {};
+WebInspector.ParsedURL.completeURL = function(){};
+// start of original file content
+
+/*
+ * Copyright (C) 2012 Google Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *     * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Implements Source Map V3 model. See http://code.google.com/p/closure-compiler/wiki/SourceMaps
+ * for format description.
+ * @constructor
+ * @param {string} sourceMappingURL
+ * @param {SourceMapV3} payload
+ */
+WebInspector.SourceMap = function(sourceMappingURL, payload)
+{
+    if (!WebInspector.SourceMap.prototype._base64Map) {
+        const base64Digits = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+        WebInspector.SourceMap.prototype._base64Map = {};
+        for (let i = 0; i < base64Digits.length; ++i)
+            WebInspector.SourceMap.prototype._base64Map[base64Digits.charAt(i)] = i;
+    }
+
+    this._sourceMappingURL = sourceMappingURL;
+    this._reverseMappingsBySourceURL = {};
+    this._mappings = [];
+    this._sources = {};
+    this._sourceContentByURL = {};
+    this._parseMappingPayload(payload);
+}
+
+/**
+ * @param {string} sourceMapURL
+ * @param {string} compiledURL
+ * @param {function(WebInspector.SourceMap)} callback
+ */
+WebInspector.SourceMap.load = function(sourceMapURL, compiledURL, callback)
+{
+    NetworkAgent.loadResourceForFrontend(WebInspector.resourceTreeModel.mainFrame.id, sourceMapURL, undefined, contentLoaded.bind(this));
+
+    /**
+     * @param {?Protocol.Error} error
+     * @param {number} statusCode
+     * @param {NetworkAgent.Headers} headers
+     * @param {string} content
+     */
+    function contentLoaded(error, statusCode, headers, content)
+    {
+        if (error || !content || statusCode >= 400) {
+            console.error(`Could not load content for ${sourceMapURL} : ${error || (`HTTP status code: ${statusCode}`)}`);
+            callback(null);
+            return;
+        }
+
+        if (content.slice(0, 3) === ")]}")
+            content = content.substring(content.indexOf('\n'));
+        try {
+            const payload = /** @type {SourceMapV3} */ (JSON.parse(content));
+            const baseURL = sourceMapURL.startsWith("data:") ? compiledURL : sourceMapURL;
+            callback(new WebInspector.SourceMap(baseURL, payload));
+        } catch(e) {
+            console.error(e.message);
+            callback(null);
+        }
+    }
+}
+
+WebInspector.SourceMap.prototype = {
+    /**
+     * @return {Array.<string>}
+     */
+    sources()
+    {
+        return Object.keys(this._sources);
+    },
+
+    /**
+     * @param {string} sourceURL
+     * @return {string|undefined}
+     */
+    sourceContent(sourceURL)
+    {
+        return this._sourceContentByURL[sourceURL];
+    },
+
+    /**
+     * @param {string} sourceURL
+     * @param {WebInspector.ResourceType} contentType
+     * @return {WebInspector.ContentProvider}
+     */
+    sourceContentProvider(sourceURL, contentType)
+    {
+        const lastIndexOfDot = sourceURL.lastIndexOf(".");
+        const extension = lastIndexOfDot !== -1 ? sourceURL.substr(lastIndexOfDot + 1) : "";
+        const mimeType = WebInspector.ResourceType.mimeTypesForExtensions[extension.toLowerCase()];
+        const sourceContent = this.sourceContent(sourceURL);
+        if (sourceContent)
+            return new WebInspector.StaticContentProvider(contentType, sourceContent, mimeType);
+        return new WebInspector.CompilerSourceMappingContentProvider(sourceURL, contentType, mimeType);
+    },
+
+    /**
+     * @param {SourceMapV3} mappingPayload
+     */
+    _parseMappingPayload(mappingPayload)
+    {
+        if (mappingPayload.sections)
+            this._parseSections(mappingPayload.sections);
+        else
+            this._parseMap(mappingPayload, 0, 0);
+    },
+
+    /**
+     * @param {Array.<SourceMapV3.Section>} sections
+     */
+    _parseSections(sections)
+    {
+        for (let i = 0; i < sections.length; ++i) {
+            const section = sections[i];
+            this._parseMap(section.map, section.offset.line, section.offset.column);
+        }
+    },
+
+    /**
+     * @param {number} lineNumber in compiled resource
+     * @param {number} columnNumber in compiled resource
+     * @return {?Array}
+     */
+    findEntry(lineNumber, columnNumber)
+    {
+        let first = 0;
+        let count = this._mappings.length;
+        while (count > 1) {
+          const step = count >> 1;
+          const middle = first + step;
+          const mapping = this._mappings[middle];
+          if (lineNumber < mapping[0] || (lineNumber === mapping[0] && columnNumber < mapping[1]))
+              count = step;
+          else {
+              first = middle;
+              count -= step;
+          }
+        }
+        const entry = this._mappings[first];
+        if (!first && entry && (lineNumber < entry[0] || (lineNumber === entry[0] && columnNumber < entry[1])))
+            return null;
+        return entry;
+    },
+
+    /**
+     * @param {string} sourceURL of the originating resource
+     * @param {number} lineNumber in the originating resource
+     * @return {Array}
+     */
+    findEntryReversed(sourceURL, lineNumber)
+    {
+        const mappings = this._reverseMappingsBySourceURL[sourceURL];
+        for ( ; lineNumber < mappings.length; ++lineNumber) {
+            const mapping = mappings[lineNumber];
+            if (mapping)
+                return mapping;
+        }
+        return this._mappings[0];
+    },
+
+    /**
+     * @override
+     */
+    _parseMap(map, lineNumber, columnNumber)
+    {
+        let sourceIndex = 0;
+        let sourceLineNumber = 0;
+        let sourceColumnNumber = 0;
+        let nameIndex = 0;
+
+        const sources = [];
+        const originalToCanonicalURLMap = {};
+        for (let i = 0; i < map.sources.length; ++i) {
+            const originalSourceURL = map.sources[i];
+            let sourceRoot = map.sourceRoot || "";
+            if (sourceRoot && !sourceRoot.endsWith("/")) sourceRoot += "/";
+            const href = sourceRoot + originalSourceURL;
+            const url = WebInspector.ParsedURL.completeURL(this._sourceMappingURL, href) || href;
+            originalToCanonicalURLMap[originalSourceURL] = url;
+            sources.push(url);
+            this._sources[url] = true;
+
+            if (map.sourcesContent && map.sourcesContent[i]) {
+                this._sourceContentByURL[url] = map.sourcesContent[i];
+            }
+        }
+
+        const stringCharIterator = new WebInspector.SourceMap.StringCharIterator(map.mappings);
+        let sourceURL = sources[sourceIndex];
+
+        while (true) {
+            if (stringCharIterator.peek() === ",")
+                stringCharIterator.next();
+            else {
+                while (stringCharIterator.peek() === ";") {
+                    lineNumber += 1;
+                    columnNumber = 0;
+                    stringCharIterator.next();
+                }
+                if (!stringCharIterator.hasNext())
+                    break;
+            }
+
+            columnNumber += this._decodeVLQ(stringCharIterator);
+            if (this._isSeparator(stringCharIterator.peek())) {
+                this._mappings.push([lineNumber, columnNumber]);
+                continue;
+            }
+
+            const sourceIndexDelta = this._decodeVLQ(stringCharIterator);
+            if (sourceIndexDelta) {
+                sourceIndex += sourceIndexDelta;
+                sourceURL = sources[sourceIndex];
+            }
+            sourceLineNumber += this._decodeVLQ(stringCharIterator);
+            sourceColumnNumber += this._decodeVLQ(stringCharIterator);
+            if (!this._isSeparator(stringCharIterator.peek()))
+                nameIndex += this._decodeVLQ(stringCharIterator);
+
+            this._mappings.push([lineNumber, columnNumber, sourceURL, sourceLineNumber, sourceColumnNumber]);
+        }
+
+        for (let i = 0; i < this._mappings.length; ++i) {
+            const mapping = this._mappings[i];
+            const url = mapping[2];
+            if (!url) continue;
+            if (!this._reverseMappingsBySourceURL[url]) {
+                this._reverseMappingsBySourceURL[url] = [];
+            }
+            const reverseMappings = this._reverseMappingsBySourceURL[url];
+            const sourceLine = mapping[3];
+            if (!reverseMappings[sourceLine]) {
+                reverseMappings[sourceLine] = [mapping[0], mapping[1]];
+            }
+        }
+    },
+
+    /**
+     * @param {string} char
+     * @return {boolean}
+     */
+    _isSeparator(char)
+    {
+        return char === "," || char === ";";
+    },
+
+    /**
+     * @param {WebInspector.SourceMap.StringCharIterator} stringCharIterator
+     * @return {number}
+     */
+    _decodeVLQ(stringCharIterator)
+    {
+        // Read unsigned value.
+        let result = 0;
+        let shift = 0;
+        let digit;
+        do {
+            digit = this._base64Map[stringCharIterator.next()];
+            result += (digit & this._VLQ_BASE_MASK) << shift;
+            shift += this._VLQ_BASE_SHIFT;
+        } while (digit & this._VLQ_CONTINUATION_MASK);
+
+        // Fix the sign.
+        const negative = result & 1;
+        // Use unsigned right shift, so that the 32nd bit is properly shifted
+        // to the 31st, and the 32nd becomes unset.
+        result >>>= 1;
+        if (negative) {
+          // We need to OR 0x80000000 here to ensure the 32nd bit (the sign bit
+          // in a 32bit int) is always set for negative numbers. If `result`
+          // were 1 (meaning `negative` is set and all other bits were zeros),
+          // `result` would now be 0. But -0 doesn't flip the 32nd bit as
+          // intended. All other numbers will successfully set the 32nd bit
+          // without issue, so doing this is a noop for them.
+          return -result | 0x80000000;
+        }
+        return result;
+    },
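+    // Illustrative _decodeVLQ examples (not part of the original file): the
+    // base64 digit 'A' (value 0) decodes to 0, 'C' (value 2) decodes to +1,
+    // and 'D' (value 3) decodes to -1.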
+
+    _VLQ_BASE_SHIFT: 5,
+    _VLQ_BASE_MASK: (1 << 5) - 1,
+    _VLQ_CONTINUATION_MASK: 1 << 5
+}
+
+/**
+ * @constructor
+ * @param {string} string
+ */
+WebInspector.SourceMap.StringCharIterator = function(string)
+{
+    this._string = string;
+    this._position = 0;
+}
+
+WebInspector.SourceMap.StringCharIterator.prototype = {
+    /**
+     * @return {string}
+     */
+    next()
+    {
+        return this._string.charAt(this._position++);
+    },
+
+    /**
+     * @return {string}
+     */
+    peek()
+    {
+        return this._string.charAt(this._position);
+    },
+
+    /**
+     * @return {boolean}
+     */
+    hasNext()
+    {
+        return this._position < this._string.length;
+    }
+}
diff --git a/src/third_party/v8/tools/splaytree.js b/src/third_party/v8/tools/splaytree.js
new file mode 100644
index 0000000..d272a9e
--- /dev/null
+++ b/src/third_party/v8/tools/splaytree.js
@@ -0,0 +1,327 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+/**
+ * Constructs a Splay tree.  A splay tree is a self-balancing binary
+ * search tree with the additional property that recently accessed
+ * elements are quick to access again. It performs basic operations
+ * such as insertion, look-up and removal in O(log(n)) amortized time.
+ *
+ * @constructor
+ */
+function SplayTree() {
+};
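+
+// Illustrative usage (not part of the original file):
+//   var tree = new SplayTree();
+//   tree.insert(3, 'three');
+//   tree.find(3).value;              // 'three'
+//   tree.findGreatestLessThan(10);   // the node with key 3
+//   tree.remove(3);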
+
+
+/**
+ * Pointer to the root node of the tree.
+ *
+ * @type {SplayTree.Node}
+ * @private
+ */
+SplayTree.prototype.root_ = null;
+
+
+/**
+ * @return {boolean} Whether the tree is empty.
+ */
+SplayTree.prototype.isEmpty = function() {
+  return !this.root_;
+};
+
+
+
+/**
+ * Inserts a node into the tree with the specified key and value if
+ * the tree does not already contain a node with the specified key. If
+ * the value is inserted, it becomes the root of the tree.
+ *
+ * @param {number} key Key to insert into the tree.
+ * @param {*} value Value to insert into the tree.
+ */
+SplayTree.prototype.insert = function(key, value) {
+  if (this.isEmpty()) {
+    this.root_ = new SplayTree.Node(key, value);
+    return;
+  }
+  // Splay on the key to move the last node on the search path for
+  // the key to the root of the tree.
+  this.splay_(key);
+  if (this.root_.key == key) {
+    return;
+  }
+  var node = new SplayTree.Node(key, value);
+  if (key > this.root_.key) {
+    node.left = this.root_;
+    node.right = this.root_.right;
+    this.root_.right = null;
+  } else {
+    node.right = this.root_;
+    node.left = this.root_.left;
+    this.root_.left = null;
+  }
+  this.root_ = node;
+};
+
+
+/**
+ * Removes a node with the specified key from the tree if the tree
+ * contains a node with this key. The removed node is returned. If the
+ * key is not found, an exception is thrown.
+ *
+ * @param {number} key Key to find and remove from the tree.
+ * @return {SplayTree.Node} The removed node.
+ */
+SplayTree.prototype.remove = function(key) {
+  if (this.isEmpty()) {
+    throw Error('Key not found: ' + key);
+  }
+  this.splay_(key);
+  if (this.root_.key != key) {
+    throw Error('Key not found: ' + key);
+  }
+  var removed = this.root_;
+  if (!this.root_.left) {
+    this.root_ = this.root_.right;
+  } else {
+    var right = this.root_.right;
+    this.root_ = this.root_.left;
+    // Splay to make sure that the new root has an empty right child.
+    this.splay_(key);
+    // Insert the original right child as the right child of the new
+    // root.
+    this.root_.right = right;
+  }
+  return removed;
+};
+
+
+/**
+ * Returns the node having the specified key or null if the tree doesn't contain
+ * a node with the specified key.
+ *
+ * @param {number} key Key to find in the tree.
+ * @return {SplayTree.Node} Node having the specified key.
+ */
+SplayTree.prototype.find = function(key) {
+  if (this.isEmpty()) {
+    return null;
+  }
+  this.splay_(key);
+  return this.root_.key == key ? this.root_ : null;
+};
+
+
+/**
+ * @return {SplayTree.Node} Node having the minimum key value.
+ */
+SplayTree.prototype.findMin = function() {
+  if (this.isEmpty()) {
+    return null;
+  }
+  var current = this.root_;
+  while (current.left) {
+    current = current.left;
+  }
+  return current;
+};
+
+
+/**
+ * @return {SplayTree.Node} Node having the maximum key value.
+ */
+SplayTree.prototype.findMax = function(opt_startNode) {
+  if (this.isEmpty()) {
+    return null;
+  }
+  var current = opt_startNode || this.root_;
+  while (current.right) {
+    current = current.right;
+  }
+  return current;
+};
+
+
+/**
+ * @return {SplayTree.Node} Node having the maximum key value that
+ *     is less or equal to the specified key value.
+ */
+SplayTree.prototype.findGreatestLessThan = function(key) {
+  if (this.isEmpty()) {
+    return null;
+  }
+  // Splay on the key to move the node with the given key or the last
+  // node on the search path to the top of the tree.
+  this.splay_(key);
+  // Now the result is either the root node or the greatest node in
+  // the left subtree.
+  if (this.root_.key <= key) {
+    return this.root_;
+  } else if (this.root_.left) {
+    return this.findMax(this.root_.left);
+  } else {
+    return null;
+  }
+};
+
+
+/**
+ * @return {Array<*>} An array containing all the values of tree's nodes paired
+ *     with keys.
+ */
+SplayTree.prototype.exportKeysAndValues = function() {
+  var result = [];
+  this.traverse_(function(node) { result.push([node.key, node.value]); });
+  return result;
+};
+
+
+/**
+ * @return {Array<*>} An array containing all the values of tree's nodes.
+ */
+SplayTree.prototype.exportValues = function() {
+  var result = [];
+  this.traverse_(function(node) { result.push(node.value); });
+  return result;
+};
+
+
+/**
+ * Perform the splay operation for the given key. Moves the node with
+ * the given key to the top of the tree.  If no node has the given
+ * key, the last node on the search path is moved to the top of the
+ * tree. This is the simplified top-down splaying algorithm from:
+ * "Self-adjusting Binary Search Trees" by Sleator and Tarjan
+ *
+ * @param {number} key Key to splay the tree on.
+ * @private
+ */
+SplayTree.prototype.splay_ = function(key) {
+  if (this.isEmpty()) {
+    return;
+  }
+  // Create a dummy node.  The use of the dummy node is a bit
+  // counter-intuitive: The right child of the dummy node will hold
+  // the L tree of the algorithm.  The left child of the dummy node
+  // will hold the R tree of the algorithm.  Using a dummy node, left
+  // and right will always be nodes and we avoid special cases.
+  var dummy, left, right;
+  dummy = left = right = new SplayTree.Node(null, null);
+  var current = this.root_;
+  while (true) {
+    if (key < current.key) {
+      if (!current.left) {
+        break;
+      }
+      if (key < current.left.key) {
+        // Rotate right.
+        var tmp = current.left;
+        current.left = tmp.right;
+        tmp.right = current;
+        current = tmp;
+        if (!current.left) {
+          break;
+        }
+      }
+      // Link right.
+      right.left = current;
+      right = current;
+      current = current.left;
+    } else if (key > current.key) {
+      if (!current.right) {
+        break;
+      }
+      if (key > current.right.key) {
+        // Rotate left.
+        var tmp = current.right;
+        current.right = tmp.left;
+        tmp.left = current;
+        current = tmp;
+        if (!current.right) {
+          break;
+        }
+      }
+      // Link left.
+      left.right = current;
+      left = current;
+      current = current.right;
+    } else {
+      break;
+    }
+  }
+  // Assemble.
+  left.right = current.left;
+  right.left = current.right;
+  current.left = dummy.right;
+  current.right = dummy.left;
+  this.root_ = current;
+};
+
+
+/**
+ * Performs a breadth-first (level-order) traversal of the tree.
+ *
+ * @param {function(SplayTree.Node)} f Visitor function.
+ * @private
+ */
+SplayTree.prototype.traverse_ = function(f) {
+  var nodesToVisit = [this.root_];
+  while (nodesToVisit.length > 0) {
+    var node = nodesToVisit.shift();
+    if (node == null) {
+      continue;
+    }
+    f(node);
+    nodesToVisit.push(node.left);
+    nodesToVisit.push(node.right);
+  }
+};
+
+
+/**
+ * Constructs a Splay tree node.
+ *
+ * @param {number} key Key.
+ * @param {*} value Value.
+ */
+SplayTree.Node = function(key, value) {
+  this.key = key;
+  this.value = value;
+};
+
+
+/**
+ * @type {SplayTree.Node}
+ */
+SplayTree.Node.prototype.left = null;
+
+
+/**
+ * @type {SplayTree.Node}
+ */
+SplayTree.Node.prototype.right = null;
diff --git a/src/third_party/v8/tools/splaytree.mjs b/src/third_party/v8/tools/splaytree.mjs
new file mode 100644
index 0000000..eaba4e4
--- /dev/null
+++ b/src/third_party/v8/tools/splaytree.mjs
@@ -0,0 +1,327 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+/**
+ * Constructs a Splay tree.  A splay tree is a self-balancing binary
+ * search tree with the additional property that recently accessed
+ * elements are quick to access again. It performs basic operations
+ * such as insertion, look-up and removal in O(log(n)) amortized time.
+ *
+ * @constructor
+ */
+export function SplayTree() {
+};
+
+
+/**
+ * Pointer to the root node of the tree.
+ *
+ * @type {SplayTree.Node}
+ * @private
+ */
+SplayTree.prototype.root_ = null;
+
+
+/**
+ * @return {boolean} Whether the tree is empty.
+ */
+SplayTree.prototype.isEmpty = function() {
+  return !this.root_;
+};
+
+
+
+/**
+ * Inserts a node into the tree with the specified key and value if
+ * the tree does not already contain a node with the specified key. If
+ * the value is inserted, it becomes the root of the tree.
+ *
+ * @param {number} key Key to insert into the tree.
+ * @param {*} value Value to insert into the tree.
+ */
+SplayTree.prototype.insert = function(key, value) {
+  if (this.isEmpty()) {
+    this.root_ = new SplayTree.Node(key, value);
+    return;
+  }
+  // Splay on the key to move the last node on the search path for
+  // the key to the root of the tree.
+  this.splay_(key);
+  if (this.root_.key == key) {
+    return;
+  }
+  const node = new SplayTree.Node(key, value);
+  if (key > this.root_.key) {
+    node.left = this.root_;
+    node.right = this.root_.right;
+    this.root_.right = null;
+  } else {
+    node.right = this.root_;
+    node.left = this.root_.left;
+    this.root_.left = null;
+  }
+  this.root_ = node;
+};
+
+
+/**
+ * Removes a node with the specified key from the tree if the tree
+ * contains a node with this key. The removed node is returned. If the
+ * key is not found, an exception is thrown.
+ *
+ * @param {number} key Key to find and remove from the tree.
+ * @return {SplayTree.Node} The removed node.
+ */
+SplayTree.prototype.remove = function(key) {
+  if (this.isEmpty()) {
+    throw Error(`Key not found: ${key}`);
+  }
+  this.splay_(key);
+  if (this.root_.key != key) {
+    throw Error(`Key not found: ${key}`);
+  }
+  const removed = this.root_;
+  if (!this.root_.left) {
+    this.root_ = this.root_.right;
+  } else {
+    const { right } = this.root_;
+    this.root_ = this.root_.left;
+    // Splay to make sure that the new root has an empty right child.
+    this.splay_(key);
+    // Insert the original right child as the right child of the new
+    // root.
+    this.root_.right = right;
+  }
+  return removed;
+};
+
+
+/**
+ * Returns the node having the specified key or null if the tree doesn't contain
+ * a node with the specified key.
+ *
+ * @param {number} key Key to find in the tree.
+ * @return {SplayTree.Node} Node having the specified key.
+ */
+SplayTree.prototype.find = function(key) {
+  if (this.isEmpty()) {
+    return null;
+  }
+  this.splay_(key);
+  return this.root_.key == key ? this.root_ : null;
+};
+
+
+/**
+ * @return {SplayTree.Node} Node having the minimum key value.
+ */
+SplayTree.prototype.findMin = function() {
+  if (this.isEmpty()) {
+    return null;
+  }
+  let current = this.root_;
+  while (current.left) {
+    current = current.left;
+  }
+  return current;
+};
+
+
+/**
+ * @return {SplayTree.Node} Node having the maximum key value.
+ */
+SplayTree.prototype.findMax = function(opt_startNode) {
+  if (this.isEmpty()) {
+    return null;
+  }
+  let current = opt_startNode || this.root_;
+  while (current.right) {
+    current = current.right;
+  }
+  return current;
+};
+
+
+/**
+ * @return {SplayTree.Node} Node having the maximum key value that
+ *     is less or equal to the specified key value.
+ */
+SplayTree.prototype.findGreatestLessThan = function(key) {
+  if (this.isEmpty()) {
+    return null;
+  }
+  // Splay on the key to move the node with the given key or the last
+  // node on the search path to the top of the tree.
+  this.splay_(key);
+  // Now the result is either the root node or the greatest node in
+  // the left subtree.
+  if (this.root_.key <= key) {
+    return this.root_;
+  } else if (this.root_.left) {
+    return this.findMax(this.root_.left);
+  } else {
+    return null;
+  }
+};
+
+
+/**
+ * @return {Array<*>} An array containing all the values of tree's nodes paired
+ *     with keys.
+ */
+SplayTree.prototype.exportKeysAndValues = function() {
+  const result = [];
+  this.traverse_(function(node) { result.push([node.key, node.value]); });
+  return result;
+};
+
+
+/**
+ * @return {Array<*>} An array containing all the values of tree's nodes.
+ */
+SplayTree.prototype.exportValues = function() {
+  const result = [];
+  this.traverse_(function(node) { result.push(node.value); });
+  return result;
+};
+
+
+/**
+ * Perform the splay operation for the given key. Moves the node with
+ * the given key to the top of the tree.  If no node has the given
+ * key, the last node on the search path is moved to the top of the
+ * tree. This is the simplified top-down splaying algorithm from:
+ * "Self-adjusting Binary Search Trees" by Sleator and Tarjan
+ *
+ * @param {number} key Key to splay the tree on.
+ * @private
+ */
+SplayTree.prototype.splay_ = function(key) {
+  if (this.isEmpty()) {
+    return;
+  }
+  // Create a dummy node.  The use of the dummy node is a bit
+  // counter-intuitive: The right child of the dummy node will hold
+  // the L tree of the algorithm.  The left child of the dummy node
+  // will hold the R tree of the algorithm.  Using a dummy node, left
+  // and right will always be nodes and we avoid special cases.
+  let dummy, left, right;
+  dummy = left = right = new SplayTree.Node(null, null);
+  let current = this.root_;
+  while (true) {
+    if (key < current.key) {
+      if (!current.left) {
+        break;
+      }
+      if (key < current.left.key) {
+        // Rotate right.
+        const tmp = current.left;
+        current.left = tmp.right;
+        tmp.right = current;
+        current = tmp;
+        if (!current.left) {
+          break;
+        }
+      }
+      // Link right.
+      right.left = current;
+      right = current;
+      current = current.left;
+    } else if (key > current.key) {
+      if (!current.right) {
+        break;
+      }
+      if (key > current.right.key) {
+        // Rotate left.
+        const tmp = current.right;
+        current.right = tmp.left;
+        tmp.left = current;
+        current = tmp;
+        if (!current.right) {
+          break;
+        }
+      }
+      // Link left.
+      left.right = current;
+      left = current;
+      current = current.right;
+    } else {
+      break;
+    }
+  }
+  // Assemble.
+  left.right = current.left;
+  right.left = current.right;
+  current.left = dummy.right;
+  current.right = dummy.left;
+  this.root_ = current;
+};
+
+
+/**
+ * Performs a breadth-first (level-order) traversal of the tree.
+ *
+ * @param {function(SplayTree.Node)} f Visitor function.
+ * @private
+ */
+SplayTree.prototype.traverse_ = function(f) {
+  const nodesToVisit = [this.root_];
+  while (nodesToVisit.length > 0) {
+    const node = nodesToVisit.shift();
+    if (node == null) {
+      continue;
+    }
+    f(node);
+    nodesToVisit.push(node.left);
+    nodesToVisit.push(node.right);
+  }
+};
+
+
+/**
+ * Constructs a Splay tree node.
+ *
+ * @param {number} key Key.
+ * @param {*} value Value.
+ */
+SplayTree.Node = function(key, value) {
+  this.key = key;
+  this.value = value;
+};
+
+
+/**
+ * @type {SplayTree.Node}
+ */
+SplayTree.Node.prototype.left = null;
+
+
+/**
+ * @type {SplayTree.Node}
+ */
+SplayTree.Node.prototype.right = null;
diff --git a/src/third_party/v8/tools/stats-viewer.py b/src/third_party/v8/tools/stats-viewer.py
new file mode 100755
index 0000000..dd9d2c2
--- /dev/null
+++ b/src/third_party/v8/tools/stats-viewer.py
@@ -0,0 +1,475 @@
+#!/usr/bin/env python
+#
+# Copyright 2008 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+"""A cross-platform execution counter viewer.
+
+The stats viewer reads counters from a binary file and displays them
+in a window, re-reading and re-displaying with regular intervals.
+"""
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import mmap
+import optparse
+import os
+import re
+import struct
+import sys
+import time
+import Tkinter
+
+
+# The interval, in milliseconds, between ui updates
+UPDATE_INTERVAL_MS = 100
+
+
+# Mapping from counter prefix to the formatting to be used for the counter
+COUNTER_LABELS = {"t": "%i ms.", "c": "%i"}
+
+
+# The magic numbers used to check if a file is not a counters file
+COUNTERS_FILE_MAGIC_NUMBER = 0xDEADFACE
+CHROME_COUNTERS_FILE_MAGIC_NUMBER = 0x13131313
+
+
+class StatsViewer(object):
+  """The main class that keeps the data used by the stats viewer."""
+
+  def __init__(self, data_name, name_filter):
+    """Creates a new instance.
+
+    Args:
+      data_name: the name of the file containing the counters.
+      name_filter: The regexp filter to apply to counter names.
+    """
+    self.data_name = data_name
+    self.name_filter = name_filter
+
+    # The handle created by mmap.mmap to the counters file.  We need
+    # this to clean it up on exit.
+    self.shared_mmap = None
+
+    # A mapping from counter names to the ui element that displays
+    # them
+    self.ui_counters = {}
+
+    # The counter collection used to access the counters file
+    self.data = None
+
+    # The Tkinter root window object
+    self.root = None
+
+  def Run(self):
+    """The main entry-point to running the stats viewer."""
+    try:
+      self.data = self.MountSharedData()
+      # OpenWindow blocks until the main window is closed
+      self.OpenWindow()
+    finally:
+      self.CleanUp()
+
+  def MountSharedData(self):
+    """Mount the binary counters file as a memory-mapped file.  If
+    something goes wrong print an informative message and exit the
+    program."""
+    if not os.path.exists(self.data_name):
+      maps_name = "/proc/%s/maps" % self.data_name
+      if not os.path.exists(maps_name):
+        print("\"%s\" is neither a counter file nor a PID." % self.data_name)
+        sys.exit(1)
+      maps_file = open(maps_name, "r")
+      try:
+        pid = self.data_name
+        self.data_name = None
+        for m in re.finditer(r"/dev/shm/\S*", maps_file.read()):
+          if os.path.exists(m.group(0)):
+            self.data_name = m.group(0)
+            break
+        if self.data_name is None:
+          print("Can't find counter file in maps for PID %s." % pid)
+          sys.exit(1)
+      finally:
+        maps_file.close()
+    data_file = open(self.data_name, "r")
+    size = os.fstat(data_file.fileno()).st_size
+    fileno = data_file.fileno()
+    self.shared_mmap = mmap.mmap(fileno, size, access=mmap.ACCESS_READ)
+    data_access = SharedDataAccess(self.shared_mmap)
+    if data_access.IntAt(0) == COUNTERS_FILE_MAGIC_NUMBER:
+      return CounterCollection(data_access)
+    elif data_access.IntAt(0) == CHROME_COUNTERS_FILE_MAGIC_NUMBER:
+      return ChromeCounterCollection(data_access)
+    print("File %s is not stats data." % self.data_name)
+    sys.exit(1)
+
+  def CleanUp(self):
+    """Cleans up the memory mapped file if necessary."""
+    if self.shared_mmap:
+      self.shared_mmap.close()
+
+  def UpdateCounters(self):
+    """Read the contents of the memory-mapped file and update the ui if
+    necessary.  If the same counters are present in the file as before
+    we just update the existing labels.  If any counters have been added
+    or removed we scrap the existing ui and draw a new one.
+    """
+    changed = False
+    counters_in_use = self.data.CountersInUse()
+    if counters_in_use != len(self.ui_counters):
+      self.RefreshCounters()
+      changed = True
+    else:
+      for i in range(self.data.CountersInUse()):
+        counter = self.data.Counter(i)
+        name = counter.Name()
+        if name in self.ui_counters:
+          value = counter.Value()
+          ui_counter = self.ui_counters[name]
+          counter_changed = ui_counter.Set(value)
+          changed = (changed or counter_changed)
+        else:
+          self.RefreshCounters()
+          changed = True
+          break
+    if changed:
+      # The title of the window shows the last time the file was
+      # changed.
+      self.UpdateTime()
+    self.ScheduleUpdate()
+
+  def UpdateTime(self):
+    """Update the title of the window with the current time."""
+    self.root.title("Stats Viewer [updated %s]" % time.strftime("%H:%M:%S"))
+
+  def ScheduleUpdate(self):
+    """Schedules the next ui update."""
+    self.root.after(UPDATE_INTERVAL_MS, lambda: self.UpdateCounters())
+
+  def RefreshCounters(self):
+    """Tear down and rebuild the controls in the main window."""
+    counters = self.ComputeCounters()
+    self.RebuildMainWindow(counters)
+
+  def ComputeCounters(self):
+    """Group the counters by the suffix of their name.
+
+    Since the same code-level counter (for instance "X") can result in
+    several variables in the binary counters file that differ only by a
+    two-character prefix (for instance "c:X" and "t:X") counters are
+    grouped by suffix and then displayed with custom formatting
+    depending on their prefix.
+
+    Returns:
+      A mapping from suffixes to a list of counters with that suffix,
+      sorted by prefix.
+    """
+    names = {}
+    for i in range(self.data.CountersInUse()):
+      counter = self.data.Counter(i)
+      name = counter.Name()
+      names[name] = counter
+
+    # By sorting the keys we ensure that the prefixes always come in the
+    # same order ("c:" before "t:") which looks more consistent in the
+    # ui.
+    sorted_keys = sorted(names.keys())
+
+    # Group together the names whose suffix after a ':' are the same.
+    groups = {}
+    for name in sorted_keys:
+      counter = names[name]
+      if ":" in name:
+        name = name[name.find(":")+1:]
+      if not name in groups:
+        groups[name] = []
+      groups[name].append(counter)
+
+    return groups
+
+  def RebuildMainWindow(self, groups):
+    """Tear down and rebuild the main window.
+
+    Args:
+      groups: the groups of counters to display
+    """
+    # Remove elements in the current ui
+    self.ui_counters.clear()
+    for child in self.root.children.values():
+      child.destroy()
+
+    # Build new ui
+    index = 0
+    sorted_groups = sorted(groups.keys())
+    for counter_name in sorted_groups:
+      counter_objs = groups[counter_name]
+      if self.name_filter.match(counter_name):
+        name = Tkinter.Label(self.root, width=50, anchor=Tkinter.W,
+                             text=counter_name)
+        name.grid(row=index, column=0, padx=1, pady=1)
+      count = len(counter_objs)
+      for i in range(count):
+        counter = counter_objs[i]
+        name = counter.Name()
+        var = Tkinter.StringVar()
+        if self.name_filter.match(name):
+          value = Tkinter.Label(self.root, width=15, anchor=Tkinter.W,
+                                textvariable=var)
+          value.grid(row=index, column=(1 + i), padx=1, pady=1)
+
+        # If we know how to interpret the prefix of this counter then
+        # add an appropriate formatting to the variable
+        if (":" in name) and (name[0] in COUNTER_LABELS):
+          format = COUNTER_LABELS[name[0]]
+        else:
+          format = "%i"
+        ui_counter = UiCounter(var, format)
+        self.ui_counters[name] = ui_counter
+        ui_counter.Set(counter.Value())
+      index += 1
+    self.root.update()
+
+  def OpenWindow(self):
+    """Create and display the root window."""
+    self.root = Tkinter.Tk()
+
+    # Tkinter is no good at resizing so we disable it
+    self.root.resizable(width=False, height=False)
+    self.RefreshCounters()
+    self.ScheduleUpdate()
+    self.root.mainloop()
+
+
+class UiCounter(object):
+  """A counter in the ui."""
+
+  def __init__(self, var, format):
+    """Creates a new ui counter.
+
+    Args:
+      var: the Tkinter string variable for updating the ui
+      format: the format string used to format this counter
+    """
+    self.var = var
+    self.format = format
+    self.last_value = None
+
+  def Set(self, value):
+    """Updates the ui for this counter.
+
+    Args:
+      value: The value to display
+
+    Returns:
+      True if the value had changed, otherwise False.  The first call
+      always returns True.
+    """
+    if value == self.last_value:
+      return False
+    else:
+      self.last_value = value
+      self.var.set(self.format % value)
+      return True
+
+
+class SharedDataAccess(object):
+  """A utility class for reading data from the memory-mapped binary
+  counters file."""
+
+  def __init__(self, data):
+    """Create a new instance.
+
+    Args:
+      data: A handle to the memory-mapped file, as returned by mmap.mmap.
+    """
+    self.data = data
+
+  def ByteAt(self, index):
+    """Return the (unsigned) byte at the specified byte index."""
+    return ord(self.CharAt(index))
+
+  def IntAt(self, index):
+    """Return the little-endian 32-byte int at the specified byte index."""
+    word_str = self.data[index:index+4]
+    result, = struct.unpack("I", word_str)
+    return result
+
+  def CharAt(self, index):
+    """Return the ascii character at the specified byte index."""
+    return self.data[index]
+
+
+class Counter(object):
+  """A pointer to a single counter within a binary counters file."""
+
+  def __init__(self, data, offset):
+    """Create a new instance.
+
+    Args:
+      data: the shared data access object containing the counter
+      offset: the byte offset of the start of this counter
+    """
+    self.data = data
+    self.offset = offset
+
+  def Value(self):
+    """Return the integer value of this counter."""
+    return self.data.IntAt(self.offset)
+
+  def Name(self):
+    """Return the ascii name of this counter."""
+    result = ""
+    index = self.offset + 4
+    current = self.data.ByteAt(index)
+    while current:
+      result += chr(current)
+      index += 1
+      current = self.data.ByteAt(index)
+    return result
+
+
+class CounterCollection(object):
+  """An overlay over a counters file that provides access to the
+  individual counters contained in the file."""
+
+  def __init__(self, data):
+    """Create a new instance.
+
+    Args:
+      data: the shared data access object
+    """
+    self.data = data
+    self.max_counters = data.IntAt(4)
+    self.max_name_size = data.IntAt(8)
+
+  def CountersInUse(self):
+    """Return the number of counters in active use."""
+    return self.data.IntAt(12)
+
+  def Counter(self, index):
+    """Return the index'th counter."""
+    return Counter(self.data, 16 + index * self.CounterSize())
+
+  def CounterSize(self):
+    """Return the size of a single counter."""
+    return 4 + self.max_name_size
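+
+  # Layout sketch inferred from the accessors above (illustrative, not
+  # authoritative): a 16-byte header (magic, max_counters, max_name_size,
+  # counters in use), followed by fixed-size records consisting of a 4-byte
+  # value and a zero-terminated name field of max_name_size bytes.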
+
+
+class ChromeCounter(object):
+  """A pointer to a single counter within a binary counters file."""
+
+  def __init__(self, data, name_offset, value_offset):
+    """Create a new instance.
+
+    Args:
+      data: the shared data access object containing the counter
+      name_offset: the byte offset of the start of this counter's name
+      value_offset: the byte offset of the start of this counter's value
+    """
+    self.data = data
+    self.name_offset = name_offset
+    self.value_offset = value_offset
+
+  def Value(self):
+    """Return the integer value of this counter."""
+    return self.data.IntAt(self.value_offset)
+
+  def Name(self):
+    """Return the ascii name of this counter."""
+    result = ""
+    index = self.name_offset
+    current = self.data.ByteAt(index)
+    while current:
+      result += chr(current)
+      index += 1
+      current = self.data.ByteAt(index)
+    return result
+
+
+class ChromeCounterCollection(object):
+  """An overlay over a counters file that provides access to the
+  individual counters contained in the file."""
+
+  _HEADER_SIZE = 4 * 4
+  _COUNTER_NAME_SIZE = 64
+  _THREAD_NAME_SIZE = 32
+
+  def __init__(self, data):
+    """Create a new instance.
+
+    Args:
+      data: the shared data access object
+    """
+    self.data = data
+    self.max_counters = data.IntAt(8)
+    self.max_threads = data.IntAt(12)
+    self.counter_names_offset = \
+        self._HEADER_SIZE + self.max_threads * (self._THREAD_NAME_SIZE + 2 * 4)
+    self.counter_values_offset = \
+        self.counter_names_offset + self.max_counters * self._COUNTER_NAME_SIZE
+
+  def CountersInUse(self):
+    """Return the number of counters in active use."""
+    for i in range(self.max_counters):
+      name_offset = self.counter_names_offset + i * self._COUNTER_NAME_SIZE
+      if self.data.ByteAt(name_offset) == 0:
+        return i
+    return self.max_counters
+
+  def Counter(self, i):
+    """Return the i'th counter."""
+    name_offset = self.counter_names_offset + i * self._COUNTER_NAME_SIZE
+    value_offset = self.counter_values_offset + i * self.max_threads * 4
+    return ChromeCounter(self.data, name_offset, value_offset)
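+
+  # Layout sketch inferred from the offsets above (illustrative, not
+  # authoritative): a 16-byte header (magic at offset 0, max_counters at 8,
+  # max_threads at 12), then max_threads thread records of
+  # _THREAD_NAME_SIZE + 8 bytes each, then max_counters names of
+  # _COUNTER_NAME_SIZE bytes each, then per-counter value blocks of
+  # max_threads * 4 bytes.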
+
+
+def Main(data_file, name_filter):
+  """Run the stats counter.
+
+  Args:
+    data_file: The counters file to monitor.
+    name_filter: The regexp filter to apply to counter names.
+  """
+  StatsViewer(data_file, name_filter).Run()
+
+
+if __name__ == "__main__":
+  parser = optparse.OptionParser("usage: %prog [--filter=re] "
+                                 "<stats data>|<test_shell pid>")
+  parser.add_option("--filter",
+                    default=".*",
+                    help=("regexp filter for counter names "
+                          "[default: %default]"))
+  (options, args) = parser.parse_args()
+  if len(args) != 1:
+    parser.print_help()
+    sys.exit(1)
+  Main(args[0], re.compile(options.filter))
diff --git a/src/third_party/v8/tools/system-analyzer/app-model.mjs b/src/third_party/v8/tools/system-analyzer/app-model.mjs
new file mode 100644
index 0000000..a0b176c
--- /dev/null
+++ b/src/third_party/v8/tools/system-analyzer/app-model.mjs
@@ -0,0 +1,125 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+class State {
+  _timeSelection = {start: 0, end: Infinity};
+  _map;
+  _ic;
+  _selectedMapLogEntries;
+  _selectedIcLogEntries;
+  _selectedDeoptLogEntries;
+  _selectedSourcePositions;
+  _nofChunks;
+  _chunks;
+  _icTimeline;
+  _mapTimeline;
+  _deoptTimeline;
+  _minStartTime = Number.POSITIVE_INFINITY;
+  _maxEndTime = Number.NEGATIVE_INFINITY;
+  get minStartTime() {
+    return this._minStartTime;
+  }
+  get maxEndTime() {
+    return this._maxEndTime;
+  }
+
+  selectTimeRange(start, end) {
+    this.timeSelection.start = start;
+    this.timeSelection.end = end;
+    this._icTimeline.selectTimeRange(start, end);
+    this._mapTimeline.selectTimeRange(start, end);
+    this._deoptTimeline.selectTimeRange(start, end);
+  }
+
+  _updateTimeRange(timeline) {
+    this._minStartTime = Math.min(this._minStartTime, timeline.startTime);
+    this._maxEndTime = Math.max(this._maxEndTime, timeline.endTime);
+    timeline.startTime = this._minStartTime;
+    timeline.endTime = this._maxEndTime;
+  }
+  get mapTimeline() {
+    return this._mapTimeline;
+  }
+  set mapTimeline(timeline) {
+    this._updateTimeRange(timeline);
+    this._mapTimeline = timeline;
+  }
+  get icTimeline() {
+    return this._icTimeline;
+  }
+  set icTimeline(timeline) {
+    this._updateTimeRange(timeline);
+    this._icTimeline = timeline;
+  }
+  get deoptTimeline() {
+    return this._deoptTimeline;
+  }
+  set deoptTimeline(timeline) {
+    this._updateTimeRange(timeline);
+    this._deoptTimeline = timeline;
+  }
+  set chunks(value) {
+    // TODO(zcankara) split up between maps and ics, and every timeline track
+    this._chunks = value;
+  }
+  get chunks() {
+    // TODO(zcankara) split up between maps and ics, and every timeline track
+    return this._chunks;
+  }
+  get nofChunks() {
+    return this._nofChunks;
+  }
+  set nofChunks(count) {
+    this._nofChunks = count;
+  }
+  get map() {
+    // TODO(zcankara) rename as selectedMapEvents, array of selected events
+    return this._map;
+  }
+  set map(value) {
+    // TODO(zcankara) rename as selectedMapEvents, array of selected events
+    if (!value) return;
+    this._map = value;
+  }
+  get ic() {
+    // TODO(zcankara) rename selectedICEvents, array of selected events
+    return this._ic;
+  }
+  set ic(value) {
+    // TODO(zcankara) rename selectedIcEvents, array of selected events
+    if (!value) return;
+    this._ic = value;
+  }
+  get selectedMapLogEntries() {
+    return this._selectedMapLogEntries;
+  }
+  set selectedMapLogEntries(value) {
+    if (!value) return;
+    this._selectedMapLogEntries = value;
+  }
+  get selectedSourcePositions() {
+    return this._selectedSourcePositions;
+  }
+  set selectedSourcePositions(value) {
+    this._selectedSourcePositions = value;
+  }
+  get selectedIcLogEntries() {
+    return this._selectedIcLogEntries;
+  }
+  set selectedIcLogEntries(value) {
+    if (!value) return;
+    this._selectedIcLogEntries = value;
+  }
+  get timeSelection() {
+    return this._timeSelection;
+  }
+  get entries() {
+    if (!this.map) return {};
+    return {
+      map: this.map.id, time: this.map.time
+    }
+  }
+}
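+
+// Illustrative usage (mirrors index.mjs, not part of the original file):
+//   state.selectTimeRange(start, end);
+//   mapPanel.selectedMapLogEntries = state.mapTimeline.selection;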
+
+export {State};
diff --git a/src/third_party/v8/tools/system-analyzer/events.mjs b/src/third_party/v8/tools/system-analyzer/events.mjs
new file mode 100644
index 0000000..6952923
--- /dev/null
+++ b/src/third_party/v8/tools/system-analyzer/events.mjs
@@ -0,0 +1,51 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+class SelectionEvent extends CustomEvent {
+  // TODO: turn into static class fields once Safari supports it.
+  static get name() {
+    return 'showentries';
+  }
+  constructor(entries) {
+    super(SelectionEvent.name, {bubbles: true, composed: true});
+    if (!Array.isArray(entries) || entries.length == 0) {
+      throw new Error('No valid entries selected!');
+    }
+    this.entries = entries;
+  }
+}
+
+class FocusEvent extends CustomEvent {
+  static get name() {
+    return 'showentrydetail';
+  }
+  constructor(entry) {
+    super(FocusEvent.name, {bubbles: true, composed: true});
+    this.entry = entry;
+  }
+}
+
+class SelectTimeEvent extends CustomEvent {
+  static get name() {
+    return 'timerangeselect';
+  }
+  constructor(start, end) {
+    super(SelectTimeEvent.name, {bubbles: true, composed: true});
+    this.start = start;
+    this.end = end;
+  }
+}
+
+class SynchronizeSelectionEvent extends CustomEvent {
+  static get name() {
+    return 'syncselection';
+  }
+  constructor(start, end) {
+    super(SynchronizeSelectionEvent.name, {bubbles: true, composed: true});
+    this.start = start;
+    this.end = end;
+  }
+}
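+
+// Illustrative usage (mirrors ic-panel.mjs and index.mjs, not part of the
+// original file): components dispatch these events and the app subscribes by
+// each event's static name:
+//   this.dispatchEvent(new FocusEvent(entry));
+//   document.addEventListener(FocusEvent.name, e => this.handleShowEntryDetail(e));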
+
+export {SelectionEvent, FocusEvent, SelectTimeEvent, SynchronizeSelectionEvent};
diff --git a/src/third_party/v8/tools/system-analyzer/helper.mjs b/src/third_party/v8/tools/system-analyzer/helper.mjs
new file mode 100644
index 0000000..854a51f
--- /dev/null
+++ b/src/third_party/v8/tools/system-analyzer/helper.mjs
@@ -0,0 +1,247 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const KB = 1024;
+const MB = KB * KB;
+const GB = MB * KB;
+const kMillis2Seconds = 1 / 1000;
+
+function formatBytes(bytes) {
+  const units = ['B', 'KiB', 'MiB', 'GiB'];
+  const divisor = 1024;
+  let index = 0;
+  while (index < units.length - 1 && bytes >= divisor) {
+    index++;
+    bytes /= divisor;
+  }
+  return bytes.toFixed(2) + units[index];
+}
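+// For example (illustrative, not part of the original file): formatBytes(512)
+// returns "512.00B" and formatBytes(2048) returns "2.00KiB".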
+
+function formatSeconds(millis) {
+  return (millis * kMillis2Seconds).toFixed(2) + 's';
+}
+
+class CSSColor {
+  static getColor(name) {
+    const style = getComputedStyle(document.body);
+    return style.getPropertyValue(`--${name}`);
+  }
+  static get backgroundColor() {
+    return CSSColor.getColor('background-color');
+  }
+  static get surfaceColor() {
+    return CSSColor.getColor('surface-color');
+  }
+  static get primaryColor() {
+    return CSSColor.getColor('primary-color');
+  }
+  static get secondaryColor() {
+    return CSSColor.getColor('secondary-color');
+  }
+  static get onSurfaceColor() {
+    return CSSColor.getColor('on-surface-color');
+  }
+  static get onBackgroundColor() {
+    return CSSColor.getColor('on-background-color');
+  }
+  static get onPrimaryColor() {
+    return CSSColor.getColor('on-primary-color');
+  }
+  static get onSecondaryColor() {
+    return CSSColor.getColor('on-secondary-color');
+  }
+  static get defaultColor() {
+    return CSSColor.getColor('default-color');
+  }
+  static get errorColor() {
+    return CSSColor.getColor('error-color');
+  }
+  static get mapBackgroundColor() {
+    return CSSColor.getColor('map-background-color');
+  }
+  static get timelineBackgroundColor() {
+    return CSSColor.getColor('timeline-background-color');
+  }
+  static get red() {
+    return CSSColor.getColor('red');
+  }
+  static get green() {
+    return CSSColor.getColor('green');
+  }
+  static get yellow() {
+    return CSSColor.getColor('yellow');
+  }
+  static get blue() {
+    return CSSColor.getColor('blue');
+  }
+  static get orange() {
+    return CSSColor.getColor('orange');
+  }
+  static get violet() {
+    return CSSColor.getColor('violet');
+  }
+}
+
+function typeToColor(type) {
+  switch (type) {
+    case 'new':
+      return CSSColor.green;
+    case 'Normalize':
+      return CSSColor.violet;
+    case 'SlowToFast':
+      return CSSColor.orange;
+    case 'InitialMap':
+      return CSSColor.yellow;
+    case 'Transition':
+      return CSSColor.primaryColor;
+    case 'ReplaceDescriptors':
+      return CSSColor.red;
+    case 'LoadGlobalIC':
+      return CSSColor.green;
+    case 'LoadIC':
+      return CSSColor.primaryColor;
+    case 'StoreInArrayLiteralIC':
+      return CSSColor.violet;
+    case 'StoreGlobalIC':
+      return CSSColor.blue;
+    case 'StoreIC':
+      return CSSColor.orange;
+    case 'KeyedLoadIC':
+      return CSSColor.red;
+    case 'KeyedStoreIC':
+      return CSSColor.yellow;
+  }
+  return CSSColor.secondaryColor;
+}
+
+class DOM {
+  static div(classes) {
+    const node = document.createElement('div');
+    if (classes !== void 0) {
+      if (typeof classes === 'string') {
+        node.classList.add(classes);
+      } else {
+        classes.forEach(cls => node.classList.add(cls));
+      }
+    }
+    return node;
+  }
+
+  static table(className) {
+    const node = document.createElement('table');
+    if (className) node.classList.add(className);
+    return node;
+  }
+
+  static td(textOrNode, className) {
+    const node = document.createElement('td');
+    if (typeof textOrNode === 'object') {
+      node.appendChild(textOrNode);
+    } else if (textOrNode) {
+      node.innerText = textOrNode;
+    }
+    if (className) node.classList.add(className);
+    return node;
+  }
+
+  static tr(className) {
+    const node = document.createElement('tr');
+    if (className) node.classList.add(className);
+    return node;
+  }
+
+  static text(string) {
+    return document.createTextNode(string);
+  }
+
+  static removeAllChildren(node) {
+    let range = document.createRange();
+    range.selectNodeContents(node);
+    range.deleteContents();
+  }
+
+  static defineCustomElement(path, generator) {
+    let name = path.substring(path.lastIndexOf('/') + 1, path.length);
+    path = path + '-template.html';
+    fetch(path)
+        .then(stream => stream.text())
+        .then(
+            templateText =>
+                customElements.define(name, generator(templateText)));
+  }
+}
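+
+// Illustrative usage (mirrors ic-panel.mjs, not part of the original file):
+// the path's last segment becomes the element name, and
+// "<path>-template.html" is fetched as the element's shadow-DOM template:
+//   DOM.defineCustomElement('ic-panel',
+//       templateText => class ICPanel extends V8CustomElement { /* ... */ });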
+
+function $(id) {
+  return document.querySelector(id)
+}
+
+class V8CustomElement extends HTMLElement {
+  _updateTimeoutId;
+  _updateCallback = this._update.bind(this);
+
+  constructor(templateText) {
+    super();
+    const shadowRoot = this.attachShadow({mode: 'open'});
+    shadowRoot.innerHTML = templateText;
+  }
+
+  $(id) {
+    return this.shadowRoot.querySelector(id);
+  }
+
+  querySelectorAll(query) {
+    return this.shadowRoot.querySelectorAll(query);
+  }
+
+  update() {
+    // Use timeout tasks to asynchronously update the UI without blocking.
+    clearTimeout(this._updateTimeoutId);
+    const kDelayMs = 5;
+    this._updateTimeoutId = setTimeout(this._updateCallback, kDelayMs);
+  }
+
+  _update() {
+    throw Error('Subclass responsibility');
+  }
+}
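+
+// Illustrative note (not part of the original file): panels subclass
+// V8CustomElement, override _update(), and call update() from their property
+// setters to schedule a debounced re-render, as ic-panel.mjs does.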
+
+class LazyTable {
+  constructor(table, rowData, rowElementCreator) {
+    this._table = table;
+    this._rowData = rowData;
+    this._rowElementCreator = rowElementCreator;
+    const tbody = table.querySelector('tbody');
+    table.replaceChild(document.createElement('tbody'), tbody);
+    table.querySelector('tfoot td').onclick = (e) => this._addMoreRows();
+    this._addMoreRows();
+  }
+
+  _nextRowDataSlice() {
+    return this._rowData.splice(0, 100);
+  }
+
+  _addMoreRows() {
+    const fragment = new DocumentFragment();
+    for (let row of this._nextRowDataSlice()) {
+      const tr = this._rowElementCreator(row);
+      fragment.appendChild(tr);
+    }
+    this._table.querySelector('tbody').appendChild(fragment);
+  }
+}
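+
+// Illustrative usage (not part of the original file; createRowElement is a
+// hypothetical callback returning a <tr>): rows are appended 100 at a time
+// and clicking the <tfoot> cell loads the next slice:
+//   new LazyTable(tableNode, rowData, row => createRowElement(row));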
+
+function delay(time) {
+  return new Promise(resolver => setTimeout(resolver, time));
+}
+
+export {
+  DOM,
+  $,
+  V8CustomElement,
+  formatBytes,
+  typeToColor,
+  CSSColor,
+  delay,
+  LazyTable,
+};
diff --git a/src/third_party/v8/tools/system-analyzer/ic-model.mjs b/src/third_party/v8/tools/system-analyzer/ic-model.mjs
new file mode 100644
index 0000000..2bb40b6
--- /dev/null
+++ b/src/third_party/v8/tools/system-analyzer/ic-model.mjs
@@ -0,0 +1,57 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import {IcLogEntry} from './log/ic.mjs';
+
+// For compatibility with console scripts:
+print = console.log;
+
+export class Group {
+  constructor(property, key, entry) {
+    this.property = property;
+    this.key = key;
+    this.count = 1;
+    this.entries = [entry];
+    this.percentage = undefined;
+    this.groups = undefined;
+  }
+
+  add(entry) {
+    this.count++;
+    this.entries.push(entry)
+  }
+
+  createSubGroups() {
+    // TODO: use Map
+    this.groups = {};
+    for (const propertyName of IcLogEntry.propertyNames) {
+      if (this.property == propertyName) continue;
+      this.groups[propertyName] = Group.groupBy(this.entries, propertyName);
+    }
+  }
+
+  static groupBy(entries, property) {
+    let accumulator = Object.create(null);
+    let length = entries.length;
+    for (let i = 0; i < length; i++) {
+      let entry = entries[i];
+      let key = entry[property];
+      if (accumulator[key] == undefined) {
+        accumulator[key] = new Group(property, key, entry);
+      } else {
+        let group = accumulator[key];
+        if (group.entries == undefined) console.log([group, entry]);
+        group.add(entry)
+      }
+    }
+    let result = [];
+    for (let key in accumulator) {
+      let group = accumulator[key];
+      group.percentage = Math.round(group.count / length * 100 * 100) / 100;
+      result.push(group);
+    }
+    result.sort((a, b) => {return b.count - a.count});
+    return result;
+  }
+}
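+
+// Illustrative usage (mirrors ic-panel.mjs, not part of the original file):
+//   const groups = Group.groupBy(icLogEntries, 'map');
+//   // groups[0] is the most frequent map, with .count and .percentage set.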
diff --git a/src/third_party/v8/tools/system-analyzer/ic-panel-template.html b/src/third_party/v8/tools/system-analyzer/ic-panel-template.html
new file mode 100644
index 0000000..ee08901
--- /dev/null
+++ b/src/third_party/v8/tools/system-analyzer/ic-panel-template.html
@@ -0,0 +1,98 @@
+<!-- Copyright 2020 the V8 project authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file. -->
+
+<head>
+  <link href="./index.css" rel="stylesheet">
+</head>
+<style>
+  .count {
+    text-align: right;
+    width: 5em;
+  }
+
+  .percentage {
+    text-align: right;
+    width: 5em;
+  }
+
+  .key {
+    padding-left: 1em;
+  }
+
+  .drilldown-group-title {
+    font-weight: bold;
+    padding: 0.5em 0 0.2em 0;
+  }
+
+  .toggle {
+    width: 1em;
+    text-align: center;
+    cursor: -webkit-zoom-in;
+    color: rgba(var(--border-color), 1);
+  }
+  .toggle::before {
+    content: "▶";
+  }
+  .open .toggle::before {
+    content: "▼";
+  }
+
+  .panel {
+    position: relative;
+    min-height: 200px;
+  }
+
+  #legend {
+    position: absolute;
+    right: 10px;
+    top: 10px;
+    background-color: var(--surface-color);
+    border-radius: 5px;
+    border: 3px solid rgba(var(--border-color), 0.2);
+    padding: 0 10px 0 10px;
+  }
+
+  #legend dt  {
+    font-family: monospace;
+  }
+  #legend h3 {
+    margin-top: 10px;
+  }
+  .scroller {
+    max-height: 800px;
+    overflow-y: scroll;
+  }
+</style>
+<div class="panel">
+  <h2>IC Panel <span id="count"></span></h2>
+  <div id="legend">
+    <h3>Legend</h3>
+    <dl>
+      <dt>0</dt>
+      <dd>uninitialized</dd>
+      <dt>X</dt>
+      <dd>no feedback</dd>
+      <dt>1</dt>
+      <dd>monomorphic</dd>
+      <dt>^</dt>
+      <dd>recompute handler</dd>
+      <dt>P</dt>
+      <dd>polymorphic</dd>
+      <dt>N</dt>
+      <dd>megamorphic</dd>
+      <dt>G</dt>
+      <dd>generic</dd>
+    </dl>
+  </div>
+  <p>
+    Group by IC-property: 
+    <select id="group-key"></select>
+  </p>
+  <div class="panelBody">
+    <table id="table" width="100%">
+      <tbody id="table-body">
+      </tbody>
+    </table>
+  </div>
+</div>
diff --git a/src/third_party/v8/tools/system-analyzer/ic-panel.mjs b/src/third_party/v8/tools/system-analyzer/ic-panel.mjs
new file mode 100644
index 0000000..d81d06d
--- /dev/null
+++ b/src/third_party/v8/tools/system-analyzer/ic-panel.mjs
@@ -0,0 +1,191 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import {FocusEvent, SelectionEvent, SelectTimeEvent} from './events.mjs';
+import {delay, DOM, V8CustomElement} from './helper.mjs';
+import {Group} from './ic-model.mjs';
+import {IcLogEntry} from './log/ic.mjs';
+import {MapLogEntry} from './log/map.mjs';
+
+DOM.defineCustomElement(
+    'ic-panel', (templateText) => class ICPanel extends V8CustomElement {
+      _selectedLogEntries;
+      _timeline;
+      constructor() {
+        super(templateText);
+        this.initGroupKeySelect();
+        this.groupKey.addEventListener('change', e => this.updateTable(e));
+      }
+      set timeline(value) {
+        console.assert(value !== undefined, 'timeline undefined!');
+        this._timeline = value;
+        this.selectedLogEntries = this._timeline.all;
+        this.update();
+      }
+      get groupKey() {
+        return this.$('#group-key');
+      }
+
+      get table() {
+        return this.$('#table');
+      }
+
+      get tableBody() {
+        return this.$('#table-body');
+      }
+
+      get count() {
+        return this.$('#count');
+      }
+
+      get spanSelectAll() {
+        return this.querySelectorAll('span');
+      }
+
+      set selectedLogEntries(value) {
+        this._selectedLogEntries = value;
+        this.update();
+      }
+
+      _update() {
+        this._updateCount();
+        this._updateTable();
+      }
+
+      _updateCount() {
+        this.count.innerHTML = `length=${this._selectedLogEntries.length}`;
+      }
+
+      _updateTable(event) {
+        let select = this.groupKey;
+        let key = select.options[select.selectedIndex].text;
+        DOM.removeAllChildren(this.tableBody);
+        let groups = Group.groupBy(this._selectedLogEntries, key, true);
+        this._render(groups, this.tableBody);
+      }
+
+      escapeHtml(unsafe) {
+        if (!unsafe) return '';
+        return unsafe.toString()
+            .replace(/&/g, '&amp;')
+            .replace(/</g, '&lt;')
+            .replace(/>/g, '&gt;')
+            .replace(/"/g, '&quot;')
+            .replace(/'/g, '&#039;');
+      }
+
+      handleMapClick(e) {
+        const group = e.target.parentNode.entry;
+        const id = group.key;
+        const selectedMapLogEntries =
+            this.searchIcLogEntryToMapLogEntry(id, group.entries);
+        this.dispatchEvent(new SelectionEvent(selectedMapLogEntries));
+      }
+
+      searchIcLogEntryToMapLogEntry(id, icLogEntries) {
+        // Searches for MapLogEntries using the id and time.
+        const selectedMapLogEntriesSet = new Set();
+        for (const icLogEntry of icLogEntries) {
+          const selectedMap = MapLogEntry.get(id, icLogEntry.time);
+          selectedMapLogEntriesSet.add(selectedMap);
+        }
+        return Array.from(selectedMapLogEntriesSet);
+      }
+
+      // TODO(zcankara) Handle in the processor for events with source
+      // positions.
+      handleFilePositionClick(e) {
+        const tr = e.target.parentNode;
+        const sourcePosition = tr.group.entries[0].sourcePosition;
+        this.dispatchEvent(new FocusEvent(sourcePosition));
+      }
+
+      _render(groups, parent) {
+        const fragment = document.createDocumentFragment();
+        const max = Math.min(1000, groups.length)
+        const detailsClickHandler = this.handleDetailsClick.bind(this);
+        const mapClickHandler = this.handleMapClick.bind(this);
+        const fileClickHandler = this.handleFilePositionClick.bind(this);
+        for (let i = 0; i < max; i++) {
+          const group = groups[i];
+          const tr = DOM.tr();
+          tr.group = group;
+          const details = tr.appendChild(DOM.td('', 'toggle'));
+          details.onclick = detailsClickHandler;
+          tr.appendChild(DOM.td(group.percentage + '%', 'percentage'));
+          tr.appendChild(DOM.td(group.count, 'count'));
+          const valueTd = tr.appendChild(DOM.td(group.key, 'key'));
+          if (group.property === 'map') {
+            valueTd.onclick = mapClickHandler;
+            valueTd.classList.add('clickable');
+          } else if (group.property == 'filePosition') {
+            valueTd.classList.add('clickable');
+            valueTd.onclick = fileClickHandler;
+          }
+          fragment.appendChild(tr);
+        }
+        const omitted = groups.length - max;
+        if (omitted > 0) {
+          const tr = DOM.tr();
+          const tdNode = tr.appendChild(DOM.td(`Omitted ${omitted} entries.`));
+          tdNode.colSpan = 4;
+          fragment.appendChild(tr);
+        }
+        parent.appendChild(fragment);
+      }
+
+      handleDetailsClick(event) {
+        const tr = event.target.parentNode;
+        const group = tr.group;
+        // Create subgroups in-place if they don't exist yet.
+        if (group.groups === undefined) {
+          group.createSubGroups();
+          this.renderDrilldown(group, tr);
+        }
+        let detailsTr = tr.nextSibling;
+        if (tr.classList.contains('open')) {
+          tr.classList.remove('open');
+          detailsTr.style.display = 'none';
+        } else {
+          tr.classList.add('open');
+          detailsTr.style.display = 'table-row';
+        }
+      }
+
+      renderDrilldown(group, previousSibling) {
+        let tr = DOM.tr('entry-details');
+        tr.style.display = 'none';
+        // indent by one td.
+        tr.appendChild(DOM.td());
+        let td = DOM.td();
+        td.colSpan = 3;
+        for (let key in group.groups) {
+          this.renderDrilldownGroup(td, group.groups[key], key);
+        }
+        tr.appendChild(td);
+        // Append the new TR after previousSibling.
+        previousSibling.parentNode.insertBefore(tr, previousSibling.nextSibling)
+      }
+
+      renderDrilldownGroup(td, children, key) {
+        const max = 20;
+        const div = DOM.div('drilldown-group-title');
+        div.textContent =
+            `Grouped by ${key} [top ${max} out of ${children.length}]`;
+        td.appendChild(div);
+        const table = DOM.table();
+        this._render(children.slice(0, max), table, false)
+        td.appendChild(table);
+      }
+
+      initGroupKeySelect() {
+        const select = this.groupKey;
+        select.options.length = 0;
+        for (const propertyName of IcLogEntry.propertyNames) {
+          const option = document.createElement('option');
+          option.text = propertyName;
+          select.add(option);
+        }
+      }
+    });
diff --git a/src/third_party/v8/tools/system-analyzer/index.css b/src/third_party/v8/tools/system-analyzer/index.css
new file mode 100644
index 0000000..5b55182
--- /dev/null
+++ b/src/third_party/v8/tools/system-analyzer/index.css
@@ -0,0 +1,202 @@
+:root {
+  --background-color: #000000;
+  --surface-color: #121212;
+  --primary-color: #bb86fc;
+  --secondary-color: #03dac6;
+  --on-surface-color: #ffffff;
+  --on-background-color: #f5f0f0;
+  --on-primary-color: #000000;
+  --on-secondary-color: #000000;
+  --default-color: #9b6edc;
+  --error-color: #cf6679;
+  --map-background-color: #5e5454;
+  --timeline-background-color: #1f1f1f;
+  --file-reader-background-color: #ffffff80;
+  --red: #dc6eae;
+  --green: #aedc6e;
+  --yellow: #eeff41;
+  --blue: #6e77dc;
+  --orange: #dc9b6e;
+  --violet: #d26edc;
+  --border-color: 128, 128, 128;
+}
+
+[data-theme="light"] {
+  --background-color: #ffffff;
+  --surface-color: #ffffff;
+  --primary-color: #6200ee;
+  --secondary-color: #03dac5;
+  --on-surface-color: #000000;
+  --on-background-color: #000000;
+  --on-primary-color: #ffffff;
+  --on-secondary-color: #000000;
+  --default-color: #3700b3;
+  --error-color: #b00020;
+  --map-background-color: #5e5454;
+  --timeline-background-color: #fdfcfc;
+  --file-reader-background-color: #887e8b80;
+  --red: #b71c1c;
+  --green: #7db300;
+  --yellow: #ffff00;
+  --blue: #0024b3;
+  --orange: #ef6c00;
+  --violet: #8f00b3;
+}
+
+body {
+  font-family: sans-serif;
+  font-size: 14px;
+  color: var(--on-background-color);
+  margin: 10px 10px 0 10px;
+  background-color: var(--background-color);
+}
+
+section {
+  margin-bottom: 10px;
+}
+
+::-webkit-scrollbar, ::-webkit-scrollbar-track, ::-webkit-scrollbar-corner {
+  background-color: rgba(0, 0, 0, 0.0);
+}
+::-webkit-scrollbar, ::-webkit-scrollbar-track {
+  width: 10px;
+  height: 10px;
+}
+::-webkit-scrollbar-thumb {
+  background-color: rgba(128, 128, 128, 0.5);
+  border-radius: 8px;
+  cursor: pointer;
+}
+::-webkit-scrollbar-thumb:hover { 
+  background-color: rgba(128, 128, 128, 0.8);
+}
+
+kbd {
+  color: var(--on-primary-color);
+  background-color: var(--primary-color);
+  border-radius: 3px;
+  border: 1px solid var(--on-primary-color);
+  display: inline-block;
+  font-size: .9em;
+  font-weight: bold;
+  padding: 0px 4px 2px 4px;
+  white-space: nowrap;
+}
+
+a {
+  color: var(--primary-color);
+  text-decoration: none;
+}
+a:hover {
+  color: var(--secondary-color);
+}
+a:link {
+  color: var(--secondary-color);
+}
+
+dl {
+  display: grid;
+  grid-template-columns: min-content auto;
+  grid-gap: 5px;
+}
+dt {
+  text-align: right;
+  white-space: nowrap;
+}
+dd {
+  margin: 0;
+}
+
+.panel {
+  background-color: var(--surface-color);
+  color: var(--on-surface-color);
+  padding: 10px 10px 10px 10px;
+  border-radius: 10px;
+  border: 3px solid rgba(var(--border-color), 0.2);
+}
+
+.panelBody {
+  max-height: 800px;
+  overflow-y: scroll;
+  margin: 0 -10px -10px 0;
+}
+
+.panel > h2 {
+  margin-top: 5px;
+}
+
+button {
+  cursor: pointer;
+}
+input,
+select,
+button {
+  background-color: var(--surface-color);
+  color: var(--on-surface-color);
+  border: 2px solid rgba(var(--border-color), 0.4);
+  border-radius: 5px;
+  padding: 2px;
+}
+input:hover,
+select:hover,
+button:hover {
+  border: 2px solid rgba(var(--border-color), 0.6);
+}
+
+.colorbox {
+  width: 10px;
+  height: 10px;
+  border: 1px var(--background-color) solid;
+  border-radius: 50%;
+}
+
+.primary {
+  background-color: var(--default-color);
+}
+
+.red {
+  background-color: var(--red);
+}
+
+.green {
+  background-color: var(--green);
+}
+
+.yellow {
+  background-color: var(--yellow);
+  color: var(--map-background-color);
+}
+
+.blue {
+  background-color: var(--blue);
+}
+
+.orange {
+  background-color: var(--orange);
+}
+
+.violet {
+  background-color: var(--violet);
+  color: var(--map-background-color);
+}
+
+.success {
+  background-color: var(--secondary-color);
+}
+
+.failure {
+  background-color: var(--error-color);
+}
+
+.highlight {
+  background-color: var(--primary-color);
+  color: var(--on-primary-color);
+}
+.clickable:hover,
+.mark:hover,
+.clickable:active,
+.mark:active {
+  background-color: var(--primary-color);
+  color: var(--on-primary-color);
+  cursor: pointer;
+}
\ No newline at end of file
diff --git a/src/third_party/v8/tools/system-analyzer/index.html b/src/third_party/v8/tools/system-analyzer/index.html
new file mode 100644
index 0000000..a861300
--- /dev/null
+++ b/src/third_party/v8/tools/system-analyzer/index.html
@@ -0,0 +1,186 @@
+<!DOCTYPE html>
+<!-- Copyright 2020 the V8 project authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file. -->
+<html lang="en">
+<head>
+  <meta charset="UTF-8">
+  <title>Indicium</title>
+  <!-- <link rel="icon" type="image/png" href="/images/favicon.png"/> -->
+
+  <link rel="modulepreload" href="./log-file-reader.mjs" >
+  <link rel="modulepreload" href="./helper.mjs" >
+  <link rel="preload" href="./log-file-reader-template.html" as="fetch" crossorigin="anonymous">
+  <script type="module">
+    // Force instantiating the log-reader before anything else.
+    import "./log-file-reader.mjs";
+    // Delay loading of the main App
+    (async function() {
+      let module = await import('./index.mjs');
+      globalThis.app = new module.App("#log-file-reader", "#map-panel", "#map-stats-panel",
+        "#timeline-panel", "#ic-panel", "#map-track", "#ic-track", "#deopt-track",
+        "#source-panel");
+    })();
+  </script>
+
+  <link rel="stylesheet" type="text/css" href="./index.css">
+  <style>
+    .theme-switch-wrapper {
+      display: inline-block;
+      align-items: center;
+    }
+
+    .theme-switch {
+      display: inline-block;
+      height: 16px;
+      position: relative;
+      width: 38px;
+    }
+
+    .theme-switch input {
+      display: none;
+    }
+
+    .slider {
+      background-color: var(--primary-color);
+      bottom: 0;
+      cursor: pointer;
+      left: 0;
+      position: absolute;
+      right: 0;
+      top: 0;
+      border-radius: 34px;
+    }
+
+    .slider:before {
+      background-color: var(--surface-color);
+      position: absolute;
+      height: 10px;
+      width: 10px;
+      bottom: 3px;
+      content: "";
+      left: 4px;
+      border-radius: 50%;
+    }
+
+    input:checked+.slider:before {
+      transform: translateX(20px);
+    }
+
+    #container.initial {
+      display: none;
+    }
+
+    #timeline-panel {
+      width: 100%;
+    }
+
+    .panels{
+      display: grid;
+      align-content: center;
+      grid-template-columns: repeat(auto-fill, minmax(500px, 1fr));
+      grid-auto-flow: row dense;
+      grid-gap: 10px;
+      margin-top: 10px;
+    }
+
+    dt::after  {
+      content: ":";
+    }
+  </style>
+</head>
+
+<body>
+  <section id="file-reader">
+    <log-file-reader id="log-file-reader"></log-file-reader>
+  </section>
+
+  <section id="container" class="initial">
+    <timeline-panel id="timeline-panel">
+      <timeline-track id="map-track"></timeline-track>
+      <timeline-track id="ic-track"></timeline-track>
+      <timeline-track id="deopt-track"></timeline-track>
+    </timeline-panel>
+    <div class="panels">
+      <map-panel id="map-panel"></map-panel>
+      <stats-panel id="map-stats-panel"></stats-panel>
+      <ic-panel id="ic-panel" onchange="app.handleSelectIc(event)"></ic-panel>
+      <source-panel id="source-panel"></source-panel>
+    </div>
+  </section>
+
+  <div class="panels">
+    <section id="settings" class="panel">
+      <h2>Settings</h2>
+      <span>Theme:</span>
+      <div class="theme-switch-wrapper">
+        <label class="theme-switch" for="theme-switch-input">
+          <input type="checkbox" id="theme-switch-input" />
+          <div class="slider"></div>
+        </label>
+      </div>
+    </section>
+
+    <section id="instructions" class="panel">
+      <h2>Instructions</h2>
+      <p>
+        Unified web interface to analyse runtime information stored in the v8 log.
+      </p>
+      For generating a v8.log file from <a href="https://v8.dev/docs/build">d8</a>:
+      <ul>
+        <li>
+          <code>/path/to/d8 --trace-maps --trace_ic --log-source-code $FILE</code>
+        </li>
+      </ul>
+      For generating a v8.log file from Chrome:
+      <ul>
+        <li>
+          <code>/path/to/chrome --user-data-dir=/var/tmp/chr$RANDOM --no-sandbox
+          --js-flags='--trace-ic --trace-maps --log-source-code'
+          $WEBSITE_URL</code>
+        </li>
+      </ul>
+
+      <h3>Log Options:</h3>
+      <dl class="d8-options">
+        <dt><code>--trace-maps</code></dt>
+        <dd>Log<a href="https://v8.dev/blog/fast-properties" target="_blank">
+            Maps</a></dd>
+        <dt><code>--trace-ic</code></dt>
+        <dd>Log
+          <a href="https://mathiasbynens.be/notes/shapes-ics" target="_blank">
+            ICs</a></dd>
+        <dt><code>--log-source-code</code></dt>
+        <dd>Log source code</dd>
+      </dl>
+
+      <h3>Keyboard Shortcuts for Navigation</h3>
+      <dl>
+        <dt><kbd>SHIFT</kbd> + <kbd>Arrow Up</kbd></dt>
+        <dd>Follow Map transition forward (first child)</dd>
+
+        <dt><kbd>SHIFT</kbd> + <kbd>Arrow Down</kbd></dt>
+        <dd>Follow Map transition backwards</dd>
+
+        <dt><kbd>Arrow Up</kbd></dt>
+        <dd>Go to previous Map chunk</dd>
+
+        <dt><kbd>Arrow Down</kbd></dt>
+        <dd>Go to next Map in chunk</dd>
+
+        <dt><kbd>Arrow Left</kbd></dt>
+        <dd>Go to previous chunk</dd>
+
+        <dt><kbd>Arrow Right</kbd></dt>
+        <dd>Go to next chunk</dd>
+
+        <dt><kbd>+</kbd></dt>
+        <dd>Timeline zoom in</dd>
+
+        <dt><kbd>-</kbd></dt>
+        <dd>Timeline zoom out</dd>
+      </dl>
+    </section>
+  </div>
+</body>
+</html>
diff --git a/src/third_party/v8/tools/system-analyzer/index.mjs b/src/third_party/v8/tools/system-analyzer/index.mjs
new file mode 100644
index 0000000..dfc858e
--- /dev/null
+++ b/src/third_party/v8/tools/system-analyzer/index.mjs
@@ -0,0 +1,302 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import {SourcePosition} from '../profile.mjs';
+
+import {State} from './app-model.mjs';
+import {FocusEvent, SelectionEvent, SelectTimeEvent} from './events.mjs';
+import {$} from './helper.mjs';
+import {IcLogEntry} from './log/ic.mjs';
+import {MapLogEntry} from './log/map.mjs';
+import {Processor} from './processor.mjs';
+
+class App {
+  _state;
+  _view;
+  _navigation;
+  _startupPromise;
+  constructor(
+      fileReaderId, mapPanelId, mapStatsPanelId, timelinePanelId, icPanelId,
+      mapTrackId, icTrackId, deoptTrackId, sourcePanelId) {
+    this._view = {
+      __proto__: null,
+      logFileReader: $(fileReaderId),
+      icPanel: $(icPanelId),
+      mapPanel: $(mapPanelId),
+      mapStatsPanel: $(mapStatsPanelId),
+      timelinePanel: $(timelinePanelId),
+      mapTrack: $(mapTrackId),
+      icTrack: $(icTrackId),
+      deoptTrack: $(deoptTrackId),
+      sourcePanel: $(sourcePanelId)
+    };
+    this.toggleSwitch = $('.theme-switch input[type="checkbox"]');
+    this.toggleSwitch.addEventListener('change', (e) => this.switchTheme(e));
+    this._view.logFileReader.addEventListener(
+        'fileuploadstart', (e) => this.handleFileUploadStart(e));
+    this._view.logFileReader.addEventListener(
+        'fileuploadend', (e) => this.handleFileUploadEnd(e));
+    this._startupPromise = this.runAsyncInitialize();
+  }
+
+  async runAsyncInitialize() {
+    await Promise.all([
+      import('./ic-panel.mjs'),
+      import('./timeline-panel.mjs'),
+      import('./stats-panel.mjs'),
+      import('./map-panel.mjs'),
+      import('./source-panel.mjs'),
+    ]);
+    document.addEventListener(
+        'keydown', e => this._navigation?.handleKeyDown(e));
+    document.addEventListener(
+        SelectionEvent.name, e => this.handleShowEntries(e));
+    document.addEventListener(
+        FocusEvent.name, e => this.handleShowEntryDetail(e));
+    document.addEventListener(
+        SelectTimeEvent.name, e => this.handleTimeRangeSelect(e));
+  }
+
+  handleShowEntries(e) {
+    if (e.entries[0] instanceof MapLogEntry) {
+      this.showMapEntries(e.entries);
+    } else if (e.entries[0] instanceof IcLogEntry) {
+      this.showIcEntries(e.entries);
+    } else if (e.entries[0] instanceof SourcePosition) {
+      this.showSourcePositionEntries(e.entries);
+    } else {
+      throw new Error('Unknown selection type!');
+    }
+    e.stopPropagation();
+  }
+  showMapEntries(entries) {
+    this._state.selectedMapLogEntries = entries;
+    this._view.mapPanel.selectedMapLogEntries = entries;
+    this._view.mapStatsPanel.selectedLogEntries = entries;
+  }
+  showIcEntries(entries) {
+    this._state.selectedIcLogEntries = entries;
+    this._view.icPanel.selectedLogEntries = entries;
+  }
+  showDeoptEntries(entries) {
+    this._state.selectedDeoptLogEntries = entries;
+  }
+  showSourcePositionEntries(entries) {
+    // TODO: Handle multiple source position selection events
+    this._view.sourcePanel.selectedSourcePositions = entries
+  }
+
+  handleTimeRangeSelect(e) {
+    this.selectTimeRange(e.start, e.end);
+    e.stopPropagation();
+  }
+
+  selectTimeRange(start, end) {
+    this._state.selectTimeRange(start, end);
+    this.showMapEntries(this._state.mapTimeline.selection);
+    this.showIcEntries(this._state.icTimeline.selection);
+    this.showDeoptEntries(this._state.deoptTimeline.selection);
+    this._view.timelinePanel.timeSelection = {start, end};
+  }
+
+  handleShowEntryDetail(e) {
+    if (e.entry instanceof MapLogEntry) {
+      this.selectMapLogEntry(e.entry);
+    } else if (e.entry instanceof IcLogEntry) {
+      this.selectICLogEntry(e.entry);
+    } else if (e.entry instanceof SourcePosition) {
+      this.selectSourcePosition(e.entry);
+    } else {
+      throw new Error('Unknown selection type!');
+    }
+    e.stopPropagation();
+  }
+  selectMapLogEntry(entry) {
+    this._state.map = entry;
+    this._view.mapTrack.selectedEntry = entry;
+    this._view.mapPanel.map = entry;
+  }
+  selectICLogEntry(entry) {
+    this._state.ic = entry;
+    this._view.icPanel.selectedLogEntries = [entry];
+  }
+  selectSourcePosition(sourcePositions) {
+    if (!sourcePositions.script) return;
+    this._view.sourcePanel.selectedSourcePositions = [sourcePositions];
+  }
+
+  handleFileUploadStart(e) {
+    this.restartApp();
+    $('#container').className = 'initial';
+  }
+
+  restartApp() {
+    this._state = new State();
+    this._navigation = new Navigation(this._state, this._view);
+  }
+
+  async handleFileUploadEnd(e) {
+    await this._startupPromise;
+    try {
+      const processor = new Processor(e.detail);
+      const mapTimeline = processor.mapTimeline;
+      const icTimeline = processor.icTimeline;
+      const deoptTimeline = processor.deoptTimeline;
+      this._state.mapTimeline = mapTimeline;
+      this._state.icTimeline = icTimeline;
+      this._state.deoptTimeline = deoptTimeline;
+      // Transitions must be set before timeline for stats panel.
+      this._view.mapPanel.timeline = mapTimeline;
+      this._view.mapTrack.data = mapTimeline;
+      this._view.mapStatsPanel.transitions =
+          this._state.mapTimeline.transitions;
+      this._view.mapStatsPanel.timeline = mapTimeline;
+      this._view.icPanel.timeline = icTimeline;
+      this._view.icTrack.data = icTimeline;
+      this._view.deoptTrack.data = deoptTimeline;
+      this._view.sourcePanel.data = processor.scripts
+    } catch (e) {
+      this._view.logFileReader.error = 'Log file contains errors!'
+      throw (e);
+    } finally {
+      $('#container').className = 'loaded';
+      this.fileLoaded = true;
+    }
+  }
+
+  refreshTimelineTrackView() {
+    this._view.mapTrack.data = this._state.mapTimeline;
+    this._view.icTrack.data = this._state.icTimeline;
+    this._view.deoptTrack.data = this._state.deoptTimeline;
+  }
+
+  switchTheme(event) {
+    document.documentElement.dataset.theme =
+        event.target.checked ? 'light' : 'dark';
+    if (this.fileLoaded) {
+      this.refreshTimelineTrackView();
+    }
+  }
+}
+
+class Navigation {
+  _view;
+  constructor(state, view) {
+    this.state = state;
+    this._view = view;
+  }
+  get map() {
+    return this.state.map
+  }
+  set map(value) {
+    this.state.map = value
+  }
+  get chunks() {
+    return this.state.mapTimeline.chunks;
+  }
+  increaseTimelineResolution() {
+    this._view.timelinePanel.nofChunks *= 1.5;
+    this.state.nofChunks *= 1.5;
+  }
+  decreaseTimelineResolution() {
+    this._view.timelinePanel.nofChunks /= 1.5;
+    this.state.nofChunks /= 1.5;
+  }
+  selectNextEdge() {
+    if (!this.map) return;
+    if (this.map.children.length != 1) return;
+    this.map = this.map.children[0].to;
+    this._view.mapTrack.selectedEntry = this.map;
+    this.updateUrl();
+    this._view.mapPanel.map = this.map;
+  }
+  selectPrevEdge() {
+    if (!this.map) return;
+    if (!this.map.parent()) return;
+    this.map = this.map.parent();
+    this._view.mapTrack.selectedEntry = this.map;
+    this.updateUrl();
+    this._view.mapPanel.map = this.map;
+  }
+  selectDefaultMap() {
+    this.map = this.chunks[0].at(0);
+    this._view.mapTrack.selectedEntry = this.map;
+    this.updateUrl();
+    this._view.mapPanel.map = this.map;
+  }
+  moveInChunks(next) {
+    if (!this.map) return this.selectDefaultMap();
+    let chunkIndex = this.map.chunkIndex(this.chunks);
+    let chunk = this.chunks[chunkIndex];
+    let index = chunk.indexOf(this.map);
+    if (next) {
+      chunk = chunk.next(this.chunks);
+    } else {
+      chunk = chunk.prev(this.chunks);
+    }
+    if (!chunk) return;
+    index = Math.min(index, chunk.size() - 1);
+    this.map = chunk.at(index);
+    this._view.mapTrack.selectedEntry = this.map;
+    this.updateUrl();
+    this._view.mapPanel.map = this.map;
+  }
+  moveInChunk(delta) {
+    if (!this.map) return this.selectDefaultMap();
+    let chunkIndex = this.map.chunkIndex(this.chunks)
+    let chunk = this.chunks[chunkIndex];
+    let index = chunk.indexOf(this.map) + delta;
+    let map;
+    if (index < 0) {
+      map = chunk.prev(this.chunks).last();
+    } else if (index >= chunk.size()) {
+      map = chunk.next(this.chunks).first()
+    } else {
+      map = chunk.at(index);
+    }
+    this.map = map;
+    this._view.mapTrack.selectedEntry = this.map;
+    this.updateUrl();
+    this._view.mapPanel.map = this.map;
+  }
+  updateUrl() {
+    let entries = this.state.entries;
+    let params = new URLSearchParams(entries);
+    window.history.pushState(entries, '', '?' + params.toString());
+  }
+  handleKeyDown(event) {
+    switch (event.key) {
+      case 'ArrowUp':
+        event.preventDefault();
+        if (event.shiftKey) {
+          this.selectPrevEdge();
+        } else {
+          this.moveInChunk(-1);
+        }
+        return false;
+      case 'ArrowDown':
+        event.preventDefault();
+        if (event.shiftKey) {
+          this.selectNextEdge();
+        } else {
+          this.moveInChunk(1);
+        }
+        return false;
+      case 'ArrowLeft':
+        this.moveInChunks(false);
+        break;
+      case 'ArrowRight':
+        this.moveInChunks(true);
+        break;
+      case '+':
+        this.increaseTimelineResolution();
+        break;
+      case '-':
+        this.decreaseTimelineResolution();
+        break;
+    }
+  }
+}
+
+export {App};
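Side note on the Navigation.updateUrl pattern above: it serializes state.entries into the query string with URLSearchParams and pushState. A minimal sketch, assuming state.entries is a plain string-keyed object (its exact shape is defined in the app's model code, which is not part of this hunk):

// Hypothetical entries object; the real shape comes from the App state model.
const entries = {map: '0x1234', time: '42'};
const params = new URLSearchParams(entries);
window.history.pushState(entries, '', '?' + params.toString());
console.log(window.location.search);  // e.g. "?map=0x1234&time=42"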
diff --git a/src/third_party/v8/tools/system-analyzer/log-file-reader-template.html b/src/third_party/v8/tools/system-analyzer/log-file-reader-template.html
new file mode 100644
index 0000000..e54d459
--- /dev/null
+++ b/src/third_party/v8/tools/system-analyzer/log-file-reader-template.html
@@ -0,0 +1,86 @@
+<!-- Copyright 2020 the V8 project authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file. -->
+
+<head>
+  <link href="./index.css" rel="stylesheet">
+</head>
+<style>
+  #fileReader {
+    height: 100px;
+    line-height: 100px;
+    text-align: center;
+    cursor: pointer;
+    transition: all 0.5s ease-in-out;
+    background-color: var(--surface-color);
+  }
+  
+  #fileReader:hover {
+    background-color: var(--primary-color);
+    color: var(--on-primary-color);
+  }
+
+  .done #fileReader{
+    height: 20px;
+    line-height: 20px;
+  }
+
+  .fail #fileReader {
+    background-color: var(--error-color);
+  }
+
+  .loading #fileReader {
+    cursor: wait;
+  }
+
+  #fileReader>input {
+    display: none;
+  }
+
+  #loader {
+    display: none;
+  }
+
+  .loading #loader {
+    display: block;
+    position: fixed;
+    top: 0px;
+    left: 0px;
+    width: 100%;
+    height: 100%;
+    background-color: var(--file-reader-background-color);
+  }
+  #spinner {
+    position: absolute;
+    width: 100px;
+    height: 100px;
+    top: 40%;
+    left: 50%;
+    margin-left: -50px;
+    border: 30px solid var(--surface-color);
+    border-top: 30px solid var(--primary-color);
+    border-radius: 50%;
+    animation: spin 1s ease-in-out infinite;
+  }
+
+  @keyframes spin {
+    0% {
+      transform: rotate(0deg);
+    }
+
+    100% {
+      transform: rotate(360deg);
+    }
+  }
+</style>
+<div id="root">
+  <div id="fileReader" class="panel" tabindex=1>
+    <span id="label">
+      Drag and drop a v8.log file into this area, or click to choose from disk.
+    </span>
+    <input id="file" type="file" name="file">
+  </div>
+  <div id="loader">
+    <div id="spinner"></div>
+  </div>
+</div>
diff --git a/src/third_party/v8/tools/system-analyzer/log-file-reader.mjs b/src/third_party/v8/tools/system-analyzer/log-file-reader.mjs
new file mode 100644
index 0000000..c46d792
--- /dev/null
+++ b/src/third_party/v8/tools/system-analyzer/log-file-reader.mjs
@@ -0,0 +1,84 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+import {DOM, V8CustomElement} from './helper.mjs';
+
+DOM.defineCustomElement('log-file-reader',
+                        (templateText) =>
+                            class LogFileReader extends V8CustomElement {
+  constructor() {
+    super(templateText);
+    this.addEventListener('click', e => this.handleClick(e));
+    this.addEventListener('dragover', e => this.handleDragOver(e));
+    this.addEventListener('drop', e => this.handleChange(e));
+    this.$('#file').addEventListener('change', e => this.handleChange(e));
+    this.$('#fileReader')
+        .addEventListener('keydown', e => this.handleKeyEvent(e));
+  }
+
+  set error(message) {
+    this._updateLabel(message);
+    this.root.className = 'fail';
+  }
+
+  _updateLabel(text) {
+    this.$('#label').innerText = text;
+  }
+
+  handleKeyEvent(event) {
+    if (event.key == 'Enter') this.handleClick(event);
+  }
+
+  handleClick(event) {
+    this.$('#file').click();
+  }
+
+  handleChange(event) {
+    // Used for drop and file change.
+    event.preventDefault();
+    this.dispatchEvent(
+        new CustomEvent('fileuploadstart', {bubbles: true, composed: true}));
+    const host = event.dataTransfer ? event.dataTransfer : event.target;
+    this.readFile(host.files[0]);
+  }
+
+  handleDragOver(event) {
+    event.preventDefault();
+  }
+
+  connectedCallback() {
+    this.fileReader.focus();
+  }
+
+  get fileReader() {
+    return this.$('#fileReader');
+  }
+
+  get root() {
+    return this.$('#root');
+  }
+
+  readFile(file) {
+    if (!file) {
+      this.error = 'Failed to load file.';
+      return;
+    }
+    this.fileReader.blur();
+    this.root.className = 'loading';
+    const reader = new FileReader();
+    reader.onload = (e) => this.handleFileLoad(e, file);
+    // Delay the loading a bit to allow for CSS animations to happen.
+    setTimeout(() => reader.readAsText(file), 0);
+  }
+
+  handleFileLoad(e, file) {
+    const chunk = e.target.result;
+    this._updateLabel(`Finished loading '${file.name}'.`);
+    this.dispatchEvent(new CustomEvent('fileuploadend', {
+      bubbles: true,
+      composed: true,
+      detail: chunk,
+    }));
+    this.root.className = 'done';
+  }
+});
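The log-file-reader element above communicates only through DOM custom events: fileuploadstart when a load begins, and fileuploadend whose detail carries the raw log text. A hedged consumer sketch, assuming the DOM helper registers the element under the <log-file-reader> tag name and that Processor is imported from ./processor.mjs:

import {Processor} from './processor.mjs';  // import path assumed

const reader = document.querySelector('log-file-reader');
reader.addEventListener('fileuploadstart', () => console.log('loading log file...'));
reader.addEventListener('fileuploadend', (e) => {
  // e.detail is the raw text of the uploaded v8.log file.
  const processor = new Processor(e.detail);
  console.log('scripts found:', processor.scripts.length);
});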
diff --git a/src/third_party/v8/tools/system-analyzer/log/deopt.mjs b/src/third_party/v8/tools/system-analyzer/log/deopt.mjs
new file mode 100644
index 0000000..f3ff1a7
--- /dev/null
+++ b/src/third_party/v8/tools/system-analyzer/log/deopt.mjs
@@ -0,0 +1,10 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+import {LogEntry} from './log.mjs';
+
+export class DeoptLogEntry extends LogEntry {
+  constructor(type, time) {
+    super(type, time);
+  }
+}
diff --git a/src/third_party/v8/tools/system-analyzer/log/ic.mjs b/src/third_party/v8/tools/system-analyzer/log/ic.mjs
new file mode 100644
index 0000000..b6c7ec5
--- /dev/null
+++ b/src/third_party/v8/tools/system-analyzer/log/ic.mjs
@@ -0,0 +1,65 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+import {LogEntry} from './log.mjs';
+
+export class IcLogEntry extends LogEntry {
+  constructor(
+      type, fn_file, time, line, column, key, oldState, newState, map, reason,
+      script, modifier, additional) {
+    super(type, time);
+    this.category = 'other';
+    if (this.type.indexOf('Store') !== -1) {
+      this.category = 'Store';
+    } else if (this.type.indexOf('Load') !== -1) {
+      this.category = 'Load';
+    }
+    let parts = fn_file.split(' ');
+    this.functionName = parts[0];
+    this.file = parts[1];
+    let position = line + ':' + column;
+    this.filePosition = this.file + ':' + position;
+    this.oldState = oldState;
+    this.newState = newState;
+    this.state = this.oldState + ' → ' + this.newState;
+    this.key = key;
+    this.map = map;
+    this.reason = reason;
+    this.additional = additional;
+    this.script = script;
+    this.modifier = modifier;
+  }
+
+  parseMapProperties(parts, offset) {
+    let next = parts[++offset];
+    if (!next.startsWith('dict')) return offset;
+    this.propertiesMode = next.substr(5) == '0' ? 'fast' : 'slow';
+    this.numberOfOwnProperties = parts[++offset].substr(4);
+    next = parts[++offset];
+    this.instanceType = next.substr(5, next.length - 6);
+    return offset;
+  }
+
+  parsePositionAndFile(parts, start) {
+    // find the position of 'at' in the parts array.
+    let offset = start;
+    for (let i = start + 1; i < parts.length; i++) {
+      offset++;
+      if (parts[i] == 'at') break;
+    }
+    if (parts[offset] !== 'at') return -1;
+    this.position = parts.slice(start, offset).join(' ');
+    offset += 1;
+    this.isNative = parts[offset] == 'native'
+    offset += this.isNative ? 1 : 0;
+    this.file = parts[offset];
+    return offset;
+  }
+
+  static get propertyNames() {
+    return [
+      'type', 'category', 'functionName', 'filePosition', 'state', 'key', 'map',
+      'reason', 'file'
+    ];
+  }
+}
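IcLogEntry above splits the combined "functionName file" string and derives a coarse Load/Store category from the IC type. A small sketch with made-up argument values (not taken from a real log):

import {IcLogEntry} from './log/ic.mjs';

// All values below are illustrative.
const entry = new IcLogEntry(
    'LoadIC', 'foo http://example.com/app.js', 12, 3, 7, 'x', '0', '1',
    '0xdeadbeef', 'unknown', undefined, '');
console.log(entry.category);      // 'Load'
console.log(entry.functionName);  // 'foo'
console.log(entry.filePosition);  // 'http://example.com/app.js:3:7'
console.log(entry.state);         // '0 → 1'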
diff --git a/src/third_party/v8/tools/system-analyzer/log/log.mjs b/src/third_party/v8/tools/system-analyzer/log/log.mjs
new file mode 100644
index 0000000..69195d7
--- /dev/null
+++ b/src/third_party/v8/tools/system-analyzer/log/log.mjs
@@ -0,0 +1,23 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export class LogEntry {
+  _time;
+  _type;
+  constructor(type, time) {
+    // TODO(zcankara) remove type and add empty getters to override
+    this._time = time;
+    this._type = type;
+  }
+  get time() {
+    return this._time;
+  }
+  get type() {
+    return this._type;
+  }
+  // Returns an Array of all possible #type values.
+  static get allTypes() {
+    throw new Error('Not implemented.');
+  }
+}
\ No newline at end of file
diff --git a/src/third_party/v8/tools/system-analyzer/log/map.mjs b/src/third_party/v8/tools/system-analyzer/log/map.mjs
new file mode 100644
index 0000000..4df6fb8
--- /dev/null
+++ b/src/third_party/v8/tools/system-analyzer/log/map.mjs
@@ -0,0 +1,289 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import {LogEntry} from './log.mjs';
+
+// ===========================================================================
+// Map Log Events
+
+const kChunkHeight = 200;
+const kChunkWidth = 10;
+
+function define(prototype, name, fn) {
+  Object.defineProperty(prototype, name, {value: fn, enumerable: false});
+}
+
+define(Array.prototype, 'max', function(fn) {
+  if (this.length === 0) return undefined;
+  if (fn === undefined) fn = (each) => each;
+  let max = fn(this[0]);
+  for (let i = 1; i < this.length; i++) {
+    max = Math.max(max, fn(this[i]));
+  }
+  return max;
+})
+define(Array.prototype, 'first', function() {
+  return this[0]
+});
+define(Array.prototype, 'last', function() {
+  return this[this.length - 1]
+});
+
+// ===========================================================================
+// Map Log Events
+
+class MapLogEntry extends LogEntry {
+  edge = void 0;
+  children = [];
+  depth = 0;
+  _isDeprecated = false;
+  deprecatedTargets = null;
+  leftId = 0;
+  rightId = 0;
+  filePosition = '';
+  script = '';
+  id = -1;
+  constructor(id, time) {
+    if (!time) throw new Error('Invalid time');
+    super(id, time);
+    MapLogEntry.set(id, this);
+    this.id = id;
+  }
+
+  finalizeRootMap(id) {
+    let stack = [this];
+    while (stack.length > 0) {
+      let current = stack.pop();
+      if (current.leftId !== 0) {
+        console.warn('Skipping potential parent loop between maps:', current)
+        continue;
+      }
+      current.finalize(id)
+      id += 1;
+      current.children.forEach(edge => stack.push(edge.to))
+      // TODO implement rightId
+    }
+    return id;
+  }
+
+  finalize(id) {
+    // Initialize preorder tree traversal Ids for fast subtree inclusion checks
+    if (id <= 0) throw 'invalid id';
+    let currentId = id;
+    this.leftId = currentId
+  }
+
+  parent() {
+    if (this.edge === void 0) return void 0;
+    return this.edge.from;
+  }
+
+  isDeprecated() {
+    return this._isDeprecated;
+  }
+
+  deprecate() {
+    this._isDeprecated = true;
+  }
+
+  isRoot() {
+    return this.edge === void 0 || this.edge.from === void 0;
+  }
+
+  contains(map) {
+    return this.leftId < map.leftId && map.rightId < this.rightId;
+  }
+
+  addEdge(edge) {
+    this.children.push(edge);
+  }
+
+  chunkIndex(chunks) {
+    // Did anybody say O(n)?
+    for (let i = 0; i < chunks.length; i++) {
+      let chunk = chunks[i];
+      if (chunk.isEmpty()) continue;
+      if (chunk.last().time < this.time) continue;
+      return i;
+    }
+    return -1;
+  }
+
+  position(chunks) {
+    let index = this.chunkIndex(chunks);
+    let xFrom = (index + 1.5) * kChunkWidth;
+    let yFrom = kChunkHeight - chunks[index].yOffset(this);
+    return [xFrom, yFrom];
+  }
+
+  transitions() {
+    let transitions = Object.create(null);
+    let current = this;
+    while (current) {
+      let edge = current.edge;
+      if (edge && edge.isTransition()) {
+        transitions[edge.name] = edge;
+      }
+      current = current.parent()
+    }
+    return transitions;
+  }
+
+  get type() {
+    return this.edge === void 0 ? 'new' : this.edge.type;
+  }
+
+  isBootstrapped() {
+    return this.edge === void 0;
+  }
+
+  getParents() {
+    let parents = [];
+    let current = this.parent();
+    while (current) {
+      parents.push(current);
+      current = current.parent();
+    }
+    return parents;
+  }
+
+  static get(id, time = undefined) {
+    let maps = this.cache.get(id);
+    if (maps) {
+      for (let i = 1; i < maps.length; i++) {
+        if (maps[i].time > time) {
+          return maps[i - 1];
+        }
+      }
+      // default return the latest
+      return (maps.length > 0) ? maps[maps.length - 1] : undefined;
+    }
+  }
+
+  static set(id, map) {
+    if (this.cache.has(id)) {
+      this.cache.get(id).push(map);
+    } else {
+      this.cache.set(id, [map]);
+    }
+  }
+}
+
+MapLogEntry.cache = new Map();
+
+// ===========================================================================
+class Edge {
+  constructor(type, name, reason, time, from, to) {
+    this.type = type;
+    this.name = name;
+    this.reason = reason;
+    this.time = time;
+    this.from = from;
+    this.to = to;
+  }
+
+  finishSetup() {
+    const from = this.from;
+    if (from) from.addEdge(this);
+    const to = this.to;
+    if (to === undefined) return;
+    to.edge = this;
+    if (from === undefined) return;
+    if (to === from) throw 'From and to must be distinct.';
+    if (to.time < from.time) {
+      console.warn('invalid time order');
+    }
+    let newDepth = from.depth + 1;
+    if (to.depth > 0 && to.depth != newDepth) {
+      console.warn('Depth has already been initialized');
+    }
+    to.depth = newDepth;
+  }
+
+  chunkIndex(chunks) {
+    // Did anybody say O(n)?
+    for (let i = 0; i < chunks.length; i++) {
+      let chunk = chunks[i];
+      if (chunk.isEmpty()) continue;
+      if (chunk.last().time < this.time) continue;
+      return i;
+    }
+    return -1;
+  }
+
+  parentEdge() {
+    if (!this.from) return undefined;
+    return this.from.edge;
+  }
+
+  chainLength() {
+    let length = 0;
+    let prev = this;
+    while (prev) {
+      prev = prev.parentEdge();
+      length++;
+    }
+    return length;
+  }
+
+  isTransition() {
+    return this.type === 'Transition'
+  }
+
+  isFastToSlow() {
+    return this.type === 'Normalize'
+  }
+
+  isSlowToFast() {
+    return this.type === 'SlowToFast'
+  }
+
+  isInitial() {
+    return this.type === 'InitialMap'
+  }
+
+  isBootstrapped() {
+    return this.type === 'new'
+  }
+
+  isReplaceDescriptors() {
+    return this.type === 'ReplaceDescriptors'
+  }
+
+  isCopyAsPrototype() {
+    return this.reason === 'CopyAsPrototype'
+  }
+
+  isOptimizeAsPrototype() {
+    return this.reason === 'OptimizeAsPrototype'
+  }
+
+  symbol() {
+    if (this.isTransition()) return '+';
+    if (this.isFastToSlow()) return '⊡';
+    if (this.isSlowToFast()) return '⊛';
+    if (this.isReplaceDescriptors()) {
+      if (this.name) return '+';
+      return '∥';
+    }
+    return '';
+  }
+
+  toString() {
+    let s = this.symbol();
+    if (this.isTransition()) return s + this.name;
+    if (this.isFastToSlow()) return s + this.reason;
+    if (this.isCopyAsPrototype()) return s + 'Copy as Prototype';
+    if (this.isOptimizeAsPrototype()) {
+      return s + 'Optimize as Prototype';
+    }
+    if (this.isReplaceDescriptors() && this.name) {
+      return this.type + ' ' + this.symbol() + this.name;
+    }
+    return this.type + ' ' + (this.reason ? this.reason : '') + ' ' +
+        (this.name ? this.name : '')
+  }
+}
+
+export {MapLogEntry, Edge, kChunkWidth, kChunkHeight};
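MapLogEntry above registers every instance in a static per-address cache, so MapLogEntry.get(id, time) resolves a reused map address to the entry that was live at the given timestamp. A small sketch of that behaviour:

import {MapLogEntry} from './log/map.mjs';

const first = new MapLogEntry('0x1234', 10);
const reused = new MapLogEntry('0x1234', 50);  // same address, reused later in the log

console.log(MapLogEntry.get('0x1234', 20) === first);  // true: entry live at time 20
console.log(MapLogEntry.get('0x1234') === reused);     // true: latest entry by default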
diff --git a/src/third_party/v8/tools/system-analyzer/map-panel-template.html b/src/third_party/v8/tools/system-analyzer/map-panel-template.html
new file mode 100644
index 0000000..12d6ec5
--- /dev/null
+++ b/src/third_party/v8/tools/system-analyzer/map-panel-template.html
@@ -0,0 +1,21 @@
+<!-- Copyright 2020 the V8 project authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file. -->
+
+<head>
+  <link href="./index.css" rel="stylesheet">
+</head>
+<style>
+  #searchBarInput {
+    width: 200px;
+  }
+</style>
+<div class="panel">
+  <h2>Map Panel</h2>
+  <map-transitions id="map-transitions"></map-transitions>
+  <h3>Search Map by Address</h3>
+  <section id="searchBar"></section>
+  <input type="search" id="searchBarInput">
+  <button id="searchBarBtn">Search</button>
+  <map-details id="map-details"></map-details>
+</div>
diff --git a/src/third_party/v8/tools/system-analyzer/map-panel.mjs b/src/third_party/v8/tools/system-analyzer/map-panel.mjs
new file mode 100644
index 0000000..1516038
--- /dev/null
+++ b/src/third_party/v8/tools/system-analyzer/map-panel.mjs
@@ -0,0 +1,72 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+import './stats-panel.mjs';
+import './map-panel/map-details.mjs';
+import './map-panel/map-transitions.mjs';
+
+import {FocusEvent} from './events.mjs';
+import {DOM, V8CustomElement} from './helper.mjs';
+import {MapLogEntry} from './log/map.mjs';
+
+DOM.defineCustomElement('map-panel',
+                        (templateText) =>
+                            class MapPanel extends V8CustomElement {
+  _map;
+  constructor() {
+    super(templateText);
+    this.searchBarBtn.addEventListener('click', e => this.handleSearchBar(e));
+    this.addEventListener(FocusEvent.name, e => this.handleUpdateMapDetails(e));
+  }
+
+  handleUpdateMapDetails(e) {
+    if (e.entry instanceof MapLogEntry) {
+      this.mapDetailsPanel.map = e.entry;
+    }
+  }
+
+  get mapTransitionsPanel() {
+    return this.$('#map-transitions');
+  }
+
+  get mapDetailsPanel() {
+    return this.$('#map-details');
+  }
+
+  get searchBarBtn() {
+    return this.$('#searchBarBtn');
+  }
+
+  get searchBar() {
+    return this.$('#searchBar');
+  }
+
+  set timeline(timeline) {
+    this._timeline = timeline;
+  }
+
+  set map(value) {
+    this._map = value;
+    this.mapTransitionsPanel.map = this._map;
+  }
+
+  handleSearchBar(e) {
+    let searchBar = this.$('#searchBarInput');
+    let searchBarInput = searchBar.value;
+    // access the map from model cache
+    let selectedMap = MapLogEntry.get(parseInt(searchBarInput));
+    if (selectedMap) {
+      searchBar.className = 'success';
+    } else {
+      searchBar.className = 'failure';
+    }
+    this.dispatchEvent(new FocusEvent(selectedMap));
+  }
+
+  set selectedMapLogEntries(list) {
+    this.mapTransitionsPanel.selectedMapLogEntries = list;
+  }
+  get selectedMapLogEntries() {
+    return this.mapTransitionsPanel.selectedMapLogEntries;
+  }
+});
diff --git a/src/third_party/v8/tools/system-analyzer/map-panel/map-details-template.html b/src/third_party/v8/tools/system-analyzer/map-panel/map-details-template.html
new file mode 100644
index 0000000..6d1b268
--- /dev/null
+++ b/src/third_party/v8/tools/system-analyzer/map-panel/map-details-template.html
@@ -0,0 +1,23 @@
+<!-- Copyright 2020 the V8 project authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file. -->
+
+<head>
+  <link href="./index.css" rel="stylesheet">
+</head>
+<style>
+  #mapDetails,
+  #filePositionNode {
+    overflow-x: scroll;
+  }
+
+  #mapDetails::-webkit-scrollbar {
+    width: 0;
+    background-color: transparent;
+  }
+</style>
+<div class="panel">
+  <h4>Map Details</h4>
+  <section id="filePositionNode"></section>
+  <section id="mapDetails"></section>
+</div>
diff --git a/src/third_party/v8/tools/system-analyzer/map-panel/map-details.mjs b/src/third_party/v8/tools/system-analyzer/map-panel/map-details.mjs
new file mode 100644
index 0000000..bcf8f9c
--- /dev/null
+++ b/src/third_party/v8/tools/system-analyzer/map-panel/map-details.mjs
@@ -0,0 +1,47 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+import {FocusEvent} from '../events.mjs';
+import {DOM, V8CustomElement} from '../helper.mjs';
+
+DOM.defineCustomElement(
+    './map-panel/map-details',
+    (templateText) => class MapDetails extends V8CustomElement {
+      _map;
+
+      constructor() {
+        super(templateText);
+        this._filePositionNode.onclick = e => this._handleFilePositionClick(e);
+      }
+
+      get _mapDetails() {
+        return this.$('#mapDetails');
+      }
+
+      get _filePositionNode() {
+        return this.$('#filePositionNode');
+      }
+
+      set map(map) {
+        if (this._map === map) return;
+        this._map = map;
+        this.update();
+      }
+
+      _update() {
+        let details = '';
+        let clickableDetails = '';
+        if (this._map) {
+          clickableDetails = `ID: ${this._map.id}`;
+          clickableDetails += `\nSource location: ${this._map.filePosition}`;
+          details = this._map.description;
+        }
+        this._filePositionNode.innerText = clickableDetails;
+        this._filePositionNode.classList.add('clickable');
+        this._mapDetails.innerText = details;
+      }
+
+      _handleFilePositionClick(event) {
+        this.dispatchEvent(new FocusEvent(this._map.sourcePosition));
+      }
+    });
diff --git a/src/third_party/v8/tools/system-analyzer/map-panel/map-transitions-template.html b/src/third_party/v8/tools/system-analyzer/map-panel/map-transitions-template.html
new file mode 100644
index 0000000..c4cab2b
--- /dev/null
+++ b/src/third_party/v8/tools/system-analyzer/map-panel/map-transitions-template.html
@@ -0,0 +1,148 @@
+<!-- Copyright 2020 the V8 project authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file. -->
+
+<head>
+  <link href="./index.css" rel="stylesheet">
+</head>
+<style>
+  #transitionView {
+    overflow-x: scroll;
+    white-space: nowrap;
+    min-height: 50px;
+    max-height: 200px;
+    padding: 50px 0 0 0;
+    margin-top: -25px;
+    width: 100%;
+  }
+
+  #transitionView::-webkit-scrollbar {
+    width: 0;
+    background-color: transparent;
+  }
+
+  .map {
+    width: 20px;
+    height: 20px;
+    display: inline-block;
+    border-radius: 50%;
+    background-color: var(--map-background-color);
+    border: 4px solid var(--surface-color);
+    font-size: 10px;
+    text-align: center;
+    line-height: 18px;
+    color: var(--on-surface-color);
+    vertical-align: top;
+    margin-top: -13px;
+    /* raise z-index */
+    position: relative;
+    z-index: 2;
+    cursor: pointer;
+  }
+
+  .map.selected {
+    border-color: var(--on-surface-color);
+  }
+
+  .transitions {
+    display: inline-block;
+    margin-left: -15px;
+  }
+
+  .transition {
+    min-height: 55px;
+    margin: 0 0 -2px 2px;
+  }
+
+  /* gray out deprecated transitions */
+  .deprecated>.transitionEdge,
+  .deprecated>.map {
+    opacity: 0.5;
+  }
+
+  .deprecated>.transition {
+    border-color: rgba(0, 0, 0, 0.5);
+  }
+
+  /* Show a border for all but the first transition */
+  .transition:nth-of-type(2),
+  .transition:nth-last-of-type(n+2) {
+    border-left: 2px solid;
+    margin-left: 0px;
+  }
+
+  /* special case for 2 transitions */
+  .transition:nth-last-of-type(1) {
+    border-left: none;
+  }
+
+  /* topmost transitions are not related */
+  #transitionView>.transition {
+    border-left: none;
+  }
+
+  /* topmost transition edge needs initial offset to be aligned */
+  #transitionView>.transition>.transitionEdge {
+    margin-left: 13px;
+  }
+
+  .transitionEdge {
+    height: 2px;
+    width: 80px;
+    display: inline-block;
+    margin: 0 0 2px 0;
+    background-color: var(--map-background-color);
+    vertical-align: top;
+    padding-left: 15px;
+  }
+
+  .transitionLabel {
+    color: var(--on-surface-color);
+    transform: rotate(-15deg);
+    transform-origin: top left;
+    margin-top: -10px;
+    font-size: 10px;
+    white-space: normal;
+    word-break: break-all;
+    background-color: var(--surface-color);
+  }
+
+  .showSubtransitions {
+    width: 0;
+    height: 0;
+    border-left: 6px solid transparent;
+    border-right: 6px solid transparent;
+    border-top: 10px solid var(--map-background-color);
+    cursor: zoom-in;
+    margin: 4px 0 0 4px;
+  }
+
+  .showSubtransitions.opened {
+    border-top: none;
+    border-bottom: 10px solid var(--map-background-color);
+    cursor: zoom-out;
+  }
+
+  #tooltip {
+    position: absolute;
+    width: 10px;
+    height: 10px;
+    background-color: var(--red);
+    pointer-events: none;
+    z-index: 100;
+    display: none;
+  }
+
+  #title {
+    padding-bottom: 10px;
+  }
+</style>
+<div class="panel">
+  <div id="title">
+    <h4>Transitions</h4>
+  </div>
+  <section id="transitionView"></section>
+  <div id="tooltip">
+    <div id="tooltipContents"></div>
+  </div>
+</div>
diff --git a/src/third_party/v8/tools/system-analyzer/map-panel/map-transitions.mjs b/src/third_party/v8/tools/system-analyzer/map-panel/map-transitions.mjs
new file mode 100644
index 0000000..60462a1
--- /dev/null
+++ b/src/third_party/v8/tools/system-analyzer/map-panel/map-transitions.mjs
@@ -0,0 +1,184 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+import {FocusEvent, SelectionEvent} from '../events.mjs';
+import {DOM, typeToColor, V8CustomElement} from '../helper.mjs';
+
+DOM.defineCustomElement('./map-panel/map-transitions',
+                        (templateText) =>
+                            class MapTransitions extends V8CustomElement {
+  _map;
+  _selectedMapLogEntries;
+  _displayedMapsInTree;
+
+  constructor() {
+    super(templateText);
+    this.transitionView.addEventListener(
+        'mousemove', (e) => this.handleTransitionViewChange(e));
+    this.currentNode = this.transitionView;
+    this.currentMap = undefined;
+  }
+
+  get transitionView() {
+    return this.$('#transitionView');
+  }
+
+  get tooltip() {
+    return this.$('#tooltip');
+  }
+
+  get tooltipContents() {
+    return this.$('#tooltipContents');
+  }
+
+  set map(value) {
+    this._map = value;
+    this.showMap();
+  }
+
+  handleTransitionViewChange(e) {
+    this.tooltip.style.left = e.pageX + 'px';
+    this.tooltip.style.top = e.pageY + 'px';
+    const map = e.target.map;
+    if (map) {
+      this.tooltipContents.innerText = map.description;
+    }
+  }
+
+  _selectMap(map) {
+    this.dispatchEvent(new SelectionEvent([map]));
+  }
+
+  showMap() {
+    if (this.currentMap === this._map) return;
+    this.currentMap = this._map;
+    this.selectedMapLogEntries = [this._map];
+    this.update();
+  }
+
+  _update() {
+    this.transitionView.style.display = 'none';
+    DOM.removeAllChildren(this.transitionView);
+    this._displayedMapsInTree = new Set();
+    // Limit view to 200 maps for performance reasons.
+    this.selectedMapLogEntries.slice(0, 200).forEach(
+        (map) => this.addMapAndParentTransitions(map));
+    this._displayedMapsInTree = undefined;
+    this.transitionView.style.display = '';
+  }
+
+  set selectedMapLogEntries(list) {
+    this._selectedMapLogEntries = list;
+    this.update();
+  }
+
+  get selectedMapLogEntries() {
+    return this._selectedMapLogEntries;
+  }
+
+  addMapAndParentTransitions(map) {
+    if (map === void 0) return;
+    if (this._displayedMapsInTree.has(map)) return;
+    this._displayedMapsInTree.add(map);
+    this.currentNode = this.transitionView;
+    let parents = map.getParents();
+    if (parents.length > 0) {
+      this.addTransitionTo(parents.pop());
+      parents.reverse().forEach((each) => this.addTransitionTo(each));
+    }
+    let mapNode = this.addSubtransitions(map);
+    // Mark and show the selected map.
+    mapNode.classList.add('selected');
+    if (this.selectedMap == map) {
+      setTimeout(
+          () => mapNode.scrollIntoView({
+            behavior: 'smooth',
+            block: 'nearest',
+            inline: 'nearest',
+          }),
+          1);
+    }
+  }
+
+  addSubtransitions(map) {
+    let mapNode = this.addTransitionTo(map);
+    // Draw outgoing linear transition line.
+    let current = map;
+    while (current.children.length == 1) {
+      current = current.children[0].to;
+      this.addTransitionTo(current);
+    }
+    return mapNode;
+  }
+
+  addTransitionEdge(map) {
+    let classes = ['transitionEdge'];
+    let edge = DOM.div(classes);
+    edge.style.backgroundColor = typeToColor(map.edge);
+    let labelNode = DOM.div('transitionLabel');
+    labelNode.innerText = map.edge.toString();
+    edge.appendChild(labelNode);
+    return edge;
+  }
+
+  addTransitionTo(map) {
+    // transition[ transitions[ transition[...], transition[...], ...]];
+    this._displayedMapsInTree?.add(map);
+    let transition = DOM.div('transition');
+    if (map.isDeprecated()) transition.classList.add('deprecated');
+    if (map.edge) {
+      transition.appendChild(this.addTransitionEdge(map));
+    }
+    let mapNode = this.addMapNode(map);
+    transition.appendChild(mapNode);
+
+    let subtree = DOM.div('transitions');
+    transition.appendChild(subtree);
+
+    this.currentNode.appendChild(transition);
+    this.currentNode = subtree;
+
+    return mapNode;
+  }
+
+  addMapNode(map) {
+    let node = DOM.div('map');
+    if (map.edge) node.style.backgroundColor = typeToColor(map.edge);
+    node.map = map;
+    node.addEventListener('click', () => this._selectMap(map));
+    if (map.children.length > 1) {
+      node.innerText = map.children.length;
+      let showSubtree = DOM.div('showSubtransitions');
+      showSubtree.addEventListener('click', (e) => this.toggleSubtree(e, node));
+      node.appendChild(showSubtree);
+    } else if (map.children.length == 0) {
+      node.innerHTML = '&#x25CF;';
+    }
+    this.currentNode.appendChild(node);
+    return node;
+  }
+
+  toggleSubtree(event, node) {
+    let map = node.map;
+    event.target.classList.toggle('opened');
+    let transitionsNode = node.parentElement.querySelector('.transitions');
+    let subtransitionNodes = transitionsNode.children;
+    if (subtransitionNodes.length <= 1) {
+      // Add subtransitions except the one that's already shown.
+      let visibleTransitionMap = subtransitionNodes.length == 1 ?
+          transitionsNode.querySelector('.map').map :
+          void 0;
+      map.children.forEach((edge) => {
+        if (edge.to != visibleTransitionMap) {
+          this.currentNode = transitionsNode;
+          this.addSubtransitions(edge.to);
+        }
+      });
+    } else {
+      // remove all but the first (currently selected) subtransition
+      for (let i = subtransitionNodes.length - 1; i > 0; i--) {
+        transitionsNode.removeChild(subtransitionNodes[i]);
+      }
+    }
+  }
+});
diff --git a/src/third_party/v8/tools/system-analyzer/processor.mjs b/src/third_party/v8/tools/system-analyzer/processor.mjs
new file mode 100644
index 0000000..49448bb
--- /dev/null
+++ b/src/third_party/v8/tools/system-analyzer/processor.mjs
@@ -0,0 +1,358 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import {LogReader, parseString, parseVarArgs} from '../logreader.mjs';
+import {Profile} from '../profile.mjs';
+
+import {DeoptLogEntry} from './log/deopt.mjs';
+import {IcLogEntry} from './log/ic.mjs';
+import {Edge, MapLogEntry} from './log/map.mjs';
+import {Timeline} from './timeline.mjs';
+
+// ===========================================================================
+
+export class Processor extends LogReader {
+  _profile = new Profile();
+  _mapTimeline = new Timeline();
+  _icTimeline = new Timeline();
+  _deoptTimeline = new Timeline();
+  _formatPCRegexp = /(.*):[0-9]+:[0-9]+$/;
+  MAJOR_VERSION = 7;
+  MINOR_VERSION = 6;
+  constructor(logString) {
+    super();
+    this.propertyICParser = [
+      parseInt, parseInt, parseInt, parseInt, parseString, parseString,
+      parseString, parseString, parseString, parseString
+    ];
+    this.dispatchTable_ = {
+      __proto__: null,
+      'code-creation': {
+        parsers: [
+          parseString, parseInt, parseInt, parseInt, parseInt, parseString,
+          parseVarArgs
+        ],
+        processor: this.processCodeCreation
+      },
+      'code-deopt': {
+        parsers: [
+          parseInt, parseInt, parseInt, parseInt, parseInt, parseString,
+          parseString, parseString
+        ],
+        processor: this.processCodeDeopt
+      },
+      'v8-version': {
+        parsers: [
+          parseInt,
+          parseInt,
+        ],
+        processor: this.processV8Version
+      },
+      'script-source': {
+        parsers: [parseInt, parseString, parseString],
+        processor: this.processScriptSource
+      },
+      'code-move':
+          {parsers: [parseInt, parseInt], processor: this.processCodeMove},
+      'code-delete': {parsers: [parseInt], processor: this.processCodeDelete},
+      'sfi-move':
+          {parsers: [parseInt, parseInt], processor: this.processFunctionMove},
+      'map-create':
+          {parsers: [parseInt, parseString], processor: this.processMapCreate},
+      'map': {
+        parsers: [
+          parseString, parseInt, parseString, parseString, parseInt, parseInt,
+          parseInt, parseString, parseString
+        ],
+        processor: this.processMap
+      },
+      'map-details': {
+        parsers: [parseInt, parseString, parseString],
+        processor: this.processMapDetails
+      },
+      'LoadGlobalIC': {
+        parsers: this.propertyICParser,
+        processor: this.processPropertyIC.bind(this, 'LoadGlobalIC')
+      },
+      'StoreGlobalIC': {
+        parsers: this.propertyICParser,
+        processor: this.processPropertyIC.bind(this, 'StoreGlobalIC')
+      },
+      'LoadIC': {
+        parsers: this.propertyICParser,
+        processor: this.processPropertyIC.bind(this, 'LoadIC')
+      },
+      'StoreIC': {
+        parsers: this.propertyICParser,
+        processor: this.processPropertyIC.bind(this, 'StoreIC')
+      },
+      'KeyedLoadIC': {
+        parsers: this.propertyICParser,
+        processor: this.processPropertyIC.bind(this, 'KeyedLoadIC')
+      },
+      'KeyedStoreIC': {
+        parsers: this.propertyICParser,
+        processor: this.processPropertyIC.bind(this, 'KeyedStoreIC')
+      },
+      'StoreInArrayLiteralIC': {
+        parsers: this.propertyICParser,
+        processor: this.processPropertyIC.bind(this, 'StoreInArrayLiteralIC')
+      },
+    };
+    if (logString) this.processString(logString);
+  }
+
+  printError(str) {
+    console.error(str);
+    throw str
+  }
+
+  processString(string) {
+    let end = string.length;
+    let current = 0;
+    let next = 0;
+    let line;
+    let i = 0;
+    let entry;
+    try {
+      while (current < end) {
+        next = string.indexOf('\n', current);
+        if (next === -1) break;
+        i++;
+        line = string.substring(current, next);
+        current = next + 1;
+        this.processLogLine(line);
+      }
+    } catch (e) {
+      console.error(`Error occurred during parsing, trying to continue: ${e}`);
+    }
+    this.finalize();
+  }
+
+  processLogFile(fileName) {
+    this.collectEntries = true;
+    this.lastLogFileName_ = fileName;
+    let i = 1;
+    let line;
+    try {
+      while (line = readline()) {
+        this.processLogLine(line);
+        i++;
+      }
+    } catch (e) {
+      console.error(
+          `Error occurred during parsing line ${i}` +
+          ', trying to continue: ' + e);
+    }
+    this.finalize();
+  }
+
+  finalize() {
+    // TODO(cbruni): print stats;
+    this._mapTimeline.transitions = new Map();
+    let id = 0;
+    this._mapTimeline.forEach(map => {
+      if (map.isRoot()) id = map.finalizeRootMap(id + 1);
+      if (map.edge && map.edge.name) {
+        const edge = map.edge;
+        const list = this._mapTimeline.transitions.get(edge.name);
+        if (list === undefined) {
+          this._mapTimeline.transitions.set(edge.name, [edge]);
+        } else {
+          list.push(edge);
+        }
+      }
+    });
+  }
+
+  /**
+   * Parser for dynamic code optimization state.
+   */
+  parseState(s) {
+    switch (s) {
+      case '':
+        return Profile.CodeState.COMPILED;
+      case '~':
+        return Profile.CodeState.OPTIMIZABLE;
+      case '*':
+        return Profile.CodeState.OPTIMIZED;
+    }
+    throw new Error(`unknown code state: ${s}`);
+  }
+
+  processCodeCreation(type, kind, timestamp, start, size, name, maybe_func) {
+    if (maybe_func.length) {
+      const funcAddr = parseInt(maybe_func[0]);
+      const state = this.parseState(maybe_func[1]);
+      this._profile.addFuncCode(
+          type, name, timestamp, start, size, funcAddr, state);
+    } else {
+      this._profile.addCode(type, name, timestamp, start, size);
+    }
+  }
+
+  processCodeDeopt(
+      timestamp, codeSize, instructionStart, inliningId, scriptOffset,
+      deoptKind, deoptLocation, deoptReason) {
+    this._deoptTimeline.push(new DeoptLogEntry(deoptKind, timestamp));
+  }
+
+  processV8Version(majorVersion, minorVersion) {
+    if ((majorVersion == this.MAJOR_VERSION &&
+         minorVersion <= this.MINOR_VERSION) ||
+        (majorVersion < this.MAJOR_VERSION)) {
+      window.alert(
+          `Unsupported version ${majorVersion}.${minorVersion}. \n` +
+          `Please use the matching tool for the given V8 version.`);
+    }
+  }
+
+  processScriptSource(scriptId, url, source) {
+    this._profile.addScriptSource(scriptId, url, source);
+  }
+
+  processCodeMove(from, to) {
+    this._profile.moveCode(from, to);
+  }
+
+  processCodeDelete(start) {
+    this._profile.deleteCode(start);
+  }
+
+  processFunctionMove(from, to) {
+    this._profile.moveFunc(from, to);
+  }
+
+  formatName(entry) {
+    if (!entry) return '<unknown>';
+    let name = entry.func.getName();
+    let re = /(.*):[0-9]+:[0-9]+$/;
+    let array = re.exec(name);
+    if (!array) return name;
+    return entry.getState() + array[1];
+  }
+
+  processPropertyIC(
+      type, pc, time, line, column, old_state, new_state, map, key, modifier,
+      slow_reason) {
+    let fnName = this.functionName(pc);
+    let parts = fnName.split(' ');
+    let fileName = parts[parts.length - 1];
+    let script = this.getScript(fileName);
+    // TODO: Use SourcePosition here directly
+    let entry = new IcLogEntry(
+        type, fnName, time, line, column, key, old_state, new_state, map,
+        slow_reason, script, modifier);
+    if (script) {
+      entry.sourcePosition = script.addSourcePosition(line, column, entry);
+    }
+    this._icTimeline.push(entry);
+  }
+
+  functionName(pc) {
+    let entry = this._profile.findEntry(pc);
+    return this.formatName(entry);
+  }
+  formatPC(pc, line, column) {
+    let entry = this._profile.findEntry(pc);
+    if (!entry) return '<unknown>';
+    if (entry.type === 'Builtin') {
+      return entry.name;
+    }
+    let name = entry.func.getName();
+    let array = this._formatPCRegexp.exec(name);
+    if (array === null) {
+      entry = name;
+    } else {
+      entry = entry.getState() + array[1];
+    }
+    return entry + ':' + line + ':' + column;
+  }
+
+  processFileName(filePositionLine) {
+    if (!filePositionLine.includes(' ')) return;
+    // Try to handle urls with file positions: https://foo.bar.com/:17:330
+    filePositionLine = filePositionLine.split(' ');
+    let parts = filePositionLine[1].split(':');
+    if (parts[0].length <= 5) return parts[0] + ':' + parts[1];
+    return parts[1];
+  }
+
+  processMap(type, time, from, to, pc, line, column, reason, name) {
+    let time_ = parseInt(time);
+    if (type === 'Deprecate') return this.deprecateMap(type, time_, from);
+    let from_ = this.getExistingMapEntry(from, time_);
+    let to_ = this.getExistingMapEntry(to, time_);
+    // TODO: use SourcePosition directly.
+    let edge = new Edge(type, name, reason, time, from_, to_);
+    to_.filePosition = this.formatPC(pc, line, column);
+    let fileName = this.processFileName(to_.filePosition);
+    // TODO: avoid undefined source positions.
+    if (fileName !== undefined) {
+      to_.script = this.getScript(fileName);
+    }
+    if (to_.script) {
+      to_.sourcePosition = to_.script.addSourcePosition(line, column, to_)
+    }
+    edge.finishSetup();
+  }
+
+  deprecateMap(type, time, id) {
+    this.getExistingMapEntry(id, time).deprecate();
+  }
+
+  processMapCreate(time, id) {
+    // map-create events might override existing maps if the addresses get
+    // recycled. Hence we do not check for existing maps.
+    let map = this.createMapEntry(id, time);
+  }
+
+  processMapDetails(time, id, string) {
+    // TODO(cbruni): fix initial map logging.
+    let map = this.getExistingMapEntry(id, time);
+    map.description = string;
+  }
+
+  createMapEntry(id, time) {
+    let map = new MapLogEntry(id, time);
+    this._mapTimeline.push(map);
+    return map;
+  }
+
+  getExistingMapEntry(id, time) {
+    if (id === '0x000000000000') return undefined;
+    let map = MapLogEntry.get(id, time);
+    if (map === undefined) {
+      console.error(`No map details provided: id=${id}`);
+      // Manually patch in a map to continue running.
+      return this.createMapEntry(id, time);
+    };
+    return map;
+  }
+
+  getScript(url) {
+    const script = this._profile.getScript(url);
+    // TODO create placeholder script for empty urls.
+    if (script === undefined) {
+      console.error(`Could not find script for url: '${url}'`)
+    }
+    return script;
+  }
+
+  get icTimeline() {
+    return this._icTimeline;
+  }
+
+  get mapTimeline() {
+    return this._mapTimeline;
+  }
+
+  get deoptTimeline() {
+    return this._deoptTimeline;
+  }
+
+  get scripts() {
+    return this._profile.scripts_.filter(script => script !== undefined);
+  }
+}
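Processor above is a LogReader subclass; passing the raw log text to the constructor runs processString immediately and populates the map, IC and deopt timelines. A hedged sketch (the sample line follows the comma-separated v8.log format but is illustrative, not copied from a real log, which would typically be produced by running d8 with map and IC tracing enabled):

import {Processor} from './processor.mjs';

const logText = 'v8-version,8,5\n';  // illustrative log line only
const processor = new Processor(logText);
console.log(processor.mapTimeline, processor.icTimeline, processor.deoptTimeline);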
diff --git a/src/third_party/v8/tools/system-analyzer/source-panel-template.html b/src/third_party/v8/tools/system-analyzer/source-panel-template.html
new file mode 100644
index 0000000..01b7770
--- /dev/null
+++ b/src/third_party/v8/tools/system-analyzer/source-panel-template.html
@@ -0,0 +1,54 @@
+<!-- Copyright 2020 the V8 project authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file. -->
+
+<head>
+  <link href="./index.css" rel="stylesheet">
+</head>
+<style>
+  pre.scriptNode {
+    white-space: pre-wrap;
+  }
+
+  pre.scriptNode:before {
+    counter-reset: sourceLineCounter;
+  }
+
+  pre.scriptNode span {
+    counter-increment: sourceLineCounter;
+  }
+
+  pre.scriptNode span::before {
+    content: counter(sourceLineCounter) ": ";
+    display: inline-block;
+    width: 4em;
+    padding-left: auto;
+    margin-left: auto;
+    text-align: right;
+  }
+
+  mark {
+    width: 1ch;
+    border-radius: 2px;
+    border: 0.5px var(--background-color) solid;
+    cursor: pointer;
+    background-color: var(--primary-color);
+    color: var(--on-primary-color);
+  }
+
+  .marked {
+    background-color: var(--secondary-color);
+  }
+
+  #script-dropdown {
+    width: 100%;
+    margin-bottom: 10px;
+  }
+</style>
+<div class="panel">
+  <h2>Source Panel</h2>
+  <select id="script-dropdown"></select>
+  <div id="script" class="panelBody">
+    <pre class="scriptNode"></pre>
+  </div>
+</div>
diff --git a/src/third_party/v8/tools/system-analyzer/source-panel.mjs b/src/third_party/v8/tools/system-analyzer/source-panel.mjs
new file mode 100644
index 0000000..a4dc07f
--- /dev/null
+++ b/src/third_party/v8/tools/system-analyzer/source-panel.mjs
@@ -0,0 +1,237 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+import {FocusEvent, SelectionEvent} from './events.mjs';
+import {delay, DOM, formatBytes, V8CustomElement} from './helper.mjs';
+import {IcLogEntry} from './log/ic.mjs';
+import {MapLogEntry} from './log/map.mjs';
+
+DOM.defineCustomElement('source-panel',
+                        (templateText) =>
+                            class SourcePanel extends V8CustomElement {
+  _selectedSourcePositions = [];
+  _sourcePositionsToMarkNodes;
+  _scripts = [];
+  _script;
+  constructor() {
+    super(templateText);
+    this.scriptDropdown.addEventListener(
+        'change', e => this._handleSelectScript(e));
+  }
+
+  get script() {
+    return this.$('#script');
+  }
+
+  get scriptNode() {
+    return this.$('.scriptNode');
+  }
+
+  set script(script) {
+    if (this._script === script) return;
+    this._script = script;
+    this._renderSourcePanel();
+    this._updateScriptDropdownSelection();
+  }
+
+  set selectedSourcePositions(sourcePositions) {
+    this._selectedSourcePositions = sourcePositions;
+    // TODO: highlight multiple scripts
+    this.script = sourcePositions[0]?.script;
+    this._focusSelectedMarkers();
+  }
+
+  set data(scripts) {
+    this._scripts = scripts;
+    this._initializeScriptDropdown();
+  }
+
+  get scriptDropdown() {
+    return this.$('#script-dropdown');
+  }
+
+  _initializeScriptDropdown() {
+    this._scripts.sort((a, b) => a.name.localeCompare(b.name));
+    let select = this.scriptDropdown;
+    select.options.length = 0;
+    for (const script of this._scripts) {
+      const option = document.createElement('option');
+      const size = formatBytes(script.source.length);
+      option.text = `${script.name} (id=${script.id} size=${size})`;
+      option.script = script;
+      select.add(option);
+    }
+  }
+  _updateScriptDropdownSelection() {
+    this.scriptDropdown.selectedIndex =
+        this._script ? this._scripts.indexOf(this._script) : -1;
+  }
+
+  async _renderSourcePanel() {
+    let scriptNode;
+    if (this._script) {
+      await delay(1);
+      const builder =
+          new LineBuilder(this, this._script, this._selectedSourcePositions);
+      scriptNode = builder.createScriptNode();
+      this._sourcePositionsToMarkNodes = builder.sourcePositionToMarkers;
+    } else {
+      scriptNode = document.createElement('pre');
+      this._selectedMarkNodes = undefined;
+    }
+    const oldScriptNode = this.script.childNodes[1];
+    this.script.replaceChild(scriptNode, oldScriptNode);
+  }
+
+  async _focusSelectedMarkers() {
+    await delay(100);
+    // Remove all marked nodes.
+    for (let markNode of this._sourcePositionsToMarkNodes.values()) {
+      markNode.className = '';
+    }
+    for (let sourcePosition of this._selectedSourcePositions) {
+      this._sourcePositionsToMarkNodes.get(sourcePosition).className = 'marked';
+    }
+    const sourcePosition = this._selectedSourcePositions[0];
+    if (!sourcePosition) return;
+    const markNode = this._sourcePositionsToMarkNodes.get(sourcePosition);
+    markNode.scrollIntoView(
+        {behavior: 'smooth', block: 'nearest', inline: 'center'});
+  }
+
+  _handleSelectScript(e) {
+    const option =
+        this.scriptDropdown.options[this.scriptDropdown.selectedIndex];
+    this.script = option.script;
+    this.selectLogEntries(this._script.entries());
+  }
+
+  handleSourcePositionClick(e) {
+    this.selectLogEntries(e.target.sourcePosition.entries)
+  }
+
+  selectLogEntries(logEntries) {
+    let icLogEntries = [];
+    let mapLogEntries = [];
+    for (const entry of logEntries) {
+      if (entry instanceof MapLogEntry) {
+        mapLogEntries.push(entry);
+      } else if (entry instanceof IcLogEntry) {
+        icLogEntries.push(entry);
+      }
+    }
+    if (icLogEntries.length > 0) {
+      this.dispatchEvent(new SelectionEvent(icLogEntries));
+    }
+    if (mapLogEntries.length > 0) {
+      this.dispatchEvent(new SelectionEvent(mapLogEntries));
+    }
+  }
+});
+
+class SourcePositionIterator {
+  _entries;
+  _index = 0;
+  constructor(sourcePositions) {
+    this._entries = sourcePositions;
+  }
+
+  * forLine(lineIndex) {
+    this._findStart(lineIndex);
+    while (!this._done() && this._current().line === lineIndex) {
+      yield this._current();
+      this._next();
+    }
+  }
+
+  _findStart(lineIndex) {
+    while (!this._done() && this._current().line < lineIndex) {
+      this._next();
+    }
+  }
+
+  _current() {
+    return this._entries[this._index];
+  }
+
+  _done() {
+    return this._index + 1 >= this._entries.length;
+  }
+
+  _next() {
+    this._index++;
+  }
+}
+
+function* lineIterator(source) {
+  let current = 0;
+  let line = 1;
+  while (current < source.length) {
+    const next = source.indexOf('\n', current);
+    if (next === -1) break;
+    yield [line, source.substring(current, next)];
+    line++;
+    current = next + 1;
+  }
+  if (current < source.length) yield [line, source.substring(current)];
+}
+
+class LineBuilder {
+  _script;
+  _clickHandler;
+  _sourcePositions;
+  _selection;
+  _sourcePositionToMarkers = new Map();
+
+  constructor(panel, script, highlightPositions) {
+    this._script = script;
+    this._selection = new Set(highlightPositions);
+    this._clickHandler = panel.handleSourcePositionClick.bind(panel);
+    // TODO: sort on script finalization.
+    script.sourcePositions.sort((a, b) => {
+      if (a.line === b.line) return a.column - b.column;
+      return a.line - b.line;
+    });
+    this._sourcePositions = new SourcePositionIterator(script.sourcePositions);
+  }
+
+  get sourcePositionToMarkers() {
+    return this._sourcePositionToMarkers;
+  }
+
+  createScriptNode() {
+    const scriptNode = document.createElement('pre');
+    scriptNode.classList.add('scriptNode');
+    for (let [lineIndex, line] of lineIterator(this._script.source)) {
+      scriptNode.appendChild(this._createLineNode(lineIndex, line));
+    }
+    return scriptNode;
+  }
+
+  _createLineNode(lineIndex, line) {
+    const lineNode = document.createElement('span');
+    let columnIndex = 0;
+    for (const sourcePosition of this._sourcePositions.forLine(lineIndex)) {
+      const nextColumnIndex = sourcePosition.column - 1;
+      lineNode.appendChild(document.createTextNode(
+          line.substring(columnIndex, nextColumnIndex)));
+      columnIndex = nextColumnIndex;
+
+      lineNode.appendChild(
+          this._createMarkerNode(line[columnIndex], sourcePosition));
+      columnIndex++;
+    }
+    lineNode.appendChild(
+        document.createTextNode(line.substring(columnIndex) + '\n'));
+    return lineNode;
+  }
+
+  _createMarkerNode(text, sourcePosition) {
+    const marker = document.createElement('mark');
+    this._sourcePositionToMarkers.set(sourcePosition, marker);
+    marker.textContent = text;
+    marker.sourcePosition = sourcePosition;
+    marker.onclick = this._clickHandler;
+    return marker;
+  }
+}
\ No newline at end of file
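The module-private lineIterator generator above yields 1-based line numbers paired with the line text (without the trailing newline); LineBuilder uses it to interleave source text with <mark> nodes. Purely illustrative, since the generator is not exported:

// What lineIterator yields for a two-line source string.
for (const [lineNumber, text] of lineIterator('let a = 1;\nlet b = 2;')) {
  console.log(lineNumber, text);  // 1 'let a = 1;'  then  2 'let b = 2;'
}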
diff --git a/src/third_party/v8/tools/system-analyzer/stats-panel-template.html b/src/third_party/v8/tools/system-analyzer/stats-panel-template.html
new file mode 100644
index 0000000..fb91fad
--- /dev/null
+++ b/src/third_party/v8/tools/system-analyzer/stats-panel-template.html
@@ -0,0 +1,73 @@
+<!-- Copyright 2020 the V8 project authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file. -->
+
+<head>
+  <link href="./index.css" rel="stylesheet">
+</head>
+<style>
+  #stats {
+    display: flex;
+    height: 250px;
+    background-color: var(--surface-color);
+    padding: 10px 10px 10px 10px;
+    margin: auto;
+  }
+
+  table {
+    flex: 1;
+    max-height: 250px;
+    display: inline-block;
+    overflow-y: scroll;
+    border-collapse: collapse;
+  }
+  table td {
+    padding: 2px;
+  }
+
+  table thead td {
+    border-bottom: 1px var(--on-surface-color) dotted;
+  }
+
+  table tbody td {
+    cursor: pointer;
+  }
+
+  #nameTable tr {
+    max-width: 200px;
+
+  }
+
+  #nameTable tr td:nth-child(1) {
+    text-align: right;
+  }
+
+  #typeTable {
+    text-align: right;
+    max-width: 380px;
+  }
+
+  #typeTable tr td:nth-child(2) {
+    text-align: left;
+  }
+</style>
+<div class="panel">
+  <h2>Map Stats</h2>
+  <section id="stats">
+    <table id="typeTable" class="statsTable">
+      <thead>
+        <tr><td></td><td>Type</td><td>Count</td><td>Percent</td></tr>
+      </thead>
+      <tbody></tbody>
+    </table>
+    <table id="nameTable">
+      <thead>
+        <tr><td>Count</td><td>Property Name</td></tr>
+      </thead>
+      <tbody></tbody>
+      <tfoot>
+        <tr><td colspan="2" class="clickable">Show more...</td></tr>
+      </tfoot>
+    </table>
+  </section>
+</div>
diff --git a/src/third_party/v8/tools/system-analyzer/stats-panel.mjs b/src/third_party/v8/tools/system-analyzer/stats-panel.mjs
new file mode 100644
index 0000000..dd0ac78
--- /dev/null
+++ b/src/third_party/v8/tools/system-analyzer/stats-panel.mjs
@@ -0,0 +1,129 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+import {SelectionEvent} from './events.mjs';
+import {DOM, V8CustomElement} from './helper.mjs';
+import {delay, LazyTable} from './helper.mjs';
+
+DOM.defineCustomElement(
+    'stats-panel', (templateText) => class StatsPanel extends V8CustomElement {
+      _timeline;
+      _transitions;
+      _selectedLogEntries;
+      constructor() {
+        super(templateText);
+      }
+
+      get stats() {
+        return this.$('#stats');
+      }
+
+      set timeline(timeline) {
+        this._timeline = timeline;
+        this.selectedLogEntries = timeline.all
+      }
+
+      set selectedLogEntries(entries) {
+        this._selectedLogEntries = entries;
+        this.update();
+      }
+
+      set transitions(value) {
+        this._transitions = value;
+      }
+
+      _filterUniqueTransitions(filter) {
+        // Returns a list of Maps whose parent is not in the list.
+        return this._selectedLogEntries.filter((map) => {
+          if (filter(map) === false) return false;
+          let parent = map.parent();
+          if (parent === undefined) return true;
+          return filter(parent) === false;
+        });
+      }
+
+      _update() {
+        this._updateGeneralStats();
+        this._updateNamedTransitionsStats();
+      }
+
+      _updateGeneralStats() {
+        console.assert(this._timeline !== undefined, 'Timeline not set yet!');
+        let pairs = [
+          ['Transitions', 'primary', (e) => e.edge && e.edge.isTransition()],
+          ['Fast to Slow', 'violet', (e) => e.edge && e.edge.isFastToSlow()],
+          ['Slow to Fast', 'orange', (e) => e.edge && e.edge.isSlowToFast()],
+          ['Initial Map', 'yellow', (e) => e.edge && e.edge.isInitial()],
+          [
+            'Replace Descriptors',
+            'red',
+            (e) => e.edge && e.edge.isReplaceDescriptors(),
+          ],
+          [
+            'Copy as Prototype',
+            'red',
+            (e) => e.edge && e.edge.isCopyAsPrototype(),
+          ],
+          [
+            'Optimize as Prototype',
+            null,
+            (e) => e.edge && e.edge.isOptimizeAsPrototype(),
+          ],
+          ['Deprecated', null, (e) => e.isDeprecated()],
+          ['Bootstrapped', 'green', (e) => e.isBootstrapped()],
+          ['Total', null, (e) => true],
+        ];
+
+        let tbody = document.createElement('tbody');
+        let total = this._selectedLogEntries.length;
+        pairs.forEach(([name, color, filter]) => {
+          let row = DOM.tr();
+          if (color !== null) {
+            row.appendChild(DOM.td(DOM.div(['colorbox', color])));
+          } else {
+            row.appendChild(DOM.td(''));
+          }
+          row.classList.add('clickable');
+          row.onclick = (e) => {
+            // lazily compute the stats
+            let node = e.target.parentNode;
+            if (node.maps == undefined) {
+              node.maps = this._filterUniqueTransitions(filter);
+            }
+            this.dispatchEvent(new SelectionEvent(node.maps));
+          };
+          row.appendChild(DOM.td(name));
+          let count = this._count(filter);
+          row.appendChild(DOM.td(count));
+          let percent = Math.round((count / total) * 1000) / 10;
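+          // One decimal place, e.g. count=42, total=1234:
+          // 42 / 1234 * 1000 = 34.03..., rounded to 34, then / 10 = 3.4 -> "3.4%".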
+          row.appendChild(DOM.td(percent.toFixed(1) + '%'));
+          tbody.appendChild(row);
+        });
+        this.$('#typeTable').replaceChild(tbody, this.$('#typeTable tbody'));
+      }
+
+      _count(filter) {
+        let count = 0;
+        for (const map of this._selectedLogEntries) {
+          if (filter(map)) count++;
+        }
+        return count;
+      }
+
+      _updateNamedTransitionsStats() {
+        let rowData = Array.from(this._transitions.entries());
+        rowData.sort((a, b) => b[1].length - a[1].length);
+        new LazyTable(this.$('#nameTable'), rowData, ([name, maps]) => {
+          let row = DOM.tr();
+          row.maps = maps;
+          row.classList.add('clickable');
+          row.addEventListener(
+              'click',
+              (e) => this.dispatchEvent(new SelectionEvent(
+                  e.target.parentNode.maps.map((map) => map.to))));
+          row.appendChild(DOM.td(maps.length));
+          row.appendChild(DOM.td(name));
+          return row;
+        });
+      }
+    });
diff --git a/src/third_party/v8/tools/system-analyzer/timeline-panel-template.html b/src/third_party/v8/tools/system-analyzer/timeline-panel-template.html
new file mode 100644
index 0000000..2641c71
--- /dev/null
+++ b/src/third_party/v8/tools/system-analyzer/timeline-panel-template.html
@@ -0,0 +1,13 @@
+<!-- Copyright 2020 the V8 project authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file. -->
+
+<head>
+  <link href="./index.css" rel="stylesheet">
+</head>
+<div class="panel">
+  <h2>Timeline Panel</h2>
+  <div>
+    <slot></slot>
+  </div>
+</div>
diff --git a/src/third_party/v8/tools/system-analyzer/timeline-panel.mjs b/src/third_party/v8/tools/system-analyzer/timeline-panel.mjs
new file mode 100644
index 0000000..a61d2ef
--- /dev/null
+++ b/src/third_party/v8/tools/system-analyzer/timeline-panel.mjs
@@ -0,0 +1,55 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import './timeline/timeline-track.mjs';
+
+import {SynchronizeSelectionEvent} from './events.mjs';
+import {DOM, V8CustomElement} from './helper.mjs';
+
+DOM.defineCustomElement(
+    'timeline-panel',
+    (templateText) => class TimelinePanel extends V8CustomElement {
+      constructor() {
+        super(templateText);
+        this.addEventListener('scrolltrack', e => this.handleTrackScroll(e));
+        this.addEventListener(
+            SynchronizeSelectionEvent.name,
+            e => this.handleSelectionSynchronization(e));
+      }
+
+      set nofChunks(count) {
+        for (const track of this.timelineTracks) {
+          track.nofChunks = count;
+        }
+      }
+
+      get nofChunks() {
+        return this.timelineTracks[0].nofChunks;
+      }
+
+      get timelineTracks() {
+        return this.$('slot').assignedNodes().filter(
+            node => node.nodeType === Node.ELEMENT_NODE);
+      }
+
+      handleTrackScroll(event) {
+        // TODO(zcankara) add forEachTrack helper method
+        for (const track of this.timelineTracks) {
+          track.scrollLeft = event.detail;
+        }
+      }
+
+      handleSelectionSynchronization(event) {
+        this.timeSelection = {start: event.start, end: event.end};
+      }
+
+      set timeSelection(timeSelection) {
+        if (timeSelection.start > timeSelection.end) {
+          throw new Error('Invalid time range');
+        }
+        for (const track of this.timelineTracks) {
+          track.timeSelection = timeSelection;
+        }
+      }
+    });
diff --git a/src/third_party/v8/tools/system-analyzer/timeline.mjs b/src/third_party/v8/tools/system-analyzer/timeline.mjs
new file mode 100644
index 0000000..996f108
--- /dev/null
+++ b/src/third_party/v8/tools/system-analyzer/timeline.mjs
@@ -0,0 +1,269 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+class Timeline {
+  // Class (constructor) of the log entries stored in this timeline:
+  _model;
+  // Array of _model instances, ordered by time:
+  _values;
+  // Current selection, a subset of _values:
+  _selection;
+  _uniqueTypes;
+
+  constructor(model) {
+    this._model = model;
+    this._values = [];
+    this.startTime = 0;
+    this.endTime = 0;
+  }
+
+  get model() {
+    return this._model;
+  }
+
+  get all() {
+    return this._values;
+  }
+
+  get selection() {
+    return this._selection;
+  }
+
+  set selection(value) {
+    this._selection = value;
+  }
+
+  selectTimeRange(start, end) {
+    this._selection = this.filter(e => e.time >= start && e.time <= end);
+  }
+
+  getChunks(windowSizeMs) {
+    // TODO(zcankara) Fill this one
+    return this.chunkSizes(windowSizeMs);
+  }
+
+  get values() {
+    // TODO(zcankara) Kept so existing callers don't break; delete later.
+    return this._values;
+  }
+
+  count(filter) {
+    return this.all.reduce((sum, each) => {
+      return sum + (filter(each) === true ? 1 : 0);
+    }, 0);
+  }
+
+  filter(predicate) {
+    return this.all.filter(predicate);
+  }
+
+  push(event) {
+    let time = event.time;
+    if (!this.isEmpty() && this.last().time > time) {
+      // Out-of-order insertion; this can happen without --single-process.
+      // Find the insertion point and splice the event in.
+      let insertionPoint = this.find(time);
+      this._values.splice(insertionPoint, 0, event);
+    } else {
+      this._values.push(event);
+    }
+    if (time > 0) {
+      this.endTime = Math.max(this.endTime, time);
+      if (this.startTime === 0) {
+        this.startTime = time;
+      } else {
+        this.startTime = Math.min(this.startTime, time);
+      }
+    }
+  }
+
+  at(index) {
+    return this._values[index];
+  }
+
+  isEmpty() {
+    return this.size() === 0;
+  }
+
+  size() {
+    return this._values.length;
+  }
+
+  get length() {
+    return this._values.length;
+  }
+
+  first() {
+    return this._values[0];
+  }
+
+  last() {
+    return this._values[this._values.length - 1];
+  }
+
+  duration() {
+    return this.last().time - this.first().time;
+  }
+
+  forEachChunkSize(count, fn) {
+    const increment = this.duration() / count;
+    let currentTime = this.first().time + increment;
+    let index = 0;
+    for (let i = 0; i < count; i++) {
+      let nextIndex = this.find(currentTime, index);
+      let nextTime = currentTime + increment;
+      fn(index, nextIndex, currentTime, nextTime);
+      index = nextIndex;
+      currentTime = nextTime;
+    }
+  }
+
+  chunkSizes(count) {
+    let chunks = [];
+    this.forEachChunkSize(count, (start, end) => chunks.push(end - start));
+    return chunks;
+  }
+
+  chunks(count) {
+    let chunks = [];
+    this.forEachChunkSize(count, (start, end, startTime, endTime) => {
+      let items = this._values.slice(start, end);
+      chunks.push(new Chunk(chunks.length, startTime, endTime, items));
+    });
+    return chunks;
+  }
+
+  range(start, end) {
+    const first = this.find(start);
+    if (first < 0) return [];
+    const last = this.find(end, first);
+    return this._values.slice(first, last);
+  }
+
+  find(time, offset = 0) {
+    return this._find(this._values, each => each.time - time, offset);
+  }
+
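+  // Binary search: returns the first index in [offset, array.length) whose
+  // element satisfies cmp(element) > 0, or array.length if no such element
+  // exists. Illustrative: for entry times [1, 5, 9], find(4) returns 1 and
+  // find(5) returns 2.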
+  _find(array, cmp, offset = 0) {
+    let min = offset;
+    let max = array.length;
+    while (min < max) {
+      let mid = min + Math.floor((max - min) / 2);
+      let result = cmp(array[mid]);
+      if (result > 0) {
+        max = mid;
+      } else {
+        min = mid + 1;
+      }
+    }
+    return min;
+  }
+
+  initializeTypes() {
+    const types = new Map();
+    for (const entry of this.all) {
+      types.get(entry.type)?.push(entry) ?? types.set(entry.type, [entry]);
+    }
+    return this._uniqueTypes = types;
+  }
+
+  get uniqueTypes() {
+    return this._uniqueTypes ?? this.initializeTypes();
+  }
+
+  depthHistogram() {
+    return this._values.histogram(each => each.depth);
+  }
+
+  fanOutHistogram() {
+    return this._values.histogram(each => each.children.length);
+  }
+
+  forEach(fn) {
+    return this._values.forEach(fn);
+  }
+}
+
+// ===========================================================================
+class Chunk {
+  constructor(index, start, end, items) {
+    this.index = index;
+    this.start = start;
+    this.end = end;
+    this.items = items;
+    this.height = 0;
+  }
+
+  isEmpty() {
+    return this.items.length === 0;
+  }
+
+  last() {
+    return this.at(this.size() - 1);
+  }
+
+  first() {
+    return this.at(0);
+  }
+
+  at(index) {
+    return this.items[index];
+  }
+
+  size() {
+    return this.items.length;
+  }
+
+  yOffset(event) {
+    // items[0]   == oldest event, displayed at the top of the chunk
+    // items[n-1] == youngest event, displayed at the bottom of the chunk
+    return (1 - (this.indexOf(event) + 0.5) / this.size()) * this.height;
+  }
+
+  indexOf(event) {
+    return this.items.indexOf(event);
+  }
+
+  has(event) {
+    if (this.isEmpty()) return false;
+    return this.first().time <= event.time && event.time <= this.last().time;
+  }
+
+  next(chunks) {
+    return this.findChunk(chunks, 1);
+  }
+
+  prev(chunks) {
+    return this.findChunk(chunks, -1);
+  }
+
+  findChunk(chunks, delta) {
+    let i = this.index + delta;
+    let chunk = chunks[i];
+    while (chunk && chunk.size() === 0) {
+      i += delta;
+      chunk = chunks[i];
+    }
+    return chunk;
+  }
+
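+  // Tallies the items by the key returned from event_fn and returns the
+  // [key, count] pairs sorted by ascending count. Illustrative: for item
+  // types ['a', 'b', 'b'] and event_fn = each => each.type, the result is
+  // [['a', 1], ['b', 2]].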
+  getBreakdown(event_fn) {
+    if (event_fn === void 0) {
+      event_fn = each => each;
+    }
+    let breakdown = {__proto__: null};
+    this.items.forEach(each => {
+      const type = event_fn(each);
+      const v = breakdown[type];
+      breakdown[type] = (v | 0) + 1;
+    });
+    return Object.entries(breakdown).sort((a, b) => a[1] - b[1]);
+  }
+
+  filter() {
+    return this.items.filter(map => !map.parent() || !this.has(map.parent()));
+  }
+}
+
+export {Timeline, Chunk};
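+
+// Usage sketch (illustrative only; SomeLogEntryClass stands for one of the
+// log entry classes and entries are assumed to have a numeric `time`):
+//
+//   const timeline = new Timeline(SomeLogEntryClass);
+//   timeline.push({time: 10, type: 'a'});
+//   timeline.push({time: 90, type: 'b'});
+//   timeline.chunks(2);               // two Chunk instances, one per entry
+//   timeline.selectTimeRange(0, 50);  // selection -> [{time: 10, type: 'a'}]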
diff --git a/src/third_party/v8/tools/system-analyzer/timeline/timeline-track-template.html b/src/third_party/v8/tools/system-analyzer/timeline/timeline-track-template.html
new file mode 100644
index 0000000..e14b927
--- /dev/null
+++ b/src/third_party/v8/tools/system-analyzer/timeline/timeline-track-template.html
@@ -0,0 +1,138 @@
+<!-- Copyright 2020 the V8 project authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file. -->
+
+<head>
+  <link href="./index.css" rel="stylesheet">
+</head>
+<style>
+  #timeline {
+    position: relative;
+    height: calc(200px + 12px);
+    overflow-y: hidden;
+    overflow-x: scroll;
+    user-select: none;
+  }
+
+  #timelineLabel {
+    transform: rotate(90deg);
+    transform-origin: left bottom 0;
+    position: absolute;
+    left: 0;
+    width: 200px;
+    text-align: center;
+    font-size: 10px;
+    opacity: 0.5;
+  }
+
+  #timelineChunks {
+    height: 200px;
+    position: absolute;
+    margin-right: 100px;
+  }
+
+  #timelineCanvas {
+    height: 200px;
+    position: relative;
+    overflow: visible;
+    pointer-events: none;
+  }
+
+  .chunk {
+    width: 6px;
+    position: absolute;
+    background-size: 100% 100%;
+    image-rendering: pixelated;
+    bottom: 0px;
+    background-color: var(--on-surface-color);
+    cursor: pointer;
+  }
+  .chunk:hover {
+    border-radius: 2px 2px 0 0;
+    margin: 0 0 -2px -2px;
+    border: 2px var(--primary-color) solid;
+  }
+
+  .timestamp {
+    height: 200px;
+    width: 100px;
+    border-left: 1px var(--on-surface-color) dashed;
+    padding-left: 4px;
+    position: absolute;
+    pointer-events: none;
+    font-size: 10px;
+  }
+
+  #legend {
+    position: relative;
+    float: right;
+    width: 100%;
+    max-width: 280px;
+    padding-left: 20px;
+    padding-top: 10px;
+    border-collapse: collapse;
+  }
+
+  th,
+  td {
+    width: 200px;
+    text-align: left;
+    padding-bottom: 3px;
+  }
+
+  /* right align numbers */
+  #legend td:nth-of-type(4n+3),
+  #legend td:nth-of-type(4n+4) {
+    text-align: right;
+  }
+
+  .legendTypeColumn {
+    width: 100%;
+  }
+
+  .timeline {
+    background-color: var(--timeline-background-color);
+  }
+
+  #timeline .rightHandle,
+  #timeline .leftHandle {
+    background-color: rgba(200, 200, 200, 0.5);
+    height: 100%;
+    width: 5px;
+    position: absolute;
+    z-index: 3;
+    cursor: col-resize;
+  }
+  #timeline .leftHandle {
+    border-left: 1px solid var(--on-surface-color);
+  }
+  #timeline .rightHandle {
+    border-right: 1px solid var(--on-surface-color);
+  }
+
+  #timeline .selection {
+    background-color: rgba(133, 68, 163, 0.5);
+    height: 100%;
+    position: absolute;
+  }
+</style>
+<table id="legend" class="typeStatsTable">
+  <thead>
+    <tr>
+      <td></td>
+      <td>Type</td>
+      <td>Count</td>
+      <td>Percent</td>
+    </tr>
+  </thead>
+  <tbody id="legendContent">
+  </tbody>
+</table>
+<div id="timeline">
+  <div class="leftHandle"></div>
+  <div class="selection"></div>
+  <div class="rightHandle"></div>
+  <div id="timelineLabel">Frequency</div>
+  <div id="timelineChunks"></div>
+  <canvas id="timelineCanvas"></canvas>
+</div>
\ No newline at end of file
diff --git a/src/third_party/v8/tools/system-analyzer/timeline/timeline-track.mjs b/src/third_party/v8/tools/system-analyzer/timeline/timeline-track.mjs
new file mode 100644
index 0000000..a37bcce
--- /dev/null
+++ b/src/third_party/v8/tools/system-analyzer/timeline/timeline-track.mjs
@@ -0,0 +1,518 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import {FocusEvent, SelectionEvent, SelectTimeEvent, SynchronizeSelectionEvent} from '../events.mjs';
+import {CSSColor, delay, DOM, V8CustomElement} from '../helper.mjs';
+import {kChunkHeight, kChunkWidth} from '../log/map.mjs';
+
+const kColors = [
+  CSSColor.green,
+  CSSColor.violet,
+  CSSColor.orange,
+  CSSColor.yellow,
+  CSSColor.primaryColor,
+  CSSColor.red,
+  CSSColor.blue,
+  CSSColor.yellow,
+  CSSColor.secondaryColor,
+];
+
+DOM.defineCustomElement('./timeline/timeline-track',
+                        (templateText) =>
+                            class TimelineTrack extends V8CustomElement {
+  // TODO turn into static field once Safari supports it.
+  static get SELECTION_OFFSET() {
+    return 10;
+  }
+  _timeline;
+  _nofChunks = 400;
+  _chunks;
+  _selectedEntry;
+  _timeToPixel;
+  _timeSelection = {start: -1, end: Infinity};
+  _timeStartOffset;
+  _selectionOriginTime;
+  _typeToColor;
+  constructor() {
+    super(templateText);
+    this.timeline.addEventListener('scroll', e => this.handleTimelineScroll(e));
+    this.timeline.addEventListener(
+        'mousedown', e => this.handleTimeSelectionMouseDown(e));
+    this.timeline.addEventListener(
+        'mouseup', e => this.handleTimeSelectionMouseUp(e));
+    this.timeline.addEventListener(
+        'mousemove', e => this.handleTimeSelectionMouseMove(e));
+    this.backgroundCanvas = document.createElement('canvas');
+    this.isLocked = false;
+  }
+
+  handleTimeSelectionMouseDown(e) {
+    let xPosition = e.clientX;
+    // Update origin time in case we click on a handle.
+    if (this.isOnLeftHandle(xPosition)) {
+      xPosition = this.rightHandlePosX;
+    } else if (this.isOnRightHandle(xPosition)) {
+      xPosition = this.leftHandlePosX;
+    }
+    this._selectionOriginTime = this.positionToTime(xPosition);
+  }
+
+  isOnLeftHandle(posX) {
+    return (
+        Math.abs(this.leftHandlePosX - posX) <= TimelineTrack.SELECTION_OFFSET);
+  }
+
+  isOnRightHandle(posX) {
+    return (
+        Math.abs(this.rightHandlePosX - posX) <=
+        TimelineTrack.SELECTION_OFFSET);
+  }
+
+  handleTimeSelectionMouseMove(e) {
+    if (!this._isSelecting) return;
+    const currentTime = this.positionToTime(e.clientX);
+    this.dispatchEvent(new SynchronizeSelectionEvent(
+        Math.min(this._selectionOriginTime, currentTime),
+        Math.max(this._selectionOriginTime, currentTime)));
+  }
+
+  handleTimeSelectionMouseUp(e) {
+    this._selectionOriginTime = -1;
+    const delta = this._timeSelection.end - this._timeSelection.start;
+    if (delta <= 1 || isNaN(delta)) return;
+    this.dispatchEvent(new SelectTimeEvent(
+        this._timeSelection.start, this._timeSelection.end));
+  }
+
+  set timeSelection(selection) {
+    this._timeSelection.start = selection.start;
+    this._timeSelection.end = selection.end;
+    this.updateSelection();
+  }
+
+  get _isSelecting() {
+    return this._selectionOriginTime >= 0;
+  }
+
+  updateSelection() {
+    const startPosition = this.timeToPosition(this._timeSelection.start);
+    const endPosition = this.timeToPosition(this._timeSelection.end);
+    const delta = endPosition - startPosition;
+    this.leftHandle.style.left = startPosition + 'px';
+    this.selection.style.left = startPosition + 'px';
+    this.rightHandle.style.left = endPosition + 'px';
+    this.selection.style.width = delta + 'px';
+  }
+
+  get leftHandlePosX() {
+    return this.leftHandle.getBoundingClientRect().x;
+  }
+
+  get rightHandlePosX() {
+    return this.rightHandle.getBoundingClientRect().x;
+  }
+
+  // Maps the clicked x position to the x position on timeline canvas
+  positionOnTimeline(posX) {
+    let rect = this.timeline.getBoundingClientRect();
+    let posClickedX = posX - rect.left + this.timeline.scrollLeft;
+    return posClickedX;
+  }
+
+  positionToTime(posX) {
+    let posTimelineX = this.positionOnTimeline(posX) + this._timeStartOffset;
+    return posTimelineX / this._timeToPixel;
+  }
+
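+  // Maps a time to an x position on the timeline canvas. Since
+  // _timeStartOffset equals data.startTime * _timeToPixel (see
+  // _updateTimeline), timeToPosition(this.data.startTime) is 0.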
+  timeToPosition(time) {
+    let posX = time * this._timeToPixel;
+    posX -= this._timeStartOffset;
+    return posX;
+  }
+
+  get leftHandle() {
+    return this.$('.leftHandle');
+  }
+
+  get rightHandle() {
+    return this.$('.rightHandle');
+  }
+
+  get selection() {
+    return this.$('.selection');
+  }
+
+  get timelineCanvas() {
+    return this.$('#timelineCanvas');
+  }
+
+  get timelineChunks() {
+    return this.$('#timelineChunks');
+  }
+
+  get timeline() {
+    return this.$('#timeline');
+  }
+
+  get timelineLegend() {
+    return this.$('#legend');
+  }
+
+  get timelineLegendContent() {
+    return this.$('#legendContent');
+  }
+
+  set data(value) {
+    this._timeline = value;
+    this._resetTypeToColorCache();
+    this.update();
+  }
+
+  _update() {
+    this._updateChunks();
+    this._updateTimeline();
+    this._renderLegend();
+  }
+
+  _resetTypeToColorCache() {
+    this._typeToColor = new Map();
+    let lastIndex = 0;
+    for (const type of this.data.uniqueTypes.keys()) {
+      this._typeToColor.set(type, kColors[lastIndex++]);
+    }
+  }
+
+  get data() {
+    return this._timeline;
+  }
+
+  set nofChunks(count) {
+    this._nofChunks = count;
+    this.update();
+  }
+
+  get nofChunks() {
+    return this._nofChunks;
+  }
+
+  _updateChunks() {
+    this._chunks = this.data.chunks(this.nofChunks);
+  }
+
+  get chunks() {
+    return this._chunks;
+  }
+
+  set selectedEntry(value) {
+    this._selectedEntry = value;
+    if (value.edge) this.redraw();
+  }
+
+  get selectedEntry() {
+    return this._selectedEntry;
+  }
+
+  set scrollLeft(offset) {
+    this.timeline.scrollLeft = offset;
+  }
+
+  typeToColor(type) {
+    return this._typeToColor.get(type);
+  }
+
+  _renderLegend() {
+    let timelineLegendContent = this.timelineLegendContent;
+    DOM.removeAllChildren(timelineLegendContent);
+    this._timeline.uniqueTypes.forEach((entries, type) => {
+      let row = DOM.tr('clickable');
+      row.entries = entries;
+      row.addEventListener('dblclick', e => this.handleEntryTypeDblClick(e));
+      let color = this.typeToColor(type);
+      if (color !== null) {
+        let div = DOM.div('colorbox');
+        div.style.backgroundColor = color;
+        row.appendChild(DOM.td(div));
+      } else {
+        row.appendChild(DOM.td());
+      }
+      let td = DOM.td(type);
+      row.appendChild(td);
+      row.appendChild(DOM.td(entries.length));
+      let percent = (entries.length / this.data.all.length) * 100;
+      row.appendChild(DOM.td(percent.toFixed(1) + '%'));
+      timelineLegendContent.appendChild(row);
+    });
+    // Add Total row.
+    let row = DOM.tr();
+    row.appendChild(DOM.td(''));
+    row.appendChild(DOM.td('All'));
+    row.appendChild(DOM.td(this.data.all.length));
+    row.appendChild(DOM.td('100%'));
+    timelineLegendContent.appendChild(row);
+    this.timelineLegend.appendChild(timelineLegendContent);
+  }
+
+  handleEntryTypeDblClick(e) {
+    this.dispatchEvent(new SelectionEvent(e.target.parentNode.entries));
+  }
+
+  timelineIndicatorMove(offset) {
+    this.timeline.scrollLeft += offset;
+  }
+
+  handleTimelineScroll(e) {
+    let horizontal = e.currentTarget.scrollLeft;
+    this.dispatchEvent(new CustomEvent(
+        'scrolltrack', {bubbles: true, composed: true, detail: horizontal}));
+  }
+
+  async setChunkBackgrounds(backgroundTodo) {
+    const kMaxDuration = 50;
+    let lastTime = 0;
+    for (let [chunk, node] of backgroundTodo) {
+      const current = performance.now();
+      if (current - lastTime > kMaxDuration) {
+        await delay(25);
+        lastTime = current;
+      }
+      this.setChunkBackground(chunk, node);
+    }
+  }
+
+  setChunkBackground(chunk, node) {
+    // Render the types of transitions as bar charts
+    const kHeight = chunk.height;
+    const kWidth = 1;
+    this.backgroundCanvas.width = kWidth;
+    this.backgroundCanvas.height = kHeight;
+    let ctx = this.backgroundCanvas.getContext('2d');
+    ctx.clearRect(0, 0, kWidth, kHeight);
+    let y = 0;
+    let total = chunk.size();
+    let type, count;
+    if (true) {
+      chunk.getBreakdown(map => map.type).forEach(([type, count]) => {
+        ctx.fillStyle = this.typeToColor(type);
+        let height = count / total * kHeight;
+        ctx.fillRect(0, y, kWidth, y + height);
+        y += height;
+      });
+    } else {
+      chunk.items.forEach(map => {
+        ctx.fillStyle = this.typeToColor(map.type);
+        let y = chunk.yOffset(map);
+        ctx.fillRect(0, y, kWidth, y + 1);
+      });
+    }
+
+    let imageData = this.backgroundCanvas.toDataURL('image/webp', 0.2);
+    node.style.backgroundImage = `url(${imageData})`;
+  }
+
+  _updateTimeline() {
+    let chunksNode = this.timelineChunks;
+    DOM.removeAllChildren(chunksNode);
+    let chunks = this.chunks;
+    let max = chunks.max(each => each.size());
+    let start = this.data.startTime;
+    let end = this.data.endTime;
+    let duration = end - start;
+    this._timeToPixel = chunks.length * kChunkWidth / duration;
+    this._timeStartOffset = start * this._timeToPixel;
+    let addTimestamp = (time, name) => {
+      let timeNode = DOM.div('timestamp');
+      timeNode.innerText = name;
+      timeNode.style.left = ((time - start) * this._timeToPixel) + 'px';
+      chunksNode.appendChild(timeNode);
+    };
+    let backgroundTodo = [];
+    for (let i = 0; i < chunks.length; i++) {
+      let chunk = chunks[i];
+      let height = (chunk.size() / max * kChunkHeight);
+      chunk.height = height;
+      if (chunk.isEmpty()) continue;
+      let node = DOM.div();
+      node.className = 'chunk';
+      node.style.left = ((chunks[i].start - start) * this._timeToPixel) + 'px';
+      node.style.height = height + 'px';
+      node.chunk = chunk;
+      node.addEventListener('mousemove', e => this.handleChunkMouseMove(e));
+      node.addEventListener('click', e => this.handleChunkClick(e));
+      node.addEventListener('dblclick', e => this.handleChunkDoubleClick(e));
+      backgroundTodo.push([chunk, node]);
+      chunksNode.appendChild(node);
+    }
+    this.setChunkBackgrounds(backgroundTodo);
+
+    // Put a time marker roughly every 20 chunks.
+    let expected = duration / chunks.length * 20;
+    let interval = (10 ** Math.floor(Math.log10(expected)));
+    let correction = Math.log10(expected / interval);
+    correction = (correction < 0.33) ? 1 : (correction < 0.75) ? 2.5 : 5;
+    interval *= correction;
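+    // Example (illustrative): expected == 70 gives interval == 10 and
+    // correction == 5 (log10(7) ~= 0.85 >= 0.75), i.e. a marker every 50.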
+
+    let time = start;
+    while (time < end) {
+      addTimestamp(time, ((time - start) / 1000) + ' ms');
+      time += interval;
+    }
+    this.redraw();
+  }
+
+  handleChunkMouseMove(event) {
+    if (this.isLocked) return false;
+    if (this._isSelecting) return false;
+    let chunk = event.target.chunk;
+    if (!chunk) return;
+    // topmost map (at chunk.height) == map #0.
+    let relativeIndex =
+        Math.round(event.layerY / event.target.offsetHeight * chunk.size());
+    let map = chunk.at(relativeIndex);
+    this.dispatchEvent(new FocusEvent(map));
+  }
+
+  handleChunkClick(event) {
+    this.isLocked = !this.isLocked;
+  }
+
+  handleChunkDoubleClick(event) {
+    let chunk = event.target.chunk;
+    if (!chunk) return;
+    this.dispatchEvent(new SelectTimeEvent(chunk.start, chunk.end));
+  }
+
+  redraw() {
+    let canvas = this.timelineCanvas;
+    canvas.width = (this.chunks.length + 1) * kChunkWidth;
+    canvas.height = kChunkHeight;
+    let ctx = canvas.getContext('2d');
+    ctx.clearRect(0, 0, canvas.width, kChunkHeight);
+    if (!this.selectedEntry || !this.selectedEntry.edge) return;
+    this.drawEdges(ctx);
+  }
+  setMapStyle(map, ctx) {
+    ctx.fillStyle = map.edge && map.edge.from ? CSSColor.onBackgroundColor :
+                                                CSSColor.onPrimaryColor;
+  }
+
+  setEdgeStyle(edge, ctx) {
+    let color = this.typeToColor(edge.type);
+    ctx.strokeStyle = color;
+    ctx.fillStyle = color;
+  }
+
+  markMap(ctx, map) {
+    let [x, y] = map.position(this.chunks);
+    ctx.beginPath();
+    this.setMapStyle(map, ctx);
+    ctx.arc(x, y, 3, 0, 2 * Math.PI);
+    ctx.fill();
+    ctx.beginPath();
+    ctx.fillStyle = CSSColor.onBackgroundColor;
+    ctx.arc(x, y, 2, 0, 2 * Math.PI);
+    ctx.fill();
+  }
+
+  markSelectedMap(ctx, map) {
+    let [x, y] = map.position(this.chunks);
+    ctx.beginPath();
+    this.setMapStyle(map, ctx);
+    ctx.arc(x, y, 6, 0, 2 * Math.PI);
+    ctx.strokeStyle = CSSColor.onBackgroundColor;
+    ctx.stroke();
+  }
+
+  drawEdges(ctx) {
+    // Draw the trace of maps in reverse order to make sure the outgoing
+    // transitions of previous maps aren't drawn over.
+    const kMaxOutgoingEdges = 100;
+    let nofEdges = 0;
+    let stack = [];
+    let current = this.selectedEntry;
+    while (current && nofEdges < kMaxOutgoingEdges) {
+      nofEdges += current.children.length;
+      stack.push(current);
+      current = current.parent();
+    }
+    ctx.save();
+    this.drawOutgoingEdges(ctx, this.selectedEntry, 3);
+    ctx.restore();
+
+    let labelOffset = 15;
+    let xPrev = 0;
+    while (current = stack.pop()) {
+      if (current.edge) {
+        this.setEdgeStyle(current.edge, ctx);
+        let [xTo, yTo] = this.drawEdge(ctx, current.edge, true, labelOffset);
+        if (xTo == xPrev) {
+          labelOffset += 8;
+        } else {
+          labelOffset = 15;
+        }
+        xPrev = xTo;
+      }
+      this.markMap(ctx, current);
+      current = current.parent();
+      ctx.save();
+      // this.drawOutgoingEdges(ctx, current, 1);
+      ctx.restore();
+    }
+    // Mark selected map
+    this.markSelectedMap(ctx, this.selectedEntry);
+  }
+
+  drawEdge(ctx, edge, showLabel = true, labelOffset = 20) {
+    if (!edge.from || !edge.to) return [-1, -1];
+    let [xFrom, yFrom] = edge.from.position(this.chunks);
+    let [xTo, yTo] = edge.to.position(this.chunks);
+    let sameChunk = xTo == xFrom;
+    if (sameChunk) labelOffset += 8;
+
+    ctx.beginPath();
+    ctx.moveTo(xFrom, yFrom);
+    let offsetX = 20;
+    let offsetY = 20;
+    let midX = xFrom + (xTo - xFrom) / 2;
+    let midY = (yFrom + yTo) / 2 - 100;
+    if (!sameChunk) {
+      ctx.quadraticCurveTo(midX, midY, xTo, yTo);
+    } else {
+      ctx.lineTo(xTo, yTo);
+    }
+    if (!showLabel) {
+      ctx.stroke();
+    } else {
+      let centerX, centerY;
+      if (!sameChunk) {
+        centerX = (xFrom / 2 + midX + xTo / 2) / 2;
+        centerY = (yFrom / 2 + midY + yTo / 2) / 2;
+      } else {
+        centerX = xTo;
+        centerY = yTo;
+      }
+      ctx.moveTo(centerX, centerY);
+      ctx.lineTo(centerX + offsetX, centerY - labelOffset);
+      ctx.stroke();
+      ctx.textAlign = 'left';
+      ctx.fillStyle = this.typeToColor(edge.type);
+      ctx.fillText(
+          edge.toString(), centerX + offsetX + 2, centerY - labelOffset);
+    }
+    return [xTo, yTo];
+  }
+
+  drawOutgoingEdges(ctx, map, max = 10, depth = 0) {
+    if (!map) return;
+    if (depth >= max) return;
+    ctx.globalAlpha = 0.5 - depth * (0.3 / max);
+    ctx.strokeStyle = CSSColor.timelineBackgroundColor;
+    const limit = Math.min(map.children.length, 100);
+    for (let i = 0; i < limit; i++) {
+      let edge = map.children[i];
+      this.drawEdge(ctx, edge, true);
+      this.drawOutgoingEdges(ctx, edge.to, max, depth + 1);
+    }
+  }
+});
diff --git a/src/third_party/v8/tools/test262-results-parser.js b/src/third_party/v8/tools/test262-results-parser.js
new file mode 100644
index 0000000..379436e
--- /dev/null
+++ b/src/third_party/v8/tools/test262-results-parser.js
@@ -0,0 +1,41 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Run the test runner and dump a json file. Use this script to pass
+// the json file and return a list of failing tests that can be copied
+// to test262.status.
+//
+// Usage:
+//
+// Run the test runner to generate the results:
+// $ tools/run-tests.py --gn test262 --json-test-results=tools/.test262-results.json
+//
+// Run this script to print the formatted results:
+// $ node tools/test262-results-parser.js .test262-results.json
+//
+// Note: The json results file generated by the test runner should be
+// in the tools/ directory, which is the same directory as this script.
+
+var fs = require('fs'),
+    path = require('path');
+
+function main() {
+  if (process.argv.length === 2)  {
+    throw new Error('File name required as first arg.');
+  }
+
+  var fileName = process.argv[2],
+      fullPath = path.join(__dirname, fileName),
+      results = require(fullPath)[0].results,
+      tests = new Set();
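+  // Each result name has the form '<suite>/<path/to/test>'; the suite prefix
+  // is dropped and a status-file line is emitted, e.g. (illustrative):
+  //   'built-ins/Array/length': [FAIL],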
+  for (let result of results) {
+    let [_, ...test] = result.name.split('/');
+    tests.add(`  '${test.join('/')}': [FAIL],`);
+  }
+
+
+  [...tests].sort().forEach(i => console.log(i));
+}
+
+main();
diff --git a/src/third_party/v8/tools/testrunner/__init__.py b/src/third_party/v8/tools/testrunner/__init__.py
new file mode 100644
index 0000000..202a262
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/__init__.py
@@ -0,0 +1,26 @@
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/third_party/v8/tools/testrunner/base_runner.py b/src/third_party/v8/tools/testrunner/base_runner.py
new file mode 100644
index 0000000..54a9e61
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/base_runner.py
@@ -0,0 +1,789 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+from functools import reduce
+
+from collections import OrderedDict, namedtuple
+import json
+import multiprocessing
+import optparse
+import os
+import shlex
+import sys
+import traceback
+
+
+
+# Add testrunner to the path.
+sys.path.insert(
+  0,
+  os.path.dirname(
+    os.path.dirname(os.path.abspath(__file__))))
+
+
+from testrunner.local import command
+from testrunner.local import testsuite
+from testrunner.local import utils
+from testrunner.test_config import TestConfig
+from testrunner.testproc import progress
+from testrunner.testproc.rerun import RerunProc
+from testrunner.testproc.shard import ShardProc
+from testrunner.testproc.sigproc import SignalProc
+from testrunner.testproc.timeout import TimeoutProc
+from testrunner.testproc import util
+
+
+BASE_DIR = (
+    os.path.dirname(
+      os.path.dirname(
+        os.path.dirname(
+          os.path.abspath(__file__)))))
+
+DEFAULT_OUT_GN = 'out.gn'
+
+# Map of test name synonyms to lists of test suites. Should be ordered by
+# expected runtimes (suites with slow test cases first). These groups are
+# invoked in separate steps on the bots.
+# The mapping from names used here to GN targets (which must stay in sync)
+# is defined in infra/mb/gn_isolate_map.pyl.
+TEST_MAP = {
+  # This needs to stay in sync with group("v8_bot_default") in test/BUILD.gn.
+  "bot_default": [
+    "debugger",
+    "mjsunit",
+    "cctest",
+    "wasm-spec-tests",
+    "inspector",
+    "webkit",
+    "mkgrokdump",
+    "wasm-js",
+    "fuzzer",
+    "message",
+    "intl",
+    "unittests",
+    "wasm-api-tests",
+  ],
+  # This needs to stay in sync with group("v8_default") in test/BUILD.gn.
+  "default": [
+    "debugger",
+    "mjsunit",
+    "cctest",
+    "wasm-spec-tests",
+    "inspector",
+    "mkgrokdump",
+    "wasm-js",
+    "fuzzer",
+    "message",
+    "intl",
+    "unittests",
+    "wasm-api-tests",
+  ],
+  # This needs to stay in sync with group("v8_d8_default") in test/BUILD.gn.
+  "d8_default": [
+    "debugger",
+    "mjsunit",
+    "webkit",
+    "message",
+    "intl",
+  ],
+  # This needs to stay in sync with "v8_optimize_for_size" in test/BUILD.gn.
+  "optimize_for_size": [
+    "debugger",
+    "mjsunit",
+    "cctest",
+    "inspector",
+    "webkit",
+    "intl",
+  ],
+  "unittests": [
+    "unittests",
+  ],
+}
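+
+# Example (illustrative): passing "d8_default" on the command line is expanded
+# by _parse_test_args() below into the suites listed above, i.e.
+# ["debugger", "mjsunit", "webkit", "message", "intl"].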
+
+# Increase the timeout for these:
+SLOW_ARCHS = [
+  "arm",
+  "arm64",
+  "mips",
+  "mipsel",
+  "mips64",
+  "mips64el",
+  "s390",
+  "s390x",
+]
+
+
+ModeConfig = namedtuple(
+    'ModeConfig', 'label flags timeout_scalefactor status_mode')
+
+DEBUG_FLAGS = ["--nohard-abort", "--enable-slow-asserts", "--verify-heap"]
+RELEASE_FLAGS = ["--nohard-abort"]
+
+DEBUG_MODE = ModeConfig(
+    label='debug',
+    flags=DEBUG_FLAGS,
+    timeout_scalefactor=4,
+    status_mode="debug",
+)
+
+RELEASE_MODE = ModeConfig(
+    label='release',
+    flags=RELEASE_FLAGS,
+    timeout_scalefactor=1,
+    status_mode="release",
+)
+
+# Normal trybot release configuration. There, dchecks are always on which
+# implies debug is set. Hence, the status file needs to assume debug-like
+# behavior/timeouts.
+TRY_RELEASE_MODE = ModeConfig(
+    label='release+dchecks',
+    flags=RELEASE_FLAGS,
+    timeout_scalefactor=4,
+    status_mode="debug",
+)
+
+PROGRESS_INDICATORS = {
+  'verbose': progress.VerboseProgressIndicator,
+  'ci': progress.CIProgressIndicator,
+  'dots': progress.DotsProgressIndicator,
+  'color': progress.ColorProgressIndicator,
+  'mono': progress.MonochromeProgressIndicator,
+  'stream': progress.StreamProgressIndicator,
+}
+
+class TestRunnerError(Exception):
+  pass
+
+
+class BuildConfig(object):
+  def __init__(self, build_config):
+    # In V8 land, GN's x86 is called ia32.
+    if build_config['v8_target_cpu'] == 'x86':
+      self.arch = 'ia32'
+    else:
+      self.arch = build_config['v8_target_cpu']
+
+    self.asan = build_config['is_asan']
+    self.cfi_vptr = build_config['is_cfi']
+    self.concurrent_marking = build_config['v8_enable_concurrent_marking']
+    self.dcheck_always_on = build_config['dcheck_always_on']
+    self.gcov_coverage = build_config['is_gcov_coverage']
+    self.is_android = build_config['is_android']
+    self.is_clang = build_config['is_clang']
+    self.is_debug = build_config['is_debug']
+    self.is_full_debug = build_config['is_full_debug']
+    self.msan = build_config['is_msan']
+    self.no_i18n = not build_config['v8_enable_i18n_support']
+    self.predictable = build_config['v8_enable_verify_predictable']
+    self.simulator_run = (build_config['target_cpu'] !=
+                          build_config['v8_target_cpu'])
+    self.tsan = build_config['is_tsan']
+    # TODO(machenbach): We only have ubsan not ubsan_vptr.
+    self.ubsan_vptr = build_config['is_ubsan_vptr']
+    self.verify_csa = build_config['v8_enable_verify_csa']
+    self.lite_mode = build_config['v8_enable_lite_mode']
+    self.pointer_compression = build_config['v8_enable_pointer_compression']
+    # Export only for MIPS target
+    if self.arch in ['mips', 'mipsel', 'mips64', 'mips64el']:
+      self.mips_arch_variant = build_config['mips_arch_variant']
+      self.mips_use_msa = build_config['mips_use_msa']
+
+  @property
+  def use_sanitizer(self):
+    return (self.asan or self.cfi_vptr or self.msan or self.tsan or
+            self.ubsan_vptr)
+
+  def __str__(self):
+    detected_options = []
+
+    if self.asan:
+      detected_options.append('asan')
+    if self.cfi_vptr:
+      detected_options.append('cfi_vptr')
+    if self.dcheck_always_on:
+      detected_options.append('dcheck_always_on')
+    if self.gcov_coverage:
+      detected_options.append('gcov_coverage')
+    if self.msan:
+      detected_options.append('msan')
+    if self.no_i18n:
+      detected_options.append('no_i18n')
+    if self.predictable:
+      detected_options.append('predictable')
+    if self.tsan:
+      detected_options.append('tsan')
+    if self.ubsan_vptr:
+      detected_options.append('ubsan_vptr')
+    if self.verify_csa:
+      detected_options.append('verify_csa')
+    if self.lite_mode:
+      detected_options.append('lite_mode')
+    if self.pointer_compression:
+      detected_options.append('pointer_compression')
+
+    return '\n'.join(detected_options)
+
+
+def _do_load_build_config(outdir, verbose=False):
+  build_config_path = os.path.join(outdir, "v8_build_config.json")
+  if not os.path.exists(build_config_path):
+    if verbose:
+      print("Didn't find build config: %s" % build_config_path)
+    raise TestRunnerError()
+
+  with open(build_config_path) as f:
+    try:
+      build_config_json = json.load(f)
+    except Exception:  # pragma: no cover
+      print("%s exists but contains invalid json. Is your build up-to-date?"
+            % build_config_path)
+      raise TestRunnerError()
+
+  return BuildConfig(build_config_json)
+
+
+class BaseTestRunner(object):
+  def __init__(self, basedir=None):
+    self.basedir = basedir or BASE_DIR
+    self.outdir = None
+    self.build_config = None
+    self.mode_options = None
+    self.target_os = None
+
+  @property
+  def framework_name(self):
+    """String name of the base-runner subclass, used in test results."""
+    raise NotImplementedError()
+
+  def execute(self, sys_args=None):
+    if sys_args is None:  # pragma: no cover
+      sys_args = sys.argv[1:]
+    try:
+      parser = self._create_parser()
+      options, args = self._parse_args(parser, sys_args)
+      if options.swarming:
+        # Swarming doesn't print how isolated commands are called. Let's make
+        # this less cryptic by printing it ourselves.
+        print(' '.join(sys.argv))
+
+        # Kill stray processes from previous tasks on swarming.
+        util.kill_processes_linux()
+
+      self._load_build_config(options)
+      command.setup(self.target_os, options.device)
+
+      try:
+        self._process_default_options(options)
+        self._process_options(options)
+      except TestRunnerError:
+        parser.print_help()
+        raise
+
+      args = self._parse_test_args(args)
+      tests = self._load_testsuite_generators(args, options)
+      self._setup_env()
+      print(">>> Running tests for %s.%s" % (self.build_config.arch,
+                                             self.mode_options.label))
+      exit_code = self._do_execute(tests, args, options)
+      if exit_code == utils.EXIT_CODE_FAILURES and options.json_test_results:
+        print("Force exit code 0 after failures. Json test results file "
+              "generated with failure information.")
+        exit_code = utils.EXIT_CODE_PASS
+      return exit_code
+    except TestRunnerError:
+      traceback.print_exc()
+      return utils.EXIT_CODE_INTERNAL_ERROR
+    except KeyboardInterrupt:
+      return utils.EXIT_CODE_INTERRUPTED
+    except Exception:
+      traceback.print_exc()
+      return utils.EXIT_CODE_INTERNAL_ERROR
+    finally:
+      command.tear_down()
+
+  def _create_parser(self):
+    parser = optparse.OptionParser()
+    parser.usage = '%prog [options] [tests]'
+    parser.description = """TESTS: %s""" % (TEST_MAP["default"])
+    self._add_parser_default_options(parser)
+    self._add_parser_options(parser)
+    return parser
+
+  def _add_parser_default_options(self, parser):
+    parser.add_option("--gn", help="Scan out.gn for the last built"
+                      " configuration",
+                      default=False, action="store_true")
+    parser.add_option("--outdir", help="Base directory with compile output",
+                      default="out")
+    parser.add_option("--arch",
+                      help="The architecture to run tests for")
+    parser.add_option("--shell-dir", help="DEPRECATED! Executables from build "
+                      "directory will be used")
+    parser.add_option("--test-root", help="Root directory of the test suites",
+                      default=os.path.join(self.basedir, 'test'))
+    parser.add_option("--total-timeout-sec", default=0, type="int",
+                      help="How long should fuzzer run")
+    parser.add_option("--swarming", default=False, action="store_true",
+                      help="Indicates running test driver on swarming.")
+
+    parser.add_option("-j", help="The number of parallel tasks to run",
+                      default=0, type=int)
+    parser.add_option("-d", "--device",
+                      help="The device ID to run Android tests on. If not "
+                           "given it will be autodetected.")
+
+    # Shard
+    parser.add_option("--shard-count", default=1, type=int,
+                      help="Split tests into this number of shards")
+    parser.add_option("--shard-run", default=1, type=int,
+                      help="Run this shard from the split up tests.")
+
+    # Progress
+    parser.add_option("-p", "--progress",
+                      choices=list(PROGRESS_INDICATORS), default="mono",
+                      help="The style of progress indicator (verbose, dots, "
+                           "color, mono, stream, ci)")
+    parser.add_option("--json-test-results",
+                      help="Path to a file for storing json results.")
+    parser.add_option('--slow-tests-cutoff', type="int", default=100,
+                      help='Collect N slowest tests')
+    parser.add_option("--exit-after-n-failures", type="int", default=100,
+                      help="Exit after the first N failures instead of "
+                           "running all tests. Pass 0 to disable this feature.")
+    parser.add_option("--ci-test-completion",
+                      help="Path to a file for logging test completion in the "
+                           "context of CI progress indicator. Ignored if "
+                           "progress indicator is other than 'ci'.")
+
+    # Rerun
+    parser.add_option("--rerun-failures-count", default=0, type=int,
+                      help="Number of times to rerun each failing test case. "
+                           "Very slow tests will be rerun only once.")
+    parser.add_option("--rerun-failures-max", default=100, type=int,
+                      help="Maximum number of failing test cases to rerun")
+
+    # Test config
+    parser.add_option("--command-prefix", default="",
+                      help="Prepended to each shell command used to run a test")
+    parser.add_option('--dont-skip-slow-simulator-tests',
+                      help='Don\'t skip more slow tests when using a'
+                      ' simulator.', default=False, action='store_true',
+                      dest='dont_skip_simulator_slow_tests')
+    parser.add_option("--extra-flags", action="append", default=[],
+                      help="Additional flags to pass to each test command")
+    parser.add_option("--isolates", action="store_true", default=False,
+                      help="Whether to test isolates")
+    parser.add_option("--no-harness", "--noharness",
+                      default=False, action="store_true",
+                      help="Run without test harness of a given suite")
+    parser.add_option("--random-seed", default=0, type=int,
+                      help="Default seed for initializing random generator")
+    parser.add_option("--run-skipped", help="Also run skipped tests.",
+                      default=False, action="store_true")
+    parser.add_option("-t", "--timeout", default=60, type=int,
+                      help="Timeout for single test in seconds")
+    parser.add_option("-v", "--verbose", default=False, action="store_true",
+                      help="Verbose output")
+    parser.add_option('--regenerate-expected-files', default=False, action='store_true',
+                      help='Regenerate expected files')
+
+    # TODO(machenbach): Temporary options for rolling out new test runner
+    # features.
+    parser.add_option("--mastername", default='',
+                      help="Mastername property from infrastructure. Not "
+                           "setting this option indicates manual usage.")
+    parser.add_option("--buildername", default='',
+                      help="Buildername property from infrastructure. Not "
+                           "setting this option indicates manual usage.")
+
+  def _add_parser_options(self, parser):
+    pass
+
+  def _parse_args(self, parser, sys_args):
+    options, args = parser.parse_args(sys_args)
+
+    if options.arch and ',' in options.arch:  # pragma: no cover
+      print('Multiple architectures are deprecated')
+      raise TestRunnerError()
+
+    return options, args
+
+  def _load_build_config(self, options):
+    for outdir in self._possible_outdirs(options):
+      try:
+        self.build_config = _do_load_build_config(outdir, options.verbose)
+
+        # In auto-detect mode the outdir is always where we found the build config.
+        # This ensures that we'll also take the build products from there.
+        self.outdir = outdir
+        break
+      except TestRunnerError:
+        pass
+
+    if not self.build_config:  # pragma: no cover
+      print('Failed to load build config')
+      raise TestRunnerError
+
+    print('Build found: %s' % self.outdir)
+    if str(self.build_config):
+      print('>>> Autodetected:')
+      print(self.build_config)
+
+    # Represents the OS where tests are run on. Same as host OS except for
+    # Android, which is determined by build output.
+    if self.build_config.is_android:
+      self.target_os = 'android'
+    else:
+      self.target_os = utils.GuessOS()
+
+  # Returns possible build paths in order:
+  # gn
+  # outdir
+  # outdir on bots
+  def _possible_outdirs(self, options):
+    def outdirs():
+      if options.gn:
+        yield self._get_gn_outdir()
+        return
+
+      yield options.outdir
+
+      if os.path.basename(options.outdir) != 'build':
+        yield os.path.join(options.outdir, 'build')
+
+    for outdir in outdirs():
+      yield os.path.join(self.basedir, outdir)
+
+  def _get_gn_outdir(self):
+    gn_out_dir = os.path.join(self.basedir, DEFAULT_OUT_GN)
+    latest_timestamp = -1
+    latest_config = None
+    for gn_config in os.listdir(gn_out_dir):
+      gn_config_dir = os.path.join(gn_out_dir, gn_config)
+      if not os.path.isdir(gn_config_dir):
+        continue
+      if os.path.getmtime(gn_config_dir) > latest_timestamp:
+        latest_timestamp = os.path.getmtime(gn_config_dir)
+        latest_config = gn_config
+    if latest_config:
+      print(">>> Latest GN build found: %s" % latest_config)
+      return os.path.join(DEFAULT_OUT_GN, latest_config)
+
+  def _process_default_options(self, options):
+    if self.build_config.is_debug:
+      self.mode_options = DEBUG_MODE
+    elif self.build_config.dcheck_always_on:
+      self.mode_options = TRY_RELEASE_MODE
+    else:
+      self.mode_options = RELEASE_MODE
+
+    if options.arch and options.arch != self.build_config.arch:
+      print('--arch value (%s) inconsistent with build config (%s).' % (
+        options.arch, self.build_config.arch))
+      raise TestRunnerError()
+
+    if options.shell_dir:  # pragma: no cover
+      print('Warning: --shell-dir is deprecated. Searching for executables in '
+            'build directory (%s) instead.' % self.outdir)
+
+    if options.j == 0:
+      if self.build_config.is_android:
+        # Adb isn't happy about multi-processed file pushing.
+        options.j = 1
+      else:
+        options.j = multiprocessing.cpu_count()
+
+    options.command_prefix = shlex.split(options.command_prefix)
+    options.extra_flags = sum(map(shlex.split, options.extra_flags), [])
+
+  def _process_options(self, options):
+    pass
+
+  def _setup_env(self):
+    # Use the v8 root as cwd as some test cases use "load" with relative paths.
+    os.chdir(self.basedir)
+
+    # Many tests assume an English interface.
+    os.environ['LANG'] = 'en_US.UTF-8'
+
+    symbolizer_option = self._get_external_symbolizer_option()
+
+    if self.build_config.asan:
+      asan_options = [
+          symbolizer_option,
+          'allow_user_segv_handler=1',
+          'allocator_may_return_null=1',
+      ]
+      if utils.GuessOS() not in ['macos', 'windows']:
+        # LSAN is not available on mac and windows.
+        asan_options.append('detect_leaks=1')
+      else:
+        asan_options.append('detect_leaks=0')
+      if utils.GuessOS() == 'windows':
+        # https://crbug.com/967663
+        asan_options.append('detect_stack_use_after_return=0')
+      os.environ['ASAN_OPTIONS'] = ":".join(asan_options)
+
+    if self.build_config.cfi_vptr:
+      os.environ['UBSAN_OPTIONS'] = ":".join([
+        'print_stacktrace=1',
+        'print_summary=1',
+        'symbolize=1',
+        symbolizer_option,
+      ])
+
+    if self.build_config.ubsan_vptr:
+      os.environ['UBSAN_OPTIONS'] = ":".join([
+        'print_stacktrace=1',
+        symbolizer_option,
+      ])
+
+    if self.build_config.msan:
+      os.environ['MSAN_OPTIONS'] = symbolizer_option
+
+    if self.build_config.tsan:
+      suppressions_file = os.path.join(
+          self.basedir,
+          'tools',
+          'sanitizers',
+          'tsan_suppressions.txt')
+      os.environ['TSAN_OPTIONS'] = " ".join([
+        symbolizer_option,
+        'suppressions=%s' % suppressions_file,
+        'exit_code=0',
+        'report_thread_leaks=0',
+        'history_size=7',
+        'report_destroy_locked=0',
+      ])
+
+  def _get_external_symbolizer_option(self):
+    external_symbolizer_path = os.path.join(
+        self.basedir,
+        'third_party',
+        'llvm-build',
+        'Release+Asserts',
+        'bin',
+        'llvm-symbolizer',
+    )
+
+    if utils.IsWindows():
+      # Quote, because sanitizers might confuse colon as option separator.
+      external_symbolizer_path = '"%s.exe"' % external_symbolizer_path
+
+    return 'external_symbolizer_path=%s' % external_symbolizer_path
+
+  def _parse_test_args(self, args):
+    if not args:
+      args = self._get_default_suite_names()
+
+    # Expand arguments with grouped tests. The args should reflect the list
+    # of suites as otherwise filters would break.
+    def expand_test_group(name):
+      return TEST_MAP.get(name, [name])
+
+    return reduce(list.__add__, map(expand_test_group, args), [])
+
+  def _args_to_suite_names(self, args, test_root):
+    # Use default tests if no test configuration was provided at the cmd line.
+    all_names = set(utils.GetSuitePaths(test_root))
+    args_names = OrderedDict([(arg.split('/')[0], None) for arg in args]) # set
+    return [name for name in args_names if name in all_names]
+
+  def _get_default_suite_names(self):
+    return []
+
+  def _load_testsuite_generators(self, args, options):
+    names = self._args_to_suite_names(args, options.test_root)
+    test_config = self._create_test_config(options)
+    variables = self._get_statusfile_variables(options)
+
+    # Head generator with no elements
+    test_chain = testsuite.TestGenerator(0, [], [])
+    for name in names:
+      if options.verbose:
+        print('>>> Loading test suite: %s' % name)
+      suite = testsuite.TestSuite.Load(
+          os.path.join(options.test_root, name), test_config,
+          self.framework_name)
+
+      if self._is_testsuite_supported(suite, options):
+        tests = suite.load_tests_from_disk(variables)
+        test_chain.merge(tests)
+
+    return test_chain
+
+  def _is_testsuite_supported(self, suite, options):
+    """A predicate that can be overridden to filter out unsupported TestSuite
+    instances (see NumFuzzer for usage)."""
+    return True
+
+  def _get_statusfile_variables(self, options):
+    simd_mips = (
+      self.build_config.arch in ['mipsel', 'mips', 'mips64', 'mips64el'] and
+      self.build_config.mips_arch_variant == "r6" and
+      self.build_config.mips_use_msa)
+
+    mips_arch_variant = (
+      self.build_config.arch in ['mipsel', 'mips', 'mips64', 'mips64el'] and
+      self.build_config.mips_arch_variant)
+
+    return {
+      "arch": self.build_config.arch,
+      "asan": self.build_config.asan,
+      "byteorder": sys.byteorder,
+      "cfi_vptr": self.build_config.cfi_vptr,
+      "concurrent_marking": self.build_config.concurrent_marking,
+      "dcheck_always_on": self.build_config.dcheck_always_on,
+      "deopt_fuzzer": False,
+      "endurance_fuzzer": False,
+      "gc_fuzzer": False,
+      "gc_stress": False,
+      "gcov_coverage": self.build_config.gcov_coverage,
+      "isolates": options.isolates,
+      "is_clang": self.build_config.is_clang,
+      "is_full_debug": self.build_config.is_full_debug,
+      "mips_arch_variant": mips_arch_variant,
+      "mode": self.mode_options.status_mode,
+      "msan": self.build_config.msan,
+      "no_harness": options.no_harness,
+      "no_i18n": self.build_config.no_i18n,
+      "novfp3": False,
+      "optimize_for_size": "--optimize-for-size" in options.extra_flags,
+      "predictable": self.build_config.predictable,
+      "simd_mips": simd_mips,
+      "simulator_run": self.build_config.simulator_run and
+                       not options.dont_skip_simulator_slow_tests,
+      "system": self.target_os,
+      "tsan": self.build_config.tsan,
+      "ubsan_vptr": self.build_config.ubsan_vptr,
+      "verify_csa": self.build_config.verify_csa,
+      "lite_mode": self.build_config.lite_mode,
+      "pointer_compression": self.build_config.pointer_compression,
+    }
+
+  def _runner_flags(self):
+    """Extra default flags specific to the test runner implementation."""
+    return []
+
+  def _create_test_config(self, options):
+    timeout = options.timeout * self._timeout_scalefactor(options)
+    return TestConfig(
+        command_prefix=options.command_prefix,
+        extra_flags=options.extra_flags,
+        isolates=options.isolates,
+        mode_flags=self.mode_options.flags + self._runner_flags(),
+        no_harness=options.no_harness,
+        noi18n=self.build_config.no_i18n,
+        random_seed=options.random_seed,
+        run_skipped=options.run_skipped,
+        shell_dir=self.outdir,
+        timeout=timeout,
+        verbose=options.verbose,
+        regenerate_expected_files=options.regenerate_expected_files,
+    )
+
+  def _timeout_scalefactor(self, options):
+    """Increases timeout for slow build configurations."""
+    factor = self.mode_options.timeout_scalefactor
+    if self.build_config.arch in SLOW_ARCHS:
+      factor *= 4.5
+    if self.build_config.lite_mode:
+      factor *= 2
+    if self.build_config.predictable:
+      factor *= 4
+    if self.build_config.use_sanitizer:
+      factor *= 1.5
+    if self.build_config.is_full_debug:
+      factor *= 4
+
+    return factor
+
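A worked example of the scaling above, assuming a 60-second base timeout, an architecture in SLOW_ARCHS, and a full debug build (the release-mode scale factor is assumed to be 1):

    factor = 1           # assumed mode_options.timeout_scalefactor for release
    factor *= 4.5        # arch in SLOW_ARCHS
    factor *= 4          # is_full_debug
    print(60 * factor)   # 60 s base timeout -> 1080.0 s effective timeout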
+  # TODO(majeski): remove options & args parameters
+  def _do_execute(self, suites, args, options):
+    raise NotImplementedError()
+
+  def _prepare_procs(self, procs):
+    procs = list(filter(None, procs))
+    for i in range(0, len(procs) - 1):
+      procs[i].connect_to(procs[i + 1])
+    procs[0].setup()
+
+  def _create_shard_proc(self, options):
+    myid, count = self._get_shard_info(options)
+    if count == 1:
+      return None
+    return ShardProc(myid - 1, count)
+
+  def _get_shard_info(self, options):
+    """
+    Returns pair:
+      (id of the current shard [1; number of shards], number of shards)
+    """
+    # Read gtest shard configuration from environment (e.g. set by swarming).
+    # If none is present, use values passed on the command line.
+    shard_count = int(
+      os.environ.get('GTEST_TOTAL_SHARDS', options.shard_count))
+    shard_run = os.environ.get('GTEST_SHARD_INDEX')
+    if shard_run is not None:
+      # The v8 shard_run starts at 1, while GTEST_SHARD_INDEX starts at 0.
+      shard_run = int(shard_run) + 1
+    else:
+      shard_run = options.shard_run
+
+    if options.shard_count > 1:
+      # Log if a value was passed on the cmd line and it differs from the
+      # environment variables.
+      if options.shard_count != shard_count:  # pragma: no cover
+        print("shard_count from cmd line differs from environment variable "
+              "GTEST_TOTAL_SHARDS")
+      if (options.shard_run > 1 and
+          options.shard_run != shard_run):  # pragma: no cover
+        print("shard_run from cmd line differs from environment variable "
+              "GTEST_SHARD_INDEX")
+
+    if shard_run < 1 or shard_run > shard_count:
+      # TODO(machenbach): Turn this into an assert. If that's wrong on the
+      # bots, printing will be quite useless. Or refactor this code to make
+      # sure we get a return code != 0 after testing if we got here.
+      print("shard-run not a valid number, should be in [1:shard-count]")
+      print("defaulting back to running all tests")
+      return 1, 1
+
+    return shard_run, shard_count
+
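A small sketch of the index translation above: swarming exports a zero-based GTEST_SHARD_INDEX, while the runner's shard_run is one-based.

    import os

    # Hypothetical swarming environment: 4 shards, this bot runs shard index 2.
    os.environ['GTEST_TOTAL_SHARDS'] = '4'
    os.environ['GTEST_SHARD_INDEX'] = '2'

    shard_count = int(os.environ['GTEST_TOTAL_SHARDS'])
    shard_run = int(os.environ['GTEST_SHARD_INDEX']) + 1  # 0-based -> 1-based
    print(shard_run, shard_count)  # 3 4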
+  def _create_progress_indicators(self, test_count, options):
+    procs = [PROGRESS_INDICATORS[options.progress]()]
+    if options.json_test_results:
+      procs.append(progress.JsonTestProgressIndicator(self.framework_name))
+
+    for proc in procs:
+      proc.configure(options)
+
+    for proc in procs:
+      try:
+        proc.set_test_count(test_count)
+      except AttributeError:
+        pass
+
+    return procs
+
+  def _create_result_tracker(self, options):
+    return progress.ResultsTracker(options.exit_after_n_failures)
+
+  def _create_timeout_proc(self, options):
+    if not options.total_timeout_sec:
+      return None
+    return TimeoutProc(options.total_timeout_sec)
+
+  def _create_signal_proc(self):
+    return SignalProc()
+
+  def _create_rerun_proc(self, options):
+    if not options.rerun_failures_count:
+      return None
+    return RerunProc(options.rerun_failures_count,
+                     options.rerun_failures_max)
diff --git a/src/third_party/v8/tools/testrunner/local/__init__.py b/src/third_party/v8/tools/testrunner/local/__init__.py
new file mode 100644
index 0000000..202a262
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/local/__init__.py
@@ -0,0 +1,26 @@
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/third_party/v8/tools/testrunner/local/android.py b/src/third_party/v8/tools/testrunner/local/android.py
new file mode 100644
index 0000000..ebf04af
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/local/android.py
@@ -0,0 +1,205 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Wrapper around the Android device abstraction from src/build/android.
+"""
+
+import logging
+import os
+import sys
+import re
+
+BASE_DIR = os.path.normpath(
+    os.path.join(os.path.dirname(__file__), '..', '..', '..'))
+ANDROID_DIR = os.path.join(BASE_DIR, 'build', 'android')
+DEVICE_DIR = '/data/local/tmp/v8/'
+
+
+class TimeoutException(Exception):
+  def __init__(self, timeout, output=None):
+    self.timeout = timeout
+    self.output = output
+
+
+class CommandFailedException(Exception):
+  def __init__(self, status, output):
+    self.status = status
+    self.output = output
+
+
+class _Driver(object):
+  """Helper class to execute shell commands on an Android device."""
+  def __init__(self, device=None):
+    assert os.path.exists(ANDROID_DIR)
+    sys.path.insert(0, ANDROID_DIR)
+
+    # We import the dependencies only on demand, so that this file can be
+    # imported unconditionally.
+    import devil_chromium
+    from devil.android import device_errors  # pylint: disable=import-error
+    from devil.android import device_utils  # pylint: disable=import-error
+    from devil.android.perf import cache_control  # pylint: disable=import-error
+    from devil.android.perf import perf_control  # pylint: disable=import-error
+    global cache_control
+    global device_errors
+    global perf_control
+
+    devil_chromium.Initialize()
+
+    # Find the specified device, or the single attached device if none was
+    # specified. If no device or multiple devices are attached, this raises
+    # an exception.
+    self.device = device_utils.DeviceUtils.HealthyDevices(
+        retries=5, enable_usb_resets=True, device_arg=device)[0]
+
+    # This remembers what we have already pushed to the device.
+    self.pushed = set()
+
+  def tear_down(self):
+    """Clean up files after running all tests."""
+    self.device.RemovePath(DEVICE_DIR, force=True, recursive=True)
+
+  def push_file(self, host_dir, file_name, target_rel='.',
+                skip_if_missing=False):
+    """Push a single file to the device (cached).
+
+    Args:
+      host_dir: Absolute parent directory of the file to push.
+      file_name: Name of the file to push.
+      target_rel: Parent directory of the target location on the device
+          (relative to the device's base dir for testing).
+      skip_if_missing: When set, missing files are silently skipped. Otherwise
+          an error is logged.
+    """
+    # TODO(sergiyb): Implement this method using self.device.PushChangedFiles to
+    # avoid accessing low-level self.device.adb.
+    file_on_host = os.path.join(host_dir, file_name)
+
+    # Only push files not yet pushed in one execution.
+    if file_on_host in self.pushed:
+      return
+
+    file_on_device_tmp = os.path.join(DEVICE_DIR, '_tmp_', file_name)
+    file_on_device = os.path.join(DEVICE_DIR, target_rel, file_name)
+    folder_on_device = os.path.dirname(file_on_device)
+
+    # Only attempt to push files that exist.
+    if not os.path.exists(file_on_host):
+      if not skip_if_missing:
+        logging.critical('Missing file on host: %s' % file_on_host)
+      return
+
+    # Work-around for 'text file busy' errors. Push the files to a temporary
+    # location and then copy them with a shell command.
+    output = self.device.adb.Push(file_on_host, file_on_device_tmp)
+    # Success looks like this: '3035 KB/s (12512056 bytes in 4.025s)'.
+    # Errors look like this: 'failed to copy  ... '.
+    if output and not re.search('^[0-9]', output.splitlines()[-1]):
+      logging.critical('PUSH FAILED: ' + output)
+    self.device.adb.Shell('mkdir -p %s' % folder_on_device)
+    self.device.adb.Shell('cp %s %s' % (file_on_device_tmp, file_on_device))
+    self.pushed.add(file_on_host)
+
+  def push_executable(self, shell_dir, target_dir, binary):
+    """Push files required to run a V8 executable.
+
+    Args:
+      shell_dir: Absolute parent directory of the executable on the host.
+      target_dir: Parent directory of the executable on the device (relative to
+          device's base dir for testing).
+      binary: Name of the binary to push.
+    """
+    self.push_file(shell_dir, binary, target_dir)
+
+    # Push external startup data. Backwards compatible with revisions where
+    # these files didn't exist and with bots that don't produce these files.
+    self.push_file(
+        shell_dir,
+        'natives_blob.bin',
+        target_dir,
+        skip_if_missing=True,
+    )
+    self.push_file(
+        shell_dir,
+        'snapshot_blob.bin',
+        target_dir,
+        skip_if_missing=True,
+    )
+    self.push_file(
+        shell_dir,
+        'snapshot_blob_trusted.bin',
+        target_dir,
+        skip_if_missing=True,
+    )
+    self.push_file(
+        shell_dir,
+        'icudtl.dat',
+        target_dir,
+        skip_if_missing=True,
+    )
+
+  def run(self, target_dir, binary, args, rel_path, timeout, env=None,
+          logcat_file=False):
+    """Execute a command on the device's shell.
+
+    Args:
+      target_dir: Parent directory of the executable on the device (relative to
+          device's base dir for testing).
+      binary: Name of the binary.
+      args: List of arguments to pass to the binary.
+      rel_path: Relative path on device to use as CWD.
+      timeout: Timeout in seconds.
+      env: The environment variables with which the command should be run.
+      logcat_file: File into which to stream adb logcat log.
+    """
+    binary_on_device = os.path.join(DEVICE_DIR, target_dir, binary)
+    cmd = [binary_on_device] + args
+    def run_inner():
+      try:
+        output = self.device.RunShellCommand(
+            cmd,
+            cwd=os.path.join(DEVICE_DIR, rel_path),
+            check_return=True,
+            env=env,
+            timeout=timeout,
+            retries=0,
+        )
+        return '\n'.join(output)
+      except device_errors.AdbCommandFailedError as e:
+        raise CommandFailedException(e.status, e.output)
+      except device_errors.CommandTimeoutError as e:
+        raise TimeoutException(timeout, e.output)
+
+
+    if logcat_file:
+      with self.device.GetLogcatMonitor(output_file=logcat_file) as logmon:
+        result = run_inner()
+      logmon.Close()
+      return result
+    else:
+      return run_inner()
+
+  def drop_ram_caches(self):
+    """Drop ran caches on device."""
+    cache = cache_control.CacheControl(self.device)
+    cache.DropRamCaches()
+
+  def set_high_perf_mode(self):
+    """Set device into high performance mode."""
+    perf = perf_control.PerfControl(self.device)
+    perf.SetHighPerfMode()
+
+  def set_default_perf_mode(self):
+    """Set device into default performance mode."""
+    perf = perf_control.PerfControl(self.device)
+    perf.SetDefaultPerfMode()
+
+
+_ANDROID_DRIVER = None
+def android_driver(device=None):
+  """Singleton access method to the driver class."""
+  global _ANDROID_DRIVER
+  if not _ANDROID_DRIVER:
+    _ANDROID_DRIVER = _Driver(device)
+  return _ANDROID_DRIVER
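A minimal usage sketch for the driver above; the output directory is hypothetical, and a checkout containing build/android plus a single attached device is assumed.

    driver = android_driver()        # picks the single attached device
    driver.set_high_perf_mode()
    try:
      driver.push_executable('/checkout/out/android.arm64.release', 'bin', 'd8')
      print(driver.run('bin', 'd8', ['-e', 'print(6*7)'], '.', timeout=60))
    finally:
      driver.set_default_perf_mode()
      driver.tear_down()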
diff --git a/src/third_party/v8/tools/testrunner/local/command.py b/src/third_party/v8/tools/testrunner/local/command.py
new file mode 100644
index 0000000..df603d7
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/local/command.py
@@ -0,0 +1,349 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+from contextlib import contextmanager
+import os
+import re
+import signal
+import subprocess
+import sys
+import threading
+import time
+
+from ..local.android import (
+    android_driver, CommandFailedException, TimeoutException)
+from ..local import utils
+from ..objects import output
+
+
+BASE_DIR = os.path.normpath(
+    os.path.join(os.path.dirname(os.path.abspath(__file__)), '..' , '..', '..'))
+
+SEM_INVALID_VALUE = -1
+SEM_NOGPFAULTERRORBOX = 0x0002  # Microsoft Platform SDK WinBase.h
+
+
+def setup_testing():
+  """For testing only: We use threading under the hood instead of
+  multiprocessing to make coverage work. Signal handling is only supported
+  in the main thread, so we disable it for testing.
+  """
+  signal.signal = lambda *_: None
+
+
+class AbortException(Exception):
+  """Indicates early abort on SIGINT, SIGTERM or internal hard timeout."""
+  pass
+
+
+@contextmanager
+def handle_sigterm(process, abort_fun, enabled):
+  """Call`abort_fun` on sigterm and restore previous handler to prevent
+  erroneous termination of an already terminated process.
+
+  Args:
+    process: The process to terminate.
+    abort_fun: Function taking two parameters: the process to terminate and
+        an array with a boolean for storing if an abort occurred.
+    enabled: If False, this wrapper will be a no-op.
+  """
+  # Variable to communicate with the signal handler.
+  abort_occured = [False]
+  def handler(signum, frame):
+    abort_fun(process, abort_occured)
+
+  if enabled:
+    previous = signal.signal(signal.SIGTERM, handler)
+  try:
+    yield
+  finally:
+    if enabled:
+      signal.signal(signal.SIGTERM, previous)
+
+  if abort_occured[0]:
+    raise AbortException()
+
+
+class BaseCommand(object):
+  def __init__(self, shell, args=None, cmd_prefix=None, timeout=60, env=None,
+               verbose=False, resources_func=None, handle_sigterm=False):
+    """Initialize the command.
+
+    Args:
+      shell: The name of the executable (e.g. d8).
+      args: List of args to pass to the executable.
+      cmd_prefix: Prefix of command (e.g. a wrapper script).
+      timeout: Timeout in seconds.
+      env: Environment dict for execution.
+      verbose: Print additional output.
+      resources_func: Callable, returning all test files needed by this command.
+      handle_sigterm: Flag indicating if SIGTERM will be used to terminate the
+          underlying process. Should not be used from the main thread, e.g. when
+          using a command to list tests.
+    """
+    assert(timeout > 0)
+
+    self.shell = shell
+    self.args = args or []
+    self.cmd_prefix = cmd_prefix or []
+    self.timeout = timeout
+    self.env = env or {}
+    self.verbose = verbose
+    self.handle_sigterm = handle_sigterm
+
+  def execute(self):
+    if self.verbose:
+      print('# %s' % self)
+
+    process = self._start_process()
+
+    with handle_sigterm(process, self._abort, self.handle_sigterm):
+      # Variable to communicate with the timer.
+      timeout_occured = [False]
+      timer = threading.Timer(
+          self.timeout, self._abort, [process, timeout_occured])
+      timer.start()
+
+      start_time = time.time()
+      stdout, stderr = process.communicate()
+      duration = time.time() - start_time
+
+      timer.cancel()
+
+    return output.Output(
+      process.returncode,
+      timeout_occured[0],
+      stdout.decode('utf-8', 'replace').encode('utf-8'),
+      stderr.decode('utf-8', 'replace').encode('utf-8'),
+      process.pid,
+      duration
+    )
+
+  def _start_process(self):
+    try:
+      return subprocess.Popen(
+        args=self._get_popen_args(),
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+        env=self._get_env(),
+      )
+    except Exception as e:
+      sys.stderr.write('Error executing: %s\n' % self)
+      raise e
+
+  def _get_popen_args(self):
+    return self._to_args_list()
+
+  def _get_env(self):
+    env = os.environ.copy()
+    env.update(self.env)
+    # GTest shard information is read by the V8 tests runner. Make sure it
+    # doesn't leak into the execution of gtests we're wrapping. Those might
+    # otherwise apply a second level of sharding and as a result skip tests.
+    env.pop('GTEST_TOTAL_SHARDS', None)
+    env.pop('GTEST_SHARD_INDEX', None)
+    return env
+
+  def _kill_process(self, process):
+    raise NotImplementedError()
+
+  def _abort(self, process, abort_called):
+    abort_called[0] = True
+    started_as = self.to_string(relative=True)
+    process_text = 'process %d started as:\n  %s\n' % (process.pid, started_as)
+    try:
+      print('Attempting to kill ' + process_text)
+      sys.stdout.flush()
+      self._kill_process(process)
+    except OSError as e:
+      print(e)
+      print('Unruly ' + process_text)
+      sys.stdout.flush()
+
+  def __str__(self):
+    return self.to_string()
+
+  def to_string(self, relative=False):
+    def escape(part):
+      # Escape spaces. We may need to escape more characters for this to work
+      # properly.
+      if ' ' in part:
+        return '"%s"' % part
+      return part
+
+    parts = map(escape, self._to_args_list())
+    cmd = ' '.join(parts)
+    if relative:
+      cmd = cmd.replace(os.getcwd() + os.sep, '')
+    return cmd
+
+  def _to_args_list(self):
+    return self.cmd_prefix + [self.shell] + self.args
+
+
+class PosixCommand(BaseCommand):
+  # TODO(machenbach): Use base process start without shell once
+  # https://crbug.com/v8/8889 is resolved.
+  def _start_process(self):
+    def wrapped(arg):
+      if set('() \'"') & set(arg):
+        return "'%s'" % arg.replace("'", "'\"'\"'")
+      return arg
+    try:
+      return subprocess.Popen(
+        args=' '.join(map(wrapped, self._get_popen_args())),
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+        env=self._get_env(),
+        shell=True,
+        # Make the new shell create its own process group. This allows us to
+        # kill all spawned processes reliably (https://crbug.com/v8/8292).
+        preexec_fn=os.setsid,
+      )
+    except Exception as e:
+      sys.stderr.write('Error executing: %s\n' % self)
+      raise e
+
+  def _kill_process(self, process):
+    # Kill the whole process group (PID == GPID after setsid).
+    os.killpg(process.pid, signal.SIGKILL)
+
+
+def taskkill_windows(process, verbose=False, force=True):
+  force_flag = ' /F' if force else ''
+  tk = subprocess.Popen(
+      'taskkill /T%s /PID %d' % (force_flag, process.pid),
+      stdout=subprocess.PIPE,
+      stderr=subprocess.PIPE,
+  )
+  stdout, stderr = tk.communicate()
+  if verbose:
+    print('Taskkill results for %d' % process.pid)
+    print(stdout)
+    print(stderr)
+    print('Return code: %d' % tk.returncode)
+    sys.stdout.flush()
+
+
+class WindowsCommand(BaseCommand):
+  def _start_process(self, **kwargs):
+    # Try to change the error mode to avoid dialogs on fatal errors. Preserve
+    # any existing error mode flags by merging in the previous error mode.
+    # See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx.
+    def set_error_mode(mode):
+      prev_error_mode = SEM_INVALID_VALUE
+      try:
+        import ctypes
+        prev_error_mode = (
+            ctypes.windll.kernel32.SetErrorMode(mode))  #@UndefinedVariable
+      except ImportError:
+        pass
+      return prev_error_mode
+
+    error_mode = SEM_NOGPFAULTERRORBOX
+    prev_error_mode = set_error_mode(error_mode)
+    set_error_mode(error_mode | prev_error_mode)
+
+    try:
+      return super(WindowsCommand, self)._start_process(**kwargs)
+    finally:
+      if prev_error_mode != SEM_INVALID_VALUE:
+        set_error_mode(prev_error_mode)
+
+  def _get_popen_args(self):
+    return subprocess.list2cmdline(self._to_args_list())
+
+  def _kill_process(self, process):
+    taskkill_windows(process, self.verbose)
+
+
+class AndroidCommand(BaseCommand):
+  # This must be initialized before creating any instances of this class.
+  driver = None
+
+  def __init__(self, shell, args=None, cmd_prefix=None, timeout=60, env=None,
+               verbose=False, resources_func=None, handle_sigterm=False):
+    """Initialize the command and all files that need to be pushed to the
+    Android device.
+    """
+    self.shell_name = os.path.basename(shell)
+    self.shell_dir = os.path.dirname(shell)
+    self.files_to_push = (resources_func or (lambda: []))()
+
+    # Make all paths in arguments relative and also prepare files from arguments
+    # for pushing to the device.
+    rel_args = []
+    find_path_re = re.compile(r'.*(%s/[^\'"]+).*' % re.escape(BASE_DIR))
+    for arg in (args or []):
+      match = find_path_re.match(arg)
+      if match:
+        self.files_to_push.append(match.group(1))
+      rel_args.append(
+          re.sub(r'(.*)%s/(.*)' % re.escape(BASE_DIR), r'\1\2', arg))
+
+    super(AndroidCommand, self).__init__(
+        shell, args=rel_args, cmd_prefix=cmd_prefix, timeout=timeout, env=env,
+        verbose=verbose, handle_sigterm=handle_sigterm)
+
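A short sketch of the argument rewriting above (the BASE_DIR value is hypothetical): absolute paths under the checkout are recorded for pushing and rewritten relative to the checkout root.

    import re

    BASE_DIR = '/checkout/v8'  # hypothetical checkout root
    find_path_re = re.compile(r'.*(%s/[^\'"]+).*' % re.escape(BASE_DIR))

    arg = '--testfile=/checkout/v8/test/mjsunit/array-sort.js'
    print(find_path_re.match(arg).group(1))  # absolute path, queued for pushing
    print(re.sub(r'(.*)%s/(.*)' % re.escape(BASE_DIR), r'\1\2', arg))
    # --testfile=test/mjsunit/array-sort.js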
+  def execute(self, **additional_popen_kwargs):
+    """Execute the command on the device.
+
+    This pushes all required files to the device and then runs the command.
+    """
+    if self.verbose:
+      print('# %s' % self)
+
+    self.driver.push_executable(self.shell_dir, 'bin', self.shell_name)
+
+    for abs_file in self.files_to_push:
+      abs_dir = os.path.dirname(abs_file)
+      file_name = os.path.basename(abs_file)
+      rel_dir = os.path.relpath(abs_dir, BASE_DIR)
+      self.driver.push_file(abs_dir, file_name, rel_dir)
+
+    start_time = time.time()
+    return_code = 0
+    timed_out = False
+    try:
+      stdout = self.driver.run(
+          'bin', self.shell_name, self.args, '.', self.timeout, self.env)
+    except CommandFailedException as e:
+      return_code = e.status
+      stdout = e.output
+    except TimeoutException as e:
+      return_code = 1
+      timed_out = True
+      # Sadly the Android driver doesn't provide output on timeout.
+      stdout = ''
+
+    duration = time.time() - start_time
+    return output.Output(
+        return_code,
+        timed_out,
+        stdout,
+        '',  # No stderr available.
+        -1,  # No pid available.
+        duration,
+    )
+
+
+Command = None
+def setup(target_os, device):
+  """Set the Command class to the OS-specific version."""
+  global Command
+  if target_os == 'android':
+    AndroidCommand.driver = android_driver(device)
+    Command = AndroidCommand
+  elif target_os == 'windows':
+    Command = WindowsCommand
+  else:
+    Command = PosixCommand
+
+def tear_down():
+  """Clean up after using commands."""
+  if Command == AndroidCommand:
+    AndroidCommand.driver.tear_down()
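A minimal host-side sketch of how this module is intended to be used (the d8 path is hypothetical, and the attribute name on the returned Output object is assumed): call setup() once to pick the OS-specific class, then build and execute commands.

    setup('linux', device=None)      # installs PosixCommand as Command
    cmd = Command('/checkout/out/x64.release/d8',
                  args=['-e', 'print(6*7)'],
                  timeout=60)
    result = cmd.execute()           # objects.output.Output with exit code,
    print(result.stdout)             # stdout/stderr, pid and duration (assumed)
    tear_down()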
diff --git a/src/third_party/v8/tools/testrunner/local/fake_testsuite/fake_testsuite.status b/src/third_party/v8/tools/testrunner/local/fake_testsuite/fake_testsuite.status
new file mode 100644
index 0000000..b5ebc84
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/local/fake_testsuite/fake_testsuite.status
@@ -0,0 +1,5 @@
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+[]
diff --git a/src/third_party/v8/tools/testrunner/local/fake_testsuite/testcfg.py b/src/third_party/v8/tools/testrunner/local/fake_testsuite/testcfg.py
new file mode 100644
index 0000000..28de737
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/local/fake_testsuite/testcfg.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+from testrunner.local import testsuite, statusfile
+
+
+class TestLoader(testsuite.TestLoader):
+  def _list_test_filenames(self):
+    return ["fast", "slow"]
+
+  def list_tests(self):
+    self.test_count_estimation = 2
+    fast = self._create_test("fast", self.suite)
+    slow = self._create_test("slow", self.suite)
+
+    slow._statusfile_outcomes.append(statusfile.SLOW)
+    yield fast
+    yield slow
+
+
+class TestSuite(testsuite.TestSuite):
+  def _test_loader_class(self):
+    return TestLoader
+
+  def _test_class(self):
+    return testsuite.TestCase
+
+def GetSuite(*args, **kwargs):
+  return TestSuite(*args, **kwargs)
diff --git a/src/third_party/v8/tools/testrunner/local/pool.py b/src/third_party/v8/tools/testrunner/local/pool.py
new file mode 100644
index 0000000..f3f2e9d
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/local/pool.py
@@ -0,0 +1,313 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+from contextlib import contextmanager
+from multiprocessing import Process, Queue
+import os
+import signal
+import time
+import traceback
+
+try:
+  from queue import Empty  # Python 3
+except ImportError:
+  from Queue import Empty  # Python 2
+
+from . import command
+from . import utils
+
+
+def setup_testing():
+  """For testing only: Use threading under the hood instead of multiprocessing
+  to make coverage work.
+  """
+  global Queue
+  global Process
+  del Queue
+  del Process
+  try:
+    from queue import Queue  # Python 3
+  except ImportError:
+    from Queue import Queue  # Python 2
+
+  from threading import Thread as Process
+  # Monkeypatch threading Queue to look like multiprocessing Queue.
+  Queue.cancel_join_thread = lambda self: None
+  # Monkeypatch os.kill and add fake pid property on Thread.
+  os.kill = lambda *args: None
+  Process.pid = property(lambda self: None)
+
+
+class NormalResult():
+  def __init__(self, result):
+    self.result = result
+    self.exception = None
+
+class ExceptionResult():
+  def __init__(self, exception):
+    self.exception = exception
+
+
+class MaybeResult():
+  def __init__(self, heartbeat, value):
+    self.heartbeat = heartbeat
+    self.value = value
+
+  @staticmethod
+  def create_heartbeat():
+    return MaybeResult(True, None)
+
+  @staticmethod
+  def create_result(value):
+    return MaybeResult(False, value)
+
+
+def Worker(fn, work_queue, done_queue,
+           process_context_fn=None, process_context_args=None):
+  """Worker to be run in a child process.
+  The worker stops when the poison pill "STOP" is reached.
+  """
+  try:
+    kwargs = {}
+    if process_context_fn and process_context_args is not None:
+      kwargs.update(process_context=process_context_fn(*process_context_args))
+    for args in iter(work_queue.get, "STOP"):
+      try:
+        done_queue.put(NormalResult(fn(*args, **kwargs)))
+      except command.AbortException:
+        # SIGINT, SIGTERM or internal hard timeout.
+        break
+      except Exception as e:
+        traceback.print_exc()
+        print(">>> EXCEPTION: %s" % e)
+        done_queue.put(ExceptionResult(e))
+    # When we reach here on normal tear down, all items have been pulled from
+    # the done_queue before and this should have no effect. On fast abort, it's
+    # possible that a fast worker left items on the done_queue in memory, which
+    # will never be pulled. This call purges those to avoid a deadlock.
+    done_queue.cancel_join_thread()
+  except KeyboardInterrupt:
+    assert False, 'Unreachable'
+
+
+@contextmanager
+def without_sig():
+  int_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
+  term_handler = signal.signal(signal.SIGTERM, signal.SIG_IGN)
+  try:
+    yield
+  finally:
+    signal.signal(signal.SIGINT, int_handler)
+    signal.signal(signal.SIGTERM, term_handler)
+
+
+class Pool():
+  """Distributes tasks to a number of worker processes.
+  New tasks can be added dynamically even after the workers have been started.
+  Requirement: Tasks can only be added from the parent process, e.g. while
+  consuming the results generator."""
+
+  # Factor to calculate the maximum number of items in the work/done queue.
+  # Necessary to avoid overflowing the queue's pipe if a keyboard interrupt
+  # happens.
+  BUFFER_FACTOR = 4
+
+  def __init__(self, num_workers, heartbeat_timeout=1, notify_fun=None):
+    """
+    Args:
+      num_workers: Number of worker processes to run in parallel.
+      heartbeat_timeout: Timeout in seconds for waiting for results. Each time
+          the timeout is reached, a heartbeat is signalled and the timeout is
+          reset.
+      notify_fun: Callable used to signal events such as termination. The
+          event name is passed as a string.
+    """
+    self.num_workers = num_workers
+    self.processes = []
+    self.terminated = False
+    self.abort_now = False
+
+    # Invariant: processing_count >= #work_queue + #done_queue. It is greater
+    # when a worker takes an item from the work_queue and before the result is
+    # submitted to the done_queue. It is equal when no worker is working,
+    # e.g. when all workers have finished, and when no results are processed.
+    # Count is only accessed by the parent process. Only the parent process is
+    # allowed to remove items from the done_queue and to add items to the
+    # work_queue.
+    self.processing_count = 0
+    self.heartbeat_timeout = heartbeat_timeout
+    self.notify = notify_fun or (lambda x: x)
+
+    # Disable sigint and sigterm to prevent subprocesses from capturing the
+    # signals.
+    with without_sig():
+      self.work_queue = Queue()
+      self.done_queue = Queue()
+
+  def imap_unordered(self, fn, gen,
+                     process_context_fn=None, process_context_args=None):
+    """Maps function "fn" to items in generator "gen" on the worker processes
+    in an arbitrary order. The items are expected to be lists of arguments to
+    the function. Returns a results iterator. A result value of type
+    MaybeResult is either a heartbeat, indicating that the runner is still
+    waiting for a result to be computed, or a wrapper around the real result.
+
+    Args:
+      process_context_fn: Function executed once by each worker. Expected to
+          return a process-context object. If present, this object is passed
+          as additional argument to each call to fn.
+      process_context_args: List of arguments for the invocation of
+          process_context_fn. All arguments will be pickled and sent across the
+          process boundary.
+    """
+    if self.terminated:
+      return
+    try:
+      internal_error = False
+      gen = iter(gen)
+      self.advance = self._advance_more
+
+      # Disable sigint and sigterm to prevent subprocesses from capturing the
+      # signals.
+      with without_sig():
+        for w in range(self.num_workers):
+          p = Process(target=Worker, args=(fn,
+                                          self.work_queue,
+                                          self.done_queue,
+                                          process_context_fn,
+                                          process_context_args))
+          p.start()
+          self.processes.append(p)
+
+      self.advance(gen)
+      while self.processing_count > 0:
+        while True:
+          try:
+            # Read from result queue in a responsive fashion. If available,
+            # this will return a normal result immediately or a heartbeat on
+            # heartbeat timeout (default 1 second).
+            result = self._get_result_from_queue()
+          except:
+            # TODO(machenbach): Handle a few known types of internal errors
+            # gracefully, e.g. missing test files.
+            internal_error = True
+            continue
+          finally:
+            if self.abort_now:
+              # SIGINT, SIGTERM or internal hard timeout.
+              return
+
+          yield result
+          break
+
+        self.advance(gen)
+    except KeyboardInterrupt:
+      assert False, 'Unreachable'
+    except Exception as e:
+      traceback.print_exc()
+      print(">>> EXCEPTION: %s" % e)
+    finally:
+      self._terminate()
+
+    if internal_error:
+      raise Exception("Internal error in a worker process.")
+
+  def _advance_more(self, gen):
+    while self.processing_count < self.num_workers * self.BUFFER_FACTOR:
+      try:
+        self.work_queue.put(next(gen))
+        self.processing_count += 1
+      except StopIteration:
+        self.advance = self._advance_empty
+        break
+
+  def _advance_empty(self, gen):
+    pass
+
+  def add(self, args):
+    """Adds an item to the work queue. Can be called dynamically while
+    processing the results from imap_unordered."""
+    assert not self.terminated
+
+    self.work_queue.put(args)
+    self.processing_count += 1
+
+  def abort(self):
+    """Schedules abort on next queue read.
+
+    This is safe to call when handling SIGINT, SIGTERM or when an internal
+    hard timeout is reached.
+    """
+    self.abort_now = True
+
+  def _terminate_processes(self):
+    for p in self.processes:
+      if utils.IsWindows():
+        command.taskkill_windows(p, verbose=True, force=False)
+      else:
+        os.kill(p.pid, signal.SIGTERM)
+
+  def _terminate(self):
+    """Terminates execution and cleans up the queues.
+
+    If abort() was called before termination, this also terminates the
+    subprocesses and doesn't wait for ongoing tests.
+    """
+    if self.terminated:
+      return
+    self.terminated = True
+
+    # Drain remaining tests from the work queue.
+    try:
+      while True:
+        self.work_queue.get(True, 0.1)
+    except Empty:
+      pass
+
+    # Make sure all processes stop
+    for _ in self.processes:
+      # During normal tear down the workers block on get(). Feed a poison pill
+      # per worker to make them stop.
+      self.work_queue.put("STOP")
+
+    if self.abort_now:
+      self._terminate_processes()
+
+    self.notify("Joining workers")
+    for p in self.processes:
+      p.join()
+
+    # Drain the queues to prevent stderr chatter when queues are garbage
+    # collected.
+    self.notify("Draining queues")
+    try:
+      while True: self.work_queue.get(False)
+    except:
+      pass
+    try:
+      while True: self.done_queue.get(False)
+    except:
+      pass
+
+  def _get_result_from_queue(self):
+    """Attempts to get the next result from the queue.
+
+    Returns: A wrapped result if one was available within heartbeat timeout,
+        a heartbeat result otherwise.
+    Raises:
+        Exception: If an exception occurred when processing the task on the
+            worker side, it is reraised here.
+    """
+    while True:
+      try:
+        result = self.done_queue.get(timeout=self.heartbeat_timeout)
+        self.processing_count -= 1
+        if result.exception:
+          raise result.exception
+        return MaybeResult.create_result(result.result)
+      except Empty:
+        return MaybeResult.create_heartbeat()
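The unit test in the next file exercises the normal result and add() paths; the sketch here shows the abort() hook, which is the intended way to stop the pool from a signal handler or a hard-timeout watchdog (the worker function is hypothetical).

    import signal

    def run_test(n):
      return n * n                    # hypothetical work item

    pool = Pool(2)
    signal.signal(signal.SIGTERM, lambda *_: pool.abort())

    for maybe in pool.imap_unordered(run_test, [[n] for n in range(4)]):
      if maybe.heartbeat:
        continue                      # still waiting; keep progress output alive
      print(maybe.value)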
diff --git a/src/third_party/v8/tools/testrunner/local/pool_unittest.py b/src/third_party/v8/tools/testrunner/local/pool_unittest.py
new file mode 100755
index 0000000..240cd56
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/local/pool_unittest.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+import unittest
+
+# Needed because the test runner contains relative imports.
+TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.dirname(
+    os.path.abspath(__file__))))
+sys.path.append(TOOLS_PATH)
+
+from testrunner.local.pool import Pool
+
+def Run(x):
+  if x == 10:
+    raise Exception("Expected exception triggered by test.")
+  return x
+
+class PoolTest(unittest.TestCase):
+  def testNormal(self):
+    results = set()
+    pool = Pool(3)
+    for result in pool.imap_unordered(Run, [[x] for x in range(0, 10)]):
+      if result.heartbeat:
+        # Any result can be a heartbeat due to timings.
+        continue
+      results.add(result.value)
+    self.assertEquals(set(range(0, 10)), results)
+
+  def testException(self):
+    results = set()
+    pool = Pool(3)
+    with self.assertRaises(Exception):
+      for result in pool.imap_unordered(Run, [[x] for x in range(0, 12)]):
+        if result.heartbeat:
+          # Any result can be a heartbeat due to timings.
+          continue
+        # Item 10 will not appear in results due to an internal exception.
+        results.add(result.value)
+    expect = set(range(0, 12))
+    expect.remove(10)
+    self.assertEquals(expect, results)
+
+  def testAdd(self):
+    results = set()
+    pool = Pool(3)
+    for result in pool.imap_unordered(Run, [[x] for x in range(0, 10)]):
+      if result.heartbeat:
+        # Any result can be a heartbeat due to timings.
+        continue
+      results.add(result.value)
+      if result.value < 30:
+        pool.add([result.value + 20])
+    self.assertEquals(set(range(0, 10) + range(20, 30) + range(40, 50)),
+                      results)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/src/third_party/v8/tools/testrunner/local/statusfile.py b/src/third_party/v8/tools/testrunner/local/statusfile.py
new file mode 100644
index 0000000..854abc6
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/local/statusfile.py
@@ -0,0 +1,342 @@
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+from __future__ import absolute_import
+
+import os
+import re
+
+from .variants import ALL_VARIANTS
+from .utils import Freeze
+
+# Possible outcomes
+FAIL = "FAIL"
+PASS = "PASS"
+TIMEOUT = "TIMEOUT"
+CRASH = "CRASH"
+
+# Outcomes only for status file, need special handling
+FAIL_OK = "FAIL_OK"
+FAIL_SLOPPY = "FAIL_SLOPPY"
+
+# Modifiers
+SKIP = "SKIP"
+SLOW = "SLOW"
+NO_VARIANTS = "NO_VARIANTS"
+FAIL_PHASE_ONLY = "FAIL_PHASE_ONLY"
+
+ALWAYS = "ALWAYS"
+
+KEYWORDS = {}
+for key in [SKIP, FAIL, PASS, CRASH, SLOW, FAIL_OK, NO_VARIANTS, FAIL_SLOPPY,
+            ALWAYS, FAIL_PHASE_ONLY]:
+  KEYWORDS[key] = key
+
+# Support arches, modes to be written as keywords instead of strings.
+VARIABLES = {ALWAYS: True}
+for var in ["debug", "release", "big", "little", "android",
+            "arm", "arm64", "ia32", "mips", "mipsel", "mips64", "mips64el",
+            "x64", "ppc", "ppc64", "s390", "s390x", "macos", "windows",
+            "linux", "aix", "r1", "r2", "r3", "r5", "r6"]:
+  VARIABLES[var] = var
+
+# Allow using variants as keywords.
+for var in ALL_VARIANTS:
+  VARIABLES[var] = var
+
+class StatusFile(object):
+  def __init__(self, path, variables):
+    """
+    _rules:        {variant: {test name: [rule]}}
+    _prefix_rules: {variant: {test name prefix: [rule]}}
+    """
+    self.variables = variables
+    with open(path) as f:
+      self._rules, self._prefix_rules = ReadStatusFile(f.read(), variables)
+
+  def get_outcomes(self, testname, variant=None):
+    """Merges variant dependent and independent rules."""
+    outcomes = frozenset()
+
+    for key in set([variant or '', '']):
+      rules = self._rules.get(key, {})
+      prefix_rules = self._prefix_rules.get(key, {})
+
+      if testname in rules:
+        outcomes |= rules[testname]
+
+      for prefix in prefix_rules:
+        if testname.startswith(prefix):
+          outcomes |= prefix_rules[prefix]
+
+    return outcomes
+
+  def warn_unused_rules(self, tests, check_variant_rules=False):
+    """Finds and prints unused rules in status file.
+
+    Rule X is unused when it doesn't apply to any tests, which can also mean
+    that all matching tests were skipped by another rule before evaluating X.
+
+    Args:
+      tests: list of pairs (testname, variant)
+      check_variant_rules: if set, variant-dependent rules are checked as well
+    """
+
+    if check_variant_rules:
+      variants = list(ALL_VARIANTS)
+    else:
+      variants = ['']
+    used_rules = set()
+
+    for testname, variant in tests:
+      variant = variant or ''
+
+      if testname in self._rules.get(variant, {}):
+        used_rules.add((testname, variant))
+        if SKIP in self._rules[variant][testname]:
+          continue
+
+      for prefix in self._prefix_rules.get(variant, {}):
+        if testname.startswith(prefix):
+          used_rules.add((prefix, variant))
+          if SKIP in self._prefix_rules[variant][prefix]:
+            break
+
+    for variant in variants:
+      for rule, value in (
+          list(self._rules.get(variant, {}).iteritems()) +
+          list(self._prefix_rules.get(variant, {}).iteritems())):
+        if (rule, variant) not in used_rules:
+          if variant == '':
+            variant_desc = 'variant independent'
+          else:
+            variant_desc = 'variant: %s' % variant
+          print('Unused rule: %s -> %s (%s)' % (rule, value, variant_desc))
+
+
+def _JoinsPassAndFail(outcomes1, outcomes2):
+  """Indicates if we join PASS and FAIL from two different outcome sets and
+  the first doesn't already contain both.
+  """
+  return (
+      PASS in outcomes1 and
+      not (FAIL in outcomes1 or FAIL_OK in outcomes1) and
+      (FAIL in outcomes2 or FAIL_OK in outcomes2)
+  )
+
+VARIANT_EXPRESSION = object()
+
+def _EvalExpression(exp, variables):
+  """Evaluates expression and returns its result. In case of NameError caused by
+  undefined "variant" identifier returns VARIANT_EXPRESSION marker.
+  """
+
+  try:
+    return eval(exp, variables)
+  except NameError as e:
+    identifier = re.match("name '(.*)' is not defined", e.message).group(1)
+    assert identifier == "variant", "Unknown identifier: %s" % identifier
+    return VARIANT_EXPRESSION
+
+
+def _EvalVariantExpression(
+  condition, section, variables, variant, rules, prefix_rules):
+  variables_with_variant = dict(variables)
+  variables_with_variant["variant"] = variant
+  result = _EvalExpression(condition, variables_with_variant)
+  assert result != VARIANT_EXPRESSION
+  if result is True:
+    _ReadSection(
+        section,
+        variables_with_variant,
+        rules[variant],
+        prefix_rules[variant],
+    )
+  else:
+    assert result is False, "Make sure expressions evaluate to boolean values"
+
+
+def _ParseOutcomeList(rule, outcomes, variables, target_dict):
+  """Outcome list format: [condition, outcome, outcome, ...]"""
+
+  result = set([])
+  if type(outcomes) == str:
+    outcomes = [outcomes]
+  for item in outcomes:
+    if type(item) == str:
+      result.add(item)
+    elif type(item) == list:
+      condition = item[0]
+      exp = _EvalExpression(condition, variables)
+      assert exp != VARIANT_EXPRESSION, (
+        "Nested variant expressions are not supported")
+      if exp is False:
+        continue
+
+      # Ensure nobody uses an identifier by mistake, like "default",
+      # which would evaluate to true here otherwise.
+      assert exp is True, "Make sure expressions evaluate to boolean values"
+
+      for outcome in item[1:]:
+        assert type(outcome) == str
+        result.add(outcome)
+    else:
+      assert False
+  if len(result) == 0:
+    return
+  if rule in target_dict:
+    # A FAIL without PASS in one rule has always precedence over a single
+    # PASS (without FAIL) in another. Otherwise the default PASS expectation
+    # in a rule with a modifier (e.g. PASS, SLOW) would be joined to a FAIL
+    # from another rule (which intended to mark a test as FAIL and not as
+    # PASS and FAIL).
+    if _JoinsPassAndFail(target_dict[rule], result):
+      target_dict[rule] -= set([PASS])
+    if _JoinsPassAndFail(result, target_dict[rule]):
+      result -= set([PASS])
+    target_dict[rule] |= result
+  else:
+    target_dict[rule] = result
+
+
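A worked example of the precedence rule implemented above, with hypothetical rules: a plain [PASS, SLOW] entry merged with a [FAIL] entry from another section yields FAIL and SLOW, not PASS and FAIL together.

    target = {'foo/bar': set(['PASS', 'SLOW'])}   # from an earlier section
    incoming = set(['FAIL'])                      # same rule in a later section

    # _JoinsPassAndFail(target['foo/bar'], incoming) is True, so PASS is
    # dropped before the union.
    target['foo/bar'] -= set(['PASS'])
    target['foo/bar'] |= incoming
    print(target['foo/bar'])                      # set(['SLOW', 'FAIL'])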
+def ReadContent(content):
+  return eval(content, KEYWORDS)
+
+
+def ReadStatusFile(content, variables):
+  """Status file format
+  Status file := [section]
+  section := [CONDITION, section_rules]
+  section_rules := {path: outcomes}
+  outcomes := outcome | [outcome, ...]
+  outcome := SINGLE_OUTCOME | [CONDITION, SINGLE_OUTCOME, SINGLE_OUTCOME, ...]
+  """
+
+  # Empty defaults for rules and prefix_rules. Variant-independent
+  # rules are mapped by "", others by the variant name.
+  rules = {variant: {} for variant in ALL_VARIANTS}
+  rules[""] = {}
+  prefix_rules = {variant: {} for variant in ALL_VARIANTS}
+  prefix_rules[""] = {}
+
+  variables.update(VARIABLES)
+  for conditional_section in ReadContent(content):
+    assert type(conditional_section) == list
+    assert len(conditional_section) == 2
+    condition, section = conditional_section
+    exp = _EvalExpression(condition, variables)
+
+    # The expression is variant-independent and evaluates to False.
+    if exp is False:
+      continue
+
+    # The expression is variant-independent and evaluates to True.
+    if exp is True:
+      _ReadSection(
+          section,
+          variables,
+          rules[''],
+          prefix_rules[''],
+      )
+      continue
+
+    # The expression is variant-dependent (contains "variant" keyword)
+    if exp == VARIANT_EXPRESSION:
+      # If the expression contains one or more "variant" keywords, we evaluate
+      # it for all possible variants and create rules for those that apply.
+      for variant in ALL_VARIANTS:
+        _EvalVariantExpression(
+            condition, section, variables, variant, rules, prefix_rules)
+      continue
+
+    assert False, "Make sure expressions evaluate to boolean values"
+
+  return Freeze(rules), Freeze(prefix_rules)
+
+
+def _ReadSection(section, variables, rules, prefix_rules):
+  assert type(section) == dict
+  for rule, outcome_list in section.iteritems():
+    assert type(rule) == str
+
+    if rule[-1] == '*':
+      _ParseOutcomeList(rule[:-1], outcome_list, variables, prefix_rules)
+    else:
+      _ParseOutcomeList(rule, outcome_list, variables, rules)
+
+JS_TEST_PATHS = {
+  'debugger': [[]],
+  'inspector': [[]],
+  'intl': [[]],
+  'message': [[]],
+  'mjsunit': [[]],
+  'mozilla': [['data']],
+  'test262': [['data', 'test'], ['local-tests', 'test']],
+  'webkit': [[]],
+}
+
+FILE_EXTENSIONS = [".js", ".mjs"]
+
+def PresubmitCheck(path):
+  with open(path) as f:
+    contents = ReadContent(f.read())
+  basename = os.path.basename(os.path.dirname(path))
+  root_prefix = basename + "/"
+  status = {"success": True}
+  def _assert(check, message):  # Like "assert", but doesn't throw.
+    if not check:
+      print("%s: Error: %s" % (path, message))
+      status["success"] = False
+  try:
+    for section in contents:
+      _assert(type(section) == list, "Section must be a list")
+      _assert(len(section) == 2, "Section list must have exactly 2 entries")
+      section = section[1]
+      _assert(type(section) == dict,
+              "Second entry of section must be a dictionary")
+      for rule in section:
+        _assert(type(rule) == str, "Rule key must be a string")
+        _assert(not rule.startswith(root_prefix),
+                "Suite name prefix must not be used in rule keys")
+        _assert(not rule.endswith('.js'),
+                ".js extension must not be used in rule keys.")
+        _assert('*' not in rule or (rule.count('*') == 1 and rule[-1] == '*'),
+                "Only the last character of a rule key can be a wildcard")
+        if basename in JS_TEST_PATHS and '*' not in rule:
+          def _any_exist(paths):
+            return any(os.path.exists(os.path.join(os.path.dirname(path),
+                                      *(paths + [rule + ext])))
+                       for ext in FILE_EXTENSIONS)
+          _assert(any(_any_exist(paths)
+                      for paths in JS_TEST_PATHS[basename]),
+                  "missing file for %s test %s" % (basename, rule))
+    return status["success"]
+  except Exception as e:
+    print(e)
+    return False
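A small sketch of how the parsed rules are consumed by StatusFile.get_outcomes above, with hypothetical rule contents: exact-name rules and prefix rules are merged per test.

    rules = {'': {'mjsunit/array-sort': set(['PASS', 'SLOW'])}}
    prefix_rules = {'': {'mjsunit/regress/': set(['SKIP'])}}

    def outcomes_for(testname):
      outcomes = set()
      outcomes |= rules[''].get(testname, set())
      for prefix, prefix_outcomes in prefix_rules[''].items():
        if testname.startswith(prefix):
          outcomes |= prefix_outcomes
      return outcomes

    print(outcomes_for('mjsunit/array-sort'))         # set(['PASS', 'SLOW'])
    print(outcomes_for('mjsunit/regress/regress-1'))  # set(['SKIP'])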
diff --git a/src/third_party/v8/tools/testrunner/local/statusfile_unittest.py b/src/third_party/v8/tools/testrunner/local/statusfile_unittest.py
new file mode 100755
index 0000000..3e2493c
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/local/statusfile_unittest.py
@@ -0,0 +1,171 @@
+#!/usr/bin/env python
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+from __future__ import absolute_import
+import os
+import sys
+import unittest
+
+TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.dirname(
+    os.path.abspath(__file__))))
+sys.path.append(TOOLS_PATH)
+
+from testrunner.local import statusfile
+from testrunner.local.utils import Freeze
+
+
+TEST_VARIABLES = {
+  'system': 'linux',
+  'mode': 'release',
+}
+
+
+TEST_STATUS_FILE = """
+[
+[ALWAYS, {
+  'foo/bar': [PASS, SKIP],
+  'baz/bar': [PASS, FAIL],
+  'foo/*': [PASS, SLOW],
+}],  # ALWAYS
+
+['%s', {
+  'baz/bar': [PASS, SLOW],
+  'foo/*': [FAIL],
+}],
+]
+"""
+
+
+def make_variables():
+  variables = {}
+  variables.update(TEST_VARIABLES)
+  return variables
+
+
+class UtilsTest(unittest.TestCase):
+  def test_freeze(self):
+    self.assertEqual(2, Freeze({1: [2]})[1][0])
+    self.assertEqual(set([3]), Freeze({1: [2], 2: set([3])})[2])
+
+    with self.assertRaises(Exception):
+      Freeze({1: [], 2: set([3])})[2] = 4
+    with self.assertRaises(Exception):
+      Freeze({1: [], 2: set([3])}).update({3: 4})
+    with self.assertRaises(Exception):
+      Freeze({1: [], 2: set([3])})[1].append(2)
+    with self.assertRaises(Exception):
+      Freeze({1: [], 2: set([3])})[2] |= set([3])
+
+    # Sanity check that we can do the same calls on a non-frozen object.
+    {1: [], 2: set([3])}[2] = 4
+    {1: [], 2: set([3])}.update({3: 4})
+    {1: [], 2: set([3])}[1].append(2)
+    {1: [], 2: set([3])}[2] |= set([3])
+
+
+class StatusFileTest(unittest.TestCase):
+  def test_eval_expression(self):
+    variables = make_variables()
+    variables.update(statusfile.VARIABLES)
+
+    self.assertTrue(
+        statusfile._EvalExpression(
+            'system==linux and mode==release', variables))
+    self.assertTrue(
+        statusfile._EvalExpression(
+            'system==linux or variant==default', variables))
+    self.assertFalse(
+        statusfile._EvalExpression(
+            'system==linux and mode==debug', variables))
+    self.assertRaises(
+        AssertionError,
+        lambda: statusfile._EvalExpression(
+            'system==linux and mode==foo', variables))
+    self.assertRaises(
+        SyntaxError,
+        lambda: statusfile._EvalExpression(
+            'system==linux and mode=release', variables))
+    self.assertEquals(
+        statusfile.VARIANT_EXPRESSION,
+        statusfile._EvalExpression(
+            'system==linux and variant==default', variables)
+    )
+
+  def test_read_statusfile_section_true(self):
+    rules, prefix_rules = statusfile.ReadStatusFile(
+        TEST_STATUS_FILE % 'system==linux', make_variables())
+
+    self.assertEquals(
+        {
+          'foo/bar': set(['PASS', 'SKIP']),
+          'baz/bar': set(['PASS', 'FAIL', 'SLOW']),
+        },
+        rules[''],
+    )
+    self.assertEquals(
+        {
+          'foo/': set(['SLOW', 'FAIL']),
+        },
+        prefix_rules[''],
+    )
+    self.assertEquals({}, rules['default'])
+    self.assertEquals({}, prefix_rules['default'])
+
+  def test_read_statusfile_section_false(self):
+    rules, prefix_rules = statusfile.ReadStatusFile(
+        TEST_STATUS_FILE % 'system==windows', make_variables())
+
+    self.assertEquals(
+        {
+          'foo/bar': set(['PASS', 'SKIP']),
+          'baz/bar': set(['PASS', 'FAIL']),
+        },
+        rules[''],
+    )
+    self.assertEquals(
+        {
+          'foo/': set(['PASS', 'SLOW']),
+        },
+        prefix_rules[''],
+    )
+    self.assertEquals({}, rules['default'])
+    self.assertEquals({}, prefix_rules['default'])
+
+  def test_read_statusfile_section_variant(self):
+    rules, prefix_rules = statusfile.ReadStatusFile(
+        TEST_STATUS_FILE % 'system==linux and variant==default',
+        make_variables(),
+    )
+
+    self.assertEquals(
+        {
+          'foo/bar': set(['PASS', 'SKIP']),
+          'baz/bar': set(['PASS', 'FAIL']),
+        },
+        rules[''],
+    )
+    self.assertEquals(
+        {
+          'foo/': set(['PASS', 'SLOW']),
+        },
+        prefix_rules[''],
+    )
+    self.assertEquals(
+        {
+          'baz/bar': set(['PASS', 'SLOW']),
+        },
+        rules['default'],
+    )
+    self.assertEquals(
+        {
+          'foo/': set(['FAIL']),
+        },
+        prefix_rules['default'],
+    )
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/src/third_party/v8/tools/testrunner/local/testsuite.py b/src/third_party/v8/tools/testrunner/local/testsuite.py
new file mode 100644
index 0000000..a72ef4b
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/local/testsuite.py
@@ -0,0 +1,317 @@
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+import fnmatch
+import imp
+import itertools
+import os
+from contextlib import contextmanager
+
+from . import command
+from . import statusfile
+from . import utils
+from ..objects.testcase import TestCase
+from .variants import ALL_VARIANTS, ALL_VARIANT_FLAGS
+
+
+STANDARD_VARIANT = set(["default"])
+
+
+class VariantsGenerator(object):
+  def __init__(self, variants):
+    self._all_variants = [v for v in variants if v in ALL_VARIANTS]
+    self._standard_variant = [v for v in variants if v in STANDARD_VARIANT]
+
+  def gen(self, test):
+    """Generator producing (variant, flags, procid suffix) tuples."""
+    flags_set = self._get_flags_set(test)
+    for n, variant in enumerate(self._get_variants(test)):
+      yield (variant, flags_set[variant][0], n)
+
+  def _get_flags_set(self, test):
+    return ALL_VARIANT_FLAGS
+
+  def _get_variants(self, test):
+    if test.only_standard_variant:
+      return self._standard_variant
+    return self._all_variants
+
+
+class TestCombiner(object):
+  def get_group_key(self, test):
+    """To indicate what tests can be combined with each other we define a group
+    key for each test. Tests with the same group key can be combined. Test
+    without a group key (None) is not combinable with any other test.
+    """
+    raise NotImplementedError()
+
+  def combine(self, name, tests):
+    """Returns test combined from `tests`. Since we identify tests by their
+    suite and name, `name` parameter should be unique within one suite.
+    """
+    return self._combined_test_class()(name, tests)
+
+  def _combined_test_class(self):
+    raise NotImplementedError()
+
+
+class TestLoader(object):
+  """Base class for loading TestSuite tests after applying test suite
+  transformations."""
+
+  def __init__(self, suite, test_class, test_config, test_root):
+    self.suite = suite
+    self.test_class = test_class
+    self.test_config = test_config
+    self.test_root = test_root
+    self.test_count_estimation = len(list(self._list_test_filenames()))
+
+  def _list_test_filenames(self):
+    """Implemented by the subclassed TestLoaders to list filenames.
+
+    Filenames are expected to be sorted and are deterministic."""
+    raise NotImplementedError
+
+  def _should_filter_by_name(self, name):
+    return False
+
+  def _should_filter_by_test(self, test):
+    return False
+
+  def _filename_to_testname(self, filename):
+    """Hook for subclasses to write their own filename transformation
+    logic before the test creation."""
+    return filename
+
+  # TODO: not needed for every TestLoader, extract it into a subclass.
+  def _path_to_name(self, path):
+    if utils.IsWindows():
+      return path.replace(os.path.sep, "/")
+
+    return path
+
+  def _create_test(self, path, suite, **kwargs):
+    """Converts paths into test objects using the given options"""
+    return self.test_class(
+      suite, path, self._path_to_name(path), self.test_config, **kwargs)
+
+  def list_tests(self):
+    """Loads and returns the test objects for a TestSuite"""
+    # TODO: detect duplicate tests.
+    for filename in self._list_test_filenames():
+      if self._should_filter_by_name(filename):
+        continue
+
+      testname = self._filename_to_testname(filename)
+      case = self._create_test(testname, self.suite)
+      if self._should_filter_by_test(case):
+        continue
+
+      yield case
+
+
+class GenericTestLoader(TestLoader):
+  """Generic TestLoader implementing the logic for listing filenames"""
+  @property
+  def excluded_files(self):
+    return set()
+
+  @property
+  def excluded_dirs(self):
+    return set()
+
+  @property
+  def excluded_suffixes(self):
+    return set()
+
+  @property
+  def test_dirs(self):
+    return [self.test_root]
+
+  @property
+  def extensions(self):
+    return []
+
+  def __find_extension(self, filename):
+    for extension in self.extensions:
+      if filename.endswith(extension):
+        return extension
+
+    return False
+
+  def _should_filter_by_name(self, filename):
+    if not self.__find_extension(filename):
+      return True
+
+    for suffix in self.excluded_suffixes:
+      if filename.endswith(suffix):
+        return True
+
+    if os.path.basename(filename) in self.excluded_files:
+      return True
+
+    return False
+
+  def _filename_to_testname(self, filename):
+    extension = self.__find_extension(filename)
+    if not extension:
+      return filename
+
+    return filename[:-len(extension)]
+
+  def _to_relpath(self, abspath, test_root):
+    return os.path.relpath(abspath, test_root)
+
+  def _list_test_filenames(self):
+    for test_dir in sorted(self.test_dirs):
+      test_root = os.path.join(self.test_root, test_dir)
+      for dirname, dirs, files in os.walk(test_root, followlinks=True):
+        dirs.sort()
+        # Iterate over a copy so that removing excluded directories (to prune
+        # os.walk) does not skip entries.
+        for dir in list(dirs):
+          if dir in self.excluded_dirs or dir.startswith('.'):
+            dirs.remove(dir)
+
+        files.sort()
+        for filename in files:
+          abspath = os.path.join(dirname, filename)
+
+          yield self._to_relpath(abspath, test_root)
+
+
+class JSTestLoader(GenericTestLoader):
+  @property
+  def extensions(self):
+    return [".js", ".mjs"]
+
+
+class TestGenerator(object):
+  def __init__(self, test_count_estimate, slow_tests, fast_tests):
+    self.test_count_estimate = test_count_estimate
+    self.slow_tests = slow_tests
+    self.fast_tests = fast_tests
+    self._rebuild_iterator()
+
+  def _rebuild_iterator(self):
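+    # Slow tests are yielded before fast ones; when tests run in parallel this
+    # tends to start the long-running tests early (see also
+    # testMergingTestGenerators in testsuite_unittest.py).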
+    self._iterator = itertools.chain(self.slow_tests, self.fast_tests)
+
+  def __iter__(self):
+    return self
+
+  def __next__(self):
+    return self.next()
+
+  def next(self):
+    return next(self._iterator)
+
+  def merge(self, test_generator):
+    self.test_count_estimate += test_generator.test_count_estimate
+    self.slow_tests = itertools.chain(
+      self.slow_tests, test_generator.slow_tests)
+    self.fast_tests = itertools.chain(
+      self.fast_tests, test_generator.fast_tests)
+    self._rebuild_iterator()
+
+
+@contextmanager
+def _load_testsuite_module(name, root):
+  f = None
+  try:
+    (f, pathname, description) = imp.find_module("testcfg", [root])
+    yield imp.load_module(name + "_testcfg", f, pathname, description)
+  finally:
+    if f:
+      f.close()
+
+class TestSuite(object):
+  @staticmethod
+  def Load(root, test_config, framework_name):
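+    # Illustrative: Load("<v8>/test/mjsunit", config, "standard_runner")
+    # imports mjsunit/testcfg.py and returns the suite built by its GetSuite()
+    # (the path shown is only an example).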
+    name = root.split(os.path.sep)[-1]
+    with _load_testsuite_module(name, root) as module:
+      return module.GetSuite(name, root, test_config, framework_name)
+
+  def __init__(self, name, root, test_config, framework_name):
+    self.name = name  # string
+    self.root = root  # string containing path
+    self.test_config = test_config
+    self.framework_name = framework_name  # name of the test runner impl
+    self.tests = None  # list of TestCase objects
+    self.statusfile = None
+
+    self._test_loader = self._test_loader_class()(
+      self, self._test_class(), self.test_config, self.root)
+
+  def status_file(self):
+    return "%s/%s.status" % (self.root, self.name)
+
+  @property
+  def _test_loader_class(self):
+    raise NotImplementedError
+
+  def ListTests(self):
+    return self._test_loader.list_tests()
+
+  def __initialize_test_count_estimation(self):
+    # Retrieves a single test to initialize the test generator.
+    next(iter(self.ListTests()), None)
+
+  def __calculate_test_count(self):
+    self.__initialize_test_count_estimation()
+    return self._test_loader.test_count_estimation
+
+  def load_tests_from_disk(self, statusfile_variables):
+    self.statusfile = statusfile.StatusFile(
+      self.status_file(), statusfile_variables)
+
+    test_count = self.__calculate_test_count()
+    slow_tests = (test for test in self.ListTests() if test.is_slow)
+    fast_tests = (test for test in self.ListTests() if not test.is_slow)
+    return TestGenerator(test_count, slow_tests, fast_tests)
+
+  def get_variants_gen(self, variants):
+    return self._variants_gen_class()(variants)
+
+  def _variants_gen_class(self):
+    return VariantsGenerator
+
+  def test_combiner_available(self):
+    return bool(self._test_combiner_class())
+
+  def get_test_combiner(self):
+    cls = self._test_combiner_class()
+    if cls:
+      return cls()
+    return None
+
+  def _test_combiner_class(self):
+    """Returns Combiner subclass. None if suite doesn't support combining
+    tests.
+    """
+    return None
+
+  def _test_class(self):
+    raise NotImplementedError
diff --git a/src/third_party/v8/tools/testrunner/local/testsuite_unittest.py b/src/third_party/v8/tools/testrunner/local/testsuite_unittest.py
new file mode 100755
index 0000000..b74fef1
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/local/testsuite_unittest.py
@@ -0,0 +1,86 @@
+#!/usr/bin/env python
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import itertools
+import os
+import sys
+import tempfile
+import unittest
+
+# Needed because the test runner contains relative imports.
+TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.dirname(
+    os.path.abspath(__file__))))
+sys.path.append(TOOLS_PATH)
+
+from testrunner.local.testsuite import TestSuite, TestGenerator
+from testrunner.objects.testcase import TestCase
+from testrunner.test_config import TestConfig
+
+
+class TestSuiteTest(unittest.TestCase):
+  def setUp(self):
+    test_dir = os.path.dirname(__file__)
+    self.test_root = os.path.join(test_dir, "fake_testsuite")
+    self.test_config = TestConfig(
+        command_prefix=[],
+        extra_flags=[],
+        isolates=False,
+        mode_flags=[],
+        no_harness=False,
+        noi18n=False,
+        random_seed=0,
+        run_skipped=False,
+        shell_dir='fake_testsuite/fake_d8',
+        timeout=10,
+        verbose=False,
+    )
+
+    self.suite = TestSuite.Load(
+        self.test_root, self.test_config, "standard_runner")
+
+  def testLoadingTestSuites(self):
+    self.assertEquals(self.suite.name, "fake_testsuite")
+    self.assertEquals(self.suite.test_config, self.test_config)
+
+    # Verify that the components of the TestSuite aren't loaded yet.
+    self.assertIsNone(self.suite.tests)
+    self.assertIsNone(self.suite.statusfile)
+
+  def testLoadingTestsFromDisk(self):
+    tests = self.suite.load_tests_from_disk(
+      statusfile_variables={})
+    def is_generator(iterator):
+      return iterator == iter(iterator)
+
+    self.assertTrue(is_generator(tests))
+    self.assertEquals(tests.test_count_estimate, 2)
+
+    slow_tests, fast_tests = list(tests.slow_tests), list(tests.fast_tests)
+    # Verify that the components of the TestSuite are loaded.
+    self.assertTrue(len(slow_tests) == len(fast_tests) == 1)
+    self.assertTrue(all(test.is_slow for test in slow_tests))
+    self.assertFalse(any(test.is_slow for test in fast_tests))
+    self.assertIsNotNone(self.suite.statusfile)
+
+  def testMergingTestGenerators(self):
+    tests = self.suite.load_tests_from_disk(
+      statusfile_variables={})
+    more_tests = self.suite.load_tests_from_disk(
+      statusfile_variables={})
+
+    # Merge the test generators
+    tests.merge(more_tests)
+    self.assertEquals(tests.test_count_estimate, 4)
+
+    # Check the tests are sorted by speed
+    test_speeds = []
+    for test in tests:
+      test_speeds.append(test.is_slow)
+
+    self.assertEquals(test_speeds, [True, True, False, False])
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/src/third_party/v8/tools/testrunner/local/utils.py b/src/third_party/v8/tools/testrunner/local/utils.py
new file mode 100644
index 0000000..a6b92dc
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/local/utils.py
@@ -0,0 +1,158 @@
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+from os.path import exists
+from os.path import isdir
+from os.path import join
+import os
+import platform
+import re
+import urllib
+
+
+### Exit codes and their meaning.
+# Normal execution.
+EXIT_CODE_PASS = 0
+# Execution with test failures.
+EXIT_CODE_FAILURES = 1
+# Execution with no tests executed.
+EXIT_CODE_NO_TESTS = 2
+# Execution aborted with SIGINT (Ctrl-C).
+EXIT_CODE_INTERRUPTED = 3
+# Execution aborted with SIGTERM.
+EXIT_CODE_TERMINATED = 4
+# Internal error.
+EXIT_CODE_INTERNAL_ERROR = 5
+
+
+def GetSuitePaths(test_root):
+  return [ f for f in os.listdir(test_root) if isdir(join(test_root, f)) ]
+
+
+# Reads a file into a list of strings, skipping comments and blank lines.
+def ReadLinesFrom(name):
+  lines = []
+  with open(name) as f:
+    for line in f:
+      if line.startswith('#'): continue
+      if '#' in line:
+        line = line[:line.find('#')]
+      line = line.strip()
+      if not line: continue
+      lines.append(line)
+  return lines
+
+
+def GuessOS():
+  system = platform.system()
+  if system == 'Linux':
+    return 'linux'
+  elif system == 'Darwin':
+    return 'macos'
+  elif system.find('CYGWIN') >= 0:
+    return 'cygwin'
+  elif system == 'Windows' or system == 'Microsoft':
+    # On Windows Vista platform.system() can return 'Microsoft' with some
+    # versions of Python, see http://bugs.python.org/issue1082
+    return 'windows'
+  elif system == 'FreeBSD':
+    return 'freebsd'
+  elif system == 'OpenBSD':
+    return 'openbsd'
+  elif system == 'SunOS':
+    return 'solaris'
+  elif system == 'NetBSD':
+    return 'netbsd'
+  elif system == 'AIX':
+    return 'aix'
+  else:
+    return None
+
+
+def UseSimulator(arch):
+  machine = platform.machine()
+  return (machine and
+      (arch == "mipsel" or arch == "arm" or arch == "arm64") and
+      not arch.startswith(machine))
+
+
+# This will default to building the 32 bit VM even on machines that are
+# capable of running the 64 bit VM.
+def DefaultArch():
+  machine = platform.machine()
+  machine = machine.lower()  # Windows 7 capitalizes 'AMD64'.
+  if machine.startswith('arm'):
+    return 'arm'
+  elif (not machine) or (not re.match('(x|i[3-6])86$', machine) is None):
+    return 'ia32'
+  elif machine == 'i86pc':
+    return 'ia32'
+  elif machine == 'x86_64':
+    return 'ia32'
+  elif machine == 'amd64':
+    return 'ia32'
+  elif machine == 's390x':
+    return 's390'
+  elif machine == 'ppc64':
+    return 'ppc'
+  else:
+    return None
+
+
+def GuessWordsize():
+  if '64' in platform.machine():
+    return '64'
+  else:
+    return '32'
+
+
+def IsWindows():
+  return GuessOS() == 'windows'
+
+
+class FrozenDict(dict):
+  def __setitem__(self, *args, **kwargs):
+    raise Exception('Tried to mutate a frozen dict')
+
+  def update(self, *args, **kwargs):
+    raise Exception('Tried to mutate a frozen dict')
+
+
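+# Illustrative usage: Freeze({'a': [1], 'b': set([2])}) returns a FrozenDict
+# mapping 'a' to the tuple (1,) and 'b' to frozenset([2]); any attempt to
+# mutate the result raises an Exception (see UtilsTest.test_freeze).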
+def Freeze(obj):
+  if isinstance(obj, dict):
+    # items() works under both Python 2 and 3 (iteritems() is Python 2 only).
+    return FrozenDict((k, Freeze(v)) for k, v in obj.items())
+  elif isinstance(obj, set):
+    return frozenset(obj)
+  elif isinstance(obj, list):
+    return tuple(Freeze(item) for item in obj)
+  else:
+    # Make sure object is hashable.
+    hash(obj)
+    return obj
diff --git a/src/third_party/v8/tools/testrunner/local/variants.py b/src/third_party/v8/tools/testrunner/local/variants.py
new file mode 100644
index 0000000..4236c16
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/local/variants.py
@@ -0,0 +1,123 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Use this to run several variants of the tests.
+ALL_VARIANT_FLAGS = {
+  "assert_types": [["--assert-types"]],
+  "code_serializer": [["--cache=code"]],
+  "default": [[]],
+  "future": [["--future"]],
+  "gc_stats": [["--gc-stats=1"]],
+  # Alias of exhaustive variants, but triggering new test framework features.
+  "infra_staging": [[]],
+  "interpreted_regexp": [["--regexp-interpret-all"]],
+  "experimental_regexp":  [["--default-to-experimental-regexp-engine"]],
+  "jitless": [["--jitless"]],
+  "minor_mc": [["--minor-mc"]],
+  "nci": [["--turbo-nci"]],
+  "nci_as_midtier": [["--turbo-nci-as-midtier"]],
+  "no_lfa": [["--no-lazy-feedback-allocation"]],
+  "no_local_heaps": [[
+      "--no-local-heaps",
+      "--no-turbo-direct-heap-access",
+      "--no-finalize-streaming-on-background"]],
+  # No optimization means disable all optimizations. OptimizeFunctionOnNextCall
+  # does not force optimization either; it turns into a no-op. Please see
+  # https://chromium-review.googlesource.com/c/452620/ for more discussion.
+  # For WebAssembly, we test "Liftoff-only" in the nooptimization variant and
+  # "TurboFan-only" in the stress variant. The WebAssembly configuration is
+  # independent of JS optimizations, so we can combine those configs.
+  "nooptimization": [["--no-opt", "--liftoff", "--no-wasm-tier-up",
+                      "--wasm-generic-wrapper"]],
+  "slow_path": [["--force-slow-path"]],
+  "stress": [["--stress-opt", "--no-liftoff", "--stress-lazy-source-positions"]],
+  "stress_concurrent_allocation": [["--stress-concurrent-allocation"]],
+  "stress_js_bg_compile_wasm_code_gc": [["--stress-background-compile",
+                                         "--finalize-streaming-on-background",
+                                         "--stress-wasm-code-gc"]],
+  "stress_incremental_marking": [["--stress-incremental-marking"]],
+  "stress_snapshot": [["--stress-snapshot"]],
+  # Trigger stress sampling allocation profiler with sample interval = 2^14
+  "stress_sampling": [["--stress-sampling-allocation-profiler=16384"]],
+  "trusted": [["--no-untrusted-code-mitigations"]],
+  "no_wasm_traps": [["--no-wasm-trap-handler"]],
+  "turboprop": [["--turboprop"]],
+  "instruction_scheduling": [["--turbo-instruction-scheduling"]],
+  "stress_instruction_scheduling": [["--turbo-stress-instruction-scheduling"]],
+  "top_level_await": [["--harmony-top-level-await"]],
+}
+
+# Flags that lead to a contradiction with the flags provided by the respective
+# variant. This depends on the flags specified in ALL_VARIANT_FLAGS and on the
+# implications defined in flag-definitions.h.
+INCOMPATIBLE_FLAGS_PER_VARIANT = {
+  "assert_types": ["--no-assert-types"],
+  "jitless": ["--opt", "--liftoff", "--track-field-types", "--validate-asm"],
+  "no_wasm_traps": ["--wasm-trap-handler"],
+  "nooptimization": ["--opt", "--no-liftoff", "--predictable", "--wasm-tier-up"],
+  "slow_path": ["--no-force-slow-path"],
+  "stress_incremental_marking": ["--no-stress-incremental-marking"],
+  "stress_js_bg_compile_wasm_code_gc": ["--no-stress-background-compile"],
+  "stress": ["--no-stress-opt", "--always-opt", "--no-always-opt", "--liftoff", "--max-inlined-bytecode-size=*",
+             "--max-inlined-bytecode-size-cumulative=*", "--stress-inline"],
+  "turboprop": ["--interrupt-budget=*", "--no-turboprop"],
+  "code_serializer": ["--cache=after-execute", "--cache=full-code-cache", "--cache=none"],
+  "no_local_heaps": ["--concurrent-inlining", "--turboprop"],
+  "experimental_regexp": ["--no-enable-experimental-regexp-engine", "--no-default-to-experimental-regexp-engine"],
+}
+
+# Flags that lead to a contradiction under certain build variables.
+# This corresponds to the build variables usable in status files as generated
+# in _get_statusfile_variables in base_runner.py.
+# The conflicts might be directly contradictory flags or be caused by the
+# implications defined in flag-definitions.h.
+INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE = {
+  "lite_mode": ["--no-lazy-feedback-allocation", "--max-semi-space-size=*"]
+               + INCOMPATIBLE_FLAGS_PER_VARIANT["jitless"],
+  "predictable": ["--liftoff", "--parallel-compile-tasks",
+                  "--concurrent-recompilation",
+                  "--wasm-num-compilation-tasks=*"],
+}
+
+# Flags that lead to a contradiction when a certain extra-flag is present.
+# Such extra-flags are defined for example in infra/testing/builders.pyl or in
+# standard_runner.py.
+# The conflicts might be directly contradictory flags or be caused by the
+# implications defined in flag-definitions.h.
+INCOMPATIBLE_FLAGS_PER_EXTRA_FLAG = {
+  "--concurrent-recompilation": ["--no-concurrent-recompilation", "--predictable"],
+  "--enable-armv8": ["--no-enable-armv8"],
+  "--gc-interval=*": ["--gc-interval=*"],
+  "--no-enable-sse3": ["--enable-sse3"],
+  "--no-enable-sse4-1": ["--enable-sse4-1"],
+  "--optimize-for-size": ["--max-semi-space-size=*"],
+  "--stress-flush-bytecode": ["--no-stress-flush-bytecode"],
+  "--stress-incremental-marking": INCOMPATIBLE_FLAGS_PER_VARIANT["stress_incremental_marking"],
+}
+
+SLOW_VARIANTS = set([
+  'stress',
+  'stress_snapshot',
+  'nooptimization',
+])
+
+FAST_VARIANTS = set([
+  'default'
+])
+
+
+def _variant_order_key(v):
+  if v in SLOW_VARIANTS:
+    return 0
+  if v in FAST_VARIANTS:
+    return 100
+  return 50
+
+ALL_VARIANTS = sorted(ALL_VARIANT_FLAGS.keys(),
+                      key=_variant_order_key)
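+# E.g. 'stress' and 'nooptimization' (slow) sort before neutral variants such
+# as 'future', which in turn sort before 'default' (fast).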
+
+# Check {SLOW,FAST}_VARIANTS entries
+for variants in [SLOW_VARIANTS, FAST_VARIANTS]:
+  for v in variants:
+    assert v in ALL_VARIANT_FLAGS
diff --git a/src/third_party/v8/tools/testrunner/local/verbose.py b/src/third_party/v8/tools/testrunner/local/verbose.py
new file mode 100644
index 0000000..8569368
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/local/verbose.py
@@ -0,0 +1,106 @@
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import sys
+import time
+
+from . import statusfile
+
+
+REPORT_TEMPLATE = (
+"""Total: %(total)i tests
+ * %(skipped)4d tests will be skipped
+ * %(nocrash)4d tests are expected to be flaky but not crash
+ * %(pass)4d tests are expected to pass
+ * %(fail_ok)4d tests are expected to fail that we won't fix
+ * %(fail)4d tests are expected to fail that we should fix
+ * %(crash)4d tests are expected to crash
+""")
+
+
+# TODO(majeski): Turn it into an observer.
+def PrintReport(tests):
+  total = len(tests)
+  skipped = nocrash = passes = fail_ok = fail = crash = 0
+  for t in tests:
+    if t.do_skip:
+      skipped += 1
+    elif t.is_pass_or_fail:
+      nocrash += 1
+    elif t.is_fail_ok:
+      fail_ok += 1
+    elif t.expected_outcomes == [statusfile.PASS]:
+      passes += 1
+    elif t.expected_outcomes == [statusfile.FAIL]:
+      fail += 1
+    elif t.expected_outcomes == [statusfile.CRASH]:
+      crash += 1
+    else:
+      assert False, 'Unreachable'  # TODO: check this in the outcomes parsing phase.
+
+  print(REPORT_TEMPLATE % {
+    "total": total,
+    "skipped": skipped,
+    "nocrash": nocrash,
+    "pass": passes,
+    "fail_ok": fail_ok,
+    "fail": fail,
+    "crash": crash,
+  })
+
+
+def PrintTestSource(tests):
+  for test in tests:
+    print("--- begin source: %s ---" % test)
+    if test.is_source_available():
+      print(test.get_source())
+    else:
+      print('(no source available)')
+    print("--- end source: %s ---" % test)
+
+
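+# Illustrative: FormatTime(65.4321) returns "01:05.432".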
+def FormatTime(d):
+  millis = round(d * 1000) % 1000
+  return time.strftime("%M:%S.", time.gmtime(d)) + ("%03i" % millis)
+
+
+def PrintTestDurations(suites, outputs, overall_time):
+  # Write the times to stderr to make it easy to separate from the
+  # test output.
+  print()
+  sys.stderr.write("--- Total time: %s ---\n" % FormatTime(overall_time))
+  timed_tests = [(t, outputs[t].duration) for s in suites for t in s.tests
+                 if t in outputs]
+  timed_tests.sort(key=lambda test_duration: test_duration[1], reverse=True)
+  index = 1
+  for test, duration in timed_tests[:20]:
+    t = FormatTime(duration)
+    sys.stderr.write("%4i (%s) %s\n" % (index, t, test))
+    index += 1
diff --git a/src/third_party/v8/tools/testrunner/num_fuzzer.py b/src/third_party/v8/tools/testrunner/num_fuzzer.py
new file mode 100755
index 0000000..d5b243b
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/num_fuzzer.py
@@ -0,0 +1,231 @@
+#!/usr/bin/env python
+#
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import absolute_import
+from __future__ import print_function
+
+import random
+import sys
+
+# Adds testrunner to the path, hence it has to be imported at the beginning.
+from . import base_runner
+
+from testrunner.local import utils
+
+from testrunner.testproc import fuzzer
+from testrunner.testproc.base import TestProcProducer
+from testrunner.testproc.combiner import CombinerProc
+from testrunner.testproc.execution import ExecutionProc
+from testrunner.testproc.expectation import ForgiveTimeoutProc
+from testrunner.testproc.filter import StatusFileFilterProc, NameFilterProc
+from testrunner.testproc.loader import LoadProc
+from testrunner.testproc.progress import ResultsTracker
+from testrunner.utils import random_utils
+
+
+DEFAULT_SUITES = ["mjsunit", "webkit", "benchmarks"]
+
+
+class NumFuzzer(base_runner.BaseTestRunner):
+  def __init__(self, *args, **kwargs):
+    super(NumFuzzer, self).__init__(*args, **kwargs)
+
+  @property
+  def framework_name(self):
+    return 'num_fuzzer'
+
+  def _add_parser_options(self, parser):
+    parser.add_option("--fuzzer-random-seed", default=0,
+                      help="Default seed for initializing fuzzer random "
+                      "generator")
+    parser.add_option("--tests-count", default=5, type="int",
+                      help="Number of tests to generate from each base test. "
+                           "Can be combined with --total-timeout-sec with "
+                           "value 0 to provide infinite number of subtests. "
+                           "When --combine-tests is set it indicates how many "
+                           "tests to create in total")
+
+    # Stress gc
+    parser.add_option("--stress-marking", default=0, type="int",
+                      help="probability [0-10] of adding --stress-marking "
+                           "flag to the test")
+    parser.add_option("--stress-scavenge", default=0, type="int",
+                      help="probability [0-10] of adding --stress-scavenge "
+                           "flag to the test")
+    parser.add_option("--stress-compaction", default=0, type="int",
+                      help="probability [0-10] of adding --stress-compaction "
+                           "flag to the test")
+    parser.add_option("--stress-gc", default=0, type="int",
+                      help="probability [0-10] of adding --random-gc-interval "
+                           "flag to the test")
+
+    # Stress tasks
+    parser.add_option("--stress-delay-tasks", default=0, type="int",
+                      help="probability [0-10] of adding --stress-delay-tasks "
+                           "flag to the test")
+    parser.add_option("--stress-thread-pool-size", default=0, type="int",
+                      help="probability [0-10] of adding --thread-pool-size "
+                           "flag to the test")
+
+    # Stress deopt
+    parser.add_option("--stress-deopt", default=0, type="int",
+                      help="probability [0-10] of adding --deopt-every-n-times "
+                           "flag to the test")
+    parser.add_option("--stress-deopt-min", default=1, type="int",
+                      help="extends --stress-deopt to have minimum interval "
+                           "between deopt points")
+
+    # Combine multiple tests
+    parser.add_option("--combine-tests", default=False, action="store_true",
+                      help="Combine multiple tests as one and run with "
+                           "try-catch wrapper")
+    parser.add_option("--combine-max", default=100, type="int",
+                      help="Maximum number of tests to combine")
+    parser.add_option("--combine-min", default=2, type="int",
+                      help="Minimum number of tests to combine")
+
+    # Miscellaneous
+    parser.add_option("--variants", default='default',
+                      help="Comma-separated list of testing variants")
+
+    return parser
+
+
+  def _process_options(self, options):
+    if not options.fuzzer_random_seed:
+      options.fuzzer_random_seed = random_utils.random_seed()
+
+    if options.total_timeout_sec:
+      options.tests_count = 0
+
+    if options.combine_tests:
+      if options.combine_min > options.combine_max:
+        print('--combine-min (%d) cannot be larger than --combine-max (%d)' %
+              (options.combine_min, options.combine_max))
+        raise base_runner.TestRunnerError()
+
+    if options.variants != 'default':
+      print('Only the default testing variant is supported with numfuzz')
+      raise base_runner.TestRunnerError()
+
+    return True
+
+  def _get_default_suite_names(self):
+    return DEFAULT_SUITES
+
+  def _runner_flags(self):
+    """Extra default flags specific to the test runner implementation."""
+    return ['--no-abort-on-contradictory-flags']
+
+  def _get_statusfile_variables(self, options):
+    variables = (
+        super(NumFuzzer, self)._get_statusfile_variables(options))
+    variables.update({
+      'deopt_fuzzer': bool(options.stress_deopt),
+      'endurance_fuzzer': bool(options.combine_tests),
+      'gc_stress': bool(options.stress_gc),
+      'gc_fuzzer': bool(max([options.stress_marking,
+                             options.stress_scavenge,
+                             options.stress_compaction,
+                             options.stress_gc,
+                             options.stress_delay_tasks,
+                             options.stress_thread_pool_size])),
+    })
+    return variables
+
+  def _do_execute(self, tests, args, options):
+    loader = LoadProc(tests)
+    fuzzer_rng = random.Random(options.fuzzer_random_seed)
+
+    combiner = self._create_combiner(fuzzer_rng, options)
+    results = self._create_result_tracker(options)
+    execproc = ExecutionProc(options.j)
+    sigproc = self._create_signal_proc()
+    indicators = self._create_progress_indicators(
+      tests.test_count_estimate, options)
+    procs = [
+      loader,
+      NameFilterProc(args) if args else None,
+      StatusFileFilterProc(None, None),
+      # TODO(majeski): Improve sharding when combiner is present. Maybe select
+      # different random seeds for shards instead of splitting tests.
+      self._create_shard_proc(options),
+      ForgiveTimeoutProc(),
+      combiner,
+      self._create_fuzzer(fuzzer_rng, options),
+      sigproc,
+    ] + indicators + [
+      results,
+      self._create_timeout_proc(options),
+      self._create_rerun_proc(options),
+      execproc,
+    ]
+    self._prepare_procs(procs)
+    loader.load_initial_tests(initial_batch_size=float('inf'))
+
+    # TODO(majeski): maybe some notification from loader would be better?
+    if combiner:
+      combiner.generate_initial_tests(options.j * 4)
+
+    # This starts up worker processes and blocks until all tests are
+    # processed.
+    execproc.run()
+
+    for indicator in indicators:
+      indicator.finished()
+
+    print('>>> %d tests ran' % results.total)
+    if results.failed:
+      return utils.EXIT_CODE_FAILURES
+
+    # Indicate if a SIGINT or SIGTERM happened.
+    return sigproc.exit_code
+
+  def _is_testsuite_supported(self, suite, options):
+    return not options.combine_tests or suite.test_combiner_available()
+
+  def _create_combiner(self, rng, options):
+    if not options.combine_tests:
+      return None
+    return CombinerProc(rng, options.combine_min, options.combine_max,
+                        options.tests_count)
+
+  def _create_fuzzer(self, rng, options):
+    return fuzzer.FuzzerProc(
+        rng,
+        self._tests_count(options),
+        self._create_fuzzer_configs(options),
+        self._disable_analysis(options),
+    )
+
+  def _tests_count(self, options):
+    if options.combine_tests:
+      return 1
+    return options.tests_count
+
+  def _disable_analysis(self, options):
+    """Disable analysis phase when options are used that don't support it."""
+    return options.combine_tests
+
+  def _create_fuzzer_configs(self, options):
+    fuzzers = []
+    def add(name, prob, *args):
+      if prob:
+        fuzzers.append(fuzzer.create_fuzzer_config(name, prob, *args))
+
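+    # Illustrative: passing "--stress-compaction 4" on the command line gives
+    # each generated test roughly a 4-in-10 chance of receiving the compaction
+    # stress flag (per the [0-10] probabilities in _add_parser_options).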
+    add('compaction', options.stress_compaction)
+    add('marking', options.stress_marking)
+    add('scavenge', options.stress_scavenge)
+    add('gc_interval', options.stress_gc)
+    add('threads', options.stress_thread_pool_size)
+    add('delay', options.stress_delay_tasks)
+    add('deopt', options.stress_deopt, options.stress_deopt_min)
+    return fuzzers
+
+
+if __name__ == '__main__':
+  sys.exit(NumFuzzer().execute())
diff --git a/src/third_party/v8/tools/testrunner/objects/__init__.py b/src/third_party/v8/tools/testrunner/objects/__init__.py
new file mode 100644
index 0000000..202a262
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/objects/__init__.py
@@ -0,0 +1,26 @@
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/third_party/v8/tools/testrunner/objects/output.py b/src/third_party/v8/tools/testrunner/objects/output.py
new file mode 100644
index 0000000..200f546
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/objects/output.py
@@ -0,0 +1,81 @@
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+import signal
+import copy
+
+from ..local import utils
+
+
+class Output(object):
+
+  def __init__(self, exit_code=0, timed_out=False, stdout=None, stderr=None,
+               pid=None, duration=None):
+    self.exit_code = exit_code
+    self.timed_out = timed_out
+    self.stdout = stdout
+    self.stderr = stderr
+    self.pid = pid
+    self.duration = duration
+
+  def without_text(self):
+    """Returns copy of the output without stdout and stderr."""
+    other = copy.copy(self)
+    other.stdout = None
+    other.stderr = None
+    return other
+
+  def HasCrashed(self):
+    if utils.IsWindows():
+      return 0x80000000 & self.exit_code and not (0x3FFFFF00 & self.exit_code)
+    else:
+      # Timed out tests will have exit_code -signal.SIGTERM.
+      if self.timed_out:
+        return False
+      return (self.exit_code < 0 and
+              self.exit_code != -signal.SIGABRT)
+
+  def HasTimedOut(self):
+    return self.timed_out
+
+  def IsSuccess(self):
+    return not self.HasCrashed() and not self.HasTimedOut()
+
+  @property
+  def exit_code_string(self):
+    return "%d [%02X]" % (self.exit_code, self.exit_code & 0xffffffff)
+
+
+class _NullOutput(Output):
+  """Useful to signal that the binary has not been run."""
+  def __init__(self):
+    super(_NullOutput, self).__init__()
+
+
+# Default instance of the _NullOutput class above.
+NULL_OUTPUT = _NullOutput()
diff --git a/src/third_party/v8/tools/testrunner/objects/predictable.py b/src/third_party/v8/tools/testrunner/objects/predictable.py
new file mode 100644
index 0000000..52d14ea
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/objects/predictable.py
@@ -0,0 +1,48 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from ..local import statusfile
+from ..outproc import base as outproc_base
+from ..testproc import base as testproc_base
+from ..testproc.result import Result
+
+
+# Only check the exit code of the predictable_wrapper in
+# verify-predictable mode. Negative tests are not supported as they
+# usually also don't print allocation hashes. There are two versions of
+# negative tests: one specified by the test, the other specified through
+# the status file (e.g. known bugs).
+
+
+def get_outproc(test):
+  return OutProc(test.output_proc)
+
+
+class OutProc(outproc_base.BaseOutProc):
+  """Output processor wrapper for predictable mode. It has custom process and
+  has_unexpected_output implementation, but for all other methods it simply
+  calls wrapped output processor.
+  """
+  def __init__(self, _outproc):
+    super(OutProc, self).__init__()
+    self._outproc = _outproc
+
+  def has_unexpected_output(self, output):
+    return output.exit_code != 0
+
+  def get_outcome(self, output):
+    return self._outproc.get_outcome(output)
+
+  @property
+  def negative(self):
+    return self._outproc.negative
+
+  @property
+  def expected_outcomes(self):
+    return self._outproc.expected_outcomes
+
+
+class PredictableFilterProc(testproc_base.TestProcFilter):
+  def _filter(self, test):
+    return test.skip_predictable()
diff --git a/src/third_party/v8/tools/testrunner/objects/testcase.py b/src/third_party/v8/tools/testrunner/objects/testcase.py
new file mode 100644
index 0000000..e037f99
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/objects/testcase.py
@@ -0,0 +1,455 @@
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import copy
+import os
+import re
+import shlex
+
+from ..outproc import base as outproc
+from ..local import command
+from ..local import statusfile
+from ..local import utils
+from ..local.variants import INCOMPATIBLE_FLAGS_PER_VARIANT
+from ..local.variants import INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE
+from ..local.variants import INCOMPATIBLE_FLAGS_PER_EXTRA_FLAG
+
+
+FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
+
+# Patterns for additional resource files on Android. Files that are not
+# covered by one of the auto-detection patterns below should be listed
+# explicitly in the Resources section.
+RESOURCES_PATTERN = re.compile(r"//\s+Resources:(.*)")
+# Pattern to auto-detect files to push on Android for statements like:
+# load("path/to/file.js")
+LOAD_PATTERN = re.compile(
+    r"(?:load|readbuffer|read)\((?:'|\")([^'\"]+)(?:'|\")\)")
+# Pattern to auto-detect files to push on Android for statements like:
+# import foobar from "path/to/file.js"
+# import {foo, bar} from "path/to/file.js"
+# export {"foo" as "bar"} from "path/to/file.js"
+MODULE_FROM_RESOURCES_PATTERN = re.compile(
+    r"(?:import|export).*?from\s*\(?['\"]([^'\"]+)['\"]",
+    re.MULTILINE | re.DOTALL)
+# Pattern to detect files to push on Android for statements like:
+# import "path/to/file.js"
+# import("module.mjs").catch()...
+MODULE_IMPORT_RESOURCES_PATTERN = re.compile(
+    r"import\s*\(?['\"]([^'\"]+)['\"]",
+    re.MULTILINE | re.DOTALL)
+# Pattern to detect and strip test262 frontmatter from tests to prevent false
+# positives for MODULE_RESOURCES_PATTERN above.
+TEST262_FRONTMATTER_PATTERN = re.compile(r"/\*---.*?---\*/", re.DOTALL)
+
+TIMEOUT_LONG = "long"
+
+try:
+  cmp             # Python 2
+except NameError:
+  def cmp(x, y):  # Python 3
+    return (x > y) - (x < y)
+
+
+class TestCase(object):
+  def __init__(self, suite, path, name, test_config):
+    self.suite = suite        # TestSuite object
+
+    self.path = path          # string, e.g. 'div-mod', 'test-api/foo'
+    self.name = name          # string that identifies test in the status file
+
+    self.variant = None       # name of the used testing variant
+    self.variant_flags = []   # list of strings, flags specific to this test
+
+    # Fields used by the test processors.
+    self.origin = None # Test that this test is subtest of.
+    self.processor = None # Processor that created this subtest.
+    self.procid = '%s/%s' % (self.suite.name, self.name) # unique id
+    self.keep_output = False # Whether the output of this test should be kept.
+
+    # Test config contains information needed to build the command.
+    self._test_config = test_config
+    self._random_seed = None # Overrides test config value if not None
+
+    # Outcomes
+    self._statusfile_outcomes = None
+    self._expected_outcomes = None
+    self._checked_flag_contradictions = False
+    self._statusfile_flags = None
+    self.expected_failure_reason = None
+
+    self._prepare_outcomes()
+
+  def create_subtest(self, processor, subtest_id, variant=None, flags=None,
+                     keep_output=False, random_seed=None):
+    subtest = copy.copy(self)
+    subtest.origin = self
+    subtest.processor = processor
+    subtest.procid += '.%s' % subtest_id
+    subtest.keep_output |= keep_output
+    if random_seed:
+      subtest._random_seed = random_seed
+    if flags:
+      subtest.variant_flags = subtest.variant_flags + flags
+    if variant is not None:
+      assert self.variant is None
+      subtest.variant = variant
+      subtest._prepare_outcomes()
+    return subtest
+
+  def _prepare_outcomes(self, force_update=True):
+    if force_update or self._statusfile_outcomes is None:
+      def is_flag(outcome):
+        return outcome.startswith('--')
+      def not_flag(outcome):
+        return not is_flag(outcome)
+
+      outcomes = self.suite.statusfile.get_outcomes(self.name, self.variant)
+      # Materialize as lists so they can be inspected repeatedly and
+      # concatenated under Python 3, where filter() returns an iterator.
+      self._statusfile_outcomes = list(filter(not_flag, outcomes))
+      self._statusfile_flags = list(filter(is_flag, outcomes))
+    self._expected_outcomes = (
+      self._parse_status_file_outcomes(self._statusfile_outcomes))
+
+  def _parse_status_file_outcomes(self, outcomes):
+    if (statusfile.FAIL_SLOPPY in outcomes and
+        '--use-strict' not in self.variant_flags):
+      return outproc.OUTCOMES_FAIL
+
+    expected_outcomes = []
+    if (statusfile.FAIL in outcomes or
+        statusfile.FAIL_OK in outcomes):
+      expected_outcomes.append(statusfile.FAIL)
+    if statusfile.CRASH in outcomes:
+      expected_outcomes.append(statusfile.CRASH)
+
+    # Do not add PASS if there is nothing else. Empty outcomes are converted to
+    # the global [PASS].
+    if expected_outcomes and statusfile.PASS in outcomes:
+      expected_outcomes.append(statusfile.PASS)
+
+    # Avoid creating multiple instances of a list with a single FAIL.
+    if expected_outcomes == outproc.OUTCOMES_FAIL:
+      return outproc.OUTCOMES_FAIL
+    return expected_outcomes or outproc.OUTCOMES_PASS
+
+  def allow_timeouts(self):
+    if self.expected_outcomes == outproc.OUTCOMES_PASS:
+      self._expected_outcomes = outproc.OUTCOMES_PASS_OR_TIMEOUT
+    elif self.expected_outcomes == outproc.OUTCOMES_FAIL:
+      self._expected_outcomes = outproc.OUTCOMES_FAIL_OR_TIMEOUT
+    elif statusfile.TIMEOUT not in self.expected_outcomes:
+      self._expected_outcomes = (
+          self.expected_outcomes + [statusfile.TIMEOUT])
+
+  @property
+  def expected_outcomes(self):
+    def normalize_flag(flag):
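+      # E.g. both "--no-lazy-feedback-allocation" and
+      # "--nolazy_feedback_allocation" normalize to
+      # "--nolazy-feedback-allocation".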
+      return flag.replace("_", "-").replace("--no-", "--no")
+
+    def has_flag(conflicting_flag, flags):
+      conflicting_flag = normalize_flag(conflicting_flag)
+      if conflicting_flag in flags:
+        return True
+      if conflicting_flag.endswith("*"):
+        return any(flag.startswith(conflicting_flag[:-1]) for flag in flags)
+      return False
+
+    def check_flags(incompatible_flags, actual_flags, rule):
+      for incompatible_flag in incompatible_flags:
+        if has_flag(incompatible_flag, actual_flags):
+          self._statusfile_outcomes = outproc.OUTCOMES_FAIL
+          self._expected_outcomes = outproc.OUTCOMES_FAIL
+          self.expected_failure_reason = ("Rule " + rule + " in " +
+              "tools/testrunner/local/variants.py expected a flag " +
+              "contradiction error with " + incompatible_flag + ".")
+
+    if not self._checked_flag_contradictions:
+      self._checked_flag_contradictions = True
+
+      file_specific_flags = (self._get_source_flags() + self._get_suite_flags()
+                             + self._get_statusfile_flags())
+      file_specific_flags = [normalize_flag(flag) for flag in file_specific_flags]
+      extra_flags = [normalize_flag(flag) for flag in self._get_extra_flags()]
+
+      if self.variant in INCOMPATIBLE_FLAGS_PER_VARIANT:
+        check_flags(INCOMPATIBLE_FLAGS_PER_VARIANT[self.variant], file_specific_flags,
+                    "INCOMPATIBLE_FLAGS_PER_VARIANT[\""+self.variant+"\"]")
+
+      for variable, incompatible_flags in INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE.items():
+        if self.suite.statusfile.variables[variable]:
+          check_flags(incompatible_flags, file_specific_flags,
+                      "INCOMPATIBLE_FLAGS_PER_BUILD_VARIABLE[\""+variable+"\"]")
+
+      for extra_flag, incompatible_flags in INCOMPATIBLE_FLAGS_PER_EXTRA_FLAG.items():
+        if has_flag(extra_flag, extra_flags):
+          check_flags(incompatible_flags, file_specific_flags,
+                      "INCOMPATIBLE_FLAGS_PER_EXTRA_FLAG[\""+extra_flag+"\"]")
+    return self._expected_outcomes
+
+  @property
+  def do_skip(self):
+    return (statusfile.SKIP in self._statusfile_outcomes and
+            not self.suite.test_config.run_skipped)
+
+  @property
+  def is_slow(self):
+    return statusfile.SLOW in self._statusfile_outcomes
+
+  @property
+  def is_fail_ok(self):
+    return statusfile.FAIL_OK in self._statusfile_outcomes
+
+  @property
+  def is_pass_or_fail(self):
+    return (statusfile.PASS in self._statusfile_outcomes and
+            statusfile.FAIL in self._statusfile_outcomes and
+            statusfile.CRASH not in self._statusfile_outcomes)
+
+  @property
+  def is_fail(self):
+    return (statusfile.FAIL in self._statusfile_outcomes and
+            statusfile.PASS not in self._statusfile_outcomes)
+
+  @property
+  def only_standard_variant(self):
+    return statusfile.NO_VARIANTS in self._statusfile_outcomes
+
+  def get_command(self):
+    params = self._get_cmd_params()
+    env = self._get_cmd_env()
+    shell = self.get_shell()
+    if utils.IsWindows():
+      shell += '.exe'
+    shell_flags = self._get_shell_flags()
+    timeout = self._get_timeout(params)
+    return self._create_cmd(shell, shell_flags + params, env, timeout)
+
+  def _get_cmd_params(self):
+    """Gets command parameters and combines them in the following order:
+      - files [empty by default]
+      - random seed
+      - mode flags (based on chosen mode)
+      - extra flags (from command line)
+      - user flags (variant/fuzzer flags)
+      - source flags (from source code) [empty by default]
+      - test-suite flags
+      - statusfile flags
+
+    The best way to modify how parameters are created is to override only the
+    methods that return the partial parameter lists.
+    """
+    return (
+        self._get_files_params() +
+        self._get_random_seed_flags() +
+        self._get_mode_flags() +
+        self._get_extra_flags() +
+        self._get_variant_flags() +
+        self._get_source_flags() +
+        self._get_suite_flags() +
+        self._get_statusfile_flags()
+    )
+
+  def _get_cmd_env(self):
+    return {}
+
+  def _get_files_params(self):
+    return []
+
+  def _get_timeout_param(self):
+    return None
+
+  def _get_random_seed_flags(self):
+    return ['--random-seed=%d' % self.random_seed]
+
+  @property
+  def random_seed(self):
+    return self._random_seed or self._test_config.random_seed
+
+  def _get_extra_flags(self):
+    return self._test_config.extra_flags
+
+  def _get_variant_flags(self):
+    return self.variant_flags
+
+  def _get_statusfile_flags(self):
+    """Gets runtime flags from a status file.
+
+    Every outcome that starts with "--" is a flag.
+    """
+    return self._statusfile_flags
+
+  def _get_mode_flags(self):
+    return self._test_config.mode_flags
+
+  def _get_source_flags(self):
+    return []
+
+  def _get_suite_flags(self):
+    return []
+
+  def _get_shell_flags(self):
+    return []
+
+  def _get_timeout(self, params):
+    timeout = self._test_config.timeout
+    if "--stress-opt" in params:
+      timeout *= 4
+    if "--jitless" in params:
+      timeout *= 2
+    if "--no-opt" in params:
+      timeout *= 2
+    if "--noenable-vfp3" in params:
+      timeout *= 2
+    if self._get_timeout_param() == TIMEOUT_LONG:
+      timeout *= 10
+    if self.is_slow:
+      timeout *= 4
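+    # For example, with a base timeout of 60s, a slow test run with
+    # --stress-opt ends up with 60 * 4 * 4 = 960s; multipliers stack when
+    # several conditions apply.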
+    return timeout
+
+  def get_shell(self):
+    raise NotImplementedError()
+
+  def _get_suffix(self):
+    return '.js'
+
+  def _create_cmd(self, shell, params, env, timeout):
+    return command.Command(
+      cmd_prefix=self._test_config.command_prefix,
+      shell=os.path.abspath(os.path.join(self._test_config.shell_dir, shell)),
+      args=params,
+      env=env,
+      timeout=timeout,
+      verbose=self._test_config.verbose,
+      resources_func=self._get_resources,
+      handle_sigterm=True,
+    )
+
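+  # Example (assuming FLAGS_PATTERN matches the usual "// Flags: ..." comment
+  # convention in V8 test files): a source line such as
+  #   // Flags: --expose-gc --allow-natives-syntax
+  # yields ['--expose-gc', '--allow-natives-syntax'] from the method below.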
+  def _parse_source_flags(self, source=None):
+    source = source or self.get_source()
+    flags = []
+    for match in re.findall(FLAGS_PATTERN, source):
+      flags += shlex.split(match.strip())
+    return flags
+
+  def is_source_available(self):
+    return self._get_source_path() is not None
+
+  def get_source(self):
+    with open(self._get_source_path()) as f:
+      return f.read()
+
+  def _get_source_path(self):
+    return None
+
+  def _get_resources(self):
+    """Returns a list of absolute paths with additional files needed by the
+    test case.
+
+    Used to push additional files to Android devices.
+    """
+    return []
+
+  def skip_predictable(self):
+    """Returns True if the test case is not suitable for predictable testing."""
+    return True
+
+  @property
+  def output_proc(self):
+    if self.expected_outcomes is outproc.OUTCOMES_PASS:
+      return outproc.DEFAULT
+    return outproc.OutProc(self.expected_outcomes)
+
+  def __cmp__(self, other):
+    # Make sure that test cases are sorted correctly if sorted without
+    # key function. But using a key function is preferred for speed.
+    return cmp(
+        (self.suite.name, self.name, self.variant),
+        (other.suite.name, other.name, other.variant)
+    )
+
+  def __str__(self):
+    return self.suite.name + '/' + self.name
+
+
+class D8TestCase(TestCase):
+  def get_shell(self):
+    return "d8"
+
+  def _get_shell_flags(self):
+    return ['--test']
+
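+  # Example: for a test file containing
+  #   load('test/mjsunit/mjsunit.js')
+  #   import {f} from './helper.mjs'
+  # the method below reports mjsunit.js (resolved relative to the base dir)
+  # and helper.mjs (resolved relative to the importing file), assuming the
+  # LOAD_PATTERN/MODULE_*_PATTERN regexps defined at module scope match these
+  # forms.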
+  def _get_resources_for_file(self, file):
+    """Returns for a given file a list of absolute paths of files needed by the
+    given file.
+    """
+    with open(file) as f:
+      source = f.read()
+    result = []
+    def add_path(path):
+      result.append(os.path.abspath(path.replace('/', os.path.sep)))
+    def add_import_path(import_path):
+      add_path(os.path.normpath(
+        os.path.join(os.path.dirname(file), import_path)))
+    def strip_test262_frontmatter(input):
+      return TEST262_FRONTMATTER_PATTERN.sub('', input)
+    for match in RESOURCES_PATTERN.finditer(source):
+      # There are several resources per line. Relative to base dir.
+      for path in match.group(1).strip().split():
+        add_path(path)
+    # Strip test262 frontmatter before looking for load() and import/export
+    # statements.
+    source = strip_test262_frontmatter(source)
+    for match in LOAD_PATTERN.finditer(source):
+      # Files in load statements are relative to base dir.
+      add_path(match.group(1))
+    # Imported files are relative to the file importing them.
+    for match in MODULE_FROM_RESOURCES_PATTERN.finditer(source):
+      add_import_path(match.group(1))
+    for match in MODULE_IMPORT_RESOURCES_PATTERN.finditer(source):
+      add_import_path(match.group(1))
+    return result
+
+  def _get_resources(self):
+    """Returns the list of files needed by a test case."""
+    if not self._get_source_path():
+      return []
+    result = set()
+    to_check = [self._get_source_path()]
+    # Recurse over all files until reaching a fixpoint.
+    while to_check:
+      next_resource = to_check.pop()
+      result.add(next_resource)
+      for resource in self._get_resources_for_file(next_resource):
+        # Only add files that exist on disk. The patterns we check for give
+        # some false positives otherwise.
+        if resource not in result and os.path.exists(resource):
+          to_check.append(resource)
+    return sorted(list(result))
+
+  def skip_predictable(self):
+    """Returns True if the test case is not suitable for predictable testing."""
+    return (statusfile.FAIL in self.expected_outcomes or
+            self.output_proc.negative)
diff --git a/src/third_party/v8/tools/testrunner/outproc/__init__.py b/src/third_party/v8/tools/testrunner/outproc/__init__.py
new file mode 100644
index 0000000..4433538
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/outproc/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/src/third_party/v8/tools/testrunner/outproc/base.py b/src/third_party/v8/tools/testrunner/outproc/base.py
new file mode 100644
index 0000000..9646b96
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/outproc/base.py
@@ -0,0 +1,213 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import itertools
+
+from ..testproc.base import (
+    DROP_RESULT, DROP_OUTPUT, DROP_PASS_OUTPUT, DROP_PASS_STDOUT)
+from ..local import statusfile
+from ..testproc.result import Result
+
+
+OUTCOMES_PASS = [statusfile.PASS]
+OUTCOMES_FAIL = [statusfile.FAIL]
+OUTCOMES_PASS_OR_TIMEOUT = [statusfile.PASS, statusfile.TIMEOUT]
+OUTCOMES_FAIL_OR_TIMEOUT = [statusfile.FAIL, statusfile.TIMEOUT]
+
+
+class BaseOutProc(object):
+  def process(self, output, reduction=None):
+    has_unexpected_output = self.has_unexpected_output(output)
+    if has_unexpected_output:
+      self.regenerate_expected_files(output)
+    return self._create_result(has_unexpected_output, output, reduction)
+
+  def regenerate_expected_files(self, output):
+    return
+
+  def has_unexpected_output(self, output):
+    return self.get_outcome(output) not in self.expected_outcomes
+
+  def _create_result(self, has_unexpected_output, output, reduction):
+    """Creates Result instance. When reduction is passed it tries to drop some
+    parts of the result to save memory and time needed to send the result
+    across process boundary. None disables reduction and full result is created.
+    """
+    if reduction == DROP_RESULT:
+      return None
+    if reduction == DROP_OUTPUT:
+      return Result(has_unexpected_output, None)
+    if not has_unexpected_output:
+      if reduction == DROP_PASS_OUTPUT:
+        return Result(has_unexpected_output, None)
+      if reduction == DROP_PASS_STDOUT:
+        return Result(has_unexpected_output, output.without_text())
+
+    return Result(has_unexpected_output, output)
+
+  def get_outcome(self, output):
+    if output.HasCrashed():
+      return statusfile.CRASH
+    elif output.HasTimedOut():
+      return statusfile.TIMEOUT
+    elif self._has_failed(output):
+      return statusfile.FAIL
+    else:
+      return statusfile.PASS
+
+  def _has_failed(self, output):
+    execution_failed = self._is_failure_output(output)
+    if self.negative:
+      return not execution_failed
+    return execution_failed
+
+  def _is_failure_output(self, output):
+    return output.exit_code != 0
+
+  @property
+  def negative(self):
+    return False
+
+  @property
+  def expected_outcomes(self):
+    raise NotImplementedError()
+
+
+class Negative(object):
+  @property
+  def negative(self):
+    return True
+
+
+class PassOutProc(BaseOutProc):
+  """Output processor optimized for positive tests expected to PASS."""
+  def has_unexpected_output(self, output):
+    return self.get_outcome(output) != statusfile.PASS
+
+  @property
+  def expected_outcomes(self):
+    return OUTCOMES_PASS
+
+
+class NegPassOutProc(Negative, PassOutProc):
+  """Output processor optimized for negative tests expected to PASS"""
+  pass
+
+
+class OutProc(BaseOutProc):
+  """Output processor optimized for positive tests with expected outcomes
+  different than a single PASS.
+  """
+  def __init__(self, expected_outcomes):
+    self._expected_outcomes = expected_outcomes
+
+  @property
+  def expected_outcomes(self):
+    return self._expected_outcomes
+
+  # TODO(majeski): Inherit from PassOutProc in case of OUTCOMES_PASS and remove
+  # custom get/set state.
+  def __getstate__(self):
+    d = self.__dict__
+    if self._expected_outcomes is OUTCOMES_PASS:
+      d = d.copy()
+      del d['_expected_outcomes']
+    return d
+
+  def __setstate__(self, d):
+    if '_expected_outcomes' not in d:
+      d['_expected_outcomes'] = OUTCOMES_PASS
+    self.__dict__.update(d)
+
+
+# TODO(majeski): Override __reduce__ to make it deserialize as one instance.
+DEFAULT = PassOutProc()
+DEFAULT_NEGATIVE = NegPassOutProc()
+
+
+class ExpectedOutProc(OutProc):
+  """Output processor that has is_failure_output depending on comparing the
+  output with the expected output.
+  """
+  def __init__(self, expected_outcomes, expected_filename,
+                regenerate_expected_files=False):
+    super(ExpectedOutProc, self).__init__(expected_outcomes)
+    self._expected_filename = expected_filename
+    self._regenerate_expected_files = regenerate_expected_files
+
+  def _is_failure_output(self, output):
+    if output.exit_code != 0:
+      return True
+
+    with open(self._expected_filename, 'r') as f:
+      expected_lines = f.readlines()
+
+    for act_iterator in self._act_block_iterator(output):
+      for expected, actual in itertools.izip_longest(
+          self._expected_iterator(expected_lines),
+          act_iterator,
+          fillvalue=''
+      ):
+        if expected != actual:
+          return True
+      return False
+
+  def regenerate_expected_files(self, output):
+    if not self._regenerate_expected_files:
+      return
+    lines = output.stdout.splitlines()
+    with open(self._expected_filename, 'w') as f:
+      for line in lines:
+        f.write(line+'\n')
+
+  def _act_block_iterator(self, output):
+    """Iterates over blocks of actual output lines."""
+    lines = output.stdout.splitlines()
+    start_index = 0
+    found_eqeq = False
+    for index, line in enumerate(lines):
+      # If a stress test separator is found:
+      if line.startswith('=='):
+        # Iterate over all lines before a separator except the first.
+        if not found_eqeq:
+          found_eqeq = True
+        else:
+          yield self._actual_iterator(lines[start_index:index])
+        # The next block of output lines starts after the separator.
+        start_index = index + 1
+    # Iterate over complete output if no separator was found.
+    if not found_eqeq:
+      yield self._actual_iterator(lines)
+
+  def _actual_iterator(self, lines):
+    return self._iterator(lines, self._ignore_actual_line)
+
+  def _expected_iterator(self, lines):
+    return self._iterator(lines, self._ignore_expected_line)
+
+  def _ignore_actual_line(self, line):
+    """Ignore empty lines, valgrind output, Android output and trace
+    incremental marking output.
+    """
+    if not line:
+      return True
+    return (line.startswith('==') or
+            line.startswith('**') or
+            line.startswith('ANDROID') or
+            line.startswith('###') or
+            # Android linker warning.
+            line.startswith('WARNING: linker:') or
+            # FIXME(machenbach): The test driver shouldn't try to use slow
+            # asserts if they weren't compiled. This fails in optdebug=2.
+            line == 'Warning: unknown flag --enable-slow-asserts.' or
+            line == 'Try --help for options')
+
+  def _ignore_expected_line(self, line):
+    return not line
+
+  def _iterator(self, lines, ignore_predicate):
+    for line in lines:
+      line = line.strip()
+      if not ignore_predicate(line):
+        yield line
diff --git a/src/third_party/v8/tools/testrunner/outproc/message.py b/src/third_party/v8/tools/testrunner/outproc/message.py
new file mode 100644
index 0000000..c253b6f
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/outproc/message.py
@@ -0,0 +1,65 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import itertools
+import os
+import re
+
+from . import base
+
+
+class OutProc(base.OutProc):
+  def __init__(self, expected_outcomes, basepath, expected_fail):
+    super(OutProc, self).__init__(expected_outcomes)
+    self._basepath = basepath
+    self._expected_fail = expected_fail
+
+  def _is_failure_output(self, output):
+    fail = output.exit_code != 0
+    if fail != self._expected_fail:
+      return True
+
+    expected_lines = []
+    # Can't use utils.ReadLinesFrom() here because it strips whitespace.
+    with open(self._basepath + '.out') as f:
+      for line in f:
+        if line.startswith("#") or not line.strip():
+          continue
+        expected_lines.append(line)
+    raw_lines = output.stdout.splitlines()
+    actual_lines = [ s for s in raw_lines if not self._ignore_line(s) ]
+    if len(expected_lines) != len(actual_lines):
+      return True
+
+    # Try .js first, and fall back to .mjs.
+    # TODO(v8:9406): clean this up by never separating the path from
+    # the extension in the first place.
+    base_path = self._basepath + '.js'
+    if not os.path.exists(base_path):
+      base_path = self._basepath + '.mjs'
+
+    env = {
+      'basename': os.path.basename(base_path),
+    }
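+    # Expected lines may use '%(basename)s' substitution as well as '*' and
+    # '{NUMBER}' wildcards; e.g. an expected line like
+    #   *%(basename)s:{NUMBER}: ReferenceError*
+    # matches any actual line containing '<basename>:<number>: ReferenceError'.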
+    for (expected, actual) in itertools.izip_longest(
+        expected_lines, actual_lines, fillvalue=''):
+      pattern = re.escape(expected.rstrip() % env)
+      pattern = pattern.replace('\\*', '.*')
+      pattern = pattern.replace('\\{NUMBER\\}', '\d+(?:\.\d*)?')
+      pattern = '^%s$' % pattern
+      if not re.match(pattern, actual):
+        return True
+    return False
+
+  def _ignore_line(self, string):
+    """Ignore empty lines, valgrind output, Android output."""
+    return (
+      not string or
+      not string.strip() or
+      string.startswith("==") or
+      string.startswith("**") or
+      string.startswith("ANDROID") or
+      # Android linker warning.
+      string.startswith('WARNING: linker:')
+    )
diff --git a/src/third_party/v8/tools/testrunner/outproc/mkgrokdump.py b/src/third_party/v8/tools/testrunner/outproc/mkgrokdump.py
new file mode 100644
index 0000000..4013023
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/outproc/mkgrokdump.py
@@ -0,0 +1,31 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import difflib
+
+from . import base
+
+
+class OutProc(base.OutProc):
+  def __init__(self, expected_outcomes, expected_path):
+    super(OutProc, self).__init__(expected_outcomes)
+    self._expected_path = expected_path
+
+  def _is_failure_output(self, output):
+    with open(self._expected_path) as f:
+      expected = f.read()
+    expected_lines = expected.splitlines()
+    actual_lines = output.stdout.splitlines()
+    diff = difflib.unified_diff(expected_lines, actual_lines, lineterm="",
+                                fromfile="expected_path")
+    diffstring = '\n'.join(diff)
+    if diffstring != "":
+      if "generated from a non-shipping build" in output.stdout:
+        return False
+      if not "generated from a shipping build" in output.stdout:
+        output.stdout = "Unexpected output:\n\n" + output.stdout
+        return True
+      output.stdout = diffstring
+      return True
+    return False
diff --git a/src/third_party/v8/tools/testrunner/outproc/mozilla.py b/src/third_party/v8/tools/testrunner/outproc/mozilla.py
new file mode 100644
index 0000000..1400d0e
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/outproc/mozilla.py
@@ -0,0 +1,33 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from . import base
+
+
+def _is_failure_output(self, output):
+  return (
+    output.exit_code != 0 or
+    'FAILED!' in output.stdout
+  )
+
+
+class OutProc(base.OutProc):
+  """Optimized for positive tests."""
+OutProc._is_failure_output = _is_failure_output
+
+
+class PassOutProc(base.PassOutProc):
+  """Optimized for positive tests expected to PASS."""
+PassOutProc._is_failure_output = _is_failure_output
+
+
+class NegOutProc(base.Negative, OutProc):
+  pass
+
+class NegPassOutProc(base.Negative, PassOutProc):
+  pass
+
+
+MOZILLA_PASS_DEFAULT = PassOutProc()
+MOZILLA_PASS_NEGATIVE = NegPassOutProc()
diff --git a/src/third_party/v8/tools/testrunner/outproc/test262.py b/src/third_party/v8/tools/testrunner/outproc/test262.py
new file mode 100644
index 0000000..bf3bc05
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/outproc/test262.py
@@ -0,0 +1,63 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import re
+
+from . import base
+
+
+def _is_failure_output(output):
+  return (
+    output.exit_code != 0 or
+    'FAILED!' in output.stdout
+  )
+
+
+class ExceptionOutProc(base.OutProc):
+  """Output processor for tests with expected exception."""
+  def __init__(
+      self, expected_outcomes, expected_exception=None, negative=False):
+    super(ExceptionOutProc, self).__init__(expected_outcomes)
+    self._expected_exception = expected_exception
+    self._negative = negative
+
+  @property
+  def negative(self):
+    return self._negative
+
+  def _is_failure_output(self, output):
+    if self._expected_exception != self._parse_exception(output.stdout):
+      return True
+    return _is_failure_output(output)
+
+  def _parse_exception(self, string):
+    # somefile:somelinenumber: someerror[: sometext]
+    # somefile might include an optional drive letter on windows e.g. "e:".
+    match = re.search(
+        '^(?:\w:)?[^:]*:[0-9]+: ([^: ]+?)($|: )', string, re.MULTILINE)
+    if match:
+      return match.group(1).strip()
+    else:
+      return None
+
+
+class NoExceptionOutProc(base.OutProc):
+  """Output processor optimized for tests without expected exception."""
+  def __init__(self, expected_outcomes):
+    super(NoExceptionOutProc, self).__init__(expected_outcomes)
+
+  def _is_failure_output(self, output):
+    return _is_failure_output(output)
+
+
+class PassNoExceptionOutProc(base.PassOutProc):
+  """
+  Output processor optimized for tests expected to PASS without expected
+  exception.
+  """
+  def _is_failure_output(self, output):
+    return _is_failure_output(output)
+
+
+PASS_NO_EXCEPTION = PassNoExceptionOutProc()
diff --git a/src/third_party/v8/tools/testrunner/outproc/webkit.py b/src/third_party/v8/tools/testrunner/outproc/webkit.py
new file mode 100644
index 0000000..290e67d
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/outproc/webkit.py
@@ -0,0 +1,18 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from . import base
+
+
+class OutProc(base.ExpectedOutProc):
+  def _is_failure_output(self, output):
+    if output.exit_code != 0:
+      return True
+    return super(OutProc, self)._is_failure_output(output)
+
+  def _ignore_expected_line(self, line):
+    return (
+        line.startswith('#') or
+        super(OutProc, self)._ignore_expected_line(line)
+    )
diff --git a/src/third_party/v8/tools/testrunner/standard_runner.py b/src/third_party/v8/tools/testrunner/standard_runner.py
new file mode 100755
index 0000000..ff58391
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/standard_runner.py
@@ -0,0 +1,397 @@
+#!/usr/bin/env python
+#
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import absolute_import
+from __future__ import print_function
+from functools import reduce
+
+import datetime
+import json
+import os
+import sys
+import tempfile
+
+# Adds testrunner to the path, hence it has to be imported at the beginning.
+from . import base_runner
+
+from testrunner.local import utils
+from testrunner.local.variants import ALL_VARIANTS
+from testrunner.objects import predictable
+from testrunner.testproc.execution import ExecutionProc
+from testrunner.testproc.filter import StatusFileFilterProc, NameFilterProc
+from testrunner.testproc.loader import LoadProc
+from testrunner.testproc.seed import SeedProc
+from testrunner.testproc.variant import VariantProc
+
+
+VARIANTS = ['default']
+
+MORE_VARIANTS = [
+  'jitless',
+  'stress',
+  'stress_js_bg_compile_wasm_code_gc',
+  'stress_incremental_marking',
+]
+
+VARIANT_ALIASES = {
+  # The default for developer workstations.
+  'dev': VARIANTS,
+  # Additional variants, run on all bots.
+  'more': MORE_VARIANTS,
+  # Shortcut for the two above ('more' first - it has the longer running tests)
+  'exhaustive': MORE_VARIANTS + VARIANTS,
+  # Additional variants, run on a subset of bots.
+  'extra': ['nooptimization', 'future', 'no_wasm_traps', 'turboprop',
+            'instruction_scheduling'],
+}
+
+# Extra flags passed to all tests using the standard test runner.
+EXTRA_DEFAULT_FLAGS = ['--testing-d8-test-runner']
+
+GC_STRESS_FLAGS = ['--gc-interval=500', '--stress-compaction',
+                   '--concurrent-recompilation-queue-length=64',
+                   '--concurrent-recompilation-delay=500',
+                   '--concurrent-recompilation',
+                   '--stress-flush-bytecode',
+                   '--wasm-code-gc', '--stress-wasm-code-gc']
+
+RANDOM_GC_STRESS_FLAGS = ['--random-gc-interval=5000',
+                          '--stress-compaction-random']
+
+
+PREDICTABLE_WRAPPER = os.path.join(
+    base_runner.BASE_DIR, 'tools', 'predictable_wrapper.py')
+
+
+class StandardTestRunner(base_runner.BaseTestRunner):
+  def __init__(self, *args, **kwargs):
+    super(StandardTestRunner, self).__init__(*args, **kwargs)
+
+    self.sancov_dir = None
+    self._variants = None
+
+  @property
+  def framework_name(self):
+    return 'standard_runner'
+
+  def _get_default_suite_names(self):
+    return ['default']
+
+  def _add_parser_options(self, parser):
+    parser.add_option('--novfp3',
+                      help='Indicates that V8 was compiled without VFP3'
+                      ' support',
+                      default=False, action='store_true')
+
+    # Variants
+    parser.add_option('--no-variants', '--novariants',
+                      help='Deprecated. '
+                           'Equivalent to passing --variants=default',
+                      default=False, dest='no_variants', action='store_true')
+    parser.add_option('--variants',
+                      help='Comma-separated list of testing variants;'
+                      ' default: "%s"' % ','.join(VARIANTS))
+    parser.add_option('--exhaustive-variants',
+                      default=False, action='store_true',
+                      help='Deprecated. '
+                           'Equivalent to passing --variants=exhaustive')
+
+    # Filters
+    parser.add_option('--slow-tests', default='dontcare',
+                      help='Regard slow tests (run|skip|dontcare)')
+    parser.add_option('--pass-fail-tests', default='dontcare',
+                      help='Regard pass|fail tests (run|skip|dontcare)')
+    parser.add_option('--quickcheck', default=False, action='store_true',
+                      help=('Quick check mode (skip slow tests)'))
+
+    # Stress modes
+    parser.add_option('--gc-stress',
+                      help='Switch on GC stress mode',
+                      default=False, action='store_true')
+    parser.add_option('--random-gc-stress',
+                      help='Switch on random GC stress mode',
+                      default=False, action='store_true')
+    parser.add_option('--random-seed-stress-count', default=1, type='int',
+                      dest='random_seed_stress_count',
+                      help='Number of runs with different random seeds. Only '
+                           'with test processors: 0 means infinite '
+                           'generation.')
+
+    # Extra features.
+    parser.add_option('--time', help='Print timing information after running',
+                      default=False, action='store_true')
+
+    # Noop
+    parser.add_option('--cfi-vptr',
+                      help='Run tests with UBSAN cfi_vptr option.',
+                      default=False, action='store_true')
+    parser.add_option('--infra-staging', help='Use new test runner features',
+                      dest='infra_staging', default=None,
+                      action='store_true')
+    parser.add_option('--no-infra-staging',
+                      help='Opt out of new test runner features',
+                      dest='infra_staging', default=None,
+                      action='store_false')
+    parser.add_option('--no-sorting', '--nosorting',
+                      help='Don\'t sort tests according to duration of last'
+                      ' run.',
+                      default=False, dest='no_sorting', action='store_true')
+    parser.add_option('--no-presubmit', '--nopresubmit',
+                      help='Skip presubmit checks (deprecated)',
+                      default=False, dest='no_presubmit', action='store_true')
+
+    # Unimplemented for test processors
+    parser.add_option('--sancov-dir',
+                      help='Directory where to collect coverage data')
+    parser.add_option('--cat', help='Print the source of the tests',
+                      default=False, action='store_true')
+    parser.add_option('--flakiness-results',
+                      help='Path to a file for storing flakiness json.')
+    parser.add_option('--warn-unused', help='Report unused rules',
+                      default=False, action='store_true')
+    parser.add_option('--report', default=False, action='store_true',
+                      help='Print a summary of the tests to be run')
+
+  def _process_options(self, options):
+    if options.sancov_dir:
+      self.sancov_dir = options.sancov_dir
+      if not os.path.exists(self.sancov_dir):
+        print('sancov-dir %s doesn\'t exist' % self.sancov_dir)
+        raise base_runner.TestRunnerError()
+
+    if options.gc_stress:
+      options.extra_flags += GC_STRESS_FLAGS
+
+    if options.random_gc_stress:
+      options.extra_flags += RANDOM_GC_STRESS_FLAGS
+
+    if self.build_config.asan:
+      options.extra_flags.append('--invoke-weak-callbacks')
+
+    if options.novfp3:
+      options.extra_flags.append('--noenable-vfp3')
+
+    if options.no_variants:  # pragma: no cover
+      print ('Option --no-variants is deprecated. '
+             'Pass --variants=default instead.')
+      assert not options.variants
+      options.variants = 'default'
+
+    if options.exhaustive_variants:  # pragma: no cover
+      # TODO(machenbach): Switch infra to --variants=exhaustive after M65.
+      print ('Option --exhaustive-variants is deprecated. '
+             'Pass --variants=exhaustive instead.')
+      # This is used on many bots. It includes a larger set of default
+      # variants.
+      # Other options for manipulating variants still apply afterwards.
+      assert not options.variants
+      options.variants = 'exhaustive'
+
+    if options.quickcheck:
+      assert not options.variants
+      options.variants = 'stress,default'
+      options.slow_tests = 'skip'
+      options.pass_fail_tests = 'skip'
+
+    if self.build_config.predictable:
+      options.variants = 'default'
+      options.extra_flags.append('--predictable')
+      options.extra_flags.append('--verify-predictable')
+      options.extra_flags.append('--no-inline-new')
+      # Add predictable wrapper to command prefix.
+      options.command_prefix = (
+          [sys.executable, PREDICTABLE_WRAPPER] + options.command_prefix)
+
+    # TODO(machenbach): Figure out how to test a bigger subset of variants on
+    # msan.
+    if self.build_config.msan:
+      options.variants = 'default'
+
+    if options.variants == 'infra_staging':
+      options.variants = 'exhaustive'
+
+    self._variants = self._parse_variants(options.variants)
+
+    def CheckTestMode(name, option):  # pragma: no cover
+      if option not in ['run', 'skip', 'dontcare']:
+        print('Unknown %s mode %s' % (name, option))
+        raise base_runner.TestRunnerError()
+    CheckTestMode('slow test', options.slow_tests)
+    CheckTestMode('pass|fail test', options.pass_fail_tests)
+    if self.build_config.no_i18n:
+      base_runner.TEST_MAP['bot_default'].remove('intl')
+      base_runner.TEST_MAP['default'].remove('intl')
+      # TODO(machenbach): uncomment after infra side lands.
+      # base_runner.TEST_MAP['d8_default'].remove('intl')
+
+    if options.time and not options.json_test_results:
+      # We retrieve the slowest tests from the JSON output file, so create
+      # a temporary output file (which will automatically get deleted on exit)
+      # if the user didn't specify one.
+      self._temporary_json_output_file = tempfile.NamedTemporaryFile(
+          prefix="v8-test-runner-")
+      options.json_test_results = self._temporary_json_output_file.name
+
+  def _runner_flags(self):
+    return EXTRA_DEFAULT_FLAGS
+
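+  # Example: '--variants=dev,extra' expands both aliases via VARIANT_ALIASES,
+  # takes the union of the resulting variant names and keeps them in the order
+  # defined by ALL_VARIANTS; unknown names are reported below and abort the run.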
+  def _parse_variants(self, aliases_str):
+    # Use developer defaults if no variant was specified.
+    aliases_str = aliases_str or 'dev'
+    aliases = aliases_str.split(',')
+    user_variants = set(reduce(
+        list.__add__, [VARIANT_ALIASES.get(a, [a]) for a in aliases]))
+
+    result = [v for v in ALL_VARIANTS if v in user_variants]
+    if len(result) == len(user_variants):
+      return result
+
+    for v in user_variants:
+      if v not in ALL_VARIANTS:
+        print('Unknown variant: %s' % v)
+        print('    Available variants: %s' % ALL_VARIANTS)
+        print('    Available variant aliases: %s' % VARIANT_ALIASES.keys())
+        raise base_runner.TestRunnerError()
+    assert False, 'Unreachable'
+
+  def _setup_env(self):
+    super(StandardTestRunner, self)._setup_env()
+
+    symbolizer_option = self._get_external_symbolizer_option()
+
+    if self.sancov_dir:
+      os.environ['ASAN_OPTIONS'] = ':'.join([
+        'coverage=1',
+        'coverage_dir=%s' % self.sancov_dir,
+        symbolizer_option,
+        'allow_user_segv_handler=1',
+      ])
+
+  def _get_statusfile_variables(self, options):
+    variables = (
+        super(StandardTestRunner, self)._get_statusfile_variables(options))
+
+    variables.update({
+      'gc_stress': options.gc_stress or options.random_gc_stress,
+      'gc_fuzzer': options.random_gc_stress,
+      'novfp3': options.novfp3,
+    })
+    return variables
+
+  def _do_execute(self, tests, args, options):
+    jobs = options.j
+
+    print('>>> Running with test processors')
+    loader = LoadProc(tests)
+    results = self._create_result_tracker(options)
+    indicators = self._create_progress_indicators(
+        tests.test_count_estimate, options)
+
+    outproc_factory = None
+    if self.build_config.predictable:
+      outproc_factory = predictable.get_outproc
+    execproc = ExecutionProc(jobs, outproc_factory)
+    sigproc = self._create_signal_proc()
+
+    procs = [
+      loader,
+      NameFilterProc(args) if args else None,
+      StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
+      VariantProc(self._variants),
+      StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
+      self._create_predictable_filter(),
+      self._create_shard_proc(options),
+      self._create_seed_proc(options),
+      sigproc,
+    ] + indicators + [
+      results,
+      self._create_timeout_proc(options),
+      self._create_rerun_proc(options),
+      execproc,
+    ]
+
+    self._prepare_procs(procs)
+
+    loader.load_initial_tests(initial_batch_size=options.j * 2)
+
+    # This starts up worker processes and blocks until all tests are
+    # processed.
+    execproc.run()
+
+    for indicator in indicators:
+      indicator.finished()
+
+    if tests.test_count_estimate:
+      percentage = float(results.total) / tests.test_count_estimate * 100
+    else:
+      percentage = 0
+
+    print('>>> %d base tests produced %d (%d%%) non-filtered tests' % (
+        tests.test_count_estimate, results.total, percentage))
+
+    print('>>> %d tests ran' % (results.total - results.remaining))
+
+    exit_code = utils.EXIT_CODE_PASS
+    if results.failed:
+      exit_code = utils.EXIT_CODE_FAILURES
+    if not results.total:
+      exit_code = utils.EXIT_CODE_NO_TESTS
+
+    if options.time:
+      self._print_durations(options)
+
+    # Indicate if a SIGINT or SIGTERM happened.
+    return max(exit_code, sigproc.exit_code)
+
+  def _print_durations(self, options):
+
+    def format_duration(duration_in_seconds):
+      duration = datetime.timedelta(seconds=duration_in_seconds)
+      time = (datetime.datetime.min + duration).time()
+      return time.strftime('%M:%S:') + '%03i' % int(time.microsecond / 1000)
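+    # e.g. format_duration(83.5) -> '01:23:500'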
+
+    def _duration_results_text(test):
+      return [
+        'Test: %s' % test['name'],
+        'Flags: %s' % ' '.join(test['flags']),
+        'Command: %s' % test['command'],
+        'Duration: %s' % format_duration(test['duration']),
+      ]
+
+    assert os.path.exists(options.json_test_results)
+    with open(options.json_test_results, "r") as f:
+      output = json.load(f)
+    lines = []
+    for test in output['slowest_tests']:
+      suffix = ''
+      if test.get('marked_slow') is False:
+        suffix = ' *'
+      lines.append(
+          '%s %s%s' % (format_duration(test['duration']),
+                       test['name'], suffix))
+
+    # Slowest tests duration details.
+    lines.extend(['', 'Details:', ''])
+    for test in output['slowest_tests']:
+      lines.extend(_duration_results_text(test))
+    print("\n".join(lines))
+
+  def _create_predictable_filter(self):
+    if not self.build_config.predictable:
+      return None
+    return predictable.PredictableFilterProc()
+
+  def _create_seed_proc(self, options):
+    if options.random_seed_stress_count == 1:
+      return None
+    return SeedProc(options.random_seed_stress_count, options.random_seed,
+                    options.j * 4)
+
+
+if __name__ == '__main__':
+  sys.exit(StandardTestRunner().execute())
diff --git a/src/third_party/v8/tools/testrunner/test_config.py b/src/third_party/v8/tools/testrunner/test_config.py
new file mode 100644
index 0000000..fed6006
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/test_config.py
@@ -0,0 +1,36 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import random
+
+from .utils import random_utils
+
+
+class TestConfig(object):
+  def __init__(self,
+               command_prefix,
+               extra_flags,
+               isolates,
+               mode_flags,
+               no_harness,
+               noi18n,
+               random_seed,
+               run_skipped,
+               shell_dir,
+               timeout,
+               verbose,
+               regenerate_expected_files=False):
+    self.command_prefix = command_prefix
+    self.extra_flags = extra_flags
+    self.isolates = isolates
+    self.mode_flags = mode_flags
+    self.no_harness = no_harness
+    self.noi18n = noi18n
+    # random_seed is never None.
+    self.random_seed = random_seed or random_utils.random_seed()
+    self.run_skipped = run_skipped
+    self.shell_dir = shell_dir
+    self.timeout = timeout
+    self.verbose = verbose
+    self.regenerate_expected_files = regenerate_expected_files
diff --git a/src/third_party/v8/tools/testrunner/testproc/__init__.py b/src/third_party/v8/tools/testrunner/testproc/__init__.py
new file mode 100644
index 0000000..4433538
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/testproc/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/src/third_party/v8/tools/testrunner/testproc/base.py b/src/third_party/v8/tools/testrunner/testproc/base.py
new file mode 100644
index 0000000..6048ef5
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/testproc/base.py
@@ -0,0 +1,216 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from .result import SKIPPED
+
+
+"""
+Pipeline
+
+Test processors are chained together and communicate with each other by
+calling previous/next processor in the chain.
+     ----next_test()---->     ----next_test()---->
+Proc1                    Proc2                    Proc3
+     <---result_for()----     <---result_for()----
+
+For every next_test there is exactly one result_for call.
+If a processor ignores the test, it has to return SkippedResult.
+If it created multiple subtests for one test and wants to pass all of them to
+the previous processor, it can enclose them in GroupedResult.
+
+
+Subtests
+
+When a test processor needs to modify the test or create variants of it, it
+creates subtests and sends them to the next processor.
+Each subtest has:
+- procid - globally unique id that should contain the id of the parent test
+          and a suffix given by the test processor, e.g. its name + subtest type.
+- processor - which created it
+- origin - pointer to the parent (sub)test
+"""
+
+
+DROP_RESULT = 0
+DROP_OUTPUT = 1
+DROP_PASS_OUTPUT = 2
+DROP_PASS_STDOUT = 3
+
+
+class TestProc(object):
+  def __init__(self):
+    self._prev_proc = None
+    self._next_proc = None
+    self._stopped = False
+    self._requirement = DROP_RESULT
+    self._prev_requirement = None
+    self._reduce_result = lambda result: result
+
+  def connect_to(self, next_proc):
+    """Puts `next_proc` after itself in the chain."""
+    next_proc._prev_proc = self
+    self._next_proc = next_proc
+
+  def remove_from_chain(self):
+    if self._prev_proc:
+      self._prev_proc._next_proc = self._next_proc
+    if self._next_proc:
+      self._next_proc._prev_proc = self._prev_proc
+
+  def setup(self, requirement=DROP_RESULT):
+    """
+    Method called by previous processor or processor pipeline creator to let
+    the processors know what part of the result can be ignored.
+    """
+    self._prev_requirement = requirement
+    if self._next_proc:
+      self._next_proc.setup(max(requirement, self._requirement))
+
+    # Since we don't gain anything by dropping only part of the result, we
+    # either drop the whole result or pass it on as is. The real reduction happens
+    # during result creation (in the output processor), so the result is
+    # immutable.
+    if (self._prev_requirement < self._requirement and
+        self._prev_requirement == DROP_RESULT):
+      self._reduce_result = lambda _: None
+
+  def next_test(self, test):
+    """
+    Method called by the previous processor whenever it produces a new test.
+    This method shouldn't be called by anyone except the previous processor.
+    Returns a boolean value to signal whether the test was loaded into the
+    execution queue successfully or not.
+    """
+    raise NotImplementedError()
+
+  def result_for(self, test, result):
+    """
+    Method called by the next processor whenever it has a result for some test.
+    This method shouldn't be called by anyone except the next processor.
+    """
+    raise NotImplementedError()
+
+  def heartbeat(self):
+    if self._prev_proc:
+      self._prev_proc.heartbeat()
+
+  def stop(self):
+    if not self._stopped:
+      self._stopped = True
+      if self._prev_proc:
+        self._prev_proc.stop()
+      if self._next_proc:
+        self._next_proc.stop()
+
+  @property
+  def is_stopped(self):
+    return self._stopped
+
+  ### Communication
+
+  def notify_previous(self, event):
+    self._on_event(event)
+    if self._prev_proc:
+      self._prev_proc.notify_previous(event)
+
+  def _on_event(self, event):
+    """Called when processors to the right signal events, e.g. termination.
+
+    Args:
+      event: A text describing the signalled event.
+    """
+    pass
+
+  def _send_test(self, test):
+    """Helper method for sending test to the next processor."""
+    return self._next_proc.next_test(test)
+
+  def _send_result(self, test, result):
+    """Helper method for sending result to the previous processor."""
+    if not test.keep_output:
+      result = self._reduce_result(result)
+    self._prev_proc.result_for(test, result)
+
+
+class TestProcObserver(TestProc):
+  """Processor used for observing the data."""
+  def __init__(self):
+    super(TestProcObserver, self).__init__()
+
+  def next_test(self, test):
+    self._on_next_test(test)
+    return self._send_test(test)
+
+  def result_for(self, test, result):
+    self._on_result_for(test, result)
+    self._send_result(test, result)
+
+  def heartbeat(self):
+    self._on_heartbeat()
+    super(TestProcObserver, self).heartbeat()
+
+  def _on_next_test(self, test):
+    """Method called after receiving test from previous processor but before
+    sending it to the next one."""
+    pass
+
+  def _on_result_for(self, test, result):
+    """Method called after receiving result from next processor but before
+    sending it to the previous one."""
+    pass
+
+  def _on_heartbeat(self):
+    pass
+
+
+class TestProcProducer(TestProc):
+  """Processor for creating subtests."""
+
+  def __init__(self, name):
+    super(TestProcProducer, self).__init__()
+    self._name = name
+
+  def next_test(self, test):
+    return self._next_test(test)
+
+  def result_for(self, subtest, result):
+    self._result_for(subtest.origin, subtest, result)
+
+  ### Implementation
+  def _next_test(self, test):
+    raise NotImplementedError()
+
+  def _result_for(self, test, subtest, result):
+    """
+    result_for method extended with `subtest` parameter.
+
+    Args
+      test: test used by current processor to create the subtest.
+      subtest: test for which the `result` is.
+      result: subtest execution result created by the output processor.
+    """
+    raise NotImplementedError()
+
+  ### Managing subtests
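+  # Example: a producer constructed with name 'Fuzzer' calling
+  # _create_subtest(test, 1) yields a subtest whose procid carries the
+  # 'Fuzzer-1' suffix on top of the parent's procid (assuming
+  # test.create_subtest appends the given suffix).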
+  def _create_subtest(self, test, subtest_id, **kwargs):
+    """Creates subtest with subtest id <processor name>-`subtest_id`."""
+    return test.create_subtest(self, '%s-%s' % (self._name, subtest_id),
+                               **kwargs)
+
+
+class TestProcFilter(TestProc):
+  """Processor for filtering tests."""
+
+  def next_test(self, test):
+    if self._filter(test):
+      return False
+
+    return self._send_test(test)
+
+  def result_for(self, test, result):
+    self._send_result(test, result)
+
+  def _filter(self, test):
+    """Returns whether test should be filtered out."""
+    raise NotImplementedError()
diff --git a/src/third_party/v8/tools/testrunner/testproc/combiner.py b/src/third_party/v8/tools/testrunner/testproc/combiner.py
new file mode 100644
index 0000000..4d992f4
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/testproc/combiner.py
@@ -0,0 +1,127 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+from collections import defaultdict
+import time
+
+from . import base
+from ..objects import testcase
+from ..outproc import base as outproc
+
+class CombinerProc(base.TestProc):
+  def __init__(self, rng, min_group_size, max_group_size, count):
+    """
+    Args:
+      rng: random number generator
+      min_group_size: minimum number of tests to combine
+      max_group_size: maximum number of tests to combine
+      count: how many tests to generate. 0 means infinite running
+    """
+    super(CombinerProc, self).__init__()
+
+    self._rng = rng
+    self._min_size = min_group_size
+    self._max_size = max_group_size
+    self._count = count
+
+    # Index of the last generated test
+    self._current_num = 0
+
+    # {suite name: instance of TestGroups}
+    self._groups = defaultdict(TestGroups)
+
+    # {suite name: instance of TestCombiner}
+    self._combiners = {}
+
+  def setup(self, requirement=base.DROP_RESULT):
+    # Combiner is not able to pass results (even as None) to the previous
+    # processor.
+    assert requirement == base.DROP_RESULT
+    self._next_proc.setup(base.DROP_RESULT)
+
+  def next_test(self, test):
+    group_key = self._get_group_key(test)
+    if not group_key:
+      # Test not suitable for combining
+      return False
+
+    self._groups[test.suite.name].add_test(group_key, test)
+    return True
+
+  def _get_group_key(self, test):
+    combiner = self._get_combiner(test.suite)
+    if not combiner:
+      print ('>>> Warning: There is no combiner for %s testsuite' %
+             test.suite.name)
+      return None
+    return combiner.get_group_key(test)
+
+  def result_for(self, test, result):
+    self._send_next_test()
+
+  def generate_initial_tests(self, num=1):
+    for _ in range(0, num):
+      self._send_next_test()
+
+  def _send_next_test(self):
+    if self.is_stopped:
+      return False
+
+    if self._count and self._current_num >= self._count:
+      return False
+
+    combined_test = self._create_new_test()
+    if not combined_test:
+      # Not enough tests
+      return False
+
+    return self._send_test(combined_test)
+
+  def _create_new_test(self):
+    suite, combiner = self._select_suite()
+    groups = self._groups[suite]
+
+    max_size = self._rng.randint(self._min_size, self._max_size)
+    sample = groups.sample(self._rng, max_size)
+    if not sample:
+      return None
+
+    self._current_num += 1
+    return combiner.combine('%s-%d' % (suite, self._current_num), sample)
+
+  def _select_suite(self):
+    """Returns pair (suite name, combiner)."""
+    selected = self._rng.randint(0, len(self._groups) - 1)
+    for n, suite in enumerate(self._groups):
+      if n == selected:
+        return suite, self._combiners[suite]
+
+  def _get_combiner(self, suite):
+    combiner = self._combiners.get(suite.name)
+    if not combiner:
+      combiner = suite.get_test_combiner()
+      self._combiners[suite.name] = combiner
+    return combiner
+
+
+class TestGroups(object):
+  def __init__(self):
+    self._groups = defaultdict(list)
+    self._keys = []
+
+  def add_test(self, key, test):
+    self._groups[key].append(test)
+    self._keys.append(key)
+
+  def sample(self, rng, max_size):
+    # Not enough tests
+    if not self._groups:
+      return None
+
+    group_key = rng.choice(self._keys)
+    tests = self._groups[group_key]
+    return [rng.choice(tests) for _ in range(0, max_size)]
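+    # Note that tests are drawn with replacement, so the same test may appear
+    # more than once in a combined group.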
diff --git a/src/third_party/v8/tools/testrunner/testproc/execution.py b/src/third_party/v8/tools/testrunner/testproc/execution.py
new file mode 100644
index 0000000..aaf0db1
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/testproc/execution.py
@@ -0,0 +1,95 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+import traceback
+
+from . import base
+from ..local import pool
+
+
+# Global function for multiprocessing, because pickling a static method doesn't
+# work on Windows.
+def run_job(job, process_context):
+  return job.run(process_context)
+
+
+def create_process_context(result_reduction):
+  return ProcessContext(result_reduction)
+
+
+JobResult = collections.namedtuple('JobResult', ['id', 'result'])
+ProcessContext = collections.namedtuple('ProcessContext', ['result_reduction'])
+
+
+class Job(object):
+  def __init__(self, test_id, cmd, outproc, keep_output):
+    self.test_id = test_id
+    self.cmd = cmd
+    self.outproc = outproc
+    self.keep_output = keep_output
+
+  def run(self, process_ctx):
+    output = self.cmd.execute()
+    reduction = process_ctx.result_reduction if not self.keep_output else None
+    result = self.outproc.process(output, reduction)
+    return JobResult(self.test_id, result)
+
+
+class ExecutionProc(base.TestProc):
+  """Last processor in the chain. Instead of passing tests further it creates
+  commands and output processors, executes them in multiple worker processes and
+  sends results to the previous processor.
+  """
+
+  def __init__(self, jobs, outproc_factory=None):
+    super(ExecutionProc, self).__init__()
+    self._pool = pool.Pool(jobs, notify_fun=self.notify_previous)
+    self._outproc_factory = outproc_factory or (lambda t: t.output_proc)
+    self._tests = {}
+
+  def connect_to(self, next_proc):
+    assert False, 'ExecutionProc cannot be connected to anything'
+
+  def run(self):
+    it = self._pool.imap_unordered(
+        fn=run_job,
+        gen=[],
+        process_context_fn=create_process_context,
+        process_context_args=[self._prev_requirement],
+    )
+    for pool_result in it:
+      self._unpack_result(pool_result)
+
+  def next_test(self, test):
+    if self.is_stopped:
+      return False
+
+    test_id = test.procid
+    cmd = test.get_command()
+    self._tests[test_id] = test, cmd
+
+    outproc = self._outproc_factory(test)
+    self._pool.add([Job(test_id, cmd, outproc, test.keep_output)])
+
+    return True
+
+  def result_for(self, test, result):
+    assert False, 'ExecutionProc cannot receive results'
+
+  def stop(self):
+    super(ExecutionProc, self).stop()
+    self._pool.abort()
+
+  def _unpack_result(self, pool_result):
+    if pool_result.heartbeat:
+      self.heartbeat()
+      return
+
+    job_result = pool_result.value
+    test_id, result = job_result
+
+    test, result.cmd = self._tests[test_id]
+    del self._tests[test_id]
+    self._send_result(test, result)
diff --git a/src/third_party/v8/tools/testrunner/testproc/expectation.py b/src/third_party/v8/tools/testrunner/testproc/expectation.py
new file mode 100644
index 0000000..285a599
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/testproc/expectation.py
@@ -0,0 +1,21 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from . import base
+
+from testrunner.local import statusfile
+from testrunner.outproc import base as outproc
+
+class ForgiveTimeoutProc(base.TestProcProducer):
+  """Test processor passing tests and results through and forgiving timeouts."""
+  def __init__(self):
+    super(ForgiveTimeoutProc, self).__init__('no-timeout')
+
+  def _next_test(self, test):
+    subtest = self._create_subtest(test, 'no_timeout')
+    subtest.allow_timeouts()
+    return self._send_test(subtest)
+
+  def _result_for(self, test, subtest, result):
+    self._send_result(test, result)
diff --git a/src/third_party/v8/tools/testrunner/testproc/filter.py b/src/third_party/v8/tools/testrunner/testproc/filter.py
new file mode 100644
index 0000000..e2a5e97
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/testproc/filter.py
@@ -0,0 +1,83 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from collections import defaultdict
+import fnmatch
+
+from . import base
+
+
+class StatusFileFilterProc(base.TestProcFilter):
+  """Filters tests by outcomes from status file.
+
+  Status file has to be loaded before using this function.
+
+  Args:
+    slow_tests_mode: What to do with slow tests.
+    pass_fail_tests_mode: What to do with pass or fail tests.
+
+  Mode options:
+    None (default): don't skip
+    "skip": skip if slow/pass_fail
+    "run": skip if not slow/pass_fail
+  """
+
+  def __init__(self, slow_tests_mode, pass_fail_tests_mode):
+    super(StatusFileFilterProc, self).__init__()
+    self._slow_tests_mode = slow_tests_mode
+    self._pass_fail_tests_mode = pass_fail_tests_mode
+
+  def _filter(self, test):
+    return (
+      test.do_skip or
+      self._skip_slow(test.is_slow) or
+      self._skip_pass_fail(test.is_pass_or_fail)
+    )
+
+  def _skip_slow(self, is_slow):
+    return (
+      (self._slow_tests_mode == 'run' and not is_slow) or
+      (self._slow_tests_mode == 'skip' and is_slow)
+    )
+
+  def _skip_pass_fail(self, is_pass_fail):
+    return (
+      (self._pass_fail_tests_mode == 'run' and not is_pass_fail) or
+      (self._pass_fail_tests_mode == 'skip' and is_pass_fail)
+    )
+
+
+class NameFilterProc(base.TestProcFilter):
+  """Filters tests based on command-line arguments.
+
+  args can be a glob: asterisks in any position of the name
+  represent zero or more characters. Without asterisks, only exact matches
+  are used, with the exception of a bare test-suite name, which selects the
+  whole suite.
+  """
+  def __init__(self, args):
+    super(NameFilterProc, self).__init__()
+
+    self._globs = defaultdict(list)
+    self._exact_matches = defaultdict(dict)
+    for a in args:
+      argpath = a.split('/')
+      suitename = argpath[0]
+      path = '/'.join(argpath[1:]) or '*'
+      if '*' in path:
+        self._globs[suitename].append(path)
+      else:
+        self._exact_matches[suitename][path] = True
+
+    for s, globs in self._globs.iteritems():
+      if not globs or '*' in globs:
+        self._globs[s] = ['*']
+
+  def _filter(self, test):
+    globs = self._globs.get(test.suite.name, [])
+    for g in globs:
+      if g == '*': return False
+      if fnmatch.fnmatch(test.path, g):
+        return False
+    exact_matches = self._exact_matches.get(test.suite.name, {})
+    return test.path not in exact_matches
diff --git a/src/third_party/v8/tools/testrunner/testproc/fuzzer.py b/src/third_party/v8/tools/testrunner/testproc/fuzzer.py
new file mode 100644
index 0000000..b802368
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/testproc/fuzzer.py
@@ -0,0 +1,332 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from collections import namedtuple
+import time
+
+from . import base
+
+
+# Extra flags randomly added to all fuzz tests with numfuzz. List of tuples
+# (probability, flag).
+EXTRA_FLAGS = [
+  (0.1, '--always-opt'),
+  (0.1, '--assert-types'),
+  # TODO(mythria): https://crbug.com/v8/10243
+  # (0.1, '--budget-for-feedback-vector-allocation=0'),
+  (0.1, '--cache=code'),
+  (0.1, '--force-slow-path'),
+  (0.2, '--future'),
+  (0.1, '--interrupt-budget=100'),
+  (0.1, '--liftoff'),
+  (0.2, '--no-analyze-environment-liveness'),
+  (0.1, '--no-enable-sse3'),
+  (0.1, '--no-enable-ssse3'),
+  (0.1, '--no-enable-sse4_1'),
+  (0.1, '--no-enable-sse4_2'),
+  (0.1, '--no-enable-sahf'),
+  (0.1, '--no-enable-avx'),
+  (0.1, '--no-enable-fma3'),
+  (0.1, '--no-enable-bmi1'),
+  (0.1, '--no-enable-bmi2'),
+  (0.1, '--no-enable-lzcnt'),
+  (0.1, '--no-enable-popcnt'),
+  (0.3, '--no-lazy-feedback-allocation'),
+  (0.1, '--no-liftoff'),
+  (0.1, '--no-opt'),
+  (0.2, '--no-regexp-tier-up'),
+  (0.1, '--no-wasm-tier-up'),
+  (0.1, '--regexp-interpret-all'),
+  (0.1, '--regexp-tier-up-ticks=10'),
+  (0.1, '--regexp-tier-up-ticks=100'),
+  (0.1, '--stress-background-compile'),
+  (0.1, '--stress-lazy-source-positions'),
+  (0.1, '--stress-wasm-code-gc'),
+  (0.1, '--turbo-instruction-scheduling'),
+  (0.1, '--turbo-stress-instruction-scheduling'),
+]
+
+def random_extra_flags(rng):
+  """Returns a random list of flags chosen from the configurations in
+  EXTRA_FLAGS.
+  """
+  return [flag for prob, flag in EXTRA_FLAGS if rng.random() < prob]
+
+
+class FuzzerConfig(object):
+  def __init__(self, probability, analyzer, fuzzer):
+    """
+    Args:
+      probability: integer weight in (0; 10] for choosing this fuzzer
+      analyzer: instance of Analyzer class, can be None if no analysis is needed
+      fuzzer: instance of Fuzzer class
+    """
+    assert probability > 0 and probability <= 10
+
+    self.probability = probability
+    self.analyzer = analyzer
+    self.fuzzer = fuzzer
+
+
+class Analyzer(object):
+  def get_analysis_flags(self):
+    raise NotImplementedError()
+
+  def do_analysis(self, result):
+    raise NotImplementedError()
+
+
+class Fuzzer(object):
+  def create_flags_generator(self, rng, test, analysis_value):
+    """
+    Args:
+      rng: random number generator
+      test: test for which to create flags
+      analysis_value: value returned by the analyzer. None if there is no
+        analyzer corresponding to this fuzzer or if the analysis phase is
+        disabled
+    """
+    raise NotImplementedError()
+
+
+# TODO(majeski): Allow multiple subtests to run at once.
+class FuzzerProc(base.TestProcProducer):
+  def __init__(self, rng, count, fuzzers, disable_analysis=False):
+    """
+    Args:
+      rng: random number generator used to select flags and values for them
+      count: number of tests to generate based on each base test
+      fuzzers: list of FuzzerConfig instances
+      disable_analysis: disables the analysis phase and the filtering based on
+        it. When set, the processor passes None as the analysis result to
+        fuzzers
+    """
+    super(FuzzerProc, self).__init__('Fuzzer')
+
+    self._rng = rng
+    self._count = count
+    self._fuzzer_configs = fuzzers
+    self._disable_analysis = disable_analysis
+    self._gens = {}
+
+  def setup(self, requirement=base.DROP_RESULT):
+    # Fuzzer is optimized to not store the results
+    assert requirement == base.DROP_RESULT
+    super(FuzzerProc, self).setup(requirement)
+
+  def _next_test(self, test):
+    if self.is_stopped:
+      return False
+
+    analysis_subtest = self._create_analysis_subtest(test)
+    if analysis_subtest:
+      return self._send_test(analysis_subtest)
+
+    self._gens[test.procid] = self._create_gen(test)
+    return self._try_send_next_test(test)
+
+  def _create_analysis_subtest(self, test):
+    if self._disable_analysis:
+      return None
+
+    analysis_flags = []
+    for fuzzer_config in self._fuzzer_configs:
+      if fuzzer_config.analyzer:
+        analysis_flags += fuzzer_config.analyzer.get_analysis_flags()
+
+    if analysis_flags:
+      analysis_flags = list(set(analysis_flags))
+      return self._create_subtest(test, 'analysis', flags=analysis_flags,
+                                  keep_output=True)
+
+  def _result_for(self, test, subtest, result):
+    if not self._disable_analysis:
+      if result is not None:
+        # Analysis phase, for fuzzing we drop the result.
+        if result.has_unexpected_output:
+          self._send_result(test, None)
+          return
+
+        self._gens[test.procid] = self._create_gen(test, result)
+
+    self._try_send_next_test(test)
+
+  def _create_gen(self, test, analysis_result=None):
+    # This is called with analysis_result==None only when there is no analysis
+    # phase at all, i.e. no fuzzer has its own analyzer.
+    gens = []
+    indexes = []
+    for fuzzer_config in self._fuzzer_configs:
+      analysis_value = None
+      if analysis_result and fuzzer_config.analyzer:
+        analysis_value = fuzzer_config.analyzer.do_analysis(analysis_result)
+        if not analysis_value:
+          # Skip fuzzer for this test since it doesn't have analysis data
+          continue
+      p = fuzzer_config.probability
+      flag_gen = fuzzer_config.fuzzer.create_flags_generator(self._rng, test,
+                                                             analysis_value)
+      indexes += [len(gens)] * p
+      gens.append((p, flag_gen))
+
+    if not gens:
+      # No fuzzers for this test, skip it
+      return
+
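+    # Weighting (descriptive note): `indexes` holds each config's index `p`
+    # times, so rng.choice picks the main fuzzer with probability proportional
+    # to its p; every other fuzzer's flags are additionally mixed in with
+    # probability p/10 via the randint(1, 10) check below.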
+    i = 0
+    while not self._count or i < self._count:
+      main_index = self._rng.choice(indexes)
+      _, main_gen = gens[main_index]
+
+      flags = random_extra_flags(self._rng) + next(main_gen)
+      for index, (p, gen) in enumerate(gens):
+        if index == main_index:
+          continue
+        if self._rng.randint(1, 10) <= p:
+          flags += next(gen)
+
+      flags.append('--fuzzer-random-seed=%s' % self._next_seed())
+      yield self._create_subtest(test, str(i), flags=flags)
+
+      i += 1
+
+  def _try_send_next_test(self, test):
+    if not self.is_stopped:
+      for subtest in self._gens[test.procid]:
+        if self._send_test(subtest):
+          return True
+
+    del self._gens[test.procid]
+    return False
+
+  def _next_seed(self):
+    seed = None
+    while not seed:
+      seed = self._rng.randint(-2147483648, 2147483647)
+    return seed
+
+
+class ScavengeAnalyzer(Analyzer):
+  def get_analysis_flags(self):
+    return ['--fuzzer-gc-analysis']
+
+  def do_analysis(self, result):
+    for line in reversed(result.output.stdout.splitlines()):
+      if line.startswith('### Maximum new space size reached = '):
+        return int(float(line.split()[7]))
+
+
+class ScavengeFuzzer(Fuzzer):
+  def create_flags_generator(self, rng, test, analysis_value):
+    while True:
+      yield ['--stress-scavenge=%d' % (analysis_value or 100)]
+
+
+class MarkingAnalyzer(Analyzer):
+  def get_analysis_flags(self):
+    return ['--fuzzer-gc-analysis']
+
+  def do_analysis(self, result):
+    for line in reversed(result.output.stdout.splitlines()):
+      if line.startswith('### Maximum marking limit reached = '):
+        return int(float(line.split()[6]))
+
+
+class MarkingFuzzer(Fuzzer):
+  def create_flags_generator(self, rng, test, analysis_value):
+    while True:
+      yield ['--stress-marking=%d' % (analysis_value or 100)]
+
+
+class GcIntervalAnalyzer(Analyzer):
+  def get_analysis_flags(self):
+    return ['--fuzzer-gc-analysis']
+
+  def do_analysis(self, result):
+    for line in reversed(result.output.stdout.splitlines()):
+      if line.startswith('### Allocations = '):
+        return int(float(line.split()[3][:-1]))
+
+
+class GcIntervalFuzzer(Fuzzer):
+  def create_flags_generator(self, rng, test, analysis_value):
+    if analysis_value:
+      value = analysis_value // 10
+    else:
+      value = 10000
+    while True:
+      yield ['--random-gc-interval=%d' % value]
+
+
+class CompactionFuzzer(Fuzzer):
+  def create_flags_generator(self, rng, test, analysis_value):
+    while True:
+      yield ['--stress-compaction-random']
+
+
+class TaskDelayFuzzer(Fuzzer):
+  def create_flags_generator(self, rng, test, analysis_value):
+    while True:
+      yield ['--stress-delay-tasks']
+
+
+class ThreadPoolSizeFuzzer(Fuzzer):
+  def create_flags_generator(self, rng, test, analysis_value):
+    while True:
+      yield ['--thread-pool-size=%d' % rng.randint(1, 8)]
+
+
+class DeoptAnalyzer(Analyzer):
+  MAX_DEOPT=1000000000
+
+  def __init__(self, min_interval):
+    super(DeoptAnalyzer, self).__init__()
+    self._min = min_interval
+
+  def get_analysis_flags(self):
+    return ['--deopt-every-n-times=%d' % self.MAX_DEOPT,
+            '--print-deopt-stress']
+
+  def do_analysis(self, result):
+    for line in reversed(result.output.stdout.splitlines()):
+      if line.startswith('=== Stress deopt counter: '):
+        counter = self.MAX_DEOPT - int(line.split(' ')[-1])
+        if counter < self._min:
+          # Skip this test since we won't generate any meaningful interval with
+          # given minimum.
+          return None
+        return counter
+
+
+class DeoptFuzzer(Fuzzer):
+  def __init__(self, min_interval):
+    super(DeoptFuzzer, self).__init__()
+    self._min = min_interval
+
+  def create_flags_generator(self, rng, test, analysis_value):
+    while True:
+      if analysis_value:
+        value = analysis_value // 2
+      else:
+        value = 10000
+      interval = rng.randint(self._min, max(value, self._min))
+      yield ['--deopt-every-n-times=%d' % interval]
+
+
+FUZZERS = {
+  'compaction': (None, CompactionFuzzer),
+  'delay': (None, TaskDelayFuzzer),
+  'deopt': (DeoptAnalyzer, DeoptFuzzer),
+  'gc_interval': (GcIntervalAnalyzer, GcIntervalFuzzer),
+  'marking': (MarkingAnalyzer, MarkingFuzzer),
+  'scavenge': (ScavengeAnalyzer, ScavengeFuzzer),
+  'threads': (None, ThreadPoolSizeFuzzer),
+}
+
+
+def create_fuzzer_config(name, probability, *args, **kwargs):
+  analyzer_class, fuzzer_class = FUZZERS[name]
+  return FuzzerConfig(
+      probability,
+      analyzer_class(*args, **kwargs) if analyzer_class else None,
+      fuzzer_class(*args, **kwargs),
+  )
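+
+
+# Usage sketch (illustrative only; the real wiring lives in the num-fuzzer
+# entry points of the test runner):
+#
+#   import random
+#   configs = [
+#       create_fuzzer_config('deopt', 5, 100),    # min_interval=100
+#       create_fuzzer_config('marking', 2),
+#   ]
+#   proc = FuzzerProc(random.Random(42), count=3, fuzzers=configs)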
diff --git a/src/third_party/v8/tools/testrunner/testproc/loader.py b/src/third_party/v8/tools/testrunner/testproc/loader.py
new file mode 100644
index 0000000..f4afeae
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/testproc/loader.py
@@ -0,0 +1,42 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from . import base
+
+
+class LoadProc(base.TestProc):
+  """First processor in the chain that passes all tests to the next processor.
+  """
+
+  def __init__(self, tests):
+    super(LoadProc, self).__init__()
+
+    self.tests = tests
+
+  def load_initial_tests(self, initial_batch_size):
+    """
+    Args:
+      initial_batch_size: initial number of tests to load
+    """
+    loaded_tests = 0
+    while loaded_tests < initial_batch_size:
+      try:
+        t = next(self.tests)
+      except StopIteration:
+        return
+
+      if self._send_test(t):
+        loaded_tests += 1
+
+  def next_test(self, test):
+    assert False, 'Nothing can be connected to the LoadProc'
+
+  def result_for(self, test, result):
+    try:
+      while not self._send_test(next(self.tests)):
+        pass
+    except StopIteration:
+      # No more tests to load.
+      pass
diff --git a/src/third_party/v8/tools/testrunner/testproc/progress.py b/src/third_party/v8/tools/testrunner/testproc/progress.py
new file mode 100644
index 0000000..9ff943a
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/testproc/progress.py
@@ -0,0 +1,442 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+from __future__ import absolute_import
+
+import datetime
+import json
+import os
+import platform
+import sys
+import time
+
+from . import base
+from . import util
+
+
+def print_failure_header(test):
+  if test.output_proc.negative:
+    negative_marker = '[negative] '
+  else:
+    negative_marker = ''
+  print("=== %(label)s %(negative)s===" % {
+    'label': test,
+    'negative': negative_marker,
+  })
+
+
+class ResultsTracker(base.TestProcObserver):
+  """Tracks number of results and stops to run tests if max_failures reached."""
+  def __init__(self, max_failures):
+    super(ResultsTracker, self).__init__()
+    self._requirement = base.DROP_OUTPUT
+
+    self.failed = 0
+    self.remaining = 0
+    self.total = 0
+    self.max_failures = max_failures
+
+  def _on_next_test(self, test):
+    self.total += 1
+    self.remaining += 1
+
+  def _on_result_for(self, test, result):
+    self.remaining -= 1
+    if result.has_unexpected_output:
+      self.failed += 1
+      if self.max_failures and self.failed >= self.max_failures:
+        print('>>> Too many failures, exiting...')
+        self.stop()
+
+
+class ProgressIndicator(base.TestProcObserver):
+  def __init__(self):
+    super(base.TestProcObserver, self).__init__()
+    self.options = None
+
+  def finished(self):
+    pass
+
+  def configure(self, options):
+    self.options = options
+
+
+class SimpleProgressIndicator(ProgressIndicator):
+  def __init__(self):
+    super(SimpleProgressIndicator, self).__init__()
+    self._requirement = base.DROP_PASS_OUTPUT
+
+    self._failed = []
+
+  def _on_result_for(self, test, result):
+    # TODO(majeski): Support for dummy/grouped results
+    if result.has_unexpected_output:
+      self._failed.append((test, result))
+
+  def finished(self):
+    crashed = 0
+    print()
+    for test, result in self._failed:
+      print_failure_header(test)
+      if result.output.stderr:
+        print("--- stderr ---")
+        print(result.output.stderr.strip())
+      if result.output.stdout:
+        print("--- stdout ---")
+        print(result.output.stdout.strip())
+      print("Command: %s" % result.cmd.to_string())
+      if result.output.HasCrashed():
+        print("exit code: %s" % result.output.exit_code_string)
+        print("--- CRASHED ---")
+        crashed += 1
+      if result.output.HasTimedOut():
+        print("--- TIMEOUT ---")
+    if len(self._failed) == 0:
+      print("===")
+      print("=== All tests succeeded")
+      print("===")
+    else:
+      print()
+      print("===")
+      print("=== %i tests failed" % len(self._failed))
+      if crashed > 0:
+        print("=== %i tests CRASHED" % crashed)
+      print("===")
+
+
+class StreamProgressIndicator(ProgressIndicator):
+  def __init__(self):
+    super(StreamProgressIndicator, self).__init__()
+    self._requirement = base.DROP_PASS_OUTPUT
+
+  def _on_result_for(self, test, result):
+    if not result.has_unexpected_output:
+      self.print('PASS', test)
+    elif result.output.HasCrashed():
+      self.print("CRASH", test)
+    elif result.output.HasTimedOut():
+      self.print("TIMEOUT", test)
+    else:
+      if test.is_fail:
+        self.print("UNEXPECTED PASS", test)
+      else:
+        self.print("FAIL", test)
+
+  def print(self, prefix, test):
+    print('%s: %s' % (prefix, test))
+    sys.stdout.flush()
+
+
+class VerboseProgressIndicator(SimpleProgressIndicator):
+  def __init__(self):
+    super(VerboseProgressIndicator, self).__init__()
+    self._last_printed_time = time.time()
+
+  def _print(self, text):
+    print(text)
+    sys.stdout.flush()
+    self._last_printed_time = time.time()
+
+  def _message(self, test, result):
+    # TODO(majeski): Support for dummy/grouped results
+    if result.has_unexpected_output:
+      if result.output.HasCrashed():
+        outcome = 'CRASH'
+      else:
+        outcome = 'FAIL'
+    else:
+      outcome = 'pass'
+    return '%s %s: %s' % (
+      test, test.variant or 'default', outcome)
+
+  def _on_result_for(self, test, result):
+    super(VerboseProgressIndicator, self)._on_result_for(test, result)
+    self._print(self._message(test, result))
+
+  # TODO(machenbach): Remove this platform specific hack and implement a proper
+  # feedback channel from the workers, providing which tests are currently run.
+  def _print_processes_linux(self):
+    if platform.system() == 'Linux':
+      self._print('List of processes:')
+      for pid, cmd in util.list_processes_linux():
+        # Show command with pid, but other process info cut off.
+        self._print('pid: %d cmd: %s' % (pid, cmd))
+
+  def _ensure_delay(self, delay):
+    return time.time() - self._last_printed_time > delay
+
+  def _on_heartbeat(self):
+    if self._ensure_delay(30):
+      # Print something every 30 seconds to not get killed by an output
+      # timeout.
+      self._print('Still working...')
+      self._print_processes_linux()
+
+  def _on_event(self, event):
+    self._print(event)
+    self._print_processes_linux()
+
+
+class CIProgressIndicator(VerboseProgressIndicator):
+  def _on_result_for(self, test, result):
+    super(VerboseProgressIndicator, self)._on_result_for(test, result)
+    if self.options.ci_test_completion:
+      with open(self.options.ci_test_completion, "a") as f:
+        f.write(self._message(test, result) + "\n")
+    self._output_feedback()
+
+  def _output_feedback(self):
+    """Reduced the verbosity leads to getting killed by an ouput timeout.
+    We ensure output every minute.
+    """
+    if self._ensure_delay(60):
+      dt = time.time()
+      st = datetime.datetime.fromtimestamp(dt).strftime('%Y-%m-%d %H:%M:%S')
+      self._print(st)
+
+
+class DotsProgressIndicator(SimpleProgressIndicator):
+  def __init__(self):
+    super(DotsProgressIndicator, self).__init__()
+    self._count = 0
+
+  def _on_result_for(self, test, result):
+    super(DotsProgressIndicator, self)._on_result_for(test, result)
+    # TODO(majeski): Support for dummy/grouped results
+    self._count += 1
+    if self._count > 1 and self._count % 50 == 1:
+      sys.stdout.write('\n')
+    if result.has_unexpected_output:
+      if result.output.HasCrashed():
+        sys.stdout.write('C')
+        sys.stdout.flush()
+      elif result.output.HasTimedOut():
+        sys.stdout.write('T')
+        sys.stdout.flush()
+      else:
+        sys.stdout.write('F')
+        sys.stdout.flush()
+    else:
+      sys.stdout.write('.')
+      sys.stdout.flush()
+
+
+class CompactProgressIndicator(ProgressIndicator):
+  def __init__(self, templates):
+    super(CompactProgressIndicator, self).__init__()
+    self._requirement = base.DROP_PASS_OUTPUT
+
+    self._templates = templates
+    self._last_status_length = 0
+    self._start_time = time.time()
+
+    self._passed = 0
+    self._failed = 0
+
+  def set_test_count(self, test_count):
+    self._total = test_count
+
+  def _on_result_for(self, test, result):
+    # TODO(majeski): Support for dummy/grouped results
+    if result.has_unexpected_output:
+      self._failed += 1
+    else:
+      self._passed += 1
+
+    self._print_progress(str(test))
+    if result.has_unexpected_output:
+      output = result.output
+      stdout = output.stdout.strip()
+      stderr = output.stderr.strip()
+
+      self._clear_line(self._last_status_length)
+      print_failure_header(test)
+      if len(stdout):
+        self.printFormatted('stdout', stdout)
+      if len(stderr):
+        self.printFormatted('stderr', stderr)
+      self.printFormatted(
+          'command', "Command: %s" % result.cmd.to_string(relative=True))
+      if output.HasCrashed():
+        self.printFormatted(
+            'failure', "exit code: %s" % output.exit_code_string)
+        self.printFormatted('failure', "--- CRASHED ---")
+      elif output.HasTimedOut():
+        self.printFormatted('failure', "--- TIMEOUT ---")
+      else:
+        if test.is_fail:
+          self.printFormatted('failure', "--- UNEXPECTED PASS ---")
+          if test.expected_failure_reason is not None:
+            self.printFormatted('failure', test.expected_failure_reason)
+        else:
+          self.printFormatted('failure', "--- FAILED ---")
+
+  def finished(self):
+    self._print_progress('Done')
+    print()
+
+  def _print_progress(self, name):
+    self._clear_line(self._last_status_length)
+    elapsed = time.time() - self._start_time
+    if self._total:
+      progress = (self._passed + self._failed) * 100 // self._total
+    else:
+      progress = 0
+    status = self._templates['status_line'] % {
+      'passed': self._passed,
+      'progress': progress,
+      'failed': self._failed,
+      'test': name,
+      'mins': int(elapsed) // 60,
+      'secs': int(elapsed) % 60
+    }
+    status = self._truncateStatusLine(status, 78)
+    self._last_status_length = len(status)
+    print(status, end='')
+    sys.stdout.flush()
+
+  def _truncateStatusLine(self, string, length):
+    if length and len(string) > (length - 3):
+      return string[:(length - 3)] + "..."
+    else:
+      return string
+
+  def _clear_line(self, last_length):
+    raise NotImplementedError()
+
+
+class ColorProgressIndicator(CompactProgressIndicator):
+  def __init__(self):
+    templates = {
+      'status_line': ("[%(mins)02i:%(secs)02i|"
+                      "\033[34m%%%(progress) 4d\033[0m|"
+                      "\033[32m+%(passed) 4d\033[0m|"
+                      "\033[31m-%(failed) 4d\033[0m]: %(test)s"),
+      'stdout': "\033[1m%s\033[0m",
+      'stderr': "\033[31m%s\033[0m",
+      'failure': "\033[1;31m%s\033[0m",
+      'command': "\033[33m%s\033[0m",
+    }
+    super(ColorProgressIndicator, self).__init__(templates)
+
+  def printFormatted(self, format, string):
+    print(self._templates[format] % string)
+
+  def _truncateStatusLine(self, string, length):
+    # Add some slack for the color control chars
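+    # Each of the three colored fields adds 9 non-printing characters
+    # ("\033[3Xm" + "\033[0m"), hence the 3*9 below.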
+    return super(ColorProgressIndicator, self)._truncateStatusLine(
+        string, length + 3*9)
+
+  def _clear_line(self, last_length):
+    print("\033[1K\r", end='')
+
+
+class MonochromeProgressIndicator(CompactProgressIndicator):
+  def __init__(self):
+    templates = {
+      'status_line': ("[%(mins)02i:%(secs)02i|%%%(progress) 4d|"
+                      "+%(passed) 4d|-%(failed) 4d]: %(test)s"),
+    }
+    super(MonochromeProgressIndicator, self).__init__(templates)
+
+  def printFormatted(self, format, string):
+    print(string)
+
+  def _clear_line(self, last_length):
+    print(("\r" + (" " * last_length) + "\r"), end='')
+
+
+class JsonTestProgressIndicator(ProgressIndicator):
+  def __init__(self, framework_name):
+    super(JsonTestProgressIndicator, self).__init__()
+    # We want to drop stdout/err for all passed tests on the first try, but we
+    # need to get outputs for all runs after the first one. To accommodate that,
+    # reruns are set to keep the result no matter what requirement says, i.e.
+    # keep_output set to True in the RerunProc.
+    self._requirement = base.DROP_PASS_STDOUT
+
+    self.framework_name = framework_name
+    self.results = []
+    self.duration_sum = 0
+    self.test_count = 0
+
+  def configure(self, options):
+    super(JsonTestProgressIndicator, self).configure(options)
+    self.tests = util.FixedSizeTopList(
+        self.options.slow_tests_cutoff,
+        key=lambda rec: rec['duration'])
+
+  def _on_result_for(self, test, result):
+    if result.is_rerun:
+      self.process_results(test, result.results)
+    else:
+      self.process_results(test, [result])
+
+  def process_results(self, test, results):
+    for run, result in enumerate(results):
+      # TODO(majeski): Support for dummy/grouped results
+      output = result.output
+
+      self._buffer_slow_tests(test, result, output, run)
+
+      # Omit tests that run as expected on the first try.
+      # Everything that happens after the first run is included in the output
+      # even if it flakily passes.
+      if not result.has_unexpected_output and run == 0:
+        continue
+
+      record = self._test_record(test, result, output, run)
+      record.update({
+          "result": test.output_proc.get_outcome(output),
+          "stdout": output.stdout,
+          "stderr": output.stderr,
+        })
+      self.results.append(record)
+
+  def _buffer_slow_tests(self, test, result, output, run):
+    def result_value(test, result, output):
+      if not result.has_unexpected_output:
+        return ""
+      return test.output_proc.get_outcome(output)
+
+    record = self._test_record(test, result, output, run)
+    record.update({
+        "result": result_value(test, result, output),
+        "marked_slow": test.is_slow,
+      })
+    self.tests.add(record)
+    self.duration_sum += record['duration']
+    self.test_count += 1
+
+  def _test_record(self, test, result, output, run):
+    return {
+        "name": str(test),
+        "flags": result.cmd.args,
+        "command": result.cmd.to_string(relative=True),
+        "run": run + 1,
+        "exit_code": output.exit_code,
+        "expected": test.expected_outcomes,
+        "duration": output.duration,
+        "random_seed": test.random_seed,
+        "target_name": test.get_shell(),
+        "variant": test.variant,
+        "variant_flags": test.variant_flags,
+        "framework_name": self.framework_name,
+      }
+
+  def finished(self):
+    duration_mean = None
+    if self.test_count:
+      duration_mean = self.duration_sum / self.test_count
+
+    result = {
+      "results": self.results,
+      "slowest_tests": self.tests.as_list(),
+      "duration_mean": duration_mean,
+      "test_total": self.test_count,
+    }
+
+    with open(self.options.json_test_results, "w") as f:
+      json.dump(result, f)
diff --git a/src/third_party/v8/tools/testrunner/testproc/rerun.py b/src/third_party/v8/tools/testrunner/testproc/rerun.py
new file mode 100644
index 0000000..d085c55
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/testproc/rerun.py
@@ -0,0 +1,59 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import collections
+
+from . import base
+from .result import RerunResult
+
+
+class RerunProc(base.TestProcProducer):
+  def __init__(self, rerun_max, rerun_max_total=None):
+    super(RerunProc, self).__init__('Rerun')
+    self._requirement = base.DROP_OUTPUT
+
+    self._rerun = {}
+    self._results = collections.defaultdict(list)
+    self._rerun_max = rerun_max
+    self._rerun_total_left = rerun_max_total
+
+  def _next_test(self, test):
+    return self._send_next_subtest(test)
+
+  def _result_for(self, test, subtest, result):
+    # First result
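+    # (Subtest ids are created as str(run + 1) in _send_next_subtest, so the
+    # procid of the first run ends with '-1'.)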
+    if subtest.procid[-2:] == '-1':
+      # Passed, no reruns
+      if not result.has_unexpected_output:
+        self._send_result(test, result)
+        return
+
+      self._rerun[test.procid] = 0
+
+    results = self._results[test.procid]
+    results.append(result)
+
+    if not self.is_stopped and self._needs_rerun(test, result):
+      self._rerun[test.procid] += 1
+      if self._rerun_total_left is not None:
+        self._rerun_total_left -= 1
+      self._send_next_subtest(test, self._rerun[test.procid])
+    else:
+      result = RerunResult.create(results)
+      self._finalize_test(test)
+      self._send_result(test, result)
+
+  def _needs_rerun(self, test, result):
+    # TODO(majeski): Limit reruns count for slow tests.
+    return ((self._rerun_total_left is None or self._rerun_total_left > 0) and
+            self._rerun[test.procid] < self._rerun_max and
+            result.has_unexpected_output)
+
+  def _send_next_subtest(self, test, run=0):
+    subtest = self._create_subtest(test, str(run + 1), keep_output=(run != 0))
+    return self._send_test(subtest)
+
+  def _finalize_test(self, test):
+    del self._rerun[test.procid]
+    del self._results[test.procid]
diff --git a/src/third_party/v8/tools/testrunner/testproc/result.py b/src/third_party/v8/tools/testrunner/testproc/result.py
new file mode 100644
index 0000000..c817fc0
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/testproc/result.py
@@ -0,0 +1,97 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+class ResultBase(object):
+  @property
+  def is_skipped(self):
+    return False
+
+  @property
+  def is_grouped(self):
+    return False
+
+  @property
+  def is_rerun(self):
+    return False
+
+
+class Result(ResultBase):
+  """Result created by the output processor."""
+
+  def __init__(self, has_unexpected_output, output, cmd=None):
+    self.has_unexpected_output = has_unexpected_output
+    self.output = output
+    self.cmd = cmd
+
+
+class GroupedResult(ResultBase):
+  """Result consisting of multiple results. It can be used by processors that
+  create multiple subtests for each test and want to pass all results back.
+  """
+
+  @staticmethod
+  def create(results):
+    """Create grouped result from the list of results. It filters out skipped
+    results. If all results are skipped results it returns skipped result.
+
+    Args:
+      results: list of pairs (test, result)
+    """
+    results = [(t, r) for (t, r) in results if not r.is_skipped]
+    if not results:
+      return SKIPPED
+    return GroupedResult(results)
+
+  def __init__(self, results):
+    self.results = results
+
+  @property
+  def is_grouped(self):
+    return True
+
+
+class SkippedResult(ResultBase):
+  """Result without any meaningful value. Used primarily to inform the test
+  processor that it's test wasn't executed.
+  """
+
+  @property
+  def is_skipped(self):
+    return True
+
+
+SKIPPED = SkippedResult()
+
+
+class RerunResult(Result):
+  """Result generated from several reruns of the same test. It's a subclass of
+  Result since the result of rerun is result of the last run. In addition to
+  normal result it contains results of all reruns.
+  """
+  @staticmethod
+  def create(results):
+    """Create RerunResult based on list of results. List cannot be empty. If it
+    has only one element it's returned as a result.
+    """
+    assert results
+
+    if len(results) == 1:
+      return results[0]
+    return RerunResult(results)
+
+  def __init__(self, results):
+    """Has unexpected output and the output itself of the RerunResult equals to
+    the last result in the passed list.
+    """
+    assert results
+
+    last = results[-1]
+    super(RerunResult, self).__init__(last.has_unexpected_output, last.output,
+                                      last.cmd)
+    self.results = results
+
+  @property
+  def is_rerun(self):
+    return True
diff --git a/src/third_party/v8/tools/testrunner/testproc/seed.py b/src/third_party/v8/tools/testrunner/testproc/seed.py
new file mode 100644
index 0000000..160eac8
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/testproc/seed.py
@@ -0,0 +1,63 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import random
+from collections import defaultdict
+
+from . import base
+from ..utils import random_utils
+
+
+class SeedProc(base.TestProcProducer):
+  def __init__(self, count, seed=None, parallel_subtests=1):
+    """
+    Args:
+      count: How many subtests with different seeds to create for each test.
+        0 means infinite.
+      seed: seed to use. None means random seed for each subtest.
+      parallel_subtests: How many subtests of each test to run at the same time.
+    """
+    super(SeedProc, self).__init__('Seed')
+    self._count = count
+    self._seed = seed
+    self._last_idx = defaultdict(int)
+    self._todo = defaultdict(int)
+    self._parallel_subtests = parallel_subtests
+    if count:
+      self._parallel_subtests = min(self._parallel_subtests, count)
+
+  def setup(self, requirement=base.DROP_RESULT):
+    super(SeedProc, self).setup(requirement)
+
+    # SeedProc is optimized for dropping the result
+    assert requirement == base.DROP_RESULT
+
+  def _next_test(self, test):
+    is_loaded = False
+    for _ in range(0, self._parallel_subtests):
+      is_loaded |= self._try_send_next_test(test)
+
+    return is_loaded
+
+  def _result_for(self, test, subtest, result):
+    self._todo[test.procid] -= 1
+    if not self._try_send_next_test(test):
+      if not self._todo.get(test.procid):
+        del self._last_idx[test.procid]
+        del self._todo[test.procid]
+        self._send_result(test, None)
+
+  def _try_send_next_test(self, test):
+    def create_subtest(idx):
+      seed = self._seed or random_utils.random_seed()
+      return self._create_subtest(test, idx, random_seed=seed)
+
+    num = self._last_idx[test.procid]
+    if not self._count or num < self._count:
+      num += 1
+      self._todo[test.procid] += 1
+      self._last_idx[test.procid] = num
+      return self._send_test(create_subtest(num))
+
+    return False
diff --git a/src/third_party/v8/tools/testrunner/testproc/shard.py b/src/third_party/v8/tools/testrunner/testproc/shard.py
new file mode 100644
index 0000000..9475ea1
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/testproc/shard.py
@@ -0,0 +1,38 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from . import base
+
+
+# Alphabet size determines the hashing radix. Choosing a prime number prevents
+# clustering of the hashes.
+HASHING_ALPHABET_SIZE = 2 ** 7 - 1
+
+def radix_hash(capacity, key):
+  h = 0
+  for character in key:
+    h = (h * HASHING_ALPHABET_SIZE + ord(character)) % capacity
+
+  return h
+
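+# Worked example (illustrative): radix_hash(capacity=7, key="ab")
+#   = ((0 * 127 + ord('a')) % 7 * 127 + ord('b')) % 7
+#   = (6 * 127 + 98) % 7 = 860 % 7 = 6,
+# which matches test_hash_string in shard_unittest.py.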
+
+class ShardProc(base.TestProcFilter):
+  """Processor distributing tests between shards.
+  It hashes the unique test identifiers and uses the hash to assign tests to shards.
+  """
+  def __init__(self, myid, shards_count):
+    """
+    Args:
+      myid: id of the shard within [0; shards_count - 1]
+      shards_count: number of shards
+    """
+    super(ShardProc, self).__init__()
+
+    assert myid >= 0 and myid < shards_count
+
+    self._myid = myid
+    self._shards_count = shards_count
+
+  def _filter(self, test):
+    return self._myid != radix_hash(self._shards_count, test.procid)
diff --git a/src/third_party/v8/tools/testrunner/testproc/shard_unittest.py b/src/third_party/v8/tools/testrunner/testproc/shard_unittest.py
new file mode 100755
index 0000000..33a094e
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/testproc/shard_unittest.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+import tempfile
+import unittest
+
+# Needed because the test runner contains relative imports.
+TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.dirname(
+    os.path.abspath(__file__))))
+sys.path.append(TOOLS_PATH)
+
+from testrunner.testproc.shard import radix_hash
+
+
+class TestRadixHashing(unittest.TestCase):
+  def test_hash_character_by_radix(self):
+    self.assertEqual(97, radix_hash(capacity=2**32, key="a"))
+
+  def test_hash_character_by_radix_with_capacity(self):
+    self.assertEqual(6, radix_hash(capacity=7, key="a"))
+
+  def test_hash_string(self):
+    self.assertEqual(6, radix_hash(capacity=7, key="ab"))
+
+  def test_hash_test_id(self):
+    self.assertEqual(
+      5,
+      radix_hash(capacity=7,
+                 key="test262/Map/class-private-method-Variant-0-1"))
+
+  def test_hash_boundaries(self):
+    total_variants = 5
+    cases = []
+    for case in [
+      "test262/Map/class-private-method",
+      "test262/Map/class-public-method",
+      "test262/Map/object-retrieval",
+      "test262/Map/object-deletion",
+      "test262/Map/object-creation",
+      "test262/Map/garbage-collection",
+    ]:
+      for variant_index in range(total_variants):
+        cases.append("%s-Variant-%d" % (case, variant_index))
+
+    for case in cases:
+      self.assertTrue(0 <= radix_hash(capacity=7, key=case) < 7)
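+
+  def test_each_key_maps_to_exactly_one_shard(self):
+    # Illustrative addition: radix_hash assigns every key a single shard id in
+    # [0, capacity), so the ShardProc filters of all shards partition the tests.
+    capacity = 4
+    for i in range(20):
+      key = "test262/Map/class-private-method-Variant-%d" % i
+      shard_ids = [s for s in range(capacity)
+                   if radix_hash(capacity, key) == s]
+      self.assertEqual(1, len(shard_ids))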
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/src/third_party/v8/tools/testrunner/testproc/sigproc.py b/src/third_party/v8/tools/testrunner/testproc/sigproc.py
new file mode 100644
index 0000000..f29fa22
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/testproc/sigproc.py
@@ -0,0 +1,34 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import signal
+
+from . import base
+from testrunner.local import utils
+
+
+class SignalProc(base.TestProcObserver):
+  def __init__(self):
+    super(SignalProc, self).__init__()
+    self.exit_code = utils.EXIT_CODE_PASS
+
+  def setup(self, *args, **kwargs):
+    super(SignalProc, self).setup(*args, **kwargs)
+    # This should be called after the processors are chained together so that
+    # a caught signal is not lost.
+    signal.signal(signal.SIGINT, self._on_ctrlc)
+    signal.signal(signal.SIGTERM, self._on_sigterm)
+
+  def _on_ctrlc(self, _signum, _stack_frame):
+    print('>>> Ctrl-C detected, early abort...')
+    self.exit_code = utils.EXIT_CODE_INTERRUPTED
+    self.stop()
+
+  def _on_sigterm(self, _signum, _stack_frame):
+    print('>>> SIGTERM received, early abort...')
+    self.exit_code = utils.EXIT_CODE_TERMINATED
+    self.stop()
diff --git a/src/third_party/v8/tools/testrunner/testproc/timeout.py b/src/third_party/v8/tools/testrunner/testproc/timeout.py
new file mode 100644
index 0000000..026ba02
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/testproc/timeout.py
@@ -0,0 +1,30 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import time
+
+from . import base
+
+
+class TimeoutProc(base.TestProcObserver):
+  def __init__(self, duration_sec):
+    super(TimeoutProc, self).__init__()
+    self._duration_sec = duration_sec
+    self._start = time.time()
+
+  def _on_next_test(self, test):
+    self.__on_event()
+
+  def _on_result_for(self, test, result):
+    self.__on_event()
+
+  def _on_heartbeat(self):
+    self.__on_event()
+
+  def __on_event(self):
+    if not self.is_stopped:
+      if time.time() - self._start > self._duration_sec:
+        print('>>> Total timeout reached.')
+        self.stop()
diff --git a/src/third_party/v8/tools/testrunner/testproc/util.py b/src/third_party/v8/tools/testrunner/testproc/util.py
new file mode 100644
index 0000000..1f5cc7e
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/testproc/util.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python
+# Copyright 2020 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import heapq
+import os
+import platform
+import random
+import signal
+import subprocess
+
+# Base dir of the build products for Release and Debug.
+OUT_DIR = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '..', '..', '..', 'out'))
+
+
+def list_processes_linux():
+  """Returns list of tuples (pid, command) of processes running in the same out
+  directory as this checkout.
+  """
+  if platform.system() != 'Linux':
+    return []
+  try:
+    cmd = 'pgrep -fa %s' % OUT_DIR
+    output = subprocess.check_output(cmd, shell=True) or ''
+    processes = [
+      (int(line.split()[0]), line[line.index(OUT_DIR):])
+      for line in output.splitlines()
+    ]
+    # Filter out the odd entry whose command is just the out dir itself.
+    return [p for p in processes if p[1] != OUT_DIR]
+  except:
+    return []
+
+
+def kill_processes_linux():
+  """Kill stray processes on the system that started in the same out directory.
+
+  All swarming tasks share the same out directory location.
+  """
+  if platform.system() != 'Linux':
+    return
+  for pid, cmd in list_processes_linux():
+    try:
+      print('Attempting to kill %d - %s' % (pid, cmd))
+      os.kill(pid, signal.SIGKILL)
+    except:
+      pass
+
+
+class FixedSizeTopList():
+  """Utility collection for gathering a fixed number of elements with the
+  biggest value for the given key. It employs a heap from which we pop the
+  smallest element when the collection is 'full'.
+
+  If you need a reversed behaviour (collect min values) just provide an
+  inverse key."""
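+  # For example (illustrative): FixedSizeTopList(3, key=lambda x: -x) keeps
+  # the three smallest elements instead of the three largest.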
+
+  def __init__(self, size, key=None):
+    self.size = size
+    self.key = key or (lambda x: x)
+    self.data = []
+    self.discriminator = 0
+
+  def add(self, elem):
+    elem_k = self.key(elem)
+    heapq.heappush(self.data, (elem_k, self.extra_key(), elem))
+    if len(self.data) > self.size:
+      heapq.heappop(self.data)
+
+  def extra_key(self):
+    # Avoid key clash in tuples sent to the heap.
+    # We want to avoid comparisons on the last element of the tuple
+    # since those elements might not be comparable.
+    self.discriminator += 1
+    return self.discriminator
+
+  def as_list(self):
+    original_data = [rec for (_, _, rec) in self.data]
+    return sorted(original_data, key=self.key, reverse=True)
diff --git a/src/third_party/v8/tools/testrunner/testproc/util_unittest.py b/src/third_party/v8/tools/testrunner/testproc/util_unittest.py
new file mode 100644
index 0000000..5bf6a6e
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/testproc/util_unittest.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+# Copyright 2020 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from __future__ import absolute_import
+
+import os
+import sys
+import unittest
+
+TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.dirname(
+    os.path.abspath(__file__))))
+sys.path.append(TOOLS_PATH)
+
+from testrunner.testproc.util import FixedSizeTopList
+
+class TestOrderedFixedSizeList(unittest.TestCase):
+  def test_empty(self):
+    ofsl = FixedSizeTopList(3)
+    self.assertEqual(ofsl.as_list(), [])
+
+  def test_12(self):
+    ofsl = FixedSizeTopList(3)
+    ofsl.add(1)
+    ofsl.add(2)
+    self.assertEqual(ofsl.as_list(), [2,1])
+
+  def test_4321(self):
+    ofsl = FixedSizeTopList(3)
+    ofsl.add(4)
+    ofsl.add(3)
+    ofsl.add(2)
+    ofsl.add(1)
+    data = ofsl.as_list()
+    self.assertEqual(data, [4,3,2])
+
+  def test_544321(self):
+    ofsl = FixedSizeTopList(4)
+    ofsl.add(5)
+    ofsl.add(4)
+    ofsl.add(4)
+    ofsl.add(3)
+    ofsl.add(2)
+    ofsl.add(1)
+    data = ofsl.as_list()
+    self.assertEqual(data, [5, 4, 4, 3])
+
+  def test_withkey(self):
+    ofsl = FixedSizeTopList(3,key=lambda x: x['val'])
+    ofsl.add({'val':4, 'something': 'four'})
+    ofsl.add({'val':3, 'something': 'three'})
+    ofsl.add({'val':-1, 'something': 'minusone'})
+    ofsl.add({'val':5, 'something': 'five'})
+    ofsl.add({'val':0, 'something': 'zero'})
+    data = [e['something'] for e in ofsl.as_list()]
+    self.assertEqual(data, ['five', 'four', 'three'])
+
+  def test_withkeyclash(self):
+    # Test that a key clash does not throw an exception.
+    ofsl = FixedSizeTopList(2, key=lambda x: x['val'])
+    ofsl.add({'val':2, 'something': 'two'})
+    ofsl.add({'val':2, 'something': 'two'})
+    ofsl.add({'val':0, 'something': 'zero'})
+    data = [e['something'] for e in ofsl.as_list()]
+    self.assertEqual(data, ['two', 'two'])
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/src/third_party/v8/tools/testrunner/testproc/variant.py b/src/third_party/v8/tools/testrunner/testproc/variant.py
new file mode 100644
index 0000000..0164ad8
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/testproc/variant.py
@@ -0,0 +1,69 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from . import base
+from ..local.variants import ALL_VARIANTS, ALL_VARIANT_FLAGS
+from .result import GroupedResult
+
+
+STANDARD_VARIANT = set(["default"])
+
+
+class VariantProc(base.TestProcProducer):
+  """Processor creating variants.
+
+  For each test it keeps a generator that returns the variant, flags and procid
+  suffix. It produces variants one at a time, waiting for the result of one
+  variant before creating the next variant of the same test.
+  It maintains the order of the variants passed to the init.
+
+  There are cases in which a particular variant of a test is not valid. To
+  ignore such subtests, a StatusFileFilterProc should be placed somewhere
+  after the VariantProc.
+  """
+
+  def __init__(self, variants):
+    super(VariantProc, self).__init__('VariantProc')
+    self._next_variant = {}
+    self._variant_gens = {}
+    self._variants = variants
+
+  def setup(self, requirement=base.DROP_RESULT):
+    super(VariantProc, self).setup(requirement)
+
+    # VariantProc is optimized for dropping the result and it should be placed
+    # in the chain where it's possible.
+    assert requirement == base.DROP_RESULT
+
+  def _next_test(self, test):
+    gen = self._variants_gen(test)
+    self._next_variant[test.procid] = gen
+    return self._try_send_new_subtest(test, gen)
+
+  def _result_for(self, test, subtest, result):
+    gen = self._next_variant[test.procid]
+    if not self._try_send_new_subtest(test, gen):
+      self._send_result(test, None)
+
+  def _try_send_new_subtest(self, test, variants_gen):
+    for variant, flags, suffix in variants_gen:
+      subtest = self._create_subtest(test, '%s-%s' % (variant, suffix),
+                                     variant=variant, flags=flags)
+      if self._send_test(subtest):
+        return True
+
+    del self._next_variant[test.procid]
+    return False
+
+  def _variants_gen(self, test):
+    """Generator producing (variant, flags, procid suffix) tuples."""
+    return self._get_variants_gen(test).gen(test)
+
+  def _get_variants_gen(self, test):
+    key = test.suite.name
+    variants_gen = self._variant_gens.get(key)
+    if not variants_gen:
+      variants_gen = test.suite.get_variants_gen(self._variants)
+      self._variant_gens[key] = variants_gen
+    return variants_gen
diff --git a/src/third_party/v8/tools/testrunner/testproc/variant_unittest.py b/src/third_party/v8/tools/testrunner/testproc/variant_unittest.py
new file mode 100755
index 0000000..56e28c8
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/testproc/variant_unittest.py
@@ -0,0 +1,172 @@
+#!/usr/bin/env python
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+import tempfile
+import unittest
+
+# Needed because the test runner contains relative imports.
+TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.dirname(
+    os.path.abspath(__file__))))
+sys.path.append(TOOLS_PATH)
+
+from testrunner.testproc import base
+from testrunner.testproc.variant import VariantProc
+
+
+class FakeResultObserver(base.TestProcObserver):
+  def __init__(self):
+    super(FakeResultObserver, self).__init__()
+
+    self.results = set()
+
+  def result_for(self, test, result):
+    self.results.add((test, result))
+
+
+class FakeFilter(base.TestProcFilter):
+  def __init__(self, filter_predicate):
+    super(FakeFilter, self).__init__()
+
+    self._filter_predicate = filter_predicate
+
+    self.loaded = set()
+    self.call_counter = 0
+
+  def next_test(self, test):
+    self.call_counter += 1
+
+    if self._filter_predicate(test):
+      return False
+
+    self.loaded.add(test)
+    return True
+
+
+class FakeSuite(object):
+  def __init__(self, name):
+    self.name = name
+
+
+class FakeTest(object):
+  def __init__(self, procid):
+    self.suite = FakeSuite("fake_suite")
+    self.procid = procid
+
+    self.keep_output = False
+
+  def create_subtest(self, proc, subtest_id, **kwargs):
+    variant = kwargs['variant']
+
+    variant.origin = self
+    return variant
+
+
+class FakeVariantGen(object):
+  def __init__(self, variants):
+    self._variants = variants
+
+  def gen(self, test):
+    for variant in self._variants:
+      yield variant, [], "fake_suffix"
+
+
+class TestVariantProcLoading(unittest.TestCase):
+  def setUp(self):
+    self.test = FakeTest("test")
+
+  def _simulate_proc(self, variants):
+    """Expects the list of instantiated test variants to load into the
+    VariantProc."""
+    variants_mapping = {self.test: variants}
+
+    # Creates a Variant processor containing the possible types of test
+    # variants.
+    self.variant_proc = VariantProc(variants=["to_filter", "to_load"])
+    self.variant_proc._variant_gens = {
+      "fake_suite": FakeVariantGen(variants)}
+
+    # FakeFilter only lets tests passing the predicate to be loaded.
+    self.fake_filter = FakeFilter(
+      filter_predicate=(lambda t: t.procid == "to_filter"))
+
+    # FakeResultObserver to verify that VariantProc calls result_for correctly.
+    self.fake_result_observer = FakeResultObserver()
+
+    # Links up processors together to form a test processing pipeline.
+    self.variant_proc._prev_proc = self.fake_result_observer
+    self.fake_filter._prev_proc = self.variant_proc
+    self.variant_proc._next_proc = self.fake_filter
+
+    # Injects the test into the VariantProc
+    is_loaded = self.variant_proc.next_test(self.test)
+
+    # Verifies the behavioral consistency by using the instrumentation in
+    # FakeFilter
+    loaded_variants = list(self.fake_filter.loaded)
+    self.assertEqual(is_loaded, any(loaded_variants))
+    return self.fake_filter.loaded, self.fake_filter.call_counter
+
+  def test_filters_first_two_variants(self):
+    variants = [
+      FakeTest('to_filter'),
+      FakeTest('to_filter'),
+      FakeTest('to_load'),
+      FakeTest('to_load'),
+    ]
+    expected_load_results = {variants[2]}
+
+    load_results, call_count = self._simulate_proc(variants)
+
+    self.assertSetEqual(expected_load_results, load_results)
+    self.assertEqual(call_count, 3)
+
+  def test_stops_loading_after_first_successful_load(self):
+    variants = [
+      FakeTest('to_load'),
+      FakeTest('to_load'),
+      FakeTest('to_filter'),
+    ]
+    expected_load_results = {variants[0]}
+
+    loaded_tests, call_count = self._simulate_proc(variants)
+
+    self.assertSetEqual(expected_load_results, loaded_tests)
+    self.assertEqual(call_count, 1)
+
+  def test_return_result_when_out_of_variants(self):
+    variants = [
+      FakeTest('to_filter'),
+      FakeTest('to_load'),
+    ]
+
+    self._simulate_proc(variants)
+
+    self.variant_proc.result_for(variants[1], None)
+
+    expected_results = {(self.test, None)}
+
+    self.assertSetEqual(expected_results, self.fake_result_observer.results)
+
+  def test_return_result_after_running_variants(self):
+    variants = [
+      FakeTest('to_filter'),
+      FakeTest('to_load'),
+      FakeTest('to_load'),
+    ]
+
+    self._simulate_proc(variants)
+    self.variant_proc.result_for(variants[1], None)
+
+    self.assertSetEqual(set(variants[1:]), self.fake_filter.loaded)
+
+    self.variant_proc.result_for(variants[2], None)
+
+    expected_results = {(self.test, None)}
+    self.assertSetEqual(expected_results, self.fake_result_observer.results)
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/src/third_party/v8/tools/testrunner/trycatch_loader.js b/src/third_party/v8/tools/testrunner/trycatch_loader.js
new file mode 100644
index 0000000..737c8e4
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/trycatch_loader.js
@@ -0,0 +1,42 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+// Wrapper that loads JavaScript tests passed as arguments; used by the GC
+// fuzzer. It ignores all exceptions and runs each test in a separate
+// namespace (realm).
+//
+// It can't prevent the %AbortJS function from aborting execution, so it should
+// be used with d8's --disable-abortjs flag to ignore all possible errors
+// inside tests.
+
+// We use -- as an additional separator for test preamble files and test files.
+// The preamble files (before --) will be loaded in each realm before each
+// test.
+var separator = arguments.indexOf("--");
+var preamble = arguments.slice(0, separator);
+var tests = arguments.slice(separator + 1);
+
+var preambleString = "";
+for (let jstest of preamble) {
+  preambleString += "load(\"" + jstest + "\");";
+}
+
+for (let jstest of tests) {
+  print("Loading " + jstest);
+  let start = performance.now();
+
+  // Anonymous function so that we don't populate the global namespace.
+  (function () {
+    let realm = Realm.create();
+    try {
+      Realm.eval(realm, preambleString + "load(\"" + jstest + "\");");
+    } catch (err) {
+      // ignore all errors
+    }
+    Realm.dispose(realm);
+  })();
+
+  let durationSec = ((performance.now() - start) / 1000.0).toFixed(2);
+  print("Duration " + durationSec + "s");
+}
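+
+// Usage sketch (assumed d8 invocation; the exact flags depend on the fuzzer
+// harness):
+//   d8 --disable-abortjs trycatch_loader.js -- mjsunit/mjsunit.js -- test1.js test2.js
+// Everything before the second "--" is loaded as preamble in every realm.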
diff --git a/src/third_party/v8/tools/testrunner/utils/__init__.py b/src/third_party/v8/tools/testrunner/utils/__init__.py
new file mode 100644
index 0000000..4433538
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/utils/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/src/third_party/v8/tools/testrunner/utils/dump_build_config.py b/src/third_party/v8/tools/testrunner/utils/dump_build_config.py
new file mode 100644
index 0000000..b691bb3
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/utils/dump_build_config.py
@@ -0,0 +1,26 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Writes a dictionary to a json file with the passed key-value pairs.
+
+Expected to be called like:
+dump_build_config.py path/to/file.json [key1=value1 ...]
+
+The values are expected to be valid json. E.g. true is a boolean and "true" is
+the string "true".
+"""
+
+import json
+import os
+import sys
+
+assert len(sys.argv) > 2
+
+def as_json(kv):
+  assert '=' in kv
+  k, v = kv.split('=', 1)
+  return k, json.loads(v)
+
+with open(sys.argv[1], 'w') as f:
+  json.dump(dict(map(as_json, sys.argv[2:])), f)
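+
+# Example (illustrative):
+#   dump_build_config.py out/build_config.json is_debug=true v8_target_cpu='"x64"'
+# writes {"is_debug": true, "v8_target_cpu": "x64"} to out/build_config.json.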
diff --git a/src/third_party/v8/tools/testrunner/utils/dump_build_config_gyp.py b/src/third_party/v8/tools/testrunner/utils/dump_build_config_gyp.py
new file mode 100644
index 0000000..963b0e2
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/utils/dump_build_config_gyp.py
@@ -0,0 +1,57 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""The same as dump_build_config.py but for gyp legacy.
+
+Expected to be called like:
+dump_build_config.py path/to/file.json [key1=value1 ...]
+
+Raw gyp values are supported - they will be transformed into valid json.
+"""
+# TODO(machenbach): Remove this when gyp is deprecated.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import json
+import os
+import sys
+
+assert len(sys.argv) > 2
+
+
+GYP_GN_CONVERSION = {
+  'is_component_build': {
+    'shared_library': 'true',
+    'static_library': 'false',
+  },
+  'is_debug': {
+    'Debug': 'true',
+    'Release': 'false',
+  },
+}
+
+DEFAULT_CONVERSION = {
+  '0': 'false',
+  '1': 'true',
+  'ia32': 'x86',
+}
+
+def gyp_to_gn(key, value):
+  value = GYP_GN_CONVERSION.get(key, DEFAULT_CONVERSION).get(value, value)
+  value = value if value in ['true', 'false'] else '"{0}"'.format(value)
+  return value
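+
+# Examples (illustrative; key names are only for demonstration):
+#   gyp_to_gn('is_debug', 'Release')    -> 'false'
+#   gyp_to_gn('v8_target_cpu', 'ia32')  -> '"x86"'
+#   gyp_to_gn('target_arch', 'arm')     -> '"arm"' (unknown values are quoted)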
+
+def as_json(kv):
+  assert '=' in kv
+  k, v = kv.split('=', 1)
+  v2 = gyp_to_gn(k, v)
+  try:
+    return k, json.loads(v2)
+  except ValueError as e:
+    print((k, v, v2))
+    raise e
+
+with open(sys.argv[1], 'w') as f:
+  json.dump(dict(map(as_json, sys.argv[2:])), f)
diff --git a/src/third_party/v8/tools/testrunner/utils/random_utils.py b/src/third_party/v8/tools/testrunner/utils/random_utils.py
new file mode 100644
index 0000000..0d2cb3f
--- /dev/null
+++ b/src/third_party/v8/tools/testrunner/utils/random_utils.py
@@ -0,0 +1,13 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import random
+
+
+def random_seed():
+  """Returns random, non-zero seed."""
+  seed = 0
+  while not seed:
+    seed = random.SystemRandom().randint(-2147483648, 2147483647)
+  return seed
diff --git a/src/third_party/v8/tools/tick-processor.html b/src/third_party/v8/tools/tick-processor.html
new file mode 100644
index 0000000..ecb794e
--- /dev/null
+++ b/src/third_party/v8/tools/tick-processor.html
@@ -0,0 +1,157 @@
+<!DOCTYPE html>
+<!-- Copyright 2012 the V8 project authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+      copyright notice, this list of conditions and the following
+      disclaimer in the documentation and/or other materials provided
+      with the distribution.
+    * Neither the name of Google Inc. nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -->
+
+<html lang="en">
+<head>
+  <meta charset="utf-8">
+  <title>V8 Tick Processor</title>
+
+  <style>
+    body {
+      font-family: Verdana, Arial, Helvetica, sans-serif;
+      font-size: 10pt;
+    }
+    h4 {
+      margin-bottom: 0px;
+    }
+    p {
+      margin-top: 0px;
+    }
+  </style>
+  <script type="module">
+    import {
+      TickProcessor, UnixCppEntriesProvider, MacCppEntriesProvider,
+      WindowsCppEntriesProvider
+    } from "./tickprocessor.mjs";
+
+var v8log_content;
+globalThis.textout;
+
+globalThis.load_logfile = function(evt) {
+  globalThis.textout.value = "";
+  var f = evt.target.files[0];
+  if (f) {
+    var reader = new FileReader();
+    reader.onload = function(event) {
+      v8log_content = event.target.result;
+      start_process();
+    };
+    reader.onerror = function(event) {
+      console.error("File could not be read! Code " + event.target.error.code);
+    };
+    reader.readAsText(f);
+  } else {
+    alert("Failed to load file");
+  }
+}
+
+function print(arg) {
+  globalThis.textout.value+=arg+"\n";
+}
+
+function start_process() {
+  let DEFAULTS = {
+    logFileName: 'v8.log',
+    platform: 'unix',
+    stateFilter: null,
+    callGraphSize: 5,
+    ignoreUnknown: false,
+    separateIc: true,
+    targetRootFS: '',
+    apkEmbeddedLibrary: '',
+    nm: 'nm'
+  };
+
+  var entriesProviders = {
+    'unix': UnixCppEntriesProvider,
+    'windows': WindowsCppEntriesProvider,
+    'mac': MacCppEntriesProvider
+  };
+
+  var tickProcessor = new TickProcessor(
+    new (entriesProviders[DEFAULTS.platform])(
+        DEFAULTS.nm, DEFAULTS.targetRootFS, DEFAULTS.apkEmbeddedLibrary),
+    DEFAULTS.separateIc, DEFAULTS.callGraphSize,
+    DEFAULTS.ignoreUnknown, DEFAULTS.stateFilter);
+
+  tickProcessor.processLogChunk(v8log_content);
+  tickProcessor.printStatistics();
+}
+</script>
+<script>
+function Load() {
+  document.getElementById('fileinput').addEventListener(
+      'change', globalThis.load_logfile, false);
+  globalThis.textout = document.getElementById('textout');
+}
+</script>
+</head>
+<body onLoad="Load()">
+
+<h3 style="margin-top: 2px;">
+  Chrome V8 profiling log processor
+</h3>
+<p>
+Process V8's profiling information log (sampling profiler tick information)
+in your browser. This is particularly useful if you don't have the V8 shell
+(d8) at hand on your system. You still have to run Chrome with the appropriate
+<a href="https://code.google.com/p/v8/wiki/ProfilingChromiumWithV8">
+  command line flags</a>
+to produce the profiling log.
+</p>
+<h4>Usage:</h4>
+<p>
+Click on the button and browse to the profiling log file (usually, v8.log).
+Processing will start automatically and the output will be shown in the
+text area below.
+</p>
+<h4>Limitations and disclaimer:</h4>
+<p>
+This page offers a subset of the functionality of the command-line tick
+processor utility in the V8 repository. In particular, this page cannot
+access the command-line utility that provides library symbol information,
+hence the [C++] section of the output stays empty. Also note that this
+web-based tool is provided only for convenience and quick reference; you
+should refer to the
+<a href="https://code.google.com/p/v8/wiki/V8Profiler">
+  command-line</a>
+version for full output.
+</p>
+<p>
+<input type="file" id="fileinput" />
+</p>
+<p>
+<textarea name="myTextArea" cols="120" rows="40" wrap="off" id="textout"
+          readonly="yes"></textarea>
+</p>
+<p style="font-style:italic;">
+Copyright the V8 Authors - Last change to this page: 12/12/2012
+</p>
+
+
+</body>
+</html>
diff --git a/src/third_party/v8/tools/tickprocessor-driver.js b/src/third_party/v8/tools/tickprocessor-driver.js
new file mode 100644
index 0000000..d0e2178
--- /dev/null
+++ b/src/third_party/v8/tools/tickprocessor-driver.js
@@ -0,0 +1,83 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+// Tick Processor's code flow.
+
+function processArguments(args) {
+  var processor = new ArgumentsProcessor(args);
+  if (processor.parse()) {
+    return processor.result();
+  } else {
+    processor.printUsageAndExit();
+  }
+}
+
+function initSourceMapSupport() {
+  // Pull dev tools source maps into our namespace.
+  SourceMap = WebInspector.SourceMap;
+
+  // Overwrite the load function to load scripts synchronously.
+  SourceMap.load = function(sourceMapURL) {
+    var content = readFile(sourceMapURL);
+    var sourceMapObject = (JSON.parse(content));
+    return new SourceMap(sourceMapURL, sourceMapObject);
+  };
+}
+
+var entriesProviders = {
+  'unix': UnixCppEntriesProvider,
+  'windows': WindowsCppEntriesProvider,
+  'mac': MacCppEntriesProvider
+};
+
+var params = processArguments(arguments);
+var sourceMap = null;
+if (params.sourceMap) {
+  initSourceMapSupport();
+  sourceMap = SourceMap.load(params.sourceMap);
+}
+var tickProcessor = new TickProcessor(
+  new (entriesProviders[params.platform])(params.nm, params.objdump, params.targetRootFS,
+                                          params.apkEmbeddedLibrary),
+  params.separateIc,
+  params.separateBytecodes,
+  params.separateBuiltins,
+  params.separateStubs,
+  params.callGraphSize,
+  params.ignoreUnknown,
+  params.stateFilter,
+  params.distortion,
+  params.range,
+  sourceMap,
+  params.timedRange,
+  params.pairwiseTimedRange,
+  params.onlySummary,
+  params.runtimeTimerFilter,
+  params.preprocessJson);
+tickProcessor.processLogFile(params.logFileName);
+tickProcessor.printStatistics();
diff --git a/src/third_party/v8/tools/tickprocessor-driver.mjs b/src/third_party/v8/tools/tickprocessor-driver.mjs
new file mode 100644
index 0000000..e7020e3
--- /dev/null
+++ b/src/third_party/v8/tools/tickprocessor-driver.mjs
@@ -0,0 +1,88 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import { WebInspector } from "./sourcemap.mjs";
+import {
+    ArgumentsProcessor, TickProcessor, UnixCppEntriesProvider,
+    WindowsCppEntriesProvider, MacCppEntriesProvider, readFile,
+  } from "./tickprocessor.mjs";
+
+// Tick Processor's code flow.
+
+function processArguments(args) {
+  const processor = new ArgumentsProcessor(args);
+  if (processor.parse()) {
+    return processor.result();
+  } else {
+    processor.printUsageAndExit();
+  }
+}
+
+function initSourceMapSupport() {
+  // Pull dev tools source maps  into our name space.
+  SourceMap = WebInspector.SourceMap;
+
+  // Overwrite the load function to load scripts synchronously.
+  SourceMap.load = function(sourceMapURL) {
+    const content = readFile(sourceMapURL);
+    const sourceMapObject = (JSON.parse(content));
+    return new SourceMap(sourceMapURL, sourceMapObject);
+  };
+}
+
+const entriesProviders = {
+  'unix': UnixCppEntriesProvider,
+  'windows': WindowsCppEntriesProvider,
+  'mac': MacCppEntriesProvider
+};
+
+const params = processArguments(arguments);
+let sourceMap = null;
+if (params.sourceMap) {
+  initSourceMapSupport();
+  sourceMap = SourceMap.load(params.sourceMap);
+}
+const tickProcessor = new TickProcessor(
+  new (entriesProviders[params.platform])(params.nm, params.objdump, params.targetRootFS,
+                                          params.apkEmbeddedLibrary),
+  params.separateIc,
+  params.separateBytecodes,
+  params.separateBuiltins,
+  params.separateStubs,
+  params.callGraphSize,
+  params.ignoreUnknown,
+  params.stateFilter,
+  params.distortion,
+  params.range,
+  sourceMap,
+  params.timedRange,
+  params.pairwiseTimedRange,
+  params.onlySummary,
+  params.runtimeTimerFilter,
+  params.preprocessJson);
+tickProcessor.processLogFile(params.logFileName);
+tickProcessor.printStatistics();
diff --git a/src/third_party/v8/tools/tickprocessor.js b/src/third_party/v8/tools/tickprocessor.js
new file mode 100644
index 0000000..2a5b9af
--- /dev/null
+++ b/src/third_party/v8/tools/tickprocessor.js
@@ -0,0 +1,977 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function inherits(childCtor, parentCtor) {
+  childCtor.prototype.__proto__ = parentCtor.prototype;
+};
+
+
+function V8Profile(separateIc, separateBytecodes, separateBuiltins,
+    separateStubs) {
+  Profile.call(this);
+  var regexps = [];
+  if (!separateIc) regexps.push(V8Profile.IC_RE);
+  if (!separateBytecodes) regexps.push(V8Profile.BYTECODES_RE);
+  if (!separateBuiltins) regexps.push(V8Profile.BUILTINS_RE);
+  if (!separateStubs) regexps.push(V8Profile.STUBS_RE);
+  if (regexps.length > 0) {
+    this.skipThisFunction = function(name) {
+      for (var i=0; i<regexps.length; i++) {
+        if (regexps[i].test(name)) return true;
+      }
+      return false;
+    };
+  }
+};
+inherits(V8Profile, Profile);
+
+
+V8Profile.IC_RE =
+    /^(LoadGlobalIC: )|(Handler: )|(?:CallIC|LoadIC|StoreIC)|(?:Builtin: (?:Keyed)?(?:Load|Store)IC_)/;
+V8Profile.BYTECODES_RE = /^(BytecodeHandler: )/;
+V8Profile.BUILTINS_RE = /^(Builtin: )/;
+V8Profile.STUBS_RE = /^(Stub: )/;
+
+
+/**
+ * A thin wrapper around shell's 'read' function showing a file name on error.
+ */
+function readFile(fileName) {
+  try {
+    return read(fileName);
+  } catch (e) {
+    printErr(fileName + ': ' + (e.message || e));
+    throw e;
+  }
+}
+
+
+/**
+ * Parser for dynamic code optimization state.
+ */
+function parseState(s) {
+  switch (s) {
+  case "": return Profile.CodeState.COMPILED;
+  case "~": return Profile.CodeState.OPTIMIZABLE;
+  case "*": return Profile.CodeState.OPTIMIZED;
+  }
+  throw new Error("unknown code state: " + s);
+}
+
+
+function TickProcessor(
+    cppEntriesProvider,
+    separateIc,
+    separateBytecodes,
+    separateBuiltins,
+    separateStubs,
+    callGraphSize,
+    ignoreUnknown,
+    stateFilter,
+    distortion,
+    range,
+    sourceMap,
+    timedRange,
+    pairwiseTimedRange,
+    onlySummary,
+    runtimeTimerFilter,
+    preprocessJson) {
+  this.preprocessJson = preprocessJson;
+  LogReader.call(this, {
+      'shared-library': { parsers: [parseString, parseInt, parseInt, parseInt],
+          processor: this.processSharedLibrary },
+      'code-creation': {
+          parsers: [parseString, parseInt, parseInt, parseInt, parseInt,
+                    parseString, parseVarArgs],
+          processor: this.processCodeCreation },
+      'code-deopt': {
+          parsers: [parseInt, parseInt, parseInt, parseInt, parseInt,
+                    parseString, parseString, parseString],
+          processor: this.processCodeDeopt },
+      'code-move': { parsers: [parseInt, parseInt, ],
+          processor: this.processCodeMove },
+      'code-delete': { parsers: [parseInt],
+          processor: this.processCodeDelete },
+      'code-source-info': {
+          parsers: [parseInt, parseInt, parseInt, parseInt, parseString,
+                    parseString, parseString],
+          processor: this.processCodeSourceInfo },
+      'script-source': {
+          parsers: [parseInt, parseString, parseString],
+          processor: this.processScriptSource },
+      'sfi-move': { parsers: [parseInt, parseInt],
+          processor: this.processFunctionMove },
+      'active-runtime-timer': {
+        parsers: [parseString],
+        processor: this.processRuntimeTimerEvent },
+      'tick': {
+          parsers: [parseInt, parseInt, parseInt,
+                    parseInt, parseInt, parseVarArgs],
+          processor: this.processTick },
+      'heap-sample-begin': { parsers: [parseString, parseString, parseInt],
+          processor: this.processHeapSampleBegin },
+      'heap-sample-end': { parsers: [parseString, parseString],
+          processor: this.processHeapSampleEnd },
+      'timer-event-start' : { parsers: [parseString, parseString, parseString],
+                              processor: this.advanceDistortion },
+      'timer-event-end' : { parsers: [parseString, parseString, parseString],
+                            processor: this.advanceDistortion },
+      // Ignored events.
+      'profiler': null,
+      'function-creation': null,
+      'function-move': null,
+      'function-delete': null,
+      'heap-sample-item': null,
+      'current-time': null,  // Handled specially, not parsed.
+      // Obsolete row types.
+      'code-allocate': null,
+      'begin-code-region': null,
+      'end-code-region': null },
+      timedRange,
+      pairwiseTimedRange);
+
+  this.cppEntriesProvider_ = cppEntriesProvider;
+  this.callGraphSize_ = callGraphSize;
+  this.ignoreUnknown_ = ignoreUnknown;
+  this.stateFilter_ = stateFilter;
+  this.runtimeTimerFilter_ = runtimeTimerFilter;
+  this.sourceMap = sourceMap;
+  var ticks = this.ticks_ =
+    { total: 0, unaccounted: 0, excluded: 0, gc: 0 };
+
+  distortion = parseInt(distortion);
+  // Convert picoseconds to nanoseconds.
+  this.distortion_per_entry = isNaN(distortion) ? 0 : (distortion / 1000);
+  this.distortion = 0;
+  var rangelimits = range ? range.split(",") : [];
+  var range_start = parseInt(rangelimits[0]);
+  var range_end = parseInt(rangelimits[1]);
+  // Convert milliseconds to nanoseconds.
+  this.range_start = isNaN(range_start) ? -Infinity : (range_start * 1000);
+  this.range_end = isNaN(range_end) ? Infinity : (range_end * 1000);
+
+  V8Profile.prototype.handleUnknownCode = function(
+      operation, addr, opt_stackPos) {
+    var op = Profile.Operation;
+    switch (operation) {
+      case op.MOVE:
+        printErr('Code move event for unknown code: 0x' + addr.toString(16));
+        break;
+      case op.DELETE:
+        printErr('Code delete event for unknown code: 0x' + addr.toString(16));
+        break;
+      case op.TICK:
+        // Only unknown PCs (the first frame) are reported as unaccounted,
+        // otherwise tick balance will be corrupted (this behavior is compatible
+        // with the original tickprocessor.py script.)
+        if (opt_stackPos == 0) {
+          ticks.unaccounted++;
+        }
+        break;
+    }
+  };
+
+  if (preprocessJson) {
+    this.profile_ = new JsonProfile();
+  } else {
+    this.profile_ = new V8Profile(separateIc, separateBytecodes,
+        separateBuiltins, separateStubs);
+  }
+  this.codeTypes_ = {};
+  // Count each tick as a time unit.
+  this.viewBuilder_ = new ViewBuilder(1);
+  this.lastLogFileName_ = null;
+
+  this.generation_ = 1;
+  this.currentProducerProfile_ = null;
+  this.onlySummary_ = onlySummary;
+};
+inherits(TickProcessor, LogReader);
+
+
+TickProcessor.VmStates = {
+  JS: 0,
+  GC: 1,
+  PARSER: 2,
+  BYTECODE_COMPILER: 3,
+  COMPILER: 4,
+  OTHER: 5,
+  EXTERNAL: 6,
+  IDLE: 7,
+};
+
+
+TickProcessor.CodeTypes = {
+  CPP: 0,
+  SHARED_LIB: 1
+};
+// Otherwise, this is JS-related code. We are not adding it to
+// codeTypes_ map because there can be zillions of them.
+
+
+TickProcessor.CALL_PROFILE_CUTOFF_PCT = 1.0;
+
+TickProcessor.CALL_GRAPH_SIZE = 5;
+
+/**
+ * @override
+ */
+TickProcessor.prototype.printError = function(str) {
+  printErr(str);
+};
+
+
+TickProcessor.prototype.setCodeType = function(name, type) {
+  this.codeTypes_[name] = TickProcessor.CodeTypes[type];
+};
+
+
+TickProcessor.prototype.isSharedLibrary = function(name) {
+  return this.codeTypes_[name] == TickProcessor.CodeTypes.SHARED_LIB;
+};
+
+
+TickProcessor.prototype.isCppCode = function(name) {
+  return this.codeTypes_[name] == TickProcessor.CodeTypes.CPP;
+};
+
+
+TickProcessor.prototype.isJsCode = function(name) {
+  return name !== "UNKNOWN" && !(name in this.codeTypes_);
+};
+
+
+TickProcessor.prototype.processLogFile = function(fileName) {
+  this.lastLogFileName_ = fileName;
+  var line;
+  while (line = readline()) {
+    this.processLogLine(line);
+  }
+};
+
+
+TickProcessor.prototype.processLogFileInTest = function(fileName) {
+   // Hack file name to avoid dealing with platform specifics.
+  this.lastLogFileName_ = 'v8.log';
+  var contents = readFile(fileName);
+  this.processLogChunk(contents);
+};
+
+
+TickProcessor.prototype.processSharedLibrary = function(
+    name, startAddr, endAddr, aslrSlide) {
+  var entry = this.profile_.addLibrary(name, startAddr, endAddr, aslrSlide);
+  this.setCodeType(entry.getName(), 'SHARED_LIB');
+
+  var self = this;
+  var libFuncs = this.cppEntriesProvider_.parseVmSymbols(
+      name, startAddr, endAddr, aslrSlide, function(fName, fStart, fEnd) {
+    self.profile_.addStaticCode(fName, fStart, fEnd);
+    self.setCodeType(fName, 'CPP');
+  });
+};
+
+
+TickProcessor.prototype.processCodeCreation = function(
+    type, kind, timestamp, start, size, name, maybe_func) {
+  if (maybe_func.length) {
+    var funcAddr = parseInt(maybe_func[0]);
+    var state = parseState(maybe_func[1]);
+    this.profile_.addFuncCode(type, name, timestamp, start, size, funcAddr, state);
+  } else {
+    this.profile_.addCode(type, name, timestamp, start, size);
+  }
+};
+
+
+TickProcessor.prototype.processCodeDeopt = function(
+    timestamp, size, code, inliningId, scriptOffset, bailoutType,
+    sourcePositionText, deoptReasonText) {
+  this.profile_.deoptCode(timestamp, code, inliningId, scriptOffset,
+      bailoutType, sourcePositionText, deoptReasonText);
+};
+
+
+TickProcessor.prototype.processCodeMove = function(from, to) {
+  this.profile_.moveCode(from, to);
+};
+
+TickProcessor.prototype.processCodeDelete = function(start) {
+  this.profile_.deleteCode(start);
+};
+
+TickProcessor.prototype.processCodeSourceInfo = function(
+    start, script, startPos, endPos, sourcePositions, inliningPositions,
+    inlinedFunctions) {
+  this.profile_.addSourcePositions(start, script, startPos,
+    endPos, sourcePositions, inliningPositions, inlinedFunctions);
+};
+
+TickProcessor.prototype.processScriptSource = function(script, url, source) {
+  this.profile_.addScriptSource(script, url, source);
+};
+
+TickProcessor.prototype.processFunctionMove = function(from, to) {
+  this.profile_.moveFunc(from, to);
+};
+
+
+TickProcessor.prototype.includeTick = function(vmState) {
+  if (this.stateFilter_ !== null) {
+    return this.stateFilter_ == vmState;
+  } else if (this.runtimeTimerFilter_ !== null) {
+    return this.currentRuntimeTimer == this.runtimeTimerFilter_;
+  }
+  return true;
+};
+
+TickProcessor.prototype.processRuntimeTimerEvent = function(name) {
+  this.currentRuntimeTimer = name;
+}
+
+TickProcessor.prototype.processTick = function(pc,
+                                               ns_since_start,
+                                               is_external_callback,
+                                               tos_or_external_callback,
+                                               vmState,
+                                               stack) {
+  this.distortion += this.distortion_per_entry;
+  ns_since_start -= this.distortion;
+  if (ns_since_start < this.range_start || ns_since_start > this.range_end) {
+    return;
+  }
+  this.ticks_.total++;
+  if (vmState == TickProcessor.VmStates.GC) this.ticks_.gc++;
+  if (!this.includeTick(vmState)) {
+    this.ticks_.excluded++;
+    return;
+  }
+  if (is_external_callback) {
+    // Don't use PC when in external callback code, as it can point
+    // inside callback's code, and we will erroneously report
+    // that a callback calls itself. Instead we use tos_or_external_callback,
+    // as simply resetting PC will produce unaccounted ticks.
+    pc = tos_or_external_callback;
+    tos_or_external_callback = 0;
+  } else if (tos_or_external_callback) {
+    // Find out, if top of stack was pointing inside a JS function
+    // meaning that we have encountered a frameless invocation.
+    var funcEntry = this.profile_.findEntry(tos_or_external_callback);
+    if (!funcEntry || !funcEntry.isJSFunction || !funcEntry.isJSFunction()) {
+      tos_or_external_callback = 0;
+    }
+  }
+
+  this.profile_.recordTick(
+      ns_since_start, vmState,
+      this.processStack(pc, tos_or_external_callback, stack));
+};
+
+
+TickProcessor.prototype.advanceDistortion = function() {
+  this.distortion += this.distortion_per_entry;
+}
+
+
+TickProcessor.prototype.processHeapSampleBegin = function(space, state, ticks) {
+  if (space != 'Heap') return;
+  this.currentProducerProfile_ = new CallTree();
+};
+
+
+TickProcessor.prototype.processHeapSampleEnd = function(space, state) {
+  if (space != 'Heap' || !this.currentProducerProfile_) return;
+
+  print('Generation ' + this.generation_ + ':');
+  var tree = this.currentProducerProfile_;
+  tree.computeTotalWeights();
+  var producersView = this.viewBuilder_.buildView(tree);
+  // Sort by total time, desc, then by name, desc.
+  producersView.sort(function(rec1, rec2) {
+      return rec2.totalTime - rec1.totalTime ||
+          (rec2.internalFuncName < rec1.internalFuncName ? -1 : 1); });
+  this.printHeavyProfile(producersView.head.children);
+
+  this.currentProducerProfile_ = null;
+  this.generation_++;
+};
+
+
+TickProcessor.prototype.printStatistics = function() {
+  if (this.preprocessJson) {
+    this.profile_.writeJson();
+    return;
+  }
+
+  print('Statistical profiling result from ' + this.lastLogFileName_ +
+        ', (' + this.ticks_.total +
+        ' ticks, ' + this.ticks_.unaccounted + ' unaccounted, ' +
+        this.ticks_.excluded + ' excluded).');
+
+  if (this.ticks_.total == 0) return;
+
+  var flatProfile = this.profile_.getFlatProfile();
+  var flatView = this.viewBuilder_.buildView(flatProfile);
+  // Sort by self time, desc, then by name, desc.
+  flatView.sort(function(rec1, rec2) {
+      return rec2.selfTime - rec1.selfTime ||
+          (rec2.internalFuncName < rec1.internalFuncName ? -1 : 1); });
+  var totalTicks = this.ticks_.total;
+  if (this.ignoreUnknown_) {
+    totalTicks -= this.ticks_.unaccounted;
+  }
+  var printAllTicks = !this.onlySummary_;
+
+  // Count library ticks
+  var flatViewNodes = flatView.head.children;
+  var self = this;
+
+  var libraryTicks = 0;
+  if(printAllTicks) this.printHeader('Shared libraries');
+  this.printEntries(flatViewNodes, totalTicks, null,
+      function(name) { return self.isSharedLibrary(name); },
+      function(rec) { libraryTicks += rec.selfTime; }, printAllTicks);
+  var nonLibraryTicks = totalTicks - libraryTicks;
+
+  var jsTicks = 0;
+  if(printAllTicks) this.printHeader('JavaScript');
+  this.printEntries(flatViewNodes, totalTicks, nonLibraryTicks,
+      function(name) { return self.isJsCode(name); },
+      function(rec) { jsTicks += rec.selfTime; }, printAllTicks);
+
+  var cppTicks = 0;
+  if(printAllTicks) this.printHeader('C++');
+  this.printEntries(flatViewNodes, totalTicks, nonLibraryTicks,
+      function(name) { return self.isCppCode(name); },
+      function(rec) { cppTicks += rec.selfTime; }, printAllTicks);
+
+  this.printHeader('Summary');
+  this.printLine('JavaScript', jsTicks, totalTicks, nonLibraryTicks);
+  this.printLine('C++', cppTicks, totalTicks, nonLibraryTicks);
+  this.printLine('GC', this.ticks_.gc, totalTicks, nonLibraryTicks);
+  this.printLine('Shared libraries', libraryTicks, totalTicks, null);
+  if (!this.ignoreUnknown_ && this.ticks_.unaccounted > 0) {
+    this.printLine('Unaccounted', this.ticks_.unaccounted,
+                   this.ticks_.total, null);
+  }
+
+  if(printAllTicks) {
+    print('\n [C++ entry points]:');
+    print('   ticks    cpp   total   name');
+    var c_entry_functions = this.profile_.getCEntryProfile();
+    var total_c_entry = c_entry_functions[0].ticks;
+    for (var i = 1; i < c_entry_functions.length; i++) {
+      var c = c_entry_functions[i];
+      this.printLine(c.name, c.ticks, total_c_entry, totalTicks);
+    }
+
+    this.printHeavyProfHeader();
+    var heavyProfile = this.profile_.getBottomUpProfile();
+    var heavyView = this.viewBuilder_.buildView(heavyProfile);
+    // To show the same percentages as in the flat profile.
+    heavyView.head.totalTime = totalTicks;
+    // Sort by total time, desc, then by name, desc.
+    heavyView.sort(function(rec1, rec2) {
+        return rec2.totalTime - rec1.totalTime ||
+            (rec2.internalFuncName < rec1.internalFuncName ? -1 : 1); });
+    this.printHeavyProfile(heavyView.head.children);
+  }
+};
+
+
+function padLeft(s, len) {
+  s = s.toString();
+  if (s.length < len) {
+    var padLength = len - s.length;
+    if (!(padLength in padLeft)) {
+      padLeft[padLength] = new Array(padLength + 1).join(' ');
+    }
+    s = padLeft[padLength] + s;
+  }
+  return s;
+};
+
+
+TickProcessor.prototype.printHeader = function(headerTitle) {
+  print('\n [' + headerTitle + ']:');
+  print('   ticks  total  nonlib   name');
+};
+
+
+TickProcessor.prototype.printLine = function(
+    entry, ticks, totalTicks, nonLibTicks) {
+  var pct = ticks * 100 / totalTicks;
+  var nonLibPct = nonLibTicks != null
+      ? padLeft((ticks * 100 / nonLibTicks).toFixed(1), 5) + '%  '
+      : '        ';
+  print('  ' + padLeft(ticks, 5) + '  ' +
+        padLeft(pct.toFixed(1), 5) + '%  ' +
+        nonLibPct +
+        entry);
+}
+
+TickProcessor.prototype.printHeavyProfHeader = function() {
+  print('\n [Bottom up (heavy) profile]:');
+  print('  Note: percentage shows a share of a particular caller in the ' +
+        'total\n' +
+        '  amount of its parent calls.');
+  print('  Callers occupying less than ' +
+        TickProcessor.CALL_PROFILE_CUTOFF_PCT.toFixed(1) +
+        '% are not shown.\n');
+  print('   ticks parent  name');
+};
+
+
+TickProcessor.prototype.processProfile = function(
+    profile, filterP, func) {
+  for (var i = 0, n = profile.length; i < n; ++i) {
+    var rec = profile[i];
+    if (!filterP(rec.internalFuncName)) {
+      continue;
+    }
+    func(rec);
+  }
+};
+
+TickProcessor.prototype.getLineAndColumn = function(name) {
+  var re = /:([0-9]+):([0-9]+)$/;
+  var array = re.exec(name);
+  if (!array) {
+    return null;
+  }
+  return {line: array[1], column: array[2]};
+}
+
+TickProcessor.prototype.hasSourceMap = function() {
+  return this.sourceMap != null;
+};
+
+
+TickProcessor.prototype.formatFunctionName = function(funcName) {
+  if (!this.hasSourceMap()) {
+    return funcName;
+  }
+  var lc = this.getLineAndColumn(funcName);
+  if (lc == null) {
+    return funcName;
+  }
+  // in source maps lines and columns are zero based
+  var lineNumber = lc.line - 1;
+  var column = lc.column - 1;
+  var entry = this.sourceMap.findEntry(lineNumber, column);
+  var sourceFile = entry[2];
+  var sourceLine = entry[3] + 1;
+  var sourceColumn = entry[4] + 1;
+
+  return sourceFile + ':' + sourceLine + ':' + sourceColumn + ' -> ' + funcName;
+};
+
+TickProcessor.prototype.printEntries = function(
+    profile, totalTicks, nonLibTicks, filterP, callback, printAllTicks) {
+  var that = this;
+  this.processProfile(profile, filterP, function (rec) {
+    if (rec.selfTime == 0) return;
+    callback(rec);
+    var funcName = that.formatFunctionName(rec.internalFuncName);
+    if(printAllTicks) {
+      that.printLine(funcName, rec.selfTime, totalTicks, nonLibTicks);
+    }
+  });
+};
+
+
+TickProcessor.prototype.printHeavyProfile = function(profile, opt_indent) {
+  var self = this;
+  var indent = opt_indent || 0;
+  var indentStr = padLeft('', indent);
+  this.processProfile(profile, function() { return true; }, function (rec) {
+    // Cut off too infrequent callers.
+    if (rec.parentTotalPercent < TickProcessor.CALL_PROFILE_CUTOFF_PCT) return;
+    var funcName = self.formatFunctionName(rec.internalFuncName);
+    print('  ' + padLeft(rec.totalTime, 5) + '  ' +
+          padLeft(rec.parentTotalPercent.toFixed(1), 5) + '%  ' +
+          indentStr + funcName);
+    // Limit backtrace depth.
+    if (indent < 2 * self.callGraphSize_) {
+      self.printHeavyProfile(rec.children, indent + 2);
+    }
+    // Delimit top-level functions.
+    if (indent == 0) {
+      print('');
+    }
+  });
+};
+
+
+function CppEntriesProvider() {
+};
+
+
+CppEntriesProvider.prototype.parseVmSymbols = function(
+    libName, libStart, libEnd, libASLRSlide, processorFunc) {
+  this.loadSymbols(libName);
+
+  var lastUnknownSize;
+  var lastAdded;
+
+  function inRange(funcInfo, start, end) {
+    return funcInfo.start >= start && funcInfo.end <= end;
+  }
+
+  function addEntry(funcInfo) {
+    // Several functions can be mapped onto the same address. To avoid
+    // creating zero-sized entries, skip such duplicates.
+    // Also double-check that function belongs to the library address space.
+
+    if (lastUnknownSize &&
+        lastUnknownSize.start < funcInfo.start) {
+      // Try to update lastUnknownSize based on the new entry's start position.
+      lastUnknownSize.end = funcInfo.start;
+      if ((!lastAdded || !inRange(lastUnknownSize, lastAdded.start,
+                                  lastAdded.end)) &&
+          inRange(lastUnknownSize, libStart, libEnd)) {
+        processorFunc(lastUnknownSize.name, lastUnknownSize.start,
+                      lastUnknownSize.end);
+        lastAdded = lastUnknownSize;
+      }
+    }
+    lastUnknownSize = undefined;
+
+    if (funcInfo.end) {
+      // Skip duplicates that have the same start address as the last added.
+      if ((!lastAdded || lastAdded.start != funcInfo.start) &&
+          inRange(funcInfo, libStart, libEnd)) {
+        processorFunc(funcInfo.name, funcInfo.start, funcInfo.end);
+        lastAdded = funcInfo;
+      }
+    } else {
+      // If a funcInfo doesn't have an end, try to match it up with the next
+      // entry.
+      lastUnknownSize = funcInfo;
+    }
+  }
+
+  while (true) {
+    var funcInfo = this.parseNextLine();
+    if (funcInfo === null) {
+      continue;
+    } else if (funcInfo === false) {
+      break;
+    }
+    if (funcInfo.start < libStart - libASLRSlide &&
+        funcInfo.start < libEnd - libStart) {
+      funcInfo.start += libStart;
+    } else {
+      funcInfo.start += libASLRSlide;
+    }
+    if (funcInfo.size) {
+      funcInfo.end = funcInfo.start + funcInfo.size;
+    }
+    addEntry(funcInfo);
+  }
+  addEntry({name: '', start: libEnd});
+};
+
+
+CppEntriesProvider.prototype.loadSymbols = function(libName) {
+};
+
+
+CppEntriesProvider.prototype.parseNextLine = function() {
+  return false;
+};
+
+
+function UnixCppEntriesProvider(nmExec, objdumpExec, targetRootFS, apkEmbeddedLibrary) {
+  this.symbols = [];
+  // File offset of a symbol minus the virtual address of a symbol found in
+  // the symbol table.
+  this.fileOffsetMinusVma = 0;
+  this.parsePos = 0;
+  this.nmExec = nmExec;
+  this.objdumpExec = objdumpExec;
+  this.targetRootFS = targetRootFS;
+  this.apkEmbeddedLibrary = apkEmbeddedLibrary;
+  this.FUNC_RE = /^([0-9a-fA-F]{8,16}) ([0-9a-fA-F]{8,16} )?[tTwW] (.*)$/;
+};
+inherits(UnixCppEntriesProvider, CppEntriesProvider);
+
+
+UnixCppEntriesProvider.prototype.loadSymbols = function(libName) {
+  this.parsePos = 0;
+  if (this.apkEmbeddedLibrary && libName.endsWith('.apk')) {
+    libName = this.apkEmbeddedLibrary;
+  }
+  if (this.targetRootFS) {
+    libName = libName.substring(libName.lastIndexOf('/') + 1);
+    libName = this.targetRootFS + libName;
+  }
+  try {
+    this.symbols = [
+      os.system(this.nmExec, ['-C', '-n', '-S', libName], -1, -1),
+      os.system(this.nmExec, ['-C', '-n', '-S', '-D', libName], -1, -1)
+    ];
+
+    const objdumpOutput = os.system(this.objdumpExec, ['-h', libName], -1, -1);
+    for (const line of objdumpOutput.split('\n')) {
+      const [,sectionName,,vma,,fileOffset] = line.trim().split(/\s+/);
+      if (sectionName === ".text") {
+        this.fileOffsetMinusVma = parseInt(fileOffset, 16) - parseInt(vma, 16);
+      }
+    }
+  } catch (e) {
+    // If the library cannot be found on this system let's not panic.
+    this.symbols = ['', ''];
+  }
+};
+
+
+UnixCppEntriesProvider.prototype.parseNextLine = function() {
+  if (this.symbols.length == 0) {
+    return false;
+  }
+  var lineEndPos = this.symbols[0].indexOf('\n', this.parsePos);
+  if (lineEndPos == -1) {
+    this.symbols.shift();
+    this.parsePos = 0;
+    return this.parseNextLine();
+  }
+
+  var line = this.symbols[0].substring(this.parsePos, lineEndPos);
+  this.parsePos = lineEndPos + 1;
+  var fields = line.match(this.FUNC_RE);
+  var funcInfo = null;
+  if (fields) {
+    funcInfo = { name: fields[3], start: parseInt(fields[1], 16) + this.fileOffsetMinusVma };
+    if (fields[2]) {
+      funcInfo.size = parseInt(fields[2], 16);
+    }
+  }
+  return funcInfo;
+};
+
+
+function MacCppEntriesProvider(nmExec, objdumpExec, targetRootFS, apkEmbeddedLibrary) {
+  UnixCppEntriesProvider.call(this, nmExec, objdumpExec, targetRootFS, apkEmbeddedLibrary);
+  // Note an empty group. It is required, as UnixCppEntriesProvider expects 3 groups.
+  this.FUNC_RE = /^([0-9a-fA-F]{8,16})() (.*)$/;
+};
+inherits(MacCppEntriesProvider, UnixCppEntriesProvider);
+
+
+MacCppEntriesProvider.prototype.loadSymbols = function(libName) {
+  this.parsePos = 0;
+  libName = this.targetRootFS + libName;
+
+  // It seems that in OS X `nm` thinks that `-f` is a format option, not a
+  // "flat" display option flag.
+  try {
+    this.symbols = [os.system(this.nmExec, ['-n', libName], -1, -1), ''];
+  } catch (e) {
+    // If the library cannot be found on this system let's not panic.
+    this.symbols = '';
+  }
+};
+
+
+function WindowsCppEntriesProvider(_ignored_nmExec, _ignored_objdumpExec, targetRootFS,
+                                   _ignored_apkEmbeddedLibrary) {
+  this.targetRootFS = targetRootFS;
+  this.symbols = '';
+  this.parsePos = 0;
+};
+inherits(WindowsCppEntriesProvider, CppEntriesProvider);
+
+
+WindowsCppEntriesProvider.FILENAME_RE = /^(.*)\.([^.]+)$/;
+
+
+WindowsCppEntriesProvider.FUNC_RE =
+    /^\s+0001:[0-9a-fA-F]{8}\s+([_\?@$0-9a-zA-Z]+)\s+([0-9a-fA-F]{8}).*$/;
+
+
+WindowsCppEntriesProvider.IMAGE_BASE_RE =
+    /^\s+0000:00000000\s+___ImageBase\s+([0-9a-fA-F]{8}).*$/;
+
+
+// This is almost a constant on Windows.
+WindowsCppEntriesProvider.EXE_IMAGE_BASE = 0x00400000;
+
+
+WindowsCppEntriesProvider.prototype.loadSymbols = function(libName) {
+  libName = this.targetRootFS + libName;
+  var fileNameFields = libName.match(WindowsCppEntriesProvider.FILENAME_RE);
+  if (!fileNameFields) return;
+  var mapFileName = fileNameFields[1] + '.map';
+  this.moduleType_ = fileNameFields[2].toLowerCase();
+  try {
+    this.symbols = read(mapFileName);
+  } catch (e) {
+    // If .map file cannot be found let's not panic.
+    this.symbols = '';
+  }
+};
+
+
+WindowsCppEntriesProvider.prototype.parseNextLine = function() {
+  var lineEndPos = this.symbols.indexOf('\r\n', this.parsePos);
+  if (lineEndPos == -1) {
+    return false;
+  }
+
+  var line = this.symbols.substring(this.parsePos, lineEndPos);
+  this.parsePos = lineEndPos + 2;
+
+  // Image base entry is above all other symbols, so we can just
+  // terminate parsing.
+  var imageBaseFields = line.match(WindowsCppEntriesProvider.IMAGE_BASE_RE);
+  if (imageBaseFields) {
+    var imageBase = parseInt(imageBaseFields[1], 16);
+    if ((this.moduleType_ == 'exe') !=
+        (imageBase == WindowsCppEntriesProvider.EXE_IMAGE_BASE)) {
+      return false;
+    }
+  }
+
+  var fields = line.match(WindowsCppEntriesProvider.FUNC_RE);
+  return fields ?
+      { name: this.unmangleName(fields[1]), start: parseInt(fields[2], 16) } :
+      null;
+};
+
+
+/**
+ * Performs very simple unmangling of C++ names.
+ *
+ * Does not handle arguments and template arguments. The mangled names have
+ * the form:
+ *
+ *   ?LookupInDescriptor@JSObject@internal@v8@@...arguments info...
+ */
+WindowsCppEntriesProvider.prototype.unmangleName = function(name) {
+  // Empty or non-mangled name.
+  if (name.length < 1 || name.charAt(0) != '?') return name;
+  var nameEndPos = name.indexOf('@@');
+  var components = name.substring(1, nameEndPos).split('@');
+  components.reverse();
+  return components.join('::');
+};
+
+
+class ArgumentsProcessor extends BaseArgumentsProcessor {
+  getArgsDispatch() {
+    let dispatch = {
+      '-j': ['stateFilter', TickProcessor.VmStates.JS,
+          'Show only ticks from JS VM state'],
+      '-g': ['stateFilter', TickProcessor.VmStates.GC,
+          'Show only ticks from GC VM state'],
+      '-p': ['stateFilter', TickProcessor.VmStates.PARSER,
+          'Show only ticks from PARSER VM state'],
+      '-b': ['stateFilter', TickProcessor.VmStates.BYTECODE_COMPILER,
+          'Show only ticks from BYTECODE_COMPILER VM state'],
+      '-c': ['stateFilter', TickProcessor.VmStates.COMPILER,
+          'Show only ticks from COMPILER VM state'],
+      '-o': ['stateFilter', TickProcessor.VmStates.OTHER,
+          'Show only ticks from OTHER VM state'],
+      '-e': ['stateFilter', TickProcessor.VmStates.EXTERNAL,
+          'Show only ticks from EXTERNAL VM state'],
+      '--filter-runtime-timer': ['runtimeTimerFilter', null,
+              'Show only ticks matching the given runtime timer scope'],
+      '--call-graph-size': ['callGraphSize', TickProcessor.CALL_GRAPH_SIZE,
+          'Set the call graph size'],
+      '--ignore-unknown': ['ignoreUnknown', true,
+          'Exclude ticks of unknown code entries from processing'],
+      '--separate-ic': ['separateIc', parseBool,
+          'Separate IC entries'],
+      '--separate-bytecodes': ['separateBytecodes', parseBool,
+          'Separate Bytecode entries'],
+      '--separate-builtins': ['separateBuiltins', parseBool,
+          'Separate Builtin entries'],
+      '--separate-stubs': ['separateStubs', parseBool,
+          'Separate Stub entries'],
+      '--unix': ['platform', 'unix',
+          'Specify that we are running on *nix platform'],
+      '--windows': ['platform', 'windows',
+          'Specify that we are running on Windows platform'],
+      '--mac': ['platform', 'mac',
+          'Specify that we are running on Mac OS X platform'],
+      '--nm': ['nm', 'nm',
+          'Specify the \'nm\' executable to use (e.g. --nm=/my_dir/nm)'],
+      '--objdump': ['objdump', 'objdump',
+          'Specify the \'objdump\' executable to use (e.g. --objdump=/my_dir/objdump)'],
+      '--target': ['targetRootFS', '',
+          'Specify the target root directory for cross environment'],
+      '--apk-embedded-library': ['apkEmbeddedLibrary', '',
+          'Specify the path of the embedded library for Android traces'],
+      '--range': ['range', 'auto,auto',
+          'Specify the range limit as [start],[end]'],
+      '--distortion': ['distortion', 0,
+          'Specify the logging overhead in picoseconds'],
+      '--source-map': ['sourceMap', null,
+          'Specify the source map that should be used for output'],
+      '--timed-range': ['timedRange', true,
+          'Ignore ticks before first and after last Date.now() call'],
+      '--pairwise-timed-range': ['pairwiseTimedRange', true,
+          'Ignore ticks outside pairs of Date.now() calls'],
+      '--only-summary': ['onlySummary', true,
+          'Print only tick summary, exclude other information'],
+      '--preprocess': ['preprocessJson', true,
+          'Preprocess for consumption with web interface']
+    };
+    dispatch['--js'] = dispatch['-j'];
+    dispatch['--gc'] = dispatch['-g'];
+    dispatch['--compiler'] = dispatch['-c'];
+    dispatch['--other'] = dispatch['-o'];
+    dispatch['--external'] = dispatch['-e'];
+    dispatch['--ptr'] = dispatch['--pairwise-timed-range'];
+    return dispatch;
+  }
+
+  getDefaultResults() {
+    return {
+      logFileName: 'v8.log',
+      platform: 'unix',
+      stateFilter: null,
+      callGraphSize: 5,
+      ignoreUnknown: false,
+      separateIc: true,
+      separateBytecodes: false,
+      separateBuiltins: true,
+      separateStubs: true,
+      preprocessJson: null,
+      targetRootFS: '',
+      nm: 'nm',
+      objdump: 'objdump',
+      range: 'auto,auto',
+      distortion: 0,
+      timedRange: false,
+      pairwiseTimedRange: false,
+      onlySummary: false,
+      runtimeTimerFilter: null,
+    };
+  }
+}
diff --git a/src/third_party/v8/tools/tickprocessor.mjs b/src/third_party/v8/tools/tickprocessor.mjs
new file mode 100644
index 0000000..5b746d9
--- /dev/null
+++ b/src/third_party/v8/tools/tickprocessor.mjs
@@ -0,0 +1,980 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import { LogReader, parseString, parseVarArgs } from "./logreader.mjs";
+import { BaseArgumentsProcessor, parseBool } from "./arguments.mjs";
+import { Profile, JsonProfile } from "./profile.mjs";
+import { ViewBuilder } from "./profile_view.mjs";
+
+
+export function inherits(childCtor, parentCtor) {
+  childCtor.prototype.__proto__ = parentCtor.prototype;
+};
+
+
+class V8Profile extends Profile {
+  static IC_RE =
+      /^(LoadGlobalIC: )|(Handler: )|(?:CallIC|LoadIC|StoreIC)|(?:Builtin: (?:Keyed)?(?:Load|Store)IC_)/;
+  static BYTECODES_RE = /^(BytecodeHandler: )/;
+  static BUILTINS_RE = /^(Builtin: )/;
+  static STUBS_RE = /^(Stub: )/;
+
+  constructor(separateIc, separateBytecodes, separateBuiltins, separateStubs) {
+    super();
+    const regexps = [];
+    if (!separateIc) regexps.push(V8Profile.IC_RE);
+    if (!separateBytecodes) regexps.push(V8Profile.BYTECODES_RE);
+    if (!separateBuiltins) regexps.push(V8Profile.BUILTINS_RE);
+    if (!separateStubs) regexps.push(V8Profile.STUBS_RE);
+    if (regexps.length > 0) {
+      this.skipThisFunction = function(name) {
+        for (let i=0; i<regexps.length; i++) {
+          if (regexps[i].test(name)) return true;
+        }
+        return false;
+      };
+    }
+  }
+}
+
+
+/**
+ * A thin wrapper around shell's 'read' function showing a file name on error.
+ */
+export function readFile(fileName) {
+  try {
+    return read(fileName);
+  } catch (e) {
+    printErr(fileName + ': ' + (e.message || e));
+    throw e;
+  }
+}
+
+
+/**
+ * Parser for dynamic code optimization state.
+ */
+function parseState(s) {
+  switch (s) {
+  case "": return Profile.CodeState.COMPILED;
+  case "~": return Profile.CodeState.OPTIMIZABLE;
+  case "*": return Profile.CodeState.OPTIMIZED;
+  }
+  throw new Error(`unknown code state: ${s}`);
+}
+
+
+export function TickProcessor(
+    cppEntriesProvider,
+    separateIc,
+    separateBytecodes,
+    separateBuiltins,
+    separateStubs,
+    callGraphSize,
+    ignoreUnknown,
+    stateFilter,
+    distortion,
+    range,
+    sourceMap,
+    timedRange,
+    pairwiseTimedRange,
+    onlySummary,
+    runtimeTimerFilter,
+    preprocessJson) {
+  this.preprocessJson = preprocessJson;
+  LogReader.call(this, {
+      'shared-library': { parsers: [parseString, parseInt, parseInt, parseInt],
+          processor: this.processSharedLibrary },
+      'code-creation': {
+          parsers: [parseString, parseInt, parseInt, parseInt, parseInt,
+                    parseString, parseVarArgs],
+          processor: this.processCodeCreation },
+      'code-deopt': {
+          parsers: [parseInt, parseInt, parseInt, parseInt, parseInt,
+                    parseString, parseString, parseString],
+          processor: this.processCodeDeopt },
+      'code-move': { parsers: [parseInt, parseInt, ],
+          processor: this.processCodeMove },
+      'code-delete': { parsers: [parseInt],
+          processor: this.processCodeDelete },
+      'code-source-info': {
+          parsers: [parseInt, parseInt, parseInt, parseInt, parseString,
+                    parseString, parseString],
+          processor: this.processCodeSourceInfo },
+      'script-source': {
+          parsers: [parseInt, parseString, parseString],
+          processor: this.processScriptSource },
+      'sfi-move': { parsers: [parseInt, parseInt],
+          processor: this.processFunctionMove },
+      'active-runtime-timer': {
+        parsers: [parseString],
+        processor: this.processRuntimeTimerEvent },
+      'tick': {
+          parsers: [parseInt, parseInt, parseInt,
+                    parseInt, parseInt, parseVarArgs],
+          processor: this.processTick },
+      'heap-sample-begin': { parsers: [parseString, parseString, parseInt],
+          processor: this.processHeapSampleBegin },
+      'heap-sample-end': { parsers: [parseString, parseString],
+          processor: this.processHeapSampleEnd },
+      'timer-event-start' : { parsers: [parseString, parseString, parseString],
+                              processor: this.advanceDistortion },
+      'timer-event-end' : { parsers: [parseString, parseString, parseString],
+                            processor: this.advanceDistortion },
+      // Ignored events.
+      'profiler': null,
+      'function-creation': null,
+      'function-move': null,
+      'function-delete': null,
+      'heap-sample-item': null,
+      'current-time': null,  // Handled specially, not parsed.
+      // Obsolete row types.
+      'code-allocate': null,
+      'begin-code-region': null,
+      'end-code-region': null },
+      timedRange,
+      pairwiseTimedRange);
+
+  this.cppEntriesProvider_ = cppEntriesProvider;
+  this.callGraphSize_ = callGraphSize;
+  this.ignoreUnknown_ = ignoreUnknown;
+  this.stateFilter_ = stateFilter;
+  this.runtimeTimerFilter_ = runtimeTimerFilter;
+  this.sourceMap = sourceMap;
+  const ticks = this.ticks_ =
+    { total: 0, unaccounted: 0, excluded: 0, gc: 0 };
+
+  distortion = parseInt(distortion);
+  // Convert picoseconds to nanoseconds.
+  this.distortion_per_entry = isNaN(distortion) ? 0 : (distortion / 1000);
+  this.distortion = 0;
+  const rangelimits = range ? range.split(",") : [];
+  const range_start = parseInt(rangelimits[0]);
+  const range_end = parseInt(rangelimits[1]);
+  // Convert milliseconds to nanoseconds.
+  this.range_start = isNaN(range_start) ? -Infinity : (range_start * 1000);
+  this.range_end = isNaN(range_end) ? Infinity : (range_end * 1000);
+
+  V8Profile.prototype.handleUnknownCode = function(
+      operation, addr, opt_stackPos) {
+    const op = Profile.Operation;
+    switch (operation) {
+      case op.MOVE:
+        printErr(`Code move event for unknown code: 0x${addr.toString(16)}`);
+        break;
+      case op.DELETE:
+        printErr(`Code delete event for unknown code: 0x${addr.toString(16)}`);
+        break;
+      case op.TICK:
+        // Only unknown PCs (the first frame) are reported as unaccounted,
+        // otherwise tick balance will be corrupted (this behavior is compatible
+        // with the original tickprocessor.py script.)
+        if (opt_stackPos == 0) {
+          ticks.unaccounted++;
+        }
+        break;
+    }
+  };
+
+  if (preprocessJson) {
+    this.profile_ = new JsonProfile();
+  } else {
+    this.profile_ = new V8Profile(separateIc, separateBytecodes,
+        separateBuiltins, separateStubs);
+  }
+  this.codeTypes_ = {};
+  // Count each tick as a time unit.
+  this.viewBuilder_ = new ViewBuilder(1);
+  this.lastLogFileName_ = null;
+
+  this.generation_ = 1;
+  this.currentProducerProfile_ = null;
+  this.onlySummary_ = onlySummary;
+};
+inherits(TickProcessor, LogReader);
+
+
+TickProcessor.VmStates = {
+  JS: 0,
+  GC: 1,
+  PARSER: 2,
+  BYTECODE_COMPILER: 3,
+  COMPILER: 4,
+  OTHER: 5,
+  EXTERNAL: 6,
+  IDLE: 7,
+};
+
+
+TickProcessor.CodeTypes = {
+  CPP: 0,
+  SHARED_LIB: 1
+};
+// Otherwise, this is JS-related code. We are not adding it to
+// codeTypes_ map because there can be zillions of them.
+
+
+TickProcessor.CALL_PROFILE_CUTOFF_PCT = 1.0;
+
+TickProcessor.CALL_GRAPH_SIZE = 5;
+
+/**
+ * @override
+ */
+TickProcessor.prototype.printError = function(str) {
+  printErr(str);
+};
+
+
+TickProcessor.prototype.setCodeType = function(name, type) {
+  this.codeTypes_[name] = TickProcessor.CodeTypes[type];
+};
+
+
+TickProcessor.prototype.isSharedLibrary = function(name) {
+  return this.codeTypes_[name] == TickProcessor.CodeTypes.SHARED_LIB;
+};
+
+
+TickProcessor.prototype.isCppCode = function(name) {
+  return this.codeTypes_[name] == TickProcessor.CodeTypes.CPP;
+};
+
+
+TickProcessor.prototype.isJsCode = function(name) {
+  return name !== "UNKNOWN" && !(name in this.codeTypes_);
+};
+
+
+TickProcessor.prototype.processLogFile = function(fileName) {
+  this.lastLogFileName_ = fileName;
+  let line;
+  while (line = readline()) {
+    this.processLogLine(line);
+  }
+};
+
+
+TickProcessor.prototype.processLogFileInTest = function(fileName) {
+   // Hack file name to avoid dealing with platform specifics.
+  this.lastLogFileName_ = 'v8.log';
+  const contents = readFile(fileName);
+  this.processLogChunk(contents);
+};
+
+
+TickProcessor.prototype.processSharedLibrary = function(
+    name, startAddr, endAddr, aslrSlide) {
+  const entry = this.profile_.addLibrary(name, startAddr, endAddr, aslrSlide);
+  this.setCodeType(entry.getName(), 'SHARED_LIB');
+
+  const self = this;
+  const libFuncs = this.cppEntriesProvider_.parseVmSymbols(
+      name, startAddr, endAddr, aslrSlide, function(fName, fStart, fEnd) {
+    self.profile_.addStaticCode(fName, fStart, fEnd);
+    self.setCodeType(fName, 'CPP');
+  });
+};
+
+
+TickProcessor.prototype.processCodeCreation = function(
+    type, kind, timestamp, start, size, name, maybe_func) {
+  if (maybe_func.length) {
+    const funcAddr = parseInt(maybe_func[0]);
+    const state = parseState(maybe_func[1]);
+    this.profile_.addFuncCode(type, name, timestamp, start, size, funcAddr, state);
+  } else {
+    this.profile_.addCode(type, name, timestamp, start, size);
+  }
+};
+
+
+TickProcessor.prototype.processCodeDeopt = function(
+    timestamp, size, code, inliningId, scriptOffset, bailoutType,
+    sourcePositionText, deoptReasonText) {
+  this.profile_.deoptCode(timestamp, code, inliningId, scriptOffset,
+      bailoutType, sourcePositionText, deoptReasonText);
+};
+
+
+TickProcessor.prototype.processCodeMove = function(from, to) {
+  this.profile_.moveCode(from, to);
+};
+
+TickProcessor.prototype.processCodeDelete = function(start) {
+  this.profile_.deleteCode(start);
+};
+
+TickProcessor.prototype.processCodeSourceInfo = function(
+    start, script, startPos, endPos, sourcePositions, inliningPositions,
+    inlinedFunctions) {
+  this.profile_.addSourcePositions(start, script, startPos,
+    endPos, sourcePositions, inliningPositions, inlinedFunctions);
+};
+
+TickProcessor.prototype.processScriptSource = function(script, url, source) {
+  this.profile_.addScriptSource(script, url, source);
+};
+
+TickProcessor.prototype.processFunctionMove = function(from, to) {
+  this.profile_.moveFunc(from, to);
+};
+
+
+TickProcessor.prototype.includeTick = function(vmState) {
+  if (this.stateFilter_ !== null) {
+    return this.stateFilter_ == vmState;
+  } else if (this.runtimeTimerFilter_ !== null) {
+    return this.currentRuntimeTimer == this.runtimeTimerFilter_;
+  }
+  return true;
+};
+
+TickProcessor.prototype.processRuntimeTimerEvent = function(name) {
+  this.currentRuntimeTimer = name;
+}
+
+TickProcessor.prototype.processTick = function(pc,
+                                               ns_since_start,
+                                               is_external_callback,
+                                               tos_or_external_callback,
+                                               vmState,
+                                               stack) {
+  this.distortion += this.distortion_per_entry;
+  ns_since_start -= this.distortion;
+  if (ns_since_start < this.range_start || ns_since_start > this.range_end) {
+    return;
+  }
+  this.ticks_.total++;
+  if (vmState == TickProcessor.VmStates.GC) this.ticks_.gc++;
+  if (!this.includeTick(vmState)) {
+    this.ticks_.excluded++;
+    return;
+  }
+  if (is_external_callback) {
+    // Don't use PC when in external callback code, as it can point
+    // inside callback's code, and we will erroneously report
+    // that a callback calls itself. Instead we use tos_or_external_callback,
+    // as simply resetting PC will produce unaccounted ticks.
+    pc = tos_or_external_callback;
+    tos_or_external_callback = 0;
+  } else if (tos_or_external_callback) {
+    // Find out if the top of stack was pointing inside a JS function,
+    // meaning that we have encountered a frameless invocation.
+    const funcEntry = this.profile_.findEntry(tos_or_external_callback);
+    if (!funcEntry || !funcEntry.isJSFunction || !funcEntry.isJSFunction()) {
+      tos_or_external_callback = 0;
+    }
+  }
+
+  this.profile_.recordTick(
+      ns_since_start, vmState,
+      this.processStack(pc, tos_or_external_callback, stack));
+};
+
+
+TickProcessor.prototype.advanceDistortion = function() {
+  this.distortion += this.distortion_per_entry;
+}
+
+
+TickProcessor.prototype.processHeapSampleBegin = function(space, state, ticks) {
+  if (space != 'Heap') return;
+  this.currentProducerProfile_ = new CallTree();
+};
+
+
+TickProcessor.prototype.processHeapSampleEnd = function(space, state) {
+  if (space != 'Heap' || !this.currentProducerProfile_) return;
+
+  print(`Generation ${this.generation_}:`);
+  const tree = this.currentProducerProfile_;
+  tree.computeTotalWeights();
+  const producersView = this.viewBuilder_.buildView(tree);
+  // Sort by total time, desc, then by name, desc.
+  producersView.sort((rec1, rec2) =>
+      rec2.totalTime - rec1.totalTime ||
+          (rec2.internalFuncName < rec1.internalFuncName ? -1 : 1) );
+  this.printHeavyProfile(producersView.head.children);
+
+  this.currentProducerProfile_ = null;
+  this.generation_++;
+};
+
+
+TickProcessor.prototype.printStatistics = function() {
+  if (this.preprocessJson) {
+    this.profile_.writeJson();
+    return;
+  }
+
+  print(`Statistical profiling result from ${this.lastLogFileName_}` +
+        ', (' + this.ticks_.total +
+        ' ticks, ' + this.ticks_.unaccounted + ' unaccounted, ' +
+        this.ticks_.excluded + ' excluded).');
+
+  if (this.ticks_.total == 0) return;
+
+  const flatProfile = this.profile_.getFlatProfile();
+  const flatView = this.viewBuilder_.buildView(flatProfile);
+  // Sort by self time, desc, then by name, desc.
+  flatView.sort((rec1, rec2) =>
+      rec2.selfTime - rec1.selfTime ||
+          (rec2.internalFuncName < rec1.internalFuncName ? -1 : 1) );
+  let totalTicks = this.ticks_.total;
+  if (this.ignoreUnknown_) {
+    totalTicks -= this.ticks_.unaccounted;
+  }
+  const printAllTicks = !this.onlySummary_;
+
+  // Count library ticks
+  const flatViewNodes = flatView.head.children;
+  const self = this;
+
+  let libraryTicks = 0;
+  if(printAllTicks) this.printHeader('Shared libraries');
+  this.printEntries(flatViewNodes, totalTicks, null,
+      name => self.isSharedLibrary(name),
+      function(rec) { libraryTicks += rec.selfTime; }, printAllTicks);
+  const nonLibraryTicks = totalTicks - libraryTicks;
+
+  let jsTicks = 0;
+  if(printAllTicks) this.printHeader('JavaScript');
+  this.printEntries(flatViewNodes, totalTicks, nonLibraryTicks,
+      name => self.isJsCode(name),
+      function(rec) { jsTicks += rec.selfTime; }, printAllTicks);
+
+  let cppTicks = 0;
+  if(printAllTicks) this.printHeader('C++');
+  this.printEntries(flatViewNodes, totalTicks, nonLibraryTicks,
+      name => self.isCppCode(name),
+      function(rec) { cppTicks += rec.selfTime; }, printAllTicks);
+
+  this.printHeader('Summary');
+  this.printLine('JavaScript', jsTicks, totalTicks, nonLibraryTicks);
+  this.printLine('C++', cppTicks, totalTicks, nonLibraryTicks);
+  this.printLine('GC', this.ticks_.gc, totalTicks, nonLibraryTicks);
+  this.printLine('Shared libraries', libraryTicks, totalTicks, null);
+  if (!this.ignoreUnknown_ && this.ticks_.unaccounted > 0) {
+    this.printLine('Unaccounted', this.ticks_.unaccounted,
+                   this.ticks_.total, null);
+  }
+
+  if(printAllTicks) {
+    print('\n [C++ entry points]:');
+    print('   ticks    cpp   total   name');
+    const c_entry_functions = this.profile_.getCEntryProfile();
+    const total_c_entry = c_entry_functions[0].ticks;
+    for (let i = 1; i < c_entry_functions.length; i++) {
+      const c = c_entry_functions[i];
+      this.printLine(c.name, c.ticks, total_c_entry, totalTicks);
+    }
+
+    this.printHeavyProfHeader();
+    const heavyProfile = this.profile_.getBottomUpProfile();
+    const heavyView = this.viewBuilder_.buildView(heavyProfile);
+    // To show the same percentages as in the flat profile.
+    heavyView.head.totalTime = totalTicks;
+    // Sort by total time, desc, then by name, desc.
+    heavyView.sort((rec1, rec2) =>
+        rec2.totalTime - rec1.totalTime ||
+            (rec2.internalFuncName < rec1.internalFuncName ? -1 : 1) );
+    this.printHeavyProfile(heavyView.head.children);
+  }
+};
+
+
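+// Left-pads |s| with spaces up to |len| characters. Pad strings are cached
+// on the padLeft function object itself to avoid rebuilding them.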
+function padLeft(s, len) {
+  s = s.toString();
+  if (s.length < len) {
+    const padLength = len - s.length;
+    if (!(padLength in padLeft)) {
+      padLeft[padLength] = new Array(padLength + 1).join(' ');
+    }
+    s = padLeft[padLength] + s;
+  }
+  return s;
+};
+
+
+TickProcessor.prototype.printHeader = function(headerTitle) {
+  print(`\n [${headerTitle}]:`);
+  print('   ticks  total  nonlib   name');
+};
+
+
+TickProcessor.prototype.printLine = function(
+    entry, ticks, totalTicks, nonLibTicks) {
+  const pct = ticks * 100 / totalTicks;
+  const nonLibPct = nonLibTicks != null
+      ? padLeft((ticks * 100 / nonLibTicks).toFixed(1), 5) + '%  '
+      : '        ';
+  print(`  ${padLeft(ticks, 5)}  ` +
+        padLeft(pct.toFixed(1), 5) + '%  ' +
+        nonLibPct +
+        entry);
+}
+
+TickProcessor.prototype.printHeavyProfHeader = function() {
+  print('\n [Bottom up (heavy) profile]:');
+  print('  Note: percentage shows a share of a particular caller in the ' +
+        'total\n' +
+        '  amount of its parent calls.');
+  print('  Callers occupying less than ' +
+        TickProcessor.CALL_PROFILE_CUTOFF_PCT.toFixed(1) +
+        '% are not shown.\n');
+  print('   ticks parent  name');
+};
+
+
+TickProcessor.prototype.processProfile = function(
+    profile, filterP, func) {
+  for (let i = 0, n = profile.length; i < n; ++i) {
+    const rec = profile[i];
+    if (!filterP(rec.internalFuncName)) {
+      continue;
+    }
+    func(rec);
+  }
+};
+
+TickProcessor.prototype.getLineAndColumn = function(name) {
+  const re = /:([0-9]+):([0-9]+)$/;
+  const array = re.exec(name);
+  if (!array) {
+    return null;
+  }
+  return {line: array[1], column: array[2]};
+}
+
+TickProcessor.prototype.hasSourceMap = function() {
+  return this.sourceMap != null;
+};
+
+
+TickProcessor.prototype.formatFunctionName = function(funcName) {
+  if (!this.hasSourceMap()) {
+    return funcName;
+  }
+  const lc = this.getLineAndColumn(funcName);
+  if (lc == null) {
+    return funcName;
+  }
+  // in source maps lines and columns are zero based
+  const lineNumber = lc.line - 1;
+  const column = lc.column - 1;
+  const entry = this.sourceMap.findEntry(lineNumber, column);
+  const sourceFile = entry[2];
+  const sourceLine = entry[3] + 1;
+  const sourceColumn = entry[4] + 1;
+
+  return sourceFile + ':' + sourceLine + ':' + sourceColumn + ' -> ' + funcName;
+};
+
+TickProcessor.prototype.printEntries = function(
+    profile, totalTicks, nonLibTicks, filterP, callback, printAllTicks) {
+  const that = this;
+  this.processProfile(profile, filterP, function (rec) {
+    if (rec.selfTime == 0) return;
+    callback(rec);
+    const funcName = that.formatFunctionName(rec.internalFuncName);
+    if(printAllTicks) {
+      that.printLine(funcName, rec.selfTime, totalTicks, nonLibTicks);
+    }
+  });
+};
+
+
+TickProcessor.prototype.printHeavyProfile = function(profile, opt_indent) {
+  const self = this;
+  const indent = opt_indent || 0;
+  const indentStr = padLeft('', indent);
+  this.processProfile(profile, () => true, function (rec) {
+    // Cut off too infrequent callers.
+    if (rec.parentTotalPercent < TickProcessor.CALL_PROFILE_CUTOFF_PCT) return;
+    const funcName = self.formatFunctionName(rec.internalFuncName);
+    print(`  ${padLeft(rec.totalTime, 5)}  ` +
+          padLeft(rec.parentTotalPercent.toFixed(1), 5) + '%  ' +
+          indentStr + funcName);
+    // Limit backtrace depth.
+    if (indent < 2 * self.callGraphSize_) {
+      self.printHeavyProfile(rec.children, indent + 2);
+    }
+    // Delimit top-level functions.
+    if (indent == 0) {
+      print('');
+    }
+  });
+};
+
+
+function CppEntriesProvider() {
+};
+
+
+CppEntriesProvider.prototype.parseVmSymbols = function(
+    libName, libStart, libEnd, libASLRSlide, processorFunc) {
+  this.loadSymbols(libName);
+
+  let lastUnknownSize;
+  let lastAdded;
+
+  function inRange(funcInfo, start, end) {
+    return funcInfo.start >= start && funcInfo.end <= end;
+  }
+
+  function addEntry(funcInfo) {
+    // Several functions can be mapped onto the same address. To avoid
+    // creating zero-sized entries, skip such duplicates.
+    // Also double-check that the function belongs to the library address space.
+
+    if (lastUnknownSize &&
+        lastUnknownSize.start < funcInfo.start) {
+      // Try to update lastUnknownSize based on the new entry's start position.
+      lastUnknownSize.end = funcInfo.start;
+      if ((!lastAdded || !inRange(lastUnknownSize, lastAdded.start,
+                                  lastAdded.end)) &&
+          inRange(lastUnknownSize, libStart, libEnd)) {
+        processorFunc(lastUnknownSize.name, lastUnknownSize.start,
+                      lastUnknownSize.end);
+        lastAdded = lastUnknownSize;
+      }
+    }
+    lastUnknownSize = undefined;
+
+    if (funcInfo.end) {
+      // Skip duplicates that have the same start address as the last added.
+      if ((!lastAdded || lastAdded.start != funcInfo.start) &&
+          inRange(funcInfo, libStart, libEnd)) {
+        processorFunc(funcInfo.name, funcInfo.start, funcInfo.end);
+        lastAdded = funcInfo;
+      }
+    } else {
+      // If a funcInfo doesn't have an end, try to match it up with the next
+      // entry.
+      lastUnknownSize = funcInfo;
+    }
+  }
+
+  while (true) {
+    const funcInfo = this.parseNextLine();
+    if (funcInfo === null) {
+      continue;
+    } else if (funcInfo === false) {
+      break;
+    }
+    if (funcInfo.start < libStart - libASLRSlide &&
+        funcInfo.start < libEnd - libStart) {
+      funcInfo.start += libStart;
+    } else {
+      funcInfo.start += libASLRSlide;
+    }
+    if (funcInfo.size) {
+      funcInfo.end = funcInfo.start + funcInfo.size;
+    }
+    addEntry(funcInfo);
+  }
+  addEntry({name: '', start: libEnd});
+};
+
+
+CppEntriesProvider.prototype.loadSymbols = function(libName) {
+};
+
+
+CppEntriesProvider.prototype.parseNextLine = () => false;
+
+
+export function UnixCppEntriesProvider(nmExec, objdumpExec, targetRootFS, apkEmbeddedLibrary) {
+  this.symbols = [];
+  // File offset of a symbol minus the virtual address of a symbol found in
+  // the symbol table.
+  this.fileOffsetMinusVma = 0;
+  this.parsePos = 0;
+  this.nmExec = nmExec;
+  this.objdumpExec = objdumpExec;
+  this.targetRootFS = targetRootFS;
+  this.apkEmbeddedLibrary = apkEmbeddedLibrary;
+  this.FUNC_RE = /^([0-9a-fA-F]{8,16}) ([0-9a-fA-F]{8,16} )?[tTwW] (.*)$/;
+};
+inherits(UnixCppEntriesProvider, CppEntriesProvider);
+
+
+UnixCppEntriesProvider.prototype.loadSymbols = function(libName) {
+  this.parsePos = 0;
+  if (this.apkEmbeddedLibrary && libName.endsWith('.apk')) {
+    libName = this.apkEmbeddedLibrary;
+  }
+  if (this.targetRootFS) {
+    libName = libName.substring(libName.lastIndexOf('/') + 1);
+    libName = this.targetRootFS + libName;
+  }
+  try {
+    this.symbols = [
+      os.system(this.nmExec, ['-C', '-n', '-S', libName], -1, -1),
+      os.system(this.nmExec, ['-C', '-n', '-S', '-D', libName], -1, -1)
+    ];
+
+    const objdumpOutput = os.system(this.objdumpExec, ['-h', libName], -1, -1);
+    for (const line of objdumpOutput.split('\n')) {
+      const [,sectionName,,vma,,fileOffset] = line.trim().split(/\s+/);
+      if (sectionName === ".text") {
+        this.fileOffsetMinusVma = parseInt(fileOffset, 16) - parseInt(vma, 16);
+      }
+    }
+  } catch (e) {
+    // If the library cannot be found on this system let's not panic.
+    this.symbols = ['', ''];
+  }
+};
+
+
+UnixCppEntriesProvider.prototype.parseNextLine = function() {
+  if (this.symbols.length == 0) {
+    return false;
+  }
+  const lineEndPos = this.symbols[0].indexOf('\n', this.parsePos);
+  if (lineEndPos == -1) {
+    this.symbols.shift();
+    this.parsePos = 0;
+    return this.parseNextLine();
+  }
+
+  const line = this.symbols[0].substring(this.parsePos, lineEndPos);
+  this.parsePos = lineEndPos + 1;
+  const fields = line.match(this.FUNC_RE);
+  let funcInfo = null;
+  if (fields) {
+    funcInfo = { name: fields[3], start: parseInt(fields[1], 16) + this.fileOffsetMinusVma };
+    if (fields[2]) {
+      funcInfo.size = parseInt(fields[2], 16);
+    }
+  }
+  return funcInfo;
+};
+
+
+export function MacCppEntriesProvider(nmExec, objdumpExec, targetRootFS, apkEmbeddedLibrary) {
+  UnixCppEntriesProvider.call(this, nmExec, objdumpExec, targetRootFS, apkEmbeddedLibrary);
+  // Note an empty group. It is required, as UnixCppEntriesProvider expects 3 groups.
+  this.FUNC_RE = /^([0-9a-fA-F]{8,16})() (.*)$/;
+};
+inherits(MacCppEntriesProvider, UnixCppEntriesProvider);
+
+
+MacCppEntriesProvider.prototype.loadSymbols = function(libName) {
+  this.parsePos = 0;
+  libName = this.targetRootFS + libName;
+
+  // It seems that in OS X `nm` thinks that `-f` is a format option, not a
+  // "flat" display option flag.
+  try {
+    this.symbols = [os.system(this.nmExec, ['-n', libName], -1, -1), ''];
+  } catch (e) {
+    // If the library cannot be found on this system let's not panic.
+    this.symbols = '';
+  }
+};
+
+
+export function WindowsCppEntriesProvider(_ignored_nmExec, _ignored_objdumpExec, targetRootFS,
+                                   _ignored_apkEmbeddedLibrary) {
+  this.targetRootFS = targetRootFS;
+  this.symbols = '';
+  this.parsePos = 0;
+};
+inherits(WindowsCppEntriesProvider, CppEntriesProvider);
+
+
+WindowsCppEntriesProvider.FILENAME_RE = /^(.*)\.([^.]+)$/;
+
+
+WindowsCppEntriesProvider.FUNC_RE =
+    /^\s+0001:[0-9a-fA-F]{8}\s+([_\?@$0-9a-zA-Z]+)\s+([0-9a-fA-F]{8}).*$/;
+
+
+WindowsCppEntriesProvider.IMAGE_BASE_RE =
+    /^\s+0000:00000000\s+___ImageBase\s+([0-9a-fA-F]{8}).*$/;
+
+
+// This is almost a constant on Windows.
+WindowsCppEntriesProvider.EXE_IMAGE_BASE = 0x00400000;
+
+
+WindowsCppEntriesProvider.prototype.loadSymbols = function(libName) {
+  libName = this.targetRootFS + libName;
+  const fileNameFields = libName.match(WindowsCppEntriesProvider.FILENAME_RE);
+  if (!fileNameFields) return;
+  const mapFileName = fileNameFields[1] + '.map';
+  this.moduleType_ = fileNameFields[2].toLowerCase();
+  try {
+    this.symbols = read(mapFileName);
+  } catch (e) {
+    // If .map file cannot be found let's not panic.
+    this.symbols = '';
+  }
+};
+
+
+WindowsCppEntriesProvider.prototype.parseNextLine = function() {
+  const lineEndPos = this.symbols.indexOf('\r\n', this.parsePos);
+  if (lineEndPos == -1) {
+    return false;
+  }
+
+  const line = this.symbols.substring(this.parsePos, lineEndPos);
+  this.parsePos = lineEndPos + 2;
+
+  // Image base entry is above all other symbols, so we can just
+  // terminate parsing.
+  const imageBaseFields = line.match(WindowsCppEntriesProvider.IMAGE_BASE_RE);
+  if (imageBaseFields) {
+    const imageBase = parseInt(imageBaseFields[1], 16);
+    if ((this.moduleType_ == 'exe') !=
+        (imageBase == WindowsCppEntriesProvider.EXE_IMAGE_BASE)) {
+      return false;
+    }
+  }
+
+  const fields = line.match(WindowsCppEntriesProvider.FUNC_RE);
+  return fields ?
+      { name: this.unmangleName(fields[1]), start: parseInt(fields[2], 16) } :
+      null;
+};
+
+
+/**
+ * Performs very simple unmangling of C++ names.
+ *
+ * Does not handle arguments and template arguments. The mangled names have
+ * the form:
+ *
+ *   ?LookupInDescriptor@JSObject@internal@v8@@...arguments info...
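+ *
+ * For illustration, a name of the above form would unmangle to something like
+ * (a rough sketch, not a full description of MSVC name mangling):
+ *
+ *   v8::internal::JSObject::LookupInDescriptor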
+ */
+WindowsCppEntriesProvider.prototype.unmangleName = function(name) {
+  // Empty or non-mangled name.
+  if (name.length < 1 || name.charAt(0) != '?') return name;
+  const nameEndPos = name.indexOf('@@');
+  const components = name.substring(1, nameEndPos).split('@');
+  components.reverse();
+  return components.join('::');
+};
+
+
+export class ArgumentsProcessor extends BaseArgumentsProcessor {
+  getArgsDispatch() {
+    let dispatch = {
+      '-j': ['stateFilter', TickProcessor.VmStates.JS,
+          'Show only ticks from JS VM state'],
+      '-g': ['stateFilter', TickProcessor.VmStates.GC,
+          'Show only ticks from GC VM state'],
+      '-p': ['stateFilter', TickProcessor.VmStates.PARSER,
+          'Show only ticks from PARSER VM state'],
+      '-b': ['stateFilter', TickProcessor.VmStates.BYTECODE_COMPILER,
+          'Show only ticks from BYTECODE_COMPILER VM state'],
+      '-c': ['stateFilter', TickProcessor.VmStates.COMPILER,
+          'Show only ticks from COMPILER VM state'],
+      '-o': ['stateFilter', TickProcessor.VmStates.OTHER,
+          'Show only ticks from OTHER VM state'],
+      '-e': ['stateFilter', TickProcessor.VmStates.EXTERNAL,
+          'Show only ticks from EXTERNAL VM state'],
+      '--filter-runtime-timer': ['runtimeTimerFilter', null,
+              'Show only ticks matching the given runtime timer scope'],
+      '--call-graph-size': ['callGraphSize', TickProcessor.CALL_GRAPH_SIZE,
+          'Set the call graph size'],
+      '--ignore-unknown': ['ignoreUnknown', true,
+          'Exclude ticks of unknown code entries from processing'],
+      '--separate-ic': ['separateIc', parseBool,
+          'Separate IC entries'],
+      '--separate-bytecodes': ['separateBytecodes', parseBool,
+          'Separate Bytecode entries'],
+      '--separate-builtins': ['separateBuiltins', parseBool,
+          'Separate Builtin entries'],
+      '--separate-stubs': ['separateStubs', parseBool,
+          'Separate Stub entries'],
+      '--unix': ['platform', 'unix',
+          'Specify that we are running on *nix platform'],
+      '--windows': ['platform', 'windows',
+          'Specify that we are running on Windows platform'],
+      '--mac': ['platform', 'mac',
+          'Specify that we are running on Mac OS X platform'],
+      '--nm': ['nm', 'nm',
+          'Specify the \'nm\' executable to use (e.g. --nm=/my_dir/nm)'],
+      '--objdump': ['objdump', 'objdump',
+          'Specify the \'objdump\' executable to use (e.g. --objdump=/my_dir/objdump)'],
+      '--target': ['targetRootFS', '',
+          'Specify the target root directory for cross environment'],
+      '--apk-embedded-library': ['apkEmbeddedLibrary', '',
+          'Specify the path of the embedded library for Android traces'],
+      '--range': ['range', 'auto,auto',
+          'Specify the range limit as [start],[end]'],
+      '--distortion': ['distortion', 0,
+          'Specify the logging overhead in picoseconds'],
+      '--source-map': ['sourceMap', null,
+          'Specify the source map that should be used for output'],
+      '--timed-range': ['timedRange', true,
+          'Ignore ticks before first and after last Date.now() call'],
+      '--pairwise-timed-range': ['pairwiseTimedRange', true,
+          'Ignore ticks outside pairs of Date.now() calls'],
+      '--only-summary': ['onlySummary', true,
+          'Print only tick summary, exclude other information'],
+      '--preprocess': ['preprocessJson', true,
+          'Preprocess for consumption with web interface']
+    };
+    dispatch['--js'] = dispatch['-j'];
+    dispatch['--gc'] = dispatch['-g'];
+    dispatch['--compiler'] = dispatch['-c'];
+    dispatch['--other'] = dispatch['-o'];
+    dispatch['--external'] = dispatch['-e'];
+    dispatch['--ptr'] = dispatch['--pairwise-timed-range'];
+    return dispatch;
+  }
+
+  getDefaultResults() {
+    return {
+      logFileName: 'v8.log',
+      platform: 'unix',
+      stateFilter: null,
+      callGraphSize: 5,
+      ignoreUnknown: false,
+      separateIc: true,
+      separateBytecodes: false,
+      separateBuiltins: true,
+      separateStubs: true,
+      preprocessJson: null,
+      targetRootFS: '',
+      nm: 'nm',
+      objdump: 'objdump',
+      range: 'auto,auto',
+      distortion: 0,
+      timedRange: false,
+      pairwiseTimedRange: false,
+      onlySummary: false,
+      runtimeTimerFilter: null,
+    };
+  }
+}
diff --git a/src/third_party/v8/tools/toolchain/BUILD.gn b/src/third_party/v8/tools/toolchain/BUILD.gn
new file mode 100644
index 0000000..b252c5e
--- /dev/null
+++ b/src/third_party/v8/tools/toolchain/BUILD.gn
@@ -0,0 +1,93 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/toolchain/gcc_toolchain.gni")
+
+gcc_toolchain("mips-bundled") {
+  toolprefix = rebase_path("//tools/mips_toolchain/bin/mips-mti-linux-gnu-",
+                           root_build_dir)
+  cc = "${toolprefix}gcc"
+  cxx = "${toolprefix}g++"
+
+  readelf = "${toolprefix}readelf"
+  nm = "${toolprefix}nm"
+  ar = "${toolprefix}ar"
+  ld = cxx
+
+  # Flag that sets endianness
+  extra_ldflags = "-EB"
+  extra_cppflags = "-EB"
+
+  toolchain_args = {
+    current_cpu = "mips"
+    current_os = "linux"
+    is_clang = false
+  }
+}
+
+gcc_toolchain("mips64-bundled") {
+  toolprefix = rebase_path("//tools/mips_toolchain/bin/mips-mti-linux-gnu-",
+                           root_build_dir)
+  cc = "${toolprefix}gcc"
+  cxx = "${toolprefix}g++"
+
+  readelf = "${toolprefix}readelf"
+  nm = "${toolprefix}nm"
+  ar = "${toolprefix}ar"
+  ld = cxx
+
+  # Flag that sets endianness and ABI
+  extra_ldflags = "-EB -mabi=64"
+  extra_cppflags = "-EB -mabi=64"
+
+  toolchain_args = {
+    current_cpu = "mips64"
+    current_os = "linux"
+    is_clang = false
+  }
+}
+
+gcc_toolchain("mipsel-bundled") {
+  toolprefix = rebase_path("//tools/mips_toolchain/bin/mips-mti-linux-gnu-",
+                           root_build_dir)
+  cc = "${toolprefix}gcc"
+  cxx = "${toolprefix}g++"
+
+  readelf = "${toolprefix}readelf"
+  nm = "${toolprefix}nm"
+  ar = "${toolprefix}ar"
+  ld = cxx
+
+  # Flag that sets endianness
+  extra_ldflags = "-EL"
+  extra_cppflags = "-EL"
+
+  toolchain_args = {
+    current_cpu = "mipsel"
+    current_os = "linux"
+    is_clang = false
+  }
+}
+
+gcc_toolchain("mips64el-bundled") {
+  toolprefix = rebase_path("//tools/mips_toolchain/bin/mips-mti-linux-gnu-",
+                           root_build_dir)
+  cc = "${toolprefix}gcc"
+  cxx = "${toolprefix}g++"
+
+  readelf = "${toolprefix}readelf"
+  nm = "${toolprefix}nm"
+  ar = "${toolprefix}ar"
+  ld = cxx
+
+  # Flag that sets endianness and ABI
+  extra_ldflags = "-EL -mabi=64"
+  extra_cppflags = "-EL -mabi=64"
+
+  toolchain_args = {
+    current_cpu = "mips64el"
+    current_os = "linux"
+    is_clang = false
+  }
+}
diff --git a/src/third_party/v8/tools/torque/format-torque.py b/src/third_party/v8/tools/torque/format-torque.py
new file mode 100755
index 0000000..16fc798
--- /dev/null
+++ b/src/third_party/v8/tools/torque/format-torque.py
@@ -0,0 +1,160 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""This program either generates the parser files for Torque, generating
+the source and header files directly in V8's src directory."""
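+
+# Example usage (the file path is illustrative):
+#   tools/torque/format-torque.py -i src/builtins/array-foreach.tq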
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import subprocess
+import sys
+import re
+from subprocess import Popen, PIPE
+
+kPercentEscape = r'α'  # Unicode alpha
+kDerefEscape = r'☆'  # Unicode star
+kAddressofEscape = r'⌂'  # Unicode house
+
+def preprocess(input):
+  # Special handling of '%' for intrinsics: turn the percent into a Unicode
+  # character so that it gets treated as part of the intrinsic's name if it's
+  # already adjacent to it.
+  input = re.sub(r'%([A-Za-z])', kPercentEscape + r'\1', input)
+  # Similarly, avoid treating * and & as binary operators when they're
+  # probably used as address operators.
+  input = re.sub(r'([^/])\*([a-zA-Z(])', r'\1' + kDerefEscape + r'\2', input)
+  input = re.sub(r'&([a-zA-Z(])', kAddressofEscape + r'\1', input)
+
+
+  input = re.sub(r'(if\s+)constexpr(\s*\()', r'\1/*COxp*/\2', input)
+  input = re.sub(r'(\s+)operator\s*(\'[^\']+\')', r'\1/*_OPE \2*/', input)
+  input = re.sub(r'\btypeswitch\s*(\([^{]*\))\s{', r' if /*tPsW*/ \1 {', input)
+  input = re.sub(r'\bcase\s*(\([^{]*\))\s*:\s*deferred\s*{', r' if /*cAsEdEfF*/ \1 {', input)
+  input = re.sub(r'\bcase\s*(\([^{]*\))\s*:\s*{', r' if /*cA*/ \1 {', input)
+
+  input = re.sub(r'\bgenerates\s+\'([^\']+)\'\s*',
+      r'_GeNeRaTeS00_/*\1@*/', input)
+  input = re.sub(r'\bconstexpr\s+\'([^\']+)\'\s*',
+      r' _CoNsExP_/*\1@*/', input)
+  input = re.sub(r'\notherwise',
+      r'\n otherwise', input)
+  input = re.sub(r'(\n\s*\S[^\n]*\s)otherwise',
+      r'\1_OtheSaLi', input)
+  input = re.sub(r'@if\(', r'@iF(', input)
+  input = re.sub(r'@export', r'@eXpOrT', input)
+  input = re.sub(r'js-implicit[ \n]+', r'jS_iMpLiCiT_', input)
+  input = re.sub(r'^(\s*namespace\s+[a-zA-Z_0-9]+\s*{)(\s*)$', r'\1}\2', input, flags = re.MULTILINE)
+
+  # includes are not recognized, change them into comments so that the
+  # formatter ignores them first, until we can figure out a way to format cpp
+  # includes within a JS file.
+  input = re.sub(r'^#include', r'// InClUdE', input, flags=re.MULTILINE)
+
+  return input
+
+def postprocess(output):
+  output = re.sub(r'\/\*COxp\*\/', r'constexpr', output)
+  output = re.sub(r'(\S+)\s*: type([,>])', r'\1: type\2', output)
+  output = re.sub(r'(\n\s*)labels( [A-Z])', r'\1    labels\2', output)
+  output = re.sub(r'\/\*_OPE \'([^\']+)\'\*\/', r"operator '\1'", output)
+  output = re.sub(r'\bif\s*\/\*tPsW\*\/', r'typeswitch', output)
+  output = re.sub(r'\bif\s*\/\*cA\*\/\s*(\([^{]*\))\s*{', r'case \1: {', output)
+  output = re.sub(r'\bif\s*\/\*cAsEdEfF\*\/\s*(\([^{]*\))\s*{', r'case \1: deferred {', output)
+  output = re.sub(r'\n_GeNeRaTeS00_\s*\/\*([^@]+)@\*\/',
+      r"\n    generates '\1'", output)
+  output = re.sub(r'_GeNeRaTeS00_\s*\/\*([^@]+)@\*\/',
+      r"generates '\1'", output)
+  output = re.sub(r'_CoNsExP_\s*\/\*([^@]+)@\*\/',
+      r"constexpr '\1'", output)
+  output = re.sub(r'\n(\s+)otherwise',
+      r"\n\1    otherwise", output)
+  output = re.sub(r'\n(\s+)_OtheSaLi',
+      r"\n\1otherwise", output)
+  output = re.sub(r'_OtheSaLi',
+      r"otherwise", output)
+  output = re.sub(r'@iF\(', r'@if(', output)
+  output = re.sub(r'@eXpOrT',
+      r"@export", output)
+  output = re.sub(r'jS_iMpLiCiT_',
+      r"js-implicit ", output)
+  output = re.sub(r'}\n *label ', r'} label ', output)
+  output = re.sub(r'^(\s*namespace\s+[a-zA-Z_0-9]+\s*{)}(\s*)$', r'\1\2', output, flags=re.MULTILINE)
+
+  output = re.sub(kPercentEscape, r'%', output)
+  output = re.sub(kDerefEscape, r'*', output)
+  output = re.sub(kAddressofEscape, r'&', output)
+
+
+  output = re.sub( r'^// InClUdE',r'#include', output, flags=re.MULTILINE)
+
+  return output
+
+def process(filename, lint, should_format):
+  with open(filename, 'r') as content_file:
+    content = content_file.read()
+
+  original_input = content
+
+  if sys.platform.startswith('win'):
+    p = Popen(['clang-format', '-assume-filename=.ts'], stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
+  else:
+    p = Popen(['clang-format', '-assume-filename=.ts'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
+  output, err = p.communicate(preprocess(content))
+  output = postprocess(output)
+  rc = p.returncode
+  if rc != 0:
+    print("error code " + str(rc) + " running clang-format. Exiting...")
+    sys.exit(rc)
+
+  if output != original_input:
+    if lint:
+      print(filename + ' requires formatting', file=sys.stderr)
+
+    if should_format:
+      with open(filename, 'w') as output_file:
+        output_file.write(output)
+
+def print_usage():
+  print('format-torque -i file1[, file2[, ...]]')
+  print('    format and overwrite input files')
+  print('format-torque -l file1[, file2[, ...]]')
+  print('    merely indicate which files need formatting')
+
+def Main():
+  if len(sys.argv) < 3:
+    print("error: at least 2 arguments required")
+    print_usage()
+    sys.exit(-1)
+
+  def is_option(arg):
+    return arg in ['-i', '-l', '-il']
+
+  should_format = lint = False
+  use_stdout = True
+
+  flag, files = sys.argv[1], sys.argv[2:]
+  if is_option(flag):
+    if '-i' == flag:
+      should_format = True
+    elif '-l' == flag:
+      lint = True
+    else:
+      lint = True
+      should_format = True
+  else:
+    print("error: -i and/or -l flags must be specified")
+      print_usage()
+      sys.exit(-1)
+
+  for filename in files:
+    process(filename, lint, should_format)
+
+  return 0
+
+if __name__ == '__main__':
+  sys.exit(Main())
diff --git a/src/third_party/v8/tools/torque/vim-torque/README.md b/src/third_party/v8/tools/torque/vim-torque/README.md
new file mode 100644
index 0000000..fbdef0f
--- /dev/null
+++ b/src/third_party/v8/tools/torque/vim-torque/README.md
@@ -0,0 +1,33 @@
+# V8 Torque syntax support for vim
+
+This plugin adds syntax highlighting support for the V8 Torque domain-specific
+language.
+
+## Installation
+
+Installation depends on your favorite plugin manager.
+
+**Pathogen:**
+
+Run
+
+```sh
+ln -s $V8/tools/torque/vim-torque ~/.vim/bundle/vim-torque
+# or ~/.config/nvim/bundle/vim-torque for Neovim
+```
+
+**Vundle:**
+
+Add this line to your `.vimrc` or `~/.config/nvim/init.vim`.
+
+```vim
+Plugin 'file:///path/to/v8/tools/torque/vim-torque'
+```
+
+**vim-plug:**
+
+Add this line to your `.vimrc` or `~/.config/nvim/init.vim`.
+
+```vim
+Plug '~/path/to/v8/tools/torque/vim-torque'
+```
diff --git a/src/third_party/v8/tools/torque/vim-torque/ftdetect/torque.vim b/src/third_party/v8/tools/torque/vim-torque/ftdetect/torque.vim
new file mode 100644
index 0000000..ead2c5e
--- /dev/null
+++ b/src/third_party/v8/tools/torque/vim-torque/ftdetect/torque.vim
@@ -0,0 +1 @@
+au BufRead,BufNewFile *.tq set filetype=torque
diff --git a/src/third_party/v8/tools/torque/vim-torque/syntax/torque.vim b/src/third_party/v8/tools/torque/vim-torque/syntax/torque.vim
new file mode 100644
index 0000000..592e870
--- /dev/null
+++ b/src/third_party/v8/tools/torque/vim-torque/syntax/torque.vim
@@ -0,0 +1,84 @@
+" Copyright 2018 the V8 project authors. All rights reserved.
+" Use of this source code is governed by a BSD-style license that can be
+" found in the LICENSE file.
+
+if !exists("main_syntax")
+  " quit when a syntax file was already loaded
+  if exists("b:current_syntax")
+    finish
+  endif
+  let main_syntax = 'torque'
+elseif exists("b:current_syntax") && b:current_syntax == "torque"
+  finish
+endif
+
+let s:cpo_save = &cpo
+set cpo&vim
+
+syn match   torqueLineComment      "\/\/.*" contains=@Spell
+syn region  torqueComment	   start="/\*"  end="\*/" contains=@Spell
+syn region  torqueStringS	   start=+'+  skip=+\\\\\|\\'+  end=+'\|$+
+
+syn keyword torqueAssert assert check debug unreachable
+syn keyword torqueAtom True False Undefined TheHole Null
+syn keyword torqueBoolean true false
+syn keyword torqueBranch break continue goto
+syn keyword torqueConditional if else typeswitch otherwise
+syn match torqueConstant /\v<[A-Z][A-Z0-9_]+>/
+syn match torqueConstant /\v<k[A-Z][A-Za-z0-9]*>/
+syn keyword torqueFunction macro builtin runtime intrinsic
+syn keyword torqueKeyword cast convert from_constexpr min max unsafe_cast js-implicit implicit
+syn keyword torqueLabel case
+syn keyword torqueMatching try label catch
+syn keyword torqueModifier extern javascript constexpr transitioning transient weak export
+syn match torqueNumber /\v<[0-9]+(\.[0-9]*)?>/
+syn match torqueNumber /\v<0x[0-9a-fA-F]+>/
+syn keyword torqueOperator operator
+syn keyword torqueRel extends generates labels
+syn keyword torqueRepeat while for of
+syn keyword torqueStatement return tail
+syn keyword torqueStructure module struct type class
+syn keyword torqueVariable const let
+
+syn match torqueType /\v(\<)@<=([A-Za-z][0-9A-Za-z_]*)(>)@=/
+syn match torqueType /\v(:\s*(constexpr\s*)?)@<=([A-Za-z][0-9A-Za-z_]*)/
+" Include some common types also
+syn keyword torqueType Arguments void never
+syn keyword torqueType Tagged Smi HeapObject Object
+syn keyword torqueType int32 uint32 int64 intptr uintptr float32 float64
+syn keyword torqueType bool string
+syn keyword torqueType int31 RawPtr AbstractCode Code JSReceiver Context String
+syn keyword torqueType Oddball HeapNumber Number BigInt Numeric Boolean JSProxy
+syn keyword torqueType JSObject JSArray JSFunction JSBoundFunction Callable Map
+
+hi def link torqueAssert		Statement
+hi def link torqueAtom		Constant
+hi def link torqueBoolean		Boolean
+hi def link torqueBranch		Conditional
+hi def link torqueComment		Comment
+hi def link torqueConditional		Conditional
+hi def link torqueConstant		Constant
+hi def link torqueFunction		Function
+hi def link torqueKeyword		Keyword
+hi def link torqueLabel		Label
+hi def link torqueLineComment		Comment
+hi def link torqueMatching		Exception
+hi def link torqueModifier		StorageClass
+hi def link torqueNumber		Number
+hi def link torqueOperator		Operator
+hi def link torqueRel		StorageClass
+hi def link torqueRepeat		Repeat
+hi def link torqueStatement		Statement
+hi def link torqueStringS		String
+hi def link torqueStructure		Structure
+hi def link torqueType		Type
+hi def link torqueVariable		Identifier
+
+let b:current_syntax = "torque"
+if main_syntax == 'torque'
+  unlet main_syntax
+endif
+let &cpo = s:cpo_save
+unlet s:cpo_save
+
+" vim: set ts=8:
diff --git a/src/third_party/v8/tools/tracing/proto-converter/.nvmrc b/src/third_party/v8/tools/tracing/proto-converter/.nvmrc
new file mode 100644
index 0000000..a7b32ad
--- /dev/null
+++ b/src/third_party/v8/tools/tracing/proto-converter/.nvmrc
@@ -0,0 +1 @@
+v11.9.0
diff --git a/src/third_party/v8/tools/tracing/proto-converter/package-lock.json b/src/third_party/v8/tools/tracing/proto-converter/package-lock.json
new file mode 100644
index 0000000..52e52b3
--- /dev/null
+++ b/src/third_party/v8/tools/tracing/proto-converter/package-lock.json
@@ -0,0 +1,123 @@
+{
+  "requires": true,
+  "lockfileVersion": 1,
+  "dependencies": {
+    "@protobufjs/aspromise": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz",
+      "integrity": "sha1-m4sMxmPWaafY9vXQiToU00jzD78=",
+      "dev": true
+    },
+    "@protobufjs/base64": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz",
+      "integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==",
+      "dev": true
+    },
+    "@protobufjs/codegen": {
+      "version": "2.0.4",
+      "resolved": "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz",
+      "integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==",
+      "dev": true
+    },
+    "@protobufjs/eventemitter": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz",
+      "integrity": "sha1-NVy8mLr61ZePntCV85diHx0Ga3A=",
+      "dev": true
+    },
+    "@protobufjs/fetch": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz",
+      "integrity": "sha1-upn7WYYUr2VwDBYZ/wbUVLDYTEU=",
+      "dev": true,
+      "requires": {
+        "@protobufjs/aspromise": "^1.1.1",
+        "@protobufjs/inquire": "^1.1.0"
+      }
+    },
+    "@protobufjs/float": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz",
+      "integrity": "sha1-Xp4avctz/Ap8uLKR33jIy9l7h9E=",
+      "dev": true
+    },
+    "@protobufjs/inquire": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz",
+      "integrity": "sha1-/yAOPnzyQp4tyvwRQIKOjMY48Ik=",
+      "dev": true
+    },
+    "@protobufjs/path": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz",
+      "integrity": "sha1-bMKyDFya1q0NzP0hynZz2Nf79o0=",
+      "dev": true
+    },
+    "@protobufjs/pool": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz",
+      "integrity": "sha1-Cf0V8tbTq/qbZbw2ZQbWrXhG/1Q=",
+      "dev": true
+    },
+    "@protobufjs/utf8": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz",
+      "integrity": "sha1-p3c2C1s5oaLlEG+OhY8v0tBgxXA=",
+      "dev": true
+    },
+    "@types/long": {
+      "version": "4.0.0",
+      "resolved": "https://registry.npmjs.org/@types/long/-/long-4.0.0.tgz",
+      "integrity": "sha512-1w52Nyx4Gq47uuu0EVcsHBxZFJgurQ+rTKS3qMHxR1GY2T8c2AJYd6vZoZ9q1rupaDjU0yT+Jc2XTyXkjeMA+Q==",
+      "dev": true
+    },
+    "@types/node": {
+      "version": "11.11.4",
+      "resolved": "https://registry.npmjs.org/@types/node/-/node-11.11.4.tgz",
+      "integrity": "sha512-02tIL+QIi/RW4E5xILdoAMjeJ9kYq5t5S2vciUdFPXv/ikFTb0zK8q9vXkg4+WAJuYXGiVT1H28AkD2C+IkXVw==",
+      "dev": true
+    },
+    "long": {
+      "version": "4.0.0",
+      "resolved": "https://registry.npmjs.org/long/-/long-4.0.0.tgz",
+      "integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==",
+      "dev": true
+    },
+    "protobufjs": {
+      "version": "6.8.8",
+      "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-6.8.8.tgz",
+      "integrity": "sha512-AAmHtD5pXgZfi7GMpllpO3q1Xw1OYldr+dMUlAnffGTAhqkg72WdmSY71uKBF/JuyiKs8psYbtKrhi0ASCD8qw==",
+      "dev": true,
+      "requires": {
+        "@protobufjs/aspromise": "^1.1.2",
+        "@protobufjs/base64": "^1.1.2",
+        "@protobufjs/codegen": "^2.0.4",
+        "@protobufjs/eventemitter": "^1.1.0",
+        "@protobufjs/fetch": "^1.1.0",
+        "@protobufjs/float": "^1.0.2",
+        "@protobufjs/inquire": "^1.1.0",
+        "@protobufjs/path": "^1.1.2",
+        "@protobufjs/pool": "^1.1.0",
+        "@protobufjs/utf8": "^1.1.0",
+        "@types/long": "^4.0.0",
+        "@types/node": "^10.1.0",
+        "long": "^4.0.0"
+      },
+      "dependencies": {
+        "@types/node": {
+          "version": "10.14.1",
+          "resolved": "https://registry.npmjs.org/@types/node/-/node-10.14.1.tgz",
+          "integrity": "sha512-Rymt08vh1GaW4vYB6QP61/5m/CFLGnFZP++bJpWbiNxceNa6RBipDmb413jvtSf/R1gg5a/jQVl2jY4XVRscEA==",
+          "dev": true
+        }
+      }
+    },
+    "typescript": {
+      "version": "3.3.4000",
+      "resolved": "https://registry.npmjs.org/typescript/-/typescript-3.3.4000.tgz",
+      "integrity": "sha512-jjOcCZvpkl2+z7JFn0yBOoLQyLoIkNZAs/fYJkUG6VKy6zLPHJGfQJYFHzibB6GJaF/8QrcECtlQ5cpvRHSMEA==",
+      "dev": true
+    }
+  }
+}
diff --git a/src/third_party/v8/tools/tracing/proto-converter/package.json b/src/third_party/v8/tools/tracing/proto-converter/package.json
new file mode 100644
index 0000000..41401a1
--- /dev/null
+++ b/src/third_party/v8/tools/tracing/proto-converter/package.json
@@ -0,0 +1,11 @@
+{
+  "private": true,
+  "scripts": {
+    "build": "tsc proto-to-json.ts"
+  },
+  "devDependencies": {
+    "@types/node": "^11.11.4",
+    "protobufjs": "^6.8.8",
+    "typescript": "^3.3.4000"
+  }
+}
diff --git a/src/third_party/v8/tools/tracing/proto-converter/proto-to-json.ts b/src/third_party/v8/tools/tracing/proto-converter/proto-to-json.ts
new file mode 100644
index 0000000..2427410
--- /dev/null
+++ b/src/third_party/v8/tools/tracing/proto-converter/proto-to-json.ts
@@ -0,0 +1,132 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import * as fs from 'fs';
+import * as path from 'path';
+import { Root } from 'protobufjs';
+
+// Requirements: node 10.4.0+, npm
+
+// Setup:
+// (nvm is optional, you can also just install node manually)
+// $ nvm use
+// $ npm install
+// $ npm run build
+
+// Usage: node proto-to-json.js path_to_trace.proto input_file output_file
+
+// Converts a binary proto file to a 'Trace Event Format' compatible .json file
+// that can be used with chrome://tracing. Documentation of this format:
+// https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU
+
+// Attempts to reproduce the logic of the JSONTraceWriter in V8 in terms of the
+// JSON fields it will include/exclude based on the data present in the trace
+// event.
+
+// TODO(petermarshall): Replace with Array#flat once it lands in Node.js.
+const flatten = <T>(a: T[], b: T[]) => { a.push(...b); return a; }
+
+// Convert a string representing an int or uint (64 bit) to a Number or throw
+// if the value won't fit.
+function parseIntOrThrow(int: string) {
+  if (BigInt(int) > Number.MAX_SAFE_INTEGER) {
+    throw new Error("Loss of int precision");
+  }
+  return Number(int);
+}
+
+function uint64AsHexString(val : string) : string {
+  return "0x" + BigInt(val).toString(16);
+}
+
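+// Extract the value of a trace event argument from its protobufjs JSON form,
+// converting 64-bit integers to Numbers and pointers to hex strings. Returns
+// undefined for argument kinds that are not handled here.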
+function parseArgValue(arg: any) : any {
+  if (arg.jsonValue) {
+    return JSON.parse(arg.jsonValue);
+  }
+  if (typeof arg.stringValue !== 'undefined') {
+    return arg.stringValue;
+  }
+  if (typeof arg.uintValue !== 'undefined') {
+    return parseIntOrThrow(arg.uintValue);
+  }
+  if (typeof arg.intValue !== 'undefined') {
+    return parseIntOrThrow(arg.intValue);
+  }
+  if (typeof arg.boolValue !== 'undefined') {
+    return arg.boolValue;
+  }
+  if (typeof arg.doubleValue !== 'undefined') {
+    // Handle [-]Infinity and NaN which protobufjs outputs as strings here.
+    return typeof arg.doubleValue === 'string' ?
+        arg.doubleValue : Number(arg.doubleValue);
+  }
+  if (typeof arg.pointerValue !== 'undefined') {
+    return uint64AsHexString(arg.pointerValue);
+  }
+}
+
+// These come from
+// https://cs.chromium.org/chromium/src/base/trace_event/common/trace_event_common.h
+const TRACE_EVENT_FLAG_HAS_ID: number = 1 << 1;
+const TRACE_EVENT_FLAG_FLOW_IN: number = 1 << 8;
+const TRACE_EVENT_FLAG_FLOW_OUT: number = 1 << 9;
+
+async function main() {
+  const root = new Root();
+  const { resolvePath } = root;
+  const numDirectoriesToStrip = 2;
+  let initialOrigin: string|null;
+  root.resolvePath = (origin, target) => {
+    if (!origin) {
+      initialOrigin = target;
+      for (let i = 0; i <= numDirectoriesToStrip; i++) {
+        initialOrigin = path.dirname(initialOrigin);
+      }
+      return resolvePath(origin, target);
+    }
+    return path.resolve(initialOrigin!, target);
+  };
+  const traceProto = await root.load(process.argv[2]);
+  const Trace = traceProto.lookupType("Trace");
+  const payload = await fs.promises.readFile(process.argv[3]);
+  const msg = Trace.decode(payload).toJSON();
+  const output = {
+    traceEvents: msg.packet
+      .filter((packet: any) => !!packet.chromeEvents)
+      .map((packet: any) => packet.chromeEvents.traceEvents)
+      .map((traceEvents: any) => traceEvents.map((e: any) => {
+
+        const bind_id = (e.flags & (TRACE_EVENT_FLAG_FLOW_IN |
+          TRACE_EVENT_FLAG_FLOW_OUT)) ? e.bindId : undefined;
+        const scope = (e.flags & TRACE_EVENT_FLAG_HAS_ID) && e.scope ?
+            e.scope : undefined;
+
+        return {
+          pid: e.processId,
+          tid: e.threadId,
+          ts: parseIntOrThrow(e.timestamp),
+          tts: parseIntOrThrow(e.threadTimestamp),
+          ph: String.fromCodePoint(e.phase),
+          cat: e.categoryGroupName,
+          name: e.name,
+          dur: parseIntOrThrow(e.duration),
+          tdur: parseIntOrThrow(e.threadDuration),
+          bind_id: bind_id,
+          flow_in: e.flags & TRACE_EVENT_FLAG_FLOW_IN ? true : undefined,
+          flow_out: e.flags & TRACE_EVENT_FLAG_FLOW_OUT ? true : undefined,
+          scope: scope,
+          id: (e.flags & TRACE_EVENT_FLAG_HAS_ID) ?
+              uint64AsHexString(e.id) : undefined,
+          args: (e.args || []).reduce((js_args: any, proto_arg: any) => {
+            js_args[proto_arg.name] = parseArgValue(proto_arg);
+            return js_args;
+          }, {})
+        };
+      }))
+      .reduce(flatten, [])
+  };
+  await fs.promises.writeFile(process.argv[4], JSON.stringify(output, null, 2));
+}
+
+main().catch(console.error);
diff --git a/src/third_party/v8/tools/tracing/proto-converter/tsconfig.json b/src/third_party/v8/tools/tracing/proto-converter/tsconfig.json
new file mode 100644
index 0000000..defc4ef
--- /dev/null
+++ b/src/third_party/v8/tools/tracing/proto-converter/tsconfig.json
@@ -0,0 +1,12 @@
+{
+  "compilerOptions": {
+    "target": "ES2018",
+    "module": "commonjs",
+    "lib": ["es6","dom"],
+    "outDir": "lib",
+    "rootDir": "src",
+    "strict": true,
+    "esModuleInterop": true,
+    "resolveJsonModule": true
+  }
+}
diff --git a/src/third_party/v8/tools/try_perf.py b/src/third_party/v8/tools/try_perf.py
new file mode 100755
index 0000000..2c9c382
--- /dev/null
+++ b/src/third_party/v8/tools/try_perf.py
@@ -0,0 +1,124 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import argparse
+import os
+import subprocess
+import sys
+
+BOTS = {
+  '--chromebook': 'v8_chromebook_perf_try',
+  '--linux32': 'v8_linux32_perf_try',
+  '--linux64': 'v8_linux64_perf_try',
+  '--linux64_atom': 'v8_linux64_atom_perf_try',
+  '--nexus5': 'v8_nexus5_perf_try',
+  '--nexus7': 'v8_nexus7_perf_try',
+  '--nokia1': 'v8_nokia1_perf_try',
+  '--odroid32': 'v8_odroid32_perf_try',
+  '--pixel2': 'v8_pixel2_perf_try',
+}
+
+DEFAULT_BOTS = [
+  'v8_chromebook_perf_try',
+  'v8_linux32_perf_try',
+  'v8_linux64_perf_try',
+]
+
+PUBLIC_BENCHMARKS = [
+  'arewefastyet',
+  'ares6',
+  'blazor',
+  'compile',
+  'embenchen',
+  'emscripten',
+  'jetstream',
+  'jsbench',
+  'jstests',
+  'kraken_orig',
+  'massive',
+  'memory',
+  'octane',
+  'octane-noopt',
+  'octane-pr',
+  'octane-tf',
+  'octane-tf-pr',
+  'sunspider',
+  'unity',
+  'wasm',
+  'web-tooling-benchmark',
+]
+
+V8_BASE = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
+
+def main():
+  parser = argparse.ArgumentParser(description='')
+  parser.add_argument('benchmarks', nargs='+', help='The benchmarks to run.')
+  parser.add_argument('--extra-flags', default='',
+                      help='Extra flags to be passed to the executable.')
+  parser.add_argument('-r', '--revision', type=str, default=None,
+                      help='Revision (use full hash!) to use for the try job; '
+                           'default: the revision will be determined by the '
+                           'try server; see its waterfall for more info')
+  parser.add_argument('-v', '--verbose', action='store_true',
+                      help='Print debug information')
+  parser.add_argument('-c', '--confidence-level', type=float,
+                      help='Repeatedly runs each benchmark until specified '
+                      'confidence level is reached. The value is interpreted '
+                      'as the number of standard deviations from the mean that '
+                      'all values must lie within. Typical values are 1, 2 and '
+                      '3 and correspond to 68%%, 95%% and 99.7%% probability '
+                      'that the measured value is within 0.1%% of the true '
+                      'value. Larger values result in more retries and thus '
+                      'longer runtime, but also provide more reliable results.')
+  for option in sorted(BOTS):
+    parser.add_argument(
+        option, dest='bots', action='append_const', const=BOTS[option],
+        help='Add %s trybot.' % BOTS[option])
+  options = parser.parse_args()
+  if not options.bots:
+    print('No trybots specified. Using default %s.' % ','.join(DEFAULT_BOTS))
+    options.bots = DEFAULT_BOTS
+
+  if not options.benchmarks:
+    print('Please specify the benchmarks to run as arguments.')
+    return 1
+
+  for benchmark in options.benchmarks:
+    if benchmark not in PUBLIC_BENCHMARKS:
+      print('%s not found in our benchmark list. The respective trybot might '
+            'fail, unless you run something this script isn\'t aware of. '
+            'Available public benchmarks: %s' % (benchmark, PUBLIC_BENCHMARKS))
+      print('Proceed anyways? [Y/n] ', end=' ')
+      answer = sys.stdin.readline().strip()
+      if answer != "" and answer != "Y" and answer != "y":
+        return 1
+
+  assert '"' not in options.extra_flags and '\'' not in options.extra_flags, (
+      'Invalid flag specification.')
+
+  # Ensure depot_tools are updated.
+  subprocess.check_output(
+      'update_depot_tools', shell=True, stderr=subprocess.STDOUT, cwd=V8_BASE)
+
+  cmd = ['git cl try', '-B', 'luci.v8-internal.try']
+  cmd += ['-b %s' % bot for bot in options.bots]
+  if options.revision:
+    cmd.append('-r %s' % options.revision)
+  benchmarks = ['"%s"' % benchmark for benchmark in options.benchmarks]
+  cmd.append('-p \'testfilter=[%s]\'' % ','.join(benchmarks))
+  if options.extra_flags:
+    cmd.append('-p \'extra_flags="%s"\'' % options.extra_flags)
+  if options.confidence_level:
+    cmd.append('-p confidence_level=%f' % options.confidence_level)
+  if options.verbose:
+    cmd.append('-vv')
+    print('Running %s' % ' '.join(cmd))
+  subprocess.check_call(' '.join(cmd), shell=True, cwd=V8_BASE)
+
+if __name__ == '__main__':  # pragma: no cover
+  sys.exit(main())
diff --git a/src/third_party/v8/tools/turbolizer-perf.py b/src/third_party/v8/tools/turbolizer-perf.py
new file mode 100644
index 0000000..d35f538
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer-perf.py
@@ -0,0 +1,59 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import os
+import sys
+import json
+import re
+import argparse
+
+sys.path.append(os.environ['PERF_EXEC_PATH'] + \
+  '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
+
+from perf_trace_context import *
+from Core import *
+
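+# Collect the instruction addresses that appear in the trace's disassembly
+# phase, so that process_event() only counts samples that hit this function.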
+def trace_begin():
+  json_obj['eventCounts'] = {}
+  prog = re.compile(r'0x[0-9a-fA-F]+')
+  for phase in reversed(json_obj['phases']):
+    if phase['name'] == "disassembly":
+      for line in phase['data'].splitlines():
+        result = re.match(prog, line)
+        if result:
+          known_addrs.add(result.group(0))
+
+def trace_end():
+  print(json.dumps(json_obj))
+
+def process_event(param_dict):
+  addr = "0x%x" % int(param_dict['sample']['ip'])
+
+  # Only count samples that belong to the function
+  if addr not in known_addrs:
+    return
+
+  ev_name = param_dict['ev_name']
+  if ev_name not in json_obj['eventCounts']:
+    json_obj['eventCounts'][ev_name] = {}
+  if addr not in json_obj['eventCounts'][ev_name]:
+    json_obj['eventCounts'][ev_name][addr] = 0
+  json_obj['eventCounts'][ev_name][addr] += 1
+
+if __name__ == "__main__":
+  parser = argparse.ArgumentParser(
+      description="Perf script to merge profiling data with turbofan compiler "
+                  "traces.")
+  parser.add_argument("file_name", metavar="JSON File",
+      help="turbo trace json file.")
+
+  args = parser.parse_args()
+
+  with open(args.file_name, 'r') as json_file:
+    json_obj = json.load(json_file)
+
+  known_addrs = set()
diff --git a/src/third_party/v8/tools/turbolizer/README.md b/src/third_party/v8/tools/turbolizer/README.md
new file mode 100644
index 0000000..fa804f6
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/README.md
@@ -0,0 +1,79 @@
+Turbolizer
+==========
+
+Turbolizer is an HTML-based tool that visualizes optimized code along the various
+phases of Turbofan's optimization pipeline, allowing easy navigation between
+source code, Turbofan IR graphs, scheduled IR nodes and generated assembly code.
+
+Turbolizer consumes .json files that are generated per-function by d8 when
+passing the '--trace-turbo' command-line flag.
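+
+For example (a minimal sketch; the script name is illustrative):
+
+    d8 --trace-turbo main.js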
+
+Turbolizer is built using npm:
+
+    cd tools/turbolizer
+    npm i
+    npm run-script build
+
+Afterwards, turbolizer can be hosted locally by starting a web server that serves
+the contents of the turbolizer directory, e.g.:
+
+    python -m SimpleHTTPServer 8000
+
+To deploy to a directory that can be hosted, use the `deploy` script. The
+following command deploys to the directory /www/turbolizer:
+
+    npm run deploy -- /www/turbolizer
+
+Optionally, profiling data generated by the Linux perf tools can be merged with
+the .json files using the included turbolizer-perf.py script. The following
+command is an example of using the perf script:
+
+    perf script -i perf.data.jitted -s turbolizer-perf.py turbo-main.json
+
+The output of the above command is a JSON object that can be piped to a file;
+when that file is uploaded to turbolizer, it will display the event counts from
+perf next to each instruction in the disassembly. Further detail can be found
+at the bottom of this document under "Using Perf with Turbo".
+
+Using the Python interface in perf script requires python-dev to be installed
+and perf to be recompiled with Python support enabled. Once recompiled, the
+variable PERF_EXEC_PATH must be set to the location of the recompiled perf
+binaries.
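+
+For example (the path below is only a placeholder for wherever perf was
+rebuilt on your system):
+
+    export PERF_EXEC_PATH=/path/to/perf/build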
+
+Graph visualization and manipulation are based on Mike Bostock's sample code for
+an interactive tool for creating directed graphs. The original source is at
+https://github.com/metacademy/directed-graph-creator and released under the
+MIT/X license.
+
+Icons are derived from the "White Olive Collection" created by Breezi, released
+under the Creative Commons BY license.
+
+Using Perf with Turbo
+---------------------
+
+In order to generate perf data that matches exactly with the turbofan trace, you
+must use either a debug build of v8 or a release build with the flag
+'disassembler=on'. This flag ensures that '--trace-turbo' will output the
+necessary disassembly for linking with the perf profile.
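+
+With a GN-based build, the equivalent is typically the v8_enable_disassembler
+argument (an assumption; verify against your build configuration):
+
+    gn gen out/x64.release --args='is_debug=false v8_enable_disassembler=true'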
+
+The basic example of generating the required data is as follows:
+
+    perf record -k mono /path/to/d8 --trace-turbo --perf-prof main.js
+    perf inject -j -i perf.data -o perf.data.jitted
+    perf script -i perf.data.jitted -s turbolizer-perf.py turbo-main.json
+
+These commands combined will run and profile d8, merge the output into a single
+'perf.data.jitted' file, then take the event data from that and link it to the
+disassembly in 'turbo-main.json'. Note that, as above, the output of the script
+command must be piped to a file for uploading to turbolizer.
+
+There are many options that can be added to the first command; for example, '-e'
+can be used to count specific events (default: cycles), and '--cpu' can be used
+to specify which CPU to sample.
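+
+For instance, counting instructions instead of cycles and sampling only CPU 0
+(the event and CPU choices here are illustrative):
+
+    perf record -k mono -e instructions --cpu 0 \
+        /path/to/d8 --trace-turbo --perf-prof main.js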
+
+Turbolizer build process
+------------------------
+
+The TypeScript sources reside in tools/turbolizer/src, and the TypeScript
+compiler puts the JavaScript output into tools/turbolizer/build/. The
+index.html file is set up to load the JavaScript from that directory.
diff --git a/src/third_party/v8/tools/turbolizer/deploy.sh b/src/third_party/v8/tools/turbolizer/deploy.sh
new file mode 100755
index 0000000..011c2f4
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/deploy.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+DEST=$1
+
+if [ ! -d "$DEST" ]; then
+  echo -e "Destination \"$DEST\" is not a directory. Run\n\tnpm deploy -- [destination-directory]"
+  exit 1
+fi
+
+function copy() {
+  echo -n "."
+  cp "$@"
+}
+
+echo -n "Deploying..."
+copy *.png $DEST/
+copy *.css $DEST/
+copy index.html $DEST/
+copy info-view.html $DEST/
+copy -R build $DEST/
+copy -R img $DEST/
+echo "done!"
+
+echo "Deployed to $DEST/."
diff --git a/src/third_party/v8/tools/turbolizer/down-arrow.png b/src/third_party/v8/tools/turbolizer/down-arrow.png
new file mode 100644
index 0000000..39339f2
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/down-arrow.png
Binary files differ
diff --git a/src/third_party/v8/tools/turbolizer/img/hide-selected-icon.png b/src/third_party/v8/tools/turbolizer/img/hide-selected-icon.png
new file mode 100644
index 0000000..207cdbb
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/img/hide-selected-icon.png
Binary files differ
diff --git a/src/third_party/v8/tools/turbolizer/img/hide-unselected-icon.png b/src/third_party/v8/tools/turbolizer/img/hide-unselected-icon.png
new file mode 100644
index 0000000..15617b0
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/img/hide-unselected-icon.png
Binary files differ
diff --git a/src/third_party/v8/tools/turbolizer/img/layout-icon.png b/src/third_party/v8/tools/turbolizer/img/layout-icon.png
new file mode 100644
index 0000000..95a517a
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/img/layout-icon.png
Binary files differ
diff --git a/src/third_party/v8/tools/turbolizer/img/show-all-icon.png b/src/third_party/v8/tools/turbolizer/img/show-all-icon.png
new file mode 100644
index 0000000..50fc845
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/img/show-all-icon.png
Binary files differ
diff --git a/src/third_party/v8/tools/turbolizer/img/show-control-icon.png b/src/third_party/v8/tools/turbolizer/img/show-control-icon.png
new file mode 100644
index 0000000..4238bee
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/img/show-control-icon.png
Binary files differ
diff --git a/src/third_party/v8/tools/turbolizer/img/toggle-hide-dead-icon.png b/src/third_party/v8/tools/turbolizer/img/toggle-hide-dead-icon.png
new file mode 100644
index 0000000..ac72bb9
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/img/toggle-hide-dead-icon.png
Binary files differ
diff --git a/src/third_party/v8/tools/turbolizer/img/toggle-types-icon.png b/src/third_party/v8/tools/turbolizer/img/toggle-types-icon.png
new file mode 100644
index 0000000..8fead8f
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/img/toggle-types-icon.png
Binary files differ
diff --git a/src/third_party/v8/tools/turbolizer/img/zoom-selection-icon.png b/src/third_party/v8/tools/turbolizer/img/zoom-selection-icon.png
new file mode 100644
index 0000000..12dc3e3
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/img/zoom-selection-icon.png
Binary files differ
diff --git a/src/third_party/v8/tools/turbolizer/index.html b/src/third_party/v8/tools/turbolizer/index.html
new file mode 100644
index 0000000..ea1b0b7
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/index.html
@@ -0,0 +1,52 @@
+<!DOCTYPE html>
+<html>
+<!--
+Copyright 2019 the V8 project authors. All rights reserved.  Use of this source
+code is governed by a BSD-style license that can be found in the LICENSE file.
+-->
+<head>
+  <meta charset="utf-8">
+  <title>V8 Turbolizer</title>
+  <link rel="stylesheet" href="turbo-visualizer.css">
+  <link rel="stylesheet" href="turbo-visualizer-ranges.css">
+  <link rel="stylesheet" href="tabs.css">
+  <link rel="icon" type="image/png" href="turbolizer.png">
+</head>
+
+<body>
+  <div id="left" class="content"></div>
+  <div id="resizer-left" class="resizer"></div>
+  <div id="middle">
+
+    <div id="load-file">
+      <input id="upload-helper" type="file">
+      <input id="upload" type="image" title="load graph" class="button-input" src="upload-icon.png" alt="upload graph">
+    </div>
+    <div id="resizer-ranges" class="resizer" style="visibility:hidden;"></div>
+    <div id="ranges" class="content" style="visibility:hidden;"></div>
+    <div id="show-hide-ranges" class="show-hide-pane" style="visibility: hidden">
+      <input id="ranges-expand" type="image" title="show ranges" src="up-arrow.png" class="button-input invisible">
+      <input id="ranges-shrink" type="image" title="hide ranges" src="down-arrow.png" class="button-input">
+    </div>
+  </div>
+  <div id="resizer-right" class="resizer"></div>
+  <div id="right" class="content"></div>
+  <div id="show-hide-source" class="show-hide-pane">
+    <input id="source-expand" type="image" title="show source" src="right-arrow.png" class="button-input invisible">
+    <input id="source-shrink" type="image" title="hide source" src="left-arrow.png" class="button-input">
+  </div>
+  <div id="show-hide-disassembly" class="show-hide-pane">
+    <input id="disassembly-expand" type="image" title="show disassembly" src="left-arrow.png" class="button-input invisible">
+    <input id="disassembly-shrink" type="image" title="hide disassembly" src="right-arrow.png" class="button-input">
+  </div>
+  <div id="text-placeholder" width="0" height="0" style="position: absolute; top:100000px;">
+    <svg>
+      <text text-anchor="right">
+        <tspan white-space="inherit" id="text-measure"></tspan>
+      </text>
+    </svg>
+  </div>
+  <script src="https://cdn.jsdelivr.net/gh/google/code-prettify@master/loader/run_prettify.js"></script>
+  <script src="build/turbolizer.js"></script>
+</body>
+</html>
diff --git a/src/third_party/v8/tools/turbolizer/info-view.html b/src/third_party/v8/tools/turbolizer/info-view.html
new file mode 100644
index 0000000..534860d
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/info-view.html
@@ -0,0 +1,123 @@
+<div>This view contains hints about available keyboard shortcuts.</div>
+<div class="info-topic" id="info-global">
+  <div class="info-topic-header">Global shortcuts</div>
+  <div class="info-topic-content">
+    <table>
+      <tr>
+        <td>CTRL+L</td>
+        <td>Open load file dialog.</td>
+      </tr>
+      <tr>
+        <td>CTRL+R</td>
+        <td>Reload turbolizer (Chrome shortcut)</td>
+      </tr>
+    </table>
+  </div>
+</div>
+<div class="info-topic" id="info-graph-view">
+  <div class="info-topic-header">Graph view</div>
+  <div class="info-topic-content">
+    <table>
+      <tr>
+        <td>r</td>
+        <td>Relayout graph</td>
+      </tr>
+      <tr>
+        <td>a</td>
+        <td>Select all nodes</td>
+      </tr>
+      <tr>
+        <td>/</td>
+        <td>Select search box</td>
+      </tr>
+      <tr>
+        <td>i</td>
+        <td>Reveal node's input nodes</td>
+      </tr>
+      <tr>
+        <td>o</td>
+        <td>Reveal node's output nodes</td>
+      </tr>
+      <tr>
+        <td>s</td>
+        <td>Hide selected nodes</td>
+      </tr>
+      <tr>
+        <td>u</td>
+        <td>Hide unselected nodes</td>
+      </tr>
+    </table>
+  </div>
+</div>
+<div class="info-topic" id="info-graph-nodes">
+  <div class="info-topic-header">TurboFan graph nodes</div>
+  <div class="info-topic-content">
+    <div>The following commands transform node selections, i.e. each operation will be applied
+      to each node in the current selection and the union of the resulting nodes will become the
+      new selection.</div>
+    <table>
+      <tr>
+        <td>UP</td>
+        <td>Select all input nodes</td>
+      </tr>
+      <tr>
+        <td>DOWN</td>
+        <td>Select all output nodes</td>
+      </tr>
+      <tr>
+        <td>1-9</td>
+        <td>Select input node 1-9</td>
+      </tr>
+      <tr>
+        <td>CTRL+1-9</td>
+        <td>Toggle input edge 1-9</td>
+      </tr>
+      <tr>
+        <td>c</td>
+        <td>Select control output node</td>
+      </tr>
+      <tr>
+        <td>e</td>
+        <td>Select effect output node</td>
+      </tr>
+      <tr>
+        <td>p</td>
+        <td>Select node's origin node</td>
+      </tr>
+    </table>
+  </div>
+</div>
+<div class="info-topic" id="info-graph-search">
+  <div class="info-topic-header">Graph search</div>
+  <div class="info-topic-content">
+    <table>
+      <tr>
+        <td>ENTER</td>
+        <td>Select nodes according to regular expression. Invisible nodes are included depending on the state of the
+          checkbox "only visible".</td>
+      </tr>
+      <tr>
+        <td>CTRL+ENTER</td>
+        <td>Select nodes according to regular expression, always including invisible nodes regardless of checkbox.</td>
+      </tr>
+    </table>
+    <div style="font-weight: bold">
+      Useful patterns
+    </div>
+    <table>
+      <tr>
+        <td>IfTrue</td>
+        <td>Select nodes which have 'IfTrue' in title or description.</td>
+      </tr>
+      <tr>
+        <td>^42:</td>
+        <td>Select exactly the node with id 42.</td>
+      </tr>
+      <tr>
+        <td>Origin:&nbsp;#42&nbsp;</td>
+        <td>Select nodes which were created while node with id 42 was reduced. This is inaccurate if the node was
+          changed in-place.</td>
+      </tr>
+    </table>
+  </div>
+</div>
diff --git a/src/third_party/v8/tools/turbolizer/left-arrow.png b/src/third_party/v8/tools/turbolizer/left-arrow.png
new file mode 100644
index 0000000..fc0603e
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/left-arrow.png
Binary files differ
diff --git a/src/third_party/v8/tools/turbolizer/package-lock.json b/src/third_party/v8/tools/turbolizer/package-lock.json
new file mode 100644
index 0000000..bb53aa7
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/package-lock.json
@@ -0,0 +1,3550 @@
+{
+  "name": "turbolizer",
+  "version": "0.1.0",
+  "lockfileVersion": 1,
+  "requires": true,
+  "dependencies": {
+    "@koa/cors": {
+      "version": "2.2.2",
+      "resolved": "https://registry.npmjs.org/@koa/cors/-/cors-2.2.2.tgz",
+      "integrity": "sha512-Ollvsy3wB8+7R9w6hPVzlj3wekF6nK+IHpHj7faSPVXCkahqCwNEPp9+0C4b51RDkdpHjevLEGLOKuVjqtXgSQ==",
+      "dev": true
+    },
+    "@types/d3": {
+      "version": "5.7.2",
+      "resolved": "https://registry.npmjs.org/@types/d3/-/d3-5.7.2.tgz",
+      "integrity": "sha512-7/wClB8ycneWGy3jdvLfXKTd5SoTg9hji7IdJ0RuO9xTY54YpJ8zlcFADcXhY1J3kCBwxp+/1jeN6a5OMwgYOw==",
+      "requires": {
+        "@types/d3-array": "1.2.7",
+        "@types/d3-axis": "1.0.12",
+        "@types/d3-brush": "1.0.10",
+        "@types/d3-chord": "1.0.9",
+        "@types/d3-collection": "1.0.8",
+        "@types/d3-color": "1.2.2",
+        "@types/d3-contour": "1.3.0",
+        "@types/d3-dispatch": "1.0.7",
+        "@types/d3-drag": "1.2.3",
+        "@types/d3-dsv": "1.0.36",
+        "@types/d3-ease": "1.0.9",
+        "@types/d3-fetch": "1.1.5",
+        "@types/d3-force": "1.2.1",
+        "@types/d3-format": "1.3.1",
+        "@types/d3-geo": "1.11.1",
+        "@types/d3-hierarchy": "1.1.6",
+        "@types/d3-interpolate": "1.3.1",
+        "@types/d3-path": "1.0.8",
+        "@types/d3-polygon": "1.0.7",
+        "@types/d3-quadtree": "1.0.7",
+        "@types/d3-random": "1.1.2",
+        "@types/d3-scale": "2.1.1",
+        "@types/d3-scale-chromatic": "1.3.1",
+        "@types/d3-selection": "1.4.1",
+        "@types/d3-shape": "1.3.2",
+        "@types/d3-time": "1.0.10",
+        "@types/d3-time-format": "2.1.1",
+        "@types/d3-timer": "1.0.9",
+        "@types/d3-transition": "1.1.4",
+        "@types/d3-voronoi": "1.1.9",
+        "@types/d3-zoom": "1.7.4"
+      }
+    },
+    "@types/d3-array": {
+      "version": "1.2.7",
+      "resolved": "https://registry.npmjs.org/@types/d3-array/-/d3-array-1.2.7.tgz",
+      "integrity": "sha512-51vHWuUyDOi+8XuwPrTw3cFqyh2Slg9y8COYkRfjCPG9TfYqY0hoNPzv/8BrcAy0FeQBzqEo/D/8Nk2caOQJnA=="
+    },
+    "@types/d3-axis": {
+      "version": "1.0.12",
+      "resolved": "https://registry.npmjs.org/@types/d3-axis/-/d3-axis-1.0.12.tgz",
+      "integrity": "sha512-BZISgSD5M8TgURyNtcPAmUB9sk490CO1Thb6/gIn0WZTt3Y50IssX+2Z0vTccoqZksUDTep0b+o4ofXslvNbqg==",
+      "requires": {
+        "@types/d3-selection": "1.4.1"
+      }
+    },
+    "@types/d3-brush": {
+      "version": "1.0.10",
+      "resolved": "https://registry.npmjs.org/@types/d3-brush/-/d3-brush-1.0.10.tgz",
+      "integrity": "sha512-J8jREATIrfJaAfhJivqaEKPnJsRlwwrOPje+ABqZFgamADjll+q9zaDXnYyjiGPPsiJEU+Qq9jQi5rECxIOfhg==",
+      "requires": {
+        "@types/d3-selection": "1.4.1"
+      }
+    },
+    "@types/d3-chord": {
+      "version": "1.0.9",
+      "resolved": "https://registry.npmjs.org/@types/d3-chord/-/d3-chord-1.0.9.tgz",
+      "integrity": "sha512-UA6lI9CVW5cT5Ku/RV4hxoFn4mKySHm7HEgodtfRthAj1lt9rKZEPon58vyYfk+HIAm33DtJJgZwMXy2QgyPXw=="
+    },
+    "@types/d3-collection": {
+      "version": "1.0.8",
+      "resolved": "https://registry.npmjs.org/@types/d3-collection/-/d3-collection-1.0.8.tgz",
+      "integrity": "sha512-y5lGlazdc0HNO0F3UUX2DPE7OmYvd9Kcym4hXwrJcNUkDaypR5pX+apuMikl9LfTxKItJsY9KYvzBulpCKyvuQ=="
+    },
+    "@types/d3-color": {
+      "version": "1.2.2",
+      "resolved": "https://registry.npmjs.org/@types/d3-color/-/d3-color-1.2.2.tgz",
+      "integrity": "sha512-6pBxzJ8ZP3dYEQ4YjQ+NVbQaOflfgXq/JbDiS99oLobM2o72uAST4q6yPxHv6FOTCRC/n35ktuo8pvw/S4M7sw=="
+    },
+    "@types/d3-contour": {
+      "version": "1.3.0",
+      "resolved": "https://registry.npmjs.org/@types/d3-contour/-/d3-contour-1.3.0.tgz",
+      "integrity": "sha512-AUCUIjEnC5lCGBM9hS+MryRaFLIrPls4Rbv6ktqbd+TK/RXZPwOy9rtBWmGpbeXcSOYCJTUDwNJuEnmYPJRxHQ==",
+      "requires": {
+        "@types/d3-array": "1.2.7",
+        "@types/geojson": "7946.0.7"
+      }
+    },
+    "@types/d3-dispatch": {
+      "version": "1.0.7",
+      "resolved": "https://registry.npmjs.org/@types/d3-dispatch/-/d3-dispatch-1.0.7.tgz",
+      "integrity": "sha512-M+z84G7UKwK6hEPnGCSccOg8zJ3Nk2hgDQ9sCstHXgsFU0sMxlIZVKqKB5oxUDbALqQG6ucg0G9e8cmOSlishg=="
+    },
+    "@types/d3-drag": {
+      "version": "1.2.3",
+      "resolved": "https://registry.npmjs.org/@types/d3-drag/-/d3-drag-1.2.3.tgz",
+      "integrity": "sha512-rWB5SPvkYVxW3sqUxHOJUZwifD0KqvKwvt1bhNqcLpW6Azsd0BJgRNcyVW8GAferaAk5r8dzeZnf9zKlg9+xMQ==",
+      "requires": {
+        "@types/d3-selection": "1.4.1"
+      }
+    },
+    "@types/d3-dsv": {
+      "version": "1.0.36",
+      "resolved": "https://registry.npmjs.org/@types/d3-dsv/-/d3-dsv-1.0.36.tgz",
+      "integrity": "sha512-jbIWQ27QJcBNMZbQv0NSQMHnBDCmxghAxePxgyiPH1XPCRkOsTBei7jcdi3fDrUCGpCV3lKrSZFSlOkhUQVClA=="
+    },
+    "@types/d3-ease": {
+      "version": "1.0.9",
+      "resolved": "https://registry.npmjs.org/@types/d3-ease/-/d3-ease-1.0.9.tgz",
+      "integrity": "sha512-U5ADevQ+W6fy32FVZZC9EXallcV/Mi12A5Tkd0My5MrC7T8soMQEhlDAg88XUWm0zoCQlB4XV0en/24LvuDB4Q=="
+    },
+    "@types/d3-fetch": {
+      "version": "1.1.5",
+      "resolved": "https://registry.npmjs.org/@types/d3-fetch/-/d3-fetch-1.1.5.tgz",
+      "integrity": "sha512-o9c0ItT5/Gl3wbNuVpzRnYX1t3RghzeWAjHUVLuyZJudiTxC4f/fC0ZPFWLQ2lVY8pAMmxpV8TJ6ETYCgPeI3A==",
+      "requires": {
+        "@types/d3-dsv": "1.0.36"
+      }
+    },
+    "@types/d3-force": {
+      "version": "1.2.1",
+      "resolved": "https://registry.npmjs.org/@types/d3-force/-/d3-force-1.2.1.tgz",
+      "integrity": "sha512-jqK+I36uz4kTBjyk39meed5y31Ab+tXYN/x1dn3nZEus9yOHCLc+VrcIYLc/aSQ0Y7tMPRlIhLetulME76EiiA=="
+    },
+    "@types/d3-format": {
+      "version": "1.3.1",
+      "resolved": "https://registry.npmjs.org/@types/d3-format/-/d3-format-1.3.1.tgz",
+      "integrity": "sha512-KAWvReOKMDreaAwOjdfQMm0HjcUMlQG47GwqdVKgmm20vTd2pucj0a70c3gUSHrnsmo6H2AMrkBsZU2UhJLq8A=="
+    },
+    "@types/d3-geo": {
+      "version": "1.11.1",
+      "resolved": "https://registry.npmjs.org/@types/d3-geo/-/d3-geo-1.11.1.tgz",
+      "integrity": "sha512-Ox8WWOG3igDRoep/dNsGbOiSJYdUG3ew/6z0ETvHyAtXZVBjOE0S96zSSmzgl0gqQ3RdZjn2eeJOj9oRcMZPkQ==",
+      "requires": {
+        "@types/geojson": "7946.0.7"
+      }
+    },
+    "@types/d3-hierarchy": {
+      "version": "1.1.6",
+      "resolved": "https://registry.npmjs.org/@types/d3-hierarchy/-/d3-hierarchy-1.1.6.tgz",
+      "integrity": "sha512-vvSaIDf/Ov0o3KwMT+1M8+WbnnlRiGjlGD5uvk83a1mPCTd/E5x12bUJ/oP55+wUY/4Kb5kc67rVpVGJ2KUHxg=="
+    },
+    "@types/d3-interpolate": {
+      "version": "1.3.1",
+      "resolved": "https://registry.npmjs.org/@types/d3-interpolate/-/d3-interpolate-1.3.1.tgz",
+      "integrity": "sha512-z8Zmi08XVwe8e62vP6wcA+CNuRhpuUU5XPEfqpG0hRypDE5BWNthQHB1UNWWDB7ojCbGaN4qBdsWp5kWxhT1IQ==",
+      "requires": {
+        "@types/d3-color": "1.2.2"
+      }
+    },
+    "@types/d3-path": {
+      "version": "1.0.8",
+      "resolved": "https://registry.npmjs.org/@types/d3-path/-/d3-path-1.0.8.tgz",
+      "integrity": "sha512-AZGHWslq/oApTAHu9+yH/Bnk63y9oFOMROtqPAtxl5uB6qm1x2lueWdVEjsjjV3Qc2+QfuzKIwIR5MvVBakfzA=="
+    },
+    "@types/d3-polygon": {
+      "version": "1.0.7",
+      "resolved": "https://registry.npmjs.org/@types/d3-polygon/-/d3-polygon-1.0.7.tgz",
+      "integrity": "sha512-Xuw0eSjQQKs8jTiNbntWH0S+Xp+JyhqxmQ0YAQ3rDu6c3kKMFfgsaGN7Jv5u3zG6yVX/AsLP/Xs/QRjmi9g43Q=="
+    },
+    "@types/d3-quadtree": {
+      "version": "1.0.7",
+      "resolved": "https://registry.npmjs.org/@types/d3-quadtree/-/d3-quadtree-1.0.7.tgz",
+      "integrity": "sha512-0ajFawWicfjsaCLh6NzxOyVDYhQAmMFbsiI3MPGLInorauHFEh9/Cl6UHNf+kt/J1jfoxKY/ZJaKAoDpbvde5Q=="
+    },
+    "@types/d3-random": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/@types/d3-random/-/d3-random-1.1.2.tgz",
+      "integrity": "sha512-Jui+Zn28pQw/3EayPKaN4c/PqTvqNbIPjHkgIIFnxne1FdwNjfHtAIsZIBMKlquQNrrMjFzCrlF2gPs3xckqaA=="
+    },
+    "@types/d3-scale": {
+      "version": "2.1.1",
+      "resolved": "https://registry.npmjs.org/@types/d3-scale/-/d3-scale-2.1.1.tgz",
+      "integrity": "sha512-kNTkbZQ+N/Ip8oX9PByXfDLoCSaZYm+VUOasbmsa6KD850/ziMdYepg/8kLg2plHzoLANdMqPoYQbvExevLUHg==",
+      "requires": {
+        "@types/d3-time": "1.0.10"
+      }
+    },
+    "@types/d3-scale-chromatic": {
+      "version": "1.3.1",
+      "resolved": "https://registry.npmjs.org/@types/d3-scale-chromatic/-/d3-scale-chromatic-1.3.1.tgz",
+      "integrity": "sha512-Ny3rLbV5tnmqgW7w/poCcef4kXP8mHPo/p8EjTS5d9OUk8MlqAeRaM8eF7Vyv7QMLiIXNE94Pa1cMLSPkXQBoQ=="
+    },
+    "@types/d3-selection": {
+      "version": "1.4.1",
+      "resolved": "https://registry.npmjs.org/@types/d3-selection/-/d3-selection-1.4.1.tgz",
+      "integrity": "sha512-bv8IfFYo/xG6dxri9OwDnK3yCagYPeRIjTlrcdYJSx+FDWlCeBDepIHUpqROmhPtZ53jyna0aUajZRk0I3rXNA=="
+    },
+    "@types/d3-shape": {
+      "version": "1.3.2",
+      "resolved": "https://registry.npmjs.org/@types/d3-shape/-/d3-shape-1.3.2.tgz",
+      "integrity": "sha512-LtD8EaNYCaBRzHzaAiIPrfcL3DdIysc81dkGlQvv7WQP3+YXV7b0JJTtR1U3bzeRieS603KF4wUo+ZkJVenh8w==",
+      "requires": {
+        "@types/d3-path": "1.0.8"
+      }
+    },
+    "@types/d3-time": {
+      "version": "1.0.10",
+      "resolved": "https://registry.npmjs.org/@types/d3-time/-/d3-time-1.0.10.tgz",
+      "integrity": "sha512-aKf62rRQafDQmSiv1NylKhIMmznsjRN+MnXRXTqHoqm0U/UZzVpdrtRnSIfdiLS616OuC1soYeX1dBg2n1u8Xw=="
+    },
+    "@types/d3-time-format": {
+      "version": "2.1.1",
+      "resolved": "https://registry.npmjs.org/@types/d3-time-format/-/d3-time-format-2.1.1.tgz",
+      "integrity": "sha512-tJSyXta8ZyJ52wDDHA96JEsvkbL6jl7wowGmuf45+fAkj5Y+SQOnz0N7/H68OWmPshPsAaWMQh+GAws44IzH3g=="
+    },
+    "@types/d3-timer": {
+      "version": "1.0.9",
+      "resolved": "https://registry.npmjs.org/@types/d3-timer/-/d3-timer-1.0.9.tgz",
+      "integrity": "sha512-WvfJ3LFxBbWjqRGz9n7GJt08RrTHPJDVsIwwoCMROlqF+iDacYiAFjf9oqnq0mXpb2juA2N/qjKP+MKdal3YNQ=="
+    },
+    "@types/d3-transition": {
+      "version": "1.1.4",
+      "resolved": "https://registry.npmjs.org/@types/d3-transition/-/d3-transition-1.1.4.tgz",
+      "integrity": "sha512-/vsmKVUIXEyCcIXYAlw7bnYkIs9/J/nZbptRJFKUN3FdXq/dF6j9z9xXzerkyU6TDHLrMrwx9eGwdKyTIy/j9w==",
+      "requires": {
+        "@types/d3-selection": "1.4.1"
+      }
+    },
+    "@types/d3-voronoi": {
+      "version": "1.1.9",
+      "resolved": "https://registry.npmjs.org/@types/d3-voronoi/-/d3-voronoi-1.1.9.tgz",
+      "integrity": "sha512-DExNQkaHd1F3dFPvGA/Aw2NGyjMln6E9QzsiqOcBgnE+VInYnFBHBBySbZQts6z6xD+5jTfKCP7M4OqMyVjdwQ=="
+    },
+    "@types/d3-zoom": {
+      "version": "1.7.4",
+      "resolved": "https://registry.npmjs.org/@types/d3-zoom/-/d3-zoom-1.7.4.tgz",
+      "integrity": "sha512-5jnFo/itYhJeB2khO/lKe730kW/h2EbKMOvY0uNp3+7NdPm4w63DwPEMxifQZ7n902xGYK5DdU67FmToSoy4VA==",
+      "requires": {
+        "@types/d3-interpolate": "1.3.1",
+        "@types/d3-selection": "1.4.1"
+      }
+    },
+    "@types/estree": {
+      "version": "0.0.39",
+      "resolved": "https://registry.npmjs.org/@types/estree/-/estree-0.0.39.tgz",
+      "integrity": "sha512-EYNwp3bU+98cpU4lAWYYL7Zz+2gryWH1qbdDTidVd6hkiR6weksdbMadyXKXNPEkQFhXM+hVO9ZygomHXp+AIw==",
+      "dev": true
+    },
+    "@types/geojson": {
+      "version": "7946.0.7",
+      "resolved": "https://registry.npmjs.org/@types/geojson/-/geojson-7946.0.7.tgz",
+      "integrity": "sha512-wE2v81i4C4Ol09RtsWFAqg3BUitWbHSpSlIo+bNdsCJijO9sjme+zm+73ZMCa/qMC8UEERxzGbvmr1cffo2SiQ=="
+    },
+    "@types/json5": {
+      "version": "0.0.29",
+      "resolved": "https://registry.npmjs.org/@types/json5/-/json5-0.0.29.tgz",
+      "integrity": "sha1-7ihweulOEdK4J7y+UnC86n8+ce4=",
+      "dev": true,
+      "optional": true
+    },
+    "@types/node": {
+      "version": "12.7.12",
+      "resolved": "https://registry.npmjs.org/@types/node/-/node-12.7.12.tgz",
+      "integrity": "sha512-KPYGmfD0/b1eXurQ59fXD1GBzhSQfz6/lKBxkaHX9dKTzjXbK68Zt7yGUxUsCS1jeTy/8aL+d9JEr+S54mpkWQ==",
+      "dev": true
+    },
+    "JSONStream": {
+      "version": "1.3.5",
+      "resolved": "https://registry.npmjs.org/JSONStream/-/JSONStream-1.3.5.tgz",
+      "integrity": "sha512-E+iruNOY8VV9s4JEbe1aNEm6MiszPRr/UfcHMz0TQh1BXSxHK+ASV1R6W4HpjBhSeS+54PIsAMCBmwD06LLsqQ==",
+      "dev": true,
+      "requires": {
+        "jsonparse": "1.3.1",
+        "through": "2.3.8"
+      }
+    },
+    "accepts": {
+      "version": "1.3.5",
+      "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.5.tgz",
+      "integrity": "sha1-63d99gEXI6OxTopywIBcjoZ0a9I=",
+      "dev": true,
+      "requires": {
+        "mime-types": "2.1.21",
+        "negotiator": "0.6.1"
+      }
+    },
+    "ansi-escape-sequences": {
+      "version": "4.0.0",
+      "resolved": "https://registry.npmjs.org/ansi-escape-sequences/-/ansi-escape-sequences-4.0.0.tgz",
+      "integrity": "sha512-v+0wW9Wezwsyb0uF4aBVCjmSqit3Ru7PZFziGF0o2KwTvN2zWfTi3BRLq9EkJFdg3eBbyERXGTntVpBxH1J68Q==",
+      "dev": true,
+      "requires": {
+        "array-back": "2.0.0"
+      }
+    },
+    "ansi-regex": {
+      "version": "2.1.1",
+      "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz",
+      "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=",
+      "dev": true
+    },
+    "ansi-styles": {
+      "version": "3.2.1",
+      "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
+      "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
+      "dev": true,
+      "requires": {
+        "color-convert": "1.9.3"
+      }
+    },
+    "any-promise": {
+      "version": "1.3.0",
+      "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz",
+      "integrity": "sha1-q8av7tzqUugJzcA3au0845Y10X8=",
+      "dev": true
+    },
+    "argparse": {
+      "version": "1.0.10",
+      "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz",
+      "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==",
+      "dev": true,
+      "requires": {
+        "sprintf-js": "1.0.3"
+      }
+    },
+    "argv-tools": {
+      "version": "0.1.1",
+      "resolved": "https://registry.npmjs.org/argv-tools/-/argv-tools-0.1.1.tgz",
+      "integrity": "sha512-Cc0dBvx4dvrjjKpyDA6w8RlNAw8Su30NvZbWl/Tv9ZALEVlLVkWQiHMi84Q0xNfpVuSaiQbYkdmWK8g1PLGhKw==",
+      "dev": true,
+      "requires": {
+        "array-back": "2.0.0",
+        "find-replace": "2.0.1"
+      }
+    },
+    "arr-diff": {
+      "version": "4.0.0",
+      "resolved": "https://registry.npmjs.org/arr-diff/-/arr-diff-4.0.0.tgz",
+      "integrity": "sha1-1kYQdP6/7HHn4VI1dhoyml3HxSA="
+    },
+    "arr-flatten": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/arr-flatten/-/arr-flatten-1.1.0.tgz",
+      "integrity": "sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg=="
+    },
+    "arr-union": {
+      "version": "3.1.0",
+      "resolved": "https://registry.npmjs.org/arr-union/-/arr-union-3.1.0.tgz",
+      "integrity": "sha1-45sJrqne+Gao8gbiiK9jkZuuOcQ="
+    },
+    "array-back": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/array-back/-/array-back-2.0.0.tgz",
+      "integrity": "sha512-eJv4pLLufP3g5kcZry0j6WXpIbzYw9GUB4mVJZno9wfwiBxbizTnHCw3VJb07cBihbFX48Y7oSrW9y+gt4glyw==",
+      "dev": true,
+      "requires": {
+        "typical": "2.6.1"
+      },
+      "dependencies": {
+        "typical": {
+          "version": "2.6.1",
+          "resolved": "https://registry.npmjs.org/typical/-/typical-2.6.1.tgz",
+          "integrity": "sha1-XAgOXWYcu+OCWdLnCjxyU+hziB0=",
+          "dev": true
+        }
+      }
+    },
+    "array-unique": {
+      "version": "0.3.2",
+      "resolved": "https://registry.npmjs.org/array-unique/-/array-unique-0.3.2.tgz",
+      "integrity": "sha1-qJS3XUvE9s1nnvMkSp/Y9Gri1Cg="
+    },
+    "arrify": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/arrify/-/arrify-1.0.1.tgz",
+      "integrity": "sha1-iYUI2iIm84DfkEcoRWhJwVAaSw0=",
+      "dev": true
+    },
+    "assertion-error": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-1.1.0.tgz",
+      "integrity": "sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==",
+      "dev": true
+    },
+    "assign-symbols": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/assign-symbols/-/assign-symbols-1.0.0.tgz",
+      "integrity": "sha1-WWZ/QfrdTyDMvCu5a41Pf3jsA2c="
+    },
+    "async-limiter": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/async-limiter/-/async-limiter-1.0.0.tgz",
+      "integrity": "sha512-jp/uFnooOiO+L211eZOoSyzpOITMXx1rBITauYykG3BRYPu8h0UcxsPNB04RR5vo4Tyz3+ay17tR6JVf9qzYWg==",
+      "dev": true
+    },
+    "atob": {
+      "version": "2.1.2",
+      "resolved": "https://registry.npmjs.org/atob/-/atob-2.1.2.tgz",
+      "integrity": "sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg=="
+    },
+    "babel-code-frame": {
+      "version": "6.26.0",
+      "resolved": "https://registry.npmjs.org/babel-code-frame/-/babel-code-frame-6.26.0.tgz",
+      "integrity": "sha1-Y/1D99weO7fONZR9uP42mj9Yx0s=",
+      "dev": true,
+      "requires": {
+        "chalk": "1.1.3",
+        "esutils": "2.0.2",
+        "js-tokens": "3.0.2"
+      },
+      "dependencies": {
+        "ansi-styles": {
+          "version": "2.2.1",
+          "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz",
+          "integrity": "sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4=",
+          "dev": true
+        },
+        "chalk": {
+          "version": "1.1.3",
+          "resolved": "https://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz",
+          "integrity": "sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg=",
+          "dev": true,
+          "requires": {
+            "ansi-styles": "2.2.1",
+            "escape-string-regexp": "1.0.5",
+            "has-ansi": "2.0.0",
+            "strip-ansi": "3.0.1",
+            "supports-color": "2.0.0"
+          }
+        },
+        "supports-color": {
+          "version": "2.0.0",
+          "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz",
+          "integrity": "sha1-U10EXOa2Nj+kARcIRimZXp3zJMc=",
+          "dev": true
+        }
+      }
+    },
+    "balanced-match": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz",
+      "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=",
+      "dev": true
+    },
+    "base": {
+      "version": "0.11.2",
+      "resolved": "https://registry.npmjs.org/base/-/base-0.11.2.tgz",
+      "integrity": "sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg==",
+      "requires": {
+        "cache-base": "1.0.1",
+        "class-utils": "0.3.6",
+        "component-emitter": "1.2.1",
+        "define-property": "1.0.0",
+        "isobject": "3.0.1",
+        "mixin-deep": "1.3.2",
+        "pascalcase": "0.1.1"
+      },
+      "dependencies": {
+        "define-property": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz",
+          "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=",
+          "requires": {
+            "is-descriptor": "1.0.2"
+          }
+        },
+        "is-accessor-descriptor": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
+          "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
+          "requires": {
+            "kind-of": "6.0.2"
+          }
+        },
+        "is-data-descriptor": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
+          "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
+          "requires": {
+            "kind-of": "6.0.2"
+          }
+        },
+        "is-descriptor": {
+          "version": "1.0.2",
+          "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
+          "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
+          "requires": {
+            "is-accessor-descriptor": "1.0.0",
+            "is-data-descriptor": "1.0.0",
+            "kind-of": "6.0.2"
+          }
+        }
+      }
+    },
+    "basic-auth": {
+      "version": "1.1.0",
+      "resolved": "http://registry.npmjs.org/basic-auth/-/basic-auth-1.1.0.tgz",
+      "integrity": "sha1-RSIe5Cn37h5QNb4/UVM/HN/SmIQ=",
+      "dev": true
+    },
+    "batch": {
+      "version": "0.6.1",
+      "resolved": "https://registry.npmjs.org/batch/-/batch-0.6.1.tgz",
+      "integrity": "sha1-3DQxT05nkxgJP8dgJyUl+UvyXBY=",
+      "dev": true
+    },
+    "brace-expansion": {
+      "version": "1.1.11",
+      "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
+      "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
+      "dev": true,
+      "requires": {
+        "balanced-match": "1.0.0",
+        "concat-map": "0.0.1"
+      }
+    },
+    "braces": {
+      "version": "2.3.2",
+      "resolved": "https://registry.npmjs.org/braces/-/braces-2.3.2.tgz",
+      "integrity": "sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w==",
+      "requires": {
+        "arr-flatten": "1.1.0",
+        "array-unique": "0.3.2",
+        "extend-shallow": "2.0.1",
+        "fill-range": "4.0.0",
+        "isobject": "3.0.1",
+        "repeat-element": "1.1.3",
+        "snapdragon": "0.8.2",
+        "snapdragon-node": "2.1.1",
+        "split-string": "3.1.0",
+        "to-regex": "3.0.2"
+      },
+      "dependencies": {
+        "extend-shallow": {
+          "version": "2.0.1",
+          "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+          "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
+          "requires": {
+            "is-extendable": "0.1.1"
+          }
+        }
+      }
+    },
+    "browser-stdout": {
+      "version": "1.3.1",
+      "resolved": "https://registry.npmjs.org/browser-stdout/-/browser-stdout-1.3.1.tgz",
+      "integrity": "sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw==",
+      "dev": true
+    },
+    "buffer-from": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.1.tgz",
+      "integrity": "sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A==",
+      "dev": true
+    },
+    "builtin-modules": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/builtin-modules/-/builtin-modules-3.0.0.tgz",
+      "integrity": "sha512-hMIeU4K2ilbXV6Uv93ZZ0Avg/M91RaKXucQ+4me2Do1txxBDyDZWCBa5bJSLqoNTRpXTLwEzIk1KmloenDDjhg=="
+    },
+    "byte-size": {
+      "version": "4.0.4",
+      "resolved": "https://registry.npmjs.org/byte-size/-/byte-size-4.0.4.tgz",
+      "integrity": "sha512-82RPeneC6nqCdSwCX2hZUz3JPOvN5at/nTEw/CMf05Smu3Hrpo9Psb7LjN+k+XndNArG1EY8L4+BM3aTM4BCvw==",
+      "dev": true
+    },
+    "bytes": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz",
+      "integrity": "sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg=",
+      "dev": true
+    },
+    "cache-base": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/cache-base/-/cache-base-1.0.1.tgz",
+      "integrity": "sha512-AKcdTnFSWATd5/GCPRxr2ChwIJ85CeyrEyjRHlKxQ56d4XJMGym0uAiKn0xbLOGOl3+yRpOTi484dVCEc5AUzQ==",
+      "requires": {
+        "collection-visit": "1.0.0",
+        "component-emitter": "1.2.1",
+        "get-value": "2.0.6",
+        "has-value": "1.0.0",
+        "isobject": "3.0.1",
+        "set-value": "2.0.1",
+        "to-object-path": "0.3.0",
+        "union-value": "1.0.1",
+        "unset-value": "1.0.0"
+      }
+    },
+    "cache-content-type": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/cache-content-type/-/cache-content-type-1.0.1.tgz",
+      "integrity": "sha512-IKufZ1o4Ut42YUrZSo8+qnMTrFuKkvyoLXUywKz9GJ5BrhOFGhLdkx9sG4KAnVvbY6kEcSFjLQul+DVmBm2bgA==",
+      "dev": true,
+      "requires": {
+        "mime-types": "2.1.21",
+        "ylru": "1.2.1"
+      }
+    },
+    "chai": {
+      "version": "4.2.0",
+      "resolved": "https://registry.npmjs.org/chai/-/chai-4.2.0.tgz",
+      "integrity": "sha512-XQU3bhBukrOsQCuwZndwGcCVQHyZi53fQ6Ys1Fym7E4olpIqqZZhhoFJoaKVvV17lWQoXYwgWN2nF5crA8J2jw==",
+      "dev": true,
+      "requires": {
+        "assertion-error": "1.1.0",
+        "check-error": "1.0.2",
+        "deep-eql": "3.0.1",
+        "get-func-name": "2.0.0",
+        "pathval": "1.1.0",
+        "type-detect": "4.0.8"
+      }
+    },
+    "chalk": {
+      "version": "2.4.1",
+      "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz",
+      "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==",
+      "dev": true,
+      "requires": {
+        "ansi-styles": "3.2.1",
+        "escape-string-regexp": "1.0.5",
+        "supports-color": "5.4.0"
+      }
+    },
+    "check-error": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/check-error/-/check-error-1.0.2.tgz",
+      "integrity": "sha1-V00xLt2Iu13YkS6Sht1sCu1KrII=",
+      "dev": true
+    },
+    "class-utils": {
+      "version": "0.3.6",
+      "resolved": "https://registry.npmjs.org/class-utils/-/class-utils-0.3.6.tgz",
+      "integrity": "sha512-qOhPa/Fj7s6TY8H8esGu5QNpMMQxz79h+urzrNYN6mn+9BnxlDGf5QZ+XeCDsxSjPqsSR56XOZOJmpeurnLMeg==",
+      "requires": {
+        "arr-union": "3.1.0",
+        "define-property": "0.2.5",
+        "isobject": "3.0.1",
+        "static-extend": "0.1.2"
+      },
+      "dependencies": {
+        "define-property": {
+          "version": "0.2.5",
+          "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
+          "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
+          "requires": {
+            "is-descriptor": "0.1.6"
+          }
+        }
+      }
+    },
+    "cli-commands": {
+      "version": "0.4.0",
+      "resolved": "https://registry.npmjs.org/cli-commands/-/cli-commands-0.4.0.tgz",
+      "integrity": "sha512-zAvJlR7roeMgpUIhMDYATYL90vz+9ffuyPr0+qq4LzcZ0Jq+gM+H1KdYKxerc6U2nhitiDEx79YiJlXdrooEOA==",
+      "dev": true,
+      "requires": {
+        "command-line-args": "5.0.2",
+        "command-line-commands": "2.0.1"
+      }
+    },
+    "co": {
+      "version": "4.6.0",
+      "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz",
+      "integrity": "sha1-bqa989hTrlTMuOR7+gvz+QMfsYQ=",
+      "dev": true
+    },
+    "co-body": {
+      "version": "6.0.0",
+      "resolved": "https://registry.npmjs.org/co-body/-/co-body-6.0.0.tgz",
+      "integrity": "sha512-9ZIcixguuuKIptnY8yemEOuhb71L/lLf+Rl5JfJEUiDNJk0e02MBt7BPxR2GEh5mw8dPthQYR4jPI/BnS1MQgw==",
+      "dev": true,
+      "requires": {
+        "inflation": "2.0.0",
+        "qs": "6.5.2",
+        "raw-body": "2.3.3",
+        "type-is": "1.6.16"
+      }
+    },
+    "collection-visit": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/collection-visit/-/collection-visit-1.0.0.tgz",
+      "integrity": "sha1-S8A3PBZLwykbTTaMgpzxqApZ3KA=",
+      "requires": {
+        "map-visit": "1.0.0",
+        "object-visit": "1.0.1"
+      }
+    },
+    "color-convert": {
+      "version": "1.9.3",
+      "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz",
+      "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==",
+      "dev": true,
+      "requires": {
+        "color-name": "1.1.3"
+      }
+    },
+    "color-name": {
+      "version": "1.1.3",
+      "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
+      "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=",
+      "dev": true
+    },
+    "command-line-args": {
+      "version": "5.0.2",
+      "resolved": "https://registry.npmjs.org/command-line-args/-/command-line-args-5.0.2.tgz",
+      "integrity": "sha512-/qPcbL8zpqg53x4rAaqMFlRV4opN3pbla7I7k9x8kyOBMQoGT6WltjN6sXZuxOXw6DgdK7Ad+ijYS5gjcr7vlA==",
+      "dev": true,
+      "requires": {
+        "argv-tools": "0.1.1",
+        "array-back": "2.0.0",
+        "find-replace": "2.0.1",
+        "lodash.camelcase": "4.3.0",
+        "typical": "2.6.1"
+      },
+      "dependencies": {
+        "typical": {
+          "version": "2.6.1",
+          "resolved": "https://registry.npmjs.org/typical/-/typical-2.6.1.tgz",
+          "integrity": "sha1-XAgOXWYcu+OCWdLnCjxyU+hziB0=",
+          "dev": true
+        }
+      }
+    },
+    "command-line-commands": {
+      "version": "2.0.1",
+      "resolved": "https://registry.npmjs.org/command-line-commands/-/command-line-commands-2.0.1.tgz",
+      "integrity": "sha512-m8c2p1DrNd2ruIAggxd/y6DgygQayf6r8RHwchhXryaLF8I6koYjoYroVP+emeROE9DXN5b9sP1Gh+WtvTTdtQ==",
+      "dev": true,
+      "requires": {
+        "array-back": "2.0.0"
+      }
+    },
+    "command-line-usage": {
+      "version": "5.0.5",
+      "resolved": "https://registry.npmjs.org/command-line-usage/-/command-line-usage-5.0.5.tgz",
+      "integrity": "sha512-d8NrGylA5oCXSbGoKz05FkehDAzSmIm4K03S5VDh4d5lZAtTWfc3D1RuETtuQCn8129nYfJfDdF7P/lwcz1BlA==",
+      "dev": true,
+      "requires": {
+        "array-back": "2.0.0",
+        "chalk": "2.4.1",
+        "table-layout": "0.4.4",
+        "typical": "2.6.1"
+      },
+      "dependencies": {
+        "typical": {
+          "version": "2.6.1",
+          "resolved": "https://registry.npmjs.org/typical/-/typical-2.6.1.tgz",
+          "integrity": "sha1-XAgOXWYcu+OCWdLnCjxyU+hziB0=",
+          "dev": true
+        }
+      }
+    },
+    "commander": {
+      "version": "2.15.1",
+      "resolved": "https://registry.npmjs.org/commander/-/commander-2.15.1.tgz",
+      "integrity": "sha512-VlfT9F3V0v+jr4yxPc5gg9s62/fIVWsd2Bk2iD435um1NlGMYdVCq+MjcXnhYq2icNOizHr1kK+5TI6H0Hy0ag=="
+    },
+    "common-log-format": {
+      "version": "0.1.4",
+      "resolved": "https://registry.npmjs.org/common-log-format/-/common-log-format-0.1.4.tgz",
+      "integrity": "sha512-BXcgq+wzr2htmBmnT7cL7YHzPAWketWbr4kozjoM9kWe4sk3+zMgjcH0HO+EddjDlEw2LZysqLpVRwbF318tDw==",
+      "dev": true
+    },
+    "component-emitter": {
+      "version": "1.2.1",
+      "resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.2.1.tgz",
+      "integrity": "sha1-E3kY1teCg/ffemt8WmPhQOaUJeY="
+    },
+    "compressible": {
+      "version": "2.0.15",
+      "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.15.tgz",
+      "integrity": "sha512-4aE67DL33dSW9gw4CI2H/yTxqHLNcxp0yS6jB+4h+wr3e43+1z7vm0HU9qXOH8j+qjKuL8+UtkOxYQSMq60Ylw==",
+      "dev": true,
+      "requires": {
+        "mime-db": "1.37.0"
+      }
+    },
+    "concat-map": {
+      "version": "0.0.1",
+      "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
+      "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=",
+      "dev": true
+    },
+    "content-disposition": {
+      "version": "0.5.2",
+      "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.2.tgz",
+      "integrity": "sha1-DPaLud318r55YcOoUXjLhdunjLQ=",
+      "dev": true
+    },
+    "content-type": {
+      "version": "1.0.4",
+      "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz",
+      "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==",
+      "dev": true
+    },
+    "cookies": {
+      "version": "0.7.2",
+      "resolved": "https://registry.npmjs.org/cookies/-/cookies-0.7.2.tgz",
+      "integrity": "sha512-J2JjH9T3PUNKPHknprxgCrCaZshIfxW2j49gq1E1CP5Micj1LppWAR2y9EHSQAzEiX84zOsScWNwUZ0b/ChlMw==",
+      "dev": true,
+      "requires": {
+        "depd": "1.1.2",
+        "keygrip": "1.0.3"
+      }
+    },
+    "copy-descriptor": {
+      "version": "0.1.1",
+      "resolved": "https://registry.npmjs.org/copy-descriptor/-/copy-descriptor-0.1.1.tgz",
+      "integrity": "sha1-Z29us8OZl8LuGsOpJP1hJHSPV40="
+    },
+    "copy-to": {
+      "version": "2.0.1",
+      "resolved": "https://registry.npmjs.org/copy-to/-/copy-to-2.0.1.tgz",
+      "integrity": "sha1-JoD7uAaKSNCGVrYJgJK9r8kG9KU=",
+      "dev": true
+    },
+    "core-util-is": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz",
+      "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=",
+      "dev": true
+    },
+    "d3": {
+      "version": "5.7.0",
+      "resolved": "https://registry.npmjs.org/d3/-/d3-5.7.0.tgz",
+      "integrity": "sha512-8KEIfx+dFm8PlbJN9PI0suazrZ41QcaAufsKE9PRcqYPWLngHIyWJZX96n6IQKePGgeSu0l7rtlueSSNq8Zc3g==",
+      "requires": {
+        "d3-array": "1.2.4",
+        "d3-axis": "1.0.12",
+        "d3-brush": "1.0.6",
+        "d3-chord": "1.0.6",
+        "d3-collection": "1.0.7",
+        "d3-color": "1.2.3",
+        "d3-contour": "1.3.2",
+        "d3-dispatch": "1.0.5",
+        "d3-drag": "1.2.3",
+        "d3-dsv": "1.0.10",
+        "d3-ease": "1.0.5",
+        "d3-fetch": "1.1.2",
+        "d3-force": "1.1.2",
+        "d3-format": "1.3.2",
+        "d3-geo": "1.11.3",
+        "d3-hierarchy": "1.1.8",
+        "d3-interpolate": "1.3.2",
+        "d3-path": "1.0.7",
+        "d3-polygon": "1.0.5",
+        "d3-quadtree": "1.0.5",
+        "d3-random": "1.1.2",
+        "d3-scale": "2.1.2",
+        "d3-scale-chromatic": "1.3.3",
+        "d3-selection": "1.3.2",
+        "d3-shape": "1.2.2",
+        "d3-time": "1.0.10",
+        "d3-time-format": "2.1.3",
+        "d3-timer": "1.0.9",
+        "d3-transition": "1.1.3",
+        "d3-voronoi": "1.1.4",
+        "d3-zoom": "1.7.3"
+      }
+    },
+    "d3-array": {
+      "version": "1.2.4",
+      "resolved": "https://registry.npmjs.org/d3-array/-/d3-array-1.2.4.tgz",
+      "integrity": "sha512-KHW6M86R+FUPYGb3R5XiYjXPq7VzwxZ22buHhAEVG5ztoEcZZMLov530mmccaqA1GghZArjQV46fuc8kUqhhHw=="
+    },
+    "d3-axis": {
+      "version": "1.0.12",
+      "resolved": "https://registry.npmjs.org/d3-axis/-/d3-axis-1.0.12.tgz",
+      "integrity": "sha512-ejINPfPSNdGFKEOAtnBtdkpr24c4d4jsei6Lg98mxf424ivoDP2956/5HDpIAtmHo85lqT4pruy+zEgvRUBqaQ=="
+    },
+    "d3-brush": {
+      "version": "1.0.6",
+      "resolved": "https://registry.npmjs.org/d3-brush/-/d3-brush-1.0.6.tgz",
+      "integrity": "sha512-lGSiF5SoSqO5/mYGD5FAeGKKS62JdA1EV7HPrU2b5rTX4qEJJtpjaGLJngjnkewQy7UnGstnFd3168wpf5z76w==",
+      "requires": {
+        "d3-dispatch": "1.0.5",
+        "d3-drag": "1.2.3",
+        "d3-interpolate": "1.3.2",
+        "d3-selection": "1.3.2",
+        "d3-transition": "1.1.3"
+      }
+    },
+    "d3-chord": {
+      "version": "1.0.6",
+      "resolved": "https://registry.npmjs.org/d3-chord/-/d3-chord-1.0.6.tgz",
+      "integrity": "sha512-JXA2Dro1Fxw9rJe33Uv+Ckr5IrAa74TlfDEhE/jfLOaXegMQFQTAgAw9WnZL8+HxVBRXaRGCkrNU7pJeylRIuA==",
+      "requires": {
+        "d3-array": "1.2.4",
+        "d3-path": "1.0.7"
+      }
+    },
+    "d3-collection": {
+      "version": "1.0.7",
+      "resolved": "https://registry.npmjs.org/d3-collection/-/d3-collection-1.0.7.tgz",
+      "integrity": "sha512-ii0/r5f4sjKNTfh84Di+DpztYwqKhEyUlKoPrzUFfeSkWxjW49xU2QzO9qrPrNkpdI0XJkfzvmTu8V2Zylln6A=="
+    },
+    "d3-color": {
+      "version": "1.2.3",
+      "resolved": "https://registry.npmjs.org/d3-color/-/d3-color-1.2.3.tgz",
+      "integrity": "sha512-x37qq3ChOTLd26hnps36lexMRhNXEtVxZ4B25rL0DVdDsGQIJGB18S7y9XDwlDD6MD/ZBzITCf4JjGMM10TZkw=="
+    },
+    "d3-contour": {
+      "version": "1.3.2",
+      "resolved": "https://registry.npmjs.org/d3-contour/-/d3-contour-1.3.2.tgz",
+      "integrity": "sha512-hoPp4K/rJCu0ladiH6zmJUEz6+u3lgR+GSm/QdM2BBvDraU39Vr7YdDCicJcxP1z8i9B/2dJLgDC1NcvlF8WCg==",
+      "requires": {
+        "d3-array": "1.2.4"
+      }
+    },
+    "d3-dispatch": {
+      "version": "1.0.5",
+      "resolved": "https://registry.npmjs.org/d3-dispatch/-/d3-dispatch-1.0.5.tgz",
+      "integrity": "sha512-vwKx+lAqB1UuCeklr6Jh1bvC4SZgbSqbkGBLClItFBIYH4vqDJCA7qfoy14lXmJdnBOdxndAMxjCbImJYW7e6g=="
+    },
+    "d3-drag": {
+      "version": "1.2.3",
+      "resolved": "https://registry.npmjs.org/d3-drag/-/d3-drag-1.2.3.tgz",
+      "integrity": "sha512-8S3HWCAg+ilzjJsNtWW1Mutl74Nmzhb9yU6igspilaJzeZVFktmY6oO9xOh5TDk+BM2KrNFjttZNoJJmDnkjkg==",
+      "requires": {
+        "d3-dispatch": "1.0.5",
+        "d3-selection": "1.3.2"
+      }
+    },
+    "d3-dsv": {
+      "version": "1.0.10",
+      "resolved": "https://registry.npmjs.org/d3-dsv/-/d3-dsv-1.0.10.tgz",
+      "integrity": "sha512-vqklfpxmtO2ZER3fq/B33R/BIz3A1PV0FaZRuFM8w6jLo7sUX1BZDh73fPlr0s327rzq4H6EN1q9U+eCBCSN8g==",
+      "requires": {
+        "commander": "2.15.1",
+        "iconv-lite": "0.4.23",
+        "rw": "1.3.3"
+      }
+    },
+    "d3-ease": {
+      "version": "1.0.5",
+      "resolved": "https://registry.npmjs.org/d3-ease/-/d3-ease-1.0.5.tgz",
+      "integrity": "sha512-Ct1O//ly5y5lFM9YTdu+ygq7LleSgSE4oj7vUt9tPLHUi8VCV7QoizGpdWRWAwCO9LdYzIrQDg97+hGVdsSGPQ=="
+    },
+    "d3-fetch": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/d3-fetch/-/d3-fetch-1.1.2.tgz",
+      "integrity": "sha512-S2loaQCV/ZeyTyIF2oP8D1K9Z4QizUzW7cWeAOAS4U88qOt3Ucf6GsmgthuYSdyB2HyEm4CeGvkQxWsmInsIVA==",
+      "requires": {
+        "d3-dsv": "1.0.10"
+      }
+    },
+    "d3-force": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/d3-force/-/d3-force-1.1.2.tgz",
+      "integrity": "sha512-p1vcHAUF1qH7yR+e8ip7Bs61AHjLeKkIn8Z2gzwU2lwEf2wkSpWdjXG0axudTHsVFnYGlMkFaEsVy2l8tAg1Gw==",
+      "requires": {
+        "d3-collection": "1.0.7",
+        "d3-dispatch": "1.0.5",
+        "d3-quadtree": "1.0.5",
+        "d3-timer": "1.0.9"
+      }
+    },
+    "d3-format": {
+      "version": "1.3.2",
+      "resolved": "https://registry.npmjs.org/d3-format/-/d3-format-1.3.2.tgz",
+      "integrity": "sha512-Z18Dprj96ExragQ0DeGi+SYPQ7pPfRMtUXtsg/ChVIKNBCzjO8XYJvRTC1usblx52lqge56V5ect+frYTQc8WQ=="
+    },
+    "d3-geo": {
+      "version": "1.11.3",
+      "resolved": "https://registry.npmjs.org/d3-geo/-/d3-geo-1.11.3.tgz",
+      "integrity": "sha512-n30yN9qSKREvV2fxcrhmHUdXP9TNH7ZZj3C/qnaoU0cVf/Ea85+yT7HY7i8ySPwkwjCNYtmKqQFTvLFngfkItQ==",
+      "requires": {
+        "d3-array": "1.2.4"
+      }
+    },
+    "d3-hierarchy": {
+      "version": "1.1.8",
+      "resolved": "https://registry.npmjs.org/d3-hierarchy/-/d3-hierarchy-1.1.8.tgz",
+      "integrity": "sha512-L+GHMSZNwTpiq4rt9GEsNcpLa4M96lXMR8M/nMG9p5hBE0jy6C+3hWtyZMenPQdwla249iJy7Nx0uKt3n+u9+w=="
+    },
+    "d3-interpolate": {
+      "version": "1.3.2",
+      "resolved": "https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-1.3.2.tgz",
+      "integrity": "sha512-NlNKGopqaz9qM1PXh9gBF1KSCVh+jSFErrSlD/4hybwoNX/gt1d8CDbDW+3i+5UOHhjC6s6nMvRxcuoMVNgL2w==",
+      "requires": {
+        "d3-color": "1.2.3"
+      }
+    },
+    "d3-path": {
+      "version": "1.0.7",
+      "resolved": "https://registry.npmjs.org/d3-path/-/d3-path-1.0.7.tgz",
+      "integrity": "sha512-q0cW1RpvA5c5ma2rch62mX8AYaiLX0+bdaSM2wxSU9tXjU4DNvkx9qiUvjkuWCj3p22UO/hlPivujqMiR9PDzA=="
+    },
+    "d3-polygon": {
+      "version": "1.0.5",
+      "resolved": "https://registry.npmjs.org/d3-polygon/-/d3-polygon-1.0.5.tgz",
+      "integrity": "sha512-RHhh1ZUJZfhgoqzWWuRhzQJvO7LavchhitSTHGu9oj6uuLFzYZVeBzaWTQ2qSO6bz2w55RMoOCf0MsLCDB6e0w=="
+    },
+    "d3-quadtree": {
+      "version": "1.0.5",
+      "resolved": "https://registry.npmjs.org/d3-quadtree/-/d3-quadtree-1.0.5.tgz",
+      "integrity": "sha512-U2tjwDFbZ75JRAg8A+cqMvqPg1G3BE7UTJn3h8DHjY/pnsAfWdbJKgyfcy7zKjqGtLAmI0q8aDSeG1TVIKRaHQ=="
+    },
+    "d3-random": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/d3-random/-/d3-random-1.1.2.tgz",
+      "integrity": "sha512-6AK5BNpIFqP+cx/sreKzNjWbwZQCSUatxq+pPRmFIQaWuoD+NrbVWw7YWpHiXpCQ/NanKdtGDuB+VQcZDaEmYQ=="
+    },
+    "d3-scale": {
+      "version": "2.1.2",
+      "resolved": "https://registry.npmjs.org/d3-scale/-/d3-scale-2.1.2.tgz",
+      "integrity": "sha512-bESpd64ylaKzCDzvULcmHKZTlzA/6DGSVwx7QSDj/EnX9cpSevsdiwdHFYI9ouo9tNBbV3v5xztHS2uFeOzh8Q==",
+      "requires": {
+        "d3-array": "1.2.4",
+        "d3-collection": "1.0.7",
+        "d3-format": "1.3.2",
+        "d3-interpolate": "1.3.2",
+        "d3-time": "1.0.10",
+        "d3-time-format": "2.1.3"
+      }
+    },
+    "d3-scale-chromatic": {
+      "version": "1.3.3",
+      "resolved": "https://registry.npmjs.org/d3-scale-chromatic/-/d3-scale-chromatic-1.3.3.tgz",
+      "integrity": "sha512-BWTipif1CimXcYfT02LKjAyItX5gKiwxuPRgr4xM58JwlLocWbjPLI7aMEjkcoOQXMkYsmNsvv3d2yl/OKuHHw==",
+      "requires": {
+        "d3-color": "1.2.3",
+        "d3-interpolate": "1.3.2"
+      }
+    },
+    "d3-selection": {
+      "version": "1.3.2",
+      "resolved": "https://registry.npmjs.org/d3-selection/-/d3-selection-1.3.2.tgz",
+      "integrity": "sha512-OoXdv1nZ7h2aKMVg3kaUFbLLK5jXUFAMLD/Tu5JA96mjf8f2a9ZUESGY+C36t8R1WFeWk/e55hy54Ml2I62CRQ=="
+    },
+    "d3-shape": {
+      "version": "1.2.2",
+      "resolved": "https://registry.npmjs.org/d3-shape/-/d3-shape-1.2.2.tgz",
+      "integrity": "sha512-hUGEozlKecFZ2bOSNt7ENex+4Tk9uc/m0TtTEHBvitCBxUNjhzm5hS2GrrVRD/ae4IylSmxGeqX5tWC2rASMlQ==",
+      "requires": {
+        "d3-path": "1.0.7"
+      }
+    },
+    "d3-time": {
+      "version": "1.0.10",
+      "resolved": "https://registry.npmjs.org/d3-time/-/d3-time-1.0.10.tgz",
+      "integrity": "sha512-hF+NTLCaJHF/JqHN5hE8HVGAXPStEq6/omumPE/SxyHVrR7/qQxusFDo0t0c/44+sCGHthC7yNGFZIEgju0P8g=="
+    },
+    "d3-time-format": {
+      "version": "2.1.3",
+      "resolved": "https://registry.npmjs.org/d3-time-format/-/d3-time-format-2.1.3.tgz",
+      "integrity": "sha512-6k0a2rZryzGm5Ihx+aFMuO1GgelgIz+7HhB4PH4OEndD5q2zGn1mDfRdNrulspOfR6JXkb2sThhDK41CSK85QA==",
+      "requires": {
+        "d3-time": "1.0.10"
+      }
+    },
+    "d3-timer": {
+      "version": "1.0.9",
+      "resolved": "https://registry.npmjs.org/d3-timer/-/d3-timer-1.0.9.tgz",
+      "integrity": "sha512-rT34J5HnQUHhcLvhSB9GjCkN0Ddd5Y8nCwDBG2u6wQEeYxT/Lf51fTFFkldeib/sE/J0clIe0pnCfs6g/lRbyg=="
+    },
+    "d3-transition": {
+      "version": "1.1.3",
+      "resolved": "https://registry.npmjs.org/d3-transition/-/d3-transition-1.1.3.tgz",
+      "integrity": "sha512-tEvo3qOXL6pZ1EzcXxFcPNxC/Ygivu5NoBY6mbzidATAeML86da+JfVIUzon3dNM6UX6zjDx+xbYDmMVtTSjuA==",
+      "requires": {
+        "d3-color": "1.2.3",
+        "d3-dispatch": "1.0.5",
+        "d3-ease": "1.0.5",
+        "d3-interpolate": "1.3.2",
+        "d3-selection": "1.3.2",
+        "d3-timer": "1.0.9"
+      }
+    },
+    "d3-voronoi": {
+      "version": "1.1.4",
+      "resolved": "https://registry.npmjs.org/d3-voronoi/-/d3-voronoi-1.1.4.tgz",
+      "integrity": "sha512-dArJ32hchFsrQ8uMiTBLq256MpnZjeuBtdHpaDlYuQyjU0CVzCJl/BVW+SkszaAeH95D/8gxqAhgx0ouAWAfRg=="
+    },
+    "d3-zoom": {
+      "version": "1.7.3",
+      "resolved": "https://registry.npmjs.org/d3-zoom/-/d3-zoom-1.7.3.tgz",
+      "integrity": "sha512-xEBSwFx5Z9T3/VrwDkMt+mr0HCzv7XjpGURJ8lWmIC8wxe32L39eWHIasEe/e7Ox8MPU4p1hvH8PKN2olLzIBg==",
+      "requires": {
+        "d3-dispatch": "1.0.5",
+        "d3-drag": "1.2.3",
+        "d3-interpolate": "1.3.2",
+        "d3-selection": "1.3.2",
+        "d3-transition": "1.1.3"
+      }
+    },
+    "debug": {
+      "version": "3.1.0",
+      "resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz",
+      "integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==",
+      "dev": true,
+      "requires": {
+        "ms": "2.0.0"
+      }
+    },
+    "decode-uri-component": {
+      "version": "0.2.0",
+      "resolved": "https://registry.npmjs.org/decode-uri-component/-/decode-uri-component-0.2.0.tgz",
+      "integrity": "sha1-6zkTMzRYd1y4TNGh+uBiEGu4dUU="
+    },
+    "deep-eql": {
+      "version": "3.0.1",
+      "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-3.0.1.tgz",
+      "integrity": "sha512-+QeIQyN5ZuO+3Uk5DYh6/1eKO0m0YmJFGNmFHGACpf1ClL1nmlV/p4gNgbl2pJGxgXb4faqo6UE+M5ACEMyVcw==",
+      "dev": true,
+      "requires": {
+        "type-detect": "4.0.8"
+      }
+    },
+    "deep-equal": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/deep-equal/-/deep-equal-1.0.1.tgz",
+      "integrity": "sha1-9dJgKStmDghO/0zbyfCK0yR0SLU=",
+      "dev": true
+    },
+    "deep-extend": {
+      "version": "0.6.0",
+      "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz",
+      "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==",
+      "dev": true
+    },
+    "deepmerge": {
+      "version": "2.2.1",
+      "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-2.2.1.tgz",
+      "integrity": "sha512-R9hc1Xa/NOBi9WRVUWg19rl1UB7Tt4kuPd+thNJgFZoxXsTz7ncaPaeIm+40oSGuP33DfMb4sZt1QIGiJzC4EA==",
+      "dev": true,
+      "optional": true
+    },
+    "defer-promise": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/defer-promise/-/defer-promise-1.0.1.tgz",
+      "integrity": "sha1-HKb/7dvO8XFd16riXHYW+a4iky8=",
+      "dev": true
+    },
+    "define-property": {
+      "version": "2.0.2",
+      "resolved": "https://registry.npmjs.org/define-property/-/define-property-2.0.2.tgz",
+      "integrity": "sha512-jwK2UV4cnPpbcG7+VRARKTZPUWowwXA8bzH5NP6ud0oeAxyYPuGZUAC7hMugpCdz4BeSZl2Dl9k66CHJ/46ZYQ==",
+      "requires": {
+        "is-descriptor": "1.0.2",
+        "isobject": "3.0.1"
+      },
+      "dependencies": {
+        "is-accessor-descriptor": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
+          "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
+          "requires": {
+            "kind-of": "6.0.2"
+          }
+        },
+        "is-data-descriptor": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
+          "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
+          "requires": {
+            "kind-of": "6.0.2"
+          }
+        },
+        "is-descriptor": {
+          "version": "1.0.2",
+          "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
+          "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
+          "requires": {
+            "is-accessor-descriptor": "1.0.0",
+            "is-data-descriptor": "1.0.0",
+            "kind-of": "6.0.2"
+          }
+        }
+      }
+    },
+    "delegates": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/delegates/-/delegates-1.0.0.tgz",
+      "integrity": "sha1-hMbhWbgZBP3KWaDvRM2HDTElD5o=",
+      "dev": true
+    },
+    "depd": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz",
+      "integrity": "sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak=",
+      "dev": true
+    },
+    "destroy": {
+      "version": "1.0.4",
+      "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.0.4.tgz",
+      "integrity": "sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA=",
+      "dev": true
+    },
+    "diff": {
+      "version": "3.5.0",
+      "resolved": "https://registry.npmjs.org/diff/-/diff-3.5.0.tgz",
+      "integrity": "sha512-A46qtFgd+g7pDZinpnwiRJtxbC1hpgf0uzP3iG89scHk0AUC7A1TGxf5OiiOUv/JMZR8GOt8hL900hV0bOy5xA==",
+      "dev": true
+    },
+    "ee-first": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
+      "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0=",
+      "dev": true
+    },
+    "error-inject": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/error-inject/-/error-inject-1.0.0.tgz",
+      "integrity": "sha1-4rPZG1Su1nLzCdlQ0VSFD6EdTzc=",
+      "dev": true
+    },
+    "escape-html": {
+      "version": "1.0.3",
+      "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz",
+      "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg=",
+      "dev": true
+    },
+    "escape-string-regexp": {
+      "version": "1.0.5",
+      "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz",
+      "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=",
+      "dev": true
+    },
+    "esprima": {
+      "version": "4.0.1",
+      "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz",
+      "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==",
+      "dev": true
+    },
+    "estree-walker": {
+      "version": "0.6.0",
+      "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-0.6.0.tgz",
+      "integrity": "sha512-peq1RfVAVzr3PU/jL31RaOjUKLoZJpObQWJJ+LgfcxDUifyLZ1RjPQZTl0pzj2uJ45b7A7XpyppXvxdEqzo4rw=="
+    },
+    "esutils": {
+      "version": "2.0.2",
+      "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.2.tgz",
+      "integrity": "sha1-Cr9PHKpbyx96nYrMbepPqqBLrJs=",
+      "dev": true
+    },
+    "etag": {
+      "version": "1.8.1",
+      "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz",
+      "integrity": "sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc=",
+      "dev": true
+    },
+    "expand-brackets": {
+      "version": "2.1.4",
+      "resolved": "https://registry.npmjs.org/expand-brackets/-/expand-brackets-2.1.4.tgz",
+      "integrity": "sha1-t3c14xXOMPa27/D4OwQVGiJEliI=",
+      "requires": {
+        "debug": "2.6.9",
+        "define-property": "0.2.5",
+        "extend-shallow": "2.0.1",
+        "posix-character-classes": "0.1.1",
+        "regex-not": "1.0.2",
+        "snapdragon": "0.8.2",
+        "to-regex": "3.0.2"
+      },
+      "dependencies": {
+        "debug": {
+          "version": "2.6.9",
+          "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+          "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+          "requires": {
+            "ms": "2.0.0"
+          }
+        },
+        "define-property": {
+          "version": "0.2.5",
+          "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
+          "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
+          "requires": {
+            "is-descriptor": "0.1.6"
+          }
+        },
+        "extend-shallow": {
+          "version": "2.0.1",
+          "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+          "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
+          "requires": {
+            "is-extendable": "0.1.1"
+          }
+        }
+      }
+    },
+    "extend-shallow": {
+      "version": "3.0.2",
+      "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz",
+      "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=",
+      "requires": {
+        "assign-symbols": "1.0.0",
+        "is-extendable": "1.0.1"
+      },
+      "dependencies": {
+        "is-extendable": {
+          "version": "1.0.1",
+          "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz",
+          "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==",
+          "requires": {
+            "is-plain-object": "2.0.4"
+          }
+        }
+      }
+    },
+    "extglob": {
+      "version": "2.0.4",
+      "resolved": "https://registry.npmjs.org/extglob/-/extglob-2.0.4.tgz",
+      "integrity": "sha512-Nmb6QXkELsuBr24CJSkilo6UHHgbekK5UiZgfE6UHD3Eb27YC6oD+bhcT+tJ6cl8dmsgdQxnWlcry8ksBIBLpw==",
+      "requires": {
+        "array-unique": "0.3.2",
+        "define-property": "1.0.0",
+        "expand-brackets": "2.1.4",
+        "extend-shallow": "2.0.1",
+        "fragment-cache": "0.2.1",
+        "regex-not": "1.0.2",
+        "snapdragon": "0.8.2",
+        "to-regex": "3.0.2"
+      },
+      "dependencies": {
+        "define-property": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz",
+          "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=",
+          "requires": {
+            "is-descriptor": "1.0.2"
+          }
+        },
+        "extend-shallow": {
+          "version": "2.0.1",
+          "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+          "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
+          "requires": {
+            "is-extendable": "0.1.1"
+          }
+        },
+        "is-accessor-descriptor": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
+          "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
+          "requires": {
+            "kind-of": "6.0.2"
+          }
+        },
+        "is-data-descriptor": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
+          "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
+          "requires": {
+            "kind-of": "6.0.2"
+          }
+        },
+        "is-descriptor": {
+          "version": "1.0.2",
+          "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
+          "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
+          "requires": {
+            "is-accessor-descriptor": "1.0.0",
+            "is-data-descriptor": "1.0.0",
+            "kind-of": "6.0.2"
+          }
+        }
+      }
+    },
+    "fill-range": {
+      "version": "4.0.0",
+      "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-4.0.0.tgz",
+      "integrity": "sha1-1USBHUKPmOsGpj3EAtJAPDKMOPc=",
+      "requires": {
+        "extend-shallow": "2.0.1",
+        "is-number": "3.0.0",
+        "repeat-string": "1.6.1",
+        "to-regex-range": "2.1.1"
+      },
+      "dependencies": {
+        "extend-shallow": {
+          "version": "2.0.1",
+          "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+          "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
+          "requires": {
+            "is-extendable": "0.1.1"
+          }
+        }
+      }
+    },
+    "find-replace": {
+      "version": "2.0.1",
+      "resolved": "https://registry.npmjs.org/find-replace/-/find-replace-2.0.1.tgz",
+      "integrity": "sha512-LzDo3Fpa30FLIBsh6DCDnMN1KW2g4QKkqKmejlImgWY67dDFPX/x9Kh/op/GK522DchQXEvDi/wD48HKW49XOQ==",
+      "dev": true,
+      "requires": {
+        "array-back": "2.0.0",
+        "test-value": "3.0.0"
+      }
+    },
+    "for-in": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/for-in/-/for-in-1.0.2.tgz",
+      "integrity": "sha1-gQaNKVqBQuwKxybG4iAMMPttXoA="
+    },
+    "fragment-cache": {
+      "version": "0.2.1",
+      "resolved": "https://registry.npmjs.org/fragment-cache/-/fragment-cache-0.2.1.tgz",
+      "integrity": "sha1-QpD60n8T6Jvn8zeZxrxaCr//DRk=",
+      "requires": {
+        "map-cache": "0.2.2"
+      }
+    },
+    "fresh": {
+      "version": "0.5.2",
+      "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz",
+      "integrity": "sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac=",
+      "dev": true
+    },
+    "fs-extra": {
+      "version": "7.0.1",
+      "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-7.0.1.tgz",
+      "integrity": "sha512-YJDaCJZEnBmcbw13fvdAM9AwNOJwOzrE4pqMqBq5nFiEqXUqHwlK4B+3pUw6JNvfSPtX05xFHtYy/1ni01eGCw==",
+      "requires": {
+        "graceful-fs": "4.1.15",
+        "jsonfile": "4.0.0",
+        "universalify": "0.1.2"
+      }
+    },
+    "fs.realpath": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
+      "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=",
+      "dev": true
+    },
+    "get-func-name": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/get-func-name/-/get-func-name-2.0.0.tgz",
+      "integrity": "sha1-6td0q+5y4gQJQzoGY2YCPdaIekE=",
+      "dev": true
+    },
+    "get-value": {
+      "version": "2.0.6",
+      "resolved": "https://registry.npmjs.org/get-value/-/get-value-2.0.6.tgz",
+      "integrity": "sha1-3BXKHGcjh8p2vTesCjlbogQqLCg="
+    },
+    "glob": {
+      "version": "7.1.2",
+      "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.2.tgz",
+      "integrity": "sha512-MJTUg1kjuLeQCJ+ccE4Vpa6kKVXkPYJ2mOCQyUuKLcLQsdrMCpBPUi8qVE6+YuaJkozeA9NusTAw3hLr8Xe5EQ==",
+      "dev": true,
+      "requires": {
+        "fs.realpath": "1.0.0",
+        "inflight": "1.0.6",
+        "inherits": "2.0.3",
+        "minimatch": "3.0.4",
+        "once": "1.4.0",
+        "path-is-absolute": "1.0.1"
+      }
+    },
+    "graceful-fs": {
+      "version": "4.1.15",
+      "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.1.15.tgz",
+      "integrity": "sha512-6uHUhOPEBgQ24HM+r6b/QwWfZq+yiFcipKFrOFiBEnWdy5sdzYoi+pJeQaPI5qOLRFqWmAXUPQNsielzdLoecA=="
+    },
+    "growl": {
+      "version": "1.10.5",
+      "resolved": "https://registry.npmjs.org/growl/-/growl-1.10.5.tgz",
+      "integrity": "sha512-qBr4OuELkhPenW6goKVXiv47US3clb3/IbuWF9KNKEijAy9oeHxU9IgzjvJhHkUzhaj7rOUD7+YGWqUjLp5oSA==",
+      "dev": true
+    },
+    "has-ansi": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/has-ansi/-/has-ansi-2.0.0.tgz",
+      "integrity": "sha1-NPUEnOHs3ysGSa8+8k5F7TVBbZE=",
+      "dev": true,
+      "requires": {
+        "ansi-regex": "2.1.1"
+      }
+    },
+    "has-flag": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
+      "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=",
+      "dev": true
+    },
+    "has-value": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/has-value/-/has-value-1.0.0.tgz",
+      "integrity": "sha1-GLKB2lhbHFxR3vJMkw7SmgvmsXc=",
+      "requires": {
+        "get-value": "2.0.6",
+        "has-values": "1.0.0",
+        "isobject": "3.0.1"
+      }
+    },
+    "has-values": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/has-values/-/has-values-1.0.0.tgz",
+      "integrity": "sha1-lbC2P+whRmGab+V/51Yo1aOe/k8=",
+      "requires": {
+        "is-number": "3.0.0",
+        "kind-of": "4.0.0"
+      },
+      "dependencies": {
+        "kind-of": {
+          "version": "4.0.0",
+          "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-4.0.0.tgz",
+          "integrity": "sha1-IIE989cSkosgc3hpGkUGb65y3Vc=",
+          "requires": {
+            "is-buffer": "1.1.6"
+          }
+        }
+      }
+    },
+    "he": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/he/-/he-1.1.1.tgz",
+      "integrity": "sha1-k0EP0hsAlzUVH4howvJx80J+I/0=",
+      "dev": true
+    },
+    "http-assert": {
+      "version": "1.4.0",
+      "resolved": "https://registry.npmjs.org/http-assert/-/http-assert-1.4.0.tgz",
+      "integrity": "sha512-tPVv62a6l3BbQoM/N5qo969l0OFxqpnQzNUPeYfTP6Spo4zkgWeDBD1D5thI7sDLg7jCCihXTLB0X8UtdyAy8A==",
+      "dev": true,
+      "requires": {
+        "deep-equal": "1.0.1",
+        "http-errors": "1.7.1"
+      }
+    },
+    "http-errors": {
+      "version": "1.7.1",
+      "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.7.1.tgz",
+      "integrity": "sha512-jWEUgtZWGSMba9I1N3gc1HmvpBUaNC9vDdA46yScAdp+C5rdEuKWUBLWTQpW9FwSWSbYYs++b6SDCxf9UEJzfw==",
+      "dev": true,
+      "requires": {
+        "depd": "1.1.2",
+        "inherits": "2.0.3",
+        "setprototypeof": "1.1.0",
+        "statuses": "1.5.0",
+        "toidentifier": "1.0.0"
+      }
+    },
+    "iconv-lite": {
+      "version": "0.4.23",
+      "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.23.tgz",
+      "integrity": "sha512-neyTUVFtahjf0mB3dZT77u+8O0QB89jFdnBkd5P1JgYPbPaia3gXXOVL2fq8VyU2gMMD7SaN7QukTB/pmXYvDA==",
+      "requires": {
+        "safer-buffer": "2.1.2"
+      }
+    },
+    "inflation": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/inflation/-/inflation-2.0.0.tgz",
+      "integrity": "sha1-i0F+R8KPklpFEz2RTKH9OJEH8w8=",
+      "dev": true
+    },
+    "inflight": {
+      "version": "1.0.6",
+      "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
+      "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=",
+      "dev": true,
+      "requires": {
+        "once": "1.4.0",
+        "wrappy": "1.0.2"
+      }
+    },
+    "inherits": {
+      "version": "2.0.3",
+      "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz",
+      "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=",
+      "dev": true
+    },
+    "is-accessor-descriptor": {
+      "version": "0.1.6",
+      "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz",
+      "integrity": "sha1-qeEss66Nh2cn7u84Q/igiXtcmNY=",
+      "requires": {
+        "kind-of": "3.2.2"
+      },
+      "dependencies": {
+        "kind-of": {
+          "version": "3.2.2",
+          "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+          "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+          "requires": {
+            "is-buffer": "1.1.6"
+          }
+        }
+      }
+    },
+    "is-buffer": {
+      "version": "1.1.6",
+      "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz",
+      "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w=="
+    },
+    "is-data-descriptor": {
+      "version": "0.1.4",
+      "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz",
+      "integrity": "sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y=",
+      "requires": {
+        "kind-of": "3.2.2"
+      },
+      "dependencies": {
+        "kind-of": {
+          "version": "3.2.2",
+          "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+          "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+          "requires": {
+            "is-buffer": "1.1.6"
+          }
+        }
+      }
+    },
+    "is-descriptor": {
+      "version": "0.1.6",
+      "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz",
+      "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==",
+      "requires": {
+        "is-accessor-descriptor": "0.1.6",
+        "is-data-descriptor": "0.1.4",
+        "kind-of": "5.1.0"
+      },
+      "dependencies": {
+        "kind-of": {
+          "version": "5.1.0",
+          "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz",
+          "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw=="
+        }
+      }
+    },
+    "is-extendable": {
+      "version": "0.1.1",
+      "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz",
+      "integrity": "sha1-YrEQ4omkcUGOPsNqYX1HLjAd/Ik="
+    },
+    "is-generator-function": {
+      "version": "1.0.7",
+      "resolved": "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.0.7.tgz",
+      "integrity": "sha512-YZc5EwyO4f2kWCax7oegfuSr9mFz1ZvieNYBEjmukLxgXfBUbxAWGVF7GZf0zidYtoBl3WvC07YK0wT76a+Rtw==",
+      "dev": true
+    },
+    "is-module": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/is-module/-/is-module-1.0.0.tgz",
+      "integrity": "sha1-Mlj7afeMFNW4FdZkM2tM/7ZEFZE="
+    },
+    "is-number": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz",
+      "integrity": "sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU=",
+      "requires": {
+        "kind-of": "3.2.2"
+      },
+      "dependencies": {
+        "kind-of": {
+          "version": "3.2.2",
+          "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+          "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+          "requires": {
+            "is-buffer": "1.1.6"
+          }
+        }
+      }
+    },
+    "is-plain-object": {
+      "version": "2.0.4",
+      "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz",
+      "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==",
+      "requires": {
+        "isobject": "3.0.1"
+      }
+    },
+    "is-windows": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz",
+      "integrity": "sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA=="
+    },
+    "is-wsl": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-1.1.0.tgz",
+      "integrity": "sha1-HxbkqiKwTRM2tmGIpmrzxgDDpm0=",
+      "dev": true
+    },
+    "isarray": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
+      "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE="
+    },
+    "isobject": {
+      "version": "3.0.1",
+      "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz",
+      "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8="
+    },
+    "js-tokens": {
+      "version": "3.0.2",
+      "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-3.0.2.tgz",
+      "integrity": "sha1-mGbfOVECEw449/mWvOtlRDIJwls=",
+      "dev": true
+    },
+    "js-yaml": {
+      "version": "3.13.1",
+      "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.13.1.tgz",
+      "integrity": "sha512-YfbcO7jXDdyj0DGxYVSlSeQNHbD7XPWvrVWeVUujrQEoZzWJIRrCPoyk6kL6IAjAG2IolMK4T0hNUe0HOUs5Jw==",
+      "dev": true,
+      "requires": {
+        "argparse": "1.0.10",
+        "esprima": "4.0.1"
+      }
+    },
+    "json-stringify-safe": {
+      "version": "5.0.1",
+      "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz",
+      "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus=",
+      "dev": true
+    },
+    "json5": {
+      "version": "1.0.1",
+      "resolved": "http://registry.npmjs.org/json5/-/json5-1.0.1.tgz",
+      "integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==",
+      "dev": true,
+      "optional": true,
+      "requires": {
+        "minimist": "1.2.0"
+      }
+    },
+    "jsonfile": {
+      "version": "4.0.0",
+      "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz",
+      "integrity": "sha1-h3Gq4HmbZAdrdmQPygWPnBDjPss=",
+      "requires": {
+        "graceful-fs": "4.1.15"
+      }
+    },
+    "jsonparse": {
+      "version": "1.3.1",
+      "resolved": "https://registry.npmjs.org/jsonparse/-/jsonparse-1.3.1.tgz",
+      "integrity": "sha1-P02uSpH6wxX3EGL4UhzCOfE2YoA=",
+      "dev": true
+    },
+    "keygrip": {
+      "version": "1.0.3",
+      "resolved": "https://registry.npmjs.org/keygrip/-/keygrip-1.0.3.tgz",
+      "integrity": "sha512-/PpesirAIfaklxUzp4Yb7xBper9MwP6hNRA6BGGUFCgbJ+BM5CKBtsoxinNXkLHAr+GXS1/lSlF2rP7cv5Fl+g==",
+      "dev": true
+    },
+    "kind-of": {
+      "version": "6.0.2",
+      "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz",
+      "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA=="
+    },
+    "koa": {
+      "version": "2.6.1",
+      "resolved": "https://registry.npmjs.org/koa/-/koa-2.6.1.tgz",
+      "integrity": "sha512-n9R5Eex4y0drUeqFTeCIeXyz8wjr2AxBo2Cq8LvmiXbJl4yDA5KIrecMPkhnmgACZnPXMRyCLbJoyLmpM9aFAw==",
+      "dev": true,
+      "requires": {
+        "accepts": "1.3.5",
+        "cache-content-type": "1.0.1",
+        "content-disposition": "0.5.2",
+        "content-type": "1.0.4",
+        "cookies": "0.7.2",
+        "debug": "3.1.0",
+        "delegates": "1.0.0",
+        "depd": "1.1.2",
+        "destroy": "1.0.4",
+        "error-inject": "1.0.0",
+        "escape-html": "1.0.3",
+        "fresh": "0.5.2",
+        "http-assert": "1.4.0",
+        "http-errors": "1.7.1",
+        "is-generator-function": "1.0.7",
+        "koa-compose": "4.1.0",
+        "koa-convert": "1.2.0",
+        "koa-is-json": "1.0.0",
+        "on-finished": "2.3.0",
+        "only": "0.0.2",
+        "parseurl": "1.3.2",
+        "statuses": "1.5.0",
+        "type-is": "1.6.16",
+        "vary": "1.1.2"
+      }
+    },
+    "koa-bodyparser": {
+      "version": "4.2.1",
+      "resolved": "https://registry.npmjs.org/koa-bodyparser/-/koa-bodyparser-4.2.1.tgz",
+      "integrity": "sha512-UIjPAlMZfNYDDe+4zBaOAUKYqkwAGcIU6r2ARf1UOXPAlfennQys5IiShaVeNf7KkVBlf88f2LeLvBFvKylttw==",
+      "dev": true,
+      "requires": {
+        "co-body": "6.0.0",
+        "copy-to": "2.0.1"
+      }
+    },
+    "koa-compose": {
+      "version": "4.1.0",
+      "resolved": "https://registry.npmjs.org/koa-compose/-/koa-compose-4.1.0.tgz",
+      "integrity": "sha512-8ODW8TrDuMYvXRwra/Kh7/rJo9BtOfPc6qO8eAfC80CnCvSjSl0bkRM24X6/XBBEyj0v1nRUQ1LyOy3dbqOWXw==",
+      "dev": true
+    },
+    "koa-compress": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/koa-compress/-/koa-compress-2.0.0.tgz",
+      "integrity": "sha1-e36ykhuEd0a14SK6n1zYpnHo6jo=",
+      "dev": true,
+      "requires": {
+        "bytes": "2.5.0",
+        "compressible": "2.0.15",
+        "koa-is-json": "1.0.0",
+        "statuses": "1.5.0"
+      },
+      "dependencies": {
+        "bytes": {
+          "version": "2.5.0",
+          "resolved": "https://registry.npmjs.org/bytes/-/bytes-2.5.0.tgz",
+          "integrity": "sha1-TJQj6i0lLCcMQbK97+/5u2tiwGo=",
+          "dev": true
+        }
+      }
+    },
+    "koa-conditional-get": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/koa-conditional-get/-/koa-conditional-get-2.0.0.tgz",
+      "integrity": "sha1-pD83I8HQFLcwo07Oit8wuTyCM/I=",
+      "dev": true
+    },
+    "koa-convert": {
+      "version": "1.2.0",
+      "resolved": "https://registry.npmjs.org/koa-convert/-/koa-convert-1.2.0.tgz",
+      "integrity": "sha1-2kCHXfSd4FOQmNFwC1CCDOvNIdA=",
+      "dev": true,
+      "requires": {
+        "co": "4.6.0",
+        "koa-compose": "3.2.1"
+      },
+      "dependencies": {
+        "koa-compose": {
+          "version": "3.2.1",
+          "resolved": "https://registry.npmjs.org/koa-compose/-/koa-compose-3.2.1.tgz",
+          "integrity": "sha1-qFzLQLfZhtjlo0Wzoazo6rz1Tec=",
+          "dev": true,
+          "requires": {
+            "any-promise": "1.3.0"
+          }
+        }
+      }
+    },
+    "koa-etag": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/koa-etag/-/koa-etag-3.0.0.tgz",
+      "integrity": "sha1-nvc4Ld1agqsN6xU0FckVg293HT8=",
+      "dev": true,
+      "requires": {
+        "etag": "1.8.1",
+        "mz": "2.7.0"
+      }
+    },
+    "koa-is-json": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/koa-is-json/-/koa-is-json-1.0.0.tgz",
+      "integrity": "sha1-JzwH7c3Ljfaiwat9We52SRRR7BQ=",
+      "dev": true
+    },
+    "koa-json": {
+      "version": "2.0.2",
+      "resolved": "https://registry.npmjs.org/koa-json/-/koa-json-2.0.2.tgz",
+      "integrity": "sha1-Nq8U5uofXWRtfESihXAcb4Wk/eQ=",
+      "dev": true,
+      "requires": {
+        "koa-is-json": "1.0.0",
+        "streaming-json-stringify": "3.1.0"
+      }
+    },
+    "koa-mock-response": {
+      "version": "0.2.0",
+      "resolved": "https://registry.npmjs.org/koa-mock-response/-/koa-mock-response-0.2.0.tgz",
+      "integrity": "sha512-HmybRN1a3WqcSFvf7tycu2YhBIEHeqzm8bwcsShNWGsTgP86coZOpdI8aqYm/1DFsAQMctnpdWrva4rDr1Pibg==",
+      "dev": true,
+      "requires": {
+        "array-back": "2.0.0",
+        "path-to-regexp": "1.7.0",
+        "typical": "2.6.1"
+      },
+      "dependencies": {
+        "isarray": {
+          "version": "0.0.1",
+          "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz",
+          "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=",
+          "dev": true
+        },
+        "path-to-regexp": {
+          "version": "1.7.0",
+          "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.7.0.tgz",
+          "integrity": "sha1-Wf3g9DW62suhA6hOnTvGTpa5k30=",
+          "dev": true,
+          "requires": {
+            "isarray": "0.0.1"
+          }
+        },
+        "typical": {
+          "version": "2.6.1",
+          "resolved": "https://registry.npmjs.org/typical/-/typical-2.6.1.tgz",
+          "integrity": "sha1-XAgOXWYcu+OCWdLnCjxyU+hziB0=",
+          "dev": true
+        }
+      }
+    },
+    "koa-morgan": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/koa-morgan/-/koa-morgan-1.0.1.tgz",
+      "integrity": "sha1-CAUuDODYOdPEMXi5CluzQkvvH5k=",
+      "dev": true,
+      "requires": {
+        "morgan": "1.9.1"
+      }
+    },
+    "koa-range": {
+      "version": "0.3.0",
+      "resolved": "https://registry.npmjs.org/koa-range/-/koa-range-0.3.0.tgz",
+      "integrity": "sha1-NYjjSWRzqDmhvSZNKkKx2FvX/qw=",
+      "dev": true,
+      "requires": {
+        "stream-slice": "0.1.2"
+      }
+    },
+    "koa-rewrite-75lb": {
+      "version": "2.1.1",
+      "resolved": "https://registry.npmjs.org/koa-rewrite-75lb/-/koa-rewrite-75lb-2.1.1.tgz",
+      "integrity": "sha512-i9ofDKLs0xNCb2PW7wKGFzBFX6+Ce3aKoZzNKPh0fkejeUOTWkkDqnjXrgqrJEP2ifX6WWsHp6VtGuXzSYLSWQ==",
+      "dev": true,
+      "requires": {
+        "path-to-regexp": "1.7.0"
+      },
+      "dependencies": {
+        "isarray": {
+          "version": "0.0.1",
+          "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz",
+          "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=",
+          "dev": true
+        },
+        "path-to-regexp": {
+          "version": "1.7.0",
+          "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.7.0.tgz",
+          "integrity": "sha1-Wf3g9DW62suhA6hOnTvGTpa5k30=",
+          "dev": true,
+          "requires": {
+            "isarray": "0.0.1"
+          }
+        }
+      }
+    },
+    "koa-route": {
+      "version": "3.2.0",
+      "resolved": "https://registry.npmjs.org/koa-route/-/koa-route-3.2.0.tgz",
+      "integrity": "sha1-dimLmaa8+p44yrb+XHmocz51i84=",
+      "dev": true,
+      "requires": {
+        "debug": "3.1.0",
+        "methods": "1.1.2",
+        "path-to-regexp": "1.7.0"
+      },
+      "dependencies": {
+        "isarray": {
+          "version": "0.0.1",
+          "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz",
+          "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=",
+          "dev": true
+        },
+        "path-to-regexp": {
+          "version": "1.7.0",
+          "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.7.0.tgz",
+          "integrity": "sha1-Wf3g9DW62suhA6hOnTvGTpa5k30=",
+          "dev": true,
+          "requires": {
+            "isarray": "0.0.1"
+          }
+        }
+      }
+    },
+    "koa-send": {
+      "version": "4.1.3",
+      "resolved": "http://registry.npmjs.org/koa-send/-/koa-send-4.1.3.tgz",
+      "integrity": "sha512-3UetMBdaXSiw24qM2Mx5mKmxLKw5ZTPRjACjfhK6Haca55RKm9hr/uHDrkrxhSl5/S1CKI/RivZVIopiatZuTA==",
+      "dev": true,
+      "requires": {
+        "debug": "2.6.9",
+        "http-errors": "1.7.1",
+        "mz": "2.7.0",
+        "resolve-path": "1.4.0"
+      },
+      "dependencies": {
+        "debug": {
+          "version": "2.6.9",
+          "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+          "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+          "dev": true,
+          "requires": {
+            "ms": "2.0.0"
+          }
+        }
+      }
+    },
+    "koa-static": {
+      "version": "4.0.3",
+      "resolved": "https://registry.npmjs.org/koa-static/-/koa-static-4.0.3.tgz",
+      "integrity": "sha512-JGmxTuPWy4bH7bt6gD/OMWkhprawvRmzJSr8TWKmTL4N7+IMv3s0SedeQi5S4ilxM9Bo6ptkCyXj/7wf+VS5tg==",
+      "dev": true,
+      "requires": {
+        "debug": "3.1.0",
+        "koa-send": "4.1.3"
+      }
+    },
+    "load-module": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/load-module/-/load-module-1.0.0.tgz",
+      "integrity": "sha512-FmoAJI/RM4vmvIRk65g/SFCnGQC9BbALY3zy38Z0cMllNnra1+iCdxAf051LVymzE60/FweOo9or9XJiGgFshg==",
+      "dev": true,
+      "requires": {
+        "array-back": "2.0.0"
+      }
+    },
+    "local-web-server": {
+      "version": "2.6.0",
+      "resolved": "https://registry.npmjs.org/local-web-server/-/local-web-server-2.6.0.tgz",
+      "integrity": "sha512-m7Z5zlzZFxMyiK1W8xR5TJMh00Fy9z7Po8vilSQCpeU4LG2VMK667xCkASBUepFR9fPj6heUMBHu9P/TrwDqFw==",
+      "dev": true,
+      "requires": {
+        "lws": "1.3.0",
+        "lws-basic-auth": "0.1.1",
+        "lws-blacklist": "0.3.0",
+        "lws-body-parser": "0.2.4",
+        "lws-compress": "0.2.1",
+        "lws-conditional-get": "0.3.4",
+        "lws-cors": "1.0.0",
+        "lws-index": "0.4.0",
+        "lws-json": "0.3.2",
+        "lws-log": "0.3.2",
+        "lws-mime": "0.2.2",
+        "lws-mock-response": "0.5.1",
+        "lws-range": "1.1.0",
+        "lws-request-monitor": "0.1.5",
+        "lws-rewrite": "0.4.1",
+        "lws-spa": "0.3.0",
+        "lws-static": "0.5.0",
+        "node-version-matches": "1.0.0"
+      }
+    },
+    "lodash.assignwith": {
+      "version": "4.2.0",
+      "resolved": "https://registry.npmjs.org/lodash.assignwith/-/lodash.assignwith-4.2.0.tgz",
+      "integrity": "sha1-EnqX8CrcQXUalU0ksN4X4QDgOOs=",
+      "dev": true
+    },
+    "lodash.camelcase": {
+      "version": "4.3.0",
+      "resolved": "https://registry.npmjs.org/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz",
+      "integrity": "sha1-soqmKIorn8ZRA1x3EfZathkDMaY=",
+      "dev": true
+    },
+    "lodash.padend": {
+      "version": "4.6.1",
+      "resolved": "https://registry.npmjs.org/lodash.padend/-/lodash.padend-4.6.1.tgz",
+      "integrity": "sha1-U8y6BH0G4VjTEfRdpiX05J5vFm4=",
+      "dev": true
+    },
+    "lodash.pick": {
+      "version": "4.4.0",
+      "resolved": "https://registry.npmjs.org/lodash.pick/-/lodash.pick-4.4.0.tgz",
+      "integrity": "sha1-UvBWEP/53tQiYRRB7R/BI6AwAbM=",
+      "dev": true
+    },
+    "lodash.throttle": {
+      "version": "4.1.1",
+      "resolved": "https://registry.npmjs.org/lodash.throttle/-/lodash.throttle-4.1.1.tgz",
+      "integrity": "sha1-wj6RtxAkKscMN/HhzaknTMOb8vQ=",
+      "dev": true
+    },
+    "lws": {
+      "version": "1.3.0",
+      "resolved": "https://registry.npmjs.org/lws/-/lws-1.3.0.tgz",
+      "integrity": "sha512-2gOJzVtgjg4mA1cyWnzkICR/NLuMD24sbRSwQeVZeVkadp0VOKTlpmnjvA1tQpkb1TGrcOS+N+3vKMJST8tt2w==",
+      "dev": true,
+      "requires": {
+        "ansi-escape-sequences": "4.0.0",
+        "array-back": "2.0.0",
+        "byte-size": "4.0.4",
+        "cli-commands": "0.4.0",
+        "command-line-args": "5.0.2",
+        "command-line-usage": "5.0.5",
+        "koa": "2.6.1",
+        "load-module": "1.0.0",
+        "lodash.assignwith": "4.2.0",
+        "node-version-matches": "1.0.0",
+        "opn": "5.4.0",
+        "reduce-flatten": "2.0.0",
+        "typical": "3.0.0",
+        "walk-back": "3.0.0",
+        "ws": "5.2.2"
+      }
+    },
+    "lws-basic-auth": {
+      "version": "0.1.1",
+      "resolved": "https://registry.npmjs.org/lws-basic-auth/-/lws-basic-auth-0.1.1.tgz",
+      "integrity": "sha512-npPpqkOFzJzB9yJ2pGXmiYOswH+0n86ro75WhromeGuNo0GfE18ZLI/VCOVWmBbeXp2pcnPIMUAdkNSgukpAww==",
+      "dev": true,
+      "requires": {
+        "basic-auth": "1.1.0"
+      }
+    },
+    "lws-blacklist": {
+      "version": "0.3.0",
+      "resolved": "https://registry.npmjs.org/lws-blacklist/-/lws-blacklist-0.3.0.tgz",
+      "integrity": "sha512-ZA8dujYaZwRNMBhgP+oGsZi9tum44Ba6VHsA3JrV1JVrjZ8c65kLaO/41rLBqQDKP3SDPu7dLity4YLwe1FuNQ==",
+      "dev": true,
+      "requires": {
+        "array-back": "2.0.0",
+        "path-to-regexp": "2.4.0"
+      }
+    },
+    "lws-body-parser": {
+      "version": "0.2.4",
+      "resolved": "https://registry.npmjs.org/lws-body-parser/-/lws-body-parser-0.2.4.tgz",
+      "integrity": "sha512-XKJzbzK97TUsewIPA5J2RpEk7kRoJcL+/Du6JlwzqIq84tWuXMfiT2a4Ncj12+tRWrdY2avV6d8uLhqlHLz1yg==",
+      "dev": true,
+      "requires": {
+        "koa-bodyparser": "4.2.1"
+      }
+    },
+    "lws-compress": {
+      "version": "0.2.1",
+      "resolved": "https://registry.npmjs.org/lws-compress/-/lws-compress-0.2.1.tgz",
+      "integrity": "sha512-14++1o6U8upi3DLx9J2O2sFELsijEJF9utoFxSH4Stoo9SdU2Cxw6BtqQTrb9SEA6O6IsApzstdMYnq8floLSg==",
+      "dev": true,
+      "requires": {
+        "koa-compress": "2.0.0"
+      }
+    },
+    "lws-conditional-get": {
+      "version": "0.3.4",
+      "resolved": "https://registry.npmjs.org/lws-conditional-get/-/lws-conditional-get-0.3.4.tgz",
+      "integrity": "sha512-6asZSfM747snhdz4xexRllm09pebz8pjYeg2d5khLR53D/OJznZWHsIqW0JGiScJObri2D7+H4z7yRLBjokT7g==",
+      "dev": true,
+      "requires": {
+        "koa-conditional-get": "2.0.0",
+        "koa-etag": "3.0.0"
+      }
+    },
+    "lws-cors": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/lws-cors/-/lws-cors-1.0.0.tgz",
+      "integrity": "sha512-4C0m4lvYdAnpAa03tr9AqziB4d8SRPh4beQBuzPiefv7N9/tpVdrl9kgXrUe1hLHhISnVJ5MoOZuZ6wFeMiU4g==",
+      "dev": true,
+      "requires": {
+        "@koa/cors": "2.2.2"
+      }
+    },
+    "lws-index": {
+      "version": "0.4.0",
+      "resolved": "https://registry.npmjs.org/lws-index/-/lws-index-0.4.0.tgz",
+      "integrity": "sha512-k+mkqgMSzx1ipzVpaxsAJU4Qe7R1kp1B/u+qC+d1Y3l+auBz+bLcIxL4dYKfaxLqiz0IFwg1dZwGzVm/dd7FFw==",
+      "dev": true,
+      "requires": {
+        "serve-index-75lb": "2.0.1"
+      }
+    },
+    "lws-json": {
+      "version": "0.3.2",
+      "resolved": "https://registry.npmjs.org/lws-json/-/lws-json-0.3.2.tgz",
+      "integrity": "sha512-ElmCA8hi3GPMfxbtiI015PDHuJovhhcbXX/qTTTifXhopedAzIBzn/rF5dHZHE4k7HQDYfbiaPgPMbmpv9dMvQ==",
+      "dev": true,
+      "requires": {
+        "koa-json": "2.0.2"
+      }
+    },
+    "lws-log": {
+      "version": "0.3.2",
+      "resolved": "https://registry.npmjs.org/lws-log/-/lws-log-0.3.2.tgz",
+      "integrity": "sha512-DRp4bFl4a7hjwR/RjARjhFLEXs8pIeqKbUvojaAl1hhfRBuW2JsDxRSKC+ViQN06CW4Qypg3ZsztMMR8dRO8dA==",
+      "dev": true,
+      "requires": {
+        "koa-morgan": "1.0.1",
+        "stream-log-stats": "2.0.2"
+      }
+    },
+    "lws-mime": {
+      "version": "0.2.2",
+      "resolved": "https://registry.npmjs.org/lws-mime/-/lws-mime-0.2.2.tgz",
+      "integrity": "sha512-cWBj9CuuSvvaqdYMPiXRid0QhzJmr+5gWAA96pEDOiW8tMCMoxl7CIgTpHXZwhJzCqdI84RZDVm+FswByATS5w==",
+      "dev": true
+    },
+    "lws-mock-response": {
+      "version": "0.5.1",
+      "resolved": "https://registry.npmjs.org/lws-mock-response/-/lws-mock-response-0.5.1.tgz",
+      "integrity": "sha512-4R5Q1RmRglC0pqEwywrS5g62aKaLQsteMnShGmWU9aQ/737Bq0/3qbQ3mb8VbMk3lLzo3ZaNZ1DUsPgVvZaXNQ==",
+      "dev": true,
+      "requires": {
+        "array-back": "2.0.0",
+        "koa-mock-response": "0.2.0",
+        "load-module": "1.0.0",
+        "reduce-flatten": "2.0.0"
+      }
+    },
+    "lws-range": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/lws-range/-/lws-range-1.1.0.tgz",
+      "integrity": "sha512-Mpx6FdO58Z4l6DAXlATsC2zm10QvyGYElQvFd7P1xqUSTPoYG0wAxfjlpqI+Qdb2O7W4Ah21yESVnPEwae3SIw==",
+      "dev": true,
+      "requires": {
+        "koa-range": "0.3.0"
+      }
+    },
+    "lws-request-monitor": {
+      "version": "0.1.5",
+      "resolved": "https://registry.npmjs.org/lws-request-monitor/-/lws-request-monitor-0.1.5.tgz",
+      "integrity": "sha512-u9eczHPowH17ftUjQ8ysutGDADNZdDD6k8wgFMzOB7/rRq1Is12lTYA4u8pfKZ8C2oyoy+HYsDSrOzTwespTlA==",
+      "dev": true,
+      "requires": {
+        "byte-size": "4.0.4"
+      }
+    },
+    "lws-rewrite": {
+      "version": "0.4.1",
+      "resolved": "https://registry.npmjs.org/lws-rewrite/-/lws-rewrite-0.4.1.tgz",
+      "integrity": "sha512-EHUdbqfdwc4Baa7iXOdG2y815WC040Cing1GwhM9VsBL7lHtZ7zl3EHzjWFv3styoO3qNqZ4W0xCey4hoo/aYg==",
+      "dev": true,
+      "requires": {
+        "array-back": "2.0.0",
+        "koa-rewrite-75lb": "2.1.1",
+        "koa-route": "3.2.0",
+        "path-to-regexp": "1.7.0",
+        "req-then": "0.6.4",
+        "stream-read-all": "0.1.2",
+        "typical": "2.6.1"
+      },
+      "dependencies": {
+        "isarray": {
+          "version": "0.0.1",
+          "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz",
+          "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=",
+          "dev": true
+        },
+        "path-to-regexp": {
+          "version": "1.7.0",
+          "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.7.0.tgz",
+          "integrity": "sha1-Wf3g9DW62suhA6hOnTvGTpa5k30=",
+          "dev": true,
+          "requires": {
+            "isarray": "0.0.1"
+          }
+        },
+        "typical": {
+          "version": "2.6.1",
+          "resolved": "https://registry.npmjs.org/typical/-/typical-2.6.1.tgz",
+          "integrity": "sha1-XAgOXWYcu+OCWdLnCjxyU+hziB0=",
+          "dev": true
+        }
+      }
+    },
+    "lws-spa": {
+      "version": "0.3.0",
+      "resolved": "https://registry.npmjs.org/lws-spa/-/lws-spa-0.3.0.tgz",
+      "integrity": "sha512-8wxZl5dOI/CQsJ6oOG8Y7B4khjlQXfB7GlVkjYFPuOYM+JIw/QzMvezKjKweG0qGePmHJVHWa38+CyololV4aw==",
+      "dev": true,
+      "requires": {
+        "koa-route": "3.2.0",
+        "koa-send": "4.1.3"
+      }
+    },
+    "lws-static": {
+      "version": "0.5.0",
+      "resolved": "https://registry.npmjs.org/lws-static/-/lws-static-0.5.0.tgz",
+      "integrity": "sha512-r3QIeJfBox/hSJLSL7TPhNSZsTKE0r4mWYHbGZ+DwrBcKbLt1ljsh5NAtmJpsqCcjYpyOuD/DlsZ0yQY9VI8bA==",
+      "dev": true,
+      "requires": {
+        "koa-static": "4.0.3"
+      }
+    },
+    "make-error": {
+      "version": "1.3.5",
+      "resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.5.tgz",
+      "integrity": "sha512-c3sIjNUow0+8swNwVpqoH4YCShKNFkMaw6oH1mNS2haDZQqkeZFlHS3dhoeEbKKmJB4vXpJucU6oH75aDYeE9g==",
+      "dev": true
+    },
+    "map-cache": {
+      "version": "0.2.2",
+      "resolved": "https://registry.npmjs.org/map-cache/-/map-cache-0.2.2.tgz",
+      "integrity": "sha1-wyq9C9ZSXZsFFkW7TyasXcmKDb8="
+    },
+    "map-visit": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/map-visit/-/map-visit-1.0.0.tgz",
+      "integrity": "sha1-7Nyo8TFE5mDxtb1B8S80edmN+48=",
+      "requires": {
+        "object-visit": "1.0.1"
+      }
+    },
+    "media-typer": {
+      "version": "0.3.0",
+      "resolved": "http://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
+      "integrity": "sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g=",
+      "dev": true
+    },
+    "methods": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz",
+      "integrity": "sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4=",
+      "dev": true
+    },
+    "micromatch": {
+      "version": "3.1.10",
+      "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-3.1.10.tgz",
+      "integrity": "sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg==",
+      "requires": {
+        "arr-diff": "4.0.0",
+        "array-unique": "0.3.2",
+        "braces": "2.3.2",
+        "define-property": "2.0.2",
+        "extend-shallow": "3.0.2",
+        "extglob": "2.0.4",
+        "fragment-cache": "0.2.1",
+        "kind-of": "6.0.2",
+        "nanomatch": "1.2.13",
+        "object.pick": "1.3.0",
+        "regex-not": "1.0.2",
+        "snapdragon": "0.8.2",
+        "to-regex": "3.0.2"
+      }
+    },
+    "mime-db": {
+      "version": "1.37.0",
+      "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.37.0.tgz",
+      "integrity": "sha512-R3C4db6bgQhlIhPU48fUtdVmKnflq+hRdad7IyKhtFj06VPNVdk2RhiYL3UjQIlso8L+YxAtFkobT0VK+S/ybg==",
+      "dev": true
+    },
+    "mime-types": {
+      "version": "2.1.21",
+      "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.21.tgz",
+      "integrity": "sha512-3iL6DbwpyLzjR3xHSFNFeb9Nz/M8WDkX33t1GFQnFOllWk8pOrh/LSrB5OXlnlW5P9LH73X6loW/eogc+F5lJg==",
+      "dev": true,
+      "requires": {
+        "mime-db": "1.37.0"
+      }
+    },
+    "minimatch": {
+      "version": "3.0.4",
+      "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz",
+      "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==",
+      "dev": true,
+      "requires": {
+        "brace-expansion": "1.1.11"
+      }
+    },
+    "minimist": {
+      "version": "1.2.0",
+      "resolved": "http://registry.npmjs.org/minimist/-/minimist-1.2.0.tgz",
+      "integrity": "sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ=",
+      "dev": true
+    },
+    "mixin-deep": {
+      "version": "1.3.2",
+      "resolved": "https://registry.npmjs.org/mixin-deep/-/mixin-deep-1.3.2.tgz",
+      "integrity": "sha512-WRoDn//mXBiJ1H40rqa3vH0toePwSsGb45iInWlTySa+Uu4k3tYUSxa2v1KqAiLtvlrSzaExqS1gtk96A9zvEA==",
+      "requires": {
+        "for-in": "1.0.2",
+        "is-extendable": "1.0.1"
+      },
+      "dependencies": {
+        "is-extendable": {
+          "version": "1.0.1",
+          "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz",
+          "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==",
+          "requires": {
+            "is-plain-object": "2.0.4"
+          }
+        }
+      }
+    },
+    "mkdirp": {
+      "version": "0.5.1",
+      "resolved": "http://registry.npmjs.org/mkdirp/-/mkdirp-0.5.1.tgz",
+      "integrity": "sha1-MAV0OOrGz3+MR2fzhkjWaX11yQM=",
+      "dev": true,
+      "requires": {
+        "minimist": "0.0.8"
+      },
+      "dependencies": {
+        "minimist": {
+          "version": "0.0.8",
+          "resolved": "http://registry.npmjs.org/minimist/-/minimist-0.0.8.tgz",
+          "integrity": "sha1-hX/Kv8M5fSYluCKCYuhqp6ARsF0=",
+          "dev": true
+        }
+      }
+    },
+    "mocha": {
+      "version": "5.2.0",
+      "resolved": "https://registry.npmjs.org/mocha/-/mocha-5.2.0.tgz",
+      "integrity": "sha512-2IUgKDhc3J7Uug+FxMXuqIyYzH7gJjXECKe/w43IGgQHTSj3InJi+yAA7T24L9bQMRKiUEHxEX37G5JpVUGLcQ==",
+      "dev": true,
+      "requires": {
+        "browser-stdout": "1.3.1",
+        "commander": "2.15.1",
+        "debug": "3.1.0",
+        "diff": "3.5.0",
+        "escape-string-regexp": "1.0.5",
+        "glob": "7.1.2",
+        "growl": "1.10.5",
+        "he": "1.1.1",
+        "minimatch": "3.0.4",
+        "mkdirp": "0.5.1",
+        "supports-color": "5.4.0"
+      }
+    },
+    "morgan": {
+      "version": "1.9.1",
+      "resolved": "https://registry.npmjs.org/morgan/-/morgan-1.9.1.tgz",
+      "integrity": "sha512-HQStPIV4y3afTiCYVxirakhlCfGkI161c76kKFca7Fk1JusM//Qeo1ej2XaMniiNeaZklMVrh3vTtIzpzwbpmA==",
+      "dev": true,
+      "requires": {
+        "basic-auth": "2.0.1",
+        "debug": "2.6.9",
+        "depd": "1.1.2",
+        "on-finished": "2.3.0",
+        "on-headers": "1.0.1"
+      },
+      "dependencies": {
+        "basic-auth": {
+          "version": "2.0.1",
+          "resolved": "https://registry.npmjs.org/basic-auth/-/basic-auth-2.0.1.tgz",
+          "integrity": "sha512-NF+epuEdnUYVlGuhaxbbq+dvJttwLnGY+YixlXlME5KpQ5W3CnXA5cVTneY3SPbPDRkcjMbifrwmFYcClgOZeg==",
+          "dev": true,
+          "requires": {
+            "safe-buffer": "5.1.2"
+          }
+        },
+        "debug": {
+          "version": "2.6.9",
+          "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+          "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+          "dev": true,
+          "requires": {
+            "ms": "2.0.0"
+          }
+        }
+      }
+    },
+    "ms": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+      "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g="
+    },
+    "mz": {
+      "version": "2.7.0",
+      "resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz",
+      "integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==",
+      "dev": true,
+      "requires": {
+        "any-promise": "1.3.0",
+        "object-assign": "4.1.1",
+        "thenify-all": "1.6.0"
+      }
+    },
+    "nanomatch": {
+      "version": "1.2.13",
+      "resolved": "https://registry.npmjs.org/nanomatch/-/nanomatch-1.2.13.tgz",
+      "integrity": "sha512-fpoe2T0RbHwBTBUOftAfBPaDEi06ufaUai0mE6Yn1kacc3SnTErfb/h+X94VXzI64rKFHYImXSvdwGGCmwOqCA==",
+      "requires": {
+        "arr-diff": "4.0.0",
+        "array-unique": "0.3.2",
+        "define-property": "2.0.2",
+        "extend-shallow": "3.0.2",
+        "fragment-cache": "0.2.1",
+        "is-windows": "1.0.2",
+        "kind-of": "6.0.2",
+        "object.pick": "1.3.0",
+        "regex-not": "1.0.2",
+        "snapdragon": "0.8.2",
+        "to-regex": "3.0.2"
+      }
+    },
+    "negotiator": {
+      "version": "0.6.1",
+      "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.1.tgz",
+      "integrity": "sha1-KzJxhOiZIQEXeyhWP7XnECrNDKk=",
+      "dev": true
+    },
+    "node-version-matches": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/node-version-matches/-/node-version-matches-1.0.0.tgz",
+      "integrity": "sha512-E1OQnAUB+BvEyNTXTWpUUMAWXYCa7yjiS64djOuTJEkm20yaQfNmWTfx/kvN6nC7fc0GQS182IaefOPxQvpxXg==",
+      "dev": true,
+      "requires": {
+        "semver": "5.5.0"
+      }
+    },
+    "object-assign": {
+      "version": "4.1.1",
+      "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
+      "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=",
+      "dev": true
+    },
+    "object-copy": {
+      "version": "0.1.0",
+      "resolved": "https://registry.npmjs.org/object-copy/-/object-copy-0.1.0.tgz",
+      "integrity": "sha1-fn2Fi3gb18mRpBupde04EnVOmYw=",
+      "requires": {
+        "copy-descriptor": "0.1.1",
+        "define-property": "0.2.5",
+        "kind-of": "3.2.2"
+      },
+      "dependencies": {
+        "define-property": {
+          "version": "0.2.5",
+          "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
+          "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
+          "requires": {
+            "is-descriptor": "0.1.6"
+          }
+        },
+        "kind-of": {
+          "version": "3.2.2",
+          "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+          "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+          "requires": {
+            "is-buffer": "1.1.6"
+          }
+        }
+      }
+    },
+    "object-visit": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/object-visit/-/object-visit-1.0.1.tgz",
+      "integrity": "sha1-95xEk68MU3e1n+OdOV5BBC3QRbs=",
+      "requires": {
+        "isobject": "3.0.1"
+      }
+    },
+    "object.pick": {
+      "version": "1.3.0",
+      "resolved": "https://registry.npmjs.org/object.pick/-/object.pick-1.3.0.tgz",
+      "integrity": "sha1-h6EKxMFpS9Lhy/U1kaZhQftd10c=",
+      "requires": {
+        "isobject": "3.0.1"
+      }
+    },
+    "on-finished": {
+      "version": "2.3.0",
+      "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz",
+      "integrity": "sha1-IPEzZIGwg811M3mSoWlxqi2QaUc=",
+      "dev": true,
+      "requires": {
+        "ee-first": "1.1.1"
+      }
+    },
+    "on-headers": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.1.tgz",
+      "integrity": "sha1-ko9dD0cNSTQmUepnlLCFfBAGk/c=",
+      "dev": true
+    },
+    "once": {
+      "version": "1.4.0",
+      "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
+      "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=",
+      "dev": true,
+      "requires": {
+        "wrappy": "1.0.2"
+      }
+    },
+    "only": {
+      "version": "0.0.2",
+      "resolved": "https://registry.npmjs.org/only/-/only-0.0.2.tgz",
+      "integrity": "sha1-Kv3oTQPlC5qO3EROMGEKcCle37Q=",
+      "dev": true
+    },
+    "opn": {
+      "version": "5.4.0",
+      "resolved": "https://registry.npmjs.org/opn/-/opn-5.4.0.tgz",
+      "integrity": "sha512-YF9MNdVy/0qvJvDtunAOzFw9iasOQHpVthTCvGzxt61Il64AYSGdK+rYwld7NAfk9qJ7dt+hymBNSc9LNYS+Sw==",
+      "dev": true,
+      "requires": {
+        "is-wsl": "1.1.0"
+      }
+    },
+    "parseurl": {
+      "version": "1.3.2",
+      "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.2.tgz",
+      "integrity": "sha1-/CidTtiZMRlGDBViUyYs3I3mW/M=",
+      "dev": true
+    },
+    "pascalcase": {
+      "version": "0.1.1",
+      "resolved": "https://registry.npmjs.org/pascalcase/-/pascalcase-0.1.1.tgz",
+      "integrity": "sha1-s2PlXoAGym/iF4TS2yK9FdeRfxQ="
+    },
+    "path-is-absolute": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
+      "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=",
+      "dev": true
+    },
+    "path-parse": {
+      "version": "1.0.5",
+      "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.5.tgz",
+      "integrity": "sha1-PBrfhx6pzWyUMbbqK9dKD/BVxME="
+    },
+    "path-to-regexp": {
+      "version": "2.4.0",
+      "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-2.4.0.tgz",
+      "integrity": "sha512-G6zHoVqC6GGTQkZwF4lkuEyMbVOjoBKAEybQUypI1WTkqinCOrq2x6U2+phkJ1XsEMTy4LjtwPI7HW+NVrRR2w==",
+      "dev": true
+    },
+    "pathval": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/pathval/-/pathval-1.1.0.tgz",
+      "integrity": "sha1-uULm1L3mUwBe9rcTYd74cn0GReA=",
+      "dev": true
+    },
+    "posix-character-classes": {
+      "version": "0.1.1",
+      "resolved": "https://registry.npmjs.org/posix-character-classes/-/posix-character-classes-0.1.1.tgz",
+      "integrity": "sha1-AerA/jta9xoqbAL+q7jB/vfgDqs="
+    },
+    "process-nextick-args": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.0.tgz",
+      "integrity": "sha512-MtEC1TqN0EU5nephaJ4rAtThHtC86dNN9qCuEhtshvpVBkAW5ZO7BASN9REnF9eoXGcRub+pFuKEpOHE+HbEMw==",
+      "dev": true
+    },
+    "qs": {
+      "version": "6.5.2",
+      "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz",
+      "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==",
+      "dev": true
+    },
+    "raw-body": {
+      "version": "2.3.3",
+      "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.3.3.tgz",
+      "integrity": "sha512-9esiElv1BrZoI3rCDuOuKCBRbuApGGaDPQfjSflGxdy4oyzqghxu6klEkkVIvBje+FF0BX9coEv8KqW6X/7njw==",
+      "dev": true,
+      "requires": {
+        "bytes": "3.0.0",
+        "http-errors": "1.6.3",
+        "iconv-lite": "0.4.23",
+        "unpipe": "1.0.0"
+      },
+      "dependencies": {
+        "http-errors": {
+          "version": "1.6.3",
+          "resolved": "http://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz",
+          "integrity": "sha1-i1VoC7S+KDoLW/TqLjhYC+HZMg0=",
+          "dev": true,
+          "requires": {
+            "depd": "1.1.2",
+            "inherits": "2.0.3",
+            "setprototypeof": "1.1.0",
+            "statuses": "1.5.0"
+          }
+        }
+      }
+    },
+    "readable-stream": {
+      "version": "2.3.6",
+      "resolved": "http://registry.npmjs.org/readable-stream/-/readable-stream-2.3.6.tgz",
+      "integrity": "sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw==",
+      "dev": true,
+      "requires": {
+        "core-util-is": "1.0.2",
+        "inherits": "2.0.3",
+        "isarray": "1.0.0",
+        "process-nextick-args": "2.0.0",
+        "safe-buffer": "5.1.2",
+        "string_decoder": "1.1.1",
+        "util-deprecate": "1.0.2"
+      }
+    },
+    "reduce-flatten": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/reduce-flatten/-/reduce-flatten-2.0.0.tgz",
+      "integrity": "sha512-EJ4UNY/U1t2P/2k6oqotuX2Cc3T6nxJwsM0N0asT7dhrtH1ltUxDn4NalSYmPE2rCkVpcf/X6R0wDwcFpzhd4w==",
+      "dev": true
+    },
+    "regex-not": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/regex-not/-/regex-not-1.0.2.tgz",
+      "integrity": "sha512-J6SDjUgDxQj5NusnOtdFxDwN/+HWykR8GELwctJ7mdqhcyy1xEc4SRFHUXvxTp661YaVKAjfRLZ9cCqS6tn32A==",
+      "requires": {
+        "extend-shallow": "3.0.2",
+        "safe-regex": "1.1.0"
+      }
+    },
+    "repeat-element": {
+      "version": "1.1.3",
+      "resolved": "https://registry.npmjs.org/repeat-element/-/repeat-element-1.1.3.tgz",
+      "integrity": "sha512-ahGq0ZnV5m5XtZLMb+vP76kcAM5nkLqk0lpqAuojSKGgQtn4eRi4ZZGm2olo2zKFH+sMsWaqOCW1dqAnOru72g=="
+    },
+    "repeat-string": {
+      "version": "1.6.1",
+      "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz",
+      "integrity": "sha1-jcrkcOHIirwtYA//Sndihtp15jc="
+    },
+    "req-then": {
+      "version": "0.6.4",
+      "resolved": "https://registry.npmjs.org/req-then/-/req-then-0.6.4.tgz",
+      "integrity": "sha512-Uf7xsK1qPqPUetESHemNQ7nGtgOxngSFtlcAOOkx0lDAo+XRZpEA9QDrGBdyOfGq4b+a0z/D5gR2VJ+pp/dzBA==",
+      "dev": true,
+      "requires": {
+        "array-back": "2.0.0",
+        "defer-promise": "1.0.1",
+        "lodash.pick": "4.4.0",
+        "stream-read-all": "0.1.2",
+        "typical": "2.6.1"
+      },
+      "dependencies": {
+        "typical": {
+          "version": "2.6.1",
+          "resolved": "https://registry.npmjs.org/typical/-/typical-2.6.1.tgz",
+          "integrity": "sha1-XAgOXWYcu+OCWdLnCjxyU+hziB0=",
+          "dev": true
+        }
+      }
+    },
+    "resolve": {
+      "version": "1.8.1",
+      "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.8.1.tgz",
+      "integrity": "sha512-AicPrAC7Qu1JxPCZ9ZgCZlY35QgFnNqc+0LtbRNxnVw4TXvjQ72wnuL9JQcEBgXkI9JM8MsT9kaQoHcpCRJOYA==",
+      "requires": {
+        "path-parse": "1.0.5"
+      }
+    },
+    "resolve-path": {
+      "version": "1.4.0",
+      "resolved": "https://registry.npmjs.org/resolve-path/-/resolve-path-1.4.0.tgz",
+      "integrity": "sha1-xL2p9e+y/OZSR4c6s2u02DT+Fvc=",
+      "dev": true,
+      "requires": {
+        "http-errors": "1.6.3",
+        "path-is-absolute": "1.0.1"
+      },
+      "dependencies": {
+        "http-errors": {
+          "version": "1.6.3",
+          "resolved": "http://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz",
+          "integrity": "sha1-i1VoC7S+KDoLW/TqLjhYC+HZMg0=",
+          "dev": true,
+          "requires": {
+            "depd": "1.1.2",
+            "inherits": "2.0.3",
+            "setprototypeof": "1.1.0",
+            "statuses": "1.5.0"
+          }
+        }
+      }
+    },
+    "resolve-url": {
+      "version": "0.2.1",
+      "resolved": "https://registry.npmjs.org/resolve-url/-/resolve-url-0.2.1.tgz",
+      "integrity": "sha1-LGN/53yJOv0qZj/iGqkIAGjiBSo="
+    },
+    "ret": {
+      "version": "0.1.15",
+      "resolved": "https://registry.npmjs.org/ret/-/ret-0.1.15.tgz",
+      "integrity": "sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg=="
+    },
+    "rollup": {
+      "version": "0.68.2",
+      "resolved": "https://registry.npmjs.org/rollup/-/rollup-0.68.2.tgz",
+      "integrity": "sha512-WgjNCXYv7ZbtStIap1+tz4pd2zwz0XYN//OILwEY6dINIFLVizK1iWdu+ZtUURL/OKnp8Lv2w8FBds8YihzX7Q==",
+      "dev": true,
+      "requires": {
+        "@types/estree": "0.0.39",
+        "@types/node": "12.7.12"
+      }
+    },
+    "rollup-plugin-node-resolve": {
+      "version": "4.0.0",
+      "resolved": "https://registry.npmjs.org/rollup-plugin-node-resolve/-/rollup-plugin-node-resolve-4.0.0.tgz",
+      "integrity": "sha512-7Ni+/M5RPSUBfUaP9alwYQiIKnKeXCOHiqBpKUl9kwp3jX5ZJtgXAait1cne6pGEVUUztPD6skIKH9Kq9sNtfw==",
+      "requires": {
+        "builtin-modules": "3.0.0",
+        "is-module": "1.0.0",
+        "resolve": "1.8.1"
+      }
+    },
+    "rollup-plugin-typescript2": {
+      "version": "0.20.1",
+      "resolved": "https://registry.npmjs.org/rollup-plugin-typescript2/-/rollup-plugin-typescript2-0.20.1.tgz",
+      "integrity": "sha512-uxA5JQNOfmJ9rsO0yJKTObb1t4nNYUexCg9zxhEKF+NzZwljYWdfgrA06UzA24cOk8fQjGEe7Q5+Vge2vFlnnw==",
+      "requires": {
+        "fs-extra": "7.0.1",
+        "resolve": "1.10.0",
+        "rollup-pluginutils": "2.4.1",
+        "tslib": "1.9.3"
+      },
+      "dependencies": {
+        "path-parse": {
+          "version": "1.0.6",
+          "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.6.tgz",
+          "integrity": "sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw=="
+        },
+        "resolve": {
+          "version": "1.10.0",
+          "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.10.0.tgz",
+          "integrity": "sha512-3sUr9aq5OfSg2S9pNtPA9hL1FVEAjvfOC4leW0SNf/mpnaakz2a9femSd6LqAww2RaFctwyf1lCqnTHuF1rxDg==",
+          "requires": {
+            "path-parse": "1.0.6"
+          }
+        }
+      }
+    },
+    "rollup-pluginutils": {
+      "version": "2.4.1",
+      "resolved": "https://registry.npmjs.org/rollup-pluginutils/-/rollup-pluginutils-2.4.1.tgz",
+      "integrity": "sha512-wesMQ9/172IJDIW/lYWm0vW0LiKe5Ekjws481R7z9WTRtmO59cqyM/2uUlxvf6yzm/fElFmHUobeQOYz46dZJw==",
+      "requires": {
+        "estree-walker": "0.6.0",
+        "micromatch": "3.1.10"
+      }
+    },
+    "rw": {
+      "version": "1.3.3",
+      "resolved": "https://registry.npmjs.org/rw/-/rw-1.3.3.tgz",
+      "integrity": "sha1-P4Yt+pGrdmsUiF700BEkv9oHT7Q="
+    },
+    "safe-buffer": {
+      "version": "5.1.2",
+      "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
+      "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==",
+      "dev": true
+    },
+    "safe-regex": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/safe-regex/-/safe-regex-1.1.0.tgz",
+      "integrity": "sha1-QKNmnzsHfR6UPURinhV91IAjvy4=",
+      "requires": {
+        "ret": "0.1.15"
+      }
+    },
+    "safer-buffer": {
+      "version": "2.1.2",
+      "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
+      "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="
+    },
+    "semver": {
+      "version": "5.5.0",
+      "resolved": "https://registry.npmjs.org/semver/-/semver-5.5.0.tgz",
+      "integrity": "sha512-4SJ3dm0WAwWy/NVeioZh5AntkdJoWKxHxcmyP622fOkgHa4z3R0TdBJICINyaSDE6uNwVc8gZr+ZinwZAH4xIA==",
+      "dev": true
+    },
+    "serve-index-75lb": {
+      "version": "2.0.1",
+      "resolved": "https://registry.npmjs.org/serve-index-75lb/-/serve-index-75lb-2.0.1.tgz",
+      "integrity": "sha512-/d9r8bqJlFQcwy0a0nb1KnWAA+Mno+V+VaoKocdkbW5aXKRQd/+4bfnRhQRQr6uEoYwTRJ4xgztOyCJvWcpBpQ==",
+      "dev": true,
+      "requires": {
+        "accepts": "1.3.5",
+        "batch": "0.6.1",
+        "debug": "2.6.9",
+        "escape-html": "1.0.3",
+        "http-errors": "1.6.3",
+        "mime-types": "2.1.21",
+        "parseurl": "1.3.2"
+      },
+      "dependencies": {
+        "debug": {
+          "version": "2.6.9",
+          "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+          "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+          "dev": true,
+          "requires": {
+            "ms": "2.0.0"
+          }
+        },
+        "http-errors": {
+          "version": "1.6.3",
+          "resolved": "http://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz",
+          "integrity": "sha1-i1VoC7S+KDoLW/TqLjhYC+HZMg0=",
+          "dev": true,
+          "requires": {
+            "depd": "1.1.2",
+            "inherits": "2.0.3",
+            "setprototypeof": "1.1.0",
+            "statuses": "1.5.0"
+          }
+        }
+      }
+    },
+    "set-value": {
+      "version": "2.0.1",
+      "resolved": "https://registry.npmjs.org/set-value/-/set-value-2.0.1.tgz",
+      "integrity": "sha512-JxHc1weCN68wRY0fhCoXpyK55m/XPHafOmK4UWD7m2CI14GMcFypt4w/0+NV5f/ZMby2F6S2wwA7fgynh9gWSw==",
+      "requires": {
+        "extend-shallow": "2.0.1",
+        "is-extendable": "0.1.1",
+        "is-plain-object": "2.0.4",
+        "split-string": "3.1.0"
+      },
+      "dependencies": {
+        "extend-shallow": {
+          "version": "2.0.1",
+          "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+          "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
+          "requires": {
+            "is-extendable": "0.1.1"
+          }
+        }
+      }
+    },
+    "setprototypeof": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz",
+      "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==",
+      "dev": true
+    },
+    "snapdragon": {
+      "version": "0.8.2",
+      "resolved": "https://registry.npmjs.org/snapdragon/-/snapdragon-0.8.2.tgz",
+      "integrity": "sha512-FtyOnWN/wCHTVXOMwvSv26d+ko5vWlIDD6zoUJ7LW8vh+ZBC8QdljveRP+crNrtBwioEUWy/4dMtbBjA4ioNlg==",
+      "requires": {
+        "base": "0.11.2",
+        "debug": "2.6.9",
+        "define-property": "0.2.5",
+        "extend-shallow": "2.0.1",
+        "map-cache": "0.2.2",
+        "source-map": "0.5.7",
+        "source-map-resolve": "0.5.2",
+        "use": "3.1.1"
+      },
+      "dependencies": {
+        "debug": {
+          "version": "2.6.9",
+          "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+          "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+          "requires": {
+            "ms": "2.0.0"
+          }
+        },
+        "define-property": {
+          "version": "0.2.5",
+          "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
+          "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
+          "requires": {
+            "is-descriptor": "0.1.6"
+          }
+        },
+        "extend-shallow": {
+          "version": "2.0.1",
+          "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+          "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
+          "requires": {
+            "is-extendable": "0.1.1"
+          }
+        },
+        "source-map": {
+          "version": "0.5.7",
+          "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz",
+          "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w="
+        }
+      }
+    },
+    "snapdragon-node": {
+      "version": "2.1.1",
+      "resolved": "https://registry.npmjs.org/snapdragon-node/-/snapdragon-node-2.1.1.tgz",
+      "integrity": "sha512-O27l4xaMYt/RSQ5TR3vpWCAB5Kb/czIcqUFOM/C4fYcLnbZUc1PkjTAMjof2pBWaSTwOUd6qUHcFGVGj7aIwnw==",
+      "requires": {
+        "define-property": "1.0.0",
+        "isobject": "3.0.1",
+        "snapdragon-util": "3.0.1"
+      },
+      "dependencies": {
+        "define-property": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz",
+          "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=",
+          "requires": {
+            "is-descriptor": "1.0.2"
+          }
+        },
+        "is-accessor-descriptor": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
+          "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
+          "requires": {
+            "kind-of": "6.0.2"
+          }
+        },
+        "is-data-descriptor": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
+          "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
+          "requires": {
+            "kind-of": "6.0.2"
+          }
+        },
+        "is-descriptor": {
+          "version": "1.0.2",
+          "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
+          "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
+          "requires": {
+            "is-accessor-descriptor": "1.0.0",
+            "is-data-descriptor": "1.0.0",
+            "kind-of": "6.0.2"
+          }
+        }
+      }
+    },
+    "snapdragon-util": {
+      "version": "3.0.1",
+      "resolved": "https://registry.npmjs.org/snapdragon-util/-/snapdragon-util-3.0.1.tgz",
+      "integrity": "sha512-mbKkMdQKsjX4BAL4bRYTj21edOf8cN7XHdYUJEe+Zn99hVEYcMvKPct1IqNe7+AZPirn8BCDOQBHQZknqmKlZQ==",
+      "requires": {
+        "kind-of": "3.2.2"
+      },
+      "dependencies": {
+        "kind-of": {
+          "version": "3.2.2",
+          "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+          "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+          "requires": {
+            "is-buffer": "1.1.6"
+          }
+        }
+      }
+    },
+    "source-map": {
+      "version": "0.6.1",
+      "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+      "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
+      "dev": true
+    },
+    "source-map-resolve": {
+      "version": "0.5.2",
+      "resolved": "https://registry.npmjs.org/source-map-resolve/-/source-map-resolve-0.5.2.tgz",
+      "integrity": "sha512-MjqsvNwyz1s0k81Goz/9vRBe9SZdB09Bdw+/zYyO+3CuPk6fouTaxscHkgtE8jKvf01kVfl8riHzERQ/kefaSA==",
+      "requires": {
+        "atob": "2.1.2",
+        "decode-uri-component": "0.2.0",
+        "resolve-url": "0.2.1",
+        "source-map-url": "0.4.0",
+        "urix": "0.1.0"
+      }
+    },
+    "source-map-support": {
+      "version": "0.5.9",
+      "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.9.tgz",
+      "integrity": "sha512-gR6Rw4MvUlYy83vP0vxoVNzM6t8MUXqNuRsuBmBHQDu1Fh6X015FrLdgoDKcNdkwGubozq0P4N0Q37UyFVr1EA==",
+      "dev": true,
+      "requires": {
+        "buffer-from": "1.1.1",
+        "source-map": "0.6.1"
+      }
+    },
+    "source-map-url": {
+      "version": "0.4.0",
+      "resolved": "https://registry.npmjs.org/source-map-url/-/source-map-url-0.4.0.tgz",
+      "integrity": "sha1-PpNdfd1zYxuXZZlW1VEo6HtQhKM="
+    },
+    "split-string": {
+      "version": "3.1.0",
+      "resolved": "https://registry.npmjs.org/split-string/-/split-string-3.1.0.tgz",
+      "integrity": "sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw==",
+      "requires": {
+        "extend-shallow": "3.0.2"
+      }
+    },
+    "sprintf-js": {
+      "version": "1.0.3",
+      "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz",
+      "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=",
+      "dev": true
+    },
+    "static-extend": {
+      "version": "0.1.2",
+      "resolved": "https://registry.npmjs.org/static-extend/-/static-extend-0.1.2.tgz",
+      "integrity": "sha1-YICcOcv/VTNyJv1eC1IPNB8ftcY=",
+      "requires": {
+        "define-property": "0.2.5",
+        "object-copy": "0.1.0"
+      },
+      "dependencies": {
+        "define-property": {
+          "version": "0.2.5",
+          "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
+          "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
+          "requires": {
+            "is-descriptor": "0.1.6"
+          }
+        }
+      }
+    },
+    "statuses": {
+      "version": "1.5.0",
+      "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz",
+      "integrity": "sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow=",
+      "dev": true
+    },
+    "stream-log-stats": {
+      "version": "2.0.2",
+      "resolved": "https://registry.npmjs.org/stream-log-stats/-/stream-log-stats-2.0.2.tgz",
+      "integrity": "sha512-b1LccxXhMlOQQrzSqapQHyZ3UI00QTAv+8VecFgsJz//sGB5LFl/+mkFeWBVVI2/E4DlCT4sGgvLExB/VTVFfA==",
+      "dev": true,
+      "requires": {
+        "JSONStream": "1.3.5",
+        "ansi-escape-sequences": "3.0.0",
+        "byte-size": "3.0.0",
+        "common-log-format": "0.1.4",
+        "lodash.throttle": "4.1.1",
+        "stream-via": "1.0.4",
+        "table-layout": "0.4.4"
+      },
+      "dependencies": {
+        "ansi-escape-sequences": {
+          "version": "3.0.0",
+          "resolved": "https://registry.npmjs.org/ansi-escape-sequences/-/ansi-escape-sequences-3.0.0.tgz",
+          "integrity": "sha1-HBg5S2r5t2/5pjUJ+kl2af0s5T4=",
+          "dev": true,
+          "requires": {
+            "array-back": "1.0.4"
+          }
+        },
+        "array-back": {
+          "version": "1.0.4",
+          "resolved": "https://registry.npmjs.org/array-back/-/array-back-1.0.4.tgz",
+          "integrity": "sha1-ZEun8JX3/898Q7Xw3DnTwfA8Bjs=",
+          "dev": true,
+          "requires": {
+            "typical": "2.6.1"
+          }
+        },
+        "byte-size": {
+          "version": "3.0.0",
+          "resolved": "http://registry.npmjs.org/byte-size/-/byte-size-3.0.0.tgz",
+          "integrity": "sha1-QG+eI2aqXav2NnLrKR17sJSV2nU=",
+          "dev": true
+        },
+        "typical": {
+          "version": "2.6.1",
+          "resolved": "https://registry.npmjs.org/typical/-/typical-2.6.1.tgz",
+          "integrity": "sha1-XAgOXWYcu+OCWdLnCjxyU+hziB0=",
+          "dev": true
+        }
+      }
+    },
+    "stream-read-all": {
+      "version": "0.1.2",
+      "resolved": "https://registry.npmjs.org/stream-read-all/-/stream-read-all-0.1.2.tgz",
+      "integrity": "sha512-KX42xBg853m+KnwRtwCKT95ShopAbY/MNKs2dBQ0WkNeuJdqgQYRtGRbTlxdx0L6t979h3z/wMq2eMSAu7Tygw==",
+      "dev": true
+    },
+    "stream-slice": {
+      "version": "0.1.2",
+      "resolved": "https://registry.npmjs.org/stream-slice/-/stream-slice-0.1.2.tgz",
+      "integrity": "sha1-LcT04bk2+xPz6zmi3vGTJ5jQeks=",
+      "dev": true
+    },
+    "stream-via": {
+      "version": "1.0.4",
+      "resolved": "https://registry.npmjs.org/stream-via/-/stream-via-1.0.4.tgz",
+      "integrity": "sha512-DBp0lSvX5G9KGRDTkR/R+a29H+Wk2xItOF+MpZLLNDWbEV9tGPnqLPxHEYjmiz8xGtJHRIqmI+hCjmNzqoA4nQ==",
+      "dev": true
+    },
+    "streaming-json-stringify": {
+      "version": "3.1.0",
+      "resolved": "https://registry.npmjs.org/streaming-json-stringify/-/streaming-json-stringify-3.1.0.tgz",
+      "integrity": "sha1-gCAEN6mTzDnE/gAmO3s7kDrIevU=",
+      "dev": true,
+      "requires": {
+        "json-stringify-safe": "5.0.1",
+        "readable-stream": "2.3.6"
+      }
+    },
+    "string_decoder": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz",
+      "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==",
+      "dev": true,
+      "requires": {
+        "safe-buffer": "5.1.2"
+      }
+    },
+    "strip-ansi": {
+      "version": "3.0.1",
+      "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz",
+      "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=",
+      "dev": true,
+      "requires": {
+        "ansi-regex": "2.1.1"
+      }
+    },
+    "strip-bom": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz",
+      "integrity": "sha1-IzTBjpx1n3vdVv3vfprj1YjmjtM=",
+      "dev": true,
+      "optional": true
+    },
+    "supports-color": {
+      "version": "5.4.0",
+      "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz",
+      "integrity": "sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==",
+      "dev": true,
+      "requires": {
+        "has-flag": "3.0.0"
+      }
+    },
+    "table-layout": {
+      "version": "0.4.4",
+      "resolved": "https://registry.npmjs.org/table-layout/-/table-layout-0.4.4.tgz",
+      "integrity": "sha512-uNaR3SRMJwfdp9OUr36eyEi6LLsbcTqTO/hfTsNviKsNeyMBPICJCC7QXRF3+07bAP6FRwA8rczJPBqXDc0CkQ==",
+      "dev": true,
+      "requires": {
+        "array-back": "2.0.0",
+        "deep-extend": "0.6.0",
+        "lodash.padend": "4.6.1",
+        "typical": "2.6.1",
+        "wordwrapjs": "3.0.0"
+      },
+      "dependencies": {
+        "typical": {
+          "version": "2.6.1",
+          "resolved": "https://registry.npmjs.org/typical/-/typical-2.6.1.tgz",
+          "integrity": "sha1-XAgOXWYcu+OCWdLnCjxyU+hziB0=",
+          "dev": true
+        }
+      }
+    },
+    "test-value": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/test-value/-/test-value-3.0.0.tgz",
+      "integrity": "sha512-sVACdAWcZkSU9x7AOmJo5TqE+GyNJknHaHsMrR6ZnhjVlVN9Yx6FjHrsKZ3BjIpPCT68zYesPWkakrNupwfOTQ==",
+      "dev": true,
+      "requires": {
+        "array-back": "2.0.0",
+        "typical": "2.6.1"
+      },
+      "dependencies": {
+        "typical": {
+          "version": "2.6.1",
+          "resolved": "https://registry.npmjs.org/typical/-/typical-2.6.1.tgz",
+          "integrity": "sha1-XAgOXWYcu+OCWdLnCjxyU+hziB0=",
+          "dev": true
+        }
+      }
+    },
+    "thenify": {
+      "version": "3.3.0",
+      "resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.0.tgz",
+      "integrity": "sha1-5p44obq+lpsBCCB5eLn2K4hgSDk=",
+      "dev": true,
+      "requires": {
+        "any-promise": "1.3.0"
+      }
+    },
+    "thenify-all": {
+      "version": "1.6.0",
+      "resolved": "https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz",
+      "integrity": "sha1-GhkY1ALY/D+Y+/I02wvMjMEOlyY=",
+      "dev": true,
+      "requires": {
+        "thenify": "3.3.0"
+      }
+    },
+    "through": {
+      "version": "2.3.8",
+      "resolved": "http://registry.npmjs.org/through/-/through-2.3.8.tgz",
+      "integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=",
+      "dev": true
+    },
+    "to-object-path": {
+      "version": "0.3.0",
+      "resolved": "https://registry.npmjs.org/to-object-path/-/to-object-path-0.3.0.tgz",
+      "integrity": "sha1-KXWIt7Dn4KwI4E5nL4XB9JmeF68=",
+      "requires": {
+        "kind-of": "3.2.2"
+      },
+      "dependencies": {
+        "kind-of": {
+          "version": "3.2.2",
+          "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+          "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+          "requires": {
+            "is-buffer": "1.1.6"
+          }
+        }
+      }
+    },
+    "to-regex": {
+      "version": "3.0.2",
+      "resolved": "https://registry.npmjs.org/to-regex/-/to-regex-3.0.2.tgz",
+      "integrity": "sha512-FWtleNAtZ/Ki2qtqej2CXTOayOH9bHDQF+Q48VpWyDXjbYxA4Yz8iDB31zXOBUlOHHKidDbqGVrTUvQMPmBGBw==",
+      "requires": {
+        "define-property": "2.0.2",
+        "extend-shallow": "3.0.2",
+        "regex-not": "1.0.2",
+        "safe-regex": "1.1.0"
+      }
+    },
+    "to-regex-range": {
+      "version": "2.1.1",
+      "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-2.1.1.tgz",
+      "integrity": "sha1-fIDBe53+vlmeJzZ+DU3VWQFB2zg=",
+      "requires": {
+        "is-number": "3.0.0",
+        "repeat-string": "1.6.1"
+      }
+    },
+    "toidentifier": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.0.tgz",
+      "integrity": "sha512-yaOH/Pk/VEhBWWTlhI+qXxDFXlejDGcQipMlyxda9nthulaxLZUNcUqFxokp0vcYnvteJln5FNQDRrxj3YcbVw==",
+      "dev": true
+    },
+    "ts-mocha": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/ts-mocha/-/ts-mocha-2.0.0.tgz",
+      "integrity": "sha512-Rj6+vvwKtOTs5GsNO1jLl4DIXUGnyAg5HFt2Yb4SHIRN45clTJkHWpNdTxCSL0u+1oeavSYJah6d1PZ++Ju5pw==",
+      "dev": true,
+      "requires": {
+        "ts-node": "7.0.0",
+        "tsconfig-paths": "3.6.0"
+      }
+    },
+    "ts-node": {
+      "version": "7.0.0",
+      "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-7.0.0.tgz",
+      "integrity": "sha512-klJsfswHP0FuOLsvBZ/zzCfUvakOSSxds78mVeK7I+qP76YWtxf16hEZsp3U+b0kIo82R5UatGFeblYMqabb2Q==",
+      "dev": true,
+      "requires": {
+        "arrify": "1.0.1",
+        "buffer-from": "1.1.1",
+        "diff": "3.5.0",
+        "make-error": "1.3.5",
+        "minimist": "1.2.0",
+        "mkdirp": "0.5.1",
+        "source-map-support": "0.5.9",
+        "yn": "2.0.0"
+      }
+    },
+    "tsconfig-paths": {
+      "version": "3.6.0",
+      "resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.6.0.tgz",
+      "integrity": "sha512-mrqQIP2F4e03aMTCiPdedCIT300//+q0ET53o5WqqtQjmEICxP9yfz/sHTpPqXpssuJEzODsEzJaLRaf5J2X1g==",
+      "dev": true,
+      "optional": true,
+      "requires": {
+        "@types/json5": "0.0.29",
+        "deepmerge": "2.2.1",
+        "json5": "1.0.1",
+        "minimist": "1.2.0",
+        "strip-bom": "3.0.0"
+      }
+    },
+    "tslib": {
+      "version": "1.9.3",
+      "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.9.3.tgz",
+      "integrity": "sha512-4krF8scpejhaOgqzBEcGM7yDIEfi0/8+8zDRZhNZZ2kjmHJ4hv3zCbQWxoJGz1iw5U0Jl0nma13xzHXcncMavQ=="
+    },
+    "tslint": {
+      "version": "5.12.0",
+      "resolved": "https://registry.npmjs.org/tslint/-/tslint-5.12.0.tgz",
+      "integrity": "sha512-CKEcH1MHUBhoV43SA/Jmy1l24HJJgI0eyLbBNSRyFlsQvb9v6Zdq+Nz2vEOH00nC5SUx4SneJ59PZUS/ARcokQ==",
+      "dev": true,
+      "requires": {
+        "babel-code-frame": "6.26.0",
+        "builtin-modules": "1.1.1",
+        "chalk": "2.4.1",
+        "commander": "2.15.1",
+        "diff": "3.5.0",
+        "glob": "7.1.2",
+        "js-yaml": "3.13.1",
+        "minimatch": "3.0.4",
+        "resolve": "1.8.1",
+        "semver": "5.5.0",
+        "tslib": "1.9.3",
+        "tsutils": "2.29.0"
+      },
+      "dependencies": {
+        "builtin-modules": {
+          "version": "1.1.1",
+          "resolved": "https://registry.npmjs.org/builtin-modules/-/builtin-modules-1.1.1.tgz",
+          "integrity": "sha1-Jw8HbFpywC9bZaR9+Uxf46J4iS8=",
+          "dev": true
+        }
+      }
+    },
+    "tsutils": {
+      "version": "2.29.0",
+      "resolved": "https://registry.npmjs.org/tsutils/-/tsutils-2.29.0.tgz",
+      "integrity": "sha512-g5JVHCIJwzfISaXpXE1qvNalca5Jwob6FjI4AoPlqMusJ6ftFE7IkkFoMhVLRgK+4Kx3gkzb8UZK5t5yTTvEmA==",
+      "dev": true,
+      "requires": {
+        "tslib": "1.9.3"
+      }
+    },
+    "type-detect": {
+      "version": "4.0.8",
+      "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz",
+      "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==",
+      "dev": true
+    },
+    "type-is": {
+      "version": "1.6.16",
+      "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.16.tgz",
+      "integrity": "sha512-HRkVv/5qY2G6I8iab9cI7v1bOIdhm94dVjQCPFElW9W+3GeDOSHmy2EBYe4VTApuzolPcmgFTN3ftVJRKR2J9Q==",
+      "dev": true,
+      "requires": {
+        "media-typer": "0.3.0",
+        "mime-types": "2.1.21"
+      }
+    },
+    "typescript": {
+      "version": "3.2.2",
+      "resolved": "https://registry.npmjs.org/typescript/-/typescript-3.2.2.tgz",
+      "integrity": "sha512-VCj5UiSyHBjwfYacmDuc/NOk4QQixbE+Wn7MFJuS0nRuPQbof132Pw4u53dm264O8LPc2MVsc7RJNml5szurkg==",
+      "dev": true
+    },
+    "typical": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/typical/-/typical-3.0.0.tgz",
+      "integrity": "sha512-2/pGDQD/q1iJWlrj357aEKGIlRvHirm81x04lsg51hreiohy2snAXoFc9dIHFWEx9LsfOVA5K7lUGM9rcUqwlQ==",
+      "dev": true
+    },
+    "union-value": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/union-value/-/union-value-1.0.1.tgz",
+      "integrity": "sha512-tJfXmxMeWYnczCVs7XAEvIV7ieppALdyepWMkHkwciRpZraG/xwT+s2JN8+pr1+8jCRf80FFzvr+MpQeeoF4Xg==",
+      "requires": {
+        "arr-union": "3.1.0",
+        "get-value": "2.0.6",
+        "is-extendable": "0.1.1",
+        "set-value": "2.0.1"
+      }
+    },
+    "universalify": {
+      "version": "0.1.2",
+      "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz",
+      "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg=="
+    },
+    "unpipe": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",
+      "integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw=",
+      "dev": true
+    },
+    "unset-value": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/unset-value/-/unset-value-1.0.0.tgz",
+      "integrity": "sha1-g3aHP30jNRef+x5vw6jtDfyKtVk=",
+      "requires": {
+        "has-value": "0.3.1",
+        "isobject": "3.0.1"
+      },
+      "dependencies": {
+        "has-value": {
+          "version": "0.3.1",
+          "resolved": "https://registry.npmjs.org/has-value/-/has-value-0.3.1.tgz",
+          "integrity": "sha1-ex9YutpiyoJ+wKIHgCVlSEWZXh8=",
+          "requires": {
+            "get-value": "2.0.6",
+            "has-values": "0.1.4",
+            "isobject": "2.1.0"
+          },
+          "dependencies": {
+            "isobject": {
+              "version": "2.1.0",
+              "resolved": "https://registry.npmjs.org/isobject/-/isobject-2.1.0.tgz",
+              "integrity": "sha1-8GVWEJaj8dou9GJy+BXIQNh+DIk=",
+              "requires": {
+                "isarray": "1.0.0"
+              }
+            }
+          }
+        },
+        "has-values": {
+          "version": "0.1.4",
+          "resolved": "https://registry.npmjs.org/has-values/-/has-values-0.1.4.tgz",
+          "integrity": "sha1-bWHeldkd/Km5oCCJrThL/49it3E="
+        }
+      }
+    },
+    "urix": {
+      "version": "0.1.0",
+      "resolved": "https://registry.npmjs.org/urix/-/urix-0.1.0.tgz",
+      "integrity": "sha1-2pN/emLiH+wf0Y1Js1wpNQZ6bHI="
+    },
+    "use": {
+      "version": "3.1.1",
+      "resolved": "https://registry.npmjs.org/use/-/use-3.1.1.tgz",
+      "integrity": "sha512-cwESVXlO3url9YWlFW/TA9cshCEhtu7IKJ/p5soJ/gGpj7vbvFrAY/eIioQ6Dw23KjZhYgiIo8HOs1nQ2vr/oQ=="
+    },
+    "util-deprecate": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
+      "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=",
+      "dev": true
+    },
+    "vary": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz",
+      "integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw=",
+      "dev": true
+    },
+    "walk-back": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/walk-back/-/walk-back-3.0.0.tgz",
+      "integrity": "sha1-I1h4ejXakQMtrV6S+AsSNw2HlcU=",
+      "dev": true
+    },
+    "wordwrapjs": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/wordwrapjs/-/wordwrapjs-3.0.0.tgz",
+      "integrity": "sha512-mO8XtqyPvykVCsrwj5MlOVWvSnCdT+C+QVbm6blradR7JExAhbkZ7hZ9A+9NUtwzSqrlUo9a67ws0EiILrvRpw==",
+      "dev": true,
+      "requires": {
+        "reduce-flatten": "1.0.1",
+        "typical": "2.6.1"
+      },
+      "dependencies": {
+        "reduce-flatten": {
+          "version": "1.0.1",
+          "resolved": "https://registry.npmjs.org/reduce-flatten/-/reduce-flatten-1.0.1.tgz",
+          "integrity": "sha1-JYx479FT3fk8tWEjf2EYTzaW4yc=",
+          "dev": true
+        },
+        "typical": {
+          "version": "2.6.1",
+          "resolved": "https://registry.npmjs.org/typical/-/typical-2.6.1.tgz",
+          "integrity": "sha1-XAgOXWYcu+OCWdLnCjxyU+hziB0=",
+          "dev": true
+        }
+      }
+    },
+    "wrappy": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
+      "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=",
+      "dev": true
+    },
+    "ws": {
+      "version": "5.2.2",
+      "resolved": "https://registry.npmjs.org/ws/-/ws-5.2.2.tgz",
+      "integrity": "sha512-jaHFD6PFv6UgoIVda6qZllptQsMlDEJkTQcybzzXDYM1XO9Y8em691FGMPmM46WGyLU4z9KMgQN+qrux/nhlHA==",
+      "dev": true,
+      "requires": {
+        "async-limiter": "1.0.0"
+      }
+    },
+    "ylru": {
+      "version": "1.2.1",
+      "resolved": "https://registry.npmjs.org/ylru/-/ylru-1.2.1.tgz",
+      "integrity": "sha512-faQrqNMzcPCHGVC2aaOINk13K+aaBDUPjGWl0teOXywElLjyVAB6Oe2jj62jHYtwsU49jXhScYbvPENK+6zAvQ==",
+      "dev": true
+    },
+    "yn": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/yn/-/yn-2.0.0.tgz",
+      "integrity": "sha1-5a2ryKz0CPY4X8dklWhMiOavaJo=",
+      "dev": true
+    }
+  }
+}
diff --git a/src/third_party/v8/tools/turbolizer/package.json b/src/third_party/v8/tools/turbolizer/package.json
new file mode 100644
index 0000000..dee2835
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/package.json
@@ -0,0 +1,34 @@
+{
+  "name": "turbolizer",
+  "version": "0.1.0",
+  "description": "Visualization tool for V8 TurboFan IR graphs",
+  "scripts": {
+    "build": "rollup -c",
+    "watch": "rollup -c -w",
+    "deploy": "./deploy.sh",
+    "test": "ts-mocha -p tsconfig.test.json test/**/*-test.ts",
+    "dev-server": "ws",
+    "presubmit": "tslint --project ./tslint.json --fix"
+  },
+  "author": "The V8 team",
+  "license": "MIT",
+  "dependencies": {
+    "@types/d3": "^5.7.2",
+    "d3": "^5.7.0",
+    "rollup-plugin-node-resolve": "^4.0.0",
+    "rollup-plugin-typescript2": "^0.20.1"
+  },
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/v8/v8.git"
+  },
+  "devDependencies": {
+    "chai": "^4.2.0",
+    "local-web-server": "^2.6.0",
+    "mocha": "^5.2.0",
+    "rollup": "^0.68.2",
+    "ts-mocha": "^2.0.0",
+    "tslint": "^5.12.0",
+    "typescript": "^3.2.2"
+  }
+}
diff --git a/src/third_party/v8/tools/turbolizer/right-arrow.png b/src/third_party/v8/tools/turbolizer/right-arrow.png
new file mode 100644
index 0000000..ef39643
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/right-arrow.png
Binary files differ
diff --git a/src/third_party/v8/tools/turbolizer/rollup.config.js b/src/third_party/v8/tools/turbolizer/rollup.config.js
new file mode 100644
index 0000000..05b69b8
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/rollup.config.js
@@ -0,0 +1,32 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import typescript from 'rollup-plugin-typescript2';
+import node from 'rollup-plugin-node-resolve';
+
+import path from 'path'
+
+const onwarn = warning => {
+  // Silence circular dependency warnings for third-party modules under node_modules.
+  const node_modules = path.normalize('node_modules/');
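+  // !indexOf(...) is truthy only when the importer path starts with 'node_modules/',
+  // i.e. the circular import originates in a third-party dependency.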
+  if (warning.code === 'CIRCULAR_DEPENDENCY' &&
+    !warning.importer.indexOf(node_modules)) {
+    return
+  }
+
+  console.warn(`(!) ${warning.message}`)
+}
+
+export default {
+  input: "src/turbo-visualizer.ts",
+  plugins: [node(), typescript({
+    abortOnError: false
+  })],
+  output: {
+    file: "build/turbolizer.js",
+    format: "iife",
+    sourcemap: true
+  },
+  onwarn: onwarn
+};
diff --git a/src/third_party/v8/tools/turbolizer/search2.png b/src/third_party/v8/tools/turbolizer/search2.png
new file mode 100644
index 0000000..88dd193
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/search2.png
Binary files differ
diff --git a/src/third_party/v8/tools/turbolizer/src/code-view.ts b/src/third_party/v8/tools/turbolizer/src/code-view.ts
new file mode 100644
index 0000000..ab0af14
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/src/code-view.ts
@@ -0,0 +1,282 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+interface PR {
+  prettyPrint(_: unknown, el: HTMLElement): void;
+}
+
+declare global {
+  const PR: PR;
+}
+
+import { Source, SourceResolver, sourcePositionToStringKey } from "../src/source-resolver";
+import { SelectionBroker } from "../src/selection-broker";
+import { View } from "../src/view";
+import { MySelection } from "../src/selection";
+import { ViewElements } from "../src/util";
+import { SelectionHandler } from "./selection-handler";
+
+export enum CodeMode {
+  MAIN_SOURCE = "main function",
+  INLINED_SOURCE = "inlined function"
+}
+
+export class CodeView extends View {
+  broker: SelectionBroker;
+  source: Source;
+  sourceResolver: SourceResolver;
+  codeMode: CodeMode;
+  sourcePositionToHtmlElement: Map<string, HTMLElement>;
+  showAdditionalInliningPosition: boolean;
+  selectionHandler: SelectionHandler;
+  selection: MySelection;
+
+  createViewElement() {
+    const sourceContainer = document.createElement("div");
+    sourceContainer.classList.add("source-container");
+    return sourceContainer;
+  }
+
+  constructor(parent: HTMLElement, broker: SelectionBroker, sourceResolver: SourceResolver, sourceFunction: Source, codeMode: CodeMode) {
+    super(parent);
+    const view = this;
+    view.broker = broker;
+    view.sourceResolver = sourceResolver;
+    view.source = sourceFunction;
+    view.codeMode = codeMode;
+    this.sourcePositionToHtmlElement = new Map();
+    this.showAdditionalInliningPosition = false;
+
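+    // Bridges this source view with the SelectionBroker: local selections (plus any
+    // inlined positions) are broadcast, while brokered selections from other views
+    // are translated into this source's ids before being applied.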
+    const selectionHandler = {
+      clear: function () {
+        view.selection.clear();
+        view.updateSelection();
+        broker.broadcastClear(this);
+      },
+      select: function (sourcePositions, selected) {
+        const locations = [];
+        for (const sourcePosition of sourcePositions) {
+          locations.push(sourcePosition);
+          sourceResolver.addInliningPositions(sourcePosition, locations);
+        }
+        if (locations.length == 0) return;
+        view.selection.select(locations, selected);
+        view.updateSelection();
+        broker.broadcastSourcePositionSelect(this, locations, selected);
+      },
+      brokeredSourcePositionSelect: function (locations, selected) {
+        const firstSelect = view.selection.isEmpty();
+        for (const location of locations) {
+          const translated = sourceResolver.translateToSourceId(view.source.sourceId, location);
+          if (!translated) continue;
+          view.selection.select([translated], selected);
+        }
+        view.updateSelection(firstSelect);
+      },
+      brokeredClear: function () {
+        view.selection.clear();
+        view.updateSelection();
+      },
+    };
+    view.selection = new MySelection(sourcePositionToStringKey);
+    broker.addSourcePositionHandler(selectionHandler);
+    this.selectionHandler = selectionHandler;
+    this.initializeCode();
+  }
+
+  addHtmlElementToSourcePosition(sourcePosition, element) {
+    const key = sourcePositionToStringKey(sourcePosition);
+    if (this.sourcePositionToHtmlElement.has(key)) {
+      console.log("Warning: duplicate source position", sourcePosition);
+    }
+    this.sourcePositionToHtmlElement.set(key, element);
+  }
+
+  getHtmlElementForSourcePosition(sourcePosition) {
+    const key = sourcePositionToStringKey(sourcePosition);
+    return this.sourcePositionToHtmlElement.get(key);
+  }
+
+  updateSelection(scrollIntoView: boolean = false): void {
+    const mkVisible = new ViewElements(this.divNode.parentNode as HTMLElement);
+    for (const [sp, el] of this.sourcePositionToHtmlElement.entries()) {
+      const isSelected = this.selection.isKeySelected(sp);
+      mkVisible.consider(el, isSelected);
+      el.classList.toggle("selected", isSelected);
+    }
+    mkVisible.apply(scrollIntoView);
+  }
+
+  getCodeHtmlElementName() {
+    return `source-pre-${this.source.sourceId}`;
+  }
+
+  getCodeHeaderHtmlElementName() {
+    return `source-pre-${this.source.sourceId}-header`;
+  }
+
+  getHtmlCodeLines(): NodeListOf<HTMLElement> {
+    const orderedList = this.divNode.querySelector(`#${this.getCodeHtmlElementName()} ol`);
+    return orderedList.childNodes as NodeListOf<HTMLElement>;
+  }
+
+  onSelectLine(lineNumber: number, doClear: boolean) {
+    if (doClear) {
+      this.selectionHandler.clear();
+    }
+    const positions = this.sourceResolver.linetoSourcePositions(lineNumber - 1);
+    if (positions !== undefined) {
+      this.selectionHandler.select(positions, undefined);
+    }
+  }
+
+  onSelectSourcePosition(sourcePosition, doClear: boolean) {
+    if (doClear) {
+      this.selectionHandler.clear();
+    }
+    this.selectionHandler.select([sourcePosition], undefined);
+  }
+
+  initializeCode() {
+    const view = this;
+    const source = this.source;
+    const sourceText = source.sourceText;
+    if (!sourceText) return;
+    const sourceContainer = view.divNode;
+    if (this.codeMode == CodeMode.MAIN_SOURCE) {
+      sourceContainer.classList.add("main-source");
+    } else {
+      sourceContainer.classList.add("inlined-source");
+    }
+    const codeHeader = document.createElement("div");
+    codeHeader.setAttribute("id", this.getCodeHeaderHtmlElementName());
+    codeHeader.classList.add("code-header");
+    const codeFileFunction = document.createElement("div");
+    codeFileFunction.classList.add("code-file-function");
+    codeFileFunction.innerHTML = `${source.sourceName}:${source.functionName}`;
+    codeHeader.appendChild(codeFileFunction);
+    const codeModeDiv = document.createElement("div");
+    codeModeDiv.classList.add("code-mode");
+    codeModeDiv.innerHTML = `${this.codeMode}`;
+    codeHeader.appendChild(codeModeDiv);
+    const clearDiv = document.createElement("div");
+    clearDiv.style.clear = "both";
+    codeHeader.appendChild(clearDiv);
+    sourceContainer.appendChild(codeHeader);
+    const codePre = document.createElement("pre");
+    codePre.setAttribute("id", this.getCodeHtmlElementName());
+    codePre.classList.add("prettyprint");
+    sourceContainer.appendChild(codePre);
+
+    codeHeader.onclick = function toggleCodePane() {
+      if (codePre.style.display === "none") {
+        codePre.style.display = "block";
+      } else {
+        codePre.style.display = "none";
+      }
+    };
+    if (sourceText != "") {
+      codePre.classList.add("linenums");
+      codePre.textContent = sourceText;
+      try {
+        // Wrap in try/catch so the view still works when the prettyprint library is unavailable (e.g. offline).
+        PR.prettyPrint(undefined, sourceContainer);
+      } catch (e) {
+        console.log(e);
+      }
+
+      view.divNode.onclick = function (e: MouseEvent) {
+        if (e.target instanceof Element && e.target.tagName == "DIV") {
+          const targetDiv = e.target as HTMLDivElement;
+          if (targetDiv.classList.contains("line-number")) {
+            e.stopPropagation();
+            view.onSelectLine(Number(targetDiv.dataset.lineNumber), !e.shiftKey);
+          }
+        } else {
+          view.selectionHandler.clear();
+        }
+      };
+
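+      // Map the pretty-printed spans back to absolute script offsets. Newline
+      // characters are not part of any span, so they are counted into newlineAdjust
+      // and compensated for on the following line.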
+      const base: number = source.startPosition;
+      let current = 0;
+      const lineListDiv = this.getHtmlCodeLines();
+      let newlineAdjust = 0;
+      for (let i = 0; i < lineListDiv.length; i++) {
+        // Line numbers are not zero-based.
+        const lineNumber = i + 1;
+        const currentLineElement = lineListDiv[i];
+        currentLineElement.id = "li" + i;
+        currentLineElement.dataset.lineNumber = "" + lineNumber;
+        const spans = currentLineElement.childNodes;
+        for (const currentSpan of spans) {
+          if (currentSpan instanceof HTMLSpanElement) {
+            const pos = base + current;
+            const end = pos + currentSpan.textContent.length;
+            current += currentSpan.textContent.length;
+            this.insertSourcePositions(currentSpan, lineNumber, pos, end, newlineAdjust);
+            newlineAdjust = 0;
+          }
+        }
+
+        this.insertLineNumber(currentLineElement, lineNumber);
+
+        while ((current < sourceText.length) &&
+          (sourceText[current] == '\n' || sourceText[current] == '\r')) {
+          ++current;
+          ++newlineAdjust;
+        }
+      }
+    }
+  }
+
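+  // Splits the text nodes of a pretty-printed line at every source position in range
+  // and inserts a clickable marker span (plus an optional inlining marker) at each split.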
+  insertSourcePositions(currentSpan, lineNumber, pos, end, adjust) {
+    const view = this;
+    const sps = this.sourceResolver.sourcePositionsInRange(this.source.sourceId, pos - adjust, end);
+    let offset = 0;
+    for (const sourcePosition of sps) {
+      this.sourceResolver.addAnyPositionToLine(lineNumber, sourcePosition);
+      const textnode = currentSpan.tagName == 'SPAN' ? currentSpan.lastChild : currentSpan;
+      if (!(textnode instanceof Text)) continue;
+      const splitLength = Math.max(0, sourcePosition.scriptOffset - pos - offset);
+      offset += splitLength;
+      const replacementNode = textnode.splitText(splitLength);
+      const span = document.createElement('span');
+      span.setAttribute("scriptOffset", sourcePosition.scriptOffset);
+      span.classList.add("source-position");
+      const marker = document.createElement('span');
+      marker.classList.add("marker");
+      span.appendChild(marker);
+      const inlining = this.sourceResolver.getInliningForPosition(sourcePosition);
+      if (inlining != undefined && view.showAdditionalInliningPosition) {
+        const sourceName = this.sourceResolver.getSourceName(inlining.sourceId);
+        const inliningMarker = document.createElement('span');
+        inliningMarker.classList.add("inlining-marker");
+        inliningMarker.setAttribute("data-descr", `${sourceName} was inlined here`);
+        span.appendChild(inliningMarker);
+      }
+      span.onclick = function (e) {
+        e.stopPropagation();
+        view.onSelectSourcePosition(sourcePosition, !e.shiftKey);
+      };
+      view.addHtmlElementToSourcePosition(sourcePosition, span);
+      textnode.parentNode.insertBefore(span, replacementNode);
+    }
+  }
+
+  insertLineNumber(lineElement: HTMLElement, lineNumber: number) {
+    const view = this;
+    const lineNumberElement = document.createElement("div");
+    lineNumberElement.classList.add("line-number");
+    lineNumberElement.dataset.lineNumber = `${lineNumber}`;
+    lineNumberElement.innerText = `${lineNumber}`;
+    lineElement.insertBefore(lineNumberElement, lineElement.firstChild);
+    // Don't add lines to source positions if not in backwardsCompatibility mode.
+    if (this.source.backwardsCompatibility === true) {
+      for (const sourcePosition of this.sourceResolver.linetoSourcePositions(lineNumber - 1)) {
+        view.addHtmlElementToSourcePosition(sourcePosition, lineElement);
+      }
+    }
+  }
+
+}
diff --git a/src/third_party/v8/tools/turbolizer/src/constants.ts b/src/third_party/v8/tools/turbolizer/src/constants.ts
new file mode 100644
index 0000000..47dee85
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/src/constants.ts
@@ -0,0 +1,27 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export const MAX_RANK_SENTINEL = 0;
+export const GRAPH_MARGIN = 250;
+export const SOURCE_PANE_ID = 'left';
+export const SOURCE_COLLAPSE_ID = 'source-shrink';
+export const SOURCE_EXPAND_ID = 'source-expand';
+export const INTERMEDIATE_PANE_ID = 'middle';
+export const GRAPH_PANE_ID = 'graph';
+export const SCHEDULE_PANE_ID = 'schedule';
+export const GENERATED_PANE_ID = 'right';
+export const DISASSEMBLY_PANE_ID = 'disassembly';
+export const DISASSEMBLY_COLLAPSE_ID = 'disassembly-shrink';
+export const DISASSEMBLY_EXPAND_ID = 'disassembly-expand';
+export const RANGES_PANE_ID = "ranges";
+export const RANGES_COLLAPSE_ID = "ranges-shrink";
+export const RANGES_EXPAND_ID = "ranges-expand";
+export const UNICODE_BLOCK = '&#9611;';
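+// Color stops (event percentage -> RGB) used to heat-map profiling event counts in
+// the disassembly view.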
+export const PROF_COLS = [
+  { perc: 0, col: { r: 255, g: 255, b: 255 } },
+  { perc: 0.5, col: { r: 255, g: 255, b: 128 } },
+  { perc: 5, col: { r: 255, g: 128, b: 0 } },
+  { perc: 15, col: { r: 255, g: 0, b: 0 } },
+  { perc: 100, col: { r: 0, g: 0, b: 0 } }
+];
diff --git a/src/third_party/v8/tools/turbolizer/src/disassembly-view.ts b/src/third_party/v8/tools/turbolizer/src/disassembly-view.ts
new file mode 100644
index 0000000..0455437
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/src/disassembly-view.ts
@@ -0,0 +1,394 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import { PROF_COLS, UNICODE_BLOCK } from "../src/constants";
+import { SelectionBroker } from "../src/selection-broker";
+import { TextView } from "../src/text-view";
+import { MySelection } from "./selection";
+import { anyToString, interpolate } from "./util";
+import { InstructionSelectionHandler } from "./selection-handler";
+
+const toolboxHTML = `<div id="disassembly-toolbox">
+<form>
+  <label><input id="show-instruction-address" type="checkbox" name="instruction-address">Show addresses</label>
+  <label><input id="show-instruction-binary" type="checkbox" name="instruction-binary">Show binary literal</label>
+  <label><input id="highlight-gap-instructions" type="checkbox" name="instruction-binary">Highlight gap instructions</label>
+</form>
+</div>`;
+
+export class DisassemblyView extends TextView {
+  SOURCE_POSITION_HEADER_REGEX: any;
+  addrEventCounts: any;
+  totalEventCounts: any;
+  maxEventCounts: any;
+  posLines: Array<any>;
+  instructionSelectionHandler: InstructionSelectionHandler;
+  offsetSelection: MySelection;
+  showInstructionAddressHandler: () => void;
+  showInstructionBinaryHandler: () => void;
+  highlightGapInstructionsHandler: () => void;
+
+  createViewElement() {
+    const pane = document.createElement('div');
+    pane.setAttribute('id', "disassembly");
+    pane.innerHTML =
+      `<pre id='disassembly-text-pre' class='prettyprint prettyprinted'>
+       <ul class='disassembly-list nolinenums noindent'>
+       </ul>
+     </pre>`;
+
+    return pane;
+  }
+
+  constructor(parentId, broker: SelectionBroker) {
+    super(parentId, broker);
+    const view = this;
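+    // ADDRESS_STYLE splits the leading "address  offset" columns of a disassembly line,
+    // tags the fragment with its instruction kind and basic block ids, and makes both
+    // columns linkable when the offset is valid.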
+    const ADDRESS_STYLE = {
+      associateData: (text, fragment: HTMLElement) => {
+        const matches = text.match(/(?<address>0?x?[0-9a-fA-F]{8,16})(?<addressSpace>\s+)(?<offset>[0-9a-f]+)(?<offsetSpace>\s*)/);
+        const offset = Number.parseInt(matches.groups["offset"], 16);
+        const instructionKind = view.sourceResolver.getInstructionKindForPCOffset(offset);
+        fragment.dataset.instructionKind = instructionKind;
+        fragment.title = view.sourceResolver.instructionKindToReadableName(instructionKind);
+        const blockIds = view.sourceResolver.getBlockIdsForOffset(offset);
+        const blockIdElement = document.createElement("SPAN");
+        blockIdElement.className = "block-id com linkable-text";
+        blockIdElement.innerText = "";
+        if (blockIds && blockIds.length > 0) {
+          blockIds.forEach(blockId => view.addHtmlElementForBlockId(blockId, fragment));
+          blockIdElement.innerText = `B${blockIds.join(",")}:`;
+          blockIdElement.dataset.blockId = `${blockIds.join(",")}`;
+        }
+        fragment.appendChild(blockIdElement);
+        const addressElement = document.createElement("SPAN");
+        addressElement.className = "instruction-address";
+        addressElement.innerText = matches.groups["address"];
+        const offsetElement = document.createElement("SPAN");
+        offsetElement.innerText = matches.groups["offset"];
+        fragment.appendChild(addressElement);
+        fragment.appendChild(document.createTextNode(matches.groups["addressSpace"]));
+        fragment.appendChild(offsetElement);
+        fragment.appendChild(document.createTextNode(matches.groups["offsetSpace"]));
+        fragment.classList.add('tag');
+
+        if (!Number.isNaN(offset)) {
+          let pcOffset = view.sourceResolver.getKeyPcOffset(offset);
+          if (pcOffset == -1) pcOffset = Number(offset);
+          fragment.dataset.pcOffset = `${pcOffset}`;
+          addressElement.classList.add('linkable-text');
+          offsetElement.classList.add('linkable-text');
+        }
+        return true;
+      }
+    };
+    const UNCLASSIFIED_STYLE = {
+      css: 'com'
+    };
+    const NUMBER_STYLE = {
+      css: ['instruction-binary', 'lit']
+    };
+    const COMMENT_STYLE = {
+      css: 'com'
+    };
+    const OPCODE_ARGS = {
+      associateData: function (text, fragment) {
+        fragment.innerHTML = text;
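+        // Rewrite jump-target annotations like "<+0x2a>" in the operand text into
+        // linkable spans keyed by pc offset, prefixed with their block ids when known.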
+        const replacer = (match, hexOffset) => {
+          const offset = Number.parseInt(hexOffset, 16);
+          let keyOffset = view.sourceResolver.getKeyPcOffset(offset);
+          if (keyOffset == -1) keyOffset = Number(offset);
+          const blockIds = view.sourceResolver.getBlockIdsForOffset(offset);
+          let block = "";
+          let blockIdData = "";
+          if (blockIds && blockIds.length > 0) {
+            block = `B${blockIds.join(",")} `;
+            blockIdData = `data-block-id="${blockIds.join(",")}"`;
+          }
+          return `<span class="tag linkable-text" data-pc-offset="${keyOffset}" ${blockIdData}>${block}${match}</span>`;
+        };
+        const html = text.replace(/<.0?x?([0-9a-fA-F]+)>/g, replacer);
+        fragment.innerHTML = html;
+        return true;
+      }
+    };
+    const OPCODE_STYLE = {
+      css: 'kwd'
+    };
+    const BLOCK_HEADER_STYLE = {
+      associateData: function (text, fragment) {
+        if (view.sourceResolver.hasBlockStartInfo()) return false;
+        const matches = /\d+/.exec(text);
+        if (!matches) return true;
+        const blockId = matches[0];
+        fragment.dataset.blockId = blockId;
+        fragment.innerHTML = text;
+        fragment.className = "com block";
+        return true;
+      }
+    };
+    const SOURCE_POSITION_HEADER_STYLE = {
+      css: 'com'
+    };
+    view.SOURCE_POSITION_HEADER_REGEX = /^\s*--[^<]*<.*(not inlined|inlined\((\d+)\)):(\d+)>\s*--/;
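+    // Line tokenizer patterns for the TextView base class: each entry is
+    // [regex, style, next pattern-group index].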
+    const patterns = [
+      [
+        [/^0?x?[0-9a-fA-F]{8,16}\s+[0-9a-f]+\s+/, ADDRESS_STYLE, 1],
+        [view.SOURCE_POSITION_HEADER_REGEX, SOURCE_POSITION_HEADER_STYLE, -1],
+        [/^\s+-- B\d+ start.*/, BLOCK_HEADER_STYLE, -1],
+        [/^.*/, UNCLASSIFIED_STYLE, -1]
+      ],
+      [
+        [/^\s*[0-9a-f]+\s+/, NUMBER_STYLE, 2],
+        [/^\s*[0-9a-f]+\s+[0-9a-f]+\s+/, NUMBER_STYLE, 2],
+        [/^.*/, null, -1]
+      ],
+      [
+        [/^REX.W \S+\s+/, OPCODE_STYLE, 3],
+        [/^\S+\s+/, OPCODE_STYLE, 3],
+        [/^\S+$/, OPCODE_STYLE, -1],
+        [/^.*/, null, -1]
+      ],
+      [
+        [/^\s+/, null],
+        [/^[^;]+$/, OPCODE_ARGS, -1],
+        [/^[^;]+/, OPCODE_ARGS, 4],
+        [/^;/, COMMENT_STYLE, 5]
+      ],
+      [
+        [/^.+$/, COMMENT_STYLE, -1]
+      ]
+    ];
+    view.setPatterns(patterns);
+
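+    // Clicking a linkable address/offset selects the TurboFan nodes that produced that
+    // pc offset; holding shift adds to the existing selection instead of replacing it.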
+    const linkHandler = (e: MouseEvent) => {
+      if (!(e.target instanceof HTMLElement)) return;
+      const offsetAsString = typeof e.target.dataset.pcOffset != "undefined" ? e.target.dataset.pcOffset : e.target.parentElement.dataset.pcOffset;
+      const offset = Number.parseInt(offsetAsString, 10);
+      if ((typeof offsetAsString) != "undefined" && !Number.isNaN(offset)) {
+        view.offsetSelection.select([offset], true);
+        const nodes = view.sourceResolver.nodesForPCOffset(offset)[0];
+        if (nodes.length > 0) {
+          e.stopPropagation();
+          if (!e.shiftKey) {
+            view.selectionHandler.clear();
+          }
+          view.selectionHandler.select(nodes, true);
+        } else {
+          view.updateSelection();
+        }
+      }
+      return undefined;
+    };
+    view.divNode.addEventListener('click', linkHandler);
+
+    const linkHandlerBlock = e => {
+      const blockId = e.target.dataset.blockId;
+      if (typeof blockId != "undefined") {
+        const blockIds = blockId.split(",");
+        if (!e.shiftKey) {
+          view.selectionHandler.clear();
+        }
+        view.blockSelectionHandler.select(blockIds, true);
+      }
+    };
+    view.divNode.addEventListener('click', linkHandlerBlock);
+
+    this.offsetSelection = new MySelection(anyToString);
+    const instructionSelectionHandler = {
+      clear: function () {
+        view.offsetSelection.clear();
+        view.updateSelection();
+        broker.broadcastClear(instructionSelectionHandler);
+      },
+      select: function (instructionIds, selected) {
+        view.offsetSelection.select(instructionIds, selected);
+        view.updateSelection();
+        broker.broadcastBlockSelect(instructionSelectionHandler, instructionIds, selected);
+      },
+      brokeredInstructionSelect: function (instructionIds, selected) {
+        const firstSelect = view.offsetSelection.isEmpty();
+        const keyPcOffsets = view.sourceResolver.instructionsToKeyPcOffsets(instructionIds);
+        view.offsetSelection.select(keyPcOffsets, selected);
+        view.updateSelection(firstSelect);
+      },
+      brokeredClear: function () {
+        view.offsetSelection.clear();
+        view.updateSelection();
+      }
+    };
+    this.instructionSelectionHandler = instructionSelectionHandler;
+    broker.addInstructionHandler(instructionSelectionHandler);
+
+    const toolbox = document.createElement("div");
+    toolbox.id = "toolbox-anchor";
+    toolbox.innerHTML = toolboxHTML;
+    view.divNode.insertBefore(toolbox, view.divNode.firstChild);
+    const instructionAddressInput: HTMLInputElement = view.divNode.querySelector("#show-instruction-address");
+    const lastShowInstructionAddress = window.sessionStorage.getItem("show-instruction-address");
+    instructionAddressInput.checked = lastShowInstructionAddress == 'true';
+    const showInstructionAddressHandler = () => {
+      window.sessionStorage.setItem("show-instruction-address", `${instructionAddressInput.checked}`);
+      for (const el of view.divNode.querySelectorAll(".instruction-address")) {
+        el.classList.toggle("invisible", !instructionAddressInput.checked);
+      }
+    };
+    instructionAddressInput.addEventListener("change", showInstructionAddressHandler);
+    this.showInstructionAddressHandler = showInstructionAddressHandler;
+
+    const instructionBinaryInput: HTMLInputElement = view.divNode.querySelector("#show-instruction-binary");
+    const lastShowInstructionBinary = window.sessionStorage.getItem("show-instruction-binary");
+    instructionBinaryInput.checked = lastShowInstructionBinary == 'true';
+    const showInstructionBinaryHandler = () => {
+      window.sessionStorage.setItem("show-instruction-binary", `${instructionBinaryInput.checked}`);
+      for (const el of view.divNode.querySelectorAll(".instruction-binary")) {
+        el.classList.toggle("invisible", !instructionBinaryInput.checked);
+      }
+    };
+    instructionBinaryInput.addEventListener("change", showInstructionBinaryHandler);
+    this.showInstructionBinaryHandler = showInstructionBinaryHandler;
+
+    const highlightGapInstructionsInput: HTMLInputElement = view.divNode.querySelector("#highlight-gap-instructions");
+    const lastHighlightGapInstructions = window.sessionStorage.getItem("highlight-gap-instructions");
+    highlightGapInstructionsInput.checked = lastHighlightGapInstructions == 'true';
+    const highlightGapInstructionsHandler = () => {
+      window.sessionStorage.setItem("highlight-gap-instructions", `${highlightGapInstructionsInput.checked}`);
+      view.divNode.classList.toggle("highlight-gap-instructions", highlightGapInstructionsInput.checked);
+    };
+
+    highlightGapInstructionsInput.addEventListener("change", highlightGapInstructionsHandler);
+    this.highlightGapInstructionsHandler = highlightGapInstructionsHandler;
+  }
+
+  updateSelection(scrollIntoView: boolean = false) {
+    super.updateSelection(scrollIntoView);
+    const keyPcOffsets = this.sourceResolver.nodesToKeyPcOffsets(this.selection.selectedKeys());
+    if (this.offsetSelection) {
+      for (const key of this.offsetSelection.selectedKeys()) {
+        keyPcOffsets.push(Number(key));
+      }
+    }
+    for (const keyPcOffset of keyPcOffsets) {
+      const elementsToSelect = this.divNode.querySelectorAll(`[data-pc-offset='${keyPcOffset}']`);
+      for (const el of elementsToSelect) {
+        el.classList.toggle("selected", true);
+      }
+    }
+  }
+
+  initializeCode(sourceText, sourcePosition: number = 0) {
+    const view = this;
+    view.addrEventCounts = null;
+    view.totalEventCounts = null;
+    view.maxEventCounts = null;
+    view.posLines = new Array();
+    // Comment lines for line 0 already include sourcePosition; we only need
+    // to add sourcePosition for lines > 0.
+    view.posLines[0] = sourcePosition;
+    if (sourceText && sourceText != "") {
+      const base = sourcePosition;
+      let current = 0;
+      const sourceLines = sourceText.split("\n");
+      for (let i = 1; i < sourceLines.length; i++) {
+        // Add 1 for newline character that is split off.
+        current += sourceLines[i - 1].length + 1;
+        view.posLines[i] = base + current;
+      }
+    }
+  }
+
+  initializePerfProfile(eventCounts) {
+    const view = this;
+    if (eventCounts !== undefined) {
+      view.addrEventCounts = eventCounts;
+
+      view.totalEventCounts = {};
+      view.maxEventCounts = {};
+      for (const evName in view.addrEventCounts) {
+        if (view.addrEventCounts.hasOwnProperty(evName)) {
+          const keys = Object.keys(view.addrEventCounts[evName]);
+          const values = keys.map(key => view.addrEventCounts[evName][key]);
+          view.totalEventCounts[evName] = values.reduce((a, b) => a + b);
+          view.maxEventCounts[evName] = values.reduce((a, b) => Math.max(a, b));
+        }
+      }
+    } else {
+      view.addrEventCounts = null;
+      view.totalEventCounts = null;
+      view.maxEventCounts = null;
+    }
+  }
+
+  showContent(data): void {
+    console.time("disassembly-view");
+    super.initializeContent(data, null);
+    this.showInstructionAddressHandler();
+    this.showInstructionBinaryHandler();
+    this.highlightGapInstructionsHandler();
+    console.timeEnd("disassembly-view");
+  }
+
+  // Shorten decimals and remove trailing zeroes for readability.
+  humanize(num) {
+    return num.toFixed(3).replace(/\.?0+$/, "") + "%";
+  }
+
+  processLine(line) {
+    const view = this;
+    let fragments = super.processLine(line);
+
+    // Add profiling data per instruction if available.
+    if (view.totalEventCounts) {
+      const matches = /^(0x[0-9a-fA-F]+)\s+\d+\s+[0-9a-fA-F]+/.exec(line);
+      if (matches) {
+        const newFragments = [];
+        for (const event in view.addrEventCounts) {
+          if (!view.addrEventCounts.hasOwnProperty(event)) continue;
+          const count = view.addrEventCounts[event][matches[1]];
+          let str = " ";
+          const cssCls = "prof";
+          if (count !== undefined) {
+            const perc = count / view.totalEventCounts[event] * 100;
+
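+            // Map the percentage to a heat-map color: use the PROF_COLS entry
+            // whose stop matches exactly, or linearly interpolate between the
+            // two neighboring stops.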
+            let col = { r: 255, g: 255, b: 255 };
+            for (let i = 0; i < PROF_COLS.length; i++) {
+              if (perc === PROF_COLS[i].perc) {
+                col = PROF_COLS[i].col;
+                break;
+              } else if (perc > PROF_COLS[i].perc && perc < PROF_COLS[i + 1].perc) {
+                const col1 = PROF_COLS[i].col;
+                const col2 = PROF_COLS[i + 1].col;
+
+                const val = perc - PROF_COLS[i].perc;
+                const max = PROF_COLS[i + 1].perc - PROF_COLS[i].perc;
+
+                col.r = Math.round(interpolate(val, max, col1.r, col2.r));
+                col.g = Math.round(interpolate(val, max, col1.g, col2.g));
+                col.b = Math.round(interpolate(val, max, col1.b, col2.b));
+                break;
+              }
+            }
+
+            str = UNICODE_BLOCK;
+
+            const fragment = view.createFragment(str, cssCls);
+            fragment.title = event + ": " + view.humanize(perc) + " (" + count + ")";
+            fragment.style.color = "rgb(" + col.r + ", " + col.g + ", " + col.b + ")";
+
+            newFragments.push(fragment);
+          } else {
+            newFragments.push(view.createFragment(str, cssCls));
+          }
+        }
+        fragments = newFragments.concat(fragments);
+      }
+    }
+    return fragments;
+  }
+
+  detachSelection() { return null; }
+
+  public searchInputAction(searchInput: HTMLInputElement, e: Event, onlyVisible: boolean): void {
+    throw new Error("Method not implemented.");
+  }
+}
diff --git a/src/third_party/v8/tools/turbolizer/src/edge.ts b/src/third_party/v8/tools/turbolizer/src/edge.ts
new file mode 100644
index 0000000..30d265c
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/src/edge.ts
@@ -0,0 +1,90 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import { GNode, DEFAULT_NODE_BUBBLE_RADIUS } from "../src/node";
+import { Graph } from "./graph";
+
+export const MINIMUM_EDGE_SEPARATION = 20;
+
+export class Edge {
+  target: GNode;
+  source: GNode;
+  index: number;
+  type: string;
+  backEdgeNumber: number;
+  visible: boolean;
+
+  constructor(target: GNode, index: number, source: GNode, type: string) {
+    this.target = target;
+    this.source = source;
+    this.index = index;
+    this.type = type;
+    this.backEdgeNumber = 0;
+    this.visible = false;
+  }
+
+  stringID() {
+    return this.source.id + "," + this.index + "," + this.target.id;
+  }
+
+  isVisible() {
+    return this.visible && this.source.visible && this.target.visible;
+  }
+
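+  // Returns the x coordinate at which this edge approaches its target input.
+  // Back edges are routed in a dedicated lane to the right of the widest node
+  // (maxGraphNodeX), spaced by their backEdgeNumber.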
+  getInputHorizontalPosition(graph: Graph, showTypes: boolean) {
+    if (this.backEdgeNumber > 0) {
+      return graph.maxGraphNodeX + this.backEdgeNumber * MINIMUM_EDGE_SEPARATION;
+    }
+    const source = this.source;
+    const target = this.target;
+    const index = this.index;
+    const inputX = target.x + target.getInputX(index);
+    const inputApproach = target.getInputApproach(this.index);
+    const outputApproach = source.getOutputApproach(showTypes);
+    if (inputApproach > outputApproach) {
+      return inputX;
+    } else {
+      const inputOffset = MINIMUM_EDGE_SEPARATION * (index + 1);
+      return (target.x < source.x)
+        ? (target.x + target.getTotalNodeWidth() + inputOffset)
+        : (target.x - inputOffset);
+    }
+  }
+
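+  // Builds the SVG path data for this edge: move to the source's output
+  // bubble, drop to the output approach row, run horizontally to the input
+  // column, then rise into the target's input bubble.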
+  generatePath(graph: Graph, showTypes: boolean) {
+    const target = this.target;
+    const source = this.source;
+    const inputX = target.x + target.getInputX(this.index);
+    const arrowheadHeight = 7;
+    const inputY = target.y - 2 * DEFAULT_NODE_BUBBLE_RADIUS - arrowheadHeight;
+    const outputX = source.x + source.getOutputX();
+    const outputY = source.y + source.getNodeHeight(showTypes) + DEFAULT_NODE_BUBBLE_RADIUS;
+    let inputApproach = target.getInputApproach(this.index);
+    const outputApproach = source.getOutputApproach(showTypes);
+    const horizontalPos = this.getInputHorizontalPosition(graph, showTypes);
+
+    let result = "M" + outputX + "," + outputY +
+      "L" + outputX + "," + outputApproach +
+      "L" + horizontalPos + "," + outputApproach;
+
+    if (horizontalPos != inputX) {
+      result += "L" + horizontalPos + "," + inputApproach;
+    } else {
+      if (inputApproach < outputApproach) {
+        inputApproach = outputApproach;
+      }
+    }
+
+    result += "L" + inputX + "," + inputApproach +
+      "L" + inputX + "," + inputY;
+    return result;
+  }
+
+  isBackEdge() {
+    return this.target.hasBackEdges() && (this.target.rank < this.source.rank);
+  }
+
+}
+
+export const edgeToStr = (e: Edge) => e.stringID();
diff --git a/src/third_party/v8/tools/turbolizer/src/graph-layout.ts b/src/third_party/v8/tools/turbolizer/src/graph-layout.ts
new file mode 100644
index 0000000..3687c28
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/src/graph-layout.ts
@@ -0,0 +1,461 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import { MAX_RANK_SENTINEL } from "../src/constants";
+import { MINIMUM_EDGE_SEPARATION, Edge } from "../src/edge";
+import { NODE_INPUT_WIDTH, MINIMUM_NODE_OUTPUT_APPROACH, DEFAULT_NODE_BUBBLE_RADIUS, GNode } from "../src/node";
+import { Graph } from "./graph";
+
+const DEFAULT_NODE_ROW_SEPARATION = 130;
+const traceLayout = false;
+
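+// Tracks which horizontal slots (each NODE_INPUT_WIDTH wide) are occupied on
+// the rank currently being laid out, so nodes are placed without overlapping
+// each other or the live edges that pass through the rank.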
+function newGraphOccupation(graph: Graph) {
+  const isSlotFilled = [];
+  let maxSlot = 0;
+  let minSlot = 0;
+  let nodeOccupation: Array<[number, number]> = [];
+
+  function slotToIndex(slot: number) {
+    if (slot >= 0) {
+      return slot * 2;
+    } else {
+      return slot * 2 + 1;
+    }
+  }
+
+  function positionToSlot(pos: number) {
+    return Math.floor(pos / NODE_INPUT_WIDTH);
+  }
+
+  function slotToLeftPosition(slot: number) {
+    return slot * NODE_INPUT_WIDTH;
+  }
+
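+  // Scans outward from the requested position, alternating left and right,
+  // until a run of free slots wide enough for the node is found; `direction`
+  // biases whether the run may grow to the left, to the right, or both.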
+  function findSpace(pos: number, width: number, direction: number) {
+    const widthSlots = Math.floor((width + NODE_INPUT_WIDTH - 1) /
+      NODE_INPUT_WIDTH);
+    const currentSlot = positionToSlot(pos + width / 2);
+    let currentScanSlot = currentSlot;
+    let widthSlotsRemainingLeft = widthSlots;
+    let widthSlotsRemainingRight = widthSlots;
+    let slotsChecked = 0;
+    while (true) {
+      const mod = slotsChecked++ % 2;
+      currentScanSlot = currentSlot + (mod ? -1 : 1) * (slotsChecked >> 1);
+      if (!isSlotFilled[slotToIndex(currentScanSlot)]) {
+        if (mod) {
+          if (direction <= 0) --widthSlotsRemainingLeft;
+        } else {
+          if (direction >= 0) --widthSlotsRemainingRight;
+        }
+        if (widthSlotsRemainingLeft == 0 ||
+          widthSlotsRemainingRight == 0 ||
+          (widthSlotsRemainingLeft + widthSlotsRemainingRight) == widthSlots &&
+          (widthSlots == slotsChecked)) {
+          if (mod) {
+            return [currentScanSlot, widthSlots];
+          } else {
+            return [currentScanSlot - widthSlots + 1, widthSlots];
+          }
+        }
+      } else {
+        if (mod) {
+          widthSlotsRemainingLeft = widthSlots;
+        } else {
+          widthSlotsRemainingRight = widthSlots;
+        }
+      }
+    }
+  }
+
+  function setIndexRange(from: number, to: number, value: boolean) {
+    if (to < from) {
+      throw ("illegal slot range");
+    }
+    while (from <= to) {
+      if (from > maxSlot) {
+        maxSlot = from;
+      }
+      if (from < minSlot) {
+        minSlot = from;
+      }
+      isSlotFilled[slotToIndex(from++)] = value;
+    }
+  }
+
+  function occupySlotRange(from: number, to: number) {
+    if (traceLayout) {
+      console.log("Occupied [" + slotToLeftPosition(from) + "  " + slotToLeftPosition(to + 1) + ")");
+    }
+    setIndexRange(from, to, true);
+  }
+
+  function clearSlotRange(from: number, to: number) {
+    if (traceLayout) {
+      console.log("Cleared [" + slotToLeftPosition(from) + "  " + slotToLeftPosition(to + 1) + ")");
+    }
+    setIndexRange(from, to, false);
+  }
+
+  function occupyPositionRange(from: number, to: number) {
+    occupySlotRange(positionToSlot(from), positionToSlot(to - 1));
+  }
+
+  function clearPositionRange(from: number, to: number) {
+    clearSlotRange(positionToSlot(from), positionToSlot(to - 1));
+  }
+
+  function occupyPositionRangeWithMargin(from: number, to: number, margin: number) {
+    const fromMargin = from - Math.floor(margin);
+    const toMargin = to + Math.floor(margin);
+    occupyPositionRange(fromMargin, toMargin);
+  }
+
+  function clearPositionRangeWithMargin(from: number, to: number, margin: number) {
+    const fromMargin = from - Math.floor(margin);
+    const toMargin = to + Math.floor(margin);
+    clearPositionRange(fromMargin, toMargin);
+  }
+
+  const occupation = {
+    occupyNodeInputs: function (node: GNode, showTypes: boolean) {
+      for (let i = 0; i < node.inputs.length; ++i) {
+        if (node.inputs[i].isVisible()) {
+          const edge = node.inputs[i];
+          if (!edge.isBackEdge()) {
+            const horizontalPos = edge.getInputHorizontalPosition(graph, showTypes);
+            if (traceLayout) {
+              console.log("Occupying input " + i + " of " + node.id + " at " + horizontalPos);
+            }
+            occupyPositionRangeWithMargin(horizontalPos,
+              horizontalPos,
+              NODE_INPUT_WIDTH / 2);
+          }
+        }
+      }
+    },
+    occupyNode: function (node: GNode) {
+      const getPlacementHint = function (n: GNode) {
+        let pos = 0;
+        let direction = -1;
+        let outputEdges = 0;
+        let inputEdges = 0;
+        for (const outputEdge of n.outputs) {
+          if (outputEdge.isVisible()) {
+            const output = outputEdge.target;
+            for (let l = 0; l < output.inputs.length; ++l) {
+              if (output.rank > n.rank) {
+                const inputEdge = output.inputs[l];
+                if (inputEdge.isVisible()) {
+                  ++inputEdges;
+                }
+                if (output.inputs[l].source == n) {
+                  pos += output.x + output.getInputX(l) + NODE_INPUT_WIDTH / 2;
+                  outputEdges++;
+                  if (l >= (output.inputs.length / 2)) {
+                    direction = 1;
+                  }
+                }
+              }
+            }
+          }
+        }
+        if (outputEdges != 0) {
+          pos = pos / outputEdges;
+        }
+        if (outputEdges > 1 || inputEdges == 1) {
+          direction = 0;
+        }
+        return [direction, pos];
+      };
+      const width = node.getTotalNodeWidth();
+      const margin = MINIMUM_EDGE_SEPARATION;
+      const paddedWidth = width + 2 * margin;
+      const placementHint = getPlacementHint(node);
+      const x = placementHint[1] - paddedWidth + margin;
+      if (traceLayout) {
+        console.log("Node " + node.id + " placement hint [" + x + ", " + (x + paddedWidth) + ")");
+      }
+      const placement = findSpace(x, paddedWidth, placementHint[0]);
+      const firstSlot = placement[0];
+      const slotWidth = placement[1];
+      const endSlotExclusive = firstSlot + slotWidth - 1;
+      occupySlotRange(firstSlot, endSlotExclusive);
+      nodeOccupation.push([firstSlot, endSlotExclusive]);
+      if (placementHint[0] < 0) {
+        return slotToLeftPosition(firstSlot + slotWidth) - width - margin;
+      } else if (placementHint[0] > 0) {
+        return slotToLeftPosition(firstSlot) + margin;
+      } else {
+        return slotToLeftPosition(firstSlot + slotWidth / 2) - (width / 2);
+      }
+    },
+    clearOccupiedNodes: function () {
+      nodeOccupation.forEach(([firstSlot, endSlotExclusive]) => {
+        clearSlotRange(firstSlot, endSlotExclusive);
+      });
+      nodeOccupation = [];
+    },
+    clearNodeOutputs: function (source: GNode, showTypes: boolean) {
+      source.outputs.forEach(function (edge) {
+        if (edge.isVisible()) {
+          const target = edge.target;
+          for (const inputEdge of target.inputs) {
+            if (inputEdge.source === source) {
+              const horizontalPos = edge.getInputHorizontalPosition(graph, showTypes);
+              clearPositionRangeWithMargin(horizontalPos,
+                horizontalPos,
+                NODE_INPUT_WIDTH / 2);
+            }
+          }
+        }
+      });
+    },
+    print: function () {
+      let s = "";
+      for (let currentSlot = -40; currentSlot < 40; ++currentSlot) {
+        if (currentSlot != 0) {
+          s += " ";
+        } else {
+          s += "|";
+        }
+      }
+      console.log(s);
+      s = "";
+      for (let currentSlot2 = -40; currentSlot2 < 40; ++currentSlot2) {
+        if (isSlotFilled[slotToIndex(currentSlot2)]) {
+          s += "*";
+        } else {
+          s += " ";
+        }
+      }
+      console.log(s);
+    }
+  };
+  return occupation;
+}
+
+export function layoutNodeGraph(graph: Graph, showTypes: boolean): void {
+  // First determine the set of nodes that have no outputs. Those are the
+  // basis for bottom-up DFS to determine rank and node placement.
+
+  const start = performance.now();
+
+  const endNodesHasNoOutputs = [];
+  const startNodesHasNoInputs = [];
+  for (const n of graph.nodes()) {
+    endNodesHasNoOutputs[n.id] = true;
+    startNodesHasNoInputs[n.id] = true;
+  }
+  graph.forEachEdge((e: Edge) => {
+    endNodesHasNoOutputs[e.source.id] = false;
+    startNodesHasNoInputs[e.target.id] = false;
+  });
+
+  // Finalize the list of start and end nodes.
+  const endNodes: Array<GNode> = [];
+  const startNodes: Array<GNode> = [];
+  let visited: Array<boolean> = [];
+  const rank: Array<number> = [];
+  for (const n of graph.nodes()) {
+    if (endNodesHasNoOutputs[n.id]) {
+      endNodes.push(n);
+    }
+    if (startNodesHasNoInputs[n.id]) {
+      startNodes.push(n);
+    }
+    visited[n.id] = false;
+    rank[n.id] = -1;
+    n.rank = 0;
+    n.visitOrderWithinRank = 0;
+    n.outputApproach = MINIMUM_NODE_OUTPUT_APPROACH;
+  }
+
+  if (traceLayout) {
+    console.log(`layoutGraph init ${performance.now() - start}`);
+  }
+
+  let maxRank = 0;
+  visited = [];
+  let visitOrderWithinRank = 0;
+
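+  // Forward worklist pass: push each node's rank to one more than the highest
+  // rank among its visible inputs. Phi-like nodes only consider their last
+  // (merge/loop) input and nodes with back edges only their first input, so
+  // loop back edges do not keep pushing ranks forward.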
+  const worklist: Array<GNode> = startNodes.slice();
+  while (worklist.length != 0) {
+    const n: GNode = worklist.pop();
+    let changed = false;
+    if (n.rank == MAX_RANK_SENTINEL) {
+      n.rank = 1;
+      changed = true;
+    }
+    let begin = 0;
+    let end = n.inputs.length;
+    if (n.nodeLabel.opcode == 'Phi' ||
+      n.nodeLabel.opcode == 'EffectPhi' ||
+      n.nodeLabel.opcode == 'InductionVariablePhi') {
+      // Keep with merge or loop node
+      begin = n.inputs.length - 1;
+    } else if (n.hasBackEdges()) {
+      end = 1;
+    }
+    for (let l = begin; l < end; ++l) {
+      const input = n.inputs[l].source;
+      if (input.visible && input.rank >= n.rank) {
+        n.rank = input.rank + 1;
+        changed = true;
+      }
+    }
+    if (changed) {
+      const hasBackEdges = n.hasBackEdges();
+      for (let l = n.outputs.length - 1; l >= 0; --l) {
+        if (hasBackEdges && (l != 0)) {
+          worklist.unshift(n.outputs[l].target);
+        } else {
+          worklist.push(n.outputs[l].target);
+        }
+      }
+    }
+    if (n.rank > maxRank) {
+      maxRank = n.rank;
+    }
+  }
+
+  if (traceLayout) {
+    console.log(`layoutGraph worklist ${performance.now() - start}`);
+  }
+
+  visited = [];
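+  // Late-ranking DFS: where possible, pull a node down to just above its
+  // earliest-ranked visible consumer so values stay close to their uses;
+  // Start and Phi-like nodes keep the rank computed above.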
+  function dfsFindRankLate(n: GNode) {
+    if (visited[n.id]) return;
+    visited[n.id] = true;
+    const originalRank = n.rank;
+    let newRank = n.rank;
+    let isFirstInput = true;
+    for (const outputEdge of n.outputs) {
+      const output = outputEdge.target;
+      dfsFindRankLate(output);
+      const outputRank = output.rank;
+      if (output.visible && (isFirstInput || outputRank <= newRank) &&
+        (outputRank > originalRank)) {
+        newRank = outputRank - 1;
+      }
+      isFirstInput = false;
+    }
+    if (n.nodeLabel.opcode != "Start" && n.nodeLabel.opcode != "Phi" && n.nodeLabel.opcode != "EffectPhi" && n.nodeLabel.opcode != "InductionVariablePhi") {
+      n.rank = newRank;
+    }
+  }
+
+  startNodes.forEach(dfsFindRankLate);
+
+  visited = [];
+  function dfsRankOrder(n: GNode) {
+    if (visited[n.id]) return;
+    visited[n.id] = true;
+    for (const outputEdge of n.outputs) {
+      if (outputEdge.isVisible()) {
+        const output = outputEdge.target;
+        dfsRankOrder(output);
+      }
+    }
+    if (n.visitOrderWithinRank == 0) {
+      n.visitOrderWithinRank = ++visitOrderWithinRank;
+    }
+  }
+  startNodes.forEach(dfsRankOrder);
+
+  endNodes.forEach(function (n) {
+    n.rank = maxRank + 1;
+  });
+
+  const rankSets: Array<Array<GNode>> = [];
+  // Collect sets for each rank.
+  for (const n of graph.nodes()) {
+    n.y = n.rank * (DEFAULT_NODE_ROW_SEPARATION + n.getNodeHeight(showTypes) +
+      2 * DEFAULT_NODE_BUBBLE_RADIUS);
+    if (n.visible) {
+      if (rankSets[n.rank] === undefined) {
+        rankSets[n.rank] = [n];
+      } else {
+        rankSets[n.rank].push(n);
+      }
+    }
+  }
+
+  // Iterate backwards from highest to lowest rank, placing nodes so that they
+  // spread out from the "center" as much as possible while still being
+  // compact and not overlapping live input lines.
+  const occupation = newGraphOccupation(graph);
+
+  rankSets.reverse().forEach(function (rankSet: Array<GNode>) {
+
+    for (const node of rankSet) {
+      occupation.clearNodeOutputs(node, showTypes);
+    }
+
+    if (traceLayout) {
+      console.log("After clearing outputs");
+      occupation.print();
+    }
+
+    let placedCount = 0;
+    rankSet = rankSet.sort((a: GNode, b: GNode) => {
+      if (a.visitOrderWithinRank < b.visitOrderWithinRank) {
+        return -1;
+      } else if (a.visitOrderWithinRank == b.visitOrderWithinRank) {
+        return 0;
+      } else {
+        return 1;
+      }
+    });
+
+    for (const nodeToPlace of rankSet) {
+      if (nodeToPlace.visible) {
+        nodeToPlace.x = occupation.occupyNode(nodeToPlace);
+        if (traceLayout) {
+          console.log("Node " + nodeToPlace.id + " is placed between [" + nodeToPlace.x + ", " + (nodeToPlace.x + nodeToPlace.getTotalNodeWidth()) + ")");
+        }
+        const staggeredFlooredI = Math.floor(placedCount++ % 3);
+        const delta = MINIMUM_EDGE_SEPARATION * staggeredFlooredI;
+        nodeToPlace.outputApproach += delta;
+      } else {
+        nodeToPlace.x = 0;
+      }
+    }
+
+    if (traceLayout) {
+      console.log("Before clearing nodes");
+      occupation.print();
+    }
+
+    occupation.clearOccupiedNodes();
+
+    if (traceLayout) {
+      console.log("After clearing nodes");
+      occupation.print();
+    }
+
+    for (const node of rankSet) {
+      occupation.occupyNodeInputs(node, showTypes);
+    }
+
+    if (traceLayout) {
+      console.log("After occupying inputs");
+      occupation.print();
+    }
+
+    if (traceLayout) {
+      console.log("After determining bounding box");
+      occupation.print();
+    }
+  });
+
+  graph.maxBackEdgeNumber = 0;
+  graph.forEachEdge((e: Edge) => {
+    if (e.isBackEdge()) {
+      e.backEdgeNumber = ++graph.maxBackEdgeNumber;
+    } else {
+      e.backEdgeNumber = 0;
+    }
+  });
+}
diff --git a/src/third_party/v8/tools/turbolizer/src/graph-view.ts b/src/third_party/v8/tools/turbolizer/src/graph-view.ts
new file mode 100644
index 0000000..3cb5e6f
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/src/graph-view.ts
@@ -0,0 +1,958 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import * as d3 from "d3";
+import { layoutNodeGraph } from "../src/graph-layout";
+import { GNode, nodeToStr } from "../src/node";
+import { NODE_INPUT_WIDTH } from "../src/node";
+import { DEFAULT_NODE_BUBBLE_RADIUS } from "../src/node";
+import { Edge, edgeToStr } from "../src/edge";
+import { PhaseView } from "../src/view";
+import { MySelection } from "../src/selection";
+import { partial } from "../src/util";
+import { NodeSelectionHandler, ClearableHandler } from "./selection-handler";
+import { Graph } from "./graph";
+import { SelectionBroker } from "./selection-broker";
+
+function nodeToStringKey(n: GNode) {
+  return "" + n.id;
+}
+
+interface GraphState {
+  showTypes: boolean;
+  selection: MySelection;
+  mouseDownNode: any;
+  justDragged: boolean;
+  justScaleTransGraph: boolean;
+  hideDead: boolean;
+}
+
+export class GraphView extends PhaseView {
+  divElement: d3.Selection<any, any, any, any>;
+  svg: d3.Selection<any, any, any, any>;
+  showPhaseByName: (p: string, s: Set<any>) => void;
+  state: GraphState;
+  selectionHandler: NodeSelectionHandler & ClearableHandler;
+  graphElement: d3.Selection<any, any, any, any>;
+  visibleNodes: d3.Selection<any, GNode, any, any>;
+  visibleEdges: d3.Selection<any, Edge, any, any>;
+  drag: d3.DragBehavior<any, GNode, GNode>;
+  panZoom: d3.ZoomBehavior<SVGElement, any>;
+  visibleBubbles: d3.Selection<any, any, any, any>;
+  transitionTimout: number;
+  graph: Graph;
+  broker: SelectionBroker;
+  phaseName: string;
+  toolbox: HTMLElement;
+
+  createViewElement() {
+    const pane = document.createElement('div');
+    pane.setAttribute('id', "graph");
+    return pane;
+  }
+
+  constructor(idOrContainer: string | HTMLElement, broker: SelectionBroker,
+    showPhaseByName: (s: string) => void, toolbox: HTMLElement) {
+    super(idOrContainer);
+    const view = this;
+    this.broker = broker;
+    this.showPhaseByName = showPhaseByName;
+    this.divElement = d3.select(this.divNode);
+    this.phaseName = "";
+    this.toolbox = toolbox;
+    const svg = this.divElement.append("svg")
+      .attr('version', '2.0')
+      .attr("width", "100%")
+      .attr("height", "100%");
+    svg.on("click", function (d) {
+      view.selectionHandler.clear();
+    });
+    // Listen for key events. Note that the focus handler seems
+    // to be important even if it does nothing.
+    svg
+      .on("focus", e => { })
+      .on("keydown", e => { view.svgKeyDown(); });
+
+    view.svg = svg;
+
+    this.state = {
+      selection: null,
+      mouseDownNode: null,
+      justDragged: false,
+      justScaleTransGraph: false,
+      showTypes: false,
+      hideDead: false
+    };
+
+    this.selectionHandler = {
+      clear: function () {
+        view.state.selection.clear();
+        broker.broadcastClear(this);
+        view.updateGraphVisibility();
+      },
+      select: function (nodes: Array<GNode>, selected: boolean) {
+        const locations = [];
+        for (const node of nodes) {
+          if (node.nodeLabel.sourcePosition) {
+            locations.push(node.nodeLabel.sourcePosition);
+          }
+          if (node.nodeLabel.origin && node.nodeLabel.origin.bytecodePosition) {
+            locations.push({ bytecodePosition: node.nodeLabel.origin.bytecodePosition });
+          }
+        }
+        view.state.selection.select(nodes, selected);
+        broker.broadcastSourcePositionSelect(this, locations, selected);
+        view.updateGraphVisibility();
+      },
+      brokeredNodeSelect: function (locations, selected: boolean) {
+        if (!view.graph) return;
+        const selection = view.graph.nodes(n => {
+          return locations.has(nodeToStringKey(n))
+            && (!view.state.hideDead || n.isLive());
+        });
+        view.state.selection.select(selection, selected);
+        // Update edge visibility based on selection.
+        for (const n of view.graph.nodes()) {
+          if (view.state.selection.isSelected(n)) {
+            n.visible = true;
+            n.inputs.forEach(e => {
+              e.visible = e.visible || view.state.selection.isSelected(e.source);
+            });
+            n.outputs.forEach(e => {
+              e.visible = e.visible || view.state.selection.isSelected(e.target);
+            });
+          }
+        }
+        view.updateGraphVisibility();
+      },
+      brokeredClear: function () {
+        view.state.selection.clear();
+        view.updateGraphVisibility();
+      }
+    };
+
+    view.state.selection = new MySelection(nodeToStringKey);
+
+    const defs = svg.append('svg:defs');
+    defs.append('svg:marker')
+      .attr('id', 'end-arrow')
+      .attr('viewBox', '0 -4 8 8')
+      .attr('refX', 2)
+      .attr('markerWidth', 2.5)
+      .attr('markerHeight', 2.5)
+      .attr('orient', 'auto')
+      .append('svg:path')
+      .attr('d', 'M0,-4L8,0L0,4');
+
+    this.graphElement = svg.append("g");
+    view.visibleEdges = this.graphElement.append("g");
+    view.visibleNodes = this.graphElement.append("g");
+
+    view.drag = d3.drag<any, GNode, GNode>()
+      .on("drag", function (d) {
+        d.x += d3.event.dx;
+        d.y += d3.event.dy;
+        view.updateGraphVisibility();
+      });
+
+    function zoomed() {
+      if (d3.event.shiftKey) return false;
+      view.graphElement.attr("transform", d3.event.transform);
+      return true;
+    }
+
+    const zoomSvg = d3.zoom<SVGElement, any>()
+      .scaleExtent([0.2, 40])
+      .on("zoom", zoomed)
+      .on("start", function () {
+        if (d3.event.shiftKey) return;
+        d3.select('body').style("cursor", "move");
+      })
+      .on("end", function () {
+        d3.select('body').style("cursor", "auto");
+      });
+
+    svg.call(zoomSvg).on("dblclick.zoom", null);
+
+    view.panZoom = zoomSvg;
+
+  }
+
+  getEdgeFrontier(nodes: Iterable<GNode>, inEdges: boolean,
+    edgeFilter: (e: Edge, i: number) => boolean) {
+    const frontier: Set<Edge> = new Set();
+    for (const n of nodes) {
+      const edges = inEdges ? n.inputs : n.outputs;
+      let edgeNumber = 0;
+      edges.forEach((edge: Edge) => {
+        if (edgeFilter == undefined || edgeFilter(edge, edgeNumber)) {
+          frontier.add(edge);
+        }
+        ++edgeNumber;
+      });
+    }
+    return frontier;
+  }
+
+  getNodeFrontier(nodes: Iterable<GNode>, inEdges: boolean,
+    edgeFilter: (e: Edge, i: number) => boolean) {
+    const view = this;
+    const frontier: Set<GNode> = new Set();
+    let newState = true;
+    const edgeFrontier = view.getEdgeFrontier(nodes, inEdges, edgeFilter);
+    // Control key toggles edges rather than just turning them on
+    if (d3.event.ctrlKey) {
+      edgeFrontier.forEach(function (edge: Edge) {
+        if (edge.visible) {
+          newState = false;
+        }
+      });
+    }
+    edgeFrontier.forEach(function (edge: Edge) {
+      edge.visible = newState;
+      if (newState) {
+        const node = inEdges ? edge.source : edge.target;
+        node.visible = true;
+        frontier.add(node);
+      }
+    });
+    view.updateGraphVisibility();
+    if (newState) {
+      return frontier;
+    } else {
+      return undefined;
+    }
+  }
+
+  initializeContent(data, rememberedSelection) {
+    this.show();
+    function createImgInput(id: string, title: string, onClick): HTMLElement {
+      const input = document.createElement("input");
+      input.setAttribute("id", id);
+      input.setAttribute("type", "image");
+      input.setAttribute("title", title);
+      input.setAttribute("src", `img/${id}-icon.png`);
+      input.className = "button-input graph-toolbox-item";
+      input.addEventListener("click", onClick);
+      return input;
+    }
+    this.toolbox.appendChild(createImgInput("layout", "layout graph",
+      partial(this.layoutAction, this)));
+    this.toolbox.appendChild(createImgInput("show-all", "show all nodes",
+      partial(this.showAllAction, this)));
+    this.toolbox.appendChild(createImgInput("show-control", "show only control nodes",
+      partial(this.showControlAction, this)));
+    this.toolbox.appendChild(createImgInput("toggle-hide-dead", "toggle hide dead nodes",
+      partial(this.toggleHideDead, this)));
+    this.toolbox.appendChild(createImgInput("hide-unselected", "hide unselected",
+      partial(this.hideUnselectedAction, this)));
+    this.toolbox.appendChild(createImgInput("hide-selected", "hide selected",
+      partial(this.hideSelectedAction, this)));
+    this.toolbox.appendChild(createImgInput("zoom-selection", "zoom selection",
+      partial(this.zoomSelectionAction, this)));
+    this.toolbox.appendChild(createImgInput("toggle-types", "toggle types",
+      partial(this.toggleTypesAction, this)));
+
+    this.phaseName = data.name;
+    this.createGraph(data.data, rememberedSelection);
+    this.broker.addNodeHandler(this.selectionHandler);
+
+    if (rememberedSelection != null && rememberedSelection.size > 0) {
+      this.attachSelection(rememberedSelection);
+      this.connectVisibleSelectedNodes();
+      this.viewSelection();
+    } else {
+      this.viewWholeGraph();
+    }
+  }
+
+  deleteContent() {
+    for (const item of this.toolbox.querySelectorAll(".graph-toolbox-item")) {
+      item.parentElement.removeChild(item);
+    }
+
+    for (const n of this.graph.nodes()) {
+      n.visible = false;
+    }
+    this.graph.forEachEdge((e: Edge) => {
+      e.visible = false;
+    });
+    this.updateGraphVisibility();
+  }
+
+  public hide(): void {
+    super.hide();
+    this.deleteContent();
+  }
+
+  createGraph(data, rememberedSelection) {
+    this.graph = new Graph(data);
+
+    this.showControlAction(this);
+
+    if (rememberedSelection != undefined) {
+      for (const n of this.graph.nodes()) {
+        n.visible = n.visible || rememberedSelection.has(nodeToStringKey(n));
+      }
+    }
+
+    this.graph.forEachEdge(e => e.visible = e.source.visible && e.target.visible);
+
+    this.layoutGraph();
+    this.updateGraphVisibility();
+  }
+
+  connectVisibleSelectedNodes() {
+    const view = this;
+    for (const n of view.state.selection) {
+      n.inputs.forEach(function (edge: Edge) {
+        if (edge.source.visible && edge.target.visible) {
+          edge.visible = true;
+        }
+      });
+      n.outputs.forEach(function (edge: Edge) {
+        if (edge.source.visible && edge.target.visible) {
+          edge.visible = true;
+        }
+      });
+    }
+  }
+
+  updateInputAndOutputBubbles() {
+    const view = this;
+    const g = this.graph;
+    const s = this.visibleBubbles;
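+    // Bubble ids encode their owner: input bubbles are "ib,<source>,<index>,<target>"
+    // (the edge's stringID), output bubbles are "ob,<nodeId>"; the components
+    // below are parsed back out of that id.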
+    s.classed("filledBubbleStyle", function (c) {
+      const components = this.id.split(',');
+      if (components[0] == "ib") {
+        const edge = g.nodeMap[components[3]].inputs[components[2]];
+        return edge.isVisible();
+      } else {
+        return g.nodeMap[components[1]].areAnyOutputsVisible() == 2;
+      }
+    }).classed("halfFilledBubbleStyle", function (c) {
+      const components = this.id.split(',');
+      if (components[0] == "ib") {
+        return false;
+      } else {
+        return g.nodeMap[components[1]].areAnyOutputsVisible() == 1;
+      }
+    }).classed("bubbleStyle", function (c) {
+      const components = this.id.split(',');
+      if (components[0] == "ib") {
+        const edge = g.nodeMap[components[3]].inputs[components[2]];
+        return !edge.isVisible();
+      } else {
+        return g.nodeMap[components[1]].areAnyOutputsVisible() == 0;
+      }
+    });
+    s.each(function (c) {
+      const components = this.id.split(',');
+      if (components[0] == "ob") {
+        const from = g.nodeMap[components[1]];
+        const x = from.getOutputX();
+        const y = from.getNodeHeight(view.state.showTypes) + DEFAULT_NODE_BUBBLE_RADIUS;
+        const transform = "translate(" + x + "," + y + ")";
+        this.setAttribute('transform', transform);
+      }
+    });
+  }
+
+  attachSelection(s) {
+    if (!(s instanceof Set)) return;
+    this.selectionHandler.clear();
+    const selected = [...this.graph.nodes(n =>
+      s.has(this.state.selection.stringKey(n)) && (!this.state.hideDead || n.isLive()))];
+    this.selectionHandler.select(selected, true);
+  }
+
+  detachSelection() {
+    return this.state.selection.detachSelection();
+  }
+
+  selectAllNodes() {
+    if (!d3.event.shiftKey) {
+      this.state.selection.clear();
+    }
+    const allVisibleNodes = [...this.graph.nodes(n => n.visible)];
+    this.state.selection.select(allVisibleNodes, true);
+    this.updateGraphVisibility();
+  }
+
+  layoutAction(graph: GraphView) {
+    graph.layoutGraph();
+    graph.updateGraphVisibility();
+    graph.viewWholeGraph();
+    graph.focusOnSvg();
+  }
+
+  showAllAction(view: GraphView) {
+    for (const n of view.graph.nodes()) {
+      n.visible = !view.state.hideDead || n.isLive();
+    }
+    view.graph.forEachEdge((e: Edge) => {
+      e.visible = e.source.visible || e.target.visible;
+    });
+    view.updateGraphVisibility();
+    view.viewWholeGraph();
+    view.focusOnSvg();
+  }
+
+  showControlAction(view: GraphView) {
+    for (const n of view.graph.nodes()) {
+      n.visible = n.cfg && (!view.state.hideDead || n.isLive());
+    }
+    view.graph.forEachEdge((e: Edge) => {
+      e.visible = e.type == 'control' && e.source.visible && e.target.visible;
+    });
+    view.updateGraphVisibility();
+    view.viewWholeGraph();
+    view.focusOnSvg();
+  }
+
+  toggleHideDead(view: GraphView) {
+    view.state.hideDead = !view.state.hideDead;
+    if (view.state.hideDead) {
+      view.hideDead();
+    } else {
+      view.showDead();
+    }
+    const element = document.getElementById('toggle-hide-dead');
+    element.classList.toggle('button-input-toggled', view.state.hideDead);
+    view.focusOnSvg();
+  }
+
+  hideDead() {
+    for (const n of this.graph.nodes()) {
+      if (!n.isLive()) {
+        n.visible = false;
+        this.state.selection.select([n], false);
+      }
+    }
+    this.updateGraphVisibility();
+  }
+
+  showDead() {
+    for (const n of this.graph.nodes()) {
+      if (!n.isLive()) {
+        n.visible = true;
+      }
+    }
+    this.updateGraphVisibility();
+  }
+
+  hideUnselectedAction(view: GraphView) {
+    for (const n of view.graph.nodes()) {
+      if (!view.state.selection.isSelected(n)) {
+        n.visible = false;
+      }
+    }
+    view.updateGraphVisibility();
+    view.focusOnSvg();
+  }
+
+  hideSelectedAction(view: GraphView) {
+    for (const n of view.graph.nodes()) {
+      if (view.state.selection.isSelected(n)) {
+        n.visible = false;
+      }
+    }
+    view.selectionHandler.clear();
+    view.focusOnSvg();
+  }
+
+  zoomSelectionAction(view: GraphView) {
+    view.viewSelection();
+    view.focusOnSvg();
+  }
+
+  toggleTypesAction(view: GraphView) {
+    view.toggleTypes();
+    view.focusOnSvg();
+  }
+
+  searchInputAction(searchBar: HTMLInputElement, e: KeyboardEvent, onlyVisible: boolean) {
+    if (e.keyCode == 13) {
+      this.selectionHandler.clear();
+      const query = searchBar.value;
+      window.sessionStorage.setItem("lastSearch", query);
+      if (query.length == 0) return;
+
+      const reg = new RegExp(query);
+      const filterFunction = (n: GNode) => {
+        return (reg.exec(n.getDisplayLabel()) != null ||
+          (this.state.showTypes && reg.exec(n.getDisplayType())) ||
+          (reg.exec(n.getTitle())) ||
+          reg.exec(n.nodeLabel.opcode) != null);
+      };
+
+      const selection = [...this.graph.nodes(n => {
+        if ((e.ctrlKey || n.visible || !onlyVisible) && filterFunction(n)) {
+          if (e.ctrlKey || !onlyVisible) n.visible = true;
+          return true;
+        }
+        return false;
+      })];
+
+      this.selectionHandler.select(selection, true);
+      this.connectVisibleSelectedNodes();
+      this.updateGraphVisibility();
+      searchBar.blur();
+      this.viewSelection();
+      this.focusOnSvg();
+    }
+    e.stopPropagation();
+  }
+
+  focusOnSvg() {
+    (document.getElementById("graph").childNodes[0] as HTMLElement).focus();
+  }
+
+  svgKeyDown() {
+    const view = this;
+    const state = this.state;
+
+    const showSelectionFrontierNodes = (inEdges: boolean, filter: (e: Edge, i: number) => boolean, doSelect: boolean) => {
+      const frontier = view.getNodeFrontier(state.selection, inEdges, filter);
+      if (frontier != undefined && frontier.size) {
+        if (doSelect) {
+          if (!d3.event.shiftKey) {
+            state.selection.clear();
+          }
+          state.selection.select([...frontier], true);
+        }
+        view.updateGraphVisibility();
+      }
+    };
+
+    let eventHandled = true; // unless the below switch defaults
+    switch (d3.event.keyCode) {
+      case 49:
+      case 50:
+      case 51:
+      case 52:
+      case 53:
+      case 54:
+      case 55:
+      case 56:
+      case 57:
+        // '1'-'9'
+        showSelectionFrontierNodes(true,
+          (edge: Edge, index: number) => index == (d3.event.keyCode - 49),
+          !d3.event.ctrlKey);
+        break;
+      case 97:
+      case 98:
+      case 99:
+      case 100:
+      case 101:
+      case 102:
+      case 103:
+      case 104:
+      case 105:
+        // 'numpad 1'-'numpad 9'
+        showSelectionFrontierNodes(true,
+          (edge, index) => index == (d3.event.keyCode - 97),
+          !d3.event.ctrlKey);
+        break;
+      case 67:
+        // 'c'
+        showSelectionFrontierNodes(d3.event.altKey,
+          (edge, index) => edge.type == 'control',
+          true);
+        break;
+      case 69:
+        // 'e'
+        showSelectionFrontierNodes(d3.event.altKey,
+          (edge, index) => edge.type == 'effect',
+          true);
+        break;
+      case 79:
+        // 'o'
+        showSelectionFrontierNodes(false, undefined, false);
+        break;
+      case 73:
+        // 'i'
+        if (!d3.event.ctrlKey && !d3.event.shiftKey) {
+          showSelectionFrontierNodes(true, undefined, false);
+        } else {
+          eventHandled = false;
+        }
+        break;
+      case 65:
+        // 'a'
+        view.selectAllNodes();
+        break;
+      case 38:
+      // UP
+      case 40: {
+        // DOWN
+        showSelectionFrontierNodes(d3.event.keyCode == 38, undefined, true);
+        break;
+      }
+      case 82:
+        // 'r'
+        if (!d3.event.ctrlKey && !d3.event.shiftKey) {
+          this.layoutAction(this);
+        } else {
+          eventHandled = false;
+        }
+        break;
+      case 80:
+        // 'p'
+        view.selectOrigins();
+        break;
+      default:
+        eventHandled = false;
+        break;
+      case 83:
+        // 's'
+        if (!d3.event.ctrlKey && !d3.event.shiftKey) {
+          this.hideSelectedAction(this);
+        } else {
+          eventHandled = false;
+        }
+        break;
+      case 85:
+        // 'u'
+        if (!d3.event.ctrlKey && !d3.event.shiftKey) {
+          this.hideUnselectedAction(this);
+        } else {
+          eventHandled = false;
+        }
+        break;
+    }
+    if (eventHandled) {
+      d3.event.preventDefault();
+    }
+  }
+
+  layoutGraph() {
+    console.time("layoutGraph");
+    layoutNodeGraph(this.graph, this.state.showTypes);
+    const extent = this.graph.redetermineGraphBoundingBox(this.state.showTypes);
+    this.panZoom.translateExtent(extent);
+    this.minScale();
+    console.timeEnd("layoutGraph");
+  }
+
+  selectOrigins() {
+    const state = this.state;
+    const origins = [];
+    let phase = this.phaseName;
+    const selection = new Set<any>();
+    for (const n of state.selection) {
+      const origin = n.nodeLabel.origin;
+      if (origin) {
+        phase = origin.phase;
+        const node = this.graph.nodeMap[origin.nodeId];
+        if (phase === this.phaseName && node) {
+          origins.push(node);
+        } else {
+          selection.add(`${origin.nodeId}`);
+        }
+      }
+    }
+    // Only go through phase reselection if we actually need
+    // to display another phase.
+    if (selection.size > 0 && phase !== this.phaseName) {
+      this.showPhaseByName(phase, selection);
+    } else if (origins.length > 0) {
+      this.selectionHandler.clear();
+      this.selectionHandler.select(origins, true);
+    }
+  }
+
+  // call to propagate changes to graph
+  updateGraphVisibility() {
+    const view = this;
+    const graph = this.graph;
+    const state = this.state;
+    if (!graph) return;
+
+    const filteredEdges = [...graph.filteredEdges(function (e) {
+      return e.source.visible && e.target.visible;
+    })];
+    const selEdges = view.visibleEdges.selectAll<SVGPathElement, Edge>("path").data(filteredEdges, edgeToStr);
+
+    // remove old links
+    selEdges.exit().remove();
+
+    // add new paths
+    const newEdges = selEdges.enter()
+      .append('path');
+
+    newEdges.style('marker-end', 'url(#end-arrow)')
+      .attr("id", function (edge) { return "e," + edge.stringID(); })
+      .on("click", function (edge) {
+        d3.event.stopPropagation();
+        if (!d3.event.shiftKey) {
+          view.selectionHandler.clear();
+        }
+        view.selectionHandler.select([edge.source, edge.target], true);
+      })
+      .attr("adjacentToHover", "false")
+      .classed('value', function (e) {
+        return e.type == 'value' || e.type == 'context';
+      }).classed('control', function (e) {
+        return e.type == 'control';
+      }).classed('effect', function (e) {
+        return e.type == 'effect';
+      }).classed('frame-state', function (e) {
+        return e.type == 'frame-state';
+      }).attr('stroke-dasharray', function (e) {
+        if (e.type == 'frame-state') return "10,10";
+        return (e.type == 'effect') ? "5,5" : "";
+      });
+
+    const newAndOldEdges = newEdges.merge(selEdges);
+
+    newAndOldEdges.classed('hidden', e => !e.isVisible());
+
+    // select existing nodes
+    const filteredNodes = [...graph.nodes(n => n.visible)];
+    const allNodes = view.visibleNodes.selectAll<SVGGElement, GNode>("g");
+    const selNodes = allNodes.data(filteredNodes, nodeToStr);
+
+    // remove old nodes
+    selNodes.exit().remove();
+
+    // add new nodes
+    const newGs = selNodes.enter()
+      .append("g");
+
+    newGs.classed("turbonode", function (n) { return true; })
+      .classed("control", function (n) { return n.isControl(); })
+      .classed("live", function (n) { return n.isLive(); })
+      .classed("dead", function (n) { return !n.isLive(); })
+      .classed("javascript", function (n) { return n.isJavaScript(); })
+      .classed("input", function (n) { return n.isInput(); })
+      .classed("simplified", function (n) { return n.isSimplified(); })
+      .classed("machine", function (n) { return n.isMachine(); })
+      .on('mouseenter', function (node) {
+        const visibleEdges = view.visibleEdges.selectAll<SVGPathElement, Edge>('path');
+        const adjInputEdges = visibleEdges.filter(e => e.target === node);
+        const adjOutputEdges = visibleEdges.filter(e => e.source === node);
+        adjInputEdges.attr('relToHover', "input");
+        adjOutputEdges.attr('relToHover', "output");
+        const adjInputNodes = adjInputEdges.data().map(e => e.source);
+        const visibleNodes = view.visibleNodes.selectAll<SVGGElement, GNode>("g");
+        visibleNodes.data<GNode>(adjInputNodes, nodeToStr).attr('relToHover', "input");
+        const adjOutputNodes = adjOutputEdges.data().map(e => e.target);
+        visibleNodes.data<GNode>(adjOutputNodes, nodeToStr).attr('relToHover', "output");
+        view.updateGraphVisibility();
+      })
+      .on('mouseleave', function (node) {
+        const visibleEdges = view.visibleEdges.selectAll<SVGPathElement, Edge>('path');
+        const adjEdges = visibleEdges.filter(e => e.target === node || e.source === node);
+        adjEdges.attr('relToHover', "none");
+        const adjNodes = adjEdges.data().map(e => e.target).concat(adjEdges.data().map(e => e.source));
+        const visibleNodes = view.visibleNodes.selectAll<SVGPathElement, GNode>("g");
+        visibleNodes.data(adjNodes, nodeToStr).attr('relToHover', "none");
+        view.updateGraphVisibility();
+      })
+      .on("click", d => {
+        if (!d3.event.shiftKey) view.selectionHandler.clear();
+        view.selectionHandler.select([d], undefined);
+        d3.event.stopPropagation();
+      })
+      .call(view.drag);
+
+    newGs.append("rect")
+      .attr("rx", 10)
+      .attr("ry", 10)
+      .attr('width', function (d) {
+        return d.getTotalNodeWidth();
+      })
+      .attr('height', function (d) {
+        return d.getNodeHeight(view.state.showTypes);
+      });
+
+    function appendInputAndOutputBubbles(g, d) {
+      for (let i = 0; i < d.inputs.length; ++i) {
+        const x = d.getInputX(i);
+        const y = -DEFAULT_NODE_BUBBLE_RADIUS;
+        g.append('circle')
+          .classed("filledBubbleStyle", function (c) {
+            return d.inputs[i].isVisible();
+          })
+          .classed("bubbleStyle", function (c) {
+            return !d.inputs[i].isVisible();
+          })
+          .attr("id", "ib," + d.inputs[i].stringID())
+          .attr("r", DEFAULT_NODE_BUBBLE_RADIUS)
+          .attr("transform", function (d) {
+            return "translate(" + x + "," + y + ")";
+          })
+          .on("click", function (this: SVGCircleElement, d) {
+            const components = this.id.split(',');
+            const node = graph.nodeMap[components[3]];
+            const edge = node.inputs[components[2]];
+            const visible = !edge.isVisible();
+            node.setInputVisibility(components[2], visible);
+            d3.event.stopPropagation();
+            view.updateGraphVisibility();
+          });
+      }
+      if (d.outputs.length != 0) {
+        const x = d.getOutputX();
+        const y = d.getNodeHeight(view.state.showTypes) + DEFAULT_NODE_BUBBLE_RADIUS;
+        g.append('circle')
+          .classed("filledBubbleStyle", function (c) {
+            return d.areAnyOutputsVisible() == 2;
+          })
+          .classed("halFilledBubbleStyle", function (c) {
+            return d.areAnyOutputsVisible() == 1;
+          })
+          .classed("bubbleStyle", function (c) {
+            return d.areAnyOutputsVisible() == 0;
+          })
+          .attr("id", "ob," + d.id)
+          .attr("r", DEFAULT_NODE_BUBBLE_RADIUS)
+          .attr("transform", function (d) {
+            return "translate(" + x + "," + y + ")";
+          })
+          .on("click", function (d) {
+            d.setOutputVisibility(d.areAnyOutputsVisible() == 0);
+            d3.event.stopPropagation();
+            view.updateGraphVisibility();
+          });
+      }
+    }
+
+    newGs.each(function (d) {
+      appendInputAndOutputBubbles(d3.select(this), d);
+    });
+
+    newGs.each(function (d) {
+      d3.select(this).append("text")
+        .classed("label", true)
+        .attr("text-anchor", "right")
+        .attr("dx", 5)
+        .attr("dy", 5)
+        .append('tspan')
+        .text(function (l) {
+          return d.getDisplayLabel();
+        })
+        .append("title")
+        .text(function (l) {
+          return d.getTitle();
+        });
+      if (d.nodeLabel.type != undefined) {
+        d3.select(this).append("text")
+          .classed("label", true)
+          .classed("type", true)
+          .attr("text-anchor", "right")
+          .attr("dx", 5)
+          .attr("dy", d.labelbbox.height + 5)
+          .append('tspan')
+          .text(function (l) {
+            return d.getDisplayType();
+          })
+          .append("title")
+          .text(function (l) {
+            return d.getType();
+          });
+      }
+    });
+
+    const newAndOldNodes = newGs.merge(selNodes);
+
+    newAndOldNodes.select<SVGTextElement>('.type').each(function (d) {
+      this.setAttribute('visibility', view.state.showTypes ? 'visible' : 'hidden');
+    });
+
+    newAndOldNodes
+      .classed("selected", function (n) {
+        if (state.selection.isSelected(n)) return true;
+        return false;
+      })
+      .attr("transform", function (d) { return "translate(" + d.x + "," + d.y + ")"; })
+      .select('rect')
+      .attr('height', function (d) { return d.getNodeHeight(view.state.showTypes); });
+
+    view.visibleBubbles = d3.selectAll('circle');
+
+    view.updateInputAndOutputBubbles();
+
+    graph.maxGraphX = graph.maxGraphNodeX;
+    newAndOldEdges.attr("d", function (edge) {
+      return edge.generatePath(graph, view.state.showTypes);
+    });
+  }
+
+  getSvgViewDimensions() {
+    return [this.container.clientWidth, this.container.clientHeight];
+  }
+
+  getSvgExtent(): [[number, number], [number, number]] {
+    return [[0, 0], [this.container.clientWidth, this.container.clientHeight]];
+  }
+
+  minScale() {
+    const dimensions = this.getSvgViewDimensions();
+    const minXScale = dimensions[0] / (2 * this.graph.width);
+    const minYScale = dimensions[1] / (2 * this.graph.height);
+    const minScale = Math.min(minXScale, minYScale);
+    this.panZoom.scaleExtent([minScale, 40]);
+    return minScale;
+  }
+
+  onresize() {
+    const trans = d3.zoomTransform(this.svg.node());
+    const ctrans = this.panZoom.constrain()(trans, this.getSvgExtent(), this.panZoom.translateExtent());
+    this.panZoom.transform(this.svg, ctrans);
+  }
+
+  toggleTypes() {
+    const view = this;
+    view.state.showTypes = !view.state.showTypes;
+    const element = document.getElementById('toggle-types');
+    element.classList.toggle('button-input-toggled', view.state.showTypes);
+    view.updateGraphVisibility();
+  }
+
+  viewSelection() {
+    const view = this;
+    let minX;
+    let maxX;
+    let minY;
+    let maxY;
+    let hasSelection = false;
+    view.visibleNodes.selectAll<SVGGElement, GNode>("g").each(function (n) {
+      if (view.state.selection.isSelected(n)) {
+        hasSelection = true;
+        minX = minX ? Math.min(minX, n.x) : n.x;
+        maxX = maxX ? Math.max(maxX, n.x + n.getTotalNodeWidth()) :
+          n.x + n.getTotalNodeWidth();
+        minY = minY ? Math.min(minY, n.y) : n.y;
+        maxY = maxY ? Math.max(maxY, n.y + n.getNodeHeight(view.state.showTypes)) :
+          n.y + n.getNodeHeight(view.state.showTypes);
+      }
+    });
+    if (hasSelection) {
+      view.viewGraphRegion(minX - NODE_INPUT_WIDTH, minY - 60,
+        maxX + NODE_INPUT_WIDTH, maxY + 60);
+    }
+  }
+
+  viewGraphRegion(minX, minY, maxX, maxY) {
+    const [width, height] = this.getSvgViewDimensions();
+    const dx = maxX - minX;
+    const dy = maxY - minY;
+    const x = (minX + maxX) / 2;
+    const y = (minY + maxY) / 2;
+    const scale = Math.min(width / dx, height / dy) * 0.9;
+    this.svg
+      .transition().duration(120).call(this.panZoom.scaleTo, scale)
+      .transition().duration(120).call(this.panZoom.translateTo, x, y);
+  }
+
+  viewWholeGraph() {
+    this.panZoom.scaleTo(this.svg, 0);
+    this.panZoom.translateTo(this.svg,
+      this.graph.minGraphX + this.graph.width / 2,
+      this.graph.minGraphY + this.graph.height / 2);
+  }
+}
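The fit-to-selection behaviour in viewGraphRegion above reduces to: centre the viewport on the
midpoint of the selected region and scale so the region fits with a 10% margin. A minimal
standalone sketch of that arithmetic, with illustrative numbers:

function fitRegion(viewport: [number, number],
                   region: [number, number, number, number]) {
  const [width, height] = viewport;
  const [minX, minY, maxX, maxY] = region;
  const dx = maxX - minX;                                 // region width
  const dy = maxY - minY;                                 // region height
  const x = (minX + maxX) / 2;                            // centre to translate to
  const y = (minY + maxY) / 2;
  const scale = Math.min(width / dx, height / dy) * 0.9;  // fit with a 10% margin
  return { x, y, scale };
}

// A 200x100 selection in a 1000x800 viewport is centred at (100, 50) and scaled by
// min(5, 8) * 0.9 = 4.5.
console.log(fitRegion([1000, 800], [0, 0, 200, 100]));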
diff --git a/src/third_party/v8/tools/turbolizer/src/graph.ts b/src/third_party/v8/tools/turbolizer/src/graph.ts
new file mode 100644
index 0000000..0eb2e3e
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/src/graph.ts
@@ -0,0 +1,107 @@
+import { GNode } from "./node";
+import { Edge, MINIMUM_EDGE_SEPARATION } from "./edge";
+
+export class Graph {
+  nodeMap: Array<GNode>;
+  minGraphX: number;
+  maxGraphX: number;
+  minGraphY: number;
+  maxGraphY: number;
+  maxGraphNodeX: number;
+  maxBackEdgeNumber: number;
+  width: number;
+  height: number;
+
+  constructor(data: any) {
+    this.nodeMap = [];
+
+    this.minGraphX = 0;
+    this.maxGraphX = 1;
+    this.minGraphY = 0;
+    this.maxGraphY = 1;
+    this.width = 1;
+    this.height = 1;
+
+    data.nodes.forEach((jsonNode: any) => {
+      this.nodeMap[jsonNode.id] = new GNode(jsonNode.nodeLabel);
+    });
+
+    data.edges.forEach((e: any) => {
+      const t = this.nodeMap[e.target];
+      const s = this.nodeMap[e.source];
+      const newEdge = new Edge(t, e.index, s, e.type);
+      t.inputs.push(newEdge);
+      s.outputs.push(newEdge);
+      if (e.type == 'control') {
+        // Every source of a control edge is a CFG node.
+        s.cfg = true;
+      }
+    });
+
+  }
+
+  *nodes(p = (n: GNode) => true) {
+    for (const node of this.nodeMap) {
+      if (!node || !p(node)) continue;
+      yield node;
+    }
+  }
+
+  *filteredEdges(p: (e: Edge) => boolean) {
+    for (const node of this.nodes()) {
+      for (const edge of node.inputs) {
+        if (p(edge)) yield edge;
+      }
+    }
+  }
+
+  forEachEdge(p: (e: Edge) => void) {
+    for (const node of this.nodeMap) {
+      if (!node) continue;
+      for (const edge of node.inputs) {
+        p(edge);
+      }
+    }
+  }
+
+  redetermineGraphBoundingBox(showTypes: boolean): [[number, number], [number, number]] {
+    this.minGraphX = 0;
+    this.maxGraphNodeX = 1;
+    this.maxGraphX = undefined;  // see below
+    this.minGraphY = 0;
+    this.maxGraphY = 1;
+
+    for (const node of this.nodes()) {
+      if (!node.visible) {
+        continue;
+      }
+
+      if (node.x < this.minGraphX) {
+        this.minGraphX = node.x;
+      }
+      if ((node.x + node.getTotalNodeWidth()) > this.maxGraphNodeX) {
+        this.maxGraphNodeX = node.x + node.getTotalNodeWidth();
+      }
+      if ((node.y - 50) < this.minGraphY) {
+        this.minGraphY = node.y - 50;
+      }
+      if ((node.y + node.getNodeHeight(showTypes) + 50) > this.maxGraphY) {
+        this.maxGraphY = node.y + node.getNodeHeight(showTypes) + 50;
+      }
+    }
+
+    this.maxGraphX = this.maxGraphNodeX +
+      this.maxBackEdgeNumber * MINIMUM_EDGE_SEPARATION;
+
+    this.width = this.maxGraphX - this.minGraphX;
+    this.height = this.maxGraphY - this.minGraphY;
+
+    const extent: [[number, number], [number, number]] = [
+      [this.minGraphX - this.width / 2, this.minGraphY - this.height / 2],
+      [this.maxGraphX + this.width / 2, this.maxGraphY + this.height / 2]
+    ];
+
+    return extent;
+  }
+
+}
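Graph stores its nodes in a sparse array indexed by node id, and the nodes() generator skips
the holes left by non-contiguous ids while applying an optional predicate. A small
self-contained illustration of the same pattern (MiniNode is a stand-in, not the real GNode):

interface MiniNode { id: number; visible: boolean; }

const nodeMap: Array<MiniNode> = [];
nodeMap[0] = { id: 0, visible: true };
nodeMap[5] = { id: 5, visible: false };   // ids need not be contiguous
nodeMap[7] = { id: 7, visible: true };

function* nodes(p: (n: MiniNode) => boolean = () => true) {
  for (const node of nodeMap) {
    if (!node || !p(node)) continue;      // holes iterate as undefined and are skipped
    yield node;
  }
}

// Logs 0 and 7 only.
for (const n of nodes(n => n.visible)) console.log(n.id);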
diff --git a/src/third_party/v8/tools/turbolizer/src/graphmultiview.ts b/src/third_party/v8/tools/turbolizer/src/graphmultiview.ts
new file mode 100644
index 0000000..4f8f633
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/src/graphmultiview.ts
@@ -0,0 +1,143 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import { GraphView } from "../src/graph-view";
+import { ScheduleView } from "../src/schedule-view";
+import { SequenceView } from "../src/sequence-view";
+import { SourceResolver } from "../src/source-resolver";
+import { SelectionBroker } from "../src/selection-broker";
+import { View, PhaseView } from "../src/view";
+
+const multiviewID = "multiview";
+
+const toolboxHTML = `
+<div class="graph-toolbox">
+  <select id="phase-select">
+    <option disabled selected>(please open a file)</option>
+  </select>
+  <input id="search-input" type="text" title="search nodes for regex" alt="search node for regex" class="search-input"
+    placeholder="find with regexp&hellip;">
+  <label><input id="search-only-visible" type="checkbox" name="instruction-address" alt="Apply search to visible nodes only">only visible</label>
+</div>`;
+
+export class GraphMultiView extends View {
+  sourceResolver: SourceResolver;
+  selectionBroker: SelectionBroker;
+  graph: GraphView;
+  schedule: ScheduleView;
+  sequence: SequenceView;
+  selectMenu: HTMLSelectElement;
+  currentPhaseView: PhaseView;
+
+  createViewElement() {
+    const pane = document.createElement("div");
+    pane.setAttribute("id", multiviewID);
+    pane.setAttribute("tabindex", "1");
+    pane.className = "viewpane";
+    return pane;
+  }
+
+  hide() {
+    this.hideCurrentPhase();
+    super.hide();
+  }
+
+  constructor(id, selectionBroker, sourceResolver) {
+    super(id);
+    const view = this;
+    view.sourceResolver = sourceResolver;
+    view.selectionBroker = selectionBroker;
+    const toolbox = document.createElement("div");
+    toolbox.className = "toolbox-anchor";
+    toolbox.innerHTML = toolboxHTML;
+    view.divNode.appendChild(toolbox);
+    const searchInput = toolbox.querySelector("#search-input") as HTMLInputElement;
+    const onlyVisibleCheckbox = toolbox.querySelector("#search-only-visible") as HTMLInputElement;
+    searchInput.addEventListener("keyup", e => {
+      if (!view.currentPhaseView) return;
+      view.currentPhaseView.searchInputAction(searchInput, e, onlyVisibleCheckbox.checked);
+    });
+    view.divNode.addEventListener("keyup", (e: KeyboardEvent) => {
+      if (e.keyCode == 191) { // keyCode == '/'
+        searchInput.focus();
+      }
+    });
+    searchInput.setAttribute("value", window.sessionStorage.getItem("lastSearch") || "");
+    this.graph = new GraphView(this.divNode, selectionBroker, view.displayPhaseByName.bind(this),
+      toolbox.querySelector(".graph-toolbox"));
+    this.schedule = new ScheduleView(this.divNode, selectionBroker);
+    this.sequence = new SequenceView(this.divNode, selectionBroker);
+    this.selectMenu = toolbox.querySelector("#phase-select") as HTMLSelectElement;
+  }
+
+  initializeSelect() {
+    const view = this;
+    view.selectMenu.innerHTML = "";
+    view.sourceResolver.forEachPhase(phase => {
+      const optionElement = document.createElement("option");
+      let maxNodeId = "";
+      if (phase.type == "graph" && phase.highestNodeId != 0) {
+        maxNodeId = ` ${phase.highestNodeId}`;
+      }
+      optionElement.text = `${phase.name}${maxNodeId}`;
+      view.selectMenu.add(optionElement);
+    });
+    this.selectMenu.onchange = function (this: HTMLSelectElement) {
+      const phaseIndex = this.selectedIndex;
+      window.sessionStorage.setItem("lastSelectedPhase", phaseIndex.toString());
+      view.displayPhase(view.sourceResolver.getPhase(phaseIndex));
+    };
+  }
+
+  show() {
+    // Insert before is used so that the display is inserted before the
+    // resizer for the RangeView.
+    this.container.insertBefore(this.divNode, this.container.firstChild);
+    this.initializeSelect();
+    const lastPhaseIndex = +window.sessionStorage.getItem("lastSelectedPhase");
+    const initialPhaseIndex = this.sourceResolver.repairPhaseId(lastPhaseIndex);
+    this.selectMenu.selectedIndex = initialPhaseIndex;
+    this.displayPhase(this.sourceResolver.getPhase(initialPhaseIndex));
+  }
+
+  displayPhase(phase, selection?: Set<any>) {
+    if (phase.type == "graph") {
+      this.displayPhaseView(this.graph, phase, selection);
+    } else if (phase.type == "schedule") {
+      this.displayPhaseView(this.schedule, phase, selection);
+    } else if (phase.type == "sequence") {
+      this.displayPhaseView(this.sequence, phase, selection);
+    }
+  }
+
+  displayPhaseView(view: PhaseView, data, selection?: Set<any>) {
+    const rememberedSelection = selection ? selection : this.hideCurrentPhase();
+    view.initializeContent(data, rememberedSelection);
+    this.currentPhaseView = view;
+  }
+
+  displayPhaseByName(phaseName, selection?: Set<any>) {
+    const phaseId = this.sourceResolver.getPhaseIdByName(phaseName);
+    this.selectMenu.selectedIndex = phaseId;
+    this.displayPhase(this.sourceResolver.getPhase(phaseId), selection);
+  }
+
+  hideCurrentPhase() {
+    let rememberedSelection = null;
+    if (this.currentPhaseView != null) {
+      rememberedSelection = this.currentPhaseView.detachSelection();
+      this.currentPhaseView.hide();
+      this.currentPhaseView = null;
+    }
+    return rememberedSelection;
+  }
+
+  onresize() {
+    if (this.currentPhaseView) this.currentPhaseView.onresize();
+  }
+
+  detachSelection() {
+    return null;
+  }
+}
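show() restores the phase that was open before a reload from sessionStorage; a missing key
coerces to 0, and SourceResolver.repairPhaseId is relied on to clamp stale indices. A small
sketch of that fallback behaviour, with an inline clamp standing in for repairPhaseId (the
bound here is assumed, not taken from the real resolver):

function restorePhaseIndex(phaseCount: number): number {
  // getItem returns null when the key is missing; Number(null) is 0, so the first
  // phase is the default. The explicit clamp stands in for repairPhaseId.
  const stored = Number(window.sessionStorage.getItem("lastSelectedPhase"));
  return Number.isInteger(stored) && stored >= 0 && stored < phaseCount ? stored : 0;
}

// With 12 phases and nothing stored this returns 0; with "7" stored it returns 7.
console.log(restorePhaseIndex(12));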
diff --git a/src/third_party/v8/tools/turbolizer/src/info-view.ts b/src/third_party/v8/tools/turbolizer/src/info-view.ts
new file mode 100644
index 0000000..3858536
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/src/info-view.ts
@@ -0,0 +1,17 @@
+import { View } from "./view";
+
+export class InfoView extends View {
+
+  constructor(idOrContainer: HTMLElement | string) {
+    super(idOrContainer);
+    fetch("info-view.html")
+      .then(response => response.text())
+      .then(htmlText => this.divNode.innerHTML = htmlText);
+  }
+
+  createViewElement(): HTMLElement {
+    const infoContainer = document.createElement("div");
+    infoContainer.classList.add("info-container");
+    return infoContainer;
+  }
+}
diff --git a/src/third_party/v8/tools/turbolizer/src/node-label.ts b/src/third_party/v8/tools/turbolizer/src/node-label.ts
new file mode 100644
index 0000000..6e7d41d
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/src/node-label.ts
@@ -0,0 +1,86 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function formatOrigin(origin) {
+  if (origin.nodeId) {
+    return `#${origin.nodeId} in phase ${origin.phase}/${origin.reducer}`;
+  }
+  if (origin.bytecodePosition) {
+    return `Bytecode line ${origin.bytecodePosition} in phase ${origin.phase}/${origin.reducer}`;
+  }
+  return "unknown origin";
+}
+
+export class NodeLabel {
+  id: number;
+  label: string;
+  title: string;
+  live: boolean;
+  properties: string;
+  sourcePosition: any;
+  origin: any;
+  opcode: string;
+  control: boolean;
+  opinfo: string;
+  type: string;
+  inplaceUpdatePhase: string;
+
+  constructor(id: number, label: string, title: string, live: boolean, properties: string, sourcePosition: any, origin: any, opcode: string, control: boolean, opinfo: string, type: string) {
+    this.id = id;
+    this.label = label;
+    this.title = title;
+    this.live = live;
+    this.properties = properties;
+    this.sourcePosition = sourcePosition;
+    this.origin = origin;
+    this.opcode = opcode;
+    this.control = control;
+    this.opinfo = opinfo;
+    this.type = type;
+    this.inplaceUpdatePhase = null;
+  }
+
+  equals(that?: NodeLabel) {
+    if (!that) return false;
+    if (this.id != that.id) return false;
+    if (this.label != that.label) return false;
+    if (this.title != that.title) return false;
+    if (this.live != that.live) return false;
+    if (this.properties != that.properties) return false;
+    if (this.opcode != that.opcode) return false;
+    if (this.control != that.control) return false;
+    if (this.opinfo != that.opinfo) return false;
+    if (this.type != that.type) return false;
+    return true;
+  }
+
+  getTitle() {
+    let propsString = "";
+    if (this.properties === "") {
+      propsString = "no properties";
+    } else {
+      propsString = "[" + this.properties + "]";
+    }
+    let title = this.title + "\n" + propsString + "\n" + this.opinfo;
+    if (this.origin) {
+      title += `\nOrigin: ${formatOrigin(this.origin)}`;
+    }
+    if (this.inplaceUpdatePhase) {
+      title += `\nInplace update in phase: ${this.inplaceUpdatePhase}`;
+    }
+    return title;
+  }
+
+  getDisplayLabel() {
+    const result = `${this.id}: ${this.label}`;
+    if (result.length > 40) {
+      return `${this.id}: ${this.opcode}`;
+    }
+    return result;
+  }
+
+  setInplaceUpdatePhase(name: string): any {
+    this.inplaceUpdatePhase = name;
+  }
+}
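getDisplayLabel truncates long labels: the full "id: label" form is used unless it exceeds 40
characters, in which case the shorter "id: opcode" form is shown. The rule, extracted as a
standalone function with made-up values:

function displayLabel(id: number, label: string, opcode: string): string {
  const result = `${id}: ${label}`;
  return result.length > 40 ? `${id}: ${opcode}` : result;
}

console.log(displayLabel(42, "Phi", "Phi"));  // "42: Phi"
console.log(displayLabel(7, "LoadField[+24, taggedBase, kRepTaggedPointer]", "LoadField"));
// Prints "7: LoadField" because the full form is longer than 40 characters.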
diff --git a/src/third_party/v8/tools/turbolizer/src/node.ts b/src/third_party/v8/tools/turbolizer/src/node.ts
new file mode 100644
index 0000000..02906d1
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/src/node.ts
@@ -0,0 +1,180 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import { MINIMUM_EDGE_SEPARATION, Edge } from "../src/edge";
+import { NodeLabel } from "./node-label";
+import { MAX_RANK_SENTINEL } from "./constants";
+import { alignUp, measureText } from "./util";
+
+export const DEFAULT_NODE_BUBBLE_RADIUS = 12;
+export const NODE_INPUT_WIDTH = 50;
+export const MINIMUM_NODE_OUTPUT_APPROACH = 15;
+const MINIMUM_NODE_INPUT_APPROACH = 15 + 2 * DEFAULT_NODE_BUBBLE_RADIUS;
+
+export class GNode {
+  id: number;
+  nodeLabel: NodeLabel;
+  displayLabel: string;
+  inputs: Array<Edge>;
+  outputs: Array<Edge>;
+  visible: boolean;
+  x: number;
+  y: number;
+  rank: number;
+  outputApproach: number;
+  cfg: boolean;
+  labelbbox: { width: number, height: number };
+  width: number;
+  normalheight: number;
+  visitOrderWithinRank: number;
+
+  constructor(nodeLabel: NodeLabel) {
+    this.id = nodeLabel.id;
+    this.nodeLabel = nodeLabel;
+    this.displayLabel = nodeLabel.getDisplayLabel();
+    this.inputs = [];
+    this.outputs = [];
+    this.visible = false;
+    this.x = 0;
+    this.y = 0;
+    this.rank = MAX_RANK_SENTINEL;
+    this.outputApproach = MINIMUM_NODE_OUTPUT_APPROACH;
+    // Every control node is a CFG node.
+    this.cfg = nodeLabel.control;
+    this.labelbbox = measureText(this.displayLabel);
+    const typebbox = measureText(this.getDisplayType());
+    const innerwidth = Math.max(this.labelbbox.width, typebbox.width);
+    this.width = alignUp(innerwidth + NODE_INPUT_WIDTH * 2,
+      NODE_INPUT_WIDTH);
+    const innerheight = Math.max(this.labelbbox.height, typebbox.height);
+    this.normalheight = innerheight + 20;
+    this.visitOrderWithinRank = 0;
+  }
+
+  isControl() {
+    return this.nodeLabel.control;
+  }
+  isInput() {
+    return this.nodeLabel.opcode == 'Parameter' || this.nodeLabel.opcode.endsWith('Constant');
+  }
+  isLive() {
+    return this.nodeLabel.live !== false;
+  }
+  isJavaScript() {
+    return this.nodeLabel.opcode.startsWith('JS');
+  }
+  isSimplified() {
+    if (this.isJavaScript()) return false;
+    const opcode = this.nodeLabel.opcode;
+    return opcode.endsWith('Phi') ||
+      opcode.startsWith('Boolean') ||
+      opcode.startsWith('Number') ||
+      opcode.startsWith('String') ||
+      opcode.startsWith('Change') ||
+      opcode.startsWith('Object') ||
+      opcode.startsWith('Reference') ||
+      opcode.startsWith('Any') ||
+      opcode.endsWith('ToNumber') ||
+      (opcode == 'AnyToBoolean') ||
+      (opcode.startsWith('Load') && opcode.length > 4) ||
+      (opcode.startsWith('Store') && opcode.length > 5);
+  }
+  isMachine() {
+    return !(this.isControl() || this.isInput() ||
+      this.isJavaScript() || this.isSimplified());
+  }
+  getTotalNodeWidth() {
+    const inputWidth = this.inputs.length * NODE_INPUT_WIDTH;
+    return Math.max(inputWidth, this.width);
+  }
+  getTitle() {
+    return this.nodeLabel.getTitle();
+  }
+  getDisplayLabel() {
+    return this.nodeLabel.getDisplayLabel();
+  }
+  getType() {
+    return this.nodeLabel.type;
+  }
+  getDisplayType() {
+    let typeString = this.nodeLabel.type;
+    if (typeString == undefined) return "";
+    if (typeString.length > 24) {
+      typeString = typeString.substr(0, 25) + "...";
+    }
+    return typeString;
+  }
+  deepestInputRank() {
+    let deepestRank = 0;
+    this.inputs.forEach(function (e) {
+      if (e.isVisible() && !e.isBackEdge()) {
+        if (e.source.rank > deepestRank) {
+          deepestRank = e.source.rank;
+        }
+      }
+    });
+    return deepestRank;
+  }
+  areAnyOutputsVisible() {
+    let visibleCount = 0;
+    this.outputs.forEach(function (e) { if (e.isVisible())++visibleCount; });
+    if (this.outputs.length == visibleCount) return 2;
+    if (visibleCount != 0) return 1;
+    return 0;
+  }
+  setOutputVisibility(v) {
+    let result = false;
+    this.outputs.forEach(function (e) {
+      e.visible = v;
+      if (v) {
+        if (!e.target.visible) {
+          e.target.visible = true;
+          result = true;
+        }
+      }
+    });
+    return result;
+  }
+  setInputVisibility(i, v) {
+    const edge = this.inputs[i];
+    edge.visible = v;
+    if (v) {
+      if (!edge.source.visible) {
+        edge.source.visible = true;
+        return true;
+      }
+    }
+    return false;
+  }
+  getInputApproach(index) {
+    return this.y - MINIMUM_NODE_INPUT_APPROACH -
+      (index % 4) * MINIMUM_EDGE_SEPARATION - DEFAULT_NODE_BUBBLE_RADIUS;
+  }
+  getNodeHeight(showTypes: boolean): number {
+    if (showTypes) {
+      return this.normalheight + this.labelbbox.height;
+    } else {
+      return this.normalheight;
+    }
+  }
+  getOutputApproach(showTypes: boolean) {
+    return this.y + this.outputApproach + this.getNodeHeight(showTypes) +
+      DEFAULT_NODE_BUBBLE_RADIUS;
+  }
+  getInputX(index) {
+    const result = this.getTotalNodeWidth() - (NODE_INPUT_WIDTH / 2) +
+      (index - this.inputs.length + 1) * NODE_INPUT_WIDTH;
+    return result;
+  }
+  getOutputX() {
+    return this.getTotalNodeWidth() - (NODE_INPUT_WIDTH / 2);
+  }
+  hasBackEdges() {
+    return (this.nodeLabel.opcode == "Loop") ||
+      ((this.nodeLabel.opcode == "Phi" || this.nodeLabel.opcode == "EffectPhi" || this.nodeLabel.opcode == "InductionVariablePhi") &&
+        this.inputs[this.inputs.length - 1].source.nodeLabel.opcode == "Loop");
+  }
+}
+
+export const nodeToStr = (n: GNode) => "N" + n.id;
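The node width computed in the GNode constructor rounds the measured label width up to a
multiple of NODE_INPUT_WIDTH (assuming util.alignUp rounds up to the next multiple), and
getTotalNodeWidth then takes the larger of that and the space the input bubbles need. A worked
sketch with illustrative numbers:

// Mirrors the exported constant; alignUp is assumed to round up to a multiple of `align`.
const NODE_INPUT_WIDTH = 50;
const alignUp = (raw: number, align: number) => Math.ceil(raw / align) * align;

// A node whose label measures 130px wide and that has 5 inputs:
const innerwidth = 130;
const width = alignUp(innerwidth + NODE_INPUT_WIDTH * 2, NODE_INPUT_WIDTH);  // 230 -> 250
const inputWidth = 5 * NODE_INPUT_WIDTH;                                     // 250
console.log(Math.max(inputWidth, width));                                    // total width: 250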
diff --git a/src/third_party/v8/tools/turbolizer/src/range-view.ts b/src/third_party/v8/tools/turbolizer/src/range-view.ts
new file mode 100644
index 0000000..17058e4
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/src/range-view.ts
@@ -0,0 +1,938 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import { createElement } from "../src/util";
+import { SequenceView } from "../src/sequence-view";
+import { RegisterAllocation, Range, ChildRange, Interval } from "../src/source-resolver";
+
+class Constants {
+  // Determines how many rows each div group holds for the purposes of
+  // hiding by syncHidden.
+  static readonly ROW_GROUP_SIZE = 20;
+  static readonly POSITIONS_PER_INSTRUCTION = 4;
+  static readonly FIXED_REGISTER_LABEL_WIDTH = 6;
+
+  static readonly INTERVAL_TEXT_FOR_NONE = "none";
+  static readonly INTERVAL_TEXT_FOR_CONST = "const";
+  static readonly INTERVAL_TEXT_FOR_STACK = "stack:";
+}
+
+// This class holds references to the HTMLElements that represent each cell.
+class Grid {
+  elements: Array<Array<HTMLElement>>;
+
+  constructor() {
+    this.elements = [];
+  }
+
+  setRow(row: number, elementsRow: Array<HTMLElement>) {
+    this.elements[row] = elementsRow;
+  }
+
+  getCell(row: number, column: number) {
+    return this.elements[row][column];
+  }
+
+  getInterval(row: number, column: number) {
+    // The cell is within an inner wrapper div which is within the interval div.
+    return this.getCell(row, column).parentElement.parentElement;
+  }
+}
+
+// This class is used as a wrapper to hide the switch between the
+// two different Grid objects used, one for each phase,
+// before and after register allocation.
+class GridAccessor {
+  sequenceView: SequenceView;
+  grids: Map<number, Grid>;
+
+  constructor(sequenceView: SequenceView) {
+    this.sequenceView = sequenceView;
+    this.grids = new Map<number, Grid>();
+  }
+
+  private currentGrid() {
+    return this.grids.get(this.sequenceView.currentPhaseIndex);
+  }
+
+  getAnyGrid() {
+    return this.grids.values().next().value;
+  }
+
+  hasGrid() {
+    return this.grids.has(this.sequenceView.currentPhaseIndex);
+  }
+
+  addGrid(grid: Grid) {
+    if (this.hasGrid()) console.warn("Overwriting existing Grid.");
+    this.grids.set(this.sequenceView.currentPhaseIndex, grid);
+  }
+
+  getCell(row: number, column: number) {
+    return this.currentGrid().getCell(row, column);
+  }
+
+  getInterval(row: number, column: number) {
+    return this.currentGrid().getInterval(row, column);
+  }
+}
+
+// This class is used as a wrapper to access the interval HTMLElements
+class IntervalElementsAccessor {
+  sequenceView: SequenceView;
+  map: Map<number, Array<HTMLElement>>;
+
+  constructor(sequenceView: SequenceView) {
+    this.sequenceView = sequenceView;
+    this.map = new Map<number, Array<HTMLElement>>();
+  }
+
+  private currentIntervals() {
+    const intervals = this.map.get(this.sequenceView.currentPhaseIndex);
+    if (intervals == undefined) {
+      this.map.set(this.sequenceView.currentPhaseIndex, new Array<HTMLElement>());
+      return this.currentIntervals();
+    }
+    return intervals;
+  }
+
+  addInterval(interval: HTMLElement) {
+    this.currentIntervals().push(interval);
+  }
+
+  forEachInterval(callback: (phase: number, interval: HTMLElement) => void) {
+    for (const phase of this.map.keys()) {
+      for (const interval of this.map.get(phase)) {
+        callback(phase, interval);
+      }
+    }
+  }
+}
+
+// A simple class that holds a pair of Range objects, allowing the normal and deferred fixed
+// register live ranges to be combined into a single row.
+class RangePair {
+  ranges: [Range, Range];
+
+  constructor(ranges: [Range, Range]) {
+    this.ranges = ranges;
+  }
+
+  forEachRange(callback: (range: Range) => void) {
+    this.ranges.forEach((range: Range) => { if (range) callback(range); });
+  }
+}
+
+// RangeView requires a number of CSS variables that describe the dimensions of HTMLElements.
+class CSSVariables {
+  positionWidth: number;
+  blockBorderWidth: number;
+
+  constructor() {
+    const getNumberValue = varName => {
+      return parseFloat(getComputedStyle(document.body)
+             .getPropertyValue(varName).match(/[+-]?\d+(\.\d+)?/g)[0]);
+    };
+    this.positionWidth = getNumberValue("--range-position-width");
+    this.blockBorderWidth = getNumberValue("--range-block-border");
+  }
+}
+
+// Store the required data from the blocks JSON.
+class BlocksData {
+  blockBorders: Set<number>;
+  blockInstructionCountMap: Map<number, number>;
+
+  constructor(blocks: Array<any>) {
+    this.blockBorders = new Set<number>();
+    this.blockInstructionCountMap = new Map<number, number>();
+    for (const block of blocks) {
+      this.blockInstructionCountMap.set(block.id, block.instructions.length);
+      const maxInstructionInBlock = block.instructions[block.instructions.length - 1].id;
+      this.blockBorders.add(maxInstructionInBlock);
+    }
+  }
+
+  isInstructionBorder(position: number) {
+    return ((position + 1) % Constants.POSITIONS_PER_INSTRUCTION) == 0;
+  }
+
+  isBlockBorder(position: number) {
+    return this.isInstructionBorder(position)
+        && this.blockBorders.has(Math.floor(position / Constants.POSITIONS_PER_INSTRUCTION));
+  }
+}
+
+class Divs {
+  // Already existing.
+  container: HTMLElement;
+  resizerBar: HTMLElement;
+  snapper: HTMLElement;
+
+  // Created by constructor.
+  content: HTMLElement;
+  // showOnLoad contains all content that may change depending on the JSON.
+  showOnLoad: HTMLElement;
+  xAxisLabel: HTMLElement;
+  yAxisLabel: HTMLElement;
+  registerHeaders: HTMLElement;
+  registers: HTMLElement;
+
+  // Assigned from RangeView.
+  wholeHeader: HTMLElement;
+  positionHeaders: HTMLElement;
+  yAxis: HTMLElement;
+  grid: HTMLElement;
+
+  constructor() {
+    this.container = document.getElementById("ranges");
+    this.resizerBar = document.getElementById("resizer-ranges");
+    this.snapper = document.getElementById("show-hide-ranges");
+
+    this.content = document.createElement("div");
+    this.content.appendChild(this.elementForTitle());
+
+    this.showOnLoad = document.createElement("div");
+    this.showOnLoad.style.visibility = "hidden";
+    this.content.appendChild(this.showOnLoad);
+
+    this.xAxisLabel = createElement("div", "range-header-label-x");
+    this.xAxisLabel.innerText = "Blocks, Instructions, and Positions";
+    this.showOnLoad.appendChild(this.xAxisLabel);
+    this.yAxisLabel = createElement("div", "range-header-label-y");
+    this.yAxisLabel.innerText = "Registers";
+    this.showOnLoad.appendChild(this.yAxisLabel);
+
+    this.registerHeaders = createElement("div", "range-register-labels");
+    this.registers = createElement("div", "range-registers");
+    this.registerHeaders.appendChild(this.registers);
+  }
+
+  elementForTitle() {
+    const titleEl = createElement("div", "range-title-div");
+    const titleBar = createElement("div", "range-title");
+    titleBar.appendChild(createElement("div", "", "Live Ranges"));
+    const titleHelp = createElement("div", "range-title-help", "?");
+    titleHelp.title = "Each row represents a single TopLevelLiveRange (or two if deferred exists)."
+      + "\nEach interval belongs to a LiveRange contained within that row's TopLevelLiveRange."
+      + "\nAn interval is identified by i, the index of the LiveRange within the TopLevelLiveRange,"
+      + "\nand j, the index of the interval within the LiveRange, to give i:j.";
+    titleEl.appendChild(titleBar);
+    titleEl.appendChild(titleHelp);
+    return titleEl;
+  }
+}
+
+class Helper {
+  static virtualRegisterName(registerIndex: string) {
+    return "v" + registerIndex;
+  }
+
+  static fixedRegisterName(range: Range) {
+    return range.child_ranges[0].op.text;
+  }
+
+  static getPositionElementsFromInterval(interval: HTMLElement) {
+    return interval.children[1].children;
+  }
+
+  static forEachFixedRange(source: RegisterAllocation, row: number,
+                           callback: (registerIndex: string, row: number, registerName: string,
+                                      ranges: RangePair) => void) {
+
+    const forEachRangeInMap = (rangeMap: Map<string, Range>) => {
+      // There are two fixed live ranges for each register, one for normal, another for deferred.
+      // These are combined into a single row.
+      const fixedRegisterMap = new Map<string, {ranges: [Range, Range], registerIndex: number}>();
+      for (const [registerIndex, range] of rangeMap) {
+        const registerName = this.fixedRegisterName(range);
+        if (fixedRegisterMap.has(registerName)) {
+          const entry = fixedRegisterMap.get(registerName);
+          entry.ranges[1] = range;
+          // Only use the deferred register index if no normal index exists.
+          if (!range.is_deferred) {
+            entry.registerIndex = parseInt(registerIndex, 10);
+          }
+        } else {
+          fixedRegisterMap.set(registerName, {ranges: [range, undefined],
+                                              registerIndex: parseInt(registerIndex, 10)});
+        }
+      }
+      // Sort the registers by number.
+      const sortedMap = new Map([...fixedRegisterMap.entries()].sort(([nameA, _], [nameB, __]) => {
+        // Larger numbers create longer strings.
+        if (nameA.length > nameB.length) return 1;
+        if (nameA.length < nameB.length) return -1;
+        // Sort lexicographically if same length.
+        if (nameA > nameB) return 1;
+        if (nameA < nameB) return -1;
+        return 0;
+      }));
+      for (const [registerName, {ranges, registerIndex}] of sortedMap) {
+        callback("" + (-registerIndex - 1), row, registerName, new RangePair(ranges));
+        ++row;
+      }
+    };
+
+    forEachRangeInMap(source.fixedLiveRanges);
+    forEachRangeInMap(source.fixedDoubleLiveRanges);
+
+    return row;
+  }
+}
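The sort in forEachFixedRange orders register names by length first and lexicographically
second, which yields numeric ordering of register names without parsing the number. For
example, on sample names:

const names = ["r10", "r2", "d1", "r9"];
names.sort((a, b) => {
  if (a.length > b.length) return 1;
  if (a.length < b.length) return -1;
  if (a > b) return 1;
  if (a < b) return -1;
  return 0;
});
console.log(names);  // ["d1", "r2", "r9", "r10"]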
+
+class RowConstructor {
+  view: RangeView;
+
+  constructor(view: RangeView) {
+    this.view = view;
+  }
+
+  // Constructs the row of HTMLElements for the grid while providing a callback for each position
+  // depending on whether that position is the start of an interval or not.
+  // RangePair is used to allow the two fixed register live ranges of normal and deferred to be
+  // easily combined into a single row.
+  construct(grid: Grid, row: number, registerIndex: string, ranges: RangePair,
+            getElementForEmptyPosition: (position: number) => HTMLElement,
+            callbackForInterval: (position: number, interval: HTMLElement) => void) {
+    const positionArray = new Array<HTMLElement>(this.view.numPositions);
+    // Construct all of the new intervals.
+    const intervalMap = this.elementsForIntervals(registerIndex, ranges);
+    for (let position = 0; position < this.view.numPositions; ++position) {
+      const interval = intervalMap.get(position);
+      if (interval == undefined) {
+        positionArray[position] = getElementForEmptyPosition(position);
+      } else {
+        callbackForInterval(position, interval);
+        this.view.intervalsAccessor.addInterval(interval);
+        const intervalPositionElements = Helper.getPositionElementsFromInterval(interval);
+        for (let j = 0; j < intervalPositionElements.length; ++j) {
+          // Point positionArray to the new elements.
+          positionArray[position + j] = (intervalPositionElements[j] as HTMLElement);
+        }
+        position += intervalPositionElements.length - 1;
+      }
+    }
+    grid.setRow(row, positionArray);
+    ranges.forEachRange((range: Range) => this.setUses(grid, row, range));
+  }
+
+  // This is the main function used to build new intervals.
+  // Returns a map of LifeTimePositions to intervals.
+  private elementsForIntervals(registerIndex: string, ranges: RangePair) {
+    const intervalMap = new Map<number, HTMLElement>();
+    let tooltip = "";
+    ranges.forEachRange((range: Range) => {
+      for (const childRange of range.child_ranges) {
+        switch (childRange.type) {
+          case "none":
+            tooltip = Constants.INTERVAL_TEXT_FOR_NONE;
+            break;
+          case "spill_range":
+            tooltip = Constants.INTERVAL_TEXT_FOR_STACK + registerIndex;
+            break;
+          default:
+            if (childRange.op.type == "constant") {
+              tooltip = Constants.INTERVAL_TEXT_FOR_CONST;
+            } else {
+              if (childRange.op.text) {
+                tooltip = childRange.op.text;
+              } else {
+                tooltip = childRange.op;
+              }
+            }
+            break;
+        }
+        childRange.intervals.forEach((intervalNums, index) => {
+          const interval = new Interval(intervalNums);
+          const intervalEl = this.elementForInterval(childRange, interval, tooltip,
+                                                     index, range.is_deferred);
+          intervalMap.set(interval.start, intervalEl);
+        });
+      }
+    });
+    return intervalMap;
+  }
+
+  private elementForInterval(childRange: ChildRange, interval: Interval,
+                             tooltip: string, index: number, isDeferred: boolean): HTMLElement {
+    const intervalEl = createElement("div", "range-interval");
+    const title = childRange.id + ":" + index + " " + tooltip;
+    intervalEl.setAttribute("title", isDeferred ? "deferred: " + title : title);
+    this.setIntervalColor(intervalEl, tooltip);
+    const intervalInnerWrapper = createElement("div", "range-interval-wrapper");
+    intervalEl.style.gridColumn = (interval.start + 1) + " / " + (interval.end + 1);
+    intervalInnerWrapper.style.gridTemplateColumns = "repeat(" + (interval.end - interval.start)
+                                        + ",calc(" + this.view.cssVariables.positionWidth + "ch + "
+                                        + this.view.cssVariables.blockBorderWidth + "px)";
+    const intervalTextEl = this.elementForIntervalString(tooltip, interval.end - interval.start);
+    intervalEl.appendChild(intervalTextEl);
+    for (let i = interval.start; i < interval.end; ++i) {
+      const classes = "range-position range-interval-position range-empty"
+                    + (this.view.blocksData.isBlockBorder(i) ? " range-block-border" :
+                      this.view.blocksData.isInstructionBorder(i) ? " range-instr-border" : "");
+      const positionEl = createElement("div", classes, "_");
+      positionEl.style.gridColumn = (i - interval.start + 1) + "";
+      intervalInnerWrapper.appendChild(positionEl);
+    }
+    intervalEl.appendChild(intervalInnerWrapper);
+    return intervalEl;
+  }
+
+  private setIntervalColor(interval: HTMLElement, tooltip: string) {
+    if (tooltip.includes(Constants.INTERVAL_TEXT_FOR_NONE)) return;
+    if (tooltip.includes(Constants.INTERVAL_TEXT_FOR_STACK + "-")) {
+      interval.style.backgroundColor = "rgb(250, 158, 168)";
+    } else if (tooltip.includes(Constants.INTERVAL_TEXT_FOR_STACK)) {
+      interval.style.backgroundColor = "rgb(250, 158, 100)";
+    } else if (tooltip.includes(Constants.INTERVAL_TEXT_FOR_CONST)) {
+      interval.style.backgroundColor = "rgb(153, 158, 230)";
+    } else {
+      interval.style.backgroundColor = "rgb(153, 220, 168)";
+    }
+  }
+
+  private elementForIntervalString(tooltip: string, numCells: number) {
+    const spanEl = createElement("span", "range-interval-text");
+    this.setIntervalString(spanEl, tooltip, numCells);
+    return spanEl;
+  }
+
+  // Each interval displays a string of information about it.
+  private setIntervalString(spanEl: HTMLElement, tooltip: string, numCells: number) {
+    const spacePerCell = this.view.cssVariables.positionWidth;
+    // Half a character of space is reserved to accommodate padding.
+    const spaceAvailable = (numCells * spacePerCell) - 0.5;
+    let str = tooltip + "";
+    const length = tooltip.length;
+    spanEl.style.width = null;
+    let paddingLeft = null;
+    // Add padding if possible
+    if (length <= spaceAvailable) {
+      paddingLeft = (length == spaceAvailable) ? "0.5ch" : "1ch";
+    } else {
+      str = "";
+    }
+    spanEl.style.paddingTop = null;
+    spanEl.style.paddingLeft = paddingLeft;
+    spanEl.innerHTML = str;
+  }
+
+  private setUses(grid: Grid, row: number, range: Range) {
+    for (const liveRange of range.child_ranges) {
+      if (liveRange.uses) {
+        for (const use of liveRange.uses) {
+          grid.getCell(row, use).classList.toggle("range-use", true);
+        }
+      }
+    }
+  }
+}
+
+class RangeViewConstructor {
+  view: RangeView;
+  gridTemplateColumns: string;
+  grid: Grid;
+
+  // Group the rows in divs to make hiding/showing divs more efficient.
+  currentGroup: HTMLElement;
+  currentPlaceholderGroup: HTMLElement;
+
+  constructor(rangeView: RangeView) {
+    this.view = rangeView;
+  }
+
+  construct() {
+    this.gridTemplateColumns = "repeat(" + this.view.numPositions
+                             + ",calc(" + this.view.cssVariables.positionWidth + "ch + "
+                             + this.view.cssVariables.blockBorderWidth + "px)";
+
+    this.grid = new Grid();
+    this.view.gridAccessor.addGrid(this.grid);
+
+    this.view.divs.wholeHeader = this.elementForHeader();
+    this.view.divs.showOnLoad.appendChild(this.view.divs.wholeHeader);
+
+    const gridContainer = document.createElement("div");
+    this.view.divs.grid = this.elementForGrid();
+    this.view.divs.yAxis = createElement("div", "range-y-axis");
+    this.view.divs.yAxis.appendChild(this.view.divs.registerHeaders);
+    this.view.divs.yAxis.onscroll = () => {
+      this.view.scrollHandler.syncScroll(ToSync.TOP, this.view.divs.yAxis, this.view.divs.grid);
+      this.view.scrollHandler.saveScroll();
+    };
+    gridContainer.appendChild(this.view.divs.yAxis);
+    gridContainer.appendChild(this.view.divs.grid);
+    this.view.divs.showOnLoad.appendChild(gridContainer);
+
+    this.resetGroups();
+    let row = 0;
+    row = this.addVirtualRanges(row);
+    this.addFixedRanges(row);
+  }
+
+  // The following three functions construct the groups that contain the rows and make up the
+  // grid, so that groups of rows can be shown and hidden cheaply for performance. As rows are
+  // constructed, they are added to the currentGroup div. Each row in currentGroup is matched with
+  // an equivalent placeholder row in currentPlaceholderGroup that is shown when currentGroup is
+  // hidden, so as to maintain the dimensions and scroll positions of the grid.
+
+  private resetGroups () {
+    this.currentGroup = createElement("div", "range-positions-group range-hidden");
+    this.currentPlaceholderGroup = createElement("div", "range-positions-group");
+  }
+
+  private appendGroupsToGrid() {
+    this.view.divs.grid.appendChild(this.currentPlaceholderGroup);
+    this.view.divs.grid.appendChild(this.currentGroup);
+  }
+
+  private addRowToGroup(row: number, rowEl: HTMLElement) {
+    this.currentGroup.appendChild(rowEl);
+    this.currentPlaceholderGroup
+        .appendChild(createElement("div", "range-positions range-positions-placeholder", "_"));
+    if ((row + 1) % Constants.ROW_GROUP_SIZE == 0) {
+      this.appendGroupsToGrid();
+      this.resetGroups();
+    }
+  }
+
+  private addVirtualRanges(row: number) {
+    const source = this.view.sequenceView.sequence.register_allocation;
+    for (const [registerIndex, range] of source.liveRanges) {
+      const registerName = Helper.virtualRegisterName(registerIndex);
+      const registerEl = this.elementForVirtualRegister(registerName);
+      this.addRowToGroup(row, this.elementForRow(row, registerIndex,
+                                                 new RangePair([range, undefined])));
+      this.view.divs.registers.appendChild(registerEl);
+      ++row;
+    }
+    return row;
+  }
+
+  private addFixedRanges(row: number) {
+    row = Helper.forEachFixedRange(this.view.sequenceView.sequence.register_allocation, row,
+                                   (registerIndex: string, row: number,
+                                    registerName: string, ranges: RangePair) => {
+      const registerEl = this.elementForFixedRegister(registerName);
+      this.addRowToGroup(row, this.elementForRow(row, registerIndex, ranges));
+      this.view.divs.registers.appendChild(registerEl);
+    });
+    if (row % Constants.ROW_GROUP_SIZE != 0) {
+      this.appendGroupsToGrid();
+    }
+  }
+
+  // Each row of positions and intervals associated with a register is contained in a single
+  // HTMLElement. RangePair is used to allow the two fixed register live ranges of normal and
+  // deferred to be easily combined into a single row.
+  private elementForRow(row: number, registerIndex: string, ranges: RangePair) {
+    const rowEl = createElement("div", "range-positions");
+    rowEl.style.gridTemplateColumns = this.gridTemplateColumns;
+
+    const getElementForEmptyPosition = (position: number) => {
+      const blockBorder = this.view.blocksData.isBlockBorder(position);
+      const classes = "range-position range-empty "
+                    + (blockBorder ? "range-block-border" :
+                    this.view.blocksData.isInstructionBorder(position) ? "range-instr-border"
+                                                                       : "range-position-border");
+      const positionEl = createElement("div", classes, "_");
+      positionEl.style.gridColumn = (position + 1) + "";
+      rowEl.appendChild(positionEl);
+      return positionEl;
+    };
+
+    const callbackForInterval = (_, interval: HTMLElement) => {
+      rowEl.appendChild(interval);
+    };
+
+    this.view.rowConstructor.construct(this.grid, row, registerIndex, ranges,
+                                       getElementForEmptyPosition, callbackForInterval);
+    return rowEl;
+  }
+
+  private elementForVirtualRegister(registerName: string) {
+    const regEl = createElement("div", "range-reg", registerName);
+    regEl.setAttribute("title", registerName);
+    return regEl;
+  }
+
+  private elementForFixedRegister(registerName: string) {
+    let text = registerName;
+    const span = "".padEnd(Constants.FIXED_REGISTER_LABEL_WIDTH - text.length, "_");
+    text = "HW - <span class='range-transparent'>" + span + "</span>" + text;
+    const regEl = createElement("div", "range-reg");
+    regEl.innerHTML = text;
+    regEl.setAttribute("title", registerName);
+    return regEl;
+  }
+
+  // The header element contains the three headers for the LifeTimePosition axis.
+  private elementForHeader() {
+    const headerEl = createElement("div", "range-header");
+    this.view.divs.positionHeaders = createElement("div", "range-position-labels");
+
+    this.view.divs.positionHeaders.appendChild(this.elementForBlockHeader());
+    this.view.divs.positionHeaders.appendChild(this.elementForInstructionHeader());
+    this.view.divs.positionHeaders.appendChild(this.elementForPositionHeader());
+
+    headerEl.appendChild(this.view.divs.positionHeaders);
+    headerEl.onscroll = () => {
+      this.view.scrollHandler.syncScroll(ToSync.LEFT,
+                                         this.view.divs.wholeHeader, this.view.divs.grid);
+      this.view.scrollHandler.saveScroll();
+    };
+    return headerEl;
+  }
+
+  // The LifeTimePosition axis shows three headers, for positions, instructions, and blocks.
+
+  private elementForBlockHeader() {
+    const headerEl = createElement("div", "range-block-ids");
+    headerEl.style.gridTemplateColumns = this.gridTemplateColumns;
+
+    const elementForBlockIndex = (index: number, firstInstruction: number, instrCount: number) => {
+      const str = "B" + index;
+      const element =
+        createElement("div", "range-block-id range-header-element range-block-border", str);
+      element.setAttribute("title", str);
+      const firstGridCol = (firstInstruction * Constants.POSITIONS_PER_INSTRUCTION) + 1;
+      const lastGridCol = firstGridCol + (instrCount * Constants.POSITIONS_PER_INSTRUCTION);
+      element.style.gridColumn = firstGridCol + " / " + lastGridCol;
+      return element;
+    };
+
+    let blockIndex = 0;
+    for (let i = 0; i < this.view.sequenceView.numInstructions;) {
+      const instrCount = this.view.blocksData.blockInstructionCountMap.get(blockIndex);
+      headerEl.appendChild(elementForBlockIndex(blockIndex, i, instrCount));
+      ++blockIndex;
+      i += instrCount;
+    }
+    return headerEl;
+  }
+
+  private elementForInstructionHeader() {
+    const headerEl = createElement("div", "range-instruction-ids");
+    headerEl.style.gridTemplateColumns = this.gridTemplateColumns;
+
+    const elementForInstructionIndex = (index: number, isBlockBorder: boolean) => {
+      const classes = "range-instruction-id range-header-element "
+                    + (isBlockBorder ? "range-block-border" : "range-instr-border");
+      const element = createElement("div", classes, "" + index);
+      element.setAttribute("title", "" + index);
+      const firstGridCol = (index * Constants.POSITIONS_PER_INSTRUCTION) + 1;
+      element.style.gridColumn = firstGridCol + " / "
+                               + (firstGridCol + Constants.POSITIONS_PER_INSTRUCTION);
+      return element;
+    };
+
+    for (let i = 0; i < this.view.sequenceView.numInstructions; ++i) {
+      const blockBorder = this.view.blocksData.blockBorders.has(i);
+      headerEl.appendChild(elementForInstructionIndex(i, blockBorder));
+    }
+    return headerEl;
+  }
+
+  private elementForPositionHeader() {
+    const headerEl = createElement("div", "range-positions range-positions-header");
+    headerEl.style.gridTemplateColumns = this.gridTemplateColumns;
+
+    const elementForPositionIndex = (index: number, isBlockBorder: boolean) => {
+      const classes = "range-position range-header-element " +
+        (isBlockBorder ? "range-block-border"
+                       : this.view.blocksData.isInstructionBorder(index) ? "range-instr-border"
+                                                                         : "range-position-border");
+      const element = createElement("div", classes, "" + index);
+      element.setAttribute("title", "" + index);
+      return element;
+    };
+
+    for (let i = 0; i < this.view.numPositions; ++i) {
+      headerEl.appendChild(elementForPositionIndex(i, this.view.blocksData.isBlockBorder(i)));
+    }
+    return headerEl;
+  }
+
+  private elementForGrid() {
+    const gridEl = createElement("div", "range-grid");
+    gridEl.onscroll = () => {
+      this.view.scrollHandler.syncScroll(ToSync.TOP, this.view.divs.grid, this.view.divs.yAxis);
+      this.view.scrollHandler.syncScroll(ToSync.LEFT,
+                                         this.view.divs.grid, this.view.divs.wholeHeader);
+      this.view.scrollHandler.saveScroll();
+    };
+    return gridEl;
+  }
+}
+
+// Handles the work required when the phase is changed.
+// Between before and after register allocation for example.
+class PhaseChangeHandler {
+  view: RangeView;
+
+  constructor(view: RangeView) {
+    this.view = view;
+  }
+
+  // Called when the phase view is switched between before and after register allocation.
+  phaseChange() {
+    if (!this.view.gridAccessor.hasGrid()) {
+      // If this phase view has not been seen yet then the intervals need to be constructed.
+      this.addNewIntervals();
+    }
+    // Show all intervals pertaining to the current phase view.
+    this.view.intervalsAccessor.forEachInterval((phase, interval) => {
+      interval.classList.toggle("range-hidden", phase != this.view.sequenceView.currentPhaseIndex);
+    });
+  }
+
+  private addNewIntervals() {
+    // All Grids should point to the same HTMLElement for empty cells in the grid,
+    // so as to avoid duplication. The current Grid is used to retrieve these elements.
+    const currentGrid = this.view.gridAccessor.getAnyGrid();
+    const newGrid = new Grid();
+    this.view.gridAccessor.addGrid(newGrid);
+    const source = this.view.sequenceView.sequence.register_allocation;
+    let row = 0;
+    for (const [registerIndex, range] of source.liveRanges) {
+      this.addnewIntervalsInRange(currentGrid, newGrid, row, registerIndex,
+                                  new RangePair([range, undefined]));
+      ++row;
+    }
+    Helper.forEachFixedRange(this.view.sequenceView.sequence.register_allocation, row,
+                             (registerIndex, row, _, ranges) => {
+      this.addnewIntervalsInRange(currentGrid, newGrid, row, registerIndex, ranges);
+    });
+  }
+
+  private addnewIntervalsInRange(currentGrid: Grid, newGrid: Grid, row: number,
+                                 registerIndex: string, ranges: RangePair) {
+    const numReplacements = new Map<HTMLElement, number>();
+
+    const getElementForEmptyPosition = (position: number) => {
+      return currentGrid.getCell(row, position);
+    };
+
+    // Inserts new interval beside existing intervals.
+    const callbackForInterval = (position: number, interval: HTMLElement) => {
+      // Overlapping intervals are placed beside each other and the relevant ones displayed.
+      let currentInterval = currentGrid.getInterval(row, position);
+      // The number of intervals already inserted is tracked so that the inserted intervals
+      // are ordered correctly.
+      const intervalsAlreadyInserted = numReplacements.get(currentInterval);
+      numReplacements.set(currentInterval, intervalsAlreadyInserted ? intervalsAlreadyInserted + 1
+                                                                    : 1);
+      if (intervalsAlreadyInserted) {
+        for (let j = 0; j < intervalsAlreadyInserted; ++j) {
+          currentInterval = (currentInterval.nextElementSibling as HTMLElement);
+        }
+      }
+      interval.classList.add("range-hidden");
+      currentInterval.insertAdjacentElement('afterend', interval);
+    };
+
+    this.view.rowConstructor.construct(newGrid, row, registerIndex, ranges,
+                                          getElementForEmptyPosition, callbackForInterval);
+  }
+}
+
+enum ToSync { LEFT, TOP }
+
+// Handles saving and syncing the scroll positions of the grid.
+class ScrollHandler {
+  divs: Divs;
+  scrollTop: number;
+  scrollLeft: number;
+  scrollTopTimeout: NodeJS.Timeout;
+  scrollLeftTimeout: NodeJS.Timeout;
+  scrollTopFunc: (this: GlobalEventHandlers, ev: Event) => any;
+  scrollLeftFunc: (this: GlobalEventHandlers, ev: Event) => any;
+
+  constructor(divs: Divs) {
+    this.divs = divs;
+  }
+
+  // This function is used to hide the rows which are not currently in view and
+  // so reduce the performance cost of things like hit tests and scrolling.
+  syncHidden() {
+
+    const getOffset = (rowEl: HTMLElement, placeholderRowEl: HTMLElement, isHidden: boolean) => {
+      return isHidden ? placeholderRowEl.offsetTop : rowEl.offsetTop;
+    };
+
+    const toHide = new Array<[HTMLElement, HTMLElement]>();
+
+    const sampleCell = this.divs.registers.children[1] as HTMLElement;
+    const buffer = 2 * sampleCell.clientHeight;
+    const min = this.divs.grid.offsetTop + this.divs.grid.scrollTop - buffer;
+    const max = min + this.divs.grid.clientHeight + buffer;
+
+    // The rows are grouped inside group divs so that whole groups of rows can be shown and
+    // hidden at a lower performance cost. Each row in the mainGroup div is matched with an
+    // equivalent placeholder row in the placeholderGroup div that is shown when mainGroup is
+    // hidden, so as to maintain the dimensions and scroll positions of the grid.
+
+    const rangeGroups = this.divs.grid.children;
+    for (let i = 1; i < rangeGroups.length; i += 2) {
+      const mainGroup = rangeGroups[i] as HTMLElement;
+      const placeholderGroup = rangeGroups[i - 1] as HTMLElement;
+      const isHidden = mainGroup.classList.contains("range-hidden");
+      // The offsets are used to calculate whether the group is in view.
+      const offsetMin = getOffset(mainGroup.firstChild as HTMLElement,
+                                  placeholderGroup.firstChild as HTMLElement, isHidden);
+      const offsetMax = getOffset(mainGroup.lastChild as HTMLElement,
+                                  placeholderGroup.lastChild as HTMLElement, isHidden);
+      if (offsetMax > min && offsetMin < max) {
+        if (isHidden) {
+          // Show the rows, hide the placeholders.
+          mainGroup.classList.toggle("range-hidden", false);
+          placeholderGroup.classList.toggle("range-hidden", true);
+        }
+      } else if (!isHidden) {
+        // Only hide the rows once the new rows are shown so that scrollLeft is not lost.
+        toHide.push([mainGroup, placeholderGroup]);
+      }
+    }
+    for (const [mainGroup, placeholderGroup] of toHide) {
+      // Hide the rows, show the placeholders.
+      mainGroup.classList.toggle("range-hidden", true);
+      placeholderGroup.classList.toggle("range-hidden", false);
+    }
+  }
+
+  // This function is required to keep the axes labels in line with the grid
+  // content when scrolling.
+  syncScroll(toSync: ToSync, source: HTMLElement, target: HTMLElement) {
+    // Continually delay timeout until scrolling has stopped.
+    toSync == ToSync.TOP ? clearTimeout(this.scrollTopTimeout)
+                         : clearTimeout(this.scrollLeftTimeout);
+    if (target.onscroll) {
+      if (toSync == ToSync.TOP) this.scrollTopFunc = target.onscroll;
+      else this.scrollLeftFunc = target.onscroll;
+    }
+    // Clear onscroll to prevent the target syncing back with the source.
+    target.onscroll = null;
+
+    if (toSync == ToSync.TOP) target.scrollTop = source.scrollTop;
+    else target.scrollLeft = source.scrollLeft;
+
+    // Only show / hide the grid content once scrolling has stopped.
+    if (toSync == ToSync.TOP) {
+      this.scrollTopTimeout = setTimeout(() => {
+        target.onscroll = this.scrollTopFunc;
+        this.syncHidden();
+      }, 500);
+    } else {
+      this.scrollLeftTimeout = setTimeout(() => {
+        target.onscroll = this.scrollLeftFunc;
+        this.syncHidden();
+      }, 500);
+    }
+  }
+
+  saveScroll() {
+    this.scrollLeft = this.divs.grid.scrollLeft;
+    this.scrollTop = this.divs.grid.scrollTop;
+  }
+
+  restoreScroll() {
+    if (this.scrollLeft) {
+      this.divs.grid.scrollLeft = this.scrollLeft;
+      this.divs.grid.scrollTop = this.scrollTop;
+    }
+  }
+}
+
+// RangeView displays the live range data as passed in by SequenceView.
+// The data is displayed in a grid format, with the fixed and virtual registers
+// along one axis, and the LifeTimePositions along the other. Each LifeTimePosition
+// is part of an Instruction in SequenceView, which itself is part of an Instruction
+// Block. The live ranges are displayed as intervals, each belonging to a register,
+// and spanning across a certain range of LifeTimePositions.
+// When the phase being displayed changes between before register allocation and
+// after register allocation, only the intervals need to be changed.
+export class RangeView {
+  sequenceView: SequenceView;
+
+  initialized: boolean;
+  isShown: boolean;
+  numPositions: number;
+  cssVariables: CSSVariables;
+  divs: Divs;
+  rowConstructor: RowConstructor;
+  phaseChangeHandler: PhaseChangeHandler;
+  scrollHandler: ScrollHandler;
+  blocksData: BlocksData;
+  intervalsAccessor: IntervalElementsAccessor;
+  gridAccessor: GridAccessor;
+
+  constructor(sequence: SequenceView) {
+    this.initialized = false;
+    this.isShown = false;
+    this.sequenceView = sequence;
+  }
+
+  initializeContent(blocks: Array<any>) {
+    if (!this.initialized) {
+      this.gridAccessor = new GridAccessor(this.sequenceView);
+      this.intervalsAccessor = new IntervalElementsAccessor(this.sequenceView);
+      this.cssVariables = new CSSVariables();
+      this.blocksData = new BlocksData(blocks);
+      this.divs = new Divs();
+      this.scrollHandler = new ScrollHandler(this.divs);
+      this.numPositions = this.sequenceView.numInstructions * Constants.POSITIONS_PER_INSTRUCTION;
+      this.rowConstructor = new RowConstructor(this);
+      const constructor = new RangeViewConstructor(this);
+      constructor.construct();
+      this.phaseChangeHandler = new PhaseChangeHandler(this);
+      this.initialized = true;
+    } else {
+      // If the RangeView has already been initialized then the phase must have
+      // been changed.
+      this.phaseChangeHandler.phaseChange();
+    }
+  }
+
+  show() {
+    if (!this.isShown) {
+      this.isShown = true;
+      this.divs.container.appendChild(this.divs.content);
+      this.divs.resizerBar.style.visibility = "visible";
+      this.divs.container.style.visibility = "visible";
+      this.divs.snapper.style.visibility = "visible";
+      // Dispatch a resize event to ensure that the
+      // panel is shown.
+      window.dispatchEvent(new Event('resize'));
+
+      setTimeout(() => {
+        this.scrollHandler.restoreScroll();
+        this.scrollHandler.syncHidden();
+        this.divs.showOnLoad.style.visibility = "visible";
+      }, 100);
+    }
+  }
+
+  hide() {
+    if (this.initialized) {
+      this.isShown = false;
+      this.divs.container.removeChild(this.divs.content);
+      this.divs.resizerBar.style.visibility = "hidden";
+      this.divs.container.style.visibility = "hidden";
+      this.divs.snapper.style.visibility = "hidden";
+      this.divs.showOnLoad.style.visibility = "hidden";
+    } else {
+      window.document.getElementById('ranges').style.visibility = "hidden";
+    }
+    // Dispatch a resize event to ensure that the
+    // panel is hidden.
+    window.dispatchEvent(new Event('resize'));
+  }
+
+  onresize() {
+    if (this.isShown) this.scrollHandler.syncHidden();
+  }
+}
diff --git a/src/third_party/v8/tools/turbolizer/src/resizer.ts b/src/third_party/v8/tools/turbolizer/src/resizer.ts
new file mode 100644
index 0000000..ce05193
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/src/resizer.ts
@@ -0,0 +1,322 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import * as d3 from "d3";
+import * as C from "../src/constants";
+
+class Snapper {
+  resizer: Resizer;
+  sourceExpand: HTMLElement;
+  sourceCollapse: HTMLElement;
+  disassemblyExpand: HTMLElement;
+  disassemblyCollapse: HTMLElement;
+  rangesExpand: HTMLElement;
+  rangesCollapse: HTMLElement;
+
+  constructor(resizer: Resizer) {
+    this.resizer = resizer;
+    this.sourceExpand = document.getElementById(C.SOURCE_EXPAND_ID);
+    this.sourceCollapse = document.getElementById(C.SOURCE_COLLAPSE_ID);
+    this.disassemblyExpand = document.getElementById(C.DISASSEMBLY_EXPAND_ID);
+    this.disassemblyCollapse = document.getElementById(C.DISASSEMBLY_COLLAPSE_ID);
+    this.rangesExpand = document.getElementById(C.RANGES_EXPAND_ID);
+    this.rangesCollapse = document.getElementById(C.RANGES_COLLAPSE_ID);
+
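+    // Each show/hide button toggles the snapped state of its resizer,
+    // flips the expand/collapse icons, and relays out the panes.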
+    document.getElementById("show-hide-source").addEventListener("click", () => {
+      this.resizer.resizerLeft.classed("snapped", !this.resizer.resizerLeft.classed("snapped"));
+      this.setSourceExpanded(!this.sourceExpand.classList.contains("invisible"));
+      this.resizer.updatePanes();
+    });
+    document.getElementById("show-hide-disassembly").addEventListener("click", () => {
+      this.resizer.resizerRight.classed("snapped", !this.resizer.resizerRight.classed("snapped"));
+      this.setDisassemblyExpanded(!this.disassemblyExpand.classList.contains("invisible"));
+      this.resizer.updatePanes();
+    });
+    document.getElementById("show-hide-ranges").addEventListener("click", () => {
+      this.resizer.resizerRanges.classed("snapped", !this.resizer.resizerRanges.classed("snapped"));
+      this.setRangesExpanded(!this.rangesExpand.classList.contains("invisible"));
+      this.resizer.updatePanes();
+    });
+  }
+
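+  // Re-apply the expanded/collapsed state persisted in sessionStorage,
+  // defaulting every pane to expanded when nothing has been stored yet.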
+  restoreExpandedState(): void {
+    this.resizer.resizerLeft.classed("snapped", window.sessionStorage.getItem("expandedState-source") == "false");
+    this.resizer.resizerRight.classed("snapped", window.sessionStorage.getItem("expandedState-disassembly") == "false");
+    this.resizer.resizerRanges.classed("snapped", window.sessionStorage.getItem("expandedState-ranges") == "false");
+    this.setSourceExpanded(this.getLastExpandedState("source", true));
+    this.setDisassemblyExpanded(this.getLastExpandedState("disassembly", true));
+    this.setRangesExpanded(this.getLastExpandedState("ranges", true));
+  }
+
+  getLastExpandedState(type: string, defaultState: boolean): boolean {
+    const state = window.sessionStorage.getItem("expandedState-" + type);
+    if (state === null) return defaultState;
+    return state === 'true';
+  }
+
+  sourceUpdate(isSourceExpanded: boolean): void {
+    window.sessionStorage.setItem("expandedState-source", `${isSourceExpanded}`);
+    this.sourceExpand.classList.toggle("invisible", isSourceExpanded);
+    this.sourceCollapse.classList.toggle("invisible", !isSourceExpanded);
+    document.getElementById("show-hide-ranges").style.marginLeft = isSourceExpanded ? null : "40px";
+  }
+
+  setSourceExpanded(isSourceExpanded: boolean): void {
+    this.sourceUpdate(isSourceExpanded);
+    this.resizer.updateLeftWidth();
+  }
+
+  disassemblyUpdate(isDisassemblyExpanded: boolean): void {
+    window.sessionStorage.setItem("expandedState-disassembly", `${isDisassemblyExpanded}`);
+    this.disassemblyExpand.classList.toggle("invisible", isDisassemblyExpanded);
+    this.disassemblyCollapse.classList.toggle("invisible", !isDisassemblyExpanded);
+  }
+
+  setDisassemblyExpanded(isDisassemblyExpanded: boolean): void {
+    this.disassemblyUpdate(isDisassemblyExpanded);
+    this.resizer.updateRightWidth();
+  }
+
+  rangesUpdate(isRangesExpanded: boolean): void {
+    window.sessionStorage.setItem("expandedState-ranges", `${isRangesExpanded}`);
+    this.rangesExpand.classList.toggle("invisible", isRangesExpanded);
+    this.rangesCollapse.classList.toggle("invisible", !isRangesExpanded);
+  }
+
+  setRangesExpanded(isRangesExpanded: boolean): void {
+    this.rangesUpdate(isRangesExpanded);
+    this.resizer.updateRanges();
+  }
+}
+
+export class Resizer {
+  snapper: Snapper;
+  deadWidth: number;
+  deadHeight: number;
+  left: HTMLElement;
+  right: HTMLElement;
+  ranges: HTMLElement;
+  middle: HTMLElement;
+  sepLeft: number;
+  sepRight: number;
+  sepRangesHeight: number;
+  panesUpdatedCallback: () => void;
+  resizerRight: d3.Selection<HTMLDivElement, any, any, any>;
+  resizerLeft: d3.Selection<HTMLDivElement, any, any, any>;
+  resizerRanges: d3.Selection<HTMLDivElement, any, any, any>;
+
+  private readonly SOURCE_PANE_DEFAULT_PERCENT = 1 / 4;
+  private readonly DISASSEMBLY_PANE_DEFAULT_PERCENT = 3 / 4;
+  private readonly RANGES_PANE_HEIGHT_DEFAULT_PERCENT = 3 / 4;
+  private readonly RESIZER_RANGES_HEIGHT_BUFFER_PERCENTAGE = 5;
+  private readonly RESIZER_SIZE = document.getElementById("resizer-ranges").offsetHeight;
+
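+  // deadWidth/deadHeight define the zone near an edge in which a dragged
+  // separator snaps shut instead of leaving a sliver of a pane visible.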
+  constructor(panesUpdatedCallback: () => void, deadWidth: number, deadHeight: number) {
+    const resizer = this;
+    resizer.panesUpdatedCallback = panesUpdatedCallback;
+    resizer.deadWidth = deadWidth;
+    resizer.deadHeight = deadHeight;
+    resizer.left = document.getElementById(C.SOURCE_PANE_ID);
+    resizer.right = document.getElementById(C.GENERATED_PANE_ID);
+    resizer.ranges = document.getElementById(C.RANGES_PANE_ID);
+    resizer.middle = document.getElementById("middle");
+    resizer.resizerLeft = d3.select('#resizer-left');
+    resizer.resizerRight = d3.select('#resizer-right');
+    resizer.resizerRanges = d3.select('#resizer-ranges');
+    // Set default sizes, if they weren't set.
+    if (window.sessionStorage.getItem("source-pane-percent") === null) {
+      window.sessionStorage.setItem("source-pane-percent", `${this.SOURCE_PANE_DEFAULT_PERCENT}`);
+    }
+    if (window.sessionStorage.getItem("disassembly-pane-percent") === null) {
+      window.sessionStorage.setItem("disassembly-pane-percent", `${this.DISASSEMBLY_PANE_DEFAULT_PERCENT}`);
+    }
+    if (window.sessionStorage.getItem("ranges-pane-height-percent") === null) {
+      window.sessionStorage.setItem("ranges-pane-height-percent", `${this.RANGES_PANE_HEIGHT_DEFAULT_PERCENT}`);
+    }
+
+    this.updateSizes();
+
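+    // Dragging the left separator moves the source/middle boundary; it is
+    // clamped to [0, sepRight] and snaps shut when released near the edge.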
+    const dragResizeLeft = d3.drag()
+      .on('drag', function () {
+        const x = d3.mouse(this.parentElement)[0];
+        resizer.sepLeft = Math.min(Math.max(0, x), resizer.sepRight);
+        resizer.updatePanes();
+      })
+      .on('start', function () {
+        resizer.resizerLeft.classed("dragged", true);
+      })
+      .on('end', function () {
+        // If the panel is close enough to the left, treat it as if it was pulled all the way to the left.
+        const x = d3.mouse(this.parentElement)[0];
+        if (x <= deadWidth) {
+          resizer.sepLeft = 0;
+          resizer.updatePanes();
+        }
+        // Snap if dragged all the way to the left.
+        resizer.resizerLeft.classed("snapped", resizer.sepLeft === 0);
+        if (!resizer.isLeftSnapped()) {
+          window.sessionStorage.setItem("source-pane-percent", `${resizer.sepLeft / document.body.getBoundingClientRect().width}`);
+        }
+        resizer.snapper.setSourceExpanded(!resizer.isLeftSnapped());
+        resizer.resizerLeft.classed("dragged", false);
+      });
+    resizer.resizerLeft.call(dragResizeLeft);
+
+    const dragResizeRight = d3.drag()
+      .on('drag', function () {
+        const x = d3.mouse(this.parentElement)[0];
+        resizer.sepRight = Math.max(resizer.sepLeft, Math.min(x, document.body.getBoundingClientRect().width));
+        resizer.updatePanes();
+      })
+      .on('start', function () {
+        resizer.resizerRight.classed("dragged", true);
+      })
+      .on('end', function () {
+        // If the panel is close enough to the right, treat it as if it was pulled all the way to the right.
+        const x = d3.mouse(this.parentElement)[0];
+        const clientWidth = document.body.getBoundingClientRect().width;
+        if (x >= (clientWidth - deadWidth)) {
+          resizer.sepRight = clientWidth - 1;
+          resizer.updatePanes();
+        }
+        // Snap if dragged all the way to the right.
+        resizer.resizerRight.classed("snapped", resizer.sepRight >= clientWidth - 1);
+        if (!resizer.isRightSnapped()) {
+          window.sessionStorage.setItem("disassembly-pane-percent", `${resizer.sepRight / clientWidth}`);
+        }
+        resizer.snapper.setDisassemblyExpanded(!resizer.isRightSnapped());
+        resizer.resizerRight.classed("dragged", false);
+      });
+    resizer.resizerRight.call(dragResizeRight);
+
+    const dragResizeRanges = d3.drag()
+      .on('drag', function () {
+        const y = d3.mouse(this.parentElement)[1];
+        resizer.sepRangesHeight = Math.max(100, Math.min(y, window.innerHeight) - resizer.RESIZER_RANGES_HEIGHT_BUFFER_PERCENTAGE);
+        resizer.updatePanes();
+      })
+      .on('start', function () {
+        resizer.resizerRanges.classed("dragged", true);
+      })
+      .on('end', function () {
+        // If the panel is close enough to the bottom, treat it as if it was pulled all the way to the bottom.
+        const y = d3.mouse(this.parentElement)[1];
+        if (y >= (window.innerHeight - deadHeight)) {
+          resizer.sepRangesHeight = window.innerHeight;
+          resizer.updatePanes();
+        }
+        // Snap if dragged all the way to the bottom.
+        resizer.resizerRanges.classed("snapped", resizer.sepRangesHeight >= window.innerHeight - 1);
+        if (!resizer.isRangesSnapped()) {
+          window.sessionStorage.setItem("ranges-pane-height-percent", `${resizer.sepRangesHeight / window.innerHeight}`);
+        }
+        resizer.snapper.setRangesExpanded(!resizer.isRangesSnapped());
+        resizer.resizerRanges.classed("dragged", false);
+      });
+    resizer.resizerRanges.call(dragResizeRanges);
+
+    window.onresize = function () {
+      resizer.updateSizes();
+      resizer.updatePanes();
+    };
+    resizer.snapper = new Snapper(resizer);
+    resizer.snapper.restoreExpandedState();
+  }
+
+  isLeftSnapped() {
+    return this.resizerLeft.classed("snapped");
+  }
+
+  isRightSnapped() {
+    return this.resizerRight.classed("snapped");
+  }
+
+  isRangesSnapped() {
+    return this.resizerRanges.classed("snapped");
+  }
+
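+  // Size the ranges pane and the multiview above it, making room for the
+  // resizer bar and for the grid's own scrollbars.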
+  updateRangesPane() {
+    const clientHeight = window.innerHeight;
+    const rangesIsHidden = this.ranges.style.visibility == "hidden";
+    let resizerSize = this.RESIZER_SIZE;
+    if (rangesIsHidden) {
+      resizerSize = 0;
+      this.sepRangesHeight = clientHeight;
+    }
+
+    const rangeHeight = clientHeight - this.sepRangesHeight;
+    this.ranges.style.height = rangeHeight + 'px';
+    const panelWidth = this.sepRight - this.sepLeft - (2 * resizerSize);
+    this.ranges.style.width = panelWidth + 'px';
+    const multiview = document.getElementById("multiview");
+    if (multiview && multiview.style) {
+        multiview.style.height = (this.sepRangesHeight - resizerSize) + 'px';
+        multiview.style.width = panelWidth + 'px';
+    }
+
+    // Resize the range grid and labels.
+    const rangeGrid = (this.ranges.getElementsByClassName("range-grid")[0] as HTMLElement);
+    if (rangeGrid) {
+      const yAxis = (this.ranges.getElementsByClassName("range-y-axis")[0] as HTMLElement);
+      const rangeHeader = (this.ranges.getElementsByClassName("range-header")[0] as HTMLElement);
+
+      const gridWidth = panelWidth - yAxis.clientWidth;
+      rangeGrid.style.width = Math.floor(gridWidth - 1) + 'px';
+      // Take live ranges' right scrollbar into account.
+      rangeHeader.style.width = (gridWidth - rangeGrid.offsetWidth + rangeGrid.clientWidth - 1) + 'px';
+      // Set resizer to horizontal.
+      this.resizerRanges.style('width', panelWidth + 'px');
+
+      const rangeTitle = (this.ranges.getElementsByClassName("range-title-div")[0] as HTMLElement);
+      const rangeHeaderLabel = (this.ranges.getElementsByClassName("range-header-label-x")[0] as HTMLElement);
+      const gridHeight = rangeHeight - rangeHeader.clientHeight - rangeTitle.clientHeight - rangeHeaderLabel.clientHeight;
+      rangeGrid.style.height = gridHeight + 'px';
+      // Take live ranges' bottom scrollbar into account.
+      yAxis.style.height = (gridHeight - rangeGrid.offsetHeight + rangeGrid.clientHeight) + 'px';
+    }
+    this.resizerRanges.style('ranges', this.ranges.style.height);
+  }
+
+  updatePanes() {
+    this.left.style.width = this.sepLeft + 'px';
+    this.resizerLeft.style('left', this.sepLeft + 'px');
+    this.right.style.width = (document.body.getBoundingClientRect().width - this.sepRight) + 'px';
+    this.resizerRight.style('right', (document.body.getBoundingClientRect().width - this.sepRight - 1) + 'px');
+    this.updateRangesPane();
+    this.panesUpdatedCallback();
+  }
+
+  updateRanges() {
+    if (this.isRangesSnapped()) {
+      this.sepRangesHeight = window.innerHeight;
+    } else {
+      const sepRangesHeight = window.sessionStorage.getItem("ranges-pane-height-percent");
+      this.sepRangesHeight = window.innerHeight * Number.parseFloat(sepRangesHeight);
+    }
+  }
+
+  updateLeftWidth() {
+    if (this.isLeftSnapped()) {
+      this.sepLeft = 0;
+    } else {
+      const sepLeft = window.sessionStorage.getItem("source-pane-percent");
+      this.sepLeft = document.body.getBoundingClientRect().width * Number.parseFloat(sepLeft);
+    }
+  }
+
+  updateRightWidth() {
+    if (this.isRightSnapped()) {
+      this.sepRight = document.body.getBoundingClientRect().width;
+    } else {
+      const sepRight = window.sessionStorage.getItem("disassembly-pane-percent");
+      this.sepRight = document.body.getBoundingClientRect().width * Number.parseFloat(sepRight);
+    }
+  }
+
+  updateSizes() {
+    this.updateLeftWidth();
+    this.updateRightWidth();
+    this.updateRanges();
+  }
+}
diff --git a/src/third_party/v8/tools/turbolizer/src/schedule-view.ts b/src/third_party/v8/tools/turbolizer/src/schedule-view.ts
new file mode 100644
index 0000000..3397874
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/src/schedule-view.ts
@@ -0,0 +1,189 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import { Schedule, SourceResolver } from "../src/source-resolver";
+import { TextView } from "../src/text-view";
+
+export class ScheduleView extends TextView {
+  schedule: Schedule;
+  sourceResolver: SourceResolver;
+
+  createViewElement() {
+    const pane = document.createElement('div');
+    pane.setAttribute('id', "schedule");
+    pane.classList.add("scrollable");
+    pane.setAttribute("tabindex", "0");
+    return pane;
+  }
+
+  constructor(parentId, broker) {
+    super(parentId, broker);
+    this.sourceResolver = broker.sourceResolver;
+  }
+
+  attachSelection(s) {
+    const view = this;
+    if (!(s instanceof Set)) return;
+    view.selectionHandler.clear();
+    view.blockSelectionHandler.clear();
+    const selected = new Array();
+    for (const key of s) selected.push(key);
+    view.selectionHandler.select(selected, true);
+  }
+
+  detachSelection() {
+    this.blockSelection.clear();
+    return this.selection.detachSelection();
+  }
+
+  initializeContent(data, rememberedSelection) {
+    this.divNode.innerHTML = '';
+    this.schedule = data.schedule;
+    this.addBlocks(data.schedule.blocks);
+    this.attachSelection(rememberedSelection);
+    this.show();
+  }
+
+  createElementFromString(htmlString) {
+    const div = document.createElement('div');
+    div.innerHTML = htmlString.trim();
+    return div.firstChild;
+  }
+
+  elementForBlock(block) {
+    const view = this;
+    function createElement(tag: string, cls: string, content?: string) {
+      const el = document.createElement(tag);
+      el.className = cls;
+      if (content != undefined) el.innerHTML = content;
+      return el;
+    }
+
+    function mkNodeLinkHandler(nodeId) {
+      return function (e) {
+        e.stopPropagation();
+        if (!e.shiftKey) {
+          view.selectionHandler.clear();
+        }
+        view.selectionHandler.select([nodeId], true);
+      };
+    }
+
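+    // Pick the marker glyph and tooltip for a node based on its instruction
+    // range: a real range, a single processed-at position, or no schedule entry.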
+    function getMarker(start, end) {
+      if (start != end) {
+        return ["&#8857;", `This node generated instructions in range [${start},${end}). ` +
+          `This is currently unreliable for constants.`];
+      }
+      if (start != -1) {
+        return ["&#183;", `The instruction selector did not generate instructions ` +
+          `for this node, but processed the node at instruction ${start}. ` +
+          `This usually means that this node was folded into another node; ` +
+          `the highlighted machine code is a guess.`];
+      }
+      return ["", `This not is not in the final schedule.`];
+    }
+
+    function createElementForNode(node) {
+      const nodeEl = createElement("div", "node");
+
+      const [start, end] = view.sourceResolver.getInstruction(node.id);
+      const [marker, tooltip] = getMarker(start, end);
+      const instrMarker = createElement("div", "instr-marker com", marker);
+      instrMarker.setAttribute("title", tooltip);
+      instrMarker.onclick = mkNodeLinkHandler(node.id);
+      nodeEl.appendChild(instrMarker);
+
+      const nodeId = createElement("div", "node-id tag clickable", node.id);
+      nodeId.onclick = mkNodeLinkHandler(node.id);
+      view.addHtmlElementForNodeId(node.id, nodeId);
+      nodeEl.appendChild(nodeId);
+      const nodeLabel = createElement("div", "node-label", node.label);
+      nodeEl.appendChild(nodeLabel);
+      if (node.inputs.length > 0) {
+        const nodeParameters = createElement("div", "parameter-list comma-sep-list");
+        for (const param of node.inputs) {
+          const paramEl = createElement("div", "parameter tag clickable", param);
+          nodeParameters.appendChild(paramEl);
+          paramEl.onclick = mkNodeLinkHandler(param);
+          view.addHtmlElementForNodeId(param, paramEl);
+        }
+        nodeEl.appendChild(nodeParameters);
+      }
+
+      return nodeEl;
+    }
+
+    function mkBlockLinkHandler(blockId) {
+      return function (e) {
+        e.stopPropagation();
+        if (!e.shiftKey) {
+          view.blockSelectionHandler.clear();
+        }
+        view.blockSelectionHandler.select(["" + blockId], true);
+      };
+    }
+
+    const scheduleBlock = createElement("div", "schedule-block");
+    scheduleBlock.classList.toggle("deferred", block.isDeferred);
+
+    const [start, end] = view.sourceResolver.getInstructionRangeForBlock(block.id);
+    const instrMarker = createElement("div", "instr-marker com", "&#8857;");
+    instrMarker.setAttribute("title", `Instructions range for this block is [${start}, ${end})`);
+    instrMarker.onclick = mkBlockLinkHandler(block.id);
+    scheduleBlock.appendChild(instrMarker);
+
+    const blockId = createElement("div", "block-id com clickable", block.id);
+    blockId.onclick = mkBlockLinkHandler(block.id);
+    scheduleBlock.appendChild(blockId);
+    const blockPred = createElement("div", "predecessor-list block-list comma-sep-list");
+    for (const pred of block.pred) {
+      const predEl = createElement("div", "block-id com clickable", pred);
+      predEl.onclick = mkBlockLinkHandler(pred);
+      blockPred.appendChild(predEl);
+    }
+    if (block.pred.length) scheduleBlock.appendChild(blockPred);
+    const nodes = createElement("div", "nodes");
+    for (const node of block.nodes) {
+      nodes.appendChild(createElementForNode(node));
+    }
+    scheduleBlock.appendChild(nodes);
+    const blockSucc = createElement("div", "successor-list block-list comma-sep-list");
+    for (const succ of block.succ) {
+      const succEl = createElement("div", "block-id com clickable", succ);
+      succEl.onclick = mkBlockLinkHandler(succ);
+      blockSucc.appendChild(succEl);
+    }
+    if (block.succ.length) scheduleBlock.appendChild(blockSucc);
+    this.addHtmlElementForBlockId(block.id, scheduleBlock);
+    return scheduleBlock;
+  }
+
+  addBlocks(blocks) {
+    for (const block of blocks) {
+      const blockEl = this.elementForBlock(block);
+      this.divNode.appendChild(blockEl);
+    }
+  }
+
+  lineString(node) {
+    return `${node.id}: ${node.label}(${node.inputs.join(", ")})`;
+  }
+
+  searchInputAction(searchBar, e, onlyVisible) {
+    e.stopPropagation();
+    this.selectionHandler.clear();
+    const query = searchBar.value;
+    if (query.length == 0) return;
+    const select = [];
+    window.sessionStorage.setItem("lastSearch", query);
+    const reg = new RegExp(query);
+    for (const node of this.schedule.nodes) {
+      if (node === undefined) continue;
+      if (reg.exec(this.lineString(node)) != null) {
+        select.push(node.id);
+      }
+    }
+    this.selectionHandler.select(select, true);
+  }
+}
diff --git a/src/third_party/v8/tools/turbolizer/src/selection-broker.ts b/src/third_party/v8/tools/turbolizer/src/selection-broker.ts
new file mode 100644
index 0000000..7e0c0dd
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/src/selection-broker.ts
@@ -0,0 +1,89 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import { SourceResolver, sourcePositionValid } from "../src/source-resolver";
+import { ClearableHandler, SelectionHandler, NodeSelectionHandler, BlockSelectionHandler, InstructionSelectionHandler } from "../src/selection-handler";
+
+export class SelectionBroker {
+  sourceResolver: SourceResolver;
+  allHandlers: Array<ClearableHandler>;
+  sourcePositionHandlers: Array<SelectionHandler>;
+  nodeHandlers: Array<NodeSelectionHandler>;
+  blockHandlers: Array<BlockSelectionHandler>;
+  instructionHandlers: Array<InstructionSelectionHandler>;
+
+  constructor(sourceResolver) {
+    this.allHandlers = [];
+    this.sourcePositionHandlers = [];
+    this.nodeHandlers = [];
+    this.blockHandlers = [];
+    this.instructionHandlers = [];
+    this.sourceResolver = sourceResolver;
+  }
+
+  addSourcePositionHandler(handler: SelectionHandler & ClearableHandler) {
+    this.allHandlers.push(handler);
+    this.sourcePositionHandlers.push(handler);
+  }
+
+  addNodeHandler(handler: NodeSelectionHandler & ClearableHandler) {
+    this.allHandlers.push(handler);
+    this.nodeHandlers.push(handler);
+  }
+
+  addBlockHandler(handler: BlockSelectionHandler & ClearableHandler) {
+    this.allHandlers.push(handler);
+    this.blockHandlers.push(handler);
+  }
+
+  addInstructionHandler(handler: InstructionSelectionHandler & ClearableHandler) {
+    this.allHandlers.push(handler);
+    this.instructionHandlers.push(handler);
+  }
+
+  broadcastInstructionSelect(from, instructionOffsets, selected) {
+    for (const b of this.instructionHandlers) {
+      if (b != from) b.brokeredInstructionSelect(instructionOffsets, selected);
+    }
+  }
+
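+  // Source-position selections are also translated into node selections so
+  // that the graph and schedule views highlight the corresponding nodes.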
+  broadcastSourcePositionSelect(from, sourcePositions, selected) {
+    sourcePositions = sourcePositions.filter(l => {
+      if (!sourcePositionValid(l)) {
+        console.log("Warning: invalid source position");
+        return false;
+      }
+      return true;
+    });
+    for (const b of this.sourcePositionHandlers) {
+      if (b != from) b.brokeredSourcePositionSelect(sourcePositions, selected);
+    }
+    const nodes = this.sourceResolver.sourcePositionsToNodeIds(sourcePositions);
+    for (const b of this.nodeHandlers) {
+      if (b != from) b.brokeredNodeSelect(nodes, selected);
+    }
+  }
+
+  broadcastNodeSelect(from, nodes, selected) {
+    for (const b of this.nodeHandlers) {
+      if (b != from) b.brokeredNodeSelect(nodes, selected);
+    }
+    const sourcePositions = this.sourceResolver.nodeIdsToSourcePositions(nodes);
+    for (const b of this.sourcePositionHandlers) {
+      if (b != from) b.brokeredSourcePositionSelect(sourcePositions, selected);
+    }
+  }
+
+  broadcastBlockSelect(from, blocks, selected) {
+    for (const b of this.blockHandlers) {
+      if (b != from) b.brokeredBlockSelect(blocks, selected);
+    }
+  }
+
+  broadcastClear(from) {
+    this.allHandlers.forEach(function (b) {
+      if (b != from) b.brokeredClear();
+    });
+  }
+}
diff --git a/src/third_party/v8/tools/turbolizer/src/selection-handler.ts b/src/third_party/v8/tools/turbolizer/src/selection-handler.ts
new file mode 100644
index 0000000..a605149
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/src/selection-handler.ts
@@ -0,0 +1,31 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export interface ClearableHandler {
+  brokeredClear(): void;
+}
+
+export interface SelectionHandler {
+  clear(): void;
+  select(nodeIds: any, selected: any): void;
+  brokeredSourcePositionSelect(sourcePositions: any, selected: any): void;
+}
+
+export interface NodeSelectionHandler {
+  clear(): void;
+  select(nodeIds: any, selected: any): void;
+  brokeredNodeSelect(nodeIds: any, selected: any): void;
+}
+
+export interface BlockSelectionHandler {
+  clear(): void;
+  select(nodeIds: any, selected: any): void;
+  brokeredBlockSelect(blockIds: any, selected: any): void;
+}
+
+export interface InstructionSelectionHandler {
+  clear(): void;
+  select(instructionIds: any, selected: any): void;
+  brokeredInstructionSelect(instructionIds: any, selected: any): void;
+}
diff --git a/src/third_party/v8/tools/turbolizer/src/selection.ts b/src/third_party/v8/tools/turbolizer/src/selection.ts
new file mode 100644
index 0000000..90fe3bd
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/src/selection.ts
@@ -0,0 +1,59 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export class MySelection {
+  selection: any;
+  stringKey: (o: any) => string;
+
+  constructor(stringKeyFnc) {
+    this.selection = new Map();
+    this.stringKey = stringKeyFnc;
+  }
+
+  isEmpty(): boolean {
+    return this.selection.size == 0;
+  }
+
+  clear(): void {
+    this.selection = new Map();
+  }
+
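+  // When isSelected is omitted, the first item's current state decides whether
+  // this call selects or deselects, and that choice applies to all items.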
+  select(s: Iterable<any>, isSelected?: boolean) {
+    for (const i of s) {
+      if (!i) continue;
+      if (isSelected == undefined) {
+        isSelected = !this.selection.has(this.stringKey(i));
+      }
+      if (isSelected) {
+        this.selection.set(this.stringKey(i), i);
+      } else {
+        this.selection.delete(this.stringKey(i));
+      }
+    }
+  }
+
+  isSelected(i: any): boolean {
+    return this.selection.has(this.stringKey(i));
+  }
+
+  isKeySelected(key: string): boolean {
+    return this.selection.has(key);
+  }
+
+  selectedKeys() {
+    const result = new Set();
+    for (const i of this.selection.keys()) {
+      result.add(i);
+    }
+    return result;
+  }
+
+  detachSelection() {
+    const result = this.selectedKeys();
+    this.clear();
+    return result;
+  }
+
+  [Symbol.iterator]() { return this.selection.values(); }
+}
diff --git a/src/third_party/v8/tools/turbolizer/src/sequence-view.ts b/src/third_party/v8/tools/turbolizer/src/sequence-view.ts
new file mode 100644
index 0000000..187b162
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/src/sequence-view.ts
@@ -0,0 +1,370 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import { Sequence } from "../src/source-resolver";
+import { createElement } from "../src/util";
+import { TextView } from "../src/text-view";
+import { RangeView } from "../src/range-view";
+
+export class SequenceView extends TextView {
+  sequence: Sequence;
+  searchInfo: Array<any>;
+  phaseSelect: HTMLSelectElement;
+  numInstructions: number;
+  currentPhaseIndex: number;
+  phaseIndexes: Set<number>;
+  isShown: boolean;
+  rangeView: RangeView;
+  showRangeView: boolean;
+  toggleRangeViewEl: HTMLElement;
+
+  createViewElement() {
+    const pane = document.createElement('div');
+    pane.setAttribute('id', "sequence");
+    pane.classList.add("scrollable");
+    pane.setAttribute("tabindex", "0");
+    return pane;
+  }
+
+  constructor(parentId, broker) {
+    super(parentId, broker);
+    this.numInstructions = 0;
+    this.phaseIndexes = new Set<number>();
+    this.isShown = false;
+    this.showRangeView = false;
+    this.rangeView = null;
+    this.toggleRangeViewEl = this.elementForToggleRangeView();
+  }
+
+  attachSelection(s) {
+    const view = this;
+    if (!(s instanceof Set)) return;
+    view.selectionHandler.clear();
+    view.blockSelectionHandler.clear();
+    const selected = new Array();
+    for (const key of s) selected.push(key);
+    view.selectionHandler.select(selected, true);
+  }
+
+  detachSelection() {
+    this.blockSelection.clear();
+    return this.selection.detachSelection();
+  }
+
+  show() {
+    this.currentPhaseIndex = this.phaseSelect.selectedIndex;
+    if (!this.isShown) {
+      this.isShown = true;
+      this.phaseIndexes.add(this.currentPhaseIndex);
+      this.container.appendChild(this.divNode);
+      this.container.getElementsByClassName("graph-toolbox")[0].appendChild(this.toggleRangeViewEl);
+    }
+    if (this.showRangeView) this.rangeView.show();
+  }
+
+  hide() {
+    // A single SequenceView object is used for two phases (i.e. before and after
+    // register allocation); tracking the phase indexes avoids redundant hides and
+    // shows when switching between the two.
+    this.currentPhaseIndex = this.phaseSelect.selectedIndex;
+    if (!this.phaseIndexes.has(this.currentPhaseIndex)) {
+      this.isShown = false;
+      this.container.removeChild(this.divNode);
+      this.container.getElementsByClassName("graph-toolbox")[0].removeChild(this.toggleRangeViewEl);
+      if (this.showRangeView) this.rangeView.hide();
+    }
+  }
+
+  onresize() {
+    if (this.showRangeView) this.rangeView.onresize();
+  }
+
+  initializeContent(data, rememberedSelection) {
+    this.divNode.innerHTML = '';
+    this.sequence = data.sequence;
+    this.searchInfo = [];
+    this.divNode.onclick = (e: MouseEvent) => {
+      if (!(e.target instanceof HTMLElement)) return;
+      const instructionId = Number.parseInt(e.target.dataset.instructionId, 10);
+      if (!instructionId) return;
+      if (!e.shiftKey) this.broker.broadcastClear(null);
+      this.broker.broadcastInstructionSelect(null, [instructionId], true);
+    };
+    this.phaseSelect = (document.getElementById('phase-select') as HTMLSelectElement);
+    this.currentPhaseIndex = this.phaseSelect.selectedIndex;
+
+    this.addBlocks(this.sequence.blocks);
+    const lastBlock = this.sequence.blocks[this.sequence.blocks.length - 1];
+    this.numInstructions = lastBlock.instructions[lastBlock.instructions.length - 1].id + 1;
+    this.addRangeView();
+    this.attachSelection(rememberedSelection);
+    this.show();
+  }
+
+  elementForBlock(block) {
+    const view = this;
+
+    function mkLinkHandler(id, handler) {
+      return function (e) {
+        e.stopPropagation();
+        if (!e.shiftKey) {
+          handler.clear();
+        }
+        handler.select(["" + id], true);
+      };
+    }
+
+    function mkBlockLinkHandler(blockId) {
+      return mkLinkHandler(blockId, view.blockSelectionHandler);
+    }
+
+    function mkOperandLinkHandler(text) {
+      return mkLinkHandler(text, view.selectionHandler);
+    }
+
+    function elementForOperandWithSpan(span, text, searchInfo, isVirtual) {
+      const selectionText = isVirtual ? "virt_" + text : text;
+      span.onclick = mkOperandLinkHandler(selectionText);
+      searchInfo.push(text);
+      view.addHtmlElementForNodeId(selectionText, span);
+      const container = createElement("div", "");
+      container.appendChild(span);
+      return container;
+    }
+
+    function elementForOperand(operand, searchInfo) {
+      let isVirtual = false;
+      let className = "parameter tag clickable " + operand.type;
+      if (operand.text[0] == 'v' && !(operand.tooltip && operand.tooltip.includes("Float"))) {
+        isVirtual = true;
+        className += " virtual-reg";
+      }
+      const span = createElement("span", className, operand.text);
+      if (operand.tooltip) {
+        span.setAttribute("title", operand.tooltip);
+      }
+      return elementForOperandWithSpan(span, operand.text, searchInfo, isVirtual);
+    }
+
+    function elementForPhiOperand(text, searchInfo) {
+      const span = createElement("span", "parameter tag clickable virtual-reg", text);
+      return elementForOperandWithSpan(span, text, searchInfo, true);
+    }
+
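+    // Render one instruction as: id | [gap moves] outputs = opcode inputs temps.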
+    function elementForInstruction(instruction, searchInfo) {
+      const instNodeEl = createElement("div", "instruction-node");
+
+      const instId = createElement("div", "instruction-id", instruction.id);
+      const offsets = view.sourceResolver.instructionToPcOffsets(instruction.id);
+      instId.classList.add("clickable");
+      instId.dataset.instructionId = instruction.id;
+      if (offsets) {
+        instId.setAttribute("title", `This instruction generated gap code at pc-offset 0x${offsets.gap.toString(16)}, code at pc-offset 0x${offsets.arch.toString(16)}, condition handling at pc-offset 0x${offsets.condition.toString(16)}.`);
+      }
+      instNodeEl.appendChild(instId);
+
+      const instContentsEl = createElement("div", "instruction-contents");
+      instNodeEl.appendChild(instContentsEl);
+
+      // Print gap moves.
+      const gapEl = createElement("div", "gap", "gap");
+      let hasGaps = false;
+      for (const gap of instruction.gaps) {
+        const moves = createElement("div", "comma-sep-list gap-move");
+        for (const move of gap) {
+          hasGaps = true;
+          const moveEl = createElement("div", "move");
+          const destinationEl = elementForOperand(move[0], searchInfo);
+          moveEl.appendChild(destinationEl);
+          const assignEl = createElement("div", "assign", "=");
+          moveEl.appendChild(assignEl);
+          const sourceEl = elementForOperand(move[1], searchInfo);
+          moveEl.appendChild(sourceEl);
+          moves.appendChild(moveEl);
+        }
+        gapEl.appendChild(moves);
+      }
+      if (hasGaps) {
+        instContentsEl.appendChild(gapEl);
+      }
+
+      const instEl = createElement("div", "instruction");
+      instContentsEl.appendChild(instEl);
+
+      if (instruction.outputs.length > 0) {
+        const outputs = createElement("div", "comma-sep-list input-output-list");
+        for (const output of instruction.outputs) {
+          const outputEl = elementForOperand(output, searchInfo);
+          outputs.appendChild(outputEl);
+        }
+        instEl.appendChild(outputs);
+        const assignEl = createElement("div", "assign", "=");
+        instEl.appendChild(assignEl);
+      }
+
+      const text = instruction.opcode + instruction.flags;
+      const instLabel = createElement("div", "node-label", text);
+      if (instruction.opcode == "ArchNop" && instruction.outputs.length == 1 && instruction.outputs[0].tooltip) {
+        instLabel.innerText = instruction.outputs[0].tooltip;
+      }
+
+      searchInfo.push(text);
+      view.addHtmlElementForNodeId(text, instLabel);
+      instEl.appendChild(instLabel);
+
+      if (instruction.inputs.length > 0) {
+        const inputs = createElement("div", "comma-sep-list input-output-list");
+        for (const input of instruction.inputs) {
+          const inputEl = elementForOperand(input, searchInfo);
+          inputs.appendChild(inputEl);
+        }
+        instEl.appendChild(inputs);
+      }
+
+      if (instruction.temps.length > 0) {
+        const temps = createElement("div", "comma-sep-list input-output-list temps");
+        for (const temp of instruction.temps) {
+          const tempEl = elementForOperand(temp, searchInfo);
+          temps.appendChild(tempEl);
+        }
+        instEl.appendChild(temps);
+      }
+
+      return instNodeEl;
+    }
+
+    const sequenceBlock = createElement("div", "schedule-block");
+    sequenceBlock.classList.toggle("deferred", block.deferred);
+
+    const blockId = createElement("div", "block-id com clickable", block.id);
+    blockId.onclick = mkBlockLinkHandler(block.id);
+    sequenceBlock.appendChild(blockId);
+    const blockPred = createElement("div", "predecessor-list block-list comma-sep-list");
+    for (const pred of block.predecessors) {
+      const predEl = createElement("div", "block-id com clickable", pred);
+      predEl.onclick = mkBlockLinkHandler(pred);
+      blockPred.appendChild(predEl);
+    }
+    if (block.predecessors.length > 0) sequenceBlock.appendChild(blockPred);
+    const phis = createElement("div", "phis");
+    sequenceBlock.appendChild(phis);
+
+    const phiLabel = createElement("div", "phi-label", "phi:");
+    phis.appendChild(phiLabel);
+
+    const phiContents = createElement("div", "phi-contents");
+    phis.appendChild(phiContents);
+
+    for (const phi of block.phis) {
+      const phiEl = createElement("div", "phi");
+      phiContents.appendChild(phiEl);
+
+      const outputEl = elementForOperand(phi.output, this.searchInfo);
+      phiEl.appendChild(outputEl);
+
+      const assignEl = createElement("div", "assign", "=");
+      phiEl.appendChild(assignEl);
+
+      for (const input of phi.operands) {
+        const inputEl = elementForPhiOperand(input, this.searchInfo);
+        phiEl.appendChild(inputEl);
+      }
+    }
+
+    const instructions = createElement("div", "instructions");
+    for (const instruction of block.instructions) {
+      instructions.appendChild(elementForInstruction(instruction, this.searchInfo));
+    }
+    sequenceBlock.appendChild(instructions);
+    const blockSucc = createElement("div", "successor-list block-list comma-sep-list");
+    for (const succ of block.successors) {
+      const succEl = createElement("div", "block-id com clickable", succ);
+      succEl.onclick = mkBlockLinkHandler(succ);
+      blockSucc.appendChild(succEl);
+    }
+    if (block.successors.length > 0) sequenceBlock.appendChild(blockSucc);
+    this.addHtmlElementForBlockId(block.id, sequenceBlock);
+    return sequenceBlock;
+  }
+
+  addBlocks(blocks) {
+    for (const block of blocks) {
+      const blockEl = this.elementForBlock(block);
+      this.divNode.appendChild(blockEl);
+    }
+  }
+
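+  // Enable the live-range view only when the phase carries register allocation
+  // data small enough to display; otherwise disable the toggle with a reason.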
+  addRangeView() {
+    const preventRangeView = reason => {
+      const toggleRangesInput = this.toggleRangeViewEl.firstChild as HTMLInputElement;
+      if (this.rangeView) {
+        toggleRangesInput.checked = false;
+        this.toggleRangeView(toggleRangesInput);
+      }
+      toggleRangesInput.disabled = true;
+      this.toggleRangeViewEl.style.textDecoration = "line-through";
+      this.toggleRangeViewEl.setAttribute("title", reason);
+    };
+
+    if (this.sequence.register_allocation) {
+      if (!this.rangeView) {
+        this.rangeView = new RangeView(this);
+      }
+      const source = this.sequence.register_allocation;
+      if (source.fixedLiveRanges.size == 0 && source.liveRanges.size == 0) {
+        preventRangeView("No live ranges to show");
+      } else if (this.numInstructions >= 249) {
+        // This is due to CSS grid-column indices being limited to 1000 columns.
+        // Performance issues would otherwise impose some limit anyway.
+        // TODO(george.wort@arm.com): Allow the user to specify an instruction range
+        //                            to display that spans less than 249 instructions.
+        preventRangeView(
+          "Live range display is only supported for sequences with less than 249 instructions");
+      }
+      if (this.showRangeView) {
+        this.rangeView.initializeContent(this.sequence.blocks);
+      }
+    } else {
+      preventRangeView("No live range data provided");
+    }
+  }
+
+  elementForToggleRangeView() {
+    const toggleRangeViewEl = createElement("label", "", "show live ranges");
+    const toggleRangesInput = createElement("input", "range-toggle-show") as HTMLInputElement;
+    toggleRangesInput.setAttribute("type", "checkbox");
+    toggleRangesInput.oninput = () => this.toggleRangeView(toggleRangesInput);
+    toggleRangeViewEl.insertBefore(toggleRangesInput, toggleRangeViewEl.firstChild);
+    return toggleRangeViewEl;
+  }
+
+  toggleRangeView(toggleRangesInput: HTMLInputElement) {
+    toggleRangesInput.disabled = true;
+    this.showRangeView = toggleRangesInput.checked;
+    if (this.showRangeView) {
+      this.rangeView.initializeContent(this.sequence.blocks);
+      this.rangeView.show();
+    } else {
+      this.rangeView.hide();
+    }
+    window.dispatchEvent(new Event('resize'));
+    toggleRangesInput.disabled = false;
+  }
+
+  searchInputAction(searchBar, e) {
+    e.stopPropagation();
+    this.selectionHandler.clear();
+    const query = searchBar.value;
+    if (query.length == 0) return;
+    const select = [];
+    window.sessionStorage.setItem("lastSearch", query);
+    const reg = new RegExp(query);
+    for (const item of this.searchInfo) {
+      if (reg.exec(item) != null) {
+        select.push(item);
+      }
+    }
+    this.selectionHandler.select(select, true);
+  }
+}
diff --git a/src/third_party/v8/tools/turbolizer/src/source-resolver.ts b/src/third_party/v8/tools/turbolizer/src/source-resolver.ts
new file mode 100644
index 0000000..085b44f
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/src/source-resolver.ts
@@ -0,0 +1,766 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import { sortUnique, anyToString } from "../src/util";
+import { NodeLabel } from "./node-label";
+
+function sourcePositionLe(a, b) {
+  if (a.inliningId == b.inliningId) {
+    return a.scriptOffset - b.scriptOffset;
+  }
+  return a.inliningId - b.inliningId;
+}
+
+function sourcePositionEq(a, b) {
+  return a.inliningId == b.inliningId &&
+    a.scriptOffset == b.scriptOffset;
+}
+
+export function sourcePositionToStringKey(sourcePosition: AnyPosition): string {
+  if (!sourcePosition) return "undefined";
+  if ('inliningId' in sourcePosition && 'scriptOffset' in sourcePosition) {
+    return "SP:" + sourcePosition.inliningId + ":" + sourcePosition.scriptOffset;
+  }
+  if (sourcePosition.bytecodePosition) {
+    return "BCP:" + sourcePosition.bytecodePosition;
+  }
+  return "undefined";
+}
+
+export function sourcePositionValid(l) {
+  // typeof always yields a string, so compare against the string "undefined".
+  return (typeof l.scriptOffset !== "undefined"
+    && typeof l.inliningId !== "undefined") || typeof l.bytecodePosition !== "undefined";
+}
+
+export interface SourcePosition {
+  scriptOffset: number;
+  inliningId: number;
+}
+
+interface TurboFanOrigin {
+  phase: string;
+  reducer: string;
+}
+
+export interface NodeOrigin {
+  nodeId: number;
+}
+
+interface BytecodePosition {
+  bytecodePosition: number;
+}
+
+export type Origin = NodeOrigin | BytecodePosition;
+export type TurboFanNodeOrigin = NodeOrigin & TurboFanOrigin;
+export type TurboFanBytecodeOrigin = BytecodePosition & TurboFanOrigin;
+
+type AnyPosition = SourcePosition | BytecodePosition;
+
+export interface Source {
+  sourcePositions: Array<SourcePosition>;
+  sourceName: string;
+  functionName: string;
+  sourceText: string;
+  sourceId: number;
+  startPosition?: number;
+  backwardsCompatibility: boolean;
+}
+interface Inlining {
+  inliningPosition: SourcePosition;
+  sourceId: number;
+}
+interface OtherPhase {
+  type: "disassembly" | "sequence" | "schedule";
+  name: string;
+  data: any;
+}
+
+interface InstructionsPhase {
+  type: "instructions";
+  name: string;
+  data: any;
+  instructionOffsetToPCOffset?: any;
+  blockIdtoInstructionRange?: any;
+  nodeIdToInstructionRange?: any;
+  codeOffsetsInfo?: CodeOffsetsInfo;
+}
+
+interface GraphPhase {
+  type: "graph";
+  name: string;
+  data: any;
+  highestNodeId: number;
+  nodeLabelMap: Array<NodeLabel>;
+}
+
+type Phase = GraphPhase | InstructionsPhase | OtherPhase;
+
+export interface Schedule {
+  nodes: Array<any>;
+}
+
+export class Interval {
+  start: number;
+  end: number;
+
+  constructor(numbers: [number, number]) {
+    this.start = numbers[0];
+    this.end = numbers[1];
+  }
+}
+
+export interface ChildRange {
+  id: string;
+  type: string;
+  op: any;
+  intervals: Array<[number, number]>;
+  uses: Array<number>;
+}
+
+export interface Range {
+  child_ranges: Array<ChildRange>;
+  is_deferred: boolean;
+}
+
+export class RegisterAllocation {
+  fixedDoubleLiveRanges: Map<string, Range>;
+  fixedLiveRanges: Map<string, Range>;
+  liveRanges: Map<string, Range>;
+
+  constructor(registerAllocation) {
+    this.fixedDoubleLiveRanges = new Map<string, Range>(Object.entries(registerAllocation.fixed_double_live_ranges));
+    this.fixedLiveRanges = new Map<string, Range>(Object.entries(registerAllocation.fixed_live_ranges));
+    this.liveRanges = new Map<string, Range>(Object.entries(registerAllocation.live_ranges));
+  }
+}
+
+export interface Sequence {
+  blocks: Array<any>;
+  register_allocation: RegisterAllocation;
+}
+
+class CodeOffsetsInfo {
+  codeStartRegisterCheck: number;
+  deoptCheck: number;
+  initPoison: number;
+  blocksStart: number;
+  outOfLineCode: number;
+  deoptimizationExits: number;
+  pools: number;
+  jumpTables: number;
+}
+export class TurbolizerInstructionStartInfo {
+  gap: number;
+  arch: number;
+  condition: number;
+}
+
+export class SourceResolver {
+  nodePositionMap: Array<AnyPosition>;
+  sources: Array<Source>;
+  inlinings: Array<Inlining>;
+  inliningsMap: Map<string, Inlining>;
+  positionToNodes: Map<string, Array<string>>;
+  phases: Array<Phase>;
+  phaseNames: Map<string, number>;
+  disassemblyPhase: Phase;
+  lineToSourcePositions: Map<string, Array<AnyPosition>>;
+  nodeIdToInstructionRange: Array<[number, number]>;
+  blockIdToInstructionRange: Array<[number, number]>;
+  instructionToPCOffset: Array<TurbolizerInstructionStartInfo>;
+  pcOffsetToInstructions: Map<number, Array<number>>;
+  pcOffsets: Array<number>;
+  blockIdToPCOffset: Array<number>;
+  blockStartPCtoBlockIds: Map<number, Array<number>>;
+  codeOffsetsInfo: CodeOffsetsInfo;
+
+  constructor() {
+    // Maps node ids to source positions.
+    this.nodePositionMap = [];
+    // Maps source ids to source objects.
+    this.sources = [];
+    // Maps inlining ids to inlining objects.
+    this.inlinings = [];
+    // Maps source position keys to inlinings.
+    this.inliningsMap = new Map();
+    // Maps source position keys to node ids.
+    this.positionToNodes = new Map();
+    // Maps phase ids to phases.
+    this.phases = [];
+    // Maps phase names to phaseIds.
+    this.phaseNames = new Map();
+    // The disassembly phase is stored separately.
+    this.disassemblyPhase = undefined;
+    // Maps line numbers to source positions
+    this.lineToSourcePositions = new Map();
+    // Maps node ids to instruction ranges.
+    this.nodeIdToInstructionRange = [];
+    // Maps block ids to instruction ranges.
+    this.blockIdToInstructionRange = [];
+    // Maps instruction numbers to PC offsets.
+    this.instructionToPCOffset = [];
+    // Maps PC offsets to instructions.
+    this.pcOffsetToInstructions = new Map();
+    this.pcOffsets = [];
+    this.blockIdToPCOffset = [];
+    this.blockStartPCtoBlockIds = new Map();
+    this.codeOffsetsInfo = null;
+  }
+
+  getBlockIdsForOffset(offset): Array<number> {
+    return this.blockStartPCtoBlockIds.get(offset);
+  }
+
+  hasBlockStartInfo() {
+    return this.blockIdToPCOffset.length > 0;
+  }
+
+  setSources(sources, mainBackup) {
+    if (sources) {
+      for (const [sourceId, source] of Object.entries(sources)) {
+        this.sources[sourceId] = source;
+        this.sources[sourceId].sourcePositions = [];
+      }
+    }
+    // This is a fallback if the JSON is incomplete (e.g. due to compiler crash).
+    if (!this.sources[-1]) {
+      this.sources[-1] = mainBackup;
+      this.sources[-1].sourcePositions = [];
+    }
+  }
+
+  setInlinings(inlinings) {
+    if (inlinings) {
+      for (const [inliningId, inlining] of Object.entries<Inlining>(inlinings)) {
+        this.inlinings[inliningId] = inlining;
+        this.inliningsMap.set(sourcePositionToStringKey(inlining.inliningPosition), inlining);
+      }
+    }
+    // This is a default entry for the script itself that helps
+    // keep other code more uniform.
+    this.inlinings[-1] = { sourceId: -1, inliningPosition: null };
+  }
+
+  setNodePositionMap(map) {
+    if (!map) return;
+    if (typeof map[0] != 'object') {
+      const alternativeMap = {};
+      for (const [nodeId, scriptOffset] of Object.entries<number>(map)) {
+        alternativeMap[nodeId] = { scriptOffset: scriptOffset, inliningId: -1 };
+      }
+      map = alternativeMap;
+    }
+
+    for (const [nodeId, sourcePosition] of Object.entries<SourcePosition>(map)) {
+      if (sourcePosition == undefined) {
+        console.log("Warning: undefined source position ", sourcePosition, " for nodeId ", nodeId);
+        continue;
+      }
+      const inliningId = sourcePosition.inliningId;
+      const inlining = this.inlinings[inliningId];
+      if (inlining) {
+        const sourceId = inlining.sourceId;
+        this.sources[sourceId].sourcePositions.push(sourcePosition);
+      }
+      this.nodePositionMap[nodeId] = sourcePosition;
+      const key = sourcePositionToStringKey(sourcePosition);
+      if (!this.positionToNodes.has(key)) {
+        this.positionToNodes.set(key, []);
+      }
+      this.positionToNodes.get(key).push(nodeId);
+    }
+    for (const [, source] of Object.entries(this.sources)) {
+      source.sourcePositions = sortUnique(source.sourcePositions,
+        sourcePositionLe, sourcePositionEq);
+    }
+  }
+
+  sourcePositionsToNodeIds(sourcePositions) {
+    const nodeIds = new Set();
+    for (const sp of sourcePositions) {
+      const key = sourcePositionToStringKey(sp);
+      const nodeIdsForPosition = this.positionToNodes.get(key);
+      if (!nodeIdsForPosition) continue;
+      for (const nodeId of nodeIdsForPosition) {
+        nodeIds.add(nodeId);
+      }
+    }
+    return nodeIds;
+  }
+
+  nodeIdsToSourcePositions(nodeIds): Array<AnyPosition> {
+    const sourcePositions = new Map();
+    for (const nodeId of nodeIds) {
+      const sp = this.nodePositionMap[nodeId];
+      const key = sourcePositionToStringKey(sp);
+      sourcePositions.set(key, sp);
+    }
+    const sourcePositionArray = [];
+    for (const sp of sourcePositions.values()) {
+      sourcePositionArray.push(sp);
+    }
+    return sourcePositionArray;
+  }
+
+  forEachSource(f: (value: Source, index: number, array: Array<Source>) => void) {
+    this.sources.forEach(f);
+  }
+
+  translateToSourceId(sourceId: number, location?: SourcePosition) {
+    for (const position of this.getInlineStack(location)) {
+      const inlining = this.inlinings[position.inliningId];
+      if (!inlining) continue;
+      if (inlining.sourceId == sourceId) {
+        return position;
+      }
+    }
+    return location;
+  }
+
+  addInliningPositions(sourcePosition: AnyPosition, locations: Array<SourcePosition>) {
+    const inlining = this.inliningsMap.get(sourcePositionToStringKey(sourcePosition));
+    if (!inlining) return;
+    const sourceId = inlining.sourceId;
+    const source = this.sources[sourceId];
+    for (const sp of source.sourcePositions) {
+      locations.push(sp);
+      this.addInliningPositions(sp, locations);
+    }
+  }
+
+  getInliningForPosition(sourcePosition: AnyPosition) {
+    return this.inliningsMap.get(sourcePositionToStringKey(sourcePosition));
+  }
+
+  getSource(sourceId: number) {
+    return this.sources[sourceId];
+  }
+
+  getSourceName(sourceId: number) {
+    const source = this.sources[sourceId];
+    return `${source.sourceName}:${source.functionName}`;
+  }
+
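+  // Map a script offset to the source position whose half-open range
+  // [entry.scriptOffset, nextEntry.scriptOffset) contains it.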
+  sourcePositionFor(sourceId: number, scriptOffset: number) {
+    if (!this.sources[sourceId]) {
+      return null;
+    }
+    const list = this.sources[sourceId].sourcePositions;
+    for (let i = 0; i < list.length; i++) {
+      const sourcePosition = list[i];
+      const position = sourcePosition.scriptOffset;
+      const nextPosition = list[Math.min(i + 1, list.length - 1)].scriptOffset;
+      if ((position <= scriptOffset && scriptOffset < nextPosition)) {
+        return sourcePosition;
+      }
+    }
+    return null;
+  }
+
+  sourcePositionsInRange(sourceId: number, start: number, end: number) {
+    if (!this.sources[sourceId]) return [];
+    const res = [];
+    const list = this.sources[sourceId].sourcePositions;
+    for (const sourcePosition of list) {
+      if (start <= sourcePosition.scriptOffset && sourcePosition.scriptOffset < end) {
+        res.push(sourcePosition);
+      }
+    }
+    return res;
+  }
+
+  getInlineStack(sourcePosition?: SourcePosition) {
+    if (!sourcePosition) return [];
+
+    const inliningStack = [];
+    let cur = sourcePosition;
+    while (cur && cur.inliningId != -1) {
+      inliningStack.push(cur);
+      const inlining = this.inlinings[cur.inliningId];
+      if (!inlining) {
+        break;
+      }
+      cur = inlining.inliningPosition;
+    }
+    if (cur && cur.inliningId == -1) {
+      inliningStack.push(cur);
+    }
+    return inliningStack;
+  }
+
+  recordOrigins(phase: GraphPhase) {
+    if (phase.type != "graph") return;
+    for (const node of phase.data.nodes) {
+      phase.highestNodeId = Math.max(phase.highestNodeId, node.id);
+      if (node.origin != undefined &&
+        node.origin.bytecodePosition != undefined) {
+        const position = { bytecodePosition: node.origin.bytecodePosition };
+        this.nodePositionMap[node.id] = position;
+        const key = sourcePositionToStringKey(position);
+        if (!this.positionToNodes.has(key)) {
+          this.positionToNodes.set(key, []);
+        }
+        const A = this.positionToNodes.get(key);
+        if (!A.includes(node.id)) A.push(`${node.id}`);
+      }
+
+      // Backwards compatibility.
+      if (typeof node.pos === "number") {
+        node.sourcePosition = { scriptOffset: node.pos, inliningId: -1 };
+      }
+    }
+  }
+
+  readNodeIdToInstructionRange(nodeIdToInstructionRange) {
+    for (const [nodeId, range] of Object.entries<[number, number]>(nodeIdToInstructionRange)) {
+      this.nodeIdToInstructionRange[nodeId] = range;
+    }
+  }
+
+  readBlockIdToInstructionRange(blockIdToInstructionRange) {
+    for (const [blockId, range] of Object.entries<[number, number]>(blockIdToInstructionRange)) {
+      this.blockIdToInstructionRange[blockId] = range;
+    }
+  }
+
+  getInstruction(nodeId: number): [number, number] {
+    const X = this.nodeIdToInstructionRange[nodeId];
+    if (X === undefined) return [-1, -1];
+    return X;
+  }
+
+  getInstructionRangeForBlock(blockId: number): [number, number] {
+    const X = this.blockIdToInstructionRange[blockId];
+    if (X === undefined) return [-1, -1];
+    return X;
+  }
+
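+  // Older JSON stores a single pc offset per instruction; normalize it into
+  // the gap/arch/condition triple used by newer versions.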
+  readInstructionOffsetToPCOffset(instructionToPCOffset) {
+    for (const [instruction, numberOrInfo] of Object.entries<number | TurbolizerInstructionStartInfo>(instructionToPCOffset)) {
+      let info: TurbolizerInstructionStartInfo;
+      if (typeof numberOrInfo == "number") {
+        info = { gap: numberOrInfo, arch: numberOrInfo, condition: numberOrInfo };
+      } else {
+        info = numberOrInfo;
+      }
+      this.instructionToPCOffset[instruction] = info;
+      if (!this.pcOffsetToInstructions.has(info.gap)) {
+        this.pcOffsetToInstructions.set(info.gap, []);
+      }
+      this.pcOffsetToInstructions.get(info.gap).push(Number(instruction));
+    }
+    this.pcOffsets = Array.from(this.pcOffsetToInstructions.keys()).sort((a, b) => b - a);
+  }
+
+  hasPCOffsets() {
+    return this.pcOffsetToInstructions.size > 0;
+  }
+
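+  // pcOffsets is sorted in descending order (see readInstructionOffsetToPCOffset),
+  // so the first key that is <= offset is the start offset of the instruction
+  // containing `offset`.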
+  getKeyPcOffset(offset: number): number {
+    if (this.pcOffsets.length === 0) return -1;
+    for (const key of this.pcOffsets) {
+      if (key <= offset) {
+        return key;
+      }
+    }
+    return -1;
+  }
+
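+  // Classifies a PC offset, first against the section boundaries recorded in
+  // codeOffsetsInfo (code-start check, deopt check, poison init, pools, jump
+  // tables, deoptimization exits) and then against the gap/arch/condition start
+  // offsets of the instruction that covers the offset.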
+  getInstructionKindForPCOffset(offset: number) {
+    if (this.codeOffsetsInfo) {
+      if (offset >= this.codeOffsetsInfo.deoptimizationExits) {
+        if (offset >= this.codeOffsetsInfo.pools) {
+          return "pools";
+        } else if (offset >= this.codeOffsetsInfo.jumpTables) {
+          return "jump-tables";
+        } else {
+          return "deoptimization-exits";
+        }
+      }
+      if (offset < this.codeOffsetsInfo.deoptCheck) {
+        return "code-start-register";
+      } else if (offset < this.codeOffsetsInfo.initPoison) {
+        return "deopt-check";
+      } else if (offset < this.codeOffsetsInfo.blocksStart) {
+        return "init-poison";
+      }
+    }
+    const keyOffset = this.getKeyPcOffset(offset);
+    if (keyOffset != -1) {
+      const infos = this.pcOffsetToInstructions.get(keyOffset)
+        .map(instrId => this.instructionToPCOffset[instrId])
+        .filter(info => info.gap != info.condition);
+      if (infos.length > 0) {
+        const info = infos[0];
+        if (!info || info.gap == info.condition) return "unknown";
+        if (offset < info.arch) return "gap";
+        if (offset < info.condition) return "arch";
+        return "condition";
+      }
+    }
+    return "unknown";
+  }
+
+  instructionKindToReadableName(instructionKind) {
+    switch (instructionKind) {
+      case "code-start-register": return "Check code register for right value";
+      case "deopt-check": return "Check if function was marked for deoptimization";
+      case "init-poison": return "Initialization of poison register";
+      case "gap": return "Instruction implementing a gap move";
+      case "arch": return "Instruction implementing the actual machine operation";
+      case "condition": return "Code implementing conditional after instruction";
+      case "pools": return "Data in a pool (e.g. constant pool)";
+      case "jump-tables": return "Part of a jump table";
+      case "deoptimization-exits": return "Jump to deoptimization exit";
+    }
+    return null;
+  }
+
+  instructionRangeToKeyPcOffsets([start, end]: [number, number]): Array<TurbolizerInstructionStartInfo> {
+    if (start == end) return [this.instructionToPCOffset[start]];
+    return this.instructionToPCOffset.slice(start, end);
+  }
+
+  instructionToPcOffsets(instr: number): TurbolizerInstructionStartInfo {
+    return this.instructionToPCOffset[instr];
+  }
+
+  instructionsToKeyPcOffsets(instructionIds: Iterable<number>): Array<number> {
+    const keyPcOffsets = [];
+    for (const instructionId of instructionIds) {
+      keyPcOffsets.push(this.instructionToPCOffset[instructionId].gap);
+    }
+    return keyPcOffsets;
+  }
+
+  nodesToKeyPcOffsets(nodes) {
+    let offsets = [];
+    for (const node of nodes) {
+      const range = this.nodeIdToInstructionRange[node];
+      if (!range) continue;
+      offsets = offsets.concat(this.instructionRangeToKeyPcOffsets(range));
+    }
+    return offsets;
+  }
+
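+  // Returns the ids of all nodes whose instruction range covers the
+  // instruction(s) starting at the closest key PC offset <= `offset`. The
+  // second array (block ids) is currently never populated.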
+  nodesForPCOffset(offset: number): [Array<string>, Array<string>] {
+    if (this.pcOffsets.length === 0) return [[], []];
+    for (const key of this.pcOffsets) {
+      if (key <= offset) {
+        const instrs = this.pcOffsetToInstructions.get(key);
+        const nodes = [];
+        const blocks = [];
+        for (const instr of instrs) {
+          for (const [nodeId, range] of this.nodeIdToInstructionRange.entries()) {
+            if (!range) continue;
+            const [start, end] = range;
+            if (start == end && instr == start) {
+              nodes.push("" + nodeId);
+            }
+            if (start <= instr && instr < end) {
+              nodes.push("" + nodeId);
+            }
+          }
+        }
+        return [nodes, blocks];
+      }
+    }
+    return [[], []];
+  }
+
+  parsePhases(phases) {
+    const nodeLabelMap = [];
+    for (const [, phase] of Object.entries<Phase>(phases)) {
+      switch (phase.type) {
+        case 'disassembly':
+          this.disassemblyPhase = phase;
+          if (phase['blockIdToOffset']) {
+            for (const [blockId, pc] of Object.entries<number>(phase['blockIdToOffset'])) {
+              this.blockIdToPCOffset[blockId] = pc;
+              if (!this.blockStartPCtoBlockIds.has(pc)) {
+                this.blockStartPCtoBlockIds.set(pc, []);
+              }
+              this.blockStartPCtoBlockIds.get(pc).push(Number(blockId));
+            }
+          }
+          break;
+        case 'schedule':
+          this.phaseNames.set(phase.name, this.phases.length);
+          this.phases.push(this.parseSchedule(phase));
+          break;
+        case 'sequence':
+          this.phaseNames.set(phase.name, this.phases.length);
+          this.phases.push(this.parseSequence(phase));
+          break;
+        case 'instructions':
+          if (phase.nodeIdToInstructionRange) {
+            this.readNodeIdToInstructionRange(phase.nodeIdToInstructionRange);
+          }
+          if (phase.blockIdtoInstructionRange) {
+            this.readBlockIdToInstructionRange(phase.blockIdtoInstructionRange);
+          }
+          if (phase.instructionOffsetToPCOffset) {
+            this.readInstructionOffsetToPCOffset(phase.instructionOffsetToPCOffset);
+          }
+          if (phase.codeOffsetsInfo) {
+            this.codeOffsetsInfo = phase.codeOffsetsInfo;
+          }
+          break;
+        case 'graph':
+          const graphPhase: GraphPhase = Object.assign(phase, { highestNodeId: 0 });
+          this.phaseNames.set(graphPhase.name, this.phases.length);
+          this.phases.push(graphPhase);
+          this.recordOrigins(graphPhase);
+          this.internNodeLabels(graphPhase, nodeLabelMap);
+          graphPhase.nodeLabelMap = nodeLabelMap.slice();
+          break;
+        default:
+          throw "Unsupported phase type";
+      }
+    }
+  }
+
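+  // Interns NodeLabel objects across graph phases: a node keeps its previous
+  // label unless the label content changed, in which case the phase that
+  // performed the in-place update is recorded on the new label.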
+  internNodeLabels(phase: GraphPhase, nodeLabelMap: Array<NodeLabel>) {
+    for (const n of phase.data.nodes) {
+      const label = new NodeLabel(n.id, n.label, n.title, n.live,
+        n.properties, n.sourcePosition, n.origin, n.opcode, n.control,
+        n.opinfo, n.type);
+      const previous = nodeLabelMap[label.id];
+      if (!label.equals(previous)) {
+        if (previous != undefined) {
+          label.setInplaceUpdatePhase(phase.name);
+        }
+        nodeLabelMap[label.id] = label;
+      }
+      n.nodeLabel = nodeLabelMap[label.id];
+    }
+  }
+
+  repairPhaseId(anyPhaseId) {
+    return Math.max(0, Math.min(anyPhaseId | 0, this.phases.length - 1));
+  }
+
+  getPhase(phaseId: number) {
+    return this.phases[phaseId];
+  }
+
+  getPhaseIdByName(phaseName: string) {
+    return this.phaseNames.get(phaseName);
+  }
+
+  forEachPhase(f: (value: Phase, index: number, array: Array<Phase>) => void) {
+    this.phases.forEach(f);
+  }
+
+  addAnyPositionToLine(lineNumber: number | string, sourcePosition: AnyPosition) {
+    const lineNumberString = anyToString(lineNumber);
+    if (!this.lineToSourcePositions.has(lineNumberString)) {
+      this.lineToSourcePositions.set(lineNumberString, []);
+    }
+    const positions = this.lineToSourcePositions.get(lineNumberString);
+    if (!positions.includes(sourcePosition)) positions.push(sourcePosition);
+  }
+
+  setSourceLineToBytecodePosition(sourceLineToBytecodePosition: Array<number> | undefined) {
+    if (!sourceLineToBytecodePosition) return;
+    sourceLineToBytecodePosition.forEach((pos, i) => {
+      this.addAnyPositionToLine(i, { bytecodePosition: pos });
+    });
+  }
+
+  linetoSourcePositions(lineNumber: number | string) {
+    const positions = this.lineToSourcePositions.get(anyToString(lineNumber));
+    if (positions === undefined) return [];
+    return positions;
+  }
+
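+  // Parses the textual schedule phase dump line by line: each line is matched
+  // against the node, block-header and goto rules below, and the first matching
+  // rule updates the parser state. Unmatched lines only emit a console warning.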
+  parseSchedule(phase) {
+    function createNode(state: any, match) {
+      let inputs = [];
+      if (match.groups.args) {
+        const nodeIdsString = match.groups.args.replace(/\s/g, '');
+        const nodeIdStrings = nodeIdsString.split(',');
+        inputs = nodeIdStrings.map(n => Number.parseInt(n, 10));
+      }
+      const node = {
+        id: Number.parseInt(match.groups.id, 10),
+        label: match.groups.label,
+        inputs: inputs
+      };
+      if (match.groups.blocks) {
+        const nodeIdsString = match.groups.blocks.replace(/\s/g, '').replace(/B/g, '');
+        const nodeIdStrings = nodeIdsString.split(',');
+        const successors = nodeIdStrings.map(n => Number.parseInt(n, 10));
+        state.currentBlock.succ = successors;
+      }
+      state.nodes[node.id] = node;
+      state.currentBlock.nodes.push(node);
+    }
+    function createBlock(state, match) {
+      let predecessors = [];
+      if (match.groups.in) {
+        const blockIdsString = match.groups.in.replace(/\s/g, '').replace(/B/g, '');
+        const blockIdStrings = blockIdsString.split(',');
+        predecessors = blockIdStrings.map(n => Number.parseInt(n, 10));
+      }
+      const block = {
+        id: Number.parseInt(match.groups.id, 10),
+        isDeferred: match.groups.deferred != undefined,
+        pred: predecessors.sort(),
+        succ: [],
+        nodes: []
+      };
+      state.blocks[block.id] = block;
+      state.currentBlock = block;
+    }
+    function setGotoSuccessor(state, match) {
+      state.currentBlock.succ = [Number.parseInt(match.groups.successor.replace(/\s/g, ''), 10)];
+    }
+    const rules = [
+      {
+        lineRegexps:
+          [/^\s*(?<id>\d+):\ (?<label>.*)\((?<args>.*)\)$/,
+            /^\s*(?<id>\d+):\ (?<label>.*)\((?<args>.*)\)\ ->\ (?<blocks>.*)$/,
+            /^\s*(?<id>\d+):\ (?<label>.*)$/
+          ],
+        process: createNode
+      },
+      {
+        lineRegexps:
+          [/^\s*---\s*BLOCK\ B(?<id>\d+)\s*(?<deferred>\(deferred\))?(\ <-\ )?(?<in>[^-]*)?\ ---$/
+          ],
+        process: createBlock
+      },
+      {
+        lineRegexps:
+          [/^\s*Goto\s*->\s*B(?<successor>\d+)\s*$/
+          ],
+        process: setGotoSuccessor
+      }
+    ];
+
+    const lines = phase.data.split(/[\n]/);
+    const state = { currentBlock: undefined, blocks: [], nodes: [] };
+
+    nextLine:
+    for (const line of lines) {
+      for (const rule of rules) {
+        for (const lineRegexp of rule.lineRegexps) {
+          const match = line.match(lineRegexp);
+          if (match) {
+            rule.process(state, match);
+            continue nextLine;
+          }
+        }
+      }
+      console.log("Warning: unmatched schedule line \"" + line + "\"");
+    }
+    phase.schedule = state;
+    return phase;
+  }
+
+  parseSequence(phase) {
+    phase.sequence = {
+      blocks: phase.blocks,
+      register_allocation: phase.register_allocation
+        ? new RegisterAllocation(phase.register_allocation)
+        : undefined
+    };
+    return phase;
+  }
+}
diff --git a/src/third_party/v8/tools/turbolizer/src/tabs.ts b/src/third_party/v8/tools/turbolizer/src/tabs.ts
new file mode 100644
index 0000000..0416b9e
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/src/tabs.ts
@@ -0,0 +1,114 @@
+
+export class Tabs {
+  private container: HTMLElement;
+  private tabBar: HTMLElement;
+  private nextTabId: number;
+
+  private mkTabBar(container: HTMLElement) {
+    container.classList.add("nav-tabs-container");
+    this.tabBar = document.createElement("ul");
+    this.tabBar.id = `tab-bar-${container.id}`;
+    this.tabBar.className = "nav-tabs";
+    this.tabBar.ondrop = this.tabBarOnDrop.bind(this);
+    this.tabBar.ondragover = this.tabBarOnDragover.bind(this);
+    this.tabBar.onclick = this.tabBarOnClick.bind(this);
+
+    const defaultDiv = document.createElement("div");
+    defaultDiv.className = "tab-content tab-default";
+    defaultDiv.id = `tab-content-${container.id}-default`;
+    container.insertBefore(defaultDiv, container.firstChild);
+    container.insertBefore(this.tabBar, container.firstChild);
+  }
+
+  constructor(container: HTMLElement) {
+    this.container = container;
+    this.nextTabId = 0;
+    this.mkTabBar(container);
+  }
+
+  activateTab(tab: HTMLLIElement) {
+    if (typeof tab.dataset.divid !== "string") return;
+    for (const li of this.tabBar.querySelectorAll<HTMLLIElement>("li.active")) {
+      li.classList.remove("active");
+      this.showTab(li, false);
+    }
+    tab.classList.add("active");
+    this.showTab(tab, true);
+  }
+
+  clearTabsAndContent() {
+    for (const tab of this.tabBar.querySelectorAll(".nav-tabs > li")) {
+      if (!(tab instanceof HTMLLIElement)) continue;
+      if (tab.classList.contains("persistent-tab")) continue;
+      const tabDiv = document.getElementById(tab.dataset.divid);
+      tabDiv.parentNode.removeChild(tabDiv);
+      tab.parentNode.removeChild(tab);
+    }
+  }
+
+  private showTab(li: HTMLElement, show: boolean = true) {
+    const tabDiv = document.getElementById(li.dataset.divid);
+    tabDiv.style.display = show ? "block" : "none";
+  }
+
+  public addTab(caption: string): HTMLLIElement {
+    const newTab = document.createElement("li");
+    newTab.innerHTML = caption;
+    newTab.id = `tab-header-${this.container.id}-${this.nextTabId++}`;
+    const lastTab = this.tabBar.querySelector("li.last-tab");
+    this.tabBar.insertBefore(newTab, lastTab);
+    return newTab;
+  }
+
+  public addTabAndContent(caption: string): [HTMLLIElement, HTMLDivElement] {
+    const contentDiv = document.createElement("div");
+    contentDiv.className = "tab-content tab-default";
+    contentDiv.id = `tab-content-${this.container.id}-${this.nextTabId++}`;
+    contentDiv.style.display = "none";
+    this.container.appendChild(contentDiv);
+
+    const newTab = this.addTab(caption);
+    newTab.dataset.divid = contentDiv.id;
+    newTab.draggable = true;
+    newTab.ondragstart = this.tabOnDragStart.bind(this);
+    const lastTab = this.tabBar.querySelector("li.last-tab");
+    this.tabBar.insertBefore(newTab, lastTab);
+    return [newTab, contentDiv];
+  }
+
+  private moveTabDiv(tab: HTMLLIElement) {
+    const tabDiv = document.getElementById(tab.dataset.divid);
+    tabDiv.style.display = "none";
+    tab.classList.remove("active");
+    this.tabBar.parentNode.appendChild(tabDiv);
+  }
+
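+  // Handles dropping a dragged tab onto this tab bar: a tab coming from another
+  // bar first has its content div moved into this container, then the tab is
+  // inserted before the drop target (or before the trailing last-tab) and
+  // activated.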
+  private tabBarOnDrop(e: DragEvent) {
+    if (!(e.target instanceof HTMLElement)) return;
+    e.preventDefault();
+    const tabId = e.dataTransfer.getData("text");
+    const tab = document.getElementById(tabId) as HTMLLIElement;
+    if (tab.parentNode != this.tabBar) {
+      this.moveTabDiv(tab);
+    }
+    const dropTab =
+      e.target.parentNode == this.tabBar
+        ? e.target : this.tabBar.querySelector("li.last-tab");
+    this.tabBar.insertBefore(tab, dropTab);
+    this.activateTab(tab);
+  }
+
+  private tabBarOnDragover(e) {
+    e.preventDefault();
+  }
+
+  private tabOnDragStart(e: DragEvent) {
+    if (!(e.target instanceof HTMLElement)) return;
+    e.dataTransfer.setData("text", e.target.id);
+  }
+
+  private tabBarOnClick(e: MouseEvent) {
+    const li = e.target as HTMLLIElement;
+    this.activateTab(li);
+  }
+}
diff --git a/src/third_party/v8/tools/turbolizer/src/text-view.ts b/src/third_party/v8/tools/turbolizer/src/text-view.ts
new file mode 100644
index 0000000..d13c070
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/src/text-view.ts
@@ -0,0 +1,250 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import { PhaseView } from "../src/view";
+import { anyToString, ViewElements, isIterable } from "../src/util";
+import { MySelection } from "../src/selection";
+import { SourceResolver } from "./source-resolver";
+import { SelectionBroker } from "./selection-broker";
+import { NodeSelectionHandler, BlockSelectionHandler } from "./selection-handler";
+
+export abstract class TextView extends PhaseView {
+  selectionHandler: NodeSelectionHandler;
+  blockSelectionHandler: BlockSelectionHandler;
+  selection: MySelection;
+  blockSelection: MySelection;
+  textListNode: HTMLUListElement;
+  nodeIdToHtmlElementsMap: Map<string, Array<HTMLElement>>;
+  blockIdToHtmlElementsMap: Map<string, Array<HTMLElement>>;
+  blockIdtoNodeIds: Map<string, Array<string>>;
+  nodeIdToBlockId: Array<string>;
+  patterns: any;
+  sourceResolver: SourceResolver;
+  broker: SelectionBroker;
+
+  constructor(id, broker) {
+    super(id);
+    const view = this;
+    view.textListNode = view.divNode.getElementsByTagName('ul')[0];
+    view.patterns = null;
+    view.nodeIdToHtmlElementsMap = new Map();
+    view.blockIdToHtmlElementsMap = new Map();
+    view.blockIdtoNodeIds = new Map();
+    view.nodeIdToBlockId = [];
+    view.selection = new MySelection(anyToString);
+    view.blockSelection = new MySelection(anyToString);
+    view.broker = broker;
+    view.sourceResolver = broker.sourceResolver;
+    const selectionHandler = {
+      clear: function () {
+        view.selection.clear();
+        view.updateSelection();
+        broker.broadcastClear(selectionHandler);
+      },
+      select: function (nodeIds, selected) {
+        view.selection.select(nodeIds, selected);
+        view.updateSelection();
+        broker.broadcastNodeSelect(selectionHandler, view.selection.selectedKeys(), selected);
+      },
+      brokeredNodeSelect: function (nodeIds, selected) {
+        const firstSelect = view.blockSelection.isEmpty();
+        view.selection.select(nodeIds, selected);
+        view.updateSelection(firstSelect);
+      },
+      brokeredClear: function () {
+        view.selection.clear();
+        view.updateSelection();
+      }
+    };
+    this.selectionHandler = selectionHandler;
+    broker.addNodeHandler(selectionHandler);
+    view.divNode.addEventListener('click', e => {
+      if (!e.shiftKey) {
+        view.selectionHandler.clear();
+      }
+      e.stopPropagation();
+    });
+    const blockSelectionHandler = {
+      clear: function () {
+        view.blockSelection.clear();
+        view.updateSelection();
+        broker.broadcastClear(blockSelectionHandler);
+      },
+      select: function (blockIds, selected) {
+        view.blockSelection.select(blockIds, selected);
+        view.updateSelection();
+        broker.broadcastBlockSelect(blockSelectionHandler, blockIds, selected);
+      },
+      brokeredBlockSelect: function (blockIds, selected) {
+        const firstSelect = view.blockSelection.isEmpty();
+        view.blockSelection.select(blockIds, selected);
+        view.updateSelection(firstSelect);
+      },
+      brokeredClear: function () {
+        view.blockSelection.clear();
+        view.updateSelection();
+      }
+    };
+    this.blockSelectionHandler = blockSelectionHandler;
+    broker.addBlockHandler(blockSelectionHandler);
+  }
+
+  addHtmlElementForNodeId(anyNodeId: any, htmlElement: HTMLElement) {
+    const nodeId = anyToString(anyNodeId);
+    if (!this.nodeIdToHtmlElementsMap.has(nodeId)) {
+      this.nodeIdToHtmlElementsMap.set(nodeId, []);
+    }
+    this.nodeIdToHtmlElementsMap.get(nodeId).push(htmlElement);
+  }
+
+  addHtmlElementForBlockId(anyBlockId, htmlElement) {
+    const blockId = anyToString(anyBlockId);
+    if (!this.blockIdToHtmlElementsMap.has(blockId)) {
+      this.blockIdToHtmlElementsMap.set(blockId, []);
+    }
+    this.blockIdToHtmlElementsMap.get(blockId).push(htmlElement);
+  }
+
+  addNodeIdToBlockId(anyNodeId, anyBlockId) {
+    const blockId = anyToString(anyBlockId);
+    if (!this.blockIdtoNodeIds.has(blockId)) {
+      this.blockIdtoNodeIds.set(blockId, []);
+    }
+    this.blockIdtoNodeIds.get(blockId).push(anyToString(anyNodeId));
+    this.nodeIdToBlockId[anyNodeId] = blockId;
+  }
+
+  blockIdsForNodeIds(nodeIds) {
+    const blockIds = [];
+    for (const nodeId of nodeIds) {
+      const blockId = this.nodeIdToBlockId[nodeId];
+      if (blockId == undefined) continue;
+      blockIds.push(blockId);
+    }
+    return blockIds;
+  }
+
+  updateSelection(scrollIntoView: boolean = false) {
+    if (this.divNode.parentNode == null) return;
+    const mkVisible = new ViewElements(this.divNode.parentNode as HTMLElement);
+    const view = this;
+    const elementsToSelect = view.divNode.querySelectorAll(`[data-pc-offset]`);
+    for (const el of elementsToSelect) {
+      el.classList.toggle("selected", false);
+    }
+    for (const [blockId, elements] of this.blockIdToHtmlElementsMap.entries()) {
+      const isSelected = view.blockSelection.isSelected(blockId);
+      for (const element of elements) {
+        mkVisible.consider(element, isSelected);
+        element.classList.toggle("selected", isSelected);
+      }
+    }
+    for (const key of this.nodeIdToHtmlElementsMap.keys()) {
+      for (const element of this.nodeIdToHtmlElementsMap.get(key)) {
+        element.classList.toggle("selected", false);
+      }
+    }
+    for (const nodeId of view.selection.selectedKeys()) {
+      const elements = this.nodeIdToHtmlElementsMap.get(nodeId);
+      if (!elements) continue;
+      for (const element of elements) {
+        mkVisible.consider(element, true);
+        element.classList.toggle("selected", true);
+      }
+    }
+    mkVisible.apply(scrollIntoView);
+  }
+
+  setPatterns(patterns) {
+    this.patterns = patterns;
+  }
+
+  clearText() {
+    while (this.textListNode.firstChild) {
+      this.textListNode.removeChild(this.textListNode.firstChild);
+    }
+  }
+
+  createFragment(text, style) {
+    const fragment = document.createElement("SPAN");
+
+    if (typeof style.associateData == 'function') {
+      if (style.associateData(text, fragment) === false) {
+         return null;
+      }
+    } else {
+      if (style.css != undefined) {
+        const css = isIterable(style.css) ? style.css : [style.css];
+        for (const cls of css) {
+          fragment.classList.add(cls);
+        }
+      }
+      fragment.innerText = text;
+    }
+
+    return fragment;
+  }
+
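+  // Tokenizes one line of text using the currently active pattern set. Each
+  // match becomes a styled fragment; a pattern's optional third element selects
+  // the next pattern set. Throws if no pattern consumes any input.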
+  processLine(line) {
+    const view = this;
+    const result = [];
+    let patternSet = 0;
+    while (true) {
+      const beforeLine = line;
+      for (const pattern of view.patterns[patternSet]) {
+        const matches = line.match(pattern[0]);
+        if (matches != null) {
+          if (matches[0] != '') {
+            const style = pattern[1] != null ? pattern[1] : {};
+            const text = matches[0];
+            if (text != '') {
+              const fragment = view.createFragment(matches[0], style);
+              if (fragment !== null) result.push(fragment);
+            }
+            line = line.substr(matches[0].length);
+          }
+          let nextPatternSet = patternSet;
+          if (pattern.length > 2) {
+            nextPatternSet = pattern[2];
+          }
+          if (line == "") {
+            if (nextPatternSet != -1) {
+              throw ("illegal parsing state in text-view in patternSet" + patternSet);
+            }
+            return result;
+          }
+          patternSet = nextPatternSet;
+          break;
+        }
+      }
+      if (beforeLine == line) {
+        throw ("input not consumed in text-view in patternSet" + patternSet);
+      }
+    }
+  }
+
+  processText(text) {
+    const view = this;
+    const textLines = text.split(/[\n]/);
+    let lineNo = 0;
+    for (const line of textLines) {
+      const li = document.createElement("LI");
+      li.className = "nolinenums";
+      li.dataset.lineNo = "" + lineNo++;
+      const fragments = view.processLine(line);
+      for (const fragment of fragments) {
+        li.appendChild(fragment);
+      }
+      view.textListNode.appendChild(li);
+    }
+  }
+
+  initializeContent(data, rememberedSelection) {
+    this.clearText();
+    this.processText(data);
+    this.show();
+  }
+
+  public onresize(): void {}
+}
diff --git a/src/third_party/v8/tools/turbolizer/src/turbo-visualizer.ts b/src/third_party/v8/tools/turbolizer/src/turbo-visualizer.ts
new file mode 100644
index 0000000..2dd01c2
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/src/turbo-visualizer.ts
@@ -0,0 +1,151 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import { SourceResolver } from "../src/source-resolver";
+import { SelectionBroker } from "../src/selection-broker";
+import { DisassemblyView } from "../src/disassembly-view";
+import { GraphMultiView } from "../src/graphmultiview";
+import { CodeMode, CodeView } from "../src/code-view";
+import { Tabs } from "../src/tabs";
+import { Resizer } from "../src/resizer";
+import * as C from "../src/constants";
+import { InfoView } from "./info-view";
+
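+// Entry point: sets up the source and generated-code tab panes, the resizer
+// between the panes, the info view, and the file upload handlers, and loads a
+// TurboFan JSON trace into the views once a file is selected.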
+window.onload = function () {
+  let multiview: GraphMultiView = null;
+  let disassemblyView: DisassemblyView = null;
+  let sourceViews: Array<CodeView> = [];
+  let selectionBroker: SelectionBroker = null;
+  let sourceResolver: SourceResolver = null;
+  const resizer = new Resizer(panesUpdatedCallback, 75, 75);
+  const sourceTabsContainer = document.getElementById(C.SOURCE_PANE_ID);
+  const sourceTabs = new Tabs(sourceTabsContainer);
+  sourceTabs.addTab("&#x2b;").classList.add("last-tab", "persistent-tab");
+  const disassemblyTabsContainer = document.getElementById(C.GENERATED_PANE_ID);
+  const disassemblyTabs = new Tabs(disassemblyTabsContainer);
+  disassemblyTabs.addTab("&#x2b;").classList.add("last-tab", "persistent-tab");
+  const [infoTab, infoContainer] = sourceTabs.addTabAndContent("Info");
+  infoTab.classList.add("persistent-tab");
+  infoContainer.classList.add("viewpane", "scrollable");
+  const infoView = new InfoView(infoContainer);
+  infoView.show();
+  sourceTabs.activateTab(infoTab);
+
+  function panesUpdatedCallback() {
+    if (multiview) multiview.onresize();
+  }
+
+  function loadFile(txtRes: string) {
+    sourceTabs.clearTabsAndContent();
+    disassemblyTabs.clearTabsAndContent();
+    // If the JSON isn't properly terminated, assume the compiler crashed and
+    // append a best-guess empty termination.
+    if (txtRes[txtRes.length - 2] == ',') {
+      txtRes += '{"name":"disassembly","type":"disassembly","data":""}]}';
+    }
+    try {
+      sourceViews.forEach(sv => sv.hide());
+      if (multiview) multiview.hide();
+      multiview = null;
+      document.getElementById("ranges").innerHTML = '';
+      document.getElementById('ranges').style.visibility = "hidden";
+      document.getElementById('show-hide-ranges').style.visibility = "hidden";
+      if (disassemblyView) disassemblyView.hide();
+      sourceViews = [];
+      sourceResolver = new SourceResolver();
+      selectionBroker = new SelectionBroker(sourceResolver);
+
+      const jsonObj = JSON.parse(txtRes);
+
+      let fnc = null;
+      // Backwards compatibility.
+      if (typeof jsonObj.function == 'string') {
+        fnc = {
+          functionName: jsonObj.function,
+          sourceId: -1,
+          startPosition: jsonObj.sourcePosition,
+          endPosition: jsonObj.sourcePosition + jsonObj.source.length,
+          sourceText: jsonObj.source,
+          backwardsCompatibility: true
+        };
+      } else {
+        fnc = Object.assign(jsonObj.function, { backwardsCompatibility: false });
+      }
+
+      sourceResolver.setInlinings(jsonObj.inlinings);
+      sourceResolver.setSourceLineToBytecodePosition(jsonObj.sourceLineToBytecodePosition);
+      sourceResolver.setSources(jsonObj.sources, fnc);
+      sourceResolver.setNodePositionMap(jsonObj.nodePositions);
+      sourceResolver.parsePhases(jsonObj.phases);
+
+      const [sourceTab, sourceContainer] = sourceTabs.addTabAndContent("Source");
+      sourceContainer.classList.add("viewpane", "scrollable");
+      sourceTabs.activateTab(sourceTab);
+      const sourceView = new CodeView(sourceContainer, selectionBroker, sourceResolver, fnc, CodeMode.MAIN_SOURCE);
+      sourceView.show();
+      sourceViews.push(sourceView);
+
+      sourceResolver.forEachSource(source => {
+        const sourceView = new CodeView(sourceContainer, selectionBroker, sourceResolver, source, CodeMode.INLINED_SOURCE);
+        sourceView.show();
+        sourceViews.push(sourceView);
+      });
+
+      const [disassemblyTab, disassemblyContainer] = disassemblyTabs.addTabAndContent("Disassembly");
+      disassemblyContainer.classList.add("viewpane", "scrollable");
+      disassemblyTabs.activateTab(disassemblyTab);
+      disassemblyView = new DisassemblyView(disassemblyContainer, selectionBroker);
+      disassemblyView.initializeCode(fnc.sourceText);
+      if (sourceResolver.disassemblyPhase) {
+        disassemblyView.initializePerfProfile(jsonObj.eventCounts);
+        disassemblyView.showContent(sourceResolver.disassemblyPhase.data);
+        disassemblyView.show();
+      }
+
+      multiview = new GraphMultiView(C.INTERMEDIATE_PANE_ID, selectionBroker, sourceResolver);
+      multiview.show();
+    } catch (err) {
+      if (window.confirm("Error: Exception during load of TurboFan JSON file:\n" +
+        "error: " + err.message + "\nDo you want to clear session storage?")) {
+        window.sessionStorage.clear();
+      }
+      return;
+    }
+  }
+
+  function initializeUploadHandlers() {
+    // The file <input> element #upload-helper can't be styled as a picture,
+    // so we keep it hidden and forward clicks from the picture button
+    // #upload instead.
+    document.getElementById("upload").addEventListener("click", e => {
+      document.getElementById("upload-helper").click();
+      e.stopPropagation();
+    });
+    document.getElementById("upload-helper").addEventListener("change",
+      function (this: HTMLInputElement) {
+        const uploadFile = this.files && this.files[0];
+        if (uploadFile) {
+          const filereader = new FileReader();
+          filereader.onload = () => {
+            const txtRes = filereader.result;
+            if (typeof txtRes == 'string') {
+              loadFile(txtRes);
+            }
+          };
+          filereader.readAsText(uploadFile);
+        }
+      }
+    );
+    window.addEventListener("keydown", (e: KeyboardEvent) => {
+      if (e.keyCode == 76 && e.ctrlKey) { // CTRL + L
+        document.getElementById("upload-helper").click();
+        e.stopPropagation();
+        e.preventDefault();
+      }
+    });
+  }
+
+  initializeUploadHandlers();
+  resizer.updatePanes();
+};
diff --git a/src/third_party/v8/tools/turbolizer/src/util.ts b/src/third_party/v8/tools/turbolizer/src/util.ts
new file mode 100644
index 0000000..8d2fc84
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/src/util.ts
@@ -0,0 +1,100 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export function anyToString(x: any): string {
+  return "" + x;
+}
+
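+// Computes a scrollTop for `container` that keeps `element` roughly a quarter
+// of the container height away from the top and bottom edges.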
+function computeScrollTop(container, element) {
+  const height = container.offsetHeight;
+  const margin = Math.floor(height / 4);
+  const pos = element.offsetTop;
+  const currentScrollTop = container.scrollTop;
+  if (pos < currentScrollTop + margin) {
+    return Math.max(0, pos - margin);
+  } else if (pos > (currentScrollTop + 3 * margin)) {
+    return Math.max(0, pos - 3 * margin);
+  }
+  return pos;
+}
+
+export class ViewElements {
+  container: HTMLElement;
+  scrollTop: number;
+
+  constructor(container: HTMLElement) {
+    this.container = container;
+    this.scrollTop = undefined;
+  }
+
+  consider(element, doConsider) {
+    if (!doConsider) return;
+    const newScrollTop = computeScrollTop(this.container, element);
+    if (isNaN(newScrollTop)) {
+      console.log("NOO");
+    }
+    if (this.scrollTop === undefined) {
+      this.scrollTop = newScrollTop;
+    } else {
+      this.scrollTop = Math.min(this.scrollTop, newScrollTop);
+    }
+  }
+
+  apply(doApply) {
+    if (!doApply || this.scrollTop === undefined) return;
+    this.container.scrollTop = this.scrollTop;
+  }
+}
+
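+// Sorts `arr` in place with comparator `f` and returns a new array with
+// adjacent duplicates (as determined by `equal`) removed.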
+export function sortUnique<T>(arr: Array<T>, f: (a: T, b: T) => number, equal: (a: T, b: T) => boolean) {
+  if (arr.length == 0) return arr;
+  arr = arr.sort(f);
+  const ret = [arr[0]];
+  for (let i = 1; i < arr.length; i++) {
+    if (!equal(arr[i - 1], arr[i])) {
+      ret.push(arr[i]);
+    }
+  }
+  return ret;
+}
+
+// Partial application without binding the receiver
+export function partial(f: any, ...arguments1: Array<any>) {
+  return function (this: any, ...arguments2: Array<any>) {
+    f.apply(this, [...arguments1, ...arguments2]);
+  };
+}
+
+export function isIterable(obj: any): obj is Iterable<any> {
+  return obj != null && obj != undefined
+    && typeof obj != 'string' && typeof obj[Symbol.iterator] === 'function';
+}
+
+export function alignUp(raw: number, multiple: number): number {
+  return Math.floor((raw + multiple - 1) / multiple) * multiple;
+}
+
+export function measureText(text: string) {
+  const textMeasure = document.getElementById('text-measure');
+  if (textMeasure instanceof SVGTSpanElement) {
+    textMeasure.textContent = text;
+    return {
+      width: textMeasure.getBBox().width,
+      height: textMeasure.getBBox().height,
+    };
+  }
+  return { width: 0, height: 0 };
+}
+
+// Interpolate between the given start and end values by a fraction of val/max.
+export function interpolate(val: number, max: number, start: number, end: number) {
+  return start + (end - start) * (val / max);
+}
+
+export function createElement(tag: string, cls: string, content?: string) {
+  const el = document.createElement(tag);
+  el.className = cls;
+  if (content != undefined) el.innerText = content;
+  return el;
+}
diff --git a/src/third_party/v8/tools/turbolizer/src/view.ts b/src/third_party/v8/tools/turbolizer/src/view.ts
new file mode 100644
index 0000000..d93eeed
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/src/view.ts
@@ -0,0 +1,33 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export abstract class View {
+  protected container: HTMLElement;
+  protected divNode: HTMLElement;
+  protected abstract createViewElement(): HTMLElement;
+
+  constructor(idOrContainer: string | HTMLElement) {
+    this.container = typeof idOrContainer == "string" ? document.getElementById(idOrContainer) : idOrContainer;
+    this.divNode = this.createViewElement();
+  }
+
+  public show(): void {
+    this.container.appendChild(this.divNode);
+  }
+
+  public hide(): void {
+    this.container.removeChild(this.divNode);
+  }
+}
+
+export abstract class PhaseView extends View {
+  public abstract initializeContent(data: any, rememberedSelection: Set<any>): void;
+  public abstract detachSelection(): Set<string>;
+  public abstract onresize(): void;
+  public abstract searchInputAction(searchInput: HTMLInputElement, e: Event, onlyVisible: boolean): void;
+
+  constructor(idOrContainer: string | HTMLElement) {
+    super(idOrContainer);
+  }
+}
diff --git a/src/third_party/v8/tools/turbolizer/tabs.css b/src/third_party/v8/tools/turbolizer/tabs.css
new file mode 100644
index 0000000..b3958bf
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/tabs.css
@@ -0,0 +1,57 @@
+.content {
+    display: grid;
+    grid-template-areas:
+        "tabs"
+        "window";
+    grid-template-columns: 1fr;
+    grid-template-rows: auto 1fr;
+    min-height: calc(100vh);
+}
+
+.nav-tabs-container {
+    grid-area: tabs;
+    padding: 0px;
+    background-color: #999999;
+    border-bottom: 4px solid #CCCCCC;
+}
+
+.tab-content {
+    grid-area: window;
+    background-color: white;
+    padding: 0px;
+    display:none;
+}
+
+.tab-content.tab-default {
+    display: block;
+}
+
+ul.nav-tabs {
+    padding: 0px;
+    margin: 0px;
+    overflow: auto;
+    display: table-row;
+    min-height: 2ex;
+}
+
+.nav-tabs li {
+    display: inline-block;
+    padding-left: 10px;
+    padding-right: 10px;
+    padding-top: 4px;
+    padding-bottom: 4px;
+    min-width: 20px;
+    text-decoration: none;
+    color: black;
+    text-align: center;
+    user-select: none;
+    cursor: pointer;
+}
+
+.nav-tabs li:hover {
+    background-color: #EEEEEE;
+}
+
+.nav-tabs li.active {
+    background-color: #CCCCCC;
+}
\ No newline at end of file
diff --git a/src/third_party/v8/tools/turbolizer/test/source-resolver-test.ts b/src/third_party/v8/tools/turbolizer/test/source-resolver-test.ts
new file mode 100644
index 0000000..38d6745
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/test/source-resolver-test.ts
@@ -0,0 +1,10 @@
+import { SourceResolver } from '../src/source-resolver';
+import { expect } from 'chai';
+import { describe, it } from 'mocha';
+
+describe('SourceResolver', () => {
+  it('should be constructible', () => {
+    const a: SourceResolver = new SourceResolver();
+    expect(a.sources.length).to.equal(0);
+  });
+});
diff --git a/src/third_party/v8/tools/turbolizer/tsconfig.json b/src/third_party/v8/tools/turbolizer/tsconfig.json
new file mode 100644
index 0000000..cd036ac
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/tsconfig.json
@@ -0,0 +1,39 @@
+{
+  "compilerOptions": {
+    "outDir": "build/",
+    "allowJs": false,
+    "target": "es2018",
+    "module": "es2015",
+    "sourceMap": true,
+    "experimentalDecorators": true,
+    "emitDecoratorMetadata": true,
+    "moduleResolution": "node",
+    "noUnusedLocals": true,
+    "noImplicitReturns": true,
+    "noImplicitThis": true,
+    "lib": ["dom", "es6", "dom.iterable", "scripthost", "es2018"]
+  },
+  "files": [
+    "src/util.ts",
+    "src/node.ts",
+    "src/edge.ts",
+    "src/graph.ts",
+    "src/node-label.ts",
+    "src/source-resolver.ts",
+    "src/selection.ts",
+    "src/selection-broker.ts",
+    "src/selection-handler.ts",
+    "src/constants.ts",
+    "src/view.ts",
+    "src/text-view.ts",
+    "src/code-view.ts",
+    "src/graph-layout.ts",
+    "src/graph-view.ts",
+    "src/schedule-view.ts",
+    "src/disassembly-view.ts",
+    "src/graphmultiview.ts",
+    "src/turbo-visualizer.ts",
+    "src/resizer.ts",
+    "src/info-view.ts"
+  ]
+}
diff --git a/src/third_party/v8/tools/turbolizer/tsconfig.test.json b/src/third_party/v8/tools/turbolizer/tsconfig.test.json
new file mode 100644
index 0000000..1b7a591
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/tsconfig.test.json
@@ -0,0 +1,6 @@
+{
+    "extends": "./tsconfig.json",
+    "compilerOptions": {
+        "module": "commonjs"
+    }
+}
diff --git a/src/third_party/v8/tools/turbolizer/tsfmt.json b/src/third_party/v8/tools/turbolizer/tsfmt.json
new file mode 100644
index 0000000..2ff95b8
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/tsfmt.json
@@ -0,0 +1,16 @@
+{
+  "tabSize": 2,
+  "indentSize": 2,
+  "convertTabsToSpaces": true,
+  "insertSpaceAfterCommaDelimiter": true,
+  "insertSpaceAfterSemicolonInForStatements": true,
+  "insertSpaceBeforeAndAfterBinaryOperators": true,
+  "insertSpaceAfterKeywordsInControlFlowStatements": true,
+  "insertSpaceAfterFunctionKeywordForAnonymousFunctions": true,
+  "insertSpaceAfterOpeningAndBeforeClosingNonemptyParenthesis": false,
+  "insertSpaceAfterOpeningAndBeforeClosingNonemptyBrackets": false,
+  "insertSpaceAfterOpeningAndBeforeClosingTemplateStringBraces": false,
+  "insertSpaceBeforeFunctionParenthesis": false,
+  "placeOpenBraceOnNewLineForFunctions": false,
+  "placeOpenBraceOnNewLineForControlBlocks": false
+}
\ No newline at end of file
diff --git a/src/third_party/v8/tools/turbolizer/tslint.json b/src/third_party/v8/tools/turbolizer/tslint.json
new file mode 100644
index 0000000..e07e057
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/tslint.json
@@ -0,0 +1,45 @@
+{
+  "defaultSeverity": "error",
+  "extends": "tslint:recommended",
+  "jsRules": {},
+  "rules": {
+    "curly": [true, "ignore-same-line"],
+    "quotemark": [false, "double", "avoid-escape", "avoid-template"],
+    "only-arrow-functions": [false],
+    "no-var-keyword": true,
+    "prefer-const": [true],
+    "max-line-length": [false, {
+      "limit": 80
+    }],
+    "ordered-imports": false,
+    "array-type": [true, "generic"],
+    "semicolon": true,
+    "member-access": false,
+    "object-literal-shorthand": false,
+    "object-literal-key-quotes": [true, "as-needed"],
+    "object-literal-sort-keys": false,
+    "space-before-function-paren": [true, {
+      "anonymous": "always"
+    }],
+    "triple-equals": false,
+    "no-string-throw": false,
+    "no-empty": [true, "allow-empty-catch", "allow-empty-functions"],
+    "trailing-comma": false,
+    "member-ordering": false,
+    "no-string-literal": false,
+    "arrow-parens": [true, "ban-single-arg-parens"],
+    "no-console": false,
+    "interface-name": false,
+    "no-bitwise": false,
+    "no-shadowed-variable": false,
+    "prefer-for-of": true,
+    "align": true,
+    "arrow-return-shorthand": true,
+    "max-classes-per-file": false,
+    "variable-name": true,
+    "forin": false,
+    "one-variable-per-declaration": true,
+    "no-consecutive-blank-lines": true
+  },
+  "rulesDirectory": []
+}
diff --git a/src/third_party/v8/tools/turbolizer/turbo-visualizer-ranges.css b/src/third_party/v8/tools/turbolizer/turbo-visualizer-ranges.css
new file mode 100644
index 0000000..03976e2
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/turbo-visualizer-ranges.css
@@ -0,0 +1,238 @@
+/* CSS specific to the live ranges div associated with
+   the RangeView typescript class in src/range-view.ts. */
+
+:root {
+  --range-y-axis-width: 18ch;
+  --range-position-width: 3.5ch;
+  --range-block-border: 6px;
+  --range-instr-border: 3px;
+  --range-position-border: 1px;
+}
+
+.range-bold {
+  font-weight: bold;
+  color: black;
+}
+
+#ranges {
+  font-family: monospace;
+  min-height: auto;
+  overflow: hidden;
+}
+
+#resizer-ranges {
+  height: 10px;
+}
+
+.range-title-div {
+  padding: 2ch 2ch 2ch 2ch;
+  white-space: nowrap;
+  overflow: auto;
+}
+
+.range-title {
+  text-decoration: underline;
+  font-weight: bold;
+  font-size: large;
+  display: inline-block;
+}
+
+.range-title-help {
+  margin-left: 2ch;
+  width: 1ch;
+  padding: 0 0.25ch;
+  border: 1px dotted black;
+  color: slategray;
+  display: inline-block;
+}
+
+input.range-toggle-show {
+  vertical-align: middle;
+}
+
+.range-header-label-x {
+  text-align: center;
+  margin-left: 13ch;
+}
+
+.range-header-label-y {
+  width: 11ch;
+  float: left;
+  white-space: pre-wrap;
+  word-wrap: break-word;
+  margin-left: 6ch;
+  margin-top: 4ch;
+}
+
+.range-y-axis {
+  display: inline-block;
+  width: var(--range-y-axis-width);
+  overflow: hidden;
+  white-space: nowrap;
+  vertical-align: top;
+}
+
+.range-header {
+  display: flex;
+  overflow: hidden;
+  height: 8ch;
+  margin-left: var(--range-y-axis-width);
+}
+
+.range-position-labels,
+.range-register-labels {
+  background-color: lightgray;
+}
+
+.range-register-labels {
+  float: right;
+}
+
+.range-position-labels {
+  margin-top: auto;
+}
+
+.range-registers {
+  float: right;
+  overflow: hidden;
+  text-align: right;
+}
+
+.range-positions-header,
+.range-instruction-ids,
+.range-block-ids {
+  overflow: hidden;
+  white-space: nowrap;
+  display: grid;
+  grid-gap: 0;
+}
+
+.range-reg {
+  width: 13ch;
+  text-align: right;
+}
+
+.range-reg::after {
+  content: ":";
+}
+
+.range-grid {
+  overflow: auto;
+  display: inline-block;
+  white-space: nowrap;
+}
+
+.range-block-id {
+  display: inline-block;
+  text-align: center;
+}
+
+.range-instruction-id {
+  display: inline-block;
+  text-align: center;
+}
+
+.range-position {
+  display: inline-block;
+  text-align: center;
+  z-index: 1;
+}
+
+.range-transparent,
+.range-position.range-empty {
+  color: transparent;
+}
+
+.range-block-id:hover,
+.range-instruction-id:hover,
+.range-reg:hover,
+.range-position:hover {
+  background-color: rgba(0, 0, 255, 0.10);
+}
+
+.range-position.range-header-element {
+  border-bottom: 2px solid rgb(109, 107, 107);
+}
+
+.range-block-id,
+.range-instruction-id,
+.range-reg,
+.range-interval,
+.range-position {
+  position: relative;
+  border: var(--range-position-border) solid rgb(109, 107, 107);
+}
+
+.range-block-id,
+.range-instruction-id,
+.range-interval,
+.range-position {
+  border-left: 0;
+}
+
+.range-block-ids > .range-block-id:first-child,
+.range-instruction-ids > .range-instruction-id:first-child,
+.range-positions > .range-position:first-child {
+  border-left: var(--range-position-border) solid rgb(109, 107, 107);
+}
+
+.range-position.range-interval-position {
+  border: none;
+}
+
+.range-interval-text {
+  position: absolute;
+  padding-left: 0.5ch;
+  z-index: 2;
+  pointer-events: none;
+}
+
+.range-position.range-use {
+  border-left: var(--range-instr-border) solid red;
+}
+
+.range-block-border,
+.range-block-border.range-position.range-interval-position:last-child {
+  border-right: var(--range-block-border) solid rgb(109, 107, 107);
+}
+
+.range-block-border.range-position.range-interval-position {
+  border-right: var(--range-block-border) solid transparent;
+}
+
+.range-instr-border,
+.range-instr-border.range-position.range-interval-position:last-child {
+  border-right: var(--range-instr-border) solid rgb(109, 107, 107);
+}
+
+.range-instr-border.range-position.range-interval-position {
+  border-right: var(--range-instr-border) solid transparent;
+}
+
+.range,
+.range-interval,
+.range-interval-wrapper,
+.range-positions {
+  white-space: nowrap;
+  display: inline-block;
+}
+
+.range-interval-wrapper,
+.range-positions {
+  display: grid;
+  grid-gap: 0;
+}
+
+.range-interval {
+  background-color: rgb(153, 158, 168);
+}
+
+.range-hidden {
+  display: none !important;
+}
+
+.range-positions-placeholder {
+  width: 100%;
+  border: var(--range-position-border) solid transparent;
+  color: transparent;
+}
\ No newline at end of file
diff --git a/src/third_party/v8/tools/turbolizer/turbo-visualizer.css b/src/third_party/v8/tools/turbolizer/turbo-visualizer.css
new file mode 100644
index 0000000..c7da769
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/turbo-visualizer.css
@@ -0,0 +1,757 @@
+.visible-transition {
+  transition-delay: 0s;
+  transition-duration: 1s;
+  transition-property: all;
+  transition-timing-function: ease;
+}
+
+.show-hide-pane {
+  background: #A0A0A0;
+  bottom: 0;
+  position: absolute;
+  margin-bottom: 0.5em;
+  margin-right: 0.5em;
+  margin-left: 0.5em;
+  border-radius: 5px;
+  padding: 0.5em;
+  z-index: 20;
+  opacity: 0.7;
+  cursor: pointer;
+}
+
+.search-input {
+  vertical-align: middle;
+  width: 145px;
+  opacity: 1;
+  box-sizing: border-box;
+  height: 1.5em;
+}
+
+#phase-select {
+  box-sizing: border-box;
+  height: 1.5em;
+}
+
+#search-only-visible {
+  vertical-align: middle;
+}
+
+.button-input {
+  vertical-align: middle;
+  width: 24px;
+  opacity: 0.4;
+  cursor: pointer;
+}
+
+.button-input-toggled {
+  border-radius: 5px;
+  background-color: #505050;
+}
+
+.button-input:focus {
+  outline: none;
+}
+
+.invisible {
+  display: none;
+}
+
+.selected {
+  background-color: #FFFF33;
+}
+
+.selected.block,
+.selected.block-id,
+.selected.schedule-block {
+  background-color: #AAFFAA;
+}
+
+ol.linenums {
+  -webkit-padding-start: 8px;
+}
+
+.line-number {
+  display: inline-block;
+  min-width: 3ex;
+  text-align: right;
+  color: #444444;
+  margin-right: 0.5ex;
+  padding-right: 0.5ex;
+  background: #EEEEEE;
+  /* font-size: 80%; */
+  user-select: none;
+  height: 120%;
+}
+
+.line-number:hover {
+  background-color: #CCCCCC;
+}
+
+.prettyprint ol.linenums>li.selected {
+  background-color: #FFFF33 !important;
+}
+
+li.selected .line-number {
+  background-color: #FFFF33;
+}
+
+.prettyprint ol.linenums>li {
+  list-style-type: decimal;
+  display: block;
+}
+
+.source-container {
+  border-bottom: 2px solid #AAAAAA;
+}
+
+.code-header {
+  background-color: #CCCCCC;
+  padding-left: 1em;
+  padding-right: 1em;
+  padding-top: 1ex;
+  padding-bottom: 1ex;
+  font-family: monospace;
+  user-select: none;
+}
+
+.main-source .code-header {
+  border-top: 2px solid #AAAAAA;
+  font-weight: bold;
+}
+
+.code-header .code-file-function {
+  font-family: monospace;
+  float: left;
+  user-select: text;
+}
+
+.code-header .code-mode {
+  float: right;
+  font-family: sans-serif;
+  font-size: small;
+}
+
+.info-container {
+  font-family: sans-serif;
+  font-size: small;
+}
+
+.info-topic {
+  border: 1px solid lightgray;
+  margin: 2px;
+}
+
+.info-topic-header {
+  background-color: lightgray;
+  padding: 1px;
+}
+
+.info-topic-content {
+  padding: 2px;
+}
+
+
+html,
+body {
+  margin: 0;
+  padding: 0;
+  overflow: hidden;
+  display: flex;
+  flex-direction: row;
+  width: 100vw;
+}
+
+p {
+  text-align: center;
+  overflow: overlay;
+  position: relative;
+}
+
+marker {
+  fill: #080808;
+}
+
+g rect {
+  fill: #F0F0F0;
+  stroke: #080808;
+  stroke-width: 2px;
+}
+
+g.dead {
+  opacity: .5;
+}
+
+g.unsorted rect {
+  opacity: 0.5;
+}
+
+div.scrollable {
+  overflow-y: auto;
+  overflow-x: hidden;
+}
+
+g.turbonode[relToHover="input"] rect {
+  stroke: #67e62c;
+  stroke-width: 16px;
+}
+
+g.turbonode[relToHover="output"] rect {
+  stroke: #d23b14;
+  stroke-width: 16px;
+}
+
+path[relToHover="input"] {
+  stroke: #67e62c;
+  stroke-width: 16px;
+}
+
+path[relToHover="output"] {
+  stroke: #d23b14;
+  stroke-width: 16px;
+}
+
+
+g.turbonode:hover rect {
+  stroke: #000000;
+  stroke-width: 7px;
+}
+
+g.control rect {
+  fill: #EFCC00;
+  stroke: #080808;
+  stroke-width: 5px;
+}
+
+g.javascript rect {
+  fill: #DD7E6B;
+}
+
+g.simplified rect {
+  fill: #3C78D8;
+}
+
+g.machine rect {
+  fill: #6AA84F;
+}
+
+g.input rect {
+  fill: #CFE2F3;
+}
+
+g.selected rect {
+  fill: #FFFF33;
+}
+
+circle.bubbleStyle {
+  fill: #080808;
+  fill-opacity: 0.0;
+  stroke: #080808;
+  stroke-width: 2px;
+}
+
+circle.bubbleStyle:hover {
+  stroke-width: 3px;
+}
+
+circle.filledBubbleStyle {
+  fill: #080808;
+  stroke: #080808;
+  stroke-width: 2px;
+}
+
+circle.filledBubbleStyle:hover {
+  fill: #080808;
+  stroke-width: 3px;
+}
+
+circle.halfFilledBubbleStyle {
+  fill: #808080;
+  stroke: #101010;
+  stroke-width: 2px;
+}
+
+circle.halfFilledBubbleStyle:hover {
+  fill: #808080;
+  stroke-width: 3px;
+}
+
+path {
+  fill: none;
+  stroke: #080808;
+  stroke-width: 4px;
+  cursor: default;
+}
+
+path:hover {
+  stroke-width: 6px;
+}
+
+path.hidden {
+  fill: none;
+  stroke-width: 0;
+}
+
+path.link.selected {
+  stroke: #FFFF33;
+}
+
+pre.prettyprint {
+  border: none !important;
+  padding: 0px;
+}
+
+li.L1,
+li.L3,
+li.L5,
+li.L7,
+li.L9 {
+  background: none !important
+}
+
+li.nolinenums {
+  list-style-type: none;
+}
+
+ul.noindent {
+  -webkit-padding-start: 0px;
+  -webkit-margin-before: 0px;
+  -webkit-margin-after: 0px;
+}
+
+input:hover,
+.show-hide-pane:hover input {
+  opacity: 1;
+  cursor: pointer;
+}
+
+.linkable-text {
+  text-decoration: underline;
+}
+
+.linkable-text:hover {
+  cursor: pointer;
+  font-weight: bold;
+}
+
+
+#left {
+  user-select: none;
+}
+
+#middle {
+  background-color: #F8F8F8;
+  user-select: none;
+  flex: 1;
+  z-index: 7;
+}
+
+#middle.display-inline-flex,
+#middle.display-inline-flex #multiview,
+#middle.display-inline-flex #ranges {
+  display: inline-flex;
+}
+
+.viewpane {
+  height: 100vh;
+  background-color: #FFFFFF;
+  display: flex;
+  flex-direction: column;
+}
+
+#show-hide-disassembly {
+  right: 0;
+}
+
+#show-hide-source {
+  left: 0;
+}
+
+#graph {
+  width: 100%;
+  height: 100%;
+}
+
+.graph-toolbox {
+  position: relative;
+  border-bottom: 2px solid #eee8d5;
+  z-index: 5;
+  background: rgba(100%, 100%, 100%);
+  box-sizing: border-box;
+  padding: 3px;
+  overflow-x: hidden;
+}
+
+.disassembly-toolbox {
+  position: relative;
+  padding-bottom: 3px;
+  z-index: 5;
+  background: rgba(100%, 100%, 100%, 0.7);
+  padding-top: 3px;
+  box-sizing: border-box;
+  margin-left: 4px;
+  margin-right: 4px;
+}
+
+#load-file {
+  position: absolute;
+  top: 0;
+  right: 0;
+  margin-top: 0.5em;
+  margin-right: 0.5em;
+  z-index: 20;
+  opacity: 0.7;
+}
+
+#load-file input {
+  background: #A0A0A0;
+  border-radius: 5px;
+  padding: 0.5em;
+}
+
+#upload-helper {
+  display: none;
+}
+
+.prof {
+  cursor: default;
+}
+
+tspan {
+  font-size: 500%;
+  font-family: sans-serif;
+}
+
+text {
+  dominant-baseline: text-before-edge;
+}
+
+.tab-content {
+  z-index: 6;
+}
+
+.resizer {
+  z-index: 10;
+  width: 10px;
+  height: 100vh;
+  background: #a0a0a0;
+  cursor: pointer;
+}
+
+.resizer:hover,
+.resizer.dragged {
+  background: orange;
+}
+
+.source-position {
+  /* border-left: 1px solid #FF3333; */
+  width: 0;
+  display: inline-block;
+}
+
+.source-position .inlining-marker {
+  content: "";
+  position: relative;
+  display: inline-block;
+  top: -0.5ex;
+  margin-left: -4px;
+  margin-right: -4px;
+  border-width: 5px;
+  border-style: solid;
+  border-color: #555 transparent transparent transparent;
+}
+
+.source-position .marker {
+  content: "";
+  display: inline-block;
+  bottom: -1ex;
+  width: 0px;
+  margin-left: -4px;
+  margin-right: -4px;
+  margin-bottom: -1ex;
+  border-width: 5px;
+  border-style: solid;
+  border-color: transparent transparent #555 transparent;
+}
+
+.source-position.selected .marker {
+  border-color: transparent transparent #F00 transparent;
+}
+
+.source-position .inlining-marker:hover {
+  border-color: transparent transparent #AA5 transparent;
+}
+
+.source-position .inlining-marker[data-descr]:hover::after {
+  content: attr(data-descr);
+  position: absolute;
+  font-size: 10px;
+  z-index: 1;
+  background-color: #555;
+  color: #fff;
+  text-align: center;
+  border-radius: 6px;
+  padding: 6px;
+  top: 6px;
+  left: 50%;
+  margin-left: -80px;
+}
+
+#sequence {
+  font-family: monospace;
+}
+
+#schedule {
+  font-family: monospace;
+}
+
+.schedule-block {
+  margin: 5px;
+  background-color: white;
+  padding-left: 5px;
+}
+
+.schedule-block .block-id {
+  display: inline-block;
+  font-size: large;
+  text-decoration: underline;
+  padding-left: 1ex;
+}
+
+.schedule-block .block-id:hover {
+  font-weight: bold;
+}
+
+.schedule-block>.block-id::before {
+  content: "Block B";
+}
+
+.schedule-block.deferred>.block-id::after {
+  content: " (deferred)";
+}
+
+.schedule-block .block-list {
+  display: inline-block;
+}
+
+.schedule-block .block-list * {
+  display: inline-block;
+}
+
+.schedule-block .block-list .block-id {
+  padding-left: 1ex;
+}
+
+.schedule-block .block-list .block-id:before {
+  content: "B";
+}
+
+.schedule-block .predecessor-list::before {
+  display: inline-block;
+  content: "  \2B05  ";
+  padding-left: 1ex;
+  padding-right: 1ex;
+}
+
+.schedule-block .successor-list::before {
+  display: inline-block;
+  content: "  \2B95  ";
+  padding-left: 1ex;
+  padding-right: 1ex;
+}
+
+.schedule-block .nodes .node * {
+  display: inline-block;
+}
+
+.schedule-block .nodes .node .node-id {
+  padding-right: 1ex;
+  min-width: 5ex;
+  text-align: right;
+}
+
+.schedule-block .nodes .node .node-id:after {
+  content: ":";
+}
+
+.schedule-block .nodes .node .node-label {
+  user-select: text;
+}
+
+.schedule-block .nodes .node .parameter-list:before {
+  content: "(";
+}
+
+.schedule-block .nodes .node .parameter-list:after {
+  content: ")";
+}
+
+.schedule-block .instr-marker {
+  padding-right: .5ex;
+  padding-left: .5ex;
+  min-width: 1em;
+  background: #EEEEEE;
+  /* display: none; */
+}
+
+.schedule-block>.instr-marker {
+  display: inline;
+}
+
+.instruction * {
+  padding-right: .5ex;
+}
+
+.instruction span {
+  padding-right: 0;
+}
+
+.phi-label,
+.instruction-id {
+  display: inline-block;
+  padding-right: .5ex;
+  padding-left: .5ex;
+  min-width: 1ex;
+  vertical-align: top;
+}
+
+.instruction-id:after {
+  content: ":";
+}
+
+.instruction-node,
+.gap,
+.instruction {
+  display: block;
+}
+
+.phi-contents,
+.instruction-contents,
+.gap *,
+.instruction * {
+  display: inline-block;
+}
+
+.phi * {
+  padding-right: 1ex;
+  display: inline-block;
+}
+
+.phi span {
+  padding-right: 0;
+}
+
+.gap .gap-move {
+  padding-left: .5ex;
+  padding-right: .5ex;
+}
+
+.gap>*:before {
+  content: "(";
+}
+
+.gap>*:after {
+  content: ")";
+}
+
+.virtual-reg {
+  outline: 1px dotted blue;
+}
+
+.parameter.constant {
+  outline: 1px dotted red;
+}
+
+.clickable:hover {
+  text-decoration: underline;
+}
+
+.clickable:hover {
+  font-weight: bold;
+}
+
+.comma-sep-list>* {
+  padding-right: 1ex;
+}
+
+.comma-sep-list>*:after {
+  content: ",";
+}
+
+.comma-sep-list>*:last-child:after {
+  content: "";
+}
+
+.comma-sep-list>*:last-child {
+  padding-right: 0ex;
+}
+
+.temps:before {
+  content: "temps: ";
+}
+
+.temps {
+  padding-left: .5ex;
+  outline: 1px dotted grey;
+}
+
+.last-tab {
+  display: none !important;
+}
+
+ul.disassembly-list .block-id {
+  width: 4ex;
+  display: block;
+  padding-top: 2px;
+}
+
+div.highlight-gap-instructions [data-instruction-kind="gap"]+span+span {
+  background-color: #FAEEEE;
+}
+
+div.highlight-gap-instructions [data-instruction-kind="arch"]+span+span {
+  background-color: #EEFFEE;
+}
+
+div.highlight-gap-instructions [data-instruction-kind="condition"]+span+span {
+  background-color: #FFFFEE;
+}
+
+div.highlight-gap-instructions [data-instruction-kind="gap"] {
+  background-color: #FAEEEE;
+}
+
+div.highlight-gap-instructions [data-instruction-kind="arch"] {
+  background-color: #EEFFEE;
+}
+
+div.highlight-gap-instructions [data-instruction-kind="condition"] {
+  background-color: #FFFFEE;
+}
+
+div.highlight-gap-instructions [data-instruction-kind="deopt-check"] {
+  background-color: #FAEEFA;
+}
+
+div.highlight-gap-instructions [data-instruction-kind="init-poison"] {
+  background-color: #EEFFAA;
+}
+
+div.highlight-gap-instructions [data-instruction-kind="pools"] {
+  background-color: #6AA84F;
+}
+
+div.highlight-gap-instructions [data-instruction-kind="code-start-register"] {
+  background-color: #FFCCCC;
+}
+
+div.highlight-gap-instructions [data-instruction-kind="deoptimization-exits"] {
+  background-color: #CCCCFF;
+}
+
+[data-instruction-kind].selected {
+  background-color: yellow;
+}
+
+div.highlight-gap-instructions [data-instruction-kind].selected {
+  background-color: yellow;
+}
diff --git a/src/third_party/v8/tools/turbolizer/turbolizer.png b/src/third_party/v8/tools/turbolizer/turbolizer.png
new file mode 100644
index 0000000..1af1a49
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/turbolizer.png
Binary files differ
diff --git a/src/third_party/v8/tools/turbolizer/up-arrow.png b/src/third_party/v8/tools/turbolizer/up-arrow.png
new file mode 100644
index 0000000..68cb14e
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/up-arrow.png
Binary files differ
diff --git a/src/third_party/v8/tools/turbolizer/upload-icon.png b/src/third_party/v8/tools/turbolizer/upload-icon.png
new file mode 100644
index 0000000..c1a289b
--- /dev/null
+++ b/src/third_party/v8/tools/turbolizer/upload-icon.png
Binary files differ
diff --git a/src/third_party/v8/tools/ubsan/blacklist.txt b/src/third_party/v8/tools/ubsan/blacklist.txt
new file mode 100644
index 0000000..ea4e79b
--- /dev/null
+++ b/src/third_party/v8/tools/ubsan/blacklist.txt
@@ -0,0 +1,17 @@
+#############################################################################
+# UBSan blacklist.
+
+# Bug 8735: PropertyCallbackInfo<void> vs PropertyCallbackInfo<T>.
+fun:*v8*internal*PropertyCallbackArguments*CallAccessorSetter*
+fun:*v8*internal*PropertyCallbackArguments*BasicCallNamedGetterCallback*
+fun:*v8*internal*InvokeAccessorGetterCallback*
+
+# Bug 8735: WeakCallbackInfo<void> vs. WeakCallbackInfo<T>.
+fun:*v8*internal*GlobalHandles*PendingPhantomCallback*Invoke*
+fun:*v8*internal*GlobalHandles*Node*PostGarbageCollectionProcessing*
+
+# Simulators casting C++ functions to a generic signature.
+fun:*v8*internal*UnsafeDirectApiCall*
+fun:*v8*internal*UnsafeDirectGetterCall*
+fun:*v8*internal*UnsafeGenericFunctionCall*
+fun:*v8*internal*UnsafeProfilingApiCall*
diff --git a/src/third_party/v8/tools/ubsan/vptr_blacklist.txt b/src/third_party/v8/tools/ubsan/vptr_blacklist.txt
new file mode 100644
index 0000000..ccad5b1
--- /dev/null
+++ b/src/third_party/v8/tools/ubsan/vptr_blacklist.txt
@@ -0,0 +1,12 @@
+#############################################################################
+# UBSan vptr blacklist.
+# Function- and type-based blacklisting use mangled names, and C++ types are
+# especially tricky to represent that way. For now, any possible variations in
+# name mangling are simply covered by regexp wildcard expressions, which may
+# over-blacklist.
+
+#############################################################################
+# UBSan goes into an infinite recursion when __dynamic_cast is instrumented
+# with "vptr". See crbug.com/609786.
+
+src:*/third_party/libc\+\+abi/trunk/src/private_typeinfo.cpp
diff --git a/src/third_party/v8/tools/unittests/__init__.py b/src/third_party/v8/tools/unittests/__init__.py
new file mode 100644
index 0000000..3841a86
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/__init__.py
@@ -0,0 +1,4 @@
+#!/usr/bin/env python
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/src/third_party/v8/tools/unittests/compare_torque_output_test.py b/src/third_party/v8/tools/unittests/compare_torque_output_test.py
new file mode 100644
index 0000000..a6086d9
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/compare_torque_output_test.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python
+# Copyright 2020 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import re
+import subprocess
+import sys
+import tempfile
+import unittest
+
+TOOLS_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+COMPARE_SCRIPT = os.path.join(TOOLS_DIR, 'compare_torque_output.py')
+TEST_DATA = os.path.join(TOOLS_DIR, 'unittests', 'testdata', 'compare_torque')
+
+
+class PredictableTest(unittest.TestCase):
+  def setUp(self):
+    fd, self.tmp_file = tempfile.mkstemp()
+    os.close(fd)
+
+  def _compare_from(self, test_folder):
+    file1 = os.path.join(TEST_DATA, test_folder, 'f1')
+    file2 = os.path.join(TEST_DATA, test_folder, 'f2')
+    proc = subprocess.Popen([
+          'python', '-u',
+          COMPARE_SCRIPT, file1, file2, self.tmp_file
+        ], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    _, err = proc.communicate()
+    return proc.returncode, err
+
+  def test_content_diff(self):
+    exitcode, output = self._compare_from('test1')
+    self.assertEqual(1, exitcode)
+    full_match = r'^Found.*-line 2\+line 2 with diff.*\+line 3\n\n$'
+    self.assertRegexpMatches(output, re.compile(full_match, re.M | re.S))
+
+  def test_no_diff(self):
+    exitcode, output = self._compare_from('test2')
+    self.assertEqual(0, exitcode)
+    self.assertFalse(output)
+
+  def test_right_only(self):
+    exitcode, output = self._compare_from('test3')
+    self.assertEqual(1, exitcode)
+    self.assertRegexpMatches(output, r'Some files exist only in.*f2\nfile3')
+
+  def test_left_only(self):
+    exitcode, output = self._compare_from('test4')
+    self.assertEqual(1, exitcode)
+    self.assertRegexpMatches(output, r'Some files exist only in.*f1\nfile4')
+
+  def tearDown(self):
+    os.unlink(self.tmp_file)
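
For reference, the comparison that _compare_from() drives above can be reproduced by hand; the sketch below mirrors that subprocess call. The directory and output-file paths are placeholders, not files from this patch.

    import subprocess
    import sys

    # Compare two trees of Torque-generated files, as the test above does.
    # Exit code 0 means they match, 1 means a diff or a missing file was
    # found; the human-readable summary goes to stderr.
    proc = subprocess.run(
        [sys.executable, 'tools/compare_torque_output.py',
         'out/gen-before', 'out/gen-after', 'torque-diff.txt'],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    print(proc.returncode)
    print(proc.stderr.decode())
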
diff --git a/src/third_party/v8/tools/unittests/predictable_wrapper_test.py b/src/third_party/v8/tools/unittests/predictable_wrapper_test.py
new file mode 100755
index 0000000..c085fb8
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/predictable_wrapper_test.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import subprocess
+import sys
+import tempfile
+import unittest
+
+TOOLS_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+
+PREDICTABLE_WRAPPER = os.path.join(
+    TOOLS_DIR, 'predictable_wrapper.py')
+
+PREDICTABLE_MOCKED = os.path.join(
+    TOOLS_DIR, 'unittests', 'testdata', 'predictable_mocked.py')
+
+def call_wrapper(mode):
+  """Call the predictable wrapper under test with a mocked file to test.
+
+  Instead of d8, we use python and a python mock script. This mock script is
+  expecting two arguments, mode (one of 'equal', 'differ' or 'missing') and
+  a path to a temporary file for simulating non-determinism.
+  """
+  fd, state_file = tempfile.mkstemp()
+  os.close(fd)
+  try:
+    args = [
+      sys.executable,
+      PREDICTABLE_WRAPPER,
+      sys.executable,
+      PREDICTABLE_MOCKED,
+      mode,
+      state_file,
+    ]
+    proc = subprocess.Popen(args, stdout=subprocess.PIPE)
+    proc.communicate()
+    return proc.returncode
+  finally:
+    os.unlink(state_file)
+
+
+class PredictableTest(unittest.TestCase):
+  def testEqualAllocationOutput(self):
+    self.assertEqual(0, call_wrapper('equal'))
+
+  def testNoAllocationOutput(self):
+    self.assertEqual(2, call_wrapper('missing'))
+
+  def testDifferentAllocationOutput(self):
+    self.assertEqual(3, call_wrapper('differ'))
+
+
+if __name__ == '__main__':
+  unittest.main()
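
The exit codes asserted above (0, 2 and 3) characterise what the wrapper checks. The following is only an illustrative sketch of that kind of check, inferred from this test; it is not the real tools/predictable_wrapper.py, and the 'alloc' line filter is a made-up stand-in.

    import subprocess

    def run_predictably(cmd):
      # Run the target command twice and compare its allocation output.
      runs = []
      for _ in range(2):
        out = subprocess.check_output(cmd).decode()
        # Hypothetical filter; the real wrapper decides which lines count
        # as allocation output.
        runs.append([line for line in out.splitlines() if 'alloc' in line])
      if not runs[0] and not runs[1]:
        return 2  # no allocation output at all
      if runs[0] != runs[1]:
        return 3  # the two runs differ
      return 0    # outputs are equal
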
diff --git a/src/third_party/v8/tools/unittests/run_perf_test.py b/src/third_party/v8/tools/unittests/run_perf_test.py
new file mode 100755
index 0000000..28f71b2
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/run_perf_test.py
@@ -0,0 +1,648 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+from collections import namedtuple
+import json
+import os
+import platform
+import shutil
+import subprocess
+import sys
+import tempfile
+import unittest
+
+import coverage
+import mock
+
+# Requires python-coverage and python-mock. Native python coverage
+# version >= 3.7.1 should be installed to get the best speed.
+
+BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+RUN_PERF = os.path.join(BASE_DIR, 'run_perf.py')
+TEST_DATA = os.path.join(BASE_DIR, 'unittests', 'testdata')
+
+TEST_WORKSPACE = os.path.join(tempfile.gettempdir(), 'test-v8-run-perf')
+
+V8_JSON = {
+  'path': ['.'],
+  'owners': ['username@chromium.org'],
+  'binary': 'd7',
+  'timeout': 60,
+  'flags': ['--flag'],
+  'main': 'run.js',
+  'run_count': 1,
+  'results_regexp': '^%s: (.+)$',
+  'tests': [
+    {'name': 'Richards'},
+    {'name': 'DeltaBlue'},
+  ]
+}
+
+V8_NESTED_SUITES_JSON = {
+  'path': ['.'],
+  'owners': ['username@chromium.org'],
+  'flags': ['--flag'],
+  'run_count': 1,
+  'units': 'score',
+  'tests': [
+    {'name': 'Richards',
+     'path': ['richards'],
+     'binary': 'd7',
+     'main': 'run.js',
+     'resources': ['file1.js', 'file2.js'],
+     'run_count': 2,
+     'results_regexp': '^Richards: (.+)$'},
+    {'name': 'Sub',
+     'path': ['sub'],
+     'tests': [
+       {'name': 'Leaf',
+        'path': ['leaf'],
+        'run_count_x64': 3,
+        'units': 'ms',
+        'main': 'run.js',
+        'results_regexp': '^Simple: (.+) ms.$'},
+     ]
+    },
+    {'name': 'DeltaBlue',
+     'path': ['delta_blue'],
+     'main': 'run.js',
+     'flags': ['--flag2'],
+     'results_regexp': '^DeltaBlue: (.+)$'},
+    {'name': 'ShouldntRun',
+     'path': ['.'],
+     'archs': ['arm'],
+     'main': 'run.js'},
+  ]
+}
+
+V8_GENERIC_JSON = {
+  'path': ['.'],
+  'owners': ['username@chromium.org'],
+  'binary': 'cc',
+  'flags': ['--flag'],
+  'generic': True,
+  'run_count': 1,
+  'units': 'ms',
+}
+
+
+class UnitTest(unittest.TestCase):
+  @classmethod
+  def setUpClass(cls):
+    sys.path.insert(0, BASE_DIR)
+    import run_perf
+    global run_perf
+
+  def testBuildDirectory(self):
+    base_path = os.path.join(TEST_DATA, 'builddirs', 'dir1', 'out')
+    expected_path = os.path.join(base_path, 'build')
+    self.assertEqual(
+        expected_path, run_perf.find_build_directory(base_path, 'x64'))
+
+
+class PerfTest(unittest.TestCase):
+  @classmethod
+  def setUpClass(cls):
+    sys.path.insert(0, BASE_DIR)
+    cls._cov = coverage.coverage(
+        include=([os.path.join(BASE_DIR, 'run_perf.py')]))
+    cls._cov.start()
+    import run_perf
+    from testrunner.local import command
+    from testrunner.objects.output import Output, NULL_OUTPUT
+    global command, run_perf, Output, NULL_OUTPUT
+
+  @classmethod
+  def tearDownClass(cls):
+    cls._cov.stop()
+    print('')
+    print(cls._cov.report())
+
+  def setUp(self):
+    self.maxDiff = None
+    if os.path.exists(TEST_WORKSPACE):
+      shutil.rmtree(TEST_WORKSPACE)
+    os.makedirs(TEST_WORKSPACE)
+
+  def tearDown(self):
+    mock.patch.stopall()
+    if os.path.exists(TEST_WORKSPACE):
+      shutil.rmtree(TEST_WORKSPACE)
+
+  def _WriteTestInput(self, json_content):
+    self._test_input = os.path.join(TEST_WORKSPACE, 'test.json')
+    with open(self._test_input, 'w') as f:
+      f.write(json.dumps(json_content))
+
+  def _MockCommand(self, *args, **kwargs):
+    on_bots = kwargs.pop('on_bots', False)
+    # Fake output for each test run.
+    test_outputs = [Output(stdout=arg,
+                           timed_out=kwargs.get('timed_out', False),
+                           exit_code=kwargs.get('exit_code', 0),
+                           duration=42)
+                    for arg in args[1]]
+    def create_cmd(*args, **kwargs):
+      cmd = mock.MagicMock()
+      def execute(*args, **kwargs):
+        return test_outputs.pop()
+      cmd.execute = mock.MagicMock(side_effect=execute)
+      return cmd
+
+    mock.patch.object(
+        run_perf.command, 'PosixCommand',
+        mock.MagicMock(side_effect=create_cmd)).start()
+
+    build_dir = 'Release' if on_bots else 'x64.release'
+    out_dirs = ['out', 'out-secondary']
+    return_values = [
+      os.path.join(os.path.dirname(BASE_DIR), out, build_dir)
+      for out in out_dirs
+    ]
+    mock.patch.object(
+        run_perf, 'find_build_directory',
+        mock.MagicMock(side_effect=return_values)).start()
+
+    # Check that d8 is called from the correct cwd for each test run.
+    dirs = [os.path.join(TEST_WORKSPACE, arg) for arg in args[0]]
+    def chdir(*args, **kwargs):
+      self.assertEqual(dirs.pop(), args[0])
+    os.chdir = mock.MagicMock(side_effect=chdir)
+
+    subprocess.check_call = mock.MagicMock()
+    platform.system = mock.MagicMock(return_value='Linux')
+
+  def _CallMain(self, *args):
+    self._test_output = os.path.join(TEST_WORKSPACE, 'results.json')
+    all_args = [
+      '--json-test-results',
+      self._test_output,
+      self._test_input,
+    ]
+    all_args += args
+    return run_perf.Main(all_args)
+
+  def _LoadResults(self, file_name=None):
+    with open(file_name or self._test_output) as f:
+      return json.load(f)
+
+  def _VerifyResults(self, suite, units, traces, file_name=None):
+    self.assertListEqual(sorted([
+      {'units': units,
+       'graphs': [suite, trace['name']],
+       'results': trace['results'],
+       'stddev': trace['stddev']} for trace in traces]),
+      sorted(self._LoadResults(file_name)['traces']))
+
+  def _VerifyRunnableDurations(self, runs, timeout, file_name=None):
+    self.assertListEqual([
+      {
+        'graphs': ['test'],
+        'durations': [42] * runs,
+        'timeout': timeout,
+      },
+    ], self._LoadResults(file_name)['runnables'])
+
+  def _VerifyErrors(self, errors):
+    self.assertListEqual(errors, self._LoadResults()['errors'])
+
+  def _VerifyMock(self, binary, *args, **kwargs):
+    shell = os.path.join(os.path.dirname(BASE_DIR), binary)
+    command.Command.assert_called_with(
+        cmd_prefix=[],
+        shell=shell,
+        args=list(args),
+        timeout=kwargs.get('timeout', 60),
+        handle_sigterm=True)
+
+  def _VerifyMockMultiple(self, *args, **kwargs):
+    self.assertEqual(len(args), len(command.Command.call_args_list))
+    for arg, actual in zip(args, command.Command.call_args_list):
+      expected = {
+        'cmd_prefix': [],
+        'shell': os.path.join(os.path.dirname(BASE_DIR), arg[0]),
+        'args': list(arg[1:]),
+        'timeout': kwargs.get('timeout', 60),
+        'handle_sigterm': True,
+      }
+      self.assertTupleEqual((expected, ), actual)
+
+  def testOneRun(self):
+    self._WriteTestInput(V8_JSON)
+    self._MockCommand(['.'], ['x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n'])
+    self.assertEqual(0, self._CallMain())
+    self._VerifyResults('test', 'score', [
+      {'name': 'Richards', 'results': [1.234], 'stddev': ''},
+      {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
+    ])
+    self._VerifyRunnableDurations(1, 60)
+    self._VerifyErrors([])
+    self._VerifyMock(
+        os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js')
+
+  def testOneRunWithTestFlags(self):
+    test_input = dict(V8_JSON)
+    test_input['test_flags'] = ['2', 'test_name']
+    self._WriteTestInput(test_input)
+    self._MockCommand(['.'], ['Richards: 1.234\nDeltaBlue: 10657567'])
+    self.assertEqual(0, self._CallMain())
+    self._VerifyResults('test', 'score', [
+      {'name': 'Richards', 'results': [1.234], 'stddev': ''},
+      {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
+    ])
+    self._VerifyErrors([])
+    self._VerifyMock(os.path.join(
+      'out', 'x64.release', 'd7'), '--flag', 'run.js', '--', '2', 'test_name')
+
+  def testTwoRuns_Units_SuiteName(self):
+    test_input = dict(V8_JSON)
+    test_input['run_count'] = 2
+    test_input['name'] = 'v8'
+    test_input['units'] = 'ms'
+    self._WriteTestInput(test_input)
+    self._MockCommand(['.', '.'],
+                      ['Richards: 100\nDeltaBlue: 200\n',
+                       'Richards: 50\nDeltaBlue: 300\n'])
+    self.assertEqual(0, self._CallMain())
+    self._VerifyResults('v8', 'ms', [
+      {'name': 'Richards', 'results': [50.0, 100.0], 'stddev': ''},
+      {'name': 'DeltaBlue', 'results': [300.0, 200.0], 'stddev': ''},
+    ])
+    self._VerifyErrors([])
+    self._VerifyMock(os.path.join(
+      'out', 'x64.release', 'd7'), '--flag', 'run.js')
+
+  def testTwoRuns_SubRegexp(self):
+    test_input = dict(V8_JSON)
+    test_input['run_count'] = 2
+    del test_input['results_regexp']
+    test_input['tests'][0]['results_regexp'] = '^Richards: (.+)$'
+    test_input['tests'][1]['results_regexp'] = '^DeltaBlue: (.+)$'
+    self._WriteTestInput(test_input)
+    self._MockCommand(['.', '.'],
+                      ['Richards: 100\nDeltaBlue: 200\n',
+                       'Richards: 50\nDeltaBlue: 300\n'])
+    self.assertEqual(0, self._CallMain())
+    self._VerifyResults('test', 'score', [
+      {'name': 'Richards', 'results': [50.0, 100.0], 'stddev': ''},
+      {'name': 'DeltaBlue', 'results': [300.0, 200.0], 'stddev': ''},
+    ])
+    self._VerifyErrors([])
+    self._VerifyMock(os.path.join(
+      'out', 'x64.release', 'd7'), '--flag', 'run.js')
+
+  def testPerfectConfidenceRuns(self):
+    self._WriteTestInput(V8_JSON)
+    self._MockCommand(
+        ['.'], ['x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n'] * 10)
+    self.assertEqual(0, self._CallMain('--confidence-level', '1'))
+    self._VerifyResults('test', 'score', [
+      {'name': 'Richards', 'results': [1.234] * 10, 'stddev': ''},
+      {'name': 'DeltaBlue', 'results': [10657567.0] * 10, 'stddev': ''},
+    ])
+    self._VerifyErrors([])
+    self._VerifyMock(os.path.join(
+      'out', 'x64.release', 'd7'), '--flag', 'run.js')
+
+  def testNoisyConfidenceRuns(self):
+    self._WriteTestInput(V8_JSON)
+    self._MockCommand(
+        ['.'],
+        reversed([
+          # First 10 runs are mandatory. DeltaBlue is slightly noisy.
+          'x\nRichards: 1.234\nDeltaBlue: 10757567\ny\n',
+          'x\nRichards: 1.234\nDeltaBlue: 10557567\ny\n',
+          'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+          'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+          'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+          'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+          'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+          'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+          'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+          'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+          # Need 4 more runs for confidence in DeltaBlue results.
+          'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+          'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+          'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+          'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+        ]),
+    )
+    self.assertEqual(0, self._CallMain('--confidence-level', '1'))
+    self._VerifyResults('test', 'score', [
+      {'name': 'Richards', 'results': [1.234] * 14, 'stddev': ''},
+      {
+        'name': 'DeltaBlue',
+        'results': [10757567.0, 10557567.0] + [10657567.0] * 12,
+        'stddev': '',
+      },
+    ])
+    self._VerifyErrors([])
+    self._VerifyMock(os.path.join(
+      'out', 'x64.release', 'd7'), '--flag', 'run.js')
+
+  def testNestedSuite(self):
+    self._WriteTestInput(V8_NESTED_SUITES_JSON)
+    self._MockCommand(['delta_blue', 'sub/leaf', 'richards'],
+                      ['DeltaBlue: 200\n',
+                       'Simple: 1 ms.\n',
+                       'Simple: 2 ms.\n',
+                       'Simple: 3 ms.\n',
+                       'Richards: 100\n',
+                       'Richards: 50\n'])
+    self.assertEqual(0, self._CallMain())
+    self.assertListEqual(sorted([
+      {'units': 'score',
+       'graphs': ['test', 'Richards'],
+       'results': [50.0, 100.0],
+       'stddev': ''},
+      {'units': 'ms',
+       'graphs': ['test', 'Sub', 'Leaf'],
+       'results': [3.0, 2.0, 1.0],
+       'stddev': ''},
+      {'units': 'score',
+       'graphs': ['test', 'DeltaBlue'],
+       'results': [200.0],
+       'stddev': ''},
+      ]), sorted(self._LoadResults()['traces']))
+    self._VerifyErrors([])
+    self._VerifyMockMultiple(
+        (os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js'),
+        (os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js'),
+        (os.path.join('out', 'x64.release', 'd8'), '--flag', 'run.js'),
+        (os.path.join('out', 'x64.release', 'd8'), '--flag', 'run.js'),
+        (os.path.join('out', 'x64.release', 'd8'), '--flag', 'run.js'),
+        (os.path.join('out', 'x64.release', 'd8'),
+         '--flag', '--flag2', 'run.js'))
+
+  def testOneRunStdDevRegExp(self):
+    test_input = dict(V8_JSON)
+    test_input['stddev_regexp'] = '^%s\-stddev: (.+)$'
+    self._WriteTestInput(test_input)
+    self._MockCommand(['.'], ['Richards: 1.234\nRichards-stddev: 0.23\n'
+                              'DeltaBlue: 10657567\nDeltaBlue-stddev: 106\n'])
+    self.assertEqual(0, self._CallMain())
+    self._VerifyResults('test', 'score', [
+      {'name': 'Richards', 'results': [1.234], 'stddev': '0.23'},
+      {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': '106'},
+    ])
+    self._VerifyErrors([])
+    self._VerifyMock(
+        os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js')
+
+  def testTwoRunsStdDevRegExp(self):
+    test_input = dict(V8_JSON)
+    test_input['stddev_regexp'] = '^%s\-stddev: (.+)$'
+    test_input['run_count'] = 2
+    self._WriteTestInput(test_input)
+    self._MockCommand(['.'], ['Richards: 3\nRichards-stddev: 0.7\n'
+                              'DeltaBlue: 6\nDeltaBlue-boom: 0.9\n',
+                              'Richards: 2\nRichards-stddev: 0.5\n'
+                              'DeltaBlue: 5\nDeltaBlue-stddev: 0.8\n'])
+    self.assertEqual(1, self._CallMain())
+    self._VerifyResults('test', 'score', [
+      {'name': 'Richards', 'results': [2.0, 3.0], 'stddev': '0.7'},
+      {'name': 'DeltaBlue', 'results': [5.0, 6.0], 'stddev': '0.8'},
+    ])
+    self._VerifyErrors(
+        ['Test test/Richards should only run once since a stddev is provided '
+         'by the test.',
+         'Test test/DeltaBlue should only run once since a stddev is provided '
+         'by the test.',
+         'Regexp "^DeltaBlue\-stddev: (.+)$" did not match for test '
+         'test/DeltaBlue.'])
+    self._VerifyMock(
+        os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js')
+
+  def testBuildbot(self):
+    self._WriteTestInput(V8_JSON)
+    self._MockCommand(['.'], ['Richards: 1.234\nDeltaBlue: 10657567\n'],
+                      on_bots=True)
+    mock.patch.object(
+        run_perf.Platform, 'ReadBuildConfig',
+        mock.MagicMock(return_value={'is_android': False})).start()
+    self.assertEqual(0, self._CallMain())
+    self._VerifyResults('test', 'score', [
+      {'name': 'Richards', 'results': [1.234], 'stddev': ''},
+      {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
+    ])
+    self._VerifyErrors([])
+    self._VerifyMock(os.path.join('out', 'Release', 'd7'), '--flag', 'run.js')
+
+  def testBuildbotWithTotal(self):
+    test_input = dict(V8_JSON)
+    test_input['total'] = True
+    self._WriteTestInput(test_input)
+    self._MockCommand(['.'], ['Richards: 1.234\nDeltaBlue: 10657567\n'],
+                      on_bots=True)
+    mock.patch.object(
+        run_perf.Platform, 'ReadBuildConfig',
+        mock.MagicMock(return_value={'is_android': False})).start()
+    self.assertEqual(0, self._CallMain())
+    self._VerifyResults('test', 'score', [
+      {'name': 'Richards', 'results': [1.234], 'stddev': ''},
+      {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
+      {'name': 'Total', 'results': [3626.491097190233], 'stddev': ''},
+    ])
+    self._VerifyErrors([])
+    self._VerifyMock(os.path.join('out', 'Release', 'd7'), '--flag', 'run.js')
+
+  def testBuildbotWithTotalAndErrors(self):
+    test_input = dict(V8_JSON)
+    test_input['total'] = True
+    self._WriteTestInput(test_input)
+    self._MockCommand(['.'], ['x\nRichards: bla\nDeltaBlue: 10657567\ny\n'],
+                      on_bots=True)
+    mock.patch.object(
+        run_perf.Platform, 'ReadBuildConfig',
+        mock.MagicMock(return_value={'is_android': False})).start()
+    self.assertEqual(1, self._CallMain())
+    self._VerifyResults('test', 'score', [
+      {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
+    ])
+    self._VerifyErrors(
+        ['Regexp "^Richards: (.+)$" '
+         'returned a non-numeric for test test/Richards.',
+         'Not all traces have produced results. Can not compute total for '
+         'test.'])
+    self._VerifyMock(os.path.join('out', 'Release', 'd7'), '--flag', 'run.js')
+
+  def testRegexpNoMatch(self):
+    self._WriteTestInput(V8_JSON)
+    self._MockCommand(['.'], ['x\nRichaards: 1.234\nDeltaBlue: 10657567\ny\n'])
+    self.assertEqual(1, self._CallMain())
+    self._VerifyResults('test', 'score', [
+      {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
+    ])
+    self._VerifyErrors(
+        ['Regexp "^Richards: (.+)$" did not match for test test/Richards.'])
+    self._VerifyMock(
+        os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js')
+
+  def testOneRunCrashed(self):
+    test_input = dict(V8_JSON)
+    test_input['retry_count'] = 1
+    self._WriteTestInput(test_input)
+    self._MockCommand(
+        ['.'], ['x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n', ''],
+        exit_code=-1)
+    self.assertEqual(1, self._CallMain())
+    self._VerifyResults('test', 'score', [])
+    self._VerifyErrors([])
+    self._VerifyMock(
+        os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js')
+
+  def testOneRunTimingOut(self):
+    test_input = dict(V8_JSON)
+    test_input['timeout'] = 70
+    test_input['retry_count'] = 0
+    self._WriteTestInput(test_input)
+    self._MockCommand(['.'], [''], timed_out=True)
+    self.assertEqual(1, self._CallMain())
+    self._VerifyResults('test', 'score', [])
+    self._VerifyErrors([])
+    self._VerifyMock(os.path.join('out', 'x64.release', 'd7'),
+                     '--flag', 'run.js', timeout=70)
+
+  def testAndroid(self):
+    self._WriteTestInput(V8_JSON)
+    mock.patch('run_perf.AndroidPlatform.PreExecution').start()
+    mock.patch('run_perf.AndroidPlatform.PostExecution').start()
+    mock.patch('run_perf.AndroidPlatform.PreTests').start()
+    mock.patch('run_perf.find_build_directory').start()
+    mock.patch(
+        'run_perf.AndroidPlatform.Run',
+        return_value=(Output(stdout='Richards: 1.234\nDeltaBlue: 10657567\n'),
+                      NULL_OUTPUT)).start()
+    mock.patch('testrunner.local.android._Driver', autospec=True).start()
+    mock.patch(
+        'run_perf.Platform.ReadBuildConfig',
+        return_value={'is_android': True}).start()
+    self.assertEqual(0, self._CallMain('--arch', 'arm'))
+    self._VerifyResults('test', 'score', [
+      {'name': 'Richards', 'results': [1.234], 'stddev': ''},
+      {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
+    ])
+
+  def testTwoRuns_Trybot(self):
+    test_input = dict(V8_JSON)
+    test_input['run_count'] = 2
+    self._WriteTestInput(test_input)
+    self._MockCommand(['.', '.', '.', '.'],
+                      ['Richards: 100\nDeltaBlue: 200\n',
+                       'Richards: 200\nDeltaBlue: 20\n',
+                       'Richards: 50\nDeltaBlue: 200\n',
+                       'Richards: 100\nDeltaBlue: 20\n'])
+    test_output_secondary = os.path.join(
+        TEST_WORKSPACE, 'results_secondary.json')
+    self.assertEqual(0, self._CallMain(
+        '--outdir-secondary', 'out-secondary',
+        '--json-test-results-secondary', test_output_secondary,
+    ))
+    self._VerifyResults('test', 'score', [
+      {'name': 'Richards', 'results': [100.0, 200.0], 'stddev': ''},
+      {'name': 'DeltaBlue', 'results': [20.0, 20.0], 'stddev': ''},
+    ])
+    self._VerifyResults('test', 'score', [
+      {'name': 'Richards', 'results': [50.0, 100.0], 'stddev': ''},
+      {'name': 'DeltaBlue', 'results': [200.0, 200.0], 'stddev': ''},
+    ], test_output_secondary)
+    self._VerifyRunnableDurations(2, 60, test_output_secondary)
+    self._VerifyErrors([])
+    self._VerifyMockMultiple(
+        (os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js'),
+        (os.path.join('out-secondary', 'x64.release', 'd7'),
+         '--flag', 'run.js'),
+        (os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js'),
+        (os.path.join('out-secondary', 'x64.release', 'd7'),
+         '--flag', 'run.js'),
+    )
+
+  def testWrongBinaryWithProf(self):
+    test_input = dict(V8_JSON)
+    self._WriteTestInput(test_input)
+    self._MockCommand(['.'], ['x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n'])
+    self.assertEqual(0, self._CallMain('--extra-flags=--prof'))
+    self._VerifyResults('test', 'score', [
+      {'name': 'Richards', 'results': [1.234], 'stddev': ''},
+      {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
+    ])
+    self._VerifyErrors([])
+    self._VerifyMock(os.path.join('out', 'x64.release', 'd7'),
+                     '--flag', '--prof', 'run.js')
+
+  #############################################################################
+  ### System tests
+
+  def _RunPerf(self, mocked_d8, test_json):
+    output_json = os.path.join(TEST_WORKSPACE, 'output.json')
+    args = [
+      os.sys.executable, RUN_PERF,
+      '--binary-override-path', os.path.join(TEST_DATA, mocked_d8),
+      '--json-test-results', output_json,
+      os.path.join(TEST_DATA, test_json),
+    ]
+    subprocess.check_output(args)
+    return self._LoadResults(output_json)
+
+  def testNormal(self):
+    results = self._RunPerf('d8_mocked1.py', 'test1.json')
+    self.assertListEqual([], results['errors'])
+    self.assertListEqual(sorted([
+      {
+        'units': 'score',
+        'graphs': ['test1', 'Richards'],
+        'results': [1.2, 1.2],
+        'stddev': '',
+      },
+      {
+        'units': 'score',
+        'graphs': ['test1', 'DeltaBlue'],
+        'results': [2.1, 2.1],
+        'stddev': '',
+      },
+    ]), sorted(results['traces']))
+
+  def testResultsProcessor(self):
+    results = self._RunPerf('d8_mocked2.py', 'test2.json')
+    self.assertListEqual([], results['errors'])
+    self.assertListEqual([
+      {
+        'units': 'score',
+        'graphs': ['test2', 'Richards'],
+        'results': [1.2, 1.2],
+        'stddev': '',
+      },
+      {
+        'units': 'score',
+        'graphs': ['test2', 'DeltaBlue'],
+        'results': [2.1, 2.1],
+        'stddev': '',
+      },
+    ], results['traces'])
+
+  def testResultsProcessorNested(self):
+    results = self._RunPerf('d8_mocked2.py', 'test3.json')
+    self.assertListEqual([], results['errors'])
+    self.assertListEqual([
+      {
+        'units': 'score',
+        'graphs': ['test3', 'Octane', 'Richards'],
+        'results': [1.2],
+        'stddev': '',
+      },
+      {
+        'units': 'score',
+        'graphs': ['test3', 'Octane', 'DeltaBlue'],
+        'results': [2.1],
+        'stddev': '',
+      },
+    ], results['traces'])
+
+
+if __name__ == '__main__':
+  unittest.main()
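
The system tests above shell out to run_perf.py with --binary-override-path and --json-test-results. A minimal stand-alone invocation along the same lines might look as follows; the binary and suite paths are placeholders.

    import json
    import subprocess
    import sys

    # Run a perf suite description (same shape as V8_JSON above) and read the
    # JSON results back, mirroring _RunPerf().
    subprocess.check_output([
        sys.executable, 'tools/run_perf.py',
        '--binary-override-path', 'out/x64.release/d8',  # placeholder binary
        '--json-test-results', 'perf-results.json',
        'path/to/suite.json',                            # placeholder suite
    ])
    with open('perf-results.json') as f:
      print(json.load(f)['traces'])
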
diff --git a/src/third_party/v8/tools/unittests/run_tests_test.py b/src/third_party/v8/tools/unittests/run_tests_test.py
new file mode 100755
index 0000000..4cd2bde
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/run_tests_test.py
@@ -0,0 +1,657 @@
+#!/usr/bin/env python
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Global system tests for V8 test runners and fuzzers.
+
+This hooks up the framework under tools/testrunner, testing high-level
+scenarios with different test-suite extensions and build configurations.
+"""
+
+# TODO(machenbach): Mock out util.GuessOS to make these tests really platform
+# independent.
+# TODO(machenbach): Move coverage recording to a global test entry point to
+# include other unittest suites in the coverage report.
+# TODO(machenbach): Coverage data from multiprocessing doesn't work.
+# TODO(majeski): Add some tests for the fuzzers.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import collections
+import contextlib
+import json
+import os
+import shutil
+import subprocess
+import sys
+import tempfile
+import unittest
+
+from cStringIO import StringIO
+
+TOOLS_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+TEST_DATA_ROOT = os.path.join(TOOLS_ROOT, 'unittests', 'testdata')
+RUN_TESTS_PY = os.path.join(TOOLS_ROOT, 'run-tests.py')
+
+Result = collections.namedtuple(
+    'Result', ['stdout', 'stderr', 'returncode'])
+
+Result.__str__ = lambda self: (
+    '\nReturncode: %s\nStdout:\n%s\nStderr:\n%s\n' %
+    (self.returncode, self.stdout, self.stderr))
+
+
+@contextlib.contextmanager
+def temp_dir():
+  """Wrapper making a temporary directory available."""
+  path = None
+  try:
+    path = tempfile.mkdtemp('v8_test_')
+    yield path
+  finally:
+    if path:
+      shutil.rmtree(path)
+
+
+@contextlib.contextmanager
+def temp_base(baseroot='testroot1'):
+  """Wrapper that sets up a temporary V8 test root.
+
+  Args:
+    baseroot: The folder with the test root blueprint. Relevant files will be
+        copied to the temporary test root, to guarantee a fresh setup with no
+        dirty state.
+  """
+  basedir = os.path.join(TEST_DATA_ROOT, baseroot)
+  with temp_dir() as tempbase:
+    builddir = os.path.join(tempbase, 'out', 'build')
+    testroot = os.path.join(tempbase, 'test')
+    os.makedirs(builddir)
+    shutil.copy(os.path.join(basedir, 'v8_build_config.json'), builddir)
+    shutil.copy(os.path.join(basedir, 'd8_mocked.py'), builddir)
+
+    for suite in os.listdir(os.path.join(basedir, 'test')):
+      os.makedirs(os.path.join(testroot, suite))
+      for entry in os.listdir(os.path.join(basedir, 'test', suite)):
+        shutil.copy(
+            os.path.join(basedir, 'test', suite, entry),
+            os.path.join(testroot, suite))
+    yield tempbase
+
+
+@contextlib.contextmanager
+def capture():
+  """Wrapper that replaces system stdout/stderr an provides the streams."""
+  oldout = sys.stdout
+  olderr = sys.stderr
+  try:
+    stdout = StringIO()
+    stderr = StringIO()
+    sys.stdout = stdout
+    sys.stderr = stderr
+    yield stdout, stderr
+  finally:
+    sys.stdout = oldout
+    sys.stderr = olderr
+
+
+def run_tests(basedir, *args, **kwargs):
+  """Executes the test runner with captured output."""
+  with capture() as (stdout, stderr):
+    sys_args = ['--command-prefix', sys.executable] + list(args)
+    if kwargs.get('infra_staging', False):
+      sys_args.append('--infra-staging')
+    else:
+      sys_args.append('--no-infra-staging')
+    code = standard_runner.StandardTestRunner(basedir=basedir).execute(sys_args)
+    return Result(stdout.getvalue(), stderr.getvalue(), code)
+
+
+def override_build_config(basedir, **kwargs):
+  """Override the build config with new values provided as kwargs."""
+  path = os.path.join(basedir, 'out', 'build', 'v8_build_config.json')
+  with open(path) as f:
+    config = json.load(f)
+    config.update(kwargs)
+  with open(path, 'w') as f:
+    json.dump(config, f)
+
+
+class SystemTest(unittest.TestCase):
+  @classmethod
+  def setUpClass(cls):
+    # Try to set up python coverage and run without it if not available.
+    cls._cov = None
+    try:
+      import coverage
+      if int(coverage.__version__.split('.')[0]) < 4:
+        cls._cov = None
+        print('Python coverage version >= 4 required.')
+        raise ImportError()
+      cls._cov = coverage.Coverage(
+          source=([os.path.join(TOOLS_ROOT, 'testrunner')]),
+          omit=['*unittest*', '*__init__.py'],
+      )
+      cls._cov.exclude('raise NotImplementedError')
+      cls._cov.exclude('if __name__ == .__main__.:')
+      cls._cov.exclude('except TestRunnerError:')
+      cls._cov.exclude('except KeyboardInterrupt:')
+      cls._cov.exclude('if options.verbose:')
+      cls._cov.exclude('if verbose:')
+      cls._cov.exclude('pass')
+      cls._cov.exclude('assert False')
+      cls._cov.start()
+    except ImportError:
+      print('Running without python coverage.')
+    sys.path.append(TOOLS_ROOT)
+    global standard_runner
+    from testrunner import standard_runner
+    global num_fuzzer
+    from testrunner import num_fuzzer
+    from testrunner.local import command
+    from testrunner.local import pool
+    command.setup_testing()
+    pool.setup_testing()
+
+  @classmethod
+  def tearDownClass(cls):
+    if cls._cov:
+      cls._cov.stop()
+      print('')
+      print(cls._cov.report(show_missing=True))
+
+  def testPass(self):
+    """Test running only passing tests in two variants.
+
+    Also test printing durations.
+    """
+    with temp_base() as basedir:
+      result = run_tests(
+          basedir,
+          '--progress=verbose',
+          '--variants=default,stress',
+          '--time',
+          'sweet/bananas',
+          'sweet/raspberries',
+      )
+      self.assertIn('sweet/bananas default: pass', result.stdout, result)
+      # TODO(majeski): Implement for test processors
+      # self.assertIn('Total time:', result.stderr, result)
+      # self.assertIn('sweet/bananas', result.stderr, result)
+      self.assertEqual(0, result.returncode, result)
+
+  def testShardedProc(self):
+    with temp_base() as basedir:
+      for shard in [1, 2]:
+        result = run_tests(
+            basedir,
+            '--progress=verbose',
+            '--variants=default,stress',
+            '--shard-count=2',
+            '--shard-run=%d' % shard,
+            'sweet/blackberries',
+            'sweet/raspberries',
+            infra_staging=False,
+        )
+        # One of the shards gets one variant of each test.
+        self.assertIn('2 tests ran', result.stdout, result)
+        if shard == 1:
+          self.assertIn('sweet/raspberries default', result.stdout, result)
+          self.assertIn('sweet/raspberries stress', result.stdout, result)
+          self.assertEqual(0, result.returncode, result)
+        else:
+          self.assertIn(
+            'sweet/blackberries default: FAIL', result.stdout, result)
+          self.assertIn(
+            'sweet/blackberries stress: FAIL', result.stdout, result)
+          self.assertEqual(1, result.returncode, result)
+
+  @unittest.skip("incompatible with test processors")
+  def testSharded(self):
+    """Test running a particular shard."""
+    with temp_base() as basedir:
+      for shard in [1, 2]:
+        result = run_tests(
+            basedir,
+            '--progress=verbose',
+            '--variants=default,stress',
+            '--shard-count=2',
+            '--shard-run=%d' % shard,
+            'sweet/bananas',
+            'sweet/raspberries',
+        )
+        # One of the shards gets one variant of each test.
+        self.assertIn('Running 2 tests', result.stdout, result)
+        self.assertIn('sweet/bananas', result.stdout, result)
+        self.assertIn('sweet/raspberries', result.stdout, result)
+        self.assertEqual(0, result.returncode, result)
+
+  def testFail(self):
+    """Test running only failing tests in two variants."""
+    with temp_base() as basedir:
+      result = run_tests(
+          basedir,
+          '--progress=verbose',
+          '--variants=default,stress',
+          'sweet/strawberries',
+          infra_staging=False,
+      )
+      self.assertIn('sweet/strawberries default: FAIL', result.stdout, result)
+      self.assertEqual(1, result.returncode, result)
+
+  def check_cleaned_json_output(
+      self, expected_results_name, actual_json, basedir):
+    # Check relevant properties of the json output.
+    with open(actual_json) as f:
+      json_output = json.load(f)
+
+    # Replace duration in actual output as it's non-deterministic. Also
+    # replace the python executable prefix as it has a different absolute
+    # path dependent on where this runs.
+    def replace_variable_data(data):
+      data['duration'] = 1
+      data['command'] = ' '.join(
+          ['/usr/bin/python'] + data['command'].split()[1:])
+      data['command'] = data['command'].replace(basedir + '/', '')
+    for data in json_output['slowest_tests']:
+      replace_variable_data(data)
+    for data in json_output['results']:
+      replace_variable_data(data)
+    json_output['duration_mean'] = 1
+    # We need lexicographic sorting here to avoid non-deterministic behaviour.
+    # The original sorting key is duration, but in our fake test we have
+    # non-deterministic durations before we reset them to 1.
+    json_output['slowest_tests'].sort(key=lambda x: str(x))
+
+    with open(os.path.join(TEST_DATA_ROOT, expected_results_name)) as f:
+      expected_test_results = json.load(f)
+
+    pretty_json = json.dumps(json_output, indent=2, sort_keys=True)
+    msg = None  # Set to pretty_json for bootstrapping.
+    self.assertDictEqual(json_output, expected_test_results, msg)
+
+  def testFailWithRerunAndJSON(self):
+    """Test re-running a failing test and output to json."""
+    with temp_base() as basedir:
+      json_path = os.path.join(basedir, 'out.json')
+      result = run_tests(
+          basedir,
+          '--progress=verbose',
+          '--variants=default',
+          '--rerun-failures-count=2',
+          '--random-seed=123',
+          '--json-test-results', json_path,
+          'sweet/strawberries',
+          infra_staging=False,
+      )
+      self.assertIn('sweet/strawberries default: FAIL', result.stdout, result)
+      # With test processors we don't count reruns as separated failures.
+      # TODO(majeski): fix it?
+      self.assertIn('1 tests failed', result.stdout, result)
+      self.assertEqual(0, result.returncode, result)
+
+      # TODO(majeski): Previously we only reported the variant flags in the
+      # flags field of the test result.
+      # After recent changes we report all flags, including the file names.
+      # This is redundant to the command. Needs investigation.
+      self.maxDiff = None
+      self.check_cleaned_json_output(
+          'expected_test_results1.json', json_path, basedir)
+
+  def testFlakeWithRerunAndJSON(self):
+    """Test re-running a failing test and output to json."""
+    with temp_base(baseroot='testroot2') as basedir:
+      json_path = os.path.join(basedir, 'out.json')
+      result = run_tests(
+          basedir,
+          '--progress=verbose',
+          '--variants=default',
+          '--rerun-failures-count=2',
+          '--random-seed=123',
+          '--json-test-results', json_path,
+          'sweet',
+          infra_staging=False,
+      )
+      self.assertIn('sweet/bananaflakes default: pass', result.stdout, result)
+      self.assertIn('All tests succeeded', result.stdout, result)
+      self.assertEqual(0, result.returncode, result)
+      self.maxDiff = None
+      self.check_cleaned_json_output(
+          'expected_test_results2.json', json_path, basedir)
+
+  def testAutoDetect(self):
+    """Fake a build with several auto-detected options.
+
+    Using all those options at once doesn't really make much sense. This is
+    merely for getting coverage.
+    """
+    with temp_base() as basedir:
+      override_build_config(
+          basedir, dcheck_always_on=True, is_asan=True, is_cfi=True,
+          is_msan=True, is_tsan=True, is_ubsan_vptr=True, target_cpu='x86',
+          v8_enable_i18n_support=False, v8_target_cpu='x86',
+          v8_enable_verify_csa=False, v8_enable_lite_mode=False,
+          v8_enable_pointer_compression=False)
+      result = run_tests(
+          basedir,
+          '--progress=verbose',
+          '--variants=default',
+          'sweet/bananas',
+      )
+      expect_text = (
+          '>>> Autodetected:\n'
+          'asan\n'
+          'cfi_vptr\n'
+          'dcheck_always_on\n'
+          'msan\n'
+          'no_i18n\n'
+          'tsan\n'
+          'ubsan_vptr\n'
+          '>>> Running tests for ia32.release')
+      self.assertIn(expect_text, result.stdout, result)
+      self.assertEqual(0, result.returncode, result)
+      # TODO(machenbach): Test some more implications of the auto-detected
+      # options, e.g. that the right env variables are set.
+
+  def testSkips(self):
+    """Test skipping tests in status file for a specific variant."""
+    with temp_base() as basedir:
+      result = run_tests(
+          basedir,
+          '--progress=verbose',
+          '--variants=nooptimization',
+          'sweet/strawberries',
+          infra_staging=False,
+      )
+      self.assertIn('0 tests ran', result.stdout, result)
+      self.assertEqual(2, result.returncode, result)
+
+  def testRunSkips(self):
+    """Inverse the above. Test parameter to keep running skipped tests."""
+    with temp_base() as basedir:
+      result = run_tests(
+          basedir,
+          '--progress=verbose',
+          '--variants=nooptimization',
+          '--run-skipped',
+          'sweet/strawberries',
+      )
+      self.assertIn('1 tests failed', result.stdout, result)
+      self.assertIn('1 tests ran', result.stdout, result)
+      self.assertEqual(1, result.returncode, result)
+
+  def testDefault(self):
+    """Test using default test suites, though no tests are run since they don't
+    exist in a test setting.
+    """
+    with temp_base() as basedir:
+      result = run_tests(
+          basedir,
+          infra_staging=False,
+      )
+      self.assertIn('0 tests ran', result.stdout, result)
+      self.assertEqual(2, result.returncode, result)
+
+  def testNoBuildConfig(self):
+    """Test failing run when build config is not found."""
+    with temp_dir() as basedir:
+      result = run_tests(basedir)
+      self.assertIn('Failed to load build config', result.stdout, result)
+      self.assertEqual(5, result.returncode, result)
+
+  def testInconsistentArch(self):
+    """Test failing run when attempting to wrongly override the arch."""
+    with temp_base() as basedir:
+      result = run_tests(basedir, '--arch=ia32')
+      self.assertIn(
+          '--arch value (ia32) inconsistent with build config (x64).',
+          result.stdout, result)
+      self.assertEqual(5, result.returncode, result)
+
+  def testWrongVariant(self):
+    """Test using a bogus variant."""
+    with temp_base() as basedir:
+      result = run_tests(basedir, '--variants=meh')
+      self.assertEqual(5, result.returncode, result)
+
+  def testModeFromBuildConfig(self):
+    """Test auto-detection of mode from build config."""
+    with temp_base() as basedir:
+      result = run_tests(basedir, '--outdir=out/build', 'sweet/bananas')
+      self.assertIn('Running tests for x64.release', result.stdout, result)
+      self.assertEqual(0, result.returncode, result)
+
+  @unittest.skip("not available with test processors")
+  def testReport(self):
+    """Test the report feature.
+
+    This also exercises various paths in statusfile logic.
+    """
+    with temp_base() as basedir:
+      result = run_tests(
+          basedir,
+          '--variants=default',
+          'sweet',
+          '--report',
+      )
+      self.assertIn(
+          '3 tests are expected to fail that we should fix',
+          result.stdout, result)
+      self.assertEqual(1, result.returncode, result)
+
+  @unittest.skip("not available with test processors")
+  def testWarnUnusedRules(self):
+    """Test the unused-rules feature."""
+    with temp_base() as basedir:
+      result = run_tests(
+          basedir,
+          '--variants=default,nooptimization',
+          'sweet',
+          '--warn-unused',
+      )
+      self.assertIn('Unused rule: carrots', result.stdout, result)
+      self.assertIn('Unused rule: regress/', result.stdout, result)
+      self.assertEqual(1, result.returncode, result)
+
+  @unittest.skip("not available with test processors")
+  def testCatNoSources(self):
+    """Test printing sources, but the suite's tests have none available."""
+    with temp_base() as basedir:
+      result = run_tests(
+          basedir,
+          '--variants=default',
+          'sweet/bananas',
+          '--cat',
+      )
+      self.assertIn('begin source: sweet/bananas', result.stdout, result)
+      self.assertIn('(no source available)', result.stdout, result)
+      self.assertEqual(0, result.returncode, result)
+
+  def testPredictable(self):
+    """Test running a test in verify-predictable mode.
+
+    The test will fail because of missing allocation output. We verify that and
+    that the predictable flags are passed and printed after failure.
+    """
+    with temp_base() as basedir:
+      override_build_config(basedir, v8_enable_verify_predictable=True)
+      result = run_tests(
+          basedir,
+          '--progress=verbose',
+          '--variants=default',
+          'sweet/bananas',
+          infra_staging=False,
+      )
+      self.assertIn('1 tests ran', result.stdout, result)
+      self.assertIn('sweet/bananas default: FAIL', result.stdout, result)
+      self.assertIn('Test had no allocation output', result.stdout, result)
+      self.assertIn('--predictable --verify-predictable', result.stdout, result)
+      self.assertEqual(1, result.returncode, result)
+
+  def testSlowArch(self):
+    """Test timeout factor manipulation on slow architecture."""
+    with temp_base() as basedir:
+      override_build_config(basedir, v8_target_cpu='arm64')
+      result = run_tests(
+          basedir,
+          '--progress=verbose',
+          '--variants=default',
+          'sweet/bananas',
+      )
+      # TODO(machenbach): We don't have a way for testing if the correct
+      # timeout was used.
+      self.assertEqual(0, result.returncode, result)
+
+  def testRandomSeedStressWithDefault(self):
+    """Test using random-seed-stress feature has the right number of tests."""
+    with temp_base() as basedir:
+      result = run_tests(
+          basedir,
+          '--progress=verbose',
+          '--variants=default',
+          '--random-seed-stress-count=2',
+          'sweet/bananas',
+          infra_staging=False,
+      )
+      self.assertIn('2 tests ran', result.stdout, result)
+      self.assertEqual(0, result.returncode, result)
+
+  def testRandomSeedStressWithSeed(self):
+    """Test using random-seed-stress feature passing a random seed."""
+    with temp_base() as basedir:
+      result = run_tests(
+          basedir,
+          '--progress=verbose',
+          '--variants=default',
+          '--random-seed-stress-count=2',
+          '--random-seed=123',
+          'sweet/strawberries',
+      )
+      self.assertIn('2 tests ran', result.stdout, result)
+      # We use a failing test so that the command is printed and we can verify
+      # that the right random seed was passed.
+      self.assertIn('--random-seed=123', result.stdout, result)
+      self.assertEqual(1, result.returncode, result)
+
+  def testSpecificVariants(self):
+    """Test using NO_VARIANTS modifiers in status files skips the desire tests.
+
+    The test runner cmd line configures 4 tests to run (2 tests * 2 variants).
+    But the status file applies a modifier to each skipping one of the
+    variants.
+    """
+    with temp_base() as basedir:
+      override_build_config(basedir, is_asan=True)
+      result = run_tests(
+          basedir,
+          '--progress=verbose',
+          '--variants=default,stress',
+          'sweet/bananas',
+          'sweet/raspberries',
+      )
+      # Both tests are either marked as running in only default or only
+      # slow variant.
+      self.assertIn('2 tests ran', result.stdout, result)
+      self.assertEqual(0, result.returncode, result)
+
+  def testStatusFilePresubmit(self):
+    """Test that the fake status file is well-formed."""
+    with temp_base() as basedir:
+      from testrunner.local import statusfile
+      self.assertTrue(statusfile.PresubmitCheck(
+          os.path.join(basedir, 'test', 'sweet', 'sweet.status')))
+
+  def testDotsProgress(self):
+    with temp_base() as basedir:
+      result = run_tests(
+          basedir,
+          '--progress=dots',
+          'sweet/cherries',
+          'sweet/bananas',
+          '--no-sorting', '-j1', # make results order deterministic
+          infra_staging=False,
+      )
+      self.assertIn('2 tests ran', result.stdout, result)
+      self.assertIn('F.', result.stdout, result)
+      self.assertEqual(1, result.returncode, result)
+
+  def testMonoProgress(self):
+    self._testCompactProgress('mono')
+
+  def testColorProgress(self):
+    self._testCompactProgress('color')
+
+  def _testCompactProgress(self, name):
+    with temp_base() as basedir:
+      result = run_tests(
+          basedir,
+          '--progress=%s' % name,
+          'sweet/cherries',
+          'sweet/bananas',
+          infra_staging=False,
+      )
+      if name == 'color':
+        expected = ('\033[34m%  28\033[0m|'
+                    '\033[32m+   1\033[0m|'
+                    '\033[31m-   1\033[0m]: Done')
+      else:
+        expected = '%  28|+   1|-   1]: Done'
+      self.assertIn(expected, result.stdout)
+      self.assertIn('sweet/cherries', result.stdout)
+      self.assertIn('sweet/bananas', result.stdout)
+      self.assertEqual(1, result.returncode, result)
+
+  def testExitAfterNFailures(self):
+    with temp_base() as basedir:
+      result = run_tests(
+          basedir,
+          '--progress=verbose',
+          '--exit-after-n-failures=2',
+          '-j1',
+          'sweet/mangoes',       # PASS
+          'sweet/strawberries',  # FAIL
+          'sweet/blackberries',  # FAIL
+          'sweet/raspberries',   # should not run
+      )
+      self.assertIn('sweet/mangoes default: pass', result.stdout, result)
+      self.assertIn('sweet/strawberries default: FAIL', result.stdout, result)
+      self.assertIn('Too many failures, exiting...', result.stdout, result)
+      self.assertIn('sweet/blackberries default: FAIL', result.stdout, result)
+      self.assertNotIn('sweet/raspberries', result.stdout, result)
+      self.assertIn('2 tests failed', result.stdout, result)
+      self.assertIn('3 tests ran', result.stdout, result)
+      self.assertEqual(1, result.returncode, result)
+
+  def testNumFuzzer(self):
+    sys_args = ['--command-prefix', sys.executable, '--outdir', 'out/build']
+
+    with temp_base() as basedir:
+      with capture() as (stdout, stderr):
+        code = num_fuzzer.NumFuzzer(basedir=basedir).execute(sys_args)
+        result = Result(stdout.getvalue(), stderr.getvalue(), code)
+
+      self.assertEqual(0, result.returncode, result)
+
+  def testRunnerFlags(self):
+    """Test that runner-specific flags are passed to tests."""
+    with temp_base() as basedir:
+      result = run_tests(
+          basedir,
+          '--progress=verbose',
+          '--variants=default',
+          '--random-seed=42',
+          'sweet/bananas',
+          '-v',
+      )
+
+      self.assertIn(
+          '--test bananas --random-seed=42 --nohard-abort --testing-d8-test-runner',
+          result.stdout, result)
+      self.assertEqual(0, result.returncode, result)
+
+
+if __name__ == '__main__':
+  unittest.main()
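The testNumFuzzer case above relies on capture() and Result helpers defined earlier in run_tests_test.py. Purely as an illustration of the pattern, and not the file's actual implementation, a minimal stdout/stderr capture context manager could look like this:

import contextlib
import sys
from io import StringIO

@contextlib.contextmanager
def capture():
  """Swap sys.stdout/sys.stderr for in-memory buffers for the duration."""
  out, err = StringIO(), StringIO()
  saved = sys.stdout, sys.stderr
  sys.stdout, sys.stderr = out, err
  try:
    yield out, err
  finally:
    sys.stdout, sys.stderr = saved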
diff --git a/src/third_party/v8/tools/unittests/testdata/builddirs/dir1/out/build/d8 b/src/third_party/v8/tools/unittests/testdata/builddirs/dir1/out/build/d8
new file mode 100644
index 0000000..9daeafb
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/testdata/builddirs/dir1/out/build/d8
@@ -0,0 +1 @@
+test
diff --git a/src/third_party/v8/tools/unittests/testdata/compare_torque/test1/f1/file1 b/src/third_party/v8/tools/unittests/testdata/compare_torque/test1/f1/file1
new file mode 100644
index 0000000..0839846
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/testdata/compare_torque/test1/f1/file1
@@ -0,0 +1,2 @@
+line 1
+line 2
\ No newline at end of file
diff --git a/src/third_party/v8/tools/unittests/testdata/compare_torque/test1/f1/file2 b/src/third_party/v8/tools/unittests/testdata/compare_torque/test1/f1/file2
new file mode 100644
index 0000000..0839846
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/testdata/compare_torque/test1/f1/file2
@@ -0,0 +1,2 @@
+line 1
+line 2
\ No newline at end of file
diff --git a/src/third_party/v8/tools/unittests/testdata/compare_torque/test1/f2/file1 b/src/third_party/v8/tools/unittests/testdata/compare_torque/test1/f2/file1
new file mode 100644
index 0000000..e8e11f1
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/testdata/compare_torque/test1/f2/file1
@@ -0,0 +1,3 @@
+line 1
+line 2 with diff
+line 3
\ No newline at end of file
diff --git a/src/third_party/v8/tools/unittests/testdata/compare_torque/test1/f2/file2 b/src/third_party/v8/tools/unittests/testdata/compare_torque/test1/f2/file2
new file mode 100644
index 0000000..0839846
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/testdata/compare_torque/test1/f2/file2
@@ -0,0 +1,2 @@
+line 1
+line 2
\ No newline at end of file
diff --git a/src/third_party/v8/tools/unittests/testdata/compare_torque/test2/f1/file1 b/src/third_party/v8/tools/unittests/testdata/compare_torque/test2/f1/file1
new file mode 100644
index 0000000..0839846
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/testdata/compare_torque/test2/f1/file1
@@ -0,0 +1,2 @@
+line 1
+line 2
\ No newline at end of file
diff --git a/src/third_party/v8/tools/unittests/testdata/compare_torque/test2/f1/file2 b/src/third_party/v8/tools/unittests/testdata/compare_torque/test2/f1/file2
new file mode 100644
index 0000000..0839846
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/testdata/compare_torque/test2/f1/file2
@@ -0,0 +1,2 @@
+line 1
+line 2
\ No newline at end of file
diff --git a/src/third_party/v8/tools/unittests/testdata/compare_torque/test2/f2/file1 b/src/third_party/v8/tools/unittests/testdata/compare_torque/test2/f2/file1
new file mode 100644
index 0000000..0839846
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/testdata/compare_torque/test2/f2/file1
@@ -0,0 +1,2 @@
+line 1
+line 2
\ No newline at end of file
diff --git a/src/third_party/v8/tools/unittests/testdata/compare_torque/test2/f2/file2 b/src/third_party/v8/tools/unittests/testdata/compare_torque/test2/f2/file2
new file mode 100644
index 0000000..0839846
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/testdata/compare_torque/test2/f2/file2
@@ -0,0 +1,2 @@
+line 1
+line 2
\ No newline at end of file
diff --git a/src/third_party/v8/tools/unittests/testdata/compare_torque/test3/f1/file1 b/src/third_party/v8/tools/unittests/testdata/compare_torque/test3/f1/file1
new file mode 100644
index 0000000..0839846
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/testdata/compare_torque/test3/f1/file1
@@ -0,0 +1,2 @@
+line 1
+line 2
\ No newline at end of file
diff --git a/src/third_party/v8/tools/unittests/testdata/compare_torque/test3/f1/file2 b/src/third_party/v8/tools/unittests/testdata/compare_torque/test3/f1/file2
new file mode 100644
index 0000000..0839846
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/testdata/compare_torque/test3/f1/file2
@@ -0,0 +1,2 @@
+line 1
+line 2
\ No newline at end of file
diff --git a/src/third_party/v8/tools/unittests/testdata/compare_torque/test3/f2/file1 b/src/third_party/v8/tools/unittests/testdata/compare_torque/test3/f2/file1
new file mode 100644
index 0000000..0839846
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/testdata/compare_torque/test3/f2/file1
@@ -0,0 +1,2 @@
+line 1
+line 2
\ No newline at end of file
diff --git a/src/third_party/v8/tools/unittests/testdata/compare_torque/test3/f2/file2 b/src/third_party/v8/tools/unittests/testdata/compare_torque/test3/f2/file2
new file mode 100644
index 0000000..0839846
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/testdata/compare_torque/test3/f2/file2
@@ -0,0 +1,2 @@
+line 1
+line 2
\ No newline at end of file
diff --git a/src/third_party/v8/tools/unittests/testdata/compare_torque/test3/f2/file3 b/src/third_party/v8/tools/unittests/testdata/compare_torque/test3/f2/file3
new file mode 100644
index 0000000..f88e440
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/testdata/compare_torque/test3/f2/file3
@@ -0,0 +1 @@
+Line
\ No newline at end of file
diff --git a/src/third_party/v8/tools/unittests/testdata/compare_torque/test4/f1/file1 b/src/third_party/v8/tools/unittests/testdata/compare_torque/test4/f1/file1
new file mode 100644
index 0000000..0839846
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/testdata/compare_torque/test4/f1/file1
@@ -0,0 +1,2 @@
+line 1
+line 2
\ No newline at end of file
diff --git a/src/third_party/v8/tools/unittests/testdata/compare_torque/test4/f1/file2 b/src/third_party/v8/tools/unittests/testdata/compare_torque/test4/f1/file2
new file mode 100644
index 0000000..0839846
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/testdata/compare_torque/test4/f1/file2
@@ -0,0 +1,2 @@
+line 1
+line 2
\ No newline at end of file
diff --git a/src/third_party/v8/tools/unittests/testdata/compare_torque/test4/f1/file4 b/src/third_party/v8/tools/unittests/testdata/compare_torque/test4/f1/file4
new file mode 100644
index 0000000..f88e440
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/testdata/compare_torque/test4/f1/file4
@@ -0,0 +1 @@
+Line
\ No newline at end of file
diff --git a/src/third_party/v8/tools/unittests/testdata/compare_torque/test4/f2/file1 b/src/third_party/v8/tools/unittests/testdata/compare_torque/test4/f2/file1
new file mode 100644
index 0000000..0839846
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/testdata/compare_torque/test4/f2/file1
@@ -0,0 +1,2 @@
+line 1
+line 2
\ No newline at end of file
diff --git a/src/third_party/v8/tools/unittests/testdata/compare_torque/test4/f2/file2 b/src/third_party/v8/tools/unittests/testdata/compare_torque/test4/f2/file2
new file mode 100644
index 0000000..0839846
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/testdata/compare_torque/test4/f2/file2
@@ -0,0 +1,2 @@
+line 1
+line 2
\ No newline at end of file
diff --git a/src/third_party/v8/tools/unittests/testdata/d8_mocked1.py b/src/third_party/v8/tools/unittests/testdata/d8_mocked1.py
new file mode 100644
index 0000000..ff330af
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/testdata/d8_mocked1.py
@@ -0,0 +1,10 @@
+#!/usr/bin/env python
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+print('Richards: 1.2')
+print('DeltaBlue: 2.1')
diff --git a/src/third_party/v8/tools/unittests/testdata/d8_mocked2.py b/src/third_party/v8/tools/unittests/testdata/d8_mocked2.py
new file mode 100644
index 0000000..3630462
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/testdata/d8_mocked2.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+print('Richards1: 1')
+print('DeltaBlue1: 1')
+print('Richards2: 0.2')
+print('DeltaBlue2: 1.0')
+print('DeltaBlue3: 0.1')
diff --git a/src/third_party/v8/tools/unittests/testdata/expected_test_results1.json b/src/third_party/v8/tools/unittests/testdata/expected_test_results1.json
new file mode 100644
index 0000000..08ac623
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/testdata/expected_test_results1.json
@@ -0,0 +1,155 @@
+{
+  "duration_mean": 1, 
+  "results": [
+    {
+      "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner", 
+      "duration": 1, 
+      "exit_code": 1, 
+      "expected": [
+        "PASS"
+      ], 
+      "flags": [
+        "--test", 
+        "strawberries", 
+        "--random-seed=123", 
+        "--nohard-abort", 
+        "--testing-d8-test-runner"
+      ], 
+      "framework_name": "standard_runner", 
+      "name": "sweet/strawberries", 
+      "random_seed": 123, 
+      "result": "FAIL", 
+      "run": 1, 
+      "stderr": "", 
+      "stdout": "--test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner\n", 
+      "target_name": "d8_mocked.py", 
+      "variant": "default", 
+      "variant_flags": []
+    }, 
+    {
+      "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner", 
+      "duration": 1, 
+      "exit_code": 1, 
+      "expected": [
+        "PASS"
+      ], 
+      "flags": [
+        "--test", 
+        "strawberries", 
+        "--random-seed=123", 
+        "--nohard-abort", 
+        "--testing-d8-test-runner"
+      ], 
+      "framework_name": "standard_runner", 
+      "name": "sweet/strawberries", 
+      "random_seed": 123, 
+      "result": "FAIL", 
+      "run": 2, 
+      "stderr": "", 
+      "stdout": "--test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner\n", 
+      "target_name": "d8_mocked.py", 
+      "variant": "default", 
+      "variant_flags": []
+    }, 
+    {
+      "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner", 
+      "duration": 1, 
+      "exit_code": 1, 
+      "expected": [
+        "PASS"
+      ], 
+      "flags": [
+        "--test", 
+        "strawberries", 
+        "--random-seed=123", 
+        "--nohard-abort", 
+        "--testing-d8-test-runner"
+      ], 
+      "framework_name": "standard_runner", 
+      "name": "sweet/strawberries", 
+      "random_seed": 123, 
+      "result": "FAIL", 
+      "run": 3, 
+      "stderr": "", 
+      "stdout": "--test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner\n", 
+      "target_name": "d8_mocked.py", 
+      "variant": "default", 
+      "variant_flags": []
+    }
+  ], 
+  "slowest_tests": [ 
+    {
+      "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner", 
+      "duration": 1, 
+      "exit_code": 1, 
+      "expected": [
+        "PASS"
+      ], 
+      "flags": [
+        "--test", 
+        "strawberries", 
+        "--random-seed=123", 
+        "--nohard-abort", 
+        "--testing-d8-test-runner"
+      ], 
+      "framework_name": "standard_runner", 
+      "marked_slow": true, 
+      "name": "sweet/strawberries", 
+      "random_seed": 123, 
+      "result": "FAIL", 
+      "run": 1, 
+      "target_name": "d8_mocked.py", 
+      "variant": "default", 
+      "variant_flags": []
+    }, 
+    {
+      "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner", 
+      "duration": 1, 
+      "exit_code": 1, 
+      "expected": [
+        "PASS"
+      ], 
+      "flags": [
+        "--test", 
+        "strawberries", 
+        "--random-seed=123", 
+        "--nohard-abort", 
+        "--testing-d8-test-runner"
+      ], 
+      "framework_name": "standard_runner", 
+      "marked_slow": true, 
+      "name": "sweet/strawberries", 
+      "random_seed": 123, 
+      "result": "FAIL", 
+      "run": 2, 
+      "target_name": "d8_mocked.py", 
+      "variant": "default", 
+      "variant_flags": []
+    }, 
+    {
+      "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner", 
+      "duration": 1, 
+      "exit_code": 1, 
+      "expected": [
+        "PASS"
+      ], 
+      "flags": [
+        "--test", 
+        "strawberries", 
+        "--random-seed=123", 
+        "--nohard-abort", 
+        "--testing-d8-test-runner"
+      ], 
+      "framework_name": "standard_runner", 
+      "marked_slow": true, 
+      "name": "sweet/strawberries", 
+      "random_seed": 123, 
+      "result": "FAIL", 
+      "run": 3, 
+      "target_name": "d8_mocked.py", 
+      "variant": "default", 
+      "variant_flags": []
+    }
+  ], 
+  "test_total": 3
+}
\ No newline at end of file
diff --git a/src/third_party/v8/tools/unittests/testdata/expected_test_results2.json b/src/third_party/v8/tools/unittests/testdata/expected_test_results2.json
new file mode 100644
index 0000000..dc353f6
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/testdata/expected_test_results2.json
@@ -0,0 +1,102 @@
+{
+  "duration_mean": 1, 
+  "results": [
+    {
+      "command": "/usr/bin/python out/build/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner", 
+      "duration": 1, 
+      "exit_code": 1, 
+      "expected": [
+        "PASS"
+      ], 
+      "flags": [
+        "bananaflakes", 
+        "--random-seed=123", 
+        "--nohard-abort", 
+        "--testing-d8-test-runner"
+      ], 
+      "framework_name": "standard_runner", 
+      "name": "sweet/bananaflakes", 
+      "random_seed": 123, 
+      "result": "FAIL", 
+      "run": 1, 
+      "stderr": "", 
+      "stdout": "bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner\n", 
+      "target_name": "d8_mocked.py", 
+      "variant": "default", 
+      "variant_flags": []
+    }, 
+    {
+      "command": "/usr/bin/python out/build/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner", 
+      "duration": 1, 
+      "exit_code": 0, 
+      "expected": [
+        "PASS"
+      ], 
+      "flags": [
+        "bananaflakes", 
+        "--random-seed=123", 
+        "--nohard-abort", 
+        "--testing-d8-test-runner"
+      ], 
+      "framework_name": "standard_runner", 
+      "name": "sweet/bananaflakes", 
+      "random_seed": 123, 
+      "result": "PASS", 
+      "run": 2, 
+      "stderr": "", 
+      "stdout": "bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner\n", 
+      "target_name": "d8_mocked.py", 
+      "variant": "default", 
+      "variant_flags": []
+    }
+  ], 
+  "slowest_tests": [
+    {
+      "command": "/usr/bin/python out/build/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner", 
+      "duration": 1, 
+      "exit_code": 0, 
+      "expected": [
+        "PASS"
+      ], 
+      "flags": [
+        "bananaflakes", 
+        "--random-seed=123", 
+        "--nohard-abort", 
+        "--testing-d8-test-runner"
+      ], 
+      "framework_name": "standard_runner", 
+      "marked_slow": false, 
+      "name": "sweet/bananaflakes", 
+      "random_seed": 123, 
+      "result": "", 
+      "run": 2, 
+      "target_name": "d8_mocked.py", 
+      "variant": "default", 
+      "variant_flags": []
+    }, 
+    {
+      "command": "/usr/bin/python out/build/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner", 
+      "duration": 1, 
+      "exit_code": 1, 
+      "expected": [
+        "PASS"
+      ], 
+      "flags": [
+        "bananaflakes", 
+        "--random-seed=123", 
+        "--nohard-abort", 
+        "--testing-d8-test-runner"
+      ], 
+      "framework_name": "standard_runner", 
+      "marked_slow": false, 
+      "name": "sweet/bananaflakes", 
+      "random_seed": 123, 
+      "result": "FAIL", 
+      "run": 1, 
+      "target_name": "d8_mocked.py", 
+      "variant": "default", 
+      "variant_flags": []
+    }
+  ], 
+  "test_total": 2
+}
diff --git a/src/third_party/v8/tools/unittests/testdata/predictable_mocked.py b/src/third_party/v8/tools/unittests/testdata/predictable_mocked.py
new file mode 100644
index 0000000..b9e73f6
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/testdata/predictable_mocked.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import sys
+
+assert len(sys.argv) == 3
+
+if sys.argv[1] == 'equal':
+  # 1. Scenario: print equal allocation hashes.
+  print('### Allocations = 9497, hash = 0xc322c6b0')
+elif sys.argv[1] == 'differ':
+  # 2. Scenario: print different allocation hashes. This prints a different
+  # hash on the second run, based on the content of a semaphore file. This
+  # file is expected to be empty in the beginning.
+  with open(sys.argv[2]) as f:
+    if f.read():
+      print('### Allocations = 9497, hash = 0xc322c6b0')
+    else:
+      print('### Allocations = 9497, hash = 0xc322c6b1')
+  with open(sys.argv[2], 'w') as f:
+    f.write('something')
+else:
+  # 3. Scenario: missing allocation hashes. Don't print anything.
+  assert 'missing'
+
+sys.exit(0)
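A rough sketch of how this mock behaves when driven directly, assuming it is invoked from the directory containing predictable_mocked.py with a scratch semaphore file:

import os
import subprocess
import sys
import tempfile

fd, semaphore = tempfile.mkstemp()  # the semaphore file starts out empty
os.close(fd)
for scenario in ('equal', 'differ', 'differ', 'missing'):
  out = subprocess.check_output(
      [sys.executable, 'predictable_mocked.py', scenario, semaphore])
  print(scenario, out)
os.remove(semaphore)
# equal:   prints hash 0xc322c6b0
# differ:  prints 0xc322c6b1 on the first run, 0xc322c6b0 on the second
# missing: prints nothing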
diff --git a/src/third_party/v8/tools/unittests/testdata/results_processor.py b/src/third_party/v8/tools/unittests/testdata/results_processor.py
new file mode 100644
index 0000000..d8c5ad9
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/testdata/results_processor.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Fake results processor for testing that just sums some things up.
+"""
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import fileinput
+import re
+
+richards = 0.0
+deltablue = 0.0
+
+for line in fileinput.input():
+  match = re.match(r'^Richards\d: (.*)$', line)
+  if match:
+    richards += float(match.group(1))
+  match = re.match(r'^DeltaBlue\d: (.*)$', line)
+  if match:
+    deltablue += float(match.group(1))
+
+print('Richards: %f' % richards)
+print('DeltaBlue: %f' % deltablue)
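For example, piping the output of d8_mocked2.py above through this processor sums the per-trace values (a sketch assuming both scripts are in the current directory):

import sys
from subprocess import PIPE, Popen, check_output

mocked = check_output([sys.executable, 'd8_mocked2.py'])
proc = Popen([sys.executable, 'results_processor.py'], stdin=PIPE, stdout=PIPE)
summed, _ = proc.communicate(mocked)
print(summed.decode())
# Richards: 1.200000   (1 + 0.2)
# DeltaBlue: 2.100000  (1 + 1.0 + 0.1)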
diff --git a/src/third_party/v8/tools/unittests/testdata/test1.json b/src/third_party/v8/tools/unittests/testdata/test1.json
new file mode 100644
index 0000000..939d6e2
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/testdata/test1.json
@@ -0,0 +1,12 @@
+{
+  "path": ["."],
+  "owners": ["username@chromium.org"],
+  "flags": [],
+  "main": "run.js",
+  "run_count": 2,
+  "results_regexp": "^%s: (.+)$",
+  "tests": [
+    {"name": "Richards"},
+    {"name": "DeltaBlue"}
+  ]
+}
diff --git a/src/third_party/v8/tools/unittests/testdata/test2.json b/src/third_party/v8/tools/unittests/testdata/test2.json
new file mode 100644
index 0000000..632c4e5
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/testdata/test2.json
@@ -0,0 +1,13 @@
+{
+  "path": ["."],
+  "owners": ["username@chromium.org"],
+  "flags": [],
+  "main": "run.js",
+  "run_count": 2,
+  "results_processor": "results_processor.py",
+  "results_regexp": "^%s: (.+)$",
+  "tests": [
+    {"name": "Richards"},
+    {"name": "DeltaBlue"}
+  ]
+}
diff --git a/src/third_party/v8/tools/unittests/testdata/test3.json b/src/third_party/v8/tools/unittests/testdata/test3.json
new file mode 100644
index 0000000..3e871de
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/testdata/test3.json
@@ -0,0 +1,17 @@
+{
+  "path": ["."],
+  "owners": ["username@chromium.org"],
+  "flags": [],
+  "run_count": 1,
+  "results_processor": "results_processor.py",
+  "tests": [{
+    "path": ["."],
+    "name": "Octane",
+    "main": "run.js",
+    "results_regexp": "^%s: (.+)$",
+    "tests": [
+      {"name": "Richards"},
+      {"name": "DeltaBlue"}
+    ]
+  }]
+}
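The results_regexp entries in these configs contain a '%s' placeholder that the perf runner presumably fills with each trace name before matching the benchmark's stdout. A minimal sketch of that substitution against the mocked output:

import re

results_regexp = '^%s: (.+)$'
stdout = 'Richards: 1.2\nDeltaBlue: 2.1\n'
for name in ('Richards', 'DeltaBlue'):
  m = re.search(results_regexp % re.escape(name), stdout, re.M)
  print(name, m.group(1))
# Richards 1.2
# DeltaBlue 2.1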
diff --git a/src/third_party/v8/tools/unittests/testdata/testroot1/d8_mocked.py b/src/third_party/v8/tools/unittests/testdata/testroot1/d8_mocked.py
new file mode 100644
index 0000000..d67e030
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/testdata/testroot1/d8_mocked.py
@@ -0,0 +1,19 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Dummy d8 replacement. Just passes all tests, except if 'berries' is in args.
+"""
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import sys
+
+args = ' '.join(sys.argv[1:])
+print(args)
+# Let all berries fail.
+if 'berries' in args:
+  sys.exit(1)
+sys.exit(0)
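Driven directly, the mock echoes its arguments and fails exactly for the 'berries' tests, which is what produces the FAIL entries in expected_test_results1.json (sketch, assuming the current directory is testroot1):

import subprocess
import sys

assert subprocess.call([sys.executable, 'd8_mocked.py', '--test', 'mangoes']) == 0
assert subprocess.call([sys.executable, 'd8_mocked.py', '--test', 'strawberries']) == 1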
diff --git a/src/third_party/v8/tools/unittests/testdata/testroot1/test/sweet/sweet.status b/src/third_party/v8/tools/unittests/testdata/testroot1/test/sweet/sweet.status
new file mode 100644
index 0000000..a0bd517
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/testdata/testroot1/test/sweet/sweet.status
@@ -0,0 +1,36 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+[
+[ALWAYS, {
+  'raspberries': FAIL,
+  'strawberries': [PASS, ['mode == release', SLOW], ['mode == debug', NO_VARIANTS]],
+  'mangoes': [PASS, SLOW],
+
+  # Both cherries and apples are to test how PASS and FAIL from different
+  # sections are merged.
+  'cherries': [PASS, SLOW],
+  'apples': [FAIL],
+
+  # Unused rule.
+  'carrots': [PASS, FAIL],
+}],
+
+['variant == nooptimization', {
+  'strawberries': [SKIP],
+}],
+
+['arch == x64', {
+  'cherries': [FAIL],
+  'apples': [PASS, SLOW],
+
+  # Unused rule.
+  'regress/*': [CRASH],
+}],
+
+['asan', {
+  'bananas': [PASS, NO_VARIANTS],
+  'raspberries': [FAIL, NO_VARIANTS],
+}],
+]
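The cherries/apples entries exist to exercise how outcomes from several matching sections are combined. The authoritative merge logic lives in testrunner/local/statusfile.py; as a loose illustration only, combining the ALWAYS and 'arch == x64' sections by set union gives both tests the outcome set {PASS, SLOW, FAIL}:

always = {'cherries': {'PASS', 'SLOW'}, 'apples': {'FAIL'}}
x64 = {'cherries': {'FAIL'}, 'apples': {'PASS', 'SLOW'}}
merged = {name: always[name] | x64[name] for name in always}
assert merged['cherries'] == merged['apples'] == {'PASS', 'SLOW', 'FAIL'}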
diff --git a/src/third_party/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py b/src/third_party/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py
new file mode 100644
index 0000000..a2dfc9d
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py
@@ -0,0 +1,36 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Dummy test suite extension with some fruity tests.
+"""
+
+from testrunner.local import testsuite
+from testrunner.objects import testcase
+
+class TestLoader(testsuite.TestLoader):
+  def _list_test_filenames(self):
+    return [
+          'bananas', 'apples', 'cherries', 'mangoes', 'strawberries',
+          'blackberries', 'raspberries',
+    ]
+
+
+class TestSuite(testsuite.TestSuite):
+  def _test_loader_class(self):
+    return TestLoader
+
+  def _test_class(self):
+    return TestCase
+
+
+class TestCase(testcase.D8TestCase):
+  def get_shell(self):
+    return 'd8_mocked.py'
+
+  def _get_files_params(self):
+    return [self.name]
+
+def GetSuite(*args, **kwargs):
+  return TestSuite(*args, **kwargs)
diff --git a/src/third_party/v8/tools/unittests/testdata/testroot1/v8_build_config.json b/src/third_party/v8/tools/unittests/testdata/testroot1/v8_build_config.json
new file mode 100644
index 0000000..8f8efc4
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/testdata/testroot1/v8_build_config.json
@@ -0,0 +1,24 @@
+{
+  "current_cpu": "x64",
+  "dcheck_always_on": false,
+  "is_android": false,
+  "is_asan": false,
+  "is_cfi": false,
+  "is_clang": true,
+  "is_component_build": false,
+  "is_debug": false,
+  "is_full_debug": false,
+  "is_gcov_coverage": false,
+  "is_ubsan_vptr": false,
+  "is_msan": false,
+  "is_tsan": false,
+  "target_cpu": "x64",
+  "v8_current_cpu": "x64",
+  "v8_enable_i18n_support": true,
+  "v8_enable_verify_predictable": false,
+  "v8_target_cpu": "x64",
+  "v8_enable_concurrent_marking": true,
+  "v8_enable_verify_csa": false,
+  "v8_enable_lite_mode": false,
+  "v8_enable_pointer_compression": true
+}
diff --git a/src/third_party/v8/tools/unittests/testdata/testroot2/d8_mocked.py b/src/third_party/v8/tools/unittests/testdata/testroot2/d8_mocked.py
new file mode 100644
index 0000000..48d6bce
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/testdata/testroot2/d8_mocked.py
@@ -0,0 +1,32 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Dummy d8 replacement for flaky tests.
+"""
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import os
+import sys
+
+PATH = os.path.dirname(os.path.abspath(__file__))
+
+print(' '.join(sys.argv[1:]))
+
+# Test files ending in 'flakes' should first fail then pass. We store state in
+# a file side by side with the executable. No clean-up required as all tests
+# run in a temp test root. Restriction: Only one variant is supported for now.
+for arg in sys.argv[1:]:
+  if arg.endswith('flakes'):
+    flake_state = os.path.join(PATH, arg)
+    if os.path.exists(flake_state):
+      sys.exit(0)
+    else:
+      with open(flake_state, 'w') as f:
+        f.write('something')
+      sys.exit(1)
+
+sys.exit(0)
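Invoked twice for the same 'flakes' test, the mock fails first and passes afterwards because of the state file it writes next to itself (sketch, assuming the current directory is testroot2 and no stale state file exists):

import os
import subprocess
import sys

assert subprocess.call([sys.executable, 'd8_mocked.py', 'bananaflakes']) == 1
assert subprocess.call([sys.executable, 'd8_mocked.py', 'bananaflakes']) == 0
os.remove('bananaflakes')  # clean up the state file the mock left behind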
diff --git a/src/third_party/v8/tools/unittests/testdata/testroot2/test/sweet/sweet.status b/src/third_party/v8/tools/unittests/testdata/testroot2/test/sweet/sweet.status
new file mode 100644
index 0000000..9ad8c81
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/testdata/testroot2/test/sweet/sweet.status
@@ -0,0 +1,6 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+[
+]
diff --git a/src/third_party/v8/tools/unittests/testdata/testroot2/test/sweet/testcfg.py b/src/third_party/v8/tools/unittests/testdata/testroot2/test/sweet/testcfg.py
new file mode 100644
index 0000000..3606cd3
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/testdata/testroot2/test/sweet/testcfg.py
@@ -0,0 +1,34 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Dummy test suite extension with some flaky fruity tests.
+"""
+
+from testrunner.local import testsuite
+from testrunner.objects import testcase
+
+
+class TestLoader(testsuite.TestLoader):
+  def _list_test_filenames(self):
+    return ['bananaflakes']
+
+
+class TestSuite(testsuite.TestSuite):
+  def _test_loader_class(self):
+    return TestLoader
+
+  def _test_class(self):
+    return TestCase
+
+
+class TestCase(testcase.TestCase):
+  def get_shell(self):
+    return 'd8_mocked.py'
+
+  def _get_files_params(self):
+    return [self.name]
+
+def GetSuite(*args, **kwargs):
+  return TestSuite(*args, **kwargs)
diff --git a/src/third_party/v8/tools/unittests/testdata/testroot2/v8_build_config.json b/src/third_party/v8/tools/unittests/testdata/testroot2/v8_build_config.json
new file mode 100644
index 0000000..7134998
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/testdata/testroot2/v8_build_config.json
@@ -0,0 +1,24 @@
+{
+  "current_cpu": "x64",
+  "dcheck_always_on": false,
+  "is_android": false,
+  "is_asan": false,
+  "is_cfi": false,
+  "is_clang": true,
+  "is_component_build": false,
+  "is_debug": false,
+  "is_full_debug": false,
+  "is_gcov_coverage": false,
+  "is_ubsan_vptr": false,
+  "is_msan": false,
+  "is_tsan": false,
+  "target_cpu": "x64",
+  "v8_current_cpu": "x64",
+  "v8_enable_i18n_support": true,
+  "v8_enable_verify_predictable": false,
+  "v8_target_cpu": "x64",
+  "v8_enable_concurrent_marking": true,
+  "v8_enable_verify_csa": false,
+  "v8_enable_lite_mode": false,
+  "v8_enable_pointer_compression": false
+}
diff --git a/src/third_party/v8/tools/unittests/v8_presubmit_test.py b/src/third_party/v8/tools/unittests/v8_presubmit_test.py
new file mode 100755
index 0000000..2c66d18
--- /dev/null
+++ b/src/third_party/v8/tools/unittests/v8_presubmit_test.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+import tempfile
+import unittest
+
+# Configuring the path for the v8_presubmit module
+TOOLS_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+sys.path.append(TOOLS_ROOT)
+
+from v8_presubmit import FileContentsCache, CacheableSourceFileProcessor
+
+
+class FakeCachedProcessor(CacheableSourceFileProcessor):
+  def __init__(self, cache_file_path):
+    super(FakeCachedProcessor, self).__init__(
+      use_cache=True, cache_file_path=cache_file_path, file_type='.test')
+  def GetProcessorWorker(self):
+    return object
+  def GetProcessorScript(self):
+    return "echo", []
+  def DetectUnformattedFiles(_, cmd, worker, files):
+    raise NotImplementedError
+
+class FileContentsCacheTest(unittest.TestCase):
+  def setUp(self):
+    _, self.cache_file_path = tempfile.mkstemp()
+    cache = FileContentsCache(self.cache_file_path)
+    cache.Load()
+
+    def generate_file():
+      _, file_name = tempfile.mkstemp()
+      with open(file_name, "w") as f:
+        f.write(file_name)
+
+      return file_name
+
+    self.target_files = [generate_file() for _ in range(2)]
+    unchanged_files = cache.FilterUnchangedFiles(self.target_files)
+    self.assertEqual(len(unchanged_files), 2)
+    cache.Save()
+
+  def tearDown(self):
+    for file in [self.cache_file_path] + self.target_files:
+      os.remove(file)
+
+  def testCachesFiles(self):
+    cache = FileContentsCache(self.cache_file_path)
+    cache.Load()
+
+    changed_files = cache.FilterUnchangedFiles(self.target_files)
+    self.assertListEqual(changed_files, [])
+
+    modified_file = self.target_files[0]
+    with open(modified_file, "w") as f:
+      f.write("modification")
+
+    changed_files = cache.FilterUnchangedFiles(self.target_files)
+    self.assertListEqual(changed_files, [modified_file])
+
+  def testCacheableSourceFileProcessor(self):
+    class CachedProcessor(FakeCachedProcessor):
+      def DetectFilesToChange(_, files):
+        self.assertListEqual(files, [])
+        return []
+
+    cached_processor = CachedProcessor(cache_file_path=self.cache_file_path)
+    cached_processor.ProcessFiles(self.target_files)
+
+  def testCacheableSourceFileProcessorWithModifications(self):
+    modified_file = self.target_files[0]
+    with open(modified_file, "w") as f:
+      f.write("modification")
+
+    class CachedProcessor(FakeCachedProcessor):
+      def DetectFilesToChange(_, files):
+        self.assertListEqual(files, [modified_file])
+        return []
+
+    cached_processor = CachedProcessor(
+      cache_file_path=self.cache_file_path,
+    )
+    cached_processor.ProcessFiles(self.target_files)
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/src/third_party/v8/tools/update-object-macros-undef.py b/src/third_party/v8/tools/update-object-macros-undef.py
new file mode 100755
index 0000000..866fdb6
--- /dev/null
+++ b/src/third_party/v8/tools/update-object-macros-undef.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python3
+
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+# vim:fenc=utf-8:shiftwidth=2:tabstop=2:softtabstop=2:expandtab
+
+"""
+Generate object-macros-undef.h from object-macros.h.
+"""
+
+import os.path
+import re
+import sys
+
+INPUT = 'src/objects/object-macros.h'
+OUTPUT = 'src/objects/object-macros-undef.h'
+HEADER = """// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Generate this file using the {} script.
+
+// PRESUBMIT_INTENTIONALLY_MISSING_INCLUDE_GUARD
+
+""".format(os.path.basename(__file__))
+
+
+def main():
+  if not os.path.isfile(INPUT):
+    sys.exit("Input file {} does not exist; run this script in a v8 checkout."
+             .format(INPUT))
+  if not os.path.isfile(OUTPUT):
+    sys.exit("Output file {} does not exist; run this script in a v8 checkout."
+             .format(OUTPUT))
+  regexp = re.compile(r'^#define (\w+)')
+  seen = set()
+  with open(INPUT, 'r') as infile, open(OUTPUT, 'w') as outfile:
+    outfile.write(HEADER)
+    for line in infile:
+      match = regexp.match(line)
+      if match and match.group(1) not in seen:
+        seen.add(match.group(1))
+        outfile.write('#undef {}\n'.format(match.group(1)))
+
+if __name__ == "__main__":
+  main()
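The rewrite the script performs can be illustrated on a couple of made-up macro definitions; each first occurrence of '#define NAME' becomes a single '#undef NAME':

import re

regexp = re.compile(r'^#define (\w+)')
fake_header = [
  '#define DECL_INT(name) int name() const;\n',
  '#define DECL_INT(name) int name() const;\n',  # repeated, emitted only once
  '#define ACCESSORS(holder, name, type) /* ... */\n',
]
seen = set()
for line in fake_header:
  match = regexp.match(line)
  if match and match.group(1) not in seen:
    seen.add(match.group(1))
    print('#undef {}'.format(match.group(1)))
# #undef DECL_INT
# #undef ACCESSORS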
diff --git a/src/third_party/v8/tools/v8_presubmit.py b/src/third_party/v8/tools/v8_presubmit.py
new file mode 100755
index 0000000..db008aa
--- /dev/null
+++ b/src/third_party/v8/tools/v8_presubmit.py
@@ -0,0 +1,788 @@
+#!/usr/bin/env python
+#
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+# for py2/py3 compatibility
+from __future__ import absolute_import
+from __future__ import print_function
+
+try:
+  import hashlib
+  md5er = hashlib.md5
+except ImportError as e:
+  import md5
+  md5er = md5.new
+
+
+import json
+import optparse
+import os
+from os.path import abspath, join, dirname, basename, exists
+import pickle
+import re
+import sys
+import subprocess
+import multiprocessing
+from subprocess import PIPE
+
+from testrunner.local import statusfile
+from testrunner.local import testsuite
+from testrunner.local import utils
+
+# Special LINT rules diverging from default and reason.
+# build/header_guard: Our guards have the form "V8_FOO_H_", not "SRC_FOO_H_".
+#   We now run our own header guard check in PRESUBMIT.py.
+# build/include_what_you_use: Started giving false positives for variables
+#   named "string" and "map" assuming that you needed to include STL headers.
+# runtime/references: As of May 2020 the C++ style guide suggests using
+#   references for out parameters, see
+#   https://google.github.io/styleguide/cppguide.html#Inputs_and_Outputs.
+
+LINT_RULES = """
+-build/header_guard
+-build/include_what_you_use
+-readability/fn_size
+-readability/multiline_comment
+-runtime/references
+-whitespace/comments
+""".split()
+
+LINT_OUTPUT_PATTERN = re.compile(r'^.+[:(]\d+[:)]|^Done processing')
+FLAGS_LINE = re.compile("//\s*Flags:.*--([A-z0-9-])+_[A-z0-9].*\n")
+ASSERT_OPTIMIZED_PATTERN = re.compile("assertOptimized")
+FLAGS_ENABLE_OPT = re.compile("//\s*Flags:.*--opt[^-].*\n")
+ASSERT_UNOPTIMIZED_PATTERN = re.compile("assertUnoptimized")
+FLAGS_NO_ALWAYS_OPT = re.compile("//\s*Flags:.*--no-?always-opt.*\n")
+
+TOOLS_PATH = dirname(abspath(__file__))
+
+def CppLintWorker(command):
+  try:
+    process = subprocess.Popen(command, stderr=subprocess.PIPE)
+    process.wait()
+    out_lines = ""
+    error_count = -1
+    while True:
+      out_line = process.stderr.readline()
+      if out_line == '' and process.poll() != None:
+        if error_count == -1:
+          print("Failed to process %s" % command.pop())
+          return 1
+        break
+      m = LINT_OUTPUT_PATTERN.match(out_line)
+      if m:
+        out_lines += out_line
+        error_count += 1
+    sys.stdout.write(out_lines)
+    return error_count
+  except KeyboardInterrupt:
+    process.kill()
+  except:
+    print('Error running cpplint.py. Please make sure you have depot_tools' +
+          ' in your $PATH. Lint check skipped.')
+    process.kill()
+
+def TorqueLintWorker(command):
+  try:
+    process = subprocess.Popen(command, stderr=subprocess.PIPE)
+    process.wait()
+    out_lines = ""
+    error_count = 0
+    while True:
+      out_line = process.stderr.readline()
+      if out_line == '' and process.poll() != None:
+        break
+      out_lines += out_line
+      error_count += 1
+    sys.stdout.write(out_lines)
+    if error_count != 0:
+        sys.stdout.write(
+          "warning: formatting and overwriting unformatted Torque files\n")
+    return error_count
+  except KeyboardInterrupt:
+    process.kill()
+  except:
+    print('Error running format-torque.py')
+    process.kill()
+
+def JSLintWorker(command):
+  def format_file(command):
+    try:
+      file_name = command[-1]
+      with open(file_name, "r") as file_handle:
+        contents = file_handle.read()
+
+      process = subprocess.Popen(command, stdout=PIPE, stderr=subprocess.PIPE)
+      output, err = process.communicate()
+      rc = process.returncode
+      if rc != 0:
+        sys.stdout.write("error code " + str(rc) + " running clang-format.\n")
+        return rc
+
+      if output != contents:
+        return 1
+
+      return 0
+    except KeyboardInterrupt:
+      process.kill()
+    except Exception:
+      print('Error running clang-format. Please make sure you have depot_tools' +
+            ' in your $PATH. Lint check skipped.')
+      process.kill()
+
+  rc = format_file(command)
+  if rc == 1:
+    # There are files that need to be formatted, let's format them in place.
+    file_name = command[-1]
+    sys.stdout.write("Formatting %s.\n" % (file_name))
+    rc = format_file(command[:-1] + ["-i", file_name])
+  return rc
+
+class FileContentsCache(object):
+
+  def __init__(self, sums_file_name):
+    self.sums = {}
+    self.sums_file_name = sums_file_name
+
+  def Load(self):
+    try:
+      sums_file = None
+      try:
+        sums_file = open(self.sums_file_name, 'r')
+        self.sums = pickle.load(sums_file)
+      except:
+        # The pickle could not be parsed. Not much we can do about it.
+        pass
+    finally:
+      if sums_file:
+        sums_file.close()
+
+  def Save(self):
+    try:
+      sums_file = open(self.sums_file_name, 'w')
+      pickle.dump(self.sums, sums_file)
+    except:
+      # Failed to write pickle. Try to clean-up behind us.
+      if sums_file:
+        sums_file.close()
+      try:
+        os.unlink(self.sums_file_name)
+      except:
+        pass
+    finally:
+      sums_file.close()
+
+  def FilterUnchangedFiles(self, files):
+    changed_or_new = []
+    for file in files:
+      try:
+        handle = open(file, "r")
+        file_sum = md5er(handle.read()).digest()
+        if not file in self.sums or self.sums[file] != file_sum:
+          changed_or_new.append(file)
+          self.sums[file] = file_sum
+      finally:
+        handle.close()
+    return changed_or_new
+
+  def RemoveFile(self, file):
+    if file in self.sums:
+      self.sums.pop(file)
+
+
+class SourceFileProcessor(object):
+  """
+  Utility class that can run through a directory structure, find all relevant
+  files and invoke a custom check on the files.
+  """
+
+  def RunOnPath(self, path):
+    """Runs processor on all files under the given path."""
+
+    all_files = []
+    for file in self.GetPathsToSearch():
+      all_files += self.FindFilesIn(join(path, file))
+    return self.ProcessFiles(all_files)
+
+  def RunOnFiles(self, files):
+    """Runs processor only on affected files."""
+
+    # Helper for getting directory pieces.
+    dirs = lambda f: dirname(f).split(os.sep)
+
+    # Path offsets where to look (to be in sync with RunOnPath).
+    # Normalize '.' to check for it with str.startswith.
+    search_paths = [('' if p == '.' else p) for p in self.GetPathsToSearch()]
+
+    all_files = [
+      f.AbsoluteLocalPath()
+      for f in files
+      if (not self.IgnoreFile(f.LocalPath()) and
+          self.IsRelevant(f.LocalPath()) and
+          all(not self.IgnoreDir(d) for d in dirs(f.LocalPath())) and
+          any(map(f.LocalPath().startswith, search_paths)))
+    ]
+
+    return self.ProcessFiles(all_files)
+
+  def IgnoreDir(self, name):
+    return (name.startswith('.') or
+            name in ('buildtools', 'data', 'gmock', 'gtest', 'kraken',
+                     'octane', 'sunspider', 'traces-arm64'))
+
+  def IgnoreFile(self, name):
+    return name.startswith('.')
+
+  def FindFilesIn(self, path):
+    result = []
+    for (root, dirs, files) in os.walk(path):
+      for ignored in [x for x in dirs if self.IgnoreDir(x)]:
+        dirs.remove(ignored)
+      for file in files:
+        if not self.IgnoreFile(file) and self.IsRelevant(file):
+          result.append(join(root, file))
+    return result
+
+
+class CacheableSourceFileProcessor(SourceFileProcessor):
+  """Utility class that allows caching ProcessFiles() method calls.
+
+  In order to use it, provide a DetectFilesToChange method that returns
+  the files requiring intervention after processing the source files.
+  """
+
+  def __init__(self, use_cache, cache_file_path, file_type):
+    self.use_cache = use_cache
+    self.cache_file_path = cache_file_path
+    self.file_type = file_type
+
+  def GetProcessorWorker(self):
+    """Expected to return the worker function to run the formatter."""
+    raise NotImplementedError
+
+  def GetProcessorScript(self):
+    """Expected to return a tuple
+    (path to the format processor script, list of arguments)."""
+    raise NotImplementedError
+
+  def GetProcessorCommand(self):
+    format_processor, options = self.GetProcessorScript()
+    if not format_processor:
+      print('Could not find the formatter for %s files' % self.file_type)
+      sys.exit(1)
+
+    command = [sys.executable, format_processor]
+    command.extend(options)
+
+    return command
+
+  def ProcessFiles(self, files):
+    if self.use_cache:
+      cache = FileContentsCache(self.cache_file_path)
+      cache.Load()
+      files = cache.FilterUnchangedFiles(files)
+
+    if len(files) == 0:
+      print('No changes in %s files detected. Skipping check' % self.file_type)
+      return True
+
+    files_requiring_changes = self.DetectFilesToChange(files)
+    print (
+      'Total %s files found that require formatting: %d' %
+      (self.file_type, len(files_requiring_changes)))
+    if self.use_cache:
+      for file in files_requiring_changes:
+        cache.RemoveFile(file)
+
+      cache.Save()
+
+    return files_requiring_changes == []
+
+  def DetectFilesToChange(self, files):
+    command = self.GetProcessorCommand()
+    worker = self.GetProcessorWorker()
+
+    commands = [command + [file] for file in files]
+    count = multiprocessing.cpu_count()
+    pool = multiprocessing.Pool(count)
+    try:
+      results = pool.map_async(worker, commands).get(timeout=240)
+    except KeyboardInterrupt:
+      print("\nCaught KeyboardInterrupt, terminating workers.")
+      pool.terminate()
+      pool.join()
+      sys.exit(1)
+
+    unformatted_files = []
+    for index, errors in enumerate(results):
+      if errors > 0:
+        unformatted_files.append(files[index])
+
+    return unformatted_files
+
+
+class CppLintProcessor(CacheableSourceFileProcessor):
+  """
+  Lint files to check that they follow the google code style.
+  """
+
+  def __init__(self, use_cache=True):
+    super(CppLintProcessor, self).__init__(
+      use_cache=use_cache, cache_file_path='.cpplint-cache', file_type='C/C++')
+
+  def IsRelevant(self, name):
+    return name.endswith('.cc') or name.endswith('.h')
+
+  def IgnoreDir(self, name):
+    return (super(CppLintProcessor, self).IgnoreDir(name)
+            or (name == 'third_party'))
+
+  IGNORE_LINT = [
+    'export-template.h',
+    'flag-definitions.h',
+    'gay-fixed.cc',
+    'gay-precision.cc',
+    'gay-shortest.cc',
+  ]
+
+  def IgnoreFile(self, name):
+    return (super(CppLintProcessor, self).IgnoreFile(name)
+              or (name in CppLintProcessor.IGNORE_LINT))
+
+  def GetPathsToSearch(self):
+    dirs = ['include', 'samples', 'src']
+    test_dirs = ['cctest', 'common', 'fuzzer', 'inspector', 'unittests']
+    return dirs + [join('test', dir) for dir in test_dirs]
+
+  def GetProcessorWorker(self):
+    return CppLintWorker
+
+  def GetProcessorScript(self):
+    filters = ','.join([n for n in LINT_RULES])
+    arguments = ['--filter', filters]
+    for path in [TOOLS_PATH] + os.environ["PATH"].split(os.pathsep):
+      path = path.strip('"')
+      cpplint = os.path.join(path, 'cpplint.py')
+      if os.path.isfile(cpplint):
+        return cpplint, arguments
+
+    return None, arguments
+
+
+class TorqueLintProcessor(CacheableSourceFileProcessor):
+  """
+  Check .tq files to verify they follow the Torque style guide.
+  """
+
+  def __init__(self, use_cache=True):
+    super(TorqueLintProcessor, self).__init__(
+      use_cache=use_cache, cache_file_path='.torquelint-cache',
+      file_type='Torque')
+
+  def IsRelevant(self, name):
+    return name.endswith('.tq')
+
+  def GetPathsToSearch(self):
+    dirs = ['third_party', 'src']
+    test_dirs = ['torque']
+    return dirs + [join('test', dir) for dir in test_dirs]
+
+  def GetProcessorWorker(self):
+    return TorqueLintWorker
+
+  def GetProcessorScript(self):
+    torque_tools = os.path.join(TOOLS_PATH, "torque")
+    torque_path = os.path.join(torque_tools, "format-torque.py")
+    arguments = ["-il"]
+    if os.path.isfile(torque_path):
+      return torque_path, arguments
+
+    return None, arguments
+
+class JSLintProcessor(CacheableSourceFileProcessor):
+  """
+  Check .{m}js files to verify they follow the JS style guide.
+  """
+  def __init__(self, use_cache=True):
+    super(JSLintProcessor, self).__init__(
+      use_cache=use_cache, cache_file_path='.jslint-cache',
+      file_type='JavaScript')
+
+  def IsRelevant(self, name):
+    return name.endswith('.js') or name.endswith('.mjs')
+
+  def GetPathsToSearch(self):
+    return ['tools/system-analyzer']
+
+  def GetProcessorWorker(self):
+    return JSLintWorker
+
+  def GetProcessorScript(self):
+    for path in [TOOLS_PATH] + os.environ["PATH"].split(os.pathsep):
+      path = path.strip('"')
+      clang_format = os.path.join(path, 'clang_format.py')
+      if os.path.isfile(clang_format):
+        return clang_format, []
+
+    return None, []
+
+COPYRIGHT_HEADER_PATTERN = re.compile(
+    r'Copyright [\d-]*20[0-2][0-9] the V8 project authors. All rights reserved.')
+
+class SourceProcessor(SourceFileProcessor):
+  """
+  Check that all files include a copyright notice and contain no trailing whitespace.
+  """
+
+  RELEVANT_EXTENSIONS = ['.js', '.cc', '.h', '.py', '.c', '.status', '.tq', '.g4']
+
+  def __init__(self):
+    self.runtime_function_call_pattern = self.CreateRuntimeFunctionCallMatcher()
+
+  def CreateRuntimeFunctionCallMatcher(self):
+    runtime_h_path = join(dirname(TOOLS_PATH), 'src/runtime/runtime.h')
+    pattern = re.compile(r'\s+F\(([^,]*),.*\)')
+    runtime_functions = []
+    with open(runtime_h_path) as f:
+      for line in f.readlines():
+        m = pattern.match(line)
+        if m:
+          runtime_functions.append(m.group(1))
+    if len(runtime_functions) < 250:
+      print ("Runtime functions list is suspiciously short. "
+             "Consider updating the presubmit script.")
+      sys.exit(1)
+    str = '(\%\s+(' + '|'.join(runtime_functions) + '))[\s\(]'
+    return re.compile(str)
+
+  # Overwriting the one in the parent class.
+  def FindFilesIn(self, path):
+    if os.path.exists(path+'/.git'):
+      output = subprocess.Popen('git ls-files --full-name',
+                                stdout=PIPE, cwd=path, shell=True)
+      result = []
+      for file in output.stdout.read().split():
+        for dir_part in os.path.dirname(file).replace(os.sep, '/').split('/'):
+          if self.IgnoreDir(dir_part):
+            break
+        else:
+          if (self.IsRelevant(file) and os.path.exists(file)
+              and not self.IgnoreFile(file)):
+            result.append(join(path, file))
+      if output.wait() == 0:
+        return result
+    return super(SourceProcessor, self).FindFilesIn(path)
+
+  def IsRelevant(self, name):
+    for ext in SourceProcessor.RELEVANT_EXTENSIONS:
+      if name.endswith(ext):
+        return True
+    return False
+
+  def GetPathsToSearch(self):
+    return ['.']
+
+  def IgnoreDir(self, name):
+    return (super(SourceProcessor, self).IgnoreDir(name) or
+            name in ('third_party', 'out', 'obj', 'DerivedSources'))
+
+  IGNORE_COPYRIGHTS = ['box2d.js',
+                       'cpplint.py',
+                       'copy.js',
+                       'corrections.js',
+                       'crypto.js',
+                       'daemon.py',
+                       'earley-boyer.js',
+                       'fannkuch.js',
+                       'fasta.js',
+                       'injected-script.cc',
+                       'injected-script.h',
+                       'libraries.cc',
+                       'libraries-empty.cc',
+                       'lua_binarytrees.js',
+                       'meta-123.js',
+                       'memops.js',
+                       'poppler.js',
+                       'primes.js',
+                       'raytrace.js',
+                       'regexp-pcre.js',
+                       'resources-123.js',
+                       'sqlite.js',
+                       'sqlite-change-heap.js',
+                       'sqlite-pointer-masking.js',
+                       'sqlite-safe-heap.js',
+                       'v8-debugger-script.h',
+                       'v8-inspector-impl.cc',
+                       'v8-inspector-impl.h',
+                       'v8-runtime-agent-impl.cc',
+                       'v8-runtime-agent-impl.h',
+                       'gnuplot-4.6.3-emscripten.js',
+                       'zlib.js']
+  IGNORE_TABS = IGNORE_COPYRIGHTS + ['unicode-test.js', 'html-comments.js']
+
+  IGNORE_COPYRIGHTS_DIRECTORIES = [
+      "test/test262/local-tests",
+      "test/mjsunit/wasm/bulk-memory-spec",
+  ]
+
+  def EndOfDeclaration(self, line):
+    return line == "}" or line == "};"
+
+  def StartOfDeclaration(self, line):
+    return line.find("//") == 0 or \
+           line.find("/*") == 0 or \
+           line.find(") {") != -1
+
+  def ProcessContents(self, name, contents):
+    result = True
+    base = basename(name)
+    if not base in SourceProcessor.IGNORE_TABS:
+      if '\t' in contents:
+        print("%s contains tabs" % name)
+        result = False
+    if not base in SourceProcessor.IGNORE_COPYRIGHTS and \
+        not any(ignore_dir in name for ignore_dir
+                in SourceProcessor.IGNORE_COPYRIGHTS_DIRECTORIES):
+      if not COPYRIGHT_HEADER_PATTERN.search(contents):
+        print("%s is missing a correct copyright header." % name)
+        result = False
+    if ' \n' in contents or contents.endswith(' '):
+      line = 0
+      lines = []
+      parts = contents.split(' \n')
+      if not contents.endswith(' '):
+        parts.pop()
+      for part in parts:
+        line += part.count('\n') + 1
+        lines.append(str(line))
+      linenumbers = ', '.join(lines)
+      if len(lines) > 1:
+        print("%s has trailing whitespaces in lines %s." % (name, linenumbers))
+      else:
+        print("%s has trailing whitespaces in line %s." % (name, linenumbers))
+      result = False
+    if not contents.endswith('\n') or contents.endswith('\n\n'):
+      print("%s does not end with a single new line." % name)
+      result = False
+    # Sanitize flags for fuzzer.
+    if (".js" in name or ".mjs" in name) and ("mjsunit" in name or "debugger" in name):
+      match = FLAGS_LINE.search(contents)
+      if match:
+        print("%s Flags should use '-' (not '_')" % name)
+        result = False
+      if (not "mjsunit/mjsunit.js" in name and
+          not "mjsunit/mjsunit_numfuzz.js" in name):
+        if ASSERT_OPTIMIZED_PATTERN.search(contents) and \
+            not FLAGS_ENABLE_OPT.search(contents):
+          print("%s Flag --opt should be set if " \
+                "assertOptimized() is used" % name)
+          result = False
+        if ASSERT_UNOPTIMIZED_PATTERN.search(contents) and \
+            not FLAGS_NO_ALWAYS_OPT.search(contents):
+          print("%s Flag --no-always-opt should be set if " \
+                "assertUnoptimized() is used" % name)
+          result = False
+
+      match = self.runtime_function_call_pattern.search(contents)
+      if match:
+        print("%s has unexpected spaces in a runtime call '%s'" % (name, match.group(1)))
+        result = False
+    return result
+
+  def ProcessFiles(self, files):
+    success = True
+    violations = 0
+    for file in files:
+      try:
+        handle = open(file)
+        contents = handle.read()
+        if len(contents) > 0 and not self.ProcessContents(file, contents):
+          success = False
+          violations += 1
+      finally:
+        handle.close()
+    print("Total violating files: %s" % violations)
+    return success
+
+def _CheckStatusFileForDuplicateKeys(filepath):
+  comma_space_bracket = re.compile(", *]")
+  lines = []
+  with open(filepath) as f:
+    for line in f.readlines():
+      # Skip all-comment lines.
+      if line.lstrip().startswith("#"): continue
+      # Strip away comments at the end of the line.
+      comment_start = line.find("#")
+      if comment_start != -1:
+        line = line[:comment_start]
+      line = line.strip()
+      # Strip away trailing commas within the line.
+      line = comma_space_bracket.sub("]", line)
+      if len(line) > 0:
+        lines.append(line)
+
+  # Strip away trailing commas at line ends. Ugh.
+  for i in range(len(lines) - 1):
+    if (lines[i].endswith(",") and len(lines[i + 1]) > 0 and
+        lines[i + 1][0] in ("}", "]")):
+      lines[i] = lines[i][:-1]
+
+  contents = "\n".join(lines)
+  # JSON wants double-quotes.
+  contents = contents.replace("'", '"')
+  # Fill in keywords (like PASS, SKIP).
+  for key in statusfile.KEYWORDS:
+    contents = re.sub(r"\b%s\b" % key, "\"%s\"" % key, contents)
+
+  status = {"success": True}
+  def check_pairs(pairs):
+    keys = {}
+    for key, value in pairs:
+      if key in keys:
+        print("%s: Error: duplicate key %s" % (filepath, key))
+        status["success"] = False
+      keys[key] = True
+
+  json.loads(contents, object_pairs_hook=check_pairs)
+  return status["success"]
+
+
+class StatusFilesProcessor(SourceFileProcessor):
+  """Checks status files for incorrect syntax and duplicate keys."""
+
+  def IsRelevant(self, name):
+    # Several changes to files under the test directories could impact status
+    # files.
+    return True
+
+  def GetPathsToSearch(self):
+    return ['test', 'tools/testrunner']
+
+  def ProcessFiles(self, files):
+    success = True
+    for status_file_path in sorted(self._GetStatusFiles(files)):
+      success &= statusfile.PresubmitCheck(status_file_path)
+      success &= _CheckStatusFileForDuplicateKeys(status_file_path)
+    return success
+
+  def _GetStatusFiles(self, files):
+    test_path = join(dirname(TOOLS_PATH), 'test')
+    testrunner_path = join(TOOLS_PATH, 'testrunner')
+    status_files = set()
+
+    for file_path in files:
+      if file_path.startswith(testrunner_path):
+        for suitepath in os.listdir(test_path):
+          suitename = os.path.basename(suitepath)
+          status_file = os.path.join(
+              test_path, suitename, suitename + ".status")
+          if os.path.exists(status_file):
+            status_files.add(status_file)
+        return status_files
+
+    for file_path in files:
+      if file_path.startswith(test_path):
+        # Strip off absolute path prefix pointing to test suites.
+        pieces = file_path[len(test_path):].lstrip(os.sep).split(os.sep)
+        if pieces:
+          # Infer affected status file name. Only care for existing status
+          # files. Some directories under "test" don't have any.
+          if not os.path.isdir(join(test_path, pieces[0])):
+            continue
+          status_file = join(test_path, pieces[0], pieces[0] + ".status")
+          if not os.path.exists(status_file):
+            continue
+          status_files.add(status_file)
+    return status_files
+
+
+def CheckDeps(workspace):
+  checkdeps_py = join(workspace, 'buildtools', 'checkdeps', 'checkdeps.py')
+  return subprocess.call([sys.executable, checkdeps_py, workspace]) == 0
+
+
+def PyTests(workspace):
+  result = True
+  for script in [
+      join(workspace, 'tools', 'clusterfuzz', 'v8_foozzie_test.py'),
+      join(workspace, 'tools', 'release', 'test_scripts.py'),
+      join(workspace, 'tools', 'unittests', 'run_tests_test.py'),
+      join(workspace, 'tools', 'unittests', 'run_perf_test.py'),
+      join(workspace, 'tools', 'testrunner', 'testproc', 'variant_unittest.py'),
+    ]:
+    print('Running ' + script)
+    result &= subprocess.call(
+        [sys.executable, script], stdout=subprocess.PIPE) == 0
+
+  return result
+
+
+def GetOptions():
+  result = optparse.OptionParser()
+  result.add_option('--no-lint', help="Do not run cpplint", default=False,
+                    action="store_true")
+  result.add_option('--no-linter-cache', help="Do not cache linter results",
+                    default=False, action="store_true")
+
+  return result
+
+
+def Main():
+  workspace = abspath(join(dirname(sys.argv[0]), '..'))
+  parser = GetOptions()
+  (options, args) = parser.parse_args()
+  success = True
+  print("Running checkdeps...")
+  success &= CheckDeps(workspace)
+  use_linter_cache = not options.no_linter_cache
+  if not options.no_lint:
+    print("Running C++ lint check...")
+    success &= CppLintProcessor(use_cache=use_linter_cache).RunOnPath(workspace)
+
+  print("Running Torque formatting check...")
+  success &= TorqueLintProcessor(use_cache=use_linter_cache).RunOnPath(
+    workspace)
+  print("Running JavaScript formatting check...")
+  success &= JSLintProcessor(use_cache=use_linter_cache).RunOnPath(
+    workspace)
+  print("Running copyright header, trailing whitespaces and " \
+        "two empty lines between declarations check...")
+  success &= SourceProcessor().RunOnPath(workspace)
+  print("Running status-files check...")
+  success &= StatusFilesProcessor().RunOnPath(workspace)
+  print("Running python tests...")
+  success &= PyTests(workspace)
+  if success:
+    return 0
+  else:
+    return 1
+
+
+if __name__ == '__main__':
+  sys.exit(Main())
diff --git a/src/third_party/v8/tools/v8heapconst.py b/src/third_party/v8/tools/v8heapconst.py
new file mode 100644
index 0000000..0dd31d4
--- /dev/null
+++ b/src/third_party/v8/tools/v8heapconst.py
@@ -0,0 +1,508 @@
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can
+# be found in the LICENSE file.
+
+# This file is automatically generated by mkgrokdump and should not
+# be modified manually.
+
+# List of known V8 instance types.
+INSTANCE_TYPES = {
+  0: "INTERNALIZED_STRING_TYPE",
+  2: "EXTERNAL_INTERNALIZED_STRING_TYPE",
+  8: "ONE_BYTE_INTERNALIZED_STRING_TYPE",
+  10: "EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE",
+  18: "UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE",
+  26: "UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE",
+  32: "STRING_TYPE",
+  33: "CONS_STRING_TYPE",
+  34: "EXTERNAL_STRING_TYPE",
+  35: "SLICED_STRING_TYPE",
+  37: "THIN_STRING_TYPE",
+  40: "ONE_BYTE_STRING_TYPE",
+  41: "CONS_ONE_BYTE_STRING_TYPE",
+  42: "EXTERNAL_ONE_BYTE_STRING_TYPE",
+  43: "SLICED_ONE_BYTE_STRING_TYPE",
+  45: "THIN_ONE_BYTE_STRING_TYPE",
+  50: "UNCACHED_EXTERNAL_STRING_TYPE",
+  58: "UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE",
+  64: "SYMBOL_TYPE",
+  65: "BIG_INT_BASE_TYPE",
+  66: "HEAP_NUMBER_TYPE",
+  67: "ODDBALL_TYPE",
+  68: "ABSTRACT_INTERNAL_CLASS_SUBCLASS1_TYPE",
+  69: "ABSTRACT_INTERNAL_CLASS_SUBCLASS2_TYPE",
+  70: "FOREIGN_TYPE",
+  71: "WASM_TYPE_INFO_TYPE",
+  72: "PROMISE_FULFILL_REACTION_JOB_TASK_TYPE",
+  73: "PROMISE_REJECT_REACTION_JOB_TASK_TYPE",
+  74: "CALLABLE_TASK_TYPE",
+  75: "CALLBACK_TASK_TYPE",
+  76: "PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE",
+  77: "LOAD_HANDLER_TYPE",
+  78: "STORE_HANDLER_TYPE",
+  79: "FUNCTION_TEMPLATE_INFO_TYPE",
+  80: "OBJECT_TEMPLATE_INFO_TYPE",
+  81: "ACCESS_CHECK_INFO_TYPE",
+  82: "ACCESSOR_INFO_TYPE",
+  83: "ACCESSOR_PAIR_TYPE",
+  84: "ALIASED_ARGUMENTS_ENTRY_TYPE",
+  85: "ALLOCATION_MEMENTO_TYPE",
+  86: "ALLOCATION_SITE_TYPE",
+  87: "ARRAY_BOILERPLATE_DESCRIPTION_TYPE",
+  88: "ASM_WASM_DATA_TYPE",
+  89: "ASYNC_GENERATOR_REQUEST_TYPE",
+  90: "BREAK_POINT_TYPE",
+  91: "BREAK_POINT_INFO_TYPE",
+  92: "CACHED_TEMPLATE_OBJECT_TYPE",
+  93: "CALL_HANDLER_INFO_TYPE",
+  94: "CLASS_POSITIONS_TYPE",
+  95: "DEBUG_INFO_TYPE",
+  96: "ENUM_CACHE_TYPE",
+  97: "FEEDBACK_CELL_TYPE",
+  98: "FUNCTION_TEMPLATE_RARE_DATA_TYPE",
+  99: "INTERCEPTOR_INFO_TYPE",
+  100: "INTERPRETER_DATA_TYPE",
+  101: "MODULE_REQUEST_TYPE",
+  102: "PROMISE_CAPABILITY_TYPE",
+  103: "PROMISE_REACTION_TYPE",
+  104: "PROPERTY_DESCRIPTOR_OBJECT_TYPE",
+  105: "PROTOTYPE_INFO_TYPE",
+  106: "SCRIPT_TYPE",
+  107: "SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE",
+  108: "STACK_FRAME_INFO_TYPE",
+  109: "STACK_TRACE_FRAME_TYPE",
+  110: "TEMPLATE_OBJECT_DESCRIPTION_TYPE",
+  111: "TUPLE2_TYPE",
+  112: "WASM_EXCEPTION_TAG_TYPE",
+  113: "WASM_EXPORTED_FUNCTION_DATA_TYPE",
+  114: "WASM_INDIRECT_FUNCTION_TABLE_TYPE",
+  115: "WASM_JS_FUNCTION_DATA_TYPE",
+  116: "WASM_VALUE_TYPE",
+  117: "FIXED_ARRAY_TYPE",
+  118: "HASH_TABLE_TYPE",
+  119: "EPHEMERON_HASH_TABLE_TYPE",
+  120: "GLOBAL_DICTIONARY_TYPE",
+  121: "NAME_DICTIONARY_TYPE",
+  122: "NUMBER_DICTIONARY_TYPE",
+  123: "ORDERED_HASH_MAP_TYPE",
+  124: "ORDERED_HASH_SET_TYPE",
+  125: "ORDERED_NAME_DICTIONARY_TYPE",
+  126: "SIMPLE_NUMBER_DICTIONARY_TYPE",
+  127: "CLOSURE_FEEDBACK_CELL_ARRAY_TYPE",
+  128: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE",
+  129: "SCOPE_INFO_TYPE",
+  130: "SCRIPT_CONTEXT_TABLE_TYPE",
+  131: "BYTE_ARRAY_TYPE",
+  132: "BYTECODE_ARRAY_TYPE",
+  133: "FIXED_DOUBLE_ARRAY_TYPE",
+  134: "INTERNAL_CLASS_WITH_SMI_ELEMENTS_TYPE",
+  135: "SLOPPY_ARGUMENTS_ELEMENTS_TYPE",
+  136: "AWAIT_CONTEXT_TYPE",
+  137: "BLOCK_CONTEXT_TYPE",
+  138: "CATCH_CONTEXT_TYPE",
+  139: "DEBUG_EVALUATE_CONTEXT_TYPE",
+  140: "EVAL_CONTEXT_TYPE",
+  141: "FUNCTION_CONTEXT_TYPE",
+  142: "MODULE_CONTEXT_TYPE",
+  143: "NATIVE_CONTEXT_TYPE",
+  144: "SCRIPT_CONTEXT_TYPE",
+  145: "WITH_CONTEXT_TYPE",
+  146: "EXPORTED_SUB_CLASS_BASE_TYPE",
+  147: "EXPORTED_SUB_CLASS_TYPE",
+  148: "EXPORTED_SUB_CLASS2_TYPE",
+  149: "SMALL_ORDERED_HASH_MAP_TYPE",
+  150: "SMALL_ORDERED_HASH_SET_TYPE",
+  151: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
+  152: "DESCRIPTOR_ARRAY_TYPE",
+  153: "STRONG_DESCRIPTOR_ARRAY_TYPE",
+  154: "SOURCE_TEXT_MODULE_TYPE",
+  155: "SYNTHETIC_MODULE_TYPE",
+  156: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
+  157: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
+  158: "WEAK_FIXED_ARRAY_TYPE",
+  159: "TRANSITION_ARRAY_TYPE",
+  160: "CELL_TYPE",
+  161: "CODE_TYPE",
+  162: "CODE_DATA_CONTAINER_TYPE",
+  163: "COVERAGE_INFO_TYPE",
+  164: "EMBEDDER_DATA_ARRAY_TYPE",
+  165: "FEEDBACK_METADATA_TYPE",
+  166: "FEEDBACK_VECTOR_TYPE",
+  167: "FILLER_TYPE",
+  168: "FREE_SPACE_TYPE",
+  169: "INTERNAL_CLASS_TYPE",
+  170: "INTERNAL_CLASS_WITH_STRUCT_ELEMENTS_TYPE",
+  171: "MAP_TYPE",
+  172: "ON_HEAP_BASIC_BLOCK_PROFILER_DATA_TYPE",
+  173: "PREPARSE_DATA_TYPE",
+  174: "PROPERTY_ARRAY_TYPE",
+  175: "PROPERTY_CELL_TYPE",
+  176: "SHARED_FUNCTION_INFO_TYPE",
+  177: "SMI_BOX_TYPE",
+  178: "SMI_PAIR_TYPE",
+  179: "SORT_STATE_TYPE",
+  180: "WASM_ARRAY_TYPE",
+  181: "WASM_CAPI_FUNCTION_DATA_TYPE",
+  182: "WASM_STRUCT_TYPE",
+  183: "WEAK_ARRAY_LIST_TYPE",
+  184: "WEAK_CELL_TYPE",
+  185: "JS_PROXY_TYPE",
+  1057: "JS_OBJECT_TYPE",
+  186: "JS_GLOBAL_OBJECT_TYPE",
+  187: "JS_GLOBAL_PROXY_TYPE",
+  188: "JS_MODULE_NAMESPACE_TYPE",
+  1040: "JS_SPECIAL_API_OBJECT_TYPE",
+  1041: "JS_PRIMITIVE_WRAPPER_TYPE",
+  1042: "JS_MAP_KEY_ITERATOR_TYPE",
+  1043: "JS_MAP_KEY_VALUE_ITERATOR_TYPE",
+  1044: "JS_MAP_VALUE_ITERATOR_TYPE",
+  1045: "JS_SET_KEY_VALUE_ITERATOR_TYPE",
+  1046: "JS_SET_VALUE_ITERATOR_TYPE",
+  1047: "JS_GENERATOR_OBJECT_TYPE",
+  1048: "JS_ASYNC_FUNCTION_OBJECT_TYPE",
+  1049: "JS_ASYNC_GENERATOR_OBJECT_TYPE",
+  1050: "JS_DATA_VIEW_TYPE",
+  1051: "JS_TYPED_ARRAY_TYPE",
+  1052: "JS_MAP_TYPE",
+  1053: "JS_SET_TYPE",
+  1054: "JS_WEAK_MAP_TYPE",
+  1055: "JS_WEAK_SET_TYPE",
+  1056: "JS_API_OBJECT_TYPE",
+  1058: "JS_ARGUMENTS_OBJECT_TYPE",
+  1059: "JS_ARRAY_TYPE",
+  1060: "JS_ARRAY_BUFFER_TYPE",
+  1061: "JS_ARRAY_ITERATOR_TYPE",
+  1062: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE",
+  1063: "JS_COLLATOR_TYPE",
+  1064: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
+  1065: "JS_DATE_TYPE",
+  1066: "JS_DATE_TIME_FORMAT_TYPE",
+  1067: "JS_DISPLAY_NAMES_TYPE",
+  1068: "JS_ERROR_TYPE",
+  1069: "JS_FINALIZATION_REGISTRY_TYPE",
+  1070: "JS_LIST_FORMAT_TYPE",
+  1071: "JS_LOCALE_TYPE",
+  1072: "JS_MESSAGE_OBJECT_TYPE",
+  1073: "JS_NUMBER_FORMAT_TYPE",
+  1074: "JS_PLURAL_RULES_TYPE",
+  1075: "JS_PROMISE_TYPE",
+  1076: "JS_REG_EXP_TYPE",
+  1077: "JS_REG_EXP_STRING_ITERATOR_TYPE",
+  1078: "JS_RELATIVE_TIME_FORMAT_TYPE",
+  1079: "JS_SEGMENT_ITERATOR_TYPE",
+  1080: "JS_SEGMENTER_TYPE",
+  1081: "JS_SEGMENTS_TYPE",
+  1082: "JS_STRING_ITERATOR_TYPE",
+  1083: "JS_V8_BREAK_ITERATOR_TYPE",
+  1084: "JS_WEAK_REF_TYPE",
+  1085: "WASM_EXCEPTION_OBJECT_TYPE",
+  1086: "WASM_GLOBAL_OBJECT_TYPE",
+  1087: "WASM_INSTANCE_OBJECT_TYPE",
+  1088: "WASM_MEMORY_OBJECT_TYPE",
+  1089: "WASM_MODULE_OBJECT_TYPE",
+  1090: "WASM_TABLE_OBJECT_TYPE",
+  1091: "JS_BOUND_FUNCTION_TYPE",
+  1092: "JS_FUNCTION_TYPE",
+}
+
+# List of known V8 maps.
+KNOWN_MAPS = {
+  ("read_only_space", 0x02115): (171, "MetaMap"),
+  ("read_only_space", 0x0213d): (67, "NullMap"),
+  ("read_only_space", 0x02165): (153, "StrongDescriptorArrayMap"),
+  ("read_only_space", 0x0218d): (158, "WeakFixedArrayMap"),
+  ("read_only_space", 0x021cd): (96, "EnumCacheMap"),
+  ("read_only_space", 0x02201): (117, "FixedArrayMap"),
+  ("read_only_space", 0x0224d): (8, "OneByteInternalizedStringMap"),
+  ("read_only_space", 0x02299): (168, "FreeSpaceMap"),
+  ("read_only_space", 0x022c1): (167, "OnePointerFillerMap"),
+  ("read_only_space", 0x022e9): (167, "TwoPointerFillerMap"),
+  ("read_only_space", 0x02311): (67, "UninitializedMap"),
+  ("read_only_space", 0x02389): (67, "UndefinedMap"),
+  ("read_only_space", 0x023cd): (66, "HeapNumberMap"),
+  ("read_only_space", 0x02401): (67, "TheHoleMap"),
+  ("read_only_space", 0x02461): (67, "BooleanMap"),
+  ("read_only_space", 0x02505): (131, "ByteArrayMap"),
+  ("read_only_space", 0x0252d): (117, "FixedCOWArrayMap"),
+  ("read_only_space", 0x02555): (118, "HashTableMap"),
+  ("read_only_space", 0x0257d): (64, "SymbolMap"),
+  ("read_only_space", 0x025a5): (40, "OneByteStringMap"),
+  ("read_only_space", 0x025cd): (129, "ScopeInfoMap"),
+  ("read_only_space", 0x025f5): (176, "SharedFunctionInfoMap"),
+  ("read_only_space", 0x0261d): (161, "CodeMap"),
+  ("read_only_space", 0x02645): (160, "CellMap"),
+  ("read_only_space", 0x0266d): (175, "GlobalPropertyCellMap"),
+  ("read_only_space", 0x02695): (70, "ForeignMap"),
+  ("read_only_space", 0x026bd): (159, "TransitionArrayMap"),
+  ("read_only_space", 0x026e5): (45, "ThinOneByteStringMap"),
+  ("read_only_space", 0x0270d): (166, "FeedbackVectorMap"),
+  ("read_only_space", 0x0273d): (67, "ArgumentsMarkerMap"),
+  ("read_only_space", 0x0279d): (67, "ExceptionMap"),
+  ("read_only_space", 0x027f9): (67, "TerminationExceptionMap"),
+  ("read_only_space", 0x02861): (67, "OptimizedOutMap"),
+  ("read_only_space", 0x028c1): (67, "StaleRegisterMap"),
+  ("read_only_space", 0x02921): (130, "ScriptContextTableMap"),
+  ("read_only_space", 0x02949): (127, "ClosureFeedbackCellArrayMap"),
+  ("read_only_space", 0x02971): (165, "FeedbackMetadataArrayMap"),
+  ("read_only_space", 0x02999): (117, "ArrayListMap"),
+  ("read_only_space", 0x029c1): (65, "BigIntMap"),
+  ("read_only_space", 0x029e9): (128, "ObjectBoilerplateDescriptionMap"),
+  ("read_only_space", 0x02a11): (132, "BytecodeArrayMap"),
+  ("read_only_space", 0x02a39): (162, "CodeDataContainerMap"),
+  ("read_only_space", 0x02a61): (163, "CoverageInfoMap"),
+  ("read_only_space", 0x02a89): (133, "FixedDoubleArrayMap"),
+  ("read_only_space", 0x02ab1): (120, "GlobalDictionaryMap"),
+  ("read_only_space", 0x02ad9): (97, "ManyClosuresCellMap"),
+  ("read_only_space", 0x02b01): (117, "ModuleInfoMap"),
+  ("read_only_space", 0x02b29): (121, "NameDictionaryMap"),
+  ("read_only_space", 0x02b51): (97, "NoClosuresCellMap"),
+  ("read_only_space", 0x02b79): (122, "NumberDictionaryMap"),
+  ("read_only_space", 0x02ba1): (97, "OneClosureCellMap"),
+  ("read_only_space", 0x02bc9): (123, "OrderedHashMapMap"),
+  ("read_only_space", 0x02bf1): (124, "OrderedHashSetMap"),
+  ("read_only_space", 0x02c19): (125, "OrderedNameDictionaryMap"),
+  ("read_only_space", 0x02c41): (173, "PreparseDataMap"),
+  ("read_only_space", 0x02c69): (174, "PropertyArrayMap"),
+  ("read_only_space", 0x02c91): (93, "SideEffectCallHandlerInfoMap"),
+  ("read_only_space", 0x02cb9): (93, "SideEffectFreeCallHandlerInfoMap"),
+  ("read_only_space", 0x02ce1): (93, "NextCallSideEffectFreeCallHandlerInfoMap"),
+  ("read_only_space", 0x02d09): (126, "SimpleNumberDictionaryMap"),
+  ("read_only_space", 0x02d31): (149, "SmallOrderedHashMapMap"),
+  ("read_only_space", 0x02d59): (150, "SmallOrderedHashSetMap"),
+  ("read_only_space", 0x02d81): (151, "SmallOrderedNameDictionaryMap"),
+  ("read_only_space", 0x02da9): (154, "SourceTextModuleMap"),
+  ("read_only_space", 0x02dd1): (155, "SyntheticModuleMap"),
+  ("read_only_space", 0x02df9): (157, "UncompiledDataWithoutPreparseDataMap"),
+  ("read_only_space", 0x02e21): (156, "UncompiledDataWithPreparseDataMap"),
+  ("read_only_space", 0x02e49): (71, "WasmTypeInfoMap"),
+  ("read_only_space", 0x02e71): (183, "WeakArrayListMap"),
+  ("read_only_space", 0x02e99): (119, "EphemeronHashTableMap"),
+  ("read_only_space", 0x02ec1): (164, "EmbedderDataArrayMap"),
+  ("read_only_space", 0x02ee9): (184, "WeakCellMap"),
+  ("read_only_space", 0x02f11): (32, "StringMap"),
+  ("read_only_space", 0x02f39): (41, "ConsOneByteStringMap"),
+  ("read_only_space", 0x02f61): (33, "ConsStringMap"),
+  ("read_only_space", 0x02f89): (37, "ThinStringMap"),
+  ("read_only_space", 0x02fb1): (35, "SlicedStringMap"),
+  ("read_only_space", 0x02fd9): (43, "SlicedOneByteStringMap"),
+  ("read_only_space", 0x03001): (34, "ExternalStringMap"),
+  ("read_only_space", 0x03029): (42, "ExternalOneByteStringMap"),
+  ("read_only_space", 0x03051): (50, "UncachedExternalStringMap"),
+  ("read_only_space", 0x03079): (0, "InternalizedStringMap"),
+  ("read_only_space", 0x030a1): (2, "ExternalInternalizedStringMap"),
+  ("read_only_space", 0x030c9): (10, "ExternalOneByteInternalizedStringMap"),
+  ("read_only_space", 0x030f1): (18, "UncachedExternalInternalizedStringMap"),
+  ("read_only_space", 0x03119): (26, "UncachedExternalOneByteInternalizedStringMap"),
+  ("read_only_space", 0x03141): (58, "UncachedExternalOneByteStringMap"),
+  ("read_only_space", 0x03169): (67, "SelfReferenceMarkerMap"),
+  ("read_only_space", 0x03191): (67, "BasicBlockCountersMarkerMap"),
+  ("read_only_space", 0x031d5): (87, "ArrayBoilerplateDescriptionMap"),
+  ("read_only_space", 0x032bd): (99, "InterceptorInfoMap"),
+  ("read_only_space", 0x053c9): (72, "PromiseFulfillReactionJobTaskMap"),
+  ("read_only_space", 0x053f1): (73, "PromiseRejectReactionJobTaskMap"),
+  ("read_only_space", 0x05419): (74, "CallableTaskMap"),
+  ("read_only_space", 0x05441): (75, "CallbackTaskMap"),
+  ("read_only_space", 0x05469): (76, "PromiseResolveThenableJobTaskMap"),
+  ("read_only_space", 0x05491): (79, "FunctionTemplateInfoMap"),
+  ("read_only_space", 0x054b9): (80, "ObjectTemplateInfoMap"),
+  ("read_only_space", 0x054e1): (81, "AccessCheckInfoMap"),
+  ("read_only_space", 0x05509): (82, "AccessorInfoMap"),
+  ("read_only_space", 0x05531): (83, "AccessorPairMap"),
+  ("read_only_space", 0x05559): (84, "AliasedArgumentsEntryMap"),
+  ("read_only_space", 0x05581): (85, "AllocationMementoMap"),
+  ("read_only_space", 0x055a9): (88, "AsmWasmDataMap"),
+  ("read_only_space", 0x055d1): (89, "AsyncGeneratorRequestMap"),
+  ("read_only_space", 0x055f9): (90, "BreakPointMap"),
+  ("read_only_space", 0x05621): (91, "BreakPointInfoMap"),
+  ("read_only_space", 0x05649): (92, "CachedTemplateObjectMap"),
+  ("read_only_space", 0x05671): (94, "ClassPositionsMap"),
+  ("read_only_space", 0x05699): (95, "DebugInfoMap"),
+  ("read_only_space", 0x056c1): (98, "FunctionTemplateRareDataMap"),
+  ("read_only_space", 0x056e9): (100, "InterpreterDataMap"),
+  ("read_only_space", 0x05711): (101, "ModuleRequestMap"),
+  ("read_only_space", 0x05739): (102, "PromiseCapabilityMap"),
+  ("read_only_space", 0x05761): (103, "PromiseReactionMap"),
+  ("read_only_space", 0x05789): (104, "PropertyDescriptorObjectMap"),
+  ("read_only_space", 0x057b1): (105, "PrototypeInfoMap"),
+  ("read_only_space", 0x057d9): (106, "ScriptMap"),
+  ("read_only_space", 0x05801): (107, "SourceTextModuleInfoEntryMap"),
+  ("read_only_space", 0x05829): (108, "StackFrameInfoMap"),
+  ("read_only_space", 0x05851): (109, "StackTraceFrameMap"),
+  ("read_only_space", 0x05879): (110, "TemplateObjectDescriptionMap"),
+  ("read_only_space", 0x058a1): (111, "Tuple2Map"),
+  ("read_only_space", 0x058c9): (112, "WasmExceptionTagMap"),
+  ("read_only_space", 0x058f1): (113, "WasmExportedFunctionDataMap"),
+  ("read_only_space", 0x05919): (114, "WasmIndirectFunctionTableMap"),
+  ("read_only_space", 0x05941): (115, "WasmJSFunctionDataMap"),
+  ("read_only_space", 0x05969): (116, "WasmValueMap"),
+  ("read_only_space", 0x05991): (135, "SloppyArgumentsElementsMap"),
+  ("read_only_space", 0x059b9): (152, "DescriptorArrayMap"),
+  ("read_only_space", 0x059e1): (172, "OnHeapBasicBlockProfilerDataMap"),
+  ("read_only_space", 0x05a09): (181, "WasmCapiFunctionDataMap"),
+  ("read_only_space", 0x05a31): (169, "InternalClassMap"),
+  ("read_only_space", 0x05a59): (178, "SmiPairMap"),
+  ("read_only_space", 0x05a81): (177, "SmiBoxMap"),
+  ("read_only_space", 0x05aa9): (146, "ExportedSubClassBaseMap"),
+  ("read_only_space", 0x05ad1): (147, "ExportedSubClassMap"),
+  ("read_only_space", 0x05af9): (68, "AbstractInternalClassSubclass1Map"),
+  ("read_only_space", 0x05b21): (69, "AbstractInternalClassSubclass2Map"),
+  ("read_only_space", 0x05b49): (134, "InternalClassWithSmiElementsMap"),
+  ("read_only_space", 0x05b71): (170, "InternalClassWithStructElementsMap"),
+  ("read_only_space", 0x05b99): (148, "ExportedSubClass2Map"),
+  ("read_only_space", 0x05bc1): (179, "SortStateMap"),
+  ("read_only_space", 0x05be9): (86, "AllocationSiteWithWeakNextMap"),
+  ("read_only_space", 0x05c11): (86, "AllocationSiteWithoutWeakNextMap"),
+  ("read_only_space", 0x05c39): (77, "LoadHandler1Map"),
+  ("read_only_space", 0x05c61): (77, "LoadHandler2Map"),
+  ("read_only_space", 0x05c89): (77, "LoadHandler3Map"),
+  ("read_only_space", 0x05cb1): (78, "StoreHandler0Map"),
+  ("read_only_space", 0x05cd9): (78, "StoreHandler1Map"),
+  ("read_only_space", 0x05d01): (78, "StoreHandler2Map"),
+  ("read_only_space", 0x05d29): (78, "StoreHandler3Map"),
+  ("map_space", 0x02115): (1057, "ExternalMap"),
+  ("map_space", 0x0213d): (1072, "JSMessageObjectMap"),
+  ("map_space", 0x02165): (182, "WasmRttEqrefMap"),
+  ("map_space", 0x0218d): (182, "WasmRttExternrefMap"),
+  ("map_space", 0x021b5): (182, "WasmRttFuncrefMap"),
+  ("map_space", 0x021dd): (182, "WasmRttI31refMap"),
+}
+
+# List of known V8 objects.
+KNOWN_OBJECTS = {
+  ("read_only_space", 0x021b5): "EmptyWeakFixedArray",
+  ("read_only_space", 0x021bd): "EmptyDescriptorArray",
+  ("read_only_space", 0x021f5): "EmptyEnumCache",
+  ("read_only_space", 0x02229): "EmptyFixedArray",
+  ("read_only_space", 0x02231): "NullValue",
+  ("read_only_space", 0x02339): "UninitializedValue",
+  ("read_only_space", 0x023b1): "UndefinedValue",
+  ("read_only_space", 0x023f5): "NanValue",
+  ("read_only_space", 0x02429): "TheHoleValue",
+  ("read_only_space", 0x02455): "HoleNanValue",
+  ("read_only_space", 0x02489): "TrueValue",
+  ("read_only_space", 0x024c9): "FalseValue",
+  ("read_only_space", 0x024f9): "empty_string",
+  ("read_only_space", 0x02735): "EmptyScopeInfo",
+  ("read_only_space", 0x02765): "ArgumentsMarker",
+  ("read_only_space", 0x027c5): "Exception",
+  ("read_only_space", 0x02821): "TerminationException",
+  ("read_only_space", 0x02889): "OptimizedOut",
+  ("read_only_space", 0x028e9): "StaleRegister",
+  ("read_only_space", 0x031b9): "EmptyPropertyArray",
+  ("read_only_space", 0x031c1): "EmptyByteArray",
+  ("read_only_space", 0x031c9): "EmptyObjectBoilerplateDescription",
+  ("read_only_space", 0x031fd): "EmptyArrayBoilerplateDescription",
+  ("read_only_space", 0x03209): "EmptyClosureFeedbackCellArray",
+  ("read_only_space", 0x03211): "EmptySlowElementDictionary",
+  ("read_only_space", 0x03235): "EmptyOrderedHashMap",
+  ("read_only_space", 0x03249): "EmptyOrderedHashSet",
+  ("read_only_space", 0x0325d): "EmptyFeedbackMetadata",
+  ("read_only_space", 0x03269): "EmptyPropertyCell",
+  ("read_only_space", 0x0327d): "EmptyPropertyDictionary",
+  ("read_only_space", 0x032a5): "EmptyOrderedPropertyDictionary",
+  ("read_only_space", 0x032e5): "NoOpInterceptorInfo",
+  ("read_only_space", 0x0330d): "EmptyWeakArrayList",
+  ("read_only_space", 0x03319): "InfinityValue",
+  ("read_only_space", 0x03325): "MinusZeroValue",
+  ("read_only_space", 0x03331): "MinusInfinityValue",
+  ("read_only_space", 0x0333d): "SelfReferenceMarker",
+  ("read_only_space", 0x0337d): "BasicBlockCountersMarker",
+  ("read_only_space", 0x033c1): "OffHeapTrampolineRelocationInfo",
+  ("read_only_space", 0x033cd): "TrampolineTrivialCodeDataContainer",
+  ("read_only_space", 0x033d9): "TrampolinePromiseRejectionCodeDataContainer",
+  ("read_only_space", 0x033e5): "GlobalThisBindingScopeInfo",
+  ("read_only_space", 0x0341d): "EmptyFunctionScopeInfo",
+  ("read_only_space", 0x03445): "NativeScopeInfo",
+  ("read_only_space", 0x03461): "HashSeed",
+  ("old_space", 0x02115): "ArgumentsIteratorAccessor",
+  ("old_space", 0x02159): "ArrayLengthAccessor",
+  ("old_space", 0x0219d): "BoundFunctionLengthAccessor",
+  ("old_space", 0x021e1): "BoundFunctionNameAccessor",
+  ("old_space", 0x02225): "ErrorStackAccessor",
+  ("old_space", 0x02269): "FunctionArgumentsAccessor",
+  ("old_space", 0x022ad): "FunctionCallerAccessor",
+  ("old_space", 0x022f1): "FunctionNameAccessor",
+  ("old_space", 0x02335): "FunctionLengthAccessor",
+  ("old_space", 0x02379): "FunctionPrototypeAccessor",
+  ("old_space", 0x023bd): "RegExpResultIndicesAccessor",
+  ("old_space", 0x02401): "StringLengthAccessor",
+  ("old_space", 0x02445): "InvalidPrototypeValidityCell",
+  ("old_space", 0x024cd): "EmptyScript",
+  ("old_space", 0x0250d): "ManyClosuresCell",
+  ("old_space", 0x02519): "ArrayConstructorProtector",
+  ("old_space", 0x0252d): "NoElementsProtector",
+  ("old_space", 0x02541): "IsConcatSpreadableProtector",
+  ("old_space", 0x02555): "ArraySpeciesProtector",
+  ("old_space", 0x02569): "TypedArraySpeciesProtector",
+  ("old_space", 0x0257d): "PromiseSpeciesProtector",
+  ("old_space", 0x02591): "RegExpSpeciesProtector",
+  ("old_space", 0x025a5): "StringLengthProtector",
+  ("old_space", 0x025b9): "ArrayIteratorProtector",
+  ("old_space", 0x025cd): "ArrayBufferDetachingProtector",
+  ("old_space", 0x025e1): "PromiseHookProtector",
+  ("old_space", 0x025f5): "PromiseResolveProtector",
+  ("old_space", 0x02609): "MapIteratorProtector",
+  ("old_space", 0x0261d): "PromiseThenProtector",
+  ("old_space", 0x02631): "SetIteratorProtector",
+  ("old_space", 0x02645): "StringIteratorProtector",
+  ("old_space", 0x02659): "SingleCharacterStringCache",
+  ("old_space", 0x02a61): "StringSplitCache",
+  ("old_space", 0x02e69): "RegExpMultipleCache",
+  ("old_space", 0x03271): "BuiltinsConstantsTable",
+  ("old_space", 0x03651): "AsyncFunctionAwaitRejectSharedFun",
+  ("old_space", 0x03679): "AsyncFunctionAwaitResolveSharedFun",
+  ("old_space", 0x036a1): "AsyncGeneratorAwaitRejectSharedFun",
+  ("old_space", 0x036c9): "AsyncGeneratorAwaitResolveSharedFun",
+  ("old_space", 0x036f1): "AsyncGeneratorYieldResolveSharedFun",
+  ("old_space", 0x03719): "AsyncGeneratorReturnResolveSharedFun",
+  ("old_space", 0x03741): "AsyncGeneratorReturnClosedRejectSharedFun",
+  ("old_space", 0x03769): "AsyncGeneratorReturnClosedResolveSharedFun",
+  ("old_space", 0x03791): "AsyncIteratorValueUnwrapSharedFun",
+  ("old_space", 0x037b9): "PromiseAllResolveElementSharedFun",
+  ("old_space", 0x037e1): "PromiseAllSettledResolveElementSharedFun",
+  ("old_space", 0x03809): "PromiseAllSettledRejectElementSharedFun",
+  ("old_space", 0x03831): "PromiseAnyRejectElementSharedFun",
+  ("old_space", 0x03859): "PromiseCapabilityDefaultRejectSharedFun",
+  ("old_space", 0x03881): "PromiseCapabilityDefaultResolveSharedFun",
+  ("old_space", 0x038a9): "PromiseCatchFinallySharedFun",
+  ("old_space", 0x038d1): "PromiseGetCapabilitiesExecutorSharedFun",
+  ("old_space", 0x038f9): "PromiseThenFinallySharedFun",
+  ("old_space", 0x03921): "PromiseThrowerFinallySharedFun",
+  ("old_space", 0x03949): "PromiseValueThunkFinallySharedFun",
+  ("old_space", 0x03971): "ProxyRevokeSharedFun",
+}
+
+# Lower 32 bits of first page addresses for various heap spaces.
+HEAP_FIRST_PAGES = {
+  0x08100000: "old_space",
+  0x08140000: "map_space",
+  0x08040000: "read_only_space",
+}
+
+# List of known V8 Frame Markers.
+FRAME_MARKERS = (
+  "ENTRY",
+  "CONSTRUCT_ENTRY",
+  "EXIT",
+  "OPTIMIZED",
+  "WASM",
+  "WASM_TO_JS",
+  "JS_TO_WASM",
+  "WASM_DEBUG_BREAK",
+  "C_WASM_ENTRY",
+  "WASM_EXIT",
+  "WASM_COMPILE_LAZY",
+  "INTERPRETED",
+  "STUB",
+  "BUILTIN_CONTINUATION",
+  "JAVA_SCRIPT_BUILTIN_CONTINUATION",
+  "JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH",
+  "INTERNAL",
+  "CONSTRUCT",
+  "ARGUMENTS_ADAPTOR",
+  "BUILTIN",
+  "BUILTIN_EXIT",
+  "NATIVE",
+)
+
+# This set of constants is generated from a shipping build.
diff --git a/src/third_party/v8/tools/v8windbg/BUILD.gn b/src/third_party/v8/tools/v8windbg/BUILD.gn
new file mode 100644
index 0000000..10d06a1
--- /dev/null
+++ b/src/third_party/v8/tools/v8windbg/BUILD.gn
@@ -0,0 +1,116 @@
+# Copyright 2020 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("../../gni/v8.gni")
+
+config("v8windbg_config") {
+  # Required for successful compilation of SDK header file DbgModel.h.
+  cflags_cc = [ "/Zc:twoPhase-" ]
+
+  include_dirs = [ "../.." ]
+}
+
+# Basic support for WinDbg extensions, with nothing specific to V8.
+source_set("v8windbg_base") {
+  testonly = true
+
+  sources = [
+    "base/dbgext.cc",
+    "base/dbgext.h",
+    "base/utilities.cc",
+    "base/utilities.h",
+  ]
+
+  libs = [
+    "DbgEng.lib",
+    "DbgModel.lib",
+    "RuntimeObject.lib",
+    "comsuppwd.lib",
+  ]
+
+  public_configs = [ ":v8windbg_config" ]
+}
+
+# An extension DLL that can be loaded into WinDbg with `.load v8windbg`.
+v8_shared_library("v8windbg") {
+  testonly = true
+
+  sources = [
+    "base/dbgext.def",
+    "src/cur-isolate.cc",
+    "src/cur-isolate.h",
+    "src/list-chunks.cc",
+    "src/list-chunks.h",
+    "src/local-variables.cc",
+    "src/local-variables.h",
+    "src/object-inspection.cc",
+    "src/object-inspection.h",
+    "src/v8-debug-helper-interop.cc",
+    "src/v8-debug-helper-interop.h",
+    "src/v8windbg-extension.cc",
+    "src/v8windbg-extension.h",
+  ]
+
+  deps = [
+    ":v8windbg_base",
+    "../debug_helper:v8_debug_helper",
+  ]
+}
+
+# Copies Windows SDK files that v8windbg_test needs.
+action("copy_prereqs") {
+  testonly = true
+
+  script = "copy-prereqs.py"
+
+  inputs = [
+    script,
+    "//build/vs_toolchain.py",
+  ]
+
+  outputs = [ "$root_out_dir/dbgeng.dll" ]
+
+  args = [
+    rebase_path("//build"),
+    rebase_path(root_out_dir),
+    target_cpu,
+  ]
+}
+
+# A test that launches a separate d8 process and debugs it using v8windbg.
+v8_source_set("v8windbg_test") {
+  testonly = true
+
+  sources = [
+    "test/debug-callbacks.cc",
+    "test/debug-callbacks.h",
+    "test/v8windbg-test.cc",
+    "test/v8windbg-test.h",
+  ]
+
+  deps = [ "../..:v8_libbase" ]  # For CHECK macro.
+
+  data_deps = [
+    ":copy_prereqs",
+    ":v8windbg",
+    ":v8windbg_test_script",
+    "../..:d8",
+  ]
+
+  libs = [
+    "DbgEng.lib",
+    "DbgModel.lib",
+    "Pathcch.lib",
+    "RuntimeObject.lib",
+  ]
+
+  configs = [ ":v8windbg_config" ]
+}
+
+# Copies the JavaScript file used by v8windbg_test.
+copy("v8windbg_test_script") {
+  testonly = true
+  sources = [ "test/script.js" ]
+  outputs = [ "$target_out_dir/v8windbg-test-script.js" ]
+}
diff --git a/src/third_party/v8/tools/v8windbg/README.md b/src/third_party/v8/tools/v8windbg/README.md
new file mode 100644
index 0000000..dc0c4e1
--- /dev/null
+++ b/src/third_party/v8/tools/v8windbg/README.md
@@ -0,0 +1,151 @@
+# v8windbg
+
+V8windbg is a WinDbg extension for the V8 engine. It adjusts the behavior of the
+Locals pane and corresponding `dx` commands to display useful data when
+inspecting V8 object types. It is intended to be as robust as possible in dumps
+with limited memory, and should work equally well in live sessions, crash dumps,
+and time travel debugging.
+
+## Building
+
+Run `autoninja v8windbg` in your output directory.
+
+## Using
+
+In WinDbgX, run `.load path\to\your\output\dir\v8windbg.dll` to load the
+extension. To inspect V8 objects, use the Locals window or the `dx` command as
+usual.
+
+**Important notes:**
+
+- The version of v8windbg must exactly match the version and build configuration
+  of the process you're debugging. (To find the version number of a module in a
+  crash dump, enter `lm` and click the module name, or run `lmDvm modulename`.)
+- V8windbg relies on detailed symbols (symbol_level = 2).
+- Ensure also that WinDbg can load the symbols (.pdb file) for the module
+  containing V8.
+- Cross-architecture debugging is possible in some cases:
+  - To debug an x86 process on x64, load the x86 build of v8windbg.
+  - To debug an ARM64 process on x64, load the ARM64 simulator build of v8windbg
+    (built with target_cpu="x64" and v8_target_cpu="arm64").
+
+As well as improving the Locals pane behavior, v8windbg also provides a few
+functions that can be called from within `dx` commands:
+
+- `@$v8object()` returns information about the fields of a tagged V8 value,
+  passed in as a plain number like `dx @$v8object(0x34f49880471)`. This invokes
+  the same logic that is used for the locals pane. You may also pass a type hint
+  as an optional second parameter if you find that v8windbg is not inferring the
+  correct type (which can happen when the memory for the object's Map wasn't
+  collected in a crash dump). The type hint is a fully-qualified C++ class name,
+  like `dx @$v8object(0x34f49880471, "v8::internal::JSArray")`.
+- `@$curisolate()` gets the Isolate pointer for the current thread, if the
+  current thread has a JavaScript Isolate associated.
+- `@$listchunks()` returns a list of the memory chunks in the Heap for the
+  current Isolate.
+
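+For example (output is illustrative), the result can be enumerated or indexed
+directly from `dx`: `dx @$listchunks()` lists all chunks for the current
+Isolate, and `dx @$listchunks()[0]` shows the first entry.
+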
+*Tip:* to see what objects are present in a chunk of heap memory, you can cast
+it to an array of `TaggedValue`, like this:
+
+`dx (v8::internal::TaggedValue(*)[64])0x34f49880450`
+
+## Architecture
+
+V8windbg uses the [DataModel] as much as possible as opposed to the older
+[DbgEng] APIs. It uses the [WRL COM] APIs due to limitations in Clang's support
+for [C++/WinRT COM].
+
+Where possible, v8windbg uses the cross-platform v8_debug_helper library to
+avoid depending on V8 internals.
+
+The source in `./base` is a generic starting point for implementing a WinDbg
+extension. The V8-specific implementation under `./src` then implements the two
+functions declared in `dbgext.h` to create and destroy the extension instance.
+
+`./src` file index:
+
+- `cur-isolate.{cc,h}` implements the `IModelMethod` for `@$curisolate()`.
+- `list-chunks.{cc,h}` implements the `IModelMethod` for `@$listchunks()`. Its
+  result is a custom object that supports iteration and indexing.
+- `local-variables.{cc,h}` implements the `IModelPropertyAccessor` that provides
+  content to show in the Locals pane for stack frames corresponding to builtins
+  or runtime-generated code.
+- `object-inspection.{cc,h}` contains various classes that allow the debugger to
+  show fields within V8 objects.
+- `v8-debug-helper-interop.{cc,h}` makes requests to the V8 postmortem debugging
+  API, and converts the results into simple C++ structs.
+- `v8windbg-extension.{cc,h}` is responsible for initializing the extension and
+  cleaning up when the extension is unloaded.
+
+When the extension is initialized (`Extension::Initialize()`):
+
+- It registers a "parent model" for all known V8 object types, such as
+  `v8::internal::HeapObject` and `v8::internal::Symbol`. Any time WinDbg needs
+  to represent a value with one of these types, it creates an `IModelObject`
+  representing the value and attaches the parent model. This particular parent
+  model supports `IStringDisplayableConcept` and `IDynamicKeyProviderConcept`,
+  meaning the debugger will call a custom method every time it wants to get a
+  description string or a list of fields for any of these objects (see the
+  sketch after this list).
+- It registers a different parent model, with a single property getter named
+  "Value", for handle types such as `v8::internal::Handle<*>`. The "Value"
+  getter returns the correctly-typed tagged pointer contained by the handle.
+- It overrides the getter functions for "LocalVariables" and "Parameters" on the
+  parent model for stack frames. When the user selects a stack frame, WinDbg
+  calls these getter functions to determine what it should show in the Locals
+  pane.
+- It registers the function aliases such as `@$curisolate()`.
+
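+The registration in the first bullet follows the standard DataModel pattern.
+Below is a minimal sketch (not the actual v8windbg code) of how such a
+registration could look; it assumes the globals from `base/dbgext.h` are
+already populated and uses a hypothetical `MyHeapObjectModel` WRL class that
+implements `IDataModelConcept` plus the display and key-provider concepts:
+
+```cpp
+// Sketch only: attach a parent model to every v8::internal::HeapObject in the
+// target process. MyHeapObjectModel is a hypothetical IDataModelConcept
+// implementation; the real extension registers its own model classes.
+HRESULT RegisterHeapObjectModel(IDebugHostModule* v8_module) {
+  // Wrap the concept implementation in an IModelObject that can serve as a
+  // parent model.
+  WRL::ComPtr<IModelObject> sp_parent_model;
+  RETURN_IF_FAIL(sp_data_model_manager->CreateDataModelObject(
+      WRL::Make<MyHeapObjectModel>().Get(), &sp_parent_model));
+
+  // Create a type signature matching the native type and attach the parent
+  // model to every instance of that type.
+  WRL::ComPtr<IDebugHostTypeSignature> sp_signature;
+  RETURN_IF_FAIL(sp_debug_host_symbols->CreateTypeSignature(
+      L"v8::internal::HeapObject", v8_module, &sp_signature));
+  return sp_data_model_manager->RegisterModelForTypeSignature(
+      sp_signature.Get(), sp_parent_model.Get());
+}
+```
+
+On teardown, the matching `UnregisterModelForTypeSignature` call removes the
+parent model again.
+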
+The `./test` directory contains a test function that exercises v8windbg. It does
+not require WinDbg, but uses DbgEng.dll and DbgModel.dll from the Windows SDK
+(these are slightly older versions of the same modules used by WinDbg). The test
+function launches a separate d8 process, attaches to that process as a debugger,
+lets d8 run until it hits a breakpoint, and then checks the output of a few `dx`
+commands.
+
+## Debugging the extension
+
+To debug the extension, launch a WinDbgx instance to debug with an active
+target, e.g.
+
+`windbgx \src\github\v8\out\x64.debug\d8.exe -e "console.log('hello');"`
+
+or
+
+`windbgx \src\github\v8\out\x64.debug\d8.exe c:\temp\test.js`
+
+The WinDbgx process itself does not host the extensions, but uses a helper
+process. Attach another instance of WinDbgx to the `enghost.exe` helper process,
+e.g.
+
+`windbgx -pn enghost.exe`
+
+Set a breakpoint in this second session for when the extension initializes, e.g.
+
+`bm v8windbg!DebugExtensionInitialize`
+
+...and/or whenever a function of interest is invoked, e.g.
+
+ - `bp v8windbg!CurrIsolateAlias::Call` for the invocation of `@$curisolate()`
+ - `bp v8windbg!GetHeapObject` for the interpretation of V8 objects.
+
+Load the extension in the target debugger (the first WinDbg session), which
+should trigger the breakpoint.
+
+`.load "C:\\src\\github\\v8windbg\\x64\\v8windbg.dll"`
+
+Note: For D8, the below is a good breakpoint to set just before any script is
+run:
+
+`bp d8_exe!v8::Shell::ExecuteString`
+
+...or the following once the V8 engine is entered (for component builds):
+
+`bp v8!v8::Script::Run`
+
+Then trigger the extension code of interest via something like `dx source` or
+`dx @$curisolate()`.
+
+[DataModel]: https://docs.microsoft.com/en-us/windows-hardware/drivers/debugger/data-model-cpp-overview
+[DbgEng]: https://docs.microsoft.com/en-us/windows-hardware/drivers/debugger/writing-dbgeng-extension-code
+[C++/WinRT COM]: https://docs.microsoft.com/en-us/windows/uwp/cpp-and-winrt-apis/consume-com
+[WRL COM]: https://docs.microsoft.com/en-us/cpp/cppcx/wrl/windows-runtime-cpp-template-library-wrl?view=vs-2019
diff --git a/src/third_party/v8/tools/v8windbg/base/dbgext.cc b/src/third_party/v8/tools/v8windbg/base/dbgext.cc
new file mode 100644
index 0000000..e3f0095
--- /dev/null
+++ b/src/third_party/v8/tools/v8windbg/base/dbgext.cc
@@ -0,0 +1,75 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "tools/v8windbg/base/dbgext.h"
+
+#include <crtdbg.h>
+#include <wrl/module.h>
+
+#include "tools/v8windbg/base/utilities.h"
+
+// See
+// https://docs.microsoft.com/en-us/visualstudio/debugger/crt-debugging-techniques
+// for the memory leak and debugger reporting macros used from <crtdbg.h>
+_CrtMemState mem_old, mem_new, mem_diff;
+int original_crt_dbg_flag = 0;
+
+WRL::ComPtr<IDataModelManager> sp_data_model_manager;
+WRL::ComPtr<IDebugHost> sp_debug_host;
+WRL::ComPtr<IDebugControl5> sp_debug_control;
+WRL::ComPtr<IDebugHostMemory2> sp_debug_host_memory;
+WRL::ComPtr<IDebugHostSymbols> sp_debug_host_symbols;
+WRL::ComPtr<IDebugHostExtensibility> sp_debug_host_extensibility;
+
+extern "C" {
+
+HRESULT
+__stdcall DebugExtensionInitialize(PULONG /*pVersion*/, PULONG /*pFlags*/) {
+  original_crt_dbg_flag = _CrtSetDbgFlag(_CRTDBG_ALLOC_MEM_DF);
+  _CrtMemCheckpoint(&mem_old);
+
+  WRL::ComPtr<IDebugClient> sp_debug_client;
+  WRL::ComPtr<IHostDataModelAccess> sp_data_model_access;
+
+  RETURN_IF_FAIL(DebugCreate(__uuidof(IDebugClient), &sp_debug_client));
+
+  RETURN_IF_FAIL(sp_debug_client.As(&sp_data_model_access));
+  RETURN_IF_FAIL(sp_debug_client.As(&sp_debug_control));
+
+  RETURN_IF_FAIL(sp_data_model_access->GetDataModel(&sp_data_model_manager,
+                                                    &sp_debug_host));
+
+  RETURN_IF_FAIL(sp_debug_host.As(&sp_debug_host_memory));
+  RETURN_IF_FAIL(sp_debug_host.As(&sp_debug_host_symbols));
+  RETURN_IF_FAIL(sp_debug_host.As(&sp_debug_host_extensibility));
+
+  return CreateExtension();
+}
+
+void __stdcall DebugExtensionUninitialize() {
+  DestroyExtension();
+  sp_debug_host = nullptr;
+  sp_data_model_manager = nullptr;
+  sp_debug_host_memory = nullptr;
+  sp_debug_host_symbols = nullptr;
+  sp_debug_host_extensibility = nullptr;
+
+  _CrtMemCheckpoint(&mem_new);
+  if (_CrtMemDifference(&mem_diff, &mem_old, &mem_new)) {
+    _CrtMemDumpStatistics(&mem_diff);
+  }
+  _CrtSetDbgFlag(original_crt_dbg_flag);
+}
+
+HRESULT __stdcall DebugExtensionCanUnload(void) {
+  if (!WRL::Module<WRL::InProc>::GetModule().Terminate()) {
+    _RPTF0(_CRT_WARN, "Failed to unload WRL\n");
+    return S_FALSE;
+  }
+  return S_OK;
+}
+
+void __stdcall DebugExtensionUnload() { return; }
+
+}  // extern "C"
diff --git a/src/third_party/v8/tools/v8windbg/base/dbgext.def b/src/third_party/v8/tools/v8windbg/base/dbgext.def
new file mode 100644
index 0000000..7f32756
--- /dev/null
+++ b/src/third_party/v8/tools/v8windbg/base/dbgext.def
@@ -0,0 +1,5 @@
+EXPORTS
+  DebugExtensionInitialize
+  DebugExtensionUninitialize
+  DebugExtensionCanUnload
+  DebugExtensionUnload
diff --git a/src/third_party/v8/tools/v8windbg/base/dbgext.h b/src/third_party/v8/tools/v8windbg/base/dbgext.h
new file mode 100644
index 0000000..8b36a8f
--- /dev/null
+++ b/src/third_party/v8/tools/v8windbg/base/dbgext.h
@@ -0,0 +1,34 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TOOLS_V8WINDBG_BASE_DBGEXT_H_
+#define V8_TOOLS_V8WINDBG_BASE_DBGEXT_H_
+
+#if !defined(UNICODE) || !defined(_UNICODE)
+#error Unicode not defined
+#endif
+
+#include <DbgEng.h>
+#include <DbgModel.h>
+#include <Windows.h>
+#include <crtdbg.h>
+#include <wrl/client.h>
+
+#include <string>
+
+namespace WRL = Microsoft::WRL;
+
+// Globals for use throughout the extension. (Populated on load).
+extern WRL::ComPtr<IDataModelManager> sp_data_model_manager;
+extern WRL::ComPtr<IDebugHost> sp_debug_host;
+extern WRL::ComPtr<IDebugControl5> sp_debug_control;
+extern WRL::ComPtr<IDebugHostMemory2> sp_debug_host_memory;
+extern WRL::ComPtr<IDebugHostSymbols> sp_debug_host_symbols;
+extern WRL::ComPtr<IDebugHostExtensibility> sp_debug_host_extensibility;
+
+// To be implemented by the custom extension code. (Called on load).
+HRESULT CreateExtension();
+void DestroyExtension();
+
+#endif  // V8_TOOLS_V8WINDBG_BASE_DBGEXT_H_
diff --git a/src/third_party/v8/tools/v8windbg/base/utilities.cc b/src/third_party/v8/tools/v8windbg/base/utilities.cc
new file mode 100644
index 0000000..1f0e2bc
--- /dev/null
+++ b/src/third_party/v8/tools/v8windbg/base/utilities.cc
@@ -0,0 +1,255 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "tools/v8windbg/base/utilities.h"
+
+#include <comutil.h>
+#include <oleauto.h>
+
+#include <vector>
+
+namespace {
+
+HRESULT BoxObject(IDataModelManager* p_manager, IUnknown* p_object,
+                  ModelObjectKind kind, IModelObject** pp_model_object) {
+  *pp_model_object = nullptr;
+
+  VARIANT vt_val;
+  vt_val.vt = VT_UNKNOWN;
+  vt_val.punkVal = p_object;
+
+  HRESULT hr = p_manager->CreateIntrinsicObject(kind, &vt_val, pp_model_object);
+  return hr;
+}
+
+}  // namespace
+
+HRESULT CreateProperty(IDataModelManager* p_manager,
+                       IModelPropertyAccessor* p_property,
+                       IModelObject** pp_property_object) {
+  return BoxObject(p_manager, p_property, ObjectPropertyAccessor,
+                   pp_property_object);
+}
+
+HRESULT CreateMethod(IDataModelManager* p_manager, IModelMethod* p_method,
+                     IModelObject** pp_method_object) {
+  return BoxObject(p_manager, p_method, ObjectMethod, pp_method_object);
+}
+
+HRESULT UnboxProperty(IModelObject* object, IModelPropertyAccessor** result) {
+  ModelObjectKind kind = (ModelObjectKind)-1;
+  RETURN_IF_FAIL(object->GetKind(&kind));
+  if (kind != ObjectPropertyAccessor) return E_FAIL;
+  _variant_t variant;
+  RETURN_IF_FAIL(object->GetIntrinsicValue(&variant));
+  if (variant.vt != VT_UNKNOWN) return E_FAIL;
+  WRL::ComPtr<IModelPropertyAccessor> accessor;
+  RETURN_IF_FAIL(WRL::ComPtr<IUnknown>(variant.punkVal).As(&accessor));
+  *result = accessor.Detach();
+  return S_OK;
+}
+
+HRESULT CreateTypedIntrinsic(uint64_t value, IDebugHostType* type,
+                             IModelObject** result) {
+  // Figure out what kind of VARIANT we need to make.
+  IntrinsicKind kind;
+  VARTYPE carrier;
+  RETURN_IF_FAIL(type->GetIntrinsicType(&kind, &carrier));
+
+  VARIANT vt_val;
+  switch (carrier) {
+    case VT_BOOL:
+      vt_val.boolVal = value ? VARIANT_TRUE : VARIANT_FALSE;
+      break;
+    case VT_I1:
+      vt_val.cVal = static_cast<int8_t>(value);
+      break;
+    case VT_UI1:
+      vt_val.bVal = static_cast<uint8_t>(value);
+      break;
+    case VT_I2:
+      vt_val.iVal = static_cast<int16_t>(value);
+      break;
+    case VT_UI2:
+      vt_val.uiVal = static_cast<uint16_t>(value);
+      break;
+    case VT_INT:
+      vt_val.intVal = static_cast<int>(value);
+      break;
+    case VT_UINT:
+      vt_val.uintVal = static_cast<unsigned int>(value);
+      break;
+    case VT_I4:
+      vt_val.lVal = static_cast<int32_t>(value);
+      break;
+    case VT_UI4:
+      vt_val.ulVal = static_cast<uint32_t>(value);
+      break;
+    case VT_INT_PTR:
+      vt_val.llVal = static_cast<intptr_t>(value);
+      break;
+    case VT_UINT_PTR:
+      vt_val.ullVal = static_cast<uintptr_t>(value);
+      break;
+    case VT_I8:
+      vt_val.llVal = static_cast<int64_t>(value);
+      break;
+    case VT_UI8:
+      vt_val.ullVal = static_cast<uint64_t>(value);
+      break;
+    default:
+      return E_FAIL;
+  }
+  vt_val.vt = carrier;
+  return sp_data_model_manager->CreateTypedIntrinsicObject(&vt_val, type,
+                                                           result);
+}
+
+HRESULT CreateULong64(ULONG64 value, IModelObject** pp_int) {
+  HRESULT hr = S_OK;
+  *pp_int = nullptr;
+
+  VARIANT vt_val;
+  vt_val.vt = VT_UI8;
+  vt_val.ullVal = value;
+
+  hr = sp_data_model_manager->CreateIntrinsicObject(ObjectIntrinsic, &vt_val,
+                                                    pp_int);
+  return hr;
+}
+
+HRESULT UnboxULong64(IModelObject* object, ULONG64* value, bool convert) {
+  ModelObjectKind kind = (ModelObjectKind)-1;
+  RETURN_IF_FAIL(object->GetKind(&kind));
+  if (kind != ObjectIntrinsic) return E_FAIL;
+  _variant_t variant;
+  RETURN_IF_FAIL(object->GetIntrinsicValue(&variant));
+  if (convert) {
+    RETURN_IF_FAIL(VariantChangeType(&variant, &variant, 0, VT_UI8));
+  }
+  if (variant.vt != VT_UI8) return E_FAIL;
+  *value = variant.ullVal;
+  return S_OK;
+}
+
+HRESULT GetInt32(IDebugHostConstant* object, int* value) {
+  variant_t variant;
+  RETURN_IF_FAIL(object->GetValue(&variant));
+
+  if (variant.vt != VT_I4) return E_FAIL;
+  *value = variant.lVal;
+  return S_OK;
+}
+
+HRESULT CreateInt32(int value, IModelObject** pp_int) {
+  HRESULT hr = S_OK;
+  *pp_int = nullptr;
+
+  VARIANT vt_val;
+  vt_val.vt = VT_I4;
+  vt_val.intVal = value;
+
+  hr = sp_data_model_manager->CreateIntrinsicObject(ObjectIntrinsic, &vt_val,
+                                                    pp_int);
+  return hr;
+}
+
+HRESULT CreateUInt32(uint32_t value, IModelObject** pp_int) {
+  HRESULT hr = S_OK;
+  *pp_int = nullptr;
+
+  VARIANT vt_val;
+  vt_val.vt = VT_UI4;
+  vt_val.uintVal = value;
+
+  hr = sp_data_model_manager->CreateIntrinsicObject(ObjectIntrinsic, &vt_val,
+                                                    pp_int);
+  return hr;
+}
+
+HRESULT CreateBool(bool value, IModelObject** pp_val) {
+  HRESULT hr = S_OK;
+  *pp_val = nullptr;
+
+  VARIANT vt_val;
+  vt_val.vt = VT_BOOL;
+  vt_val.boolVal = value ? VARIANT_TRUE : VARIANT_FALSE;
+
+  hr = sp_data_model_manager->CreateIntrinsicObject(ObjectIntrinsic, &vt_val,
+                                                    pp_val);
+  return hr;
+}
+
+HRESULT CreateNumber(double value, IModelObject** pp_val) {
+  HRESULT hr = S_OK;
+  *pp_val = nullptr;
+
+  VARIANT vt_val;
+  vt_val.vt = VT_R8;
+  vt_val.dblVal = value;
+
+  hr = sp_data_model_manager->CreateIntrinsicObject(ObjectIntrinsic, &vt_val,
+                                                    pp_val);
+  return hr;
+}
+
+HRESULT CreateString(std::u16string value, IModelObject** pp_val) {
+  HRESULT hr = S_OK;
+  *pp_val = nullptr;
+
+  VARIANT vt_val;
+  vt_val.vt = VT_BSTR;
+  vt_val.bstrVal =
+      ::SysAllocString(reinterpret_cast<const OLECHAR*>(value.c_str()));
+
+  hr = sp_data_model_manager->CreateIntrinsicObject(ObjectIntrinsic, &vt_val,
+                                                    pp_val);
+  return hr;
+}
+
+HRESULT UnboxString(IModelObject* object, BSTR* value) {
+  ModelObjectKind kind = (ModelObjectKind)-1;
+  RETURN_IF_FAIL(object->GetKind(&kind));
+  if (kind != ObjectIntrinsic) return E_FAIL;
+  _variant_t variant;
+  RETURN_IF_FAIL(object->GetIntrinsicValue(&variant));
+  if (variant.vt != VT_BSTR) return E_FAIL;
+  *value = variant.Detach().bstrVal;
+  return S_OK;
+}
+
+HRESULT GetModelAtIndex(WRL::ComPtr<IModelObject>& sp_parent,
+                        WRL::ComPtr<IModelObject>& sp_index,
+                        IModelObject** p_result) {
+  WRL::ComPtr<IIndexableConcept> sp_indexable_concept;
+  RETURN_IF_FAIL(sp_parent->GetConcept(__uuidof(IIndexableConcept),
+                                       &sp_indexable_concept, nullptr));
+
+  std::vector<IModelObject*> p_indexers{sp_index.Get()};
+  return sp_indexable_concept->GetAt(sp_parent.Get(), 1, p_indexers.data(),
+                                     p_result, nullptr);
+}
+
+HRESULT GetCurrentThread(WRL::ComPtr<IDebugHostContext>& sp_host_context,
+                         IModelObject** p_current_thread) {
+  WRL::ComPtr<IModelObject> sp_boxed_context, sp_root_namespace;
+  WRL::ComPtr<IModelObject> sp_debugger, sp_sessions, sp_processes, sp_threads;
+  WRL::ComPtr<IModelObject> sp_curr_session, sp_curr_process;
+
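+  // Walk the debugger object model: Debugger.Sessions[ctx].Processes[ctx]
+  // .Threads[ctx], using the boxed host context as the index at each level.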
+  RETURN_IF_FAIL(BoxObject(sp_data_model_manager.Get(), sp_host_context.Get(),
+                           ObjectContext, &sp_boxed_context));
+  RETURN_IF_FAIL(sp_data_model_manager->GetRootNamespace(&sp_root_namespace));
+  RETURN_IF_FAIL(
+      sp_root_namespace->GetKeyValue(L"Debugger", &sp_debugger, nullptr));
+  RETURN_IF_FAIL(sp_debugger->GetKeyValue(L"Sessions", &sp_sessions, nullptr));
+  RETURN_IF_FAIL(
+      GetModelAtIndex(sp_sessions, sp_boxed_context, &sp_curr_session));
+  RETURN_IF_FAIL(
+      sp_curr_session->GetKeyValue(L"Processes", &sp_processes, nullptr));
+  RETURN_IF_FAIL(
+      GetModelAtIndex(sp_processes, sp_boxed_context, &sp_curr_process));
+  RETURN_IF_FAIL(
+      sp_curr_process->GetKeyValue(L"Threads", &sp_threads, nullptr));
+  return GetModelAtIndex(sp_threads, sp_boxed_context, p_current_thread);
+}
diff --git a/src/third_party/v8/tools/v8windbg/base/utilities.h b/src/third_party/v8/tools/v8windbg/base/utilities.h
new file mode 100644
index 0000000..06af6c3
--- /dev/null
+++ b/src/third_party/v8/tools/v8windbg/base/utilities.h
@@ -0,0 +1,87 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TOOLS_V8WINDBG_BASE_UTILITIES_H_
+#define V8_TOOLS_V8WINDBG_BASE_UTILITIES_H_
+
+#include "tools/v8windbg/base/dbgext.h"
+
+inline const wchar_t* U16ToWChar(const char16_t* p_u16) {
+  static_assert(sizeof(wchar_t) == sizeof(char16_t), "wrong wchar size");
+  return reinterpret_cast<const wchar_t*>(p_u16);
+}
+
+inline const wchar_t* U16ToWChar(std::u16string& str) {
+  return U16ToWChar(str.data());
+}
+
+#if defined(WIN32)
+inline std::u16string ConvertToU16String(std::string utf8_string) {
+  int len_chars =
+      ::MultiByteToWideChar(CP_UTF8, 0, utf8_string.c_str(), -1, nullptr, 0);
+
+  char16_t* p_buff =
+      static_cast<char16_t*>(malloc(len_chars * sizeof(char16_t)));
+
+  // On Windows, wchar_t is the same size as char16_t.
+  static_assert(sizeof(wchar_t) == sizeof(char16_t), "wrong wchar size");
+  len_chars =
+      ::MultiByteToWideChar(CP_UTF8, 0, utf8_string.c_str(), -1,
+                            reinterpret_cast<wchar_t*>(p_buff), len_chars);
+  std::u16string result{p_buff};
+  free(p_buff);
+
+  return result;
+}
+#else
+#error String encoding conversion must be provided for the target platform.
+#endif
+
+HRESULT CreateProperty(IDataModelManager* p_manager,
+                       IModelPropertyAccessor* p_property,
+                       IModelObject** pp_property_object);
+
+HRESULT CreateMethod(IDataModelManager* p_manager, IModelMethod* p_method,
+                     IModelObject** pp_method_object);
+
+HRESULT UnboxProperty(IModelObject* object, IModelPropertyAccessor** result);
+
+HRESULT CreateTypedIntrinsic(uint64_t value, IDebugHostType* type,
+                             IModelObject** result);
+
+HRESULT CreateULong64(ULONG64 value, IModelObject** pp_int);
+
+HRESULT UnboxULong64(IModelObject* object, ULONG64* value,
+                     bool convert = false);
+
+HRESULT GetInt32(IDebugHostConstant* object, int* value);
+
+HRESULT CreateInt32(int value, IModelObject** pp_int);
+
+HRESULT CreateUInt32(uint32_t value, IModelObject** pp_int);
+
+HRESULT CreateBool(bool value, IModelObject** pp_val);
+
+HRESULT CreateNumber(double value, IModelObject** pp_val);
+
+HRESULT CreateString(std::u16string value, IModelObject** pp_val);
+
+HRESULT UnboxString(IModelObject* object, BSTR* value);
+
+HRESULT GetModelAtIndex(WRL::ComPtr<IModelObject>& sp_parent,
+                        WRL::ComPtr<IModelObject>& sp_index,
+                        IModelObject** p_result);
+
+HRESULT GetCurrentThread(WRL::ComPtr<IDebugHostContext>& sp_host_context,
+                         IModelObject** p_current_thread);
+
+#define RETURN_IF_FAIL(expression) \
+  do {                             \
+    HRESULT hr = expression;       \
+    if (FAILED(hr)) {              \
+      return hr;                   \
+    }                              \
+  } while (false)
+
+#endif  // V8_TOOLS_V8WINDBG_BASE_UTILITIES_H_
diff --git a/src/third_party/v8/tools/v8windbg/copy-prereqs.py b/src/third_party/v8/tools/v8windbg/copy-prereqs.py
new file mode 100644
index 0000000..c13efe6
--- /dev/null
+++ b/src/third_party/v8/tools/v8windbg/copy-prereqs.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+# Copyright 2020 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""This program copies dbgeng.dll from the Windows SDK to the output directory,
+so that we can test v8windbg. (The version of dbgeng.dll in system32, which
+would be loaded otherwise, is insufficient.)
+Arguments:
+1. The directory that contains vs_toolchain.py
+2. The directory to copy to
+3. The cpu type for this build
+"""
+
+import sys
+import os
+
+vs_toolchain_dir, target_dir, target_cpu = sys.argv[1:]
+
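+# Make vs_toolchain.py importable from the directory passed as the first
+# argument.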
+sys.path.insert(0, vs_toolchain_dir)
+import vs_toolchain
+
+def CopyDebuggerFile(debug_file):
+  win_sdk_dir = vs_toolchain.SetEnvironmentAndGetSDKDir()
+  if not win_sdk_dir:
+    return
+
+  full_path = os.path.join(win_sdk_dir, 'Debuggers', target_cpu, debug_file)
+  if not os.path.exists(full_path):
+    return
+
+  target_path = os.path.join(target_dir, debug_file)
+  vs_toolchain._CopyRuntimeImpl(target_path, full_path, verbose=False)
+
+  # Ninja expects the file's timestamp to be newer than this script.
+  os.utime(target_path, None)
+
+CopyDebuggerFile('dbgeng.dll')
diff --git a/src/third_party/v8/tools/v8windbg/src/cur-isolate.cc b/src/third_party/v8/tools/v8windbg/src/cur-isolate.cc
new file mode 100644
index 0000000..f39098f
--- /dev/null
+++ b/src/third_party/v8/tools/v8windbg/src/cur-isolate.cc
@@ -0,0 +1,94 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "tools/v8windbg/src/cur-isolate.h"
+
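+// Reads the value of the `isolate_key_` symbol out of the target's V8 module.
+// This is the TLS slot index under which each thread stores its current
+// Isolate pointer; GetCurrentIsolate below uses it to index the thread's
+// TlsSlots.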
+HRESULT GetIsolateKey(WRL::ComPtr<IDebugHostContext>& sp_ctx,
+                      int* isolate_key) {
+  auto sp_v8_module = Extension::Current()->GetV8Module(sp_ctx);
+  if (sp_v8_module == nullptr) return E_FAIL;
+
+  WRL::ComPtr<IDebugHostSymbol> sp_isolate_sym;
+  RETURN_IF_FAIL(sp_v8_module->FindSymbolByName(kIsolateKey, &sp_isolate_sym));
+  SymbolKind kind;
+  RETURN_IF_FAIL(sp_isolate_sym->GetSymbolKind(&kind));
+  if (kind != SymbolData) return E_FAIL;
+  WRL::ComPtr<IDebugHostData> sp_isolate_key_data;
+  RETURN_IF_FAIL(sp_isolate_sym.As(&sp_isolate_key_data));
+  Location loc;
+  RETURN_IF_FAIL(sp_isolate_key_data->GetLocation(&loc));
+  ULONG64 bytes_read;
+  RETURN_IF_FAIL(sp_debug_host_memory->ReadBytes(
+      sp_ctx.Get(), loc, isolate_key, sizeof(*isolate_key), &bytes_read));
+  return S_OK;
+}
+
+HRESULT GetCurrentIsolate(WRL::ComPtr<IModelObject>& sp_result) {
+  sp_result = nullptr;
+
+  // Get the current context
+  WRL::ComPtr<IDebugHostContext> sp_host_context;
+  RETURN_IF_FAIL(sp_debug_host->GetCurrentContext(&sp_host_context));
+
+  WRL::ComPtr<IModelObject> sp_curr_thread;
+  RETURN_IF_FAIL(GetCurrentThread(sp_host_context, &sp_curr_thread));
+
+  WRL::ComPtr<IModelObject> sp_environment, sp_environment_block;
+  WRL::ComPtr<IModelObject> sp_tls_slots, sp_slot_index, sp_isolate_ptr;
+  RETURN_IF_FAIL(
+      sp_curr_thread->GetKeyValue(L"Environment", &sp_environment, nullptr));
+
+  RETURN_IF_FAIL(sp_environment->GetKeyValue(L"EnvironmentBlock",
+                                             &sp_environment_block, nullptr));
+
+  // EnvironmentBlock and TlsSlots are native types (TypeUDT) and thus
+  // GetRawValue rather than GetKeyValue should be used to get field (member)
+  // values.
+  ModelObjectKind kind;
+  RETURN_IF_FAIL(sp_environment_block->GetKind(&kind));
+  if (kind != ModelObjectKind::ObjectTargetObject) return E_FAIL;
+
+  RETURN_IF_FAIL(sp_environment_block->GetRawValue(SymbolField, L"TlsSlots", 0,
+                                                   &sp_tls_slots));
+
+  int isolate_key = -1;
+  RETURN_IF_FAIL(GetIsolateKey(sp_host_context, &isolate_key));
+  RETURN_IF_FAIL(CreateInt32(isolate_key, &sp_slot_index));
+
+  RETURN_IF_FAIL(GetModelAtIndex(sp_tls_slots, sp_slot_index, &sp_isolate_ptr));
+
+  // Need to dereference the slot and then get the address held in it
+  WRL::ComPtr<IModelObject> sp_dereferenced_slot;
+  RETURN_IF_FAIL(sp_isolate_ptr->Dereference(&sp_dereferenced_slot));
+
+  uint64_t isolate_ptr;
+  RETURN_IF_FAIL(UnboxULong64(sp_dereferenced_slot.Get(), &isolate_ptr));
+  Location isolate_addr{isolate_ptr};
+
+  // If we got the isolate_key OK, the V8 module must be loaded.
+  // Get the internal Isolate type from it.
+  WRL::ComPtr<IDebugHostType> sp_isolate_type, sp_isolate_ptr_type;
+  RETURN_IF_FAIL(Extension::Current()
+                     ->GetV8Module(sp_host_context)
+                     ->FindTypeByName(kIsolate, &sp_isolate_type));
+  RETURN_IF_FAIL(
+      sp_isolate_type->CreatePointerTo(PointerStandard, &sp_isolate_ptr_type));
+
+  RETURN_IF_FAIL(sp_data_model_manager->CreateTypedObject(
+      sp_host_context.Get(), isolate_addr, sp_isolate_type.Get(), &sp_result));
+
+  return S_OK;
+}
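For orientation, the data-model walk above (current thread -> Environment -> EnvironmentBlock -> TlsSlots[isolate_key]) reconstructs from outside the process what V8 does in-process with ordinary Win32 TLS. A minimal sketch of that in-process equivalent, purely illustrative and not part of the imported file:

#include <windows.h>

// isolate_key is the value of the static v8::internal::Isolate::isolate_key_,
// i.e. exactly what GetIsolateKey() reads out of the debuggee.
void* CurrentIsolateInProcess(int isolate_key) {
  return ::TlsGetValue(isolate_key);  // the stored v8::internal::Isolate*
}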
+
+IFACEMETHODIMP CurrIsolateAlias::Call(IModelObject* p_context_object,
+                                      ULONG64 arg_count,
+                                      IModelObject** pp_arguments,
+                                      IModelObject** pp_result,
+                                      IKeyStore** pp_metadata) noexcept {
+  *pp_result = nullptr;
+  WRL::ComPtr<IModelObject> sp_result;
+  RETURN_IF_FAIL(GetCurrentIsolate(sp_result));
+  *pp_result = sp_result.Detach();
+  return S_OK;
+}
diff --git a/src/third_party/v8/tools/v8windbg/src/cur-isolate.h b/src/third_party/v8/tools/v8windbg/src/cur-isolate.h
new file mode 100644
index 0000000..2be24ce
--- /dev/null
+++ b/src/third_party/v8/tools/v8windbg/src/cur-isolate.h
@@ -0,0 +1,34 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TOOLS_V8WINDBG_SRC_CUR_ISOLATE_H_
+#define V8_TOOLS_V8WINDBG_SRC_CUR_ISOLATE_H_
+
+#include <crtdbg.h>
+#include <wrl/implements.h>
+
+#include <string>
+#include <vector>
+
+#include "tools/v8windbg/base/utilities.h"
+#include "tools/v8windbg/src/v8-debug-helper-interop.h"
+#include "tools/v8windbg/src/v8windbg-extension.h"
+
+HRESULT GetCurrentIsolate(WRL::ComPtr<IModelObject>& sp_result);
+
+constexpr wchar_t kIsolateKey[] = L"isolate_key_";
+constexpr wchar_t kIsolate[] = L"v8::internal::Isolate";
+
+class CurrIsolateAlias
+    : public WRL::RuntimeClass<
+          WRL::RuntimeClassFlags<WRL::RuntimeClassType::ClassicCom>,
+          IModelMethod> {
+ public:
+  IFACEMETHOD(Call)
+  (IModelObject* p_context_object, ULONG64 arg_count,
+   _In_reads_(arg_count) IModelObject** pp_arguments, IModelObject** pp_result,
+   IKeyStore** pp_metadata);
+};
+
+#endif  // V8_TOOLS_V8WINDBG_SRC_CUR_ISOLATE_H_
diff --git a/src/third_party/v8/tools/v8windbg/src/list-chunks.cc b/src/third_party/v8/tools/v8windbg/src/list-chunks.cc
new file mode 100644
index 0000000..90b3ff6
--- /dev/null
+++ b/src/third_party/v8/tools/v8windbg/src/list-chunks.cc
@@ -0,0 +1,238 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "tools/v8windbg/src/list-chunks.h"
+
+#include "tools/v8windbg/src/cur-isolate.h"
+
+// v8windbg!ListChunksAlias::Call
+IFACEMETHODIMP ListChunksAlias::Call(IModelObject* p_context_object,
+                                     ULONG64 arg_count,
+                                     _In_reads_(arg_count)
+                                         IModelObject** pp_arguments,
+                                     IModelObject** pp_result,
+                                     IKeyStore** pp_metadata) noexcept {
+  WRL::ComPtr<IDebugHostContext> sp_ctx;
+  RETURN_IF_FAIL(sp_debug_host->GetCurrentContext(&sp_ctx));
+
+  WRL::ComPtr<IModelObject> result;
+  RETURN_IF_FAIL(
+      sp_data_model_manager->CreateSyntheticObject(sp_ctx.Get(), &result));
+
+  auto sp_iterator{WRL::Make<MemoryChunks>()};
+
+  RETURN_IF_FAIL(result->SetConcept(
+      __uuidof(IIndexableConcept),
+      static_cast<IIndexableConcept*>(sp_iterator.Get()), nullptr));
+  RETURN_IF_FAIL(result->SetConcept(
+      __uuidof(IIterableConcept),
+      static_cast<IIterableConcept*>(sp_iterator.Get()), nullptr));
+
+  *pp_result = result.Detach();
+  if (pp_metadata) {
+    *pp_metadata = nullptr;
+  }
+  return S_OK;
+}
+
+ChunkData::ChunkData() = default;
+ChunkData::~ChunkData() = default;
+ChunkData::ChunkData(const ChunkData&) = default;
+ChunkData::ChunkData(ChunkData&&) = default;
+ChunkData& ChunkData::operator=(const ChunkData&) = default;
+ChunkData& ChunkData::operator=(ChunkData&&) = default;
+
+MemoryChunkIterator::MemoryChunkIterator(
+    WRL::ComPtr<IDebugHostContext>& host_context)
+    : sp_ctx_(host_context) {}
+MemoryChunkIterator::~MemoryChunkIterator() = default;
+
+HRESULT MemoryChunkIterator::PopulateChunkData() {
+  WRL::ComPtr<IModelObject> sp_isolate, sp_heap, sp_space;
+  chunks_.clear();
+
+  RETURN_IF_FAIL(GetCurrentIsolate(sp_isolate));
+
+  RETURN_IF_FAIL(
+      sp_isolate->GetRawValue(SymbolField, L"heap_", RawSearchNone, &sp_heap));
+  RETURN_IF_FAIL(
+      sp_heap->GetRawValue(SymbolField, L"space_", RawSearchNone, &sp_space));
+
+  WRL::ComPtr<IDebugHostType> sp_space_type;
+  RETURN_IF_FAIL(sp_space->GetTypeInfo(&sp_space_type));
+
+  // Iterate over the array of Space pointers
+  WRL::ComPtr<IIterableConcept> sp_iterable;
+  RETURN_IF_FAIL(
+      sp_space->GetConcept(__uuidof(IIterableConcept), &sp_iterable, nullptr));
+
+  WRL::ComPtr<IModelIterator> sp_space_iterator;
+  RETURN_IF_FAIL(sp_iterable->GetIterator(sp_space.Get(), &sp_space_iterator));
+
+  // Loop through all the spaces in the array
+  WRL::ComPtr<IModelObject> sp_space_ptr;
+  while (sp_space_iterator->GetNext(&sp_space_ptr, 0, nullptr, nullptr) !=
+         E_BOUNDS) {
+    // Should have gotten a "v8::internal::Space *". Dereference, then get field
+    // "memory_chunk_list_" [Type: v8::base::List<v8::internal::MemoryChunk>]
+    WRL::ComPtr<IModelObject> sp_space, sp_chunk_list, sp_mem_chunk_ptr,
+        sp_mem_chunk;
+    RETURN_IF_FAIL(sp_space_ptr->Dereference(&sp_space));
+    RETURN_IF_FAIL(sp_space->GetRawValue(SymbolField, L"memory_chunk_list_",
+                                         RawSearchNone, &sp_chunk_list));
+
+    // Then get field "front_" [Type: v8::internal::MemoryChunk *]
+    RETURN_IF_FAIL(sp_chunk_list->GetRawValue(
+        SymbolField, L"front_", RawSearchNone, &sp_mem_chunk_ptr));
+
+    // Loop here on the list of MemoryChunks for the space
+    while (true) {
+      // See if it is a nullptr (i.e. no chunks in this space)
+      uint64_t front_val;
+      RETURN_IF_FAIL(
+          UnboxULong64(sp_mem_chunk_ptr.Get(), &front_val, true /*convert*/));
+      if (front_val == 0) {
+        break;
+      }
+
+      // Dereference and get fields "area_start_" and "area_end_" (both uint64)
+      RETURN_IF_FAIL(sp_mem_chunk_ptr->Dereference(&sp_mem_chunk));
+
+      WRL::ComPtr<IModelObject> sp_start, sp_end;
+      RETURN_IF_FAIL(sp_mem_chunk->GetRawValue(SymbolField, L"area_start_",
+                                               RawSearchNone, &sp_start));
+      RETURN_IF_FAIL(sp_mem_chunk->GetRawValue(SymbolField, L"area_end_",
+                                               RawSearchNone, &sp_end));
+
+      ChunkData chunk_entry;
+      chunk_entry.area_start = sp_start;
+      chunk_entry.area_end = sp_end;
+      chunk_entry.space = sp_space;
+      chunks_.push_back(chunk_entry);
+
+      // Follow the list_node_.next_ to the next memory chunk
+      WRL::ComPtr<IModelObject> sp_list_node;
+      RETURN_IF_FAIL(sp_mem_chunk->GetRawValue(SymbolField, L"list_node_",
+                                               RawSearchNone, &sp_list_node));
+
+      sp_mem_chunk_ptr = nullptr;
+      sp_mem_chunk = nullptr;
+      RETURN_IF_FAIL(sp_list_node->GetRawValue(
+          SymbolField, L"next_", RawSearchNone, &sp_mem_chunk_ptr));
+      // Top of the loop will check if this is a nullptr and exit if so
+    }
+    sp_space_ptr = nullptr;
+  }
+
+  return S_OK;
+}
+
+IFACEMETHODIMP MemoryChunkIterator::Reset() noexcept {
+  position_ = 0;
+  return S_OK;
+}
+
+IFACEMETHODIMP MemoryChunkIterator::GetNext(IModelObject** object,
+                                            ULONG64 dimensions,
+                                            IModelObject** indexers,
+                                            IKeyStore** metadata) noexcept {
+  if (dimensions > 1) return E_INVALIDARG;
+
+  if (position_ == 0) {
+    RETURN_IF_FAIL(PopulateChunkData());
+  }
+
+  if (metadata != nullptr) *metadata = nullptr;
+
+  WRL::ComPtr<IModelObject> sp_index, sp_value;
+
+  if (dimensions == 1) {
+    RETURN_IF_FAIL(CreateULong64(position_, &sp_index));
+  }
+
+  RETURN_IF_FAIL(GetAt(position_, &sp_value));
+
+  // Now update counter and transfer ownership of results, because nothing can
+  // fail from this point onward.
+  ++position_;
+  if (dimensions == 1) {
+    *indexers = sp_index.Detach();
+  }
+  *object = sp_value.Detach();
+  return S_OK;
+}
+
+HRESULT MemoryChunkIterator::GetAt(uint64_t index,
+                                   IModelObject** result) const {
+  if (index >= chunks_.size()) return E_BOUNDS;
+
+  // Create the synthetic object representing the chunk here
+  const ChunkData& curr_chunk = chunks_.at(index);
+  WRL::ComPtr<IModelObject> sp_value;
+  RETURN_IF_FAIL(
+      sp_data_model_manager->CreateSyntheticObject(sp_ctx_.Get(), &sp_value));
+  RETURN_IF_FAIL(
+      sp_value->SetKey(L"area_start", curr_chunk.area_start.Get(), nullptr));
+  RETURN_IF_FAIL(
+      sp_value->SetKey(L"area_end", curr_chunk.area_end.Get(), nullptr));
+  RETURN_IF_FAIL(sp_value->SetKey(L"space", curr_chunk.space.Get(), nullptr));
+
+  *result = sp_value.Detach();
+  return S_OK;
+}
+
+MemoryChunks::MemoryChunks() = default;
+MemoryChunks::~MemoryChunks() = default;
+
+IFACEMETHODIMP MemoryChunks::GetDimensionality(
+    IModelObject* context_object, ULONG64* dimensionality) noexcept {
+  *dimensionality = 1;
+  return S_OK;
+}
+
+IFACEMETHODIMP MemoryChunks::GetAt(IModelObject* context_object,
+                                   ULONG64 indexer_count,
+                                   IModelObject** indexers,
+                                   IModelObject** object,
+                                   IKeyStore** metadata) noexcept {
+  if (indexer_count != 1) return E_INVALIDARG;
+  if (metadata != nullptr) *metadata = nullptr;
+  WRL::ComPtr<IDebugHostContext> sp_ctx;
+  RETURN_IF_FAIL(context_object->GetContext(&sp_ctx));
+
+  // This should be instantiated once for each synthetic object returned,
+  // so we should be able to cache and reuse an iterator.
+  if (opt_chunks_ == nullptr) {
+    opt_chunks_ = WRL::Make<MemoryChunkIterator>(sp_ctx);
+    _ASSERT(opt_chunks_ != nullptr);
+    RETURN_IF_FAIL(opt_chunks_->PopulateChunkData());
+  }
+
+  uint64_t index;
+  RETURN_IF_FAIL(UnboxULong64(indexers[0], &index, true /*convert*/));
+
+  return opt_chunks_->GetAt(index, object);
+}
+
+IFACEMETHODIMP MemoryChunks::SetAt(IModelObject* context_object,
+                                   ULONG64 indexer_count,
+                                   IModelObject** indexers,
+                                   IModelObject* value) noexcept {
+  return E_NOTIMPL;
+}
+
+IFACEMETHODIMP MemoryChunks::GetDefaultIndexDimensionality(
+    IModelObject* context_object, ULONG64* dimensionality) noexcept {
+  *dimensionality = 1;
+  return S_OK;
+}
+
+IFACEMETHODIMP MemoryChunks::GetIterator(IModelObject* context_object,
+                                         IModelIterator** iterator) noexcept {
+  WRL::ComPtr<IDebugHostContext> sp_ctx;
+  RETURN_IF_FAIL(context_object->GetContext(&sp_ctx));
+  auto sp_memory_iterator{WRL::Make<MemoryChunkIterator>(sp_ctx)};
+  *iterator = sp_memory_iterator.Detach();
+  return S_OK;
+}
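The COM plumbing above amounts to a simple nested traversal: for each Space pointer in the isolate's heap, follow the intrusive list rooted at memory_chunk_list_.front_ via list_node_.next_, recording area_start_/area_end_ per chunk. A self-contained sketch of that shape, using stand-in structs rather than the real V8 types:

#include <cstdint>
#include <utility>
#include <vector>

struct MemoryChunk;
struct ListNode { MemoryChunk* next_ = nullptr; };
struct MemoryChunk { uint64_t area_start_; uint64_t area_end_; ListNode list_node_; };
struct ChunkList { MemoryChunk* front_ = nullptr; };
struct Space { ChunkList memory_chunk_list_; };

// Mirrors PopulateChunkData: one (area_start_, area_end_) record per chunk.
std::vector<std::pair<uint64_t, uint64_t>> CollectChunks(
    const std::vector<Space*>& spaces) {
  std::vector<std::pair<uint64_t, uint64_t>> out;
  for (Space* space : spaces) {
    for (MemoryChunk* chunk = space->memory_chunk_list_.front_; chunk != nullptr;
         chunk = chunk->list_node_.next_) {
      out.emplace_back(chunk->area_start_, chunk->area_end_);
    }
  }
  return out;
}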
diff --git a/src/third_party/v8/tools/v8windbg/src/list-chunks.h b/src/third_party/v8/tools/v8windbg/src/list-chunks.h
new file mode 100644
index 0000000..10eec10
--- /dev/null
+++ b/src/third_party/v8/tools/v8windbg/src/list-chunks.h
@@ -0,0 +1,100 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TOOLS_V8WINDBG_SRC_LIST_CHUNKS_H_
+#define V8_TOOLS_V8WINDBG_SRC_LIST_CHUNKS_H_
+
+#include <crtdbg.h>
+#include <wrl/implements.h>
+
+#include <optional>
+#include <string>
+#include <vector>
+
+#include "src/base/optional.h"
+#include "tools/v8windbg/base/utilities.h"
+#include "tools/v8windbg/src/v8-debug-helper-interop.h"
+#include "tools/v8windbg/src/v8windbg-extension.h"
+
+class ListChunksAlias
+    : public WRL::RuntimeClass<
+          WRL::RuntimeClassFlags<WRL::RuntimeClassType::ClassicCom>,
+          IModelMethod> {
+ public:
+  IFACEMETHOD(Call)
+  (IModelObject* p_context_object, ULONG64 arg_count,
+   _In_reads_(arg_count) IModelObject** pp_arguments, IModelObject** pp_result,
+   IKeyStore** pp_metadata);
+};
+
+struct ChunkData {
+  ChunkData();
+  ~ChunkData();
+  ChunkData(const ChunkData&);
+  ChunkData(ChunkData&&);
+  ChunkData& operator=(const ChunkData&);
+  ChunkData& operator=(ChunkData&&);
+  WRL::ComPtr<IModelObject> area_start;
+  WRL::ComPtr<IModelObject> area_end;
+  WRL::ComPtr<IModelObject> space;
+};
+
+class MemoryChunkIterator
+    : public WRL::RuntimeClass<
+          WRL::RuntimeClassFlags<WRL::RuntimeClassType::ClassicCom>,
+          IModelIterator> {
+ public:
+  MemoryChunkIterator(WRL::ComPtr<IDebugHostContext>& host_context);
+  ~MemoryChunkIterator() override;
+
+  HRESULT PopulateChunkData();
+
+  IFACEMETHOD(Reset)();
+
+  IFACEMETHOD(GetNext)
+  (IModelObject** object, ULONG64 dimensions, IModelObject** indexers,
+   IKeyStore** metadata);
+
+  const std::vector<ChunkData>& GetChunks() const { return chunks_; }
+
+  HRESULT GetAt(uint64_t index, IModelObject** result) const;
+
+ private:
+  ULONG position_ = 0;
+  std::vector<ChunkData> chunks_;
+  WRL::ComPtr<IDebugHostContext> sp_ctx_;
+};
+
+class MemoryChunks
+    : public WRL::RuntimeClass<
+          WRL::RuntimeClassFlags<WRL::RuntimeClassType::ClassicCom>,
+          IIndexableConcept, IIterableConcept> {
+ public:
+  MemoryChunks();
+  ~MemoryChunks() override;
+
+  // IIndexableConcept members
+  IFACEMETHOD(GetDimensionality)
+  (IModelObject* context_object, ULONG64* dimensionality);
+
+  IFACEMETHOD(GetAt)
+  (IModelObject* context_object, ULONG64 indexer_count, IModelObject** indexers,
+   IModelObject** object, IKeyStore** metadata);
+
+  IFACEMETHOD(SetAt)
+  (IModelObject* context_object, ULONG64 indexer_count, IModelObject** indexers,
+   IModelObject* value);
+
+  // IIterableConcept
+  IFACEMETHOD(GetDefaultIndexDimensionality)
+  (IModelObject* context_object, ULONG64* dimensionality);
+
+  IFACEMETHOD(GetIterator)
+  (IModelObject* context_object, IModelIterator** iterator);
+
+ private:
+  WRL::ComPtr<MemoryChunkIterator> opt_chunks_;
+};
+
+#endif  // V8_TOOLS_V8WINDBG_SRC_LIST_CHUNKS_H_
diff --git a/src/third_party/v8/tools/v8windbg/src/local-variables.cc b/src/third_party/v8/tools/v8windbg/src/local-variables.cc
new file mode 100644
index 0000000..e00d06b
--- /dev/null
+++ b/src/third_party/v8/tools/v8windbg/src/local-variables.cc
@@ -0,0 +1,132 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "tools/v8windbg/src/local-variables.h"
+
+#include <vector>
+
+#include "tools/v8windbg/base/utilities.h"
+#include "tools/v8windbg/src/object-inspection.h"
+#include "tools/v8windbg/src/v8-debug-helper-interop.h"
+#include "tools/v8windbg/src/v8windbg-extension.h"
+
+V8LocalVariables::V8LocalVariables(WRL::ComPtr<IModelPropertyAccessor> original,
+                                   bool is_parameters)
+    : original_(original), is_parameters_(is_parameters) {}
+V8LocalVariables::~V8LocalVariables() = default;
+
+IFACEMETHODIMP V8LocalVariables::GetValue(PCWSTR key, IModelObject* context,
+                                          IModelObject** value) noexcept {
+  // See if the frame can fetch locals based on symbols. If so, it's a normal
+  // C++ frame, so we can be done.
+  HRESULT original_hr = original_->GetValue(key, context, value);
+  if (SUCCEEDED(original_hr)) return original_hr;
+
+  // Next, try to find out about the instruction pointer. If it is within the V8
+  // module, or points to unknown space outside a module (generated code), then
+  // we're interested. Otherwise, we have nothing useful to do.
+  WRL::ComPtr<IModelObject> attributes;
+  RETURN_IF_FAIL(context->GetKeyValue(L"Attributes", &attributes, nullptr));
+  WRL::ComPtr<IModelObject> boxed_instruction_offset;
+  RETURN_IF_FAIL(attributes->GetKeyValue(L"InstructionOffset",
+                                         &boxed_instruction_offset, nullptr));
+  ULONG64 instruction_offset{};
+  RETURN_IF_FAIL(
+      UnboxULong64(boxed_instruction_offset.Get(), &instruction_offset));
+  WRL::ComPtr<IDebugHostSymbols> symbols;
+  RETURN_IF_FAIL(sp_debug_host.As(&symbols));
+  WRL::ComPtr<IDebugHostContext> host_context;
+  RETURN_IF_FAIL(sp_debug_host->GetCurrentContext(&host_context));
+  WRL::ComPtr<IDebugHostModule> module;
+  if (SUCCEEDED(symbols->FindModuleByLocation(host_context.Get(),
+                                              instruction_offset, &module))) {
+    Location module_base;
+    RETURN_IF_FAIL(module->GetBaseLocation(&module_base));
+    WRL::ComPtr<IDebugHostModule> v8_module =
+        Extension::Current()->GetV8Module(host_context);
+    if (v8_module == nullptr) {
+      // Anything in a module must not be in the V8 module if the V8 module
+      // doesn't exist.
+      return original_hr;
+    }
+    Location v8_base;
+    RETURN_IF_FAIL(v8_module->GetBaseLocation(&v8_base));
+    if (module_base != v8_base) {
+      // It's in a module, but not the one that contains V8.
+      return original_hr;
+    }
+  }
+
+  // Initialize an empty result object.
+  WRL::ComPtr<IModelObject> result;
+  RETURN_IF_FAIL(sp_data_model_manager->CreateSyntheticObject(
+      host_context.Get(), &result));
+  WRL::ComPtr<IModelObject> parent_model;
+  RETURN_IF_FAIL(sp_data_model_manager->AcquireNamedModel(
+      is_parameters_ ? L"Debugger.Models.Parameters"
+                     : L"Debugger.Models.LocalVariables",
+      &parent_model));
+  RETURN_IF_FAIL(result->AddParentModel(parent_model.Get(), /*context=*/nullptr,
+                                        /*override=*/false));
+
+  if (is_parameters_) {
+    // We're not actually adding any parameters data yet; we just need it to not
+    // fail so that the locals pane displays the LocalVariables. The locals pane
+    // displays nothing if getting either LocalVariables or Parameters fails.
+    *value = result.Detach();
+    return S_OK;
+  }
+
+  // Get the stack and frame pointers for the current frame.
+  WRL::ComPtr<IModelObject> boxed_stack_offset;
+  RETURN_IF_FAIL(
+      attributes->GetKeyValue(L"StackOffset", &boxed_stack_offset, nullptr));
+  ULONG64 stack_offset{};
+  RETURN_IF_FAIL(UnboxULong64(boxed_stack_offset.Get(), &stack_offset));
+  WRL::ComPtr<IModelObject> boxed_frame_offset;
+  RETURN_IF_FAIL(
+      attributes->GetKeyValue(L"FrameOffset", &boxed_frame_offset, nullptr));
+  ULONG64 frame_offset{};
+  RETURN_IF_FAIL(UnboxULong64(boxed_frame_offset.Get(), &frame_offset));
+
+  // Eventually v8_debug_helper will provide some help here, but for now, just
+  // provide the option to view the whole stack frame as tagged data. It can
+  // be somewhat useful.
+  WRL::ComPtr<IDebugHostType> object_type =
+      Extension::Current()->GetV8ObjectType(host_context);
+  if (object_type == nullptr) {
+    // There's nothing useful to do if we can't find the symbol for
+    // v8::internal::Object.
+    return original_hr;
+  }
+  ULONG64 object_size{};
+  RETURN_IF_FAIL(object_type->GetSize(&object_size));
+  ULONG64 num_objects = (frame_offset - stack_offset) / object_size;
+  ArrayDimension dimensions[] = {
+      {/*start=*/0, /*length=*/num_objects, /*stride=*/object_size}};
+  WRL::ComPtr<IDebugHostType> object_array_type;
+  RETURN_IF_FAIL(object_type->CreateArrayOf(/*dimensions=*/1, dimensions,
+                                            &object_array_type));
+  WRL::ComPtr<IModelObject> array;
+  RETURN_IF_FAIL(sp_data_model_manager->CreateTypedObject(
+      host_context.Get(), stack_offset, object_array_type.Get(), &array));
+  RETURN_IF_FAIL(
+      result->SetKey(L"memory interpreted as Objects", array.Get(), nullptr));
+
+  std::vector<Property> properties = GetStackFrame(host_context, frame_offset);
+  for (const auto& prop : properties) {
+    WRL::ComPtr<IModelObject> property;
+    RETURN_IF_FAIL(GetModelForProperty(prop, host_context, &property));
+    result->SetKey(reinterpret_cast<const wchar_t*>(prop.name.c_str()),
+                   property.Get(), nullptr);
+  }
+
+  *value = result.Detach();
+  return S_OK;
+}
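To make the sizing of the "memory interpreted as Objects" overlay concrete, here is the arithmetic with hypothetical frame bounds (the real values come from the StackOffset and FrameOffset attributes unboxed above):

// Hypothetical numbers only; v8::internal::Object is one pointer (8 bytes) on x64.
constexpr unsigned long long kStackOffset = 0x1000;
constexpr unsigned long long kFrameOffset = 0x1040;
constexpr unsigned long long kObjectSize = 8;
static_assert((kFrameOffset - kStackOffset) / kObjectSize == 8,
              "this frame would be exposed as an Object[8] overlay");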
+
+IFACEMETHODIMP V8LocalVariables::SetValue(PCWSTR key, IModelObject* context,
+                                          IModelObject* value) noexcept {
+  return E_NOTIMPL;
+}
diff --git a/src/third_party/v8/tools/v8windbg/src/local-variables.h b/src/third_party/v8/tools/v8windbg/src/local-variables.h
new file mode 100644
index 0000000..169a93a
--- /dev/null
+++ b/src/third_party/v8/tools/v8windbg/src/local-variables.h
@@ -0,0 +1,36 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TOOLS_V8WINDBG_SRC_LOCAL_VARIABLES_H_
+#define V8_TOOLS_V8WINDBG_SRC_LOCAL_VARIABLES_H_
+
+#include <comutil.h>
+#include <wrl/implements.h>
+
+#include "tools/v8windbg/base/dbgext.h"
+
+// An implementation of the property accessor for the "LocalVariables" or
+// "Parameters" property on Debugger.Models.StackFrame. This allows us to modify
+// the variables shown in each frame.
+class V8LocalVariables
+    : public WRL::RuntimeClass<
+          WRL::RuntimeClassFlags<WRL::RuntimeClassType::ClassicCom>,
+          IModelPropertyAccessor> {
+ public:
+  V8LocalVariables(WRL::ComPtr<IModelPropertyAccessor> original,
+                   bool is_parameters);
+  ~V8LocalVariables() override;
+
+  IFACEMETHOD(GetValue)
+  (PCWSTR key, IModelObject* context, IModelObject** value);
+  IFACEMETHOD(SetValue)(PCWSTR key, IModelObject* context, IModelObject* value);
+
+ private:
+  // The built-in accessor which we are overriding.
+  WRL::ComPtr<IModelPropertyAccessor> original_;
+  // Whether this is for Parameters rather than LocalVariables.
+  bool is_parameters_;
+};
+
+#endif  // V8_TOOLS_V8WINDBG_SRC_LOCAL_VARIABLES_H_
diff --git a/src/third_party/v8/tools/v8windbg/src/object-inspection.cc b/src/third_party/v8/tools/v8windbg/src/object-inspection.cc
new file mode 100644
index 0000000..b206dfa
--- /dev/null
+++ b/src/third_party/v8/tools/v8windbg/src/object-inspection.cc
@@ -0,0 +1,696 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "tools/v8windbg/src/object-inspection.h"
+
+#include "src/flags/flags.h"
+#include "tools/v8windbg/base/utilities.h"
+#include "tools/v8windbg/src/v8-debug-helper-interop.h"
+#include "tools/v8windbg/src/v8windbg-extension.h"
+
+V8CachedObject::V8CachedObject(Location location,
+                               std::string uncompressed_type_name,
+                               WRL::ComPtr<IDebugHostContext> context,
+                               bool is_compressed)
+    : location_(std::move(location)),
+      uncompressed_type_name_(std::move(uncompressed_type_name)),
+      context_(std::move(context)),
+      is_compressed_(is_compressed) {}
+HRESULT V8CachedObject::Create(IModelObject* p_v8_object_instance,
+                               IV8CachedObject** result) {
+  Location location;
+  RETURN_IF_FAIL(p_v8_object_instance->GetLocation(&location));
+
+  WRL::ComPtr<IDebugHostContext> context;
+  RETURN_IF_FAIL(p_v8_object_instance->GetContext(&context));
+
+  // If the object is of type v8::internal::TaggedValue, and this build uses
+  // compressed pointers, then the value is compressed. Other types such as
+  // v8::internal::Object represent uncompressed tagged values.
+  WRL::ComPtr<IDebugHostType> sp_type;
+  _bstr_t type_name;
+  bool is_compressed =
+      COMPRESS_POINTERS_BOOL &&
+      SUCCEEDED(p_v8_object_instance->GetTypeInfo(&sp_type)) &&
+      SUCCEEDED(sp_type->GetName(type_name.GetAddress())) &&
+      static_cast<const char*>(type_name) == std::string(kTaggedValue);
+
+  const char* uncompressed_type_name =
+      is_compressed ? kObject : static_cast<const char*>(type_name);
+
+  *result = WRL::Make<V8CachedObject>(location, uncompressed_type_name, context,
+                                      is_compressed)
+                .Detach();
+  return S_OK;
+}
+V8CachedObject::V8CachedObject(V8HeapObject heap_object)
+    : heap_object_(std::move(heap_object)), heap_object_initialized_(true) {}
+
+V8CachedObject::~V8CachedObject() = default;
+
+IFACEMETHODIMP V8CachedObject::GetCachedV8HeapObject(
+    V8HeapObject** pp_heap_object) noexcept {
+  if (!heap_object_initialized_) {
+    heap_object_initialized_ = true;
+    uint64_t tagged_ptr = 0;
+    uint64_t bytes_read;
+    HRESULT hr = sp_debug_host_memory->ReadBytes(
+        context_.Get(), location_, reinterpret_cast<void*>(&tagged_ptr),
+        is_compressed_ ? i::kTaggedSize : sizeof(void*), &bytes_read);
+    // S_FALSE can be returned if fewer bytes were read than were requested. We
+    // need all of the bytes, so check for S_OK.
+    if (hr != S_OK) {
+      std::stringstream message;
+      message << "Unable to read memory";
+      if (location_.IsVirtualAddress()) {
+        message << " at 0x" << std::hex << location_.GetOffset();
+      }
+      heap_object_.friendly_name = ConvertToU16String(message.str());
+    } else {
+      if (is_compressed_)
+        tagged_ptr = ExpandCompressedPointer(static_cast<uint32_t>(tagged_ptr));
+      heap_object_ =
+          ::GetHeapObject(context_, tagged_ptr, location_.GetOffset(),
+                          uncompressed_type_name_.c_str(), is_compressed_);
+    }
+  }
+  *pp_heap_object = &this->heap_object_;
+  return S_OK;
+}
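When is_compressed_ is set, only the low 32 bits of the tagged value are read, and ExpandCompressedPointer has to restore the upper half before GetHeapObject can decode it. Conceptually the expansion just re-attaches the 4 GB-aligned cage base; a sketch of that idea, assuming the base is taken from some already-known uncompressed pointer:

#include <cstdint>

// Sketch only: with pointer compression, the cage base is 4 GB-aligned, so a
// full tagged pointer is simply (base | low 32 bits of the compressed value).
uint64_t ExpandCompressedPointerSketch(uint64_t any_uncompressed_ptr,
                                       uint32_t compressed) {
  const uint64_t cage_base = any_uncompressed_ptr & 0xFFFFFFFF00000000ull;
  return cage_base | compressed;
}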
+
+IndexedFieldData::IndexedFieldData(Property property)
+    : property_(std::move(property)) {}
+
+IndexedFieldData::~IndexedFieldData() = default;
+
+IFACEMETHODIMP IndexedFieldData::GetProperty(Property** property) noexcept {
+  if (!property) return E_POINTER;
+  *property = &this->property_;
+  return S_OK;
+}
+
+V8ObjectKeyEnumerator::V8ObjectKeyEnumerator(
+    WRL::ComPtr<IV8CachedObject>& v8_cached_object)
+    : sp_v8_cached_object_{v8_cached_object} {}
+V8ObjectKeyEnumerator::~V8ObjectKeyEnumerator() = default;
+
+IFACEMETHODIMP V8ObjectKeyEnumerator::Reset() noexcept {
+  index_ = 0;
+  return S_OK;
+}
+
+IFACEMETHODIMP V8ObjectKeyEnumerator::GetNext(BSTR* key, IModelObject** value,
+                                              IKeyStore** metadata) noexcept {
+  V8HeapObject* p_v8_heap_object;
+  sp_v8_cached_object_->GetCachedV8HeapObject(&p_v8_heap_object);
+
+  if (static_cast<size_t>(index_) >= p_v8_heap_object->properties.size())
+    return E_BOUNDS;
+
+  auto* name_ptr = p_v8_heap_object->properties[index_].name.c_str();
+  *key = ::SysAllocString(U16ToWChar(name_ptr));
+  ++index_;
+  return S_OK;
+}
+
+IFACEMETHODIMP V8LocalDataModel::InitializeObject(
+    IModelObject* model_object,
+    IDebugHostTypeSignature* matching_type_signature,
+    IDebugHostSymbolEnumerator* wildcard_matches) noexcept {
+  return S_OK;
+}
+
+IFACEMETHODIMP V8LocalDataModel::GetName(BSTR* model_name) noexcept {
+  return E_NOTIMPL;
+}
+
+IFACEMETHODIMP V8ObjectDataModel::InitializeObject(
+    IModelObject* model_object,
+    IDebugHostTypeSignature* matching_type_signature,
+    IDebugHostSymbolEnumerator* wildcard_matches) noexcept {
+  return S_OK;
+}
+
+IFACEMETHODIMP V8ObjectDataModel::GetName(BSTR* model_name) noexcept {
+  return E_NOTIMPL;
+}
+
+IFACEMETHODIMP V8ObjectDataModel::ToDisplayString(
+    IModelObject* context_object, IKeyStore* metadata,
+    BSTR* display_string) noexcept {
+  WRL::ComPtr<IV8CachedObject> sp_v8_cached_object;
+  RETURN_IF_FAIL(GetCachedObject(context_object, &sp_v8_cached_object));
+  V8HeapObject* p_v8_heap_object;
+  RETURN_IF_FAIL(sp_v8_cached_object->GetCachedV8HeapObject(&p_v8_heap_object));
+  *display_string = ::SysAllocString(
+      reinterpret_cast<const wchar_t*>(p_v8_heap_object->friendly_name.data()));
+  return S_OK;
+}
+
+namespace {
+
+// Creates a synthetic object, attaches a parent model, and sets the context
+// object for that parent data model. Caller is responsible for ensuring that
+// the parent model's Concepts have been initialized correctly and that the
+// data model context is of an appropriate type for the parent model.
+HRESULT CreateSyntheticObjectWithParentAndDataContext(
+    IDebugHostContext* ctx, IModelObject* parent_model, IUnknown* data_context,
+    IModelObject** result) {
+  WRL::ComPtr<IModelObject> value;
+  RETURN_IF_FAIL(sp_data_model_manager->CreateSyntheticObject(ctx, &value));
+  RETURN_IF_FAIL(
+      value->AddParentModel(parent_model, nullptr, true /*override*/));
+  RETURN_IF_FAIL(value->SetContextForDataModel(parent_model, data_context));
+  *result = value.Detach();
+  return S_OK;
+}
+
+// Creates an IModelObject for a V8 object whose value is represented by the
+// data in cached_object. This is an alternative to CreateTypedObject for
+// particularly complex cases (compressed values and those that don't exist
+// anywhere in memory).
+HRESULT CreateSyntheticObjectForV8Object(IDebugHostContext* ctx,
+                                         V8CachedObject* cached_object,
+                                         IModelObject** result) {
+  // Explicitly add the parent model and data context. On a plain typed object,
+  // the parent model would be attached automatically because we registered for
+  // a matching type signature, and the data context would be set during
+  // V8ObjectDataModel::GetCachedObject.
+  return CreateSyntheticObjectWithParentAndDataContext(
+      ctx, Extension::Current()->GetObjectDataModel(), cached_object, result);
+}
+
+// Creates an IModelObject to represent a field that is not a struct or array.
+HRESULT GetModelForBasicField(const uint64_t address,
+                              const std::u16string& type_name,
+                              const std::string& uncompressed_type_name,
+                              WRL::ComPtr<IDebugHostContext>& sp_ctx,
+                              IModelObject** result) {
+  if (type_name == ConvertToU16String(uncompressed_type_name)) {
+    // For untagged and uncompressed tagged fields, create an IModelObject
+    // representing a normal native data type.
+    WRL::ComPtr<IDebugHostType> type =
+        Extension::Current()->GetTypeFromV8Module(sp_ctx, type_name.c_str());
+    if (type == nullptr) return E_FAIL;
+    return sp_data_model_manager->CreateTypedObject(
+        sp_ctx.Get(), Location{address}, type.Get(), result);
+  }
+
+  // For compressed tagged fields, we need to do something a little more
+  // complicated. We could just use CreateTypedObject with the type
+  // v8::internal::TaggedValue, but then we'd sacrifice any other data
+  // that we've learned about the field's specific type. So instead we
+  // create a synthetic object.
+  WRL::ComPtr<V8CachedObject> cached_object = WRL::Make<V8CachedObject>(
+      Location(address), uncompressed_type_name, sp_ctx,
+      /*is_compressed=*/true);
+  return CreateSyntheticObjectForV8Object(sp_ctx.Get(), cached_object.Get(),
+                                          result);
+}
+
+// Creates an IModelObject representing the value of a bitfield.
+HRESULT GetModelForBitField(uint64_t address, const uint8_t num_bits,
+                            uint8_t shift_bits, const std::u16string& type_name,
+                            WRL::ComPtr<IDebugHostContext>& sp_ctx,
+                            IModelObject** result) {
+  // Look up the type by name.
+  WRL::ComPtr<IDebugHostType> type =
+      Extension::Current()->GetTypeFromV8Module(sp_ctx, type_name.c_str());
+  if (type == nullptr) return E_FAIL;
+
+  // Figure out exactly which bytes contain the bitfield's data. This depends on
+  // platform byte order (little-endian for Windows).
+  constexpr int kBitsPerByte = 8;
+  uint8_t shift_bytes = shift_bits / kBitsPerByte;
+  address += shift_bytes;
+  shift_bits -= shift_bytes * kBitsPerByte;
+  size_t bits_to_read = shift_bits + num_bits;
+  size_t bytes_to_read = (bits_to_read + kBitsPerByte - 1) / kBitsPerByte;
+
+  uintptr_t value = 0;
+
+  // V8 guarantees that bitfield structs are no bigger than a single pointer.
+  if (bytes_to_read > sizeof(value)) {
+    std::stringstream message;
+    message << "Fatal v8windbg error: found bitfield struct of "
+            << bytes_to_read << "bytes, which exceeds the supported size of "
+            << sizeof(value);
+    return CreateString(ConvertToU16String(message.str()), result);
+  }
+
+  uint64_t bytes_read;
+  HRESULT hr = sp_debug_host_memory->ReadBytes(sp_ctx.Get(), address,
+                                               reinterpret_cast<void*>(&value),
+                                               bytes_to_read, &bytes_read);
+
+  // S_FALSE can be returned if fewer bytes were read than were requested. We
+  // need all of the bytes, so check for S_OK.
+  if (hr != S_OK) {
+    std::stringstream message;
+    message << "Unable to read memory at 0x" << std::hex << address;
+    return CreateString(ConvertToU16String(message.str()), result);
+  }
+
+  // Decode the bitfield.
+  value = (value >> shift_bits) & ((1 << num_bits) - 1);
+
+  return CreateTypedIntrinsic(value, type.Get(), result);
+}
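A worked example of the byte and bit bookkeeping above, using hypothetical field parameters (a 3-bit field at bit offset 35 within the bitfield struct):

// num_bits = 3, shift_bits = 35: shift_bytes = 35 / 8 = 4, so the read starts
// at address + 4 with a residual shift of 35 - 32 = 3 bits; bits_to_read = 6,
// bytes_to_read = 1, and the decoded value is (byte >> 3) & ((1 << 3) - 1).
static_assert(35 / 8 == 4 && 35 - 4 * 8 == 3 && (3 + 3 + 7) / 8 == 1,
              "bitfield shift/byte arithmetic");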
+
+// Creates an IModelObject to represent the packed fields in a Torque struct.
+// Note that Torque structs are not C++ structs and do not have any type
+// definitions in the V8 symbols.
+HRESULT GetModelForStruct(const uint64_t address,
+                          const std::vector<StructField>& fields,
+                          WRL::ComPtr<IDebugHostContext>& sp_ctx,
+                          IModelObject** result) {
+  WRL::ComPtr<IModelObject> sp_value;
+  RETURN_IF_FAIL(
+      sp_data_model_manager->CreateSyntheticObject(sp_ctx.Get(), &sp_value));
+
+  // There's no need for any fancy Concepts here; just add key-value pairs for
+  // each field.
+  for (const StructField& field : fields) {
+    WRL::ComPtr<IModelObject> field_model;
+    if (field.num_bits == 0) {
+      RETURN_IF_FAIL(GetModelForBasicField(
+          address + field.offset, field.type_name, field.uncompressed_type_name,
+          sp_ctx, &field_model));
+    } else {
+      RETURN_IF_FAIL(GetModelForBitField(address + field.offset, field.num_bits,
+                                         field.shift_bits, field.type_name,
+                                         sp_ctx, &field_model));
+    }
+    RETURN_IF_FAIL(
+        sp_value->SetKey(reinterpret_cast<const wchar_t*>(field.name.c_str()),
+                         field_model.Get(), nullptr));
+  }
+
+  *result = sp_value.Detach();
+  return S_OK;
+}
+
+// Creates an IModelObject representing an array of some type that we expect to
+// be defined in the V8 symbols.
+HRESULT GetModelForNativeArray(const uint64_t address,
+                               const std::u16string& type_name, size_t count,
+                               WRL::ComPtr<IDebugHostContext>& sp_ctx,
+                               IModelObject** result) {
+  WRL::ComPtr<IDebugHostType> type =
+      Extension::Current()->GetTypeFromV8Module(sp_ctx, type_name.c_str());
+  if (type == nullptr) return E_FAIL;
+
+  ULONG64 object_size{};
+  RETURN_IF_FAIL(type->GetSize(&object_size));
+
+  ArrayDimension dimensions[] = {
+      {/*start=*/0, /*length=*/count, /*stride=*/object_size}};
+  WRL::ComPtr<IDebugHostType> array_type;
+  RETURN_IF_FAIL(
+      type->CreateArrayOf(/*dimensions=*/1, dimensions, &array_type));
+
+  return sp_data_model_manager->CreateTypedObject(
+      sp_ctx.Get(), Location{address}, array_type.Get(), result);
+}
+
+// Creates an IModelObject that represents an array of structs or compressed
+// tagged values.
+HRESULT GetModelForCustomArray(const Property& prop,
+                               WRL::ComPtr<IDebugHostContext>& sp_ctx,
+                               IModelObject** result) {
+  // Create the context which should be provided to the indexing and iterating
+  // functionality provided by the parent model. This is instance-specific data,
+  // whereas the parent model object could be shared among many custom arrays.
+  WRL::ComPtr<IndexedFieldData> context_data =
+      WRL::Make<IndexedFieldData>(prop);
+
+  return CreateSyntheticObjectWithParentAndDataContext(
+      sp_ctx.Get(), Extension::Current()->GetIndexedFieldDataModel(),
+      context_data.Get(), result);
+}
+
+
+// Creates an IModelObject representing the data in an array at the given index.
+// context_object is expected to be an object of the form created by
+// GetModelForCustomArray, meaning its context for the IndexedFieldParent data
+// model is an IIndexedFieldData containing the description of the array.
+HRESULT GetModelForCustomArrayElement(IModelObject* context_object,
+                                      size_t index, IModelObject** object) {
+  // Open a few layers of wrapper objects to get to the Property object that
+  // describes the array.
+  WRL::ComPtr<IUnknown> data_model_context;
+  RETURN_IF_FAIL(context_object->GetContextForDataModel(
+      Extension::Current()->GetIndexedFieldDataModel(), &data_model_context));
+  WRL::ComPtr<IIndexedFieldData> indexed_field_data;
+  RETURN_IF_FAIL(data_model_context.As(&indexed_field_data));
+  Property* prop;
+  RETURN_IF_FAIL(indexed_field_data->GetProperty(&prop));
+
+  if (index >= prop->length) {
+    return E_BOUNDS;
+  }
+
+  WRL::ComPtr<IDebugHostContext> sp_ctx;
+  RETURN_IF_FAIL(context_object->GetContext(&sp_ctx));
+
+  ULONG64 address = prop->addr_value + index * prop->item_size;
+
+  switch (prop->type) {
+    case PropertyType::kArray:
+      return GetModelForBasicField(address, prop->type_name,
+                                   prop->uncompressed_type_name, sp_ctx,
+                                   object);
+    case PropertyType::kStructArray:
+      return GetModelForStruct(address, prop->fields, sp_ctx, object);
+    default:
+      return E_FAIL;  // Only array properties should be possible here.
+  }
+}
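The element lookup above is a bounds check followed by a plain base-plus-stride computation; for example, with hypothetical property values:

// Hypothetical: addr_value = 0x20000, item_size = 8, length = 3. Index 2
// resolves to 0x20000 + 2 * 8 = 0x20010; index 3 returns E_BOUNDS above.
static_assert(0x20000 + 2 * 8 == 0x20010, "custom-array element address");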
+
+}  // namespace
+
+IFACEMETHODIMP IndexedFieldParent::InitializeObject(
+    IModelObject* model_object,
+    IDebugHostTypeSignature* matching_type_signature,
+    IDebugHostSymbolEnumerator* wildcard_matches) noexcept {
+  return S_OK;
+}
+
+IFACEMETHODIMP IndexedFieldParent::GetName(BSTR* model_name) noexcept {
+  return E_NOTIMPL;
+}
+
+IFACEMETHODIMP IndexedFieldParent::GetDimensionality(
+    IModelObject* context_object, ULONG64* dimensionality) noexcept {
+  *dimensionality = 1;
+  return S_OK;
+}
+
+IFACEMETHODIMP IndexedFieldParent::GetAt(IModelObject* context_object,
+                                         ULONG64 indexer_count,
+                                         IModelObject** indexers,
+                                         IModelObject** object,
+                                         IKeyStore** metadata) noexcept {
+  if (indexer_count != 1) return E_INVALIDARG;
+  if (metadata != nullptr) *metadata = nullptr;
+
+  ULONG64 index;
+  RETURN_IF_FAIL(UnboxULong64(indexers[0], &index, /*convert=*/true));
+
+  return GetModelForCustomArrayElement(context_object, index, object);
+}
+
+IFACEMETHODIMP IndexedFieldParent::SetAt(IModelObject* context_object,
+                                         ULONG64 indexer_count,
+                                         IModelObject** indexers,
+                                         IModelObject* value) noexcept {
+  return E_NOTIMPL;
+}
+
+IFACEMETHODIMP IndexedFieldParent::GetDefaultIndexDimensionality(
+    IModelObject* context_object, ULONG64* dimensionality) noexcept {
+  *dimensionality = 1;
+  return S_OK;
+}
+
+IFACEMETHODIMP IndexedFieldParent::GetIterator(
+    IModelObject* context_object, IModelIterator** iterator) noexcept {
+  auto indexed_field_iterator{WRL::Make<IndexedFieldIterator>(context_object)};
+  *iterator = indexed_field_iterator.Detach();
+  return S_OK;
+}
+
+IndexedFieldIterator::IndexedFieldIterator(IModelObject* context_object)
+    : context_object_(context_object) {}
+IndexedFieldIterator::~IndexedFieldIterator() = default;
+
+IFACEMETHODIMP IndexedFieldIterator::Reset() noexcept {
+  next_ = 0;
+  return S_OK;
+}
+
+IFACEMETHODIMP IndexedFieldIterator::GetNext(IModelObject** object,
+                                             ULONG64 dimensions,
+                                             IModelObject** indexers,
+                                             IKeyStore** metadata) noexcept {
+  if (dimensions > 1) return E_INVALIDARG;
+
+  WRL::ComPtr<IModelObject> sp_index, sp_value;
+  RETURN_IF_FAIL(
+      GetModelForCustomArrayElement(context_object_.Get(), next_, &sp_value));
+  RETURN_IF_FAIL(CreateULong64(next_, &sp_index));
+
+  // Everything that could fail (including the bounds check) has succeeded, so
+  // increment the index.
+  ++next_;
+
+  // Write results (none of these steps can fail, which is important because we
+  // transfer ownership of two separate objects).
+  if (dimensions == 1) {
+    indexers[0] = sp_index.Detach();
+  }
+  *object = sp_value.Detach();
+  if (metadata != nullptr) *metadata = nullptr;
+  return S_OK;
+}
+
+IFACEMETHODIMP V8ObjectDataModel::GetKey(IModelObject* context_object,
+                                         PCWSTR key, IModelObject** key_value,
+                                         IKeyStore** metadata,
+                                         bool* has_key) noexcept {
+  if (metadata != nullptr) *metadata = nullptr;
+
+  WRL::ComPtr<IV8CachedObject> sp_v8_cached_object;
+  RETURN_IF_FAIL(GetCachedObject(context_object, &sp_v8_cached_object));
+  V8HeapObject* p_v8_heap_object;
+  RETURN_IF_FAIL(sp_v8_cached_object->GetCachedV8HeapObject(&p_v8_heap_object));
+
+  *has_key = false;
+  for (const auto& prop : p_v8_heap_object->properties) {
+    const char16_t* p_key = reinterpret_cast<const char16_t*>(key);
+    if (prop.name.compare(p_key) == 0) {
+      *has_key = true;
+      if (key_value != nullptr) {
+        WRL::ComPtr<IDebugHostContext> sp_ctx;
+        RETURN_IF_FAIL(context_object->GetContext(&sp_ctx));
+        RETURN_IF_FAIL(GetModelForProperty(prop, sp_ctx, key_value));
+      }
+      return S_OK;
+    }
+  }
+
+  return S_OK;
+}
+
+IFACEMETHODIMP V8ObjectDataModel::SetKey(IModelObject* context_object,
+                                         PCWSTR key, IModelObject* key_value,
+                                         IKeyStore* metadata) noexcept {
+  return E_NOTIMPL;
+}
+
+IFACEMETHODIMP V8ObjectDataModel::EnumerateKeys(
+    IModelObject* context_object, IKeyEnumerator** pp_enumerator) noexcept {
+  WRL::ComPtr<IV8CachedObject> sp_v8_cached_object;
+  RETURN_IF_FAIL(GetCachedObject(context_object, &sp_v8_cached_object));
+
+  auto enumerator{WRL::Make<V8ObjectKeyEnumerator>(sp_v8_cached_object)};
+  *pp_enumerator = enumerator.Detach();
+  return S_OK;
+}
+
+IFACEMETHODIMP V8LocalValueProperty::GetValue(
+    PCWSTR pwsz_key, IModelObject* p_v8_local_instance,
+    IModelObject** pp_value) noexcept {
+  // Get the parametric type within v8::Local<*>
+  // Set value to a pointer to an instance of this type.
+
+  WRL::ComPtr<IDebugHostType> sp_type;
+  RETURN_IF_FAIL(p_v8_local_instance->GetTypeInfo(&sp_type));
+
+  bool is_generic;
+  RETURN_IF_FAIL(sp_type->IsGeneric(&is_generic));
+  if (!is_generic) return E_FAIL;
+
+  WRL::ComPtr<IDebugHostSymbol> sp_generic_arg;
+  RETURN_IF_FAIL(sp_type->GetGenericArgumentAt(0, &sp_generic_arg));
+
+  _bstr_t generic_name;
+  RETURN_IF_FAIL(sp_generic_arg->GetName(generic_name.GetAddress()));
+
+  WRL::ComPtr<IDebugHostContext> sp_ctx;
+  RETURN_IF_FAIL(p_v8_local_instance->GetContext(&sp_ctx));
+
+  WRL::ComPtr<IDebugHostType> sp_value_type =
+      Extension::Current()->GetTypeFromV8Module(
+          sp_ctx, reinterpret_cast<const char16_t*>(
+                      static_cast<const wchar_t*>(generic_name)));
+  if (sp_value_type == nullptr ||
+      !Extension::Current()->DoesTypeDeriveFromObject(sp_value_type)) {
+    // The value type doesn't derive from v8::internal::Object (probably a
+    // public API type), so just use plain v8::internal::Object. We could
+    // consider mapping some public API types to their corresponding internal
+    // types here, at the possible cost of increased maintenance.
+    sp_value_type = Extension::Current()->GetV8ObjectType(sp_ctx);
+  }
+
+  Location loc;
+  RETURN_IF_FAIL(p_v8_local_instance->GetLocation(&loc));
+
+  // Read the pointer at the Object location
+  ULONG64 obj_address;
+  RETURN_IF_FAIL(
+      sp_debug_host_memory->ReadPointers(sp_ctx.Get(), loc, 1, &obj_address));
+
+  // If the val_ is a nullptr, then there is no value in the Local.
+  if (obj_address == 0) {
+    RETURN_IF_FAIL(CreateString(std::u16string{u"<empty>"}, pp_value));
+  } else {
+    // Should be a v8::internal::Object at the address
+    RETURN_IF_FAIL(sp_data_model_manager->CreateTypedObject(
+        sp_ctx.Get(), obj_address, sp_value_type.Get(), pp_value));
+  }
+
+  return S_OK;
+}
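The single ReadPointers call above relies on v8::Local<T> storing nothing but one pointer, so the handle's location is also the location of the wrapped object pointer. A simplified sketch of that layout assumption (the real class adds type-safety machinery around the same storage):

template <typename T>
class LocalSketch {
 public:
  bool IsEmpty() const { return val_ == nullptr; }  // the "<empty>" case above
 private:
  T* val_;  // the one slot that GetValue reads from the debuggee
};
static_assert(sizeof(LocalSketch<int>) == sizeof(int*),
              "a Local is exactly one pointer wide");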
+
+IFACEMETHODIMP V8LocalValueProperty::SetValue(
+    PCWSTR /*pwsz_key*/, IModelObject* /*p_process_instance*/,
+    IModelObject* /*p_value*/) noexcept {
+  return E_NOTIMPL;
+}
+
+IFACEMETHODIMP V8InternalCompilerNodeIdProperty::GetValue(
+    PCWSTR pwsz_key, IModelObject* p_v8_compiler_node_instance,
+    IModelObject** pp_value) noexcept {
+  WRL::ComPtr<IModelObject> sp_bit_field;
+  RETURN_IF_FAIL(p_v8_compiler_node_instance->GetRawValue(
+      SymbolKind::SymbolField, L"bit_field_", RawSearchNone, &sp_bit_field));
+
+  uint64_t bit_field_value;
+  RETURN_IF_FAIL(
+      UnboxULong64(sp_bit_field.Get(), &bit_field_value, true /*convert*/));
+
+  WRL::ComPtr<IDebugHostContext> sp_host_context;
+  RETURN_IF_FAIL(p_v8_compiler_node_instance->GetContext(&sp_host_context));
+
+  WRL::ComPtr<IDebugHostType> sp_id_field_type;
+  RETURN_IF_FAIL(Extension::Current()
+                     ->GetV8Module(sp_host_context)
+                     ->FindTypeByName(L"v8::internal::compiler::Node::IdField",
+                                      &sp_id_field_type));
+
+  // Get the template parameter at index 2 (the bit width, 24, in
+  // v8::base::BitField<v8::internal::compiler::NodeId, 0, 24>).
+  bool is_generic;
+  RETURN_IF_FAIL(sp_id_field_type->IsGeneric(&is_generic));
+  if (!is_generic) return E_FAIL;
+
+  WRL::ComPtr<IDebugHostSymbol> sp_k_size_arg;
+  RETURN_IF_FAIL(sp_id_field_type->GetGenericArgumentAt(2, &sp_k_size_arg));
+
+  WRL::ComPtr<IDebugHostConstant> sp_k_size_constant;
+  RETURN_IF_FAIL(sp_k_size_arg.As(&sp_k_size_constant));
+
+  int k_size;
+  RETURN_IF_FAIL(GetInt32(sp_k_size_constant.Get(), &k_size));
+
+  // Compute node_id.
+  uint32_t node_id = bit_field_value & (0xFFFFFFFF >> k_size);
+  RETURN_IF_FAIL(CreateUInt32(node_id, pp_value));
+
+  return S_OK;
+}
+
+IFACEMETHODIMP V8InternalCompilerNodeIdProperty::SetValue(
+    PCWSTR /*pwsz_key*/, IModelObject* /*p_process_instance*/,
+    IModelObject* /*p_value*/) noexcept {
+  return E_NOTIMPL;
+}
+
+IFACEMETHODIMP V8InternalCompilerBitsetNameProperty::GetValue(
+    PCWSTR pwsz_key, IModelObject* p_v8_compiler_type_instance,
+    IModelObject** pp_value) noexcept {
+  WRL::ComPtr<IModelObject> sp_payload;
+  RETURN_IF_FAIL(p_v8_compiler_type_instance->GetRawValue(
+      SymbolKind::SymbolField, L"payload_", RawSearchNone, &sp_payload));
+
+  uint64_t payload_value;
+  RETURN_IF_FAIL(
+      UnboxULong64(sp_payload.Get(), &payload_value, true /*convert*/));
+
+  const char* bitset_name = ::BitsetName(payload_value);
+  if (!bitset_name) return E_FAIL;
+  std::string name(bitset_name);
+  RETURN_IF_FAIL(CreateString(ConvertToU16String(name), pp_value));
+
+  return S_OK;
+}
+
+IFACEMETHODIMP V8InternalCompilerBitsetNameProperty::SetValue(
+    PCWSTR /*pwsz_key*/, IModelObject* /*p_process_instance*/,
+    IModelObject* /*p_value*/) noexcept {
+  return E_NOTIMPL;
+}
+
+constexpr wchar_t usage[] =
+    LR"(Invalid arguments.
+First argument should be a uint64 representing the tagged value to investigate.
+Second argument is optional, and may be a fully-qualified type name such as
+v8::internal::String.)";
+
+IFACEMETHODIMP InspectV8ObjectMethod::Call(IModelObject* p_context_object,
+                                           ULONG64 arg_count,
+                                           _In_reads_(arg_count)
+                                               IModelObject** pp_arguments,
+                                           IModelObject** pp_result,
+                                           IKeyStore** pp_metadata) noexcept {
+  // Read the arguments.
+  ULONG64 tagged_value;
+  _bstr_t type_name;
+  if (arg_count < 1 ||
+      FAILED(UnboxULong64(pp_arguments[0], &tagged_value, /*convert=*/true)) ||
+      (arg_count >= 2 &&
+       FAILED(UnboxString(pp_arguments[1], type_name.GetAddress())))) {
+    sp_data_model_manager->CreateErrorObject(E_INVALIDARG, usage, pp_result);
+    return E_INVALIDARG;
+  }
+
+  WRL::ComPtr<IDebugHostContext> sp_ctx;
+  RETURN_IF_FAIL(sp_debug_host->GetCurrentContext(&sp_ctx));
+
+  // We can't use CreateTypedObject for a value which may not actually reside
+  // anywhere in memory, so create a synthetic object.
+  WRL::ComPtr<V8CachedObject> cached_object =
+      WRL::Make<V8CachedObject>(::GetHeapObject(
+          sp_ctx, tagged_value, 0, static_cast<const char*>(type_name),
+          /*is_compressed=*/false));
+  return CreateSyntheticObjectForV8Object(sp_ctx.Get(), cached_object.Get(),
+                                          pp_result);
+}
+
+// Creates an IModelObject representing the data in the given property.
+HRESULT GetModelForProperty(const Property& prop,
+                            WRL::ComPtr<IDebugHostContext>& sp_ctx,
+                            IModelObject** result) {
+  switch (prop.type) {
+    case PropertyType::kPointer:
+      return GetModelForBasicField(prop.addr_value, prop.type_name,
+                                   prop.uncompressed_type_name, sp_ctx, result);
+    case PropertyType::kStruct:
+      return GetModelForStruct(prop.addr_value, prop.fields, sp_ctx, result);
+    case PropertyType::kArray:
+    case PropertyType::kStructArray:
+      if (prop.type == PropertyType::kArray &&
+          prop.type_name == ConvertToU16String(prop.uncompressed_type_name)) {
+        // An array of things that are not structs or compressed tagged values
+        // is most cleanly represented by a native array.
+        return GetModelForNativeArray(prop.addr_value, prop.type_name,
+                                      prop.length, sp_ctx, result);
+      }
+      // Otherwise, we must construct a custom iterable object.
+      return GetModelForCustomArray(prop, sp_ctx, result);
+    default:
+      return E_FAIL;
+  }
+}
diff --git a/src/third_party/v8/tools/v8windbg/src/object-inspection.h b/src/third_party/v8/tools/v8windbg/src/object-inspection.h
new file mode 100644
index 0000000..238d861
--- /dev/null
+++ b/src/third_party/v8/tools/v8windbg/src/object-inspection.h
@@ -0,0 +1,297 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TOOLS_V8WINDBG_SRC_OBJECT_INSPECTION_H_
+#define V8_TOOLS_V8WINDBG_SRC_OBJECT_INSPECTION_H_
+
+#include <comutil.h>
+#include <wrl/implements.h>
+
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include "tools/v8windbg/base/dbgext.h"
+#include "tools/v8windbg/src/v8-debug-helper-interop.h"
+#include "tools/v8windbg/src/v8windbg-extension.h"
+
+// The representation of the underlying V8 object that will be cached on the
+// DataModel representation. (Needs to implement IUnknown).
+class __declspec(uuid("6392E072-37BB-4220-A5FF-114098923A02")) IV8CachedObject
+    : public IUnknown {
+ public:
+  virtual HRESULT __stdcall GetCachedV8HeapObject(
+      V8HeapObject** pp_heap_object) = 0;
+};
+
+class V8CachedObject
+    : public WRL::RuntimeClass<
+          WRL::RuntimeClassFlags<WRL::RuntimeClassType::ClassicCom>,
+          IV8CachedObject> {
+ public:
+  V8CachedObject(Location location, std::string uncompressed_type_name,
+                 WRL::ComPtr<IDebugHostContext> context, bool is_compressed);
+  V8CachedObject(V8HeapObject heap_object);
+  ~V8CachedObject() override;
+
+  static HRESULT Create(IModelObject* p_v8_object_instance,
+                        IV8CachedObject** result);
+
+  IFACEMETHOD(GetCachedV8HeapObject)(V8HeapObject** pp_heap_object);
+
+ private:
+  // The properties and description of the object, if already read.
+  V8HeapObject heap_object_;
+  bool heap_object_initialized_ = false;
+
+  // Data that is necessary for reading the object.
+  Location location_;
+  std::string uncompressed_type_name_;
+  WRL::ComPtr<IDebugHostContext> context_;
+  bool is_compressed_ = false;
+};
+
+// A simple COM wrapper class to hold data required for IndexedFieldParent.
+// (Needs to implement IUnknown).
+class __declspec(uuid("6392E072-37BB-4220-A5FF-114098923A03")) IIndexedFieldData
+    : public IUnknown {
+ public:
+  // Get a pointer to the Property object held by this IIndexedFieldData. The
+  // pointer returned in this way is valid only while the containing
+  // IIndexedFieldData is alive.
+  virtual HRESULT __stdcall GetProperty(Property** property) = 0;
+};
+
+class IndexedFieldData
+    : public WRL::RuntimeClass<
+          WRL::RuntimeClassFlags<WRL::RuntimeClassType::ClassicCom>,
+          IIndexedFieldData> {
+ public:
+  IndexedFieldData(Property property);
+  ~IndexedFieldData() override;
+
+  // Get a pointer to the Property object held by this IndexedFieldData. The
+  // pointer returned in this way is valid only while the containing
+  // IndexedFieldData is alive.
+  IFACEMETHOD(GetProperty)(Property** property);
+
+ private:
+  Property property_;
+};
+
+// A parent model that provides indexing support for fields that contain arrays
+// of something more complicated than basic native types.
+class IndexedFieldParent
+    : public WRL::RuntimeClass<
+          WRL::RuntimeClassFlags<WRL::RuntimeClassType::ClassicCom>,
+          IDataModelConcept, IIterableConcept, IIndexableConcept> {
+ public:
+  // IDataModelConcept
+  IFACEMETHOD(InitializeObject)
+  (IModelObject* model_object, IDebugHostTypeSignature* matching_type_signature,
+   IDebugHostSymbolEnumerator* wildcard_matches);
+
+  // IDataModelConcept
+  IFACEMETHOD(GetName)(BSTR* model_name);
+
+  // IIndexableConcept
+  IFACEMETHOD(GetAt)
+  (IModelObject* context_object, ULONG64 indexer_count, IModelObject** indexers,
+   _COM_Errorptr_ IModelObject** object, IKeyStore** metadata);
+
+  // IIndexableConcept
+  IFACEMETHOD(GetDimensionality)
+  (IModelObject* context_object, ULONG64* dimensionality);
+
+  // IIndexableConcept
+  IFACEMETHOD(SetAt)
+  (IModelObject* context_object, ULONG64 indexer_count, IModelObject** indexers,
+   IModelObject* value);
+
+  // IIterableConcept
+  IFACEMETHOD(GetDefaultIndexDimensionality)
+  (IModelObject* context_object, ULONG64* dimensionality);
+
+  // IIterableConcept
+  IFACEMETHOD(GetIterator)
+  (IModelObject* context_object, IModelIterator** iterator);
+};
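+// For example, an expression like
+//   dx object.Value.map.instance_descriptors.descriptors[1].key
+// (exercised in tools/v8windbg/test/v8windbg-test.cc) is the kind of access
+// this parent model is meant to support: |descriptors| holds struct values
+// rather than a basic native type.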
+
+// An iterator for the values within an array field.
+class IndexedFieldIterator
+    : public WRL::RuntimeClass<
+          WRL::RuntimeClassFlags<WRL::RuntimeClassType::ClassicCom>,
+          IModelIterator> {
+ public:
+  IndexedFieldIterator(IModelObject* context_object);
+  ~IndexedFieldIterator() override;
+
+  IFACEMETHOD(Reset)();
+
+  IFACEMETHOD(GetNext)
+  (IModelObject** object, ULONG64 dimensions, IModelObject** indexers,
+   IKeyStore** metadata);
+
+ private:
+  size_t next_ = 0;
+  WRL::ComPtr<IModelObject> context_object_;
+};
+
+// Enumerates the names of fields on V8 objects.
+class V8ObjectKeyEnumerator
+    : public WRL::RuntimeClass<
+          WRL::RuntimeClassFlags<WRL::RuntimeClassType::ClassicCom>,
+          IKeyEnumerator> {
+ public:
+  V8ObjectKeyEnumerator(WRL::ComPtr<IV8CachedObject>& v8_cached_object);
+  ~V8ObjectKeyEnumerator() override;
+
+  IFACEMETHOD(Reset)();
+
+  // When this enumerator is returned from an IDynamicKeyProviderConcept, this
+  // method is called with a nullptr 'value' for each key; the caller then
+  // calls GetKey on the IDynamicKeyProviderConcept interface for each key
+  // returned.
+  IFACEMETHOD(GetNext)(BSTR* key, IModelObject** value, IKeyStore** metadata);
+
+ private:
+  int index_ = 0;
+  WRL::ComPtr<IV8CachedObject> sp_v8_cached_object_;
+};
+
+// A parent model for V8 handle types such as v8::internal::Handle<*>.
+class V8LocalDataModel
+    : public WRL::RuntimeClass<
+          WRL::RuntimeClassFlags<WRL::RuntimeClassType::ClassicCom>,
+          IDataModelConcept> {
+ public:
+  IFACEMETHOD(InitializeObject)
+  (IModelObject* model_object, IDebugHostTypeSignature* matching_type_signature,
+   IDebugHostSymbolEnumerator* wildcard_matches);
+
+  IFACEMETHOD(GetName)(BSTR* model_name);
+};
+
+// A parent model for V8 object types such as v8::internal::Object.
+class V8ObjectDataModel
+    : public WRL::RuntimeClass<
+          WRL::RuntimeClassFlags<WRL::RuntimeClassType::ClassicCom>,
+          IDataModelConcept, IStringDisplayableConcept,
+          IDynamicKeyProviderConcept> {
+ public:
+  HRESULT GetCachedObject(IModelObject* context_object,
+                          IV8CachedObject** result) {
+    // Get the IModelObject for this parent object. As it is a dynamic provider,
+    // there is only one parent directly on the object.
+    WRL::ComPtr<IModelObject> sp_parent_model, sp_context_adjuster;
+    RETURN_IF_FAIL(context_object->GetParentModel(0, &sp_parent_model,
+                                                  &sp_context_adjuster));
+
+    // See if the cached object is already present
+    WRL::ComPtr<IUnknown> sp_context;
+    HRESULT hr = context_object->GetContextForDataModel(sp_parent_model.Get(),
+                                                        &sp_context);
+
+    WRL::ComPtr<IV8CachedObject> sp_v8_cached_object;
+
+    if (SUCCEEDED(hr)) {
+      RETURN_IF_FAIL(sp_context.As(&sp_v8_cached_object));
+    } else {
+      RETURN_IF_FAIL(
+          V8CachedObject::Create(context_object, &sp_v8_cached_object));
+      RETURN_IF_FAIL(sp_v8_cached_object.As(&sp_context));
+      RETURN_IF_FAIL(context_object->SetContextForDataModel(
+          sp_parent_model.Get(), sp_context.Get()));
+    }
+
+    *result = sp_v8_cached_object.Detach();
+    return S_OK;
+  }
+
+  IFACEMETHOD(InitializeObject)
+  (IModelObject* model_object, IDebugHostTypeSignature* matching_type_signature,
+   IDebugHostSymbolEnumerator* wildcard_matches);
+
+  IFACEMETHOD(GetName)(BSTR* model_name);
+
+  IFACEMETHOD(ToDisplayString)
+  (IModelObject* context_object, IKeyStore* metadata, BSTR* display_string);
+
+  // IDynamicKeyProviderConcept
+  IFACEMETHOD(GetKey)
+  (IModelObject* context_object, PCWSTR key, IModelObject** key_value,
+   IKeyStore** metadata, bool* has_key);
+
+  IFACEMETHOD(SetKey)
+  (IModelObject* context_object, PCWSTR key, IModelObject* key_value,
+   IKeyStore* metadata);
+
+  IFACEMETHOD(EnumerateKeys)
+  (IModelObject* context_object, IKeyEnumerator** pp_enumerator);
+};
+
+// The implementation of the "Value" getter for V8 handle types.
+class V8LocalValueProperty
+    : public WRL::RuntimeClass<
+          WRL::RuntimeClassFlags<WRL::RuntimeClassType::ClassicCom>,
+          IModelPropertyAccessor> {
+ public:
+  IFACEMETHOD(GetValue)
+  (PCWSTR pwsz_key, IModelObject* p_v8_object_instance,
+   IModelObject** pp_value);
+
+  IFACEMETHOD(SetValue)
+  (PCWSTR /*pwsz_key*/, IModelObject* /*p_process_instance*/,
+   IModelObject* /*p_value*/);
+};
+
+// The implementation of the "NodeId" getter for the
+// v8::internal::compiler::Node type.
+class V8InternalCompilerNodeIdProperty
+    : public WRL::RuntimeClass<
+          WRL::RuntimeClassFlags<WRL::RuntimeClassType::ClassicCom>,
+          IModelPropertyAccessor> {
+ public:
+  IFACEMETHOD(GetValue)
+  (PCWSTR pwsz_key, IModelObject* p_v8_object_instance,
+   IModelObject** pp_value);
+
+  IFACEMETHOD(SetValue)
+  (PCWSTR /*pwsz_key*/, IModelObject* /*p_process_instance*/,
+   IModelObject* /*p_value*/);
+};
+
+// The implementation of the "bitset_name" getter for the
+// v8::internal::compiler::Type type.
+class V8InternalCompilerBitsetNameProperty
+    : public WRL::RuntimeClass<
+          WRL::RuntimeClassFlags<WRL::RuntimeClassType::ClassicCom>,
+          IModelPropertyAccessor> {
+ public:
+  IFACEMETHOD(GetValue)
+  (PCWSTR pwsz_key, IModelObject* p_v8_compiler_type_instance,
+   IModelObject** pp_value);
+
+  IFACEMETHOD(SetValue)
+  (PCWSTR /*pwsz_key*/, IModelObject* /*p_process_instance*/,
+   IModelObject* /*p_value*/);
+};
+
+// A way that someone can directly inspect a tagged value, even if that value
+// isn't in memory (from a register, or the user's imagination, etc.).
+class InspectV8ObjectMethod
+    : public WRL::RuntimeClass<
+          WRL::RuntimeClassFlags<WRL::RuntimeClassType::ClassicCom>,
+          IModelMethod> {
+ public:
+  IFACEMETHOD(Call)
+  (IModelObject* p_context_object, ULONG64 arg_count,
+   _In_reads_(arg_count) IModelObject** pp_arguments, IModelObject** pp_result,
+   IKeyStore** pp_metadata);
+};
+
+HRESULT GetModelForProperty(const Property& prop,
+                            WRL::ComPtr<IDebugHostContext>& sp_ctx,
+                            IModelObject** result);
+
+#endif  // V8_TOOLS_V8WINDBG_SRC_OBJECT_INSPECTION_H_
diff --git a/src/third_party/v8/tools/v8windbg/src/v8-debug-helper-interop.cc b/src/third_party/v8/tools/v8windbg/src/v8-debug-helper-interop.cc
new file mode 100644
index 0000000..74d0a9d
--- /dev/null
+++ b/src/third_party/v8/tools/v8windbg/src/v8-debug-helper-interop.cc
@@ -0,0 +1,176 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "tools/v8windbg/src/v8-debug-helper-interop.h"
+
+#include <Windows.h>
+#include <crtdbg.h>
+
+#include "src/common/globals.h"
+#include "tools/debug_helper/debug-helper.h"
+#include "tools/v8windbg/base/utilities.h"
+#include "tools/v8windbg/src/v8windbg-extension.h"
+
+namespace d = v8::debug_helper;
+
+// We need a plain C function pointer for interop with v8_debug_helper. We can
+// use this to get one as long as we never need two at once.
+class MemReaderScope {
+ public:
+  explicit MemReaderScope(WRL::ComPtr<IDebugHostContext> sp_context)
+      : sp_context_(sp_context) {
+    _ASSERTE(!context_);
+    context_ = sp_context_.Get();
+  }
+  ~MemReaderScope() { context_ = nullptr; }
+  d::MemoryAccessor GetReader() { return &MemReaderScope::Read; }
+
+ private:
+  MemReaderScope(const MemReaderScope&) = delete;
+  MemReaderScope& operator=(const MemReaderScope&) = delete;
+  static d::MemoryAccessResult Read(uintptr_t address, void* destination,
+                                    size_t byte_count) {
+    ULONG64 bytes_read;
+    Location loc{address};
+    HRESULT hr = sp_debug_host_memory->ReadBytes(context_, loc, destination,
+                                                 byte_count, &bytes_read);
+    // TODO determine when an address is valid but inaccessible
+    return SUCCEEDED(hr) ? d::MemoryAccessResult::kOk
+                         : d::MemoryAccessResult::kAddressNotValid;
+  }
+  WRL::ComPtr<IDebugHostContext> sp_context_;
+  static IDebugHostContext* context_;
+};
+IDebugHostContext* MemReaderScope::context_;
+
+StructField::StructField(std::u16string field_name, std::u16string type_name,
+                         std::string uncompressed_type_name, uint64_t offset,
+                         uint8_t num_bits, uint8_t shift_bits)
+    : name(field_name),
+      type_name(type_name),
+      uncompressed_type_name(uncompressed_type_name),
+      offset(offset),
+      num_bits(num_bits),
+      shift_bits(shift_bits) {}
+StructField::~StructField() = default;
+StructField::StructField(const StructField&) = default;
+StructField::StructField(StructField&&) = default;
+StructField& StructField::operator=(const StructField&) = default;
+StructField& StructField::operator=(StructField&&) = default;
+
+Property::Property(std::u16string property_name, std::u16string type_name,
+                   std::string uncompressed_type_name, uint64_t address,
+                   size_t item_size)
+    : name(property_name),
+      type(PropertyType::kPointer),
+      type_name(type_name),
+      uncompressed_type_name(uncompressed_type_name),
+      addr_value(address),
+      item_size(item_size) {}
+Property::~Property() = default;
+Property::Property(const Property&) = default;
+Property::Property(Property&&) = default;
+Property& Property::operator=(const Property&) = default;
+Property& Property::operator=(Property&&) = default;
+
+V8HeapObject::V8HeapObject() = default;
+V8HeapObject::~V8HeapObject() = default;
+V8HeapObject::V8HeapObject(const V8HeapObject&) = default;
+V8HeapObject::V8HeapObject(V8HeapObject&&) = default;
+V8HeapObject& V8HeapObject::operator=(const V8HeapObject&) = default;
+V8HeapObject& V8HeapObject::operator=(V8HeapObject&&) = default;
+
+std::vector<Property> GetPropertiesAsVector(size_t num_properties,
+                                            d::ObjectProperty** properties) {
+  std::vector<Property> result;
+  for (size_t property_index = 0; property_index < num_properties;
+       ++property_index) {
+    const auto& source_prop = *(properties)[property_index];
+    Property dest_prop(ConvertToU16String(source_prop.name),
+                       ConvertToU16String(source_prop.type),
+                       source_prop.decompressed_type, source_prop.address,
+                       source_prop.size);
+    if (source_prop.kind != d::PropertyKind::kSingle) {
+      dest_prop.type = PropertyType::kArray;
+      dest_prop.length = source_prop.num_values;
+    }
+    if (dest_prop.type_name.empty() || source_prop.num_struct_fields > 0) {
+      // If the helper library didn't provide a type, then it should have
+      // provided struct fields instead. Set the struct type flag and copy the
+      // fields into the result.
+      dest_prop.type =
+          static_cast<PropertyType>(static_cast<int>(dest_prop.type) |
+                                    static_cast<int>(PropertyType::kStruct));
+      for (size_t field_index = 0; field_index < source_prop.num_struct_fields;
+           ++field_index) {
+        const auto& struct_field = *source_prop.struct_fields[field_index];
+        dest_prop.fields.push_back({ConvertToU16String(struct_field.name),
+                                    ConvertToU16String(struct_field.type),
+                                    struct_field.decompressed_type,
+                                    struct_field.offset, struct_field.num_bits,
+                                    struct_field.shift_bits});
+      }
+    }
+    result.push_back(dest_prop);
+  }
+  return result;
+}
+
+V8HeapObject GetHeapObject(WRL::ComPtr<IDebugHostContext> sp_context,
+                           uint64_t tagged_ptr, uint64_t referring_pointer,
+                           const char* type_name, bool is_compressed) {
+  // Read the value at the address, and see if it is a tagged pointer
+
+  V8HeapObject obj;
+  MemReaderScope reader_scope(sp_context);
+
+  d::HeapAddresses heap_addresses = {0, 0, 0, 0};
+  // TODO ideally we'd provide real heap page pointers. For now, just testing
+  // decompression based on the pointer to wherever we found this value,
+  // which is likely (though not guaranteed) to be a heap pointer itself.
+  heap_addresses.any_heap_pointer = referring_pointer;
+
+  auto props = d::GetObjectProperties(tagged_ptr, reader_scope.GetReader(),
+                                      heap_addresses, type_name);
+  obj.friendly_name = ConvertToU16String(props->brief);
+  obj.properties =
+      GetPropertiesAsVector(props->num_properties, props->properties);
+
+  // For each guessed type, create a synthetic property that will request data
+  // about the same object again but with a more specific type hint.
+  if (referring_pointer != 0) {
+    for (size_t type_index = 0; type_index < props->num_guessed_types;
+         ++type_index) {
+      const std::string& type_name = props->guessed_types[type_index];
+      Property dest_prop(
+          ConvertToU16String(("guessed type " + type_name).c_str()),
+          ConvertToU16String(is_compressed ? kTaggedValue : type_name),
+          type_name, referring_pointer,
+          is_compressed ? i::kTaggedSize : sizeof(void*));
+      obj.properties.push_back(dest_prop);
+    }
+  }
+
+  return obj;
+}
+
+std::vector<std::u16string> ListObjectClasses() {
+  const d::ClassList* class_list = d::ListObjectClasses();
+  std::vector<std::u16string> result;
+  for (size_t i = 0; i < class_list->num_class_names; ++i) {
+    result.push_back(ConvertToU16String(class_list->class_names[i]));
+  }
+  return result;
+}
+
+const char* BitsetName(uint64_t payload) { return d::BitsetName(payload); }
+
+std::vector<Property> GetStackFrame(WRL::ComPtr<IDebugHostContext> sp_context,
+                                    uint64_t frame_pointer) {
+  MemReaderScope reader_scope(sp_context);
+  auto props = d::GetStackFrame(static_cast<uintptr_t>(frame_pointer),
+                                reader_scope.GetReader());
+  return GetPropertiesAsVector(props->num_properties, props->properties);
+}
diff --git a/src/third_party/v8/tools/v8windbg/src/v8-debug-helper-interop.h b/src/third_party/v8/tools/v8windbg/src/v8-debug-helper-interop.h
new file mode 100644
index 0000000..f7d78c5
--- /dev/null
+++ b/src/third_party/v8/tools/v8windbg/src/v8-debug-helper-interop.h
@@ -0,0 +1,143 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TOOLS_V8WINDBG_SRC_V8_DEBUG_HELPER_INTEROP_H_
+#define V8_TOOLS_V8WINDBG_SRC_V8_DEBUG_HELPER_INTEROP_H_
+
+#include <wrl/client.h>
+
+#include <DbgModel.h>
+
+#include <cstdint>
+#include <map>
+#include <string>
+#include <vector>
+
+namespace WRL = Microsoft::WRL;
+
+constexpr char kObject[] = "v8::internal::Object";
+constexpr char16_t kObjectU[] = u"v8::internal::Object";
+constexpr char kTaggedValue[] = "v8::internal::TaggedValue";
+constexpr char16_t kTaggedValueU[] = u"v8::internal::TaggedValue";
+
+enum class PropertyType {
+  kPointer = 0,
+  kArray = 1,
+  kStruct = 2,
+  kStructArray = kArray | kStruct,
+};
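+// kArray and kStruct act as combinable flag bits: an array of struct values is
+// tagged with both, e.g.
+//   static_cast<PropertyType>(static_cast<int>(PropertyType::kArray) |
+//                             static_cast<int>(PropertyType::kStruct))
+// yields PropertyType::kStructArray (this is how GetPropertiesAsVector in
+// v8-debug-helper-interop.cc marks struct arrays).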
+
+struct StructField {
+  StructField(std::u16string field_name, std::u16string type_name,
+              std::string uncompressed_type_name, uint64_t offset,
+              uint8_t num_bits, uint8_t shift_bits);
+  ~StructField();
+  StructField(const StructField&);
+  StructField(StructField&&);
+  StructField& operator=(const StructField&);
+  StructField& operator=(StructField&&);
+
+  std::u16string name;
+
+  // Statically-determined type, such as from .tq definition. This type should
+  // be treated as if it were used in the v8::internal namespace; that is, type
+  // "X::Y" can mean any of the following, in order of decreasing preference:
+  // - v8::internal::X::Y
+  // - v8::X::Y
+  // - X::Y
+  std::u16string type_name;
+
+  // In some cases, |type_name| may be a simple type representing a compressed
+  // pointer such as v8::internal::TaggedValue. In those cases,
+  // |uncompressed_type_name| will contain the type of the object when
+  // decompressed. Otherwise, |uncompressed_type_name| will match |type_name|.
+  // In any case, it is safe to pass the |uncompressed_type_name| value as the
+  // type_hint on a subsequent call to GetObjectProperties.
+  std::string uncompressed_type_name;
+
+  // Offset, in bytes, from beginning of struct.
+  uint64_t offset;
+
+  // The number of bits that are present, if this value is a bitfield. Zero
+  // indicates that this value is not a bitfield (the full value is stored).
+  uint8_t num_bits;
+
+  // The number of bits by which this value has been left-shifted for storage as
+  // a bitfield.
+  uint8_t shift_bits;
+};
+
+struct Property {
+  Property(std::u16string property_name, std::u16string type_name,
+           std::string uncompressed_type_name, uint64_t address,
+           size_t item_size);
+  ~Property();
+  Property(const Property&);
+  Property(Property&&);
+  Property& operator=(const Property&);
+  Property& operator=(Property&&);
+
+  std::u16string name;
+  PropertyType type;
+
+  // Statically-determined type, such as from .tq definition. Can be an empty
+  // string if this property is itself a Torque-defined struct; in that case use
+  // |fields| instead. This type should be treated as if it were used in the
+  // v8::internal namespace; that is, type "X::Y" can mean any of the following,
+  // in order of decreasing preference:
+  // - v8::internal::X::Y
+  // - v8::X::Y
+  // - X::Y
+  std::u16string type_name;
+
+  // In some cases, |type_name| may be a simple type representing a compressed
+  // pointer such as v8::internal::TaggedValue. In those cases,
+  // |uncompressed_type_name| will contain the type of the object when
+  // decompressed. Otherwise, |uncompressed_type_name| will match |type_name|.
+  // In any case, it is safe to pass the |uncompressed_type_name| value as the
+  // type_hint on a subsequent call to GetObjectProperties.
+  std::string uncompressed_type_name;
+
+  // The address where the property value can be found in the debuggee's address
+  // space, or the address of the first value for an array.
+  uint64_t addr_value;
+
+  // Size of each array item, if this property is an array.
+  size_t item_size;
+
+  // Number of array items, if this property is an array.
+  size_t length = 0;
+
+  // Fields within this property, if this property is a struct, or fields within
+  // each array element, if this property is a struct array.
+  std::vector<StructField> fields;
+};
+
+struct V8HeapObject {
+  V8HeapObject();
+  ~V8HeapObject();
+  V8HeapObject(const V8HeapObject&);
+  V8HeapObject(V8HeapObject&&);
+  V8HeapObject& operator=(const V8HeapObject&);
+  V8HeapObject& operator=(V8HeapObject&&);
+  std::u16string friendly_name;  // String to print in single-line description.
+  std::vector<Property> properties;
+};
+
+V8HeapObject GetHeapObject(WRL::ComPtr<IDebugHostContext> sp_context,
+                           uint64_t address, uint64_t referring_pointer,
+                           const char* type_name, bool is_compressed);
+
+// Expand a compressed pointer from 32 bits to the format that
+// GetObjectProperties expects for compressed pointers.
+inline uint64_t ExpandCompressedPointer(uint32_t ptr) { return ptr; }
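+// For example, a compressed value read from the heap as the 32-bit quantity
+// 0x00000251 is passed onward as the zero-extended 64-bit value
+// 0x0000000000000251; the debug helper library performs the actual
+// decompression using the HeapAddresses it is given (see GetHeapObject in
+// v8-debug-helper-interop.cc).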
+
+std::vector<std::u16string> ListObjectClasses();
+
+const char* BitsetName(uint64_t payload);
+
+std::vector<Property> GetStackFrame(WRL::ComPtr<IDebugHostContext> sp_context,
+                                    uint64_t frame_pointer);
+
+#endif  // V8_TOOLS_V8WINDBG_SRC_V8_DEBUG_HELPER_INTEROP_H_
diff --git a/src/third_party/v8/tools/v8windbg/src/v8windbg-extension.cc b/src/third_party/v8/tools/v8windbg/src/v8windbg-extension.cc
new file mode 100644
index 0000000..58a520c
--- /dev/null
+++ b/src/third_party/v8/tools/v8windbg/src/v8windbg-extension.cc
@@ -0,0 +1,387 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "tools/v8windbg/src/v8windbg-extension.h"
+
+#include <iostream>
+
+#include "tools/v8windbg/base/utilities.h"
+#include "tools/v8windbg/src/cur-isolate.h"
+#include "tools/v8windbg/src/list-chunks.h"
+#include "tools/v8windbg/src/local-variables.h"
+#include "tools/v8windbg/src/object-inspection.h"
+
+std::unique_ptr<Extension> Extension::current_extension_ = nullptr;
+const wchar_t* pcur_isolate = L"curisolate";
+const wchar_t* plist_chunks = L"listchunks";
+const wchar_t* pv8_object = L"v8object";
+
+HRESULT CreateExtension() {
+  if (Extension::Current() != nullptr || sp_data_model_manager == nullptr ||
+      sp_debug_host == nullptr) {
+    return E_FAIL;
+  } else {
+    std::unique_ptr<Extension> new_extension(new (std::nothrow) Extension());
+    if (new_extension == nullptr) return E_FAIL;
+    RETURN_IF_FAIL(new_extension->Initialize());
+    Extension::SetExtension(std::move(new_extension));
+    return S_OK;
+  }
+}
+
+void DestroyExtension() { Extension::SetExtension(nullptr); }
+
+bool Extension::DoesTypeDeriveFromObject(
+    const WRL::ComPtr<IDebugHostType>& sp_type) {
+  _bstr_t name;
+  HRESULT hr = sp_type->GetName(name.GetAddress());
+  if (!SUCCEEDED(hr)) return false;
+  if (std::string(static_cast<const char*>(name)) == kObject) return true;
+
+  WRL::ComPtr<IDebugHostSymbolEnumerator> sp_super_class_enumerator;
+  hr = sp_type->EnumerateChildren(SymbolKind::SymbolBaseClass, nullptr,
+                                  &sp_super_class_enumerator);
+  if (!SUCCEEDED(hr)) return false;
+
+  while (true) {
+    WRL::ComPtr<IDebugHostSymbol> sp_type_symbol;
+    if (sp_super_class_enumerator->GetNext(&sp_type_symbol) != S_OK) break;
+    WRL::ComPtr<IDebugHostBaseClass> sp_base_class;
+    if (FAILED(sp_type_symbol.As(&sp_base_class))) continue;
+    WRL::ComPtr<IDebugHostType> sp_base_type;
+    hr = sp_base_class->GetType(&sp_base_type);
+    if (!SUCCEEDED(hr)) continue;
+    if (DoesTypeDeriveFromObject(sp_base_type)) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
+WRL::ComPtr<IDebugHostType> Extension::GetV8ObjectType(
+    WRL::ComPtr<IDebugHostContext>& sp_ctx) {
+  return GetTypeFromV8Module(sp_ctx, kObjectU);
+}
+
+WRL::ComPtr<IDebugHostType> Extension::GetTypeFromV8Module(
+    WRL::ComPtr<IDebugHostContext>& sp_ctx, const char16_t* type_name) {
+  bool is_equal;
+  if (sp_v8_module_ctx_ == nullptr ||
+      !SUCCEEDED(sp_v8_module_ctx_->IsEqualTo(sp_ctx.Get(), &is_equal)) ||
+      !is_equal) {
+    // Context changed; clear the dictionary.
+    cached_v8_module_types_.clear();
+  }
+
+  GetV8Module(sp_ctx);  // Will force the correct module to load
+  if (sp_v8_module_ == nullptr) return nullptr;
+
+  auto& dictionary_entry = cached_v8_module_types_[type_name];
+  if (dictionary_entry == nullptr) {
+    const std::wstring type_name_w(reinterpret_cast<const wchar_t*>(type_name));
+    // The contract from debug_helper functions is to provide type names that
+    // would be valid if used in C++ code within the v8::internal namespace.
+    // They might be fully qualified but aren't required to be. Thus, we must
+    // simulate an "unqualified name lookup" here, by searching for the type
+    // starting in the innermost namespace and working outward.
+    if (SUCCEEDED(sp_v8_module_->FindTypeByName(
+            (L"v8::internal::" + type_name_w).c_str(), &dictionary_entry))) {
+      return dictionary_entry;
+    }
+    if (SUCCEEDED(sp_v8_module_->FindTypeByName((L"v8::" + type_name_w).c_str(),
+                                                &dictionary_entry))) {
+      return dictionary_entry;
+    }
+    sp_v8_module_->FindTypeByName(reinterpret_cast<PCWSTR>(type_name),
+                                  &dictionary_entry);
+  }
+  return dictionary_entry;
+}
+
+namespace {
+
+// Returns whether the given module appears to have symbols for V8 code.
+bool IsV8Module(IDebugHostModule* module) {
+  WRL::ComPtr<IDebugHostSymbol> sp_isolate_sym;
+  // The below symbol is specific to the main V8 module.
+  if (FAILED(module->FindSymbolByName(L"v8::Script::Run", &sp_isolate_sym))) {
+    return false;
+  }
+  return true;
+}
+
+}  // namespace
+
+WRL::ComPtr<IDebugHostModule> Extension::GetV8Module(
+    WRL::ComPtr<IDebugHostContext>& sp_ctx) {
+  // Return the cached version if it exists and the context is the same
+
+  // Note: Context will often have the CUSTOM flag set, which never compares
+  // equal. So for now DON'T compare by context, but by proc_id. (An API is in
+  // progress to compare by address space, which should be usable when shipped).
+  /*
+  if (sp_v8_module_ != nullptr) {
+    bool is_equal;
+    if (SUCCEEDED(sp_v8_module_ctx_->IsEqualTo(sp_ctx.Get(), &is_equal)) &&
+  is_equal) { return sp_v8_module_; } else { sp_v8_module_ = nullptr;
+      sp_v8_module_ctx_ = nullptr;
+    }
+  }
+  */
+  WRL::ComPtr<IDebugSystemObjects> sp_sys_objects;
+  ULONG proc_id = 0;
+  if (SUCCEEDED(sp_debug_control.As(&sp_sys_objects))) {
+    if (SUCCEEDED(sp_sys_objects->GetCurrentProcessSystemId(&proc_id))) {
+      if (proc_id == v8_module_proc_id_ && sp_v8_module_ != nullptr)
+        return sp_v8_module_;
+    }
+  }
+
+  // Search first for a few known module names, to avoid loading symbols for
+  // unrelated modules if we can easily avoid it. Generally, failing to find a
+  // module is fast but failing to find a symbol within a module is slow. Note
+  // that "v8" is listed first because it's highly likely to be the correct
+  // module if it exists. The others might include V8 symbols depending on the
+  // build configuration.
+  std::vector<const wchar_t*> known_names = {
+      L"v8", L"v8_for_testing", L"cctest_exe", L"chrome",
+      L"d8", L"msedge",         L"node",       L"unittests_exe"};
+  for (const wchar_t* name : known_names) {
+    WRL::ComPtr<IDebugHostModule> sp_module;
+    if (SUCCEEDED(sp_debug_host_symbols->FindModuleByName(sp_ctx.Get(), name,
+                                                          &sp_module))) {
+      if (IsV8Module(sp_module.Get())) {
+        sp_v8_module_ = sp_module;
+        sp_v8_module_ctx_ = sp_ctx;
+        v8_module_proc_id_ = proc_id;
+        return sp_v8_module_;
+      }
+    }
+  }
+
+  // Loop through all modules looking for the one that holds a known symbol.
+  WRL::ComPtr<IDebugHostSymbolEnumerator> sp_enum;
+  if (SUCCEEDED(
+          sp_debug_host_symbols->EnumerateModules(sp_ctx.Get(), &sp_enum))) {
+    HRESULT hr = S_OK;
+    while (true) {
+      WRL::ComPtr<IDebugHostSymbol> sp_mod_sym;
+      hr = sp_enum->GetNext(&sp_mod_sym);
+      // hr == E_BOUNDS : hit the end of the enumerator
+      // hr == E_ABORT  : a user interrupt was requested
+      if (FAILED(hr)) break;
+      WRL::ComPtr<IDebugHostModule> sp_module;
+      if (SUCCEEDED(sp_mod_sym.As(&sp_module))) /* should always succeed */
+      {
+        if (IsV8Module(sp_module.Get())) {
+          sp_v8_module_ = sp_module;
+          sp_v8_module_ctx_ = sp_ctx;
+          v8_module_proc_id_ = proc_id;
+          break;
+        }
+      }
+    }
+  }
+  // This will be the located module, or still nullptr if the above fails.
+  return sp_v8_module_;
+}
+
+Extension::Extension() = default;
+
+HRESULT Extension::Initialize() {
+  // Create an instance of the DataModel parent for v8::internal::Object types.
+  auto object_data_model{WRL::Make<V8ObjectDataModel>()};
+  RETURN_IF_FAIL(sp_data_model_manager->CreateDataModelObject(
+      object_data_model.Get(), &sp_object_data_model_));
+  RETURN_IF_FAIL(sp_object_data_model_->SetConcept(
+      __uuidof(IStringDisplayableConcept),
+      static_cast<IStringDisplayableConcept*>(object_data_model.Get()),
+      nullptr));
+  RETURN_IF_FAIL(sp_object_data_model_->SetConcept(
+      __uuidof(IDynamicKeyProviderConcept),
+      static_cast<IDynamicKeyProviderConcept*>(object_data_model.Get()),
+      nullptr));
+
+  // Register that parent model for all known types of V8 object.
+  std::vector<std::u16string> object_class_names = ListObjectClasses();
+  object_class_names.push_back(kObjectU);
+  object_class_names.push_back(kTaggedValueU);
+  for (const std::u16string& name : object_class_names) {
+    WRL::ComPtr<IDebugHostTypeSignature> sp_object_type_signature;
+    RETURN_IF_FAIL(sp_debug_host_symbols->CreateTypeSignature(
+        reinterpret_cast<const wchar_t*>(name.c_str()), nullptr,
+        &sp_object_type_signature));
+    RETURN_IF_FAIL(sp_data_model_manager->RegisterModelForTypeSignature(
+        sp_object_type_signature.Get(), sp_object_data_model_.Get()));
+    registered_types_.push_back(
+        {sp_object_type_signature.Get(), sp_object_data_model_.Get()});
+  }
+
+  // Create an instance of the DataModel parent for custom iterable fields.
+  auto indexed_field_model{WRL::Make<IndexedFieldParent>()};
+  RETURN_IF_FAIL(sp_data_model_manager->CreateDataModelObject(
+      indexed_field_model.Get(), &sp_indexed_field_model_));
+  RETURN_IF_FAIL(sp_indexed_field_model_->SetConcept(
+      __uuidof(IIndexableConcept),
+      static_cast<IIndexableConcept*>(indexed_field_model.Get()), nullptr));
+  RETURN_IF_FAIL(sp_indexed_field_model_->SetConcept(
+      __uuidof(IIterableConcept),
+      static_cast<IIterableConcept*>(indexed_field_model.Get()), nullptr));
+
+  // Create an instance of the DataModel parent class for v8::Local<*> types.
+  auto local_data_model{WRL::Make<V8LocalDataModel>()};
+  RETURN_IF_FAIL(sp_data_model_manager->CreateDataModelObject(
+      local_data_model.Get(), &sp_local_data_model_));
+
+  // Register that parent model for all known types that act like v8::Local.
+  std::vector<const wchar_t*> handle_class_names = {
+      L"v8::Local<*>", L"v8::MaybeLocal<*>", L"v8::internal::Handle<*>",
+      L"v8::internal::MaybeHandle<*>"};
+  for (const wchar_t* name : handle_class_names) {
+    WRL::ComPtr<IDebugHostTypeSignature> signature;
+    RETURN_IF_FAIL(
+        sp_debug_host_symbols->CreateTypeSignature(name, nullptr, &signature));
+    RETURN_IF_FAIL(sp_data_model_manager->RegisterModelForTypeSignature(
+        signature.Get(), sp_local_data_model_.Get()));
+    registered_types_.push_back({signature.Get(), sp_local_data_model_.Get()});
+  }
+
+  // Add the 'Value' property to the parent model.
+  auto local_value_property{WRL::Make<V8LocalValueProperty>()};
+  WRL::ComPtr<IModelObject> sp_local_value_property_model;
+  RETURN_IF_FAIL(CreateProperty(sp_data_model_manager.Get(),
+                                local_value_property.Get(),
+                                &sp_local_value_property_model));
+  RETURN_IF_FAIL(sp_local_data_model_->SetKey(
+      L"Value", sp_local_value_property_model.Get(), nullptr));
+
+  // Register all function aliases.
+  std::vector<std::pair<const wchar_t*, WRL::ComPtr<IModelMethod>>> functions =
+      {{pcur_isolate, WRL::Make<CurrIsolateAlias>()},
+       {plist_chunks, WRL::Make<ListChunksAlias>()},
+       {pv8_object, WRL::Make<InspectV8ObjectMethod>()}};
+  for (const auto& function : functions) {
+    WRL::ComPtr<IModelObject> method;
+    RETURN_IF_FAIL(CreateMethod(sp_data_model_manager.Get(),
+                                function.second.Get(), &method));
+    RETURN_IF_FAIL(sp_debug_host_extensibility->CreateFunctionAlias(
+        function.first, method.Get()));
+  }
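+  // Once registered, these aliases can be invoked from the debugger, e.g.
+  // (illustrative values; the accepted arguments are defined by the
+  // corresponding IModelMethod implementations):
+  //   dx @$curisolate()
+  //   dx @$listchunks()
+  //   dx @$v8object(0x34f49880471, "v8::internal::JSArray")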
+
+  // Register a handler for supplying stack frame locals. It has to override the
+  // getter functions for "LocalVariables" and "Parameters".
+  WRL::ComPtr<IModelObject> stack_frame;
+  RETURN_IF_FAIL(sp_data_model_manager->AcquireNamedModel(
+      L"Debugger.Models.StackFrame", &stack_frame));
+  RETURN_IF_FAIL(OverrideLocalsGetter(stack_frame.Get(), L"LocalVariables",
+                                      /*is_parameters=*/false));
+  RETURN_IF_FAIL(OverrideLocalsGetter(stack_frame.Get(), L"Parameters",
+                                      /*is_parameters=*/true));
+
+  // Add node_id property for v8::internal::compiler::Node.
+  RETURN_IF_FAIL(
+      RegisterAndAddPropertyForClass<V8InternalCompilerNodeIdProperty>(
+          L"v8::internal::compiler::Node", L"node_id",
+          sp_compiler_node_data_model_));
+
+  // Add bitset_name property for v8::internal::compiler::Type.
+  RETURN_IF_FAIL(
+      RegisterAndAddPropertyForClass<V8InternalCompilerBitsetNameProperty>(
+          L"v8::internal::compiler::Type", L"bitset_name",
+          sp_compiler_type_data_model_));
+
+  return S_OK;
+}
+
+template <class PropertyClass>
+HRESULT Extension::RegisterAndAddPropertyForClass(
+    const wchar_t* class_name, const wchar_t* property_name,
+    WRL::ComPtr<IModelObject> sp_data_model) {
+  // Create an instance of the DataModel parent class.
+  auto instance_data_model{WRL::Make<V8LocalDataModel>()};
+  RETURN_IF_FAIL(sp_data_model_manager->CreateDataModelObject(
+      instance_data_model.Get(), &sp_data_model));
+
+  // Register that parent model.
+  WRL::ComPtr<IDebugHostTypeSignature> class_signature;
+  RETURN_IF_FAIL(sp_debug_host_symbols->CreateTypeSignature(class_name, nullptr,
+                                                            &class_signature));
+  RETURN_IF_FAIL(sp_data_model_manager->RegisterModelForTypeSignature(
+      class_signature.Get(), sp_data_model.Get()));
+  registered_types_.push_back({class_signature.Get(), sp_data_model.Get()});
+
+  // Add the property to the parent model.
+  auto property{WRL::Make<PropertyClass>()};
+  WRL::ComPtr<IModelObject> sp_property_model;
+  RETURN_IF_FAIL(CreateProperty(sp_data_model_manager.Get(), property.Get(),
+                                &sp_property_model));
+  RETURN_IF_FAIL(
+      sp_data_model->SetKey(property_name, sp_property_model.Get(), nullptr));
+
+  return S_OK;
+}
+
+HRESULT Extension::OverrideLocalsGetter(IModelObject* stack_frame,
+                                        const wchar_t* key_name,
+                                        bool is_parameters) {
+  WRL::ComPtr<IModelObject> original_boxed_getter;
+  WRL::ComPtr<IKeyStore> original_getter_metadata;
+  RETURN_IF_FAIL(stack_frame->GetKey(key_name, &original_boxed_getter,
+                                     &original_getter_metadata));
+  WRL::ComPtr<IModelPropertyAccessor> original_getter;
+  RETURN_IF_FAIL(UnboxProperty(original_boxed_getter.Get(), &original_getter));
+  auto new_getter{WRL::Make<V8LocalVariables>(original_getter, is_parameters)};
+  WRL::ComPtr<IModelObject> new_boxed_getter;
+  RETURN_IF_FAIL(CreateProperty(sp_data_model_manager.Get(), new_getter.Get(),
+                                &new_boxed_getter));
+  RETURN_IF_FAIL(stack_frame->SetKey(key_name, new_boxed_getter.Get(),
+                                     original_getter_metadata.Get()));
+  overridden_properties_.push_back(
+      {stack_frame, reinterpret_cast<const char16_t*>(key_name),
+       original_boxed_getter.Get(), original_getter_metadata.Get()});
+  return S_OK;
+}
+
+Extension::PropertyOverride::PropertyOverride() = default;
+Extension::PropertyOverride::PropertyOverride(IModelObject* parent,
+                                              std::u16string key_name,
+                                              IModelObject* original_value,
+                                              IKeyStore* original_metadata)
+    : parent(parent),
+      key_name(std::move(key_name)),
+      original_value(original_value),
+      original_metadata(original_metadata) {}
+Extension::PropertyOverride::~PropertyOverride() = default;
+Extension::PropertyOverride::PropertyOverride(const PropertyOverride&) =
+    default;
+Extension::PropertyOverride& Extension::PropertyOverride::operator=(
+    const PropertyOverride&) = default;
+
+Extension::RegistrationType::RegistrationType() = default;
+Extension::RegistrationType::RegistrationType(
+    IDebugHostTypeSignature* sp_signature, IModelObject* sp_data_model)
+    : sp_signature(sp_signature), sp_data_model(sp_data_model) {}
+Extension::RegistrationType::~RegistrationType() = default;
+Extension::RegistrationType::RegistrationType(const RegistrationType&) =
+    default;
+Extension::RegistrationType& Extension::RegistrationType::operator=(
+    const RegistrationType&) = default;
+
+Extension::~Extension() {
+  sp_debug_host_extensibility->DestroyFunctionAlias(pcur_isolate);
+  sp_debug_host_extensibility->DestroyFunctionAlias(plist_chunks);
+  sp_debug_host_extensibility->DestroyFunctionAlias(pv8_object);
+
+  for (const auto& registered : registered_types_) {
+    sp_data_model_manager->UnregisterModelForTypeSignature(
+        registered.sp_data_model.Get(), registered.sp_signature.Get());
+  }
+
+  for (const auto& override : overridden_properties_) {
+    override.parent->SetKey(
+        reinterpret_cast<const wchar_t*>(override.key_name.c_str()),
+        override.original_value.Get(), override.original_metadata.Get());
+  }
+}
diff --git a/src/third_party/v8/tools/v8windbg/src/v8windbg-extension.h b/src/third_party/v8/tools/v8windbg/src/v8windbg-extension.h
new file mode 100644
index 0000000..4633161
--- /dev/null
+++ b/src/third_party/v8/tools/v8windbg/src/v8windbg-extension.h
@@ -0,0 +1,98 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TOOLS_V8WINDBG_SRC_V8WINDBG_EXTENSION_H_
+#define V8_TOOLS_V8WINDBG_SRC_V8WINDBG_EXTENSION_H_
+
+#include <memory>
+#include <unordered_map>
+#include <vector>
+
+#include "tools/v8windbg/base/utilities.h"
+
+// Responsible for initializing and uninitializing the extension. Also provides
+// various convenience functions.
+class Extension {
+ public:
+  Extension();
+  HRESULT Initialize();
+  ~Extension();
+  WRL::ComPtr<IDebugHostModule> GetV8Module(
+      WRL::ComPtr<IDebugHostContext>& sp_ctx);
+  WRL::ComPtr<IDebugHostType> GetTypeFromV8Module(
+      WRL::ComPtr<IDebugHostContext>& sp_ctx, const char16_t* type_name);
+  WRL::ComPtr<IDebugHostType> GetV8ObjectType(
+      WRL::ComPtr<IDebugHostContext>& sp_ctx);
+  void TryRegisterType(WRL::ComPtr<IDebugHostType>& sp_type,
+                       std::u16string type_name);
+  bool DoesTypeDeriveFromObject(const WRL::ComPtr<IDebugHostType>& sp_type);
+  static Extension* Current() { return current_extension_.get(); }
+  static void SetExtension(std::unique_ptr<Extension> new_extension) {
+    current_extension_ = std::move(new_extension);
+  }
+
+  // Returns the parent model for instances of v8::internal::Object and similar
+  // classes, which contain as their first and only field a tagged V8 value.
+  IModelObject* GetObjectDataModel() { return sp_object_data_model_.Get(); }
+
+  // Returns the parent model that provides indexing support for fields that
+  // contain arrays of something more complicated than basic native types.
+  IModelObject* GetIndexedFieldDataModel() {
+    return sp_indexed_field_model_.Get();
+  }
+
+ private:
+  HRESULT OverrideLocalsGetter(IModelObject* parent, const wchar_t* key_name,
+                               bool is_parameters);
+
+  template <class PropertyClass>
+  HRESULT RegisterAndAddPropertyForClass(
+      const wchar_t* class_name, const wchar_t* property_name,
+      WRL::ComPtr<IModelObject> sp_data_model);
+
+  // A property that has been overridden by this extension. The original value
+  // must be put back in place during ~Extension.
+  struct PropertyOverride {
+    PropertyOverride();
+    PropertyOverride(IModelObject* parent, std::u16string key_name,
+                     IModelObject* original_value,
+                     IKeyStore* original_metadata);
+    ~PropertyOverride();
+    PropertyOverride(const PropertyOverride&);
+    PropertyOverride& operator=(const PropertyOverride&);
+    WRL::ComPtr<IModelObject> parent;
+    std::u16string key_name;
+    WRL::ComPtr<IModelObject> original_value;
+    WRL::ComPtr<IKeyStore> original_metadata;
+  };
+
+  struct RegistrationType {
+    RegistrationType();
+    RegistrationType(IDebugHostTypeSignature* sp_signature,
+                     IModelObject* sp_data_model);
+    ~RegistrationType();
+    RegistrationType(const RegistrationType&);
+    RegistrationType& operator=(const RegistrationType&);
+
+    WRL::ComPtr<IDebugHostTypeSignature> sp_signature;
+    WRL::ComPtr<IModelObject> sp_data_model;
+  };
+
+  static std::unique_ptr<Extension> current_extension_;
+
+  WRL::ComPtr<IModelObject> sp_object_data_model_;
+  WRL::ComPtr<IModelObject> sp_local_data_model_;
+  WRL::ComPtr<IModelObject> sp_compiler_node_data_model_;
+  WRL::ComPtr<IModelObject> sp_compiler_type_data_model_;
+  WRL::ComPtr<IModelObject> sp_indexed_field_model_;
+
+  WRL::ComPtr<IDebugHostModule> sp_v8_module_;
+  std::unordered_map<std::u16string, WRL::ComPtr<IDebugHostType>>
+      cached_v8_module_types_;
+  std::vector<RegistrationType> registered_types_;
+  std::vector<PropertyOverride> overridden_properties_;
+  WRL::ComPtr<IDebugHostContext> sp_v8_module_ctx_;
+  ULONG v8_module_proc_id_;
+};
+#endif  // V8_TOOLS_V8WINDBG_SRC_V8WINDBG_EXTENSION_H_
diff --git a/src/third_party/v8/tools/v8windbg/test/debug-callbacks.cc b/src/third_party/v8/tools/v8windbg/test/debug-callbacks.cc
new file mode 100644
index 0000000..0b11195
--- /dev/null
+++ b/src/third_party/v8/tools/v8windbg/test/debug-callbacks.cc
@@ -0,0 +1,94 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "tools/v8windbg/test/debug-callbacks.h"
+
+namespace v8 {
+namespace internal {
+namespace v8windbg_test {
+
+MyOutput::MyOutput(WRL::ComPtr<IDebugClient5> p_client) : p_client_(p_client) {
+  p_client_->SetOutputCallbacks(this);
+}
+
+MyOutput::~MyOutput() { p_client_->SetOutputCallbacks(nullptr); }
+
+HRESULT __stdcall MyOutput::QueryInterface(REFIID InterfaceId,
+                                           PVOID* Interface) {
+  return E_NOTIMPL;
+}
+ULONG __stdcall MyOutput::AddRef(void) { return 0; }
+ULONG __stdcall MyOutput::Release(void) { return 0; }
+HRESULT __stdcall MyOutput::Output(ULONG Mask, PCSTR Text) {
+  if (Mask & DEBUG_OUTPUT_NORMAL) {
+    log_ += Text;
+  }
+  return S_OK;
+}
+
+HRESULT __stdcall MyCallback::QueryInterface(REFIID InterfaceId,
+                                             PVOID* Interface) {
+  return E_NOTIMPL;
+}
+ULONG __stdcall MyCallback::AddRef(void) { return S_OK; }
+ULONG __stdcall MyCallback::Release(void) { return S_OK; }
+HRESULT __stdcall MyCallback::GetInterestMask(PULONG Mask) {
+  *Mask = DEBUG_EVENT_BREAKPOINT | DEBUG_EVENT_CREATE_PROCESS;
+  return S_OK;
+}
+HRESULT __stdcall MyCallback::Breakpoint(PDEBUG_BREAKPOINT Bp) {
+  ULONG64 bp_offset;
+  HRESULT hr = Bp->GetOffset(&bp_offset);
+  if (FAILED(hr)) return hr;
+
+  // Break on breakpoints? Seems reasonable.
+  return DEBUG_STATUS_BREAK;
+}
+HRESULT __stdcall MyCallback::Exception(PEXCEPTION_RECORD64 Exception,
+                                        ULONG FirstChance) {
+  return E_NOTIMPL;
+}
+HRESULT __stdcall MyCallback::CreateThread(ULONG64 Handle, ULONG64 DataOffset,
+                                           ULONG64 StartOffset) {
+  return E_NOTIMPL;
+}
+HRESULT __stdcall MyCallback::ExitThread(ULONG ExitCode) { return E_NOTIMPL; }
+HRESULT __stdcall MyCallback::ExitProcess(ULONG ExitCode) { return E_NOTIMPL; }
+HRESULT __stdcall MyCallback::LoadModule(ULONG64 ImageFileHandle,
+                                         ULONG64 BaseOffset, ULONG ModuleSize,
+                                         PCSTR ModuleName, PCSTR ImageName,
+                                         ULONG CheckSum, ULONG TimeDateStamp) {
+  return E_NOTIMPL;
+}
+HRESULT __stdcall MyCallback::UnloadModule(PCSTR ImageBaseName,
+                                           ULONG64 BaseOffset) {
+  return E_NOTIMPL;
+}
+HRESULT __stdcall MyCallback::SystemError(ULONG Error, ULONG Level) {
+  return E_NOTIMPL;
+}
+HRESULT __stdcall MyCallback::SessionStatus(ULONG Status) { return E_NOTIMPL; }
+HRESULT __stdcall MyCallback::ChangeDebuggeeState(ULONG Flags,
+                                                  ULONG64 Argument) {
+  return E_NOTIMPL;
+}
+HRESULT __stdcall MyCallback::ChangeEngineState(ULONG Flags, ULONG64 Argument) {
+  return E_NOTIMPL;
+}
+HRESULT __stdcall MyCallback::ChangeSymbolState(ULONG Flags, ULONG64 Argument) {
+  return E_NOTIMPL;
+}
+HRESULT __stdcall MyCallback::CreateProcessW(
+    ULONG64 ImageFileHandle, ULONG64 Handle, ULONG64 BaseOffset,
+    ULONG ModuleSize, PCSTR ModuleName, PCSTR ImageName, ULONG CheckSum,
+    ULONG TimeDateStamp, ULONG64 InitialThreadHandle, ULONG64 ThreadDataOffset,
+    ULONG64 StartOffset) {
+  // Should fire once the target process is launched. Break to create
+  // breakpoints, etc.
+  return DEBUG_STATUS_BREAK;
+}
+
+}  // namespace v8windbg_test
+}  // namespace internal
+}  // namespace v8
diff --git a/src/third_party/v8/tools/v8windbg/test/debug-callbacks.h b/src/third_party/v8/tools/v8windbg/test/debug-callbacks.h
new file mode 100644
index 0000000..8855d6f
--- /dev/null
+++ b/src/third_party/v8/tools/v8windbg/test/debug-callbacks.h
@@ -0,0 +1,90 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TOOLS_V8WINDBG_TEST_DEBUG_CALLBACKS_H_
+#define V8_TOOLS_V8WINDBG_TEST_DEBUG_CALLBACKS_H_
+
+#if !defined(UNICODE) || !defined(_UNICODE)
+#error Unicode not defined
+#endif
+
+#include <DbgEng.h>
+#include <DbgModel.h>
+#include <Windows.h>
+#include <crtdbg.h>
+#include <pathcch.h>
+#include <wrl/client.h>
+
+#include <string>
+
+namespace WRL = Microsoft::WRL;
+
+namespace v8 {
+namespace internal {
+namespace v8windbg_test {
+
+class MyOutput : public IDebugOutputCallbacks {
+ public:
+  MyOutput(WRL::ComPtr<IDebugClient5> p_client);
+  ~MyOutput();
+  MyOutput(const MyOutput&) = delete;
+  MyOutput& operator=(const MyOutput&) = delete;
+
+  // Inherited via IDebugOutputCallbacks
+  HRESULT __stdcall QueryInterface(REFIID InterfaceId,
+                                   PVOID* Interface) override;
+  ULONG __stdcall AddRef(void) override;
+  ULONG __stdcall Release(void) override;
+  HRESULT __stdcall Output(ULONG Mask, PCSTR Text) override;
+
+  const std::string& GetLog() const { return log_; }
+  void ClearLog() { log_.clear(); }
+
+ private:
+  WRL::ComPtr<IDebugClient5> p_client_;
+  std::string log_;
+};
+
+// For return values, see:
+// https://docs.microsoft.com/en-us/windows-hardware/drivers/debugger/debug-status-xxx
+class MyCallback : public IDebugEventCallbacks {
+ public:
+  // Inherited via IDebugEventCallbacks
+  HRESULT __stdcall QueryInterface(REFIID InterfaceId,
+                                   PVOID* Interface) override;
+  ULONG __stdcall AddRef(void) override;
+  ULONG __stdcall Release(void) override;
+  HRESULT __stdcall GetInterestMask(PULONG Mask) override;
+  HRESULT __stdcall Breakpoint(PDEBUG_BREAKPOINT Bp) override;
+  HRESULT __stdcall Exception(PEXCEPTION_RECORD64 Exception,
+                              ULONG FirstChance) override;
+  HRESULT __stdcall CreateThread(ULONG64 Handle, ULONG64 DataOffset,
+                                 ULONG64 StartOffset) override;
+  HRESULT __stdcall ExitThread(ULONG ExitCode) override;
+  HRESULT __stdcall ExitProcess(ULONG ExitCode) override;
+  HRESULT __stdcall LoadModule(ULONG64 ImageFileHandle, ULONG64 BaseOffset,
+                               ULONG ModuleSize, PCSTR ModuleName,
+                               PCSTR ImageName, ULONG CheckSum,
+                               ULONG TimeDateStamp) override;
+  HRESULT __stdcall UnloadModule(PCSTR ImageBaseName,
+                                 ULONG64 BaseOffset) override;
+  HRESULT __stdcall SystemError(ULONG Error, ULONG Level) override;
+  HRESULT __stdcall SessionStatus(ULONG Status) override;
+  HRESULT __stdcall ChangeDebuggeeState(ULONG Flags, ULONG64 Argument) override;
+  HRESULT __stdcall ChangeEngineState(ULONG Flags, ULONG64 Argument) override;
+  HRESULT __stdcall ChangeSymbolState(ULONG Flags, ULONG64 Argument) override;
+  HRESULT __stdcall CreateProcessW(ULONG64 ImageFileHandle, ULONG64 Handle,
+                                   ULONG64 BaseOffset, ULONG ModuleSize,
+                                   PCSTR ModuleName, PCSTR ImageName,
+                                   ULONG CheckSum, ULONG TimeDateStamp,
+                                   ULONG64 InitialThreadHandle,
+                                   ULONG64 ThreadDataOffset,
+                                   ULONG64 StartOffset) override;
+};
+
+}  // namespace v8windbg_test
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TOOLS_V8WINDBG_TEST_DEBUG_CALLBACKS_H_
diff --git a/src/third_party/v8/tools/v8windbg/test/script.js b/src/third_party/v8/tools/v8windbg/test/script.js
new file mode 100644
index 0000000..6ec21e0
--- /dev/null
+++ b/src/third_party/v8/tools/v8windbg/test/script.js
@@ -0,0 +1,14 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function a() {
+  JSON.stringify({firstProp: 12345, secondProp: null}, function replacer() {});
+}
+
+function b() {
+  var hello = 'hello';
+  a();
+}
+
+b();
diff --git a/src/third_party/v8/tools/v8windbg/test/v8windbg-test.cc b/src/third_party/v8/tools/v8windbg/test/v8windbg-test.cc
new file mode 100644
index 0000000..59414f3
--- /dev/null
+++ b/src/third_party/v8/tools/v8windbg/test/v8windbg-test.cc
@@ -0,0 +1,243 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <cstdio>
+#include <exception>
+#include <vector>
+
+#include "src/base/logging.h"
+#include "tools/v8windbg/test/debug-callbacks.h"
+
+// See the docs at
+// https://docs.microsoft.com/en-us/windows-hardware/drivers/debugger/using-the-debugger-engine-api
+
+namespace v8 {
+namespace internal {
+namespace v8windbg_test {
+
+namespace {
+
+// Loads a named extension library upon construction and unloads it upon
+// destruction.
+class LoadExtensionScope {
+ public:
+  LoadExtensionScope(WRL::ComPtr<IDebugControl4> p_debug_control,
+                     std::wstring extension_path)
+      : p_debug_control_(p_debug_control),
+        extension_path_(std::move(extension_path)) {
+    p_debug_control->AddExtensionWide(extension_path_.c_str(), 0, &ext_handle_);
+    // HACK: The call below fails, but it is required for the extension to
+    // actually initialize; the AddExtension call alone doesn't load and
+    // initialize it.
+    p_debug_control->CallExtension(ext_handle_, "Foo", "Bar");
+  }
+  ~LoadExtensionScope() {
+    // Let the extension uninitialize so it can deallocate memory, meaning any
+    // reported memory leaks should be real bugs.
+    p_debug_control_->RemoveExtension(ext_handle_);
+  }
+
+ private:
+  LoadExtensionScope(const LoadExtensionScope&) = delete;
+  LoadExtensionScope& operator=(const LoadExtensionScope&) = delete;
+  WRL::ComPtr<IDebugControl4> p_debug_control_;
+  ULONG64 ext_handle_;
+  // This string is part of the heap snapshot when the extension loads, so keep
+  // it alive until after the extension unloads and checks for any heap changes.
+  std::wstring extension_path_;
+};
+
+// Initializes COM upon construction and uninitializes it upon destruction.
+class ComScope {
+ public:
+  ComScope() { hr_ = CoInitializeEx(nullptr, COINIT_MULTITHREADED); }
+  ~ComScope() {
+    // "To close the COM library gracefully on a thread, each successful call to
+    // CoInitialize or CoInitializeEx, including any call that returns S_FALSE,
+    // must be balanced by a corresponding call to CoUninitialize."
+    // https://docs.microsoft.com/en-us/windows/win32/api/combaseapi/nf-combaseapi-coinitializeex
+    if (SUCCEEDED(hr_)) {
+      CoUninitialize();
+    }
+  }
+  HRESULT hr() { return hr_; }
+
+ private:
+  HRESULT hr_;
+};
+
+// Sets a breakpoint. Returns S_OK if the function name resolved successfully
+// and the breakpoint is in a non-deferred state.
+HRESULT SetBreakpoint(WRL::ComPtr<IDebugControl4> p_debug_control,
+                      const char* function_name) {
+  WRL::ComPtr<IDebugBreakpoint> bp;
+  HRESULT hr =
+      p_debug_control->AddBreakpoint(DEBUG_BREAKPOINT_CODE, DEBUG_ANY_ID, &bp);
+  if (FAILED(hr)) return hr;
+  hr = bp->SetOffsetExpression(function_name);
+  if (FAILED(hr)) return hr;
+  hr = bp->AddFlags(DEBUG_BREAKPOINT_ENABLED);
+  if (FAILED(hr)) return hr;
+
+  // Check whether the symbol could be found.
+  uint64_t offset;
+  hr = bp->GetOffset(&offset);
+  return hr;
+}
+
+// Sets a breakpoint. Depending on the build configuration, the function might
+// be in the v8, d8, or d8_exe module, so this function tries each in turn.
+HRESULT SetBreakpointInV8OrD8(WRL::ComPtr<IDebugControl4> p_debug_control,
+                              const std::string& function_name) {
+  // Component builds call the V8 module "v8". Try this first, because there is
+  // also a module named "d8" or "d8_exe" where we should avoid attempting to
+  // set a breakpoint.
+  HRESULT hr = SetBreakpoint(p_debug_control, ("v8!" + function_name).c_str());
+  if (SUCCEEDED(hr)) return hr;
+
+  // x64 release builds call it "d8".
+  hr = SetBreakpoint(p_debug_control, ("d8!" + function_name).c_str());
+  if (SUCCEEDED(hr)) return hr;
+
+  // x86 release builds call it "d8_exe".
+  return SetBreakpoint(p_debug_control, ("d8_exe!" + function_name).c_str());
+}
+
+void RunAndCheckOutput(const char* friendly_name, const char* command,
+                       std::vector<const char*> expected_substrings,
+                       MyOutput* output, IDebugControl4* p_debug_control) {
+  output->ClearLog();
+  CHECK(SUCCEEDED(p_debug_control->Execute(DEBUG_OUTCTL_ALL_CLIENTS, command,
+                                           DEBUG_EXECUTE_ECHO)));
+  for (const char* expected : expected_substrings) {
+    CHECK(output->GetLog().find(expected) != std::string::npos);
+  }
+}
+
+}  // namespace
+
+void RunTests() {
+  // Initialize COM... Though it doesn't seem to matter if you don't!
+  ComScope com_scope;
+  CHECK(SUCCEEDED(com_scope.hr()));
+
+  // Get the file path of the module containing this test function. It should be
+  // in the output directory alongside the data dependencies required by this
+  // test (d8.exe, v8windbg.dll, and v8windbg-test-script.js).
+  HMODULE module = nullptr;
+  bool success =
+      GetModuleHandleEx(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS |
+                            GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
+                        reinterpret_cast<LPCWSTR>(&RunTests), &module);
+  CHECK(success);
+  wchar_t this_module_path[MAX_PATH];
+  DWORD path_size = GetModuleFileName(module, this_module_path, MAX_PATH);
+  CHECK(path_size != 0);
+  HRESULT hr = PathCchRemoveFileSpec(this_module_path, MAX_PATH);
+  CHECK(SUCCEEDED(hr));
+
+  // Get the Debug client
+  WRL::ComPtr<IDebugClient5> p_client;
+  hr = DebugCreate(__uuidof(IDebugClient5), &p_client);
+  CHECK(SUCCEEDED(hr));
+
+  WRL::ComPtr<IDebugSymbols3> p_symbols;
+  hr = p_client->QueryInterface(__uuidof(IDebugSymbols3), &p_symbols);
+  CHECK(SUCCEEDED(hr));
+
+  // Symbol loading fails if the pdb is in the same folder as the exe but that
+  // folder is not on the symbol path.
+  hr = p_symbols->SetSymbolPathWide(this_module_path);
+  CHECK(SUCCEEDED(hr));
+
+  // Set the event callbacks
+  MyCallback callback;
+  hr = p_client->SetEventCallbacks(&callback);
+  CHECK(SUCCEEDED(hr));
+
+  // Launch the process with the debugger attached
+  std::wstring command_line =
+      std::wstring(L"\"") + this_module_path + L"\\d8.exe\" \"" +
+      this_module_path + L"\\obj\\tools\\v8windbg\\v8windbg-test-script.js\"";
+  DEBUG_CREATE_PROCESS_OPTIONS proc_options;
+  proc_options.CreateFlags = DEBUG_PROCESS;
+  proc_options.EngCreateFlags = 0;
+  proc_options.VerifierFlags = 0;
+  proc_options.Reserved = 0;
+  hr = p_client->CreateProcessWide(
+      0, const_cast<wchar_t*>(command_line.c_str()), DEBUG_PROCESS);
+  CHECK(SUCCEEDED(hr));
+
+  // Wait for the attach event
+  WRL::ComPtr<IDebugControl4> p_debug_control;
+  hr = p_client->QueryInterface(__uuidof(IDebugControl4), &p_debug_control);
+  CHECK(SUCCEEDED(hr));
+  hr = p_debug_control->WaitForEvent(0, INFINITE);
+  CHECK(SUCCEEDED(hr));
+
+  // Break again after non-delay-load modules are loaded.
+  hr = p_debug_control->AddEngineOptions(DEBUG_ENGOPT_INITIAL_BREAK);
+  CHECK(SUCCEEDED(hr));
+  hr = p_debug_control->WaitForEvent(0, INFINITE);
+  CHECK(SUCCEEDED(hr));
+
+  // Set a breakpoint in a C++ function called by the script.
+  hr = SetBreakpointInV8OrD8(p_debug_control, "v8::internal::JsonStringify");
+  CHECK(SUCCEEDED(hr));
+
+  hr = p_debug_control->SetExecutionStatus(DEBUG_STATUS_GO);
+  CHECK(SUCCEEDED(hr));
+
+  // Wait for the breakpoint.
+  hr = p_debug_control->WaitForEvent(0, INFINITE);
+  CHECK(SUCCEEDED(hr));
+
+  ULONG type, proc_id, thread_id, desc_used;
+  byte desc[1024];
+  hr = p_debug_control->GetLastEventInformation(
+      &type, &proc_id, &thread_id, nullptr, 0, nullptr,
+      reinterpret_cast<PSTR>(desc), 1024, &desc_used);
+  CHECK(SUCCEEDED(hr));
+
+  LoadExtensionScope extension_loaded(
+      p_debug_control, this_module_path + std::wstring(L"\\v8windbg.dll"));
+
+  // Set the output callbacks after the extension is loaded, so it gets
+  // destroyed before the extension unloads. This avoids reporting incorrectly
+  // reporting that the output buffer was leaked during extension teardown.
+  MyOutput output(p_client);
+
+  // Set stepping mode.
+  hr = p_debug_control->SetCodeLevel(DEBUG_LEVEL_SOURCE);
+  CHECK(SUCCEEDED(hr));
+
+  // Do some actual testing
+  RunAndCheckOutput("bitfields",
+                    "p;dx replacer.Value.shared_function_info.flags",
+                    {"kNamedExpression"}, &output, p_debug_control.Get());
+
+  RunAndCheckOutput("in-object properties",
+                    "dx object.Value.@\"in-object properties\"[1]",
+                    {"NullValue", "Oddball"}, &output, p_debug_control.Get());
+
+  RunAndCheckOutput(
+      "arrays of structs",
+      "dx object.Value.map.instance_descriptors.descriptors[1].key",
+      {"\"secondProp\"", "SeqOneByteString"}, &output, p_debug_control.Get());
+
+  RunAndCheckOutput(
+      "local variables",
+      "dx -r1 @$curthread.Stack.Frames.Where(f => "
+      "f.ToDisplayString().Contains(\"InterpreterEntryTrampoline\")).Skip(1)."
+      "First().LocalVariables.@\"memory interpreted as Objects\"",
+      {"\"hello\""}, &output, p_debug_control.Get());
+
+  // Detach before exiting
+  hr = p_client->DetachProcesses();
+  CHECK(SUCCEEDED(hr));
+}
+
+}  // namespace v8windbg_test
+}  // namespace internal
+}  // namespace v8
diff --git a/src/third_party/v8/tools/v8windbg/test/v8windbg-test.h b/src/third_party/v8/tools/v8windbg/test/v8windbg-test.h
new file mode 100644
index 0000000..48a8454
--- /dev/null
+++ b/src/third_party/v8/tools/v8windbg/test/v8windbg-test.h
@@ -0,0 +1,18 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TOOLS_V8WINDBG_TEST_V8WINDBG_TEST_H_
+#define V8_TOOLS_V8WINDBG_TEST_V8WINDBG_TEST_H_
+
+namespace v8 {
+namespace internal {
+namespace v8windbg_test {
+
+void RunTests();
+
+}  // namespace v8windbg_test
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TOOLS_V8WINDBG_TEST_V8WINDBG_TEST_H_
diff --git a/src/third_party/v8/tools/valgrind/asan/dummy b/src/third_party/v8/tools/valgrind/asan/dummy
new file mode 100644
index 0000000..0e89814
--- /dev/null
+++ b/src/third_party/v8/tools/valgrind/asan/dummy
@@ -0,0 +1,2 @@
+# src/base has some more tools in this folder, which we don't use. But we need
+# to have the folder so that the data deps we inherit don't error out.
\ No newline at end of file
diff --git a/src/third_party/v8/tools/vim/ninja-build.vim b/src/third_party/v8/tools/vim/ninja-build.vim
new file mode 100644
index 0000000..7c88525
--- /dev/null
+++ b/src/third_party/v8/tools/vim/ninja-build.vim
@@ -0,0 +1,116 @@
+" Copyright (c) 2015 the V8 project authors. All rights reserved.
+" Use of this source code is governed by a BSD-style license that can be
+" found in the LICENSE file.
+"
+" Adds a "Compile this file" function, using ninja. On Mac, binds Cmd-k to
+" this command. On Windows, Ctrl-F7 (which is the same as the VS default).
+" On Linux, <Leader>o, which is \o by default ("o"=creates .o files)
+"
+" Adds a "Build this target" function, using ninja. This is not bound
+" to any key by default, but can be used via the :CrBuild command.
+" It builds 'd8' by default, but :CrBuild target1 target2 etc works as well,
+" i.e. :CrBuild all or :CrBuild d8 cctest unittests.
+"
+" Requires that gyp has already generated build.ninja files, and that ninja is
+" in your path (which it is automatically if depot_tools is in your path).
+" Bumps the number of parallel jobs in ninja automatically if goma is
+" detected.
+"
+" Add the following to your .vimrc file:
+"     so /path/to/src/tools/vim/ninja-build.vim
+
+python << endpython
+import os
+import sys
+import vim
+
+
+def path_to_current_buffer():
+  """Returns the absolute path of the current buffer."""
+  return vim.current.buffer.name
+
+
+def path_to_source_root():
+  """Returns the absolute path to the V8 source root."""
+  candidate = os.path.dirname(path_to_current_buffer())
+  # This is a list of directories used to identify the source root. The shorter
+  # the list, the more likely it is to match the wrong directory; the longer it
+  # is, the more likely it is to break when directories are renamed.
+  fingerprints = ['.git', 'build', 'include', 'samples', 'src', 'testing',
+                  'third_party', 'tools']
+  while candidate and not all(
+      [os.path.isdir(os.path.join(candidate, fp)) for fp in fingerprints]):
+    candidate = os.path.dirname(candidate)
+  return candidate
+
+
+def path_to_build_dir(configuration):
+  """Returns <v8_root>/<output_dir>/(Release|Debug)."""
+
+  v8_root = path_to_source_root()
+  sys.path.append(os.path.join(v8_root, 'tools', 'ninja'))
+  from ninja_output import GetNinjaOutputDirectory
+  return GetNinjaOutputDirectory(v8_root, configuration)
+
+
+def compute_ninja_command_for_targets(targets='', configuration=None):
+  build_dir = path_to_build_dir(configuration)
+  build_cmd = ' '.join(['autoninja', '-C', build_dir, targets])
+  vim.command('return "%s"' % build_cmd)
+
+
+def compute_ninja_command_for_current_buffer(configuration=None):
+  """Returns the shell command to compile the file in the current buffer."""
+  build_dir = path_to_build_dir(configuration)
+
+  # ninja needs filepaths for the ^ syntax to be relative to the
+  # build directory.
+  file_to_build = path_to_current_buffer()
+  file_to_build = os.path.relpath(file_to_build, build_dir) + '^'
+  if sys.platform == 'win32':
+    # Escape \ for Vim, and ^ for both Vim and shell.
+    file_to_build = file_to_build.replace('\\', '\\\\').replace('^', '^^^^')
+  compute_ninja_command_for_targets(file_to_build, configuration)
+endpython
+
+fun! s:MakeWithCustomCommand(build_cmd)
+  let l:oldmakepgr = &makeprg
+  let &makeprg=a:build_cmd
+  silent make | cwindow
+  if !has('gui_running')
+    redraw!
+  endif
+  let &makeprg = l:oldmakepgr
+endfun
+
+fun! s:NinjaCommandForCurrentBuffer()
+  python compute_ninja_command_for_current_buffer()
+endfun
+
+fun! s:NinjaCommandForTargets(targets)
+  python compute_ninja_command_for_targets(vim.eval('a:targets'))
+endfun
+
+fun! CrCompileFile()
+  call s:MakeWithCustomCommand(s:NinjaCommandForCurrentBuffer())
+endfun
+
+fun! CrBuild(...)
+  let l:targets = a:0 > 0 ? join(a:000, ' ') : ''
+  if (l:targets !~ '\i')
+    let l:targets = 'd8'
+  endif
+  call s:MakeWithCustomCommand(s:NinjaCommandForTargets(l:targets))
+endfun
+
+command! CrCompileFile call CrCompileFile()
+command! -nargs=* CrBuild call CrBuild(<q-args>)
+
+if has('mac')
+  map <D-k> :CrCompileFile<cr>
+  imap <D-k> <esc>:CrCompileFile<cr>
+elseif has('win32')
+  map <C-F7> :CrCompileFile<cr>
+  imap <C-F7> <esc>:CrCompileFile<cr>
+elseif has('unix')
+  map <Leader>o :CrCompileFile<cr>
+endif
diff --git a/src/third_party/v8/tools/visual_studio/README.txt b/src/third_party/v8/tools/visual_studio/README.txt
new file mode 100644
index 0000000..b199e18
--- /dev/null
+++ b/src/third_party/v8/tools/visual_studio/README.txt
@@ -0,0 +1,12 @@
+The Microsoft Visual Studio project files for including V8 in a Visual
+Studio/Visual C++ Express solution have been retired. If a Visual
+Studio project/solution is needed there is the option of using GYP to
+generate these. Please look in the build directory in the root of the
+V8 project. It contains the required infrastructure and a README.txt
+file explaining how to get started.
+
+Generating Visual Studio projects using GYP is how the Chromium
+project integrated V8 into the Windows build.
+
+The main build system for V8 is still SCons; see the V8 wiki page
+http://code.google.com/p/v8/wiki/BuildingOnWindows for details.
diff --git a/src/third_party/v8/tools/wasm-compilation-hints/inject-compilation-hints.py b/src/third_party/v8/tools/wasm-compilation-hints/inject-compilation-hints.py
new file mode 100755
index 0000000..fd4b65b
--- /dev/null
+++ b/src/third_party/v8/tools/wasm-compilation-hints/inject-compilation-hints.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be found
+# in the LICENSE file.
+
+import argparse
+import io
+import sys
+
+from wasm import *
+
+FUNCTION_SECTION_ID = 3
+
+def parse_args():
+  parser = argparse.ArgumentParser(\
+      description="Inject compilation hints into a Wasm module.")
+  parser.add_argument("-i", "--in-wasm-file", \
+      type=str, \
+      help="original wasm module")
+  parser.add_argument("-o", "--out-wasm-file", \
+      type=str, \
+      help="wasm module with injected hints")
+  parser.add_argument("-x", "--hints-file", \
+      type=str, required=True, \
+      help="binary hints file to be injected as a custom section " + \
+          "'compilationHints'")
+  return parser.parse_args()
+
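+# Example invocation (illustrative file names; per the assert below, the hints
+# file must contain exactly one hint byte per declared function):
+#   ./inject-compilation-hints.py -i module.wasm -x hints.bin -o module.hints.wasm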
+if __name__ == "__main__":
+  args = parse_args()
+  in_wasm_file = args.in_wasm_file if args.in_wasm_file else sys.stdin.fileno()
+  out_wasm_file = args.out_wasm_file if args.out_wasm_file else sys.stdout.fileno()
+  hints_bs = open(args.hints_file, "rb").read()
+  with io.open(in_wasm_file, "rb") as fin:
+    with io.open(out_wasm_file, "wb") as fout:
+      magic_number, bs = read_magic_number(fin)
+      fout.write(bs)
+      version, bs = read_version(fin)
+      fout.write(bs)
+      num_declared_functions = None
+      while True:
+        id, bs = read_varuintN(fin)
+        fout.write(bs)
+        if id is None:
+          break
+        payload_length, bs = read_varuintN(fin)
+        fout.write(bs)
+
+        # Peek into function section for upcoming validity check.
+        if id == FUNCTION_SECTION_ID:
+          num_declared_functions, bs = peek_varuintN(fin)
+
+        bs = fin.read(payload_length)
+        fout.write(bs)
+
+        # Insert the hint section after the function section.
+        if id == FUNCTION_SECTION_ID:
+          assert len(hints_bs) == num_declared_functions, "unexpected number of hints"
+          write_compilation_hints_section(fout, hints_bs)
diff --git a/src/third_party/v8/tools/wasm-compilation-hints/wasm-objdump-compilation-hints.py b/src/third_party/v8/tools/wasm-compilation-hints/wasm-objdump-compilation-hints.py
new file mode 100755
index 0000000..9077f97
--- /dev/null
+++ b/src/third_party/v8/tools/wasm-compilation-hints/wasm-objdump-compilation-hints.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be found
+# in the LICENSE file.
+
+from __future__ import print_function
+import argparse
+import io
+import sys
+
+from wasm import *
+
+def parse_args():
+  parser = argparse.ArgumentParser(\
+      description="Read compilation hints from Wasm module.")
+  parser.add_argument("in_wasm_file", \
+      type=str, \
+      help="wasm module")
+  return parser.parse_args()
+
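+# Example invocation (illustrative file name): list the hints stored in the
+# "compilationHints" custom section of module.wasm:
+#   ./wasm-objdump-compilation-hints.py module.wasm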
+if __name__ == "__main__":
+  args = parse_args()
+  in_wasm_file = args.in_wasm_file if args.in_wasm_file else sys.stdin.fileno()
+  with io.open(in_wasm_file, "rb") as fin:
+    read_magic_number(fin)
+    read_version(fin)
+    while True:
+      id, bs = read_varuintN(fin)
+      if id is None:
+        break
+      payload_length, bs = read_varuintN(fin)
+      if id == CUSTOM_SECTION_ID:
+        section_name_length, section_name_length_bs = read_varuintN(fin)
+        section_name_bs = fin.read(section_name_length)
+        if section_name_bs == b"compilationHints":
+          num_hints, bs = read_varuintN(fin)
+          print("Custom section compilationHints with ", num_hints, "hints:")
+          for i in range(num_hints):
+            hint, bs = read_uint8(fin)
+            print(i, " ", hex(hint))
+        else:
+          remaining_length = payload_length \
+              - len(section_name_length_bs) \
+              - len(section_name_bs)
+          fin.read(remaining_length)
+      else:
+        fin.read(payload_length)
diff --git a/src/third_party/v8/tools/wasm-compilation-hints/wasm.py b/src/third_party/v8/tools/wasm-compilation-hints/wasm.py
new file mode 100644
index 0000000..ae3d084
--- /dev/null
+++ b/src/third_party/v8/tools/wasm-compilation-hints/wasm.py
@@ -0,0 +1,108 @@
+#!/usr/bin/env python
+
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be found
+# in the LICENSE file.
+
+import io
+import math
+import struct
+import sys
+
+CUSTOM_SECTION_ID = 0
+FUNCTION_SECTION_ID = 3
+
+def peek_uint8(fin):
+  bs = fin.peek(1)[:1]
+  if len(bs) != 1:
+    return None, bs
+  return ord(bs[0]), bs
+
+def read_uint8(fin):
+  value, bs = peek_uint8(fin)
+  fin.read(len(bs))
+  return value, bs
+
+def peek_uint32(fin):
+  bs = fin.peek(4)[:4]
+  if len(bs) != 4:
+    return None, bs
+  return ord(bs[0]) | ord(bs[1]) << 8 | ord(bs[2]) << 16 | ord(bs[3]) << 24, bs
+
+def read_uint32(fin):
+  value, bs = peek_uint32(fin)
+  fin.read(len(bs))
+  return value, bs
+
+def peek_varuintN(fin):
+  value = 0
+  shift = 0
+  n = 1
+  while True:
+    bs = fin.peek(n)[:n]
+    if len(bs) < n:
+      return None, bs
+    b = ord(bs[-1])
+    value |= (b & 0x7F) << shift;
+    if (b & 0x80) == 0x00:
+      return value, bs
+    shift += 7;
+    n += 1
+
+def read_varuintN(fin):
+  value, bs = peek_varuintN(fin)
+  fin.read(len(bs))
+  return value, bs
+
+def to_varuintN(value):
+  bs = ""
+  while True:
+    b = value & 0x7F
+    value >>= 7
+    if (value != 0x00):
+      b |= 0x80
+    bs += chr(b)
+    if value == 0x00:
+      return bs
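+
+# Worked example (added for clarity): 300 = 0b10_0101100, so to_varuintN(300)
+# produces the two LEB128 bytes "\xac\x02" (0x2C with the continuation bit set,
+# then 0x02); peek_varuintN/read_varuintN decode them back to 300.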
+
+def write_varuintN(value, fout):
+  bs = to_varuintN(value)
+  fout.write(bs)
+  return bs
+
+def peek_magic_number(fin, expected_magic_number=0x6d736100):
+  magic_number, bs = peek_uint32(fin)
+  assert magic_number == expected_magic_number, "unexpected magic number"
+  return magic_number, bs
+
+def read_magic_number(fin, expected_magic_number=0x6d736100):
+  magic_number, bs = peek_magic_number(fin, expected_magic_number)
+  fin.read(len(bs))
+  return magic_number, bs
+
+def peek_version(fin, expected_version=1):
+  version, bs = peek_uint32(fin)
+  assert version == expected_version, "unexpected version"
+  return version, bs
+
+def read_version(fin, expected_version=1):
+  version, bs = peek_version(fin, expected_version)
+  fin.read(len(bs))
+  return version, bs
+
+def write_custom_section(fout, section_name_bs, payload_bs):
+  section_name_length_bs = to_varuintN(len(section_name_bs))
+  payload_length_bs = to_varuintN(len(section_name_bs) \
+      + len(section_name_length_bs) + len(payload_bs))
+  section_id_bs = to_varuintN(CUSTOM_SECTION_ID)
+  fout.write(section_id_bs)
+  fout.write(payload_length_bs)
+  fout.write(section_name_length_bs)
+  fout.write(section_name_bs)
+  fout.write(payload_bs)
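+
+# Illustrative byte layout produced by write_custom_section for the 16-byte name
+# "compilationHints" and a 3-byte payload (all lengths are LEB128-encoded):
+#   0x00                      custom section id
+#   0x14                      payload length = 1 (name length) + 16 (name) + 3
+#   0x10 "compilationHints"   name length and name bytes
+#   <3 payload bytes>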
+
+def write_compilation_hints_section(fout, hints_bs):
+  num_compilation_hints_bs = to_varuintN(len(hints_bs))
+  section_name_bs = b"compilationHints"
+  payload_bs = num_compilation_hints_bs + hints_bs
+  write_custom_section(fout, section_name_bs, payload_bs)
diff --git a/src/third_party/v8/tools/wasm/update-wasm-fuzzers.sh b/src/third_party/v8/tools/wasm/update-wasm-fuzzers.sh
new file mode 100755
index 0000000..ffd7e01
--- /dev/null
+++ b/src/third_party/v8/tools/wasm/update-wasm-fuzzers.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+set -e
+
+TOOLS_WASM_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+cd ${TOOLS_WASM_DIR}/../..
+
+rm -rf test/fuzzer/wasm_corpus
+
+tools/dev/gm.py x64.release all
+
+mkdir -p test/fuzzer/wasm_corpus
+
+# wasm
+./tools/run-tests.py -j8 --variants=default --timeout=10 --arch=x64 \
+  --mode=release --no-presubmit --extra-flags="--dump-wasm-module \
+  --dump-wasm-module-path=./test/fuzzer/wasm_corpus/" unittests
+./tools/run-tests.py -j8 --variants=default --timeout=10 --arch=x64 \
+  --mode=release --no-presubmit --extra-flags="--dump-wasm-module \
+  --dump-wasm-module-path=./test/fuzzer/wasm_corpus/" wasm-spec-tests/*
+./tools/run-tests.py -j8 --variants=default --timeout=10 --arch=x64 \
+  --mode=release --no-presubmit --extra-flags="--dump-wasm-module \
+  --dump-wasm-module-path=./test/fuzzer/wasm_corpus/" mjsunit/wasm/*
+./tools/run-tests.py -j8 --variants=default --timeout=10 --arch=x64 \
+  --mode=release --no-presubmit --extra-flags="--dump-wasm-module \
+  --dump-wasm-module-path=./test/fuzzer/wasm_corpus/" \
+  $(cd test/; ls cctest/wasm/test-*.cc | \
+  sed -es/wasm\\///g | sed -es/[.]cc/\\/\\*/g)
+
+# Delete items over 20k.
+for x in $(find ./test/fuzzer/wasm_corpus/ -type f -size +20k)
+do
+  rm $x
+done
+
+# Upload changes.
+cd test/fuzzer
+upload_to_google_storage.py -a -b v8-wasm-fuzzer wasm_corpus
diff --git a/src/third_party/v8/tools/wasm/update-wasm-spec-tests.sh b/src/third_party/v8/tools/wasm/update-wasm-spec-tests.sh
new file mode 100755
index 0000000..dc9621a
--- /dev/null
+++ b/src/third_party/v8/tools/wasm/update-wasm-spec-tests.sh
@@ -0,0 +1,149 @@
+#!/bin/bash
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Exit immediately if a command exits with a non-zero status.
+set -e
+
+# Treat unset variables as an error when substituting.
+set -u
+
+# return value of a pipeline is the status of the last command to exit with a
+# non-zero status, or zero if no command exited with a non-zero status
+set -o pipefail
+
+log_and_run() {
+  echo ">>" $*
+  if ! $*; then
+    echo "sub-command failed: $*"
+    exit
+  fi
+}
+
+###############################################################################
+# Setup directories.
+###############################################################################
+
+TOOLS_WASM_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+V8_DIR="${TOOLS_WASM_DIR}/../.."
+SPEC_TEST_DIR=${V8_DIR}/test/wasm-spec-tests
+TMP_DIR=${SPEC_TEST_DIR}/tmp
+
+JS_API_TEST_DIR=${V8_DIR}/test/wasm-js
+
+log_and_run cd ${V8_DIR}
+
+log_and_run rm -rf ${SPEC_TEST_DIR}/tests
+log_and_run mkdir ${SPEC_TEST_DIR}/tests
+
+log_and_run mkdir ${SPEC_TEST_DIR}/tests/proposals
+
+log_and_run rm -rf ${TMP_DIR}
+log_and_run mkdir ${TMP_DIR}
+
+log_and_run rm -rf ${JS_API_TEST_DIR}/tests
+log_and_run mkdir ${JS_API_TEST_DIR}/tests
+log_and_run mkdir ${JS_API_TEST_DIR}/tests/proposals
+
+###############################################################################
+# Generate the spec tests.
+###############################################################################
+
+echo Process spec
+log_and_run cd ${TMP_DIR}
+log_and_run git clone https://github.com/WebAssembly/spec
+log_and_run cd spec/interpreter
+
+# The next step requires that ocaml is installed. See the README.md in
+# https://github.com/WebAssembly/spec/tree/master/interpreter/.
+log_and_run make clean opt
+
+log_and_run cd ${TMP_DIR}/spec/test/core
+log_and_run cp *.wast ${SPEC_TEST_DIR}/tests/
+
+log_and_run ./run.py --wasm ${TMP_DIR}/spec/interpreter/wasm --out ${TMP_DIR}
+log_and_run cp ${TMP_DIR}/*.js ${SPEC_TEST_DIR}/tests/
+
+log_and_run cp -r ${TMP_DIR}/spec/test/js-api/* ${JS_API_TEST_DIR}/tests
+
+###############################################################################
+# Generate the proposal tests.
+###############################################################################
+
+repos='bulk-memory-operations reference-types js-types tail-call simd'
+
+for repo in ${repos}; do
+  echo "Process ${repo}"
+  echo ">> Process core tests"
+  log_and_run cd ${TMP_DIR}
+  log_and_run git clone https://github.com/WebAssembly/${repo}
+  # Compile the spec interpreter to generate the .js test cases later.
+  log_and_run cd ${repo}/interpreter
+  log_and_run make clean opt
+  log_and_run cd ../test/core
+  log_and_run mkdir ${SPEC_TEST_DIR}/tests/proposals/${repo}
+
+  # Iterate over all proposal tests. Those which differ from the spec tests are
+  # copied to the output directory and converted to .js tests.
+  for rel_filename in $(find . -name '*.wast'); do
+    abs_filename=$(realpath $rel_filename)
+    spec_filename=${TMP_DIR}/spec/test/core/${rel_filename}
+    if [ ! -f "$spec_filename" ] || ! cmp -s $abs_filename $spec_filename ; then
+      log_and_run cp ${rel_filename} ${SPEC_TEST_DIR}/tests/proposals/${repo}/
+      log_and_run ./run.py --wasm ../../interpreter/wasm ${rel_filename} --out _build 2> /dev/null
+    fi
+  done
+
+  if ls _build/*.js > /dev/null; then
+    log_and_run cp _build/*.js ${SPEC_TEST_DIR}/tests/proposals/${repo}/
+  fi
+
+  echo ">> Process js-api tests"
+  log_and_run mkdir ${JS_API_TEST_DIR}/tests/proposals/${repo}
+  log_and_run cp -r ${TMP_DIR}/${repo}/test/js-api/* ${JS_API_TEST_DIR}/tests/proposals/${repo}
+  # Delete duplicate tests
+  log_and_run cd ${JS_API_TEST_DIR}/tests
+  for spec_test_name in $(find ./ -name '*.any.js' -not -wholename '*/proposals/*'); do
+    proposal_test_name="proposals/${repo}/${spec_test_name}"
+    if [ -f "$proposal_test_name" ] && cmp -s $spec_test_name $proposal_test_name ; then
+      log_and_run rm $proposal_test_name
+    elif [ -f "$proposal_test_name" ]; then
+      echo "keep" $proposal_test_name
+    fi
+  done
+done
+
+###############################################################################
+# Report and cleanup.
+###############################################################################
+
+cd ${SPEC_TEST_DIR}
+echo
+echo "The following files will get uploaded:"
+ls -R tests
+echo
+
+cd ${JS_API_TEST_DIR}
+ls -R tests
+echo
+
+log_and_run rm -rf ${TMP_DIR}
+
+###############################################################################
+# Upload all spec tests.
+###############################################################################
+
+echo "****************************************************************************"
+echo "* For the following command you first have to authenticate with google cloud"
+echo "* storage. For that you have to execute"
+echo "*"
+echo "* > gsutil.py config"
+echo "*"
+echo "* When the script asks you for your project-id, use 0."
+echo "****************************************************************************"
+log_and_run cd ${SPEC_TEST_DIR}
+log_and_run upload_to_google_storage.py -a -b v8-wasm-spec-tests tests
+
+log_and_run cd ${JS_API_TEST_DIR}
+log_and_run upload_to_google_storage.py -a -b v8-wasm-spec-tests tests
diff --git a/src/third_party/v8/tools/wasm/wasm-import-profiler-end.js b/src/third_party/v8/tools/wasm/wasm-import-profiler-end.js
new file mode 100644
index 0000000..5b5eedd
--- /dev/null
+++ b/src/third_party/v8/tools/wasm/wasm-import-profiler-end.js
@@ -0,0 +1,6 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Code to run at shutdown: print out the profiles for all instances.
+if (typeof WebAssembly.dumpAllProfiles == "function") WebAssembly.dumpAllProfiles();
diff --git a/src/third_party/v8/tools/wasm/wasm-import-profiler.js b/src/third_party/v8/tools/wasm/wasm-import-profiler.js
new file mode 100644
index 0000000..cfbb3fb
--- /dev/null
+++ b/src/third_party/v8/tools/wasm/wasm-import-profiler.js
@@ -0,0 +1,131 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(() => {
+  let all_profiles = [];
+  let instanceMap = new WeakMap();
+  let instanceCounter = 0;
+
+  function instrument(imports, profile) {
+    let orig_imports = imports;
+    return new Proxy(imports, {
+      get: (obj, module_name) => {
+        let orig_module = orig_imports[module_name];
+        return new Proxy(orig_module, {
+          get: (obj, item_name) => {
+            let orig_func = orig_module[item_name];
+            let item = orig_func;
+            if (typeof orig_func == "function") {
+              var full_name = module_name + "." + item_name;
+              print("instrumented " + full_name);
+              profile[full_name] = {name: full_name, count: 0, total: 0};
+              item = function profiled_func(...args) {
+                var before = performance.now();
+                var result = orig_func(...args);
+                var delta = performance.now() - before;
+                var data = profile[full_name];
+                data.count++;
+                data.total += delta;
+                return result;
+              }
+            }
+            return item;
+          }
+        })
+      }
+    });
+  }
+
+  function dumpProfile(profile) {
+    let array = [];
+    for (let key in profile) {
+      if (key == "instanceNum") continue;
+      let data = profile[key];
+      if (data.count == 0) continue;
+      array.push(data);
+    }
+    print(`--- Import profile for instance ${profile.instanceNum} ---`);
+    if (array.length == 0) return;
+    array.sort((a, b) => b.total - a.total);
+    for (let data of array) {
+      print(`${padl(data.name, 30)}: ${padr(data.count, 10)} ${padp(data.total, 10)}ms`);
+    }
+  }
+
+  function padl(s, len) {
+    s = s.toString();
+    while (s.length < len) s = s + " ";
+    return s;
+  }
+  function padr(s, len) {
+    s = s.toString();
+    while (s.length < len) s = " " + s;
+    return s;
+  }
+  function padp(s, len) {
+    s = s.toString();
+    var i = s.indexOf(".");
+    if (i == -1) i = s.length;
+    while (i++ < len) s = " " + s;
+    return s;
+  }
+
+  // patch: WebAssembly.instantiate (async)
+  let orig_instantiate = WebAssembly.instantiate;
+  WebAssembly.instantiate = (m, imports, ...args) => {
+    let profile = {};
+    let promise = orig_instantiate(m, instrument(imports, profile), ...args);
+    promise.then((instance) => {
+      instanceMap.set(instance, profile);
+      all_profiles.push(profile);
+      profile.instanceNum = instanceCounter++;
+    });
+    return promise;
+  }
+
+  // patch: new WebAssembly.Instance (sync)
+  let orig_new_instance = WebAssembly.Instance;
+  WebAssembly.Instance = new Proxy(orig_new_instance, {
+    construct: (target, args) => {
+      let profile = {};
+      args[1] = instrument(args[1], profile);
+      let instance = new orig_new_instance(...args);
+      instanceMap.set(instance, profile);
+      all_profiles.push(profile);
+      profile.instanceNum = instanceCounter++;
+      return instance;
+    }
+  });
+
+  // expose: WebAssembly.dumpProfile(instance)
+  WebAssembly.dumpProfile = (instance) => {
+    let profile = instanceMap.get(instance);
+    if (profile === undefined) return;
+    dumpProfile(profile);
+  }
+  // expose: WebAssembly.clearProfile(instance)
+  WebAssembly.clearProfile = (instance) => {
+    let profile = instanceMap.get(instance);
+    if (profile === undefined) return;
+    for (let key in profile) {
+      if (key == "instanceNum") continue;
+      let data = profile[key];
+      data.count = 0;
+      data.total = 0;
+    }
+  }
+  // expose: WebAssembly.dumpAllProfiles()
+  WebAssembly.dumpAllProfiles = () => {
+    for (let profile of all_profiles) dumpProfile(profile);
+  }
+  // expose: WebAssembly.getProfile(instance)
+  // returns: {
+  //    func_name1: {name: func_name1, count: <num>, total: <num>}
+  //    func_name2: {name: func_name2, count: <num>, total: <num>}
+  //    ...
+  // }
+  WebAssembly.getProfile = (instance) => {
+    return instanceMap.get(instance);
+  }
+})();
diff --git a/src/third_party/v8/tools/whitespace.txt b/src/third_party/v8/tools/whitespace.txt
new file mode 100644
index 0000000..d6024b5
--- /dev/null
+++ b/src/third_party/v8/tools/whitespace.txt
@@ -0,0 +1,17 @@
+You can modify this file to create no-op changelists.
+
+Try to write something funny. And please don't add trailing whitespace.
+
+A Smi balks into a war and says:
+"I'm so deoptimized today!"
+The doubles heard this and started to unbox.
+The Smi looked at them when a crazy v8-autoroll account showed up...
+The autoroller bought a round of Himbeerbrause. Suddenly.....
+The bartender starts to shake the bottles........................
+I can't add trailing whitespaces, so I'm adding this line.......
+I'm starting to think that just adding trailing whitespaces might not be bad.
+
+Because whitespaces are not that funny.....
+Today's answer to life the universe and everything is 12950!
+Today's answer to life the universe and everything is 6728!
+Today's answer to life the universe and everything is 6728!!
diff --git a/src/third_party/v8/tools/windbg.js b/src/third_party/v8/tools/windbg.js
new file mode 100644
index 0000000..3fbc588
--- /dev/null
+++ b/src/third_party/v8/tools/windbg.js
@@ -0,0 +1,891 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/*=============================================================================
+  This is a convenience script for debugging with WinDbg (akin to gdbinit)
+  It can be loaded into WinDbg with: .scriptload full_path\windbg.js
+
+  To print out the help message below in the debugger's command window:
+  !help
+=============================================================================*/
+
+function help() {
+  if (supports_call_command()) {
+  print("--------------------------------------------------------------------");
+  print("  LIVE debugging only");
+  print("--------------------------------------------------------------------");
+  print("  !jlh(\"local_handle_var_name\")");
+  print("      prints object held by the handle");
+  print("      e.g. !jlh(\"key\") or !jlh(\"this->receiver_\")");
+  print("  !job(address_or_taggedint)");
+  print("      prints object at the address, e.g. !job(0x235cb869f9)");
+  print("  !jst() or !jst");
+  print("      prints javascript stack (output goes into the console)");
+  print("  !jsbp() or !jsbp");
+  print("      sets bp in v8::internal::Execution::Call");
+  print("");
+  }
+
+  print("--------------------------------------------------------------------");
+  print("  Setup of the script");
+  print("--------------------------------------------------------------------");
+  print("  !set_module(\"module_name_no_extension\")");
+  print("      we'll try the usual suspects for where v8's code might have");
+  print("      been linked into, but you can also set it manually,");
+  print("      e.g. !set_module(\"v8_for_testing\")");
+  print("  !set_iso(isolate_address)");
+  print("      call this function before using !mem or other heap routines");
+  print("");
+
+  print("--------------------------------------------------------------------");
+  print("  Managed heap");
+  print("--------------------------------------------------------------------");
+  print("  !mem or !mem(\"space1[ space2 ...]\")");
+  print("      prints memory chunks from the 'space' owned by the heap in the");
+  print("      isolate set by !set_iso; valid values for 'space' are:");
+  print("      new, old, map, code, lo [large], nlo [newlarge], ro [readonly]");
+  print("      if no 'space' specified prints memory chunks for all spaces,");
+  print("      e.g. !mem(\"code\"), !mem(\"ro new old\")");
+  print("  !where(address)");
+  print("      prints name of the space and address of the MemoryChunk the");
+  print("      'address' is from, e.g. !where(0x235cb869f9)");
+  print("  !rs(chunk_address, set_id = 0)");
+  print("      prints slots from the remembered set in the MemoryChunk. If");
+  print("      'chunk_address' isn't specified, prints for all chunks in the");
+  print("      old space; 'set_id' should match RememberedSetType enum,");
+  print("      e.g. !rs, !rs 0x2fb14780000, !rs(0x2fb14780000, 1)");
+  print("");
+
+  print("--------------------------------------------------------------------");
+  print("  Managed objects");
+  print("--------------------------------------------------------------------");
+  print("  !jot(tagged_addr, depth)");
+  print("      dumps the tree of objects using 'tagged_addr' as a root,");
+  print("      assumes that pointer fields are aligned at ptr_size boundary,");
+  print("      unspecified depth means 'unlimited',");
+  print("      e.g. !jot(0x235cb869f9, 2), !jot 0x235cb869f9");
+  print("  !jo_in_range(start_addr, end_addr)");
+  print("      prints address/map pointers of objects found inside the range");
+  print("      specified by 'start_addr' and 'end_addr', assumes the object");
+  print("      pointers to be aligned at ptr_size boundary,");
+  print("      e.g. !jo_in_range(0x235cb869f8 - 0x100, 0x235cb869f8 + 0x1a0");
+  print("  !jo_prev(address, max_slots = 100)");
+  print("      prints address and map pointer of the nearest object within");
+  print("      'max_slots' before the given 'address', assumes the object");
+  print("      pointers to be aligned at ptr_size boundary,");
+  print("      e.g. !jo_prev 0x235cb869f8, !jo_prev(0x235cb869f9, 16)");
+  print("  !jo_next(address, max_slots = 100)");
+  print("      prints address and map pointer of the nearest object within");
+  print("      'max_slots' following the given 'address', assumes the object");
+  print("      pointers to be aligned at ptr_size boundary,");
+  print("      e.g. !jo_next 0x235cb869f8, !jo_next(0x235cb869f9, 20)");
+  print("");
+
+  print("--------------------------------------------------------------------");
+  print("  Miscellaneous");
+  print("--------------------------------------------------------------------");
+  print("  !dp(address, count = 10)");
+  print("      similar to the built-in 'dp' command but augments output with");
+  print("      more data for values that are managed pointers, note that it");
+  print("      aligns the given 'address' at ptr_sized boundary,");
+  print("      e.g. !dp 0x235cb869f9, !dp(0x235cb869f9, 500), !dp @rsp");
+  print("  !handles(print_handles = false)");
+  print("      prints stats for handles, if 'print_handles' is true will");
+  print("      output all handles as well,");
+  print("      e.g. !handles, !handles(), !handles(true)");
+  print("");
+
+  print("--------------------------------------------------------------------");
+  print("  To run any function from this script (live or postmortem):");
+  print("");
+  print("  dx @$scriptContents.function_name(args)");
+  print("      e.g. dx @$scriptContents.pointer_size()");
+  print("      e.g. dx @$scriptContents.is_map(0x235cb869f9)");
+  print("--------------------------------------------------------------------");
+}
+
+/*=============================================================================
+  On script load
+=============================================================================*/
+
+/*=============================================================================
+  Output
+=============================================================================*/
+function print(s) {
+  host.diagnostics.debugLog(s + "\n");
+}
+
+function inspect(s) {
+  for (let k of Reflect.ownKeys(s)) {
+    // Attempting to print either of:
+    // 'Reflect.get(s, k)', 'typeof Reflect.get(s, k)', 's[k]'
+    // might throw: "Error: Object does not have a size",
+    // while 'typeof s[k]' returns 'undefined' and prints the full list of
+    // properties. Oh well...
+    print(`${k} => ${typeof s[k]}`);
+  }
+}
+
+function hex(number) {
+  return `0x${number.toString(16)}`;
+}
+
+/*=============================================================================
+  Utils (postmortem and live)
+=============================================================================*/
+// WinDbg wraps large integers (0x80000000+) into an object of a library type
+// that fails the isInteger test (and, consequently, the isSafeInteger test,
+// even if the original value was a safe integer).
+// However, that library type does have a set of methods on it which you can use
+// to force conversion:
+// .asNumber() / .valueOf(): Performs conversion to JavaScript number.
+// Throws if the ordinal part of the 64-bit number does not pack into JavaScript
+// number without loss of precision.
+// .convertToNumber(): Performs conversion to JavaScript number.
+// Does NOT throw if the ordinal part of the 64-bit number does not pack into
+// JavaScript number. This will simply result in loss of precision.
+// The library will also add these methods to the prototype for the standard
+// number prototype. Meaning you can always .asNumber() / .convertToNumber() to
+// get either JavaScript number or the private Int64 type into a JavaScript
+// number.
+// We could use the conversion functions but it seems that doing the conversion
+// via toString is just as good and slightly more generic...
+function int(val) {
+  if (typeof val === 'number') {
+    return Number.isInteger(val) ? val : undefined;
+  }
+  if (typeof val === 'object') {
+    let n = parseInt(val.toString());
+    return isNaN(n) ? undefined : n;
+  }
+  return undefined;
+}
+
+function is_live_session() {
+  // Assume that there is a single session (not sure how to get multiple ones
+  // going, maybe, in kernel debugging?).
+  return (host.namespace.Debugger.Sessions[0].Attributes.Target.IsLiveTarget);
+}
+
+function is_TTD_session() {
+  // Assume that there is a single session (not sure how to get multiple ones
+  // going, maybe, in kernel debugging?).
+  return (host.namespace.Debugger.Sessions[0].Attributes.Target.IsTTDTarget);
+}
+
+function supports_call_command() {
+  return is_live_session() && !is_TTD_session();
+}
+
+function cast(address, type_name) {
+  return host.createTypedObject(address, module_name(), type_name);
+}
+
+function pointer_size() {
+  return host.namespace.Debugger.Sessions[0].Attributes.Machine.PointerSize;
+}
+
+function poi(address) {
+  try {
+    // readMemoryValues throws if cannot read from 'address'.
+    return host.memory.readMemoryValues(address, 1, pointer_size())[0];
+  }
+  catch (e){}
+}
+
+function get_register(name) {
+  return host.namespace.Debugger.State.DebuggerVariables.curthread
+         .Registers.User[name];
+}
+
+// JS doesn't do bitwise operations on large integers, so let's do it ourselves
+// using hex string representation.
+function bitwise_and(l, r) {
+  l = hex(l);
+  let l_length = l.length;
+  r = hex(r);
+  let r_length = r.length;
+  let res = "";
+  let length = Math.min(l_length, r_length) - 2;  // to account for "0x"
+  for (let i = 1; i <= length; i++) {
+    res = (parseInt(l[l_length - i], 16) & parseInt(r[r_length - i], 16))
+          .toString(16) + res;
+  }
+  return parseInt(res, 16);
+}
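+// Worked example: bitwise_and(0x7ff812345678, 0xffffffff) compares the two hex
+// strings digit by digit from the right and returns 0x12345678.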
+
+
+/*=============================================================================
+  Script setup
+=============================================================================*/
+// In debug builds v8 code is compiled into v8.dll, and in release builds
+// the code is compiled directly into the executable. If you are debugging some
+// other embedder, run !set_module and provide the module name to use.
+const known_exes = ["d8", "unittests", "mksnapshot", "chrome", "chromium"];
+let module_name_cache;
+function module_name(use_this_module) {
+  if (use_this_module) {
+    module_name_cache = use_this_module;
+  }
+
+  if (!module_name_cache) {
+    let v8 = host.namespace.Debugger.State.DebuggerVariables.curprocess
+             .Modules.Where(
+                function(m) {
+                 return m.Name.indexOf("\\v8.dll") !== -1;
+                });
+
+    let v8_test = host.namespace.Debugger.State.DebuggerVariables.curprocess
+                  .Modules.Where(
+                      function(m) {
+                      return m.Name.indexOf("\\v8_for_testing.dll") !== -1;
+                      });
+
+    if (v8.Count() > 0) {
+      module_name_cache = "v8";
+    }
+    else if (v8_test.Count() > 0) {
+      module_name_cache = "v8_for_testing";
+    }
+    else {
+      for (let exe_name of known_exes) {
+        let exe = host.namespace.Debugger.State.DebuggerVariables.curprocess
+                  .Modules.Where(
+                    function(m) {
+                      return m.Name.indexOf(`\\${exe_name}.exe`) !== -1;
+                    });
+        if (exe.Count() > 0) {
+            module_name_cache = exe_name;
+            break;
+        }
+      }
+    }
+  }
+
+  if (!module_name_cache) {
+    print(`ERROR. Couldn't determine module name for v8's symbols.`);
+    print(`Please run !set_module (e.g. "!set_module \"v8_for_testing\"")`);
+  }
+  return module_name_cache;
+};
+
+let using_ptr_compr = false;
+let isolate_address = 0;
+function set_isolate_address(addr, ptr_compr) {
+  isolate_address = addr;
+
+  if (typeof ptr_compr === 'undefined') {
+    ptr_compr = (bitwise_and(isolate_address, 0xffffffff) == 0);
+  }
+  using_ptr_compr = ptr_compr;
+
+  if (using_ptr_compr) {
+    print("The target is using pointer compression.");
+  }
+}
+
+
+/*=============================================================================
+  Wrappers around V8's printing functions and other utils for live-debugging
+=============================================================================*/
+function make_call(fn) {
+  if (!supports_call_command()) {
+    print("ERROR: This command is supported in live sessions only!");
+    return;
+  }
+
+  // .call resets current frame to the top one, so have to manually remember
+  // and restore it after making the call.
+  let curframe = host.namespace.Debugger.State.DebuggerVariables.curframe;
+  let ctl = host.namespace.Debugger.Utility.Control;
+  let output = ctl.ExecuteCommand(`.call ${fn};g`);
+  curframe.SwitchTo();
+  return output;
+}
+
+function print_object(address) {
+  let output = make_call(`_v8_internal_Print_Object(${decomp(address)})`);
+
+  // skip the first few lines with meta info of .call command
+  let skip_line = true;
+  for (let line of output) {
+    if (!skip_line) {
+      print(line);
+      continue;
+    }
+    if (line.includes("deadlocks and corruption of the debuggee")) {
+      skip_line = false;
+    }
+  }
+}
+
+function print_object_from_handle(handle_to_object) {
+  let handle = host.evaluateExpression(handle_to_object);
+  let location = handle.location_;
+  let pobj = poi(location.address);  // handles use uncompressed pointers
+  print_object(pobj);
+}
+
+function print_js_stack() {
+  make_call("_v8_internal_Print_StackTrace()");
+}
+
+function set_user_js_bp() {
+  let ctl = host.namespace.Debugger.Utility.Control;
+  ctl.ExecuteCommand(`bp ${module_name()}!v8::internal::Execution::Call`)
+}
+
+
+/*=============================================================================
+  Managed heap related functions (live and post-mortem debugging)
+=============================================================================*/
+/*-----------------------------------------------------------------------------
+    Pointer compression
+-----------------------------------------------------------------------------*/
+function tagged_size() {
+  return using_ptr_compr ? 4 : pointer_size();
+}
+
+function get_compressed_ptr_base() {
+  if (!using_ptr_compr) return 0;
+
+  return isolate_address;
+}
+
+function decomp(value) {
+  if (value > 0xffffffff) return value;
+  return get_compressed_ptr_base() + value;
+}
+
+// Adjust for possible pointer compression ('address' is assumed to be on the
+// managed heap).
+function poim(address) {
+  try {
+    // readMemoryValues throws if cannot read from 'address'.
+    return host.memory.readMemoryValues(decomp(address), 1, tagged_size())[0];
+  }
+  catch (e){}
+}
+
+/*-----------------------------------------------------------------------------
+    Exploring objects
+-----------------------------------------------------------------------------*/
+function is_map(addr) {
+  let address = int(addr);
+  if (!Number.isSafeInteger(address) || address % 2 == 0) return false;
+
+  // the first field in all objects, including maps, is a map pointer, but for
+  // maps the pointer is always the same - the meta map that points to itself.
+  const map_addr = int(poim(address - 1));
+  if (!Number.isSafeInteger(map_addr)) return false;
+
+  const map_map_addr = int(poim(map_addr - 1));
+  if (!Number.isSafeInteger(map_map_addr)) return false;
+
+  return (map_addr === map_map_addr);
+}
+
+function is_likely_object(addr) {
+  let address = int(addr);
+  if (!Number.isSafeInteger(address) || address % 2 == 0) return false;
+
+  // the first field in all objects must be a map pointer
+  return is_map(poim(address - 1));
+}
+
+function find_object_near(aligned_addr, max_distance, step_op) {
+  if (!step_op) {
+    const step = tagged_size();
+    const prev =
+      find_object_near(aligned_addr, max_distance, x => x - step);
+    const next =
+      find_object_near(aligned_addr, max_distance, x => x + step);
+
+    if (!prev) return next;
+    if (!next) return prev;
+    return (aligned_addr - prev <= next - aligned_addr) ? prev : next;
+  }
+
+  let maybe_map_addr = poim(aligned_addr);
+  let iters = 0;
+  while (maybe_map_addr && iters < max_distance) {
+    if (is_map(maybe_map_addr)) {
+      return aligned_addr;
+    }
+    aligned_addr = step_op(aligned_addr);
+    maybe_map_addr = poim(aligned_addr);
+    iters++;
+  }
+}
+
+function find_object_prev(addr, max_distance) {
+  if (!Number.isSafeInteger(int(addr))) return;
+
+  const ptr_size = tagged_size();
+  const aligned_addr = addr - (addr % ptr_size);
+  return find_object_near(aligned_addr, max_distance, x => x - ptr_size);
+}
+
+function find_object_next(addr, max_distance) {
+  if (!Number.isSafeInteger(int(addr))) return;
+
+  const ptr_size = tagged_size();
+  const aligned_addr = addr - (addr % ptr_size) + ptr_size;
+  return find_object_near(aligned_addr, max_distance, x => x + ptr_size);
+}
+
+function print_object_prev(addr, max_slots = 100) {
+  let obj_addr = find_object_prev(addr, max_slots);
+  if (!obj_addr) {
+    print(
+      `No object found within ${max_slots} slots prior to ${hex(addr)}`);
+  }
+  else {
+    print(
+      `found object: ${hex(obj_addr + 1)} : ${hex(poim(obj_addr))}`);
+  }
+}
+
+function print_object_next(addr, max_slots = 100) {
+  let obj_addr = find_object_next(addr, max_slots);
+  if (!obj_addr) {
+    print(
+      `No object found within ${max_slots} slots following ${hex(addr)}`);
+  }
+  else {
+    print(
+      `found object: ${hex(obj_addr + 1)} : ${hex(poim(obj_addr))}`);
+  }
+}
+
+// This function assumes that pointers to objects are stored at ptr-size aligned
+// boundaries.
+function print_objects_in_range(start, end){
+  if (!Number.isSafeInteger(int(start)) || !Number.isSafeInteger(int(end))) {
+    return;
+  }
+  const ptr_size = pointer_size();
+  if (start < ptr_size || end <= start) return;
+
+  let iters = (end - start) / ptr_size;
+  let cur = start - ptr_size;
+  print(`===============================================`);
+  print(`objects in range ${hex(start)} - ${hex(end)}`);
+  print(`===============================================`);
+  let count = 0;
+  while (cur && cur < end) {
+    let obj = find_object_next(cur, iters);
+    if (obj) {
+      count++;
+      print(`${hex(obj + 1)} : ${hex(poim(obj))}`);
+      iters  = (end - cur) / ptr_size;
+    }
+    cur = obj + ptr_size;
+  }
+  print(`===============================================`);
+  print(`found ${count} objects in range ${hex(start)} - ${hex(end)}`)
+  print(`===============================================`);
+}
+
+// This function assumes the pointer fields to be ptr-size aligned.
+function print_objects_tree(root, depth_limit) {
+  if(!is_likely_object(root)) {
+    print(`${hex(root)} doesn't look like an object`);
+    return;
+  }
+
+  let path = [];
+
+  function impl(obj, depth, depth_limit) {
+    const ptr_size = tagged_size();
+    // print the current object and its map pointer
+    const this_obj =
+      `${" ".repeat(2 * depth)}${hex(obj)} : ${hex(poim(obj - 1))}`;
+    const cutoff = depth_limit && depth == depth_limit - 1;
+    print(`${this_obj}${cutoff ? " (...)" : ""}`);
+    if (cutoff) return;
+
+    path[depth] = obj;
+    path.length = depth + 1;
+    let cur = obj - 1 + ptr_size;
+
+    // Scan downwards until an address that is likely to be at the start of
+    // another object, in which case it's time to pop out from the recursion.
+    let iter = 0; // an arbitrary guard to avoid hanging the debugger
+    let seen = new Set(path);
+    while (!is_likely_object(cur + 1) && iter < 100) {
+      iter++;
+      let field = poim(cur);
+      if (is_likely_object(field)) {
+        if (seen.has(field)) {
+          print(
+            `${" ".repeat(2 * depth + 2)}cycle: ${hex(cur)}->${hex(field)}`);
+        }
+        else {
+          impl(field, depth + 1, depth_limit);
+        }
+      }
+      cur += ptr_size;
+    }
+  }
+  print(`===============================================`);
+  impl(root, 0, depth_limit);
+  print(`===============================================`);
+}
+
+/*-----------------------------------------------------------------------------
+    Memory spaces
+-----------------------------------------------------------------------------*/
+const NEVER_EVACUATE = 1 << 7; // see src\heap\spaces.h
+
+function print_memory_chunk_list(space_type, front, top, age_mark) {
+  let alloc_pos = top ? ` (allocating at: ${top})` : "";
+  let age_mark_pos = age_mark ? ` (age_mark at: ${age_mark})` : "";
+  print(`${space_type}${alloc_pos}${age_mark_pos}:`);
+  if (front.isNull) {
+    print("<empty>\n");
+    return;
+  }
+
+  let cur = front;
+  while (!cur.isNull) {
+    let imm = cur.flags_ & NEVER_EVACUATE ? "*" : " ";
+    let addr = hex(cur.address);
+    let area = `${hex(cur.area_start_)} - ${hex(cur.area_end_)}`;
+    let dt = `dt ${addr} ${module_name()}!v8::internal::MemoryChunk`;
+    print(`${imm}    ${addr}:\t ${area} (${hex(cur.size_)}) : ${dt}`);
+    cur = cur.list_node_.next_;
+  }
+  print("");
+}
+
+const space_tags =
+  ['old', 'new_to', 'new_from', 'ro', 'map', 'code', 'lo', 'nlo'];
+
+function get_chunks_space(space_tag, front, chunks) {
+    let cur = front;
+    while (!cur.isNull) {
+        chunks.push({
+          'address':cur.address,
+          'area_start_':cur.area_start_,
+          'area_end_':cur.area_end_,
+          'space':space_tag});
+        cur = cur.list_node_.next_;
+    }
+}
+
+function get_chunks() {
+  let iso = cast(isolate_address, "v8::internal::Isolate");
+  let h = iso.heap_;
+
+  let chunks = [];
+  get_chunks_space('old', h.old_space_.memory_chunk_list_.front_, chunks);
+  get_chunks_space('new_to',
+    h.new_space_.to_space_.memory_chunk_list_.front_, chunks);
+  get_chunks_space('new_from',
+    h.new_space_.from_space_.memory_chunk_list_.front_, chunks);
+  get_chunks_space('ro', h.read_only_space_.memory_chunk_list_.front_, chunks);
+  get_chunks_space('map', h.map_space_.memory_chunk_list_.front_, chunks);
+  get_chunks_space('code', h.code_space_.memory_chunk_list_.front_, chunks);
+  get_chunks_space('lo', h.lo_space_.memory_chunk_list_.front_, chunks);
+  get_chunks_space('nlo', h.new_lo_space_.memory_chunk_list_.front_, chunks);
+
+  return chunks;
+}
+
+function find_chunk(address) {
+  if (!Number.isSafeInteger(int(address))) return undefined;
+
+  let chunks = get_chunks();
+  for (let c of chunks) {
+    let chunk = cast(c.address, "v8::internal::MemoryChunk");
+    if (address >= chunk.area_start_ && address < chunk.area_end_) {
+      return c;
+    }
+  }
+
+  return undefined;
+}
+
+function print_memory(space = "all") {
+  if (isolate_address == 0) {
+    print("Please call !set_iso(isolate_address) first.");
+    return;
+  }
+
+  let iso = cast(isolate_address, "v8::internal::Isolate");
+  let h = iso.heap_;
+  print(`Heap at ${h.targetLocation}`);
+
+  let st = space.toLowerCase().split(" ");
+
+  print("Im   address:\t object area start - end (size)");
+  if (st.includes("all") || st.includes("old")) {
+    print_memory_chunk_list("OldSpace",
+      h.old_space_.memory_chunk_list_.front_,
+      h.old_space_.allocation_info_.top_);
+  }
+  if (st.includes("all") || st.includes("new")) {
+    // new space doesn't use the chunk list from its base class but from
+    // the to/from semi-spaces it points to
+    print_memory_chunk_list("NewSpace_To",
+      h.new_space_.to_space_.memory_chunk_list_.front_,
+      h.new_space_.allocation_info_.top_,
+      h.new_space_.to_space_.age_mark_);
+    print_memory_chunk_list("NewSpace_From",
+      h.new_space_.from_space_.memory_chunk_list_.front_);
+  }
+  if (st.includes("all") || st.includes("map")) {
+    print_memory_chunk_list("MapSpace",
+      h.map_space_.memory_chunk_list_.front_,
+      h.map_space_.allocation_info_.top_);
+  }
+  if (st.includes("all") || st.includes("code")) {
+    print_memory_chunk_list("CodeSpace",
+      h.code_space_.memory_chunk_list_.front_,
+      h.code_space_.allocation_info_.top_);
+  }
+  if (st.includes("all") || st.includes("large") || st.includes("lo")) {
+    print_memory_chunk_list("OldLargeObjectSpace",
+      h.lo_space_.memory_chunk_list_.front_);
+  }
+  if (st.includes("all") || st.includes("newlarge") || st.includes("nlo")) {
+    print_memory_chunk_list("NewLargeObjectSpace",
+      h.new_lo_space_.memory_chunk_list_.front_);
+  }
+  if (st.includes("all") || st.includes("readonly") || st.includes("ro")) {
+    print_memory_chunk_list("ReadOnlySpace",
+      h.read_only_space_.memory_chunk_list_.front_);
+  }
+}
+
+function print_owning_space(address) {
+  if (isolate_address == 0) {
+    print("Please call !set_iso(isolate_address) first.");
+    return;
+  }
+
+  address = decomp(address);
+  let c = find_chunk(address);
+  if (c) {
+      print(`${hex(address)} is in ${c.space} (chunk: ${hex(c.address)})`);
+  }
+  else {
+      print(`Address ${hex(address)} is not in managed heap`);
+  }
+}
+
+/*-----------------------------------------------------------------------------
+    Handles
+-----------------------------------------------------------------------------*/
+function print_handles_data(print_handles = false) {
+  if (isolate_address == 0) {
+    print("Please call !set_iso(isolate_address) first.");
+    return;
+  }
+
+  let iso = cast(isolate_address, "v8::internal::Isolate");
+  let hsd = iso.handle_scope_data_;
+  let hsimpl = iso.handle_scope_implementer_;
+
+  // depth level
+  print(`Nested depth level: ${hsd.level}`);
+
+  // count of handles
+  const ptr_size = pointer_size();
+  let blocks = hsimpl.blocks_;
+  const block_size = 1022; // v8::internal::KB - 2
+  const first_block = blocks.data_.address;
+  const last_block = (blocks.size_ == 0)
+                     ? first_block
+                     : first_block + ptr_size * (blocks.size_ - 1);
+
+  const count = (blocks.size_ == 0)
+              ? 0
+              : (blocks.size_ - 1) * block_size +
+                (hsd.next.address - poi(last_block))/ptr_size;
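+  // Worked example: with 3 blocks and hsd.next pointing 5 slots past the start
+  // of the last block, count = 2 * 1022 + 5 = 2049.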
+  print(`Currently tracking ${count} local handles`);
+
+  // print the handles
+  if (print_handles && count > 0) {
+    for (let block = first_block; block < last_block;
+         block += block_size * ptr_size) {
+      print(`Handles in block at ${hex(block)}`);
+      for (let i = 0; i < block_size; i++) {
+        const location = poi(block + i * ptr_size);
+        print(`  ${hex(location)}->${hex(poi(location))}`);
+      }
+    }
+
+    let location = poi(last_block);
+    print(`Handles in block at ${hex(last_block)}`);
+    for (let location = poi(last_block); location < hsd.next.address;
+         location += ptr_size) {
+      print(`  ${hex(location)}->${hex(poi(location))}`);
+    }
+  }
+
+  // where will the next handle allocate at?
+  const prefix = "Next handle's location will be";
+  if (hsd.next.address < hsd.limit.address) {
+    print(`${prefix} at ${hex(hsd.next.address)}`);
+  }
+  else if (hsimpl.spare_) {
+    const location = hsimpl.spare_.address;
+    print(`${prefix} from the spare block at ${hex(location)}`);
+  }
+  else {
+    print(`${prefix} from a new block to be allocated`);
+  }
+}
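+
+// Illustrative usage (registered as !handles below): !handles() reports the
+// nesting level and the number of tracked local handles, while
+// !handles(true) additionally dumps every handle location and the tagged
+// value it points to.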
+
+/*-----------------------------------------------------------------------------
+    dp
+-----------------------------------------------------------------------------*/
+function pad_right(addr) {
+  let addr_hex = hex(addr);
+  return `${addr_hex}${" ".repeat(pointer_size() * 2 + 2 - addr_hex.length)}`;
+}
+
+// TODO irinayat: would be nice to identify handles and smi as well
+function dp(addr, count = 10) {
+  if (isolate_address == 0) {
+    print(`To see where objects are located, run !set_iso.`);
+  }
+
+  if (!Number.isSafeInteger(int(addr))) {
+    print(`${hex(addr)} doesn't look like a valid address`);
+    return;
+  }
+
+  const ptr_size = tagged_size();
+  let aligned_addr = addr - (addr % ptr_size);
+  let val = poim(aligned_addr);
+  let iter = 0;
+  while (val && iter < count) {
+    const map = is_map(val);
+    const obj = is_likely_object(val) && !map;
+
+    const augm_map = map ? "map" : "";
+    const augm_obj = obj ? "obj" : "";
+    const augm_other = !map && !obj ? "val" : "";
+
+    let c = find_chunk(decomp(val));
+    const augm_space = c ? ` in ${c.space}` : "";
+    const augm = `${augm_map}${augm_obj}${augm_other}${augm_space}`;
+
+    const full_ptr = using_ptr_compr ?
+        pad_right((map || obj) ? decomp(val) : val) : "";
+    print(`${pad_right(aligned_addr)} ${pad_right(val)} ${full_ptr}   ${augm}`);
+
+    aligned_addr += ptr_size;
+    val = poim(aligned_addr);
+    iter++;
+  }
+}
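+
+// Illustrative usage (registered as !dp below): !dp(0x12345678) dumps ten
+// tagged-size slots starting at the aligned address, annotating each value
+// as "map", "obj" or "val" plus the owning space when known; a second
+// argument such as !dp(0x12345678, 32) extends the dump to 32 slots. The
+// addresses are placeholders.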
+
+/*-----------------------------------------------------------------------------
+    Remembered Sets
+-----------------------------------------------------------------------------*/
+// set ids: 0 = OLD_TO_NEW, 1 = OLD_TO_OLD
+function print_remembered_set(chunk_addr, set_id = 0) {
+  if (!chunk_addr) {
+    if (isolate_address == 0) {
+      print("Please call !set_iso(isolate_address) or provide chunk address.");
+      return;
+    }
+
+    let iso = cast(isolate_address, "v8::internal::Isolate");
+    let h = iso.heap_;
+    let chunks = [];
+    get_chunks_space('old', h.old_space_.memory_chunk_list_.front_, chunks);
+    get_chunks_space('lo', h.lo_space_.memory_chunk_list_.front_, chunks);
+    for (let c of chunks) {
+      try {
+        print_remembered_set(c.address);
+      }
+      catch (e) {
+        print(`failed to process chunk ${hex(c.address)} due to ${e.message}`);
+      }
+    }
+    return;
+  }
+
+  print(`Remembered set in chunk ${hex(chunk_addr)}`);
+  let chunk = cast(chunk_addr, "v8::internal::MemoryChunk");
+
+  // chunk.slot_set_ is an array of SlotSets. For standard pages there is 0 or
+  // 1 item in the array, but for large pages there will be more.
+  const page_size = 256 * 1024;
+  const sets_count = Math.floor((chunk.size_ + page_size - 1) / page_size);
+  let rs = chunk.slot_set_[set_id];
+  if (rs.isNull) {
+    print(`  <empty>`);
+    return;
+  }
+  if (rs[0].page_start_ != chunk_addr) {
+    print(`page_start_ [${hex(rs[0].page_start_)}] doesn't match chunk_addr!`);
+    return;
+  }
+
+  const ptr_size = tagged_size();
+  let count = 0;
+  for (let s = 0; s < sets_count; s++){
+    const buckets_count = rs[s].buckets_.Count();
+    for (let b = 0; b < buckets_count; b++) {
+      let bucket = rs[s].buckets_[b];
+      if (bucket.isNull) continue;
+      // There are 32 cells in each bucket; each cell is 32 bits wide.
+      print(`  bucket ${hex(bucket.address.asNumber())}:`);
+      const first_cell = bucket.address.asNumber();
+      for (let c = 0; c < 32; c++) {
+        let cell = host.memory.readMemoryValues(
+          first_cell + c * 4, 1, 4 /*size to read*/)[0];
+        if (cell == 0) continue;
+        let mask = 1;
+        for (let bit = 0; bit < 32; bit++){
+          if (cell & mask) {
+            count++;
+            const slot_offset = (b * 32 * 32 + c * 32 + bit) * ptr_size;
+            const slot = rs[s].page_start_ + slot_offset;
+            print(`    ${hex(slot)} -> ${hex(poim(slot))}`);
+          }
+          mask = mask << 1;
+        }
+      }
+    }
+  }
+
+  if (count == 0) print(`  <empty>`);
+  else print(`  ${count} remembered pointers in chunk ${hex(chunk_addr)}`);
+}
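+
+// Illustrative usage (registered as !rs below): !rs(chunk_address) prints the
+// OLD_TO_NEW slots recorded in that chunk, !rs(chunk_address, 1) prints the
+// OLD_TO_OLD set, and a bare !rs() walks every chunk in the old and large
+// object spaces (which requires !set_iso to have been called first).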
+
+
+/*=============================================================================
+  Initialize short aliased names for the most common commands
+=============================================================================*/
+function initializeScript() {
+  return [
+      new host.functionAlias(help, "help"),
+      new host.functionAlias(print_object_from_handle, "jlh"),
+      new host.functionAlias(print_object, "job"),
+      new host.functionAlias(print_js_stack, "jst"),
+
+      new host.functionAlias(set_isolate_address, "set_iso"),
+      new host.functionAlias(module_name, "set_module"),
+      new host.functionAlias(print_memory, "mem"),
+      new host.functionAlias(print_owning_space, "where"),
+      new host.functionAlias(print_handles_data, "handles"),
+      new host.functionAlias(print_remembered_set, "rs"),
+
+      new host.functionAlias(print_object_prev, "jo_prev"),
+      new host.functionAlias(print_object_next, "jo_next"),
+      new host.functionAlias(print_objects_in_range, "jo_in_range"),
+      new host.functionAlias(print_objects_tree, "jot"),
+
+      new host.functionAlias(dp, "dp"),
+
+      new host.functionAlias(set_user_js_bp, "jsbp"),
+  ]
+}
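+
+// A minimal usage sketch (the path and address below are placeholders):
+//   .scriptload <path-to-this-script>
+//   !set_iso(0x00000285c8f3e2a0)
+//   !handles()
+// host.functionAlias registers each function above as a WinDbg bang command
+// under the short name given as the alias.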
diff --git a/src/third_party/v8/tools/windows-tick-processor.bat b/src/third_party/v8/tools/windows-tick-processor.bat
new file mode 100755
index 0000000..56637e0
--- /dev/null
+++ b/src/third_party/v8/tools/windows-tick-processor.bat
@@ -0,0 +1,30 @@
+@echo off
+
+SET tools_dir=%~dp0
+IF 1%D8_PATH% == 1 (SET D8_PATH=%tools_dir%..)
+
+SET log_file=v8.log
+
+rem Find the name of the log file to process; it must not start with a dash.
+rem We prepend cmdline args with a number (in fact, any letter or number)
+rem to cope with empty arguments.
+SET arg1=1%1
+IF NOT %arg1:~0,2% == 1 (IF NOT %arg1:~0,2% == 1- SET log_file=%1)
+SET arg2=2%2
+IF NOT %arg2:~0,2% == 2 (IF NOT %arg2:~0,2% == 2- SET log_file=%2)
+SET arg3=3%3
+IF NOT %arg3:~0,2% == 3 (IF NOT %arg3:~0,2% == 3- SET log_file=%3)
+SET arg4=4%4
+IF NOT %arg4:~0,2% == 4 (IF NOT %arg4:~0,2% == 4- SET log_file=%4)
+SET arg5=5%5
+IF NOT %arg5:~0,2% == 5 (IF NOT %arg5:~0,2% == 5- SET log_file=%5)
+SET arg6=6%6
+IF NOT %arg6:~0,2% == 6 (IF NOT %arg6:~0,2% == 6- SET log_file=%6)
+SET arg7=7%7
+IF NOT %arg7:~0,2% == 7 (IF NOT %arg7:~0,2% == 7- SET log_file=%7)
+SET arg8=8%8
+IF NOT %arg8:~0,2% == 8 (IF NOT %arg8:~0,2% == 8- SET log_file=%8)
+SET arg9=9%9
+IF NOT %arg9:~0,2% == 9 (IF NOT %arg9:~0,2% == 9- SET log_file=%9)
+
+type %log_file% | %D8_PATH%\d8 --module %tools_dir%tickprocessor-driver.js -- --windows %*
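+
+rem Illustrative example (the log name is just a placeholder):
+rem   windows-tick-processor.bat my-run.log
+rem pipes my-run.log through d8 and tickprocessor-driver.js; with no
+rem arguments the default v8.log from the current directory is processed.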
diff --git a/src/third_party/v8/tools/zone-stats/categories.js b/src/third_party/v8/tools/zone-stats/categories.js
new file mode 100644
index 0000000..581a69a
--- /dev/null
+++ b/src/third_party/v8/tools/zone-stats/categories.js
@@ -0,0 +1,129 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const UNCLASSIFIED_CATEGORY = 'unclassified';
+const UNCLASSIFIED_CATEGORY_NAME = 'Unclassified';
+
+// Categories for zones.
+export const CATEGORIES = new Map([
+  [
+    'parser', new Set([
+      'AstStringConstants',
+      'parser-zone',
+      'pre-parser-zone',
+    ])
+  ],
+  [
+    'misc', new Set([
+      'Run',
+      'CanonicalHandleScope',
+      'Temporary scoped zone',
+      'UpdateFieldType',
+    ])
+  ],
+  [
+    'interpreter', new Set([
+      'InterpreterCompilationJob',
+    ])
+  ],
+  [
+    'regexp', new Set([
+      'CompileIrregexp',
+    ])
+  ],
+  [
+    'compiler-huge', new Set([
+      'graph-zone',
+      'instruction-zone',
+      'pipeline-compilation-job-zone',
+      'register-allocation-zone',
+      'register-allocator-verifier-zone',
+    ])
+  ],
+  [
+    'compiler-other', new Set([
+      'Compile',
+      'V8.TFAllocateFPRegisters',
+      'V8.TFAllocateGeneralRegisters',
+      'V8.TFAssembleCode',
+      'V8.TFAssignSpillSlots',
+      'V8.TFBuildLiveRangeBundles',
+      'V8.TFBuildLiveRanges',
+      'V8.TFBytecodeGraphBuilder',
+      'V8.TFCommitAssignment',
+      'V8.TFConnectRanges',
+      'V8.TFControlFlowOptimization',
+      'V8.TFDecideSpillingMode',
+      'V8.TFDecompressionOptimization',
+      'V8.TFEarlyOptimization',
+      'V8.TFEarlyTrimming',
+      'V8.TFEffectLinearization',
+      'V8.TFEscapeAnalysis',
+      'V8.TFFinalizeCode',
+      'V8.TFFrameElision',
+      'V8.TFGenericLowering',
+      'V8.TFHeapBrokerInitialization',
+      'V8.TFInlining',
+      'V8.TFJumpThreading',
+      'V8.TFLateGraphTrimming',
+      'V8.TFLateOptimization',
+      'V8.TFLoadElimination',
+      'V8.TFLocateSpillSlots',
+      'V8.TFLoopPeeling',
+      'V8.TFMachineOperatorOptimization',
+      'V8.TFMeetRegisterConstraints',
+      'V8.TFMemoryOptimization',
+      'V8.TFOptimizeMoves',
+      'V8.TFPopulatePointerMaps',
+      'V8.TFResolveControlFlow',
+      'V8.TFResolvePhis',
+      'V8.TFScheduling',
+      'V8.TFSelectInstructions',
+      'V8.TFSerializeMetadata',
+      'V8.TFSimplifiedLowering',
+      'V8.TFStoreStoreElimination',
+      'V8.TFTypedLowering',
+      'V8.TFTyper',
+      'V8.TFUntyper',
+      'V8.TFVerifyGraph',
+      'ValidatePendingAssessment',
+      'codegen-zone',
+    ])
+  ],
+  [UNCLASSIFIED_CATEGORY, new Set()],
+]);
+
+// Maps category to description text that is shown in html.
+export const CATEGORY_NAMES = new Map([
+  ['parser', 'Parser'],
+  ['misc', 'Misc'],
+  ['interpreter', 'Ignition'],
+  ['regexp', 'Regexp compiler'],
+  ['compiler-huge', 'TurboFan (huge zones)'],
+  ['compiler-other', 'TurboFan (other zones)'],
+  [UNCLASSIFIED_CATEGORY, UNCLASSIFIED_CATEGORY_NAME],
+]);
+
+function buildZoneToCategoryMap() {
+  const map = new Map();
+  for (let [category, zone_names] of CATEGORIES.entries()) {
+    for (let zone_name of zone_names) {
+      if (map.has(zone_name)) {
+        console.error("Zone belongs to multiple categories: " + zone_name);
+      } else {
+        map.set(zone_name, category);
+      }
+    }
+  }
+  return map;
+}
+
+const CATEGORY_BY_ZONE = buildZoneToCategoryMap();
+
+// Maps zone name to category.
+export function categoryByZoneName(zone_name) {
+  const category = CATEGORY_BY_ZONE.get(zone_name);
+  if (category !== undefined) return category;
+  return UNCLASSIFIED_CATEGORY;
+}
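+
+// Illustrative lookups against the tables above:
+//   categoryByZoneName('graph-zone')       // -> 'compiler-huge'
+//   categoryByZoneName('parser-zone')      // -> 'parser'
+//   categoryByZoneName('some-future-zone') // -> 'unclassified' (not listed)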
diff --git a/src/third_party/v8/tools/zone-stats/details-selection-template.html b/src/third_party/v8/tools/zone-stats/details-selection-template.html
new file mode 100644
index 0000000..ef1e2f6
--- /dev/null
+++ b/src/third_party/v8/tools/zone-stats/details-selection-template.html
@@ -0,0 +1,146 @@
+<!-- Copyright 2020 the V8 project authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file. -->
+<style>
+#dataSelectionSection {
+  display: none;
+}
+
+.box {
+  border-left: dashed 1px #666666;
+  border-right: dashed 1px #666666;
+  border-bottom: dashed 1px #666666;
+  padding: 10px;
+  overflow: hidden;
+  position: relative;
+}
+
+.box:nth-of-type(1) {
+  border-top: dashed 1px #666666;
+  border-radius: 5px 5px 0px 0px;
+}
+
+.box:last-of-type {
+    border-radius: 0px 0px 5px 5px;
+}
+
+.box > ul {
+  margin: 0px;
+  padding: 0px;
+}
+
+.box > ul > li {
+  display: inline-block;
+}
+
+.box > ul > li:not(:first-child) {
+  margin-left: 10px;
+}
+
+.box > ul > li:first-child {
+  font-weight: bold;
+}
+
+.zonesSelectBox {
+  position: relative;
+  overflow: hidden;
+  float: left;
+  padding: 0px 5px 2px 0px;
+  margin: 3px;
+  border-radius: 3px;
+}
+
+.zonesSelectBox > label {
+  font-size: xx-small;
+}
+
+.zonesSelectBox > input {
+  vertical-align: middle;
+}
+
+.percentBackground {
+  position: absolute;
+  width: 200%;
+  height: 100%;
+  left: 0%;
+  top: 0px;
+  margin-left: -100%;
+  transition: all 1s ease-in-out;
+}
+
+.zonesSelectBox > .percentBackground  {
+  background: linear-gradient(90deg, #68b0f7 50%, #b3d9ff 50%);
+  z-index: -1;
+}
+.box > .percentBackground  {
+  background: linear-gradient(90deg, #e0edfe 50%, #fff 50%);
+  z-index: -2;
+}
+
+#categories {
+  margin-top: 10px;
+}
+
+#category-filter {
+  text-align: right;
+  width: 50px;
+}
+
+</style>
+<section id="dataSelectionSection">
+  <h2>Data selection</h2>
+  <ul>
+    <li>
+      <label for="isolate-select">
+        Isolate
+      </label>
+      <select id="isolate-select">
+        <option>No data</option>
+      </select>
+    </li>
+    <li>
+      <label for="data-view-select">
+        Data view
+      </label>
+      <select id="data-view-select">
+        <option>No data</option>
+      </select>
+    </li>
+    <li>
+      <label for="show-totals-select">
+        Show total allocated/used zone memory
+      </label>
+      <input type="checkbox" id="show-totals-select" checked>
+    </li>
+    <li>
+      <label for="data-kind-select">
+        Data kind
+      </label>
+      <select id="data-kind-select">
+        <option>No data</option>
+      </select>
+    </li>
+    <li>
+      <label for="time-start-select">
+        Time start
+      </label>
+      <input type="number" id="time-start-select" value="0">ms</input>
+    </li>
+    <li>
+      <label for="time-end-select">
+        Time end
+      </label>
+      <input type="number" id="time-end-select" value="0">ms</input>
+    </li>
+    <li>
+      <label for="memory-usage-sample-select">
+        Memory usage sample (at a specific time in ms)
+      </label>
+      <select id="memory-usage-sample-select">
+        <option>No data</option>
+      </select>
+    </li>
+  </ul>
+
+  <div id="categories"></div>
+</section>
diff --git a/src/third_party/v8/tools/zone-stats/details-selection.js b/src/third_party/v8/tools/zone-stats/details-selection.js
new file mode 100644
index 0000000..1e57e0a
--- /dev/null
+++ b/src/third_party/v8/tools/zone-stats/details-selection.js
@@ -0,0 +1,363 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+import {CATEGORIES, CATEGORY_NAMES, categoryByZoneName} from './categories.js';
+
+export const VIEW_TOTALS = 'by-totals';
+export const VIEW_BY_ZONE_NAME = 'by-zone-name';
+export const VIEW_BY_ZONE_CATEGORY = 'by-zone-category';
+
+export const KIND_ALLOCATED_MEMORY = 'kind-detailed-allocated';
+export const KIND_USED_MEMORY = 'kind-detailed-used';
+export const KIND_FREED_MEMORY = 'kind-detailed-freed';
+
+defineCustomElement('details-selection', (templateText) =>
+ class DetailsSelection extends HTMLElement {
+  constructor() {
+    super();
+    const shadowRoot = this.attachShadow({mode: 'open'});
+    shadowRoot.innerHTML = templateText;
+    this.isolateSelect.addEventListener(
+        'change', e => this.handleIsolateChange(e));
+    this.dataViewSelect.addEventListener(
+        'change', e => this.notifySelectionChanged(e));
+    this.dataKindSelect.addEventListener(
+        'change', e => this.notifySelectionChanged(e));
+    this.showTotalsSelect.addEventListener(
+        'change', e => this.notifySelectionChanged(e));
+    this.memoryUsageSampleSelect.addEventListener(
+        'change', e => this.notifySelectionChanged(e));
+    this.timeStartSelect.addEventListener(
+        'change', e => this.notifySelectionChanged(e));
+    this.timeEndSelect.addEventListener(
+        'change', e => this.notifySelectionChanged(e));
+  }
+
+  connectedCallback() {
+    for (let category of CATEGORIES.keys()) {
+      this.$('#categories').appendChild(this.buildCategory(category));
+    }
+  }
+
+  set data(value) {
+    this._data = value;
+    this.dataChanged();
+  }
+
+  get data() {
+    return this._data;
+  }
+
+  get selectedIsolate() {
+    return this._data[this.selection.isolate];
+  }
+
+  get selectedData() {
+    console.assert(this.data, 'invalid data');
+    console.assert(this.selection, 'invalid selection');
+    const time = this.selection.time;
+    return this.selectedIsolate.samples.get(time);
+  }
+
+  $(id) {
+    return this.shadowRoot.querySelector(id);
+  }
+
+  querySelectorAll(query) {
+    return this.shadowRoot.querySelectorAll(query);
+  }
+
+  get dataViewSelect() {
+    return this.$('#data-view-select');
+  }
+
+  get dataKindSelect() {
+    return this.$('#data-kind-select');
+  }
+
+  get isolateSelect() {
+    return this.$('#isolate-select');
+  }
+
+  get memoryUsageSampleSelect() {
+    return this.$('#memory-usage-sample-select');
+  }
+
+  get showTotalsSelect() {
+    return this.$('#show-totals-select');
+  }
+
+  get timeStartSelect() {
+    return this.$('#time-start-select');
+  }
+
+  get timeEndSelect() {
+    return this.$('#time-end-select');
+  }
+
+  buildCategory(name) {
+    const div = document.createElement('div');
+    div.id = name;
+    div.classList.add('box');
+    const ul = document.createElement('ul');
+    div.appendChild(ul);
+    const name_li = document.createElement('li');
+    ul.appendChild(name_li);
+    name_li.innerHTML = CATEGORY_NAMES.get(name);
+    const percent_li = document.createElement('li');
+    ul.appendChild(percent_li);
+    percent_li.innerHTML = '0%';
+    percent_li.id = name + 'PercentContent';
+    const all_li = document.createElement('li');
+    ul.appendChild(all_li);
+    const all_button = document.createElement('button');
+    all_li.appendChild(all_button);
+    all_button.innerHTML = 'All';
+    all_button.addEventListener('click', e => this.selectCategory(name));
+    const none_li = document.createElement('li');
+    ul.appendChild(none_li);
+    const none_button = document.createElement('button');
+    none_li.appendChild(none_button);
+    none_button.innerHTML = 'None';
+    none_button.addEventListener('click', e => this.unselectCategory(name));
+    const innerDiv = document.createElement('div');
+    div.appendChild(innerDiv);
+    innerDiv.id = name + 'Content';
+    const percentDiv = document.createElement('div');
+    div.appendChild(percentDiv);
+    percentDiv.className = 'percentBackground';
+    percentDiv.id = name + 'PercentBackground';
+    return div;
+  }
+
+  dataChanged() {
+    this.selection = {categories: {}, zones: new Map()};
+    this.resetUI(true);
+    this.populateIsolateSelect();
+    this.handleIsolateChange();
+    this.$('#dataSelectionSection').style.display = 'block';
+  }
+
+  populateIsolateSelect() {
+    let isolates = Object.entries(this.data);
+    // Sort isolates by peak allocated zone memory, descending.
+    isolates.sort((a, b) => b[1].peakAllocatedMemory - a[1].peakAllocatedMemory);
+    this.populateSelect(
+        '#isolate-select', isolates, (key, isolate) => isolate.getLabel());
+  }
+
+  resetUI(resetIsolateSelect) {
+    if (resetIsolateSelect) removeAllChildren(this.isolateSelect);
+
+    removeAllChildren(this.dataViewSelect);
+    removeAllChildren(this.dataKindSelect);
+    removeAllChildren(this.memoryUsageSampleSelect);
+    this.clearCategories();
+  }
+
+  handleIsolateChange(e) {
+    this.selection.isolate = this.isolateSelect.value;
+    if (this.selection.isolate.length === 0) {
+      this.selection.isolate = null;
+      return;
+    }
+    this.resetUI(false);
+    this.populateSelect(
+        '#data-view-select', [
+          [VIEW_TOTALS, 'Total memory usage'],
+          [VIEW_BY_ZONE_NAME, 'Selected zones types'],
+          [VIEW_BY_ZONE_CATEGORY, 'Selected zone categories'],
+        ],
+        (key, label) => label, VIEW_TOTALS);
+    this.populateSelect(
+      '#data-kind-select', [
+        [KIND_ALLOCATED_MEMORY, 'Allocated memory per zone'],
+        [KIND_USED_MEMORY, 'Used memory per zone'],
+        [KIND_FREED_MEMORY, 'Freed memory per zone'],
+      ],
+      (key, label) => label, KIND_ALLOCATED_MEMORY);
+
+    this.populateSelect(
+      '#memory-usage-sample-select',
+      [...this.selectedIsolate.samples.entries()].filter(([time, sample]) => {
+        // Remove samples that do not have detailed per-zone data.
+        return sample.zones !== undefined;
+      }),
+      (time, sample, index) => {
+        return ((index + ': ').padStart(6, '\u00A0') +
+          formatSeconds(time).padStart(8, '\u00A0') + ' ' +
+          formatBytes(sample.allocated).padStart(12, '\u00A0'));
+      },
+      this.selectedIsolate.peakUsageTime);
+
+    this.timeStartSelect.value = this.selectedIsolate.start;
+    this.timeEndSelect.value = this.selectedIsolate.end;
+
+    this.populateCategories();
+    this.notifySelectionChanged();
+  }
+
+  notifySelectionChanged(e) {
+    if (!this.selection.isolate) return;
+
+    this.selection.data_view = this.dataViewSelect.value;
+    this.selection.data_kind = this.dataKindSelect.value;
+    this.selection.categories = Object.create(null);
+    this.selection.zones = new Map();
+    this.$('#categories').style.display = 'none';
+    for (let category of CATEGORIES.keys()) {
+      const selected = this.selectedInCategory(category);
+      if (selected.length > 0) this.selection.categories[category] = selected;
+      for (const zone_name of selected) {
+        this.selection.zones.set(zone_name, category);
+      }
+    }
+    this.$('#categories').style.display = 'block';
+    this.selection.category_names = CATEGORY_NAMES;
+    this.selection.show_totals = this.showTotalsSelect.checked;
+    this.selection.time = Number(this.memoryUsageSampleSelect.value);
+    this.selection.timeStart = Number(this.timeStartSelect.value);
+    this.selection.timeEnd = Number(this.timeEndSelect.value);
+    this.updatePercentagesInCategory();
+    this.updatePercentagesInZones();
+    this.dispatchEvent(new CustomEvent(
+        'change', {bubbles: true, composed: true, detail: this.selection}));
+  }
+
+  updatePercentagesInCategory() {
+    const overalls = Object.create(null);
+    let overall = 0;
+    // Reset all categories.
+    this.selection.category_names.forEach((_, category) => {
+      overalls[category] = 0;
+    });
+    // Only update categories that have selections.
+    Object.entries(this.selection.categories).forEach(([category, value]) => {
+      overalls[category] =
+          Object.values(value).reduce(
+              (accu, current) => {
+                  const zone_data = this.selectedData.zones.get(current);
+                  return zone_data === undefined ? accu
+                                                 : accu + zone_data.allocated;
+              }, 0) /
+          KB;
+      overall += overalls[category];
+    });
+    Object.entries(overalls).forEach(([category, category_overall]) => {
+      let percents = category_overall / overall * 100;
+      this.$(`#${category}PercentContent`).innerHTML =
+          `${percents.toFixed(1)}%`;
+      this.$('#' + category + 'PercentBackground').style.left = percents + '%';
+    });
+  }
+
+  updatePercentagesInZones() {
+    const selected_data = this.selectedData;
+    const zones_data = selected_data.zones;
+    const total_allocated = selected_data.allocated;
+    this.querySelectorAll('.zonesSelectBox  input').forEach(checkbox => {
+      const zone_name = checkbox.value;
+      const zone_data = zones_data.get(zone_name);
+      const zone_allocated = zone_data === undefined ? 0 : zone_data.allocated;
+      const percents = zone_allocated / total_allocated;
+      const percent_div = checkbox.parentNode.querySelector('.percentBackground');
+      percent_div.style.left = (percents * 100) + '%';
+      checkbox.parentNode.style.display = 'block';
+    });
+  }
+
+  selectedInCategory(category) {
+    let tmp = [];
+    this.querySelectorAll('input[name=' + category + 'Checkbox]:checked')
+        .forEach(checkbox => tmp.push(checkbox.value));
+    return tmp;
+  }
+
+  createOption(value, text) {
+    const option = document.createElement('option');
+    option.value = value;
+    option.text = text;
+    return option;
+  }
+
+  populateSelect(id, iterable, labelFn = null, autoselect = null) {
+    if (labelFn == null) labelFn = e => e;
+    let index = 0;
+    for (let [key, value] of iterable) {
+      index++;
+      const label = labelFn(key, value, index);
+      const option = this.createOption(key, label);
+      if (autoselect === key) {
+        option.selected = 'selected';
+      }
+      this.$(id).appendChild(option);
+    }
+  }
+
+  clearCategories() {
+    for (const category of CATEGORIES.keys()) {
+      let f = this.$('#' + category + 'Content');
+      while (f.firstChild) {
+        f.removeChild(f.firstChild);
+      }
+    }
+  }
+
+  populateCategories() {
+    this.clearCategories();
+    const categories = Object.create(null);
+    for (let cat of CATEGORIES.keys()) {
+      categories[cat] = [];
+    }
+
+    for (const [zone_name, zone_stats] of this.selectedIsolate.zones) {
+      const category = categoryByZoneName(zone_name);
+      categories[category].push(zone_name);
+    }
+    for (let category of Object.keys(categories)) {
+      categories[category].sort();
+      for (let zone_name of categories[category]) {
+        this.$('#' + category + 'Content')
+            .appendChild(this.createCheckBox(zone_name, category));
+      }
+    }
+  }
+
+  unselectCategory(category) {
+    this.querySelectorAll('input[name=' + category + 'Checkbox]')
+        .forEach(checkbox => checkbox.checked = false);
+    this.notifySelectionChanged();
+  }
+
+  selectCategory(category) {
+    this.querySelectorAll('input[name=' + category + 'Checkbox]')
+        .forEach(checkbox => checkbox.checked = true);
+    this.notifySelectionChanged();
+  }
+
+  createCheckBox(instance_type, category) {
+    const div = document.createElement('div');
+    div.classList.add('zonesSelectBox');
+    div.style.width = "200px";
+    const input = document.createElement('input');
+    div.appendChild(input);
+    input.type = 'checkbox';
+    input.name = category + 'Checkbox';
+    input.checked = 'checked';
+    input.id = instance_type + 'Checkbox';
+    input.instance_type = instance_type;
+    input.value = instance_type;
+    input.addEventListener('change', e => this.notifySelectionChanged(e));
+    const label = document.createElement('label');
+    div.appendChild(label);
+    label.innerText = instance_type;
+    label.htmlFor = instance_type + 'Checkbox';
+    const percentDiv = document.createElement('div');
+    percentDiv.className = 'percentBackground';
+    div.appendChild(percentDiv);
+    return div;
+  }
+});
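+
+// For reference, the "change" event dispatched by notifySelectionChanged()
+// above carries a detail object shaped roughly like (a sketch, not a schema):
+//   { isolate, data_view, data_kind, show_totals,
+//     categories: {category: [zone_name, ...]},
+//     zones: Map(zone_name -> category),
+//     category_names: Map(category -> label),
+//     time, timeStart, timeEnd }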
diff --git a/src/third_party/v8/tools/zone-stats/global-timeline-template.html b/src/third_party/v8/tools/zone-stats/global-timeline-template.html
new file mode 100644
index 0000000..49e7564
--- /dev/null
+++ b/src/third_party/v8/tools/zone-stats/global-timeline-template.html
@@ -0,0 +1,16 @@
+<!-- Copyright 2020 the V8 project authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file. -->
+<style>
+#chart {
+  width: 100%;
+  height: 500px;
+}
+</style>
+<div id="container" style="display: none;">
+  <h2>Stats</h2>
+  <p>Peak allocated zone memory <span id="peak-memory-label"></span></p>
+
+  <h2>Timeline</h2>
+  <div id="chart"></div>
+</div>
diff --git a/src/third_party/v8/tools/zone-stats/global-timeline.js b/src/third_party/v8/tools/zone-stats/global-timeline.js
new file mode 100644
index 0000000..85b5d19
--- /dev/null
+++ b/src/third_party/v8/tools/zone-stats/global-timeline.js
@@ -0,0 +1,341 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+import {categoryByZoneName} from './categories.js';
+
+import {
+  VIEW_TOTALS,
+  VIEW_BY_ZONE_NAME,
+  VIEW_BY_ZONE_CATEGORY,
+
+  KIND_ALLOCATED_MEMORY,
+  KIND_USED_MEMORY,
+  KIND_FREED_MEMORY,
+} from './details-selection.js';
+
+defineCustomElement('global-timeline', (templateText) =>
+ class GlobalTimeline extends HTMLElement {
+  constructor() {
+    super();
+    const shadowRoot = this.attachShadow({mode: 'open'});
+    shadowRoot.innerHTML = templateText;
+  }
+
+  $(id) {
+    return this.shadowRoot.querySelector(id);
+  }
+
+  set data(value) {
+    this._data = value;
+    this.stateChanged();
+  }
+
+  get data() {
+    return this._data;
+  }
+
+  set selection(value) {
+    this._selection = value;
+    this.stateChanged();
+  }
+
+  get selection() {
+    return this._selection;
+  }
+
+  isValid() {
+    return this.data && this.selection;
+  }
+
+  hide() {
+    this.$('#container').style.display = 'none';
+  }
+
+  show() {
+    this.$('#container').style.display = 'block';
+  }
+
+  stateChanged() {
+    if (this.isValid()) {
+      const isolate_data = this.data[this.selection.isolate];
+      const peakAllocatedMemory = isolate_data.peakAllocatedMemory;
+      this.$('#peak-memory-label').innerText = formatBytes(peakAllocatedMemory);
+      this.drawChart();
+    } else {
+      this.hide();
+    }
+  }
+
+  getZoneLabels(zone_names) {
+    switch (this.selection.data_kind) {
+      case KIND_ALLOCATED_MEMORY:
+        return zone_names.map(name => {
+          return {label: name + " (allocated)", type: 'number'};
+        });
+
+      case KIND_USED_MEMORY:
+        return zone_names.map(name => {
+          return {label: name + " (used)", type: 'number'};
+        });
+
+      case KIND_FREED_MEMORY:
+        return zone_names.map(name => {
+          return {label: name + " (freed)", type: 'number'};
+        });
+
+      default:
+        // Don't show detailed per-zone information.
+        return [];
+    }
+  }
+
+  getTotalsData() {
+    const isolate_data = this.data[this.selection.isolate];
+    const labels = [
+      { label: "Time", type: "number" },
+      { label: "Total allocated", type: "number" },
+      { label: "Total used", type: "number" },
+      { label: "Total freed", type: "number" },
+    ];
+    const chart_data = [labels];
+
+    const timeStart = this.selection.timeStart;
+    const timeEnd = this.selection.timeEnd;
+    const filter_entries = timeStart > 0 || timeEnd > 0;
+
+    for (const [time, zone_data] of isolate_data.samples) {
+      if (filter_entries && (time < timeStart || time > timeEnd)) continue;
+      const data = [];
+      data.push(time * kMillis2Seconds);
+      data.push(zone_data.allocated / KB);
+      data.push(zone_data.used / KB);
+      data.push(zone_data.freed / KB);
+      chart_data.push(data);
+    }
+    return chart_data;
+  }
+
+  getZoneData() {
+    const isolate_data = this.data[this.selection.isolate];
+    const selected_zones = this.selection.zones;
+    const zone_names = isolate_data.sorted_zone_names.filter(
+        zone_name => selected_zones.has(zone_name));
+    const data_kind = this.selection.data_kind;
+    const show_totals = this.selection.show_totals;
+    const zones_labels = this.getZoneLabels(zone_names);
+
+    const totals_labels = show_totals
+        ? [
+            { label: "Total allocated", type: "number" },
+            { label: "Total used", type: "number" },
+            { label: "Total freed", type: "number" },
+          ]
+        : [];
+
+    const labels = [
+      { label: "Time", type: "number" },
+      ...totals_labels,
+      ...zones_labels,
+    ];
+    const chart_data = [labels];
+
+    const timeStart = this.selection.timeStart;
+    const timeEnd = this.selection.timeEnd;
+    const filter_entries = timeStart > 0 || timeEnd > 0;
+
+    for (const [time, zone_data] of isolate_data.samples) {
+      if (filter_entries && (time < timeStart || time > timeEnd)) continue;
+      const active_zone_stats = Object.create(null);
+      if (zone_data.zones !== undefined) {
+        for (const [zone_name, zone_stats] of zone_data.zones) {
+          if (!selected_zones.has(zone_name)) continue;  // Not selected, skip.
+
+          const current_stats = active_zone_stats[zone_name];
+          if (current_stats === undefined) {
+            active_zone_stats[zone_name] =
+                { allocated: zone_stats.allocated,
+                  used: zone_stats.used,
+                  freed: zone_stats.freed,
+                };
+          } else {
+            // We've got two zones with the same name.
+            console.log("=== Duplicate zone names: " + zone_name);
+            // Sum stats.
+            current_stats.allocated += zone_stats.allocated;
+            current_stats.used += zone_stats.used;
+            current_stats.freed += zone_stats.freed;
+          }
+        }
+      }
+
+      const data = [];
+      data.push(time * kMillis2Seconds);
+      if (show_totals) {
+        data.push(zone_data.allocated / KB);
+        data.push(zone_data.used / KB);
+        data.push(zone_data.freed / KB);
+      }
+
+      zone_names.forEach(zone => {
+        const sample = active_zone_stats[zone];
+        let value = null;
+        if (sample !== undefined) {
+          if (data_kind == KIND_ALLOCATED_MEMORY) {
+            value = sample.allocated / KB;
+          } else if (data_kind == KIND_FREED_MEMORY) {
+            value = sample.freed / KB;
+          } else {
+            // KIND_USED_MEMORY
+            value = sample.used / KB;
+          }
+        }
+        data.push(value);
+      });
+      chart_data.push(data);
+    }
+    return chart_data;
+  }
+
+  getCategoryData() {
+    const isolate_data = this.data[this.selection.isolate];
+    const categories = Object.keys(this.selection.categories);
+    const categories_names =
+        categories.map(k => this.selection.category_names.get(k));
+    const selected_zones = this.selection.zones;
+    const data_kind = this.selection.data_kind;
+    const show_totals = this.selection.show_totals;
+
+    const categories_labels = this.getZoneLabels(categories_names);
+
+    const totals_labels = show_totals
+        ? [
+            { label: "Total allocated", type: "number" },
+            { label: "Total used", type: "number" },
+            { label: "Total freed", type: "number" },
+          ]
+        : [];
+
+    const labels = [
+      { label: "Time", type: "number" },
+      ...totals_labels,
+      ...categories_labels,
+    ];
+    const chart_data = [labels];
+
+    const timeStart = this.selection.timeStart;
+    const timeEnd = this.selection.timeEnd;
+    const filter_entries = timeStart > 0 || timeEnd > 0;
+
+    for (const [time, zone_data] of isolate_data.samples) {
+      if (filter_entries && (time < timeStart || time > timeEnd)) continue;
+      const active_category_stats = Object.create(null);
+      if (zone_data.zones !== undefined) {
+        for (const [zone_name, zone_stats] of zone_data.zones) {
+          const category = selected_zones.get(zone_name);
+          if (category === undefined) continue;  // Zone was not selected.
+
+          const current_stats = active_category_stats[category];
+          if (current_stats === undefined) {
+            active_category_stats[category] =
+                { allocated: zone_stats.allocated,
+                  used: zone_stats.used,
+                  freed: zone_stats.freed,
+                };
+          } else {
+            // Sum stats.
+            current_stats.allocated += zone_stats.allocated;
+            current_stats.used += zone_stats.used;
+            current_stats.freed += zone_stats.freed;
+          }
+        }
+      }
+
+      const data = [];
+      data.push(time * kMillis2Seconds);
+      if (show_totals) {
+        data.push(zone_data.allocated / KB);
+        data.push(zone_data.used / KB);
+        data.push(zone_data.freed / KB);
+      }
+
+      categories.forEach(category => {
+        const sample = active_category_stats[category];
+        let value = null;
+        if (sample !== undefined) {
+          if (data_kind == KIND_ALLOCATED_MEMORY) {
+            value = sample.allocated / KB;
+          } else if (data_kind == KIND_FREED_MEMORY) {
+            value = sample.freed / KB;
+          } else {
+            // KIND_USED_MEMORY
+            value = sample.used / KB;
+          }
+        }
+        data.push(value);
+      });
+      chart_data.push(data);
+    }
+    return chart_data;
+  }
+
+  getChartData() {
+    switch (this.selection.data_view) {
+      case VIEW_BY_ZONE_NAME:
+        return this.getZoneData();
+      case VIEW_BY_ZONE_CATEGORY:
+        return this.getCategoryData();
+      case VIEW_TOTALS:
+      default:
+        return this.getTotalsData();
+      }
+  }
+
+  getChartOptions() {
+    const options = {
+      isStacked: true,
+      interpolateNulls: true,
+      hAxis: {
+        format: '###.##s',
+        title: 'Time [s]',
+      },
+      vAxis: {
+        format: '#,###KB',
+        title: 'Memory consumption [KBytes]'
+      },
+      chartArea: {left:100, width: '85%', height: '70%'},
+      legend: {position: 'top', maxLines: '1'},
+      pointsVisible: true,
+      pointSize: 3,
+      explorer: {},
+    };
+
+    // Overlay total allocated/used points on top of the graph.
+    const series = {}
+    if (this.selection.data_view == VIEW_TOTALS) {
+      series[0] = {type: 'line', color: "red"};
+      series[1] = {type: 'line', color: "blue"};
+      series[2] = {type: 'line', color: "orange"};
+    } else if (this.selection.show_totals) {
+      series[0] = {type: 'line', color: "red", lineDashStyle: [13, 13]};
+      series[1] = {type: 'line', color: "blue", lineDashStyle: [13, 13]};
+      series[2] = {type: 'line', color: "orange", lineDashStyle: [13, 13]};
+    }
+    return Object.assign(options, {series: series});
+  }
+
+  drawChart() {
+    console.assert(this.data, 'invalid data');
+    console.assert(this.selection, 'invalid selection');
+
+    const chart_data = this.getChartData();
+
+    const data = google.visualization.arrayToDataTable(chart_data);
+    const options = this.getChartOptions();
+    const chart = new google.visualization.AreaChart(this.$('#chart'));
+    this.show();
+    chart.draw(data, google.charts.Line.convertOptions(options));
+  }
+});
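+
+// For reference, getChartData() returns an array of arrays suitable for
+// google.visualization.arrayToDataTable: the first row holds the column
+// labels, and every following row is
+//   [time_in_seconds, <totals in KB when enabled>,
+//    <one KB value per selected zone/category, or null when absent>].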
diff --git a/src/third_party/v8/tools/zone-stats/helper.js b/src/third_party/v8/tools/zone-stats/helper.js
new file mode 100644
index 0000000..a0d0485
--- /dev/null
+++ b/src/third_party/v8/tools/zone-stats/helper.js
@@ -0,0 +1,30 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const KB = 1024;
+const MB = KB * KB;
+const GB = MB * KB;
+const kMillis2Seconds = 1 / 1000;
+
+function formatBytes(bytes) {
+  const units = [' B', ' KB', ' MB', ' GB'];
+  const divisor = 1024;
+  let index = 0;
+  while (index < units.length && bytes >= divisor) {
+    index++;
+    bytes /= divisor;
+  }
+  return bytes.toFixed(2) + units[index];
+}
+
+function formatSeconds(millis) {
+  return (millis * kMillis2Seconds).toFixed(2) + 's';
+}
+
+function defineCustomElement(name, generator) {
+  let htmlTemplatePath = name + '-template.html';
+  fetch(htmlTemplatePath)
+    .then(stream => stream.text())
+    .then(templateText => customElements.define(name, generator(templateText)));
+}
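+
+// Quick sanity examples for the helpers above:
+//   formatBytes(512)    -> "512.00 B"
+//   formatBytes(1536)   -> "1.50 KB"
+//   formatSeconds(2500) -> "2.50s"
+// defineCustomElement('foo', ...) would fetch "foo-template.html" and
+// register the element once the template text has loaded ('foo' is a
+// placeholder name).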
diff --git a/src/third_party/v8/tools/zone-stats/index.html b/src/third_party/v8/tools/zone-stats/index.html
new file mode 100644
index 0000000..a7dfa2d
--- /dev/null
+++ b/src/third_party/v8/tools/zone-stats/index.html
@@ -0,0 +1,93 @@
+<!DOCTYPE html>
+<!-- Copyright 2020 the V8 project authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file. -->
+
+<html lang="en">
+
+<head>
+  <meta charset="UTF-8">
+  <title>V8 Zone Statistics</title>
+  <link href='https://fonts.googleapis.com/css?family=Roboto' rel='stylesheet'>
+  <script
+          src="https://www.gstatic.com/charts/loader.js"></script>
+  <script
+          src="https://cdnjs.cloudflare.com/ajax/libs/pako/1.0.6/pako_inflate.js"
+          integrity="sha256-N1z6ddQzX83fjw8v7uSNe7/MgOmMKdwFUv1+AJMDqNM="
+          crossorigin="anonymous"></script>
+
+  <script src="https://cdnjs.cloudflare.com/ajax/libs/oboe.js/2.1.5/oboe-browser.js"
+          crossorigin="anonymous"></script>
+  <script src="helper.js"></script>
+
+  <script type="module" src="details-selection.js"></script>
+  <script type="module" src="global-timeline.js"></script>
+  <script type="module" src="trace-file-reader.js"></script>
+
+  <style>
+body {
+  font-family: 'Roboto', sans-serif;
+  margin-left: 5%;
+  margin-right: 5%;
+}
+
+  </style>
+  <script>
+'use strict';
+
+google.charts.load('current', {'packages':['line', 'corechart', 'bar']});
+
+function $(id) { return document.querySelector(id); }
+
+function removeAllChildren(node) {
+  while (node.firstChild) {
+    node.removeChild(node.firstChild);
+  }
+}
+
+let state = Object.create(null);
+
+function globalDataChanged(e) {
+  state.data = e.detail;
+  // Emit one entry with the whole model for debugging purposes.
+  console.log(state.data);
+  state.selection = null;
+  $('#global-timeline').selection = state.selection;
+  $('#global-timeline').data = state.data;
+  $('#details-selection').data = state.data;
+}
+
+function globalSelectionChangedA(e) {
+  state.selection = e.detail;
+  console.log(state.selection);
+  $('#global-timeline').selection = state.selection;
+}
+
+  </script>
+</head>
+
+<body>
+  <h1>V8 Zone memory usage statistics</h1>
+  <trace-file-reader onchange="globalDataChanged(event)"></trace-file-reader>
+
+  <details-selection id="details-selection" onchange="globalSelectionChangedA(event)"></details-selection>
+  <global-timeline id="global-timeline"></global-timeline>
+
+  <p>Visualize zone usage profile and statistics that have been gathered using</p>
+  <ul>
+    <li><code>--trace-zone-stats</code> on V8</li>
+    <li>
+      <a
+        href="https://www.chromium.org/developers/how-tos/trace-event-profiling-tool">Chrome's
+        tracing infrastructure</a> collecting data for the category
+      <code>v8.zone_stats</code>.
+    </li>
+  </ul>
+  <p>
+    Note that the visualizer needs to run on a web server due to HTML imports
+    requiring <a
+         href="https://en.wikipedia.org/wiki/Cross-origin_resource_sharing">CORS</a>.
+  </p>
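+  <p>
+    One simple way to do that (an example, not the only option) is to serve
+    this directory locally, e.g. run <code>python3 -m http.server 8000</code>
+    from <code>tools/zone-stats/</code> and open
+    <code>http://localhost:8000/index.html</code>.
+  </p>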
+</body>
+
+</html>
diff --git a/src/third_party/v8/tools/zone-stats/model.js b/src/third_party/v8/tools/zone-stats/model.js
new file mode 100644
index 0000000..80f4523
--- /dev/null
+++ b/src/third_party/v8/tools/zone-stats/model.js
@@ -0,0 +1,92 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+export class Isolate {
+  constructor(address) {
+    this.address = address;
+    this.start = null;
+    this.end = null;
+    this.peakUsageTime = null;
+    // Maps zone name to per-zone statistics.
+    this.zones = new Map();
+    // Zone names sorted by memory usage (from low to high).
+    this.sorted_zone_names = [];
+    // Maps time to total and per-zone memory usages.
+    this.samples = new Map();
+
+    this.peakAllocatedMemory = 0;
+
+    // Maps zone name to its max memory consumption.
+    this.zonePeakMemory = Object.create(null);
+    // Peak memory consumed by a single zone.
+    this.singleZonePeakMemory = 0;
+  }
+
+  finalize() {
+    this.samples.forEach(sample => this.finalizeSample(sample));
+    this.start = Math.floor(this.start);
+    this.end = Math.ceil(this.end);
+    this.sortZoneNamesByPeakMemory();
+  }
+
+  getLabel() {
+    let label = `${this.address}: `;
+    label += ` peak=${formatBytes(this.peakAllocatedMemory)}`;
+    label += ` time=[${this.start}, ${this.end}] ms`;
+    return label;
+  }
+
+  finalizeSample(sample) {
+    const time = sample.time;
+    if (this.start == null) {
+      this.start = time;
+      this.end = time;
+    } else {
+      this.end = Math.max(this.end, time);
+    }
+
+    const allocated = sample.allocated;
+    if (allocated > this.peakAllocatedMemory) {
+      this.peakUsageTime = time;
+      this.peakAllocatedMemory = allocated;
+    }
+
+    const sample_zones = sample.zones;
+    if (sample_zones !== undefined) {
+      sample.zones.forEach((zone_sample, zone_name) => {
+        let zone_stats = this.zones.get(zone_name);
+        if (zone_stats === undefined) {
+          zone_stats = {max_allocated: 0, max_used: 0};
+          this.zones.set(zone_name, zone_stats);
+        }
+
+        zone_stats.max_allocated =
+            Math.max(zone_stats.max_allocated, zone_sample.allocated);
+        zone_stats.max_used = Math.max(zone_stats.max_used, zone_sample.used);
+      });
+    }
+  }
+
+  sortZoneNamesByPeakMemory() {
+    let entries = [...this.zones.keys()];
+    entries.sort((a, b) =>
+      this.zones.get(a).max_allocated - this.zones.get(b).max_allocated
+    );
+    this.sorted_zone_names = entries;
+
+    let max = 0;
+    for (let zone_name of entries) {
+      const zone_peak = this.zones.get(zone_name).max_allocated;
+      this.zonePeakMemory[zone_name] = zone_peak;
+      max = Math.max(max, zone_peak);
+    }
+    this.singleZonePeakMemory = max;
+  }
+
+  getInstanceTypePeakMemory(type) {
+    if (!(type in this.zonePeakMemory)) return 0;
+    return this.zonePeakMemory[type];
+  }
+}
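+
+// For reference, a sample stored in Isolate.samples (as built by
+// trace-file-reader.js) looks roughly like:
+//   { time, allocated, used, freed,
+//     zones: Map(zone_name -> {allocated, used, freed}) or undefined }
+// finalize() derives start/end, peakAllocatedMemory/peakUsageTime and the
+// per-zone peak statistics from those samples.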
diff --git a/src/third_party/v8/tools/zone-stats/trace-file-reader-template.html b/src/third_party/v8/tools/zone-stats/trace-file-reader-template.html
new file mode 100644
index 0000000..ede7ee9
--- /dev/null
+++ b/src/third_party/v8/tools/zone-stats/trace-file-reader-template.html
@@ -0,0 +1,81 @@
+<!-- Copyright 2020 the V8 project authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file. -->
+<style>
+#fileReader {
+  width: 100%;
+  height: 100px;
+  line-height: 100px;
+  text-align: center;
+  border: solid 1px #000000;
+  border-radius: 5px;
+  cursor: pointer;
+  transition: all 0.5s ease-in-out;
+}
+
+#fileReader.done {
+    height: 20px;
+    line-height: 20px;
+}
+
+#fileReader:hover {
+  background-color: #e0edfe ;
+}
+
+.loading #fileReader {
+  cursor: wait;
+}
+
+#fileReader > input {
+  display: none;
+}
+
+
+#loader {
+  display: none;
+}
+
+.loading #loader {
+  display: block;
+  position: fixed;
+  top: 0px;
+  left: 0px;
+  width: 100%;
+  height: 100%;
+  background-color: rgba(255, 255, 255, 0.5);
+}
+
+#spinner {
+  position: absolute;
+  width: 100px;
+  height: 100px;
+  top: 40%;
+  left: 50%;
+  margin-left: -50px;
+  border: 30px solid #000;
+  border-top: 30px solid #36E;
+  border-radius: 50%;
+  animation: spin 1s ease-in-out infinite;
+}
+
+@keyframes spin {
+ 0% {
+    transform: rotate(0deg);
+ }
+ 100% {
+    transform: rotate(360deg);
+ }
+}
+</style>
+
+<section id="fileReaderSection">
+  <div id="fileReader" tabindex=1 >
+    <span id="label">
+      Drag and drop a trace file into this area, or click to choose from disk.
+     </span>
+    <input id="file" type="file" name="file" />
+  </div>
+  <div id="loader">
+    <div id="spinner"></div>
+  </div>
+</section>
diff --git a/src/third_party/v8/tools/zone-stats/trace-file-reader.js b/src/third_party/v8/tools/zone-stats/trace-file-reader.js
new file mode 100644
index 0000000..7b7cb6c
--- /dev/null
+++ b/src/third_party/v8/tools/zone-stats/trace-file-reader.js
@@ -0,0 +1,298 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+import {Isolate} from './model.js';
+
+defineCustomElement('trace-file-reader', (templateText) =>
+ class TraceFileReader extends HTMLElement {
+  constructor() {
+    super();
+    const shadowRoot = this.attachShadow({mode: 'open'});
+    shadowRoot.innerHTML = templateText;
+    this.addEventListener('click', e => this.handleClick(e));
+    this.addEventListener('dragover', e => this.handleDragOver(e));
+    this.addEventListener('drop', e => this.handleChange(e));
+    this.$('#file').addEventListener('change', e => this.handleChange(e));
+    this.$('#fileReader').addEventListener('keydown', e => this.handleKeyEvent(e));
+  }
+
+  $(id) {
+    return this.shadowRoot.querySelector(id);
+  }
+
+  get section() {
+    return this.$('#fileReaderSection');
+  }
+
+  updateLabel(text) {
+    this.$('#label').innerText = text;
+  }
+
+  handleKeyEvent(event) {
+    if (event.key == "Enter") this.handleClick(event);
+  }
+
+  handleClick(event) {
+    this.$('#file').click();
+  }
+
+  handleChange(event) {
+    // Used for drop and file change.
+    event.preventDefault();
+    var host = event.dataTransfer ? event.dataTransfer : event.target;
+    this.readFile(host.files[0]);
+  }
+
+  handleDragOver(event) {
+    event.preventDefault();
+  }
+
+  connectedCallback() {
+    this.$('#fileReader').focus();
+  }
+
+  readFile(file) {
+    if (!file) {
+      this.updateLabel('Failed to load file.');
+      return;
+    }
+    this.$('#fileReader').blur();
+
+    this.section.className = 'loading';
+    const reader = new FileReader();
+
+    if (['application/gzip', 'application/x-gzip'].includes(file.type)) {
+      reader.onload = (e) => {
+        try {
+          // Decode data as strings in 64 KB chunks. Bigger chunks may cause
+          // parsing failures in Oboe.js.
+          const chunkedInflate = new pako.Inflate(
+            {to: 'string', chunkSize: 65536}
+          );
+          let processingState = undefined;
+          chunkedInflate.onData = (chunk) => {
+            if (processingState === undefined) {
+              processingState = this.startProcessing(file, chunk);
+            } else {
+              processingState.processChunk(chunk);
+            }
+          };
+          chunkedInflate.onEnd = () => {
+            if (processingState !== undefined) {
+              const result_data = processingState.endProcessing();
+              this.processLoadedData(file, result_data);
+            }
+          };
+          console.log("======");
+          const textResult = chunkedInflate.push(e.target.result);
+
+          this.section.className = 'success';
+          this.$('#fileReader').classList.add('done');
+        } catch (err) {
+          console.error(err);
+          this.section.className = 'failure';
+        }
+      };
+      // Delay the loading a bit to allow for CSS animations to happen.
+      setTimeout(() => reader.readAsArrayBuffer(file), 0);
+    } else {
+      reader.onload = (e) => {
+        try {
+          // Process the whole file at once.
+          const processingState = this.startProcessing(file, e.target.result);
+          const dataModel = processingState.endProcessing();
+          this.processLoadedData(file, dataModel);
+
+          this.section.className = 'success';
+          this.$('#fileReader').classList.add('done');
+        } catch (err) {
+          console.error(err);
+          this.section.className = 'failure';
+        }
+      };
+      // Delay the loading a bit to allow for CSS animations to happen.
+      setTimeout(() => reader.readAsText(file), 0);
+    }
+  }
+
+  processLoadedData(file, dataModel) {
+    console.log("Trace file parsed successfully.");
+    this.extendAndSanitizeModel(dataModel);
+    this.updateLabel('Finished loading \'' + file.name + '\'.');
+    this.dispatchEvent(new CustomEvent(
+        'change', {bubbles: true, composed: true, detail: dataModel}));
+  }
+
+  createOrUpdateEntryIfNeeded(data, entry) {
+    console.assert(entry.isolate, 'entry should have an isolate');
+    if (!(entry.isolate in data)) {
+      data[entry.isolate] = new Isolate(entry.isolate);
+    }
+  }
+
+  extendAndSanitizeModel(data) {
+    const checkNonNegativeProperty = (obj, property) => {
+      console.assert(obj[property] >= 0, 'negative property', obj, property);
+    };
+
+    Object.values(data).forEach(isolate => isolate.finalize());
+  }
+
+  processOneZoneStatsEntry(data, entry_stats) {
+    this.createOrUpdateEntryIfNeeded(data, entry_stats);
+    const isolate_data = data[entry_stats.isolate];
+    let zones = undefined;
+    const entry_zones = entry_stats.zones;
+    if (entry_zones !== undefined) {
+      zones = new Map();
+      entry_zones.forEach(zone => {
+        // There might be multiple occurrences of the same zone in the set;
+        // combine the numbers in that case.
+        const existing_zone_stats = zones.get(zone.name);
+        if (existing_zone_stats !== undefined) {
+          existing_zone_stats.allocated += zone.allocated;
+          existing_zone_stats.used += zone.used;
+          existing_zone_stats.freed += zone.freed;
+        } else {
+          zones.set(zone.name, { allocated: zone.allocated,
+                                 used: zone.used,
+                                 freed: zone.freed });
+        }
+      });
+    }
+    const time = entry_stats.time;
+    const sample = {
+      time: time,
+      allocated: entry_stats.allocated,
+      used: entry_stats.used,
+      freed: entry_stats.freed,
+      zones: zones
+    };
+    isolate_data.samples.set(time, sample);
+  }
+
+  startProcessing(file, chunk) {
+    const isV8TraceFile = chunk.includes('v8-zone-trace');
+    const processingState =
+        isV8TraceFile ? this.startProcessingAsV8TraceFile(file)
+                      : this.startProcessingAsChromeTraceFile(file);
+
+    processingState.processChunk(chunk);
+    return processingState;
+  }
+
+  startProcessingAsChromeTraceFile(file) {
+    console.log(`Processing log as chrome trace file.`);
+    const data = Object.create(null);  // Final data container.
+    const parseOneZoneEvent = (actual_data) => {
+      if ('stats' in actual_data) {
+        try {
+          const entry_stats = JSON.parse(actual_data.stats);
+          this.processOneZoneStatsEntry(data, entry_stats);
+        } catch (e) {
+          console.error('Unable to parse data set entry', e);
+        }
+      }
+    };
+    const zone_events_filter = (event) => {
+      if (event.name == 'V8.Zone_Stats') {
+        parseOneZoneEvent(event.args);
+      }
+      return oboe.drop;
+    };
+
+    const oboe_stream = oboe();
+    // Trace files support two formats.
+    oboe_stream
+        // 1) {traceEvents: [ data ]}
+        .node('traceEvents.*', zone_events_filter)
+        // 2) [ data ]
+        .node('!.*', zone_events_filter)
+        .fail((errorReport) => {
+          throw new Error("Trace data parse failed: " + errorReport.thrown);
+        });
+
+    let failed = false;
+
+    const processingState = {
+      file: file,
+
+      processChunk(chunk) {
+        if (failed) return false;
+        try {
+          oboe_stream.emit('data', chunk);
+          return true;
+        } catch (e) {
+          console.error('Unable to parse chrome trace file.', e);
+          failed = true;
+          return false;
+        }
+      },
+
+      endProcessing() {
+        if (failed) return null;
+        oboe_stream.emit('end');
+        return data;
+      },
+    };
+    return processingState;
+  }
+
+  startProcessingAsV8TraceFile(file) {
+    console.log('Processing log as V8 trace file.');
+    const data = Object.create(null);  // Final data container.
+
+    const processOneLine = (line) => {
+      try {
+        // Strip away a potentially present adb logcat prefix.
+        line = line.replace(/^I\/v8\s*\(\d+\):\s+/g, '');
+
+        const entry = JSON.parse(line);
+        if (entry === null || entry.type === undefined) return;
+        if ((entry.type === 'v8-zone-trace') && ('stats' in entry)) {
+          const entry_stats = entry.stats;
+          this.processOneZoneStatsEntry(data, entry_stats);
+        } else {
+          console.log('Unknown entry type: ' + entry.type);
+        }
+      } catch (e) {
+        console.log('Unable to parse line: \'' + line + '\' (' + e + ')');
+      }
+    };
+
+    let prev_chunk_leftover = "";
+
+    const processingState = {
+      file: file,
+
+      processChunk(chunk) {
+        const contents = chunk.split('\n');
+        const last_line = contents.pop();
+        const linesCount = contents.length;
+        if (linesCount == 0) {
+          // There was only one line in the chunk, it may still be unfinished.
+          prev_chunk_leftover += last_line;
+        } else {
+          contents[0] = prev_chunk_leftover + contents[0];
+          prev_chunk_leftover = last_line;
+          for (let line of contents) {
+            processOneLine(line);
+          }
+        }
+        return true;
+      },
+
+      endProcessing() {
+        if (prev_chunk_leftover.length > 0) {
+          processOneLine(prev_chunk_leftover);
+          prev_chunk_leftover = "";
+        }
+        return data;
+      },
+    };
+    return processingState;
+  }
+});
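+
+// For reference, a V8 zone-trace line accepted by processOneLine() above is a
+// JSON object such as (all values are illustrative):
+//   {"type": "v8-zone-trace",
+//    "stats": {"isolate": "0x...", "time": 12.3, "allocated": 65536,
+//              "used": 40000, "freed": 0,
+//              "zones": [{"name": "graph-zone", "allocated": 32768,
+//                         "used": 20000, "freed": 0}]}}
+// Chrome trace files are instead scanned for "V8.Zone_Stats" events whose
+// args.stats string contains the same per-sample payload.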