Import Cobalt 20.master.0.234144

Includes the following patches:
  https://cobalt-review.googlesource.com/c/cobalt/+/5590
  by n1214.hwang@samsung.com

  https://cobalt-review.googlesource.com/c/cobalt/+/5530
  by errong.leng@samsung.com

  https://cobalt-review.googlesource.com/c/cobalt/+/5570
  by devin.cai@mediatek.com
diff --git a/src/v8/tools/BUILD.gn b/src/v8/tools/BUILD.gn
index 1c0864d..e6fd743 100644
--- a/src/v8/tools/BUILD.gn
+++ b/src/v8/tools/BUILD.gn
@@ -2,59 +2,65 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-import("../gni/isolate.gni")
+import("//build/config/sanitizers/sanitizers.gni")
+import("../gni/v8.gni")
 
 group("gn_all") {
   testonly = true
 
-  if (v8_test_isolation_mode != "noop") {
-    deps = [
-      ":check-static-initializers_run",
-      ":jsfunfuzz_run",
-      ":run-deopt-fuzzer_run",
-      ":run-gcmole_run",
-      ":run-num-fuzzer_run",
+  data_deps = [
+    ":v8_check_static_initializers",
+    "gcmole:v8_run_gcmole",
+    "jsfunfuzz:v8_jsfunfuzz",
+  ]
+}
+
+group("v8_check_static_initializers") {
+  data_deps = [
+    "..:d8",
+  ]
+
+  data = [
+    "check-static-initializers.sh",
+  ]
+}
+
+group("v8_android_test_runner_deps") {
+  testonly = true
+
+  if (is_android && !build_with_chromium) {
+    data_deps = [
+      "//build/android:test_runner_py",
+    ]
+    data = [
+      # This is used by android.py, but not included by test_runner_py above.
+      "//third_party/catapult/devil/devil/android/perf/",
     ]
   }
 }
 
-v8_isolate_run("check-static-initializers") {
-  deps = [
-    "..:d8_run",
+group("v8_testrunner") {
+  testonly = true
+
+  data_deps = [
+    "..:v8_python_base",
+    "..:v8_dump_build_config",
+    ":v8_android_test_runner_deps",
   ]
 
-  isolate = "check-static-initializers.isolate"
-}
-
-v8_isolate_run("jsfunfuzz") {
-  deps = [
-    "..:d8_run",
+  data = [
+    # Also add the num-fuzzer wrapper script in order to be able to run the
+    # num-fuzzer on all existing isolated V8 test suites.
+    "predictable_wrapper.py",
+    "run-num-fuzzer.py",
+    "run-tests.py",
+    "testrunner/",
   ]
 
-  isolate = "jsfunfuzz/jsfunfuzz.isolate"
-}
-
-v8_isolate_run("run-deopt-fuzzer") {
-  deps = [
-    "..:d8_run",
-  ]
-
-  isolate = "run-deopt-fuzzer.isolate"
-}
-
-v8_isolate_run("run-gcmole") {
-  deps = [
-    "..:d8_run",
-  ]
-
-  isolate = "gcmole/run-gcmole.isolate"
-}
-
-# TODO(machenbach): Add tests as dependencies.
-v8_isolate_run("run-num-fuzzer") {
-  deps = [
-    "..:d8_run",
-  ]
-
-  isolate = "run-num-fuzzer.isolate"
+  if (v8_code_coverage && sanitizer_coverage_flags == "bb,trace-pc-guard") {
+    data += [
+      "sanitizers/sancov_merger.py",
+      "../third_party/llvm/projects/compiler-rt/lib/sanitizer_common/scripts/sancov.py",
+    ]
+  }
 }
diff --git a/src/v8/tools/Makefile.tags b/src/v8/tools/Makefile.tags
new file mode 100644
index 0000000..372824d
--- /dev/null
+++ b/src/v8/tools/Makefile.tags
@@ -0,0 +1,30 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+# Variable default definitions. Override them by exporting them in your shell.
+V8_DIR ?= $(realpath $(dir $(lastword $(MAKEFILE_LIST)))/..)
+
+# Support for the GNU GLOBAL Source Code Tag System.
+$(V8_DIR)/gtags.files: $(wildcard $(addprefix $(V8_DIR)/,$(shell cat $(V8_DIR)/gtags.files 2> /dev/null)))
+	@(cd $(V8_DIR) && find include src test -name '*.h' -o -name '*.cc' -o -name '*.c') > $@
+
+# We need to manually set the stack limit here, to work around bugs in
+# gmake-3.81 and global-5.7.1 on recent 64-bit Linux systems.
+# Using $(wildcard ...) gracefully ignores non-existing files, so that stale
+# gtags.files after switching branches don't cause recipe failures.
+$(V8_DIR)/GPATH $(V8_DIR)/GRTAGS $(V8_DIR)/GSYMS $(V8_DIR)/GTAGS: $(V8_DIR)/gtags.files $(wildcard $(addprefix $(V8_DIR)/,$(shell cat $(V8_DIR)/gtags.files 2> /dev/null)))
+	@cd $(V8_DIR) && bash -c 'ulimit -s 10240 && GTAGSFORCECPP=yes gtags -i -q -f $<'
+
+$(V8_DIR)/tags: $(V8_DIR)/gtags.files $(wildcard $(addprefix $(V8_DIR)/,$(shell cat $(V8_DIR)/gtags.files 2> /dev/null)))
+	@(ctags --version | grep 'Exuberant Ctags' >/dev/null) || \
+	  (echo "Please install Exuberant Ctags (check 'ctags --version')" >&2; false)
+	@cd $(V8_DIR) && ctags --fields=+l -L gtags.files
+
+tags: $(V8_DIR)/tags
+
+tags.clean:
+	@rm -f $(addprefix $(V8_DIR), gtags.files GPATH GRTAGS GSYMS GTAGS tags)
+
+clean: tags.clean
diff --git a/src/v8/tools/__init__.py b/src/v8/tools/__init__.py
new file mode 100644
index 0000000..3841a86
--- /dev/null
+++ b/src/v8/tools/__init__.py
@@ -0,0 +1,4 @@
+#!/usr/bin/env python
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/src/v8/tools/adb-d8.py b/src/v8/tools/adb-d8.py
index 4167146..4d4390f 100755
--- a/src/v8/tools/adb-d8.py
+++ b/src/v8/tools/adb-d8.py
@@ -158,7 +158,7 @@
   # command.
   adb = os.path.join(
     script_dir,
-    "../third_party/android_tools/sdk/platform-tools/adb"
+    "../third_party/android_sdk/public/platform-tools/adb"
   )
 
   # Read off any command line flags before build_dir (or --). Do this
diff --git a/src/v8/tools/android-run.py b/src/v8/tools/android-run.py
index 4765f86..66d333a 100755
--- a/src/v8/tools/android-run.py
+++ b/src/v8/tools/android-run.py
@@ -35,6 +35,9 @@
 # and output special error string in case of non-zero exit code.
 # Then we parse the output of 'adb shell' and look for that error string.
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import os
 from os.path import join, dirname, abspath
 import subprocess
@@ -58,8 +61,8 @@
   exit_code = process.wait()
   os.close(fd_out)
   os.close(fd_err)
-  output = file(outname).read()
-  errors = file(errname).read()
+  output = open(outname).read()
+  errors = open(errname).read()
   os.unlink(outname)
   os.unlink(errname)
   sys.stdout.write(output)
diff --git a/src/v8/tools/avg.py b/src/v8/tools/avg.py
new file mode 100755
index 0000000..5741acd
--- /dev/null
+++ b/src/v8/tools/avg.py
@@ -0,0 +1,248 @@
+#!/usr/bin/env python
+
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can
+# be found in the LICENSE file.
+"""
+This script averages numbers output from another script. It is useful
+to average over a benchmark that outputs one or more results of the form
+  <key> <number> <unit>
+key and unit are optional, but only one number per line is processed.
+
+For example, if
+  $ bch --allow-natives-syntax toNumber.js
+outputs
+  Number('undefined'):  155763
+  (+'undefined'):  193050 Kps
+  23736 Kps
+then
+  $ avg.py 10 bch --allow-natives-syntax toNumber.js
+will output
+  [10/10] (+'undefined')         : avg 192,240.40 stddev   6,486.24 (185,529.00 - 206,186.00)
+  [10/10] Number('undefined')    : avg 156,990.10 stddev  16,327.56 (144,718.00 - 202,840.00) Kps
+  [10/10] [default]              : avg  22,885.80 stddev   1,941.80 ( 17,584.00 -  24,266.00) Kps
+"""
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import argparse
+import math
+import re
+import signal
+import subprocess
+import sys
+
+PARSER = argparse.ArgumentParser(
+    description="A script that averages numbers from another script's output",
+    epilog="Example:\n\tavg.py 10 bash -c \"echo A: 100; echo B 120; sleep .1\""
+)
+PARSER.add_argument(
+    'repetitions',
+    type=int,
+    help="number of times the command should be repeated")
+PARSER.add_argument(
+    'command',
+    nargs=argparse.REMAINDER,
+    help="command to run (no quotes needed)")
+PARSER.add_argument(
+    '--echo',
+    '-e',
+    action='store_true',
+    default=False,
+    help="set this flag to echo the command's output")
+
+ARGS = vars(PARSER.parse_args())
+
+if not ARGS['command']:
+  print("No command provided.")
+  exit(1)
+
+
+class FieldWidth:
+
+  def __init__(self, points=0, key=0, average=0, stddev=0, min_width=0, max_width=0):
+    self.widths = dict(points=points, key=key, average=average, stddev=stddev,
+                       min=min_width, max=max_width)
+
+  def max_widths(self, other):
+    self.widths = {k: max(v, other.widths[k]) for k, v in self.widths.items()}
+
+  def __getattr__(self, key):
+    return self.widths[key]
+
+
+def fmtS(string, width=0):
+  return "{0:<{1}}".format(string, width)
+
+
+def fmtN(num, width=0):
+  return "{0:>{1},.2f}".format(num, width)
+
+
+def fmt(num):
+  return "{0:>,.2f}".format(num)
+
+
+def format_line(points, key, average, stddev, min_value, max_value,
+                unit_string, widths):
+  return "{:>{}};  {:<{}};  {:>{}};  {:>{}};  {:>{}};  {:>{}};  {}".format(
+      points, widths.points,
+      key, widths.key,
+      average, widths.average,
+      stddev, widths.stddev,
+      min_value, widths.min,
+      max_value, widths.max,
+      unit_string)
+
+
+def fmt_reps(msrmnt):
+  rep_string = str(ARGS['repetitions'])
+  return "[{0:>{1}}/{2}]".format(msrmnt.size(), len(rep_string), rep_string)
+
+
+class Measurement:
+
+  def __init__(self, key, unit):
+    self.key = key
+    self.unit = unit
+    self.values = []
+    self.average = 0
+    self.count = 0
+    self.M2 = 0
+    self.min = float("inf")
+    self.max = -float("inf")
+
+  def addValue(self, value):
+    try:
+      num_value = float(value)
+      self.values.append(num_value)
+      self.min = min(self.min, num_value)
+      self.max = max(self.max, num_value)
+      self.count = self.count + 1
+      delta = num_value - self.average
+      self.average = self.average + delta / self.count
+      delta2 = num_value - self.average
+      self.M2 = self.M2 + delta * delta2
+    except ValueError:
+      print("Ignoring non-numeric value", value)
+
+  def status(self, widths):
+    return "{} {}: avg {} stddev {} ({} - {}) {}".format(
+        fmt_reps(self),
+        fmtS(self.key, widths.key), fmtN(self.average, widths.average),
+        fmtN(self.stddev(), widths.stddev), fmtN(self.min, widths.min),
+        fmtN(self.max, widths.max), fmtS(self.unit_string()))
+
+  def result(self, widths):
+    return format_line(self.size(), self.key, fmt(self.average),
+                       fmt(self.stddev()), fmt(self.min),
+                       fmt(self.max), self.unit_string(),
+                       widths)
+
+  def unit_string(self):
+    if not self.unit:
+      return ""
+    return self.unit
+
+  def variance(self):
+    if self.count < 2:
+      return float('NaN')
+    return self.M2 / (self.count - 1)
+
+  def stddev(self):
+    return math.sqrt(self.variance())
+
+  def size(self):
+    return len(self.values)
+
+  def widths(self):
+    return FieldWidth(
+        points=len("{}".format(self.size())) + 2,
+        key=len(self.key),
+        average=len(fmt(self.average)),
+        stddev=len(fmt(self.stddev())),
+        min_width=len(fmt(self.min)),
+        max_width=len(fmt(self.max)))
+
+
+def result_header(widths):
+  return format_line("#/{}".format(ARGS['repetitions']),
+                     "id", "avg", "stddev", "min", "max", "unit", widths)
+
+
+class Measurements:
+
+  def __init__(self):
+    self.all = {}
+    self.default_key = '[default]'
+    self.max_widths = FieldWidth(
+        points=len("{}".format(ARGS['repetitions'])) + 2,
+        key=len("id"),
+        average=len("avg"),
+        stddev=len("stddev"),
+        min_width=len("min"),
+        max_width=len("max"))
+    self.last_status_len = 0
+
+  def record(self, key, value, unit):
+    if not key:
+      key = self.default_key
+    if key not in self.all:
+      self.all[key] = Measurement(key, unit)
+    self.all[key].addValue(value)
+    self.max_widths.max_widths(self.all[key].widths())
+
+  def any(self):
+    if self.all:
+      return next(iter(self.all.values()))
+    return None
+
+  def print_results(self):
+    print("{:<{}}".format("", self.last_status_len), end="\r")
+    print(result_header(self.max_widths), sep=" ")
+    for key in sorted(self.all):
+      print(self.all[key].result(self.max_widths), sep=" ")
+
+  def print_status(self):
+    status = "No results found. Check format?"
+    measurement = MEASUREMENTS.any()
+    if measurement:
+      status = measurement.status(MEASUREMENTS.max_widths)
+    print("{:<{}}".format(status, self.last_status_len), end="\r")
+    self.last_status_len = len(status)
+
+
+MEASUREMENTS = Measurements()
+
+
+def signal_handler(signum, frame):
+  print("", end="\r")
+  MEASUREMENTS.print_results()
+  sys.exit(0)
+
+
+signal.signal(signal.SIGINT, signal_handler)
+
+SCORE_REGEX = (r'\A((console.timeEnd: )?'
+               r'(?P<key>[^\s:,]+)[,:]?)?'
+               r'(^\s*|\s+)'
+               r'(?P<value>[0-9]+(.[0-9]+)?)'
+               r'\ ?(?P<unit>[^\d\W]\w*)?[.\s]*\Z')
+
+for x in range(0, ARGS['repetitions']):
+  proc = subprocess.Popen(ARGS['command'], stdout=subprocess.PIPE)
+  for line in proc.stdout:
+    if ARGS['echo']:
+      print(line.decode(), end="")
+    for m in re.finditer(SCORE_REGEX, line.decode()):
+      MEASUREMENTS.record(m.group('key'), m.group('value'), m.group('unit'))
+  proc.wait()
+  if proc.returncode != 0:
+    print("Child exited with status %d" % proc.returncode)
+    break
+
+  MEASUREMENTS.print_status()
+
+# Print final results
+MEASUREMENTS.print_results()
diff --git a/src/v8/tools/bash-completion.sh b/src/v8/tools/bash-completion.sh
index 5b9f7f5..27e73b7 100755
--- a/src/v8/tools/bash-completion.sh
+++ b/src/v8/tools/bash-completion.sh
@@ -37,11 +37,11 @@
 _v8_flag() {
   local cur defines targets
   cur="${COMP_WORDS[COMP_CWORD]}"
-  defines=$(cat $v8_source/src/flag-definitions.h \
+  defines=$(cat $v8_source/src/flags/flag-definitions.h \
     | grep "^DEFINE" \
     | grep -v "DEFINE_IMPLICATION" \
     | sed -e 's/_/-/g'; \
-    cat $v8_source/src/flag-definitions.h \
+    cat $v8_source/src/flags/flag-definitions.h \
     | grep "^  V(harmony_" \
     | sed -e 's/^  V/DEFINE-BOOL/' \
     | sed -e 's/_/-/g')
@@ -49,7 +49,7 @@
     | sed -ne 's/^DEFINE-[^(]*(\([^,]*\).*/--\1/p'; \
     echo "$defines" \
     | sed -ne 's/^DEFINE-BOOL(\([^,]*\).*/--no\1/p'; \
-    cat $v8_source/src/d8.cc \
+    cat $v8_source/src/d8/d8.cc \
     | grep "strcmp(argv\[i\]" \
     | sed -ne 's/^[^"]*"--\([^"]*\)".*/--\1/p')
   COMPREPLY=($(compgen -W "$targets" -- "$cur"))
diff --git a/src/v8/tools/bigint-tester.py b/src/v8/tools/bigint-tester.py
index 0452a0d..0940369 100755
--- a/src/v8/tools/bigint-tester.py
+++ b/src/v8/tools/bigint-tester.py
@@ -3,6 +3,9 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import argparse
 import math
 import multiprocessing
@@ -13,9 +16,9 @@
 import tempfile
 
 # Configuration.
-kChars = "0123456789abcdefghijklmnopqrstuvwxyz"
+kChars = "0123456789abcdef"
 kBase = 16
-kLineLength = 71  # A bit less than 80.
+kLineLength = 70  # A bit less than 80.
 kNumInputsGenerate = 20
 kNumInputsStress = 1000
 
@@ -30,8 +33,6 @@
 // found in the LICENSE file.
 
 // Generated by %s.
-
-// Flags: --harmony-bigint
 """ % sys.argv[0]
 
 TEST_BODY = """
@@ -46,29 +47,36 @@
 }"""
 
 def GenRandom(length, negative=kRandom):
-  if length == 0: return "0"
+  if length == 0: return "0n"
   s = []
   if negative == kYes or (negative == kRandom and (random.randint(0, 1) == 0)):
     s.append("-")  # 50% chance of negative.
+  s.append("0x")
   s.append(kChars[random.randint(1, kBase - 1)])  # No leading zero.
   for i in range(1, length):
     s.append(kChars[random.randint(0, kBase - 1)])
+  s.append("n")
   return "".join(s)
 
-def Format(x, base):
+def Parse(x):
+  assert x[-1] == 'n', x
+  return int(x[:-1], kBase)
+
+def Format(x):
   original = x
   negative = False
-  if x == 0: return "0"
+  if x == 0: return "0n"
   if x < 0:
     negative = True
     x = -x
   s = ""
   while x > 0:
-    s = kChars[x % base] + s
-    x = x / base
+    s = kChars[x % kBase] + s
+    x = x / kBase
+  s = "0x" + s + "n"
   if negative:
     s = "-" + s
-  assert int(s, base) == original
+  assert Parse(s) == original
   return s
 
 class TestGenerator(object):
@@ -102,7 +110,7 @@
       with open(path, "w") as f:
         f.write(self.EmitData(count))
         f.write(self.EmitTestBody())
-      return subprocess.call("%s --harmony-bigint %s" % (binary, path),
+      return subprocess.call("%s %s" % (binary, path),
                              shell=True)
     finally:
       os.close(fd)
@@ -120,17 +128,16 @@
   # Subclasses should not override anything below.
   def EmitOne(self):
     x_str = self.GenerateInput()
-    x_num = int(x_str, kBase)
+    x_num = Parse(x_str)
     result_num = self.GenerateResult(x_num)
-    result_str = Format(result_num, kBase)
-    return "{\n  a: \"%s\",\n  r: \"%s\"\n}" % (x_str, result_str)
+    result_str = Format(result_num)
+    return "{\n  a: %s,\n  r: %s\n}" % (x_str, result_str)
 
   def EmitTestCore(self):
     return """\
-  var a = BigInt.parseInt(d.a, %(base)d);
-  var r = %(op)sa;
-  if (d.r !== r.toString(%(base)d)) {
-    print("Input:    " + a.toString(%(base)d));
+  var r = %(op)sd.a;
+  if (d.r !== r) {
+    print("Input:    " + d.a.toString(%(base)d));
     print("Result:   " + r.toString(%(base)d));
     print("Expected: " + d.r);
     error_count++;
@@ -152,21 +159,19 @@
   # Subclasses should not override anything below.
   def EmitOne(self):
     left_str, right_str = self.GenerateInputs()
-    left_num = int(left_str, kBase)
-    right_num = int(right_str, kBase)
+    left_num = Parse(left_str)
+    right_num = Parse(right_str)
     result_num = self.GenerateResult(left_num, right_num)
-    result_str = Format(result_num, kBase)
-    return ("{\n  a: \"%s\",\n  b: \"%s\",\n  r: \"%s\"\n}" %
+    result_str = Format(result_num)
+    return ("{\n  a: %s,\n  b: %s,\n  r: %s\n}" %
             (left_str, right_str, result_str))
 
   def EmitTestCore(self):
     return """\
-  var a = BigInt.parseInt(d.a, %(base)d);
-  var b = BigInt.parseInt(d.b, %(base)d);
-  var r = a %(op)s b;
-  if (d.r !== r.toString(%(base)d)) {
-    print("Input A:  " + a.toString(%(base)d));
-    print("Input B:  " + b.toString(%(base)d));
+  var r = d.a %(op)s d.b;
+  if (d.r !== r) {
+    print("Input A:  " + d.a.toString(%(base)d));
+    print("Input B:  " + d.b.toString(%(base)d));
     print("Result:   " + r.toString(%(base)d));
     print("Expected: " + d.r);
     print("Op: %(op)s");
@@ -292,7 +297,7 @@
   return RunOne(*args)
 def RunAll(args):
   for op in args.op:
-    for r in xrange(args.runs):
+    for r in range(args.runs):
       yield (op, args.num_inputs, args.binary)
 
 def Main():
diff --git a/src/v8/tools/blink_tests/TestExpectations b/src/v8/tools/blink_tests/TestExpectations
index 3655c5c..e69de29 100644
--- a/src/v8/tools/blink_tests/TestExpectations
+++ b/src/v8/tools/blink_tests/TestExpectations
@@ -1,5 +0,0 @@
-[ Linux ] virtual/pointerevent/fast/events/mouse-cursor-style-change-iframe.html [ Skip ]
-
-# Turn off Slimming Paint tests on linux.
-[ Linux ] virtual/slimmingpaint/ [ Skip ]
-
diff --git a/src/v8/tools/callstats.html b/src/v8/tools/callstats.html
index 2618b50..1ceca83 100644
--- a/src/v8/tools/callstats.html
+++ b/src/v8/tools/callstats.html
@@ -1,3 +1,4 @@
+<!DOCTYPE html>
 <html>
 <!--
 Copyright 2016 the V8 project authors. All rights reserved.  Use of this source
@@ -5,7 +6,8 @@
 -->
 
 <head>
-  <meta charset="UTF-8">
+  <meta charset="utf-8">
+  <title>V8 Runtime Stats Komparator</title>
   <style>
     body {
       font-family: arial;
@@ -228,8 +230,8 @@
       display: none;
     }
   </style>
-  <script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
-  <script type="text/javascript">
+  <script src="https://www.gstatic.com/charts/loader.js"></script>
+  <script>
     "use strict"
     google.charts.load('current', {packages: ['corechart']});
 
@@ -957,7 +959,7 @@
       }
     }
   </script>
-  <script type="text/javascript">
+  <script>
   "use strict"
     // =========================================================================
     // Helpers
@@ -1058,7 +1060,7 @@
     }
 
   </script>
-  <script type="text/javascript">
+  <script>
   "use strict"
     // =========================================================================
     // EventHandlers
@@ -1305,7 +1307,7 @@
       window.open(url,'_blank');
     }
   </script>
-  <script type="text/javascript">
+  <script>
   "use strict"
     // =========================================================================
     class Versions {
@@ -1507,6 +1509,7 @@
         this.groups = [
           this.total,
           Group.groups.get('ic').entry(),
+          Group.groups.get('optimize-background').entry(),
           Group.groups.get('optimize').entry(),
           Group.groups.get('compile-background').entry(),
           Group.groups.get('compile').entry(),
@@ -1715,14 +1718,16 @@
     }
     Group.add('total', new Group('Total', /.*Total.*/, '#BBB'));
     Group.add('ic', new Group('IC', /.*IC_.*/, "#3366CC"));
+    Group.add('optimize-background', new Group('Optimize-Background',
+        /(.*OptimizeConcurrent.*)|RecompileConcurrent.*/, "#702000"));
     Group.add('optimize', new Group('Optimize',
         /StackGuard|.*Optimize.*|.*Deoptimize.*|Recompile.*/, "#DC3912"));
     Group.add('compile-background', new Group('Compile-Background',
-        /(.*CompileBackground.*)/, "#b9a720"));
+        /(.*CompileBackground.*)/, "#b08000"));
     Group.add('compile', new Group('Compile',
         /(^Compile.*)|(.*_Compile.*)/, "#FFAA00"));
     Group.add('parse-background',
-        new Group('Parse-Background', /.*ParseBackground.*/, "#af744d"));
+        new Group('Parse-Background', /.*ParseBackground.*/, "#c05000"));
     Group.add('parse', new Group('Parse', /.*Parse.*/, "#FF6600"));
     Group.add('callback', new Group('Blink C++', /.*Callback.*/, "#109618"));
     Group.add('api', new Group('API', /.*API.*/, "#990099"));
diff --git a/src/v8/tools/callstats.py b/src/v8/tools/callstats.py
index 5215d63..7c5bd4a 100755
--- a/src/v8/tools/callstats.py
+++ b/src/v8/tools/callstats.py
@@ -17,6 +17,9 @@
 For each command, you can try ./runtime-call-stats.py help command.
 '''
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import argparse
 import json
 import os
@@ -33,6 +36,9 @@
 from math import sqrt
 
 
+MAX_NOF_RETRIES = 5
+
+
 # Run benchmarks.
 
 def print_command(cmd_args):
@@ -43,7 +49,7 @@
     elif ' ' in arg:
       arg = "'{}'".format(arg)
     return arg
-  print " ".join(map(fix_for_printing, cmd_args))
+  print(" ".join(map(fix_for_printing, cmd_args)))
 
 
 def start_replay_server(args, sites, discard_output=True):
@@ -63,15 +69,15 @@
       "--inject_scripts=deterministic.js,{}".format(injection),
       args.replay_wpr,
   ]
-  print "=" * 80
+  print("=" * 80)
   print_command(cmd_args)
   if discard_output:
     with open(os.devnull, 'w') as null:
       server = subprocess.Popen(cmd_args, stdout=null, stderr=null)
   else:
       server = subprocess.Popen(cmd_args)
-  print "RUNNING REPLAY SERVER: %s with PID=%s" % (args.replay_bin, server.pid)
-  print "=" * 80
+  print("RUNNING REPLAY SERVER: %s with PID=%s" % (args.replay_bin, server.pid))
+  print("=" * 80)
   return {'process': server, 'injection': injection}
 
 
@@ -82,7 +88,7 @@
 
 
 def generate_injection(f, sites, refreshes=0):
-  print >> f, """\
+  print("""\
 (function() {
   var s = window.sessionStorage.getItem("refreshCounter");
   var refreshTotal = """, refreshes, """;
@@ -124,7 +130,7 @@
   var sites =
     """, json.dumps(sites), """;
   onLoad(window.location.href);
-})();"""
+})();""", file=f)
 
 def get_chrome_flags(js_flags, user_data_dir, arg_delimiter=""):
   return [
@@ -137,6 +143,8 @@
       "--no-first-run",
       "--user-data-dir={}{}{}".format(arg_delimiter, user_data_dir,
                                       arg_delimiter),
+      "--data-path={}{}{}".format(arg_delimiter,
+          os.path.join(user_data_dir, 'content-shell-data'), arg_delimiter),
     ]
 
 def get_chrome_replay_flags(args, arg_delimiter=""):
@@ -156,9 +164,9 @@
     ]
 
 def run_site(site, domain, args, timeout=None):
-  print "="*80
-  print "RUNNING DOMAIN %s" % domain
-  print "="*80
+  print("="*80)
+  print("RUNNING DOMAIN %s" % domain)
+  print("="*80)
   result_template = "{domain}#{count}.txt" if args.repeat else "{domain}.txt"
   count = 0
   if timeout is None: timeout = args.timeout
@@ -177,7 +185,7 @@
           user_data_dir = args.user_data_dir
         else:
           user_data_dir = tempfile.mkdtemp(prefix="chr_")
-        js_flags = "--runtime-call-stats --noconcurrent-recompilation"
+        js_flags = "--runtime-call-stats"
         if args.replay_wpr: js_flags += " --allow-natives-syntax"
         if args.js_flags: js_flags += " " + args.js_flags
         chrome_flags = get_chrome_flags(js_flags, user_data_dir)
@@ -191,9 +199,9 @@
             "timeout", str(timeout),
             args.with_chrome
         ] + chrome_flags + [ site ]
-        print "- " * 40
+        print("- " * 40)
         print_command(cmd_args)
-        print "- " * 40
+        print("- " * 40)
         with open(result, "wt") as f:
           with open(args.log_stderr or os.devnull, 'at') as err:
             status = subprocess.call(cmd_args, stdout=f, stderr=err)
@@ -207,13 +215,17 @@
         if os.path.isfile(result) and os.path.getsize(result) > 0:
           if args.print_url:
             with open(result, "at") as f:
-              print >> f
-              print >> f, "URL: {}".format(site)
+              print(file=f)
+              print("URL: {}".format(site), file=f)
           retries_since_good_run = 0
           break
-        if retries_since_good_run < 6:
-          timeout += 2 ** retries_since_good_run
-          retries_since_good_run += 1
+        if retries_since_good_run > MAX_NOF_RETRIES:
+          # Abort after too many retries, no point in ever increasing the
+          # timeout.
+          print("TOO MANY EMPTY RESULTS ABORTING RUN")
+          return
+        timeout += 2 ** retries_since_good_run
+        retries_since_good_run += 1
         print("EMPTY RESULT, REPEATING RUN ({})".format(
             retries_since_good_run));
       finally:
@@ -233,6 +245,8 @@
           if item['timeout'] > args.timeout: item['timeout'] = args.timeout
           sites.append(item)
     except ValueError:
+      args.error("Warning: Could not read sites file as JSON, falling back to "
+                 "primitive file")
       with open(args.sites_file, "rt") as f:
         for line in f:
           line = line.strip()
@@ -283,7 +297,7 @@
     # Run them.
     for site, domain, count, timeout in L:
       if count is not None: domain = "{}%{}".format(domain, count)
-      print(site, domain, timeout)
+      print((site, domain, timeout))
       run_site(site, domain, args, timeout)
   finally:
     if replay_server:
@@ -342,11 +356,22 @@
            'stddev': stddev, 'min': low, 'max': high, 'ci': ci }
 
 
+def add_category_total(entries, groups, category_prefix):
+  group_data = { 'time': 0, 'count': 0 }
+  for group_name, regexp in groups:
+    if not group_name.startswith('Group-' + category_prefix): continue
+    group_data['time'] += entries[group_name]['time']
+    group_data['count'] += entries[group_name]['count']
+  entries['Group-' + category_prefix + '-Total'] = group_data
+
+
 def read_stats(path, domain, args):
   groups = [];
   if args.aggregate:
     groups = [
         ('Group-IC', re.compile(".*IC_.*")),
+        ('Group-OptimizeBackground',
+         re.compile(".*OptimizeConcurrent.*|RecompileConcurrent.*")),
         ('Group-Optimize',
          re.compile("StackGuard|.*Optimize.*|.*Deoptimize.*|Recompile.*")),
         ('Group-CompileBackground', re.compile("(.*CompileBackground.*)")),
@@ -398,20 +423,10 @@
       group_data['time'] += entries[group_name]['time']
       group_data['count'] += entries[group_name]['count']
     entries['Group-Total-V8'] = group_data
-    # Calculate the Parse-Total group
-    group_data = { 'time': 0, 'count': 0 }
-    for group_name, regexp in groups:
-      if not group_name.startswith('Group-Parse'): continue
-      group_data['time'] += entries[group_name]['time']
-      group_data['count'] += entries[group_name]['count']
-    entries['Group-Parse-Total'] = group_data
-    # Calculate the Compile-Total group
-    group_data = { 'time': 0, 'count': 0 }
-    for group_name, regexp in groups:
-      if not group_name.startswith('Group-Compile'): continue
-      group_data['time'] += entries[group_name]['time']
-      group_data['count'] += entries[group_name]['count']
-    entries['Group-Compile-Total'] = group_data
+    # Calculate the Parse-Total, Compile-Total and Optimize-Total groups
+    add_category_total(entries, groups, 'Parse')
+    add_category_total(entries, groups, 'Compile')
+    add_category_total(entries, groups, 'Optimize')
     # Append the sums as single entries to domain.
     for key in entries:
       if key not in domain: domain[key] = { 'time_list': [], 'count_list': [] }
@@ -447,11 +462,11 @@
     def stats(s, units=""):
       conf = "{:0.1f}({:0.2f}%)".format(s['ci']['abs'], s['ci']['perc'])
       return "{:8.1f}{} +/- {:15s}".format(s['average'], units, conf)
-    print "{:>50s}  {}  {}".format(
+    print("{:>50s}  {}  {}".format(
       key,
       stats(value['time_stat'], units="ms"),
       stats(value['count_stat'])
-    )
+    ))
   # Print and calculate partial sums, if necessary.
   for i in range(low, high):
     print_entry(*L[i])
@@ -467,7 +482,7 @@
         partial['count_list'][j] += v
   # Print totals, if necessary.
   if args.totals:
-    print '-' * 80
+    print('-' * 80)
     if args.limit != 0 and not args.aggregate:
       partial['time_stat'] = statistics(partial['time_list'])
       partial['count_stat'] = statistics(partial['count_list'])
@@ -488,9 +503,9 @@
     create_total_page_stats(domains, args)
   for i, domain in enumerate(sorted(domains)):
     if len(domains) > 1:
-      if i > 0: print
-      print "{}:".format(domain)
-      print '=' * 80
+      if i > 0: print()
+      print("{}:".format(domain))
+      print('=' * 80)
     domain_stats = domains[domain]
     for key in domain_stats:
       domain_stats[key]['time_stat'] = \
@@ -530,10 +545,9 @@
   # Add a new "Total" page containing the summed up metrics.
   domains['Total'] = total
 
+# Generate Raw JSON file.
 
-# Generate JSON file.
-
-def do_json(args):
+def _read_logs(args):
   versions = {}
   for path in args.logdirs:
     if os.path.isdir(path):
@@ -547,6 +561,36 @@
             if domain not in versions[version]: versions[version][domain] = {}
             read_stats(os.path.join(root, filename),
                        versions[version][domain], args)
+
+  return versions
+
+def do_raw_json(args):
+  versions = _read_logs(args)
+
+  for version, domains in versions.items():
+    if args.aggregate:
+      create_total_page_stats(domains, args)
+    for domain, entries in domains.items():
+      raw_entries = []
+      for name, value in entries.items():
+        # We don't want the calculated sum in the JSON file.
+        if name == "Sum": continue
+        raw_entries.append({
+          'name': name,
+          'duration': value['time_list'],
+          'count': value['count_list'],
+        })
+
+      domains[domain] = raw_entries
+
+  print(json.dumps(versions, separators=(',', ':')))
+
+
+# Generate JSON file.
+
+def do_json(args):
+  versions = _read_logs(args)
+
   for version, domains in versions.items():
     if args.aggregate:
       create_total_page_stats(domains, args)
@@ -563,7 +607,7 @@
           entry.append(round(s['ci']['perc'], 2))
         stats.append(entry)
       domains[domain] = stats
-  print json.dumps(versions, separators=(',', ':'))
+  print(json.dumps(versions, separators=(',', ':')))
 
 
 # Help.
@@ -644,7 +688,7 @@
         "-l", "--log-stderr", type=str, metavar="<path>",
         help="specify where chrome's stderr should go (default: /dev/null)")
     subparser.add_argument(
-        "sites", type=str, metavar="<URL>", nargs="*",
+        "--sites", type=str, metavar="<URL>", nargs="*",
         help="specify benchmark website")
   add_replay_args(subparsers["run"])
 
@@ -691,6 +735,20 @@
       help="Create aggregated entries. Adds Group-* entries at the toplevel. " \
       "Additionally creates a Total page with all entries.")
 
+  # Command: raw-json.
+  subparsers["raw-json"] = subparser_adder.add_parser(
+      "raw-json", help="Collect raw results from 'run' command into " \
+          "a single json file.")
+  subparsers["raw-json"].set_defaults(
+      func=do_raw_json, error=subparsers["json"].error)
+  subparsers["raw-json"].add_argument(
+      "logdirs", type=str, metavar="<logdir>", nargs="*",
+      help="specify directories with log files to parse")
+  subparsers["raw-json"].add_argument(
+      "--aggregate", dest="aggregate", action="store_true", default=False,
+      help="Create aggregated entries. Adds Group-* entries at the toplevel. " \
+      "Additionally creates a Total page with all entries.")
+
   # Command: help.
   subparsers["help"] = subparser_adder.add_parser(
       "help", help="help information")
diff --git a/src/v8/tools/callstats.py.vpython b/src/v8/tools/callstats.py.vpython
new file mode 100644
index 0000000..11e3f34
--- /dev/null
+++ b/src/v8/tools/callstats.py.vpython
@@ -0,0 +1,43 @@
+# This is a vpython "spec" file.
+#
+# It describes patterns for python wheel dependencies of the python scripts in
+# the callstats.py, particularly for dependencies that have compiled components
+# (since pure-python dependencies can be easily vendored into third_party).
+#
+# When vpython is invoked, it finds this file and builds a python VirtualEnv,
+# containing all of the dependencies described in this file, fetching them from
+# CIPD (the "Chrome Infrastructure Package Deployer" service). Unlike `pip`,
+# this never requires the end-user machine to have a working python extension
+# compilation environment. All of these packages are built using:
+#   https://chromium.googlesource.com/infra/infra/+/master/infra/tools/dockerbuild/
+#
+# All python scripts in the repo share this same spec, to avoid dependency
+# fragmentation.
+#
+# If you have depot_tools installed in your $PATH, you can invoke python scripts
+# in this repo by running them as you normally would run them, except
+# substituting `vpython` instead of `python` on the command line, e.g.:
+#   vpython path/to/script.py some --arguments
+#
+# Read more about `vpython` and how to modify this file here:
+#   https://chromium.googlesource.com/infra/infra/+/master/doc/users/vpython.md
+
+python_version: "2.7"
+
+wheel: <
+  name: "infra/python/wheels/numpy/${vpython_platform}"
+  version: "version:1.11.3"
+>
+
+wheel: <
+  name: "infra/python/wheels/scipy/${vpython_platform}"
+  version: "version:0.19.0"
+  match_tag: <
+    abi: "cp27mu"
+    platform: "manylinux1_i686"
+  >
+  match_tag: <
+    abi: "cp27mu"
+    platform: "manylinux1_x86_64"
+  >
+>
diff --git a/src/v8/tools/cfi/blacklist.txt b/src/v8/tools/cfi/blacklist.txt
index 0ad565e..9886fd3 100644
--- a/src/v8/tools/cfi/blacklist.txt
+++ b/src/v8/tools/cfi/blacklist.txt
@@ -1,4 +1,26 @@
 # All std:: types
 # This should be possible to remove, if/when we build against
 # a statically linked libc++.
-type:std::*
\ No newline at end of file
+type:std::*
+
+# Following entries taken from Chromium's tools/cfi/blacklist.txt
+[cfi-icall]
+
+######### Function pointers cast to incorrect type signatures
+
+# libicu is currently compiled such that in libicu the 'UChar' type is
+# defined as a char16_t internally, but for the rest of chromium it's an
+# unsigned short, causing mismatched type signatures for icalls to/from icu
+# v8/src/intl.cc
+fun:*LocaleConvertCase*
+
+# PropertyCallbackArguments::Call methods cast function pointers
+src:*src/api/api-arguments-inl.h
+
+# v8 callback that casts argument template parameters
+fun:*PendingPhantomCallback*Invoke*
+
+# weak_callback_ is cast from original type.
+fun:*GlobalHandles*PostGarbageCollectionProcessing*
+
+fun:*InvokeAccessorGetterCallback*
diff --git a/src/v8/tools/check-static-initializers.sh b/src/v8/tools/check-static-initializers.sh
index da43170..fdd1e84 100755
--- a/src/v8/tools/check-static-initializers.sh
+++ b/src/v8/tools/check-static-initializers.sh
@@ -30,8 +30,8 @@
 # initializer in d8 matches the one defined below.
 
 # Allow:
-#  - _GLOBAL__I__ZN2v810LineEditor6first_E
-#  - _GLOBAL__I__ZN2v88internal32AtomicOps_Internalx86CPUFeaturesE
+# _GLOBAL__sub_I_d8.cc
+# _GLOBAL__sub_I_iostream.cpp
 expected_static_init_count=2
 
 v8_root=$(readlink -f $(dirname $BASH_SOURCE)/../)
diff --git a/src/v8/tools/check-unused-symbols.sh b/src/v8/tools/check-unused-symbols.sh
new file mode 100755
index 0000000..0348938
--- /dev/null
+++ b/src/v8/tools/check-unused-symbols.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+v8_root=$(readlink -f $(dirname $BASH_SOURCE)/../)
+symbols=$(
+    grep \
+        --only-matching \
+        --perl-regexp 'V\(_, \K([^,\)]*)' \
+        -- "$v8_root/src/heap-symbols.h")
+
+# Find symbols which appear exactly once (in heap-symbols.h)
+grep \
+    --only-matching \
+    --no-filename \
+    --recursive \
+    --fixed-strings "$symbols" \
+    -- "$v8_root/src" "$v8_root/test/cctest" \
+| sort \
+| uniq -u \
+| sed -e 's/.*/Heap symbol "&" seems to be unused./'
+
+echo "Kthxbye."
diff --git a/src/v8/tools/clusterfuzz/BUILD.gn b/src/v8/tools/clusterfuzz/BUILD.gn
new file mode 100644
index 0000000..88219600
--- /dev/null
+++ b/src/v8/tools/clusterfuzz/BUILD.gn
@@ -0,0 +1,21 @@
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("../../gni/v8.gni")
+
+if (v8_correctness_fuzzer) {
+  copy("v8_correctness_fuzzer_resources") {
+    sources = [
+      "v8_commands.py",
+      "v8_foozzie.py",
+      "v8_foozzie_harness_adjust.js",
+      "v8_fuzz_config.py",
+      "v8_mock.js",
+      "v8_mock_archs.js",
+      "v8_suppressions.js",
+      "v8_suppressions.py",
+    ]
+    outputs = [ "$root_out_dir/{{source_file_part}}" ]
+  }
+}
diff --git a/src/v8/tools/clusterfuzz/testdata/failure_output.txt b/src/v8/tools/clusterfuzz/testdata/failure_output.txt
new file mode 100644
index 0000000..fe94bb9
--- /dev/null
+++ b/src/v8/tools/clusterfuzz/testdata/failure_output.txt
@@ -0,0 +1,50 @@
+#
+# V8 correctness failure
+# V8 correctness configs: x64,ignition:x64,ignition_turbo
+# V8 correctness sources: f60
+# V8 correctness suppression: 
+#
+# CHECK
+#
+# Compared x64,ignition with x64,ignition_turbo
+#
+# Flags of x64,ignition:
+--correctness-fuzzer-suppressions --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --noopt --liftoff --no-wasm-tier-up --flag1 --flag2=0
+# Flags of x64,ignition_turbo:
+--correctness-fuzzer-suppressions --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --flag3
+#
+# Difference:
+- unknown
++ not unknown
+#
+# Source file:
+name/to/file.js
+#
+### Start of configuration x64,ignition:
+
+1
+v8-foozzie source: name/to/a/file.js
+2
+v8-foozzie source: name/to/file.js
+  weird error
+        ^
+3
+unknown
+
+
+### End of configuration x64,ignition
+#
+### Start of configuration x64,ignition_turbo:
+
+1
+v8-foozzie source: name/to/a/file.js
+2
+v8-foozzie source: name/to/file.js
+  weird other error
+^
+3
+not unknown
+
+
+### End of configuration x64,ignition_turbo
+
diff --git a/src/v8/tools/clusterfuzz/testdata/fuzz-123.js b/src/v8/tools/clusterfuzz/testdata/fuzz-123.js
new file mode 100644
index 0000000..fbde573
--- /dev/null
+++ b/src/v8/tools/clusterfuzz/testdata/fuzz-123.js
@@ -0,0 +1,6 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Empty test dummy.
+print("js-mutation: start generated test case");
diff --git a/src/v8/tools/clusterfuzz/testdata/sanity_check_output.txt b/src/v8/tools/clusterfuzz/testdata/sanity_check_output.txt
new file mode 100644
index 0000000..636f4c9
--- /dev/null
+++ b/src/v8/tools/clusterfuzz/testdata/sanity_check_output.txt
@@ -0,0 +1,47 @@
+#
+# V8 correctness failure
+# V8 correctness configs: x64,ignition:x64,ignition_turbo
+# V8 correctness sources: sanity check failed
+# V8 correctness suppression: 
+#
+# CHECK
+#
+# Compared x64,ignition with x64,ignition_turbo
+#
+# Flags of x64,ignition:
+--correctness-fuzzer-suppressions --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345 --turbo-filter=~ --noopt --liftoff --no-wasm-tier-up
+# Flags of x64,ignition_turbo:
+--correctness-fuzzer-suppressions --expose-gc --allow-natives-syntax --invoke-weak-callbacks --omit-quit --es-staging --no-wasm-async-compilation --suppress-asm-messages --random-seed 12345
+#
+# Difference:
+- unknown
++ not unknown
+#
+### Start of configuration x64,ignition:
+
+1
+v8-foozzie source: name/to/a/file.js
+2
+v8-foozzie source: name/to/file.js
+  weird error
+        ^
+3
+unknown
+
+
+### End of configuration x64,ignition
+#
+### Start of configuration x64,ignition_turbo:
+
+1
+v8-foozzie source: name/to/a/file.js
+2
+v8-foozzie source: name/to/file.js
+  weird other error
+^
+3
+not unknown
+
+
+### End of configuration x64,ignition_turbo
+
diff --git a/src/v8/tools/clusterfuzz/testdata/test_d8_1.py b/src/v8/tools/clusterfuzz/testdata/test_d8_1.py
new file mode 100644
index 0000000..4a3d008
--- /dev/null
+++ b/src/v8/tools/clusterfuzz/testdata/test_d8_1.py
@@ -0,0 +1,17 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+print("""
+1
+v8-foozzie source: name/to/a/file.js
+2
+v8-foozzie source: name/to/file.js
+  weird error
+        ^
+3
+unknown
+""")
diff --git a/src/v8/tools/clusterfuzz/testdata/test_d8_2.py b/src/v8/tools/clusterfuzz/testdata/test_d8_2.py
new file mode 100644
index 0000000..824b222
--- /dev/null
+++ b/src/v8/tools/clusterfuzz/testdata/test_d8_2.py
@@ -0,0 +1,17 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+print("""
+1
+v8-foozzie source: name/to/a/file.js
+2
+v8-foozzie source: name/to/file.js
+  weird other error
+^
+3
+unknown
+""")
diff --git a/src/v8/tools/clusterfuzz/testdata/test_d8_3.py b/src/v8/tools/clusterfuzz/testdata/test_d8_3.py
new file mode 100644
index 0000000..0b19a3f
--- /dev/null
+++ b/src/v8/tools/clusterfuzz/testdata/test_d8_3.py
@@ -0,0 +1,17 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+print("""
+1
+v8-foozzie source: name/to/a/file.js
+2
+v8-foozzie source: name/to/file.js
+  weird other error
+^
+3
+not unknown
+""")
diff --git a/src/v8/tools/clusterfuzz/testdata/v8_build_config.json b/src/v8/tools/clusterfuzz/testdata/v8_build_config.json
new file mode 100644
index 0000000..ea27b1c
--- /dev/null
+++ b/src/v8/tools/clusterfuzz/testdata/v8_build_config.json
@@ -0,0 +1 @@
+{"v8_current_cpu": "x64"}
diff --git a/src/v8/tools/clusterfuzz/v8_commands.py b/src/v8/tools/clusterfuzz/v8_commands.py
new file mode 100644
index 0000000..0b3cae7
--- /dev/null
+++ b/src/v8/tools/clusterfuzz/v8_commands.py
@@ -0,0 +1,64 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Fork from commands.py and output.py in v8 test driver.
+
+import signal
+import subprocess
+import sys
+from threading import Event, Timer
+
+
+class Output(object):
+  def __init__(self, exit_code, timed_out, stdout, pid):
+    self.exit_code = exit_code
+    self.timed_out = timed_out
+    self.stdout = stdout
+    self.pid = pid
+
+  def HasCrashed(self):
+    # Timed out tests will have exit_code -signal.SIGTERM.
+    if self.timed_out:
+      return False
+    return (self.exit_code < 0 and
+            self.exit_code != -signal.SIGABRT)
+
+  def HasTimedOut(self):
+    return self.timed_out
+
+
+def Execute(args, cwd, timeout=None):
+  popen_args = [c for c in args if c != ""]
+  try:
+    process = subprocess.Popen(
+      args=popen_args,
+      stdout=subprocess.PIPE,
+      stderr=subprocess.STDOUT,
+      cwd=cwd
+    )
+  except Exception as e:
+    sys.stderr.write("Error executing: %s\n" % popen_args)
+    raise e
+
+  timeout_event = Event()
+
+  def kill_process():
+    timeout_event.set()
+    try:
+      process.kill()
+    except OSError:
+      sys.stderr.write('Error: Process %s already ended.\n' % process.pid)
+
+
+  timer = Timer(timeout, kill_process)
+  timer.start()
+  stdout, _ = process.communicate()
+  timer.cancel()
+
+  return Output(
+      process.returncode,
+      timeout_event.is_set(),
+      stdout.decode('utf-8', 'replace').encode('utf-8'),
+      process.pid,
+  )
diff --git a/src/v8/tools/clusterfuzz/v8_foozzie.py b/src/v8/tools/clusterfuzz/v8_foozzie.py
new file mode 100755
index 0000000..55f76e8
--- /dev/null
+++ b/src/v8/tools/clusterfuzz/v8_foozzie.py
@@ -0,0 +1,423 @@
+#!/usr/bin/env python
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+V8 correctness fuzzer launcher script.
+"""
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import argparse
+import hashlib
+import itertools
+import json
+import os
+import random
+import re
+import sys
+import traceback
+
+import v8_commands
+import v8_suppressions
+
+CONFIGS = dict(
+  default=[],
+  ignition=[
+    '--turbo-filter=~',
+    '--noopt',
+    '--liftoff',
+    '--no-wasm-tier-up',
+  ],
+  ignition_asm=[
+    '--turbo-filter=~',
+    '--noopt',
+    '--validate-asm',
+    '--stress-validate-asm',
+  ],
+  ignition_eager=[
+    '--turbo-filter=~',
+    '--noopt',
+    '--no-lazy',
+    '--no-lazy-inner-functions',
+  ],
+  ignition_no_ic=[
+    '--turbo-filter=~',
+    '--noopt',
+    '--liftoff',
+    '--no-wasm-tier-up',
+    '--no-use-ic',
+    '--no-lazy-feedback-allocation',
+  ],
+  ignition_turbo=[],
+  ignition_turbo_no_ic=[
+    '--no-use-ic',
+  ],
+  ignition_turbo_opt=[
+    '--always-opt',
+    '--no-liftoff',
+    '--no-wasm-tier-up',
+    '--no-lazy-feedback-allocation'
+  ],
+  ignition_turbo_opt_eager=[
+    '--always-opt',
+    '--no-lazy',
+    '--no-lazy-inner-functions',
+    '--no-lazy-feedback-allocation',
+  ],
+  jitless=[
+    '--jitless',
+  ],
+  slow_path=[
+    '--force-slow-path',
+  ],
+  slow_path_opt=[
+    '--always-opt',
+    '--force-slow-path',
+    '--no-lazy-feedback-allocation',
+  ],
+  trusted=[
+    '--no-untrusted-code-mitigations',
+  ],
+  trusted_opt=[
+    '--always-opt',
+    '--no-untrusted-code-mitigations',
+    '--no-lazy-feedback-allocation',
+  ],
+)
+
+# Timeout in seconds for one d8 run.
+TIMEOUT = 3
+
+# Return codes.
+RETURN_PASS = 0
+RETURN_FAIL = 2
+
+BASE_PATH = os.path.dirname(os.path.abspath(__file__))
+PREAMBLE = [
+  os.path.join(BASE_PATH, 'v8_mock.js'),
+  os.path.join(BASE_PATH, 'v8_suppressions.js'),
+]
+ARCH_MOCKS = os.path.join(BASE_PATH, 'v8_mock_archs.js')
+SANITY_CHECKS = os.path.join(BASE_PATH, 'v8_sanity_checks.js')
+
+FLAGS = ['--correctness-fuzzer-suppressions', '--expose-gc',
+         '--allow-natives-syntax', '--invoke-weak-callbacks', '--omit-quit',
+         '--es-staging', '--no-wasm-async-compilation',
+         '--suppress-asm-messages']
+
+SUPPORTED_ARCHS = ['ia32', 'x64', 'arm', 'arm64']
+
+# Output for suppressed failure case.
+FAILURE_HEADER_TEMPLATE = """#
+# V8 correctness failure
+# V8 correctness configs: %(configs)s
+# V8 correctness sources: %(source_key)s
+# V8 correctness suppression: %(suppression)s
+"""
+
+# Extended output for failure case. The 'CHECK' is for the minimizer.
+FAILURE_TEMPLATE = FAILURE_HEADER_TEMPLATE + """#
+# CHECK
+#
+# Compared %(first_config_label)s with %(second_config_label)s
+#
+# Flags of %(first_config_label)s:
+%(first_config_flags)s
+# Flags of %(second_config_label)s:
+%(second_config_flags)s
+#
+# Difference:
+%(difference)s%(source_file_text)s
+#
+### Start of configuration %(first_config_label)s:
+%(first_config_output)s
+### End of configuration %(first_config_label)s
+#
+### Start of configuration %(second_config_label)s:
+%(second_config_output)s
+### End of configuration %(second_config_label)s
+"""
+
+SOURCE_FILE_TEMPLATE = """
+#
+# Source file:
+%s"""
+
+
+FUZZ_TEST_RE = re.compile(r'.*fuzz(-\d+\.js)')
+SOURCE_RE = re.compile(r'print\("v8-foozzie source: (.*)"\);')
+
+# The number of hex digits used from the hash of the original source file path.
+# Keep the number small to avoid duplicate explosion.
+ORIGINAL_SOURCE_HASH_LENGTH = 3
+
+# Placeholder string if no original source file could be determined.
+ORIGINAL_SOURCE_DEFAULT = 'none'
+
+
+def infer_arch(d8):
+  """Infer the V8 architecture from the build configuration next to the
+  executable.
+  """
+  with open(os.path.join(os.path.dirname(d8), 'v8_build_config.json')) as f:
+    arch = json.load(f)['v8_current_cpu']
+  return 'ia32' if arch == 'x86' else arch
+
+
+def parse_args():
+  parser = argparse.ArgumentParser()
+  parser.add_argument(
+    '--random-seed', type=int, required=True,
+    help='random seed passed to both runs')
+  parser.add_argument(
+      '--first-config', help='first configuration', default='ignition')
+  parser.add_argument(
+      '--second-config', help='second configuration', default='ignition_turbo')
+  parser.add_argument(
+      '--first-config-extra-flags', action='append', default=[],
+      help='Additional flags to pass to the run of the first configuration')
+  parser.add_argument(
+      '--second-config-extra-flags', action='append', default=[],
+      help='Additional flags to pass to the run of the second configuration')
+  parser.add_argument(
+      '--first-d8', default='d8',
+      help='optional path to first d8 executable, '
+           'default: bundled in the same directory as this script')
+  parser.add_argument(
+      '--second-d8',
+      help='optional path to second d8 executable, default: same as first')
+  parser.add_argument(
+      '--skip-sanity-checks', default=False, action='store_true',
+      help='skip sanity checks for testing purposes')
+  parser.add_argument('testcase', help='path to test case')
+  options = parser.parse_args()
+
+  # Ensure we have a test case.
+  assert (os.path.exists(options.testcase) and
+          os.path.isfile(options.testcase)), (
+      'Test case %s doesn\'t exist' % options.testcase)
+
+  # Use first d8 as default for second d8.
+  options.second_d8 = options.second_d8 or options.first_d8
+
+  # Ensure absolute paths.
+  if not os.path.isabs(options.first_d8):
+    options.first_d8 = os.path.join(BASE_PATH, options.first_d8)
+  if not os.path.isabs(options.second_d8):
+    options.second_d8 = os.path.join(BASE_PATH, options.second_d8)
+
+  # Ensure executables exist.
+  assert os.path.exists(options.first_d8)
+  assert os.path.exists(options.second_d8)
+
+  # Infer architecture from build artifacts.
+  options.first_arch = infer_arch(options.first_d8)
+  options.second_arch = infer_arch(options.second_d8)
+
+  # Ensure we make a sane comparison.
+  if (options.first_arch == options.second_arch and
+      options.first_config == options.second_config):
+    parser.error('Need either arch or config difference.')
+  assert options.first_arch in SUPPORTED_ARCHS
+  assert options.second_arch in SUPPORTED_ARCHS
+  assert options.first_config in CONFIGS
+  assert options.second_config in CONFIGS
+
+  return options
+
+
+def get_meta_data(content):
+  """Extracts original-source-file paths from test case content."""
+  sources = []
+  for line in content.splitlines():
+    match = SOURCE_RE.match(line)
+    if match:
+      sources.append(match.group(1))
+  return {'sources': sources}
+
+
+def content_bailout(content, ignore_fun):
+  """Print failure state and return if ignore_fun matches content."""
+  bug = (ignore_fun(content) or '').strip()
+  if bug:
+    print(FAILURE_HEADER_TEMPLATE % dict(
+        configs='', source_key='', suppression=bug))
+    return True
+  return False
+
+
+def pass_bailout(output, step_number):
+  """Print info and return if in timeout or crash pass states."""
+  if output.HasTimedOut():
+    # Dashed output, so that no other clusterfuzz tools can match the
+    # words timeout or crash.
+    print('# V8 correctness - T-I-M-E-O-U-T %d' % step_number)
+    return True
+  if output.HasCrashed():
+    print('# V8 correctness - C-R-A-S-H %d' % step_number)
+    return True
+  return False
+
+
+def fail_bailout(output, ignore_by_output_fun):
+  """Print failure state and return if ignore_by_output_fun matches output."""
+  bug = (ignore_by_output_fun(output.stdout) or '').strip()
+  if bug:
+    print(FAILURE_HEADER_TEMPLATE % dict(
+        configs='', source_key='', suppression=bug))
+    return True
+  return False
+
+
+def print_difference(
+    options, source_key, first_config_flags, second_config_flags,
+    first_config_output, second_config_output, difference, source=None):
+  # The first three entries will be parsed by clusterfuzz. Format changes
+  # will require changes on the clusterfuzz side.
+  first_config_label = '%s,%s' % (options.first_arch, options.first_config)
+  second_config_label = '%s,%s' % (options.second_arch, options.second_config)
+  source_file_text = SOURCE_FILE_TEMPLATE % source if source else ''
+  print((FAILURE_TEMPLATE % dict(
+      configs='%s:%s' % (first_config_label, second_config_label),
+      source_file_text=source_file_text,
+      source_key=source_key,
+      suppression='', # We can't tie bugs to differences.
+      first_config_label=first_config_label,
+      second_config_label=second_config_label,
+      first_config_flags=' '.join(first_config_flags),
+      second_config_flags=' '.join(second_config_flags),
+      first_config_output=
+          first_config_output.stdout.decode('utf-8', 'replace'),
+      second_config_output=
+          second_config_output.stdout.decode('utf-8', 'replace'),
+      source=source,
+      difference=difference.decode('utf-8', 'replace'),
+  )).encode('utf-8', 'replace'))
+
+
+def main():
+  options = parse_args()
+
+  # Suppressions are architecture and configuration specific.
+  suppress = v8_suppressions.get_suppression(
+      options.first_arch, options.first_config,
+      options.second_arch, options.second_config,
+  )
+
+  # Static bailout based on test case content or metadata.
+  with open(options.testcase) as f:
+    content = f.read()
+  if content_bailout(get_meta_data(content), suppress.ignore_by_metadata):
+    return RETURN_FAIL
+  if content_bailout(content, suppress.ignore_by_content):
+    return RETURN_FAIL
+
+  # Set up runtime arguments.
+  common_flags = FLAGS + ['--random-seed', str(options.random_seed)]
+  first_config_flags = (common_flags + CONFIGS[options.first_config] +
+                        options.first_config_extra_flags)
+  second_config_flags = (common_flags + CONFIGS[options.second_config] +
+                         options.second_config_extra_flags)
+
+  def run_d8(d8, config_flags, config_label=None, testcase=options.testcase):
+    preamble = PREAMBLE[:]
+    if options.first_arch != options.second_arch:
+      preamble.append(ARCH_MOCKS)
+    args = [d8] + config_flags + preamble + [testcase]
+    if config_label:
+      print('# Command line for %s comparison:' % config_label)
+      print(' '.join(args))
+    if d8.endswith('.py'):
+      # Wrap with python in tests.
+      args = [sys.executable] + args
+    return v8_commands.Execute(
+        args,
+        cwd=os.path.dirname(os.path.abspath(testcase)),
+        timeout=TIMEOUT,
+    )
+
+  # Sanity checks. Run both configurations with the sanity-checks file only and
+  # bail out early if different.
+  if not options.skip_sanity_checks:
+    first_config_output = run_d8(
+        options.first_d8, first_config_flags, testcase=SANITY_CHECKS)
+    second_config_output = run_d8(
+        options.second_d8, second_config_flags, testcase=SANITY_CHECKS)
+    difference, _ = suppress.diff(
+        first_config_output.stdout, second_config_output.stdout)
+    if difference:
+      # Special source key for sanity checks so that clusterfuzz dedupes all
+      # cases on this in case it's hit.
+      source_key = 'sanity check failed'
+      print_difference(
+          options, source_key, first_config_flags, second_config_flags,
+          first_config_output, second_config_output, difference)
+      return RETURN_FAIL
+
+  first_config_output = run_d8(options.first_d8, first_config_flags, 'first')
+
+  # Early bailout based on first run's output.
+  if pass_bailout(first_config_output, 1):
+    return RETURN_PASS
+
+  second_config_output = run_d8(
+      options.second_d8, second_config_flags, 'second')
+
+  # Bailout based on second run's output.
+  if pass_bailout(second_config_output, 2):
+    return RETURN_PASS
+
+  difference, source = suppress.diff(
+      first_config_output.stdout, second_config_output.stdout)
+
+  if source:
+    source_key = hashlib.sha1(source).hexdigest()[:ORIGINAL_SOURCE_HASH_LENGTH]
+  else:
+    source_key = ORIGINAL_SOURCE_DEFAULT
+
+  if difference:
+    # Only bail out due to suppressed output if there was a difference. If a
+    # suppression doesn't show up anymore in the statistics, we might want to
+    # remove it.
+    if fail_bailout(first_config_output, suppress.ignore_by_output1):
+      return RETURN_FAIL
+    if fail_bailout(second_config_output, suppress.ignore_by_output2):
+      return RETURN_FAIL
+
+    print_difference(
+        options, source_key, first_config_flags, second_config_flags,
+        first_config_output, second_config_output, difference, source)
+    return RETURN_FAIL
+
+  # TODO(machenbach): Figure out if we could also return a bug in case there's
+  # no difference, but one of the line suppressions has matched - and without
+  # the match there would be a difference.
+
+  print('# V8 correctness - pass')
+  return RETURN_PASS
+
+
+if __name__ == "__main__":
+  try:
+    result = main()
+  except SystemExit:
+    # Make sure clusterfuzz reports internal errors and wrong usage.
+    # Use one label for all internal and usage errors.
+    print(FAILURE_HEADER_TEMPLATE % dict(
+        configs='', source_key='', suppression='wrong_usage'))
+    result = RETURN_FAIL
+  except MemoryError:
+    # Running out of memory happens occasionally but is not actionable.
+    print('# V8 correctness - pass')
+    result = RETURN_PASS
+  except Exception as e:
+    print(FAILURE_HEADER_TEMPLATE % dict(
+        configs='', source_key='', suppression='internal_error'))
+    print('# Internal error: %s' % e)
+    traceback.print_exc(file=sys.stdout)
+    result = RETURN_FAIL
+
+  sys.exit(result)
diff --git a/src/v8/tools/clusterfuzz/v8_foozzie_harness_adjust.js b/src/v8/tools/clusterfuzz/v8_foozzie_harness_adjust.js
new file mode 100644
index 0000000..4a8ed35
--- /dev/null
+++ b/src/v8/tools/clusterfuzz/v8_foozzie_harness_adjust.js
@@ -0,0 +1,96 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Extensions to mjsunit and other test harnesses added between harness and
+// fuzzing code.
+
+try {
+  // Scope for utility functions.
+  (function() {
+    // Same as in mjsunit.js.
+    function classOf(object) {
+      // Argument must not be null or undefined.
+      var string = Object.prototype.toString.call(object);
+      // String has format [object <ClassName>].
+      return string.substring(8, string.length - 1);
+    }
+
+    // Override prettyPrinted with a version that also recursively prints
+    // object properties (with a depth of 3).
+    let origPrettyPrinted = prettyPrinted;
+    prettyPrinted = function prettyPrinted(value, depth=3) {
+      if (depth == 0) {
+        return "...";
+      }
+      switch (typeof value) {
+        case "object":
+          if (value === null) return "null";
+          var objectClass = classOf(value);
+          switch (objectClass) {
+            case "Object":
+              var name = value.constructor.name;
+              if (!name)
+                name = "Object";
+              return name + "{" + Object.keys(value).map(function(key, index) {
+                return (
+                    prettyPrinted(key, depth - 1) +
+                    ": " +
+                    prettyPrinted(value[key], depth - 1)
+                );
+              }).join(",")  + "}";
+          }
+      }
+      // Fall through to original version for all other types.
+      return origPrettyPrinted(value);
+    }
+
+    // We're not interested in stack traces.
+    MjsUnitAssertionError = function MjsUnitAssertionError(message) {}
+    MjsUnitAssertionError.prototype.toString = function () { return ""; };
+
+    // Do more printing in assertions for more correctness coverage.
+    failWithMessage = function failWithMessage(message) {
+      print(prettyPrinted(message))
+    }
+
+    assertSame = function assertSame(expected, found, name_opt) {
+      print(prettyPrinted(found));
+    }
+
+    assertNotSame = function assertNotSame(expected, found, name_opt) {
+      print(prettyPrinted(found));
+    }
+
+    assertEquals = function assertEquals(expected, found, name_opt) {
+      print(prettyPrinted(found));
+    }
+
+    assertNotEquals = function assertNotEquals(expected, found, name_opt) {
+      print(prettyPrinted(found));
+    }
+
+    assertNull = function assertNull(value, name_opt) {
+      print(prettyPrinted(value));
+    }
+
+    assertNotNull = function assertNotNull(value, name_opt) {
+      print(prettyPrinted(value));
+    }
+
+    // Suppress optimization status as it leads to false positives.
+    assertUnoptimized = function assertUnoptimized() {}
+
+    assertOptimized = function assertOptimized() {}
+
+    isNeverOptimize = function isNeverOptimize() {}
+
+    isAlwaysOptimize = function isAlwaysOptimize() {}
+
+    isInterpreted = function isInterpreted() {}
+
+    isOptimized = function isOptimized() {}
+
+    isTurboFanned = function isTurboFanned() {}
+  })();
+} catch(e) { }
diff --git a/src/v8/tools/clusterfuzz/v8_foozzie_test.py b/src/v8/tools/clusterfuzz/v8_foozzie_test.py
new file mode 100755
index 0000000..43b65e8
--- /dev/null
+++ b/src/v8/tools/clusterfuzz/v8_foozzie_test.py
@@ -0,0 +1,160 @@
+#!/usr/bin/env python
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import subprocess
+import sys
+import unittest
+
+import v8_foozzie
+import v8_fuzz_config
+import v8_suppressions
+
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+FOOZZIE = os.path.join(BASE_DIR, 'v8_foozzie.py')
+TEST_DATA = os.path.join(BASE_DIR, 'testdata')
+
+
+class ConfigTest(unittest.TestCase):
+  def testExperiments(self):
+    """Test that probabilities add up to 100 and that all config names exist.
+    """
+    EXPERIMENTS = v8_fuzz_config.FOOZZIE_EXPERIMENTS
+    CONFIGS = v8_foozzie.CONFIGS
+    assert sum(x[0] for x in EXPERIMENTS) == 100
+    assert all(map(lambda x: x[1] in CONFIGS, EXPERIMENTS))
+    assert all(map(lambda x: x[2] in CONFIGS, EXPERIMENTS))
+    assert all(map(lambda x: x[3].endswith('d8'), EXPERIMENTS))
+
+  def testConfig(self):
+    """Smoke test how to choose experiments.
+
+    When experiment distribution changes this test might change, too.
+    """
+    class Rng(object):
+      def random(self):
+        return 0.5
+    self.assertEqual(
+        [
+          '--first-config=ignition',
+          '--second-config=ignition_turbo',
+          '--second-d8=d8',
+          '--second-config-extra-flags=--stress-scavenge=100',
+        ],
+        v8_fuzz_config.Config('foo', Rng(), 42).choose_foozzie_flags(),
+    )
+
+
+class UnitTest(unittest.TestCase):
+  def testDiff(self):
+    # TODO(machenbach): Mock out suppression configuration.
+    suppress = v8_suppressions.get_suppression(
+        'x64', 'ignition', 'x64', 'ignition_turbo')
+    one = ''
+    two = ''
+    diff = None, None
+    self.assertEquals(diff, suppress.diff(one, two))
+
+    one = 'a \n  b\nc();'
+    two = 'a \n  b\nc();'
+    diff = None, None
+    self.assertEquals(diff, suppress.diff(one, two))
+
+    # Ignore line before caret, caret position and error message.
+    one = """
+undefined
+weird stuff
+      ^
+somefile.js: TypeError: undefined is not a function
+  undefined
+"""
+    two = """
+undefined
+other weird stuff
+            ^
+somefile.js: TypeError: baz is not a function
+  undefined
+"""
+    diff = None, None
+    self.assertEquals(diff, suppress.diff(one, two))
+
+    one = """
+Still equal
+Extra line
+"""
+    two = """
+Still equal
+"""
+    diff = '- Extra line', None
+    self.assertEquals(diff, suppress.diff(one, two))
+
+    one = """
+Still equal
+"""
+    two = """
+Still equal
+Extra line
+"""
+    diff = '+ Extra line', None
+    self.assertEquals(diff, suppress.diff(one, two))
+
+    one = """
+undefined
+somefile.js: TypeError: undefined is not a constructor
+"""
+    two = """
+undefined
+otherfile.js: TypeError: undefined is not a constructor
+"""
+    diff = """- somefile.js: TypeError: undefined is not a constructor
++ otherfile.js: TypeError: undefined is not a constructor""", None
+    self.assertEquals(diff, suppress.diff(one, two))
+
+
+def cut_verbose_output(stdout):
+  # This removes first lines containing d8 commands.
+  return '\n'.join(stdout.split('\n')[4:])
+
+
+def run_foozzie(first_d8, second_d8, *extra_flags):
+  return subprocess.check_output([
+    sys.executable, FOOZZIE,
+    '--random-seed', '12345',
+    '--first-d8', os.path.join(TEST_DATA, first_d8),
+    '--second-d8', os.path.join(TEST_DATA, second_d8),
+    '--first-config', 'ignition',
+    '--second-config', 'ignition_turbo',
+    os.path.join(TEST_DATA, 'fuzz-123.js'),
+  ] + list(extra_flags))
+
+
+class SystemTest(unittest.TestCase):
+  def testSyntaxErrorDiffPass(self):
+    stdout = run_foozzie('test_d8_1.py', 'test_d8_2.py', '--skip-sanity-checks')
+    self.assertEquals('# V8 correctness - pass\n', cut_verbose_output(stdout))
+
+  def testDifferentOutputFail(self):
+    with open(os.path.join(TEST_DATA, 'failure_output.txt')) as f:
+      expected_output = f.read()
+    with self.assertRaises(subprocess.CalledProcessError) as ctx:
+      run_foozzie('test_d8_1.py', 'test_d8_3.py', '--skip-sanity-checks',
+                  '--first-config-extra-flags=--flag1',
+                  '--first-config-extra-flags=--flag2=0',
+                  '--second-config-extra-flags=--flag3')
+    e = ctx.exception
+    self.assertEquals(v8_foozzie.RETURN_FAIL, e.returncode)
+    self.assertEquals(expected_output, cut_verbose_output(e.output))
+
+  def testSanityCheck(self):
+    with open(os.path.join(TEST_DATA, 'sanity_check_output.txt')) as f:
+      expected_output = f.read()
+    with self.assertRaises(subprocess.CalledProcessError) as ctx:
+      run_foozzie('test_d8_1.py', 'test_d8_3.py')
+    e = ctx.exception
+    self.assertEquals(v8_foozzie.RETURN_FAIL, e.returncode)
+    self.assertEquals(expected_output, e.output)
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/src/v8/tools/clusterfuzz/v8_fuzz_config.py b/src/v8/tools/clusterfuzz/v8_fuzz_config.py
new file mode 100644
index 0000000..0dcacf2
--- /dev/null
+++ b/src/v8/tools/clusterfuzz/v8_fuzz_config.py
@@ -0,0 +1,86 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import random
+
+# List of configuration experiments for correctness fuzzing.
+# List of <probability>, <1st config name>, <2nd config name>, <2nd d8>.
+# Probabilities must add up to 100.
+FOOZZIE_EXPERIMENTS = [
+  [10, 'ignition', 'jitless', 'd8'],
+  [10, 'ignition', 'slow_path', 'd8'],
+  [5, 'ignition', 'slow_path_opt', 'd8'],
+  [26, 'ignition', 'ignition_turbo', 'd8'],
+  [2, 'ignition_no_ic', 'ignition_turbo', 'd8'],
+  [2, 'ignition', 'ignition_turbo_no_ic', 'd8'],
+  [18, 'ignition', 'ignition_turbo_opt', 'd8'],
+  [2, 'ignition_no_ic', 'ignition_turbo_opt', 'd8'],
+  [5, 'ignition_turbo_opt', 'ignition_turbo_opt', 'clang_x86/d8'],
+  [5, 'ignition_turbo', 'ignition_turbo', 'clang_x86/d8'],
+  [5, 'ignition', 'ignition', 'clang_x86/d8'],
+  [5, 'ignition', 'ignition', 'clang_x64_v8_arm64/d8'],
+  [5, 'ignition', 'ignition', 'clang_x86_v8_arm/d8'],
+]
+
+# Additional flag experiments. List of tuples like
+# (<likelihood to use flags in [0,1)>, <flag>).
+ADDITIONAL_FLAGS = [
+  (0.1, '--stress-marking=100'),
+  (0.1, '--stress-scavenge=100'),
+  (0.1, '--stress-compaction-random'),
+  (0.1, '--random-gc-interval=2000'),
+  (0.2, '--noanalyze-environment-liveness'),
+  (0.1, '--stress-delay-tasks'),
+  (0.01, '--thread-pool-size=1'),
+  (0.01, '--thread-pool-size=2'),
+  (0.01, '--thread-pool-size=4'),
+  (0.01, '--thread-pool-size=8'),
+  (0.1, '--interrupt-budget=1000'),
+]
+
+class Config(object):
+  def __init__(self, name, rng=None, random_seed=None):
+    """
+    Args:
+      name: Name of the used fuzzer.
+      rng: Random number generator for generating experiments.
+      random_seed: Random-seed used for d8 throughout one fuzz session.
+      TODO(machenbach): Remove random_seed after a grace period of a couple of
+      days. We only have it to keep bisection stable. Afterwards we can just
+      use rng.
+    """
+    self.name = name
+    self.rng = rng or random.Random()
+    self.random_seed = random_seed
+
+  def choose_foozzie_flags(self):
+    """Randomly chooses a configuration from FOOZZIE_EXPERIMENTS.
+
+    Returns: List of flags to pass to v8_foozzie.py fuzz harness.
+    """
+    # TODO(machenbach): Temporarily use same RNG state for all test cases in one
+    # fuzz session. See also TODO above.
+    if self.random_seed is not None:
+      flags_rng = random.Random(self.random_seed)
+    else:
+      flags_rng = random.Random()
+
+    # Add additional flags to second config based on experiment percentages.
+    extra_flags = []
+    for p, flag in ADDITIONAL_FLAGS:
+      if flags_rng.random() < p:
+        extra_flags.append('--second-config-extra-flags=%s' % flag)
+
+    # Calculate flags determining the experiment.
+    acc = 0
+    threshold = self.rng.random() * 100
+    for prob, first_config, second_config, second_d8 in FOOZZIE_EXPERIMENTS:
+      acc += prob
+      if acc > threshold:
+        return [
+          '--first-config=' + first_config,
+          '--second-config=' + second_config,
+          '--second-d8=' + second_d8,
+        ] + extra_flags
+    assert False
diff --git a/src/v8/tools/clusterfuzz/v8_mock.js b/src/v8/tools/clusterfuzz/v8_mock.js
new file mode 100644
index 0000000..2f797dd
--- /dev/null
+++ b/src/v8/tools/clusterfuzz/v8_mock.js
@@ -0,0 +1,139 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is intended for permanent JS behavior changes for mocking out
+// non-deterministic behavior. For temporary suppressions, please refer to
+// v8_suppressions.js.
+// This file is loaded before each correctness test case and won't get
+// minimized.
+
+
+// This will be overridden in the test cases. The override can be minimized.
+var prettyPrinted = function prettyPrinted(msg) { return msg; };
+
+// Mock Math.random.
+(function () {
+  var index = 0
+  Math.random = function() {
+    index = (index + 1) % 10;
+    return index / 10.0;
+  }
+})();
+
+// Mock Date.
+(function () {
+  var index = 0
+  var mockDate = 1477662728696
+  var mockDateNow = function() {
+    index = (index + 1) % 10
+    mockDate = mockDate + index + 1
+    return mockDate
+  }
+
+  var origDate = Date;
+  var constructDate = function(args) {
+    if (args.length == 1) {
+      var result = new origDate(args[0]);
+    } else if (args.length == 2) {
+      var result = new origDate(args[0], args[1]);
+    } else if (args.length == 3) {
+      var result = new origDate(args[0], args[1], args[2]);
+    } else if (args.length == 4) {
+      var result = new origDate(args[0], args[1], args[2], args[3]);
+    } else if (args.length == 5) {
+      var result = new origDate(args[0], args[1], args[2], args[3], args[4]);
+    } else if (args.length == 6) {
+      var result = new origDate(
+          args[0], args[1], args[2], args[3], args[4], args[5]);
+    } else if (args.length >= 7) {
+      var result = new origDate(
+          args[0], args[1], args[2], args[3], args[4], args[5], args[6]);
+    } else {
+      var result = new origDate(mockDateNow());
+    }
+    result.constructor = function(...args) { return constructDate(args); }
+    Object.defineProperty(
+        result, "constructor", { configurable: false, writable: false });
+    return result
+  }
+
+  var handler = {
+    apply: function (target, thisArg, args) {
+      return constructDate(args)
+    },
+    construct: function (target, args, newTarget) {
+      return constructDate(args)
+    },
+    get: function(target, property, receiver) {
+      if (property == "now") {
+        return mockDateNow;
+      }
+      if (property == "prototype") {
+        return origDate.prototype
+      }
+    },
+  }
+
+  Date = new Proxy(Date, handler);
+})();
+
+// Mock performance.now().
+(function () {
+  performance.now = function () { return 1.2; }
+})();
+
+// Mock stack traces.
+Error.prepareStackTrace = function (error, structuredStackTrace) {
+  return "";
+};
+Object.defineProperty(
+    Error, 'prepareStackTrace', { configurable: false, writable: false });
+
+// Mock buffer access in float typed arrays because of varying NaN patterns.
+// Note, for now we just use noop forwarding proxies, because they already
+// turn off optimizations.
+(function () {
+  var mock = function(arrayType) {
+    var handler = {
+      construct: function(target, args) {
+        var obj = new (Function.prototype.bind.apply(arrayType, [null].concat(args)));
+        return new Proxy(obj, {
+          get: function(x, prop) {
+            if (typeof x[prop] == "function")
+              return x[prop].bind(obj)
+            return x[prop];
+          },
+        });
+      },
+    };
+    return new Proxy(arrayType, handler);
+  }
+
+  Float32Array = mock(Float32Array);
+  Float64Array = mock(Float64Array);
+})();
+
+// Mock Worker.
+(function () {
+  var index = 0;
+  // TODO(machenbach): Randomize this for each test case, but keep stable
+  // during comparison. Also data and random above.
+  var workerMessages = [
+    undefined, 0, -1, "", "foo", 42, [], {}, [0], {"x": 0}
+  ];
+  Worker = function(code){
+    try {
+      print(prettyPrinted(eval(code)));
+    } catch(e) {
+      print(prettyPrinted(e));
+    }
+    this.getMessage = function(){
+      index = (index + 1) % 10;
+      return workerMessages[index];
+    }
+    this.postMessage = function(msg){
+      print(prettyPrinted(msg));
+    }
+  };
+})();
diff --git a/src/v8/tools/clusterfuzz/v8_mock_archs.js b/src/v8/tools/clusterfuzz/v8_mock_archs.js
new file mode 100644
index 0000000..acbaef8
--- /dev/null
+++ b/src/v8/tools/clusterfuzz/v8_mock_archs.js
@@ -0,0 +1,65 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is intended for permanent JS behavior changes for mocking out
+// non-deterministic behavior. For temporary suppressions, please refer to
+// v8_suppressions.js.
+// This mocks only architecture specific differences. Refer to v8_mocks.js
+// for the general case.
+// This file is loaded before each correctness test case and won't get
+// minimized.
+
+// Mock maximum typed-array length and limit to 1MiB.
+(function () {
+  var mock = function(arrayType) {
+    var handler = {
+      construct: function(target, args) {
+        for (let i = 0; i < args.length; i++) {
+          if (typeof args[i] != "object") {
+            args[i] = Math.min(1048576, args[i]);
+          }
+        }
+        return new (
+            Function.prototype.bind.apply(arrayType, [null].concat(args)));
+      },
+    };
+    return new Proxy(arrayType, handler);
+  }
+
+  ArrayBuffer = mock(ArrayBuffer);
+  Int8Array = mock(Int8Array);
+  Uint8Array = mock(Uint8Array);
+  Uint8ClampedArray = mock(Uint8ClampedArray);
+  Int16Array = mock(Int16Array);
+  Uint16Array = mock(Uint16Array);
+  Int32Array = mock(Int32Array);
+  Uint32Array = mock(Uint32Array);
+  BigInt64Array = mock(BigInt64Array);
+  BigUint64Array = mock(BigUint64Array);
+  Float32Array = mock(Float32Array);
+  Float64Array = mock(Float64Array);
+})();
+
+// Mock typed array set function and limit maximum offset to 1MiB.
+(function () {
+  var typedArrayTypes = [
+    Int8Array,
+    Uint8Array,
+    Uint8ClampedArray,
+    Int16Array,
+    Uint16Array,
+    Int32Array,
+    Uint32Array,
+    BigInt64Array,
+    BigUint64Array,
+    Float32Array,
+    Float64Array,
+  ];
+  for (let typedArrayType of typedArrayTypes) {
+    let set = typedArrayType.prototype.set
+    typedArrayType.prototype.set = function(array, offset) {
+      set.apply(this, [array, offset > 1048576 ? 1048576 : offset])
+    };
+  }
+})();
diff --git a/src/v8/tools/clusterfuzz/v8_sanity_checks.js b/src/v8/tools/clusterfuzz/v8_sanity_checks.js
new file mode 100644
index 0000000..f2cb893
--- /dev/null
+++ b/src/v8/tools/clusterfuzz/v8_sanity_checks.js
@@ -0,0 +1,32 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is executed separately before the correctness test case. Add here
+// checking of global properties that should never differ in any configuration.
+// A difference found in the prints below will prevent any further correctness
+// comparison for the selected configurations to avoid flooding bugs.
+
+print("https://crbug.com/932656");
+print(Object.getOwnPropertyNames(this));
+
+print("https://crbug.com/935800");
+(function () {
+  function foo() {
+    "use asm";
+    function baz() {}
+    return {bar: baz};
+  }
+  print(Object.getOwnPropertyNames(foo().bar));
+})();
+
+print("https://crbug.com/985154");
+(function () {
+  "use strict";
+  function foo() {
+    "use asm";
+    function baz() {}
+    return {bar: baz};
+  }
+  print(Object.getOwnPropertyNames(foo().bar));
+})();
diff --git a/src/v8/tools/clusterfuzz/v8_suppressions.js b/src/v8/tools/clusterfuzz/v8_suppressions.js
new file mode 100644
index 0000000..011e727
--- /dev/null
+++ b/src/v8/tools/clusterfuzz/v8_suppressions.js
@@ -0,0 +1,33 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is loaded before each correctness test case and after v8_mock.js.
+// You can temporarily change JS behavior here to silence known problems.
+// Please refer to a bug in a comment and remove the suppression once the
+// problem is fixed.
+
+// Suppress http://crbug.com/662429
+(function () {
+  var oldMathPow = Math.pow
+  Math.pow = function(a, b){
+    if (b < 0) {
+      return 0.000017;
+    } else {
+      return oldMathPow(a, b);
+    }
+  }
+})();
+
+// Suppress http://crbug.com/693426
+(function () {
+  var oldMathPow = Math.pow
+  Math.pow = function(a, b){
+    var s = "" + oldMathPow(a, b)
+    // Low tech precision mock. Limit digits in string representation.
+    // The phrases Infinity and NaN don't match the split("e").
+    s = s.split("e");
+    s[0] = s[0].substr(0, 17);
+    return parseFloat(s.join("e"));
+  }
+})();
diff --git a/src/v8/tools/clusterfuzz/v8_suppressions.py b/src/v8/tools/clusterfuzz/v8_suppressions.py
new file mode 100644
index 0000000..04f67b2
--- /dev/null
+++ b/src/v8/tools/clusterfuzz/v8_suppressions.py
@@ -0,0 +1,316 @@
+# Copyright 2016 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Suppressions for V8 correctness fuzzer failures.
+
+We support three types of suppressions:
+1. Ignore test case by pattern.
+Map a regular expression to a bug entry. A new failure will be reported
+when the pattern matches a JS test case.
+Subsequent matches will be recorded under the first failure.
+
+2. Ignore test run by output pattern:
+Map a regular expression to a bug entry. A new failure will be reported
+when the pattern matches the output of a particular run.
+Subsequent matches will be recorded under the first failure.
+
+3. Relax line-to-line comparisons with expressions of lines to ignore and
+lines to be normalized (i.e. ignore only portions of lines).
+These are not tied to bugs, be careful to not silently switch off this tool!
+
+Alternatively, think about adding a behavior change to v8_suppressions.js
+to silence a particular class of problems.
+"""
+
+import itertools
+import re
+
+# Max line length for regular expressions checking for lines to ignore.
+MAX_LINE_LENGTH = 512
+
+# For ignoring lines before carets and to ignore caret positions.
+CARET_RE = re.compile(r'^\s*\^\s*$')
+
+# Ignore by original source files. Map from bug->list of relative file paths in
+# V8, e.g. '/v8/test/mjsunit/d8-performance-now.js' including /v8/. A test will
+# be suppressed if one of the files below was used to mutate the test.
+IGNORE_SOURCES = {
+  # This contains a usage of f.arguments that often fires.
+  'crbug.com/662424': [
+    '/v8/test/mjsunit/bugs/bug-222.js',
+    '/v8/test/mjsunit/bugs/bug-941049.js',
+    '/v8/test/mjsunit/regress/regress-crbug-668795.js',
+    '/v8/test/mjsunit/regress/regress-1079.js',
+    '/v8/test/mjsunit/regress/regress-2989.js',
+  ],
+
+  'crbug.com/688159': [
+    '/v8/test/mjsunit/es7/exponentiation-operator.js',
+  ],
+
+  # TODO(machenbach): Implement blacklisting files for particular configs only,
+  # here ignition_eager.
+  'crbug.com/691589': [
+    '/v8/test/mjsunit/regress/regress-1200351.js',
+  ],
+
+  'crbug.com/691587': [
+    '/v8/test/mjsunit/asm/regress-674089.js',
+  ],
+
+  'crbug.com/774805': [
+    '/v8/test/mjsunit/console.js',
+  ],
+}
+
+# Ignore by test case pattern. Map from config->bug->regexp. Config '' is used
+# to match all configurations. Otherwise use either a compiler configuration,
+# e.g. ignition or validate_asm or an architecture, e.g. x64 or ia32.
+# Bug is preferred to be a crbug.com/XYZ, but can be any short distinguishable
+# label.
+# Regular expressions are assumed to be compiled. We use regexp.search.
+IGNORE_TEST_CASES = {
+}
+
+# Ignore by output pattern. Map from config->bug->regexp. See IGNORE_TEST_CASES
+# on how to specify config keys.
+# Bug is preferred to be a crbug.com/XYZ, but can be any short distinguishable
+# label.
+# Regular expressions are assumed to be compiled. We use regexp.search.
+IGNORE_OUTPUT = {
+  '': {
+    'crbug.com/664068':
+        re.compile(r'RangeError(?!: byte length)', re.S),
+    'crbug.com/667678':
+        re.compile(r'\[native code\]', re.S),
+    'crbug.com/689877':
+        re.compile(r'^.*SyntaxError: .*Stack overflow$', re.M),
+  },
+}
+
+# Lines matching any of the following regular expressions will be ignored
+# if appearing on both sides. The capturing groups need to match exactly.
+# Use uncompiled regular expressions - they'll be compiled later.
+ALLOWED_LINE_DIFFS = [
+  # Ignore caret position in stack traces.
+  r'^\s*\^\s*$',
+
+  # Ignore some stack trace headers as messages might not match.
+  r'^(.*)TypeError: .* is not a function$',
+  r'^(.*)TypeError: .* is not a constructor$',
+  r'^(.*)TypeError: (.*) is not .*$',
+  r'^(.*)ReferenceError: .* is not defined$',
+  r'^(.*):\d+: ReferenceError: .* is not defined$',
+
+  # These are rarely needed. It includes some cases above.
+  r'^\w*Error: .* is not .*$',
+  r'^(.*) \w*Error: .* is not .*$',
+  r'^(.*):\d+: \w*Error: .* is not .*$',
+
+  # Some test cases just print the message.
+  r'^.* is not a function(.*)$',
+  r'^(.*) is not a .*$',
+
+  # crbug.com/680064. This subsumes one of the above expressions.
+  r'^(.*)TypeError: .* function$',
+
+  # crbug.com/664068
+  r'^(.*)(?:Array buffer allocation failed|Invalid array buffer length)(.*)$',
+]
+
+# Lines matching any of the following regular expressions will be ignored.
+# Use uncompiled regular expressions - they'll be compiled later.
+IGNORE_LINES = [
+  r'^Warning: unknown flag .*$',
+  r'^Warning: .+ is deprecated.*$',
+  r'^Try --help for options$',
+
+  # crbug.com/705962
+  r'^\s\[0x[0-9a-f]+\]$',
+]
+
+
+###############################################################################
+# Implementation - you should not need to change anything below this point.
+
+# Compile regular expressions.
+ALLOWED_LINE_DIFFS = [re.compile(exp) for exp in ALLOWED_LINE_DIFFS]
+IGNORE_LINES = [re.compile(exp) for exp in IGNORE_LINES]
+
+ORIGINAL_SOURCE_PREFIX = 'v8-foozzie source: '
+
+def line_pairs(lines):
+  return itertools.izip_longest(
+      lines, itertools.islice(lines, 1, None), fillvalue=None)
+
+
+def caret_match(line1, line2):
+  if (not line1 or
+      not line2 or
+      len(line1) > MAX_LINE_LENGTH or
+      len(line2) > MAX_LINE_LENGTH):
+    return False
+  return bool(CARET_RE.match(line1) and CARET_RE.match(line2))
+
+
+def short_line_output(line):
+  if len(line) <= MAX_LINE_LENGTH:
+    # Avoid copying.
+    return line
+  return line[0:MAX_LINE_LENGTH] + '...'
+
+
+def ignore_by_regexp(line1, line2, allowed):
+  if len(line1) > MAX_LINE_LENGTH or len(line2) > MAX_LINE_LENGTH:
+    return False
+  for exp in allowed:
+    match1 = exp.match(line1)
+    match2 = exp.match(line2)
+    if match1 and match2:
+      # If there are groups in the regexp, ensure the groups matched the same
+      # things.
+      if match1.groups() == match2.groups():  # tuple comparison
+        return True
+  return False
+
+
+def diff_output(output1, output2, allowed, ignore1, ignore2):
+  """Returns a tuple (difference, source).
+
+  The difference is None if there's no difference, otherwise a string
+  with a readable diff.
+
+  The source is the last source output within the test case, or None if no
+  such output existed.
+  """
+  def useful_line(ignore):
+    def fun(line):
+      return all(not e.match(line) for e in ignore)
+    return fun
+
+  lines1 = filter(useful_line(ignore1), output1)
+  lines2 = filter(useful_line(ignore2), output2)
+
+  # This keeps track where we are in the original source file of the fuzz
+  # test case.
+  source = None
+
+  for ((line1, lookahead1), (line2, lookahead2)) in itertools.izip_longest(
+      line_pairs(lines1), line_pairs(lines2), fillvalue=(None, None)):
+
+    # Only one of the two iterators should run out.
+    assert not (line1 is None and line2 is None)
+
+    # One iterator ends earlier.
+    if line1 is None:
+      return '+ %s' % short_line_output(line2), source
+    if line2 is None:
+      return '- %s' % short_line_output(line1), source
+
+    # If lines are equal, no further checks are necessary.
+    if line1 == line2:
+      # Instrumented original-source-file output must be equal in both
+      # versions. It only makes sense to update it here when both lines
+      # are equal.
+      if line1.startswith(ORIGINAL_SOURCE_PREFIX):
+        source = line1[len(ORIGINAL_SOURCE_PREFIX):]
+      continue
+
+    # Look ahead. If next line is a caret, ignore this line.
+    if caret_match(lookahead1, lookahead2):
+      continue
+
+    # Check if a regexp allows these lines to be different.
+    if ignore_by_regexp(line1, line2, allowed):
+      continue
+
+    # Lines are different.
+    return (
+        '- %s\n+ %s' % (short_line_output(line1), short_line_output(line2)),
+        source,
+    )
+
+  # No difference found.
+  return None, source
+
+
+def get_suppression(arch1, config1, arch2, config2):
+  return V8Suppression(arch1, config1, arch2, config2)
+
+
+class Suppression(object):
+  def diff(self, output1, output2):
+    return None
+
+  def ignore_by_metadata(self, metadata):
+    return None
+
+  def ignore_by_content(self, testcase):
+    return None
+
+  def ignore_by_output1(self, output):
+    return None
+
+  def ignore_by_output2(self, output):
+    return None
+
+
+class V8Suppression(Suppression):
+  def __init__(self, arch1, config1, arch2, config2):
+    self.arch1 = arch1
+    self.config1 = config1
+    self.arch2 = arch2
+    self.config2 = config2
+
+  def diff(self, output1, output2):
+    return diff_output(
+        output1.splitlines(),
+        output2.splitlines(),
+        ALLOWED_LINE_DIFFS,
+        IGNORE_LINES,
+        IGNORE_LINES,
+    )
+
+  def ignore_by_content(self, testcase):
+    # Strip off test case preamble.
+    try:
+      lines = testcase.splitlines()
+      lines = lines[lines.index(
+          'print("js-mutation: start generated test case");'):]
+      content = '\n'.join(lines)
+    except ValueError:
+      # Search the whole test case if preamble can't be found. E.g. older
+      # already minimized test cases might have dropped the delimiter line.
+      content = testcase
+    for key in ['', self.arch1, self.arch2, self.config1, self.config2]:
+      for bug, exp in IGNORE_TEST_CASES.get(key, {}).iteritems():
+        if exp.search(content):
+          return bug
+    return None
+
+  def ignore_by_metadata(self, metadata):
+    for bug, sources in IGNORE_SOURCES.iteritems():
+      for source in sources:
+        if source in metadata['sources']:
+          return bug
+    return None
+
+  def ignore_by_output1(self, output):
+    return self.ignore_by_output(output, self.arch1, self.config1)
+
+  def ignore_by_output2(self, output):
+    return self.ignore_by_output(output, self.arch2, self.config2)
+
+  def ignore_by_output(self, output, arch, config):
+    def check(mapping):
+      for bug, exp in mapping.iteritems():
+        if exp.search(output):
+          return bug
+      return None
+    for key in ['', arch, config]:
+      bug = check(IGNORE_OUTPUT.get(key, {}))
+      if bug:
+        return bug
+    return None
diff --git a/src/v8/tools/collect_deprecation_stats.sh b/src/v8/tools/collect_deprecation_stats.sh
new file mode 100755
index 0000000..aa3f413
--- /dev/null
+++ b/src/v8/tools/collect_deprecation_stats.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+# Collect the number of [[deprecated]] calls detected when compiling V8.
+# Requires "v8_deprecate_get_isolate = true" to be useful.
+
+set -e
+
+if [ -z "$1" ]; then
+  (>&2 echo "Usage: collect_deprecation_stats.sh [<outdir>|<log>]")
+  exit 1
+fi
+
+if [ -d "$1" ]; then
+  OUTDIR=$1
+  FULL_LOG=/tmp/get_isolate_deprecation.log
+  gn clean "$OUTDIR"
+  autoninja -C "$OUTDIR" > $FULL_LOG
+else
+  FULL_LOG=$1
+fi
+
+FILTERED_LOG=/tmp/filtered_isolate_deprecation.log
+UNIQUE_WARNINGS_LOG=/tmp/unique_warnings.log
+
+grep "warning:" "$FULL_LOG" | sed $'
+s|^\.\./\.\./||;
+s/: warning: \'/: /;
+
+# strip everything after deprecated function name (including template param).
+s/\(<.*>\)\\?\'.*//' > $FILTERED_LOG
+
+sort -u $FILTERED_LOG > $UNIQUE_WARNINGS_LOG
+
+echo "Total deprecated calls: $(wc -l < $UNIQUE_WARNINGS_LOG)"
+cut -f2 -d' ' $UNIQUE_WARNINGS_LOG | sort | uniq -c
diff --git a/src/v8/tools/concatenate-files.py b/src/v8/tools/concatenate-files.py
index 8a9012c..a5dbe45 100644
--- a/src/v8/tools/concatenate-files.py
+++ b/src/v8/tools/concatenate-files.py
@@ -35,6 +35,9 @@
 # on all supported build platforms, but Python is, and hence this provides
 # us with an easy and uniform way of doing this on all platforms.
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import optparse
 
 
@@ -49,7 +52,7 @@
     True, if the operation was successful.
   """
   if len(filenames) < 2:
-    print "An error occurred generating %s:\nNothing to do." % filenames[-1]
+    print("An error occurred generating %s:\nNothing to do." % filenames[-1])
     return False
 
   try:
@@ -59,7 +62,7 @@
           target.write(current.read())
     return True
   except IOError as e:
-    print "An error occurred when writing %s:\n%s" % (filenames[-1], e)
+    print("An error occurred when writing %s:\n%s" % (filenames[-1], e))
     return False
 
 
diff --git a/src/v8/tools/csvparser.js b/src/v8/tools/csvparser.js
index f0f8680..bd106a7 100644
--- a/src/v8/tools/csvparser.js
+++ b/src/v8/tools/csvparser.js
@@ -49,6 +49,9 @@
       if (escapeIdentifier == 'n') {
         result += '\n';
         nextPos = pos;
+      } else if (escapeIdentifier == '\\') {
+        result += '\\';
+        nextPos = pos;
       } else {
         if (escapeIdentifier == 'x') {
           // \x00 ascii range escapes consume 2 chars.
diff --git a/src/v8/tools/deprecation_stats.py b/src/v8/tools/deprecation_stats.py
new file mode 100755
index 0000000..628eebc
--- /dev/null
+++ b/src/v8/tools/deprecation_stats.py
@@ -0,0 +1,108 @@
+#!/usr/bin/env python
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import argparse
+from datetime import datetime
+import re
+import subprocess
+import sys
+
+RE_GITHASH = re.compile(r"^[0-9a-f]{40}")
+RE_AUTHOR_TIME = re.compile(r"^author-time (\d+)$")
+RE_FILENAME = re.compile(r"^filename (.+)$")
+
+def GetBlame(file_path):
+  result = subprocess.check_output(
+      ['git', 'blame', '-t', '--line-porcelain', file_path])
+  line_iter = iter(result.splitlines())
+  blame_list = list()
+  current_blame = None
+  while True:
+    line = next(line_iter, None)
+    if line is None:
+      break
+    if RE_GITHASH.match(line):
+      if current_blame is not None:
+        blame_list.append(current_blame)
+      current_blame = {'time': 0, 'filename': None, 'content': None}
+      continue
+    match = RE_AUTHOR_TIME.match(line)
+    if match:
+      current_blame['time'] = datetime.fromtimestamp(int(match.groups()[0]))
+      continue
+    match = RE_FILENAME.match(line)
+    if match:
+      current_blame['filename'] = match.groups()[0]
+      current_blame['content'] = next(line_iter).strip()
+      continue
+  blame_list.append(current_blame)
+  return blame_list
+
+RE_MACRO_END = re.compile(r"\);");
+RE_DEPRECATE_MACRO = re.compile(r"\(.*?,(.*)\);", re.MULTILINE)
+
+def FilterAndPrint(blame_list, macro, before):
+  index = 0
+  re_macro = re.compile(macro)
+  deprecated = list()
+  while index < len(blame_list):
+    blame = blame_list[index]
+    match = re_macro.search(blame['content'])
+    if match and blame['time'] < before:
+      line = blame['content']
+      time = blame['time']
+      pos = match.end()
+      start = -1
+      parens = 0
+      quotes = 0
+      while True:
+        if pos >= len(line):
+          # extend to next line
+          index = index + 1
+          blame = blame_list[index]
+          if line.endswith(','):
+            # add whitespace when breaking line due to comma
+            line = line + ' '
+          line = line + blame['content']
+        if line[pos] == '(':
+          parens = parens + 1
+        elif line[pos] == ')':
+          parens = parens - 1
+          if parens == 0:
+            break
+        elif line[pos] == '"':
+          quotes = quotes + 1
+        elif line[pos] == ',' and quotes % 2 == 0 and start == -1:
+          start = pos + 1
+        pos = pos + 1
+      deprecated.append([index + 1, time, line[start:pos].strip()])
+    index = index + 1
+  print("Marked as " + macro + ": " + str(len(deprecated)))
+  for linenumber, time, content in deprecated:
+    print(str(linenumber).rjust(8) + " : " + str(time) + " : " + content)
+  return len(deprecated)
+
+def ParseOptions(args):
+  parser = argparse.ArgumentParser(description="Collect deprecation statistics")
+  parser.add_argument("file_path", help="Path to v8.h")
+  parser.add_argument("--before", help="Filter by date")
+  options = parser.parse_args(args)
+  if options.before:
+    options.before = datetime.strptime(options.before, '%Y-%m-%d')
+  else:
+    options.before = datetime.now()
+  return options
+
+def Main(args):
+  options = ParseOptions(args)
+  blame_list = GetBlame(options.file_path)
+  FilterAndPrint(blame_list, "V8_DEPRECATE_SOON", options.before)
+  FilterAndPrint(blame_list, "V8_DEPRECATED", options.before)
+
+if __name__ == "__main__":
+  Main(sys.argv[1:])
diff --git a/src/v8/tools/dev/gen-tags.py b/src/v8/tools/dev/gen-tags.py
index 256f65a..a478ee3 100755
--- a/src/v8/tools/dev/gen-tags.py
+++ b/src/v8/tools/dev/gen-tags.py
@@ -15,6 +15,10 @@
 If no <arch> is given, it generates tags file for all arches:
     $ tools/dev/gen-tags.py
 """
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import os
 import subprocess
 import sys
diff --git a/src/v8/tools/dev/gm.py b/src/v8/tools/dev/gm.py
index c3dab0a..0e01f4f 100755
--- a/src/v8/tools/dev/gm.py
+++ b/src/v8/tools/dev/gm.py
@@ -10,31 +10,32 @@
 Expects to be run from the root of a V8 checkout.
 
 Usage:
-    gm.py [<arch>].[<mode>].[<target>] [testname...]
+    gm.py [<arch>].[<mode>[-<suffix>]].[<target>] [testname...]
 
 All arguments are optional. Most combinations should work, e.g.:
-    gm.py ia32.debug x64.release d8
+    gm.py ia32.debug x64.release x64.release-my-custom-opts d8
+    gm.py android_arm.release.check
     gm.py x64 mjsunit/foo cctest/test-bar/*
 """
 # See HELP below for additional documentation.
 
 from __future__ import print_function
 import errno
-import multiprocessing
 import os
-import pty
 import re
 import subprocess
 import sys
 
-BUILD_OPTS_DEFAULT = ""
-BUILD_OPTS_GOMA = "-j1000 -l%d" % (multiprocessing.cpu_count() + 2)
+USE_PTY = "linux" in sys.platform
+if USE_PTY:
+  import pty
+
 BUILD_TARGETS_TEST = ["d8", "cctest", "unittests"]
 BUILD_TARGETS_ALL = ["all"]
 
 # All arches that this script understands.
 ARCHES = ["ia32", "x64", "arm", "arm64", "mipsel", "mips64el", "ppc", "ppc64",
-          "s390", "s390x"]
+          "s390", "s390x", "android_arm", "android_arm64"]
 # Arches that get built/run when you don't specify any.
 DEFAULT_ARCHES = ["ia32", "x64", "arm", "arm64"]
 # Modes that this script understands.
@@ -42,8 +43,8 @@
 # Modes that get built/run when you don't specify any.
 DEFAULT_MODES = ["release", "debug"]
 # Build targets that can be manually specified.
-TARGETS = ["d8", "cctest", "unittests", "v8_fuzzers", "mkgrokdump",
-           "generate-bytecode-expectations", "inspector-test"]
+TARGETS = ["d8", "cctest", "unittests", "v8_fuzzers", "wasm_api_tests", "wee8",
+           "mkgrokdump", "generate-bytecode-expectations", "inspector-test"]
 # Build targets that get built when you don't specify any (and specified tests
 # don't imply any other targets).
 DEFAULT_TARGETS = ["d8"]
@@ -63,13 +64,14 @@
 HELP = """<arch> can be any of: %(arches)s
 <mode> can be any of: %(modes)s
 <target> can be any of:
- - cctest, d8, unittests, v8_fuzzers (build respective binary)
+ - %(targets)s (build respective binary)
  - all (build all binaries)
  - tests (build test binaries)
  - check (build test binaries, run most tests)
  - checkall (build all binaries, run more tests)
 """ % {"arches": " ".join(ARCHES),
-       "modes": " ".join(MODES)}
+       "modes": " ".join(MODES),
+       "targets": ", ".join(TARGETS)}
 
 TESTSUITES_TARGETS = {"benchmarks": "d8",
               "cctest": "cctest",
@@ -83,6 +85,7 @@
               "preparser": "d8",
               "test262": "d8",
               "unittests": "unittests",
+              "wasm-api-tests": "wasm_api_tests",
               "webkit": "d8"}
 
 OUTDIR = "out"
@@ -122,6 +125,7 @@
 use_goma = {GOMA}
 goma_dir = \"{GOMA_DIR}\"
 v8_enable_backtrace = true
+v8_enable_fast_mksnapshot = true
 v8_enable_slow_dchecks = true
 v8_optimized_debug = false
 """.replace("{GOMA}", USE_GOMA).replace("{GOMA_DIR}", str(GOMADIR))
@@ -134,6 +138,7 @@
 use_goma = {GOMA}
 goma_dir = \"{GOMA_DIR}\"
 v8_enable_backtrace = true
+v8_enable_fast_mksnapshot = true
 v8_enable_verify_heap = true
 v8_optimized_debug = true
 """.replace("{GOMA}", USE_GOMA).replace("{GOMA_DIR}", str(GOMADIR))
@@ -203,6 +208,16 @@
   subdir = "%s.%s" % (arch, mode)
   return os.path.join(OUTDIR, subdir)
 
+def PrepareMksnapshotCmdline(orig_cmdline, path):
+  result = "gdb --args %s/mksnapshot " % path
+  for w in orig_cmdline.split(" "):
+    if w.startswith("gen/") or w.startswith("snapshot_blob"):
+      result += ("%(path)s%(sep)s%(arg)s " %
+                 {"path": path, "sep": os.sep, "arg": w})
+    else:
+      result += "%s " % w
+  return result
+
 class Config(object):
   def __init__(self, arch, mode, targets, tests=[]):
     self.arch = arch
@@ -215,57 +230,64 @@
     self.tests.update(tests)
 
   def GetTargetCpu(self):
+    if self.arch == "android_arm": return "target_cpu = \"arm\""
+    if self.arch == "android_arm64": return "target_cpu = \"arm64\""
     cpu = "x86"
     if "64" in self.arch or self.arch == "s390x":
       cpu = "x64"
     return "target_cpu = \"%s\"" % cpu
 
   def GetV8TargetCpu(self):
+    if self.arch == "android_arm": return "\nv8_target_cpu = \"arm\""
+    if self.arch == "android_arm64": return "\nv8_target_cpu = \"arm64\""
     if self.arch in ("arm", "arm64", "mipsel", "mips64el", "ppc", "ppc64",
                      "s390", "s390x"):
       return "\nv8_target_cpu = \"%s\"" % self.arch
     return ""
 
-  def GetGnArgs(self):
-    template = ARGS_TEMPLATES[self.mode]
-    arch_specific = self.GetTargetCpu() + self.GetV8TargetCpu()
-    return template % arch_specific
+  def GetTargetOS(self):
+    if self.arch in ("android_arm", "android_arm64"):
+      return "\ntarget_os = \"android\""
+    return ""
 
-  def WantsGoma(self):
-    output = _CallWithOutputNoTerminal(
-        "gn args --short --list=use_goma %s" % (GetPath(self.arch, self.mode)))
-    return "true" in output
+  def GetGnArgs(self):
+    # Use only substring before first '-' as the actual mode
+    mode = re.match("([^-]+)", self.mode).group(1)
+    template = ARGS_TEMPLATES[mode]
+    arch_specific = (self.GetTargetCpu() + self.GetV8TargetCpu() +
+                     self.GetTargetOS())
+    return template % arch_specific
 
   def Build(self):
     path = GetPath(self.arch, self.mode)
     args_gn = os.path.join(path, "args.gn")
+    build_ninja = os.path.join(path, "build.ninja")
     if not os.path.exists(path):
       print("# mkdir -p %s" % path)
       os.makedirs(path)
     if not os.path.exists(args_gn):
       _Write(args_gn, self.GetGnArgs())
+    if not os.path.exists(build_ninja):
       code = _Call("gn gen %s" % path)
       if code != 0: return code
     targets = " ".join(self.targets)
-    build_opts = BUILD_OPTS_GOMA if self.WantsGoma() else BUILD_OPTS_DEFAULT
     # The implementation of mksnapshot failure detection relies on
     # the "pty" module and GDB presence, so skip it on non-Linux.
-    if "linux" not in sys.platform:
-      return _Call("ninja -C %s %s %s" % (path, build_opts, targets))
+    if not USE_PTY:
+      return _Call("autoninja -C %s %s" % (path, targets))
 
-    return_code, output = _CallWithOutput("ninja -C %s %s %s" %
-                                          (path, build_opts, targets))
-    if return_code != 0 and "FAILED: gen/snapshot.cc" in output:
+    return_code, output = _CallWithOutput("autoninja -C %s %s" %
+                                          (path, targets))
+    if return_code != 0 and "FAILED:" in output and "snapshot_blob" in output:
       csa_trap = re.compile("Specify option( --csa-trap-on-node=[^ ]*)")
       match = csa_trap.search(output)
       extra_opt = match.group(1) if match else ""
+      cmdline = re.compile("python ../../tools/run.py ./mksnapshot (.*)")
+      orig_cmdline = cmdline.search(output).group(1).strip()
+      cmdline = PrepareMksnapshotCmdline(orig_cmdline, path) + extra_opt
       _Notify("V8 build requires your attention",
               "Detected mksnapshot failure, re-running in GDB...")
-      _Call("gdb -args %(path)s/mksnapshot "
-            "--startup_src %(path)s/gen/snapshot.cc "
-            "--random-seed 314159265 "
-            "--startup-blob %(path)s/snapshot_blob.bin"
-            "%(extra)s"% {"path": path, "extra": extra_opt})
+      _Call(cmdline)
     return return_code
 
   def RunTests(self):
@@ -274,8 +296,9 @@
       tests = ""
     else:
       tests = " ".join(self.tests)
-    return _Call("tools/run-tests.py --arch=%s --mode=%s %s" %
-                 (self.arch, self.mode, tests))
+    return _Call('"%s" ' % sys.executable +
+                 os.path.join("tools", "run-tests.py") +
+                 " --outdir=%s %s" % (GetPath(self.arch, self.mode), tests))
 
 def GetTestBinary(argstring):
   for suite in TESTSUITES_TARGETS:
@@ -343,6 +366,8 @@
         targets.append(word)
       elif word in ACTIONS:
         actions.append(word)
+      elif any(map(lambda x: word.startswith(x + "-"), MODES)):
+        modes.append(word)
       else:
         print("Didn't understand: %s" % word)
         sys.exit(1)
diff --git a/src/v8/tools/dev/v8gen.py b/src/v8/tools/dev/v8gen.py
index b8a34e2..0b6e1d1 100755
--- a/src/v8/tools/dev/v8gen.py
+++ b/src/v8/tools/dev/v8gen.py
@@ -38,6 +38,9 @@
 -------------------------------------------------------------------------------
 """
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import argparse
 import os
 import re
@@ -144,8 +147,8 @@
 
     # Check for builder/config in mb config.
     if self._options.builder not in self._mbw.masters[self._options.master]:
-      print '%s does not exist in %s for %s' % (
-          self._options.builder, CONFIG, self._options.master)
+      print('%s does not exist in %s for %s' % (
+          self._options.builder, CONFIG, self._options.master))
       return 1
 
     # TODO(machenbach): Check if the requested configurations has switched to
@@ -186,19 +189,19 @@
     return 0
 
   def cmd_list(self):
-    print '\n'.join(sorted(self._mbw.masters[self._options.master]))
+    print('\n'.join(sorted(self._mbw.masters[self._options.master])))
     return 0
 
   def verbose_print_1(self, text):
     if self._options.verbosity >= 1:
-      print '#' * 80
-      print text
+      print('#' * 80)
+      print(text)
 
   def verbose_print_2(self, text):
     if self._options.verbosity >= 2:
       indent = ' ' * 2
       for l in text.splitlines():
-        print indent + l
+        print(indent + l)
 
   def _call_cmd(self, args):
     self.verbose_print_1(' '.join(args))
@@ -290,9 +293,9 @@
     self._mbw.ReadConfigFile()
 
     if not self._options.master in self._mbw.masters:
-      print '%s not found in %s\n' % (self._options.master, CONFIG)
-      print 'Choose one of:\n%s\n' % (
-          '\n'.join(sorted(self._mbw.masters.keys())))
+      print('%s not found in %s\n' % (self._options.master, CONFIG))
+      print('Choose one of:\n%s\n' % (
+          '\n'.join(sorted(self._mbw.masters.keys()))))
       return 1
 
     return self._options.func()
diff --git a/src/v8/tools/dump-cpp.py b/src/v8/tools/dump-cpp.py
index 1fc15d9..31dc053 100644
--- a/src/v8/tools/dump-cpp.py
+++ b/src/v8/tools/dump-cpp.py
@@ -6,6 +6,9 @@
 # This script executes dumpcpp.js, collects all dumped C++ symbols,
 # and merges them back into v8 log.
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import os
 import platform
 import re
@@ -44,10 +47,10 @@
   if d8_line:
     d8_exec = d8_line.group(1)
     if not is_file_executable(d8_exec):
-      print 'd8 binary path found in {} is not executable.'.format(log_file)
+      print('d8 binary path found in {} is not executable.'.format(log_file))
       sys.exit(-1)
   else:
-    print 'No d8 binary path found in {}.'.format(log_file)
+    print('No d8 binary path found in {}.'.format(log_file))
     sys.exit(-1)
 
   args = [d8_exec] + JS_FILES + ['--'] + args
@@ -57,9 +60,9 @@
                           stdin=f)
     out, err = sp.communicate()
   if debug:
-    print err
+    print(err)
   if sp.returncode != 0:
-    print out
+    print(out)
     exit(-1)
 
   if on_windows and out:
diff --git a/src/v8/tools/dumpcpp-driver.js b/src/v8/tools/dumpcpp-driver.js
index 4452777..6073dea 100644
--- a/src/v8/tools/dumpcpp-driver.js
+++ b/src/v8/tools/dumpcpp-driver.js
@@ -39,7 +39,8 @@
 }
 
 var cppProcessor = new CppProcessor(
-  new (entriesProviders[params.platform])(params.nm, params.targetRootFS),
+  new (entriesProviders[params.platform])(params.nm, params.targetRootFS,
+                                          params.apkEmbeddedLibrary),
   params.timedRange, params.pairwiseTimedRange);
 cppProcessor.processLogFile(params.logFileName);
 cppProcessor.dumpCppSymbols();
diff --git a/src/v8/tools/dumpcpp.js b/src/v8/tools/dumpcpp.js
index ca5ff67..58cb74c 100644
--- a/src/v8/tools/dumpcpp.js
+++ b/src/v8/tools/dumpcpp.js
@@ -4,7 +4,7 @@
 
 function CppProcessor(cppEntriesProvider, timedRange, pairwiseTimedRange) {
   LogReader.call(this, {
-      'shared-library': { parsers: [null, parseInt, parseInt, parseInt],
+      'shared-library': { parsers: [parseString, parseInt, parseInt, parseInt],
           processor: this.processSharedLibrary }
   }, timedRange, pairwiseTimedRange);
 
diff --git a/src/v8/tools/eval_gc_nvp.py b/src/v8/tools/eval_gc_nvp.py
index 25afe8e..222ebef 100755
--- a/src/v8/tools/eval_gc_nvp.py
+++ b/src/v8/tools/eval_gc_nvp.py
@@ -7,10 +7,14 @@
 """This script is used to analyze GCTracer's NVP output."""
 
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
+
 from argparse import ArgumentParser
 from copy import deepcopy
 from gc_nvp_common import split_nvp
-from math import ceil,log
+from math import ceil, log
 from sys import stdin
 
 
diff --git a/src/v8/tools/eval_gc_time.sh b/src/v8/tools/eval_gc_time.sh
index 9abc93a..f809c35 100755
--- a/src/v8/tools/eval_gc_time.sh
+++ b/src/v8/tools/eval_gc_time.sh
@@ -94,7 +94,6 @@
   clear.slots_buffer \
   clear.store_buffer \
   clear.string_table \
-  clear.weak_cells \
   clear.weak_collections \
   clear.weak_lists \
   evacuate.candidates \
diff --git a/src/v8/tools/find-commit-for-patch.py b/src/v8/tools/find-commit-for-patch.py
index 657826c..cca1f40 100755
--- a/src/v8/tools/find-commit-for-patch.py
+++ b/src/v8/tools/find-commit-for-patch.py
@@ -3,6 +3,9 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import argparse
 import subprocess
 import sys
diff --git a/src/v8/tools/find_depot_tools.py b/src/v8/tools/find_depot_tools.py
index 95ae9e8..db3ffa2 100644
--- a/src/v8/tools/find_depot_tools.py
+++ b/src/v8/tools/find_depot_tools.py
@@ -4,6 +4,9 @@
 """Small utility function to find depot_tools and add it to the python path.
 """
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import os
 import sys
 
@@ -36,5 +39,5 @@
       return i
     previous_dir = root_dir
     root_dir = os.path.dirname(root_dir)
-  print >> sys.stderr, 'Failed to find depot_tools'
+  print('Failed to find depot_tools', file=sys.stderr)
   return None
diff --git a/src/v8/tools/gc-nvp-to-csv.py b/src/v8/tools/gc-nvp-to-csv.py
index 26ed8e1..b3ad374 100755
--- a/src/v8/tools/gc-nvp-to-csv.py
+++ b/src/v8/tools/gc-nvp-to-csv.py
@@ -11,20 +11,25 @@
 # Usage: gc-nvp-to-csv.py <GC-trace-filename>
 #
 
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import sys
 import gc_nvp_common
 
+
 def process_trace(filename):
   trace = gc_nvp_common.parse_gc_trace(filename)
   if len(trace):
     keys = trace[0].keys()
-    print ', '.join(keys)
+    print(', '.join(keys))
     for entry in trace:
-      print ', '.join(map(lambda key: str(entry[key]), keys))
+      print(', '.join(map(lambda key: str(entry[key]), keys)))
 
 
 if len(sys.argv) != 2:
-  print "Usage: %s <GC-trace-filename>" % sys.argv[0]
+  print("Usage: %s <GC-trace-filename>" % sys.argv[0])
   sys.exit(1)
 
 process_trace(sys.argv[1])
diff --git a/src/v8/tools/gc-nvp-trace-processor.py b/src/v8/tools/gc-nvp-trace-processor.py
index 21526ae..75d50b1 100755
--- a/src/v8/tools/gc-nvp-trace-processor.py
+++ b/src/v8/tools/gc-nvp-trace-processor.py
@@ -37,10 +37,21 @@
 #
 
 
+# for py2/py3 compatibility
 from __future__ import with_statement
+from __future__ import print_function
+from functools import reduce
+
 import sys, types, subprocess, math
 import gc_nvp_common
 
+
+try:
+  long        # Python 2
+except NameError:
+  long = int  # Python 3
+
+
 def flatten(l):
   flat = []
   for i in l: flat.extend(i)
@@ -62,7 +73,7 @@
     self.title = title
     self.axis = axis
     self.props = keywords
-    if type(field) is types.ListType:
+    if type(field) is list:
       self.field = field
     else:
       self.field = [field]
@@ -135,7 +146,7 @@
 
 def get_field(trace_line, field):
   t = type(field)
-  if t is types.StringType:
+  if t is bytes:
     return trace_line[field]
   elif t is types.FunctionType:
     return field(trace_line)
@@ -177,7 +188,7 @@
     outfilename = "%s_%d.png" % (prefix, len(charts))
     charts.append(outfilename)
     script = generate_script_and_datafile(plot, trace, '~datafile', outfilename)
-    print 'Plotting %s...' % outfilename
+    print('Plotting %s...' % outfilename)
     gnuplot(script)
 
   return charts
@@ -350,10 +361,10 @@
       out.write('<img src="%s">' % chart)
       out.write('</body></html>')
 
-  print "%s generated." % (filename + '.html')
+  print("%s generated." % (filename + '.html'))
 
 if len(sys.argv) != 2:
-  print "Usage: %s <GC-trace-filename>" % sys.argv[0]
+  print("Usage: %s <GC-trace-filename>" % sys.argv[0])
   sys.exit(1)
 
 process_trace(sys.argv[1])
diff --git a/src/v8/tools/gcmole/BUILD.gn b/src/v8/tools/gcmole/BUILD.gn
new file mode 100644
index 0000000..0434a64
--- /dev/null
+++ b/src/v8/tools/gcmole/BUILD.gn
@@ -0,0 +1,43 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("../../gni/v8.gni")
+
+group("v8_run_gcmole") {
+  testonly = true
+
+  data = [
+    "gccause.lua",
+    "gcmole.lua",
+    "gcmole-tools/",
+    "parallel.py",
+    "run-gcmole.py",
+
+    # The following contains all relevant source and build files.
+    "../../BUILD.gn",
+    "../../base/",
+    "../../include/",
+    "../../src/",
+    "../../test/cctest/",
+    "../../test/common/",
+    "../../testing/gtest/include/gtest/gtest_prod.h",
+    "../../third_party/googletest/src/googletest/include/gtest/gtest_prod.h",
+    "../../third_party/icu/source/",
+    "../../third_party/wasm-api/wasm.h",
+    "../../third_party/wasm-api/wasm.hh",
+    "../../third_party/inspector_protocol/",
+    "$target_gen_dir/../../",
+    "$target_gen_dir/../../torque-generated/",
+  ]
+
+  deps = [
+    "../../:run_torque",
+  ]
+
+  if (v8_gcmole) {
+    # This assumes gcmole tools have been fetched by a hook
+    # into v8/tools/gcmole/gcmole_tools.
+    data += [ "gcmole-tools/" ]
+  }
+}
diff --git a/src/v8/tools/gcmole/Makefile b/src/v8/tools/gcmole/Makefile
index ee43c00..e1bde68 100644
--- a/src/v8/tools/gcmole/Makefile
+++ b/src/v8/tools/gcmole/Makefile
@@ -27,16 +27,20 @@
 
 # This is Makefile for clang plugin part of gcmole tool. See README.
 
-LLVM_INCLUDE:=$(LLVM_SRC_ROOT)/include
-CLANG_INCLUDE:=$(LLVM_SRC_ROOT)/tools/clang/include
+LLVM_SRC_INCLUDE:=$(LLVM_SRC_ROOT)/include
+LLVM_BUILD_INCLUDE:=$(BUILD_ROOT)/include
+CLANG_SRC_INCLUDE:=$(CLANG_SRC_ROOT)/include
+CLANG_BUILD_INCLUDE:=$(BUILD_ROOT)/tools/clang/include
 
 libgcmole.so: gcmole.cc
-	$(CXX) -I$(LLVM_INCLUDE) -I$(CLANG_INCLUDE) -I. -D_DEBUG              \
+	$(CXX) -I$(LLVM_BUILD_INCLUDE) -I$(LLVM_SRC_INCLUDE)                  \
+	-I$(CLANG_BUILD_INCLUDE) -I$(CLANG_SRC_INCLUDE) -I. -D_DEBUG          \
 	-D_GNU_SOURCE -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS         \
 	-D__STDC_LIMIT_MACROS -O3 -fomit-frame-pointer -fno-exceptions        \
 	-fno-rtti -fPIC -Woverloaded-virtual -Wcast-qual -fno-strict-aliasing \
 	-pedantic -Wno-long-long -Wall -W -Wno-unused-parameter               \
-	-Wwrite-strings -std=c++0x -shared -o libgcmole.so gcmole.cc
+	-Wwrite-strings -static-libstdc++ -std=c++0x -shared -o libgcmole.so  \
+	gcmole.cc
 
 clean:
 	$(RM) libgcmole.so
diff --git a/src/v8/tools/gcmole/README b/src/v8/tools/gcmole/README
index 37f8afb..578ea56 100644
--- a/src/v8/tools/gcmole/README
+++ b/src/v8/tools/gcmole/README
@@ -5,12 +5,12 @@
 
 For example the following code is GC-unsafe:
 
-Handle<Object> Foo();  // Assume Foo can trigger a GC.
-void Bar(Object*, Object*);
+    Handle<Object> Foo();  // Assume Foo can trigger a GC.
+    void Bar(Object*, Object*);
 
-Handle<Object> baz;
-baz->Qux(*Foo());  // (a)  
-Bar(*Foo(), *baz);  // (b)
+    Handle<Object> baz;
+    baz->Qux(*Foo());  // (a)
+    Bar(*Foo(), *baz);  // (b)
 
 Both in cases (a) and (b) compiler is free to evaluate call arguments (that 
 includes receiver) in any order. That means it can dereference baz before 
@@ -19,20 +19,30 @@
 
 PREREQUISITES -----------------------------------------------------------------
 
-1) Install Lua 5.1
+(1) Install Lua 5.1
 
-2) Get LLVM 2.9 and Clang 2.9 sources and build them.
+    $ sudo apt-get install lua5.1
 
-Follow the instructions on http://clang.llvm.org/get_started.html.
+(2) Get LLVM 8.0 and Clang 8.0 sources and build them.
 
-Make sure to pass --enable-optimized to configure to get Release build 
-instead of a Debug one.
+    Follow the instructions on http://clang.llvm.org/get_started.html.
 
-3) Build gcmole Clang plugin (libgcmole.so)
+    Make sure to pass -DCMAKE_BUILD_TYPE=Release to cmake to get Release build 
+    instead of a Debug one.
 
-In the tools/gcmole execute the following command:
+(3) Build gcmole Clang plugin (libgcmole.so)
 
-LLVM_SRC_ROOT=<path-to-llvm-source-root> make
+    In the tools/gcmole directory execute the following command:
+
+    $ BUILD_ROOT=<path> LLVM_SRC_ROOT=<path> CLANG_SRC_ROOT=<path> make
+
+(*) Note that steps (2) and (3) can also be achieved by just using the included
+    bootstrapping script in this directory:
+
+    $ ./tools/gcmole/bootstrap.sh
+
+    This will use "third_party/llvm+clang-build" as a build directory and checkout
+    required sources in the "third_party" directory.
 
 USING GCMOLE ------------------------------------------------------------------
 
@@ -60,3 +70,41 @@
 can be ignored.
 
 If any errors were found driver exits with non-zero status.
+
+PACKAGING ---------------------------------------------------------------------
+
+gcmole is deployed on V8's buildbot infrastructure to run it as part of the
+continuous integration. A pre-built package of gcmole together with Clang is
+hosted on Google Cloud Storage for this purpose. To update this package to a
+newer version, use the provided packaging script:
+
+    $ ./tools/gcmole/package.sh
+
+This will create a new "tools/gcmole/gcmole-tools.tar.gz" package with the
+corresponding SHA1 sum suitable to be used for this purpose. It assumes that
+Clang was built in "third_party/llvm+clang-build" (e.g. by the bootstrapping
+script "bootstrap.sh" mentioned above).
+
+TROUBLESHOOTING ---------------------------------------------------------------
+
+gcmole is tighly coupled with the AST structure that Clang produces. Therefore
+when upgrading to a newer Clang version, it might start producing bogus output
+or completely stop outputting warnings. In such occasion, one might start the
+debugging process by checking weather a new AST node type is introduced which
+is currently not supported by gcmole. Insert the following code at the end of
+the FunctionAnalyzer::VisitExpr method to see the unsupported AST class(es)
+and the source position which generates them:
+
+    if (expr) {
+      clang::Stmt::StmtClass stmtClass = expr->getStmtClass();
+      d_.Report(clang::FullSourceLoc(expr->getExprLoc(), sm_),
+        d_.getCustomDiagID(clang::DiagnosticsEngine::Remark, "%0")) << stmtClass;
+    }
+
+For instance, gcmole currently doesn't support AtomicExprClass statements
+introduced for atomic operations.
+
+A convenient way to observe the AST generated by Clang is to pass the following
+flags when invoking clang++
+
+    -Xclang -ast-dump -fsyntax-only
diff --git a/src/v8/tools/gcmole/bootstrap.sh b/src/v8/tools/gcmole/bootstrap.sh
index ac6593c..05ab1cb 100755
--- a/src/v8/tools/gcmole/bootstrap.sh
+++ b/src/v8/tools/gcmole/bootstrap.sh
@@ -27,16 +27,18 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-# This script will build libgcmole.so. Building a recent clang needs a
-# recent GCC, so if you explicitly want to use GCC 4.8, use:
-#
-#    CC=gcc-4.8 CPP=cpp-4.8 CXX=g++-4.8 CXXFLAGS=-static-libstdc++ CXXCPP=cpp-4.8 ./bootstrap.sh
+# This script will build libgcmole.so as well as a corresponding recent
+# version of Clang and LLVM. The Clang will be built with the locally
+# installed compiler and statically link against the local libstdc++ so
+# that the resulting binary is easier transferable between different
+# environments.
 
-CLANG_RELEASE=3.5
+CLANG_RELEASE=8.0
 
-THIS_DIR="$(dirname "${0}")"
+THIS_DIR="$(readlink -f "$(dirname "${0}")")"
 LLVM_DIR="${THIS_DIR}/../../third_party/llvm"
-CLANG_DIR="${LLVM_DIR}/tools/clang"
+CLANG_DIR="${THIS_DIR}/../../third_party/clang"
+BUILD_DIR="${THIS_DIR}/../../third_party/llvm+clang-build"
 
 LLVM_REPO_URL=${LLVM_URL:-https://llvm.org/svn/llvm-project}
 
@@ -70,7 +72,7 @@
   fi
 fi
 
-echo Getting LLVM r"${CLANG_RELEASE}" in "${LLVM_DIR}"
+echo Getting LLVM release "${CLANG_RELEASE}" in "${LLVM_DIR}"
 if ! svn co --force \
     "${LLVM_REPO_URL}/llvm/branches/release_${CLANG_RELEASE/./}" \
     "${LLVM_DIR}"; then
@@ -81,7 +83,7 @@
       "${LLVM_DIR}"
 fi
 
-echo Getting clang r"${CLANG_RELEASE}" in "${CLANG_DIR}"
+echo Getting clang release "${CLANG_RELEASE}" in "${CLANG_DIR}"
 svn co --force \
     "${LLVM_REPO_URL}/cfe/branches/release_${CLANG_RELEASE/./}" \
     "${CLANG_DIR}"
@@ -97,33 +99,32 @@
 fi
 
 # Build clang.
-cd "${LLVM_DIR}"
-if [[ ! -f ./config.status ]]; then
-  ../llvm/configure \
-      --enable-optimized \
-      --disable-threads \
-      --disable-pthreads \
-      --without-llvmgcc \
-      --without-llvmgxx
+if [ ! -e "${BUILD_DIR}" ]; then
+  mkdir "${BUILD_DIR}"
 fi
-
+cd "${BUILD_DIR}"
+cmake -DCMAKE_CXX_FLAGS="-static-libstdc++" -DLLVM_ENABLE_TERMINFO=OFF \
+    -DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_PROJECTS=clang "${LLVM_DIR}"
 MACOSX_DEPLOYMENT_TARGET=10.5 make -j"${NUM_JOBS}"
+
+# Strip the clang binary.
 STRIP_FLAGS=
 if [ "${OS}" = "Darwin" ]; then
   # See http://crbug.com/256342
   STRIP_FLAGS=-x
 fi
-strip ${STRIP_FLAGS} Release+Asserts/bin/clang
+strip ${STRIP_FLAGS} bin/clang
 cd -
 
 # Build libgcmole.so
 make -C "${THIS_DIR}" clean
-make -C "${THIS_DIR}" LLVM_SRC_ROOT="${LLVM_DIR}" libgcmole.so
+make -C "${THIS_DIR}" LLVM_SRC_ROOT="${LLVM_DIR}" \
+    CLANG_SRC_ROOT="${CLANG_DIR}" BUILD_ROOT="${BUILD_DIR}" libgcmole.so
 
 set +x
 
 echo
 echo You can now run gcmole using this command:
 echo
-echo CLANG_BIN=\"third_party/llvm/Release+Asserts/bin\" lua tools/gcmole/gcmole.lua
+echo CLANG_BIN=\"third_party/llvm+clang-build/bin\" lua tools/gcmole/gcmole.lua
 echo
diff --git a/src/v8/tools/gcmole/gcmole-test.cc b/src/v8/tools/gcmole/gcmole-test.cc
new file mode 100644
index 0000000..c00c6e5
--- /dev/null
+++ b/src/v8/tools/gcmole/gcmole-test.cc
@@ -0,0 +1,71 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/execution/isolate.h"
+#include "src/handles/handles-inl.h"
+#include "src/handles/handles.h"
+#include "src/objects/maybe-object.h"
+#include "src/objects/object-macros.h"
+
+namespace v8 {
+namespace internal {
+
+Handle<Object> CauseGC(Handle<Object> obj, Isolate* isolate) {
+  isolate->heap()->CollectGarbage(OLD_SPACE, GarbageCollectionReason::kTesting);
+
+  return obj;
+}
+
+void TwoArgumentsFunction(Object a, Object b) {
+  a->Print();
+  b->Print();
+}
+
+void TestTwoArguments(Isolate* isolate) {
+  Handle<JSObject> obj1 = isolate->factory()->NewJSObjectWithNullProto();
+  Handle<JSObject> obj2 = isolate->factory()->NewJSObjectWithNullProto();
+  TwoArgumentsFunction(*CauseGC(obj1, isolate), *CauseGC(obj2, isolate));
+}
+
+void TwoSizeTArgumentsFunction(size_t a, size_t b) {
+  USE(a);
+  USE(b);
+}
+
+void TestTwoSizeTArguments(Isolate* isolate) {
+  Handle<JSObject> obj1 = isolate->factory()->NewJSObjectWithNullProto();
+  Handle<JSObject> obj2 = isolate->factory()->NewJSObjectWithNullProto();
+  TwoSizeTArgumentsFunction(sizeof(*CauseGC(obj1, isolate)),
+                            sizeof(*CauseGC(obj2, isolate)));
+}
+
+class SomeObject : public Object {
+ public:
+  void Method(Object a) { a->Print(); }
+
+  SomeObject& operator=(const Object& b) {
+    this->Print();
+    return *this;
+  }
+
+  DECL_CAST(SomeObject)
+
+  OBJECT_CONSTRUCTORS(SomeObject, Object);
+};
+
+void TestMethodCall(Isolate* isolate) {
+  SomeObject obj;
+  Handle<SomeObject> so = handle(obj, isolate);
+  Handle<JSObject> obj1 = isolate->factory()->NewJSObjectWithNullProto();
+  so->Method(*CauseGC(obj1, isolate));
+}
+
+void TestOperatorCall(Isolate* isolate) {
+  SomeObject obj;
+  Handle<JSObject> obj1 = isolate->factory()->NewJSObjectWithNullProto();
+  obj = *CauseGC(obj1, isolate);
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/v8/tools/gcmole/gcmole-tools.tar.gz.sha1 b/src/v8/tools/gcmole/gcmole-tools.tar.gz.sha1
index f9e3e01..718e967 100644
--- a/src/v8/tools/gcmole/gcmole-tools.tar.gz.sha1
+++ b/src/v8/tools/gcmole/gcmole-tools.tar.gz.sha1
@@ -1 +1 @@
-a21e6b0d08afcfe454042c2c1fbf1d1738caf129
\ No newline at end of file
+3d4ba1759c3d5bc7e98c466d24fa0c43f186ba79
\ No newline at end of file
diff --git a/src/v8/tools/gcmole/gcmole.cc b/src/v8/tools/gcmole/gcmole.cc
index f7a6c94..6631583 100644
--- a/src/v8/tools/gcmole/gcmole.cc
+++ b/src/v8/tools/gcmole/gcmole.cc
@@ -458,7 +458,9 @@
   CallProps() : env_(NULL) { }
 
   void SetEffect(int arg, ExprEffect in) {
-    if (in.hasGC()) gc_.set(arg);
+    if (in.hasGC()) {
+      gc_.set(arg);
+    }
     if (in.hasRawDef()) raw_def_.set(arg);
     if (in.hasRawUse()) raw_use_.set(arg);
     if (in.env() != NULL) {
@@ -472,17 +474,24 @@
 
   ExprEffect ComputeCumulativeEffect(bool result_is_raw) {
     ExprEffect out = ExprEffect::NoneWithEnv(env_);
-    if (gc_.any()) out.setGC();
+    if (gc_.any()) {
+      out.setGC();
+    }
     if (raw_use_.any()) out.setRawUse();
     if (result_is_raw) out.setRawDef();
     return out;
   }
 
   bool IsSafe() {
-    if (!gc_.any()) return true;
+    if (!gc_.any()) {
+      return true;
+    }
     std::bitset<kMaxNumberOfArguments> raw = (raw_def_ | raw_use_);
-    if (!raw.any()) return true;
-    return gc_.count() == 1 && !((raw ^ gc_).any());
+    if (!raw.any()) {
+      return true;
+    }
+    bool result = gc_.count() == 1 && !((raw ^ gc_).any());
+    return result;
   }
 
  private:
@@ -537,20 +546,19 @@
 class FunctionAnalyzer {
  public:
   FunctionAnalyzer(clang::MangleContext* ctx,
-                   clang::DeclarationName handle_decl_name,
                    clang::CXXRecordDecl* object_decl,
+                   clang::CXXRecordDecl* maybe_object_decl,
                    clang::CXXRecordDecl* smi_decl, clang::DiagnosticsEngine& d,
                    clang::SourceManager& sm, bool dead_vars_analysis)
       : ctx_(ctx),
-        handle_decl_name_(handle_decl_name),
         object_decl_(object_decl),
+        maybe_object_decl_(maybe_object_decl),
         smi_decl_(smi_decl),
         d_(d),
         sm_(sm),
         block_(NULL),
         dead_vars_analysis_(dead_vars_analysis) {}
 
-
   // --------------------------------------------------------------------------
   // Expressions
   // --------------------------------------------------------------------------
@@ -574,6 +582,7 @@
     VISIT(CharacterLiteral);
     VISIT(ChooseExpr);
     VISIT(CompoundLiteralExpr);
+    VISIT(ConstantExpr);
     VISIT(CXXBindTemporaryExpr);
     VISIT(CXXBoolLiteralExpr);
     VISIT(CXXConstructExpr);
@@ -598,9 +607,11 @@
     VISIT(FloatingLiteral);
     VISIT(GNUNullExpr);
     VISIT(ImaginaryLiteral);
+    VISIT(ImplicitCastExpr);
     VISIT(ImplicitValueInitExpr);
     VISIT(InitListExpr);
     VISIT(IntegerLiteral);
+    VISIT(MaterializeTemporaryExpr);
     VISIT(MemberExpr);
     VISIT(OffsetOfExpr);
     VISIT(OpaqueValueExpr);
@@ -616,6 +627,7 @@
     VISIT(SubstNonTypeTemplateParmPackExpr);
     VISIT(TypeTraitExpr);
     VISIT(UnaryOperator);
+    VISIT(UnaryExprOrTypeTraitExpr);
     VISIT(VAArgExpr);
 #undef VISIT
 
@@ -685,6 +697,7 @@
           llvm::cast<clang::DeclRefExpr>(expr)->getDecl()->getNameAsString();
       return true;
     }
+
     return false;
   }
 
@@ -701,14 +714,6 @@
       case clang::BO_LOr:
         return ExprEffect::Merge(VisitExpr(lhs, env), VisitExpr(rhs, env));
 
-      case clang::BO_Assign: {
-        std::string var_name;
-        if (IsRawPointerVar(lhs, &var_name)) {
-          return VisitExpr(rhs, env).Define(var_name);
-        }
-        return Par(expr, 2, exprs, env);
-      }
-
       default:
         return Par(expr, 2, exprs, env);
     }
@@ -718,6 +723,10 @@
     return VisitExpr(expr->getSubExpr(), env);
   }
 
+  DECL_VISIT_EXPR(MaterializeTemporaryExpr) {
+    return VisitExpr(expr->GetTemporaryExpr(), env);
+  }
+
   DECL_VISIT_EXPR(CXXConstructExpr) {
     return VisitArguments<>(expr, env);
   }
@@ -740,6 +749,12 @@
     return VisitExpr(expr->getSubExpr(), env);
   }
 
+  DECL_VISIT_EXPR(ImplicitCastExpr) {
+    return VisitExpr(expr->getSubExpr(), env);
+  }
+
+  DECL_VISIT_EXPR(ConstantExpr) { return VisitExpr(expr->getSubExpr(), env); }
+
   DECL_VISIT_EXPR(InitListExpr) {
     return Seq(expr, expr->getNumInits(), expr->getInits(), env);
   }
@@ -761,11 +776,11 @@
   }
 
   DECL_VISIT_EXPR(UnaryOperator) {
-    // TODO We are treating all expressions that look like &raw_pointer_var
-    //      as definitions of raw_pointer_var. This should be changed to
-    //      recognize less generic pattern:
+    // TODO(mstarzinger): We are treating all expressions that look like
+    // {&raw_pointer_var} as definitions of {raw_pointer_var}. This should be
+    // changed to recognize less generic pattern:
     //
-    //         if (maybe_object->ToObject(&obj)) return maybe_object;
+    //   if (maybe_object->ToObject(&obj)) return maybe_object;
     //
     if (expr->getOpcode() == clang::UO_AddrOf) {
       std::string var_name;
@@ -776,6 +791,14 @@
     return VisitExpr(expr->getSubExpr(), env);
   }
 
+  DECL_VISIT_EXPR(UnaryExprOrTypeTraitExpr) {
+    if (expr->isArgumentType()) {
+      return ExprEffect::None();
+    }
+
+    return VisitExpr(expr->getArgumentExpr(), env);
+  }
+
   DECL_VISIT_EXPR(CastExpr) {
     return VisitExpr(expr->getSubExpr(), env);
   }
@@ -796,7 +819,8 @@
 
     if (!props.IsSafe()) ReportUnsafe(parent, BAD_EXPR_MSG);
 
-    return props.ComputeCumulativeEffect(IsRawPointerType(parent->getType()));
+    return props.ComputeCumulativeEffect(
+        RepresentsRawPointerType(parent->getType()));
   }
 
   ExprEffect Seq(clang::Stmt* parent,
@@ -816,7 +840,7 @@
                  const clang::QualType& var_type,
                  const std::string& var_name,
                  const Environment& env) {
-    if (IsRawPointerType(var_type)) {
+    if (RepresentsRawPointerType(var_type)) {
       if (!env.IsAlive(var_name) && dead_vars_analysis_) {
         ReportUnsafe(parent, DEAD_VAR_MSG);
       }
@@ -840,7 +864,8 @@
     CallProps props;
     VisitArguments<>(call, &props, env);
     if (!props.IsSafe()) ReportUnsafe(call, BAD_EXPR_MSG);
-    return props.ComputeCumulativeEffect(IsRawPointerType(call->getType()));
+    return props.ComputeCumulativeEffect(
+        RepresentsRawPointerType(call->getType()));
   }
 
   template<typename ExprType>
@@ -864,12 +889,25 @@
       props.SetEffect(0, VisitExpr(receiver, env));
     }
 
-    VisitArguments<>(call, &props, env);
+    std::string var_name;
+    clang::CXXOperatorCallExpr* opcall =
+        llvm::dyn_cast_or_null<clang::CXXOperatorCallExpr>(call);
+    if (opcall != NULL && opcall->isAssignmentOp() &&
+        IsRawPointerVar(opcall->getArg(0), &var_name)) {
+      // TODO(mstarzinger): We are treating all assignment operator calls with
+      // the left hand side looking like {raw_pointer_var} as safe independent
+      // of the concrete assignment operator implementation. This should be
+      // changed to be more narrow only if the assignment operator of the base
+      // {Object} or {HeapObject} class was used, which we know to be safe.
+      props.SetEffect(1, VisitExpr(call->getArg(1), env).Define(var_name));
+    } else {
+      VisitArguments<>(call, &props, env);
+    }
 
     if (!props.IsSafe()) ReportUnsafe(call, BAD_EXPR_MSG);
 
-    ExprEffect out =
-        props.ComputeCumulativeEffect(IsRawPointerType(call->getType()));
+    ExprEffect out = props.ComputeCumulativeEffect(
+        RepresentsRawPointerType(call->getType()));
 
     clang::FunctionDecl* callee = call->getDirectCallee();
     if ((callee != NULL) && KnownToCauseGC(ctx_, callee)) {
@@ -1104,45 +1142,88 @@
     }
   }
 
-  bool IsDerivedFrom(clang::CXXRecordDecl* record,
-                     clang::CXXRecordDecl* base) {
+  bool IsDerivedFrom(const clang::CXXRecordDecl* record,
+                     const clang::CXXRecordDecl* base) {
     return (record == base) || record->isDerivedFrom(base);
   }
 
-  bool IsRawPointerType(clang::QualType qtype) {
-    const clang::PointerType* type =
+  const clang::CXXRecordDecl* GetDefinitionOrNull(
+      const clang::CXXRecordDecl* record) {
+    if (record == NULL) {
+      return NULL;
+    }
+
+    if (!InV8Namespace(record)) return NULL;
+
+    if (!record->hasDefinition()) {
+      return NULL;
+    }
+
+    return record->getDefinition();
+  }
+
+  bool IsRawPointerType(const clang::PointerType* type) {
+    const clang::CXXRecordDecl* record = type->getPointeeCXXRecordDecl();
+
+    const clang::CXXRecordDecl* definition = GetDefinitionOrNull(record);
+    if (!definition) {
+      return false;
+    }
+
+    // TODO(mstarzinger): Unify the common parts of {IsRawPointerType} and
+    // {IsInternalPointerType} once gcmole is up and running again.
+    bool result = (IsDerivedFrom(record, object_decl_) &&
+                   !IsDerivedFrom(record, smi_decl_)) ||
+                  IsDerivedFrom(record, maybe_object_decl_);
+    return result;
+  }
+
+  bool IsInternalPointerType(clang::QualType qtype) {
+    if (qtype.isNull()) {
+      return false;
+    }
+    if (qtype->isNullPtrType()) {
+      return true;
+    }
+
+    const clang::CXXRecordDecl* record = qtype->getAsCXXRecordDecl();
+
+    const clang::CXXRecordDecl* definition = GetDefinitionOrNull(record);
+    if (!definition) {
+      return false;
+    }
+
+    // TODO(mstarzinger): Unify the common parts of {IsRawPointerType} and
+    // {IsInternalPointerType} once gcmole is up and running again.
+    bool result = (IsDerivedFrom(record, object_decl_) &&
+                   !IsDerivedFrom(record, smi_decl_)) ||
+                  IsDerivedFrom(record, maybe_object_decl_);
+    return result;
+  }
+
+  // Returns whether the given type is a raw pointer or a wrapper around
+  // such. For V8 that means Object and MaybeObject instances.
+  bool RepresentsRawPointerType(clang::QualType qtype) {
+    const clang::PointerType* pointer_type =
         llvm::dyn_cast_or_null<clang::PointerType>(qtype.getTypePtrOrNull());
-    if (type == NULL) return false;
-
-    const clang::TagType* pointee =
-        ToTagType(type->getPointeeType().getTypePtr());
-    if (pointee == NULL) return false;
-
-    clang::CXXRecordDecl* record =
-        llvm::dyn_cast_or_null<clang::CXXRecordDecl>(pointee->getDecl());
-    if (record == NULL) return false;
-
-    if (!InV8Namespace(record)) return false;
-
-    if (!record->hasDefinition()) return false;
-
-    record = record->getDefinition();
-
-    return IsDerivedFrom(record, object_decl_) &&
-        !IsDerivedFrom(record, smi_decl_);
+    if (pointer_type != NULL) {
+      return IsRawPointerType(pointer_type);
+    } else {
+      return IsInternalPointerType(qtype);
+    }
   }
 
   Environment VisitDecl(clang::Decl* decl, const Environment& env) {
     if (clang::VarDecl* var = llvm::dyn_cast<clang::VarDecl>(decl)) {
       Environment out = var->hasInit() ? VisitStmt(var->getInit(), env) : env;
 
-      if (IsRawPointerType(var->getType())) {
+      if (RepresentsRawPointerType(var->getType())) {
         out = out.Define(var->getNameAsString());
       }
 
       return out;
     }
-    // TODO: handle other declarations?
+    // TODO(mstarzinger): handle other declarations?
     return env;
   }
 
@@ -1199,8 +1280,8 @@
 
 
   clang::MangleContext* ctx_;
-  clang::DeclarationName handle_decl_name_;
   clang::CXXRecordDecl* object_decl_;
+  clang::CXXRecordDecl* maybe_object_decl_;
   clang::CXXRecordDecl* smi_decl_;
 
   clang::DiagnosticsEngine& d_;
@@ -1231,23 +1312,34 @@
         r.ResolveNamespace("v8").ResolveNamespace("internal").
             Resolve<clang::CXXRecordDecl>("Object");
 
+    clang::CXXRecordDecl* maybe_object_decl =
+        r.ResolveNamespace("v8")
+            .ResolveNamespace("internal")
+            .Resolve<clang::CXXRecordDecl>("MaybeObject");
+
     clang::CXXRecordDecl* smi_decl =
         r.ResolveNamespace("v8").ResolveNamespace("internal").
             Resolve<clang::CXXRecordDecl>("Smi");
 
     if (object_decl != NULL) object_decl = object_decl->getDefinition();
 
+    if (maybe_object_decl != NULL)
+      maybe_object_decl = maybe_object_decl->getDefinition();
+
     if (smi_decl != NULL) smi_decl = smi_decl->getDefinition();
 
-    if (object_decl != NULL && smi_decl != NULL) {
+    if (object_decl != NULL && smi_decl != NULL && maybe_object_decl != NULL) {
       function_analyzer_ = new FunctionAnalyzer(
-          clang::ItaniumMangleContext::create(ctx, d_), r.ResolveName("Handle"),
-          object_decl, smi_decl, d_, sm_, dead_vars_analysis_);
+          clang::ItaniumMangleContext::create(ctx, d_), object_decl,
+          maybe_object_decl, smi_decl, d_, sm_, dead_vars_analysis_);
       TraverseDecl(ctx.getTranslationUnitDecl());
     } else {
       if (object_decl == NULL) {
         llvm::errs() << "Failed to resolve v8::internal::Object\n";
       }
+      if (maybe_object_decl == NULL) {
+        llvm::errs() << "Failed to resolve v8::internal::MaybeObject\n";
+      }
       if (smi_decl == NULL) {
         llvm::errs() << "Failed to resolve v8::internal::Smi\n";
       }
@@ -1271,9 +1363,10 @@
 template<typename ConsumerType>
 class Action : public clang::PluginASTAction {
  protected:
-  clang::ASTConsumer *CreateASTConsumer(clang::CompilerInstance &CI,
-                                        llvm::StringRef InFile) {
-    return new ConsumerType(CI.getDiagnostics(), CI.getSourceManager(), args_);
+  virtual std::unique_ptr<clang::ASTConsumer> CreateASTConsumer(
+      clang::CompilerInstance& CI, llvm::StringRef InFile) {
+    return std::unique_ptr<clang::ASTConsumer>(
+        new ConsumerType(CI.getDiagnostics(), CI.getSourceManager(), args_));
   }
 
   bool ParseArgs(const clang::CompilerInstance &CI,
diff --git a/src/v8/tools/gcmole/gcmole.lua b/src/v8/tools/gcmole/gcmole.lua
index 862b7b0..ae17fdc 100644
--- a/src/v8/tools/gcmole/gcmole.lua
+++ b/src/v8/tools/gcmole/gcmole.lua
@@ -112,6 +112,7 @@
       .. " -DV8_INTL_SUPPORT"
       .. " -I./"
       .. " -Iinclude/"
+      .. " -Iout/Release/gen"
       .. " -Ithird_party/icu/source/common"
       .. " -Ithird_party/icu/source/i18n"
       .. " " .. arch_options
@@ -181,34 +182,6 @@
 end
 
 -------------------------------------------------------------------------------
--- GYP file parsing
-
--- TODO(machenbach): Remove this when deprecating gyp.
-local function ParseGYPFile()
-   local result = {}
-   local gyp_files = {
-       { "src/v8.gyp",             "'([^']-%.cc)'",      "src/"         },
-       { "test/cctest/cctest.gyp", "'(test-[^']-%.cc)'", "test/cctest/" }
-   }
-
-   for i = 1, #gyp_files do
-      local filename = gyp_files[i][1]
-      local pattern = gyp_files[i][2]
-      local prefix = gyp_files[i][3]
-      local gyp_file = assert(io.open(filename), "failed to open GYP file")
-      local gyp = gyp_file:read('*a')
-      for condition, sources in
-         gyp:gmatch "%[.-### gcmole%((.-)%) ###(.-)%]" do
-         if result[condition] == nil then result[condition] = {} end
-         for file in sources:gmatch(pattern) do
-            table.insert(result[condition], prefix .. file)
-         end
-      end
-      gyp_file:close()
-   end
-
-   return result
-end
 
 local function ParseGNFile()
    local result = {}
@@ -258,34 +231,8 @@
 end
 
 
-local gyp_sources = ParseGYPFile()
 local gn_sources = ParseGNFile()
 
--- TODO(machenbach): Remove this comparison logic when deprecating gyp.
-local function CompareSources(sources1, sources2, what)
-  for condition, files1 in pairs(sources1) do
-    local files2 = sources2[condition]
-    assert(
-      files2 ~= nil,
-      "Missing gcmole condition in " .. what .. ": " .. condition)
-
-    -- Turn into set for speed.
-    files2_set = {}
-    for i, file in pairs(files2) do files2_set[file] = true end
-
-    for i, file in pairs(files1) do
-      assert(
-        files2_set[file] ~= nil,
-        "Missing file " .. file .. " in " .. what .. " for condition " ..
-        condition)
-    end
-  end
-end
-
-CompareSources(gyp_sources, gn_sources, "GN")
-CompareSources(gn_sources, gyp_sources, "GYP")
-
-
 local function FilesForArch(arch)
    return BuildFileList(gn_sources, { os = 'linux',
                                       arch = arch,
diff --git a/src/v8/tools/gcmole/package.sh b/src/v8/tools/gcmole/package.sh
new file mode 100755
index 0000000..6206e7b
--- /dev/null
+++ b/src/v8/tools/gcmole/package.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This script will package a built gcmole plugin together with the
+# corresponding clang binary into an archive which can be used on the
+# buildbot infrastructure to be run against V8 checkouts.
+
+THIS_DIR="$(readlink -f "$(dirname "${0}")")"
+
+PACKAGE_DIR="${THIS_DIR}/../../tools/gcmole/gcmole-tools"
+PACKAGE_FILE="${THIS_DIR}/../../tools/gcmole/gcmole-tools.tar.gz"
+PACKAGE_SUM="${THIS_DIR}/../../tools/gcmole/gcmole-tools.tar.gz.sha1"
+BUILD_DIR="${THIS_DIR}/../../third_party/llvm+clang-build"
+
+# Echo all commands
+set -x
+
+# Copy all required files
+mkdir -p "${PACKAGE_DIR}/bin"
+cp "${BUILD_DIR}/bin/clang++" "${PACKAGE_DIR}/bin"
+mkdir -p "${PACKAGE_DIR}/lib"
+cp -r "${BUILD_DIR}/lib/clang" "${PACKAGE_DIR}/lib"
+cp "${THIS_DIR}/libgcmole.so" "${PACKAGE_DIR}"
+
+# Generate the archive
+cd "$(dirname "${PACKAGE_DIR}")"
+tar -c -z -f "${PACKAGE_FILE}" "$(basename "${PACKAGE_DIR}")"
+
+# Generate checksum
+sha1sum "${PACKAGE_FILE}" | awk '{print $1}' > "${PACKAGE_SUM}"
+
+set +x
+
+echo
+echo You can find a packaged version of gcmole here:
+echo
+echo $(readlink -f "${PACKAGE_FILE}")
+echo
+echo You can now run gcmole using this command:
+echo
+echo CLANG_BIN="tools/gcmole/gcmole-tools/bin" lua tools/gcmole/gcmole.lua
+echo
diff --git a/src/v8/tools/gcmole/parallel.py b/src/v8/tools/gcmole/parallel.py
index 0c045f4..7ff95cc 100755
--- a/src/v8/tools/gcmole/parallel.py
+++ b/src/v8/tools/gcmole/parallel.py
@@ -20,6 +20,9 @@
 ______________ finish <exit code of clang --opt file2> ______________
 """
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import itertools
 import multiprocessing
 import subprocess
@@ -39,6 +42,6 @@
   cmdlines = ["%s %s" % (sys.argv[1], filename) for filename in sys.argv[2:]]
   for filename, result in itertools.izip(
       sys.argv[2:], pool.imap(invoke, cmdlines)):
-    print "______________ %s" % filename
-    print result[0]
-    print "______________ finish %d ______________" % result[1]
+    print("______________ %s" % filename)
+    print(result[0])
+    print("______________ finish %d ______________" % result[1])
diff --git a/src/v8/tools/gcmole/run-gcmole.py b/src/v8/tools/gcmole/run-gcmole.py
index 88799e3..6f2a091 100755
--- a/src/v8/tools/gcmole/run-gcmole.py
+++ b/src/v8/tools/gcmole/run-gcmole.py
@@ -3,7 +3,11 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import os
+import os.path
 import signal
 import subprocess
 import sys
@@ -17,6 +21,11 @@
 
 assert len(sys.argv) == 2
 
+if not os.path.isfile("out/Release/gen/torque-generated/builtin-definitions-tq.h"):
+  print("Expected generated headers in out/Release/gen.")
+  print("Either build v8 in out/Release or change gcmole.lua:115")
+  sys.exit(-1)
+
 proc = subprocess.Popen(
     [LUA, DRIVER, sys.argv[1]],
     env={'CLANG_BIN': CLANG_BIN, 'CLANG_PLUGINS': CLANG_PLUGINS},
diff --git a/src/v8/tools/gdb-v8-support.py b/src/v8/tools/gdb-v8-support.py
index a0262f0..f8442bf 100644
--- a/src/v8/tools/gdb-v8-support.py
+++ b/src/v8/tools/gdb-v8-support.py
@@ -25,12 +25,15 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import re
 import tempfile
 import os
 import subprocess
 import time
-
+import gdb
 
 kSmiTag = 0
 kSmiTagSize = 1
diff --git a/src/v8/tools/gdbinit b/src/v8/tools/gdbinit
index fa9f434..ad7847d 100644
--- a/src/v8/tools/gdbinit
+++ b/src/v8/tools/gdbinit
@@ -2,18 +2,27 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-# Print HeapObjects.
+# Print tagged object.
 define job
-call _v8_internal_Print_Object((void*)($arg0))
+call (void) _v8_internal_Print_Object((void*)($arg0))
 end
 document job
 Print a v8 JavaScript object
 Usage: job tagged_ptr
 end
 
-# Print v8::Local handle value.
+# Print content of v8::internal::Handle.
+define jh
+call (void) _v8_internal_Print_Object(*((v8::internal::Object**)($arg0).location_))
+end
+document jh
+Print content of a v8::internal::Handle
+Usage: jh internal_handle
+end
+
+# Print content of v8::Local handle.
 define jlh
-call _v8_internal_Print_Object(*((v8::internal::Object**)($arg0).val_))
+call (void) _v8_internal_Print_Object(*((v8::internal::Object**)($arg0).val_))
 end
 document jlh
 Print content of a v8::Local handle
@@ -22,62 +31,25 @@
 
 # Print Code objects containing given PC.
 define jco
-call _v8_internal_Print_Code((void*)($arg0))
+call (void) _v8_internal_Print_Code((void*)($arg0))
 end
 document jco
 Print a v8 Code object from an internal code address
 Usage: jco pc
 end
 
-# Print FeedbackVector
-define jfv
-call _v8_internal_Print_FeedbackVector((void*)($arg0))
-end
-document jfv
-Print a v8 FeedbackVector object
-Usage: jfv tagged_ptr
-end
-
-# Print FeedbackMetadata
-define jfm
-call _v8_internal_Print_FeedbackMetadata((void*)($arg0))
-end
-document jfm
-Print a v8 FeedbackMetadata object
-Usage: jfm tagged_ptr
-end
-
-
-# Print DescriptorArray.
-define jda
-call _v8_internal_Print_DescriptorArray((void*)($arg0))
-end
-document jda
-Print a v8 DescriptorArray object
-Usage: jda tagged_ptr
-end
-
 # Print LayoutDescriptor.
 define jld
-call _v8_internal_Print_LayoutDescriptor((void*)($arg0))
+call (void) _v8_internal_Print_LayoutDescriptor((void*)($arg0))
 end
 document jld
 Print a v8 LayoutDescriptor object
 Usage: jld tagged_ptr
 end
 
-# Print TransitionArray.
-define jta
-call _v8_internal_Print_TransitionArray((void*)($arg0))
-end
-document jta
-Print a v8 TransitionArray object
-Usage: jta tagged_ptr
-end
-
 # Print TransitionTree.
 define jtt
-call _v8_internal_Print_TransitionTree((void*)($arg0))
+call (void) _v8_internal_Print_TransitionTree((void*)($arg0))
 end
 document jtt
 Print the complete transition tree of the given v8 Map.
@@ -86,13 +58,22 @@
 
 # Print JavaScript stack trace.
 define jst
-call _v8_internal_Print_StackTrace()
+call (void) _v8_internal_Print_StackTrace()
 end
 document jst
 Print the current JavaScript stack trace
 Usage: jst
 end
 
+# Print TurboFan graph node.
+define pn
+call _v8_internal_Node_Print((void*)($arg0))
+end
+document pn
+Print a v8 TurboFan graph node
+Usage: pn node_address
+end
+
 # Skip the JavaScript stack.
 define jss
 set $js_entry_sp=v8::internal::Isolate::Current()->thread_local_top()->js_entry_sp_
@@ -110,7 +91,7 @@
 python
 import re
 frame_re = re.compile("^#(\d+)\s*(?:0x[a-f\d]+ in )?(.+) \(.+ at (.+)")
-assert_re = re.compile("^\s*(\S+) = .+<v8::internal::Per\w+AssertType::(\w+)_ASSERT, (false|true)>")
+assert_re = re.compile("^\s*(\S+) = .+<v8::internal::Per\w+AssertScope<v8::internal::(\S*), (false|true)>")
 btl = gdb.execute("backtrace full", to_string = True).splitlines()
 for l in btl:
   match = frame_re.match(l)
@@ -135,8 +116,8 @@
 # Search for a pointer inside all valid pages.
 define space_find
   set $space = $arg0
-  set $current_page = $space->anchor()->next_page()
-  while ($current_page != $space->anchor())
+  set $current_page = $space->first_page()
+  while ($current_page != 0)
     printf "#   Searching in %p - %p\n", $current_page->area_start(), $current_page->area_end()-1
     find $current_page->area_start(), $current_page->area_end()-1, $arg1
     set $current_page = $current_page->next_page()
@@ -159,3 +140,87 @@
 
 set disassembly-flavor intel
 set disable-randomization off
+
+# Install a handler whenever the debugger stops due to a signal. It walks up the
+# stack looking for V8_Dcheck and moves the frame to the one above it so it's
+# immediately at the line of code that triggered the DCHECK.
+python
+def dcheck_stop_handler(event):
+  frame = gdb.selected_frame()
+  select_frame = None
+  message = None
+  count = 0
+  # limit stack scanning since they're usually shallow and otherwise stack
+  # overflows can be very slow.
+  while frame is not None and count < 5:
+    count += 1
+    if frame.name() == 'V8_Dcheck':
+      frame_message = gdb.lookup_symbol('message', frame.block())[0]
+      if frame_message:
+        message = frame_message.value(frame).string()
+      select_frame = frame.older()
+      break
+    if frame.name() is not None and frame.name().startswith('V8_Fatal'):
+      select_frame = frame.older()
+    frame = frame.older()
+
+  if select_frame is not None:
+    select_frame.select()
+    gdb.execute('frame')
+    if message:
+      print('DCHECK error: {}'.format(message))
+
+gdb.events.stop.connect(dcheck_stop_handler)
+end
+
+# Code imported from chromium/src/tools/gdb/gdbinit
+python
+
+import os
+import subprocess
+import sys
+
+compile_dirs = set()
+
+
+def get_current_debug_file_directories():
+  dir = gdb.execute("show debug-file-directory", to_string=True)
+  dir = dir[
+      len('The directory where separate debug symbols are searched for is "'
+         ):-len('".') - 1]
+  return set(dir.split(":"))
+
+
+def add_debug_file_directory(dir):
+  # gdb has no function to add a debug-file-directory, so simulate that by using
+  # `show debug-file-directory` and `set debug-file-directory <directories>`.
+  current_dirs = get_current_debug_file_directories()
+  current_dirs.add(dir)
+  gdb.execute(
+      "set debug-file-directory %s" % ":".join(current_dirs), to_string=True)
+
+
+def newobj_handler(event):
+  global compile_dirs
+  compile_dir = os.path.dirname(event.new_objfile.filename)
+  if not compile_dir:
+    return
+  if compile_dir in compile_dirs:
+    return
+  compile_dirs.add(compile_dir)
+
+  # Add source path
+  gdb.execute("dir %s" % compile_dir)
+
+  # Need to tell the location of .dwo files.
+  # https://sourceware.org/gdb/onlinedocs/gdb/Separate-Debug-Files.html
+  # https://crbug.com/603286#c35
+  add_debug_file_directory(compile_dir)
+
+# Event hook for newly loaded objfiles.
+# https://sourceware.org/gdb/onlinedocs/gdb/Events-In-Python.html
+gdb.events.new_objfile.connect(newobj_handler)
+
+gdb.execute("set environment V8_GDBINIT_SOURCED=1")
+
+end
diff --git a/src/v8/tools/gen-inlining-tests.py b/src/v8/tools/gen-inlining-tests.py
index a790236..400386c 100644
--- a/src/v8/tools/gen-inlining-tests.py
+++ b/src/v8/tools/gen-inlining-tests.py
@@ -1,9 +1,11 @@
-#!/usr/bin/env python3
+#!/usr/bin/env python
 
 # Copyright 2016 the V8 project authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+# for py2/py3 compatibility
+from __future__ import print_function
 
 from collections import namedtuple
 import textwrap
diff --git a/src/v8/tools/gen-keywords-gen-h.py b/src/v8/tools/gen-keywords-gen-h.py
new file mode 100755
index 0000000..02750dc
--- /dev/null
+++ b/src/v8/tools/gen-keywords-gen-h.py
@@ -0,0 +1,252 @@
+#!/usr/bin/env python
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+import subprocess
+import re
+import math
+
+INPUT_PATH = "src/parsing/keywords.txt"
+OUTPUT_PATH = "src/parsing/keywords-gen.h"
+
+# TODO(leszeks): Trimming seems to regress performance, investigate.
+TRIM_CHAR_TABLE = False
+
+
+def next_power_of_2(x):
+  return 1 if x == 0 else 2**int(math.ceil(math.log(x, 2)))
+
+
+def call_with_input(cmd, input_string=""):
+  p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+  stdout, _ = p.communicate(input_string)
+  retcode = p.wait()
+  if retcode != 0:
+    raise subprocess.CalledProcessError(retcode, cmd)
+  return stdout
+
+
+def checked_sub(pattern, sub, out, count=1, flags=0):
+  out, n = re.subn(pattern, sub, out, flags=flags)
+  if n != count:
+    raise Exception("Didn't get exactly %d replacement(s) for pattern: %s" %
+                    (count, pattern))
+  return out
+
+
+def change_sizet_to_int(out):
+  # Literal buffer lengths are given as ints, not size_t
+  return checked_sub(r'\bsize_t\b', 'int', out, count=4)
+
+
+def drop_line_directives(out):
+  # #line causes gcov issue, so drop it
+  return re.sub(r'^#\s*line .*$\n', '', out, flags=re.MULTILINE)
+
+
+def trim_and_dcheck_char_table(out):
+  # Potential keyword strings are known to be lowercase ascii, so chop off the
+  # rest of the table and mask out the char
+
+  reads_re = re.compile(
+      r'asso_values\[static_cast<unsigned char>\(str\[(\d+)\]\)\]')
+
+  dchecks = []
+  for str_read in reads_re.finditer(out):
+    dchecks.append("DCHECK_LT(str[%d], 128);" % int(str_read.group(1)))
+
+  if TRIM_CHAR_TABLE:
+    out = checked_sub(
+        r'static const unsigned char asso_values\[\]\s*=\s*\{(\s*\d+\s*,){96}',
+        "".join(dchecks) + r'static const unsigned char asso_values[32] = {',
+        out,
+        flags=re.MULTILINE)
+    out = checked_sub(
+        reads_re.pattern,
+        r'asso_values[static_cast<unsigned char>(str[(\1)]&31)]',
+        out,
+        count=len(dchecks),
+        flags=re.MULTILINE)
+  else:
+    out = checked_sub(
+        r'static const unsigned char asso_values\[\]\s*=\s*\{',
+        "".join(dchecks) + r'static const unsigned char asso_values[128] = {',
+        out,
+        flags=re.MULTILINE)
+
+  return out
+
+
+def use_isinrange(out):
+  # Our IsInRange method is more efficient than checking for min/max length
+  return checked_sub(r'if \(len <= MAX_WORD_LENGTH && len >= MIN_WORD_LENGTH\)',
+                     r'if (IsInRange(len, MIN_WORD_LENGTH, MAX_WORD_LENGTH))',
+                     out)
+
+
+def pad_tables(out):
+  # We don't want to compare against the max hash value, so pad the tables up
+  # to a power of two and mask the hash.
+
+  # First get the new size
+  max_hash_value = int(re.search(r'MAX_HASH_VALUE\s*=\s*(\d+)', out).group(1))
+  old_table_length = max_hash_value + 1
+  new_table_length = next_power_of_2(old_table_length)
+  table_padding_len = new_table_length - old_table_length
+
+  # Pad the length table.
+  single_lengthtable_entry = r'\d+'
+  out = checked_sub(
+      r"""
+      static\ const\ unsigned\ char\ kPerfectKeywordLengthTable\[\]\s*=\s*\{
+        (
+          \s*%(single_lengthtable_entry)s\s*
+          (?:,\s*%(single_lengthtable_entry)s\s*)*
+        )
+      \}
+    """ % {'single_lengthtable_entry': single_lengthtable_entry},
+      r'static const unsigned char kPerfectKeywordLengthTable[%d] = { \1 %s }'
+      % (new_table_length, "".join([',0'] * table_padding_len)),
+      out,
+      flags=re.MULTILINE | re.VERBOSE)
+
+  # Pad the word list.
+  single_wordlist_entry = r"""
+      (?:\#line\ \d+\ ".*"$\s*)?
+      \{\s*"[a-z]*"\s*,\s*Token::[A-Z_]+\}
+    """
+  out = checked_sub(
+      r"""
+      static\ const\ struct\ PerfectKeywordHashTableEntry\ kPerfectKeywordHashTable\[\]\s*=\s*\{
+        (
+          \s*%(single_wordlist_entry)s\s*
+          (?:,\s*%(single_wordlist_entry)s\s*)*
+        )
+      \}
+    """ % {'single_wordlist_entry': single_wordlist_entry},
+      r'static const struct PerfectKeywordHashTableEntry kPerfectKeywordHashTable[%d] = {\1 %s }'
+      % (new_table_length, "".join(
+          [',{"",Token::IDENTIFIER}'] * table_padding_len)),
+      out,
+      flags=re.MULTILINE | re.VERBOSE)
+
+  # Mask the hash and replace the range check with DCHECKs.
+  out = checked_sub(r'Hash\s*\(\s*str,\s*len\s*\)',
+                    r'Hash(str, len)&0x%x' % (new_table_length - 1), out)
+  out = checked_sub(
+      r'if \(key <= MAX_HASH_VALUE\)',
+      r'DCHECK_LT(key, arraysize(kPerfectKeywordLengthTable));DCHECK_LT(key, arraysize(kPerfectKeywordHashTable));',
+      out)
+
+  return out
+
+
+def return_token(out):
+  # We want to return the actual token rather than the table entry.
+
+  # Change the return type of the function. Make it inline too.
+  out = checked_sub(
+      r'const\s*struct\s*PerfectKeywordHashTableEntry\s*\*\s*((?:PerfectKeywordHash::)?GetToken)',
+      r'inline Token::Value \1',
+      out,
+      count=2)
+
+  # Change the return value when the keyword is found
+  out = checked_sub(r'return &kPerfectKeywordHashTable\[key\];',
+                    r'return kPerfectKeywordHashTable[key].value;', out)
+
+  # Change the return value when the keyword is not found
+  out = checked_sub(r'return 0;', r'return Token::IDENTIFIER;', out)
+
+  return out
+
+
+def memcmp_to_while(out):
+  # It's faster to loop over the keyword with a while loop than calling memcmp.
+  # Careful, this replacement is quite flaky, because otherwise the regex is
+  # unreadable.
+  return checked_sub(
+      re.escape("if (*str == *s && !memcmp (str + 1, s + 1, len - 1))") + r"\s*"
+      + re.escape("return kPerfectKeywordHashTable[key].value;"),
+      """
+      while(*s!=0) {
+        if (*s++ != *str++) return Token::IDENTIFIER;
+      }
+      return kPerfectKeywordHashTable[key].value;
+      """,
+      out,
+      flags=re.MULTILINE)
+
+
+def wrap_namespace(out):
+  return """// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is automatically generated by gen-keywords-gen-h.py and should not
+// be modified manually.
+
+#ifndef V8_PARSING_KEYWORDS_GEN_H_
+#define V8_PARSING_KEYWORDS_GEN_H_
+
+#include "src/parsing/token.h"
+
+namespace v8 {
+namespace internal {
+
+%s
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_PARSING_KEYWORDS_GEN_H_
+""" % (out)
+
+
+def trim_character_set_warning(out):
+  # gperf generates an error message that is too large, trim it
+
+  return out.replace(
+      '"gperf generated tables don\'t work with this execution character set. Please report a bug to <bug-gperf@gnu.org>."',
+      '"gperf generated tables don\'t work with this execution character set."\\\n// If you see this error, please report a bug to <bug-gperf@gnu.org>.'
+  )
+
+
+def main():
+  try:
+    script_dir = os.path.dirname(sys.argv[0])
+    root_dir = os.path.join(script_dir, '..')
+
+    out = subprocess.check_output(["gperf", "-m100", INPUT_PATH], cwd=root_dir)
+
+    # And now some munging of the generated file.
+    out = change_sizet_to_int(out)
+    out = drop_line_directives(out)
+    out = trim_and_dcheck_char_table(out)
+    out = use_isinrange(out)
+    out = pad_tables(out)
+    out = return_token(out)
+    out = memcmp_to_while(out)
+    out = wrap_namespace(out)
+    out = trim_character_set_warning(out)
+
+    # Final formatting.
+    clang_format_path = os.path.join(root_dir,
+                                     'third_party/depot_tools/clang-format')
+    out = call_with_input([clang_format_path], out)
+
+    with open(os.path.join(root_dir, OUTPUT_PATH), 'w') as f:
+      f.write(out)
+
+    return 0
+
+  except subprocess.CalledProcessError as e:
+    sys.stderr.write("Error calling '{}'\n".format(" ".join(e.cmd)))
+    return e.returncode
+
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/src/v8/tools/gen-postmortem-metadata.py b/src/v8/tools/gen-postmortem-metadata.py
index 043ecc3..2323e8a 100644
--- a/src/v8/tools/gen-postmortem-metadata.py
+++ b/src/v8/tools/gen-postmortem-metadata.py
@@ -46,6 +46,9 @@
 # the generated libv8 binary.
 #
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import re
 import sys
 
@@ -58,6 +61,9 @@
     { 'name': 'APIObjectType',          'value': 'JS_API_OBJECT_TYPE' },
     { 'name': 'SpecialAPIObjectType',   'value': 'JS_SPECIAL_API_OBJECT_TYPE' },
 
+    { 'name': 'FirstContextType',     'value': 'FIRST_CONTEXT_TYPE' },
+    { 'name': 'LastContextType',     'value': 'LAST_CONTEXT_TYPE' },
+
     { 'name': 'IsNotStringMask',        'value': 'kIsNotStringMask' },
     { 'name': 'StringTag',              'value': 'kStringTag' },
 
@@ -79,7 +85,6 @@
     { 'name': 'SmiTagMask',             'value': 'kSmiTagMask' },
     { 'name': 'SmiValueShift',          'value': 'kSmiTagSize' },
     { 'name': 'SmiShiftSize',           'value': 'kSmiShiftSize' },
-    { 'name': 'PointerSizeLog2',        'value': 'kPointerSizeLog2' },
 
     { 'name': 'OddballFalse',           'value': 'Oddball::kFalse' },
     { 'name': 'OddballTrue',            'value': 'Oddball::kTrue' },
@@ -91,8 +96,6 @@
     { 'name': 'OddballOther',           'value': 'Oddball::kOther' },
     { 'name': 'OddballException',       'value': 'Oddball::kException' },
 
-    { 'name': 'prop_idx_first',
-        'value': 'DescriptorArray::kFirstIndex' },
     { 'name': 'prop_kind_Data',
         'value': 'kData' },
     { 'name': 'prop_kind_Accessor',
@@ -123,26 +126,14 @@
         'value': 'PropertyDetails::RepresentationField::kMask' },
     { 'name': 'prop_representation_shift',
         'value': 'PropertyDetails::RepresentationField::kShift' },
-    { 'name': 'prop_representation_integer8',
-        'value': 'Representation::Kind::kInteger8' },
-    { 'name': 'prop_representation_uinteger8',
-        'value': 'Representation::Kind::kUInteger8' },
-    { 'name': 'prop_representation_integer16',
-        'value': 'Representation::Kind::kInteger16' },
-    { 'name': 'prop_representation_uinteger16',
-        'value': 'Representation::Kind::kUInteger16' },
     { 'name': 'prop_representation_smi',
         'value': 'Representation::Kind::kSmi' },
-    { 'name': 'prop_representation_integer32',
-        'value': 'Representation::Kind::kInteger32' },
     { 'name': 'prop_representation_double',
         'value': 'Representation::Kind::kDouble' },
     { 'name': 'prop_representation_heapobject',
         'value': 'Representation::Kind::kHeapObject' },
     { 'name': 'prop_representation_tagged',
         'value': 'Representation::Kind::kTagged' },
-    { 'name': 'prop_representation_external',
-        'value': 'Representation::Kind::kExternal' },
 
     { 'name': 'prop_desc_key',
         'value': 'DescriptorArray::kEntryKeyIndex' },
@@ -184,25 +175,18 @@
 
     { 'name': 'scopeinfo_idx_nparams',
         'value': 'ScopeInfo::kParameterCount' },
-    { 'name': 'scopeinfo_idx_nstacklocals',
-        'value': 'ScopeInfo::kStackLocalCount' },
     { 'name': 'scopeinfo_idx_ncontextlocals',
         'value': 'ScopeInfo::kContextLocalCount' },
     { 'name': 'scopeinfo_idx_first_vars',
         'value': 'ScopeInfo::kVariablePartIndex' },
 
-    { 'name': 'sharedfunctioninfo_start_position_mask',
-        'value': 'SharedFunctionInfo::StartPositionBits::kMask' },
-    { 'name': 'sharedfunctioninfo_start_position_shift',
-        'value': 'SharedFunctionInfo::StartPositionBits::kShift' },
+    { 'name': 'jsarray_buffer_was_detached_mask',
+        'value': 'JSArrayBuffer::WasDetachedBit::kMask' },
+    { 'name': 'jsarray_buffer_was_detached_shift',
+        'value': 'JSArrayBuffer::WasDetachedBit::kShift' },
 
-    { 'name': 'jsarray_buffer_was_neutered_mask',
-        'value': 'JSArrayBuffer::WasNeutered::kMask' },
-    { 'name': 'jsarray_buffer_was_neutered_shift',
-        'value': 'JSArrayBuffer::WasNeutered::kShift' },
-
-    { 'name': 'context_idx_closure',
-        'value': 'Context::CLOSURE_INDEX' },
+    { 'name': 'context_idx_scope_info',
+        'value': 'Context::SCOPE_INFO_INDEX' },
     { 'name': 'context_idx_native',
         'value': 'Context::NATIVE_CONTEXT_INDEX' },
     { 'name': 'context_idx_prev',
@@ -211,6 +195,9 @@
         'value': 'Context::EXTENSION_INDEX' },
     { 'name': 'context_min_slots',
         'value': 'Context::MIN_CONTEXT_SLOTS' },
+    { 'name': 'native_context_embedder_data_offset',
+        'value': 'Internals::kNativeContextEmbedderDataOffset' },
+
 
     { 'name': 'namedictionaryshape_prefix_size',
         'value': 'NameDictionaryShape::kPrefixSize' },
@@ -226,6 +213,16 @@
         'value': 'NumberDictionaryShape::kPrefixSize' },
     { 'name': 'numberdictionaryshape_entry_size',
         'value': 'NumberDictionaryShape::kEntrySize' },
+
+    { 'name': 'simplenumberdictionaryshape_prefix_size',
+        'value': 'SimpleNumberDictionaryShape::kPrefixSize' },
+    { 'name': 'simplenumberdictionaryshape_entry_size',
+        'value': 'SimpleNumberDictionaryShape::kEntrySize' },
+
+    { 'name': 'type_JSError__JS_ERROR_TYPE', 'value': 'JS_ERROR_TYPE' },
+
+    { 'name': 'class_SharedFunctionInfo__function_data__Object',
+        'value': 'SharedFunctionInfo::kFunctionDataOffset' },
 ];
 
 #
@@ -238,13 +235,15 @@
 #
 extras_accessors = [
     'JSFunction, context, Context, kContextOffset',
+    'JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset',
     'HeapObject, map, Map, kMapOffset',
     'JSObject, elements, Object, kElementsOffset',
     'JSObject, internal_fields, uintptr_t, kHeaderSize',
     'FixedArray, data, uintptr_t, kHeaderSize',
-    'FixedTypedArrayBase, external_pointer, Object, kExternalPointerOffset',
-    'JSArrayBuffer, backing_store, Object, kBackingStoreOffset',
-    'JSArrayBufferView, byte_offset, Object, kByteOffsetOffset',
+    'JSArrayBuffer, backing_store, uintptr_t, kBackingStoreOffset',
+    'JSArrayBuffer, byte_length, size_t, kByteLengthOffset',
+    'JSArrayBufferView, byte_length, size_t, kByteLengthOffset',
+    'JSArrayBufferView, byte_offset, size_t, kByteOffsetOffset',
     'JSTypedArray, length, Object, kLengthOffset',
     'Map, instance_size_in_words, char, kInstanceSizeInWordsOffset',
     'Map, inobject_properties_start_or_constructor_function_index, char, kInObjectPropertiesStartOrConstructorFunctionIndexOffset',
@@ -255,22 +254,19 @@
     'Map, prototype, Object, kPrototypeOffset',
     'Oddball, kind_offset, int, kKindOffset',
     'HeapNumber, value, double, kValueOffset',
-    'ConsString, first, String, kFirstOffset',
-    'ConsString, second, String, kSecondOffset',
     'ExternalString, resource, Object, kResourceOffset',
     'SeqOneByteString, chars, char, kHeaderSize',
     'SeqTwoByteString, chars, char, kHeaderSize',
-    'SharedFunctionInfo, code, Code, kCodeOffset',
-    'SharedFunctionInfo, scope_info, ScopeInfo, kScopeInfoOffset',
-    'SharedFunctionInfo, function_token_position, int, kFunctionTokenPositionOffset',
-    'SharedFunctionInfo, start_position_and_type, int, kStartPositionAndTypeOffset',
-    'SharedFunctionInfo, end_position, int, kEndPositionOffset',
-    'SharedFunctionInfo, internal_formal_parameter_count, int, kFormalParameterCountOffset',
-    'SharedFunctionInfo, compiler_hints, int, kCompilerHintsOffset',
-    'SharedFunctionInfo, length, int, kLengthOffset',
+    'UncompiledData, start_position, int32_t, kStartPositionOffset',
+    'UncompiledData, end_position, int32_t, kEndPositionOffset',
+    'SharedFunctionInfo, raw_function_token_offset, int16_t, kFunctionTokenOffsetOffset',
+    'SharedFunctionInfo, internal_formal_parameter_count, uint16_t, kFormalParameterCountOffset',
+    'SharedFunctionInfo, flags, int, kFlagsOffset',
+    'SharedFunctionInfo, length, uint16_t, kLengthOffset',
     'SlicedString, parent, String, kParentOffset',
     'Code, instruction_start, uintptr_t, kHeaderSize',
     'Code, instruction_size, int, kInstructionSizeOffset',
+    'String, length, int32_t, kLengthOffset',
 ];
 
 #
@@ -280,8 +276,8 @@
 #
 expected_classes = [
     'ConsString', 'FixedArray', 'HeapNumber', 'JSArray', 'JSFunction',
-    'JSObject', 'JSRegExp', 'JSValue', 'Map', 'Oddball', 'Script',
-    'SeqOneByteString', 'SharedFunctionInfo'
+    'JSObject', 'JSRegExp', 'JSPrimitiveWrapper', 'Map', 'Oddball', 'Script',
+    'SeqOneByteString', 'SharedFunctionInfo', 'ScopeInfo', 'JSPromise'
 ];
 
 
@@ -299,12 +295,17 @@
  * This file is generated by %s.  Do not edit directly.
  */
 
-#include "src/v8.h"
-#include "src/frames.h"
-#include "src/frames-inl.h" /* for architecture-specific frame constants */
-#include "src/contexts.h"
+#include "src/init/v8.h"
+#include "src/execution/frames.h"
+#include "src/execution/frames-inl.h" /* for architecture-specific frame constants */
+#include "src/objects/contexts.h"
+#include "src/objects/objects.h"
+#include "src/objects/data-handler.h"
+#include "src/objects/js-promise.h"
+#include "src/objects/js-regexp-string-iterator.h"
 
-using namespace v8::internal;
+namespace v8 {
+namespace internal {
 
 extern "C" {
 
@@ -320,6 +321,9 @@
 
 footer = '''
 }
+
+}
+}
 '''
 
 #
@@ -364,6 +368,7 @@
         in_insttype = False;
 
         typestr = '';
+        uncommented_file = ''
 
         #
         # Iterate the header file line-by-line to collect type and class
@@ -386,15 +391,26 @@
                         typestr += line;
                         continue;
 
-                match = re.match('class (\w[^:]*)(: public (\w[^{]*))?\s*{\s*',
-                    line);
+                uncommented_file += '\n' + line
 
-                if (match):
-                        klass = match.group(1).strip();
-                        pklass = match.group(3);
-                        if (pklass):
-                                pklass = pklass.strip();
-                        klasses[klass] = { 'parent': pklass };
+        for match in re.finditer(r'\nclass(?:\s+V8_EXPORT(?:_PRIVATE)?)?'
+                                 r'\s+(\w[^:;]*)'
+                                 r'(?:: public (\w[^{]*))?\s*{\s*',
+                                 uncommented_file):
+                klass = match.group(1).strip();
+                pklass = match.group(2);
+                if (pklass):
+                        # Check for generated Torque class.
+                        gen_match = re.match(
+                            r'TorqueGenerated\w+\s*<\s*\w+,\s*(\w+)\s*>',
+                            pklass)
+                        if (gen_match):
+                                pklass = gen_match.group(1)
+                        # Strip potential template arguments from parent
+                        # class.
+                        match = re.match(r'(\w+)(<.*>)?', pklass.strip());
+                        pklass = match.group(1).strip();
+                klasses[klass] = { 'parent': pklass };
 
         #
         # Process the instance type declaration.
@@ -412,14 +428,9 @@
         #
         for type in types:
                 #
-                # Symbols and Strings are implemented using the same classes.
-                #
-                usetype = re.sub('SYMBOL_', 'STRING_', type);
-
-                #
                 # REGEXP behaves like REG_EXP, as in JS_REGEXP_TYPE => JSRegExp.
                 #
-                usetype = re.sub('_REGEXP_', '_REG_EXP_', usetype);
+                usetype = re.sub('_REGEXP_', '_REG_EXP_', type);
 
                 #
                 # Remove the "_TYPE" suffix and then convert to camel case,
@@ -513,7 +524,8 @@
 
         consts = [];
 
-        if (kind == 'ACCESSORS' or kind == 'ACCESSORS_GCSAFE'):
+        if (kind == 'ACCESSORS' or kind == 'ACCESSORS2' or
+            kind == 'ACCESSORS_GCSAFE'):
                 klass = args[0];
                 field = args[1];
                 dtype = args[2].replace('<', '_').replace('>', '_')
@@ -556,7 +568,7 @@
         # may span multiple lines and may contain nested parentheses.  We also
         # call parse_field() to pick apart the invocation.
         #
-        prefixes = [ 'ACCESSORS', 'ACCESSORS_GCSAFE',
+        prefixes = [ 'ACCESSORS', 'ACCESSORS2', 'ACCESSORS_GCSAFE',
                      'SMI_ACCESSORS', 'ACCESSORS_TO_SMI' ];
         current = '';
         opens = 0;
@@ -616,7 +628,7 @@
 # Emit the whole output file.
 #
 def emit_config():
-        out = file(sys.argv[1], 'w');
+        out = open(sys.argv[1], 'w');
 
         out.write(header);
 
diff --git a/src/v8/tools/generate-builtins-tests.py b/src/v8/tools/generate-builtins-tests.py
index 4e6961d..4380f91 100755
--- a/src/v8/tools/generate-builtins-tests.py
+++ b/src/v8/tools/generate-builtins-tests.py
@@ -3,6 +3,9 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import json
 import optparse
 import os
diff --git a/src/v8/tools/generate-header-include-checks.py b/src/v8/tools/generate-header-include-checks.py
new file mode 100755
index 0000000..fa18d85
--- /dev/null
+++ b/src/v8/tools/generate-header-include-checks.py
@@ -0,0 +1,148 @@
+#!/usr/bin/env python
+# vim:fenc=utf-8:shiftwidth=2
+
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Check that each header can be included in isolation.
+
+For each header we generate one .cc file which only includes this one header.
+All these .cc files are then added to a sources.gni file which is included in
+BUILD.gn. Just compile to check whether there are any violations to the rule
+that each header must be includable in isolation.
+"""
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import argparse
+import os
+import os.path
+import re
+import sys
+
+# TODO(clemensh): Extend to tests.
+DEFAULT_INPUT = ['base', 'src']
+DEFAULT_GN_FILE = 'BUILD.gn'
+MY_DIR = os.path.dirname(os.path.realpath(__file__))
+V8_DIR = os.path.dirname(MY_DIR)
+OUT_DIR = os.path.join(V8_DIR, 'check-header-includes')
+AUTO_EXCLUDE = [
+  # flag-definitions.h needs a mode set for being included.
+  'src/flags/flag-definitions.h',
+]
+AUTO_EXCLUDE_PATTERNS = [
+  'src/base/atomicops_internals_.*',
+  # TODO(petermarshall): Enable once Perfetto is built by default.
+  'src/libplatform/tracing/perfetto*',
+] + [
+  # platform-specific headers
+  '\\b{}\\b'.format(p) for p in
+    ('win', 'win32', 'ia32', 'x64', 'arm', 'arm64', 'mips', 'mips64', 's390',
+     'ppc')]
+
+args = None
+def parse_args():
+  global args
+  parser = argparse.ArgumentParser()
+  parser.add_argument('-i', '--input', type=str, action='append',
+                      help='Headers or directories to check (directories '
+                           'are scanned for headers recursively); default: ' +
+                           ','.join(DEFAULT_INPUT))
+  parser.add_argument('-x', '--exclude', type=str, action='append',
+                      help='Add an exclude pattern (regex)')
+  parser.add_argument('-v', '--verbose', action='store_true',
+                      help='Be verbose')
+  args = parser.parse_args()
+  args.exclude = (args.exclude or []) + AUTO_EXCLUDE_PATTERNS
+  args.exclude += ['^' + re.escape(x) + '$' for x in AUTO_EXCLUDE]
+  if not args.input:
+    args.input=DEFAULT_INPUT
+
+
+def printv(line):
+  if args.verbose:
+    print(line)
+
+
+def find_all_headers():
+  printv('Searching for headers...')
+  header_files = []
+  exclude_patterns = [re.compile(x) for x in args.exclude]
+  def add_recursively(filename):
+    full_name = os.path.join(V8_DIR, filename)
+    if not os.path.exists(full_name):
+      sys.exit('File does not exist: {}'.format(full_name))
+    if os.path.isdir(full_name):
+      for subfile in os.listdir(full_name):
+        full_name = os.path.join(filename, subfile)
+        printv('Scanning {}'.format(full_name))
+        add_recursively(full_name)
+    elif filename.endswith('.h'):
+      printv('--> Found header file {}'.format(filename))
+      for p in exclude_patterns:
+        if p.search(filename):
+          printv('--> EXCLUDED (matches {})'.format(p.pattern))
+          return
+      header_files.append(filename)
+
+  for filename in args.input:
+    add_recursively(filename)
+
+  return header_files
+
+
+def get_cc_file_name(header):
+  split = os.path.split(header)
+  header_dir = os.path.relpath(split[0], V8_DIR)
+  # Prefix with the directory name, to avoid collisions in the object files.
+  prefix = header_dir.replace(os.path.sep, '-')
+  cc_file_name = 'test-include-' + prefix + '-' + split[1][:-1] + 'cc'
+  return os.path.join(OUT_DIR, cc_file_name)
+
+
+def create_including_cc_files(header_files):
+  comment = 'check including this header in isolation'
+  for header in header_files:
+    cc_file_name = get_cc_file_name(header)
+    rel_cc_file_name = os.path.relpath(cc_file_name, V8_DIR)
+    content = '#include "{}"  // {}\n'.format(header, comment)
+    if os.path.exists(cc_file_name):
+      with open(cc_file_name) as cc_file:
+        if cc_file.read() == content:
+          printv('File {} is up to date'.format(rel_cc_file_name))
+          continue
+    printv('Creating file {}'.format(rel_cc_file_name))
+    with open(cc_file_name, 'w') as cc_file:
+      cc_file.write(content)
+
+
+def generate_gni(header_files):
+  gni_file = os.path.join(OUT_DIR, 'sources.gni')
+  printv('Generating file "{}"'.format(os.path.relpath(gni_file, V8_DIR)))
+  with open(gni_file, 'w') as gn:
+    gn.write("""\
+# Copyright 2018 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This list is filled automatically by tools/generate-header-include-checks.py.
+check_header_includes_sources = [
+""");
+    for header in header_files:
+      cc_file_name = get_cc_file_name(header)
+      gn.write('    "{}",\n'.format(os.path.relpath(cc_file_name, V8_DIR)))
+    gn.write(']\n')
+
+
+def main():
+  parse_args()
+  header_files = find_all_headers()
+  if not os.path.exists(OUT_DIR):
+    os.mkdir(OUT_DIR)
+  create_including_cc_files(header_files)
+  generate_gni(header_files)
+
+if __name__ == '__main__':
+  main()
diff --git a/src/v8/tools/get_landmines.py b/src/v8/tools/get_landmines.py
new file mode 100755
index 0000000..bf8efa5
--- /dev/null
+++ b/src/v8/tools/get_landmines.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+This file emits the list of reasons why a particular build needs to be clobbered
+(or a list of 'landmines').
+"""
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import os
+import sys
+
+sys.path.insert(0, os.path.abspath(
+  os.path.join(os.path.dirname(__file__), '..', 'build')))
+
+import get_landmines as build_get_landmines
+
+
+def print_landmines():  # pylint: disable=invalid-name
+  """
+  ALL LANDMINES ARE EMITTED FROM HERE.
+  """
+  # DO NOT add landmines as part of a regular CL. Landmines are a last-effort
+  # bandaid fix if a CL that got landed has a build dependency bug and all bots
+  # need to be cleaned up. If you're writing a new CL that causes build
+  # dependency problems, fix the dependency problems instead of adding a
+  # landmine.
+  # See the Chromium version in src/build/get_landmines.py for usage examples.
+  print('Need to clobber after ICU52 roll.')
+  print('Landmines test.')
+  print('Activating MSVS 2013.')
+  print('Revert activation of MSVS 2013.')
+  print('Activating MSVS 2013 again.')
+  print('Clobber after ICU roll.')
+  print('Moar clobbering...')
+  print('Remove build/android.gypi')
+  print('Cleanup after windows ninja switch attempt.')
+  print('Switching to pinned msvs toolchain.')
+  print('Clobbering to hopefully resolve problem with mksnapshot')
+  print('Clobber after ICU roll.')
+  print('Clobber after Android NDK update.')
+  print('Clober to fix windows build problems.')
+  print('Clober again to fix windows build problems.')
+  print('Clobber to possibly resolve failure on win-32 bot.')
+  print('Clobber for http://crbug.com/668958.')
+  print('Clobber to possibly resolve build failure on Misc V8 Linux gcc.')
+  build_get_landmines.print_landmines()
+  return 0
+
+
+def main():
+  print_landmines()
+  return 0
+
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/src/v8/tools/grokdump.py b/src/v8/tools/grokdump.py
index 5d9ffff..773622d 100755
--- a/src/v8/tools/grokdump.py
+++ b/src/v8/tools/grokdump.py
@@ -27,6 +27,12 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+# flake8: noqa  # https://bugs.chromium.org/p/v8/issues/detail?id=8784
+
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import BaseHTTPServer
 import bisect
 import cgi
@@ -69,7 +75,7 @@
 
 def DebugPrint(s):
   if not DEBUG: return
-  print s
+  print(s)
 
 
 class Descriptor(object):
@@ -120,7 +126,7 @@
 def FullDump(reader, heap):
   """Dump all available memory regions."""
   def dump_region(reader, start, size, location):
-    print
+    print()
     while start & 3 != 0:
       start += 1
       size -= 1
@@ -131,17 +137,17 @@
     if is_executable is not False:
       lines = reader.GetDisasmLines(start, size)
       for line in lines:
-        print FormatDisasmLine(start, heap, line)
-      print
+        print(FormatDisasmLine(start, heap, line))
+      print()
 
     if is_ascii is not False:
       # Output in the same format as the Unix hd command
       addr = start
-      for i in xrange(0, size, 16):
+      for i in range(0, size, 16):
         slot = i + location
         hex_line = ""
         asc_line = ""
-        for i in xrange(16):
+        for i in range(16):
           if slot + i < location + size:
             byte = ctypes.c_uint8.from_buffer(reader.minidump, slot + i).value
             if byte >= 0x20 and byte < 0x7f:
@@ -153,24 +159,24 @@
             hex_line += "   "
           if i == 7:
             hex_line += " "
-        print "%s  %s |%s|" % (reader.FormatIntPtr(addr),
+        print("%s  %s |%s|" % (reader.FormatIntPtr(addr),
                                hex_line,
-                               asc_line)
+                               asc_line))
         addr += 16
 
     if is_executable is not True and is_ascii is not True:
-      print "%s - %s" % (reader.FormatIntPtr(start),
-                         reader.FormatIntPtr(start + size))
-      print start + size + 1;
-      for i in xrange(0, size, reader.PointerSize()):
+      print("%s - %s" % (reader.FormatIntPtr(start),
+                         reader.FormatIntPtr(start + size)))
+      print(start + size + 1);
+      for i in range(0, size, reader.PointerSize()):
         slot = start + i
         maybe_address = reader.ReadUIntPtr(slot)
         heap_object = heap.FindObject(maybe_address)
-        print "%s: %s" % (reader.FormatIntPtr(slot),
-                          reader.FormatIntPtr(maybe_address))
+        print("%s: %s" % (reader.FormatIntPtr(slot),
+                          reader.FormatIntPtr(maybe_address)))
         if heap_object:
           heap_object.Print(Printer())
-          print
+          print()
 
   reader.ForEachMemoryRegion(dump_region)
 
@@ -185,6 +191,10 @@
     (0xbbbbbbbb, 0xbbbbbbbb),
     (0xfefefefe, 0xfefefeff),
 )
+# See StackTraceFailureMessage in isolate.h
+STACK_TRACE_MARKER = 0xdecade30
+# See FailureMessage in logging.cc
+ERROR_MESSAGE_MARKER = 0xdecade10
 
 # Set of structures and constants that describe the layout of minidump
 # files. Based on MSDN and Google Breakpad.
@@ -576,7 +586,10 @@
 
 MD_CPU_ARCHITECTURE_X86 = 0
 MD_CPU_ARCHITECTURE_ARM = 5
-MD_CPU_ARCHITECTURE_ARM64 = 0x8003
+# Breakpad used a custom value of 0x8003 here; Crashpad uses the new
+# standardized value 12.
+MD_CPU_ARCHITECTURE_ARM64 = 12
+MD_CPU_ARCHITECTURE_ARM64_BREAKPAD_LEGACY = 0x8003
 MD_CPU_ARCHITECTURE_AMD64 = 9
 
 OBJDUMP_BIN = None
@@ -607,11 +620,11 @@
     self.minidump = mmap.mmap(self.minidump_file.fileno(), 0, mmap.MAP_PRIVATE)
     self.header = MINIDUMP_HEADER.Read(self.minidump, 0)
     if self.header.signature != MinidumpReader._HEADER_MAGIC:
-      print >>sys.stderr, "Warning: Unsupported minidump header magic!"
+      print("Warning: Unsupported minidump header magic!", file=sys.stderr)
     DebugPrint(self.header)
     directories = []
     offset = self.header.stream_directories_rva
-    for _ in xrange(self.header.stream_count):
+    for _ in range(self.header.stream_count):
       directories.append(MINIDUMP_DIRECTORY.Read(self.minidump, offset))
       offset += MINIDUMP_DIRECTORY.size
     self.arch = None
@@ -637,6 +650,8 @@
         system_info = MINIDUMP_RAW_SYSTEM_INFO.Read(
             self.minidump, d.location.rva)
         self.arch = system_info.processor_architecture
+        if self.arch == MD_CPU_ARCHITECTURE_ARM64_BREAKPAD_LEGACY:
+          self.arch = MD_CPU_ARCHITECTURE_ARM64
         assert self.arch in [MD_CPU_ARCHITECTURE_AMD64,
                              MD_CPU_ARCHITECTURE_ARM,
                              MD_CPU_ARCHITECTURE_ARM64,
@@ -673,7 +688,7 @@
         assert ctypes.sizeof(self.module_list) == d.location.data_size
         DebugPrint(self.module_list)
       elif d.stream_type == MD_MEMORY_LIST_STREAM:
-        print >>sys.stderr, "Warning: This is not a full minidump!"
+        print("Warning: This is not a full minidump!", file=sys.stderr)
         assert self.memory_list is None
         self.memory_list = MINIDUMP_MEMORY_LIST.Read(
           self.minidump, d.location.rva)
@@ -695,8 +710,8 @@
     else:
       objdump_bin = self._FindThirdPartyObjdump()
     if not objdump_bin or not os.path.exists(objdump_bin):
-      print "# Cannot find '%s', falling back to default objdump '%s'" % (
-          objdump_bin, DEFAULT_OBJDUMP_BIN)
+      print("# Cannot find '%s', falling back to default objdump '%s'" % (
+          objdump_bin, DEFAULT_OBJDUMP_BIN))
       objdump_bin  = DEFAULT_OBJDUMP_BIN
     global OBJDUMP_BIN
     OBJDUMP_BIN = objdump_bin
@@ -718,12 +733,12 @@
       else:
         # use default otherwise
         return None
-      print ("# Looking for platform specific (%s) objdump in "
-             "third_party directory.") % platform_filter
+      print(("# Looking for platform specific (%s) objdump in "
+             "third_party directory.") % platform_filter)
       objdumps = filter(lambda file: platform_filter in file >= 0, objdumps)
       if len(objdumps) == 0:
-        print "# Could not find platform specific objdump in third_party."
-        print "# Make sure you installed the correct SDK."
+        print("# Could not find platform specific objdump in third_party.")
+        print("# Make sure you installed the correct SDK.")
         return None
       return objdumps[0]
 
@@ -757,7 +772,7 @@
 
   def IsValidExceptionStackAddress(self, address):
     if not self.IsValidAddress(address): return False
-    return self.isExceptionStackAddress(address)
+    return self.IsExceptionStackAddress(address)
 
   def IsModuleAddress(self, address):
     return self.GetModuleForAddress(address) != None
@@ -818,7 +833,7 @@
   def IsProbableASCIIRegion(self, location, length):
     ascii_bytes = 0
     non_ascii_bytes = 0
-    for i in xrange(length):
+    for i in range(length):
       loc = location + i
       byte = ctypes.c_uint8.from_buffer(self.minidump, loc).value
       if byte >= 0x7f:
@@ -840,7 +855,7 @@
   def IsProbableExecutableRegion(self, location, length):
     opcode_bytes = 0
     sixty_four = self.Is64()
-    for i in xrange(length):
+    for i in range(length):
       loc = location + i
       byte = ctypes.c_uint8.from_buffer(self.minidump, loc).value
       if (byte == 0x8b or           # mov
@@ -889,19 +904,19 @@
   def FindWord(self, word, alignment=0):
     def search_inside_region(reader, start, size, location):
       location = (location + alignment) & ~alignment
-      for i in xrange(size - self.PointerSize()):
+      for i in range(size - self.PointerSize()):
         loc = location + i
         if reader._ReadWord(loc) == word:
           slot = start + (loc - location)
-          print "%s: %s" % (reader.FormatIntPtr(slot),
-                            reader.FormatIntPtr(word))
+          print("%s: %s" % (reader.FormatIntPtr(slot),
+                            reader.FormatIntPtr(word)))
     self.ForEachMemoryRegion(search_inside_region)
 
   def FindWordList(self, word):
     aligned_res = []
     unaligned_res = []
     def search_inside_region(reader, start, size, location):
-      for i in xrange(size - self.PointerSize()):
+      for i in range(size - self.PointerSize()):
         loc = location + i
         if reader._ReadWord(loc) == word:
           slot = start + (loc - location)
@@ -1022,7 +1037,7 @@
   #    http://code.google.com/p/google-breakpad/wiki/SymbolFiles
   #
   def _LoadSymbolsFrom(self, symfile, baseaddr):
-    print "Loading symbols from %s" % (symfile)
+    print("Loading symbols from %s" % (symfile))
     funcs = []
     with open(symfile) as f:
       for line in f:
@@ -1034,7 +1049,7 @@
           name = result.group(4).rstrip()
           bisect.insort_left(self.symbols,
                              FuncSymbol(baseaddr + start, size, name))
-    print " ... done"
+    print(" ... done")
 
   def TryLoadSymbolsFor(self, modulename, module):
     try:
@@ -1044,7 +1059,7 @@
         self._LoadSymbolsFrom(symfile, module.base_of_image)
         self.modules_with_symbols.append(module)
     except Exception as e:
-      print "  ... failure (%s)" % (e)
+      print("  ... failure (%s)" % (e))
 
   # Returns true if address is covered by some module that has loaded symbols.
   def _IsInModuleWithSymbols(self, addr):
@@ -1086,11 +1101,11 @@
     self.indent -= 2
 
   def Print(self, string):
-    print "%s%s" % (self._IndentString(), string)
+    print("%s%s" % (self._IndentString(), string))
 
   def PrintLines(self, lines):
     indent = self._IndentString()
-    print "\n".join("%s%s" % (indent, line) for line in lines)
+    print("\n".join("%s%s" % (indent, line) for line in lines))
 
   def _IndentString(self):
     return self.indent * " "
@@ -1227,9 +1242,6 @@
   def DependentCodeOffset(self):
     return self.CodeCacheOffset() + self.heap.PointerSize()
 
-  def WeakCellCacheOffset(self):
-    return self.DependentCodeOffset() + self.heap.PointerSize()
-
   def ReadByte(self, offset):
     return self.heap.reader.ReadU8(self.address + offset)
 
@@ -1427,7 +1439,7 @@
     p.Indent()
     p.Print("length: %d" % self.length)
     base_offset = self.ElementsOffset()
-    for i in xrange(self.length):
+    for i in range(self.length):
       offset = base_offset + 4 * i
       try:
         p.Print("[%08d] = %s" % (i, self.ObjectField(offset)))
@@ -1497,7 +1509,7 @@
     p.Print("Descriptors(%08x, length=%d)" % (array.address, length))
     p.Print("[et] %s" % (array.Get(1)))
 
-    for di in xrange(length):
+    for di in range(length):
       i = 2 + di * 3
       p.Print("0x%x" % (array.address + array.MemberOffset(i)))
       p.Print("[%i] name:    %s" % (di, array.Get(i + 0)))
@@ -1542,7 +1554,7 @@
     if prototype is not None:
       p.Print("[prototype  ] %s" % (prototype))
 
-    for di in xrange(length):
+    for di in range(length):
       i = 3 + di * 2
       p.Print("[%i] symbol: %s" % (di, array.Get(i + 0)))
       p.Print("[%i] target: %s" % (di, array.Get(i + 1)))
@@ -1714,9 +1726,9 @@
     "EXTERNAL_SYMBOL_TYPE": ExternalString,
     "EXTERNAL_SYMBOL_WITH_ONE_BYTE_DATA_TYPE": ExternalString,
     "EXTERNAL_ONE_BYTE_SYMBOL_TYPE": ExternalString,
-    "SHORT_EXTERNAL_SYMBOL_TYPE": ExternalString,
-    "SHORT_EXTERNAL_SYMBOL_WITH_ONE_BYTE_DATA_TYPE": ExternalString,
-    "SHORT_EXTERNAL_ONE_BYTE_SYMBOL_TYPE": ExternalString,
+    "UNCACHED_EXTERNAL_SYMBOL_TYPE": ExternalString,
+    "UNCACHED_EXTERNAL_SYMBOL_WITH_ONE_BYTE_DATA_TYPE": ExternalString,
+    "UNCACHED_EXTERNAL_ONE_BYTE_SYMBOL_TYPE": ExternalString,
     "STRING_TYPE": SeqString,
     "ONE_BYTE_STRING_TYPE": SeqString,
     "CONS_STRING_TYPE": ConsString,
@@ -1728,6 +1740,8 @@
     "ODDBALL_TYPE": Oddball,
     "FIXED_ARRAY_TYPE": FixedArray,
     "HASH_TABLE_TYPE": FixedArray,
+    "OBJECT_BOILERPLATE_DESCRIPTION_TYPE": FixedArray,
+    "SCOPE_INFO_TYPE": FixedArray,
     "JS_FUNCTION_TYPE": JSFunction,
     "SHARED_FUNCTION_INFO_TYPE": SharedFunctionInfo,
     "SCRIPT_TYPE": Script,
@@ -1938,10 +1952,10 @@
         exception_thread.stack.memory.data_size
     frame_pointer = self.reader.ExceptionFP()
     self.styles[frame_pointer] = "frame"
-    for slot in xrange(stack_top, stack_bottom, self.reader.PointerSize()):
+    for slot in range(stack_top, stack_bottom, self.reader.PointerSize()):
       # stack address
       self.styles[slot] = "sa"
-    for slot in xrange(stack_top, stack_bottom, self.reader.PointerSize()):
+    for slot in range(stack_top, stack_bottom, self.reader.PointerSize()):
       maybe_address = self.reader.ReadUIntPtr(slot)
       # stack value
       self.styles[maybe_address] = "sv"
@@ -2057,7 +2071,8 @@
   def SenseMap(self, tagged_address):
     if self.IsInKnownMapSpace(tagged_address):
       offset = self.GetPageOffset(tagged_address)
-      known_map_info = KNOWN_MAPS.get(offset)
+      lookup_key = ("MAP_SPACE", offset)
+      known_map_info = KNOWN_MAPS.get(lookup_key)
       if known_map_info:
         known_map_type, known_map_name = known_map_info
         return KnownMap(self, known_map_name, known_map_type)
@@ -2083,15 +2098,15 @@
     raise NotImplementedError
 
   def PrintKnowledge(self):
-    print "  known_first_map_page = %s\n"\
+    print("  known_first_map_page = %s\n"\
           "  known_first_old_page = %s" % (
           self.reader.FormatIntPtr(self.known_first_map_page),
-          self.reader.FormatIntPtr(self.known_first_old_page))
+          self.reader.FormatIntPtr(self.known_first_old_page)))
 
   def FindFirstAsciiString(self, start, end=None, min_length=32):
     """ Walk the memory until we find a large string """
     if not end: end = start + 64
-    for slot in xrange(start, end):
+    for slot in range(start, end):
       if not self.reader.IsValidAddress(slot): break
       message = self.reader.ReadAsciiString(slot)
       if len(message) > min_length:
@@ -2105,22 +2120,75 @@
     """
     # Only look at the first 1k words on the stack
     ptr_size = self.reader.PointerSize()
-    if start is None:
-      start = self.reader.ExceptionSP()
+    if start is None: start = self.reader.ExceptionSP()
     if not self.reader.IsValidAddress(start): return start
     end = start + ptr_size * 1024 * 4
-    message_start = 0
     magic1 = None
-    for slot in xrange(start, end, ptr_size):
+    for slot in range(start, end, ptr_size):
       if not self.reader.IsValidAddress(slot + ptr_size): break
       magic1 = self.reader.ReadUIntPtr(slot)
       magic2 = self.reader.ReadUIntPtr(slot + ptr_size)
       pair = (magic1 & 0xFFFFFFFF, magic2 & 0xFFFFFFFF)
       if pair in MAGIC_MARKER_PAIRS:
-        message_slot = slot + ptr_size * 4
-        message_start = self.reader.ReadUIntPtr(message_slot)
-        break
-    if message_start == 0:
+        return self.TryExtractOldStyleStackTrace(slot, start, end,
+                                                 print_message)
+      if pair[0] == STACK_TRACE_MARKER:
+        return self.TryExtractStackTrace(slot, start, end, print_message)
+      elif pair[0] == ERROR_MESSAGE_MARKER:
+        return self.TryExtractErrorMessage(slot, start, end, print_message)
+    # Simple fallback in case no stack trace object was found
+    return self.TryExtractOldStyleStackTrace(0, start, end,
+                                             print_message)
+
+  def TryExtractStackTrace(self, slot, start, end, print_message):
+    ptr_size = self.reader.PointerSize()
+    assert self.reader.ReadUIntPtr(slot) & 0xFFFFFFFF == STACK_TRACE_MARKER
+    end_marker = STACK_TRACE_MARKER + 1;
+    header_size = 10
+    # Look for the end marker after the fields and the message buffer.
+    end_search = start + (32 * 1024) + (header_size * ptr_size);
+    end_slot = self.FindPtr(end_marker, end_search, end_search + ptr_size * 512)
+    if not end_slot: return start
+    print("Stack Message (start=%s):" % self.heap.FormatIntPtr(slot))
+    slot += ptr_size
+    for name in ("isolate","ptr1", "ptr2", "ptr3", "ptr4", "codeObject1",
+                 "codeObject2", "codeObject3", "codeObject4"):
+      value = self.reader.ReadUIntPtr(slot)
+      print(" %s: %s" % (name.rjust(14), self.heap.FormatIntPtr(value)))
+      slot += ptr_size
+    print("  message start: %s" % self.heap.FormatIntPtr(slot))
+    stack_start = end_slot + ptr_size
+    print("  stack_start:   %s" % self.heap.FormatIntPtr(stack_start))
+    (message_start, message) = self.FindFirstAsciiString(slot)
+    self.FormatStackTrace(message, print_message)
+    return stack_start
+
+  def FindPtr(self, expected_value, start, end):
+    ptr_size = self.reader.PointerSize()
+    for slot in range(start, end, ptr_size):
+      if not self.reader.IsValidAddress(slot): return None
+      value = self.reader.ReadUIntPtr(slot)
+      if value == expected_value: return slot
+    return None
+
+  def TryExtractErrorMessage(self, slot, start, end, print_message):
+    ptr_size = self.reader.PointerSize()
+    end_marker = ERROR_MESSAGE_MARKER + 1;
+    header_size = 1
+    end_search = start + 1024 + (header_size * ptr_size);
+    end_slot = self.FindPtr(end_marker, end_search, end_search + ptr_size * 512)
+    if not end_slot: return start
+    print("Error Message (start=%s):" % self.heap.FormatIntPtr(slot))
+    slot += ptr_size
+    (message_start, message) = self.FindFirstAsciiString(slot)
+    self.FormatStackTrace(message, print_message)
+    stack_start = end_slot + ptr_size
+    return stack_start
+
+  def TryExtractOldStyleStackTrace(self, message_slot, start, end,
+                                   print_message):
+    ptr_size = self.reader.PointerSize()
+    if message_slot == 0:
       """
       On Mac we don't always get proper magic markers, so just try printing
       the first long ascii string found on the stack.
@@ -2130,29 +2198,35 @@
       message_start, message = self.FindFirstAsciiString(start, end, 128)
       if message_start is None: return start
     else:
+      message_start = self.reader.ReadUIntPtr(message_slot + ptr_size * 4)
       message = self.reader.ReadAsciiString(message_start)
     stack_start = message_start + len(message) + 1
     # Make sure the address is word aligned
     stack_start =  stack_start - (stack_start % ptr_size)
     if magic1 is None:
-      print "Stack Message:"
-      print "  message start: %s" % self.heap.FormatIntPtr(message_start)
-      print "  stack_start:   %s" % self.heap.FormatIntPtr(stack_start )
+      print("Stack Message:")
+      print("  message start: %s" % self.heap.FormatIntPtr(message_start))
+      print("  stack_start:   %s" % self.heap.FormatIntPtr(stack_start ))
     else:
       ptr1 = self.reader.ReadUIntPtr(slot + ptr_size * 2)
       ptr2 = self.reader.ReadUIntPtr(slot + ptr_size * 3)
-      print "Stack Message:"
-      print "  magic1:        %s" % self.heap.FormatIntPtr(magic1)
-      print "  magic2:        %s" % self.heap.FormatIntPtr(magic2)
-      print "  ptr1:          %s" % self.heap.FormatIntPtr(ptr1)
-      print "  ptr2:          %s" % self.heap.FormatIntPtr(ptr2)
-      print "  message start: %s" % self.heap.FormatIntPtr(message_start)
-      print "  stack_start:   %s" % self.heap.FormatIntPtr(stack_start )
-      print ""
+      print("Stack Message:")
+      print("  magic1:        %s" % self.heap.FormatIntPtr(magic1))
+      print("  magic2:        %s" % self.heap.FormatIntPtr(magic2))
+      print("  ptr1:          %s" % self.heap.FormatIntPtr(ptr1))
+      print("  ptr2:          %s" % self.heap.FormatIntPtr(ptr2))
+      print("  message start: %s" % self.heap.FormatIntPtr(message_start))
+      print("  stack_start:   %s" % self.heap.FormatIntPtr(stack_start ))
+      print("")
+    self.FormatStackTrace(message, print_message)
+    return stack_start
+
+  def FormatStackTrace(self, message, print_message):
     if not print_message:
-      print "  Use `dsa` to print the message with annotated addresses."
-      print ""
-      return stack_start
+      print("  Use `dsa` to print the message with annotated addresses.")
+      print("")
+      return
+    ptr_size = self.reader.PointerSize()
     # Annotate all addresses in the dumped message
     prog = re.compile("[0-9a-fA-F]{%s}" % ptr_size*2)
     addresses = list(set(prog.findall(message)))
@@ -2161,12 +2235,12 @@
       address = self.heap.FormatIntPtr(int(address_org, 16))
       if address_org != address:
         message = message.replace(address_org, address)
-    print "Message:"
-    print "="*80
-    print message
-    print "="*80
-    print ""
-    return stack_start
+    print("Message:")
+    print("="*80)
+    print(message)
+    print("="*80)
+    print("")
+
 
   def TryInferFramePointer(self, slot, address):
     """ Assume we have a framepointer if we find 4 consecutive links """
@@ -2216,9 +2290,9 @@
     free_space_end = 0
     ptr_size = self.reader.PointerSize()
 
-    for slot in xrange(start, end, ptr_size):
+    for slot in range(start, end, ptr_size):
       if not self.reader.IsValidAddress(slot):
-        print "%s: Address is not contained within the minidump!" % slot
+        print("%s: Address is not contained within the minidump!" % slot)
         return
       maybe_address = self.reader.ReadUIntPtr(slot)
       address_info = []
@@ -2276,17 +2350,17 @@
           frame_pointer = maybe_address
       address_type_marker = self.heap.AddressTypeMarker(maybe_address)
       string_value = self.reader.ReadAsciiPtr(slot)
-      print "%s: %s %s %s %s" % (self.reader.FormatIntPtr(slot),
+      print("%s: %s %s %s %s" % (self.reader.FormatIntPtr(slot),
                            self.reader.FormatIntPtr(maybe_address),
                            address_type_marker,
                            string_value,
-                           ' | '.join(address_info))
+                           ' | '.join(address_info)))
       if maybe_address_contents == 0xdecade01:
         in_oom_dump_area = False
       heap_object = self.heap.FindObject(maybe_address)
       if heap_object:
         heap_object.Print(Printer())
-        print ""
+        print("")
 
 WEB_HEADER = """
 <!DOCTYPE html>
@@ -2638,7 +2712,7 @@
     stack_bottom = exception_thread.stack.start + \
         exception_thread.stack.memory.data_size
     stack_map = {self.reader.ExceptionIP(): -1}
-    for slot in xrange(stack_top, stack_bottom, self.reader.PointerSize()):
+    for slot in range(stack_top, stack_bottom, self.reader.PointerSize()):
       maybe_address = self.reader.ReadUIntPtr(slot)
       if not maybe_address in stack_map:
         stack_map[maybe_address] = slot
@@ -2656,7 +2730,7 @@
       address = int(straddress, 0)
       self.comments.set_comment(address, comment)
     except ValueError:
-      print "Invalid address"
+      print("Invalid address")
 
   def set_page_address(self, kind, straddress):
     try:
@@ -2667,7 +2741,7 @@
         self.padawan.known_first_map_page = address
       self.comments.save_page_address(kind, address)
     except ValueError:
-      print "Invalid address"
+      print("Invalid address")
 
   def td_from_address(self, f, address):
     f.write("<td %s>" % self.comments.get_style_class_string(address))
@@ -2790,7 +2864,7 @@
     if details == InspectionWebFormatter.CONTEXT_FULL:
       if self.reader.exception.exception.parameter_count > 0:
         f.write("&nbsp;&nbsp; Exception parameters: ")
-        for i in xrange(0, self.reader.exception.exception.parameter_count):
+        for i in range(0, self.reader.exception.exception.parameter_count):
           f.write("%08x" % self.reader.exception.exception.information[i])
         f.write("<br><br>")
 
@@ -2866,19 +2940,19 @@
     f.write('<div class="code">')
     f.write("<table class=codedump>")
 
-    for j in xrange(0, end_address - start_address, size):
+    for j in range(0, end_address - start_address, size):
       slot = start_address + j
       heap_object = ""
       maybe_address = None
       end_region = region[0] + region[1]
       if slot < region[0] or slot + size > end_region:
         straddress = "0x"
-        for i in xrange(end_region, slot + size):
+        for i in range(end_region, slot + size):
           straddress += "??"
         for i in reversed(
-            xrange(max(slot, region[0]), min(slot + size, end_region))):
+            range(max(slot, region[0]), min(slot + size, end_region))):
           straddress += "%02x" % self.reader.ReadU8(i)
-        for i in xrange(slot, region[0]):
+        for i in range(slot, region[0]):
           straddress += "??"
       else:
         maybe_address = self.reader.ReadUIntPtr(slot)
@@ -2940,7 +3014,7 @@
 
     start = self.align_down(start_address, line_width)
 
-    for i in xrange(end_address - start):
+    for i in range(end_address - start):
       address = start + i
       if address % 64 == 0:
         if address != start:
@@ -3010,7 +3084,7 @@
             (start_address, end_address, highlight_address, expand))
     f.write('<div class="code">')
     f.write("<table class=\"codedump\">");
-    for i in xrange(len(lines)):
+    for i in range(len(lines)):
       line = lines[i]
       next_address = count
       if i + 1 < len(lines):
@@ -3257,7 +3331,7 @@
 class InspectionWebServer(BaseHTTPServer.HTTPServer):
   def __init__(self, port_number, switches, minidump_name):
     BaseHTTPServer.HTTPServer.__init__(
-        self, ('', port_number), InspectionWebHandler)
+        self, ('localhost', port_number), InspectionWebHandler)
     splitpath = os.path.split(minidump_name)
     self.dumppath = splitpath[0]
     self.dumpfilename = splitpath[1]
@@ -3386,8 +3460,8 @@
 
   def do_help(self, cmd=None):
     if len(cmd) == 0:
-      print "Available commands"
-      print "=" * 79
+      print("Available commands")
+      print("=" * 79)
       prefix = "do_"
       methods = inspect.getmembers(InspectionShell, predicate=inspect.ismethod)
       for name,method in methods:
@@ -3396,8 +3470,8 @@
         if not doc: continue
         name = prefix.join(name.split(prefix)[1:])
         description = doc.splitlines()[0]
-        print (name + ": ").ljust(16) + description
-      print "=" * 79
+        print((name + ": ").ljust(16) + description)
+      print("=" * 79)
     else:
       return super(InspectionShell, self).do_help(cmd)
 
@@ -3425,9 +3499,9 @@
     address = self.ParseAddressExpr(address)
     string = self.reader.ReadAsciiString(address)
     if string == "":
-      print "Not an ASCII string at %s" % self.reader.FormatIntPtr(address)
+      print("Not an ASCII string at %s" % self.reader.FormatIntPtr(address))
     else:
-      print "%s\n" % string
+      print("%s\n" % string)
 
   def do_dsa(self, address):
     """ see display_stack_ascii"""
@@ -3438,7 +3512,7 @@
     Print ASCII stack error message.
     """
     if self.reader.exception is None:
-      print "Minidump has no exception info"
+      print("Minidump has no exception info")
       return
     if len(address) == 0:
       address = None
@@ -3463,7 +3537,7 @@
     else:
       self.dd_start += self.dd_num * self.reader.PointerSize()
     if not self.reader.IsAlignedAddress(self.dd_start):
-      print "Warning: Dumping un-aligned memory, is this what you had in mind?"
+      print("Warning: Dumping un-aligned memory, is this what you had in mind?")
     end = self.dd_start + self.reader.PointerSize() * self.dd_num
     self.padawan.InterpretMemory(self.dd_start, end)
 
@@ -3482,13 +3556,13 @@
     if self.reader.IsAlignedAddress(address):
       address = address + 1
     elif not self.heap.IsTaggedObjectAddress(address):
-      print "Address doesn't look like a valid pointer!"
+      print("Address doesn't look like a valid pointer!")
       return
     heap_object = self.padawan.SenseObject(address)
     if heap_object:
       heap_object.Print(Printer())
     else:
-      print "Address cannot be interpreted as object!"
+      print("Address cannot be interpreted as object!")
 
   def do_dso(self, args):
     """ see display_stack_objects """
@@ -3555,10 +3629,10 @@
     address = self.ParseAddressExpr(address)
     page_address = address & ~self.heap.PageAlignmentMask()
     if self.reader.IsValidAddress(page_address):
-      print "**** Not Implemented"
+      print("**** Not Implemented")
       return
     else:
-      print "Page header is not available!"
+      print("Page header is not available!")
 
   def do_k(self, arguments):
     """
@@ -3603,10 +3677,10 @@
      List all available memory regions.
     """
     def print_region(reader, start, size, location):
-      print "  %s - %s (%d bytes)" % (reader.FormatIntPtr(start),
+      print("  %s - %s (%d bytes)" % (reader.FormatIntPtr(start),
                                       reader.FormatIntPtr(start + size),
-                                      size)
-    print "Available memory regions:"
+                                      size))
+    print("Available memory regions:")
     self.reader.ForEachMemoryRegion(print_region)
 
   def do_lm(self, arg):
@@ -3627,7 +3701,7 @@
           PrintModuleDetails(self.reader, module)
       else:
         PrintModuleDetails(self.reader, module)
-    print
+    print()
 
   def do_s(self, word):
     """ see search """
@@ -3644,9 +3718,10 @@
     try:
       word = self.ParseAddressExpr(word)
     except ValueError:
-      print "Malformed word, prefix with '0x' to use hexadecimal format."
+      print("Malformed word, prefix with '0x' to use hexadecimal format.")
       return
-    print "Searching for word %d/0x%s:" % (word, self.reader.FormatIntPtr(word))
+    print(
+      "Searching for word %d/0x%s:" % (word, self.reader.FormatIntPtr(word)))
     self.reader.FindWord(word)
 
   def do_sh(self, none):
@@ -3656,7 +3731,7 @@
      You might get lucky and find this rare treasure full of invaluable
      information.
     """
-    print "**** Not Implemented"
+    print("**** Not Implemented")
 
   def do_u(self, args):
     """ see disassemble """
@@ -3679,24 +3754,24 @@
       skip = True
 
     if not self.reader.IsValidAddress(self.u_start):
-      print "Address %s is not contained within the minidump!" % (
-          self.reader.FormatIntPtr(self.u_start))
+      print("Address %s is not contained within the minidump!" % (
+          self.reader.FormatIntPtr(self.u_start)))
       return
     lines = self.reader.GetDisasmLines(self.u_start, self.u_size)
     if len(lines) == 0:
-      print "Address %s could not be disassembled!" % (
-          self.reader.FormatIntPtr(self.u_start))
-      print "    Could not disassemble using %s." % OBJDUMP_BIN
-      print "    Pass path to architecture specific objdump via --objdump?"
+      print("Address %s could not be disassembled!" % (
+          self.reader.FormatIntPtr(self.u_start)))
+      print("    Could not disassemble using %s." % OBJDUMP_BIN)
+      print("    Pass path to architecture specific objdump via --objdump?")
       return
     for line in lines:
       if skip:
         skip = False
         continue
-      print FormatDisasmLine(self.u_start, self.heap, line)
+      print(FormatDisasmLine(self.u_start, self.heap, line))
     # Set the next start address = last line
     self.u_start += lines[-1][0]
-    print
+    print()
 
   def do_EOF(self, none):
     raise KeyboardInterrupt
@@ -3733,18 +3808,18 @@
 
 
 def PrintModuleDetails(reader, module):
-  print "%s" % GetModuleName(reader, module)
+  print("%s" % GetModuleName(reader, module))
   file_version = GetVersionString(module.version_info.dwFileVersionMS,
                                   module.version_info.dwFileVersionLS);
   product_version = GetVersionString(module.version_info.dwProductVersionMS,
                                      module.version_info.dwProductVersionLS)
-  print "  base: %s" % reader.FormatIntPtr(module.base_of_image)
-  print "  end: %s" % reader.FormatIntPtr(module.base_of_image +
-                                          module.size_of_image)
-  print "  file version: %s" % file_version
-  print "  product version: %s" % product_version
+  print("  base: %s" % reader.FormatIntPtr(module.base_of_image))
+  print("  end: %s" % reader.FormatIntPtr(module.base_of_image +
+                                          module.size_of_image))
+  print("  file version: %s" % file_version)
+  print("  product version: %s" % product_version)
   time_date_stamp = datetime.datetime.fromtimestamp(module.time_date_stamp)
-  print "  timestamp: %s" % time_date_stamp
+  print("  timestamp: %s" % time_date_stamp)
 
 
 def AnalyzeMinidump(options, minidump_name):
@@ -3754,7 +3829,7 @@
   stack_top = reader.ExceptionSP()
   stack_bottom = reader.StackBottom()
   stack_map = {reader.ExceptionIP(): -1}
-  for slot in xrange(stack_top, stack_bottom, reader.PointerSize()):
+  for slot in range(stack_top, stack_bottom, reader.PointerSize()):
     maybe_address = reader.ReadUIntPtr(slot)
     if not maybe_address in stack_map:
       stack_map[maybe_address] = slot
@@ -3764,51 +3839,51 @@
 
   DebugPrint("========================================")
   if reader.exception is None:
-    print "Minidump has no exception info"
+    print("Minidump has no exception info")
   else:
-    print "Address markers:"
-    print "  T = valid tagged pointer in the minidump"
-    print "  S = address on the exception stack"
-    print "  C = address in loaded C/C++ module"
-    print "  * = address in the minidump"
-    print ""
-    print "Exception info:"
+    print("Address markers:")
+    print("  T = valid tagged pointer in the minidump")
+    print("  S = address on the exception stack")
+    print("  C = address in loaded C/C++ module")
+    print("  * = address in the minidump")
+    print("")
+    print("Exception info:")
     exception_thread = reader.ExceptionThread()
-    print "  thread id: %d" % exception_thread.id
-    print "  code:      %08X" % reader.exception.exception.code
-    print "  context:"
+    print("  thread id: %d" % exception_thread.id)
+    print("  code:      %08X" % reader.exception.exception.code)
+    print("  context:")
     context = CONTEXT_FOR_ARCH[reader.arch]
     maxWidth = max(map(lambda s: len(s), context))
     for r in context:
       register_value = reader.Register(r)
-      print "    %s: %s" % (r.rjust(maxWidth),
-                            heap.FormatIntPtr(register_value))
+      print("    %s: %s" % (r.rjust(maxWidth),
+                            heap.FormatIntPtr(register_value)))
     # TODO(vitalyr): decode eflags.
     if reader.arch in [MD_CPU_ARCHITECTURE_ARM, MD_CPU_ARCHITECTURE_ARM64]:
-      print "    cpsr: %s" % bin(reader.exception_context.cpsr)[2:]
+      print("    cpsr: %s" % bin(reader.exception_context.cpsr)[2:])
     else:
-      print "    eflags: %s" % bin(reader.exception_context.eflags)[2:]
+      print("    eflags: %s" % bin(reader.exception_context.eflags)[2:])
 
-    print
-    print "  modules:"
+    print()
+    print("  modules:")
     for module in reader.module_list.modules:
       name = GetModuleName(reader, module)
       if name in KNOWN_MODULES:
-        print "    %s at %08X" % (name, module.base_of_image)
+        print("    %s at %08X" % (name, module.base_of_image))
         reader.TryLoadSymbolsFor(name, module)
-    print
+    print()
 
-    print "  stack-top:    %s" % heap.FormatIntPtr(reader.StackTop())
-    print "  stack-bottom: %s" % heap.FormatIntPtr(reader.StackBottom())
-    print ""
+    print("  stack-top:    %s" % heap.FormatIntPtr(reader.StackTop()))
+    print("  stack-bottom: %s" % heap.FormatIntPtr(reader.StackBottom()))
+    print("")
 
     if options.shell:
       padawan.PrintStackTraceMessage(print_message=False)
 
-    print "Disassembly around exception.eip:"
+    print("Disassembly around exception.eip:")
     eip_symbol = reader.FindSymbol(reader.ExceptionIP())
     if eip_symbol is not None:
-      print eip_symbol
+      print(eip_symbol)
     disasm_start = reader.ExceptionIP() - EIP_PROXIMITY
     disasm_bytes = 2 * EIP_PROXIMITY
     if (options.full):
@@ -3820,12 +3895,12 @@
     lines = reader.GetDisasmLines(disasm_start, disasm_bytes)
 
     if not lines:
-      print "Could not disassemble using %s." % OBJDUMP_BIN
-      print "Pass path to architecture specific objdump via --objdump?"
+      print("Could not disassemble using %s." % OBJDUMP_BIN)
+      print("Pass path to architecture specific objdump via --objdump?")
 
     for line in lines:
-      print FormatDisasmLine(disasm_start, heap, line)
-    print
+      print(FormatDisasmLine(disasm_start, heap, line))
+    print()
 
   if heap is None:
     heap = V8Heap(reader, None)
@@ -3840,10 +3915,10 @@
     try:
       InspectionShell(reader, heap).cmdloop("type help to get help")
     except KeyboardInterrupt:
-      print "Kthxbye."
+      print("Kthxbye.")
   elif not options.command:
     if reader.exception is not None:
-      print "Annotated stack (from exception.esp to bottom):"
+      print("Annotated stack (from exception.esp to bottom):")
       stack_start = padawan.PrintStackTraceMessage()
       padawan.InterpretMemory(stack_start, stack_bottom)
   reader.Dispose()
@@ -3871,11 +3946,11 @@
   if options.web:
     try:
       server = InspectionWebServer(PORT_NUMBER, options, args[0])
-      print 'Started httpserver on port ' , PORT_NUMBER
+      print('Started httpserver on port ' , PORT_NUMBER)
       webbrowser.open('http://localhost:%i/summary.html' % PORT_NUMBER)
       server.serve_forever()
     except KeyboardInterrupt:
-      print '^C received, shutting down the web server'
+      print('^C received, shutting down the web server')
       server.socket.close()
   else:
     AnalyzeMinidump(options, args[0])
diff --git a/src/v8/tools/heap-stats/README.md b/src/v8/tools/heap-stats/README.md
index 70083fe..9cf6e56 100644
--- a/src/v8/tools/heap-stats/README.md
+++ b/src/v8/tools/heap-stats/README.md
@@ -6,8 +6,9 @@
 
 The tool consumes log files produced by d8 (or Chromium) by passing
 `--trace-gc-object-stats` or a trace captured using Chrome's tracing
-infrastructure. Chrome trace files need to be unpacked before they can
-be used though.
+infrastructure. Chrome trace files can be processed either as gzip or as
+raw text files.
+
 
 Hosting requires a web server, e.g.:
 
diff --git a/src/v8/tools/heap-stats/categories.js b/src/v8/tools/heap-stats/categories.js
index 0a836d5..e02571b 100644
--- a/src/v8/tools/heap-stats/categories.js
+++ b/src/v8/tools/heap-stats/categories.js
@@ -6,20 +6,17 @@
 const CATEGORIES = new Map([
   [
     'user', new Set([
-      '*FIXED_ARRAY_CONTEXT_SUB_TYPE',
-      '*FIXED_ARRAY_COPY_ON_WRITE_SUB_TYPE',
-      '*FIXED_ARRAY_DICTIONARY_PROPERTIES_SUB_TYPE',
-      '*FIXED_ARRAY_JS_COLLECTION_SUB_TYPE',
-      '*FIXED_ARRAY_JS_WEAK_COLLECTION_SUB_TYPE',
-      '*FIXED_ARRAY_PACKED_ELEMENTS_SUB_TYPE',
       'CONS_ONE_BYTE_STRING_TYPE',
       'CONS_STRING_TYPE',
       'DESCRIPTOR_ARRAY_TYPE',
+      'ELEMENTS_TYPE',
       'EXTERNAL_INTERNALIZED_STRING_TYPE',
       'EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE',
       'EXTERNAL_ONE_BYTE_STRING_TYPE',
       'EXTERNAL_STRING_TYPE',
       'EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE',
+      'FIXED_BIGINT64_ARRAY_TYPE',
+      'FIXED_BIGUINT64_ARRAY_TYPE',
       'FIXED_DOUBLE_ARRAY_TYPE',
       'FIXED_FLOAT32_ARRAY_TYPE',
       'FIXED_FLOAT64_ARRAY_TYPE',
@@ -30,10 +27,14 @@
       'FIXED_UINT32_ARRAY_TYPE',
       'FIXED_UINT8_ARRAY_TYPE',
       'FIXED_UINT8_CLAMPED_ARRAY_TYPE',
+      'FUNCTION_CONTEXT_TYPE',
+      'GLOBAL_ELEMENTS_TYPE',
+      'GLOBAL_PROPERTIES_TYPE',
       'HEAP_NUMBER_TYPE',
       'INTERNALIZED_STRING_TYPE',
       'JS_ARGUMENTS_TYPE',
       'JS_ARRAY_BUFFER_TYPE',
+      'JS_ARRAY_ITERATOR_TYPE',
       'JS_ARRAY_TYPE',
       'JS_BOUND_FUNCTION_TYPE',
       'JS_DATE_TYPE',
@@ -49,31 +50,55 @@
       'JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE',
       'JS_GLOBAL_OBJECT_TYPE',
       'JS_GLOBAL_PROXY_TYPE',
+      'JS_INTL_COLLATOR_TYPE',
+      'JS_INTL_DATE_TIME_FORMAT_TYPE',
+      'JS_INTL_LIST_FORMAT_TYPE',
+      'JS_INTL_LOCALE_TYPE',
+      'JS_INTL_NUMBER_FORMAT_TYPE',
+      'JS_INTL_PLURAL_RULES_TYPE',
+      'JS_INTL_RELATIVE_TIME_FORMAT_TYPE',
+      'JS_INTL_SEGMENT_ITERATOR_TYPE',
+      'JS_INTL_SEGMENTER_TYPE',
+      'JS_INTL_V8_BREAK_ITERATOR_TYPE',
+      'JS_MAP_KEY_ITERATOR_TYPE',
       'JS_MAP_KEY_VALUE_ITERATOR_TYPE',
       'JS_MAP_TYPE',
+      'JS_MAP_VALUE_ITERATOR_TYPE',
       'JS_MESSAGE_OBJECT_TYPE',
       'JS_OBJECT_TYPE',
+      'JS_PRIMITIVE_WRAPPER_TYPE',
       'JS_PROMISE_TYPE',
+      'JS_PROXY_TYPE',
       'JS_REGEXP_TYPE',
+      'JS_SET_KEY_VALUE_ITERATOR_TYPE',
       'JS_SET_TYPE',
+      'JS_SET_VALUE_ITERATOR_TYPE',
       'JS_STRING_ITERATOR_TYPE',
+      'JS_TO_WASM_FUNCTION',
       'JS_TYPED_ARRAY_TYPE',
-      'JS_VALUE_TYPE',
       'JS_WEAK_MAP_TYPE',
       'MUTABLE_HEAP_NUMBER_TYPE',
+      'NATIVE_CONTEXT_TYPE',
+      'OBJECT_PROPERTY_DICTIONARY_TYPE',
       'ONE_BYTE_INTERNALIZED_STRING_TYPE',
       'ONE_BYTE_STRING_TYPE',
+      'OTHER_CONTEXT_TYPE',
       'PROPERTY_ARRAY_TYPE',
-      'SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE',
-      'SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE',
-      'SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE',
-      'SHORT_EXTERNAL_STRING_TYPE',
       'SLICED_ONE_BYTE_STRING_TYPE',
       'SLICED_STRING_TYPE',
+      'STRING_EXTERNAL_RESOURCE_ONE_BYTE_TYPE',
+      'STRING_EXTERNAL_RESOURCE_TWO_BYTE_TYPE',
       'STRING_TYPE',
       'SYMBOL_TYPE',
       'THIN_ONE_BYTE_STRING_TYPE',
       'THIN_STRING_TYPE',
+      'UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE',
+      'UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE',
+      'UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE',
+      'UNCACHED_EXTERNAL_STRING_TYPE',
+      'WASM_INSTANCE_TYPE',
+      'WASM_MEMORY_TYPE',
+      'WASM_MODULE_TYPE',
     ])
   ],
   [
@@ -83,27 +108,20 @@
       'ACCESSOR_PAIR_TYPE',
       'ALLOCATION_MEMENTO_TYPE',
       'ALLOCATION_SITE_TYPE',
+      'ARRAY_BOILERPLATE_DESCRIPTION_TYPE',
+      'ARRAY_BOILERPLATE_DESCRIPTION_ELEMENTS_TYPE',
       'BOILERPLATE_ELEMENTS_TYPE',
-      'BOILERPLATE_NAME_DICTIONARY_TYPE',
       'BOILERPLATE_PROPERTY_ARRAY_TYPE',
+      'BOILERPLATE_PROPERTY_DICTIONARY_TYPE',
       'BYTE_ARRAY_TYPE',
+      'CALL_HANDLER_INFO_TYPE',
       'CELL_TYPE',
+      'CODE_STUBS_TABLE_TYPE',
       'CONTEXT_EXTENSION_TYPE',
-      '*FIXED_ARRAY_DEPENDENT_CODE_SUB_TYPE',
-      '*FIXED_ARRAY_ENUM_CACHE_SUB_TYPE',
-      '*FIXED_ARRAY_ENUM_INDICES_CACHE_SUB_TYPE',
-      '*FIXED_ARRAY_FAST_TEMPLATE_INSTANTIATIONS_CACHE_SUB_TYPE',
-      '*FIXED_ARRAY_NUMBER_STRING_CACHE_SUB_TYPE',
-      '*FIXED_ARRAY_PROTOTYPE_USERS_SUB_TYPE',
-      '*FIXED_ARRAY_REGEXP_MULTIPLE_CACHE_SUB_TYPE',
-      '*FIXED_ARRAY_RETAINED_MAPS_SUB_TYPE',
-      '*FIXED_ARRAY_SCOPE_INFO_SUB_TYPE',
-      '*FIXED_ARRAY_SCRIPT_LIST_SUB_TYPE',
-      '*FIXED_ARRAY_SINGLE_CHARACTER_STRING_CACHE_SUB_TYPE',
-      '*FIXED_ARRAY_STRING_SPLIT_CACHE_SUB_TYPE',
-      '*FIXED_ARRAY_TEMPLATE_INFO_SUB_TYPE',
-      '*FIXED_ARRAY_WEAK_NEW_SPACE_OBJECT_TO_CODE_SUB_TYPE',
+      'ENUM_CACHE_TYPE',
+      'ENUM_INDICES_CACHE_TYPE',
       'FOREIGN_TYPE',
+      'FUNCTION_TEMPLATE_INFO_ENTRIES_TYPE',
       'FUNCTION_TEMPLATE_INFO_TYPE',
       'INTERCEPTOR_INFO_TYPE',
       'JS_API_OBJECT_TYPE',
@@ -111,41 +129,74 @@
       'JS_OBJECT_BOILERPLATE_TYPE',
       'JS_SPECIAL_API_OBJECT_TYPE',
       'MAP_TYPE',
+      'NUMBER_STRING_CACHE_TYPE',
+      'OBJECT_BOILERPLATE_DESCRIPTION_TYPE',
       'OBJECT_TEMPLATE_INFO_TYPE',
+      'OBJECT_TO_CODE_TYPE',
       'ODDBALL_TYPE',
       'PROMISE_REACTION_JOB_INFO_TYPE',
       'PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE',
       'PROPERTY_CELL_TYPE',
       'PROTOTYPE_INFO_TYPE',
+      'PROTOTYPE_USERS_TYPE',
+      'REGEXP_MULTIPLE_CACHE_TYPE',
+      'RETAINED_MAPS_TYPE',
+      'SCOPE_INFO_TYPE',
+      'SCRIPT_LIST_TYPE',
+      'SCRIPT_SHARED_FUNCTION_INFOS_TYPE',
+      'SERIALIZED_OBJECTS_TYPE',
+      'SINGLE_CHARACTER_STRING_CACHE_TYPE',
       'STACK_FRAME_INFO_TYPE',
+      'STRING_SPLIT_CACHE_TYPE',
+      'STRING_TABLE_TYPE',
       'TRANSITION_ARRAY_TYPE',
-      'WEAK_CELL_TYPE',
+      'WEAK_NEW_SPACE_OBJECT_TO_CODE_TYPE',
     ])
   ],
   [
     'code', new Set([
-      '*CODE_BUILTIN',
-      '*CODE_BYTECODE_HANDLER',
-      '*CODE_OPTIMIZED_FUNCTION',
-      '*CODE_REGEXP',
-      '*CODE_STUB',
-      '*FIXED_ARRAY_BYTECODE_ARRAY_CONSTANT_POOL_SUB_TYPE',
-      '*FIXED_ARRAY_BYTECODE_ARRAY_HANDLER_TABLE_SUB_TYPE',
-      '*FIXED_ARRAY_CODE_STUBS_TABLE_SUB_TYPE',
-      '*FIXED_ARRAY_COMPILATION_CACHE_TABLE_SUB_TYPE',
-      '*FIXED_ARRAY_DEOPTIMIZATION_DATA_SUB_TYPE',
-      '*FIXED_ARRAY_EMBEDDED_OBJECT_SUB_TYPE',
-      '*FIXED_ARRAY_HANDLER_TABLE_SUB_TYPE',
-      '*FIXED_ARRAY_NOSCRIPT_SHARED_FUNCTION_INFOS_SUB_TYPE',
-      '*FIXED_ARRAY_OPTIMIZED_CODE_LITERALS_SUB_TYPE',
-      '*FIXED_ARRAY_SHARED_FUNCTION_INFOS_SUB_TYPE',
+      'BUILTIN',
+      'BYTECODE_ARRAY_CONSTANT_POOL_TYPE',
+      'BYTECODE_ARRAY_HANDLER_TABLE_TYPE',
       'BYTECODE_ARRAY_TYPE',
+      'BYTECODE_HANDLER',
       'CODE_DATA_CONTAINER_TYPE',
+      'DEOPTIMIZATION_DATA_TYPE',
+      'EMBEDDED_OBJECT_TYPE',
+      'FEEDBACK_CELL_TYPE',
+      'FEEDBACK_METADATA_TYPE',
+      'FEEDBACK_VECTOR_ENTRY_TYPE',
+      'FEEDBACK_VECTOR_HEADER_TYPE',
+      'FEEDBACK_VECTOR_SLOT_CALL_TYPE',
+      'FEEDBACK_VECTOR_SLOT_CALL_UNUSED_TYPE',
+      'FEEDBACK_VECTOR_SLOT_ENUM_TYPE',
+      'FEEDBACK_VECTOR_SLOT_LOAD_TYPE',
+      'FEEDBACK_VECTOR_SLOT_LOAD_UNUSED_TYPE',
+      'FEEDBACK_VECTOR_SLOT_OTHER_TYPE',
+      'FEEDBACK_VECTOR_SLOT_STORE_TYPE',
+      'FEEDBACK_VECTOR_SLOT_STORE_UNUSED_TYPE',
       'FEEDBACK_VECTOR_TYPE',
       'LOAD_HANDLER_TYPE',
+      'NOSCRIPT_SHARED_FUNCTION_INFOS_TYPE',
+      'OPTIMIZED_CODE_LITERALS_TYPE',
+      'OPTIMIZED_FUNCTION',
+      'PREPARSE_DATA_TYPE',
+      'REGEXP',
+      'RELOC_INFO_TYPE',
+      'SCRIPT_SOURCE_EXTERNAL_ONE_BYTE_TYPE',
+      'SCRIPT_SOURCE_EXTERNAL_TWO_BYTE_TYPE',
+      'SCRIPT_SOURCE_EXTERNAL_TYPE',
+      'SCRIPT_SOURCE_NON_EXTERNAL_ONE_BYTE_TYPE',
+      'SCRIPT_SOURCE_NON_EXTERNAL_TWO_BYTE_TYPE',
       'SCRIPT_TYPE',
       'SHARED_FUNCTION_INFO_TYPE',
+      'SOURCE_POSITION_TABLE_TYPE',
       'STORE_HANDLER_TYPE',
+      'STUB',
+      'UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE',
+      'UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE',
+      'UNCOMPILED_JS_FUNCTION_TYPE',
+      'UNCOMPILED_SHARED_FUNCTION_INFO_TYPE'
     ])
   ],
   ['unclassified', new Set()],
@@ -158,10 +209,3 @@
   ['code', 'Code'],
   ['unclassified', 'Unclassified'],
 ]);
-
-// Instance types that are constructed from their sub types and
-// should thus be hidden.
-const IGNORED_INSTANCE_TYPES = new Set([
-  'FIXED_ARRAY_TYPE',
-  'CODE_TYPE',
-]);
diff --git a/src/v8/tools/heap-stats/details-selection.html b/src/v8/tools/heap-stats/details-selection.html
index d60aef9..04b274a 100644
--- a/src/v8/tools/heap-stats/details-selection.html
+++ b/src/v8/tools/heap-stats/details-selection.html
@@ -3,12 +3,17 @@
 found in the LICENSE file. -->
 <template id="details-selection-template">
 <style>
+#dataSelectionSection {
+  display: none;
+}
+
 .box {
   border-left: dashed 1px #666666;
   border-right: dashed 1px #666666;
   border-bottom: dashed 1px #666666;
   padding: 10px;
   overflow: hidden;
+  position: relative;
 }
 
 .box:nth-of-type(1) {
@@ -20,53 +25,121 @@
     border-radius: 0px 0px 5px 5px;
 }
 
-span {
-  display: block;
-  padding: 5px;
+.box > ul {
+  margin: 0px;
+  padding: 0px;
+}
+
+.box > ul > li {
+  display: inline-block;
+}
+
+.box > ul > li:not(:first-child) {
+  margin-left: 10px;
+}
+
+.box > ul > li:first-child {
   font-weight: bold;
 }
 
-.boxDiv {
-  padding: 3px;
+.instanceTypeSelectBox {
+  position: relative;
+  overflow: hidden;
   float: left;
+  padding: 0px 5px 2px 0px;
+  margin: 3px;
+  border-radius: 3px;
 }
 
-.boxDiv > label {
+.instanceTypeSelectBox > label {
   font-size: xx-small;
 }
 
+.instanceTypeSelectBox > input {
+  vertical-align: middle;
+}
+
+.percentBackground {
+  position: absolute;
+  width: 200%;
+  height: 100%;
+  left: 0%;
+  top: 0px;
+  margin-left: -100%;
+  transition: all 1s ease-in-out;
+}
+
+.instanceTypeSelectBox > .percentBackground  {
+  background: linear-gradient(90deg, #68b0f7 50%, #b3d9ff 50%);
+  z-index: -1;
+}
+.box > .percentBackground  {
+  background: linear-gradient(90deg, #e0edfe 50%, #fff 50%);
+  z-index: -2;
+}
+
 #categories {
   margin-top: 10px;
 }
+
+#category-filter {
+  text-align: right;
+  width: 50px;
+}
+
 </style>
-<h2>Data selection</h2>
-<ul>
-  <li>
-    <label for="isolate-select">
-      Isolate
-    </label>
-    <select id="isolate-select">
-      <option>No data</option>
-    </select>
-  </li>
-  <li>
-    <label for="dataset-select">
-      Data set
-    </label>
-    <select id="dataset-select">
-      <option>No data</option>
-    </select>
-  </li>
-  <li>
-    <input type="checkbox" id="merge-categories" checked=checked />
-    <label for="merge-categories">
-      Merge categories
-    </label>
-  </li>
-</ul>
+<section id="dataSelectionSection">
+  <h2>Data selection</h2>
+  <ul>
+    <li>
+      <label for="isolate-select">
+        Isolate
+      </label>
+      <select id="isolate-select">
+        <option>No data</option>
+      </select>
+    </li>
+    <li>
+      <label for="data-view-select">
+        Data view
+      </label>
+      <select id="data-view-select">
+        <option>No data</option>
+      </select>
+    </li>
+    <li>
+      <label for="dataset-select">
+        Data set
+      </label>
+      <select id="dataset-select">
+        <option>No data</option>
+      </select>
+    </li>
+    <li>
+      <label for="gc-select">
+        Garbage collection (at a specific time in ms)
+      </label>
+      <select id="gc-select">
+        <option>No data</option>
+      </select>
+    </li>
+    <li>
+      <input id="category-filter" type="text" value="0" disabled="disabled" />KB
+      <button id="category-filter-btn" disabled="disabled">
+        Filter categories with less memory
+      </button>
+      <button id="category-auto-filter-btn" disabled="disabled">
+        Show top 20 categories only
+      </button>
+    </li>
+    <li>
+      <button id="csv-export-btn" disabled="disabled">Export selection as CSV</button>
+    </li>
+  </ul>
 
-
-<div id="categories"></div>
+  <div id="categories"></div>
+</section>
 </template>
+
 <script type="text/javascript" src="categories.js"></script>
-<script type="text/javascript" src="details-selection.js"></script>
\ No newline at end of file
+<script type="text/javascript" src="details-selection.js"></script>
diff --git a/src/v8/tools/heap-stats/details-selection.js b/src/v8/tools/heap-stats/details-selection.js
index 43c000d..1e9cc83 100644
--- a/src/v8/tools/heap-stats/details-selection.js
+++ b/src/v8/tools/heap-stats/details-selection.js
@@ -8,6 +8,10 @@
     document.currentScript.ownerDocument.querySelector(
         '#details-selection-template');
 
+const VIEW_BY_INSTANCE_TYPE = 'by-instance-type';
+const VIEW_BY_INSTANCE_CATEGORY = 'by-instance-category';
+const VIEW_BY_FIELD_TYPE = 'by-field-type';
+
 class DetailsSelection extends HTMLElement {
   constructor() {
     super();
@@ -15,10 +19,18 @@
     shadowRoot.appendChild(details_selection_template.content.cloneNode(true));
     this.isolateSelect.addEventListener(
         'change', e => this.handleIsolateChange(e));
+    this.dataViewSelect.addEventListener(
+        'change', e => this.notifySelectionChanged(e));
     this.datasetSelect.addEventListener(
         'change', e => this.notifySelectionChanged(e));
-    this.$('#merge-categories')
-        .addEventListener('change', e => this.notifySelectionChanged(e));
+    this.gcSelect.addEventListener(
+      'change', e => this.notifySelectionChanged(e));
+    this.$('#csv-export-btn')
+        .addEventListener('click', e => this.exportCurrentSelection(e));
+    this.$('#category-filter-btn')
+        .addEventListener('click', e => this.filterCurrentSelection(e));
+    this.$('#category-auto-filter-btn')
+        .addEventListener('click', e => this.filterTop20Categories(e));
   }
 
   connectedCallback() {
@@ -36,31 +48,28 @@
     return this._data;
   }
 
-  buildCategory(name) {
-    const div = document.createElement('div');
-    div.id = name;
-    div.classList.add('box');
-    const span = document.createElement('span');
-    div.appendChild(span);
-    span.innerHTML = CATEGORY_NAMES.get(name) + ' ';
-    const all_button = document.createElement('button');
-    span.appendChild(all_button);
-    all_button.innerHTML = 'All';
-    all_button.addEventListener('click', e => this.selectCategory(name));
-    const none_button = document.createElement('button');
-    span.appendChild(none_button);
-    none_button.innerHTML = 'None';
-    none_button.addEventListener('click', e => this.unselectCategory(name));
-    const innerDiv = document.createElement('div');
-    div.appendChild(innerDiv);
-    innerDiv.id = name + 'Content';
-    return div;
+  get selectedIsolate() {
+    return this._data[this.selection.isolate];
+  }
+
+  get selectedData() {
+    console.assert(this.data, 'invalid data');
+    console.assert(this.selection, 'invalid selection');
+    return this.selectedIsolate.gcs[this.selection.gc][this.selection.data_set];
   }
 
   $(id) {
     return this.shadowRoot.querySelector(id);
   }
 
+  querySelectorAll(query) {
+    return this.shadowRoot.querySelectorAll(query);
+  }
+
+  get dataViewSelect() {
+    return this.$('#data-view-select');
+  }
+
   get datasetSelect() {
     return this.$('#dataset-select');
   }
@@ -69,17 +78,76 @@
     return this.$('#isolate-select');
   }
 
-  dataChanged() {
-    this.clearUI();
-    this.populateSelect('#isolate-select', Object.keys(this.data));
-    this.handleIsolateChange();
+  get gcSelect() {
+    return this.$('#gc-select');
   }
 
-  clearUI() {
+  buildCategory(name) {
+    const div = document.createElement('div');
+    div.id = name;
+    div.classList.add('box');
+    const ul = document.createElement('ul');
+    div.appendChild(ul);
+    const name_li = document.createElement('li');
+    ul.appendChild(name_li);
+    name_li.innerHTML = CATEGORY_NAMES.get(name);
+    const percent_li = document.createElement('li');
+    ul.appendChild(percent_li);
+    percent_li.innerHTML = '0%';
+    percent_li.id = name + 'PercentContent';
+    const all_li = document.createElement('li');
+    ul.appendChild(all_li);
+    const all_button = document.createElement('button');
+    all_li.appendChild(all_button);
+    all_button.innerHTML = 'All';
+    all_button.addEventListener('click', e => this.selectCategory(name));
+    const none_li = document.createElement('li');
+    ul.appendChild(none_li);
+    const none_button = document.createElement('button');
+    none_li.appendChild(none_button);
+    none_button.innerHTML = 'None';
+    none_button.addEventListener('click', e => this.unselectCategory(name));
+    const innerDiv = document.createElement('div');
+    div.appendChild(innerDiv);
+    innerDiv.id = name + 'Content';
+    const percentDiv = document.createElement('div');
+    div.appendChild(percentDiv);
+    percentDiv.className = 'percentBackground';
+    percentDiv.id = name + 'PercentBackground';
+    return div;
+  }
+
+  dataChanged() {
     this.selection = {categories: {}};
-    removeAllChildren(this.isolateSelect);
+    this.resetUI(true);
+    this.populateIsolateSelect();
+    this.handleIsolateChange();
+    this.$('#dataSelectionSection').style.display = 'block';
+  }
+
+  populateIsolateSelect() {
+    let isolates = Object.entries(this.data);
+    // Sort by peak heap memory consumption.
+    isolates.sort((a, b) => b[1].peakMemory - a[1].peakMemory);
+    this.populateSelect(
+        '#isolate-select', isolates, (key, isolate) => isolate.getLabel());
+  }
+
+  resetUI(resetIsolateSelect) {
+    if (resetIsolateSelect) removeAllChildren(this.isolateSelect);
+
+    removeAllChildren(this.dataViewSelect);
     removeAllChildren(this.datasetSelect);
+    removeAllChildren(this.gcSelect);
     this.clearCategories();
+    this.setButtonState('disabled');
+  }
+
+  setButtonState(disabled) {
+    this.$('#csv-export-btn').disabled = disabled;
+    this.$('#category-filter').disabled = disabled;
+    this.$('#category-filter-btn').disabled = disabled;
+    this.$('#category-auto-filter-btn').disabled = disabled;
   }
 
   handleIsolateChange(e) {
@@ -88,9 +156,27 @@
       this.selection.isolate = null;
       return;
     }
-
+    this.resetUI(false);
     this.populateSelect(
-        '#dataset-select', this.data[this.selection.isolate].data_sets, 'live');
+        '#data-view-select', [
+          [VIEW_BY_INSTANCE_TYPE, 'Selected instance types'],
+          [VIEW_BY_INSTANCE_CATEGORY, 'Selected type categories'],
+          [VIEW_BY_FIELD_TYPE, 'Field type statistics']
+        ],
+        (key, label) => label, VIEW_BY_INSTANCE_TYPE);
+    this.populateSelect(
+        '#dataset-select', this.selectedIsolate.data_sets.entries(), null,
+        'live');
+    this.populateSelect(
+        '#gc-select',
+        Object.keys(this.selectedIsolate.gcs)
+            .map(id => [id, this.selectedIsolate.gcs[id].time]),
+        (key, time, index) => {
+          return (index + ': ').padStart(4, '0') +
+              formatSeconds(time).padStart(6, '0') + ' ' +
+              formatBytes(this.selectedIsolate.gcs[key].live.overall)
+                  .padStart(9, '0');
+        });
     this.populateCategories();
     this.notifySelectionChanged();
   }
@@ -98,23 +184,105 @@
   notifySelectionChanged(e) {
     if (!this.selection.isolate) return;
 
+    this.selection.data_view = this.dataViewSelect.value;
     this.selection.categories = {};
-    for (let category of CATEGORIES.keys()) {
-      const selected = this.selectedInCategory(category);
-      if (selected.length > 0) this.selection.categories[category] = selected;
+    if (this.selection.data_view === VIEW_BY_FIELD_TYPE) {
+      this.$('#categories').style.display = 'none';
+    } else {
+      for (let category of CATEGORIES.keys()) {
+        const selected = this.selectedInCategory(category);
+        if (selected.length > 0) this.selection.categories[category] = selected;
+      }
+      this.$('#categories').style.display = 'block';
     }
     this.selection.category_names = CATEGORY_NAMES;
     this.selection.data_set = this.datasetSelect.value;
-    this.selection.merge_categories = this.$('#merge-categories').checked;
+    this.selection.gc = this.gcSelect.value;
+    this.setButtonState(false);
+    this.updatePercentagesInCategory();
+    this.updatePercentagesInInstanceTypes();
     this.dispatchEvent(new CustomEvent(
         'change', {bubbles: true, composed: true, detail: this.selection}));
   }
 
+  filterCurrentSelection(e) {
+    const minSize = this.$('#category-filter').value * KB;
+    this.filterCurrentSelectionWithThresold(minSize);
+  }
+
+  filterTop20Categories(e) {
+    // Limit to show top 20 categories only.
+    let minSize = 0;
+    let count = 0;
+    let sizes = this.selectedIsolate.instanceTypePeakMemory;
+    for (let key in sizes) {
+      if (count == 20) break;
+      minSize = sizes[key];
+      count++;
+    }
+    this.filterCurrentSelectionWithThresold(minSize);
+  }
+
+  filterCurrentSelectionWithThresold(minSize) {
+    if (minSize === 0) return;
+
+    this.selection.category_names.forEach((_, category) => {
+      for (let checkbox of this.querySelectorAll(
+               'input[name=' + category + 'Checkbox]')) {
+        checkbox.checked =
+            this.selectedData.instance_type_data[checkbox.instance_type]
+                .overall > minSize;
+        console.log(
+            checkbox.instance_type, checkbox.checked,
+            this.selectedData.instance_type_data[checkbox.instance_type]
+                .overall);
+      }
+    });
+    this.notifySelectionChanged();
+  }
+
+  updatePercentagesInCategory() {
+    const overalls = {};
+    let overall = 0;
+    // Reset all categories.
+    this.selection.category_names.forEach((_, category) => {
+      overalls[category] = 0;
+    });
+    // Only update categories that have selections.
+    Object.entries(this.selection.categories).forEach(([category, value]) => {
+      overalls[category] =
+          Object.values(value).reduce(
+              (accu, current) =>
+                  accu + this.selectedData.instance_type_data[current].overall,
+              0) /
+          KB;
+      overall += overalls[category];
+    });
+    Object.entries(overalls).forEach(([category, category_overall]) => {
+      let percents = category_overall / overall * 100;
+      this.$(`#${category}PercentContent`).innerHTML =
+          `${percents.toFixed(1)}%`;
+      this.$('#' + category + 'PercentBackground').style.left = percents + '%';
+    });
+  }
+
+  updatePercentagesInInstanceTypes() {
+    const instanceTypeData = this.selectedData.instance_type_data;
+    const maxInstanceType = this.selectedData.singleInstancePeakMemory;
+    this.querySelectorAll('.instanceTypeSelectBox  input').forEach(checkbox => {
+      let instanceType = checkbox.value;
+      let instanceTypeSize = instanceTypeData[instanceType].overall;
+      let percents = instanceTypeSize / maxInstanceType;
+      let percentDiv = checkbox.parentNode.querySelector('.percentBackground');
+      percentDiv.style.left = (percents * 100) + '%';
+
+    });
+  }
+
   selectedInCategory(category) {
-    const selected = this.shadowRoot.querySelectorAll(
-        'input[name=' + category + 'Checkbox]:checked');
-    var tmp = [];
-    for (var val of selected.values()) tmp.push(val.value);
+    let tmp = [];
+    this.querySelectorAll('input[name=' + category + 'Checkbox]:checked')
+        .forEach(checkbox => tmp.push(checkbox.value));
     return tmp;
   }
 
@@ -125,17 +293,21 @@
     return 'unclassified';
   }
 
-  createOption(text) {
+  createOption(value, text) {
     const option = document.createElement('option');
-    option.value = text;
+    option.value = value;
     option.text = text;
     return option;
   }
 
-  populateSelect(id, iterable, autoselect = null) {
-    for (let option_value of iterable) {
-      const option = this.createOption(option_value);
-      if (autoselect === option_value) {
+  populateSelect(id, iterable, labelFn = null, autoselect = null) {
+    if (labelFn == null) labelFn = e => e;
+    let index = 0;
+    for (let [key, value] of iterable) {
+      index++;
+      const label = labelFn(key, value, index);
+      const option = this.createOption(key, label);
+      if (autoselect === key) {
         option.selected = 'selected';
       }
       this.$(id).appendChild(option);
@@ -158,9 +330,7 @@
       categories[cat] = [];
     }
 
-    for (let instance_type of this.data[this.selection.isolate]
-             .non_empty_instance_types) {
-      if (IGNORED_INSTANCE_TYPES.has(instance_type)) continue;
+    for (let instance_type of this.selectedIsolate.non_empty_instance_types) {
       const category = this.categoryForType(instance_type);
       categories[category].push(instance_type);
     }
@@ -174,38 +344,65 @@
   }
 
   unselectCategory(category) {
-    for (let checkbox of this.shadowRoot.querySelectorAll(
-             'input[name=' + category + 'Checkbox]')) {
-      checkbox.checked = false;
-    }
+    this.querySelectorAll('input[name=' + category + 'Checkbox]')
+        .forEach(checkbox => checkbox.checked = false);
     this.notifySelectionChanged();
   }
 
   selectCategory(category) {
-    for (let checkbox of this.shadowRoot.querySelectorAll(
-             'input[name=' + category + 'Checkbox]')) {
-      checkbox.checked = true;
-    }
+    this.querySelectorAll('input[name=' + category + 'Checkbox]')
+        .forEach(checkbox => checkbox.checked = true);
     this.notifySelectionChanged();
   }
 
   createCheckBox(instance_type, category) {
     const div = document.createElement('div');
-    div.classList.add('boxDiv');
+    div.classList.add('instanceTypeSelectBox');
     const input = document.createElement('input');
     div.appendChild(input);
     input.type = 'checkbox';
     input.name = category + 'Checkbox';
     input.checked = 'checked';
     input.id = instance_type + 'Checkbox';
+    input.instance_type = instance_type;
     input.value = instance_type;
     input.addEventListener('change', e => this.notifySelectionChanged(e));
     const label = document.createElement('label');
     div.appendChild(label);
     label.innerText = instance_type;
     label.htmlFor = instance_type + 'Checkbox';
+    const percentDiv = document.createElement('div');
+    percentDiv.className = 'percentBackground';
+    div.appendChild(percentDiv);
     return div;
   }
+
+  exportCurrentSelection(e) {
+    const data = [];
+    const selected_data =
+        this.selectedIsolate.gcs[this.selection.gc][this.selection.data_set]
+            .instance_type_data;
+    Object.values(this.selection.categories).forEach(instance_types => {
+      instance_types.forEach(instance_type => {
+        data.push([instance_type, selected_data[instance_type].overall / KB]);
+      });
+    });
+    const createInlineContent = arrayOfRows => {
+      const content = arrayOfRows.reduce(
+          (accu, rowAsArray) => {return accu + `${rowAsArray.join(',')}\n`},
+          '');
+      return `data:text/csv;charset=utf-8,${content}`;
+    };
+    const encodedUri = encodeURI(createInlineContent(data));
+    const link = document.createElement('a');
+    link.setAttribute('href', encodedUri);
+    link.setAttribute(
+        'download',
+        `heap_objects_data_${this.selection.isolate}_${this.selection.gc}.csv`);
+    this.shadowRoot.appendChild(link);
+    link.click();
+    this.shadowRoot.removeChild(link);
+  }
 }
 
 customElements.define('details-selection', DetailsSelection);
diff --git a/src/v8/tools/heap-stats/global-timeline.js b/src/v8/tools/heap-stats/global-timeline.js
index 0533f21..3830b7c 100644
--- a/src/v8/tools/heap-stats/global-timeline.js
+++ b/src/v8/tools/heap-stats/global-timeline.js
@@ -4,9 +4,6 @@
 
 'use strict';
 
-const KB = 1024;
-const MB = KB * KB;
-
 const global_timeline_template =
     document.currentScript.ownerDocument.querySelector(
         '#global-timeline-template');
@@ -60,6 +57,57 @@
     }
   }
 
+  getFieldData() {
+    const labels = [
+      {type: 'number', label: 'Time'},
+      {type: 'number', label: 'Ptr compression benefit'},
+      {type: 'string', role: 'tooltip'},
+      {type: 'number', label: 'Embedder fields'},
+      {type: 'number', label: 'Tagged fields'},
+      {type: 'number', label: 'Other raw fields'},
+      {type: 'number', label: 'Unboxed doubles'}
+    ];
+    const chart_data = [labels];
+    const isolate_data = this.data[this.selection.isolate];
+    let sum_total = 0;
+    let sum_ptr_compr_benefit_perc = 0;
+    let count = 0;
+    Object.keys(isolate_data.gcs).forEach(gc_key => {
+      const gc_data = isolate_data.gcs[gc_key];
+      const data_set = gc_data[this.selection.data_set].field_data;
+      const data = [];
+      data.push(gc_data.time * kMillis2Seconds);
+      const total = data_set.tagged_fields +
+                    data_set.embedder_fields +
+                    data_set.other_raw_fields +
+                    data_set.unboxed_double_fields;
+      const ptr_compr_benefit = data_set.tagged_fields / 2;
+      const ptr_compr_benefit_perc = ptr_compr_benefit / total * 100;
+      sum_total += total;
+      sum_ptr_compr_benefit_perc += ptr_compr_benefit_perc;
+      count++;
+      const tooltip = "Ptr compression benefit: " +
+                      (ptr_compr_benefit / KB).toFixed(2) + "KB " +
+                      " (" + ptr_compr_benefit_perc.toFixed(2) + "%)";
+      data.push(ptr_compr_benefit / KB);
+      data.push(tooltip);
+      data.push(data_set.embedder_fields / KB);
+      data.push(data_set.tagged_fields / KB);
+      data.push(data_set.other_raw_fields / KB);
+      data.push(data_set.unboxed_double_fields / KB);
+      chart_data.push(data);
+    });
+    const avg_ptr_compr_benefit_perc =
+        count ? sum_ptr_compr_benefit_perc / count : 0;
+    console.log("==================================================");
+    console.log("= Average ptr compression benefit is " +
+                avg_ptr_compr_benefit_perc.toFixed(2) + "%");
+    console.log("= Average V8 heap size " +
+                (sum_total / count / KB).toFixed(2) + " KB");
+    console.log("==================================================");
+    return chart_data;
+  }
+
   getCategoryData() {
     const categories = Object.keys(this.selection.categories)
                            .map(k => this.selection.category_names.get(k));
@@ -70,7 +118,7 @@
       const gc_data = isolate_data.gcs[gc_key];
       const data_set = gc_data[this.selection.data_set].instance_type_data;
       const data = [];
-      data.push(gc_data.time);
+      data.push(gc_data.time * kMillis2Seconds);
       Object.values(this.selection.categories).forEach(instance_types => {
         data.push(
             instance_types
@@ -86,7 +134,6 @@
   }
 
   getInstanceTypeData() {
-    const categories = Object.keys(this.selection.categories);
     const instance_types =
         Object.values(this.selection.categories)
             .reduce((accu, current) => accu.concat(current), []);
@@ -97,7 +144,7 @@
       const gc_data = isolate_data.gcs[gc_key];
       const data_set = gc_data[this.selection.data_set].instance_type_data;
       const data = [];
-      data.push(gc_data.time);
+      data.push(gc_data.time * kMillis2Seconds);
       instance_types.forEach(instance_type => {
         data.push(data_set[instance_type].overall / KB);
       });
@@ -106,26 +153,56 @@
     return chart_data;
   }
 
-  drawChart() {
-    console.assert(this.data, 'invalid data');
-    console.assert(this.selection, 'invalid selection');
+  getChartData() {
+    switch (this.selection.data_view) {
+      case VIEW_BY_FIELD_TYPE:
+        return this.getFieldData();
+      case VIEW_BY_INSTANCE_CATEGORY:
+        return this.getCategoryData();
+      case VIEW_BY_INSTANCE_TYPE:
+      default:
+        return this.getInstanceTypeData();
+    }
+  }
 
-    const chart_data = (this.selection.merge_categories) ?
-        this.getCategoryData() :
-        this.getInstanceTypeData();
-    const data = google.visualization.arrayToDataTable(chart_data);
+  getChartOptions() {
     const options = {
       isStacked: true,
       hAxis: {
-        title: 'Time [ms]',
+        format: '###.##s',
+        title: 'Time [s]',
       },
-      vAxis: {title: 'Memory consumption [KBytes]'},
-      chartArea: {width: '85%', height: '70%'},
+      vAxis: {
+        format: '#,###KB',
+        title: 'Memory consumption [KBytes]'
+      },
+      chartArea: {left:100, width: '85%', height: '70%'},
       legend: {position: 'top', maxLines: '1'},
       pointsVisible: true,
       pointSize: 5,
       explorer: {},
     };
+    switch (this.selection.data_view) {
+      case VIEW_BY_FIELD_TYPE:
+        // Overlay pointer compression benefit on top of the graph
+        return Object.assign(options, {
+          series: {0: {type: 'line', lineDashStyle: [13, 13]}},
+        });
+      case VIEW_BY_INSTANCE_CATEGORY:
+      case VIEW_BY_INSTANCE_TYPE:
+      default:
+        return options;
+    }
+  }
+
+  drawChart() {
+    console.assert(this.data, 'invalid data');
+    console.assert(this.selection, 'invalid selection');
+
+    const chart_data = this.getChartData();
+
+    const data = google.visualization.arrayToDataTable(chart_data);
+    const options = this.getChartOptions();
     const chart = new google.visualization.AreaChart(this.$('#chart'));
     this.show();
     chart.draw(data, google.charts.Line.convertOptions(options));
diff --git a/src/v8/tools/heap-stats/helper.js b/src/v8/tools/heap-stats/helper.js
new file mode 100644
index 0000000..00f68f9
--- /dev/null
+++ b/src/v8/tools/heap-stats/helper.js
@@ -0,0 +1,23 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const KB = 1024;
+const MB = KB * KB;
+const GB = MB * KB;
+const kMillis2Seconds = 1 / 1000;
+
+function formatBytes(bytes) {
+  const units = ['B', 'KiB', 'MiB', 'GiB'];
+  const divisor = 1024;
+  let index = 0;
+  while (index < units.length && bytes >= divisor) {
+    index++;
+    bytes /= divisor;
+  }
+  return bytes.toFixed(2) + units[index];
+}
+
+function formatSeconds(millis) {
+  return (millis * kMillis2Seconds).toFixed(2) + 's';
+}
diff --git a/src/v8/tools/heap-stats/histogram-viewer.html b/src/v8/tools/heap-stats/histogram-viewer.html
new file mode 100644
index 0000000..93fe980
--- /dev/null
+++ b/src/v8/tools/heap-stats/histogram-viewer.html
@@ -0,0 +1,19 @@
+<!-- Copyright 2018 the V8 project authors. All rights reserved.
+Use of this source code is governed by a BSD-style license that can be
+found in the LICENSE file. -->
+<template id="histogram-viewer-template">
+<style>
+#chart {
+  width: 100%;
+  height: 800px;
+}
+</style>
+<div id="container" style="display: none;">
+  <h2>Details</h2>
+  <ul>
+    <li><span id="overall"></span></li>
+  </ul>
+  <div id="chart"></div>
+</div>
+</template>
+<script type="text/javascript"  src="histogram-viewer.js"></script>
\ No newline at end of file
diff --git a/src/v8/tools/heap-stats/histogram-viewer.js b/src/v8/tools/heap-stats/histogram-viewer.js
new file mode 100644
index 0000000..240c6cb
--- /dev/null
+++ b/src/v8/tools/heap-stats/histogram-viewer.js
@@ -0,0 +1,190 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+const histogram_viewer_template =
+    document.currentScript.ownerDocument.querySelector(
+        '#histogram-viewer-template');
+
+class HistogramViewer extends HTMLElement {
+  constructor() {
+    super();
+    const shadowRoot = this.attachShadow({mode: 'open'});
+    shadowRoot.appendChild(histogram_viewer_template.content.cloneNode(true));
+  }
+
+  $(id) {
+    return this.shadowRoot.querySelector(id);
+  }
+
+  set data(value) {
+    this._data = value;
+    this.stateChanged();
+  }
+
+  get data() {
+    return this._data;
+  }
+
+  set selection(value) {
+    this._selection = value;
+    this.stateChanged();
+  }
+
+  get selection() {
+    return this._selection;
+  }
+
+  isValid() {
+    return this.data && this.selection &&
+           (this.selection.data_view === VIEW_BY_INSTANCE_CATEGORY ||
+            this.selection.data_view === VIEW_BY_INSTANCE_TYPE);
+    ;
+  }
+
+  hide() {
+    this.$('#container').style.display = 'none';
+  }
+
+  show() {
+    this.$('#container').style.display = 'block';
+  }
+
+  getOverallValue() {
+    switch (this.selection.data_view) {
+      case VIEW_BY_FIELD_TYPE:
+        return NaN;
+      case VIEW_BY_INSTANCE_CATEGORY:
+        return this.getPropertyForCategory('overall');
+      case VIEW_BY_INSTANCE_TYPE:
+      default:
+        return this.getPropertyForInstanceTypes('overall');
+    }
+  }
+
+  stateChanged() {
+    if (this.isValid()) {
+      const overall_bytes = this.getOverallValue();
+      this.$('#overall').innerHTML = `Overall: ${overall_bytes / KB} KB`;
+      this.drawChart();
+    } else {
+      this.hide();
+    }
+  }
+
+  get selectedData() {
+    console.assert(this.data, 'invalid data');
+    console.assert(this.selection, 'invalid selection');
+    return this.data[this.selection.isolate]
+        .gcs[this.selection.gc][this.selection.data_set];
+  }
+
+  get selectedInstanceTypes() {
+    console.assert(this.selection, 'invalid selection');
+    return Object.values(this.selection.categories)
+        .reduce((accu, current) => accu.concat(current), []);
+  }
+
+  getPropertyForCategory(property) {
+    return Object.values(this.selection.categories)
+        .reduce(
+            (outer_accu, instance_types) => outer_accu +
+                instance_types.reduce(
+                    (inner_accu, instance_type) => inner_accu +
+                        this.selectedData
+                            .instance_type_data[instance_type][property],
+                    0),
+            0);
+  }
+
+  getPropertyForInstanceTypes(property) {
+    return this.selectedInstanceTypes.reduce(
+        (accu, instance_type) => accu +
+            this.selectedData.instance_type_data[instance_type][property],
+        0);
+  }
+
+  formatBytes(bytes) {
+    const units = ['B', 'KiB', 'MiB'];
+    const divisor = 1024;
+    let index = 0;
+    while (index < units.length && bytes >= divisor) {
+      index++;
+      bytes /= divisor;
+    }
+    return bytes + units[index];
+  }
+
+  getCategoryData() {
+    const labels = [
+      'Bucket',
+      ...Object.keys(this.selection.categories)
+          .map(k => this.selection.category_names.get(k))
+    ];
+    const data = this.selectedData.bucket_sizes.map(
+        (bucket_size, index) =>
+            [`<${this.formatBytes(bucket_size)}`,
+             ...Object.values(this.selection.categories)
+                 .map(
+                     instance_types =>
+                         instance_types
+                             .map(
+                                 instance_type =>
+                                     this.selectedData
+                                         .instance_type_data[instance_type]
+                                         .histogram[index])
+                             .reduce((accu, current) => accu + current, 0))]);
+    // Adjust last histogram bucket label.
+    data[data.length - 1][0] = 'rest';
+    return [labels, ...data];
+  }
+
+  getInstanceTypeData() {
+    const instance_types = this.selectedInstanceTypes;
+    const labels = ['Bucket', ...instance_types];
+    const data = this.selectedData.bucket_sizes.map(
+        (bucket_size, index) =>
+            [`<${bucket_size}`,
+             ...instance_types.map(
+                 instance_type =>
+                     this.selectedData.instance_type_data[instance_type]
+                         .histogram[index])]);
+    // Adjust last histogram bucket label.
+    data[data.length - 1][0] = 'rest';
+    return [labels, ...data];
+  }
+
+  getChartData() {
+    switch (this.selection.data_view) {
+      case VIEW_BY_FIELD_TYPE:
+        return this.getFieldData();
+      case VIEW_BY_INSTANCE_CATEGORY:
+        return this.getCategoryData();
+      case VIEW_BY_INSTANCE_TYPE:
+      default:
+        return this.getInstanceTypeData();
+    }
+  }
+
+  drawChart() {
+    const chart_data = this.getChartData();
+    const data = google.visualization.arrayToDataTable(chart_data);
+    const options = {
+      legend: {position: 'top', maxLines: '1'},
+      chartArea: {width: '85%', height: '85%'},
+      bar: {groupWidth: '80%'},
+      hAxis: {
+        title: 'Count',
+        minValue: 0
+      },
+      explorer: {},
+    };
+    const chart = new google.visualization.BarChart(this.$('#chart'));
+    this.show();
+    chart.draw(data, options);
+  }
+}
+
+customElements.define('histogram-viewer', HistogramViewer);
diff --git a/src/v8/tools/heap-stats/index.html b/src/v8/tools/heap-stats/index.html
index 3c2e62b..11fac21 100644
--- a/src/v8/tools/heap-stats/index.html
+++ b/src/v8/tools/heap-stats/index.html
@@ -8,15 +8,22 @@
 <head>
   <meta charset="UTF-8">
   <title>V8 Heap Statistics</title>
-  <link href='https://fonts.googleapis.com/css?family=Roboto' rel='stylesheet' type='text/css'>
-  <script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
+  <link href='https://fonts.googleapis.com/css?family=Roboto' rel='stylesheet'>
+  <script
+          src="https://www.gstatic.com/charts/loader.js"></script>
+  <script
+          src="https://cdnjs.cloudflare.com/ajax/libs/pako/1.0.6/pako_inflate.min.js"
+          integrity="sha256-N1z6ddQzX83fjw8v7uSNe7/MgOmMKdwFUv1+AJMDqNM="
+          crossorigin="anonymous"></script>
+
+  <script src="helper.js"></script>
 
   <link rel="import" href="details-selection.html">
   <link rel="import" href="global-timeline.html">
+  <link rel="import" href="histogram-viewer.html">
   <link rel="import" href="trace-file-reader.html">
 
-  <style type="text/css">
-
+  <style>
 body {
   font-family: 'Roboto', sans-serif;
   margin-left: 5%;
@@ -24,11 +31,10 @@
 }
 
   </style>
-  <script type="text/javascript">
-
+  <script>
 'use strict';
 
-google.charts.load('current', {'packages':['line', 'corechart']});
+google.charts.load('current', {'packages':['line', 'corechart', 'bar']});
 
 function $(id) { return document.querySelector(id); }
 
@@ -47,42 +53,48 @@
   state.selection = null;
   $('#global-timeline').selection = state.selection;
   $('#global-timeline').data = state.data;
-  $('#type-details').selection = state.selection;
-  $('#type-details').data = state.data;
+  $('#histogram-viewer').selection = state.selection;
+  $('#histogram-viewer').data = state.data;
   $('#details-selection').data = state.data;
 }
 
 function globalSelectionChangedA(e) {
   state.selection = e.detail;
+  console.log(state.selection);
   $('#global-timeline').selection = state.selection;
-  $('#type-details').selection = state.selection;
+  $('#histogram-viewer').selection = state.selection;
 }
 
   </script>
 </head>
 
 <body>
-  <trace-file-reader onchange="globalDataChanged(event)"></trace-file-reader>
   <h1>V8 Heap Statistics</h1>
+  <trace-file-reader onchange="globalDataChanged(event)"></trace-file-reader>
+
+  <details-selection id="details-selection" onchange="globalSelectionChangedA(event)"></details-selection>
+  <global-timeline id="global-timeline"></global-timeline>
+  <histogram-viewer id="histogram-viewer"></histogram-viewer>
+
   <p>Visualize object statistics that have been gathered using</p>
   <ul>
-    <li><code>--trace-gc-object-stats on V8</code></li>
+    <li><code>--trace-gc-object-stats</code> on V8</li>
     <li>
       <a
         href="https://www.chromium.org/developers/how-tos/trace-event-profiling-tool">Chrome's
         tracing infrastructure</a> collecting data for the category
-      <code>v8.gc_stats</code>. The trace file needs to be unpacked (e.g. using
-      <code>gunzip</code>).
+      <code>v8.gc_stats</code>.
     </li>
   </ul>
   <p>
+    Note that you only get a data point on major GCs. You can enforce this by
+    using the <code>--gc-global</code> flag.
+  </p>
+  <p>
     Note that the visualizer needs to run on a web server due to HTML imports
     requiring <a
          href="https://en.wikipedia.org/wiki/Cross-origin_resource_sharing">CORS</a>.
   </p>
-  <details-selection id="details-selection" onchange="globalSelectionChangedA(event)"></details-selection>
-  <global-timeline id="global-timeline"></global-timeline>
-  <type-details id="type-details"></type-details>
 </body>
 
 </html>
diff --git a/src/v8/tools/heap-stats/model.js b/src/v8/tools/heap-stats/model.js
new file mode 100644
index 0000000..cd6e1e5
--- /dev/null
+++ b/src/v8/tools/heap-stats/model.js
@@ -0,0 +1,105 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+class Isolate {
+  constructor(address) {
+    this.address = address;
+    this.start = null;
+    this.end = null;
+    this.samples = Object.create(null);
+    this.non_empty_instance_types = new Set();
+    this.gcs = Object.create(null);
+    this.zonetags = [];
+    this.samples = {zone: {}};
+    this.data_sets = new Set();
+    this.peakMemory = 0;
+    // Maps instance_types to their max memory consumption over all gcs.
+    this.instanceTypePeakMemory = Object.create(null);
+    // Peak memory consumed by any single instance type.
+    this.singleInstanceTypePeakMemory = 0;
+  }
+
+  finalize() {
+    Object.values(this.gcs).forEach(gc => this.finalizeGC(gc));
+    this.sortInstanceTypePeakMemory();
+  }
+
+  getLabel() {
+    let label = `${this.address}: gc=#${Object.keys(this.gcs).length}`;
+    label += ` peak=${formatBytes(this.peakMemory)}`
+    return label;
+  }
+
+  finalizeGC(gc_data) {
+    this.data_sets.forEach(key => this.finalizeDataSet(gc_data[key]));
+    if (!('live' in gc_data)) return;
+    let liveData = gc_data.live;
+    this.peakMemory = Math.max(this.peakMemory, liveData.overall);
+    let data = liveData.instance_type_data;
+    for (let name in data) {
+      let prev = this.instanceTypePeakMemory[name] || 0;
+      this.instanceTypePeakMemory[name] = Math.max(prev, data[name].overall);
+    }
+  }
+
+  finalizeDataSet(data_set) {
+    // Create a ranked instance type array that sorts instance types by
+    // memory size (overall).
+    let data = data_set.instance_type_data;
+    let ranked_instance_types =
+        [...data_set.non_empty_instance_types].sort((a, b) => {
+          return data[a].overall - data[b].overall;
+        });
+    // Reassemble the instance_type list sorted by size.
+    let sorted_data = Object.create(null);
+    let max = 0;
+    ranked_instance_types.forEach((name) => {
+      let entry = sorted_data[name] = data[name];
+      max = Math.max(max, entry.overall);
+    });
+    data_set.instance_type_data = data;
+    data_set.singleInstancePeakMemory = max;
+
+    Object.entries(data_set.instance_type_data).forEach(([name, entry]) => {
+      this.checkHistogram(
+          name, entry, data_set.bucket_sizes, 'histogram', ' overall');
+      this.checkHistogram(
+          name, entry, data_set.bucket_sizes, 'over_allocated_histogram',
+          ' over_allocated');
+    });
+  }
+
+  // Check that a lower bound for histogram memory does not exceed the
+  // overall counter.
+  checkHistogram(type, entry, bucket_sizes, histogram, overallProperty) {
+    let sum = 0;
+    for (let i = 1; i < entry[histogram].length; i++) {
+      sum += entry[histogram][i] * bucket_sizes[i - 1];
+    }
+    const overall = entry[overallProperty];
+    if (sum >= overall) {
+      console.error(
+          `${type}: sum('${histogram}') > overall (${sum} > ${overall})`);
+    }
+  }
+
+  sortInstanceTypePeakMemory() {
+    let entries = Object.entries(this.instanceTypePeakMemory);
+    entries.sort((a, b) => {return b[1] - a[1]});
+    this.instanceTypePeakMemory = Object.create(null);
+    let max = 0;
+    for (let [key, value] of entries) {
+      this.instanceTypePeakMemory[key] = value;
+      max = Math.max(max, value);
+    }
+    this.singleInstanceTypePeakMemory = max;
+  }
+
+  getInstanceTypePeakMemory(type) {
+    if (!(type in this.instanceTypePeakMemory)) return 0;
+    return this.instanceTypePeakMemory[type];
+  }
+}
diff --git a/src/v8/tools/heap-stats/trace-file-reader.html b/src/v8/tools/heap-stats/trace-file-reader.html
index 98c2ef0..c5e5c6f 100644
--- a/src/v8/tools/heap-stats/trace-file-reader.html
+++ b/src/v8/tools/heap-stats/trace-file-reader.html
@@ -10,17 +10,77 @@
   text-align: center;
   border: solid 1px #000000;
   border-radius: 5px;
+  cursor: pointer;
+  transition: all 0.5s ease-in-out;
+}
+
+#fileReader.done {
+    height: 20px;
+    line-height: 20px;
+}
+
+#fileReader:hover {
+  background-color: #e0edfe ;
+}
+
+.loading #fileReader {
+  cursor: wait;
 }
 
 #fileReader > input {
   display: none;
 }
+
+
+#loader {
+  display: none;
+}
+
+.loading #loader {
+  display: block;
+  position: fixed;
+  top: 0px;
+  left: 0px;
+  width: 100%;
+  height: 100%;
+  background-color: rgba(255, 255, 255, 0.5);
+}
+
+#spinner {
+  position: absolute;
+  width: 100px;
+  height: 100px;
+  top: 40%;
+  left: 50%;
+  margin-left: -50px;
+  border: 30px solid #000;
+  border-top: 30px solid #36E;
+  border-radius: 50%;
+  animation: spin 1s ease-in-out infinite;
+}
+
+@keyframes spin {
+ 0% {
+    transform: rotate(0deg);
+ }
+ 100% {
+    transform: rotate(360deg);
+ }
+}
 </style>
-<div id="fileReader">
-  <span id="label">
-    Drag and drop a trace file into this area, or click to choose from disk.
-   </span>
-  <input id="file" type="file" name="file" />
-</div>
+
+<section id="fileReaderSection">
+  <div id="fileReader" tabindex=1 >
+    <span id="label">
+      Drag and drop a trace file into this area, or click to choose from disk.
+     </span>
+    <input id="file" type="file" name="file" />
+  </div>
+  <div id="loader">
+    <div id="spinner"></div>
+  </div>
+</section>
 </template>
-<script type="text/javascript" src="trace-file-reader.js"></script>
+<script type="text/javascript" src="model.js"></script>
+
+<script src="trace-file-reader.js"></script>
diff --git a/src/v8/tools/heap-stats/trace-file-reader.js b/src/v8/tools/heap-stats/trace-file-reader.js
index 59825fe..4fec9a1 100644
--- a/src/v8/tools/heap-stats/trace-file-reader.js
+++ b/src/v8/tools/heap-stats/trace-file-reader.js
@@ -17,16 +17,25 @@
     this.addEventListener('dragover', e => this.handleDragOver(e));
     this.addEventListener('drop', e => this.handleChange(e));
     this.$('#file').addEventListener('change', e => this.handleChange(e));
+    this.$('#fileReader').addEventListener('keydown', e => this.handleKeyEvent(e));
   }
 
   $(id) {
     return this.shadowRoot.querySelector(id);
   }
 
+  get section() {
+    return this.$('#fileReaderSection');
+  }
+
   updateLabel(text) {
     this.$('#label').innerText = text;
   }
 
+  handleKeyEvent(event) {
+    if (event.key == "Enter") this.handleClick(event);
+  }
+
   handleClick(event) {
     this.$('#file').click();
   }
@@ -42,56 +51,81 @@
     event.preventDefault();
   }
 
-  connectedCallback() {}
+  connectedCallback() {
+    this.$('#fileReader').focus();
+  }
 
   readFile(file) {
     if (!file) {
       this.updateLabel('Failed to load file.');
       return;
     }
+    this.$('#fileReader').blur();
 
-    const result = new FileReader();
-    result.onload = (e) => {
-      let contents = e.target.result.split('\n');
-      const return_data = (e.target.result.includes('V8.GC_Objects_Stats')) ?
-          this.createModelFromChromeTraceFile(contents) :
-          this.createModelFromV8TraceFile(contents);
-      this.updateLabel('Finished loading \'' + file.name + '\'.');
-      this.dispatchEvent(new CustomEvent(
-          'change', {bubbles: true, composed: true, detail: return_data}));
-    };
-    result.readAsText(file);
+    this.section.className = 'loading';
+    const reader = new FileReader();
+
+    if (['application/gzip', 'application/x-gzip'].includes(file.type)) {
+      reader.onload = (e) => {
+        try {
+          const textResult = pako.inflate(e.target.result, {to: 'string'});
+          this.processRawText(file, textResult);
+          this.section.className = 'success';
+          this.$('#fileReader').classList.add('done');
+        } catch (err) {
+          console.error(err);
+          this.section.className = 'failure';
+        }
+      };
+      // Delay the loading a bit to allow for CSS animations to happen.
+      setTimeout(() => reader.readAsArrayBuffer(file), 0);
+    } else {
+      reader.onload = (e) => {
+        try {
+          this.processRawText(file, e.target.result);
+          this.section.className = 'success';
+          this.$('#fileReader').classList.add('done');
+        } catch (err) {
+          console.error(err);
+          this.section.className = 'failure';
+        }
+      };
+      // Delay the loading a bit to allow for CSS animations to happen.
+      setTimeout(() => reader.readAsText(file), 0);
+    }
   }
 
-  createOrUpdateEntryIfNeeded(data, keys, entry) {
+  processRawText(file, result) {
+    let contents = result.split('\n');
+    const return_data = (result.includes('V8.GC_Objects_Stats')) ?
+        this.createModelFromChromeTraceFile(contents) :
+        this.createModelFromV8TraceFile(contents);
+    this.extendAndSanitizeModel(return_data);
+    this.updateLabel('Finished loading \'' + file.name + '\'.');
+    this.dispatchEvent(new CustomEvent(
+        'change', {bubbles: true, composed: true, detail: return_data}));
+  }
+
+  createOrUpdateEntryIfNeeded(data, entry) {
     console.assert(entry.isolate, 'entry should have an isolate');
-    if (!(entry.isolate in keys)) {
-      keys[entry.isolate] = new Set();
-    }
     if (!(entry.isolate in data)) {
-      data[entry.isolate] = {
-        non_empty_instance_types: new Set(),
-        gcs: {},
-        zonetags: [],
-        samples: {zone: {}},
-        start: null,
-        end: null,
-        data_sets: new Set()
-      };
+      data[entry.isolate] = new Isolate(entry.isolate);
     }
     const data_object = data[entry.isolate];
     if (('id' in entry) && !(entry.id in data_object.gcs)) {
       data_object.gcs[entry.id] = {non_empty_instance_types: new Set()};
     }
     if ('time' in entry) {
-      if (data_object.end === null || data_object.end < entry.time)
+      if (data_object.end === null || data_object.end < entry.time) {
         data_object.end = entry.time;
-      if (data_object.start === null || data_object.start > entry.time)
+      }
+      if (data_object.start === null || data_object.start > entry.time) {
         data_object.start = entry.time;
+      }
     }
   }
 
-  createDatasetIfNeeded(data, keys, entry, data_set) {
+  createDatasetIfNeeded(data, entry, data_set) {
     if (!(data_set in data[entry.isolate].gcs[entry.id])) {
       data[entry.isolate].gcs[entry.id][data_set] = {
         instance_type_data: {},
@@ -102,9 +136,17 @@
     }
   }
 
-  addInstanceTypeData(
-      data, keys, isolate, gc_id, data_set, instance_type, entry) {
-    keys[isolate].add(data_set);
+  addFieldTypeData(data, isolate, gc_id, data_set, tagged_fields,
+                   embedder_fields, unboxed_double_fields, other_raw_fields) {
+    data[isolate].gcs[gc_id][data_set].field_data = {
+      tagged_fields,
+      embedder_fields,
+      unboxed_double_fields,
+      other_raw_fields
+    };
+  }
+
+  addInstanceTypeData(data, isolate, gc_id, data_set, instance_type, entry) {
     data[isolate].gcs[gc_id][data_set].instance_type_data[instance_type] = {
       overall: entry.overall,
       count: entry.count,
@@ -121,117 +163,82 @@
     }
   }
 
-  extendAndSanitizeModel(data, keys) {
+  extendAndSanitizeModel(data) {
     const checkNonNegativeProperty = (obj, property) => {
       console.assert(obj[property] >= 0, 'negative property', obj, property);
     };
 
-    for (const isolate of Object.keys(data)) {
-      for (const gc of Object.keys(data[isolate].gcs)) {
-        for (const data_set_key of keys[isolate]) {
-          const data_set = data[isolate].gcs[gc][data_set_key];
-          // 1. Create a ranked instance type array that sorts instance
-          // types by memory size (overall).
-          data_set.ranked_instance_types =
-              [...data_set.non_empty_instance_types].sort(function(a, b) {
-                if (data_set.instance_type_data[a].overall >
-                    data_set.instance_type_data[b].overall) {
-                  return 1;
-                } else if (
-                    data_set.instance_type_data[a].overall <
-                    data_set.instance_type_data[b].overall) {
-                  return -1;
-                }
-                return 0;
-              });
-
-          let known_count = 0;
-          let known_overall = 0;
-          let known_histogram =
-              Array(
-                  data_set.instance_type_data.FIXED_ARRAY_TYPE.histogram.length)
-                  .fill(0);
-          for (const instance_type in data_set.instance_type_data) {
-            if (!instance_type.startsWith('*FIXED_ARRAY')) continue;
-            const subtype = data_set.instance_type_data[instance_type];
-            known_count += subtype.count;
-            known_overall += subtype.count;
-            for (let i = 0; i < subtype.histogram.length; i++) {
-              known_histogram[i] += subtype.histogram[i];
-            }
-          }
-
-          const fixed_array_data = data_set.instance_type_data.FIXED_ARRAY_TYPE;
-          const unknown_entry = {
-            count: fixed_array_data.count - known_count,
-            overall: fixed_array_data.overall - known_overall,
-            histogram: fixed_array_data.histogram.map(
-                (value, index) => value - known_histogram[index])
-          };
-
-          // Check for non-negative values.
-          checkNonNegativeProperty(unknown_entry, 'count');
-          checkNonNegativeProperty(unknown_entry, 'overall');
-          for (let i = 0; i < unknown_entry.histogram.length; i++) {
-            checkNonNegativeProperty(unknown_entry.histogram, i);
-          }
-
-          data_set.instance_type_data['*FIXED_ARRAY_UNKNOWN_SUB_TYPE'] =
-              unknown_entry;
-          data_set.non_empty_instance_types.add(
-              '*FIXED_ARRAY_UNKNOWN_SUB_TYPE');
-        }
-      }
-    }
+    Object.values(data).forEach(isolate => isolate.finalize());
   }
 
   createModelFromChromeTraceFile(contents) {
-    console.log('Processing log as chrome trace file.');
-    const data = Object.create(null);  // Final data container.
-    const keys = Object.create(null);  // Collecting 'keys' per isolate.
+    // Trace files support two formats.
+    // {traceEvents: [ data ]}
+    const kObjectTraceFile = {
+      name: 'object',
+      endToken: ']}',
+      getDataArray: o => o.traceEvents
+    };
+    // [ data ]
+    const kArrayTraceFile = {
+      name: 'array',
+      endToken: ']',
+      getDataArray: o => o
+    };
+    const handler =
+        (contents[0][0] === '{') ? kObjectTraceFile : kArrayTraceFile;
+    console.log(`Processing log as chrome trace file (${handler.name}).`);
 
     // Pop last line in log as it might be broken.
     contents.pop();
     // Remove trailing comma.
     contents[contents.length - 1] = contents[contents.length - 1].slice(0, -1);
     // Terminate JSON.
-    const sanitized_contents = [...contents, ']}'].join('');
+    const sanitized_contents = [...contents, handler.endToken].join('');
+
+    const data = Object.create(null);  // Final data container.
     try {
       const raw_data = JSON.parse(sanitized_contents);
-      const objects_stats_data =
-          raw_data.traceEvents.filter(e => e.name == 'V8.GC_Objects_Stats');
-      objects_stats_data.forEach(trace_data => {
-        const actual_data = trace_data.args;
-        const data_sets = new Set(Object.keys(actual_data));
-        Object.keys(actual_data).forEach(data_set => {
-          const string_entry = actual_data[data_set];
-          try {
-            const entry = JSON.parse(string_entry);
-            this.createOrUpdateEntryIfNeeded(data, keys, entry);
-            this.createDatasetIfNeeded(data, keys, entry, data_set);
-            const isolate = entry.isolate;
-            const time = entry.time;
-            const gc_id = entry.id;
-            data[isolate].gcs[gc_id].time = time;
-            data[isolate].gcs[gc_id][data_set].bucket_sizes =
-                entry.bucket_sizes;
-            for (let [instance_type, value] of Object.entries(
-                     entry.type_data)) {
-              // Trace file format uses markers that do not have actual
-              // properties.
-              if (!('overall' in value)) continue;
-              this.addInstanceTypeData(
-                  data, keys, isolate, gc_id, data_set, instance_type, value);
-            }
-          } catch (e) {
-            console.log('Unable to parse data set entry', e);
-          }
-        });
-      });
+      const raw_array_data = handler.getDataArray(raw_data);
+      raw_array_data.filter(e => e.name === 'V8.GC_Objects_Stats')
+          .forEach(trace_data => {
+            const actual_data = trace_data.args;
+            const data_sets = new Set(Object.keys(actual_data));
+            Object.keys(actual_data).forEach(data_set => {
+              const string_entry = actual_data[data_set];
+              try {
+                const entry = JSON.parse(string_entry);
+                this.createOrUpdateEntryIfNeeded(data, entry);
+                this.createDatasetIfNeeded(data, entry, data_set);
+                const isolate = entry.isolate;
+                const time = entry.time;
+                const gc_id = entry.id;
+                data[isolate].gcs[gc_id].time = time;
+
+                const field_data = entry.field_data;
+                this.addFieldTypeData(data, isolate, gc_id, data_set,
+                  field_data.tagged_fields, field_data.embedder_fields,
+                  field_data.unboxed_double_fields,
+                  field_data.other_raw_fields);
+
+                data[isolate].gcs[gc_id][data_set].bucket_sizes =
+                    entry.bucket_sizes;
+                for (let [instance_type, value] of Object.entries(
+                         entry.type_data)) {
+                  // Trace file format uses markers that do not have actual
+                  // properties.
+                  if (!('overall' in value)) continue;
+                  this.addInstanceTypeData(
+                      data, isolate, gc_id, data_set, instance_type, value);
+                }
+              } catch (e) {
+                console.log('Unable to parse data set entry', e);
+              }
+            });
+          });
     } catch (e) {
-      console.log('Unable to parse chrome trace file.', e);
+      console.error('Unable to parse chrome trace file.', e);
     }
-    this.extendAndSanitizeModel(data, keys);
     return data;
   }
 
@@ -243,20 +250,18 @@
         line = line.replace(/^I\/v8\s*\(\d+\):\s+/g, '');
         return JSON.parse(line);
       } catch (e) {
-        console.log('Unable to parse line: \'' + line + '\'\' (' + e + ')');
+        console.log('Unable to parse line: \'' + line + '\' (' + e + ')');
       }
       return null;
     });
 
     const data = Object.create(null);  // Final data container.
-    const keys = Object.create(null);  // Collecting 'keys' per isolate.
-
     for (var entry of contents) {
       if (entry === null || entry.type === undefined) {
         continue;
       }
       if (entry.type === 'zone') {
-        this.createOrUpdateEntryIfNeeded(data, keys, entry);
+        this.createOrUpdateEntryIfNeeded(data, entry);
         const stacktrace = ('stacktrace' in entry) ? entry.stacktrace : [];
         data[entry.isolate].samples.zone[entry.time] = {
           allocated: entry.allocated,
@@ -265,26 +270,32 @@
         };
       } else if (
           entry.type === 'zonecreation' || entry.type === 'zonedestruction') {
-        this.createOrUpdateEntryIfNeeded(data, keys, entry);
+        this.createOrUpdateEntryIfNeeded(data, entry);
         data[entry.isolate].zonetags.push(
             Object.assign({opening: entry.type === 'zonecreation'}, entry));
       } else if (entry.type === 'gc_descriptor') {
-        this.createOrUpdateEntryIfNeeded(data, keys, entry);
+        this.createOrUpdateEntryIfNeeded(data, entry);
         data[entry.isolate].gcs[entry.id].time = entry.time;
         if ('zone' in entry)
           data[entry.isolate].gcs[entry.id].malloced = entry.zone;
+      } else if (entry.type === 'field_data') {
+        this.createOrUpdateEntryIfNeeded(data, entry);
+        this.createDatasetIfNeeded(data, entry, entry.key);
+        this.addFieldTypeData(data, entry.isolate, entry.id, entry.key,
+          entry.tagged_fields, entry.embedder_fields,
+          entry.unboxed_double_fields, entry.other_raw_fields);
       } else if (entry.type === 'instance_type_data') {
         if (entry.id in data[entry.isolate].gcs) {
-          this.createOrUpdateEntryIfNeeded(data, keys, entry);
-          this.createDatasetIfNeeded(data, keys, entry, entry.key);
+          this.createOrUpdateEntryIfNeeded(data, entry);
+          this.createDatasetIfNeeded(data, entry, entry.key);
           this.addInstanceTypeData(
-              data, keys, entry.isolate, entry.id, entry.key,
+              data, entry.isolate, entry.id, entry.key,
               entry.instance_type_name, entry);
         }
       } else if (entry.type === 'bucket_sizes') {
         if (entry.id in data[entry.isolate].gcs) {
-          this.createOrUpdateEntryIfNeeded(data, keys, entry);
-          this.createDatasetIfNeeded(data, keys, entry, entry.key);
+          this.createOrUpdateEntryIfNeeded(data, entry);
+          this.createDatasetIfNeeded(data, entry, entry.key);
           data[entry.isolate].gcs[entry.id][entry.key].bucket_sizes =
               entry.sizes;
         }
@@ -292,7 +303,6 @@
         console.log('Unknown entry type: ' + entry.type);
       }
     }
-    this.extendAndSanitizeModel(data, keys);
     return data;
   }
 }
diff --git a/src/v8/tools/ic-explorer.html b/src/v8/tools/ic-explorer.html
index f60a356..aede91e 100644
--- a/src/v8/tools/ic-explorer.html
+++ b/src/v8/tools/ic-explorer.html
@@ -1,3 +1,4 @@
+<!DOCTYPE html>
 <html>
 <!--
 Copyright 2016 the V8 project authors. All rights reserved.  Use of this source
@@ -5,6 +6,8 @@
 -->
 
 <head>
+  <meta charset="utf-8">
+  <title>V8 IC explorer</title>
   <style>
     html {
       font-family: monospace;
@@ -46,16 +49,16 @@
       padding: 0.5em 0 0.2em 0;
     }
   </style>
-  <script src="./splaytree.js" type="text/javascript"></script>
-  <script src="./codemap.js" type="text/javascript"></script>
-  <script src="./csvparser.js" type="text/javascript"></script>
-  <script src="./consarray.js" type="text/javascript"></script>
-  <script src="./profile.js" type="text/javascript"></script>
-  <script src="./profile_view.js" type="text/javascript"></script>
-  <script src="./logreader.js" type="text/javascript"></script>
-  <script src="./arguments.js" type="text/javascript"></script>
-  <script src="./ic-processor.js" type="text/javascript"></script>
-  <script src="./SourceMap.js" type="text/javascript"></script>
+  <script src="./splaytree.js"></script>
+  <script src="./codemap.js"></script>
+  <script src="./csvparser.js"></script>
+  <script src="./consarray.js"></script>
+  <script src="./profile.js"></script>
+  <script src="./profile_view.js"></script>
+  <script src="./logreader.js"></script>
+  <script src="./arguments.js"></script>
+  <script src="./ic-processor.js"></script>
+  <script src="./SourceMap.js"></script>
 
   <script>
     "use strict"
diff --git a/src/v8/tools/ic-processor.js b/src/v8/tools/ic-processor.js
index 93f40b3..a97fe0e 100644
--- a/src/v8/tools/ic-processor.js
+++ b/src/v8/tools/ic-processor.js
@@ -32,11 +32,12 @@
 
 
 function IcProcessor() {
-  var propertyICParser = [parseInt, parseInt, parseInt, null, null, parseInt,
-                          null, null, null];
+  var propertyICParser = [parseInt, parseInt, parseInt, parseString,
+      parseString, parseInt, parseString, parseString, parseString];
   LogReader.call(this, {
       'code-creation': {
-          parsers: [null, parseInt, parseInt, parseInt, parseInt, null, 'var-args'],
+          parsers: [parseString, parseInt, parseInt, parseInt, parseInt,
+              parseString, parseVarArgs],
           processor: this.processCodeCreation },
       'code-move': { parsers: [parseInt, parseInt],
           processor: this.processCodeMove },
@@ -44,6 +45,12 @@
           processor: this.processCodeDelete },
       'sfi-move': { parsers: [parseInt, parseInt],
           processor: this.processFunctionMove },
+      'LoadGlobalIC': {
+        parsers : propertyICParser,
+        processor: this.processPropertyIC.bind(this, "LoadGlobalIC") },
+      'StoreGlobalIC': {
+        parsers : propertyICParser,
+        processor: this.processPropertyIC.bind(this, "StoreGlobalIC") },
       'LoadIC': {
         parsers : propertyICParser,
         processor: this.processPropertyIC.bind(this, "LoadIC") },
@@ -56,14 +63,19 @@
       'KeyedStoreIC': {
         parsers : propertyICParser,
         processor: this.processPropertyIC.bind(this, "KeyedStoreIC") },
+      'StoreInArrayLiteralIC': {
+        parsers : propertyICParser,
+        processor: this.processPropertyIC.bind(this, "StoreInArrayLiteralIC") },
       });
-  this.deserializedEntriesNames_ = [];
   this.profile_ = new Profile();
 
+  this.LoadGlobalIC = 0;
+  this.StoreGlobalIC = 0;
   this.LoadIC = 0;
   this.StoreIC = 0;
   this.KeyedLoadIC = 0;
   this.KeyedStoreIC = 0;
+  this.StoreInArrayLiteralIC = 0;
 }
 inherits(IcProcessor, LogReader);
 
@@ -100,10 +112,13 @@
   }
   print();
   print("=====================");
+  print("LoadGlobal: " + this.LoadGlobalIC);
+  print("StoreGlobal: " + this.StoreGlobalIC);
   print("Load: " + this.LoadIC);
   print("Store: " + this.StoreIC);
   print("KeyedLoad: " + this.KeyedLoadIC);
   print("KeyedStore: " + this.KeyedStoreIC);
+  print("StoreInArrayLiteral: " + this.StoreInArrayLiteralIC);
 };
 
 IcProcessor.prototype.addEntry = function(entry) {
@@ -112,10 +127,6 @@
 
 IcProcessor.prototype.processCodeCreation = function(
     type, kind, timestamp, start, size, name, maybe_func) {
-  name = this.deserializedEntriesNames_[start] || name;
-  if (name.startsWith("onComplete")) {
-    console.log(name);
-  }
   if (maybe_func.length) {
     var funcAddr = parseInt(maybe_func[0]);
     var state = parseState(maybe_func[1]);
diff --git a/src/v8/tools/ignition/bytecode_dispatches_report.py b/src/v8/tools/ignition/bytecode_dispatches_report.py
index 97f8e83..aa5a9c9 100755
--- a/src/v8/tools/ignition/bytecode_dispatches_report.py
+++ b/src/v8/tools/ignition/bytecode_dispatches_report.py
@@ -5,6 +5,9 @@
 # found in the LICENSE file.
 #
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import argparse
 import heapq
 import json
@@ -54,8 +57,8 @@
   for source, counters_from_source in iteritems(dispatches_table):
     for destination, counter in iteritems(counters_from_source):
       if counter == __COUNTER_MAX:
-        print "WARNING: {} -> {} may have saturated.".format(source,
-                                                             destination)
+        print("WARNING: {} -> {} may have saturated.".format(source,
+                                                             destination))
 
 
 def find_top_bytecode_dispatch_pairs(dispatches_table, top_count):
@@ -71,9 +74,9 @@
 def print_top_bytecode_dispatch_pairs(dispatches_table, top_count):
   top_bytecode_dispatch_pairs = (
     find_top_bytecode_dispatch_pairs(dispatches_table, top_count))
-  print "Top {} bytecode dispatch pairs:".format(top_count)
+  print("Top {} bytecode dispatch pairs:".format(top_count))
   for source, destination, counter in top_bytecode_dispatch_pairs:
-    print "{:>12d}\t{} -> {}".format(counter, source, destination)
+    print("{:>12d}\t{} -> {}".format(counter, source, destination))
 
 
 def find_top_bytecodes(dispatches_table):
@@ -87,9 +90,9 @@
 
 def print_top_bytecodes(dispatches_table):
   top_bytecodes = find_top_bytecodes(dispatches_table)
-  print "Top bytecodes:"
+  print("Top bytecodes:")
   for bytecode, counter in top_bytecodes:
-    print "{:>12d}\t{}".format(counter, bytecode)
+    print("{:>12d}\t{}".format(counter, bytecode))
 
 
 def find_top_dispatch_sources_and_destinations(
@@ -116,13 +119,13 @@
                                                 top_count, sort_relative):
   top_sources, top_destinations = find_top_dispatch_sources_and_destinations(
       dispatches_table, bytecode, top_count, sort_relative)
-  print "Top sources of dispatches to {}:".format(bytecode)
+  print("Top sources of dispatches to {}:".format(bytecode))
   for source_name, counter, ratio in top_sources:
-    print "{:>12d}\t{:>5.1f}%\t{}".format(counter, ratio * 100, source_name)
+    print("{:>12d}\t{:>5.1f}%\t{}".format(counter, ratio * 100, source_name))
 
-  print "\nTop destinations of dispatches from {}:".format(bytecode)
+  print("\nTop destinations of dispatches from {}:".format(bytecode))
   for destination_name, counter, ratio in top_destinations:
-    print "{:>12d}\t{:>5.1f}%\t{}".format(counter, ratio * 100, destination_name)
+    print("{:>12d}\t{:>5.1f}%\t{}".format(counter, ratio * 100, destination_name))
 
 
 def build_counters_matrix(dispatches_table):
diff --git a/src/v8/tools/ignition/linux_perf_bytecode_annotate.py b/src/v8/tools/ignition/linux_perf_bytecode_annotate.py
index b242232..96cb0ee 100755
--- a/src/v8/tools/ignition/linux_perf_bytecode_annotate.py
+++ b/src/v8/tools/ignition/linux_perf_bytecode_annotate.py
@@ -1,10 +1,13 @@
-#! /usr/bin/python2
+#! /usr/bin/python
 #
 # Copyright 2016 the V8 project authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 #
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import argparse
 import collections
 import os
@@ -94,18 +97,18 @@
     return offsets.pop() if offsets else -1
 
   current_offset = next_offset()
-  print current_offset;
+  print(current_offset);
 
   for line in bytecode_disassembly:
     disassembly_offset = int(line.split()[1])
     if disassembly_offset == current_offset:
       count = offset_counts[current_offset]
       percentage = 100.0 * count / total
-      print "{:>8d} ({:>5.1f}%) ".format(count, percentage),
+      print("{:>8d} ({:>5.1f}%) ".format(count, percentage), end=' ')
       current_offset = next_offset()
     else:
-      print "                ",
-    print line
+      print("                ", end=' ')
+    print(line)
 
   if offsets:
     print ("WARNING: Offsets not empty. Output is most likely invalid due to "
diff --git a/src/v8/tools/js2c.py b/src/v8/tools/js2c.py
index 0107436..b94f3ad 100755
--- a/src/v8/tools/js2c.py
+++ b/src/v8/tools/js2c.py
@@ -31,9 +31,11 @@
 # char arrays. It is used for embedded JavaScript code in the V8
 # library.
 
+# for py2/py3 compatibility
+from functools import reduce
+
 import os, re
 import optparse
-import jsmin
 import textwrap
 
 
@@ -96,161 +98,6 @@
   return lines
 
 
-def ExpandMacroDefinition(lines, pos, name_pattern, macro, expander):
-  pattern_match = name_pattern.search(lines, pos)
-  while pattern_match is not None:
-    # Scan over the arguments
-    height = 1
-    start = pattern_match.start()
-    end = pattern_match.end()
-    assert lines[end - 1] == '('
-    last_match = end
-    arg_index = [0]  # Wrap state into array, to work around Python "scoping"
-    mapping = { }
-    def add_arg(str):
-      # Remember to expand recursively in the arguments
-      if arg_index[0] >= len(macro.args):
-        lineno = lines.count(os.linesep, 0, start) + 1
-        raise Error('line %s: Too many arguments for macro "%s"' % (lineno, name_pattern.pattern))
-      replacement = expander(str.strip())
-      mapping[macro.args[arg_index[0]]] = replacement
-      arg_index[0] += 1
-    while end < len(lines) and height > 0:
-      # We don't count commas at higher nesting levels.
-      if lines[end] == ',' and height == 1:
-        add_arg(lines[last_match:end])
-        last_match = end + 1
-      elif lines[end] in ['(', '{', '[']:
-        height = height + 1
-      elif lines[end] in [')', '}', ']']:
-        height = height - 1
-      end = end + 1
-    # Remember to add the last match.
-    add_arg(lines[last_match:end-1])
-    if arg_index[0] < len(macro.args) -1:
-      lineno = lines.count(os.linesep, 0, start) + 1
-      raise Error('line %s: Too few arguments for macro "%s"' % (lineno, name_pattern.pattern))
-    result = macro.expand(mapping)
-    # Replace the occurrence of the macro with the expansion
-    lines = lines[:start] + result + lines[end:]
-    pattern_match = name_pattern.search(lines, start + len(result))
-  return lines
-
-def ExpandMacros(lines, macros):
-  # We allow macros to depend on the previously declared macros, but
-  # we don't allow self-dependecies or recursion.
-  for name_pattern, macro in reversed(macros):
-    def expander(s):
-      return ExpandMacros(s, macros)
-    lines = ExpandMacroDefinition(lines, 0, name_pattern, macro, expander)
-  return lines
-
-class TextMacro:
-  def __init__(self, args, body):
-    self.args = args
-    self.body = body
-  def expand(self, mapping):
-    # Keys could be substrings of earlier values. To avoid unintended
-    # clobbering, apply all replacements simultaneously.
-    any_key_pattern = "|".join(re.escape(k) for k in mapping.iterkeys())
-    def replace(match):
-      return mapping[match.group(0)]
-    return re.sub(any_key_pattern, replace, self.body)
-
-CONST_PATTERN = re.compile(r'^define\s+([a-zA-Z0-9_]+)\s*=\s*([^;]*);$')
-MACRO_PATTERN = re.compile(r'^macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*=\s*([^;]*);$')
-
-
-def ReadMacros(lines):
-  constants = []
-  macros = []
-  for line in lines.split('\n'):
-    hash = line.find('#')
-    if hash != -1: line = line[:hash]
-    line = line.strip()
-    if len(line) is 0: continue
-    const_match = CONST_PATTERN.match(line)
-    if const_match:
-      name = const_match.group(1)
-      value = const_match.group(2).strip()
-      constants.append((re.compile("\\b%s\\b" % name), value))
-    else:
-      macro_match = MACRO_PATTERN.match(line)
-      if macro_match:
-        name = macro_match.group(1)
-        args = [match.strip() for match in macro_match.group(2).split(',')]
-        body = macro_match.group(3).strip()
-        macros.append((re.compile("\\b%s\\(" % name), TextMacro(args, body)))
-      else:
-        raise Error("Illegal line: " + line)
-  return (constants, macros)
-
-
-TEMPLATE_PATTERN = re.compile(r'^\s+T\(([A-Z][a-zA-Z0-9]*),')
-
-def ReadMessageTemplates(lines):
-  templates = []
-  index = 0
-  for line in lines.split('\n'):
-    template_match = TEMPLATE_PATTERN.match(line)
-    if template_match:
-      name = "k%s" % template_match.group(1)
-      value = index
-      index = index + 1
-      templates.append((re.compile("\\b%s\\b" % name), value))
-  return templates
-
-INLINE_MACRO_PATTERN = re.compile(r'macro\s+([a-zA-Z0-9_]+)\s*\(([^)]*)\)\s*\n')
-INLINE_MACRO_END_PATTERN = re.compile(r'endmacro\s*\n')
-
-def ExpandInlineMacros(lines):
-  pos = 0
-  while True:
-    macro_match = INLINE_MACRO_PATTERN.search(lines, pos)
-    if macro_match is None:
-      # no more macros
-      return lines
-    name = macro_match.group(1)
-    args = [match.strip() for match in macro_match.group(2).split(',')]
-    end_macro_match = INLINE_MACRO_END_PATTERN.search(lines, macro_match.end());
-    if end_macro_match is None:
-      raise Error("Macro %s unclosed" % name)
-    body = lines[macro_match.end():end_macro_match.start()]
-
-    # remove macro definition
-    lines = lines[:macro_match.start()] + lines[end_macro_match.end():]
-    name_pattern = re.compile("\\b%s\\(" % name)
-    macro = TextMacro(args, body)
-
-    # advance position to where the macro definition was
-    pos = macro_match.start()
-
-    def non_expander(s):
-      return s
-    lines = ExpandMacroDefinition(lines, pos, name_pattern, macro, non_expander)
-
-
-INLINE_CONSTANT_PATTERN = re.compile(r'define\s+([a-zA-Z0-9_]+)\s*=\s*([^;\n]+);\n')
-
-def ExpandInlineConstants(lines):
-  pos = 0
-  while True:
-    const_match = INLINE_CONSTANT_PATTERN.search(lines, pos)
-    if const_match is None:
-      # no more constants
-      return lines
-    name = const_match.group(1)
-    replacement = const_match.group(2)
-    name_pattern = re.compile("\\b%s\\b" % name)
-
-    # remove constant definition and replace
-    lines = (lines[:const_match.start()] +
-             re.sub(name_pattern, replacement, lines[const_match.end():]))
-
-    # advance position to where the constant definition was
-    pos = const_match.start()
-
-
 HEADER_TEMPLATE = """\
 // Copyright 2011 Google Inc. All Rights Reserved.
 
@@ -258,9 +105,9 @@
 // want to make changes to this file you should either change the
 // javascript source files or the GYP script.
 
-#include "src/v8.h"
+#include "src/init/v8.h"
 #include "src/snapshot/natives.h"
-#include "src/utils.h"
+#include "src/utils/utils.h"
 
 namespace v8 {
 namespace internal {
@@ -273,11 +120,6 @@
   }
 
   template <>
-  int NativesCollection<%(type)s>::GetDebuggerCount() {
-    return %(debugger_count)i;
-  }
-
-  template <>
   int NativesCollection<%(type)s>::GetIndex(const char* name) {
 %(get_index_cases)s\
     return -1;
@@ -323,33 +165,16 @@
 """
 
 
-def BuildFilterChain(macro_filename, message_template_file):
+def BuildFilterChain():
   """Build the chain of filter functions to be applied to the sources.
 
-  Args:
-    macro_filename: Name of the macro file, if any.
-
   Returns:
     A function (string -> string) that processes a source file.
   """
-  filter_chain = []
-
-  if macro_filename:
-    (consts, macros) = ReadMacros(ReadFile(macro_filename))
-    filter_chain.append(lambda l: ExpandMacros(l, macros))
-    filter_chain.append(lambda l: ExpandConstants(l, consts))
-
-  if message_template_file:
-    message_templates = ReadMessageTemplates(ReadFile(message_template_file))
-    filter_chain.append(lambda l: ExpandConstants(l, message_templates))
-
-  filter_chain.extend([
+  filter_chain = [
     RemoveCommentsEmptyLinesAndWhitespace,
-    ExpandInlineMacros,
-    ExpandInlineConstants,
     Validate,
-    jsmin.JavaScriptMinifier().JSMinify
-  ])
+  ]
 
   def chain(f1, f2):
     return lambda x: f2(f1(x))
@@ -363,25 +188,12 @@
   def __init__(self):
     self.names = []
     self.modules = []
-    self.is_debugger_id = []
-
-
-def IsDebuggerFile(filename):
-  return os.path.basename(os.path.dirname(filename)) == "debug"
-
-def IsMacroFile(filename):
-  return filename.endswith("macros.py")
-
-def IsMessageTemplateFile(filename):
-  return filename.endswith("messages.h")
-
 
 def PrepareSources(source_files, native_type, emit_js):
   """Read, prepare and assemble the list of source files.
 
   Args:
-    source_files: List of JavaScript-ish source files. A file named macros.py
-        will be treated as a list of macros.
+    source_files: List of JavaScript-ish source files.
     native_type: String corresponding to a NativeType enum value, allowing us
         to treat different types of sources differently.
     emit_js: True if we should skip the byte conversion and just leave the
@@ -390,29 +202,7 @@
   Returns:
     An instance of Sources.
   """
-  macro_file = None
-  macro_files = filter(IsMacroFile, source_files)
-  assert len(macro_files) in [0, 1]
-  if macro_files:
-    source_files.remove(macro_files[0])
-    macro_file = macro_files[0]
-
-  message_template_file = None
-  message_template_files = filter(IsMessageTemplateFile, source_files)
-  assert len(message_template_files) in [0, 1]
-  if message_template_files:
-    source_files.remove(message_template_files[0])
-    message_template_file = message_template_files[0]
-
-  filters = None
-  if native_type in ("EXTRAS", "EXPERIMENTAL_EXTRAS"):
-    filters = BuildExtraFilterChain()
-  else:
-    filters = BuildFilterChain(macro_file, message_template_file)
-
-  # Sort 'debugger' sources first.
-  source_files = sorted(source_files,
-                        lambda l,r: IsDebuggerFile(r) - IsDebuggerFile(l))
+  filters = BuildFilterChain()
 
   source_files_and_contents = [(f, ReadFile(f)) for f in source_files]
 
@@ -433,9 +223,6 @@
 
     result.modules.append(lines)
 
-    is_debugger = IsDebuggerFile(source)
-    result.is_debugger_id.append(is_debugger)
-
     name = os.path.basename(source)[:-3]
     result.names.append(name)
 
@@ -458,14 +245,17 @@
   raw_sources = "".join(sources.modules)
 
   # The sources are expected to be ASCII-only.
-  assert not filter(lambda value: ord(value) >= 128, raw_sources)
+  try:
+    raw_sources.encode('ascii')
+  except UnicodeEncodeError:
+    assert False
 
   # Loop over modules and build up indices into the source blob:
   get_index_cases = []
   get_script_name_cases = []
   get_script_source_cases = []
   offset = 0
-  for i in xrange(len(sources.modules)):
+  for i in range(len(sources.modules)):
     native_name = "native %s.js" % sources.names[i]
     d = {
         "i": i,
@@ -483,7 +273,6 @@
 
   metadata = {
     "builtin_count": len(sources.modules),
-    "debugger_count": sum(sources.is_debugger_id),
     "sources_declaration": SOURCES_DECLARATION % ToCArray(source_bytes),
     "total_length": total_length,
     "get_index_cases": "".join(get_index_cases),
@@ -507,15 +296,15 @@
   value_with_length = (value << 2) | (size - 1)
 
   byte_sequence = bytearray()
-  for i in xrange(size):
+  for i in range(size):
     byte_sequence.append(value_with_length & 255)
     value_with_length >>= 8;
   blob_file.write(byte_sequence)
 
 
 def PutStr(blob_file, value):
-  PutInt(blob_file, len(value));
-  blob_file.write(value);
+  PutInt(blob_file, len(value.encode()))
+  blob_file.write(value.encode())
 
 
 def WriteStartupBlob(sources, startup_blob):
@@ -528,14 +317,8 @@
   """
   output = open(startup_blob, "wb")
 
-  debug_sources = sum(sources.is_debugger_id);
-  PutInt(output, debug_sources)
-  for i in xrange(debug_sources):
-    PutStr(output, sources.names[i]);
-    PutStr(output, sources.modules[i]);
-
-  PutInt(output, len(sources.names) - debug_sources)
-  for i in xrange(debug_sources, len(sources.names)):
+  PutInt(output, len(sources.names))
+  for i in range(len(sources.names)):
     PutStr(output, sources.names[i]);
     PutStr(output, sources.modules[i]);
 
@@ -578,7 +361,7 @@
   parser.set_usage("""js2c out.cc type sources.js ...
         out.cc: C code to be generated.
         type: type parameter for NativesCollection template.
-        sources.js: JS internal sources or macros.py.""")
+        sources.js: JS internal sources.""")
   (options, args) = parser.parse_args()
   JS2C(args[2:],
        args[0],
diff --git a/src/v8/tools/jsfunfuzz/BUILD.gn b/src/v8/tools/jsfunfuzz/BUILD.gn
new file mode 100644
index 0000000..3c40460
--- /dev/null
+++ b/src/v8/tools/jsfunfuzz/BUILD.gn
@@ -0,0 +1,19 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("../../gni/v8.gni")
+
+group("v8_jsfunfuzz") {
+  testonly = true
+
+  data_deps = [
+    "../..:d8",
+  ]
+
+  data = [
+    # Grab current directory. This avoids adding logic for checking the
+    # existence of the jsfunfuzz subdirectory.
+    "./",
+  ]
+}
diff --git a/src/v8/tools/jsfunfuzz/fuzz-harness.sh b/src/v8/tools/jsfunfuzz/fuzz-harness.sh
index 8d064b2..fa4f9d9 100755
--- a/src/v8/tools/jsfunfuzz/fuzz-harness.sh
+++ b/src/v8/tools/jsfunfuzz/fuzz-harness.sh
@@ -51,8 +51,17 @@
     cat << EOF | patch -s -p0 -d "$v8_root"
 --- tools/jsfunfuzz/jsfunfuzz/multi_timed_run.py~
 +++ tools/jsfunfuzz/jsfunfuzz/multi_timed_run.py
-@@ -125,7 +125,7 @@
- 
+@@ -118,19 +118,19 @@
+-def showtail(logfilename):
++def showtail(logfilename, method="tail"):
+-   cmd = "tail -n 20 %s" % logfilename
++   cmd = "%s -n 20 %s" % (method, logfilename)
+    print cmd
+    print ""
+    os.system(cmd)
+    print ""
+    print ""
+
  def many_timed_runs():
      iteration = 0
 -    while True:
@@ -60,6 +69,12 @@
          iteration += 1
          logfilename = "w%d" % iteration
          one_timed_run(logfilename)
+         if not succeeded(logfilename):
+             showtail(logfilename)
+-            showtail("err-" + logfilename)
++            showtail("err-" + logfilename, method="head")
+
+             many_timed_runs()
 EOF
   fi
 
diff --git a/src/v8/tools/jsfunfuzz/jsfunfuzz.tar.gz.sha1 b/src/v8/tools/jsfunfuzz/jsfunfuzz.tar.gz.sha1
index 4499960..d12877e 100644
--- a/src/v8/tools/jsfunfuzz/jsfunfuzz.tar.gz.sha1
+++ b/src/v8/tools/jsfunfuzz/jsfunfuzz.tar.gz.sha1
@@ -1 +1 @@
-d92e66273ea2a0da89456a977edd0224a8e837e9
\ No newline at end of file
+936f3baf5a24313da5eb98195d5e01d76fe602fb
\ No newline at end of file
diff --git a/src/v8/tools/linux-tick-processor b/src/v8/tools/linux-tick-processor
index 705e07d..8b856ca 100755
--- a/src/v8/tools/linux-tick-processor
+++ b/src/v8/tools/linux-tick-processor
@@ -27,8 +27,8 @@
 fi
 
 if [ ! -x "$d8_exec" ]; then
-  echo "d8 shell not found in $D8_PATH"
-  echo "To build, execute 'make native' from the V8 directory"
+  echo "d8 shell not found in $D8_PATH" >&2
+  echo "Please provide path to d8 as env var in D8_PATH" >&2
   exit 1
 fi
 
diff --git a/src/v8/tools/ll_prof.py b/src/v8/tools/ll_prof.py
index ca2cb00..4499874 100755
--- a/src/v8/tools/ll_prof.py
+++ b/src/v8/tools/ll_prof.py
@@ -27,6 +27,10 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import bisect
 import collections
 import ctypes
@@ -157,7 +161,7 @@
     # Print annotated lines.
     address = lines[0][0]
     total_count = 0
-    for i in xrange(len(lines)):
+    for i in range(len(lines)):
       start_offset = lines[i][0] - address
       if i == len(lines) - 1:
         end_offset = self.end_address - self.start_address
@@ -183,10 +187,10 @@
         # 6 for the percentage number, incl. the '.'
         # 1 for the '%' sign
         # => 15
-        print "%5d | %6.2f%% %x(%d): %s" % (count, percent, offset, offset, lines[i][1])
+        print("%5d | %6.2f%% %x(%d): %s" % (count, percent, offset, offset, lines[i][1]))
       else:
-        print "%s %x(%d): %s" % (" " * 15, offset, offset, lines[i][1])
-    print
+        print("%s %x(%d): %s" % (" " * 15, offset, offset, lines[i][1]))
+    print()
     assert total_count == self.self_ticks, \
         "Lost ticks (%d != %d) in %s" % (total_count, self.self_ticks, self)
 
@@ -267,9 +271,8 @@
     pages = 0
     while page_id < limit_id:
       if max_pages >= 0 and pages > max_pages:
-        print >>sys.stderr, \
-            "Warning: page limit (%d) reached for %s [%s]" % (
-            max_pages, code.name, code.origin)
+        print("Warning: page limit (%d) reached for %s [%s]" % (
+            max_pages, code.name, code.origin), file=sys.stderr)
         break
       if page_id in self.pages:
         page = self.pages[page_id]
@@ -309,7 +312,7 @@
 
   def Print(self):
     for code in self.AllCode():
-      print code
+      print(code)
 
   def Find(self, pc):
     if pc < self.min_address or pc >= self.max_address:
@@ -411,7 +414,7 @@
           continue
         code = self.code_map.Find(old_start_address)
         if not code:
-          print >>sys.stderr, "Warning: Not found %x" % old_start_address
+          print("Warning: Not found %x" % old_start_address, file=sys.stderr)
           continue
         assert code.start_address == old_start_address, \
             "Inexact move address %x for %s" % (old_start_address, code)
@@ -591,7 +594,7 @@
     self.trace = mmap.mmap(self.trace_file.fileno(), 0, mmap.MAP_PRIVATE)
     self.trace_header = TRACE_HEADER_DESC.Read(self.trace, 0)
     if self.trace_header.magic != TraceReader._TRACE_HEADER_MAGIC:
-      print >>sys.stderr, "Warning: unsupported trace header magic"
+      print("Warning: unsupported trace header magic", file=sys.stderr)
     self.offset = self.trace_header.data_offset
     self.limit = self.trace_header.data_offset + self.trace_header.data_size
     assert self.limit <= self.trace.size(), \
@@ -642,7 +645,7 @@
       return sample
     sample.ips = []
     offset += self.header_size + ctypes.sizeof(sample)
-    for _ in xrange(sample.nr):
+    for _ in range(sample.nr):
       sample.ips.append(
         self.ip_struct.from_buffer(self.trace, offset).value)
       offset += self.ip_size
@@ -786,7 +789,7 @@
 
   def _LoadKernelSymbols(self, code_map):
     if not os.path.exists(KERNEL_ALLSYMS_FILE):
-      print >>sys.stderr, "Warning: %s not found" % KERNEL_ALLSYMS_FILE
+      print("Warning: %s not found" % KERNEL_ALLSYMS_FILE, file=sys.stderr)
       return False
     kallsyms = open(KERNEL_ALLSYMS_FILE, "r")
     code = None
@@ -804,35 +807,35 @@
 
 
 def PrintReport(code_map, library_repo, arch, ticks, options):
-  print "Ticks per symbol:"
+  print("Ticks per symbol:")
   used_code = [code for code in code_map.UsedCode()]
   used_code.sort(key=lambda x: x.self_ticks, reverse=True)
   for i, code in enumerate(used_code):
     code_ticks = code.self_ticks
-    print "%10d %5.1f%% %s [%s]" % (code_ticks, 100. * code_ticks / ticks,
-                                    code.FullName(), code.origin)
+    print("%10d %5.1f%% %s [%s]" % (code_ticks, 100. * code_ticks / ticks,
+                                    code.FullName(), code.origin))
     if options.disasm_all or i < options.disasm_top:
       code.PrintAnnotated(arch, options)
-  print
-  print "Ticks per library:"
+  print()
+  print("Ticks per library:")
   mmap_infos = [m for m in library_repo.infos if m.ticks > 0]
   mmap_infos.sort(key=lambda m: m.ticks, reverse=True)
   for mmap_info in mmap_infos:
     mmap_ticks = mmap_info.ticks
-    print "%10d %5.1f%% %s" % (mmap_ticks, 100. * mmap_ticks / ticks,
-                               mmap_info.unique_name)
+    print("%10d %5.1f%% %s" % (mmap_ticks, 100. * mmap_ticks / ticks,
+                               mmap_info.unique_name))
 
 
 def PrintDot(code_map, options):
-  print "digraph G {"
+  print("digraph G {")
   for code in code_map.UsedCode():
     if code.self_ticks < 10:
       continue
-    print "n%d [shape=box,label=\"%s\"];" % (code.id, code.name)
+    print("n%d [shape=box,label=\"%s\"];" % (code.id, code.name))
     if code.callee_ticks:
       for callee, ticks in code.callee_ticks.iteritems():
-        print "n%d -> n%d [label=\"%d\"];" % (code.id, callee.id, ticks)
-  print "}"
+        print("n%d -> n%d [label=\"%d\"];" % (code.id, callee.id, ticks))
+  print("}")
 
 
 if __name__ == "__main__":
@@ -877,8 +880,8 @@
   options, args = parser.parse_args()
 
   if not options.quiet:
-    print "V8 log: %s, %s.ll" % (options.log, options.log)
-    print "Perf trace file: %s" % options.trace
+    print("V8 log: %s, %s.ll" % (options.log, options.log))
+    print("Perf trace file: %s" % options.trace)
 
   V8_GC_FAKE_MMAP = options.gc_fake_mmap
   HOST_ROOT = options.host_root
@@ -886,7 +889,7 @@
     disasm.OBJDUMP_BIN = options.objdump
     OBJDUMP_BIN = options.objdump
   else:
-    print "Cannot find %s, falling back to default objdump" % options.objdump
+    print("Cannot find %s, falling back to default objdump" % options.objdump)
 
   # Stats.
   events = 0
@@ -904,8 +907,8 @@
   log_reader = LogReader(log_name=options.log + ".ll",
                          code_map=code_map)
   if not options.quiet:
-    print "Generated code architecture: %s" % log_reader.arch
-    print
+    print("Generated code architecture: %s" % log_reader.arch)
+    print()
     sys.stdout.flush()
 
   # Process the code and trace logs.
@@ -968,11 +971,11 @@
       def PrintTicks(number, total, description):
         print("%10d %5.1f%% ticks in %s" %
               (number, 100.0*number/total, description))
-      print
-      print "Stats:"
-      print "%10d total trace events" % events
-      print "%10d total ticks" % ticks
-      print "%10d ticks not in symbols" % missed_ticks
+      print()
+      print("Stats:")
+      print("%10d total trace events" % events)
+      print("%10d total ticks" % ticks)
+      print("%10d ticks not in symbols" % missed_ticks)
       unaccounted = "unaccounted ticks"
       if really_missed_ticks > 0:
         unaccounted += " (probably in the kernel, try --kernel)"
@@ -980,10 +983,10 @@
       PrintTicks(optimized_ticks, ticks, "ticks in optimized code")
       PrintTicks(generated_ticks, ticks, "ticks in other lazily compiled code")
       PrintTicks(v8_internal_ticks, ticks, "ticks in v8::internal::*")
-      print "%10d total symbols" % len([c for c in code_map.AllCode()])
-      print "%10d used symbols" % len([c for c in code_map.UsedCode()])
-      print "%9.2fs library processing time" % mmap_time
-      print "%9.2fs tick processing time" % sample_time
+      print("%10d total symbols" % len([c for c in code_map.AllCode()]))
+      print("%10d used symbols" % len([c for c in code_map.UsedCode()]))
+      print("%9.2fs library processing time" % mmap_time)
+      print("%9.2fs tick processing time" % sample_time)
 
   log_reader.Dispose()
   trace_reader.Dispose()
diff --git a/src/v8/tools/lldb_commands.py b/src/v8/tools/lldb_commands.py
index d8946ee..2884cd6 100644
--- a/src/v8/tools/lldb_commands.py
+++ b/src/v8/tools/lldb_commands.py
@@ -2,24 +2,69 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+# Load this file by adding this to your ~/.lldbinit:
+# command script import <this_dir>/lldb_commands.py
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import lldb
 import re
 
+#####################
+# Helper functions. #
+#####################
+def current_thread(debugger):
+  return debugger.GetSelectedTarget().GetProcess().GetSelectedThread()
+
+def current_frame(debugger):
+  return current_thread(debugger).GetSelectedFrame()
+
+def no_arg_cmd(debugger, cmd):
+  current_frame(debugger).EvaluateExpression(cmd)
+  print("")
+
+def ptr_arg_cmd(debugger, name, param, cmd):
+  if not param:
+    print("'{}' requires an argument".format(name))
+    return
+  param = '(void*)({})'.format(param)
+  no_arg_cmd(debugger, cmd.format(param))
+
+#####################
+# lldb commands.    #
+#####################
+def job(debugger, param, *args):
+  """Print a v8 heap object"""
+  ptr_arg_cmd(debugger, 'job', param, "_v8_internal_Print_Object({})")
+
+def jlh(debugger, param, *args):
+  """Print v8::Local handle value"""
+  ptr_arg_cmd(debugger, 'jlh', param,
+              "_v8_internal_Print_Object(*(v8::internal::Object**)(*{}))")
+
+def jco(debugger, param, *args):
+  """Print the code object at the given pc (default: current pc)"""
+  if not param:
+    param = str(current_frame(debugger).FindRegister("pc").value)
+  ptr_arg_cmd(debugger, 'jco', param, "_v8_internal_Print_Code({})")
+
+def jld(debugger, param, *args):
+  """Print a v8 LayoutDescriptor object"""
+  ptr_arg_cmd(debugger, 'jld', param,
+              "_v8_internal_Print_LayoutDescriptor({})")
+
+def jtt(debugger, param, *args):
+  """Print the transition tree of a v8 Map"""
+  ptr_arg_cmd(debugger, 'jtt', param, "_v8_internal_Print_TransitionTree({})")
+
 def jst(debugger, *args):
   """Print the current JavaScript stack trace"""
-  target = debugger.GetSelectedTarget()
-  process = target.GetProcess()
-  thread = process.GetSelectedThread()
-  frame = thread.GetSelectedFrame()
-  frame.EvaluateExpression("_v8_internal_Print_StackTrace();")
-  print("")
+  no_arg_cmd(debugger, "_v8_internal_Print_StackTrace()")
 
 def jss(debugger, *args):
   """Skip the jitted stack on x64 to where we entered JS last"""
-  target = debugger.GetSelectedTarget()
-  process = target.GetProcess()
-  thread = process.GetSelectedThread()
-  frame = thread.GetSelectedFrame()
+  frame = current_frame(debugger)
   js_entry_sp = frame.EvaluateExpression(
       "v8::internal::Isolate::Current()->thread_local_top()->js_entry_sp_;") \
        .GetValue()
@@ -36,10 +81,7 @@
   func_name_re = re.compile("([^(<]+)(?:\(.+\))?")
   assert_re = re.compile(
       "^v8::internal::Per\w+AssertType::(\w+)_ASSERT, (false|true)>")
-  target = debugger.GetSelectedTarget()
-  process = target.GetProcess()
-  thread = process.GetSelectedThread()
-  frame = thread.GetSelectedFrame()
+  thread = current_thread(debugger)
   for frame in thread:
     functionSignature = frame.GetDisplayFunctionName()
     if functionSignature is None:
@@ -66,7 +108,8 @@
       print("%s -> %s %s (%s)\033[0m" % (
           color, prefix, match.group(2), match.group(1)))
 
-def __lldb_init_module (debugger, dict):
-  debugger.HandleCommand('command script add -f lldb_commands.jst jst')
-  debugger.HandleCommand('command script add -f lldb_commands.jss jss')
-  debugger.HandleCommand('command script add -f lldb_commands.bta bta')
+def __lldb_init_module(debugger, dict):
+  debugger.HandleCommand('settings set target.x86-disassembly-flavor intel')
+  for cmd in ('job', 'jlh', 'jco', 'jld', 'jtt', 'jst', 'jss', 'bta'):
+    debugger.HandleCommand(
+      'command script add -f lldb_commands.{} {}'.format(cmd, cmd))
diff --git a/src/v8/tools/locs.py b/src/v8/tools/locs.py
new file mode 100755
index 0000000..bd1609d
--- /dev/null
+++ b/src/v8/tools/locs.py
@@ -0,0 +1,458 @@
+#!/usr/bin/env python
+
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+""" locs.py - Count lines of code before and after preprocessor expansion
+  Consult --help for more information.
+"""
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import argparse
+import json
+import multiprocessing
+import os
+import re
+import subprocess
+import sys
+import tempfile
+import time
+from collections import defaultdict
+from concurrent.futures import ThreadPoolExecutor
+from pathlib import Path
+
+# for py2/py3 compatibility
+try:
+  FileNotFoundError
+except NameError:
+  FileNotFoundError = IOError
+
+ARGPARSE = argparse.ArgumentParser(
+    description=("A script that computes LoC for a build dir"),
+    epilog="""Examples:
+ Count with default settings for build in out/Default:
+   locs.py --build-dir out/Default
+ Count only a custom group of files settings for build in out/Default:
+   tools/locs.py --build-dir out/Default
+                 --group src-compiler '\.\./\.\./src/compiler'
+                 --only src-compiler
+ Report the 10 files with the worst expansion:
+   tools/locs.py --build-dir out/Default --worst 10
+ Report the 10 files with the worst expansion in src/compiler:
+   tools/locs.py --build-dir out/Default --worst 10
+                 --group src-compiler '\.\./\.\./src/compiler'
+                 --only src-compiler
+ Report the 10 largest files after preprocessing:
+   tools/locs.py --build-dir out/Default --largest 10
+ Report the 10 smallest input files:
+   tools/locs.py --build-dir out/Default --smallest 10""",
+    formatter_class=argparse.RawTextHelpFormatter
+)
+
+ARGPARSE.add_argument(
+    '--json',
+    action='store_true',
+    default=False,
+    help="output json instead of short summary")
+ARGPARSE.add_argument(
+    '--build-dir',
+    type=str,
+    help="Use specified build dir and generate necessary files",
+    required=True)
+ARGPARSE.add_argument(
+    '--echocmd',
+    action='store_true',
+    default=False,
+    help="output command used to compute LoC")
+ARGPARSE.add_argument(
+    '--only',
+    action='append',
+    default=[],
+    help="Restrict counting to report group (can be passed multiple times)")
+ARGPARSE.add_argument(
+    '--not',
+    action='append',
+    default=[],
+    help="Exclude specific group (can be passed multiple times)")
+ARGPARSE.add_argument(
+    '--list-groups',
+    action='store_true',
+    default=False,
+    help="List groups and associated regular expressions")
+ARGPARSE.add_argument(
+    '--group',
+    nargs=2,
+    action='append',
+    default=[],
+    help="Add a report group (can be passed multiple times)")
+ARGPARSE.add_argument(
+    '--largest',
+    type=int,
+    nargs='?',
+    default=0,
+    const=3,
+    help="Output the n largest files after preprocessing")
+ARGPARSE.add_argument(
+    '--worst',
+    type=int,
+    nargs='?',
+    default=0,
+    const=3,
+    help="Output the n files with worst expansion by preprocessing")
+ARGPARSE.add_argument(
+    '--smallest',
+    type=int,
+    nargs='?',
+    default=0,
+    const=3,
+    help="Output the n smallest input files")
+ARGPARSE.add_argument(
+    '--files',
+    type=int,
+    nargs='?',
+    default=0,
+    const=3,
+    help="Output results for each file separately")
+ARGPARSE.add_argument(
+    '--jobs',
+    type=int,
+    default=multiprocessing.cpu_count(),
+    help="Process specified number of files concurrently")
+
+ARGS = vars(ARGPARSE.parse_args())
+
+
+def MaxWidth(strings):
+  max_width = 0
+  for s in strings:
+    max_width = max(max_width, len(s))
+  return max_width
+
+
+def GenerateCompileCommandsAndBuild(build_dir, out):
+  if not os.path.isdir(build_dir):
+    print("Error: Specified build dir {} is not a directory.".format(
+        build_dir), file=sys.stderr)
+    exit(1)
+
+  autoninja = "autoninja -C {}".format(build_dir)
+  if subprocess.call(autoninja, shell=True, stdout=out) != 0:
+    print("Error: Building {} failed.".format(build_dir), file=sys.stderr)
+    exit(1)
+
+  compile_commands_file = "{}/compile_commands.json".format(build_dir)
+  print("Generating compile commands in {}.".format(
+      compile_commands_file), file=out)
+  ninja = "ninja -C {} -t compdb cxx cc > {}".format(
+      build_dir, compile_commands_file)
+  if subprocess.call(ninja, shell=True, stdout=out) != 0:
+    print("Error: Could not generate {} for {}.".format(
+        compile_commands_file, build_dir), file=sys.stderr)
+    exit(1)
+
+  ninja_deps_file = "{}/ninja-deps.txt".format(build_dir)
+  print("Generating ninja dependencies in {}.".format(
+      ninja_deps_file), file=out)
+  ninja = "ninja -C {} -t deps > {}".format(
+      build_dir, ninja_deps_file)
+  if subprocess.call(ninja, shell=True, stdout=out) != 0:
+    print("Error: Could not generate {} for {}.".format(
+        ninja_deps_file, build_dir), file=sys.stderr)
+    exit(1)
+
+  return compile_commands_file, ninja_deps_file
+
+
+def fmt_bytes(num_bytes):
+  if num_bytes > 1024*1024*1024:
+    return int(num_bytes / (1024*1024)), "MB"
+  elif num_bytes > 1024*1024:
+    return int(num_bytes / (1024)), "kB"
+  return int(num_bytes), " B"
+
+
+class CompilationData:
+  def __init__(self, loc, in_bytes, expanded, expanded_bytes):
+    self.loc = loc
+    self.in_bytes = in_bytes
+    self.expanded = expanded
+    self.expanded_bytes = expanded_bytes
+
+  def ratio(self):
+    return self.expanded / (self.loc+1)
+
+  def to_string(self):
+    exp_bytes, exp_unit = fmt_bytes(self.expanded_bytes)
+    in_bytes, in_unit = fmt_bytes(self.in_bytes)
+    return "{:>9,} LoC ({:>7,} {}) to {:>12,} LoC ({:>7,} {}) ({:>5.0f}x)".format(
+        self.loc, in_bytes, in_unit, self.expanded, exp_bytes, exp_unit, self.ratio())
+
+
+class File(CompilationData):
+  def __init__(self, file, target, loc, in_bytes, expanded, expanded_bytes):
+    super().__init__(loc, in_bytes, expanded, expanded_bytes)
+    self.file = file
+    self.target = target
+
+  def to_string(self):
+    return "{} {} {}".format(super().to_string(), self.file, self.target)
+
+
+class Group(CompilationData):
+  def __init__(self, name, regexp_string):
+    super().__init__(0, 0, 0, 0)
+    self.name = name
+    self.count = 0
+    self.regexp = re.compile(regexp_string)
+
+  def account(self, unit):
+    if (self.regexp.match(unit.file)):
+      self.loc += unit.loc
+      self.in_bytes += unit.in_bytes
+      self.expanded += unit.expanded
+      self.expanded_bytes += unit.expanded_bytes
+      self.count += 1
+
+  def to_string(self, name_width):
+    return "{:<{}} ({:>5} files): {}".format(
+        self.name, name_width, self.count, super().to_string())
+
+
+def SetupReportGroups():
+  default_report_groups = {"total": '.*',
+                           "src": '\\.\\./\\.\\./src',
+                           "test": '\\.\\./\\.\\./test',
+                           "third_party": '\\.\\./\\.\\./third_party',
+                           "gen": 'gen'}
+
+  report_groups = default_report_groups.copy()
+  report_groups.update(dict(ARGS['group']))
+
+  if ARGS['only']:
+    for only_arg in ARGS['only']:
+      if not only_arg in report_groups.keys():
+        print("Error: specified report group '{}' is not defined.".format(
+            ARGS['only']))
+        exit(1)
+      else:
+        report_groups = {
+            k: v for (k, v) in report_groups.items() if k in ARGS['only']}
+
+  if ARGS['not']:
+    report_groups = {
+        k: v for (k, v) in report_groups.items() if k not in ARGS['not']}
+
+  if ARGS['list_groups']:
+    print_cat_max_width = MaxWidth(list(report_groups.keys()) + ["Category"])
+    print("  {:<{}}  {}".format("Category",
+                                print_cat_max_width, "Regular expression"))
+    for cat, regexp_string in report_groups.items():
+      print("  {:<{}}: {}".format(
+          cat, print_cat_max_width, regexp_string))
+
+  report_groups = {k: Group(k, v) for (k, v) in report_groups.items()}
+
+  return report_groups
+
+
+class Results:
+  def __init__(self):
+    self.groups = SetupReportGroups()
+    self.units = {}
+    self.source_dependencies = {}
+    self.header_dependents = {}
+
+  def track(self, filename):
+    is_tracked = False
+    for group in self.groups.values():
+      if group.regexp.match(filename):
+        is_tracked = True
+    return is_tracked
+
+  def recordFile(self, filename, targetname, loc, in_bytes, expanded, expanded_bytes):
+    unit = File(filename, targetname, loc, in_bytes, expanded, expanded_bytes)
+    self.units[filename] = unit
+    for group in self.groups.values():
+      group.account(unit)
+
+  def maxGroupWidth(self):
+    return MaxWidth([v.name for v in self.groups.values()])
+
+  def printGroupResults(self, file):
+    for key in sorted(self.groups.keys()):
+      print(self.groups[key].to_string(self.maxGroupWidth()), file=file)
+
+  def printSorted(self, key, count, reverse, out):
+    for unit in sorted(list(self.units.values()), key=key, reverse=reverse)[:count]:
+      print(unit.to_string(), file=out)
+
+  def addHeaderDeps(self, source_dependencies, header_dependents):
+    self.source_dependencies = source_dependencies
+    self.header_dependents = header_dependents
+
+
+class LocsEncoder(json.JSONEncoder):
+  def default(self, o):
+    if isinstance(o, File):
+      return {"file": o.file, "target": o.target, "loc": o.loc, "in_bytes": o.in_bytes,
+              "expanded": o.expanded, "expanded_bytes": o.expanded_bytes}
+    if isinstance(o, Group):
+      return {"name": o.name, "loc": o.loc, "in_bytes": o.in_bytes,
+              "expanded": o.expanded, "expanded_bytes": o.expanded_bytes}
+    if isinstance(o, Results):
+      return {"groups": o.groups, "units": o.units,
+              "source_dependencies": o.source_dependencies,
+              "header_dependents": o.header_dependents}
+    return json.JSONEncoder.default(self, o)
+
+
+class StatusLine:
+  def __init__(self):
+    self.max_width = 0
+
+  def print(self, statusline, end="\r", file=sys.stdout):
+    self.max_width = max(self.max_width, len(statusline))
+    print("{0:<{1}}".format(statusline, self.max_width),
+          end=end, file=file, flush=True)
+
+
+class CommandSplitter:
+  def __init__(self):
+    self.cmd_pattern = re.compile(
+        "([^\\s]*\\s+)?(?P<clangcmd>[^\\s]*clang.*)"
+        " -c (?P<infile>.*) -o (?P<outfile>.*)")
+
+  def process(self, compilation_unit):
+    cmd = self.cmd_pattern.match(compilation_unit['command'])
+    outfilename = cmd.group('outfile')
+    infilename = cmd.group('infile')
+    infile = Path(compilation_unit['directory']).joinpath(infilename)
+    return (cmd.group('clangcmd'), infilename, infile, outfilename)
+
+
+def parse_ninja_deps(ninja_deps):
+  source_dependencies = {}
+  header_dependents = defaultdict(int)
+  current_target = None
+  for line in ninja_deps:
+    line = line.rstrip()
+    # Ignore empty lines
+    if not line:
+      current_target = None
+      continue
+    if line[0] == ' ':
+      # New dependency
+      if len(line) < 5 or line[0:4] != '    ' or line[5] == ' ':
+        sys.exit('Lines must have no indentation or exactly four ' +
+                 'spaces.')
+      dep = line[4:]
+      if not re.search(r"\.(h|hpp)$", dep):
+        continue
+      header_dependents[dep] += 1
+      continue
+    # New target
+    colon_pos = line.find(':')
+    if colon_pos < 0:
+      sys.exit('Unindented line must have a colon')
+    if current_target is not None:
+      sys.exit('Missing empty line before new target')
+    current_target = line[0:colon_pos]
+    match = re.search(r"#deps (\d+)", line)
+    deps_number = match.group(1)
+    source_dependencies[current_target] = int(deps_number)
+
+  return (source_dependencies, header_dependents)
+
+
+def Main():
+  out = sys.stdout
+  if ARGS['json']:
+    out = sys.stderr
+
+  compile_commands_file, ninja_deps_file = GenerateCompileCommandsAndBuild(
+      ARGS['build_dir'], out)
+
+  result = Results()
+  status = StatusLine()
+
+  try:
+    with open(compile_commands_file) as file:
+      compile_commands = json.load(file)
+    with open(ninja_deps_file) as file:
+      source_dependencies, header_dependents = parse_ninja_deps(file)
+      result.addHeaderDeps(source_dependencies, header_dependents)
+  except FileNotFoundError:
+    print("Error: Cannot read '{}'. Consult --help to get started.".format(
+        ninja_deps_file))
+    exit(1)
+
+  cmd_splitter = CommandSplitter()
+
+  def count_lines_of_unit(ikey):
+    i, key = ikey
+    if not result.track(key['file']):
+      return
+    message = "[{}/{}] Counting LoCs of {}".format(
+        i, len(compile_commands), key['file'])
+    status.print(message, file=out)
+    clangcmd, infilename, infile, outfilename = cmd_splitter.process(key)
+    if not infile.is_file():
+      return
+
+    clangcmd = clangcmd + " -E -P " + \
+        str(infile) + " -o /dev/stdout | sed '/^\\s*$/d' | wc -lc"
+    loccmd = ("cat {}  | sed '\\;^\\s*//;d' | sed '\\;^/\\*;d'"
+              " | sed '/^\\*/d' | sed '/^\\s*$/d' | wc -lc")
+    loccmd = loccmd.format(infile)
+    runcmd = " {} ; {}".format(clangcmd, loccmd)
+    if ARGS['echocmd']:
+      print(runcmd)
+    process = subprocess.Popen(
+        runcmd, shell=True, cwd=key['directory'], stdout=subprocess.PIPE)
+    p = {'process': process, 'infile': infilename, 'outfile': outfilename}
+    output, _ = p['process'].communicate()
+    expanded, expanded_bytes, loc, in_bytes = list(map(int, output.split()))
+    result.recordFile(p['infile'], p['outfile'], loc,
+                      in_bytes, expanded, expanded_bytes)
+
+  with tempfile.TemporaryDirectory(dir='/tmp/', prefix="locs.") as temp:
+    start = time.time()
+
+    with ThreadPoolExecutor(max_workers=ARGS['jobs']) as executor:
+      list(executor.map(count_lines_of_unit, enumerate(compile_commands)))
+
+    end = time.time()
+    if ARGS['json']:
+      print(json.dumps(result, ensure_ascii=False, cls=LocsEncoder))
+    status.print("Processed {:,} files in {:,.2f} sec.".format(
+        len(compile_commands), end-start), end="\n", file=out)
+    result.printGroupResults(file=out)
+
+    if ARGS['largest']:
+      print("Largest {} files after expansion:".format(ARGS['largest']))
+      result.printSorted(
+          lambda v: v.expanded, ARGS['largest'], reverse=True, out=out)
+
+    if ARGS['worst']:
+      print("Worst expansion ({} files):".format(ARGS['worst']))
+      result.printSorted(
+          lambda v: v.ratio(), ARGS['worst'], reverse=True, out=out)
+
+    if ARGS['smallest']:
+      print("Smallest {} input files:".format(ARGS['smallest']))
+      result.printSorted(
+          lambda v: v.loc, ARGS['smallest'], reverse=False, out=out)
+
+    if ARGS['files']:
+      print("List of input files:")
+      result.printSorted(
+          lambda v: v.file, ARGS['files'], reverse=False, out=out)
+
+  return 0
+
+
+if __name__ == '__main__':
+  sys.exit(Main())
diff --git a/src/v8/tools/logreader.js b/src/v8/tools/logreader.js
index d9dfd17..ebdc55a 100644
--- a/src/v8/tools/logreader.js
+++ b/src/v8/tools/logreader.js
@@ -112,7 +112,7 @@
  */
 LogReader.prototype.processLogLine = function(line) {
   if (!this.timedRange_) {
-    this.processLog_([line]);
+    this.processLogLine_(line);
     return;
   }
   if (line.startsWith("current-time")) {
@@ -130,7 +130,7 @@
     if (this.hasSeenTimerMarker_) {
       this.logLinesSinceLastTimerMarker_.push(line);
     } else if (!line.startsWith("tick")) {
-      this.processLog_([line]);
+      this.processLogLine_(line);
     }
   }
 };
@@ -175,6 +175,9 @@
   return false;
 };
 
+// Dummy parser marker constants, named for readability.
+const parseString = 'parse-string';
+const parseVarArgs = 'parse-var-args';
 
 /**
  * Does a dispatch of a log record.
@@ -185,9 +188,8 @@
 LogReader.prototype.dispatchLogRow_ = function(fields) {
   // Obtain the dispatch.
   var command = fields[0];
-  if (!(command in this.dispatchTable_)) return;
-
   var dispatch = this.dispatchTable_[command];
+  if (dispatch === undefined) return;
 
   if (dispatch === null || this.skipDispatch(dispatch)) {
     return;
@@ -197,14 +199,16 @@
   var parsedFields = [];
   for (var i = 0; i < dispatch.parsers.length; ++i) {
     var parser = dispatch.parsers[i];
-    if (parser === null) {
+    if (parser === parseString) {
       parsedFields.push(fields[1 + i]);
     } else if (typeof parser == 'function') {
       parsedFields.push(parser(fields[1 + i]));
-    } else {
+    } else if (parser === parseVarArgs) {
       // var-args
       parsedFields.push(fields.slice(1 + i));
       break;
+    } else {
+      throw new Error("Invalid log field parser: " + parser);
     }
   }
 
@@ -220,11 +224,19 @@
  * @private
  */
 LogReader.prototype.processLog_ = function(lines) {
-  for (var i = 0, n = lines.length; i < n; ++i, ++this.lineNum_) {
-    var line = lines[i];
-    if (!line) {
-      continue;
-    }
+  for (var i = 0, n = lines.length; i < n; ++i) {
+    this.processLogLine_(lines[i]);
+  }
+}
+
+/**
+ * Processes a single log line.
+ *
+ * @param {string} line A single log line to process.
+ * @private
+ */
+LogReader.prototype.processLogLine_ = function(line) {
+  if (line.length > 0) {
     try {
       var fields = this.csvParser_.parseLine(line);
       this.dispatchLogRow_(fields);
@@ -232,4 +244,5 @@
       this.printError('line ' + (this.lineNum_ + 1) + ': ' + (e.message || e));
     }
   }
+  this.lineNum_++;
 };
diff --git a/src/v8/tools/map-processor b/src/v8/tools/map-processor
index c0713bd..cf18c31 100755
--- a/src/v8/tools/map-processor
+++ b/src/v8/tools/map-processor
@@ -28,7 +28,7 @@
 
 if [ ! -x "$d8_exec" ]; then
   echo "d8 shell not found in $D8_PATH"
-  echo "To build, execute 'make native' from the V8 directory"
+  echo "Please provide path to d8 as env var in D8_PATH"
   exit 1
 fi
 
diff --git a/src/v8/tools/map-processor.html b/src/v8/tools/map-processor.html
index 4029e96..70c205c 100644
--- a/src/v8/tools/map-processor.html
+++ b/src/v8/tools/map-processor.html
@@ -5,7 +5,7 @@
   code is governed by a BSD-style license that can be found in the LICENSE file.
   -->
 <head>
-<meta charset="UTF-8">
+<meta charset="utf-8">
 <style>
 html, body {
   font-family: sans-serif;
@@ -15,10 +15,100 @@
 h1, h2, h3, section {
   padding-left: 15px;
 }
+
+#content {
+  opacity: 0.0;
+  height: 0px;
+  transition: all 0.5s ease-in-out;
+}
+
+.success #content {
+  height: auto;
+  opacity: 1.0;
+}
+
+#fileReader {
+  width: 100%;
+  height: 100px;
+  line-height: 100px;
+  text-align: center;
+  border: solid 1px #000000;
+  border-radius: 5px;
+  cursor: pointer;
+  transition: all 0.5s ease-in-out;
+}
+
+.failure #fileReader {
+  background-color: #FFAAAA;
+}
+
+.success #fileReader {
+    height: 20px;
+    line-height: 20px;
+}
+
+#fileReader:hover {
+  background-color: #e0edfe;
+}
+
+.loading #fileReader {
+  cursor: wait;
+}
+
+#fileReader > input {
+  display: none;
+}
+
+
+#loader {
+  display: none;
+}
+
+.loading #loader {
+  display: block;
+  position: fixed;
+  top: 0px;
+  left: 0px;
+  width: 100%;
+  height: 100%;
+  background-color: rgba(255, 255, 255, 0.5);
+}
+
+#spinner {
+  position: absolute;
+  width: 100px;
+  height: 100px;
+  top: 40%;
+  left: 50%;
+  margin-left: -50px;
+  border: 30px solid #000;
+  border-top: 30px solid #36E;
+  border-radius: 50%;
+  animation: spin 1s ease-in-out infinite;
+}
+
+@keyframes spin {
+  0% {
+    transform: rotate(0deg);
+  }
+  100% {
+    transform: rotate(360deg);
+  }
+}
+
+.colorbox {
+  width: 10px;
+  height: 10px;
+  border: 1px black solid;
+}
+
 #stats table {
   display: inline-block;
   padding-right: 50px;
 }
+#stats table td {
+  cursor: pointer;
+}
 #stats .transitionTable {
   max-height: 200px;
   overflow-y: scroll;
@@ -30,6 +120,16 @@
   overflow-x: scroll;
   user-select: none;
 }
+#timelineLabel {
+  transform: rotate(90deg);
+  transform-origin: left bottom 0;
+  position: absolute;
+  left: 0;
+  width: 250px;
+  text-align: center;
+  font-size: 10px;
+  opacity: 0.5;
+}
 #timelineChunks {
   height: 250px;
   position: absolute;
@@ -179,6 +279,9 @@
   word-break: break-all;
   background-color: rgba(255,255,255,0.5);
 }
+.black{
+  background-color: black;
+}
 .red {
   background-color: red;
 }
@@ -308,7 +411,7 @@
 function div(classes) {
   let node = document.createElement('div');
   if (classes !== void 0) {
-    if (typeof classes == "string") {
+    if (typeof classes === "string") {
       node.classList.add(classes);
     } else {
       classes.forEach(cls => node.classList.add(cls));
@@ -322,11 +425,17 @@
   if (className) node.classList.add(className)
   return node;
 }
-function td(text) {
+
+function td(textOrNode) {
   let node = document.createElement("td");
-  node.innerText = text;
+  if (typeof textOrNode === "object") {
+    node.appendChild(textOrNode);
+  } else {
+    node.innerText = textOrNode;
+  }
   return node;
 }
+
 function tr() {
   let node = document.createElement("tr");
   return node;
@@ -369,9 +478,14 @@
 // =========================================================================
 // EventHandlers
 function handleBodyLoad() {
-  let upload = $('uploadInput');
-  upload.onclick = (e) => { e.target.value = null };
-  upload.onchange = (e) => { handleLoadFile(e.target) };
+  let upload = $('fileReader');
+  upload.onclick = (e) => $("file").click();
+  upload.ondragover = (e) => e.preventDefault();
+  upload.ondrop = (e) => handleLoadFile(e);
+  $('file').onchange = (e) => handleLoadFile(e);
+  upload.onkeydown = (e) =>  {
+    if (event.key == "Enter") $("file").click();
+  };
   upload.focus();
 
   document.state = new State();
@@ -381,20 +495,31 @@
     tooltip.style.top = e.pageY + "px";
     let map = e.target.map;
     if (map) {
-      $("tooltipContents").innerText = map.description.join("\n");
+      $("tooltipContents").innerText = map.description;
     }
   });
+
+  function handleLoadFile(event) {
+    // Used for drop and file change.
+    event.preventDefault();
+    let host = event.dataTransfer ? event.dataTransfer : event.target;
+    let file = host.files[0];
+    let reader = new FileReader();
+    document.body.className = 'loading';
+    reader.onload = function(evt) {
+      try {
+        handleLoadText(this.result);
+       document.body.className = 'success';
+      } catch(e) {
+       document.body.className = 'failure';
+       console.error(e);
+      }
+    }
+    // Defer the reading to allow spinner CSS animation.
+    setTimeout(() => reader.readAsText(file), 0);
+  }
 }
 
-function handleLoadFile(upload) {
-  let files = upload.files;
-  let file = files[0];
-  let reader = new FileReader();
-  reader.onload = function(evt) {
-    handleLoadText(this.result);
-  }
-  reader.readAsText(file);
-}
 
 function handleLoadText(text) {
   let mapProcessor = new MapProcessor();
@@ -540,6 +665,7 @@
     let details = "";
     if (this.map) {
       details += "ID: " + this.map.id;
+      details += "\nSource location: " + this.map.filePosition;
       details += "\n" + this.map.description;
     }
     $("mapDetails").innerText = details;
@@ -591,7 +717,6 @@
       time += interval;
     }
     this.drawOverview();
-    this.drawHistograms();
     this.redraw();
   }
 
@@ -686,47 +811,6 @@
     $("timelineOverview").style.backgroundImage = "url(" + imageData + ")";
   }
 
-  drawHistograms() {
-    $("mapsDepthHistogram").histogram = this.timeline.depthHistogram();
-    $("mapsFanOutHistogram").histogram = this.timeline.fanOutHistogram();
-  }
-
-  drawMapsDepthHistogram() {
-    let canvas = $("mapsDepthCanvas");
-    let histogram = this.timeline.depthHistogram();
-    this.drawHistogram(canvas, histogram, true);
-  }
-
-  drawMapsFanOutHistogram() {
-    let canvas = $("mapsFanOutCanvas");
-    let histogram = this.timeline.fanOutHistogram();
-    this.drawHistogram(canvas, histogram, true, true);
-  }
-
-  drawHistogram(canvas, histogram, logScaleX=false, logScaleY=false) {
-    let ctx = canvas.getContext("2d");
-    let yMax = histogram.max(each => each.length);
-    if (logScaleY) yMax = Math.log(yMax);
-    let xMax = histogram.length;
-    if (logScaleX) xMax = Math.log(xMax);
-    ctx.clearRect(0, 0, canvas.width, canvas.height);
-    ctx.beginPath();
-    ctx.moveTo(0,canvas.height);
-    for (let i = 0; i < histogram.length; i++) {
-      let x = i;
-      if (logScaleX) x = Math.log(x);
-      x = x / xMax * canvas.width;
-      let bucketLength = histogram[i].length;
-      if (logScaleY) bucketLength = Math.log(bucketLength);
-      let y = (1 - bucketLength / yMax) * canvas.height;
-      ctx.lineTo(x, y);
-    }
-    ctx.lineTo(canvas.width, canvas.height);
-    ctx.closePath;
-    ctx.stroke();
-    ctx.fill();
-  }
-
   redraw() {
     let canvas= $("timelineCanvas");
     canvas.width = (this.chunks.length+1) * kChunkWidth;
@@ -1006,26 +1090,32 @@
   }
   updateGeneralStats() {
     let pairs = [
-      ["Maps", e => true],
-      ["Transitions", e => e.edge && e.edge.isTransition()],
-      ["Fast to Slow", e => e.edge && e.edge.isFastToSlow()],
-      ["Slow to Fast", e => e.edge && e.edge.isSlowToFast()],
-      ["Initial Map", e => e.edge && e.edge.isInitial()],
-      ["Replace Descriptors", e => e.edge && e.edge.isReplaceDescriptors()],
-      ["Copy as Prototype", e => e.edge && e.edge.isCopyAsPrototype()],
-      ["Optimize as Prototype", e => e.edge && e.edge.isOptimizeAsPrototype()],
-      ["Deprecated", e => e.isDeprecated()],
+      ["Maps", null, e => true],
+      ["Transitions", 'black', e => e.edge && e.edge.isTransition()],
+      ["Fast to Slow", 'violet', e => e.edge && e.edge.isFastToSlow()],
+      ["Slow to Fast", 'orange', e => e.edge && e.edge.isSlowToFast()],
+      ["Initial Map", 'yellow', e => e.edge && e.edge.isInitial()],
+      ["Replace Descriptors", 'red', e => e.edge && e.edge.isReplaceDescriptors()],
+      ["Copy as Prototype", 'red', e => e.edge && e.edge.isCopyAsPrototype()],
+      ["Optimize as Prototype", null, e => e.edge && e.edge.isOptimizeAsPrototype()],
+      ["Deprecated", null, e => e.isDeprecated()],
+      ["Bootstrapped", 'green', e => e.isBootstrapped()],
     ];
 
     let text = "";
     let tableNode = table();
     let name, filter;
     let total = this.timeline.size();
-    pairs.forEach(([name, filter]) => {
+    pairs.forEach(([name, color, filter]) => {
       let row = tr();
+      if (color !== null) {
+        row.appendChild(td(div(['colorbox', color])));
+      } else {
+       row.appendChild(td(""));
+      }
       row.maps = this.timeline.filterUniqueTransitions(filter);
-      row.addEventListener("click",
-          e => this.transitionView.showMaps(e.target.parentNode.maps));
+      row.onclick =
+          (e) => this.transitionView.showMaps(e.target.parentNode.maps);
       row.appendChild(td(name));
       let count = this.timeline.count(filter);
       row.appendChild(td(count));
@@ -1060,7 +1150,7 @@
   switch(type) {
     case "new": return "green";
     case "Normalize": return "violet";
-    case "map=SlowToFast": return "orange";
+    case "SlowToFast": return "orange";
     case "InitialMap": return "yellow";
     case "Transition": return "black";
     case "ReplaceDescriptors": return "red";
@@ -1069,183 +1159,53 @@
 }
 
 // ShadowDom elements =========================================================
-customElements.define('x-histogram', class extends HTMLElement {
-  constructor() {
-    super();
-    let shadowRoot = this.attachShadow({mode: 'open'});
-    const t = document.querySelector('#x-histogram-template');
-    const instance = t.content.cloneNode(true);
-    shadowRoot.appendChild(instance);
-    this._histogram = undefined;
-    this.mouseX = 0;
-    this.mouseY = 0;
-    this.canvas.addEventListener('mousemove', event => this.handleCanvasMove(event));
-  }
-  setBoolAttribute(name, value) {
-    if (value) {
-      this.setAttribute(name, "");
-    } else {
-      this.deleteAttribute(name);
-    }
-  }
-  static get observedAttributes() {
-    return ['title', 'xlog', 'ylog', 'xlabel', 'ylabel'];
-  }
-  $(query) { return this.shadowRoot.querySelector(query) }
-  get h1() { return this.$("h2") }
-  get canvas() { return this.$("canvas") }
-  get xLabelDiv() { return this.$("#xLabel") }
-  get yLabelDiv() { return this.$("#yLabel") }
-
-  get histogram() {
-    return this._histogram;
-  }
-  set histogram(array) {
-    this._histogram = array;
-    if (this._histogram) {
-      this.yMax = this._histogram.max(each => each.length);
-      this.xMax = this._histogram.length;
-    }
-    this.draw();
-  }
-
-  get title() { return this.getAttribute("title") }
-  set title(string) { this.setAttribute("title", string) }
-  get xLabel() { return this.getAttribute("xlabel") }
-  set xLabel(string) { this.setAttribute("xlabel", string)}
-  get yLabel() { return this.getAttribute("ylabel") }
-  set yLabel(string) { this.setAttribute("ylabel", string)}
-  get xLog() { return this.hasAttribute("xlog") }
-  set xLog(value) { this.setBoolAttribute("xlog", value) }
-  get yLog() { return this.hasAttribute("ylog") }
-  set yLog(value) { this.setBoolAttribute("ylog", value) }
-
-  attributeChangedCallback(name, oldValue, newValue) {
-    if (name == "title") {
-      this.h1.innerText = newValue;
-      return;
-    }
-    if (name == "ylabel") {
-      this.yLabelDiv.innerText = newValue;
-      return;
-    }
-    if (name == "xlabel") {
-      this.xLabelDiv.innerText = newValue;
-      return;
-    }
-    this.draw();
-  }
-
-  handleCanvasMove(event) {
-    this.mouseX = event.offsetX;
-    this.mouseY = event.offsetY;
-    this.draw();
-  }
-  xPosition(i) {
-    let x = i;
-    if (this.xLog) x = Math.log(x);
-    return x / this.xMax * this.canvas.width;
-  }
-  yPosition(i) {
-    let bucketLength = this.histogram[i].length;
-    if (this.yLog) {
-      return (1 - Math.log(bucketLength) / Math.log(this.yMax)) * this.drawHeight + 10;
-    } else {
-     return (1 - bucketLength / this.yMax) * this.drawHeight + 10;
-    }
-  }
-
-  get drawHeight() { return this.canvas.height - 10 }
-
-  draw() {
-    if (!this.histogram) return;
-    let width = this.canvas.width;
-    let height = this.drawHeight;
-    let ctx = this.canvas.getContext("2d");
-    if (this.xLog) yMax = Math.log(yMax);
-    let xMax = this.histogram.length;
-    if (this.yLog) xMax = Math.log(xMax);
-    ctx.clearRect(0, 0, this.canvas.width, this.canvas.height);
-    ctx.beginPath();
-    ctx.moveTo(0, height);
-    for (let i = 0; i < this.histogram.length; i++) {
-      ctx.lineTo(this.xPosition(i), this.yPosition(i));
-    }
-    ctx.lineTo(width, height);
-    ctx.closePath;
-    ctx.stroke();
-    ctx.fill();
-    if (!this.mouseX) return;
-    ctx.beginPath();
-    let index = Math.round(this.mouseX);
-    let yBucket = this.histogram[index];
-    let y = this.yPosition(index);
-    if (this.yLog) y = Math.log(y);
-    ctx.moveTo(0, y);
-    ctx.lineTo(width-40, y);
-    ctx.moveTo(this.mouseX, 0);
-    ctx.lineTo(this.mouseX, height);
-    ctx.stroke();
-    ctx.textAlign = "left";
-    ctx.fillText(yBucket.length, width-30, y);
-  }
-});
 
 </script>
 </head>
-<template id="x-histogram-template">
-  <style>
-    #yLabel {
-      transform: rotate(90deg);
-    }
-    canvas, #yLabel, #info { float: left; }
-    #xLabel { clear: both }
-  </style>
-  <h2></h2>
-  <div id="yLabel"></div>
-  <canvas height=50></canvas>
-  <div id="info">
-  </div>
-  <div id="xLabel"></div>
-</template>
-
 <body onload="handleBodyLoad(event)" onkeypress="handleKeyDown(event)">
-  <h2>Data</h2>
+  <h1>V8 Map Explorer</h1>
   <section>
-    <form name="fileForm">
-      <p>
-        <input id="uploadInput" type="file" name="files">
-      </p>
-    </form>
+    <div id="fileReader" tabindex=1 >
+      <span id="label">
+        Drag and drop a v8.log file into this area, or click to choose from disk.
+      </span>
+      <input id="file" type="file" name="files">
+    </div>
+    <div id="loader">
+      <div id="spinner"></div>
+    </div>
   </section>
 
-  <h2>Stats</h2>
-  <section id="stats"></section>
+  <div id="content">
+    <h2>Stats</h2>
+    <section id="stats"></section>
 
-  <h2>Timeline</h2>
-  <div id="timeline">
-    <div id=timelineChunks></div>
-    <canvas id="timelineCanvas" ></canvas>
-  </div>
-  <div id="timelineOverview"
-      onmousemove="handleTimelineIndicatorMove(event)" >
-    <div id="timelineOverviewIndicator">
-      <div class="leftMask"></div>
-      <div class="rightMask"></div>
+    <h2>Timeline</h2>
+    <div id="timeline">
+      <div id="timelineLabel">Frequency</div>
+      <div id="timelineChunks"></div>
+      <canvas id="timelineCanvas"></canvas>
     </div>
+    <div id="timelineOverview"
+        onmousemove="handleTimelineIndicatorMove(event)" >
+      <div id="timelineOverviewIndicator">
+        <div class="leftMask"></div>
+        <div class="rightMask"></div>
+      </div>
+    </div>
+
+    <h2>Transitions</h2>
+    <section id="transitionView"></section>
+    <br/>
+
+    <h2>Selected Map</h2>
+    <section id="mapDetails"></section>
   </div>
 
-  <h2>Transitions</h2>
-  <section id="transitionView"></section>
-  <br/>
-
-  <h2>Selected Map</h2>
-  <section id="mapDetails"></section>
-
-  <x-histogram id="mapsDepthHistogram"
-      title="Maps Depth" xlabel="depth" ylabel="nof"></x-histogram>
-  <x-histogram id="mapsFanOutHistogram" xlabel="fan-out"
-      title="Maps Fan-out" ylabel="nof"></x-histogram>
+  <h2>Instructions</h2>
+  <section>
+    <p>Visualize Map trees that have been gathere using <code>--trace-maps</code>.</p>
+  </section>
 
   <div id="tooltip">
     <div id="tooltipContents"></div>
diff --git a/src/v8/tools/map-processor.js b/src/v8/tools/map-processor.js
index 5b0e469..7e8572a 100644
--- a/src/v8/tools/map-processor.js
+++ b/src/v8/tools/map-processor.js
@@ -8,7 +8,8 @@
     super();
     this.dispatchTable_ = {
       'code-creation': {
-        parsers: [null, parseInt, parseInt, parseInt, parseInt, null, 'var-args'],
+        parsers: [parseString, parseInt, parseInt, parseInt, parseInt,
+          parseString, parseVarArgs],
         processor: this.processCodeCreation
       },
       'code-move': {
@@ -24,21 +25,20 @@
         processor: this.processFunctionMove
       },
       'map-create': {
-        parsers: [parseInt, parseInt, null],
+        parsers: [parseInt, parseInt, parseString],
         processor: this.processMapCreate
       },
       'map': {
-        parsers: [null, parseInt, parseInt, parseInt, parseInt, parseInt,
-          null, null, null
+        parsers: [parseString, parseInt, parseInt, parseInt, parseInt, parseInt,
+          parseString, parseString, parseString
         ],
         processor: this.processMap
       },
       'map-details': {
-        parsers: [parseInt, parseInt, null],
+        parsers: [parseInt, parseInt, parseString],
         processor: this.processMapDetails
       }
     };
-    this.deserializedEntriesNames_ = [];
     this.profile_ = new Profile();
     this.timeline_ = new Timeline();
   }
@@ -65,7 +65,7 @@
         this.processLogLine(line);
       }
     } catch(e) {
-      console.log("Error occurred during parsing, trying to continue: " + e);
+      console.error("Error occurred during parsing, trying to continue: " + e);
     }
     return this.finalize();
   }
@@ -107,10 +107,6 @@
 
   processCodeCreation(
     type, kind, timestamp, start, size, name, maybe_func) {
-    name = this.deserializedEntriesNames_[start] || name;
-    if (name.startsWith("onComplete")) {
-      console.log(name);
-    }
     if (maybe_func.length) {
       let funcAddr = parseInt(maybe_func[0]);
       let state = this.parseState(maybe_func[1]);
@@ -155,7 +151,7 @@
     from = this.getExistingMap(from, time);
     to = this.getExistingMap(to, time);
     let edge = new Edge(type, name, reason, time, from, to);
-    edge.filePosition = this.formatPC(pc, line, column);
+    to.filePosition = this.formatPC(pc, line, column);
     edge.finishSetup();
   }
 
@@ -179,9 +175,6 @@
   }
 
   createMap(id, time) {
-    if (id == 0x1821257d1761) {
-      console.log(id);
-    }
     let map = new V8Map(id, time);
     this.timeline_.push(map);
     return map;
@@ -216,6 +209,7 @@
     V8Map.set(id, this);
     this.leftId = 0;
     this.rightId = 0;
+    this.filePosition = "";
   }
 
   finalize(id) {
@@ -291,6 +285,10 @@
     return this.edge === void 0 ? "new" : this.edge.type;
   }
 
+  isBootstrapped() {
+    return this.edge === void 0;
+  }
+
   getParents() {
     let parents = [];
     let current = this.parent();
@@ -322,7 +320,6 @@
     this.time = time;
     this.from = from;
     this.to = to;
-    this.filePosition = "";
   }
 
   finishSetup() {
@@ -370,31 +367,35 @@
   }
 
   isTransition() {
-    return this.type == "Transition"
+    return this.type === "Transition"
   }
 
   isFastToSlow() {
-    return this.type == "Normalize"
+    return this.type === "Normalize"
   }
 
   isSlowToFast() {
-    return this.type == "SlowToFast"
+    return this.type === "SlowToFast"
   }
 
   isInitial() {
-    return this.type == "InitialMap"
+    return this.type === "InitialMap"
+  }
+
+  isBootstrapped() {
+    return this.type === "new"
   }
 
   isReplaceDescriptors() {
-    return this.type == "ReplaceDescriptors"
+    return this.type === "ReplaceDescriptors"
   }
 
   isCopyAsPrototype() {
-    return this.reason == "CopyAsPrototype"
+    return this.reason === "CopyAsPrototype"
   }
 
   isOptimizeAsPrototype() {
-    return this.reason == "OptimizeAsPrototype"
+    return this.reason === "OptimizeAsPrototype"
   }
 
   symbol() {
diff --git a/src/v8/tools/mb/docs/design_spec.md b/src/v8/tools/mb/docs/design_spec.md
index fb202da..c119e65 100644
--- a/src/v8/tools/mb/docs/design_spec.md
+++ b/src/v8/tools/mb/docs/design_spec.md
@@ -169,7 +169,7 @@
 * `compile_targets`, which is a list of pruned targets to be
   passed to Ninja to build. It is acceptable to replace a list of
   pruned targets by a meta target if it turns out that all of the
-  dependendencies of the target are affected by the patch (i.e.,
+  dependencies of the target are affected by the patch (i.e.,
   all ten binaries that blink_tests depends on), but doing so is
   not required.
 * `test_targets`, which is a list of unpruned targets to be mapped
diff --git a/src/v8/tools/mb/docs/user_guide.md b/src/v8/tools/mb/docs/user_guide.md
index a7d72c8..75c195a 100644
--- a/src/v8/tools/mb/docs/user_guide.md
+++ b/src/v8/tools/mb/docs/user_guide.md
@@ -20,7 +20,7 @@
 
 ### `mb analyze`
 
-`mb analyze` is reponsible for determining what targets are affected by
+`mb analyze` is responsible for determining what targets are affected by
 a list of files (e.g., the list of files in a patch on a trybot):
 
 ```
@@ -229,7 +229,7 @@
 
 There should be an key in this dict for every supported configuration
 of Chromium, meaning every configuration we have a bot for, and every
-configuration commonly used by develpers but that we may not have a bot
+configuration commonly used by developers but that we may not have a bot
 for.
 
 The value of each key is a list of "mixins" that will define what that
diff --git a/src/v8/tools/mb/mb.py b/src/v8/tools/mb/mb.py
index 9a660022..f66f82a 100755
--- a/src/v8/tools/mb/mb.py
+++ b/src/v8/tools/mb/mb.py
@@ -4,16 +4,13 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-"""MB - the Meta-Build wrapper around GYP and GN
+"""MB - the Meta-Build wrapper around GN.
 
-MB is a wrapper script for GYP and GN that can be used to generate build files
+MB is a wrapper script for GN that can be used to generate build files
 for sets of canned configurations and analyze them.
 """
 
-# TODO(thomasanderson): Remove this comment.  It is added to
-# workaround https://crbug.com/736215 for CL
-# https://codereview.chromium.org/2974603002/
-
+# for py2/py3 compatibility
 from __future__ import print_function
 
 import argparse
@@ -22,6 +19,7 @@
 import json
 import os
 import pipes
+import platform
 import pprint
 import re
 import shutil
@@ -39,6 +37,12 @@
 
 import gn_helpers
 
+try:
+  cmp              # Python 2
+except NameError:  # Python 3
+  def cmp(x, y):   # pylint: disable=redefined-builtin
+    return (x > y) - (x < y)
+
 
 def main(args):
   mbw = MetaBuildWrapper()
@@ -95,21 +99,17 @@
                         help='path to config file '
                              '(default is %(default)s)')
       subp.add_argument('-i', '--isolate-map-file', metavar='PATH',
-                        default=self.default_isolate_map,
                         help='path to isolate map file '
-                             '(default is %(default)s)')
+                             '(default is %(default)s)',
+                        default=[],
+                        action='append',
+                        dest='isolate_map_files')
       subp.add_argument('-g', '--goma-dir',
                         help='path to goma directory')
-      subp.add_argument('--gyp-script', metavar='PATH',
-                        default=self.PathJoin('build', 'gyp_chromium'),
-                        help='path to gyp script relative to project root '
-                             '(default is %(default)s)')
       subp.add_argument('--android-version-code',
-                        help='Sets GN arg android_default_version_code and '
-                             'GYP_DEFINE app_manifest_version_code')
+                        help='Sets GN arg android_default_version_code')
       subp.add_argument('--android-version-name',
-                        help='Sets GN arg android_default_version_name and '
-                             'GYP_DEFINE app_manifest_version_name')
+                        help='Sets GN arg android_default_version_name')
       subp.add_argument('-n', '--dryrun', action='store_true',
                         help='Do a dry run (i.e., do nothing, just print '
                              'the commands that will run)')
@@ -131,6 +131,8 @@
     subp.add_argument('output_path', nargs=1,
                       help='path to a file containing the output arguments '
                            'as a JSON object.')
+    subp.add_argument('--json-output',
+                      help='Write errors to json.output')
     subp.set_defaults(func=self.CmdAnalyze)
 
     subp = subps.add_parser('export',
@@ -149,6 +151,8 @@
     subp.add_argument('--swarming-targets-file',
                       help='save runtime dependencies for targets listed '
                            'in file.')
+    subp.add_argument('--json-output',
+                      help='Write errors to json.output')
     subp.add_argument('path', nargs=1,
                       help='path to generate build into')
     subp.set_defaults(func=self.CmdGen)
@@ -167,6 +171,12 @@
                             help='look up the command for a given config or '
                                  'builder')
     AddCommonOptions(subp)
+    subp.add_argument('--quiet', default=False, action='store_true',
+                      help='Print out just the arguments, '
+                           'do not emulate the output of the gen subcommand.')
+    subp.add_argument('--recursive', default=False, action='store_true',
+                      help='Lookup arguments from imported files, '
+                           'implies --quiet')
     subp.set_defaults(func=self.CmdLookup)
 
     subp = subps.add_parser(
@@ -190,7 +200,6 @@
         '    --test-launcher-retry-limit=0'
         '\n'
     )
-
     AddCommonOptions(subp)
     subp.add_argument('-j', '--jobs', dest='jobs', type=int,
                       help='Number of jobs to pass to ninja')
@@ -202,6 +211,14 @@
                             ' This can be either a regular path or a '
                             'GN-style source-relative path like '
                             '//out/Default.'))
+    subp.add_argument('-s', '--swarmed', action='store_true',
+                      help='Run under swarming with the default dimensions')
+    subp.add_argument('-d', '--dimension', default=[], action='append', nargs=2,
+                      dest='dimensions', metavar='FOO bar',
+                      help='dimension to filter on')
+    subp.add_argument('--no-default-dimensions', action='store_false',
+                      dest='default_dimensions', default=True,
+                      help='Do not automatically add dimensions to the task')
     subp.add_argument('target', nargs=1,
                       help='ninja target to build and run')
     subp.add_argument('extra_args', nargs='*',
@@ -217,26 +234,6 @@
                       help='path to config file (default is %(default)s)')
     subp.set_defaults(func=self.CmdValidate)
 
-    subp = subps.add_parser('audit',
-                            help='Audit the config file to track progress')
-    subp.add_argument('-f', '--config-file', metavar='PATH',
-                      default=self.default_config,
-                      help='path to config file (default is %(default)s)')
-    subp.add_argument('-i', '--internal', action='store_true',
-                      help='check internal masters also')
-    subp.add_argument('-m', '--master', action='append',
-                      help='master to audit (default is all non-internal '
-                           'masters in file)')
-    subp.add_argument('-u', '--url-template', action='store',
-                      default='https://build.chromium.org/p/'
-                              '{master}/json/builders',
-                      help='URL scheme for JSON APIs to buildbot '
-                           '(default: %(default)s) ')
-    subp.add_argument('-c', '--check-compile', action='store_true',
-                      help='check whether tbd and master-only bots actually'
-                           ' do compiles')
-    subp.set_defaults(func=self.CmdAudit)
-
     subp = subps.add_parser('gerrit-buildbucket-config',
                             help='Print buildbucket.config for gerrit '
                             '(see MB user guide)')
@@ -253,10 +250,6 @@
 
     self.args = parser.parse_args(argv)
 
-    # TODO(machenbach): This prepares passing swarming targets to isolate on the
-    # infra side.
-    self.args.swarming_targets_file = None
-
   def DumpInputFiles(self):
 
     def DumpContentsOfFilePassedTo(arg_name, path):
@@ -276,11 +269,7 @@
 
   def CmdAnalyze(self):
     vals = self.Lookup()
-    self.ClobberIfNeeded(vals)
-    if vals['type'] == 'gn':
-      return self.RunGNAnalyze(vals)
-    else:
-      return self.RunGYPAnalyze(vals)
+    return self.RunGNAnalyze(vals)
 
   def CmdExport(self):
     self.ReadConfigFile()
@@ -312,11 +301,7 @@
 
   def CmdGen(self):
     vals = self.Lookup()
-    self.ClobberIfNeeded(vals)
-    if vals['type'] == 'gn':
-      return self.RunGNGen(vals)
-    else:
-      return self.RunGYPGen(vals)
+    return self.RunGNGen(vals)
 
   def CmdHelp(self):
     if self.args.subcommand:
@@ -328,23 +313,19 @@
     vals = self.GetConfig()
     if not vals:
       return 1
-
-    if vals['type'] == 'gn':
-      return self.RunGNIsolate()
-    else:
-      return self.Build('%s_run' % self.args.target[0])
+    return self.RunGNIsolate()
 
   def CmdLookup(self):
     vals = self.Lookup()
-    if vals['type'] == 'gn':
+    gn_args = self.GNArgs(vals, expand_imports=self.args.recursive)
+    if self.args.quiet or self.args.recursive:
+      self.Print(gn_args, end='')
+    else:
       cmd = self.GNCmd('gen', '_path_')
-      gn_args = self.GNArgs(vals)
       self.Print('\nWriting """\\\n%s""" to _path_/args.gn.\n' % gn_args)
       env = None
-    else:
-      cmd, env = self.GYPCmd('_path_', vals)
 
-    self.PrintCmd(cmd, env)
+      self.PrintCmd(cmd, env)
     return 0
 
   def CmdRun(self):
@@ -355,33 +336,87 @@
     build_dir = self.args.path[0]
     target = self.args.target[0]
 
-    if vals['type'] == 'gn':
-      if self.args.build:
-        ret = self.Build(target)
-        if ret:
-          return ret
-      ret = self.RunGNIsolate()
+    if self.args.build:
+      ret = self.Build(target)
       if ret:
         return ret
-    else:
-      ret = self.Build('%s_run' % target)
-      if ret:
-        return ret
+    ret = self.RunGNIsolate()
+    if ret:
+      return ret
 
+    if self.args.swarmed:
+      return self._RunUnderSwarming(build_dir, target)
+    else:
+      return self._RunLocallyIsolated(build_dir, target)
+
+  def _RunUnderSwarming(self, build_dir, target):
+    # TODO(dpranke): Look up the information for the target in
+    # the //testing/buildbot.json file, if possible, so that we
+    # can determine the isolate target, command line, and additional
+    # swarming parameters, if possible.
+    #
+    # TODO(dpranke): Also, add support for sharding and merging results.
+    dimensions = []
+    for k, v in self._DefaultDimensions() + self.args.dimensions:
+      dimensions += ['-d', k, v]
+
+    cmd = [
+        self.executable,
+        self.PathJoin('tools', 'swarming_client', 'isolate.py'),
+        'archive',
+        '-s',
+        self.ToSrcRelPath('%s/%s.isolated' % (build_dir, target)),
+        '-I', 'isolateserver.appspot.com',
+      ]
+    ret, out, _ = self.Run(cmd, force_verbose=False)
+    if ret:
+      return ret
+
+    isolated_hash = out.splitlines()[0].split()[0]
+    cmd = [
+        self.executable,
+        self.PathJoin('tools', 'swarming_client', 'swarming.py'),
+          'run',
+          '-s', isolated_hash,
+          '-I', 'isolateserver.appspot.com',
+          '-S', 'chromium-swarm.appspot.com',
+      ] + dimensions
+    if self.args.extra_args:
+      cmd += ['--'] + self.args.extra_args
+    ret, _, _ = self.Run(cmd, force_verbose=True, buffer_output=False)
+    return ret
+
+  def _RunLocallyIsolated(self, build_dir, target):
     cmd = [
         self.executable,
         self.PathJoin('tools', 'swarming_client', 'isolate.py'),
         'run',
         '-s',
         self.ToSrcRelPath('%s/%s.isolated' % (build_dir, target)),
-    ]
+      ]
     if self.args.extra_args:
-        cmd += ['--'] + self.args.extra_args
-
-    ret, _, _ = self.Run(cmd, force_verbose=False, buffer_output=False)
-
+      cmd += ['--'] + self.args.extra_args
+    ret, _, _ = self.Run(cmd, force_verbose=True, buffer_output=False)
     return ret
 
+  def _DefaultDimensions(self):
+    if not self.args.default_dimensions:
+      return []
+
+    # This code is naive and just picks reasonable defaults per platform.
+    if self.platform == 'darwin':
+      os_dim = ('os', 'Mac-10.12')
+    elif self.platform.startswith('linux'):
+      os_dim = ('os', 'Ubuntu-16.04')
+    elif self.platform == 'win32':
+      os_dim = ('os', 'Windows-10')
+    else:
+      raise MBErr('unrecognized platform string "%s"' % self.platform)
+
+    return [('pool', 'Chrome'),
+            ('cpu', 'x86-64'),
+            os_dim]
+
   def CmdBuildbucket(self):
     self.ReadConfigFile()
 
@@ -462,154 +497,26 @@
       self.Print('mb config file %s looks ok.' % self.args.config_file)
     return 0
 
-  def CmdAudit(self):
-    """Track the progress of the GYP->GN migration on the bots."""
-
-    # First, make sure the config file is okay, but don't print anything
-    # if it is (it will throw an error if it isn't).
-    self.CmdValidate(print_ok=False)
-
-    stats = OrderedDict()
-    STAT_MASTER_ONLY = 'Master only'
-    STAT_CONFIG_ONLY = 'Config only'
-    STAT_TBD = 'Still TBD'
-    STAT_GYP = 'Still GYP'
-    STAT_DONE = 'Done (on GN)'
-    stats[STAT_MASTER_ONLY] = 0
-    stats[STAT_CONFIG_ONLY] = 0
-    stats[STAT_TBD] = 0
-    stats[STAT_GYP] = 0
-    stats[STAT_DONE] = 0
-
-    def PrintBuilders(heading, builders, notes):
-      stats.setdefault(heading, 0)
-      stats[heading] += len(builders)
-      if builders:
-        self.Print('  %s:' % heading)
-        for builder in sorted(builders):
-          self.Print('    %s%s' % (builder, notes[builder]))
-
-    self.ReadConfigFile()
-
-    masters = self.args.master or self.masters
-    for master in sorted(masters):
-      url = self.args.url_template.replace('{master}', master)
-
-      self.Print('Auditing %s' % master)
-
-      MASTERS_TO_SKIP = (
-        'client.skia',
-        'client.v8.fyi',
-        'tryserver.v8',
-      )
-      if master in MASTERS_TO_SKIP:
-        # Skip these bots because converting them is the responsibility of
-        # those teams and out of scope for the Chromium migration to GN.
-        self.Print('  Skipped (out of scope)')
-        self.Print('')
-        continue
-
-      INTERNAL_MASTERS = ('official.desktop', 'official.desktop.continuous',
-                          'internal.client.kitchensync')
-      if master in INTERNAL_MASTERS and not self.args.internal:
-        # Skip these because the servers aren't accessible by default ...
-        self.Print('  Skipped (internal)')
-        self.Print('')
-        continue
-
-      try:
-        # Fetch the /builders contents from the buildbot master. The
-        # keys of the dict are the builder names themselves.
-        json_contents = self.Fetch(url)
-        d = json.loads(json_contents)
-      except Exception as e:
-        self.Print(str(e))
-        return 1
-
-      config_builders = set(self.masters[master])
-      master_builders = set(d.keys())
-      both = master_builders & config_builders
-      master_only = master_builders - config_builders
-      config_only = config_builders - master_builders
-      tbd = set()
-      gyp = set()
-      done = set()
-      notes = {builder: '' for builder in config_builders | master_builders}
-
-      for builder in both:
-        config = self.masters[master][builder]
-        if config == 'tbd':
-          tbd.add(builder)
-        elif isinstance(config, dict):
-          vals = self.FlattenConfig(config.values()[0])
-          if vals['type'] == 'gyp':
-            gyp.add(builder)
-          else:
-            done.add(builder)
-        elif config.startswith('//'):
-          done.add(builder)
-        else:
-          vals = self.FlattenConfig(config)
-          if vals['type'] == 'gyp':
-            gyp.add(builder)
-          else:
-            done.add(builder)
-
-      if self.args.check_compile and (tbd or master_only):
-        either = tbd | master_only
-        for builder in either:
-          notes[builder] = ' (' + self.CheckCompile(master, builder) +')'
-
-      if master_only or config_only or tbd or gyp:
-        PrintBuilders(STAT_MASTER_ONLY, master_only, notes)
-        PrintBuilders(STAT_CONFIG_ONLY, config_only, notes)
-        PrintBuilders(STAT_TBD, tbd, notes)
-        PrintBuilders(STAT_GYP, gyp, notes)
-      else:
-        self.Print('  All GN!')
-
-      stats[STAT_DONE] += len(done)
-
-      self.Print('')
-
-    fmt = '{:<27} {:>4}'
-    self.Print(fmt.format('Totals', str(sum(int(v) for v in stats.values()))))
-    self.Print(fmt.format('-' * 27, '----'))
-    for stat, count in stats.items():
-      self.Print(fmt.format(stat, str(count)))
-
-    return 0
-
   def GetConfig(self):
     build_dir = self.args.path[0]
 
     vals = self.DefaultVals()
     if self.args.builder or self.args.master or self.args.config:
       vals = self.Lookup()
-      if vals['type'] == 'gn':
-        # Re-run gn gen in order to ensure the config is consistent with the
-        # build dir.
-        self.RunGNGen(vals)
+      # Re-run gn gen in order to ensure the config is consistent with the
+      # build dir.
+      self.RunGNGen(vals)
       return vals
 
-    mb_type_path = self.PathJoin(self.ToAbsPath(build_dir), 'mb_type')
-    if not self.Exists(mb_type_path):
-      toolchain_path = self.PathJoin(self.ToAbsPath(build_dir),
-                                     'toolchain.ninja')
-      if not self.Exists(toolchain_path):
-        self.Print('Must either specify a path to an existing GN build dir '
-                   'or pass in a -m/-b pair or a -c flag to specify the '
-                   'configuration')
-        return {}
-      else:
-        mb_type = 'gn'
-    else:
-      mb_type = self.ReadFile(mb_type_path).strip()
+    toolchain_path = self.PathJoin(self.ToAbsPath(build_dir),
+                                   'toolchain.ninja')
+    if not self.Exists(toolchain_path):
+      self.Print('Must either specify a path to an existing GN build dir '
+                 'or pass in a -m/-b pair or a -c flag to specify the '
+                 'configuration')
+      return {}
 
-    if mb_type == 'gn':
-      vals['gn_args'] = self.GNArgsFromDir(build_dir)
-    vals['type'] = mb_type
-
+    vals['gn_args'] = self.GNArgsFromDir(build_dir)
     return vals
 
   def GNArgsFromDir(self, build_dir):
@@ -641,14 +548,6 @@
           raise MBErr('Config "%s" not found in %s' %
                       (config, self.args.config_file))
         vals = self.FlattenConfig(config)
-
-    # Do some basic sanity checking on the config so that we
-    # don't have to do this in every caller.
-    if 'type' not in vals:
-        vals['type'] = 'gn'
-    assert vals['type'] in ('gn', 'gyp'), (
-        'Unknown meta-build type "%s"' % vals['gn_args'])
-
     return vals
 
   def ReadIOSBotConfig(self):
@@ -660,17 +559,10 @@
       return {}
 
     contents = json.loads(self.ReadFile(path))
-    gyp_vals = contents.get('GYP_DEFINES', {})
-    if isinstance(gyp_vals, dict):
-      gyp_defines = ' '.join('%s=%s' % (k, v) for k, v in gyp_vals.items())
-    else:
-      gyp_defines = ' '.join(gyp_vals)
     gn_args = ' '.join(contents.get('gn_args', []))
 
     vals = self.DefaultVals()
     vals['gn_args'] = gn_args
-    vals['gyp_defines'] = gyp_defines
-    vals['type'] = contents.get('mb_type', 'gn')
     return vals
 
   def ReadConfigFile(self):
@@ -689,14 +581,26 @@
     self.mixins = contents['mixins']
 
   def ReadIsolateMap(self):
-    if not self.Exists(self.args.isolate_map_file):
-      raise MBErr('isolate map file not found at %s' %
-                  self.args.isolate_map_file)
-    try:
-      return ast.literal_eval(self.ReadFile(self.args.isolate_map_file))
-    except SyntaxError as e:
-      raise MBErr('Failed to parse isolate map file "%s": %s' %
-                  (self.args.isolate_map_file, e))
+    if not self.args.isolate_map_files:
+      self.args.isolate_map_files = [self.default_isolate_map]
+
+    for f in self.args.isolate_map_files:
+      if not self.Exists(f):
+        raise MBErr('isolate map file not found at %s' % f)
+    isolate_maps = {}
+    for isolate_map in self.args.isolate_map_files:
+      try:
+        isolate_map = ast.literal_eval(self.ReadFile(isolate_map))
+        duplicates = set(isolate_map).intersection(isolate_maps)
+        if duplicates:
+          raise MBErr(
+              'Duplicate targets in isolate map files: %s.' %
+              ', '.join(duplicates))
+        isolate_maps.update(isolate_map)
+      except SyntaxError as e:
+        raise MBErr(
+            'Failed to parse isolate map file "%s": %s' % (isolate_map, e))
+    return isolate_maps
 
   def ConfigFromArgs(self):
     if self.args.config:
@@ -747,9 +651,6 @@
       'args_file': '',
       'cros_passthrough': False,
       'gn_args': '',
-      'gyp_defines': '',
-      'gyp_crosscompile': False,
-      'type': 'gn',
     }
 
   def FlattenMixins(self, mixins, vals, visited):
@@ -773,50 +674,11 @@
           vals['gn_args'] += ' ' + mixin_vals['gn_args']
         else:
           vals['gn_args'] = mixin_vals['gn_args']
-      if 'gyp_crosscompile' in mixin_vals:
-        vals['gyp_crosscompile'] = mixin_vals['gyp_crosscompile']
-      if 'gyp_defines' in mixin_vals:
-        if vals['gyp_defines']:
-          vals['gyp_defines'] += ' ' + mixin_vals['gyp_defines']
-        else:
-          vals['gyp_defines'] = mixin_vals['gyp_defines']
-      if 'type' in mixin_vals:
-        vals['type'] = mixin_vals['type']
 
       if 'mixins' in mixin_vals:
         self.FlattenMixins(mixin_vals['mixins'], vals, visited)
     return vals
 
-  def ClobberIfNeeded(self, vals):
-    path = self.args.path[0]
-    build_dir = self.ToAbsPath(path)
-    mb_type_path = self.PathJoin(build_dir, 'mb_type')
-    needs_clobber = False
-    new_mb_type = vals['type']
-    if self.Exists(build_dir):
-      if self.Exists(mb_type_path):
-        old_mb_type = self.ReadFile(mb_type_path)
-        if old_mb_type != new_mb_type:
-          self.Print("Build type mismatch: was %s, will be %s, clobbering %s" %
-                     (old_mb_type, new_mb_type, path))
-          needs_clobber = True
-      else:
-        # There is no 'mb_type' file in the build directory, so this probably
-        # means that the prior build(s) were not done through mb, and we
-        # have no idea if this was a GYP build or a GN build. Clobber it
-        # to be safe.
-        self.Print("%s/mb_type missing, clobbering to be safe" % path)
-        needs_clobber = True
-
-    if self.args.dryrun:
-      return
-
-    if needs_clobber:
-      self.RemoveDirectory(build_dir)
-
-    self.MaybeMakeDirectory(build_dir)
-    self.WriteFile(mb_type_path, new_mb_type)
-
   def RunGNGen(self, vals, compute_grit_inputs_for_analyze=False):
     build_dir = self.args.path[0]
 
@@ -853,14 +715,18 @@
       self.WriteFile(gn_runtime_deps_path, '\n'.join(labels) + '\n')
       cmd.append('--runtime-deps-list-file=%s' % gn_runtime_deps_path)
 
-    ret, _, _ = self.Run(cmd)
+    ret, output, _ = self.Run(cmd)
     if ret:
+        if self.args.json_output:
+          # write errors to json_output
+          self.WriteJSON({'output': output}, self.args.json_output)
         # If `gn gen` failed, we should exit early rather than trying to
         # generate isolates. Run() will have already logged any error output.
         self.Print('GN gen failed: %d' % ret)
         return ret
 
     android = 'target_os="android"' in vals['gn_args']
+    fuchsia = 'target_os="fuchsia"' in vals['gn_args']
     for target in swarming_targets:
       if android:
         # Android targets may be either android_apk or executable. The former
@@ -870,6 +736,11 @@
         runtime_deps_targets = [
             target + '.runtime_deps',
             'obj/%s.stamp.runtime_deps' % label.replace(':', '/')]
+      elif fuchsia:
+        # Only emit a runtime deps file for the group() target on Fuchsia.
+        label = isolate_map[target]['label']
+        runtime_deps_targets = [
+          'obj/%s.stamp.runtime_deps' % label.replace(':', '/')]
       elif (isolate_map[target]['type'] == 'script' or
             isolate_map[target].get('label_type') == 'group'):
         # For script targets, the build target is usually a group,
@@ -987,11 +858,17 @@
     else:
       subdir, exe = 'win', 'gn.exe'
 
-    gn_path = self.PathJoin(self.chromium_src_dir, 'buildtools', subdir, exe)
+    arch = platform.machine()
+    if (arch.startswith('s390') or arch.startswith('ppc') or
+        self.platform.startswith('aix')):
+      # use gn in PATH
+      gn_path = 'gn'
+    else:
+      gn_path = self.PathJoin(self.chromium_src_dir, 'buildtools', subdir, exe)
     return [gn_path, subcommand, path] + list(args)
 
 
-  def GNArgs(self, vals):
+  def GNArgs(self, vals, expand_imports=False):
     if vals['cros_passthrough']:
       if not 'GN_ARGS' in os.environ:
         raise MBErr('MB is expecting GN_ARGS to be in the environment')
@@ -1013,47 +890,24 @@
     if android_version_name:
       gn_args += ' android_default_version_name="%s"' % android_version_name
 
-    # Canonicalize the arg string into a sorted, newline-separated list
-    # of key-value pairs, and de-dup the keys if need be so that only
-    # the last instance of each arg is listed.
-    gn_args = gn_helpers.ToGNString(gn_helpers.FromGNArgs(gn_args))
+    args_gn_lines = []
+    parsed_gn_args = {}
 
     args_file = vals.get('args_file', None)
     if args_file:
-      gn_args = ('import("%s")\n' % vals['args_file']) + gn_args
-    return gn_args
+      if expand_imports:
+        content = self.ReadFile(self.ToAbsPath(args_file))
+        parsed_gn_args = gn_helpers.FromGNArgs(content)
+      else:
+        args_gn_lines.append('import("%s")' % args_file)
 
-  def RunGYPGen(self, vals):
-    path = self.args.path[0]
+    # Canonicalize the arg string into a sorted, newline-separated list
+    # of key-value pairs, and de-dup the keys if need be so that only
+    # the last instance of each arg is listed.
+    parsed_gn_args.update(gn_helpers.FromGNArgs(gn_args))
+    args_gn_lines.append(gn_helpers.ToGNString(parsed_gn_args))
 
-    output_dir = self.ParseGYPConfigPath(path)
-    cmd, env = self.GYPCmd(output_dir, vals)
-    ret, _, _ = self.Run(cmd, env=env)
-    return ret
-
-  def RunGYPAnalyze(self, vals):
-    output_dir = self.ParseGYPConfigPath(self.args.path[0])
-    if self.args.verbose:
-      inp = self.ReadInputJSON(['files', 'test_targets',
-                                'additional_compile_targets'])
-      self.Print()
-      self.Print('analyze input:')
-      self.PrintJSON(inp)
-      self.Print()
-
-    cmd, env = self.GYPCmd(output_dir, vals)
-    cmd.extend(['-f', 'analyzer',
-                '-G', 'config_path=%s' % self.args.input_path[0],
-                '-G', 'analyzer_output_path=%s' % self.args.output_path[0]])
-    ret, _, _ = self.Run(cmd, env=env)
-    if not ret and self.args.verbose:
-      outp = json.loads(self.ReadFile(self.args.output_path[0]))
-      self.Print()
-      self.Print('analyze output:')
-      self.PrintJSON(outp)
-      self.Print()
-
-    return ret
+    return '\n'.join(args_gn_lines)
 
   def ToAbsPath(self, build_path, *comps):
     return self.PathJoin(self.chromium_src_dir,
@@ -1066,86 +920,6 @@
       return path[2:].replace('/', self.sep)
     return self.RelPath(path, self.chromium_src_dir)
 
-  def ParseGYPConfigPath(self, path):
-    rpath = self.ToSrcRelPath(path)
-    output_dir, _, _ = rpath.rpartition(self.sep)
-    return output_dir
-
-  def GYPCmd(self, output_dir, vals):
-    if vals['cros_passthrough']:
-      if not 'GYP_DEFINES' in os.environ:
-        raise MBErr('MB is expecting GYP_DEFINES to be in the environment')
-      gyp_defines = os.environ['GYP_DEFINES']
-      if not 'chromeos=1' in gyp_defines:
-        raise MBErr('GYP_DEFINES is missing chromeos=1: (GYP_DEFINES=%s)' %
-                    gyp_defines)
-    else:
-      gyp_defines = vals['gyp_defines']
-
-    goma_dir = self.args.goma_dir
-
-    # GYP uses shlex.split() to split the gyp defines into separate arguments,
-    # so we can support backslashes and and spaces in arguments by quoting
-    # them, even on Windows, where this normally wouldn't work.
-    if goma_dir and ('\\' in goma_dir or ' ' in goma_dir):
-      goma_dir = "'%s'" % goma_dir
-
-    if goma_dir:
-      gyp_defines += ' gomadir=%s' % goma_dir
-
-    android_version_code = self.args.android_version_code
-    if android_version_code:
-      gyp_defines += ' app_manifest_version_code=%s' % android_version_code
-
-    android_version_name = self.args.android_version_name
-    if android_version_name:
-      gyp_defines += ' app_manifest_version_name=%s' % android_version_name
-
-    cmd = [
-        self.executable,
-        self.args.gyp_script,
-        '-G',
-        'output_dir=' + output_dir,
-    ]
-
-    # Ensure that we have an environment that only contains
-    # the exact values of the GYP variables we need.
-    env = os.environ.copy()
-
-    # This is a terrible hack to work around the fact that
-    # //tools/clang/scripts/update.py is invoked by GYP and GN but
-    # currently relies on an environment variable to figure out
-    # what revision to embed in the command line #defines.
-    # For GN, we've made this work via a gn arg that will cause update.py
-    # to get an additional command line arg, but getting that to work
-    # via GYP_DEFINES has proven difficult, so we rewrite the GYP_DEFINES
-    # to get rid of the arg and add the old var in, instead.
-    # See crbug.com/582737 for more on this. This can hopefully all
-    # go away with GYP.
-    m = re.search('llvm_force_head_revision=1\s*', gyp_defines)
-    if m:
-      env['LLVM_FORCE_HEAD_REVISION'] = '1'
-      gyp_defines = gyp_defines.replace(m.group(0), '')
-
-    # This is another terrible hack to work around the fact that
-    # GYP sets the link concurrency to use via the GYP_LINK_CONCURRENCY
-    # environment variable, and not via a proper GYP_DEFINE. See
-    # crbug.com/611491 for more on this.
-    m = re.search('gyp_link_concurrency=(\d+)(\s*)', gyp_defines)
-    if m:
-      env['GYP_LINK_CONCURRENCY'] = m.group(1)
-      gyp_defines = gyp_defines.replace(m.group(0), '')
-
-    env['GYP_GENERATORS'] = 'ninja'
-    if 'GYP_CHROMIUM_NO_ACTION' in env:
-      del env['GYP_CHROMIUM_NO_ACTION']
-    if 'GYP_CROSSCOMPILE' in env:
-      del env['GYP_CROSSCOMPILE']
-    env['GYP_DEFINES'] = gyp_defines
-    if vals['gyp_crosscompile']:
-      env['GYP_CROSSCOMPILE'] = '1'
-    return cmd, env
-
   def RunGNAnalyze(self, vals):
     # Analyze runs before 'gn gen' now, so we need to run gn gen
     # in order to ensure that we have a build directory.
@@ -1200,8 +974,11 @@
     try:
       self.WriteJSON(gn_inp, gn_input_path)
       cmd = self.GNCmd('analyze', build_path, gn_input_path, gn_output_path)
-      ret, _, _ = self.Run(cmd, force_verbose=True)
+      ret, output, _ = self.Run(cmd, force_verbose=True)
       if ret:
+        if self.args.json_output:
+          # write errors to json.output
+          self.WriteJSON({'output': output}, self.args.json_output)
         return ret
 
       gn_outp_str = self.ReadFile(gn_output_path)
@@ -1347,9 +1124,6 @@
       if env and var in env:
         self.Print('%s%s=%s' % (env_prefix, var, env_quoter(env[var])))
 
-    print_env('GYP_CROSSCOMPILE')
-    print_env('GYP_DEFINES')
-    print_env('GYP_LINK_CONCURRENCY')
     print_env('LLVM_FORCE_HEAD_REVISION')
 
     if cmd[0] == self.executable:
@@ -1416,7 +1190,7 @@
   def MaybeMakeDirectory(self, path):
     try:
       os.makedirs(path)
-    except OSError, e:
+    except OSError as e:
       if e.errno != errno.EEXIST:
         raise
 
@@ -1486,7 +1260,6 @@
 
 def QuoteForCmd(arg):
   # First, escape the arg so that CommandLineToArgvW will parse it properly.
-  # From //tools/gyp/pylib/gyp/msvs_emulation.py:23.
   if arg == '' or ' ' in arg or '"' in arg:
     quote_re = re.compile(r'(\\*)"')
     arg = '"%s"' % (quote_re.sub(lambda mo: 2 * mo.group(1) + '\\"', arg))
diff --git a/src/v8/tools/mb/mb_unittest.py b/src/v8/tools/mb/mb_unittest.py
index 1576375..a22686a 100755
--- a/src/v8/tools/mb/mb_unittest.py
+++ b/src/v8/tools/mb/mb_unittest.py
@@ -65,8 +65,6 @@
     self.files[path] = contents
 
   def Call(self, cmd, env=None, buffer_output=True):
-    if env:
-      self.cross_compile = env.get('GYP_CROSSCOMPILE')
     self.calls.append(cmd)
     if self.cmds:
       return self.cmds.pop(0)
@@ -112,13 +110,10 @@
   'masters': {
     'chromium': {},
     'fake_master': {
-      'fake_builder': 'gyp_rel_bot',
-      'fake_gn_builder': 'gn_rel_bot',
-      'fake_gyp_crosscompile_builder': 'gyp_crosscompile',
-      'fake_gn_debug_builder': 'gn_debug_goma',
-      'fake_gyp_builder': 'gyp_debug',
-      'fake_gn_args_bot': '//build/args/bots/fake_master/fake_gn_args_bot.gn',
-      'fake_multi_phase': { 'phase_1': 'gn_phase_1', 'phase_2': 'gn_phase_2'},
+      'fake_builder': 'rel_bot',
+      'fake_debug_builder': 'debug_goma',
+      'fake_args_bot': '//build/args/bots/fake_master/fake_args_bot.gn',
+      'fake_multi_phase': { 'phase_1': 'phase_1', 'phase_2': 'phase_2'},
       'fake_args_file': 'args_file_goma',
       'fake_args_file_twice': 'args_file_twice',
     },
@@ -126,38 +121,26 @@
   'configs': {
     'args_file_goma': ['args_file', 'goma'],
     'args_file_twice': ['args_file', 'args_file'],
-    'gyp_rel_bot': ['gyp', 'rel', 'goma'],
-    'gn_debug_goma': ['gn', 'debug', 'goma'],
-    'gyp_debug': ['gyp', 'debug', 'fake_feature1'],
-    'gn_rel_bot': ['gn', 'rel', 'goma'],
-    'gyp_crosscompile': ['gyp', 'crosscompile'],
-    'gn_phase_1': ['gn', 'phase_1'],
-    'gn_phase_2': ['gn', 'phase_2'],
+    'rel_bot': ['rel', 'goma', 'fake_feature1'],
+    'debug_goma': ['debug', 'goma'],
+    'phase_1': ['phase_1'],
+    'phase_2': ['phase_2'],
   },
   'mixins': {
-    'crosscompile': {
-      'gyp_crosscompile': True,
-    },
     'fake_feature1': {
       'gn_args': 'enable_doom_melon=true',
-      'gyp_defines': 'doom_melon=1',
     },
-    'gyp': {'type': 'gyp'},
-    'gn': {'type': 'gn'},
     'goma': {
       'gn_args': 'use_goma=true',
-      'gyp_defines': 'goma=1',
     },
     'args_file': {
       'args_file': '//build/args/fake.gn',
     },
     'phase_1': {
       'gn_args': 'phase=1',
-      'gyp_args': 'phase=1',
     },
     'phase_2': {
       'gn_args': 'phase=2',
-      'gyp_args': 'phase=2',
     },
     'rel': {
       'gn_args': 'is_debug=false',
@@ -169,28 +152,6 @@
 }
 """
 
-GYP_HACKS_CONFIG = """\
-{
-  'masters': {
-    'chromium': {},
-    'fake_master': {
-      'fake_builder': 'fake_config',
-    },
-  },
-  'configs': {
-    'fake_config': ['fake_mixin'],
-  },
-  'mixins': {
-    'fake_mixin': {
-      'type': 'gyp',
-      'gn_args': '',
-      'gyp_defines':
-         ('foo=bar llvm_force_head_revision=1 '
-          'gyp_link_concurrency=1 baz=1'),
-    },
-  },
-}
-"""
 
 TRYSERVER_CONFIG = """\
 {
@@ -229,7 +190,7 @@
         },
       }''')
     mbw.files.setdefault(
-        mbw.ToAbsPath('//build/args/bots/fake_master/fake_gn_args_bot.gn'),
+        mbw.ToAbsPath('//build/args/bots/fake_master/fake_args_bot.gn'),
         'is_debug = false\n')
     if files:
       for path, contents in files.items():
@@ -249,37 +210,6 @@
       self.assertEqual(mbw.err, err)
     return mbw
 
-  def test_clobber(self):
-    files = {
-      '/fake_src/out/Debug': None,
-      '/fake_src/out/Debug/mb_type': None,
-    }
-    mbw = self.fake_mbw(files)
-
-    # The first time we run this, the build dir doesn't exist, so no clobber.
-    self.check(['gen', '-c', 'gn_debug_goma', '//out/Debug'], mbw=mbw, ret=0)
-    self.assertEqual(mbw.rmdirs, [])
-    self.assertEqual(mbw.files['/fake_src/out/Debug/mb_type'], 'gn')
-
-    # The second time we run this, the build dir exists and matches, so no
-    # clobber.
-    self.check(['gen', '-c', 'gn_debug_goma', '//out/Debug'], mbw=mbw, ret=0)
-    self.assertEqual(mbw.rmdirs, [])
-    self.assertEqual(mbw.files['/fake_src/out/Debug/mb_type'], 'gn')
-
-    # Now we switch build types; this should result in a clobber.
-    self.check(['gen', '-c', 'gyp_debug', '//out/Debug'], mbw=mbw, ret=0)
-    self.assertEqual(mbw.rmdirs, ['/fake_src/out/Debug'])
-    self.assertEqual(mbw.files['/fake_src/out/Debug/mb_type'], 'gyp')
-
-    # Now we delete mb_type; this checks the case where the build dir
-    # exists but wasn't populated by mb; this should also result in a clobber.
-    del mbw.files['/fake_src/out/Debug/mb_type']
-    self.check(['gen', '-c', 'gyp_debug', '//out/Debug'], mbw=mbw, ret=0)
-    self.assertEqual(mbw.rmdirs,
-                     ['/fake_src/out/Debug', '/fake_src/out/Debug'])
-    self.assertEqual(mbw.files['/fake_src/out/Debug/mb_type'], 'gyp')
-
   def test_analyze(self):
     files = {'/tmp/in.json': '''{\
                "files": ["foo/foo_unittest.cc"],
@@ -295,7 +225,7 @@
     mbw = self.fake_mbw(files)
     mbw.Call = lambda cmd, env=None, buffer_output=True: (0, '', '')
 
-    self.check(['analyze', '-c', 'gn_debug_goma', '//out/Default',
+    self.check(['analyze', '-c', 'debug_goma', '//out/Default',
                 '/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0)
     out = json.loads(mbw.files['/tmp/out.json'])
     self.assertEqual(out, {
@@ -319,7 +249,7 @@
     mbw = self.fake_mbw(files)
     mbw.Call = lambda cmd, env=None, buffer_output=True: (0, '', '')
 
-    self.check(['analyze', '-c', 'gn_debug_goma', '//out/Default',
+    self.check(['analyze', '-c', 'debug_goma', '//out/Default',
                 '/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0)
     out = json.loads(mbw.files['/tmp/out.json'])
 
@@ -342,7 +272,7 @@
     mbw = self.fake_mbw(files)
     mbw.Call = lambda cmd, env=None, buffer_output=True: (0, '', '')
 
-    self.check(['analyze', '-c', 'gn_debug_goma', '//out/Default',
+    self.check(['analyze', '-c', 'debug_goma', '//out/Default',
                 '/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0)
     out = json.loads(mbw.files['/tmp/out.json'])
 
@@ -354,7 +284,7 @@
     self.assertEqual(['all', 'foo_unittests'], out['compile_targets'])
 
   def test_analyze_handles_way_too_many_results(self):
-    too_many_files = ', '.join(['"//foo:foo%d"' % i for i in xrange(4 * 1024)])
+    too_many_files = ', '.join(['"//foo:foo%d"' % i for i in range(4 * 1024)])
     files = {'/tmp/in.json': '''{\
                "files": ["foo/foo_unittest.cc"],
                "test_targets": ["foo_unittests"],
@@ -369,7 +299,7 @@
     mbw = self.fake_mbw(files)
     mbw.Call = lambda cmd, env=None, buffer_output=True: (0, '', '')
 
-    self.check(['analyze', '-c', 'gn_debug_goma', '//out/Default',
+    self.check(['analyze', '-c', 'debug_goma', '//out/Default',
                 '/tmp/in.json', '/tmp/out.json'], mbw=mbw, ret=0)
     out = json.loads(mbw.files['/tmp/out.json'])
 
@@ -379,9 +309,9 @@
     # test_targets and additional_compile_targets.
     self.assertEqual(['all', 'foo_unittests'], out['compile_targets'])
 
-  def test_gn_gen(self):
+  def test_gen(self):
     mbw = self.fake_mbw()
-    self.check(['gen', '-c', 'gn_debug_goma', '//out/Default', '-g', '/goma'],
+    self.check(['gen', '-c', 'debug_goma', '//out/Default', '-g', '/goma'],
                mbw=mbw, ret=0)
     self.assertMultiLineEqual(mbw.files['/fake_src/out/Default/args.gn'],
                               ('goma_dir = "/goma"\n'
@@ -394,7 +324,7 @@
                   mbw.out)
 
     mbw = self.fake_mbw(win32=True)
-    self.check(['gen', '-c', 'gn_debug_goma', '-g', 'c:\\goma', '//out/Debug'],
+    self.check(['gen', '-c', 'debug_goma', '-g', 'c:\\goma', '//out/Debug'],
                mbw=mbw, ret=0)
     self.assertMultiLineEqual(mbw.files['c:\\fake_src\\out\\Debug\\args.gn'],
                               ('goma_dir = "c:\\\\goma"\n'
@@ -404,14 +334,14 @@
                   '--check\n', mbw.out)
 
     mbw = self.fake_mbw()
-    self.check(['gen', '-m', 'fake_master', '-b', 'fake_gn_args_bot',
+    self.check(['gen', '-m', 'fake_master', '-b', 'fake_args_bot',
                 '//out/Debug'],
                mbw=mbw, ret=0)
     self.assertEqual(
         mbw.files['/fake_src/out/Debug/args.gn'],
-        'import("//build/args/bots/fake_master/fake_gn_args_bot.gn")\n')
+        'import("//build/args/bots/fake_master/fake_args_bot.gn")\n')
 
-  def test_gn_gen_args_file_mixins(self):
+  def test_gen_args_file_mixins(self):
     mbw = self.fake_mbw()
     self.check(['gen', '-m', 'fake_master', '-b', 'fake_args_file',
                 '//out/Debug'], mbw=mbw, ret=0)
@@ -425,14 +355,12 @@
     self.check(['gen', '-m', 'fake_master', '-b', 'fake_args_file_twice',
                 '//out/Debug'], mbw=mbw, ret=1)
 
-  def test_gn_gen_fails(self):
+  def test_gen_fails(self):
     mbw = self.fake_mbw()
     mbw.Call = lambda cmd, env=None, buffer_output=True: (1, '', '')
-    self.check(['gen', '-c', 'gn_debug_goma', '//out/Default'], mbw=mbw, ret=1)
+    self.check(['gen', '-c', 'debug_goma', '//out/Default'], mbw=mbw, ret=1)
 
-  # TODO(machenbach): Comment back in after swarming file parameter is used.
-  """
-  def test_gn_gen_swarming(self):
+  def test_gen_swarming(self):
     files = {
       '/tmp/swarming_targets': 'base_unittests\n',
       '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
@@ -448,7 +376,7 @@
     }
     mbw = self.fake_mbw(files)
     self.check(['gen',
-                '-c', 'gn_debug_goma',
+                '-c', 'debug_goma',
                 '--swarming-targets-file', '/tmp/swarming_targets',
                 '//out/Default'], mbw=mbw, ret=0)
     self.assertIn('/fake_src/out/Default/base_unittests.isolate',
@@ -456,7 +384,7 @@
     self.assertIn('/fake_src/out/Default/base_unittests.isolated.gen.json',
                   mbw.files)
 
-  def test_gn_gen_swarming_script(self):
+  def test_gen_swarming_script(self):
     files = {
       '/tmp/swarming_targets': 'cc_perftests\n',
       '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
@@ -473,7 +401,7 @@
     }
     mbw = self.fake_mbw(files=files, win32=True)
     self.check(['gen',
-                '-c', 'gn_debug_goma',
+                '-c', 'debug_goma',
                 '--swarming-targets-file', '/tmp/swarming_targets',
                 '--isolate-map-file',
                 '/fake_src/testing/buildbot/gn_isolate_map.pyl',
@@ -482,9 +410,77 @@
                   mbw.files)
     self.assertIn('c:\\fake_src\\out\\Default\\cc_perftests.isolated.gen.json',
                   mbw.files)
-  """  # pylint: disable=pointless-string-statement
 
-  def test_gn_isolate(self):
+
+  def test_multiple_isolate_maps(self):
+    files = {
+      '/tmp/swarming_targets': 'cc_perftests\n',
+      '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
+          "{'cc_perftests': {"
+          "  'label': '//cc:cc_perftests',"
+          "  'type': 'raw',"
+          "  'args': [],"
+          "}}\n"
+      ),
+      '/fake_src/testing/buildbot/gn_isolate_map2.pyl': (
+          "{'cc_perftests2': {"
+          "  'label': '//cc:cc_perftests',"
+          "  'type': 'raw',"
+          "  'args': [],"
+          "}}\n"
+      ),
+      'c:\\fake_src\out\Default\cc_perftests.exe.runtime_deps': (
+          "cc_perftests\n"
+      ),
+    }
+    mbw = self.fake_mbw(files=files, win32=True)
+    self.check(['gen',
+                '-c', 'debug_goma',
+                '--swarming-targets-file', '/tmp/swarming_targets',
+                '--isolate-map-file',
+                '/fake_src/testing/buildbot/gn_isolate_map.pyl',
+                '--isolate-map-file',
+                '/fake_src/testing/buildbot/gn_isolate_map2.pyl',
+                '//out/Default'], mbw=mbw, ret=0)
+    self.assertIn('c:\\fake_src\\out\\Default\\cc_perftests.isolate',
+                  mbw.files)
+    self.assertIn('c:\\fake_src\\out\\Default\\cc_perftests.isolated.gen.json',
+                  mbw.files)
+
+
+  def test_duplicate_isolate_maps(self):
+    files = {
+      '/tmp/swarming_targets': 'cc_perftests\n',
+      '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
+          "{'cc_perftests': {"
+          "  'label': '//cc:cc_perftests',"
+          "  'type': 'raw',"
+          "  'args': [],"
+          "}}\n"
+      ),
+      '/fake_src/testing/buildbot/gn_isolate_map2.pyl': (
+          "{'cc_perftests': {"
+          "  'label': '//cc:cc_perftests',"
+          "  'type': 'raw',"
+          "  'args': [],"
+          "}}\n"
+      ),
+      'c:\\fake_src\out\Default\cc_perftests.exe.runtime_deps': (
+          "cc_perftests\n"
+      ),
+    }
+    mbw = self.fake_mbw(files=files, win32=True)
+    # Check that passing duplicate targets into mb fails.
+    self.check(['gen',
+                '-c', 'debug_goma',
+                '--swarming-targets-file', '/tmp/swarming_targets',
+                '--isolate-map-file',
+                '/fake_src/testing/buildbot/gn_isolate_map.pyl',
+                '--isolate-map-file',
+                '/fake_src/testing/buildbot/gn_isolate_map2.pyl',
+                '//out/Default'], mbw=mbw, ret=1)
+
+  def test_isolate(self):
     files = {
       '/fake_src/out/Default/toolchain.ninja': "",
       '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
@@ -498,7 +494,7 @@
           "base_unittests\n"
       ),
     }
-    self.check(['isolate', '-c', 'gn_debug_goma', '//out/Default',
+    self.check(['isolate', '-c', 'debug_goma', '//out/Default',
                 'base_unittests'], files=files, ret=0)
 
     # test running isolate on an existing build_dir
@@ -506,11 +502,10 @@
     self.check(['isolate', '//out/Default', 'base_unittests'],
                files=files, ret=0)
 
-    files['/fake_src/out/Default/mb_type'] = 'gn\n'
     self.check(['isolate', '//out/Default', 'base_unittests'],
                files=files, ret=0)
 
-  def test_gn_run(self):
+  def test_run(self):
     files = {
       '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
           "{'base_unittests': {"
@@ -523,55 +518,61 @@
           "base_unittests\n"
       ),
     }
-    self.check(['run', '-c', 'gn_debug_goma', '//out/Default',
+    self.check(['run', '-c', 'debug_goma', '//out/Default',
                 'base_unittests'], files=files, ret=0)
 
-  def test_gn_lookup(self):
-    self.check(['lookup', '-c', 'gn_debug_goma'], ret=0)
+  def test_run_swarmed(self):
+    files = {
+      '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
+          "{'base_unittests': {"
+          "  'label': '//base:base_unittests',"
+          "  'type': 'raw',"
+          "  'args': [],"
+          "}}\n"
+      ),
+      '/fake_src/out/Default/base_unittests.runtime_deps': (
+          "base_unittests\n"
+      ),
+    }
 
-  def test_gn_lookup_goma_dir_expansion(self):
-    self.check(['lookup', '-c', 'gn_rel_bot', '-g', '/foo'], ret=0,
+    def run_stub(cmd, **_kwargs):
+      if 'isolate.py' in cmd[1]:
+        return 0, 'fake_hash base_unittests', ''
+      else:
+        return 0, '', ''
+
+    mbw = self.fake_mbw(files=files)
+    mbw.Run = run_stub
+    self.check(['run', '-s', '-c', 'debug_goma', '//out/Default',
+                'base_unittests'], mbw=mbw, ret=0)
+    self.check(['run', '-s', '-c', 'debug_goma', '-d', 'os', 'Win7',
+                '//out/Default', 'base_unittests'], mbw=mbw, ret=0)
+
+  def test_lookup(self):
+    self.check(['lookup', '-c', 'debug_goma'], ret=0,
                out=('\n'
                     'Writing """\\\n'
+                    'is_debug = true\n'
+                    'use_goma = true\n'
+                    '""" to _path_/args.gn.\n\n'
+                    '/fake_src/buildtools/linux64/gn gen _path_\n'))
+
+  def test_quiet_lookup(self):
+    self.check(['lookup', '-c', 'debug_goma', '--quiet'], ret=0,
+               out=('is_debug = true\n'
+                    'use_goma = true\n'))
+
+  def test_lookup_goma_dir_expansion(self):
+    self.check(['lookup', '-c', 'rel_bot', '-g', '/foo'], ret=0,
+               out=('\n'
+                    'Writing """\\\n'
+                    'enable_doom_melon = true\n'
                     'goma_dir = "/foo"\n'
                     'is_debug = false\n'
                     'use_goma = true\n'
                     '""" to _path_/args.gn.\n\n'
                     '/fake_src/buildtools/linux64/gn gen _path_\n'))
 
-  def test_gyp_analyze(self):
-    mbw = self.check(['analyze', '-c', 'gyp_rel_bot', '//out/Release',
-                      '/tmp/in.json', '/tmp/out.json'], ret=0)
-    self.assertIn('analyzer', mbw.calls[0])
-
-  def test_gyp_crosscompile(self):
-    mbw = self.fake_mbw()
-    self.check(['gen', '-c', 'gyp_crosscompile', '//out/Release'],
-               mbw=mbw, ret=0)
-    self.assertTrue(mbw.cross_compile)
-
-  def test_gyp_gen(self):
-    self.check(['gen', '-c', 'gyp_rel_bot', '-g', '/goma', '//out/Release'],
-               ret=0,
-               out=("GYP_DEFINES='goma=1 gomadir=/goma'\n"
-                    "python build/gyp_chromium -G output_dir=out\n"))
-
-    mbw = self.fake_mbw(win32=True)
-    self.check(['gen', '-c', 'gyp_rel_bot', '-g', 'c:\\goma', '//out/Release'],
-               mbw=mbw, ret=0,
-               out=("set GYP_DEFINES=goma=1 gomadir='c:\\goma'\n"
-                    "python build\\gyp_chromium -G output_dir=out\n"))
-
-  def test_gyp_gen_fails(self):
-    mbw = self.fake_mbw()
-    mbw.Call = lambda cmd, env=None, buffer_output=True: (1, '', '')
-    self.check(['gen', '-c', 'gyp_rel_bot', '//out/Release'], mbw=mbw, ret=1)
-
-  def test_gyp_lookup_goma_dir_expansion(self):
-    self.check(['lookup', '-c', 'gyp_rel_bot', '-g', '/foo'], ret=0,
-               out=("GYP_DEFINES='goma=1 gomadir=/foo'\n"
-                    "python build/gyp_chromium -G output_dir=_path_\n"))
-
   def test_help(self):
     orig_stdout = sys.stdout
     try:
@@ -589,7 +590,7 @@
     self.assertIn('Must specify a build --phase', mbw.out)
 
     # Check that passing a --phase to a single-phase builder fails.
-    mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_gn_builder',
+    mbw = self.check(['lookup', '-m', 'fake_master', '-b', 'fake_builder',
                       '--phase', 'phase_1'], ret=1)
     self.assertIn('Must not specify a build --phase', mbw.out)
 
@@ -607,20 +608,23 @@
                       '--phase', 'phase_2'], ret=0)
     self.assertIn('phase = 2', mbw.out)
 
+  def test_recursive_lookup(self):
+    files = {
+        '/fake_src/build/args/fake.gn': (
+          'enable_doom_melon = true\n'
+          'enable_antidoom_banana = true\n'
+        )
+    }
+    self.check(['lookup', '-m', 'fake_master', '-b', 'fake_args_file',
+                '--recursive'], files=files, ret=0,
+               out=('enable_antidoom_banana = true\n'
+                    'enable_doom_melon = true\n'
+                    'use_goma = true\n'))
+
   def test_validate(self):
     mbw = self.fake_mbw()
     self.check(['validate'], mbw=mbw, ret=0)
 
-  def test_gyp_env_hacks(self):
-    mbw = self.fake_mbw()
-    mbw.files[mbw.default_config] = GYP_HACKS_CONFIG
-    self.check(['lookup', '-c', 'fake_config'], mbw=mbw,
-               ret=0,
-               out=("GYP_DEFINES='foo=bar baz=1'\n"
-                    "GYP_LINK_CONCURRENCY=1\n"
-                    "LLVM_FORCE_HEAD_REVISION=1\n"
-                    "python build/gyp_chromium -G output_dir=_path_\n"))
-
   def test_buildbucket(self):
     mbw = self.fake_mbw()
     mbw.files[mbw.default_config] = TRYSERVER_CONFIG
diff --git a/src/v8/tools/mips_toolchain.tar.gz.sha1 b/src/v8/tools/mips_toolchain.tar.gz.sha1
new file mode 100644
index 0000000..8d45723
--- /dev/null
+++ b/src/v8/tools/mips_toolchain.tar.gz.sha1
@@ -0,0 +1 @@
+d51b5d903340262d8d13ecd51054c16a901b3cf3
\ No newline at end of file
diff --git a/src/v8/tools/node/README.md b/src/v8/tools/node/README.md
new file mode 100644
index 0000000..dc16c91
--- /dev/null
+++ b/src/v8/tools/node/README.md
@@ -0,0 +1,12 @@
+# Node.js Backports
+
+We no longer maintain our own backport script.
+
+For backporting V8 changes to Node.js, there is a useful script in
+[node-core-utils][1]. You can use the `git node v8 backport` command, which will
+bump the necessary V8 version numbers depending on the specific branch.
+
+See the [Node.js documentation][2] on V8 backports for a guide.
+
+[1]: https://github.com/nodejs/node-core-utils
+[2]: https://github.com/nodejs/node/blob/master/doc/guides/maintaining-V8.md
diff --git a/src/v8/tools/node/fetch_deps.py b/src/v8/tools/node/fetch_deps.py
index a3e6d74..ee5b629 100755
--- a/src/v8/tools/node/fetch_deps.py
+++ b/src/v8/tools/node/fetch_deps.py
@@ -9,6 +9,9 @@
 Usage: fetch_deps.py <v8-path>
 """
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import os
 import subprocess
 import sys
@@ -22,42 +25,43 @@
     "managed"     : False,
     "custom_deps" : {
       # These deps are already part of Node.js.
-      "v8/base/trace_event/common" : None,
-      "v8/testing/gtest"           : None,
-      "v8/third_party/jinja2"      : None,
-      "v8/third_party/markupsafe"  : None,
+      "v8/base/trace_event/common"            : None,
+      "v8/third_party/googletest/src"         : None,
       # These deps are unnecessary for building.
       "v8/test/benchmarks/data"               : None,
       "v8/testing/gmock"                      : None,
       "v8/test/mozilla/data"                  : None,
       "v8/test/test262/data"                  : None,
       "v8/test/test262/harness"               : None,
-      "v8/test/wasm-js"                       : None,
-      "v8/third_party/android_tools"          : None,
+      "v8/third_party/android_ndk"            : None,
+      "v8/third_party/android_sdk"            : None,
       "v8/third_party/catapult"               : None,
       "v8/third_party/colorama/src"           : None,
+      "v8/third_party/fuchsia-sdk"            : None,
       "v8/third_party/instrumented_libraries" : None,
-      "v8/tools/gyp"                          : None,
       "v8/tools/luci-go"                      : None,
       "v8/tools/swarming_client"              : None,
-    },
-    "custom_vars": {
-      "build_for_node" : True,
+      "v8/third_party/qemu-linux-x64"         : None,
     },
   },
 ]
 
 def EnsureGit(v8_path):
+  def git(args):
+    # shell=True needed on Windows to resolve git.bat.
+    return subprocess.check_output(
+        "git " + args, cwd=v8_path, shell=True).strip()
+
   expected_git_dir = os.path.join(v8_path, ".git")
-  actual_git_dir = subprocess.check_output(
-      ["git", "rev-parse", "--absolute-git-dir"], cwd=v8_path).strip()
+  actual_git_dir = git("rev-parse --absolute-git-dir")
   if expected_git_dir == actual_git_dir:
-    print "V8 is tracked stand-alone by git."
+    print("V8 is tracked stand-alone by git.")
     return False
-  print "Initializing temporary git repository in v8."
-  subprocess.check_call(["git", "init"], cwd=v8_path)
-  subprocess.check_call(["git", "commit", "--allow-empty", "-m", "init"],
-                        cwd=v8_path)
+  print("Initializing temporary git repository in v8.")
+  git("init")
+  git("config user.name \"Ada Lovelace\"")
+  git("config user.email ada@lovela.ce")
+  git("commit --allow-empty -m init")
   return True
 
 def FetchDeps(v8_path):
@@ -70,14 +74,15 @@
 
   temporary_git = EnsureGit(v8_path)
   try:
-    print "Fetching dependencies."
+    print("Fetching dependencies.")
     env = os.environ.copy()
     # gclient needs to have depot_tools in the PATH.
     env["PATH"] = depot_tools + os.pathsep + env["PATH"]
+    gclient = os.path.join(depot_tools, "gclient.py")
     spec = "solutions = %s" % GCLIENT_SOLUTION
-    subprocess.check_call(["gclient", "sync", "--spec", spec],
-                          cwd=os.path.join(v8_path, os.path.pardir),
-                          env=env)
+    subprocess.check_call([sys.executable, gclient, "sync", "--spec", spec],
+                           cwd=os.path.join(v8_path, os.path.pardir),
+                           env=env)
   except:
     raise
   finally:
@@ -88,8 +93,8 @@
         os.path.join(v8_path, os.pardir, ".gclient_entries"))
     if os.path.isfile(gclient_entries):
       os.remove(gclient_entries)
-  # Enable building with GN for configure script.
-  return True
+
+  return depot_tools
 
 
 if __name__ == "__main__":
diff --git a/src/v8/tools/node/node_common.py b/src/v8/tools/node/node_common.py
index f7ca3a6..2efb218 100755
--- a/src/v8/tools/node/node_common.py
+++ b/src/v8/tools/node/node_common.py
@@ -3,10 +3,15 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import os
+import pipes
 import shutil
 import stat
 import subprocess
+import sys
 
 DEPOT_TOOLS_URL = \
   "https://chromium.googlesource.com/chromium/tools/depot_tools.git"
@@ -15,27 +20,33 @@
   def _Get(v8_path):
     depot_tools = os.path.join(v8_path, "_depot_tools")
     try:
-      gclient_path = os.path.join(depot_tools, "gclient")
-      gclient_check = subprocess.check_output([gclient_path, "--version"])
-      if "gclient.py" in gclient_check:
+      gclient_path = os.path.join(depot_tools, "gclient.py")
+      if os.path.isfile(gclient_path):
         return depot_tools
     except:
       pass
     if fetch_if_not_exist:
-      print "Checking out depot_tools."
-      subprocess.check_call(["git", "clone", DEPOT_TOOLS_URL, depot_tools])
+      print("Checking out depot_tools.")
+      # shell=True needed on Windows to resolve git.bat.
+      subprocess.check_call("git clone {} {}".format(
+          pipes.quote(DEPOT_TOOLS_URL),
+          pipes.quote(depot_tools)), shell=True)
+      # Using check_output to hide warning messages.
+      subprocess.check_output(
+          [sys.executable, gclient_path, "metrics", "--opt-out"],
+          cwd=depot_tools)
       return depot_tools
     return None
   depot_tools = _Get(v8_path)
   assert depot_tools is not None
-  print "Using depot tools in %s" % depot_tools
+  print("Using depot tools in %s" % depot_tools)
   return depot_tools
 
 def UninitGit(v8_path):
-  print "Uninitializing temporary git repository"
+  print("Uninitializing temporary git repository")
   target = os.path.join(v8_path, ".git")
   if os.path.isdir(target):
-    print ">> Cleaning up %s" % target
+    print(">> Cleaning up %s" % target)
     def OnRmError(func, path, exec_info):
       # This might happen on Windows
       os.chmod(path, stat.S_IWRITE)
diff --git a/src/v8/tools/node/testdata/v8/testing/gtest/baz/gtest_foo b/src/v8/tools/node/testdata/v8/testing/gtest/baz/gtest_foo
deleted file mode 100644
index eb1ae45..0000000
--- a/src/v8/tools/node/testdata/v8/testing/gtest/baz/gtest_foo
+++ /dev/null
@@ -1 +0,0 @@
-...
diff --git a/src/v8/tools/node/testdata/v8/testing/gtest/baz/gtest_new b/src/v8/tools/node/testdata/v8/testing/gtest/baz/gtest_new
deleted file mode 100644
index eb1ae45..0000000
--- a/src/v8/tools/node/testdata/v8/testing/gtest/baz/gtest_new
+++ /dev/null
@@ -1 +0,0 @@
-...
diff --git a/src/v8/tools/node/testdata/v8/testing/gtest/gtest_bar b/src/v8/tools/node/testdata/v8/testing/gtest/gtest_bar
deleted file mode 100644
index eb1ae45..0000000
--- a/src/v8/tools/node/testdata/v8/testing/gtest/gtest_bar
+++ /dev/null
@@ -1 +0,0 @@
-...
diff --git a/src/v8/tools/node/testdata/v8/testing/gtest/gtest_new b/src/v8/tools/node/testdata/v8/testing/gtest/gtest_new
deleted file mode 100644
index eb1ae45..0000000
--- a/src/v8/tools/node/testdata/v8/testing/gtest/gtest_new
+++ /dev/null
@@ -1 +0,0 @@
-...
diff --git a/src/v8/tools/node/testdata/v8/testing/gtest/new/gtest_new b/src/v8/tools/node/testdata/v8/testing/gtest/new/gtest_new
deleted file mode 100644
index eb1ae45..0000000
--- a/src/v8/tools/node/testdata/v8/testing/gtest/new/gtest_new
+++ /dev/null
@@ -1 +0,0 @@
-...
diff --git a/src/v8/tools/node/testdata/v8/third_party/markupsafe/markupsafe b/src/v8/tools/node/testdata/v8/third_party/markupsafe/markupsafe
deleted file mode 100644
index e69de29..0000000
--- a/src/v8/tools/node/testdata/v8/third_party/markupsafe/markupsafe
+++ /dev/null
diff --git a/src/v8/tools/parse-processor b/src/v8/tools/parse-processor
index 588f120..1c71752 100755
--- a/src/v8/tools/parse-processor
+++ b/src/v8/tools/parse-processor
@@ -28,12 +28,13 @@
 
 if [ ! -x "$d8_exec" ]; then
   echo "d8 shell not found in $D8_PATH"
-  echo "To build, execute 'make native' from the V8 directory"
+  echo "Please provide path to d8 as env var in D8_PATH"
   exit 1
 fi
 
 # nm spits out 'no symbols found' messages to stderr.
-cat $log_file | $d8_exec --trace-maps --allow-natives-syntax --trace-deopt $tools_path/splaytree.js $tools_path/codemap.js \
+cat $log_file | $d8_exec --allow-natives-syntax \
+  $tools_path/splaytree.js $tools_path/codemap.js \
   $tools_path/csvparser.js $tools_path/consarray.js \
   $tools_path/profile.js $tools_path/profile_view.js \
   $tools_path/logreader.js $tools_path/arguments.js \
diff --git a/src/v8/tools/parse-processor.html b/src/v8/tools/parse-processor.html
index e41fffb..0f5818e 100644
--- a/src/v8/tools/parse-processor.html
+++ b/src/v8/tools/parse-processor.html
@@ -1,3 +1,4 @@
+<!DOCTYPE html>
 <html>
 <!--
 Copyright 2016 the V8 project authors. All rights reserved.  Use of this source
@@ -5,6 +6,8 @@
 -->
 
 <head>
+<meta charset="utf-8">
+<title>V8 Parse Processor</title>
 <style>
   html {
     font-family: monospace;
@@ -62,23 +65,56 @@
     overflow-y: scroll;
   }
 
-
   .funktion {
   }
 
+  .script-size {
+    display: inline-flex;
+    background-color: #505050;
+    border-radius: 3px;
+    padding: 3px;
+    margin: 2px;
+    white-space: nowrap;
+    overflow: hidden;
+    text-decoration: none;
+    color: white;
+  }
+  .script-size.eval {
+    background-color: #ee6300fc;
+  }
+  .script-size.streaming {
+    background-color: #008aff;
+  }
+  .script-size.deserialized {
+    background-color: #1fad00fc;
+  }
+
+  .script-details {
+    padding-right: 5px;
+    margin-right: 4px;
+  }
+  /* all but the last need a border  */
+  .script-details:nth-last-child(n+2) {
+    border-right: 1px white solid;
+  }
+
+  .script-details.id {
+    min-width: 2em;
+    text-align: right;
+  }
 </style>
-<script src="./splaytree.js" type="text/javascript"></script>
-<script src="./codemap.js" type="text/javascript"></script>
-<script src="./csvparser.js" type="text/javascript"></script>
-<script src="./consarray.js" type="text/javascript"></script>
-<script src="./profile.js" type="text/javascript"></script>
-<script src="./profile_view.js" type="text/javascript"></script>
-<script src="./logreader.js" type="text/javascript"></script>
-<script src="./arguments.js" type="text/javascript"></script>
-<script src="./parse-processor.js" type="text/javascript"></script>
-<script src="./SourceMap.js" type="text/javascript"></script>
-<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
-<script type="text/javascript">
+<script src="./splaytree.js"></script>
+<script src="./codemap.js"></script>
+<script src="./csvparser.js"></script>
+<script src="./consarray.js"></script>
+<script src="./profile.js"></script>
+<script src="./profile_view.js"></script>
+<script src="./logreader.js"></script>
+<script src="./arguments.js"></script>
+<script src="./parse-processor.js"></script>
+<script src="./SourceMap.js"></script>
+<script src="https://www.gstatic.com/charts/loader.js"></script>
+<script>
 "use strict";
 google.charts.load('current', {packages: ['corechart']});
 
@@ -86,7 +122,6 @@
   return document.querySelector(query);
 }
 
-
 function loadFile() {
   let files = $('#uploadInput').files;
 
@@ -149,19 +184,20 @@
 }
 
 function delay(t) {
-  return new Promise(resolve = > setTimeout(resolve, t));
+  return new Promise(resolve => setTimeout(resolve, t));
 }
 
 function renderParseResults(parseProcessor) {
   let result = $('#result');
   // clear out all existing result pages;
   result.innerHTML = '';
-  const start = parseProcessor.firstEvent;
-  const end = parseProcessor.lastEvent;
+  const start = parseProcessor.firstEventTimestamp;
+  const end = parseProcessor.lastEventTimestamp;
   renderScript(result, parseProcessor.totalScript, start, end);
   // Build up the graphs lazily to keep the page responsive.
   parseProcessor.scripts.forEach(
       script => renderScript(result, script, start, end));
+  renderScriptSizes(parseProcessor);
   // Install an intersection observer to lazily load the graphs when the script
   // div becomes visible for the first time.
   var io = new IntersectionObserver((entries, observer) => {
@@ -172,11 +208,10 @@
       appendGraph(target.script, target, start, end);
       observer.unobserve(entry.target);
     });
-  }, {});
+  }, {rootMargin: '400px'});
   document.querySelectorAll('.script').forEach(div => io.observe(div));
 }
 
-
 const kTimeFactor = 10;
 const kHeight = 20;
 const kFunktionTopOffset = 50;
@@ -189,32 +224,69 @@
   scriptDiv.script = script;
 
   let scriptTitle = h3();
-  if (script.file) scriptTitle.appendChild(a(script.file, script.file));
-  let anchor = a("", ' id=' + script.id);
+  let anchor = a("", 'Script #' + script.id);
   anchor.name = "script"+script.id
   scriptTitle.appendChild(anchor);
   scriptDiv.appendChild(scriptTitle);
+  if (script.file) scriptTitle.appendChild(a(script.file, script.file));
   let summary = createNode('pre', 'script-details');
   summary.appendChild(text(script.summary));
   scriptDiv.appendChild(summary);
   result.appendChild(scriptDiv);
-  return scriptDiv;
+}
+
+function renderScriptSizes(parseProcessor) {
+  let scriptsDiv = $('#scripts');
+  parseProcessor.scripts.forEach(
+    script => {
+      let scriptDiv = a('#script'+script.id, '', 'script-size');
+      let scriptId = div('script-details');
+      scriptId.classList.add('id');
+      scriptId.innerText = script.id;
+      scriptDiv.appendChild(scriptId);
+      let scriptSize = div('script-details');
+      scriptSize.innerText = BYTES(script.bytesTotal);
+      scriptDiv.appendChild(scriptSize);
+      let scriptUrl = div('script-details');
+      if (script.isEval) {
+        scriptUrl.innerText = "eval";
+        scriptDiv.classList.add('eval');
+      } else {
+        scriptUrl.innerText = script.file.split("/").pop();
+      }
+      if (script.isStreamingCompiled ) {
+        scriptDiv.classList.add('streaming');
+      } else if (script.deserializationTimestamp > 0) {
+        scriptDiv.classList.add('deserialized');
+      }
+      scriptDiv.appendChild(scriptUrl);
+      scriptDiv.style.width = script.bytesTotal * 0.001;
+      scriptsDiv.appendChild(scriptDiv);
+    });
 }
 
 const kMaxTime = 120 * kSecondsToMillis;
 // Resolution of the graphs
 const kTimeIncrement = 1;
 const kSelectionTimespan = 2;
+// TODO(cbruni): support compilation cache hit.
 const series = [
-//    ['firstParseEvent', 'Any Parse Event'],
+    ['firstParseEvent', 'Any Parse', 'area'],
+    ['execution', '1st Exec', 'area'],
+    ['firstCompileEvent', 'Any Compile', 'area'],
+    ['compile', 'Eager Compile'],
+    ['lazyCompile', 'Lazy Compile'],
     ['parse', 'Parsing'],
-//    ['preparse', 'Preparsing'],
-//    ['resolution', 'Preparsing with Var. Resolution'],
-    ['lazyCompile', 'Lazy Compilation'],
-    ['compile', 'Eager Compilation'],
-    ['execution', 'First Execution'],
+    ['preparse', 'Preparse'],
+    ['resolution', 'Preparse with Var. Resolution'],
+    ['deserialization', 'Deserialization'],
+    ['optimization', 'Optimize'],
 ];
 const metricNames = series.map(each => each[0]);
+// Display cumulative values (useful for bytes).
+const kCumulative = true;
+// Include durations in the graphs.
+const kUseDuration = false;
 
 
 function appendGraph(script, parentNode, start, end) {
@@ -223,27 +295,34 @@
 
   console.time(timerLabel);
   let data = new google.visualization.DataTable();
-  data.addColumn('number', 'Time');
+  data.addColumn('number', 'Duration');
   // The series are interleave bytes processed, time spent and thus have two
   // different vAxes.
   let seriesOptions = [];
-  series.forEach(each => {
-    let description = each[1];
+  let colors = ['#4D4D4D', '#fff700', '#5DA5DA', '#FAA43A', '#60BD68',
+      '#F17CB0', '#B2912F', '#B276B2', '#DECF3F', '#F15854'];
+  series.forEach(([metric, description, type]) => {
+    let color = colors.shift();
     // Add the bytes column.
-    data.addColumn('number', description + ' Bytes');
-    seriesOptions.push({targetAxisIndex: 0});
+    data.addColumn('number', description);
+    let options = {targetAxisIndex: 0, color: color};
+    if (type == 'area') options.type = 'area';
+    seriesOptions.push(options)
     // Add the time column.
-    data.addColumn('number', description + ' Time');
-    seriesOptions.push({targetAxisIndex: 1, lineDashStyle: [3, 2]});
+    if (kUseDuration) {
+      data.addColumn('number', description + ' Duration');
+      seriesOptions.push(
+          {targetAxisIndex: 1, color: color, lineDashStyle: [3, 2]});
+    }
   });
-  // The first entry contains the total.
-  seriesOptions[0].type = 'area';
 
   const maxTime = Math.min(kMaxTime, end);
   console.time('metrics');
   let metricValues =
-    script.getAccumulatedTimeMetrics(metricNames , 0, maxTime, kTimeIncrement);
+    script.getAccumulatedTimeMetrics(metricNames , 0, maxTime, kTimeIncrement,
+        kCumulative, kUseDuration);
   console.timeEnd('metrics');
+  // Make sure that the series added to the graph matches the returned values.
   console.assert(metricValues[0].length == seriesOptions.length + 1);
   data.addRows(metricValues);
 
@@ -257,11 +336,11 @@
     },
     vAxes: {
       0: {title: 'Bytes Touched', format: 'short'},
-      1: {title: 'Time', format: '#,###ms'}
+      1: {title: 'Duration', format: '#,###ms'}
     },
     height: 400,
     width: 1000,
-    chartArea: {left: '5%', top: '15%', width: "85%", height: "75%"},
+    chartArea: {left: 70, top: 0, right: 160, height: "90%"},
     // The first series should be a area chart (total bytes touched),
     series: seriesOptions,
     // everthing else is a line.
@@ -275,27 +354,29 @@
   google.visualization.events.addListener(chart, 'select',
       () => selectGraphPointHandler(chart, data, script, parentNode));
   chart.draw(data, options);
+  // Add event listeners
   console.timeEnd(timerLabel);
 }
 
-
 function selectGraphPointHandler(chart, data, script, parentNode) {
   let selection = chart.getSelection();
   if (selection.length <= 0) return;
   // Display a list of funktions with events at the given time.
   let {row, column} = selection[0];
   if (row === null|| column === null) return;
-  let name = series[((column-1)/2) | 0][0];
+  const kEntrySize = kUseDuration ? 2 : 1;
+  let [metric, description] = series[((column-1)/ kEntrySize) | 0];
   let time = data.getValue(row, 0);
   let funktions = script.getFunktionsAtTime(
-        time * kSecondsToMillis, kSelectionTimespan, name);
+        time * kSecondsToMillis, kSelectionTimespan, metric);
   let oldList = parentNode.querySelector('.funktion-list');
-  parentNode.replaceChild(createFunktionList(name, time, funktions), oldList);
+  parentNode.replaceChild(
+      createFunktionList(metric, description, time, funktions), oldList);
 }
 
-function createFunktionList(metric, time, funktions) {
+function createFunktionList(metric, description, time, funktions) {
   let container = createNode('div', 'funktion-list');
-  container.appendChild(h3('Changes of ' + metric + ' at ' +
+  container.appendChild(h3('Changes of "' + description + '" at ' +
         time + 's: ' + funktions.length));
   let listNode = createNode('ul');
   funktions.forEach(funktion => {
@@ -311,8 +392,6 @@
   container.appendChild(listNode);
   return container;
 }
-
-
 </script>
 </head>
 
@@ -326,10 +405,14 @@
   <h2>Data</h2>
   <form name="fileForm">
     <p>
-      <input id="uploadInput" type="file" name="files" onchange="loadFile();"> trace entries: <span id="count">0</span>
+      <input id="uploadInput" type="file" name="files" onchange="loadFile();" accept=".log"> trace entries: <span id="count">0</span>
     </p>
   </form>
 
+
+  <h2>Scripts</h2>
+  <div id="scripts"></div>
+
   <h2>Result</h2>
   <div id="result"></div>
 </body>
diff --git a/src/v8/tools/parse-processor.js b/src/v8/tools/parse-processor.js
index 30b593a..b829cf2 100644
--- a/src/v8/tools/parse-processor.js
+++ b/src/v8/tools/parse-processor.js
@@ -22,13 +22,13 @@
 // confusion between the decimal and thousands separator is big (alternating
 // between comma "," vs dot "."). The Swiss formatting uses "'" as a thousands
 // separator, dropping most of that confusion.
-var numberFormat = new Intl.NumberFormat('de-CH', {
+const numberFormat = new Intl.NumberFormat('de-CH', {
   maximumFractionDigits: 2,
   minimumFractionDigits: 2,
 });
 
 function formatNumber(value) {
-  return formatNumber(value);
+  return numberFormat.format(value);
 }
 
 function BYTES(bytes, total) {
@@ -50,48 +50,136 @@
   return Math.round(value / total * 100) + "%";
 }
 
-function timestampMin(list) {
-  let result = -1;
-  list.forEach(timestamp => {
-    if (result === -1) {
-      result = timestamp;
-    } else if (timestamp != -1) {
-      result = Math.min(result, timestamp);
+// ===========================================================================
+const kNoTimeMetrics = {
+  __proto__: null,
+  executionDuration: 0,
+  firstEventTimestamp: 0,
+  firstParseEventTimestamp: 0,
+  lastParseEventTimestamp: 0,
+  lastEventTimestamp: 0
+};
+
+class CompilationUnit {
+  constructor() {
+    this.isEval = false;
+
+    // Lazily computed properties.
+    this.firstEventTimestamp = -1;
+    this.firstParseEventTimestamp = -1;
+    this.firstCompileEventTimestamp = -1;
+    this.lastParseEventTimestamp = -1;
+    this.lastEventTimestamp = -1;
+    this.deserializationTimestamp = -1;
+
+    this.preparseTimestamp = -1;
+    this.parseTimestamp = -1;
+    this.parse2Timestamp = -1;
+    this.resolutionTimestamp = -1;
+    this.compileTimestamp = -1;
+    this.lazyCompileTimestamp = -1;
+    this.executionTimestamp = -1;
+    this.optimizationTimestamp = -1;
+
+    this.deserializationDuration = -0.0;
+    this.preparseDuration = -0.0;
+    this.parseDuration = -0.0;
+    this.parse2Duration = -0.0;
+    this.resolutionDuration = -0.0;
+    this.scopeResolutionDuration = -0.0;
+    this.lazyCompileDuration = -0.0;
+    this.compileDuration = -0.0;
+    this.optimizeDuration = -0.0;
+
+    this.ownBytes = -1;
+    this.compilationCacheHits = [];
+  }
+
+  finalize() {
+    this.firstEventTimestamp = this.timestampMin(
+        this.deserializationTimestamp, this.parseTimestamp,
+        this.preparseTimestamp, this.resolutionTimestamp,
+        this.executionTimestamp);
+
+    this.firstParseEventTimestamp = this.timestampMin(
+        this.deserializationTimestamp, this.parseTimestamp,
+        this.preparseTimestamp, this.resolutionTimestamp);
+
+    this.firstCompileEventTimestamp = this.rawTimestampMin(
+        this.deserializationTimestamp, this.compileTimestamp,
+        this.lazyCompileTimestamp);
+    // Any executed script needs to be compiled.
+    if (this.hasBeenExecuted() &&
+        (this.firstCompileEventTimestamp <= 0 ||
+         this.executionTimestamp < this.firstCompileTimestamp)) {
+      console.error('Compile < execution timestamp', this);
     }
-  });
-  return Math.round(result);
+
+    if (this.ownBytes < 0) console.error(this, 'Own bytes must be positive');
+  }
+
+  hasBeenExecuted() {
+    return this.executionTimestamp > 0;
+  }
+
+  addCompilationCacheHit(timestamp) {
+    this.compilationCacheHits.push(timestamp);
+  }
+
+  // Returns the smallest timestamp from the given list, ignoring
+  // uninitialized (-1) values.
+  rawTimestampMin(...timestamps) {
+    timestamps = timestamps.length == 1 ? timestamps[0] : timestamps;
+    let result = timestamps.reduce((min, item) => {
+      return item == -1 ? min : (min == -1 ? item : Math.min(item, item));
+    }, -1);
+    return result;
+  }
+  timestampMin(...timestamps) {
+    let result = this.rawTimestampMin(...timestamps);
+    if (Number.isNaN(result) || result < 0) {
+      console.error(
+          'Invalid timestamp min:', {result, timestamps, script: this});
+      return 0;
+    }
+    return result;
+  }
+
+  timestampMax(...timestamps) {
+    timestamps = timestamps.length == 1 ? timestamps[0] : timestamps;
+    let result = Math.max(...timestamps);
+    if (Number.isNaN(result) || result < 0) {
+      console.error(
+          'Invalid timestamp max:', {result, timestamps, script: this});
+      return 0;
+    }
+    return result;
+  }
 }
 
-
 // ===========================================================================
-class Script {
-  constructor(file, id) {
-    this.file = file;
-    this.isNative = false;
-    this.id = id;
+class Script extends CompilationUnit {
+  constructor(id) {
+    super();
     if (id === void 0 || id <= 0) {
-      throw new Error(`Invalid id=${id} for script with file='${file}'`);
+      throw new Error(`Invalid id=${id} for script`);
     }
-    this.isEval = false;
+    this.file = '';
+    this.id = id;
+
+    this.isNative = false;
+    this.isBackgroundCompiled = false;
+    this.isStreamingCompiled = false;
+
     this.funktions = [];
     this.metrics = new Map();
     this.maxNestingLevel = 0;
 
-    this.firstEvent = -1;
-    this.firstParseEvent = -1;
-    this.lastParseEvent = -1;
-    this.executionTimestamp = -1;
-    this.compileTimestamp = -1;
-    this.lastEvent = -1;
-
-    this.compileTime = -0.0;
-
     this.width = 0;
-    this.bytesTotal = 0;
-    this.ownBytes = -1;
+    this.bytesTotal = -1;
     this.finalized = false;
     this.summary = '';
-    this.setFile(file);
+    this.source = '';
   }
 
   setFile(name) {
@@ -100,15 +188,29 @@
   }
 
   isEmpty() {
-    return this.funktions.length === 0
+    return this.funktions.length === 0;
   }
 
-  funktionAtPosition(start) {
-    if (start === 0) throw "position 0 is reserved for the script";
-    if (this.finalized) throw 'Finalized script has no source position!';
+  getFunktionAtStartPosition(start) {
+    if (!this.isEval && start === 0) {
+      throw 'position 0 is reserved for the script';
+    }
+    if (this.finalized) {
+      return this.funktions.find(funktion => funktion.start == start);
+    }
     return this.funktions[start];
   }
 
+  // Return the innermost function at the given source position.
+  getFunktionForPosition(position) {
+    if (!this.finalized) throw 'Incomplete script';
+    for (let i = this.funktions.length - 1; i >= 0; i--) {
+      let funktion = this.funktions[i];
+      if (funktion.containsPosition(position)) return funktion;
+    }
+    return undefined;
+  }
+
   addMissingFunktions(list) {
     if (this.finalized) throw 'script is finalized!';
     list.forEach(fn => {
@@ -136,7 +238,7 @@
     let maxNesting = 0;
     // Iterate over the Funktions in byte position order.
     this.funktions.forEach(fn => {
-      fn.fromEval = this.isEval;
+      fn.isEval = this.isEval;
       if (parent === null) {
         parent = fn;
       } else {
@@ -150,18 +252,48 @@
         }
         parent = fn;
       }
-      this.firstParseEvent = this.firstParseEvent === -1 ?
-        fn.getFirstParseEvent() :
-        Math.min(this.firstParseEvent, fn.getFirstParseEvent());
-      this.lastParseEvent =
-        Math.max(this.lastParseEvent, fn.getLastParseEvent());
-      fn.getFirstEvent();
-      if (Number.isNaN(this.lastEvent)) throw "Invalid lastEvent";
-      this.lastEvent = Math.max(this.lastEvent, fn.getLastEvent());
-      if (Number.isNaN(this.lastEvent)) throw "Invalid lastEvent";
+    });
+    // Sanity checks to ensure that scripts are executed and parsed before any
+    // of its funktions.
+    let funktionFirstParseEventTimestamp = -1;
+    // Second iteration step to finalize the funktions once the proper
+    // hierarchy has been set up.
+    this.funktions.forEach(fn => {
+      fn.finalize();
+
+      funktionFirstParseEventTimestamp = this.timestampMin(
+          funktionFirstParseEventTimestamp, fn.firstParseEventTimestamp);
+
+      this.lastParseEventTimestamp = this.timestampMax(
+          this.lastParseEventTimestamp, fn.lastParseEventTimestamp);
+
+      this.lastEventTimestamp =
+          this.timestampMax(this.lastEventTimestamp, fn.lastEventTimestamp);
     });
     this.maxNestingLevel = maxNesting;
-    this.getFirstEvent();
+
+    // Initialize sizes.
+    if (!this.ownBytes === -1) throw 'Invalid state';
+    if (this.funktions.length == 0) {
+      this.bytesTotal = this.ownBytes = 0;
+      return;
+    }
+    let toplevelFunktionBytes = this.funktions.reduce(
+        (bytes, each) => bytes + (each.isToplevel() ? each.getBytes() : 0), 0);
+    if (this.isDeserialized || this.isEval || this.isStreamingCompiled) {
+      if (this.getBytes() === -1) {
+        this.bytesTotal = toplevelFunktionBytes;
+      }
+    }
+    this.ownBytes = this.bytesTotal - toplevelFunktionBytes;
+    // Initialize common properties.
+    super.finalize();
+    // Sanity checks after the minimum timestamps have been computed.
+    if (funktionFirstParseEventTimestamp < this.firstParseEventTimestamp) {
+      console.error(
+          'invalid firstCompileEventTimestamp', this,
+          funktionFirstParseEventTimestamp, this.firstParseEventTimestamp);
+    }
   }
 
   print() {
@@ -180,22 +312,16 @@
   }
 
   getOwnBytes() {
-    if (this.ownBytes === -1) {
-      this.ownBytes = this.funktions.reduce(
-        (bytes, each) => bytes - each.parent == null ? each.getBytes() : 0,
-        this.getBytes());
-      if (this.ownBytes < 0) throw "Own bytes must be positive";
-    }
     return this.ownBytes;
   }
 
   // Also see Funktion.prototype.getMetricBytes
   getMetricBytes(name) {
     if (name == 'lazyCompileTimestamp') return this.getOwnBytes();
-    return this.getBytes();
+    return this.getOwnBytes();
   }
 
-  getMetricTime(name) {
+  getMetricDuration(name) {
     return this[name];
   }
 
@@ -235,55 +361,59 @@
     };
 
     log("  - file:         " + this.file);
+    log('  - details:      ' +
+        'isEval=' + this.isEval + ' deserialized=' + this.isDeserialized +
+        ' streamed=' + this.isStreamingCompiled);
     info("scripts", this.getScripts());
     info("functions", all);
     info("toplevel fn", all.filter(each => each.isToplevel()));
-    info("preparsed", all.filter(each => each.preparseTime > 0));
+    info('preparsed', all.filter(each => each.preparseDuration > 0));
 
-
-    info("fully parsed", all.filter(each => each.parseTime > 0));
-    // info("fn parsed", all.filter(each => each.parse2Time > 0));
-    // info("resolved", all.filter(each => each.resolutionTime > 0));
+    info('fully parsed', all.filter(each => each.parseDuration > 0));
+    // info("fn parsed", all.filter(each => each.parse2Duration > 0));
+    // info("resolved", all.filter(each => each.resolutionDuration > 0));
     info("executed", all.filter(each => each.executionTimestamp > 0));
-    info("forEval", all.filter(each => each.fromEval));
+    info('forEval', all.filter(each => each.isEval));
     info("lazy compiled", all.filter(each => each.lazyCompileTimestamp > 0));
     info("eager compiled", all.filter(each => each.compileTimestamp > 0));
 
-    let parsingCost = new ExecutionCost('parse', all,
-      each => each.parseTime);
+    let parsingCost =
+        new ExecutionCost('parse', all, each => each.parseDuration);
     parsingCost.setMetrics(this.metrics);
-    log(parsingCost.toString())
+    log(parsingCost.toString());
 
-    let preParsingCost = new ExecutionCost('preparse', all,
-      each => each.preparseTime);
+    let preParsingCost =
+        new ExecutionCost('preparse', all, each => each.preparseDuration);
     preParsingCost.setMetrics(this.metrics);
-    log(preParsingCost.toString())
+    log(preParsingCost.toString());
 
-    let resolutionCost = new ExecutionCost('resolution', all,
-      each => each.resolutionTime);
+    let resolutionCost =
+        new ExecutionCost('resolution', all, each => each.resolutionDuration);
     resolutionCost.setMetrics(this.metrics);
-    log(resolutionCost.toString())
+    log(resolutionCost.toString());
 
     let nesting = new NestingDistribution(all);
     nesting.setMetrics(this.metrics);
-    log(nesting.toString())
+    log(nesting.toString());
 
     if (printSummary) console.log(this.summary);
   }
 
-  getAccumulatedTimeMetrics(metrics, start, end, delta, incremental = false) {
+  getAccumulatedTimeMetrics(
+      metrics, start, end, delta, cumulative = true, useDuration = false) {
     // Returns an array of the following format:
-    // [ [start, acc(metric0, start, start), acc(metric1, ...), ...],
-    //   [start+delta, acc(metric0, start, start+delta), ...],
+    // [ [start,         acc(metric0, start, start), acc(metric1, ...), ...],
+    //   [start+delta,   acc(metric0, start, start+delta), ...],
     //   [start+delta*2, acc(metric0, start, start+delta*2), ...],
     //   ...
     // ]
+    if (end <= start) throw 'Invalid ranges [' + start + ',' + end + ']';
     const timespan = end - start;
     const kSteps = Math.ceil(timespan / delta);
     // To reduce the time spent iterating over the funktions of this script
     // we iterate once over all funktions and add the metric changes to each
     // timepoint:
-    // [ [0, 300, ...], [1, 15, ...], [2, 100, ...], [3, 0, ...] ... ]
+    // [ [0, 300, ...], [1,  15, ...], [2, 100, ...], [3,   0, ...] ... ]
     // In a second step we accumulate all values:
     // [ [0, 300, ...], [1, 315, ...], [2, 415, ...], [3, 415, ...] ... ]
     //
@@ -293,7 +423,7 @@
     const metricProperties = ["time"];
     metrics.forEach(each => {
       metricProperties.push(each + 'Timestamp');
-      metricProperties.push(each + 'Time');
+      if (useDuration) metricProperties.push(each + 'Duration');
     });
     // Create a packed {rowTemplate} which is copied later-on.
     let indexToTime = (t) => (start + t * delta) / kSecondsToMillis;
@@ -305,12 +435,15 @@
     // Create the real metric's property name on the Funktion object.
     // Add the increments of each Funktion's metric to the result.
     this.forEach(funktionOrScript => {
-      // Iterate over the Funktion's metric names, position 0 is the time.
-      for (let i = 1; i < metricProperties.length; i += 2) {
-        let property = metricProperties[i];
-        let timestamp = funktionOrScript[property];
+      // Iterate over the Funktion's metric names, skipping position 0 which
+      // is the time.
+      const kMetricIncrement = useDuration ? 2 : 1;
+      for (let i = 1; i < metricProperties.length; i += kMetricIncrement) {
+        let timestampPropertyName = metricProperties[i];
+        let timestamp = funktionOrScript[timestampPropertyName];
         if (timestamp === void 0) continue;
-        if (timestamp < 0 || end < timestamp) continue;
+        if (timestamp < start || end < timestamp) continue;
+        timestamp -= start;
         let index = Math.floor(timestamp / delta);
         let row = rows[index];
         if (row === null) {
@@ -320,9 +453,10 @@
           row[0] = indexToTime(index);
         }
         // Add the metric value.
-        row[i] += funktionOrScript.getMetricBytes(property);
-        let timeMetricName = metricProperties[i + 1];
-        row[i + 1] += funktionOrScript.getMetricTime(timeMetricName);
+        row[i] += funktionOrScript.getMetricBytes(timestampPropertyName);
+        if (!useDuration) continue;
+        let durationPropertyName = metricProperties[i + 1];
+        row[i + 1] += funktionOrScript.getMetricDuration(durationPropertyName);
       }
     });
     // Create a packed array again with only the valid entries.
@@ -334,14 +468,14 @@
       let current = rows[t];
       if (current === null) {
         // Ensure a zero data-point after each non-zero point.
-        if (incremental && rows[t - 1] !== null) {
+        if (!cumulative && rows[t - 1] !== null) {
           let duplicate = rowTemplate.slice();
           duplicate[0] = indexToTime(t);
           result.push(duplicate);
         }
         continue;
       }
-      if (!incremental) {
+      if (cumulative) {
         // Skip i==0 where the corresponding time value in seconds is.
         for (let i = 1; i < metricProperties.length; i++) {
           current[i] += previous[i];
@@ -349,7 +483,7 @@
       }
       // Make sure we have a data-point in time right before the current one.
       if (rows[t - 1] === null) {
-        let duplicate = (incremental ? rowTemplate : previous).slice();
+        let duplicate = (!cumulative ? rowTemplate : previous).slice();
         duplicate[0] = indexToTime(t - 1);
         result.push(duplicate);
       }
@@ -374,14 +508,6 @@
       funktion => funktion.didMetricChange(time, delta, metric));
     return result;
   }
-
-  getFirstEvent() {
-    if (this.firstEvent === -1) {
-      // TODO(cbruni): add support for network request timestanp
-      this.firstEvent = this.firstParseEvent;
-    }
-    return this.firstEvent;
-  }
 }
 
 
@@ -491,127 +617,67 @@
 }
 
 // ===========================================================================
-const kNoTimeMetrics = {
-  __proto__: null,
-  executionTime: 0,
-  firstEventTimestamp: 0,
-  firstParseEventTimestamp: 0,
-  lastParseTimestamp: 0,
-  lastEventTimestamp: 0
-};
 
-class Funktion {
+class Funktion extends CompilationUnit {
   constructor(name, start, end, script) {
+    super();
     if (start < 0) throw "invalid start position: " + start;
-    if (end <= 0) throw "invalid end position: " + end;
-    if (end <= start) throw "invalid start end positions";
+    if (script.isEval) {
+      if (end < start) throw 'invalid start end positions';
+    } else {
+      if (end <= 0) throw 'invalid end position: ' + end;
+      if (end <= start) throw 'invalid start end positions';
+    }
 
     this.name = name;
     this.start = start;
     this.end = end;
-    this.ownBytes = -1;
     this.script = script;
     this.parent = null;
-    this.fromEval = false;
     this.nested = [];
     this.nestingLevel = 0;
 
-    this.preparseTimestamp = -1;
-    this.parseTimestamp = -1;
-    this.parse2Timestamp = -1;
-    this.resolutionTimestamp = -1;
-    this.lazyCompileTimestamp = -1;
-    this.compileTimestamp = -1;
-    this.executionTimestamp = -1;
-
-    this.preparseTime = -0.0;
-    this.parseTime = -0.0;
-    this.parse2Time = -0.0;
-    this.resolutionTime = -0.0;
-    this.scopeResolutionTime = -0.0;
-    this.lazyCompileTime = -0.0;
-    this.compileTime = -0.0;
-
-    // Lazily computed properties.
-    this.firstEventTimestamp = -1;
-    this.firstParseEventTimestamp = -1;
-    this.lastParseTimestamp = -1;
-    this.lastEventTimestamp = -1;
-
     if (script) this.script.addFunktion(this);
   }
 
+  finalize() {
+    this.lastParseEventTimestamp = Math.max(
+        this.preparseTimestamp + this.preparseDuration,
+        this.parseTimestamp + this.parseDuration,
+        this.resolutionTimestamp + this.resolutionDuration);
+    if (!(this.lastParseEventTimestamp > 0)) this.lastParseEventTimestamp = 0;
+
+    this.lastEventTimestamp =
+        Math.max(this.lastParseEventTimestamp, this.executionTimestamp);
+    if (!(this.lastEventTimestamp > 0)) this.lastEventTimestamp = 0;
+
+    this.ownBytes = this.nested.reduce(
+        (bytes, each) => bytes - each.getBytes(), this.getBytes());
+
+    super.finalize();
+  }
+
   getMetricBytes(name) {
     if (name == 'lazyCompileTimestamp') return this.getOwnBytes();
-    return this.getBytes();
+    return this.getOwnBytes();
   }
 
-  getMetricTime(name) {
+  getMetricDuration(name) {
     if (name in kNoTimeMetrics) return 0;
     return this[name];
   }
 
-  getFirstEvent() {
-    if (this.firstEventTimestamp === -1) {
-      this.firstEventTimestamp = timestampMin(
-        [this.parseTimestamp, this.preparseTimestamp,
-          this.resolutionTimestamp, this.executionTimestamp
-        ]);
-      if (!(this.firstEventTimestamp > 0)) {
-        this.firstEventTimestamp = 0;
-      }
-    }
-    return this.firstEventTimestamp;
-  }
-
-  getFirstParseEvent() {
-    if (this.firstParseEventTimestamp === -1) {
-      this.firstParseEventTimestamp = timestampMin(
-        [this.parseTimestamp, this.preparseTimestamp,
-          this.resolutionTimestamp
-        ]);
-      if (!(this.firstParseEventTimestamp > 0)) {
-        this.firstParseEventTimestamp = 0;
-      }
-    }
-    return this.firstParseEventTimestamp;
-  }
-
-  getLastParseEvent() {
-    if (this.lastParseTimestamp === -1) {
-      this.lastParseTimestamp = Math.max(
-        this.preparseTimestamp + this.preparseTime,
-        this.parseTimestamp + this.parseTime,
-        this.resolutionTimestamp + this.resolutionTime);
-      if (!(this.lastParseTimestamp > 0)) {
-        this.lastParseTimestamp = 0;
-      }
-    }
-    return this.lastParseTimestamp;
-  }
-
-  getLastEvent() {
-    if (this.lastEventTimestamp === -1) {
-      this.lastEventTimestamp = Math.max(
-        this.getLastParseEvent(), this.executionTimestamp);
-      if (!(this.lastEventTimestamp > 0)) {
-        this.lastEventTimestamp = 0;
-      }
-    }
-    return this.lastEventTimestamp;
-  }
-
   isNestedIn(funktion) {
     if (this.script != funktion.script) throw "Incompatible script";
     return funktion.start < this.start && this.end <= funktion.end;
   }
 
   isToplevel() {
-    return this.parent === null
+    return this.parent === null;
   }
 
-  hasBeenExecuted() {
-    return this.executionTimestamp > 0
+  containsPosition(position) {
+    return this.start <= position && position <= this.end;
   }
 
   accumulateNestingLevel(accumulator) {
@@ -641,12 +707,6 @@
   }
 
   getOwnBytes() {
-    if (this.ownBytes === -1) {
-      this.ownBytes = this.nested.reduce(
-        (bytes, each) => bytes - each.getBytes(),
-        this.getBytes());
-      if (this.ownBytes < 0) throw "Own bytes must be positive";
-    }
     return this.ownBytes;
   }
 
@@ -661,7 +721,7 @@
 
   toString(details = true) {
     let result = 'function' + (this.name ? ' ' + this.name : '') +
-      `() range=${this.start}-${this.end}`;
+        `() range=${this.start}-${this.end}`;
     if (details) result += ` script=${this.script ? this.script.id : 'X'}`;
     return result;
   }
@@ -687,25 +747,60 @@
 class ParseProcessor extends LogReader {
   constructor() {
     super();
-    let config = (processor) => {
-      // {script file},{script id},{start position},{end position},
-      // {time},{timestamp},{function name}
-      return {
-        parsers: [null, parseInt, parseInt, parseInt, parseFloat, parseInt, null],
-        processor: processor
-      }
-    };
-
     this.dispatchTable_ = {
-      'parse-full': config(this.processFull),
-      'parse-function': config(this.processFunction),
-      'parse-script': config(this.processScript),
-      'parse-eval': config(this.processEval),
-      'preparse-no-resolution': config(this.processPreparseNoResolution),
-      'preparse-resolution': config(this.processPreparseResolution),
-      'first-execution': config(this.processFirstExecution),
-      'compile-lazy': config(this.processCompileLazy),
-      'compile': config(this.processCompile)
+      // Avoid accidental leaking of __proto__ properties and force this object
+      // to be in dictionary-mode.
+      __proto__: null,
+      // "function",{event type},
+      // {script id},{start position},{end position},{time},{timestamp},
+      // {function name}
+      'function': {
+        parsers: [
+          parseString, parseInt, parseInt, parseInt, parseFloat, parseInt,
+          parseString
+        ],
+        processor: this.processFunctionEvent
+      },
+      // "compilation-cache","hit"|"put",{type},{scriptid},{start position},
+      // {end position},{timestamp}
+      'compilation-cache': {
+        parsers:
+            [parseString, parseString, parseInt, parseInt, parseInt, parseInt],
+        processor: this.processCompilationCacheEvent
+      },
+      'script': {
+        parsers: [parseString, parseInt, parseInt],
+        processor: this.processScriptEvent
+      },
+      // "script-details", {script_id}, {file}, {line}, {column}, {size}
+      'script-details': {
+        parsers: [parseInt, parseString, parseInt, parseInt, parseInt],
+        processor: this.processScriptDetails
+      },
+      'script-source': {
+        parsers: [parseInt, parseString, parseString],
+        processor: this.processScriptSource
+      },
+    };
+    this.functionEventDispatchTable_ = {
+      // Avoid accidental leaking of __proto__ properties and force this object
+      // to be in dictionary-mode.
+      __proto__: null,
+      'full-parse': this.processFull.bind(this),
+      'parse-function': this.processParseFunction.bind(this),
+      // TODO(cbruni): make sure arrow functions emit a normal parse-function
+      // event.
+      'parse': this.processParseFunction.bind(this),
+      'parse-script': this.processParseScript.bind(this),
+      'parse-eval': this.processParseEval.bind(this),
+      'preparse-no-resolution': this.processPreparseNoResolution.bind(this),
+      'preparse-resolution': this.processPreparseResolution.bind(this),
+      'first-execution': this.processFirstExecution.bind(this),
+      'compile-lazy': this.processCompileLazy.bind(this),
+      'compile': this.processCompile.bind(this),
+      'compile-eval': this.processCompileEval.bind(this),
+      'optimize-lazy': this.processOptimizeLazy.bind(this),
+      'deserialize': this.processDeserialize.bind(this),
     };
 
     this.idToScript = new Map();
@@ -713,9 +808,9 @@
     this.nameToFunction = new Map();
     this.scripts = [];
     this.totalScript = new TotalScript();
-    this.firstEvent = -1;
-    this.lastParseEvent = -1;
-    this.lastEvent = -1;
+    this.firstEventTimestamp = -1;
+    this.lastParseEventTimestamp = -1;
+    this.lastEventTimestamp = -1;
   }
 
   print() {
@@ -755,152 +850,283 @@
     this.scripts = Array.from(this.idToScript.values())
       .filter(each => !each.isNative);
 
-    this.scripts.forEach(script => script.finalize());
-    this.scripts.forEach(script => script.calculateMetrics(false));
-
-    this.firstEvent =
-      timestampMin(this.scripts.map(each => each.firstEvent));
-    this.lastParseEvent = this.scripts.reduce(
-      (max, script) => Math.max(max, script.lastParseEvent), -1);
-    this.lastEvent = this.scripts.reduce(
-      (max, script) => Math.max(max, script.lastEvent), -1);
+    this.scripts.forEach(script => {
+      script.finalize();
+      script.calculateMetrics(false)
+    });
 
     this.scripts.forEach(script => this.totalScript.addAllFunktions(script));
     this.totalScript.calculateMetrics(true);
-    const series = [
-      ['firstParseEvent', 'Any Parse Event'],
-      ['parse', 'Parsing'],
-      ['preparse', 'Preparsing'],
-      ['resolution', 'Preparsing with Var. Resolution'],
-      ['lazyCompile', 'Lazy Compilation'],
-      ['compile', 'Eager Compilation'],
-      ['execution', 'First Execution'],
-    ];
-    let metrics = series.map(each => each[0]);
-    this.totalScript.getAccumulatedTimeMetrics(metrics, 0, this.lastEvent, 10);
-  };
+
+    this.firstEventTimestamp = this.totalScript.timestampMin(
+        this.scripts.map(each => each.firstEventTimestamp));
+    this.lastParseEventTimestamp = this.totalScript.timestampMax(
+        this.scripts.map(each => each.lastParseEventTimestamp));
+    this.lastEventTimestamp = this.totalScript.timestampMax(
+        this.scripts.map(each => each.lastEventTimestamp));
+
+    const series = {
+      firstParseEvent: 'Any Parse Event',
+      parse: 'Parsing',
+      preparse: 'Preparsing',
+      resolution: 'Preparsing with Var. Resolution',
+      lazyCompile: 'Lazy Compilation',
+      compile: 'Eager Compilation',
+      execution: 'First Execution',
+    };
+    let metrics = Object.keys(series);
+    this.totalScript.getAccumulatedTimeMetrics(
+        metrics, 0, this.lastEventTimestamp, 10);
+  }
+
+  processFunctionEvent(
+      eventName, scriptId, startPosition, endPosition, duration, timestamp,
+      functionName) {
+    let handlerFn = this.functionEventDispatchTable_[eventName];
+    if (handlerFn === undefined) {
+      console.error('Couldn\'t find handler for function event:' + eventName);
+    }
+    handlerFn(
+        scriptId, startPosition, endPosition, duration, timestamp,
+        functionName);
+  }
 
   addEntry(entry) {
     this.entries.push(entry);
   }
 
-  lookupScript(file, id) {
-    // During preparsing we only have the temporary ranges and no script yet.
-    let script;
-    if (this.idToScript.has(id)) {
-      script = this.idToScript.get(id);
-    } else {
-      script = new Script(file, id);
-      this.idToScript.set(id, script);
-    }
-    if (file.length > 0 && script.file.length === 0) {
-      script.setFile(file);
-      this.fileToScript.set(file, script);
-    }
-    return script;
+  lookupScript(id) {
+    return this.idToScript.get(id);
   }
 
-  lookupFunktion(file, scriptId,
-    startPosition, endPosition, time, timestamp, functionName) {
-    let script = this.lookupScript(file, scriptId);
-    let funktion = script.funktionAtPosition(startPosition);
+  getOrCreateFunction(
+      scriptId, startPosition, endPosition, duration, timestamp, functionName) {
+    if (scriptId == -1) {
+      return this.lookupFunktionByRange(startPosition, endPosition);
+    }
+    let script = this.lookupScript(scriptId);
+    let funktion = script.getFunktionAtStartPosition(startPosition);
     if (funktion === void 0) {
       funktion = new Funktion(functionName, startPosition, endPosition, script);
     }
     return funktion;
   }
 
-  processEval(file, scriptId, startPosition,
-    endPosition, time, timestamp, functionName) {
-    let script = this.lookupScript(file, scriptId);
+  // Iterates over all functions and tries to find matching ones.
+  lookupFunktionsByRange(start, end) {
+    let results = [];
+    this.idToScript.forEach(script => {
+      script.forEach(funktion => {
+        if (funktion.startPostion == start && funktion.endPosition == end) {
+          results.push(funktion);
+        }
+      });
+    });
+    return results;
+  }
+  lookupFunktionByRange(start, end) {
+    let results = this.lookupFunktionsByRange(start, end);
+    if (results.length != 1) throw "Could not find unique function by range";
+    return results[0];
+  }
+
+  processScriptEvent(eventName, scriptId, timestamp) {
+    let script = this.idToScript.get(scriptId);
+    switch (eventName) {
+      case 'create':
+      case 'reserve-id':
+      case 'deserialize': {
+        if (script !== undefined) return;
+        script = new Script(scriptId);
+        this.idToScript.set(scriptId, script);
+        if (eventName == 'deserialize') {
+          script.deserializationTimestamp = toTimestamp(timestamp);
+        }
+        return;
+      }
+      case 'background-compile':
+        if (script.isBackgroundCompiled) {
+          throw 'Cannot background-compile twice';
+        }
+        script.isBackgroundCompiled = true;
+        // TODO(cbruni): remove once backwards compatibility is no longer needed.
+        script.isStreamingCompiled = true;
+        // TODO(cbruni): fix parse events for background compilation scripts
+        script.preparseTimestamp = toTimestamp(timestamp);
+        return;
+      case 'streaming-compile':
+        if (script.isStreamingCompiled) throw 'Cannot stream-compile twice';
+        // TODO(cbruni): remove once backwards compatibility is no longer needed.
+        script.isBackgroundCompiled = true;
+        script.isStreamingCompiled = true;
+        // TODO(cbruni): fix parse events for background compilation scripts
+        script.preparseTimestamp = toTimestamp(timestamp);
+        return;
+      default:
+        console.error('Unhandled script event: ' + eventName);
+    }
+  }
+
+  processScriptDetails(scriptId, file, startLine, startColumn, size) {
+    let script = this.lookupScript(scriptId);
+    script.setFile(file);
+  }
+
+  processScriptSource(scriptId, url, source) {
+    let script = this.lookupScript(scriptId);
+    script.source = source;
+  }
+
+  processParseEval(
+      scriptId, startPosition, endPosition, duration, timestamp, functionName) {
+    if (startPosition != 0 && startPosition != -1) {
+      console.error('Invalid start position for parse-eval', arguments);
+    }
+    let script = this.processParseScript(...arguments);
     script.isEval = true;
   }
 
-  processFull(file, scriptId, startPosition,
-    endPosition, time, timestamp, functionName) {
-    let funktion = this.lookupFunktion(...arguments);
+  processFull(
+      scriptId, startPosition, endPosition, duration, timestamp, functionName) {
+    if (startPosition == 0) {
+      // This should only happen for eval.
+      let script = this.lookupScript(scriptId);
+      script.isEval = true;
+      return;
+    }
+    let funktion = this.getOrCreateFunction(...arguments);
     // TODO(cbruni): this should never happen, emit differen event from the
     // parser.
     if (funktion.parseTimestamp > 0) return;
-    funktion.parseTimestamp = startOf(timestamp, time);
-    funktion.parseTime = time;
+    funktion.parseTimestamp = startOf(timestamp, duration);
+    funktion.parseDuration = duration;
   }
 
-  processFunction(file, scriptId, startPosition,
-    endPosition, time, timestamp, functionName) {
-    let funktion = this.lookupFunktion(...arguments);
-    funktion.parseTimestamp = startOf(timestamp, time);
-    funktion.parseTime = time;
+  processParseFunction(
+      scriptId, startPosition, endPosition, duration, timestamp, functionName) {
+    let funktion = this.getOrCreateFunction(...arguments);
+    funktion.parseTimestamp = startOf(timestamp, duration);
+    funktion.parseDuration = duration;
   }
 
-  processScript(file, scriptId, startPosition,
-    endPosition, time, timestamp, functionName) {
-    // TODO timestamp and time
-    let script = this.lookupScript(file, scriptId);
-    let ts = startOf(timestamp, time);
+  processParseScript(
+      scriptId, startPosition, endPosition, duration, timestamp, functionName) {
+    // TODO timestamp and duration
+    let script = this.lookupScript(scriptId);
+    let ts = startOf(timestamp, duration);
     script.parseTimestamp = ts;
-    script.firstEventTimestamp = ts;
-    script.firstParseEventTimestamp = ts;
-    script.parseTime = time;
+    script.parseDuration = duration;
+    return script;
   }
 
-  processPreparseResolution(file, scriptId,
-    startPosition, endPosition, time, timestamp, functionName) {
-    let funktion = this.lookupFunktion(...arguments);
+  processPreparseResolution(
+      scriptId, startPosition, endPosition, duration, timestamp, functionName) {
+    let funktion = this.getOrCreateFunction(...arguments);
     // TODO(cbruni): this should never happen, emit different event from the
     // parser.
     if (funktion.resolutionTimestamp > 0) return;
-    funktion.resolutionTimestamp = startOf(timestamp, time);
-    funktion.resolutionTime = time;
+    funktion.resolutionTimestamp = startOf(timestamp, duration);
+    funktion.resolutionDuration = duration;
   }
 
-  processPreparseNoResolution(file, scriptId,
-    startPosition, endPosition, time, timestamp, functionName) {
-    let funktion = this.lookupFunktion(...arguments);
-    funktion.preparseTimestamp = startOf(timestamp, time);
-    funktion.preparseTime = time;
+  processPreparseNoResolution(
+      scriptId, startPosition, endPosition, duration, timestamp, functionName) {
+    let funktion = this.getOrCreateFunction(...arguments);
+    funktion.preparseTimestamp = startOf(timestamp, duration);
+    funktion.preparseDuration = duration;
   }
 
-  processFirstExecution(file, scriptId,
-    startPosition, endPosition, time, timestamp, functionName) {
-    let script = this.lookupScript(file, scriptId);
+  processFirstExecution(
+      scriptId, startPosition, endPosition, duration, timestamp, functionName) {
+    let script = this.lookupScript(scriptId);
     if (startPosition === 0) {
       // undefined = eval fn execution
       if (script) {
         script.executionTimestamp = toTimestamp(timestamp);
       }
     } else {
-      let funktion = script.funktionAtPosition(startPosition);
+      let funktion = script.getFunktionAtStartPosition(startPosition);
       if (funktion) {
         funktion.executionTimestamp = toTimestamp(timestamp);
-      } else if (functionName.length > 0) {
-        // throw new Error("Could not find function: " + functionName);
+      } else {
+        // TODO(cbruni): handle funktions from  compilation-cache hits.
       }
     }
   }
 
-  processCompileLazy(file, scriptId,
-    startPosition, endPosition, time, timestamp, functionName) {
-    let funktion = this.lookupFunktion(...arguments);
-    funktion.lazyCompileTimestamp = startOf(timestamp, time);
-    funktion.lazyCompileTime = time;
-    script.firstPar
+  processCompileLazy(
+      scriptId, startPosition, endPosition, duration, timestamp, functionName) {
+    let funktion = this.getOrCreateFunction(...arguments);
+    funktion.lazyCompileTimestamp = startOf(timestamp, duration);
+    funktion.lazyCompileDuration = duration;
   }
 
-  processCompile(file, scriptId,
-    startPosition, endPosition, time, timestamp, functionName) {
-
-    let script = this.lookupScript(file, scriptId);
+  processCompile(
+      scriptId, startPosition, endPosition, duration, timestamp, functionName) {
+    let script = this.lookupScript(scriptId);
     if (startPosition === 0) {
-      script.compileTimestamp = startOf(timestamp, time);
-      script.compileTime = time;
+      script.compileTimestamp = startOf(timestamp, duration);
+      script.compileDuration = duration;
       script.bytesTotal = endPosition;
+      return script;
     } else {
-      let funktion = script.funktionAtPosition(startPosition);
-      funktion.compileTimestamp = startOf(timestamp, time);
-      funktion.compileTime = time;
+      let funktion = script.getFunktionAtStartPosition(startPosition);
+      if (funktion === undefined) {
+        // This should not happen since any funktion has to be parsed first.
+        console.error('processCompile funktion not found', ...arguments);
+        return;
+      }
+      funktion.compileTimestamp = startOf(timestamp, duration);
+      funktion.compileDuration = duration;
+      return funktion;
     }
   }
+
+  processCompileEval(
+      scriptId, startPosition, endPosition, duration, timestamp, functionName) {
+    let compilationUnit = this.processCompile(...arguments);
+    compilationUnit.isEval = true;
+  }
+
+  processOptimizeLazy(
+      scriptId, startPosition, endPosition, duration, timestamp, functionName) {
+    let compilationUnit = this.lookupScript(scriptId);
+    if (startPosition > 0) {
+      compilationUnit =
+          compilationUnit.getFunktionAtStartPosition(startPosition);
+      if (compilationUnit === undefined) {
+        // This should not happen since any funktion has to be parsed first.
+        console.error('processOptimizeLazy funktion not found', ...arguments);
+        return;
+      }
+    }
+    compilationUnit.optimizationTimestamp = startOf(timestamp, duration);
+    compilationUnit.optimizationDuration = duration;
+  }
+
+  processDeserialize(
+      scriptId, startPosition, endPosition, duration, timestamp, functionName) {
+    let compilationUnit = this.lookupScript(scriptId);
+    if (startPosition === 0) {
+      compilationUnit.bytesTotal = endPosition;
+    } else {
+      compilationUnit = this.getOrCreateFunction(...arguments);
+    }
+    compilationUnit.deserializationTimestamp = startOf(timestamp, duration);
+    compilationUnit.deserializationDuration = duration;
+  }
+
+  processCompilationCacheEvent(
+      eventType, cacheType, scriptId, startPosition, endPosition, timestamp) {
+    if (eventType !== 'hit') return;
+    let compilationUnit = this.lookupScript(scriptId);
+    if (startPosition > 0) {
+      compilationUnit =
+          compilationUnit.getFunktionAtStartPosition(startPosition);
+    }
+    compilationUnit.addCompilationCacheHit(toTimestamp(timestamp));
+  }
+
 }
 
 
diff --git a/src/v8/tools/perf-compare.py b/src/v8/tools/perf-compare.py
index 75f3c73..744f6aa 100755
--- a/src/v8/tools/perf-compare.py
+++ b/src/v8/tools/perf-compare.py
@@ -11,6 +11,9 @@
   %prog -t "x64 results" ../result.json master.json -o results.html
 '''
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 from collections import OrderedDict
 import json
 import math
@@ -418,7 +421,7 @@
       run_names[run_name] = 0
 
       for error in data["errors"]:
-        print "Error:", error
+        print("Error:", error)
 
       for trace in data["traces"]:
         suite_name = trace["graphs"][0]
diff --git a/src/v8/tools/plot-timer-events b/src/v8/tools/plot-timer-events
index 3294e85..0217632 100755
--- a/src/v8/tools/plot-timer-events
+++ b/src/v8/tools/plot-timer-events
@@ -32,7 +32,7 @@
 
 if test ! -x "$d8_exec"; then
   echo "d8 shell not found in $D8_PATH"
-  echo "To build, execute 'make native' from the V8 directory"
+  echo "Please provide path to d8 as env var in D8_PATH"
   exit 1
 fi
 
diff --git a/src/v8/tools/predictable_wrapper.py b/src/v8/tools/predictable_wrapper.py
index cf7bf00b..31b62c8 100644
--- a/src/v8/tools/predictable_wrapper.py
+++ b/src/v8/tools/predictable_wrapper.py
@@ -14,11 +14,21 @@
 compared. Differences are reported as errors.
 """
 
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import sys
 
 from testrunner.local import command
+from testrunner.local import utils
+
 
 MAX_TRIES = 3
+TIMEOUT = 120
+
+# Predictable mode works only when run on the host os.
+command.setup(utils.GuessOS(), None)
 
 def main(args):
   def allocation_str(stdout):
@@ -27,23 +37,23 @@
         return line
     return None
 
-  cmd = command.Command(args[0], args[1:])
+  cmd = command.Command(args[0], args[1:], timeout=TIMEOUT)
 
   previous_allocations = None
   for run in range(1, MAX_TRIES + 1):
-    print '### Predictable run #%d' % run
+    print('### Predictable run #%d' % run)
     output = cmd.execute()
     if output.stdout:
-      print '### Stdout:'
-      print output.stdout
+      print('### Stdout:')
+      print(output.stdout)
     if output.stderr:
-      print '### Stderr:'
-      print output.stderr
-    print '### Return code: %s' % output.exit_code
+      print('### Stderr:')
+      print(output.stderr)
+    print('### Return code: %s' % output.exit_code)
     if output.HasTimedOut():
       # If we get a timeout in any run, we are in an unpredictable state. Just
       # report it as a failure and don't rerun.
-      print '### Test timed out'
+      print('### Test timed out')
       return 1
     allocations = allocation_str(output.stdout)
     if not allocations:
@@ -52,7 +62,7 @@
              '--verify-predictable is passed at the cmd line.')
       return 2
     if previous_allocations and previous_allocations != allocations:
-      print '### Allocations differ'
+      print('### Allocations differ')
       return 3
     if run >= MAX_TRIES:
       # No difference on the last run -> report a success.
diff --git a/src/v8/tools/profile.js b/src/v8/tools/profile.js
index cddadaa..ef36271 100644
--- a/src/v8/tools/profile.js
+++ b/src/v8/tools/profile.js
@@ -892,16 +892,24 @@
 
 JsonProfile.prototype.addCode = function(
     kind, name, timestamp, start, size) {
+  let codeId = this.codeEntries_.length;
+  // Find out if we have a static code entry for the code. If yes, we will
+  // make sure it is written to the JSON file just once.
+  let staticEntry = this.codeMap_.findAddress(start);
+  if (staticEntry && staticEntry.entry.type === 'CPP') {
+    codeId = staticEntry.entry.codeId;
+  }
+
   var entry = new CodeMap.CodeEntry(size, name, 'CODE');
   this.codeMap_.addCode(start, entry);
 
-  entry.codeId = this.codeEntries_.length;
-  this.codeEntries_.push({
+  entry.codeId = codeId;
+  this.codeEntries_[codeId] = {
     name : entry.name,
     timestamp: timestamp,
     type : entry.type,
     kind : kind
-  });
+  };
 
   return entry;
 };
@@ -975,7 +983,7 @@
   if (!entry) return;
   var codeId = entry.codeId;
 
-  // Resolve the inlined fucntions list.
+  // Resolve the inlined functions list.
   if (inlinedFunctions.length > 0) {
     inlinedFunctions = inlinedFunctions.substring(1).split("S");
     for (var i = 0; i < inlinedFunctions.length; i++) {
@@ -1002,33 +1010,10 @@
   };
 };
 
-function unescapeString(s) {
-  s = s.split("\\");
-  for (var i = 1; i < s.length; i++) {
-    if (s[i] === "") {
-      // Double backslash.
-      s[i] = "\\";
-    } else if (i > 0 && s[i].startsWith("x")) {
-      // Escaped Ascii character.
-      s[i] = String.fromCharCode(parseInt(s[i].substring(1, 3), 16)) +
-          s[i].substring(3);
-    } else if (i > 0 && s[i].startsWith("u")) {
-      // Escaped unicode character.
-      s[i] = String.fromCharCode(parseInt(s[i].substring(1, 5), 16)) +
-          s[i].substring(5);
-    } else {
-      if (i > 0 && s[i - 1] !== "\\") {
-        printErr("Malformed source string");
-      }
-    }
-  }
-  return s.join("");
-}
-
 JsonProfile.prototype.addScriptSource = function(script, url, source) {
   this.scripts_[script] = {
-    name : unescapeString(url),
-    source : unescapeString(source)
+    name : url,
+    source : source
   };
 };
 
diff --git a/src/v8/tools/profview/index.html b/src/v8/tools/profview/index.html
index 32f7c7b..8695a41 100644
--- a/src/v8/tools/profview/index.html
+++ b/src/v8/tools/profview/index.html
@@ -22,7 +22,7 @@
   Chrome V8 profiling log processor
 </h3>
 
-<input type="file" id="fileinput" />
+<input type="file" id="fileinput" /><div id="source-status"></div>
 <br>
 <hr>
 
@@ -59,6 +59,10 @@
   </table>
   <div>
     Current code object: <span id="timeline-currentCode"></span>
+    <button id="source-viewer-hide-button">Hide source</button>
+  </div>
+  <div>
+    <table id="source-viewer"> </table>
   </div>
 </div>
 
@@ -108,7 +112,7 @@
 <br>
 <br>
 <br>
-Copyright the V8 Authors - Last change to this page: 2017/02/15
+Copyright the V8 Authors - Last change to this page: 2018/08/13
 </p>
 
 </body>
diff --git a/src/v8/tools/profview/profile-utils.js b/src/v8/tools/profview/profile-utils.js
index f5a85be..4be5589 100644
--- a/src/v8/tools/profview/profile-utils.js
+++ b/src/v8/tools/profview/profile-utils.js
@@ -93,9 +93,10 @@
 
 function createNodeFromStackEntry(code, codeId, vmState) {
   let name = code ? code.name : "UNKNOWN";
-
-  return { name, codeId, type : resolveCodeKindAndVmState(code, vmState),
-           children : [], ownTicks : 0, ticks : 0 };
+  let node = createEmptyNode(name);
+  node.codeId = codeId;
+  node.type = resolveCodeKindAndVmState(code, vmState);
+  return node;
 }
 
 function childIdFromCode(codeId, code) {
@@ -148,29 +149,30 @@
 }
 
 function addOrUpdateChildNode(parent, file, stackIndex, stackPos, ascending) {
-  let stack = file.ticks[stackIndex].s;
-  let vmState = file.ticks[stackIndex].vm;
-  let codeId = stack[stackPos];
-  let code = codeId >= 0 ? file.code[codeId] : undefined;
   if (stackPos === -1) {
     // We reached the end without finding the next step.
     // If we are doing top-down call tree, update own ticks.
     if (!ascending) {
       parent.ownTicks++;
     }
-  } else {
-    console.assert(stackPos >= 0 && stackPos < stack.length);
-    // We found a child node.
-    let childId = childIdFromCode(codeId, code);
-    let child = parent.children[childId];
-    if (!child) {
-      child = createNodeFromStackEntry(code, codeId, vmState);
-      child.delayedExpansion = { frameList : [], ascending };
-      parent.children[childId] = child;
-    }
-    child.ticks++;
-    addFrameToFrameList(child.delayedExpansion.frameList, stackIndex, stackPos);
+    return;
   }
+
+  let stack = file.ticks[stackIndex].s;
+  console.assert(stackPos >= 0 && stackPos < stack.length);
+  let codeId = stack[stackPos];
+  let code = codeId >= 0 ? file.code[codeId] : undefined;
+  // We found a child node.
+  let childId = childIdFromCode(codeId, code);
+  let child = parent.children[childId];
+  if (!child) {
+    let vmState = file.ticks[stackIndex].vm;
+    child = createNodeFromStackEntry(code, codeId, vmState);
+    child.delayedExpansion = { frameList : [], ascending };
+    parent.children[childId] = child;
+  }
+  child.ticks++;
+  addFrameToFrameList(child.delayedExpansion.frameList, stackIndex, stackPos);
 }
 
 // This expands a tree node (direct children only).
@@ -314,13 +316,7 @@
       this.tree = root;
       this.categories = categories;
     } else {
-      this.tree = {
-        name : "root",
-        codeId: -1,
-        children : [],
-        ownTicks : 0,
-        ticks : 0
-      };
+      this.tree = createEmptyNode("root");
       this.categories = null;
     }
 
@@ -339,7 +335,7 @@
       let codeId = stack[i];
       if (codeId < 0 || this.codeVisited[codeId]) continue;
 
-      let code = codeId >= 0 ? file.code[codeId] : undefined;
+      let code = file.code[codeId];
       if (this.filter) {
         let type = code ? code.type : undefined;
         let kind = code ? code.kind : undefined;
@@ -601,3 +597,15 @@
     softDeoptimizations,
   };
 }
+
+function normalizeLeadingWhitespace(lines) {
+  let regex = /^\s*/;
+  let minimumLeadingWhitespaceChars = Infinity;
+  for (let line of lines) {
+    minimumLeadingWhitespaceChars =
+        Math.min(minimumLeadingWhitespaceChars, regex.exec(line)[0].length);
+  }
+  for (let i = 0; i < lines.length; i++) {
+    lines[i] = lines[i].substring(minimumLeadingWhitespaceChars);
+  }
+}
diff --git a/src/v8/tools/profview/profview.css b/src/v8/tools/profview/profview.css
index 106bfe2..ca39745 100644
--- a/src/v8/tools/profview/profview.css
+++ b/src/v8/tools/profview/profview.css
@@ -2,6 +2,11 @@
   width : 100%;
 }
 
+td {
+  padding-top: 0.1em;
+  padding-bottom: 0.1em;
+}
+
 .numeric {
   width : 12ex;
 }
@@ -14,31 +19,82 @@
   font-family: 'Roboto', sans-serif;
 }
 
-div.code-type-chip {
-  display : inline-block;
-  padding : 0.0em;
+#source-status {
+  display: inline-block;
 }
 
-span.code-type-chip {
+.tree-row-arrow {
+  margin-right: 0.2em;
+  text-align: right;
+}
+
+.code-type-chip {
   border-radius : 1em;
-  display : inline-block;
-  padding : 0.1em;
+  padding : 0.2em;
   background-color : #4040c0;
   color: #ffffff;
   font-size : small;
   box-shadow: 0 2px 5px 0 rgba(0, 0, 0, 0.16), 0 2px 10px 0 rgba(0, 0, 0, 0.12);
 }
 
-span.code-type-chip-space {
-  width : 0.5ex;
-  display : inline-block;
+.tree-row-name {
+  margin-left: 0.2em;
+  margin-right: 0.2em;
 }
 
-span.codeid-link {
+.codeid-link {
   text-decoration: underline;
   cursor: pointer;
 }
 
+.view-source-link {
+  text-decoration: underline;
+  cursor: pointer;
+  font-size: 10pt;
+  margin-left: 0.6em;
+  color: #555555;
+}
+
+#source-viewer {
+  border: 1px solid black;
+  padding: 0.2em;
+  font-family: 'Roboto Mono', monospace;
+  white-space: pre;
+  margin-top: 1em;
+  margin-bottom: 1em;
+}
+
+#source-viewer td.line-none {
+  background-color: white;
+}
+
+#source-viewer td.line-cold {
+  background-color: #e1f5fe;
+}
+
+#source-viewer td.line-mediumcold {
+  background-color: #b2ebf2;
+}
+
+#source-viewer td.line-mediumhot {
+  background-color: #c5e1a5;
+}
+
+#source-viewer td.line-hot {
+  background-color: #dce775;
+}
+
+#source-viewer td.line-superhot {
+  background-color: #ffee58;
+}
+
+#source-viewer .source-line-number {
+  padding-left: 0.2em;
+  padding-right: 0.2em;
+  color: #003c8f;
+  background-color: #eceff1;
+}
+
 div.mode-button {
   padding: 1em 3em;
   display: inline-block;
diff --git a/src/v8/tools/profview/profview.js b/src/v8/tools/profview/profview.js
index d480cd4..210cec7 100644
--- a/src/v8/tools/profview/profview.js
+++ b/src/v8/tools/profview/profview.js
@@ -8,34 +8,42 @@
   return document.getElementById(id);
 }
 
-let components = [];
+function removeAllChildren(element) {
+  while (element.firstChild) {
+    element.removeChild(element.firstChild);
+  }
+}
 
+let components;
 function createViews() {
-  components.push(new CallTreeView());
-  components.push(new TimelineView());
-  components.push(new HelpView());
-  components.push(new SummaryView());
-  components.push(new ModeBarView());
-
-  main.setMode("summary");
+  components = [
+    new CallTreeView(),
+    new TimelineView(),
+    new HelpView(),
+    new SummaryView(),
+    new ModeBarView(),
+    new ScriptSourceView(),
+  ];
 }
 
 function emptyState() {
   return {
     file : null,
-    mode : "none",
+    mode : null,
     currentCodeId : null,
+    viewingSource: false,
     start : 0,
     end : Infinity,
-    timeLine : {
-      width : 100,
-      height : 100
+    timelineSize : {
+      width : 0,
+      height : 0
     },
     callTree : {
       attribution : "js-exclude-bc",
       categories : "code-type",
       sort : "time"
-    }
+    },
+    sourceData: null
   };
 }
 
@@ -47,6 +55,7 @@
 
 let main = {
   currentState : emptyState(),
+  renderPending : false,
 
   setMode(mode) {
     if (mode !== main.currentState.mode) {
@@ -120,22 +129,28 @@
     }
   },
 
-  setTimeLineDimensions(width, height) {
-    if (width !== main.currentState.timeLine.width ||
-        height !== main.currentState.timeLine.height) {
-      let timeLine = Object.assign({}, main.currentState.timeLine);
-      timeLine.width = width;
-      timeLine.height = height;
-      main.currentState = Object.assign({}, main.currentState);
-      main.currentState.timeLine = timeLine;
-      main.delayRender();
+  updateSources(file) {
+    let statusDiv = $("source-status");
+    if (!file) {
+      statusDiv.textContent = "";
+      return;
     }
+    if (!file.scripts || file.scripts.length === 0) {
+      statusDiv.textContent =
+          "Script source not available. Run profiler with --log-source-code.";
+      return;
+    }
+    statusDiv.textContent = "Script source is available.";
+    main.currentState.sourceData = new SourceData(file);
   },
 
   setFile(file) {
     if (file !== main.currentState.file) {
-      main.currentState = Object.assign({}, main.currentState);
+      let lastMode = main.currentState.mode || "summary";
+      main.currentState = emptyState();
       main.currentState.file = file;
+      main.updateSources(file);
+      main.setMode(lastMode);
       main.delayRender();
     }
   },
@@ -148,10 +163,16 @@
     }
   },
 
+  setViewingSource(value) {
+    if (main.currentState.viewingSource !== value) {
+      main.currentState = Object.assign({}, main.currentState);
+      main.currentState.viewingSource = value;
+      main.delayRender();
+    }
+  },
+
   onResize() {
-    main.setTimeLineDimensions(
-      Math.round(window.innerWidth - 20),
-      Math.round(window.innerHeight / 5));
+    main.delayRender();
   },
 
   onLoad() {
@@ -160,9 +181,7 @@
       if (f) {
         let reader = new FileReader();
         reader.onload = function(event) {
-          let profData = JSON.parse(event.target.result);
-          main.setViewInterval(0, Infinity);
-          main.setFile(profData);
+          main.setFile(JSON.parse(event.target.result));
         };
         reader.onerror = function(event) {
           console.error(
@@ -176,11 +195,14 @@
     $("fileinput").addEventListener(
         "change", loadHandler, false);
     createViews();
-    main.onResize();
   },
 
   delayRender()  {
-    Promise.resolve().then(() => {
+    if (main.renderPending) return;
+    main.renderPending = true;
+
+    window.requestAnimationFrame(() => {
+      main.renderPending = false;
       for (let c of components) {
         c.render(main.currentState);
       }
@@ -188,50 +210,51 @@
   }
 };
 
-let bucketDescriptors =
+const CATEGORY_COLOR = "#f5f5f5";
+const bucketDescriptors =
     [ { kinds : [ "JSOPT" ],
-        color : "#00ff00",
-        backgroundColor : "#c0ffc0",
+        color : "#64dd17",
+        backgroundColor : "#80e27e",
         text : "JS Optimized" },
       { kinds : [ "JSUNOPT", "BC" ],
-        color : "#ffb000",
-        backgroundColor : "#ffe0c0",
+        color : "#dd2c00",
+        backgroundColor : "#ff9e80",
         text : "JS Unoptimized" },
       { kinds : [ "IC" ],
-        color : "#ffff00",
-        backgroundColor : "#ffffc0",
+        color : "#ff6d00",
+        backgroundColor : "#ffab40",
         text : "IC" },
       { kinds : [ "STUB", "BUILTIN", "REGEXP" ],
-        color : "#ffb0b0",
-        backgroundColor : "#fff0f0",
+        color : "#ffd600",
+        backgroundColor : "#ffea00",
         text : "Other generated" },
       { kinds : [ "CPP", "LIB" ],
-        color : "#0000ff",
-        backgroundColor : "#c0c0ff",
+        color : "#304ffe",
+        backgroundColor : "#6ab7ff",
         text : "C++" },
       { kinds : [ "CPPEXT" ],
-        color : "#8080ff",
-        backgroundColor : "#e0e0ff",
+        color : "#003c8f",
+        backgroundColor : "#c0cfff",
         text : "C++/external" },
       { kinds : [ "CPPPARSE" ],
-        color : "#b890f7",
-        backgroundColor : "#ebdeff",
+        color : "#aa00ff",
+        backgroundColor : "#ffb2ff",
         text : "C++/Parser" },
       { kinds : [ "CPPCOMPBC" ],
-        color : "#52b0ce",
-        backgroundColor : "#a5c8d4",
+        color : "#43a047",
+        backgroundColor : "#88c399",
         text : "C++/Bytecode compiler" },
       { kinds : [ "CPPCOMP" ],
-        color : "#00ffff",
-        backgroundColor : "#c0ffff",
+        color : "#00e5ff",
+        backgroundColor : "#6effff",
         text : "C++/Compiler" },
       { kinds : [ "CPPGC" ],
-        color : "#ff00ff",
-        backgroundColor : "#ffc0ff",
+        color : "#6200ea",
+        backgroundColor : "#e1bee7",
         text : "C++/GC" },
       { kinds : [ "UNKNOWN" ],
-        color : "#f0f0f0",
-        backgroundColor : "#e0e0e0",
+        color : "#bdbdbd",
+        backgroundColor : "#efefef",
         text : "Unknown" }
     ];
 
@@ -260,13 +283,13 @@
     case "UNKNOWN":
       return "Unknown";
     case "CPPPARSE":
-      return "C++ (parser)";
+      return "C++ Parser";
     case "CPPCOMPBC":
-      return "C++ (bytecode compiler)";
+      return "C++ Bytecode Compiler";
     case "CPPCOMP":
-      return "C++ (compiler)";
+      return "C++ Compiler";
     case "CPPGC":
-      return "C++";
+      return "C++ GC";
     case "CPPEXT":
       return "C++ External";
     case "CPP":
@@ -291,27 +314,15 @@
   console.error("Unknown type: " + type);
 }
 
-function createTypeDiv(type) {
+function createTypeNode(type) {
   if (type === "CAT") {
     return document.createTextNode("");
   }
-  let div = document.createElement("div");
-  div.classList.add("code-type-chip");
-
   let span = document.createElement("span");
   span.classList.add("code-type-chip");
   span.textContent = codeTypeToText(type);
-  div.appendChild(span);
 
-  span = document.createElement("span");
-  span.classList.add("code-type-chip-space");
-  div.appendChild(span);
-
-  return div;
-}
-
-function isBytecodeHandler(kind) {
-  return kind === "BytecodeHandler";
+  return span;
 }
 
 function filterFromFilterId(id) {
@@ -322,31 +333,56 @@
       return (type, kind) => type !== 'CODE';
     case "js-exclude-bc":
       return (type, kind) =>
-          type !== 'CODE' || !isBytecodeHandler(kind);
+          type !== 'CODE' || kind !== "BytecodeHandler";
   }
 }
 
-function createTableExpander(indent) {
+function createIndentNode(indent) {
   let div = document.createElement("div");
-  div.style.width = (indent + 0.5) + "em";
   div.style.display = "inline-block";
-  div.style.textAlign = "right";
+  div.style.width = (indent + 0.5) + "em";
   return div;
 }
 
+function createArrowNode() {
+  let span = document.createElement("span");
+  span.classList.add("tree-row-arrow");
+  return span;
+}
+
 function createFunctionNode(name, codeId) {
-  if (codeId === -1) {
-    return document.createTextNode(name);
-  }
   let nameElement = document.createElement("span");
-  nameElement.classList.add("codeid-link");
-  nameElement.onclick = function() {
-    main.setCurrentCode(codeId);
-  };
   nameElement.appendChild(document.createTextNode(name));
+  nameElement.classList.add("tree-row-name");
+  if (codeId !== -1) {
+    nameElement.classList.add("codeid-link");
+    nameElement.onclick = (event) => {
+      main.setCurrentCode(codeId);
+      // Prevent the click from bubbling to the row and causing it to
+      // collapse/expand.
+      event.stopPropagation();
+    };
+  }
   return nameElement;
 }
 
+function createViewSourceNode(codeId) {
+  let linkElement = document.createElement("span");
+  linkElement.appendChild(document.createTextNode("View source"));
+  linkElement.classList.add("view-source-link");
+  linkElement.onclick = (event) => {
+    main.setCurrentCode(codeId);
+    main.setViewingSource(true);
+    // Prevent the click from bubbling to the row and causing it to
+    // collapse/expand.
+    event.stopPropagation();
+  };
+  return linkElement;
+}
+
+const COLLAPSED_ARROW = "\u25B6";
+const EXPANDED_ARROW = "\u25BC";
+
 class CallTreeView {
   constructor() {
     this.element = $("calltree");
@@ -400,22 +436,19 @@
   }
 
   expandTree(tree, indent) {
-    let that = this;
     let index = 0;
     let id = "R/";
     let row = tree.row;
-    let expander = tree.expander;
 
     if (row) {
       index = row.rowIndex;
       id = row.id;
 
-      // Make sure we collapse the children when the row is clicked
-      // again.
-      expander.textContent = "\u25BE";
-      let expandHandler = expander.onclick;
-      expander.onclick = () => {
-        that.collapseRow(tree, expander, expandHandler);
+      tree.arrow.textContent = EXPANDED_ARROW;
+      // Collapse the children when the row is clicked again.
+      let expandHandler = row.onclick;
+      row.onclick = () => {
+        this.collapseRow(tree, expandHandler);
       }
     }
 
@@ -439,7 +472,9 @@
       let row = this.rows.insertRow(index);
       row.id = id + i + "/";
 
-      if (node.type !== "CAT") {
+      if (node.type === "CAT") {
+        row.style.backgroundColor = CATEGORY_COLOR;
+      } else {
         row.style.backgroundColor = bucketFromKind(node.type).backgroundColor;
       }
 
@@ -460,10 +495,17 @@
 
       // Create the name cell.
       let nameCell = row.insertCell();
-      let expander = createTableExpander(indent + 1);
-      nameCell.appendChild(expander);
-      nameCell.appendChild(createTypeDiv(node.type));
+      nameCell.appendChild(createIndentNode(indent + 1));
+      let arrow = createArrowNode();
+      nameCell.appendChild(arrow);
+      nameCell.appendChild(createTypeNode(node.type));
       nameCell.appendChild(createFunctionNode(node.name, node.codeId));
+      if (main.currentState.sourceData &&
+          node.codeId >= 0 &&
+          main.currentState.sourceData.hasSource(
+              this.currentState.file.code[node.codeId].func)) {
+        nameCell.appendChild(createViewSourceNode(node.codeId));
+      }
 
       // Inclusive ticks cell.
       c = row.insertCell();
@@ -476,18 +518,18 @@
         c.style.textAlign = "right";
       }
       if (node.children.length > 0) {
-        expander.textContent = "\u25B8";
-        expander.onclick = () => { that.expandTree(node, indent + 1); };
+        arrow.textContent = COLLAPSED_ARROW;
+        row.onclick = () => { this.expandTree(node, indent + 1); };
       }
 
       node.row = row;
-      node.expander = expander;
+      node.arrow = arrow;
 
       index++;
     }
   }
 
-  collapseRow(tree, expander, expandHandler) {
+  collapseRow(tree, expandHandler) {
     let row = tree.row;
     let id = row.id;
     let index = row.rowIndex;
@@ -496,8 +538,8 @@
       this.rows.deleteRow(index);
     }
 
-    expander.textContent = "\u25B8";
-    expander.onclick = expandHandler;
+    tree.arrow.textContent = COLLAPSED_ARROW;
+    row.onclick = expandHandler;
   }
 
   fillSelects(mode, calltree) {
@@ -809,10 +851,12 @@
       return;
     }
 
-    this.currentState = newState;
+    let width = Math.round(document.documentElement.clientWidth - 20);
+    let height = Math.round(document.documentElement.clientHeight / 5);
+
     if (oldState) {
-      if (newState.timeLine.width === oldState.timeLine.width &&
-          newState.timeLine.height === oldState.timeLine.height &&
+      if (width === oldState.timelineSize.width &&
+          height === oldState.timelineSize.height &&
           newState.file === oldState.file &&
           newState.currentCodeId === oldState.currentCodeId &&
           newState.start === oldState.start &&
@@ -821,21 +865,27 @@
         return;
       }
     }
+    this.currentState = newState;
+    this.currentState.timelineSize.width = width;
+    this.currentState.timelineSize.height = height;
 
     this.element.style.display = "inherit";
 
+    let file = this.currentState.file;
+
+    const minPixelsPerBucket = 10;
+    const minTicksPerBucket = 8;
+    let maxBuckets = Math.round(file.ticks.length / minTicksPerBucket);
+    let bucketCount = Math.min(
+        Math.round(width / minPixelsPerBucket), maxBuckets);
+
     // Make sure the canvas has the right dimensions.
-    let width = this.currentState.timeLine.width;
-    let height = this.currentState.timeLine.height;
     this.canvas.width = width;
     this.canvas.height  = height;
 
     // Make space for the selection text.
     height -= this.imageOffset;
 
-    let file = this.currentState.file;
-    if (!file) return;
-
     let currentCodeId = this.currentState.currentCodeId;
 
     let firstTime = file.ticks[0].tm;
@@ -846,13 +896,6 @@
     this.selectionStart = (start - firstTime) / (lastTime - firstTime) * width;
     this.selectionEnd = (end - firstTime) / (lastTime - firstTime) * width;
 
-    let tickCount = file.ticks.length;
-
-    let minBucketPixels = 10;
-    let minBucketSamples = 30;
-    let bucketCount = Math.min(width / minBucketPixels,
-                               tickCount / minBucketSamples);
-
     let stackProcessor = new CategorySampler(file, bucketCount);
     generateTree(file, 0, Infinity, stackProcessor);
     let codeIdProcessor = new FunctionTimelineProcessor(
@@ -873,28 +916,36 @@
       let sum = 0;
       let bucketData = [];
       let total = buckets[i].total;
-      for (let j = 0; j < bucketDescriptors.length; j++) {
-        let desc = bucketDescriptors[j];
-        for (let k = 0; k < desc.kinds.length; k++) {
-          sum += buckets[i][desc.kinds[k]];
+      if (total > 0) {
+        for (let j = 0; j < bucketDescriptors.length; j++) {
+          let desc = bucketDescriptors[j];
+          for (let k = 0; k < desc.kinds.length; k++) {
+            sum += buckets[i][desc.kinds[k]];
+          }
+          bucketData.push(Math.round(graphHeight * sum / total));
         }
-        bucketData.push(Math.round(graphHeight * sum / total));
+      } else {
+        // No ticks fell into this bucket. Fill with "Unknown."
+        for (let j = 0; j < bucketDescriptors.length; j++) {
+          let desc = bucketDescriptors[j];
+          bucketData.push(desc.text === "Unknown" ? graphHeight : 0);
+        }
       }
       bucketsGraph.push(bucketData);
     }
 
     // Draw the category graph into the buffer.
-    let bucketWidth = width / bucketsGraph.length;
+    let bucketWidth = width / (bucketsGraph.length - 1);
     let ctx = buffer.getContext('2d');
     for (let i = 0; i < bucketsGraph.length - 1; i++) {
       let bucketData = bucketsGraph[i];
       let nextBucketData = bucketsGraph[i + 1];
+      let x1 = Math.round(i * bucketWidth);
+      let x2 = Math.round((i + 1) * bucketWidth);
       for (let j = 0; j < bucketData.length; j++) {
-        let x1 = Math.round(i * bucketWidth);
-        let x2 = Math.round((i + 1) * bucketWidth);
         ctx.beginPath();
-        ctx.moveTo(x1, j && bucketData[j - 1]);
-        ctx.lineTo(x2, j && nextBucketData[j - 1]);
+        ctx.moveTo(x1, j > 0 ? bucketData[j - 1] : 0);
+        ctx.lineTo(x2, j > 0 ? nextBucketData[j - 1] : 0);
         ctx.lineTo(x2, nextBucketData[j]);
         ctx.lineTo(x1, bucketData[j]);
         ctx.closePath();
@@ -1017,9 +1068,7 @@
       cell.appendChild(document.createTextNode(" " + desc.text));
     }
 
-    while (this.currentCode.firstChild) {
-      this.currentCode.removeChild(this.currentCode.firstChild);
-    }
+    removeAllChildren(this.currentCode);
     if (currentCodeId) {
       let currentCode = file.code[currentCodeId];
       this.currentCode.appendChild(document.createTextNode(currentCode.name));
@@ -1090,10 +1139,7 @@
     }
 
     this.element.style.display = "inherit";
-
-    while (this.element.firstChild) {
-      this.element.removeChild(this.element.firstChild);
-    }
+    removeAllChildren(this.element);
 
     let stats = computeOptimizationStats(
         this.currentState.file, newState.start, newState.end);
@@ -1114,22 +1160,22 @@
       return row;
     }
 
-    function makeCollapsible(row, expander) {
-      expander.textContent = "\u25BE";
-      let expandHandler = expander.onclick;
-      expander.onclick = () => {
+    function makeCollapsible(row, arrow) {
+      arrow.textContent = EXPANDED_ARROW;
+      let expandHandler = row.onclick;
+      row.onclick = () => {
         let id = row.id;
         let index = row.rowIndex + 1;
         while (index < rows.rows.length &&
           rows.rows[index].id.startsWith(id)) {
           rows.deleteRow(index);
         }
-        expander.textContent = "\u25B8";
-        expander.onclick = expandHandler;
+        arrow.textContent = COLLAPSED_ARROW;
+        row.onclick = expandHandler;
       }
     }
 
-    function expandDeoptInstances(row, expander, instances, indent, kind) {
+    function expandDeoptInstances(row, arrow, instances, indent, kind) {
       let index = row.rowIndex;
       for (let i = 0; i < instances.length; i++) {
         let childRow = rows.insertRow(index + 1);
@@ -1145,18 +1191,19 @@
             document.createTextNode("Reason: " + deopt.reason));
         reasonCell.style.textIndent = indent + "em";
       }
-      makeCollapsible(row, expander);
+      makeCollapsible(row, arrow);
     }
 
-    function expandDeoptFunctionList(row, expander, list, indent, kind) {
+    function expandDeoptFunctionList(row, arrow, list, indent, kind) {
       let index = row.rowIndex;
       for (let i = 0; i < list.length; i++) {
         let childRow = rows.insertRow(index + 1);
         childRow.id = row.id + i + "/";
 
         let textCell = childRow.insertCell(-1);
-        let expander = createTableExpander(indent);
-        textCell.appendChild(expander);
+        textCell.appendChild(createIndentNode(indent));
+        let childArrow = createArrowNode();
+        textCell.appendChild(childArrow);
         textCell.appendChild(
             createFunctionNode(list[i].f.name, list[i].f.codes[0]));
 
@@ -1164,16 +1211,16 @@
         numberCell.textContent = list[i].instances.length;
         numberCell.style.textIndent = indent + "em";
 
-        expander.textContent = "\u25B8";
-        expander.onclick = () => {
+        childArrow.textContent = COLLAPSED_ARROW;
+        childRow.onclick = () => {
           expandDeoptInstances(
-              childRow, expander, list[i].instances, indent + 1);
+              childRow, childArrow, list[i].instances, indent + 1);
         };
       }
-      makeCollapsible(row, expander);
+      makeCollapsible(row, arrow);
     }
 
-    function expandOptimizedFunctionList(row, expander, list, indent, kind) {
+    function expandOptimizedFunctionList(row, arrow, list, indent, kind) {
       let index = row.rowIndex;
       for (let i = 0; i < list.length; i++) {
         let childRow = rows.insertRow(index + 1);
@@ -1188,17 +1235,19 @@
         numberCell.textContent = list[i].instances.length;
         numberCell.style.textIndent = indent + "em";
       }
-      makeCollapsible(row, expander);
+      makeCollapsible(row, arrow);
     }
 
     function addExpandableRow(text, list, indent, kind) {
       let row = rows.insertRow(-1);
 
       row.id = "opt-table/" + kind + "/";
+      row.style.backgroundColor = CATEGORY_COLOR;
 
       let textCell = row.insertCell(-1);
-      let expander = createTableExpander(indent);
-      textCell.appendChild(expander);
+      textCell.appendChild(createIndentNode(indent));
+      let arrow = createArrowNode();
+      textCell.appendChild(arrow);
       textCell.appendChild(document.createTextNode(text));
 
       let numberCell = row.insertCell(-1);
@@ -1208,16 +1257,16 @@
       }
 
       if (list.count > 0) {
-        expander.textContent = "\u25B8";
+        arrow.textContent = COLLAPSED_ARROW;
         if (kind === "opt") {
-          expander.onclick = () => {
+          row.onclick = () => {
             expandOptimizedFunctionList(
-                row, expander, list.functions, indent + 1, kind);
+                row, arrow, list.functions, indent + 1, kind);
           };
         } else {
-          expander.onclick = () => {
+          row.onclick = () => {
             expandDeoptFunctionList(
-                row, expander, list.functions, indent + 1, kind);
+                row, arrow, list.functions, indent + 1, kind);
           };
         }
       }
@@ -1241,6 +1290,217 @@
   }
 }
 
+class ScriptSourceView {
+  constructor() {
+    this.table = $("source-viewer");
+    this.hideButton = $("source-viewer-hide-button");
+    this.hideButton.onclick = () => {
+      main.setViewingSource(false);
+    };
+  }
+
+  render(newState) {
+    let oldState = this.currentState;
+    if (!newState.file || !newState.viewingSource) {
+      this.table.style.display = "none";
+      this.hideButton.style.display = "none";
+      this.currentState = null;
+      return;
+    }
+    if (oldState) {
+      if (newState.file === oldState.file &&
+          newState.currentCodeId === oldState.currentCodeId &&
+          newState.viewingSource === oldState.viewingSource) {
+        // No change, nothing to do.
+        return;
+      }
+    }
+    this.currentState = newState;
+
+    this.table.style.display = "inline-block";
+    this.hideButton.style.display = "inline";
+    removeAllChildren(this.table);
+
+    let functionId =
+        this.currentState.file.code[this.currentState.currentCodeId].func;
+    let sourceView =
+        this.currentState.sourceData.generateSourceView(functionId);
+    for (let i = 0; i < sourceView.source.length; i++) {
+      let sampleCount = sourceView.lineSampleCounts[i] || 0;
+      let sampleProportion = sourceView.samplesTotal > 0 ?
+                             sampleCount / sourceView.samplesTotal : 0;
+      let heatBucket;
+      if (sampleProportion === 0) {
+        heatBucket = "line-none";
+      } else if (sampleProportion < 0.2) {
+        heatBucket = "line-cold";
+      } else if (sampleProportion < 0.4) {
+        heatBucket = "line-mediumcold";
+      } else if (sampleProportion < 0.6) {
+        heatBucket = "line-mediumhot";
+      } else if (sampleProportion < 0.8) {
+        heatBucket = "line-hot";
+      } else {
+        heatBucket = "line-superhot";
+      }
+
+      let row = this.table.insertRow(-1);
+
+      let lineNumberCell = row.insertCell(-1);
+      lineNumberCell.classList.add("source-line-number");
+      lineNumberCell.textContent = i + sourceView.firstLineNumber;
+
+      let sampleCountCell = row.insertCell(-1);
+      sampleCountCell.classList.add(heatBucket);
+      sampleCountCell.textContent = sampleCount;
+
+      let sourceLineCell = row.insertCell(-1);
+      sourceLineCell.classList.add(heatBucket);
+      sourceLineCell.textContent = sourceView.source[i];
+    }
+
+    $("timeline-currentCode").scrollIntoView();
+  }
+}
+
+class SourceData {
+  constructor(file) {
+    this.scripts = new Map();
+    for (let i = 0; i < file.scripts.length; i++) {
+      const scriptBlock = file.scripts[i];
+      if (scriptBlock === null) continue; // Array may be sparse.
+      let source = scriptBlock.source.split("\n");
+      this.scripts.set(i, source);
+    }
+
+    this.functions = new Map();
+    for (let codeId = 0; codeId < file.code.length; ++codeId) {
+      let codeBlock = file.code[codeId];
+      if (codeBlock.source && codeBlock.func !== undefined) {
+        let data = this.functions.get(codeBlock.func);
+        if (!data) {
+          data = new FunctionSourceData(codeBlock.source.script,
+                                        codeBlock.source.start,
+                                        codeBlock.source.end);
+          this.functions.set(codeBlock.func, data);
+        }
+        data.addSourceBlock(codeId, codeBlock.source);
+      }
+    }
+
+    for (let tick of file.ticks) {
+      let stack = tick.s;
+      for (let i = 0; i < stack.length; i += 2) {
+        let codeId = stack[i];
+        if (codeId < 0) continue;
+        let functionId = file.code[codeId].func;
+        if (this.functions.has(functionId)) {
+          let codeOffset = stack[i + 1];
+          this.functions.get(functionId).addOffsetSample(codeId, codeOffset);
+        }
+      }
+    }
+  }
+
+  getScript(scriptId) {
+    return this.scripts.get(scriptId);
+  }
+
+  getLineForScriptOffset(script, scriptOffset) {
+    let line = 0;
+    let charsConsumed = 0;
+    for (; line < script.length; ++line) {
+      charsConsumed += script[line].length + 1; // Add 1 for newline.
+      if (charsConsumed > scriptOffset) break;
+    }
+    return line;
+  }
+
+  hasSource(functionId) {
+    return this.functions.has(functionId);
+  }
+
+  generateSourceView(functionId) {
+    console.assert(this.hasSource(functionId));
+    let data = this.functions.get(functionId);
+    let scriptId = data.scriptId;
+    let script = this.getScript(scriptId);
+    let firstLineNumber =
+        this.getLineForScriptOffset(script, data.startScriptOffset);
+    let lastLineNumber =
+        this.getLineForScriptOffset(script, data.endScriptOffset);
+    let lines = script.slice(firstLineNumber, lastLineNumber + 1);
+    normalizeLeadingWhitespace(lines);
+
+    let samplesTotal = 0;
+    let lineSampleCounts = [];
+    for (let [codeId, block] of data.codes) {
+      block.offsets.forEach((sampleCount, codeOffset) => {
+        let sourceOffset = block.positionTable.getScriptOffset(codeOffset);
+        let lineNumber =
+            this.getLineForScriptOffset(script, sourceOffset) - firstLineNumber;
+        samplesTotal += sampleCount;
+        lineSampleCounts[lineNumber] =
+            (lineSampleCounts[lineNumber] || 0) + sampleCount;
+      });
+    }
+
+    return {
+      source: lines,
+      lineSampleCounts: lineSampleCounts,
+      samplesTotal: samplesTotal,
+      firstLineNumber: firstLineNumber + 1  // Source code is 1-indexed.
+    };
+  }
+}
+
+class FunctionSourceData {
+  constructor(scriptId, startScriptOffset, endScriptOffset) {
+    this.scriptId = scriptId;
+    this.startScriptOffset = startScriptOffset;
+    this.endScriptOffset = endScriptOffset;
+
+    this.codes = new Map();
+  }
+
+  addSourceBlock(codeId, source) {
+    this.codes.set(codeId, {
+      positionTable: new SourcePositionTable(source.positions),
+      offsets: []
+    });
+  }
+
+  addOffsetSample(codeId, codeOffset) {
+    let codeIdOffsets = this.codes.get(codeId).offsets;
+    codeIdOffsets[codeOffset] = (codeIdOffsets[codeOffset] || 0) + 1;
+  }
+}
+
+class SourcePositionTable {
+  constructor(encodedTable) {
+    this.offsetTable = [];
+    let offsetPairRegex = /C([0-9]+)O([0-9]+)/g;
+    while (true) {
+      let regexResult = offsetPairRegex.exec(encodedTable);
+      if (!regexResult) break;
+      let codeOffset = parseInt(regexResult[1]);
+      let scriptOffset = parseInt(regexResult[2]);
+      if (isNaN(codeOffset) || isNaN(scriptOffset)) continue;
+      this.offsetTable.push(codeOffset, scriptOffset);
+    }
+  }
+
+  getScriptOffset(codeOffset) {
+    console.assert(codeOffset >= 0);
+    for (let i = this.offsetTable.length - 2; i >= 0; i -= 2) {
+      if (this.offsetTable[i] <= codeOffset) {
+        return this.offsetTable[i + 1];
+      }
+    }
+    return this.offsetTable[1];
+  }
+}
+
 class HelpView {
   constructor() {
     this.element = $("help");
diff --git a/src/v8/tools/profviz/composer.js b/src/v8/tools/profviz/composer.js
index ce625ad..411a724 100644
--- a/src/v8/tools/profviz/composer.js
+++ b/src/v8/tools/profviz/composer.js
@@ -176,7 +176,9 @@
   }
 
   function MergeRanges(ranges) {
-    ranges.sort(function(a, b) { return a.start - b.start; });
+    ranges.sort(function(a, b) {
+      return (a.start == b.start) ? a.end - b.end : a.start - b.start;
+    });
     var result = [];
     var j = 0;
     for (var i = 0; i < ranges.length; i = j) {
@@ -306,13 +308,14 @@
     };
     // Collect data from log.
     var logreader = new LogReader(
-      { 'timer-event-start': { parsers: [null, parseTimeStamp],
+      { 'timer-event-start': { parsers: [parseString, parseTimeStamp],
                                processor: processTimerEventStart },
-        'timer-event-end':   { parsers: [null, parseTimeStamp],
+        'timer-event-end':   { parsers: [parseString, parseTimeStamp],
                                processor: processTimerEventEnd },
-        'shared-library': { parsers: [null, parseInt, parseInt],
+        'shared-library': { parsers: [parseString, parseInt, parseInt],
                             processor: processSharedLibrary },
-        'code-creation':  { parsers: [null, parseInt, parseInt, parseInt, null],
+        'code-creation':  { parsers: [parseString, parseInt, parseInt,
+                                parseInt, parseString],
                             processor: processCodeCreateEvent },
         'code-move':      { parsers: [parseInt, parseInt],
                             processor: processCodeMoveEvent },
@@ -322,8 +325,8 @@
                             processor: processCodeDeoptEvent },
         'current-time':   { parsers: [parseTimeStamp],
                             processor: processCurrentTimeEvent },
-        'tick':           { parsers: [parseInt, parseTimeStamp,
-                                      null, null, parseInt, 'var-args'],
+        'tick':           { parsers: [parseInt, parseTimeStamp, parseString,
+                                parseString, parseInt, parseVarArgs],
                             processor: processTickEvent }
       });
 
@@ -516,8 +519,13 @@
     // Label the longest pauses.
     execution_pauses =
         RestrictRangesTo(execution_pauses, range_start, range_end);
-    execution_pauses.sort(
-        function(a, b) { return b.duration() - a.duration(); });
+    execution_pauses.sort(function(a, b) {
+      if (a.duration() == b.duration() && b.end == a.end)
+        return b.start - a.start;
+
+      return (a.duration() == b.duration())
+          ? b.end - a.end : b.duration() - a.duration();
+    });
 
     var max_pause_time = execution_pauses.length > 0
         ? execution_pauses[0].duration() : 0;
diff --git a/src/v8/tools/profviz/worker.js b/src/v8/tools/profviz/worker.js
index 7f16308..95ed40b 100644
--- a/src/v8/tools/profviz/worker.js
+++ b/src/v8/tools/profviz/worker.js
@@ -100,7 +100,7 @@
          var profile = "";
          print = function(text) { profile += text + "\n"; };
          // Dummy entries provider, as we cannot call nm.
-         var entriesProvider = new UnixCppEntriesProvider("", "");
+         var entriesProvider = new UnixCppEntriesProvider("", "", "");
          var targetRootFS = "";
          var separateIc = false;
          var callGraphSize = 5;
diff --git a/src/v8/tools/release/auto_push.py b/src/v8/tools/release/auto_push.py
index ca9e5e8..4cb9687 100755
--- a/src/v8/tools/release/auto_push.py
+++ b/src/v8/tools/release/auto_push.py
@@ -26,6 +26,9 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import argparse
 import json
 import os
@@ -63,7 +66,7 @@
         format="%H", git_hash="%s..%s" % (last_release, self["candidate"]))
 
     if not commits:
-      print "Already pushed current candidate %s" % self["candidate"]
+      print("Already pushed current candidate %s" % self["candidate"])
       return True
 
 
@@ -71,7 +74,7 @@
   MESSAGE = "Creating release if specified."
 
   def RunStep(self):
-    print "Creating release for %s." % self["candidate"]
+    print("Creating release for %s." % self["candidate"])
 
     args = [
       "--author", self._options.author,
@@ -96,7 +99,7 @@
 
   def _ProcessOptions(self, options):
     if not options.author or not options.reviewer:  # pragma: no cover
-      print "You need to specify author and reviewer."
+      print("You need to specify author and reviewer.")
       return False
     options.requires_editor = False
     return True
diff --git a/src/v8/tools/release/auto_roll.py b/src/v8/tools/release/auto_roll.py
index b27675e..829aefb 100755
--- a/src/v8/tools/release/auto_roll.py
+++ b/src/v8/tools/release/auto_roll.py
@@ -3,6 +3,9 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import argparse
 import os
 import sys
@@ -14,13 +17,17 @@
 
 ISSUE_MSG = (
 """Please follow these instructions for assigning/CC'ing issues:
-https://github.com/v8/v8/wiki/Triaging%20issues
+https://v8.dev/docs/triage-issues
 
 Please close rolling in case of a roll revert:
 https://v8-roll.appspot.com/
 This only works with a Google account.
 
-CQ_INCLUDE_TRYBOTS=master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel;master.tryserver.chromium.android:android_optional_gpu_tests_rel""")
+CQ_INCLUDE_TRYBOTS=luci.chromium.try:linux-blink-rel
+CQ_INCLUDE_TRYBOTS=luci.chromium.try:linux_optional_gpu_tests_rel
+CQ_INCLUDE_TRYBOTS=luci.chromium.try:mac_optional_gpu_tests_rel
+CQ_INCLUDE_TRYBOTS=luci.chromium.try:win_optional_gpu_tests_rel
+CQ_INCLUDE_TRYBOTS=luci.chromium.try:android_optional_gpu_tests_rel""")
 
 class Preparation(Step):
   MESSAGE = "Preparation."
@@ -40,7 +47,7 @@
     self["last_roll"] = self._options.last_roll
     if not self["last_roll"]:
       # Interpret the DEPS file to retrieve the v8 revision.
-      # TODO(machenbach): This should be part or the roll-deps api of
+      # TODO(machenbach): This should be part or the setdep api of
       # depot_tools.
       Var = lambda var: '%s'
       exec(FileToText(os.path.join(self._options.chromium, "DEPS")))
@@ -140,7 +147,7 @@
     self['json_output']['monitoring_state'] = 'upload'
     cwd = self._options.chromium
     # Patch DEPS file.
-    if self.Command("roll-dep-svn", "v8 %s" %
+    if self.Command("gclient", "setdep -r src/v8@%s" %
                     self["roll"], cwd=cwd) is None:
       self.Die("Failed to create deps for %s" % self["roll"])
 
@@ -155,15 +162,14 @@
     message.append("TBR=%s" % self._options.reviewer)
     self.GitCommit("\n\n".join(message),  author=self._options.author, cwd=cwd)
     if not self._options.dry_run:
-      self.GitUpload(author=self._options.author,
-                     force=True,
+      self.GitUpload(force=True,
                      bypass_hooks=True,
                      cq=self._options.use_commit_queue,
                      cq_dry_run=self._options.use_dry_run,
                      cwd=cwd)
-      print "CL uploaded."
+      print("CL uploaded.")
     else:
-      print "Dry run - don't upload."
+      print("Dry run - don't upload.")
 
     self.GitCheckout("master", cwd=cwd)
     self.GitDeleteBranch("work-branch", cwd=cwd)
@@ -206,7 +212,7 @@
 
   def _ProcessOptions(self, options):  # pragma: no cover
     if not options.author or not options.reviewer:
-      print "A reviewer (-r) and an author (-a) are required."
+      print("A reviewer (-r) and an author (-a) are required.")
       return False
 
     options.requires_editor = False
diff --git a/src/v8/tools/release/auto_tag.py b/src/v8/tools/release/auto_tag.py
index a52a028..fddefed 100755
--- a/src/v8/tools/release/auto_tag.py
+++ b/src/v8/tools/release/auto_tag.py
@@ -3,6 +3,9 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import argparse
 import sys
 
@@ -15,7 +18,7 @@
   def RunStep(self):
     # TODO(machenbach): Remove after the git switch.
     if self.Config("PERSISTFILE_BASENAME") == "/tmp/v8-auto-tag-tempfile":
-      print "This script is disabled until after the v8 git migration."
+      print("This script is disabled until after the v8 git migration.")
       return True
 
     self.CommonPrepare()
@@ -80,7 +83,7 @@
         self["candidate_version"] = version
 
     if not self["candidate"] or not self["candidate_version"]:
-      print "Nothing found to tag."
+      print("Nothing found to tag.")
       self.CommonCleanup()
       return True
 
@@ -120,18 +123,18 @@
       # Don't include the version change commit itself if there is no upper
       # limit yet.
       candidate_svn =  str(int(candidate_svn) + 1)
-      next_svn = sys.maxint
+      next_svn = sys.maxsize
     lkgr_svn = self.LastLKGR(candidate_svn, next_svn)
 
     if not lkgr_svn:
-      print "There is no lkgr since the candidate version yet."
+      print("There is no lkgr since the candidate version yet.")
       self.CommonCleanup()
       return True
 
     # Let's check if the lkgr is at least three hours old.
     self["lkgr"] = self.vc.SvnGit(lkgr_svn)
     if not self["lkgr"]:
-      print "Couldn't find git hash for lkgr %s" % lkgr_svn
+      print("Couldn't find git hash for lkgr %s" % lkgr_svn)
       self.CommonCleanup()
       return True
 
@@ -139,11 +142,11 @@
     current_utc_time = self._side_effect_handler.GetUTCStamp()
 
     if current_utc_time < lkgr_utc_time + 10800:
-      print "Candidate lkgr %s is too recent for tagging." % lkgr_svn
+      print("Candidate lkgr %s is too recent for tagging." % lkgr_svn)
       self.CommonCleanup()
       return True
 
-    print "Tagging revision %s with %s" % (lkgr_svn, self["candidate_version"])
+    print("Tagging revision %s with %s" % (lkgr_svn, self["candidate_version"]))
 
 
 class MakeTag(Step):
@@ -172,7 +175,7 @@
 
   def _ProcessOptions(self, options):  # pragma: no cover
     if not options.dry_run and not options.author:
-      print "Specify your chromium.org email with -a"
+      print("Specify your chromium.org email with -a")
       return False
     options.wait_for_lgtm = False
     options.force_readline_defaults = True
diff --git a/src/v8/tools/release/check_clusterfuzz.py b/src/v8/tools/release/check_clusterfuzz.py
index 8af8351..021cd55 100755
--- a/src/v8/tools/release/check_clusterfuzz.py
+++ b/src/v8/tools/release/check_clusterfuzz.py
@@ -13,6 +13,8 @@
 suppress stdout and stderr and only process contents of the results_file.
 """
 
+# for py2/py3 compatibility
+from __future__ import print_function
 
 import argparse
 import httplib
@@ -222,7 +224,7 @@
     with open(options.results_file, "w") as f:
       f.write(json.dumps(results))
   else:
-    print results
+    print(results)
 
 
 if __name__ == "__main__":
diff --git a/src/v8/tools/release/common_includes.py b/src/v8/tools/release/common_includes.py
index d295e37..af4709b 100644
--- a/src/v8/tools/release/common_includes.py
+++ b/src/v8/tools/release/common_includes.py
@@ -26,6 +26,9 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import argparse
 import datetime
 import httplib
@@ -51,11 +54,19 @@
 PUSH_MSG_NEW_RE = re.compile(r"^Version \d+\.\d+\.\d+$")
 VERSION_FILE = os.path.join("include", "v8-version.h")
 WATCHLISTS_FILE = "WATCHLISTS"
+RELEASE_WORKDIR = "/tmp/v8-release-scripts-work-dir/"
 
 # V8 base directory.
 V8_BASE = os.path.dirname(
     os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
+# Add our copy of depot_tools to the PATH as many scripts use tools from there,
+# e.g. git-cl, fetch, git-new-branch etc, and we can not depend on depot_tools
+# being in the PATH on the LUCI bots.
+path_to_depot_tools = os.path.join(V8_BASE, 'third_party', 'depot_tools')
+new_path = path_to_depot_tools + os.pathsep + os.environ.get('PATH')
+os.environ['PATH'] = new_path
+
 
 def TextToFile(text, file_name):
   with open(file_name, "w") as f:
@@ -191,8 +202,8 @@
   cwd = cwd or os.getcwd()
   # TODO(machenbach): Use timeout.
   cmd_line = "%s %s %s" % (prefix, cmd, args)
-  print "Command: %s" % cmd_line
-  print "in %s" % cwd
+  print("Command: %s" % cmd_line)
+  print("in %s" % cwd)
   sys.stdout.flush()
   try:
     if pipe:
@@ -264,8 +275,8 @@
     try:
       return json.loads(data)
     except:
-      print data
-      print "ERROR: Could not read response. Is your key valid?"
+      print(data)
+      print("ERROR: Could not read response. Is your key valid?")
       raise
 
   def Sleep(self, seconds):
@@ -440,7 +451,7 @@
     if not self._state and os.path.exists(state_file):
       self._state.update(json.loads(FileToText(state_file)))
 
-    print ">>> Step %d: %s" % (self._number, self._text)
+    print(">>> Step %d: %s" % (self._number, self._text))
     try:
       return self.RunStep()
     finally:
@@ -476,16 +487,16 @@
           raise Exception("Retried too often. Giving up. Reason: %s" %
                           str(got_exception))
         wait_time = wait_plan.pop()
-        print "Waiting for %f seconds." % wait_time
+        print("Waiting for %f seconds." % wait_time)
         self._side_effect_handler.Sleep(wait_time)
-        print "Retrying..."
+        print("Retrying...")
       else:
         return result
 
   def ReadLine(self, default=None):
     # Don't prompt in forced mode.
     if self._options.force_readline_defaults and default is not None:
-      print "%s (forced)" % default
+      print("%s (forced)" % default)
       return default
     else:
       return self._side_effect_handler.ReadLine()
@@ -521,8 +532,8 @@
 
   def Die(self, msg=""):
     if msg != "":
-      print "Error: %s" % msg
-    print "Exiting"
+      print("Error: %s" % msg)
+    print("Exiting")
     raise Exception(msg)
 
   def DieNoManualMode(self, msg=""):
@@ -531,7 +542,7 @@
       self.Die(msg)
 
   def Confirm(self, msg):
-    print "%s [Y/n] " % msg,
+    print("%s [Y/n] " % msg, end=' ')
     answer = self.ReadLine(default="Y")
     return answer == "" or answer == "Y" or answer == "y"
 
@@ -541,7 +552,7 @@
         msg = "Branch %s exists, do you want to delete it?" % name
         if self.Confirm(msg):
           self.GitDeleteBranch(name, cwd=cwd)
-          print "Branch %s deleted." % name
+          print("Branch %s deleted." % name)
         else:
           msg = "Can't continue. Please delete branch %s and try again." % name
           self.Die(msg)
@@ -604,10 +615,10 @@
            "change the headline of the uploaded CL.")
     answer = ""
     while answer != "LGTM":
-      print "> ",
+      print("> ", end=' ')
       answer = self.ReadLine(None if self._options.wait_for_lgtm else "LGTM")
       if answer != "LGTM":
-        print "That was not 'LGTM'."
+        print("That was not 'LGTM'.")
 
   def WaitForResolvingConflicts(self, patch_file):
     print("Applying the patch \"%s\" failed. Either type \"ABORT<Return>\", "
@@ -619,8 +630,8 @@
       if answer == "ABORT":
         self.Die("Applying the patch failed.")
       if answer != "":
-        print "That was not 'RESOLVED' or 'ABORT'."
-      print "> ",
+        print("That was not 'RESOLVED' or 'ABORT'.")
+      print("> ", end=' ')
       answer = self.ReadLine()
 
   # Takes a file containing the patch to apply as first argument.
@@ -759,16 +770,26 @@
   MESSAGE = "Upload for code review."
 
   def RunStep(self):
+    reviewer = None
     if self._options.reviewer:
-      print "Using account %s for review." % self._options.reviewer
+      print("Using account %s for review." % self._options.reviewer)
       reviewer = self._options.reviewer
-    else:
-      print "Please enter the email address of a V8 reviewer for your patch: ",
+
+    tbr_reviewer = None
+    if self._options.tbr_reviewer:
+      print("Using account %s for TBR review." % self._options.tbr_reviewer)
+      tbr_reviewer = self._options.tbr_reviewer
+
+    if not reviewer and not tbr_reviewer:
+      print(
+        "Please enter the email address of a V8 reviewer for your patch: ",
+        end=' ')
       self.DieNoManualMode("A reviewer must be specified in forced mode.")
       reviewer = self.ReadLine()
-    self.GitUpload(reviewer, self._options.author, self._options.force_upload,
+
+    self.GitUpload(reviewer, self._options.force_upload,
                    bypass_hooks=self._options.bypass_upload_hooks,
-                   cc=self._options.cc)
+                   cc=self._options.cc, tbr_reviewer=tbr_reviewer)
 
 
 def MakeStep(step_class=Step, number=0, state=None, config=None,
@@ -821,6 +842,8 @@
                         help="File to write results summary to.")
     parser.add_argument("-r", "--reviewer", default="",
                         help="The account name to be used for reviews.")
+    parser.add_argument("--tbr-reviewer", "--tbr", default="",
+                        help="The account name to be used for TBR reviews.")
     parser.add_argument("-s", "--step",
         help="Specify the step where to start work. Default: 0.",
         default=0, type=int)
@@ -836,7 +859,7 @@
 
     # Process common options.
     if options.step < 0:  # pragma: no cover
-      print "Bad step number %d" % options.step
+      print("Bad step number %d" % options.step)
       parser.print_help()
       return None
 
@@ -865,6 +888,11 @@
     if not options:
       return 1
 
+    # Ensure temp dir exists for state files.
+    state_dir = os.path.dirname(self._config["PERSISTFILE_BASENAME"])
+    if not os.path.exists(state_dir):
+      os.makedirs(state_dir)
+
     state_file = "%s-state.json" % self._config["PERSISTFILE_BASENAME"]
     if options.step == 0 and os.path.exists(state_file):
       os.remove(state_file)
diff --git a/src/v8/tools/release/create_release.py b/src/v8/tools/release/create_release.py
index e5c2114..cbf07f4 100755
--- a/src/v8/tools/release/create_release.py
+++ b/src/v8/tools/release/create_release.py
@@ -3,6 +3,9 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import argparse
 import os
 import sys
@@ -27,7 +30,7 @@
     self["push_hash"] = (self._options.revision or
                          self.GitLog(n=1, format="%H", branch="origin/master"))
     assert self["push_hash"]
-    print "Release revision %s" % self["push_hash"]
+    print("Release revision %s" % self["push_hash"])
 
 
 class IncrementVersion(Step):
@@ -138,7 +141,7 @@
   def RunStep(self):
     cmd = "push origin %s:refs/heads/%s" % (self["push_hash"], self["version"])
     if self._options.dry_run:
-      print "Dry run. Command:\ngit %s" % cmd
+      print("Dry run. Command:\ngit %s" % cmd)
     else:
       self.Git(cmd)
 
@@ -207,8 +210,7 @@
     self["commit_title"] = text.splitlines()[0]
     TextToFile(text, self.Config("COMMITMSG_FILE"))
 
-    self.GitCommit(file_name = self.Config("COMMITMSG_FILE"))
-    os.remove(self.Config("COMMITMSG_FILE"))
+    self.GitCommit(file_name=self.Config("COMMITMSG_FILE"))
     os.remove(self.Config("CHANGELOG_ENTRY_FILE"))
 
 
@@ -217,18 +219,20 @@
 
   def RunStep(self):
     if self._options.dry_run:
-      print "Dry run - upload CL."
+      print("Dry run - upload CL.")
     else:
-      self.GitUpload(author=self._options.author,
-                     force=True,
+      self.GitUpload(force=True,
                      bypass_hooks=True,
-                     private=True)
+                     no_autocc=True,
+                     message_file=self.Config("COMMITMSG_FILE"))
     cmd = "cl land --bypass-hooks -f"
     if self._options.dry_run:
-      print "Dry run. Command:\ngit %s" % cmd
+      print("Dry run. Command:\ngit %s" % cmd)
     else:
       self.Git(cmd)
 
+    os.remove(self.Config("COMMITMSG_FILE"))
+
 
 class TagRevision(Step):
   MESSAGE = "Tag the new revision."
@@ -269,7 +273,7 @@
 
   def _ProcessOptions(self, options):  # pragma: no cover
     if not options.author or not options.reviewer:
-      print "Reviewer (-r) and author (-a) are required."
+      print("Reviewer (-r) and author (-a) are required.")
       return False
     return True
 
diff --git a/src/v8/tools/release/filter_build_files.py b/src/v8/tools/release/filter_build_files.py
index 7d3f221..1afabe8 100755
--- a/src/v8/tools/release/filter_build_files.py
+++ b/src/v8/tools/release/filter_build_files.py
@@ -25,6 +25,11 @@
   'd8',
 ]
 
+# Additional executable files added only to ref archive type.
+REFBUILD_EXECUTABLE_FILES = [
+  'cctest',
+]
+
 SUPPLEMENTARY_FILES = [
   'icudtl.dat',
   'natives_blob.bin',
@@ -35,7 +40,7 @@
 LIBRARY_FILES = {
   'android': ['*.a', '*.so'],
   'linux': ['*.a', '*.so'],
-  'mac': ['*.a', '*.so'],
+  'mac': ['*.a', '*.so', '*.dylib'],
   'win': ['*.lib', '*.dll'],
 }
 
@@ -51,7 +56,7 @@
                       help='Path to an output file. The files will '
                            'be stored in json list with absolute paths.')
   parser.add_argument('-t', '--type',
-                      choices=['all', 'exe', 'lib'], default='all',
+                      choices=['all', 'exe', 'lib', 'ref'], default='all',
                       help='Specifies the archive type.')
   args = parser.parse_args()
 
@@ -60,8 +65,8 @@
 
   args.dir = os.path.abspath(args.dir)
 
-  # Skip libraries for exe archive type.
-  if args.type == 'exe':
+  # Skip libraries for exe and ref archive types.
+  if args.type in ('exe', 'ref'):
     library_files = []
   else:
     library_files = LIBRARY_FILES[args.platform]
@@ -72,6 +77,9 @@
   else:
     executable_files = EXECUTABLE_FILES
 
+  if args.type == 'ref':
+    executable_files.extend(REFBUILD_EXECUTABLE_FILES)
+
   list_of_files = []
   def add_files_from_globs(globs):
     list_of_files.extend(itertools.chain(*map(glob.iglob, globs)))
diff --git a/src/v8/tools/release/git_recipes.py b/src/v8/tools/release/git_recipes.py
index 9dedae8..0997e0b 100644
--- a/src/v8/tools/release/git_recipes.py
+++ b/src/v8/tools/release/git_recipes.py
@@ -205,14 +205,14 @@
     args.append(Quoted(patch_file))
     self.Git(MakeArgs(args), **kwargs)
 
-  def GitUpload(self, reviewer="", author="", force=False, cq=False,
-                cq_dry_run=False, bypass_hooks=False, cc="", private=False,
-                **kwargs):
+  def GitUpload(self, reviewer="", force=False, cq=False,
+                cq_dry_run=False, bypass_hooks=False, cc="", tbr_reviewer="",
+                no_autocc=False, message_file=None, **kwargs):
     args = ["cl upload --send-mail"]
-    if author:
-      args += ["--email", Quoted(author)]
     if reviewer:
       args += ["-r", Quoted(reviewer)]
+    if tbr_reviewer:
+      args += ["--tbrs", Quoted(tbr_reviewer)]
     if force:
       args.append("-f")
     if cq:
@@ -221,11 +221,13 @@
       args.append("--cq-dry-run")
     if bypass_hooks:
       args.append("--bypass-hooks")
+    if no_autocc:
+      args.append("--no-autocc")
     if cc:
       args += ["--cc", Quoted(cc)]
+    if message_file:
+      args += ["--message-file", Quoted(message_file)]
     args += ["--gerrit"]
-    if private:
-      args += ["--private"]
     # TODO(machenbach): Check output in forced mode. Verify that all required
     # base files were uploaded, if not retry.
     self.Git(MakeArgs(args), pipe=False, **kwargs)
diff --git a/src/v8/tools/release/merge_to_branch.py b/src/v8/tools/release/merge_to_branch.py
index 877d121..c959429 100755
--- a/src/v8/tools/release/merge_to_branch.py
+++ b/src/v8/tools/release/merge_to_branch.py
@@ -26,6 +26,9 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import argparse
 from collections import OrderedDict
 import sys
@@ -186,10 +189,10 @@
 
   def RunStep(self):
     self.CommonCleanup()
-    print "*** SUMMARY ***"
-    print "branch: %s" % self["merge_to_branch"]
+    print("*** SUMMARY ***")
+    print("branch: %s" % self["merge_to_branch"])
     if self["revision_list"]:
-      print "patches: %s" % self["revision_list"]
+      print("patches: %s" % self["revision_list"])
 
 
 class MergeToBranch(ScriptsBase):
@@ -215,10 +218,10 @@
   def _ProcessOptions(self, options):
     if len(options.revisions) < 1:
       if not options.patch:
-        print "Either a patch file or revision numbers must be specified"
+        print("Either a patch file or revision numbers must be specified")
         return False
       if not options.message:
-        print "You must specify a merge comment if no patches are specified"
+        print("You must specify a merge comment if no patches are specified")
         return False
     options.bypass_upload_hooks = True
     # CC ulan to make sure that fixes are merged to Google3.
@@ -233,19 +236,20 @@
     for revision in options.revisions:
       if (IsSvnNumber(revision) or
           (revision[0:1] == "r" and IsSvnNumber(revision[1:]))):
-        print "Please provide full git hashes of the patches to merge."
-        print "Got: %s" % revision
+        print("Please provide full git hashes of the patches to merge.")
+        print("Got: %s" % revision)
         return False
     return True
 
   def _Config(self):
     return {
       "BRANCHNAME": "prepare-merge",
-      "PERSISTFILE_BASENAME": "/tmp/v8-merge-to-branch-tempfile",
+      "PERSISTFILE_BASENAME": RELEASE_WORKDIR + "v8-merge-to-branch-tempfile",
       "ALREADY_MERGING_SENTINEL_FILE":
-          "/tmp/v8-merge-to-branch-tempfile-already-merging",
-      "TEMPORARY_PATCH_FILE": "/tmp/v8-prepare-merge-tempfile-temporary-patch",
-      "COMMITMSG_FILE": "/tmp/v8-prepare-merge-tempfile-commitmsg",
+          RELEASE_WORKDIR + "v8-merge-to-branch-tempfile-already-merging",
+      "TEMPORARY_PATCH_FILE":
+          RELEASE_WORKDIR + "v8-prepare-merge-tempfile-temporary-patch",
+      "COMMITMSG_FILE": RELEASE_WORKDIR + "v8-prepare-merge-tempfile-commitmsg",
     }
 
   def _Steps(self):
diff --git a/src/v8/tools/release/mergeinfo.py b/src/v8/tools/release/mergeinfo.py
index 1e29ece..bed7441 100755
--- a/src/v8/tools/release/mergeinfo.py
+++ b/src/v8/tools/release/mergeinfo.py
@@ -3,6 +3,9 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import argparse
 import os
 import sys
@@ -77,32 +80,32 @@
   return version
 
 def print_analysis(git_working_dir, hash_to_search):
-  print '1.) Searching for "' + hash_to_search + '"'
-  print '=====================ORIGINAL COMMIT START==================='
-  print describe_commit(git_working_dir, hash_to_search)
-  print '=====================ORIGINAL COMMIT END====================='
-  print '2.) General information:'
+  print('1.) Searching for "' + hash_to_search + '"')
+  print('=====================ORIGINAL COMMIT START===================')
+  print(describe_commit(git_working_dir, hash_to_search))
+  print('=====================ORIGINAL COMMIT END=====================')
+  print('2.) General information:')
   branches = get_branches_for_commit(git_working_dir, hash_to_search)
-  print 'Is LKGR:         ' + str(is_lkgr(branches))
-  print 'Is on Canary:    ' + str(get_first_canary(branches))
-  print 'First V8 branch: ' + str(get_first_v8_version(branches)) + \
-      ' (Might not be the rolled version)'
-  print '3.) Found follow-up commits, reverts and ports:'
+  print('Is LKGR:         ' + str(is_lkgr(branches)))
+  print('Is on Canary:    ' + str(get_first_canary(branches)))
+  print('First V8 branch: ' + str(get_first_v8_version(branches)) + \
+      ' (Might not be the rolled version)')
+  print('3.) Found follow-up commits, reverts and ports:')
   followups = get_followup_commits(git_working_dir, hash_to_search)
   for followup in followups:
-    print describe_commit(git_working_dir, followup, True)
+    print(describe_commit(git_working_dir, followup, True))
 
-  print '4.) Found merges:'
+  print('4.) Found merges:')
   merges = get_merge_commits(git_working_dir, hash_to_search)
   for currentMerge in merges:
-    print describe_commit(git_working_dir, currentMerge, True)
-    print '---Merged to:'
+    print(describe_commit(git_working_dir, currentMerge, True))
+    print('---Merged to:')
     mergeOutput = git_execute(git_working_dir, ['branch',
                                                 '--contains',
                                                 currentMerge,
                                                 '-r']).strip()
-    print mergeOutput
-  print 'Finished successfully'
+    print(mergeOutput)
+  print('Finished successfully')
 
 if __name__ == '__main__':  # pragma: no cover
   parser = argparse.ArgumentParser('Tool to check where a git commit was'
diff --git a/src/v8/tools/release/push_to_candidates.py b/src/v8/tools/release/push_to_candidates.py
index 538b988..c706896 100755
--- a/src/v8/tools/release/push_to_candidates.py
+++ b/src/v8/tools/release/push_to_candidates.py
@@ -26,6 +26,9 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import argparse
 import os
 import sys
@@ -46,7 +49,7 @@
 
     if(self["current_branch"] == self.Config("CANDIDATESBRANCH")
        or self["current_branch"] == self.Config("BRANCHNAME")):
-      print "Warning: Script started on branch %s" % self["current_branch"]
+      print("Warning: Script started on branch %s" % self["current_branch"])
 
     self.PrepareBranch()
     self.DeleteBranch(self.Config("CANDIDATESBRANCH"))
@@ -347,10 +350,10 @@
 
   def _ProcessOptions(self, options):  # pragma: no cover
     if not options.manual and not options.reviewer:
-      print "A reviewer (-r) is required in (semi-)automatic mode."
+      print("A reviewer (-r) is required in (semi-)automatic mode.")
       return False
     if not options.manual and not options.author:
-      print "Specify your chromium.org email with -a in (semi-)automatic mode."
+      print("Specify your chromium.org email with -a in (semi-)automatic mode.")
       return False
 
     options.tbr_commit = not options.manual
diff --git a/src/v8/tools/release/roll_merge.py b/src/v8/tools/release/roll_merge.py
index 2dd43ea..2f03abb 100755
--- a/src/v8/tools/release/roll_merge.py
+++ b/src/v8/tools/release/roll_merge.py
@@ -26,6 +26,9 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import argparse
 from collections import OrderedDict
 import sys
@@ -202,7 +205,7 @@
   MESSAGE = "Create the tag."
 
   def RunStep(self):
-    print "Creating tag %s" % self["version"]
+    print("Creating tag %s" % self["version"])
     self.vc.Tag(self["version"],
                 self.vc.RemoteBranch(self["merge_to_branch"]),
                 self["commit_title"])
@@ -213,11 +216,11 @@
 
   def RunStep(self):
     self.CommonCleanup()
-    print "*** SUMMARY ***"
-    print "version: %s" % self["version"]
-    print "branch: %s" % self["merge_to_branch"]
+    print("*** SUMMARY ***")
+    print("version: %s" % self["version"])
+    print("branch: %s" % self["merge_to_branch"])
     if self["revision_list"]:
-      print "patches: %s" % self["revision_list"]
+      print("patches: %s" % self["revision_list"])
 
 
 class RollMerge(ScriptsBase):
@@ -241,10 +244,10 @@
   def _ProcessOptions(self, options):
     if len(options.revisions) < 1:
       if not options.patch:
-        print "Either a patch file or revision numbers must be specified"
+        print("Either a patch file or revision numbers must be specified")
         return False
       if not options.message:
-        print "You must specify a merge comment if no patches are specified"
+        print("You must specify a merge comment if no patches are specified")
         return False
     options.bypass_upload_hooks = True
     # CC ulan to make sure that fixes are merged to Google3.
@@ -254,19 +257,21 @@
     for revision in options.revisions:
       if (IsSvnNumber(revision) or
           (revision[0:1] == "r" and IsSvnNumber(revision[1:]))):
-        print "Please provide full git hashes of the patches to merge."
-        print "Got: %s" % revision
+        print("Please provide full git hashes of the patches to merge.")
+        print("Got: %s" % revision)
         return False
     return True
 
   def _Config(self):
     return {
       "BRANCHNAME": "prepare-merge",
-      "PERSISTFILE_BASENAME": "/tmp/v8-merge-to-branch-tempfile",
+      "PERSISTFILE_BASENAME":
+          RELEASE_WORKDIR + "v8-merge-to-branch-tempfile",
       "ALREADY_MERGING_SENTINEL_FILE":
-          "/tmp/v8-merge-to-branch-tempfile-already-merging",
-      "TEMPORARY_PATCH_FILE": "/tmp/v8-prepare-merge-tempfile-temporary-patch",
-      "COMMITMSG_FILE": "/tmp/v8-prepare-merge-tempfile-commitmsg",
+          RELEASE_WORKDIR + "v8-merge-to-branch-tempfile-already-merging",
+      "TEMPORARY_PATCH_FILE":
+          RELEASE_WORKDIR + "v8-prepare-merge-tempfile-temporary-patch",
+      "COMMITMSG_FILE": RELEASE_WORKDIR + "v8-prepare-merge-tempfile-commitmsg",
     }
 
   def _Steps(self):
diff --git a/src/v8/tools/release/script_test.py b/src/v8/tools/release/script_test.py
index b9a17e9..0f345b7 100755
--- a/src/v8/tools/release/script_test.py
+++ b/src/v8/tools/release/script_test.py
@@ -29,6 +29,9 @@
 # Wraps test execution with a coverage analysis. To get the best speed, the
 # native python coverage version >= 3.7.1 should be installed.
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import coverage
 import os
 import unittest
@@ -46,7 +49,7 @@
   ])
   unittest.TextTestRunner(verbosity=2).run(unittest.TestSuite(alltests))
   cov.stop()
-  print cov.report()
+  print(cov.report())
 
 
 if __name__ == '__main__':
diff --git a/src/v8/tools/release/search_related_commits.py b/src/v8/tools/release/search_related_commits.py
index d27aa56..e6e52d2 100755
--- a/src/v8/tools/release/search_related_commits.py
+++ b/src/v8/tools/release/search_related_commits.py
@@ -3,6 +3,9 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import argparse
 import operator
 import os
@@ -17,7 +20,7 @@
   all_commits_raw = _find_commits_inbetween(
       start_hash, until, git_working_dir, verbose)
   if verbose:
-    print "All commits between <of> and <until>: " + all_commits_raw
+    print("All commits between <of> and <until>: " + all_commits_raw)
 
   # Adding start hash too
   all_commits = [start_hash]
@@ -61,7 +64,7 @@
 
   commit_position = matches.group(2)
   if verbose:
-    print "1.) Commit position to look for: " + commit_position
+    print("1.) Commit position to look for: " + commit_position)
 
   search_range = start_hash + ".." + until
 
@@ -78,13 +81,13 @@
       git_working_dir, git_args(start_hash), verbose).strip()
 
   if verbose:
-    print "2.) Found by hash: " + found_by_hash
+    print("2.) Found by hash: " + found_by_hash)
 
   found_by_commit_pos = git_execute(
       git_working_dir, git_args(commit_position), verbose).strip()
 
   if verbose:
-    print "3.) Found by commit position: " + found_by_commit_pos
+    print("3.) Found by commit position: " + found_by_commit_pos)
 
   # Replace brackets or else they are wrongly interpreted by --grep
   title = title.replace("[", "\\[")
@@ -94,7 +97,7 @@
       git_working_dir, git_args(title), verbose).strip()
 
   if verbose:
-    print "4.) Found by title: " + found_by_title
+    print("4.) Found by title: " + found_by_title)
 
   hits = (
       _convert_to_array(found_by_hash) +
@@ -132,8 +135,8 @@
 def git_execute(working_dir, args, verbose=False):
   command = ["git", "-C", working_dir] + args
   if verbose:
-    print "Git working dir: " + working_dir
-    print "Executing git command:" + str(command)
+    print("Git working dir: " + working_dir)
+    print("Executing git command:" + str(command))
   p = Popen(args=command, stdin=PIPE,
             stdout=PIPE, stderr=PIPE)
   output, err = p.communicate()
@@ -141,7 +144,7 @@
   if rc != 0:
     raise Exception(err)
   if verbose:
-    print "Git return value: " + output
+    print("Git return value: " + output)
   return output
 
 def _pretty_print_entry(hash, git_dir, pre_text, verbose):
@@ -215,4 +218,4 @@
   args = sys.argv[1:]
   options = parser.parse_args(args)
   for current_line in main(options):
-    print current_line
+    print(current_line)
diff --git a/src/v8/tools/release/test_scripts.py b/src/v8/tools/release/test_scripts.py
index 759012d..62158d9 100755
--- a/src/v8/tools/release/test_scripts.py
+++ b/src/v8/tools/release/test_scripts.py
@@ -26,6 +26,9 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import os
 import shutil
 import tempfile
@@ -43,8 +46,6 @@
 from merge_to_branch import MergeToBranch
 import push_to_candidates
 from push_to_candidates import *
-import releases
-from releases import Releases
 from auto_tag import AutoTag
 import roll_merge
 from roll_merge import RollMerge
@@ -97,38 +98,6 @@
                 ]
     self.assertEquals(expected, NormalizeVersionTags(input))
 
-  def testSortBranches(self):
-    S = releases.SortBranches
-    self.assertEquals(["3.1", "2.25"], S(["2.25", "3.1"])[0:2])
-    self.assertEquals(["3.0", "2.25"], S(["2.25", "3.0", "2.24"])[0:2])
-    self.assertEquals(["3.11", "3.2"], S(["3.11", "3.2", "2.24"])[0:2])
-
-  def testFilterDuplicatesAndReverse(self):
-    F = releases.FilterDuplicatesAndReverse
-    self.assertEquals([], F([]))
-    self.assertEquals([["100", "10"]], F([["100", "10"]]))
-    self.assertEquals([["99", "9"], ["100", "10"]],
-                      F([["100", "10"], ["99", "9"]]))
-    self.assertEquals([["98", "9"], ["100", "10"]],
-                      F([["100", "10"], ["99", "9"], ["98", "9"]]))
-    self.assertEquals([["98", "9"], ["99", "10"]],
-                      F([["100", "10"], ["99", "10"], ["98", "9"]]))
-
-  def testBuildRevisionRanges(self):
-    B = releases.BuildRevisionRanges
-    self.assertEquals({}, B([]))
-    self.assertEquals({"10": "100"}, B([["100", "10"]]))
-    self.assertEquals({"10": "100", "9": "99:99"},
-                      B([["100", "10"], ["99", "9"]]))
-    self.assertEquals({"10": "100", "9": "97:99"},
-                      B([["100", "10"], ["98", "9"], ["97", "9"]]))
-    self.assertEquals({"10": "100", "9": "99:99", "3": "91:98"},
-                      B([["100", "10"], ["99", "9"], ["91", "3"]]))
-    self.assertEquals({"13": "101", "12": "100:100", "9": "94:97",
-                       "3": "91:93, 98:99"},
-                      B([["101", "13"], ["100", "12"], ["98", "3"],
-                         ["94", "9"], ["91", "3"]]))
-
   def testMakeComment(self):
     self.assertEquals("#   Line 1\n#   Line 2\n#",
                       MakeComment("    Line 1\n    Line 2\n"))
@@ -417,11 +386,11 @@
     return script(TEST_CONFIG, self, self._state).RunSteps([step_class], args)
 
   def Call(self, fun, *args, **kwargs):
-    print "Calling %s with %s and %s" % (str(fun), str(args), str(kwargs))
+    print("Calling %s with %s and %s" % (str(fun), str(args), str(kwargs)))
 
   def Command(self, cmd, args="", prefix="", pipe=True, cwd=None):
-    print "%s %s" % (cmd, args)
-    print "in %s" % cwd
+    print("%s %s" % (cmd, args))
+    print("in %s" % cwd)
     return self._mock.Call("command", cmd + " " + args, cwd=cwd)
 
   def ReadLine(self):
@@ -967,8 +936,9 @@
           cb=self.WriteFakeWatchlistsFile),
       Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], "",
           cb=CheckVersionCommit),
-      Cmd("git cl upload --send-mail --email \"author@chromium.org\" "
-          "-f --bypass-hooks --gerrit --private", ""),
+      Cmd("git cl upload --send-mail "
+          "-f --bypass-hooks --no-autocc --message-file "
+          "\"%s\" --gerrit" % TEST_CONFIG["COMMITMSG_FILE"], ""),
       Cmd("git cl land --bypass-hooks -f", ""),
       Cmd("git fetch", ""),
       Cmd("git log -1 --format=%H --grep="
@@ -1031,13 +1001,17 @@
 https://chromium.googlesource.com/v8/v8/+log/last_rol..roll_hsh
 
 Please follow these instructions for assigning/CC'ing issues:
-https://github.com/v8/v8/wiki/Triaging%20issues
+https://v8.dev/docs/triage-issues
 
 Please close rolling in case of a roll revert:
 https://v8-roll.appspot.com/
 This only works with a Google account.
 
-CQ_INCLUDE_TRYBOTS=master.tryserver.blink:linux_trusty_blink_rel;master.tryserver.chromium.linux:linux_optional_gpu_tests_rel;master.tryserver.chromium.mac:mac_optional_gpu_tests_rel;master.tryserver.chromium.win:win_optional_gpu_tests_rel;master.tryserver.chromium.android:android_optional_gpu_tests_rel
+CQ_INCLUDE_TRYBOTS=luci.chromium.try:linux-blink-rel
+CQ_INCLUDE_TRYBOTS=luci.chromium.try:linux_optional_gpu_tests_rel
+CQ_INCLUDE_TRYBOTS=luci.chromium.try:mac_optional_gpu_tests_rel
+CQ_INCLUDE_TRYBOTS=luci.chromium.try:win_optional_gpu_tests_rel
+CQ_INCLUDE_TRYBOTS=luci.chromium.try:android_optional_gpu_tests_rel
 
 TBR=reviewer@chromium.org"""
 
@@ -1113,12 +1087,13 @@
       Cmd("git pull", "", cwd=chrome_dir),
       Cmd("git fetch origin", ""),
       Cmd("git new-branch work-branch", "", cwd=chrome_dir),
-      Cmd("roll-dep-svn v8 roll_hsh", "rolled", cb=WriteDeps, cwd=chrome_dir),
+      Cmd("gclient setdep -r src/v8@roll_hsh", "", cb=WriteDeps,
+          cwd=chrome_dir),
       Cmd(("git commit -am \"%s\" "
            "--author \"author@chromium.org <author@chromium.org>\"" %
            self.ROLL_COMMIT_MSG),
           "", cwd=chrome_dir),
-      Cmd("git cl upload --send-mail --email \"author@chromium.org\" -f "
+      Cmd("git cl upload --send-mail -f "
           "--cq-dry-run --bypass-hooks --gerrit", "",
           cwd=chrome_dir),
       Cmd("git checkout -f master", "", cwd=chrome_dir),
@@ -1307,251 +1282,6 @@
     args += ["-s", "4"]
     RollMerge(TEST_CONFIG, self).Run(args)
 
-  def testReleases(self):
-    c_hash1_commit_log = """Update V8 to Version 4.2.71.
-
-Cr-Commit-Position: refs/heads/master@{#5678}
-"""
-    c_hash2_commit_log = """Revert something.
-
-BUG=12345
-
-Reason:
-> Some reason.
-> Cr-Commit-Position: refs/heads/master@{#12345}
-> git-svn-id: svn://svn.chromium.org/chrome/trunk/src@12345 003-1c4
-
-Review URL: https://codereview.chromium.org/12345
-
-Cr-Commit-Position: refs/heads/master@{#4567}
-git-svn-id: svn://svn.chromium.org/chrome/trunk/src@4567 0039-1c4b
-
-"""
-    c_hash3_commit_log = """Simple.
-
-git-svn-id: svn://svn.chromium.org/chrome/trunk/src@3456 0039-1c4b
-
-"""
-    c_hash_234_commit_log = """Version 3.3.1.1 (cherry-pick).
-
-Merged abc12.
-
-Review URL: fake.com
-
-Cr-Commit-Position: refs/heads/candidates@{#234}
-"""
-    c_hash_123_commit_log = """Version 3.3.1.0
-
-git-svn-id: googlecode@123 0039-1c4b
-"""
-    c_hash_345_commit_log = """Version 3.4.0.
-
-Cr-Commit-Position: refs/heads/candidates@{#345}
-"""
-    c_hash_456_commit_log = """Version 4.2.71.
-
-Cr-Commit-Position: refs/heads/4.2.71@{#1}
-"""
-    c_deps = "Line\n   \"v8_revision\": \"%s\",\n  line\n"
-
-    json_output = self.MakeEmptyTempFile()
-    csv_output = self.MakeEmptyTempFile()
-    self.WriteFakeVersionFile()
-
-    TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory()
-    chrome_dir = TEST_CONFIG["CHROMIUM"]
-    chrome_v8_dir = os.path.join(chrome_dir, "v8")
-    os.makedirs(chrome_v8_dir)
-
-    def ResetVersion(major, minor, build, patch=0):
-      return lambda: self.WriteFakeVersionFile(major=major,
-                                               minor=minor,
-                                               build=build,
-                                               patch=patch)
-
-    self.Expect([
-      Cmd("git status -s -uno", ""),
-      Cmd("git checkout -f origin/master", ""),
-      Cmd("git fetch", ""),
-      Cmd("git branch", "  branch1\n* branch2\n"),
-      Cmd("git new-branch %s" % TEST_CONFIG["BRANCHNAME"], ""),
-      Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
-      Cmd("git rev-list --max-age=395200 --tags",
-          "bad_tag\nhash_234\nhash_123\nhash_345\nhash_456\n"),
-      Cmd("git describe --tags bad_tag", "3.23.42-1-deadbeef"),
-      Cmd("git describe --tags hash_234", "3.3.1.1"),
-      Cmd("git describe --tags hash_123", "3.21.2"),
-      Cmd("git describe --tags hash_345", "3.22.3"),
-      Cmd("git describe --tags hash_456", "4.2.71"),
-      Cmd("git diff --name-only hash_234 hash_234^", VERSION_FILE),
-      Cmd("git checkout -f hash_234 -- %s" % VERSION_FILE, "",
-          cb=ResetVersion(3, 3, 1, 1)),
-      Cmd("git branch -r --contains hash_234", "  branch-heads/3.3\n"),
-      Cmd("git log -1 --format=%B hash_234", c_hash_234_commit_log),
-      Cmd("git log -1 --format=%s hash_234", ""),
-      Cmd("git log -1 --format=%B hash_234", c_hash_234_commit_log),
-      Cmd("git log -1 --format=%ci hash_234", "18:15"),
-      Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
-          cb=ResetVersion(3, 22, 5)),
-      Cmd("git diff --name-only hash_123 hash_123^", VERSION_FILE),
-      Cmd("git checkout -f hash_123 -- %s" % VERSION_FILE, "",
-          cb=ResetVersion(3, 21, 2)),
-      Cmd("git branch -r --contains hash_123", "  branch-heads/3.21\n"),
-      Cmd("git log -1 --format=%B hash_123", c_hash_123_commit_log),
-      Cmd("git log -1 --format=%s hash_123", ""),
-      Cmd("git log -1 --format=%B hash_123", c_hash_123_commit_log),
-      Cmd("git log -1 --format=%ci hash_123", "03:15"),
-      Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
-          cb=ResetVersion(3, 22, 5)),
-      Cmd("git diff --name-only hash_345 hash_345^", VERSION_FILE),
-      Cmd("git checkout -f hash_345 -- %s" % VERSION_FILE, "",
-          cb=ResetVersion(3, 22, 3)),
-      Cmd("git branch -r --contains hash_345", "  origin/candidates\n"),
-      Cmd("git log -1 --format=%B hash_345", c_hash_345_commit_log),
-      Cmd("git log -1 --format=%s hash_345", ""),
-      Cmd("git log -1 --format=%B hash_345", c_hash_345_commit_log),
-      Cmd("git log -1 --format=%ci hash_345", ""),
-      Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
-          cb=ResetVersion(3, 22, 5)),
-      Cmd("git diff --name-only hash_456 hash_456^", VERSION_FILE),
-      Cmd("git checkout -f hash_456 -- %s" % VERSION_FILE, "",
-          cb=ResetVersion(4, 2, 71)),
-      Cmd("git branch -r --contains hash_456", "  origin/4.2.71\n"),
-      Cmd("git log -1 --format=%B hash_456", c_hash_456_commit_log),
-      Cmd("git log -1 --format=%H 4.2.71", "hash_456"),
-      Cmd("git log -1 --format=%s hash_456", "Version 4.2.71"),
-      Cmd("git log -1 --format=%H hash_456^", "master_456"),
-      Cmd("git log -1 --format=%B master_456",
-          "Cr-Commit-Position: refs/heads/master@{#456}"),
-      Cmd("git log -1 --format=%B hash_456", c_hash_456_commit_log),
-      Cmd("git log -1 --format=%ci hash_456", "02:15"),
-      Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
-          cb=ResetVersion(3, 22, 5)),
-      Cmd("git fetch origin +refs/heads/*:refs/remotes/origin/* "
-          "+refs/branch-heads/*:refs/remotes/branch-heads/*", "",
-          cwd=chrome_dir),
-      Cmd("git fetch origin", "", cwd=chrome_v8_dir),
-      Cmd("git log --format=%H --grep=\"V8\" origin/master -- DEPS",
-          "c_hash1\nc_hash2\nc_hash3\n",
-          cwd=chrome_dir),
-      Cmd("git show c_hash1:DEPS", c_deps % "hash_456", cwd=chrome_dir),
-      Cmd("git log -1 --format=%B c_hash1", c_hash1_commit_log,
-          cwd=chrome_dir),
-      Cmd("git show c_hash2:DEPS", c_deps % "hash_345", cwd=chrome_dir),
-      Cmd("git log -1 --format=%B c_hash2", c_hash2_commit_log,
-          cwd=chrome_dir),
-      Cmd("git show c_hash3:DEPS", c_deps % "deadbeef", cwd=chrome_dir),
-      Cmd("git log -1 --format=%B c_hash3", c_hash3_commit_log,
-          cwd=chrome_dir),
-      Cmd("git branch -r", " weird/123\n  branch-heads/7\n", cwd=chrome_dir),
-      Cmd("git show refs/branch-heads/7:DEPS", c_deps % "hash_345",
-          cwd=chrome_dir),
-      URL("http://omahaproxy.appspot.com/all.json", """[{
-        "os": "win",
-        "versions": [{
-          "version": "2.2.2.2",
-          "v8_version": "22.2.2.2",
-          "current_reldate": "04/09/15",
-          "os": "win",
-          "channel": "canary",
-          "previous_version": "1.1.1.0"
-          }]
-        }]"""),
-      URL("http://omahaproxy.appspot.com/v8.json?version=1.1.1.0", """{
-        "chromium_version": "1.1.1.0",
-        "v8_version": "11.1.1.0"
-        }"""),
-      Cmd("git rev-list -1 11.1.1", "v8_previous_version_hash"),
-      Cmd("git rev-list -1 22.2.2.2", "v8_version_hash"),
-      Cmd("git checkout -f origin/master", ""),
-      Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], "")
-    ])
-
-    args = ["-c", TEST_CONFIG["CHROMIUM"],
-            "--json", json_output,
-            "--csv", csv_output,
-            "--max-releases", "1"]
-    Releases(TEST_CONFIG, self).Run(args)
-
-    # Check expected output.
-    csv = ("4.2.71,4.2.71,1,5678,\r\n"
-           "3.22.3,candidates,345,4567:5677,\r\n"
-           "3.21.2,3.21,123,,\r\n"
-           "3.3.1.1,3.3,234,,abc12\r\n")
-    self.assertEquals(csv, FileToText(csv_output))
-
-    expected_json = {"chrome_releases":{
-                                        "canaries": [
-                                                     {
-                           "chrome_version": "2.2.2.2",
-                           "os": "win",
-                           "release_date": "04/09/15",
-                           "v8_version": "22.2.2.2",
-                           "v8_version_hash": "v8_version_hash",
-                           "v8_previous_version": "11.1.1.0",
-                           "v8_previous_version_hash": "v8_previous_version_hash"
-                           }]},
-                     "releases":[
-      {
-        "revision": "1",
-        "revision_git": "hash_456",
-        "master_position": "456",
-        "master_hash": "master_456",
-        "patches_merged": "",
-        "version": "4.2.71",
-        "chromium_revision": "5678",
-        "branch": "4.2.71",
-        "review_link": "",
-        "date": "02:15",
-        "chromium_branch": "",
-        # FIXME(machenbach): Fix revisions link for git.
-        "revision_link": "https://code.google.com/p/v8/source/detail?r=1",
-      },
-      {
-        "revision": "345",
-        "revision_git": "hash_345",
-        "master_position": "",
-        "master_hash": "",
-        "patches_merged": "",
-        "version": "3.22.3",
-        "chromium_revision": "4567:5677",
-        "branch": "candidates",
-        "review_link": "",
-        "date": "",
-        "chromium_branch": "7",
-        "revision_link": "https://code.google.com/p/v8/source/detail?r=345",
-      },
-      {
-        "revision": "123",
-        "revision_git": "hash_123",
-        "patches_merged": "",
-        "master_position": "",
-        "master_hash": "",
-        "version": "3.21.2",
-        "chromium_revision": "",
-        "branch": "3.21",
-        "review_link": "",
-        "date": "03:15",
-        "chromium_branch": "",
-        "revision_link": "https://code.google.com/p/v8/source/detail?r=123",
-      },
-      {
-        "revision": "234",
-        "revision_git": "hash_234",
-        "patches_merged": "abc12",
-        "master_position": "",
-        "master_hash": "",
-        "version": "3.3.1.1",
-        "chromium_revision": "",
-        "branch": "3.3",
-        "review_link": "fake.com",
-        "date": "18:15",
-        "chromium_branch": "",
-        "revision_link": "https://code.google.com/p/v8/source/detail?r=234",
-      },],
-    }
-    self.assertEquals(expected_json, json.loads(FileToText(json_output)))
-
   def testMergeToBranch(self):
     TEST_CONFIG["ALREADY_MERGING_SENTINEL_FILE"] = self.MakeEmptyTempFile()
     TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
@@ -1677,250 +1407,5 @@
     args += ["-s", "4"]
     MergeToBranch(TEST_CONFIG, self).Run(args)
 
-  def testReleases(self):
-    c_hash1_commit_log = """Update V8 to Version 4.2.71.
-
-Cr-Commit-Position: refs/heads/master@{#5678}
-"""
-    c_hash2_commit_log = """Revert something.
-
-BUG=12345
-
-Reason:
-> Some reason.
-> Cr-Commit-Position: refs/heads/master@{#12345}
-> git-svn-id: svn://svn.chromium.org/chrome/trunk/src@12345 003-1c4
-
-Review URL: https://codereview.chromium.org/12345
-
-Cr-Commit-Position: refs/heads/master@{#4567}
-git-svn-id: svn://svn.chromium.org/chrome/trunk/src@4567 0039-1c4b
-
-"""
-    c_hash3_commit_log = """Simple.
-
-git-svn-id: svn://svn.chromium.org/chrome/trunk/src@3456 0039-1c4b
-
-"""
-    c_hash_234_commit_log = """Version 3.3.1.1 (cherry-pick).
-
-Merged abc12.
-
-Review URL: fake.com
-
-Cr-Commit-Position: refs/heads/candidates@{#234}
-"""
-    c_hash_123_commit_log = """Version 3.3.1.0
-
-git-svn-id: googlecode@123 0039-1c4b
-"""
-    c_hash_345_commit_log = """Version 3.4.0.
-
-Cr-Commit-Position: refs/heads/candidates@{#345}
-"""
-    c_hash_456_commit_log = """Version 4.2.71.
-
-Cr-Commit-Position: refs/heads/4.2.71@{#1}
-"""
-    c_deps = "Line\n   \"v8_revision\": \"%s\",\n  line\n"
-
-    json_output = self.MakeEmptyTempFile()
-    csv_output = self.MakeEmptyTempFile()
-    self.WriteFakeVersionFile()
-
-    TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory()
-    chrome_dir = TEST_CONFIG["CHROMIUM"]
-    chrome_v8_dir = os.path.join(chrome_dir, "v8")
-    os.makedirs(chrome_v8_dir)
-
-    def ResetVersion(major, minor, build, patch=0):
-      return lambda: self.WriteFakeVersionFile(major=major,
-                                               minor=minor,
-                                               build=build,
-                                               patch=patch)
-
-    self.Expect([
-      Cmd("git status -s -uno", ""),
-      Cmd("git checkout -f origin/master", ""),
-      Cmd("git fetch", ""),
-      Cmd("git branch", "  branch1\n* branch2\n"),
-      Cmd("git new-branch %s" % TEST_CONFIG["BRANCHNAME"], ""),
-      Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
-      Cmd("git rev-list --max-age=395200 --tags",
-          "bad_tag\nhash_234\nhash_123\nhash_345\nhash_456\n"),
-      Cmd("git describe --tags bad_tag", "3.23.42-1-deadbeef"),
-      Cmd("git describe --tags hash_234", "3.3.1.1"),
-      Cmd("git describe --tags hash_123", "3.21.2"),
-      Cmd("git describe --tags hash_345", "3.22.3"),
-      Cmd("git describe --tags hash_456", "4.2.71"),
-      Cmd("git diff --name-only hash_234 hash_234^", VERSION_FILE),
-      Cmd("git checkout -f hash_234 -- %s" % VERSION_FILE, "",
-          cb=ResetVersion(3, 3, 1, 1)),
-      Cmd("git branch -r --contains hash_234", "  branch-heads/3.3\n"),
-      Cmd("git log -1 --format=%B hash_234", c_hash_234_commit_log),
-      Cmd("git log -1 --format=%s hash_234", ""),
-      Cmd("git log -1 --format=%B hash_234", c_hash_234_commit_log),
-      Cmd("git log -1 --format=%ci hash_234", "18:15"),
-      Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
-          cb=ResetVersion(3, 22, 5)),
-      Cmd("git diff --name-only hash_123 hash_123^", VERSION_FILE),
-      Cmd("git checkout -f hash_123 -- %s" % VERSION_FILE, "",
-          cb=ResetVersion(3, 21, 2)),
-      Cmd("git branch -r --contains hash_123", "  branch-heads/3.21\n"),
-      Cmd("git log -1 --format=%B hash_123", c_hash_123_commit_log),
-      Cmd("git log -1 --format=%s hash_123", ""),
-      Cmd("git log -1 --format=%B hash_123", c_hash_123_commit_log),
-      Cmd("git log -1 --format=%ci hash_123", "03:15"),
-      Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
-          cb=ResetVersion(3, 22, 5)),
-      Cmd("git diff --name-only hash_345 hash_345^", VERSION_FILE),
-      Cmd("git checkout -f hash_345 -- %s" % VERSION_FILE, "",
-          cb=ResetVersion(3, 22, 3)),
-      Cmd("git branch -r --contains hash_345", "  origin/candidates\n"),
-      Cmd("git log -1 --format=%B hash_345", c_hash_345_commit_log),
-      Cmd("git log -1 --format=%s hash_345", ""),
-      Cmd("git log -1 --format=%B hash_345", c_hash_345_commit_log),
-      Cmd("git log -1 --format=%ci hash_345", ""),
-      Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
-          cb=ResetVersion(3, 22, 5)),
-      Cmd("git diff --name-only hash_456 hash_456^", VERSION_FILE),
-      Cmd("git checkout -f hash_456 -- %s" % VERSION_FILE, "",
-          cb=ResetVersion(4, 2, 71)),
-      Cmd("git branch -r --contains hash_456", "  origin/4.2.71\n"),
-      Cmd("git log -1 --format=%B hash_456", c_hash_456_commit_log),
-      Cmd("git log -1 --format=%H 4.2.71", "hash_456"),
-      Cmd("git log -1 --format=%s hash_456", "Version 4.2.71"),
-      Cmd("git log -1 --format=%H hash_456^", "master_456"),
-      Cmd("git log -1 --format=%B master_456",
-          "Cr-Commit-Position: refs/heads/master@{#456}"),
-      Cmd("git log -1 --format=%B hash_456", c_hash_456_commit_log),
-      Cmd("git log -1 --format=%ci hash_456", "02:15"),
-      Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
-          cb=ResetVersion(3, 22, 5)),
-      Cmd("git fetch origin +refs/heads/*:refs/remotes/origin/* "
-          "+refs/branch-heads/*:refs/remotes/branch-heads/*", "",
-          cwd=chrome_dir),
-      Cmd("git fetch origin", "", cwd=chrome_v8_dir),
-      Cmd("git log --format=%H --grep=\"V8\" origin/master -- DEPS",
-          "c_hash1\nc_hash2\nc_hash3\n",
-          cwd=chrome_dir),
-      Cmd("git show c_hash1:DEPS", c_deps % "hash_456", cwd=chrome_dir),
-      Cmd("git log -1 --format=%B c_hash1", c_hash1_commit_log,
-          cwd=chrome_dir),
-      Cmd("git show c_hash2:DEPS", c_deps % "hash_345", cwd=chrome_dir),
-      Cmd("git log -1 --format=%B c_hash2", c_hash2_commit_log,
-          cwd=chrome_dir),
-      Cmd("git show c_hash3:DEPS", c_deps % "deadbeef", cwd=chrome_dir),
-      Cmd("git log -1 --format=%B c_hash3", c_hash3_commit_log,
-          cwd=chrome_dir),
-      Cmd("git branch -r", " weird/123\n  branch-heads/7\n", cwd=chrome_dir),
-      Cmd("git show refs/branch-heads/7:DEPS", c_deps % "hash_345",
-          cwd=chrome_dir),
-      URL("http://omahaproxy.appspot.com/all.json", """[{
-        "os": "win",
-        "versions": [{
-          "version": "2.2.2.2",
-          "v8_version": "22.2.2.2",
-          "current_reldate": "04/09/15",
-          "os": "win",
-          "channel": "canary",
-          "previous_version": "1.1.1.0"
-          }]
-        }]"""),
-      URL("http://omahaproxy.appspot.com/v8.json?version=1.1.1.0", """{
-        "chromium_version": "1.1.1.0",
-        "v8_version": "11.1.1.0"
-        }"""),
-      Cmd("git rev-list -1 11.1.1", "v8_previous_version_hash"),
-      Cmd("git rev-list -1 22.2.2.2", "v8_version_hash"),
-      Cmd("git checkout -f origin/master", ""),
-      Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], "")
-    ])
-
-    args = ["-c", TEST_CONFIG["CHROMIUM"],
-            "--json", json_output,
-            "--csv", csv_output,
-            "--max-releases", "1"]
-    Releases(TEST_CONFIG, self).Run(args)
-
-    # Check expected output.
-    csv = ("4.2.71,4.2.71,1,5678,\r\n"
-           "3.22.3,candidates,345,4567:5677,\r\n"
-           "3.21.2,3.21,123,,\r\n"
-           "3.3.1.1,3.3,234,,abc12\r\n")
-    self.assertEquals(csv, FileToText(csv_output))
-
-    expected_json = {"chrome_releases":{
-                                        "canaries": [
-                                                     {
-                           "chrome_version": "2.2.2.2",
-                           "os": "win",
-                           "release_date": "04/09/15",
-                           "v8_version": "22.2.2.2",
-                           "v8_version_hash": "v8_version_hash",
-                           "v8_previous_version": "11.1.1.0",
-                           "v8_previous_version_hash": "v8_previous_version_hash"
-                           }]},
-                     "releases":[
-      {
-        "revision": "1",
-        "revision_git": "hash_456",
-        "master_position": "456",
-        "master_hash": "master_456",
-        "patches_merged": "",
-        "version": "4.2.71",
-        "chromium_revision": "5678",
-        "branch": "4.2.71",
-        "review_link": "",
-        "date": "02:15",
-        "chromium_branch": "",
-        # FIXME(machenbach): Fix revisions link for git.
-        "revision_link": "https://code.google.com/p/v8/source/detail?r=1",
-      },
-      {
-        "revision": "345",
-        "revision_git": "hash_345",
-        "master_position": "",
-        "master_hash": "",
-        "patches_merged": "",
-        "version": "3.22.3",
-        "chromium_revision": "4567:5677",
-        "branch": "candidates",
-        "review_link": "",
-        "date": "",
-        "chromium_branch": "7",
-        "revision_link": "https://code.google.com/p/v8/source/detail?r=345",
-      },
-      {
-        "revision": "123",
-        "revision_git": "hash_123",
-        "patches_merged": "",
-        "master_position": "",
-        "master_hash": "",
-        "version": "3.21.2",
-        "chromium_revision": "",
-        "branch": "3.21",
-        "review_link": "",
-        "date": "03:15",
-        "chromium_branch": "",
-        "revision_link": "https://code.google.com/p/v8/source/detail?r=123",
-      },
-      {
-        "revision": "234",
-        "revision_git": "hash_234",
-        "patches_merged": "abc12",
-        "master_position": "",
-        "master_hash": "",
-        "version": "3.3.1.1",
-        "chromium_revision": "",
-        "branch": "3.3",
-        "review_link": "fake.com",
-        "date": "18:15",
-        "chromium_branch": "",
-        "revision_link": "https://code.google.com/p/v8/source/detail?r=234",
-      },],
-    }
-    self.assertEquals(expected_json, json.loads(FileToText(json_output)))
-
 if __name__ == '__main__':
   unittest.main()
diff --git a/src/v8/tools/node/testdata/v8/third_party/jinja2/jinja2 b/src/v8/tools/release/testdata/v8/third_party/googletest/src/googletest/include/gtest/baz/gtest_new
similarity index 100%
copy from src/v8/tools/node/testdata/v8/third_party/jinja2/jinja2
copy to src/v8/tools/release/testdata/v8/third_party/googletest/src/googletest/include/gtest/baz/gtest_new
diff --git a/src/v8/tools/node/testdata/v8/third_party/jinja2/jinja2 b/src/v8/tools/release/testdata/v8/third_party/googletest/src/googletest/include/gtest/gtest_new
similarity index 100%
rename from src/v8/tools/node/testdata/v8/third_party/jinja2/jinja2
rename to src/v8/tools/release/testdata/v8/third_party/googletest/src/googletest/include/gtest/gtest_new
diff --git a/src/v8/tools/release/testdata/v8/third_party/googletest/src/googletest/include/gtest/gtest_prod.h b/src/v8/tools/release/testdata/v8/third_party/googletest/src/googletest/include/gtest/gtest_prod.h
new file mode 100644
index 0000000..847c8bc
--- /dev/null
+++ b/src/v8/tools/release/testdata/v8/third_party/googletest/src/googletest/include/gtest/gtest_prod.h
@@ -0,0 +1 @@
+gtest_prod
diff --git a/src/v8/tools/node/testdata/v8/third_party/jinja2/jinja2 b/src/v8/tools/release/testdata/v8/third_party/googletest/src/googletest/include/gtest/new/gtest_new
similarity index 100%
copy from src/v8/tools/node/testdata/v8/third_party/jinja2/jinja2
copy to src/v8/tools/release/testdata/v8/third_party/googletest/src/googletest/include/gtest/new/gtest_new
diff --git a/src/v8/tools/run-clang-tidy.py b/src/v8/tools/run-clang-tidy.py
new file mode 100755
index 0000000..aee1b40
--- /dev/null
+++ b/src/v8/tools/run-clang-tidy.py
@@ -0,0 +1,424 @@
+#!/usr/bin/env python
+#
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import json
+import multiprocessing
+import optparse
+import os
+import re
+import subprocess
+import sys
+
+CLANG_TIDY_WARNING = re.compile(r'(\/.*?)\ .*\[(.*)\]$')
+CLANG_TIDY_CMDLINE_OUT = re.compile(r'^clang-tidy.*\ .*|^\./\.\*')
+FILE_REGEXS = ['../src/*', '../test/*']
+HEADER_REGEX = ['\.\.\/src\/.*|\.\.\/include\/.*|\.\.\/test\/.*']
+
+THREADS = multiprocessing.cpu_count()
+
+
+class ClangTidyWarning(object):
+  """
+  Wraps up a clang-tidy warning to present aggregated information.
+  """
+
+  def __init__(self, warning_type):
+    self.warning_type = warning_type
+    self.occurrences = set()
+
+  def add_occurrence(self, file_path):
+    self.occurrences.add(file_path.lstrip())
+
+  def __hash__(self):
+    return hash(self.warning_type)
+
+  def to_string(self, file_loc):
+    s = '[%s] #%d\n' % (self.warning_type, len(self.occurrences))
+    if file_loc:
+      s += ' ' + '\n  '.join(self.occurrences)
+      s += '\n'
+    return s
+
+  def __str__(self):
+    return self.to_string(False)
+
+  def __lt__(self, other):
+    return len(self.occurrences) < len(other.occurrences)
+
+
+def GenerateCompileCommands(build_folder):
+  """
+  Generate a compilation database.
+
+  Currently clang-tidy-4 does not understand all flags that are passed
+  by the build system, therefore, we remove them from the generated file.
+  """
+  ninja_ps = subprocess.Popen(
+    ['ninja', '-t', 'compdb', 'cxx', 'cc'],
+    stdout=subprocess.PIPE,
+    cwd=build_folder)
+
+  out_filepath = os.path.join(build_folder, 'compile_commands.json')
+  with open(out_filepath, 'w') as cc_file:
+    while True:
+        line = ninja_ps.stdout.readline()
+
+        if line == '':
+            break
+
+        line = line.replace('-fcomplete-member-pointers', '')
+        line = line.replace('-Wno-enum-compare-switch', '')
+        line = line.replace('-Wno-ignored-pragma-optimize', '')
+        line = line.replace('-Wno-null-pointer-arithmetic', '')
+        line = line.replace('-Wno-unused-lambda-capture', '')
+        line = line.replace('-Wno-defaulted-function-deleted', '')
+        cc_file.write(line)
+
+
+def skip_line(line):
+  """
+  Check if a clang-tidy output line should be skipped.
+  """
+  return bool(CLANG_TIDY_CMDLINE_OUT.search(line))
+
+
+def ClangTidyRunFull(build_folder, skip_output_filter, checks, auto_fix):
+  """
+  Run clang-tidy on the full codebase and print warnings.
+  """
+  extra_args = []
+  if auto_fix:
+    extra_args.append('-fix')
+
+  if checks is not None:
+    extra_args.append('-checks')
+    extra_args.append('-*, ' + checks)
+
+  with open(os.devnull, 'w') as DEVNULL:
+    ct_process = subprocess.Popen(
+      ['run-clang-tidy', '-j' + str(THREADS), '-p', '.']
+       + ['-header-filter'] + HEADER_REGEX + extra_args
+       + FILE_REGEXS,
+      cwd=build_folder,
+      stdout=subprocess.PIPE,
+      stderr=DEVNULL)
+  removing_check_header = False
+  empty_lines = 0
+
+  while True:
+    line = ct_process.stdout.readline()
+    if line == '':
+      break
+
+    # Skip all lines after "Enabled checks" and before two newlines,
+    # i.e., skip clang-tidy check list.
+    if line.startswith('Enabled checks'):
+      removing_check_header = True
+    if removing_check_header and not skip_output_filter:
+      if line == '\n':
+        empty_lines += 1
+      if empty_lines == 2:
+        removing_check_header = False
+      continue
+
+    # Different lines get removed to ease output reading.
+    if not skip_output_filter and skip_line(line):
+      continue
+
+    # Print line, because no filter was matched.
+    if line != '\n':
+        sys.stdout.write(line)
+
+
+def ClangTidyRunAggregate(build_folder, print_files):
+  """
+  Run clang-tidy on the full codebase and aggregate warnings into categories.
+  """
+  with open(os.devnull, 'w') as DEVNULL:
+    ct_process = subprocess.Popen(
+      ['run-clang-tidy', '-j' + str(THREADS), '-p', '.'] +
+        ['-header-filter'] + HEADER_REGEX +
+        FILE_REGEXS,
+      cwd=build_folder,
+      stdout=subprocess.PIPE,
+      stderr=DEVNULL)
+  warnings = dict()
+  while True:
+    line = ct_process.stdout.readline()
+    if line == '':
+      break
+
+    res = CLANG_TIDY_WARNING.search(line)
+    if res is not None:
+      warnings.setdefault(
+          res.group(2),
+          ClangTidyWarning(res.group(2))).add_occurrence(res.group(1))
+
+  for warning in sorted(warnings.values(), reverse=True):
+    sys.stdout.write(warning.to_string(print_files))
+
+
+def ClangTidyRunDiff(build_folder, diff_branch, auto_fix):
+  """
+  Run clang-tidy on the diff between current and the diff_branch.
+  """
+  if diff_branch is None:
+    diff_branch = subprocess.check_output(['git', 'merge-base',
+                                           'HEAD', 'origin/master']).strip()
+
+  git_ps = subprocess.Popen(
+    ['git', 'diff', '-U0', diff_branch], stdout=subprocess.PIPE)
+
+  extra_args = []
+  if auto_fix:
+    extra_args.append('-fix')
+
+  with open(os.devnull, 'w') as DEVNULL:
+    """
+    The script `clang-tidy-diff` does not provide support to add header-
+    filters. To still analyze headers we use the build path option `-path` to
+    inject our header-filter option. This works because the script just adds
+    the passed path string to the commandline of clang-tidy.
+    """
+    modified_build_folder = build_folder
+    modified_build_folder += ' -header-filter='
+    modified_build_folder += '\'' + ''.join(HEADER_REGEX) + '\''
+
+    ct_ps = subprocess.Popen(
+      ['clang-tidy-diff.py', '-path', modified_build_folder, '-p1'] +
+        extra_args,
+      stdin=git_ps.stdout,
+      stdout=subprocess.PIPE,
+      stderr=DEVNULL)
+  git_ps.wait()
+  while True:
+    line = ct_ps.stdout.readline()
+    if line == '':
+      break
+
+    if skip_line(line):
+      continue
+
+    sys.stdout.write(line)
+
+
+def rm_prefix(string, prefix):
+  """
+  Removes prefix from a string until the new string
+  no longer starts with the prefix.
+  """
+  while string.startswith(prefix):
+    string = string[len(prefix):]
+  return string
+
+
+def ClangTidyRunSingleFile(build_folder, filename_to_check, auto_fix,
+                           line_ranges=[]):
+  """
+  Run clang-tidy on a single file.
+  """
+  files_with_relative_path = []
+
+  compdb_filepath = os.path.join(build_folder, 'compile_commands.json')
+  with open(compdb_filepath) as raw_json_file:
+    compdb = json.load(raw_json_file)
+
+  for db_entry in compdb:
+    if db_entry['file'].endswith(filename_to_check):
+      files_with_relative_path.append(db_entry['file'])
+
+  with open(os.devnull, 'w') as DEVNULL:
+    for file_with_relative_path in files_with_relative_path:
+      line_filter = None
+      if len(line_ranges) != 0:
+        line_filter = '['
+        line_filter += '{ \"lines\":[' + ', '.join(line_ranges)
+        line_filter += '], \"name\":\"'
+        line_filter += rm_prefix(file_with_relative_path,
+                                 '../') + '\"}'
+        line_filter += ']'
+
+      extra_args = ['-line-filter=' + line_filter] if line_filter else []
+
+      if auto_fix:
+        extra_args.append('-fix')
+
+      subprocess.call(['clang-tidy', '-p', '.'] +
+                      extra_args +
+                      [file_with_relative_path],
+                      cwd=build_folder,
+                      stderr=DEVNULL)
+
+
+def CheckClangTidy():
+  """
+  Checks if a clang-tidy binary exists.
+  """
+  with open(os.devnull, 'w') as DEVNULL:
+    return subprocess.call(['which', 'clang-tidy'], stdout=DEVNULL) == 0
+
+
+def CheckCompDB(build_folder):
+  """
+  Checks if a compilation database exists in the build_folder.
+  """
+  return os.path.isfile(os.path.join(build_folder, 'compile_commands.json'))
+
+
+def DetectBuildFolder():
+    """
+    Tries to auto detect the last used build folder in out/
+    """
+    outdirs_folder = 'out/'
+    last_used = None
+    last_timestamp = -1
+    for outdir in [outdirs_folder + folder_name
+                   for folder_name in os.listdir(outdirs_folder)
+                   if os.path.isdir(outdirs_folder + folder_name)]:
+        outdir_modified_timestamp = os.path.getmtime(outdir)
+        if  outdir_modified_timestamp > last_timestamp:
+            last_timestamp = outdir_modified_timestamp
+            last_used = outdir
+
+    return last_used
+
+
+def GetOptions():
+  """
+  Generate the option parser for this script.
+  """
+  result = optparse.OptionParser()
+  result.add_option(
+    '-b',
+    '--build-folder',
+    help='Set V8 build folder',
+    dest='build_folder',
+    default=None)
+  result.add_option(
+    '-j',
+    help='Set the amount of threads that should be used',
+    dest='threads',
+    default=None)
+  result.add_option(
+    '--gen-compdb',
+    help='Generate a compilation database for clang-tidy',
+    default=False,
+    action='store_true')
+  result.add_option(
+    '--no-output-filter',
+    help='Don\'t use any output filtering',
+    default=False,
+    action='store_true')
+  result.add_option(
+    '--fix',
+    help='Fix auto fixable issues',
+    default=False,
+    dest='auto_fix',
+    action='store_true'
+  )
+
+  # Full clang-tidy.
+  full_run_g = optparse.OptionGroup(result, 'Clang-tidy full', '')
+  full_run_g.add_option(
+    '--full',
+    help='Run clang-tidy on the whole codebase',
+    default=False,
+    action='store_true')
+  full_run_g.add_option('--checks',
+                        help='Clang-tidy checks to use.',
+                        default=None)
+  result.add_option_group(full_run_g)
+
+  # Aggregate clang-tidy.
+  agg_run_g = optparse.OptionGroup(result, 'Clang-tidy aggregate', '')
+  agg_run_g.add_option('--aggregate', help='Run clang-tidy on the whole '\
+             'codebase and aggregate the warnings',
+             default=False, action='store_true')
+  agg_run_g.add_option('--show-loc', help='Show file locations when running '\
+             'in aggregate mode', default=False,
+             action='store_true')
+  result.add_option_group(agg_run_g)
+
+  # Diff clang-tidy.
+  diff_run_g = optparse.OptionGroup(result, 'Clang-tidy diff', '')
+  diff_run_g.add_option('--branch', help='Run clang-tidy on the diff '\
+             'between HEAD and the merge-base between HEAD '\
+             'and DIFF_BRANCH (origin/master by default).',
+             default=None, dest='diff_branch')
+  result.add_option_group(diff_run_g)
+
+  # Single clang-tidy.
+  single_run_g = optparse.OptionGroup(result, 'Clang-tidy single', '')
+  single_run_g.add_option(
+    '--single', help='', default=False, action='store_true')
+  single_run_g.add_option(
+    '--file', help='File name to check', default=None, dest='file_name')
+  single_run_g.add_option('--lines', help='Limit checks to a line range. '\
+              'For example: --lines="[2,4], [5,6]"',
+              default=[], dest='line_ranges')
+
+  result.add_option_group(single_run_g)
+  return result
+
+
+def main():
+  parser = GetOptions()
+  (options, _) = parser.parse_args()
+
+  if options.threads is not None:
+    global THREADS
+    THREADS = options.threads
+
+  if options.build_folder is None:
+    options.build_folder = DetectBuildFolder()
+
+  if not CheckClangTidy():
+    print('Could not find clang-tidy')
+  elif options.build_folder is None or not os.path.isdir(options.build_folder):
+    print('Please provide a build folder with -b')
+  elif options.gen_compdb:
+    GenerateCompileCommands(options.build_folder)
+  elif not CheckCompDB(options.build_folder):
+    print('Could not find compilation database, ' \
+      'please generate it with --gen-compdb')
+  else:
+    print('Using build folder:', options.build_folder)
+    if options.full:
+      print('Running clang-tidy - full')
+      ClangTidyRunFull(options.build_folder,
+                       options.no_output_filter,
+                       options.checks,
+                       options.auto_fix)
+    elif options.aggregate:
+      print('Running clang-tidy - aggregating warnings')
+      if options.auto_fix:
+        print('Auto fix not working in aggregate mode, running without.')
+      ClangTidyRunAggregate(options.build_folder, options.show_loc)
+    elif options.single:
+      print('Running clang-tidy - single on ' + options.file_name)
+      if options.file_name is not None:
+        line_ranges = []
+        for match in re.findall(r'(\[.*?\])', options.line_ranges):
+          if match is not []:
+            line_ranges.append(match)
+        ClangTidyRunSingleFile(options.build_folder,
+                               options.file_name,
+                               options.auto_fix,
+                               line_ranges)
+      else:
+        print('Filename not provided, please specify a filename with --file')
+    else:
+      print('Running clang-tidy')
+      ClangTidyRunDiff(options.build_folder,
+                       options.diff_branch,
+                       options.auto_fix)
+
+
+if __name__ == '__main__':
+  main()
diff --git a/src/v8/tools/run-num-fuzzer.py b/src/v8/tools/run-num-fuzzer.py
new file mode 100755
index 0000000..9b5a065
--- /dev/null
+++ b/src/v8/tools/run-num-fuzzer.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+#
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import sys
+
+from testrunner import num_fuzzer
+
+
+if __name__ == "__main__":
+  sys.exit(num_fuzzer.NumFuzzer().execute())
diff --git a/src/v8/tools/run-perf.sh b/src/v8/tools/run-perf.sh
index 8375093..0317a9a 100755
--- a/src/v8/tools/run-perf.sh
+++ b/src/v8/tools/run-perf.sh
@@ -55,4 +55,4 @@
   -e $EVENT_TYPE \
   -c $SAMPLE_EVERY_N_CYCLES \
   --call-graph $CALL_GRAPH_METHOD \
-  -i "$COMMAND" --perf_basic_prof "$@"
+  -i "$COMMAND" --perf-basic-prof "$@"
diff --git a/src/v8/tools/run-wasm-api-tests.py b/src/v8/tools/run-wasm-api-tests.py
new file mode 100755
index 0000000..79f53cb
--- /dev/null
+++ b/src/v8/tools/run-wasm-api-tests.py
@@ -0,0 +1,167 @@
+#!/usr/bin/env python
+#
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""\
+Helper script for compiling and running the Wasm C/C++ API examples.
+
+Usage: tools/run-wasm-api-tests.py outdir tempdir [filters...]
+
+"outdir" is the build output directory containing libwee8, e.g. out/x64.release
+"tempdir" is a temporary dir where this script may put its artifacts. It is
+the caller's responsibility to clean it up afterwards.
+
+By default, this script builds and runs all examples, both the respective
+C and C++ versions, both with GCC ("gcc" and "g++" binaries found in $PATH)
+and V8's bundled Clang in third_party/llvm-build/. You can use any number
+of "filters" arguments to run only a subset:
+ - "c": run C versions of examples
+ - "cc": run C++ versions of examples
+ - "gcc": compile with GCC
+ - "clang": compile with Clang
+ - "hello" etc.: run "hello" example
+"""
+
+from __future__ import print_function
+
+import os
+import shutil
+import subprocess
+import sys
+
+CFLAGS = "-DDEBUG -Wall -Werror -O0 -ggdb -fsanitize=address"
+
+CHECKOUT_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+WASM_PATH = os.path.join(CHECKOUT_PATH, "third_party", "wasm-api")
+CLANG_PATH = os.path.join(CHECKOUT_PATH, "third_party", "llvm-build",
+                          "Release+Asserts", "bin")
+
+EXAMPLES = ["hello", "callback", "trap", "reflect", "global", "table",
+            "memory", "finalize", "serialize", "threads"]
+
+CLANG = {
+  "name": "Clang",
+  "c": os.path.join(CLANG_PATH, "clang"),
+  "cc": os.path.join(CLANG_PATH, "clang++"),
+  "ldflags": "-fsanitize-memory-track-origins -fsanitize-memory-use-after-dtor",
+}
+GCC = {
+  "name": "GCC",
+  "c": "gcc",
+  "cc": "g++",
+  "ldflags": "",
+}
+
+C = {
+  "name": "C",
+  "suffix": "c",
+  "cflags": "",
+}
+CXX = {
+  "name": "C++",
+  "suffix": "cc",
+  "cflags": "-std=c++11",
+}
+
+MIN_ARGS = 3  # Script, outdir, tempdir
+
+def _Call(cmd_list, silent=False):
+  cmd = " ".join(cmd_list)
+  if not silent: print("# %s" % cmd)
+  return subprocess.call(cmd, shell=True)
+
+class Runner(object):
+  def __init__(self, name, outdir, tempdir):
+    self.name = name
+    self.outdir = outdir
+    self.tempdir = tempdir
+    self.src_file_basename = os.path.join(WASM_PATH, "example", name)
+    self.dst_file_basename = os.path.join(tempdir, name)
+    self.lib_file = os.path.join(outdir, "obj", "libwee8.a")
+    if not os.path.exists(self.lib_file):
+      print("libwee8 library not found, make sure to pass the outdir as "
+            "first argument; see --help")
+      sys.exit(1)
+    src_wasm_file = self.src_file_basename + ".wasm"
+    dst_wasm_file = self.dst_file_basename + ".wasm"
+    shutil.copyfile(src_wasm_file, dst_wasm_file)
+
+  def _Error(self, step, lang, compiler, code):
+    print("Error: %s failed. To repro: tools/run-wasm-api-tests.py "
+          "%s %s %s %s %s" %
+          (step, self.outdir, self.tempdir, self.name, lang,
+           compiler["name"].lower()))
+    return code
+
+  def CompileAndRun(self, compiler, language):
+    print("==== %s %s/%s ====" %
+          (self.name, language["name"], compiler["name"]))
+    lang = language["suffix"]
+    src_file = self.src_file_basename + "." + lang
+    exe_file = self.dst_file_basename + "-" + lang
+    obj_file = exe_file  + ".o"
+    # Compile.
+    c = _Call([compiler[lang], "-c", language["cflags"], CFLAGS,
+               "-I", WASM_PATH, "-o", obj_file, src_file])
+    if c: return self._Error("compilation", lang, compiler, c)
+    # Link.
+    c = _Call([compiler["cc"], CFLAGS, compiler["ldflags"], obj_file,
+               "-o", exe_file, self.lib_file, "-ldl -pthread"])
+    if c: return self._Error("linking", lang, compiler, c)
+    # Execute.
+    exe_file = "./%s-%s" % (self.name, lang)
+    c = _Call(["cd", self.tempdir, ";", exe_file])
+    if c: return self._Error("execution", lang, compiler, c)
+    return 0
+
+def Main(args):
+  if (len(args) < MIN_ARGS or args[1] in ("-h", "--help", "help")):
+    print(__doc__)
+    return 1
+
+  outdir = sys.argv[1]
+  tempdir = sys.argv[2]
+  result = 0
+  examples = EXAMPLES
+  compilers = (GCC, CLANG)
+  languages = (C, CXX)
+  if len(args) > MIN_ARGS:
+    custom_compilers = []
+    custom_languages = []
+    custom_examples = []
+    for i in range(MIN_ARGS, len(args)):
+      arg = args[i]
+      if arg == "c" and C not in custom_languages:
+        custom_languages.append(C)
+      elif arg in ("cc", "cpp", "cxx", "c++") and CXX not in custom_languages:
+        custom_languages.append(CXX)
+      elif arg in ("gcc", "g++") and GCC not in custom_compilers:
+        custom_compilers.append(GCC)
+      elif arg in ("clang", "clang++") and CLANG not in custom_compilers:
+        custom_compilers.append(CLANG)
+      elif arg in EXAMPLES and arg not in custom_examples:
+        custom_examples.append(arg)
+      else:
+        print("Didn't understand '%s'" % arg)
+        return 1
+    if custom_compilers:
+      compilers = custom_compilers
+    if custom_languages:
+      languages = custom_languages
+    if custom_examples:
+      examples = custom_examples
+  for example in examples:
+    runner = Runner(example, outdir, tempdir)
+    for compiler in compilers:
+      for language in languages:
+        c = runner.CompileAndRun(compiler, language)
+        if c: result = c
+  if result:
+    print("\nFinished with errors.")
+  else:
+    print("\nFinished successfully.")
+  return result
+
+if __name__ == "__main__":
+  sys.exit(Main(sys.argv))
diff --git a/src/v8/tools/run_perf.py b/src/v8/tools/run_perf.py
old mode 100755
new mode 100644
index 3823eb5..a98dcae
--- a/src/v8/tools/run_perf.py
+++ b/src/v8/tools/run_perf.py
@@ -1,4 +1,3 @@
-#!/usr/bin/env python
 # Copyright 2014 the V8 project authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
@@ -11,6 +10,7 @@
 The suite json format is expected to be:
 {
   "path": <relative path chunks to perf resources and main file>,
+  "owners": [<list of email addresses of benchmark owners (required)>],
   "name": <optional suite name, file name is default>,
   "archs": [<architecture name for which this suite is run>, ...],
   "binary": <name of binary to run, default "d8">,
@@ -18,6 +18,10 @@
   "test_flags": [<flag to the test file>, ...],
   "run_count": <how often will this suite run (optional)>,
   "run_count_XXX": <how often will this suite run for arch XXX (optional)>,
+  "timeout": <how long test is allowed to run>,
+  "timeout_XXX": <how long test is allowed to run for arch XXX>,
+  "retry_count": <how many times to retry failures (in addition to first try)>,
+  "retry_count_XXX": <how many times to retry failures for arch XXX>
   "resources": [<js file to be moved to android device>, ...]
   "main": <main js perf runner file>,
   "results_regexp": <optional regexp>,
@@ -55,6 +59,7 @@
 Full example (suite with one runner):
 {
   "path": ["."],
+  "owners": ["username@chromium.org"],
   "flags": ["--expose-gc"],
   "test_flags": ["5"],
   "archs": ["ia32", "x64"],
@@ -74,6 +79,7 @@
 Full example (suite with several runners):
 {
   "path": ["."],
+  "owners": ["username@chromium.org", "otherowner@google.com"],
   "flags": ["--expose-gc"],
   "archs": ["ia32", "x64"],
   "run_count": 5,
@@ -96,50 +102,48 @@
 The test flags are passed to the js test file after '--'.
 """
 
+# for py2/py3 compatibility
+from __future__ import print_function
+from functools import reduce
+
 from collections import OrderedDict
+import copy
 import json
 import logging
 import math
-import optparse
+import argparse
 import os
 import re
 import subprocess
 import sys
+import time
+import traceback
 
+import numpy
+
+from testrunner.local import android
 from testrunner.local import command
 from testrunner.local import utils
+from testrunner.objects.output import Output, NULL_OUTPUT
 
-ARCH_GUESS = utils.DefaultArch()
-SUPPORTED_ARCHS = ["arm",
-                   "ia32",
-                   "mips",
-                   "mipsel",
-                   "x64",
-                   "arm64"]
+try:
+  basestring       # Python 2
+except NameError:  # Python 3
+  basestring = str
 
-GENERIC_RESULTS_RE = re.compile(r"^RESULT ([^:]+): ([^=]+)= ([^ ]+) ([^ ]*)$")
-RESULT_STDDEV_RE = re.compile(r"^\{([^\}]+)\}$")
-RESULT_LIST_RE = re.compile(r"^\[([^\]]+)\]$")
+SUPPORTED_ARCHS = ['arm',
+                   'ia32',
+                   'mips',
+                   'mipsel',
+                   'x64',
+                   'arm64']
+
+GENERIC_RESULTS_RE = re.compile(r'^RESULT ([^:]+): ([^=]+)= ([^ ]+) ([^ ]*)$')
+RESULT_STDDEV_RE = re.compile(r'^\{([^\}]+)\}$')
+RESULT_LIST_RE = re.compile(r'^\[([^\]]+)\]$')
 TOOLS_BASE = os.path.abspath(os.path.dirname(__file__))
-
-
-def LoadAndroidBuildTools(path):  # pragma: no cover
-  assert os.path.exists(path)
-  sys.path.insert(0, path)
-
-  import devil_chromium
-  from devil.android import device_errors  # pylint: disable=import-error
-  from devil.android import device_utils  # pylint: disable=import-error
-  from devil.android.sdk import adb_wrapper  # pylint: disable=import-error
-  from devil.android.perf import cache_control  # pylint: disable=import-error
-  from devil.android.perf import perf_control  # pylint: disable=import-error
-  global adb_wrapper
-  global cache_control
-  global device_errors
-  global device_utils
-  global perf_control
-
-  devil_chromium.Initialize()
+INFRA_FAILURE_RETCODE = 87
+MIN_RUNS_FOR_CONFIDENCE = 10
 
 
 def GeometricMean(values):
@@ -148,102 +152,131 @@
   The mean is calculated using log to avoid overflow.
   """
   values = map(float, values)
-  return str(math.exp(sum(map(math.log, values)) / len(values)))
+  return math.exp(sum(map(math.log, values)) / len(values))
 
 
-class Results(object):
-  """Place holder for result traces."""
-  def __init__(self, traces=None, errors=None):
-    self.traces = traces or []
-    self.errors = errors or []
+class ResultTracker(object):
+  """Class that tracks trace/runnable results and produces script output.
+
+  The output is structured like this:
+  {
+    "traces": [
+      {
+        "graphs": ["path", "to", "trace", "config"],
+        "units": <string describing units, e.g. "ms" or "KB">,
+        "results": [<list of values measured over several runs>],
+        "stddev": <stddev of the value if measure by script or ''>
+      },
+      ...
+    ],
+    "runnables": [
+      {
+        "graphs": ["path", "to", "runnable", "config"],
+        "durations": [<list of durations of each runnable run in seconds>],
+        "timeout": <timeout configured for runnable in seconds>,
+      },
+      ...
+    ],
+    "errors": [<list of strings describing errors>],
+  }
+  """
+  def __init__(self):
+    self.traces = {}
+    self.errors = []
+    self.runnables = {}
+
+  def AddTraceResult(self, trace, result, stddev):
+    if trace.name not in self.traces:
+      self.traces[trace.name] = {
+        'graphs': trace.graphs,
+        'units': trace.units,
+        'results': [result],
+        'stddev': stddev or '',
+      }
+    else:
+      existing_entry = self.traces[trace.name]
+      assert trace.graphs == existing_entry['graphs']
+      assert trace.units == existing_entry['units']
+      if stddev:
+        existing_entry['stddev'] = stddev
+      existing_entry['results'].append(result)
+
+  def TraceHasStdDev(self, trace):
+    return trace.name in self.traces and self.traces[trace.name]['stddev'] != ''
+
+  def AddError(self, error):
+    self.errors.append(error)
+
+  def AddRunnableDuration(self, runnable, duration):
+    """Records a duration of a specific run of the runnable."""
+    if runnable.name not in self.runnables:
+      self.runnables[runnable.name] = {
+        'graphs': runnable.graphs,
+        'durations': [duration],
+        'timeout': runnable.timeout,
+      }
+    else:
+      existing_entry = self.runnables[runnable.name]
+      assert runnable.timeout == existing_entry['timeout']
+      assert runnable.graphs == existing_entry['graphs']
+      existing_entry['durations'].append(duration)
 
   def ToDict(self):
-    return {"traces": self.traces, "errors": self.errors}
+    return {
+        'traces': self.traces.values(),
+        'errors': self.errors,
+        'runnables': self.runnables.values(),
+    }
 
   def WriteToFile(self, file_name):
-    with open(file_name, "w") as f:
+    with open(file_name, 'w') as f:
       f.write(json.dumps(self.ToDict()))
 
-  def __add__(self, other):
-    self.traces += other.traces
-    self.errors += other.errors
-    return self
+  def HasEnoughRuns(self, graph_config, confidence_level):
+    """Checks if the mean of the results for a given trace config is within
+    0.1% of the true value with the specified confidence level.
+
+    This assumes Gaussian distribution of the noise and based on
+    https://en.wikipedia.org/wiki/68%E2%80%9395%E2%80%9399.7_rule.
+
+    Args:
+      graph_config: An instance of GraphConfig.
+      confidence_level: Number of standard deviations from the mean that all
+          values must lie within. Typical values are 1, 2 and 3 and correspond
+          to 68%, 95% and 99.7% probability that the measured value is within
+          0.1% of the true value.
+
+    Returns:
+      True if specified confidence level have been achieved.
+    """
+    if not isinstance(graph_config, TraceConfig):
+      return all(self.HasEnoughRuns(child, confidence_level)
+                 for child in graph_config.children)
+
+    trace = self.traces.get(graph_config.name, {})
+    results = trace.get('results', [])
+    logging.debug('HasEnoughRuns for %s', graph_config.name)
+
+    if len(results) < MIN_RUNS_FOR_CONFIDENCE:
+      logging.debug('  Ran %d times, need at least %d',
+                    len(results), MIN_RUNS_FOR_CONFIDENCE)
+      return False
+
+    logging.debug('  Results: %d entries', len(results))
+    mean = numpy.mean(results)
+    mean_stderr = numpy.std(results) / numpy.sqrt(len(results))
+    logging.debug('  Mean: %.2f, mean_stderr: %.2f', mean, mean_stderr)
+    logging.info('>>> Confidence level is %.2f', mean / (1000.0 * mean_stderr))
+    return confidence_level * mean_stderr < mean / 1000.0
 
   def __str__(self):  # pragma: no cover
-    return str(self.ToDict())
+    return json.dumps(self.ToDict(), indent=2, separators=(',', ': '))
 
 
-class Measurement(object):
-  """Represents a series of results of one trace.
-
-  The results are from repetitive runs of the same executable. They are
-  gathered by repeated calls to ConsumeOutput.
-  """
-  def __init__(self, graphs, units, results_regexp, stddev_regexp):
-    self.name = '/'.join(graphs)
-    self.graphs = graphs
-    self.units = units
-    self.results_regexp = results_regexp
-    self.stddev_regexp = stddev_regexp
-    self.results = []
-    self.errors = []
-    self.stddev = ""
-    self.process_size = False
-
-  def ConsumeOutput(self, stdout):
-    try:
-      result = re.search(self.results_regexp, stdout, re.M).group(1)
-      self.results.append(str(float(result)))
-    except ValueError:
-      self.errors.append("Regexp \"%s\" returned a non-numeric for test %s."
-                         % (self.results_regexp, self.name))
-    except:
-      self.errors.append("Regexp \"%s\" didn't match for test %s."
-                         % (self.results_regexp, self.name))
-
-    try:
-      if self.stddev_regexp and self.stddev:
-        self.errors.append("Test %s should only run once since a stddev "
-                           "is provided by the test." % self.name)
-      if self.stddev_regexp:
-        self.stddev = re.search(self.stddev_regexp, stdout, re.M).group(1)
-    except:
-      self.errors.append("Regexp \"%s\" didn't match for test %s."
-                         % (self.stddev_regexp, self.name))
-
-  def GetResults(self):
-    return Results([{
-      "graphs": self.graphs,
-      "units": self.units,
-      "results": self.results,
-      "stddev": self.stddev,
-    }], self.errors)
-
-
-class NullMeasurement(object):
-  """Null object to avoid having extra logic for configurations that don't
-  require secondary run, e.g. CI bots.
-  """
-  def ConsumeOutput(self, stdout):
-    pass
-
-  def GetResults(self):
-    return Results()
-
-
-def Unzip(iterable):
-  left = []
-  right = []
-  for l, r in iterable:
-    left.append(l)
-    right.append(r)
-  return lambda: iter(left), lambda: iter(right)
-
-
-def RunResultsProcessor(results_processor, stdout, count):
+def RunResultsProcessor(results_processor, output, count):
   # Dummy pass through for null-runs.
-  if stdout is None:
-    return None
+  if output.stdout is None:
+    return output
 
   # We assume the results processor is relative to the suite.
   assert os.path.exists(results_processor)
@@ -253,113 +286,10 @@
       stdout=subprocess.PIPE,
       stderr=subprocess.PIPE,
   )
-  result, _ = p.communicate(input=stdout)
-  print ">>> Processed stdout (#%d):" % count
-  print result
-  return result
-
-
-def AccumulateResults(
-    graph_names, trace_configs, iter_output, perform_measurement, calc_total):
-  """Iterates over the output of multiple benchmark reruns and accumulates
-  results for a configured list of traces.
-
-  Args:
-    graph_names: List of names that configure the base path of the traces. E.g.
-                 ['v8', 'Octane'].
-    trace_configs: List of "TraceConfig" instances. Each trace config defines
-                   how to perform a measurement.
-    iter_output: Iterator over the standard output of each test run.
-    perform_measurement: Whether to actually run tests and perform measurements.
-                         This is needed so that we reuse this script for both CI
-                         and trybot, but want to ignore second run on CI without
-                         having to spread this logic throughout the script.
-    calc_total: Boolean flag to speficy the calculation of a summary trace.
-  Returns: A "Results" object.
-  """
-  measurements = [
-    trace.CreateMeasurement(perform_measurement) for trace in trace_configs]
-  for stdout in iter_output():
-    for measurement in measurements:
-      measurement.ConsumeOutput(stdout)
-
-  res = reduce(lambda r, m: r + m.GetResults(), measurements, Results())
-
-  if not res.traces or not calc_total:
-    return res
-
-  # Assume all traces have the same structure.
-  if len(set(map(lambda t: len(t["results"]), res.traces))) != 1:
-    res.errors.append("Not all traces have the same number of results.")
-    return res
-
-  # Calculate the geometric means for all traces. Above we made sure that
-  # there is at least one trace and that the number of results is the same
-  # for each trace.
-  n_results = len(res.traces[0]["results"])
-  total_results = [GeometricMean(t["results"][i] for t in res.traces)
-                   for i in range(0, n_results)]
-  res.traces.append({
-    "graphs": graph_names + ["Total"],
-    "units": res.traces[0]["units"],
-    "results": total_results,
-    "stddev": "",
-  })
-  return res
-
-
-def AccumulateGenericResults(graph_names, suite_units, iter_output):
-  """Iterates over the output of multiple benchmark reruns and accumulates
-  generic results.
-
-  Args:
-    graph_names: List of names that configure the base path of the traces. E.g.
-                 ['v8', 'Octane'].
-    suite_units: Measurement default units as defined by the benchmark suite.
-    iter_output: Iterator over the standard output of each test run.
-  Returns: A "Results" object.
-  """
-  traces = OrderedDict()
-  for stdout in iter_output():
-    if stdout is None:
-      # The None value is used as a null object to simplify logic.
-      continue
-    for line in stdout.strip().splitlines():
-      match = GENERIC_RESULTS_RE.match(line)
-      if match:
-        stddev = ""
-        graph = match.group(1)
-        trace = match.group(2)
-        body = match.group(3)
-        units = match.group(4)
-        match_stddev = RESULT_STDDEV_RE.match(body)
-        match_list = RESULT_LIST_RE.match(body)
-        errors = []
-        if match_stddev:
-          result, stddev = map(str.strip, match_stddev.group(1).split(","))
-          results = [result]
-        elif match_list:
-          results = map(str.strip, match_list.group(1).split(","))
-        else:
-          results = [body.strip()]
-
-        try:
-          results = map(lambda r: str(float(r)), results)
-        except ValueError:
-          results = []
-          errors = ["Found non-numeric in %s" %
-                    "/".join(graph_names + [graph, trace])]
-
-        trace_result = traces.setdefault(trace, Results([{
-          "graphs": graph_names + [graph, trace],
-          "units": (units or suite_units).strip(),
-          "results": [],
-          "stddev": "",
-        }], errors))
-        trace_result.traces[0]["results"].extend(results)
-        trace_result.traces[0]["stddev"] = stddev
-
-  return reduce(lambda r, t: r + t, traces.itervalues(), Results())
+  new_output = copy.copy(output)
+  new_output.stdout, _ = p.communicate(input=output.stdout)
+  logging.info('>>> Processed stdout (#%d):\n%s', count, output.stdout)
+  return new_output
 
 
 class Node(object):
@@ -370,14 +300,19 @@
   def AppendChild(self, child):
     self._children.append(child)
 
+  @property
+  def children(self):
+    return self._children
+
 
 class DefaultSentinel(Node):
   """Fake parent node with all default values."""
-  def __init__(self, binary = "d8"):
+  def __init__(self, binary = 'd8'):
     super(DefaultSentinel, self).__init__()
     self.binary = binary
     self.run_count = 10
     self.timeout = 60
+    self.retry_count = 4
     self.path = []
     self.graphs = []
     self.flags = []
@@ -387,8 +322,9 @@
     self.results_processor = None
     self.results_regexp = None
     self.stddev_regexp = None
-    self.units = "score"
+    self.units = 'score'
     self.total = False
+    self.owners = []
 
 
 class GraphConfig(Node):
@@ -400,32 +336,36 @@
     super(GraphConfig, self).__init__()
     self._suite = suite
 
-    assert isinstance(suite.get("path", []), list)
-    assert isinstance(suite["name"], basestring)
-    assert isinstance(suite.get("flags", []), list)
-    assert isinstance(suite.get("test_flags", []), list)
-    assert isinstance(suite.get("resources", []), list)
+    assert isinstance(suite.get('path', []), list)
+    assert isinstance(suite.get('owners', []), list)
+    assert isinstance(suite['name'], basestring)
+    assert isinstance(suite.get('flags', []), list)
+    assert isinstance(suite.get('test_flags', []), list)
+    assert isinstance(suite.get('resources', []), list)
 
     # Accumulated values.
-    self.path = parent.path[:] + suite.get("path", [])
-    self.graphs = parent.graphs[:] + [suite["name"]]
-    self.flags = parent.flags[:] + suite.get("flags", [])
-    self.test_flags = parent.test_flags[:] + suite.get("test_flags", [])
+    self.path = parent.path[:] + suite.get('path', [])
+    self.graphs = parent.graphs[:] + [suite['name']]
+    self.flags = parent.flags[:] + suite.get('flags', [])
+    self.test_flags = parent.test_flags[:] + suite.get('test_flags', [])
+    self.owners = parent.owners[:] + suite.get('owners', [])
 
     # Values independent of parent node.
-    self.resources = suite.get("resources", [])
+    self.resources = suite.get('resources', [])
 
     # Descrete values (with parent defaults).
-    self.binary = suite.get("binary", parent.binary)
-    self.run_count = suite.get("run_count", parent.run_count)
-    self.run_count = suite.get("run_count_%s" % arch, self.run_count)
-    self.timeout = suite.get("timeout", parent.timeout)
-    self.timeout = suite.get("timeout_%s" % arch, self.timeout)
-    self.units = suite.get("units", parent.units)
-    self.total = suite.get("total", parent.total)
+    self.binary = suite.get('binary', parent.binary)
+    self.run_count = suite.get('run_count', parent.run_count)
+    self.run_count = suite.get('run_count_%s' % arch, self.run_count)
+    self.retry_count = suite.get('retry_count', parent.retry_count)
+    self.retry_count = suite.get('retry_count_%s' % arch, self.retry_count)
+    self.timeout = suite.get('timeout', parent.timeout)
+    self.timeout = suite.get('timeout_%s' % arch, self.timeout)
+    self.units = suite.get('units', parent.units)
+    self.total = suite.get('total', parent.total)
     self.results_processor = suite.get(
-        "results_processor", parent.results_processor)
-    self.process_size = suite.get("process_size", parent.process_size)
+        'results_processor', parent.results_processor)
+    self.process_size = suite.get('process_size', parent.process_size)
 
     # A regular expression for results. If the parent graph provides a
     # regexp and the current suite has none, a string place holder for the
@@ -433,17 +373,21 @@
     # TODO(machenbach): Currently that makes only sense for the leaf level.
     # Multiple place holders for multiple levels are not supported.
     if parent.results_regexp:
-      regexp_default = parent.results_regexp % re.escape(suite["name"])
+      regexp_default = parent.results_regexp % re.escape(suite['name'])
     else:
       regexp_default = None
-    self.results_regexp = suite.get("results_regexp", regexp_default)
+    self.results_regexp = suite.get('results_regexp', regexp_default)
 
     # A similar regular expression for the standard deviation (optional).
     if parent.stddev_regexp:
-      stddev_default = parent.stddev_regexp % re.escape(suite["name"])
+      stddev_default = parent.stddev_regexp % re.escape(suite['name'])
     else:
       stddev_default = None
-    self.stddev_regexp = suite.get("stddev_regexp", stddev_default)
+    self.stddev_regexp = suite.get('stddev_regexp', stddev_default)
+
+  @property
+  def name(self):
+    return '/'.join(self.graphs)
 
 
 class TraceConfig(GraphConfig):
@@ -451,34 +395,60 @@
   def __init__(self, suite, parent, arch):
     super(TraceConfig, self).__init__(suite, parent, arch)
     assert self.results_regexp
+    assert self.owners
 
-  def CreateMeasurement(self, perform_measurement):
-    if not perform_measurement:
-      return NullMeasurement()
+  def ConsumeOutput(self, output, result_tracker):
+    """Extracts trace results from the output.
 
-    return Measurement(
-        self.graphs,
-        self.units,
-        self.results_regexp,
-        self.stddev_regexp,
-    )
+    Args:
+      output: Output object from the test run.
+      result_tracker: Result tracker to be updated.
+
+    Returns:
+      The raw extracted result value or None if an error occurred.
+    """
+    result = None
+    stddev = None
+
+    try:
+      result = float(
+        re.search(self.results_regexp, output.stdout, re.M).group(1))
+    except ValueError:
+      result_tracker.AddError(
+          'Regexp "%s" returned a non-numeric for test %s.' %
+          (self.results_regexp, self.name))
+    except:
+      result_tracker.AddError(
+          'Regexp "%s" did not match for test %s.' %
+          (self.results_regexp, self.name))
+
+    try:
+      if self.stddev_regexp:
+        if result_tracker.TraceHasStdDev(self):
+          result_tracker.AddError(
+              'Test %s should only run once since a stddev is provided by the '
+              'test.' % self.name)
+        stddev = re.search(self.stddev_regexp, output.stdout, re.M).group(1)
+    except:
+      result_tracker.AddError(
+          'Regexp "%s" did not match for test %s.' %
+          (self.stddev_regexp, self.name))
+
+    if result:
+      result_tracker.AddTraceResult(self, result, stddev)
+    return result
 
 
 class RunnableConfig(GraphConfig):
   """Represents a runnable suite definition (i.e. has a main file).
   """
+  def __init__(self, suite, parent, arch):
+    super(RunnableConfig, self).__init__(suite, parent, arch)
+    self.arch = arch
+
   @property
   def main(self):
-    return self._suite.get("main", "")
-
-  def PostProcess(self, stdouts_iter):
-    if self.results_processor:
-      def it():
-        for i, stdout in enumerate(stdouts_iter()):
-          yield RunResultsProcessor(self.results_processor, stdout, i + 1)
-      return it
-    else:
-      return stdouts_iter
+    return self._suite.get('main', '')
 
   def ChangeCWD(self, suite_path):
     """Changes the cwd to to path defined in the current graph.
@@ -490,17 +460,17 @@
     os.chdir(os.path.join(suite_dir, bench_dir))
 
   def GetCommandFlags(self, extra_flags=None):
-    suffix = ["--"] + self.test_flags if self.test_flags else []
+    suffix = ['--'] + self.test_flags if self.test_flags else []
     return self.flags + (extra_flags or []) + [self.main] + suffix
 
   def GetCommand(self, cmd_prefix, shell_dir, extra_flags=None):
     # TODO(machenbach): This requires +.exe if run on windows.
     extra_flags = extra_flags or []
     if self.binary != 'd8' and '--prof' in extra_flags:
-      print "Profiler supported only on a benchmark run with d8"
+      logging.info('Profiler supported only on a benchmark run with d8')
 
     if self.process_size:
-      cmd_prefix = ["/usr/bin/time", "--format=MaxMemory: %MKB"] + cmd_prefix
+      cmd_prefix = ['/usr/bin/time', '--format=MaxMemory: %MKB'] + cmd_prefix
     if self.binary.endswith('.py'):
       # Copy cmd_prefix instead of update (+=).
       cmd_prefix = cmd_prefix + [sys.executable]
@@ -511,25 +481,36 @@
         args=self.GetCommandFlags(extra_flags=extra_flags),
         timeout=self.timeout or 60)
 
-  def Run(self, runner, trybot):
-    """Iterates over several runs and handles the output for all traces."""
-    stdout, stdout_secondary = Unzip(runner())
-    return (
-        AccumulateResults(
-            self.graphs,
-            self._children,
-            iter_output=self.PostProcess(stdout),
-            perform_measurement=True,
-            calc_total=self.total,
-        ),
-        AccumulateResults(
-            self.graphs,
-            self._children,
-            iter_output=self.PostProcess(stdout_secondary),
-            perform_measurement=trybot,  # only run second time on trybots
-            calc_total=self.total,
-        ),
-    )
+  def ProcessOutput(self, output, result_tracker, count):
+    """Processes test run output and updates result tracker.
+
+    Args:
+      output: Output object from the test run.
+      result_tracker: ResultTracker object to be updated.
+      count: Index of the test run (used for better logging).
+    """
+    if self.results_processor:
+      output = RunResultsProcessor(self.results_processor, output, count)
+
+    results_for_total = []
+    for trace in self.children:
+      result = trace.ConsumeOutput(output, result_tracker)
+      if result:
+        results_for_total.append(result)
+
+    if self.total:
+      # Produce total metric only when all traces have produced results.
+      if len(self.children) != len(results_for_total):
+        result_tracker.AddError(
+            'Not all traces have produced results. Can not compute total for '
+            '%s.' % self.name)
+        return
+
+      # Calculate total as the geometric mean of results from all traces.
+      total_trace = TraceConfig(
+          {'name': 'Total', 'units': self.children[0].units}, self, self.arch)
+      result_tracker.AddTraceResult(
+          total_trace, GeometricMean(results_for_total), '')
 
 
 class RunnableTraceConfig(TraceConfig, RunnableConfig):
@@ -537,30 +518,9 @@
   def __init__(self, suite, parent, arch):
     super(RunnableTraceConfig, self).__init__(suite, parent, arch)
 
-  def Run(self, runner, trybot):
-    """Iterates over several runs and handles the output."""
-    measurement = self.CreateMeasurement(perform_measurement=True)
-    measurement_secondary = self.CreateMeasurement(perform_measurement=trybot)
-    for stdout, stdout_secondary in runner():
-      measurement.ConsumeOutput(stdout)
-      measurement_secondary.ConsumeOutput(stdout_secondary)
-    return (
-        measurement.GetResults(),
-        measurement_secondary.GetResults(),
-    )
-
-
-class RunnableGenericConfig(RunnableConfig):
-  """Represents a runnable suite definition with generic traces."""
-  def __init__(self, suite, parent, arch):
-    super(RunnableGenericConfig, self).__init__(suite, parent, arch)
-
-  def Run(self, runner, trybot):
-    stdout, stdout_secondary = Unzip(runner())
-    return (
-        AccumulateGenericResults(self.graphs, self.units, stdout),
-        AccumulateGenericResults(self.graphs, self.units, stdout_secondary),
-    )
+  def ProcessOutput(self, output, result_tracker, count):
+    result_tracker.AddRunnableDuration(self, output.duration)
+    self.ConsumeOutput(output, result_tracker)
 
 
 def MakeGraphConfig(suite, arch, parent):
@@ -568,23 +528,19 @@
   if isinstance(parent, RunnableConfig):
     # Below a runnable can only be traces.
     return TraceConfig(suite, parent, arch)
-  elif suite.get("main") is not None:
+  elif suite.get('main') is not None:
     # A main file makes this graph runnable. Empty strings are accepted.
-    if suite.get("tests"):
+    if suite.get('tests'):
       # This graph has subgraphs (traces).
       return RunnableConfig(suite, parent, arch)
     else:
       # This graph has no subgraphs, it's a leaf.
       return RunnableTraceConfig(suite, parent, arch)
-  elif suite.get("generic"):
-    # This is a generic suite definition. It is either a runnable executable
-    # or has a main js file.
-    return RunnableGenericConfig(suite, parent, arch)
-  elif suite.get("tests"):
+  elif suite.get('tests'):
     # This is neither a leaf nor a runnable.
     return GraphConfig(suite, parent, arch)
   else:  # pragma: no cover
-    raise Exception("Invalid suite configuration.")
+    raise Exception('Invalid suite configuration.')
 
 
 def BuildGraphConfigs(suite, arch, parent):
@@ -593,11 +549,11 @@
   """
 
   # TODO(machenbach): Implement notion of cpu type?
-  if arch not in suite.get("archs", SUPPORTED_ARCHS):
+  if arch not in suite.get('archs', SUPPORTED_ARCHS):
     return None
 
   graph = MakeGraphConfig(suite, arch, parent)
-  for subsuite in suite.get("tests", []):
+  for subsuite in suite.get('tests', []):
     BuildGraphConfigs(subsuite, arch, graph)
   parent.AppendChild(graph)
   return graph
@@ -615,61 +571,92 @@
       for result in FlattenRunnables(child, node_cb):
         yield result
   else:  # pragma: no cover
-    raise Exception("Invalid suite configuration.")
+    raise Exception('Invalid suite configuration.')
 
 
 class Platform(object):
-  def __init__(self, options):
-    self.shell_dir = options.shell_dir
-    self.shell_dir_secondary = options.shell_dir_secondary
-    self.extra_flags = options.extra_flags.split()
+  def __init__(self, args):
+    self.shell_dir = args.shell_dir
+    self.shell_dir_secondary = args.shell_dir_secondary
+    self.extra_flags = args.extra_flags.split()
+    self.args = args
 
   @staticmethod
-  def GetPlatform(options):
-    if options.android_build_tools:
-      return AndroidPlatform(options)
+  def ReadBuildConfig(args):
+    config_path = os.path.join(args.shell_dir, 'v8_build_config.json')
+    if not os.path.isfile(config_path):
+      return {}
+    with open(config_path) as f:
+      return json.load(f)
+
+  @staticmethod
+  def GetPlatform(args):
+    if Platform.ReadBuildConfig(args).get('is_android', False):
+      return AndroidPlatform(args)
     else:
-      return DesktopPlatform(options)
+      return DesktopPlatform(args)
 
   def _Run(self, runnable, count, secondary=False):
     raise NotImplementedError()  # pragma: no cover
 
-  def Run(self, runnable, count):
+  def _LoggedRun(self, runnable, count, secondary=False):
+    suffix = ' - secondary' if secondary else ''
+    title = '>>> %%s (#%d)%s:' % ((count + 1), suffix)
+    try:
+      output = self._Run(runnable, count, secondary)
+    except OSError:
+      logging.exception(title % 'OSError')
+      raise
+    if output.stdout:
+      logging.info(title % 'Stdout' + '\n%s', output.stdout)
+    if output.stderr:  # pragma: no cover
+      # Print stderr for debugging.
+      logging.info(title % 'Stderr' + '\n%s', output.stderr)
+      logging.warning('>>> Test timed out after %ss.', runnable.timeout)
+    if output.exit_code != 0:
+      logging.warning('>>> Test crashed with exit code %d.', output.exit_code)
+    return output
+
+  def Run(self, runnable, count, secondary):
     """Execute the benchmark's main file.
 
-    If options.shell_dir_secondary is specified, the benchmark is run twice,
-    e.g. with and without patch.
     Args:
       runnable: A Runnable benchmark instance.
       count: The number of this (repeated) run.
-    Returns: A tuple with the two benchmark outputs. The latter will be None if
-             options.shell_dir_secondary was not specified.
+      secondary: True if secondary run should be executed.
+
+    Returns:
+      A tuple with the two benchmark outputs. The latter will be NULL_OUTPUT if
+      secondary is False.
     """
-    stdout = self._Run(runnable, count, secondary=False)
-    if self.shell_dir_secondary:
-      return stdout, self._Run(runnable, count, secondary=True)
+    output = self._LoggedRun(runnable, count, secondary=False)
+    if secondary:
+      return output, self._LoggedRun(runnable, count, secondary=True)
     else:
-      return stdout, None
+      return output, NULL_OUTPUT
 
 
 class DesktopPlatform(Platform):
-  def __init__(self, options):
-    super(DesktopPlatform, self).__init__(options)
+  def __init__(self, args):
+    super(DesktopPlatform, self).__init__(args)
     self.command_prefix = []
 
-    if options.prioritize or options.affinitize != None:
-      self.command_prefix = ["schedtool"]
-      if options.prioritize:
-        self.command_prefix += ["-n", "-20"]
-      if options.affinitize != None:
+    # Setup command class to OS specific version.
+    command.setup(utils.GuessOS(), args.device)
+
+    if args.prioritize or args.affinitize != None:
+      self.command_prefix = ['schedtool']
+      if args.prioritize:
+        self.command_prefix += ['-n', '-20']
+      if args.affinitize != None:
       # schedtool expects a bit pattern when setting affinity, where each
       # bit set to '1' corresponds to a core where the process may run on.
       # First bit corresponds to CPU 0. Since the 'affinitize' parameter is
       # a core number, we need to map to said bit pattern.
-        cpu = int(options.affinitize)
+        cpu = int(args.affinitize)
         core = 1 << cpu
-        self.command_prefix += ["-a", ("0x%x" % core)]
-      self.command_prefix += ["-e"]
+        self.command_prefix += ['-a', ('0x%x' % core)]
+      self.command_prefix += ['-e']
 
   def PreExecution(self):
     pass
@@ -682,123 +669,37 @@
       node.ChangeCWD(path)
 
   def _Run(self, runnable, count, secondary=False):
-    suffix = ' - secondary' if secondary else ''
     shell_dir = self.shell_dir_secondary if secondary else self.shell_dir
-    title = ">>> %%s (#%d)%s:" % ((count + 1), suffix)
     cmd = runnable.GetCommand(self.command_prefix, shell_dir, self.extra_flags)
-    try:
-      output = cmd.execute()
-    except OSError as e:  # pragma: no cover
-      print title % "OSError"
-      print e
-      return ""
+    output = cmd.execute()
 
-    print title % "Stdout"
-    print output.stdout
-    if output.stderr:  # pragma: no cover
-      # Print stderr for debugging.
-      print title % "Stderr"
-      print output.stderr
-    if output.timed_out:
-      print ">>> Test timed out after %ss." % runnable.timeout
-    if '--prof' in self.extra_flags:
-      os_prefix = {"linux": "linux", "macos": "mac"}.get(utils.GuessOS())
+    if output.IsSuccess() and '--prof' in self.extra_flags:
+      os_prefix = {'linux': 'linux', 'macos': 'mac'}.get(utils.GuessOS())
       if os_prefix:
-        tick_tools = os.path.join(TOOLS_BASE, "%s-tick-processor" % os_prefix)
-        subprocess.check_call(tick_tools + " --only-summary", shell=True)
+        tick_tools = os.path.join(TOOLS_BASE, '%s-tick-processor' % os_prefix)
+        subprocess.check_call(tick_tools + ' --only-summary', shell=True)
       else:  # pragma: no cover
-        print "Profiler option currently supported on Linux and Mac OS."
+        logging.warning(
+            'Profiler option currently supported on Linux and Mac OS.')
 
-    # time outputs to stderr
+    # /usr/bin/time outputs to stderr
     if runnable.process_size:
-      return output.stdout + output.stderr
-    return output.stdout
+      output.stdout += output.stderr
+    return output
 
 
 class AndroidPlatform(Platform):  # pragma: no cover
-  DEVICE_DIR = "/data/local/tmp/v8/"
 
-  def __init__(self, options):
-    super(AndroidPlatform, self).__init__(options)
-    LoadAndroidBuildTools(options.android_build_tools)
-
-    if not options.device:
-      # Detect attached device if not specified.
-      devices = adb_wrapper.AdbWrapper.Devices()
-      assert devices and len(devices) == 1, (
-          "None or multiple devices detected. Please specify the device on "
-          "the command-line with --device")
-      options.device = str(devices[0])
-    self.adb_wrapper = adb_wrapper.AdbWrapper(options.device)
-    self.device = device_utils.DeviceUtils(self.adb_wrapper)
+  def __init__(self, args):
+    super(AndroidPlatform, self).__init__(args)
+    self.driver = android.android_driver(args.device)
 
   def PreExecution(self):
-    perf = perf_control.PerfControl(self.device)
-    perf.SetHighPerfMode()
-
-    # Remember what we have already pushed to the device.
-    self.pushed = set()
+    self.driver.set_high_perf_mode()
 
   def PostExecution(self):
-    perf = perf_control.PerfControl(self.device)
-    perf.SetDefaultPerfMode()
-    self.device.RemovePath(
-        AndroidPlatform.DEVICE_DIR, force=True, recursive=True)
-
-  def _PushFile(self, host_dir, file_name, target_rel=".",
-                skip_if_missing=False):
-    file_on_host = os.path.join(host_dir, file_name)
-    file_on_device_tmp = os.path.join(
-        AndroidPlatform.DEVICE_DIR, "_tmp_", file_name)
-    file_on_device = os.path.join(
-        AndroidPlatform.DEVICE_DIR, target_rel, file_name)
-    folder_on_device = os.path.dirname(file_on_device)
-
-    # Only attempt to push files that exist.
-    if not os.path.exists(file_on_host):
-      if not skip_if_missing:
-        logging.critical('Missing file on host: %s' % file_on_host)
-      return
-
-    # Only push files not yet pushed in one execution.
-    if file_on_host in self.pushed:
-      return
-    else:
-      self.pushed.add(file_on_host)
-
-    # Work-around for "text file busy" errors. Push the files to a temporary
-    # location and then copy them with a shell command.
-    output = self.adb_wrapper.Push(file_on_host, file_on_device_tmp)
-    # Success looks like this: "3035 KB/s (12512056 bytes in 4.025s)".
-    # Errors look like this: "failed to copy  ... ".
-    if output and not re.search('^[0-9]', output.splitlines()[-1]):
-      logging.critical('PUSH FAILED: ' + output)
-    self.adb_wrapper.Shell("mkdir -p %s" % folder_on_device)
-    self.adb_wrapper.Shell("cp %s %s" % (file_on_device_tmp, file_on_device))
-
-  def _PushExecutable(self, shell_dir, target_dir, binary):
-    self._PushFile(shell_dir, binary, target_dir)
-
-    # Push external startup data. Backwards compatible for revisions where
-    # these files didn't exist.
-    self._PushFile(
-        shell_dir,
-        "natives_blob.bin",
-        target_dir,
-        skip_if_missing=True,
-    )
-    self._PushFile(
-        shell_dir,
-        "snapshot_blob.bin",
-        target_dir,
-        skip_if_missing=True,
-    )
-    self._PushFile(
-        shell_dir,
-        "icudtl.dat",
-        target_dir,
-        skip_if_missing=True,
-    )
+    self.driver.set_default_perf_mode()
+    self.driver.tear_down()
 
   def PreTests(self, node, path):
     if isinstance(node, RunnableConfig):
@@ -808,52 +709,59 @@
       bench_rel = os.path.normpath(os.path.join(*node.path))
       bench_abs = os.path.join(suite_dir, bench_rel)
     else:
-      bench_rel = "."
+      bench_rel = '.'
       bench_abs = suite_dir
 
-    self._PushExecutable(self.shell_dir, "bin", node.binary)
+    self.driver.push_executable(self.shell_dir, 'bin', node.binary)
     if self.shell_dir_secondary:
-      self._PushExecutable(
-          self.shell_dir_secondary, "bin_secondary", node.binary)
+      self.driver.push_executable(
+          self.shell_dir_secondary, 'bin_secondary', node.binary)
 
     if isinstance(node, RunnableConfig):
-      self._PushFile(bench_abs, node.main, bench_rel)
+      self.driver.push_file(bench_abs, node.main, bench_rel)
     for resource in node.resources:
-      self._PushFile(bench_abs, resource, bench_rel)
+      self.driver.push_file(bench_abs, resource, bench_rel)
 
   def _Run(self, runnable, count, secondary=False):
-    suffix = ' - secondary' if secondary else ''
-    target_dir = "bin_secondary" if secondary else "bin"
-    title = ">>> %%s (#%d)%s:" % ((count + 1), suffix)
-    cache = cache_control.CacheControl(self.device)
-    cache.DropRamCaches()
-    binary_on_device = os.path.join(
-        AndroidPlatform.DEVICE_DIR, target_dir, runnable.binary)
-    cmd = [binary_on_device] + runnable.GetCommandFlags(self.extra_flags)
+    target_dir = 'bin_secondary' if secondary else 'bin'
+    self.driver.drop_ram_caches()
 
     # Relative path to benchmark directory.
     if runnable.path:
       bench_rel = os.path.normpath(os.path.join(*runnable.path))
     else:
-      bench_rel = "."
+      bench_rel = '.'
 
+    logcat_file = None
+    if self.args.dump_logcats_to:
+      runnable_name = '-'.join(runnable.graphs)
+      logcat_file = os.path.join(
+          self.args.dump_logcats_to, 'logcat-%s-#%d%s.log' % (
+            runnable_name, count + 1, '-secondary' if secondary else ''))
+      logging.debug('Dumping logcat into %s', logcat_file)
+
+    output = Output()
+    start = time.time()
     try:
-      output = self.device.RunShellCommand(
-          cmd,
-          cwd=os.path.join(AndroidPlatform.DEVICE_DIR, bench_rel),
-          check_return=True,
+      output.stdout = self.driver.run(
+          target_dir=target_dir,
+          binary=runnable.binary,
+          args=runnable.GetCommandFlags(self.extra_flags),
+          rel_path=bench_rel,
           timeout=runnable.timeout,
-          retries=0,
+          logcat_file=logcat_file,
       )
-      stdout = "\n".join(output)
-      print title % "Stdout"
-      print stdout
-    except device_errors.CommandTimeoutError:
-      print ">>> Test timed out after %ss." % runnable.timeout
-      stdout = ""
+    except android.CommandFailedException as e:
+      output.stdout = e.output
+      output.exit_code = e.status
+    except android.TimeoutException as e:
+      output.stdout = e.output
+      output.timed_out = True
     if runnable.process_size:
-      return stdout + "MaxMemory: Unsupported"
-    return stdout
+      output.stdout += 'MaxMemory: Unsupported'
+    output.duration = time.time() - start
+    return output
+
 
 class CustomMachineConfiguration:
   def __init__(self, disable_aslr = False, governor = None):
@@ -880,44 +788,44 @@
   @staticmethod
   def GetASLR():
     try:
-      with open("/proc/sys/kernel/randomize_va_space", "r") as f:
+      with open('/proc/sys/kernel/randomize_va_space', 'r') as f:
         return int(f.readline().strip())
-    except Exception as e:
-      print "Failed to get current ASLR settings."
-      raise e
+    except Exception:
+      logging.exception('Failed to get current ASLR settings.')
+      raise
 
   @staticmethod
   def SetASLR(value):
     try:
-      with open("/proc/sys/kernel/randomize_va_space", "w") as f:
+      with open('/proc/sys/kernel/randomize_va_space', 'w') as f:
         f.write(str(value))
-    except Exception as e:
-      print "Failed to update ASLR to %s." % value
-      print "Are we running under sudo?"
-      raise e
+    except Exception:
+      logging.exception(
+          'Failed to update ASLR to %s. Are we running under sudo?', value)
+      raise
 
     new_value = CustomMachineConfiguration.GetASLR()
     if value != new_value:
-      raise Exception("Present value is %s" % new_value)
+      raise Exception('Present value is %s' % new_value)
 
   @staticmethod
   def GetCPUCoresRange():
     try:
-      with open("/sys/devices/system/cpu/present", "r") as f:
+      with open('/sys/devices/system/cpu/present', 'r') as f:
         indexes = f.readline()
-        r = map(int, indexes.split("-"))
+        r = map(int, indexes.split('-'))
         if len(r) == 1:
           return range(r[0], r[0] + 1)
         return range(r[0], r[1] + 1)
-    except Exception as e:
-      print "Failed to retrieve number of CPUs."
-      raise e
+    except Exception:
+      logging.exception('Failed to retrieve number of CPUs.')
+      raise
 
   @staticmethod
   def GetCPUPathForId(cpu_index):
-    ret = "/sys/devices/system/cpu/cpu"
+    ret = '/sys/devices/system/cpu/cpu'
     ret += str(cpu_index)
-    ret += "/cpufreq/scaling_governor"
+    ret += '/cpufreq/scaling_governor'
     return ret
 
   @staticmethod
@@ -927,18 +835,18 @@
       ret = None
       for cpu_index in cpu_indices:
         cpu_device = CustomMachineConfiguration.GetCPUPathForId(cpu_index)
-        with open(cpu_device, "r") as f:
+        with open(cpu_device, 'r') as f:
           # We assume the governors of all CPUs are set to the same value
           val = f.readline().strip()
           if ret == None:
             ret = val
           elif ret != val:
-            raise Exception("CPU cores have differing governor settings")
+            raise Exception('CPU cores have differing governor settings')
       return ret
-    except Exception as e:
-      print "Failed to get the current CPU governor."
-      print "Is the CPU governor disabled? Check BIOS."
-      raise e
+    except Exception:
+      logging.exception('Failed to get the current CPU governor. Is the CPU '
+                        'governor disabled? Check BIOS.')
+      raise
 
   @staticmethod
   def SetCPUGovernor(value):
@@ -946,205 +854,278 @@
       cpu_indices = CustomMachineConfiguration.GetCPUCoresRange()
       for cpu_index in cpu_indices:
         cpu_device = CustomMachineConfiguration.GetCPUPathForId(cpu_index)
-        with open(cpu_device, "w") as f:
+        with open(cpu_device, 'w') as f:
           f.write(value)
 
-    except Exception as e:
-      print "Failed to change CPU governor to %s." % value
-      print "Are we running under sudo?"
-      raise e
+    except Exception:
+      logging.exception('Failed to change CPU governor to %s. Are we '
+                        'running under sudo?', value)
+      raise
 
     cur_value = CustomMachineConfiguration.GetCPUGovernor()
     if cur_value != value:
-      raise Exception("Could not set CPU governor. Present value is %s"
+      raise Exception('Could not set CPU governor. Present value is %s'
                       % cur_value )
 
-def Main(args):
-  logging.getLogger().setLevel(logging.INFO)
-  parser = optparse.OptionParser()
-  parser.add_option("--android-build-tools",
-                    help="Path to chromium's build/android. Specifying this "
-                         "option will run tests using android platform.")
-  parser.add_option("--arch",
-                    help=("The architecture to run tests for, "
-                          "'auto' or 'native' for auto-detect"),
-                    default="x64")
-  parser.add_option("--buildbot",
-                    help="Adapt to path structure used on buildbots",
-                    default=False, action="store_true")
-  parser.add_option("--device",
-                    help="The device ID to run Android tests on. If not given "
-                         "it will be autodetected.")
-  parser.add_option("--extra-flags",
-                    help="Additional flags to pass to the test executable",
-                    default="")
-  parser.add_option("--json-test-results",
-                    help="Path to a file for storing json results.")
-  parser.add_option("--json-test-results-secondary",
-                    "--json-test-results-no-patch",  # TODO(sergiyb): Deprecate.
-                    help="Path to a file for storing json results from run "
-                         "without patch or for reference build run.")
-  parser.add_option("--outdir", help="Base directory with compile output",
-                    default="out")
-  parser.add_option("--outdir-secondary",
-                    "--outdir-no-patch",  # TODO(sergiyb): Deprecate.
-                    help="Base directory with compile output without patch or "
-                         "for reference build")
-  parser.add_option("--binary-override-path",
-                    help="JavaScript engine binary. By default, d8 under "
-                    "architecture-specific build dir. "
-                    "Not supported in conjunction with outdir-secondary.")
-  parser.add_option("--prioritize",
-                    help="Raise the priority to nice -20 for the benchmarking "
-                    "process.Requires Linux, schedtool, and sudo privileges.",
-                    default=False, action="store_true")
-  parser.add_option("--affinitize",
-                    help="Run benchmarking process on the specified core. "
-                    "For example: "
-                    "--affinitize=0 will run the benchmark process on core 0. "
-                    "--affinitize=3 will run the benchmark process on core 3. "
-                    "Requires Linux, schedtool, and sudo privileges.",
-                    default=None)
-  parser.add_option("--noaslr",
-                    help="Disable ASLR for the duration of the benchmarked "
-                    "process. Requires Linux and sudo privileges.",
-                    default=False, action="store_true")
-  parser.add_option("--cpu-governor",
-                    help="Set cpu governor to specified policy for the "
-                    "duration of the benchmarked process. Typical options: "
-                    "'powersave' for more stable results, or 'performance' "
-                    "for shorter completion time of suite, with potentially "
-                    "more noise in results.")
-  parser.add_option("--filter",
-                    help="Only run the benchmarks beginning with this string. "
-                    "For example: "
-                    "--filter=JSTests/TypedArrays/ will run only TypedArray "
-                    "benchmarks from the JSTests suite.",
-                    default="")
 
-  (options, args) = parser.parse_args(args)
+class MaxTotalDurationReachedError(Exception):
+  """Exception used to stop running tests when max total duration is reached."""
+  pass
 
-  if len(args) == 0:  # pragma: no cover
-    parser.print_help()
-    return 1
 
-  if options.arch in ["auto", "native"]:  # pragma: no cover
-    options.arch = ARCH_GUESS
+def Main(argv):
+  parser = argparse.ArgumentParser()
+  parser.add_argument('--arch',
+                      help='The architecture to run tests for. Pass "auto" '
+                      'to auto-detect.', default='x64',
+                      choices=SUPPORTED_ARCHS + ['auto'])
+  parser.add_argument('--buildbot',
+                      help='Adapt to path structure used on buildbots and adds '
+                      'timestamps/level to all logged status messages',
+                      default=False, action='store_true')
+  parser.add_argument('-d', '--device',
+                      help='The device ID to run Android tests on. If not '
+                      'given it will be autodetected.')
+  parser.add_argument('--extra-flags',
+                      help='Additional flags to pass to the test executable',
+                      default='')
+  parser.add_argument('--json-test-results',
+                      help='Path to a file for storing json results.')
+  parser.add_argument('--json-test-results-secondary',
+                      help='Path to a file for storing json results from run '
+                      'without patch or for reference build run.')
+  parser.add_argument('--outdir', help='Base directory with compile output',
+                      default='out')
+  parser.add_argument('--outdir-secondary',
+                      help='Base directory with compile output without patch '
+                      'or for reference build')
+  parser.add_argument('--binary-override-path',
+                      help='JavaScript engine binary. By default, d8 under '
+                      'architecture-specific build dir. '
+                      'Not supported in conjunction with outdir-secondary.')
+  parser.add_argument('--prioritize',
+                      help='Raise the priority to nice -20 for the '
+                      'benchmarking process.Requires Linux, schedtool, and '
+                      'sudo privileges.', default=False, action='store_true')
+  parser.add_argument('--affinitize',
+                      help='Run benchmarking process on the specified core. '
+                      'For example: --affinitize=0 will run the benchmark '
+                      'process on core 0. --affinitize=3 will run the '
+                      'benchmark process on core 3. Requires Linux, schedtool, '
+                      'and sudo privileges.', default=None)
+  parser.add_argument('--noaslr',
+                      help='Disable ASLR for the duration of the benchmarked '
+                      'process. Requires Linux and sudo privileges.',
+                      default=False, action='store_true')
+  parser.add_argument('--cpu-governor',
+                      help='Set cpu governor to specified policy for the '
+                      'duration of the benchmarked process. Typical options: '
+                      '"powersave" for more stable results, or "performance" '
+                      'for shorter completion time of suite, with potentially '
+                      'more noise in results.')
+  parser.add_argument('--filter',
+                      help='Only run the benchmarks beginning with this '
+                      'string. For example: '
+                      '--filter=JSTests/TypedArrays/ will run only TypedArray '
+                      'benchmarks from the JSTests suite.',
+                      default='')
+  parser.add_argument('--confidence-level', type=float,
+                      help='Repeatedly runs each benchmark until specified '
+                      'confidence level is reached. The value is interpreted '
+                      'as the number of standard deviations from the mean that '
+                      'all values must lie within. Typical values are 1, 2 and '
+                      '3 and correspond to 68%%, 95%% and 99.7%% probability '
+                      'that the measured value is within 0.1%% of the true '
+                      'value. Larger values result in more retries and thus '
+                      'longer runtime, but also provide more reliable results. '
+                      'Also see --max-total-duration flag.')
+  parser.add_argument('--max-total-duration', type=int, default=7140,  # 1h 59m
+                      help='Max total duration in seconds allowed for retries '
+                      'across all tests. This is especially useful in '
+                      'combination with the --confidence-level flag.')
+  parser.add_argument('--dump-logcats-to',
+                      help='Writes logcat output from each test into specified '
+                      'directory. Only supported for android targets.')
+  parser.add_argument('--run-count', type=int, default=0,
+                      help='Override the run count specified by the test '
+                      'suite. The default 0 uses the suite\'s config.')
+  parser.add_argument('-v', '--verbose', default=False, action='store_true',
+                      help='Be verbose and print debug output.')
+  parser.add_argument('suite', nargs='+', help='Path to the suite config file.')
 
-  if not options.arch in SUPPORTED_ARCHS:  # pragma: no cover
-    print "Unknown architecture %s" % options.arch
-    return 1
+  try:
+    args = parser.parse_args(argv)
+  except SystemExit:
+    return INFRA_FAILURE_RETCODE
 
-  if options.device and not options.android_build_tools:  # pragma: no cover
-    print "Specifying a device requires Android build tools."
-    return 1
+  logging.basicConfig(
+      level=logging.DEBUG if args.verbose else logging.INFO,
+      format='%(asctime)s %(levelname)-8s  %(message)s')
 
-  if (options.json_test_results_secondary and
-      not options.outdir_secondary):  # pragma: no cover
-    print("For writing secondary json test results, a secondary outdir patch "
-          "must be specified.")
-    return 1
+  if args.arch == 'auto':  # pragma: no cover
+    args.arch = utils.DefaultArch()
+    if args.arch not in SUPPORTED_ARCHS:
+      logging.error(
+          'Auto-detected architecture "%s" is not supported.', args.arch)
+      return INFRA_FAILURE_RETCODE
 
-  workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+  if (args.json_test_results_secondary and
+      not args.outdir_secondary):  # pragma: no cover
+    logging.error('For writing secondary json test results, a secondary outdir '
+                  'patch must be specified.')
+    return INFRA_FAILURE_RETCODE
 
-  if options.buildbot:
-    build_config = "Release"
+  workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
+
+  if args.buildbot:
+    build_config = 'Release'
   else:
-    build_config = "%s.release" % options.arch
+    build_config = '%s.release' % args.arch
 
-  if options.binary_override_path == None:
-    options.shell_dir = os.path.join(workspace, options.outdir, build_config)
-    default_binary_name = "d8"
+  if args.binary_override_path == None:
+    args.shell_dir = os.path.join(workspace, args.outdir, build_config)
+    default_binary_name = 'd8'
   else:
-    if not os.path.isfile(options.binary_override_path):
-      print "binary-override-path must be a file name"
-      return 1
-    if options.outdir_secondary:
-      print "specify either binary-override-path or outdir-secondary"
-      return 1
-    options.shell_dir = os.path.abspath(
-        os.path.dirname(options.binary_override_path))
-    default_binary_name = os.path.basename(options.binary_override_path)
+    if not os.path.isfile(args.binary_override_path):
+      logging.error('binary-override-path must be a file name')
+      return INFRA_FAILURE_RETCODE
+    if args.outdir_secondary:
+      logging.error('specify either binary-override-path or outdir-secondary')
+      return INFRA_FAILURE_RETCODE
+    args.shell_dir = os.path.abspath(
+        os.path.dirname(args.binary_override_path))
+    default_binary_name = os.path.basename(args.binary_override_path)
 
-  if options.outdir_secondary:
-    options.shell_dir_secondary = os.path.join(
-        workspace, options.outdir_secondary, build_config)
+  if args.outdir_secondary:
+    args.shell_dir_secondary = os.path.join(
+        workspace, args.outdir_secondary, build_config)
   else:
-    options.shell_dir_secondary = None
+    args.shell_dir_secondary = None
 
-  if options.json_test_results:
-    options.json_test_results = os.path.abspath(options.json_test_results)
+  if args.json_test_results:
+    args.json_test_results = os.path.abspath(args.json_test_results)
 
-  if options.json_test_results_secondary:
-    options.json_test_results_secondary = os.path.abspath(
-        options.json_test_results_secondary)
+  if args.json_test_results_secondary:
+    args.json_test_results_secondary = os.path.abspath(
+        args.json_test_results_secondary)
 
   # Ensure all arguments have absolute path before we start changing current
   # directory.
-  args = map(os.path.abspath, args)
+  args.suite = map(os.path.abspath, args.suite)
 
   prev_aslr = None
   prev_cpu_gov = None
-  platform = Platform.GetPlatform(options)
+  platform = Platform.GetPlatform(args)
 
-  results = Results()
-  results_secondary = Results()
-  with CustomMachineConfiguration(governor = options.cpu_governor,
-                                  disable_aslr = options.noaslr) as conf:
-    for path in args:
+  result_tracker = ResultTracker()
+  result_tracker_secondary = ResultTracker()
+  have_failed_tests = False
+  with CustomMachineConfiguration(governor = args.cpu_governor,
+                                  disable_aslr = args.noaslr) as conf:
+    for path in args.suite:
       if not os.path.exists(path):  # pragma: no cover
-        results.errors.append("Configuration file %s does not exist." % path)
+        result_tracker.AddError('Configuration file %s does not exist.' % path)
         continue
 
       with open(path) as f:
         suite = json.loads(f.read())
 
       # If no name is given, default to the file name without .json.
-      suite.setdefault("name", os.path.splitext(os.path.basename(path))[0])
+      suite.setdefault('name', os.path.splitext(os.path.basename(path))[0])
 
       # Setup things common to one test suite.
       platform.PreExecution()
 
       # Build the graph/trace tree structure.
       default_parent = DefaultSentinel(default_binary_name)
-      root = BuildGraphConfigs(suite, options.arch, default_parent)
+      root = BuildGraphConfigs(suite, args.arch, default_parent)
 
       # Callback to be called on each node on traversal.
       def NodeCB(node):
         platform.PreTests(node, path)
 
       # Traverse graph/trace tree and iterate over all runnables.
-      for runnable in FlattenRunnables(root, NodeCB):
-        runnable_name = "/".join(runnable.graphs)
-        if not runnable_name.startswith(options.filter):
-          continue
-        print ">>> Running suite: %s" % runnable_name
+      start = time.time()
+      try:
+        for runnable in FlattenRunnables(root, NodeCB):
+          runnable_name = '/'.join(runnable.graphs)
+          if (not runnable_name.startswith(args.filter) and
+              runnable_name + '/' != args.filter):
+            continue
+          logging.info('>>> Running suite: %s', runnable_name)
 
-        def Runner():
-          """Output generator that reruns several times."""
-          for i in xrange(0, max(1, runnable.run_count)):
-            # TODO(machenbach): Allow timeout per arch like with run_count per
-            # arch.
-            yield platform.Run(runnable, i)
+          def RunGenerator(runnable):
+            if args.confidence_level:
+              counter = 0
+              while not result_tracker.HasEnoughRuns(
+                  runnable, args.confidence_level):
+                yield counter
+                counter += 1
+            else:
+              for i in range(0, max(1, args.run_count or runnable.run_count)):
+                yield i
 
-        # Let runnable iterate over all runs and handle output.
-        result, result_secondary = runnable.Run(
-          Runner, trybot=options.shell_dir_secondary)
-        results += result
-        results_secondary += result_secondary
+          for i in RunGenerator(runnable):
+            attempts_left = runnable.retry_count + 1
+            while attempts_left:
+              total_duration = time.time() - start
+              if total_duration > args.max_total_duration:
+                logging.info(
+                    '>>> Stopping now since running for too long (%ds > %ds)',
+                    total_duration, args.max_total_duration)
+                raise MaxTotalDurationReachedError()
+
+              output, output_secondary = platform.Run(
+                  runnable, i, secondary=args.shell_dir_secondary)
+              result_tracker.AddRunnableDuration(runnable, output.duration)
+              result_tracker_secondary.AddRunnableDuration(
+                  runnable, output_secondary.duration)
+
+              if output.IsSuccess() and output_secondary.IsSuccess():
+                runnable.ProcessOutput(output, result_tracker, i)
+                if output_secondary is not NULL_OUTPUT:
+                  runnable.ProcessOutput(
+                      output_secondary, result_tracker_secondary, i)
+                break
+
+              attempts_left -= 1
+              if not attempts_left:
+                logging.info('>>> Suite %s failed after %d retries',
+                             runnable_name, runnable.retry_count + 1)
+                have_failed_tests = True
+              else:
+                logging.info('>>> Retrying suite: %s', runnable_name)
+      except MaxTotalDurationReachedError:
+        have_failed_tests = True
+
       platform.PostExecution()
 
-    if options.json_test_results:
-      results.WriteToFile(options.json_test_results)
+    if args.json_test_results:
+      result_tracker.WriteToFile(args.json_test_results)
     else:  # pragma: no cover
-      print results
+      print('Primary results:', result_tracker)
 
-  if options.json_test_results_secondary:
-    results_secondary.WriteToFile(options.json_test_results_secondary)
-  else:  # pragma: no cover
-    print results_secondary
+  if args.shell_dir_secondary:
+    if args.json_test_results_secondary:
+      result_tracker_secondary.WriteToFile(args.json_test_results_secondary)
+    else:  # pragma: no cover
+      print('Secondary results:', result_tracker_secondary)
 
-  return min(1, len(results.errors))
+  if (result_tracker.errors or result_tracker_secondary.errors or
+      have_failed_tests):
+    return 1
 
-if __name__ == "__main__":  # pragma: no cover
-  sys.exit(Main(sys.argv[1:]))
+  return 0
+
+
+def MainWrapper():
+  try:
+    return Main(sys.argv[1:])
+  except:
+    # Log uncaptured exceptions and report infra failure to the caller.
+    traceback.print_exc()
+    return INFRA_FAILURE_RETCODE
+
+
+if __name__ == '__main__':  # pragma: no cover
+  sys.exit(MainWrapper())
diff --git a/src/v8/tools/sanitizers/sancov_formatter.py b/src/v8/tools/sanitizers/sancov_formatter.py
index 2e168fb..b66bfed 100755
--- a/src/v8/tools/sanitizers/sancov_formatter.py
+++ b/src/v8/tools/sanitizers/sancov_formatter.py
@@ -39,6 +39,10 @@
     'https://chromium.googlesource.com/external/llvm.org/compiler-rt.git'
 """
 
+# for py2/py3 compatibility
+from __future__ import print_function
+from functools import reduce
+
 import argparse
 import json
 import logging
@@ -426,26 +430,26 @@
   options.build_dir = os.path.abspath(options.build_dir)
   if options.action.lower() == 'all':
     if not options.json_output:
-      print '--json-output is required'
+      print('--json-output is required')
       return 1
     write_instrumented(options)
   elif options.action.lower() == 'merge':
     if not options.coverage_dir:
-      print '--coverage-dir is required'
+      print('--coverage-dir is required')
       return 1
     if not options.json_input:
-      print '--json-input is required'
+      print('--json-input is required')
       return 1
     if not options.json_output:
-      print '--json-output is required'
+      print('--json-output is required')
       return 1
     merge(options)
   elif options.action.lower() == 'split':
     if not options.json_input:
-      print '--json-input is required'
+      print('--json-input is required')
       return 1
     if not options.output_dir:
-      print '--output-dir is required'
+      print('--output-dir is required')
       return 1
     split(options)
   return 0
diff --git a/src/v8/tools/sanitizers/sancov_merger.py b/src/v8/tools/sanitizers/sancov_merger.py
index 867f8b4..6fd2eb2 100755
--- a/src/v8/tools/sanitizers/sancov_merger.py
+++ b/src/v8/tools/sanitizers/sancov_merger.py
@@ -106,7 +106,7 @@
     n = max(2, int(math.ceil(len(files) / float(cpus))))
 
     # Chop files into buckets.
-    buckets = [files[i:i+n] for i in xrange(0, len(files), n)]
+    buckets = [files[i:i+n] for i in range(0, len(files), n)]
 
     # Inputs for multiprocessing. List of tuples containing:
     # Keep-files option, base path, executable name, index of bucket,
diff --git a/src/v8/tools/sanitizers/sanitize_pcs.py b/src/v8/tools/sanitizers/sanitize_pcs.py
index 47f2715..a1e3a1d 100755
--- a/src/v8/tools/sanitizers/sanitize_pcs.py
+++ b/src/v8/tools/sanitizers/sanitize_pcs.py
@@ -5,7 +5,10 @@
 
 """Corrects objdump output. The logic is from sancov.py, see comments there."""
 
-import sys;
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import sys
 
 for line in sys.stdin:
-  print '0x%x' % (int(line.strip(), 16) + 4)
+  print('0x%x' % (int(line.strip(), 16) + 4))
diff --git a/src/v8/tools/shell-utils.h b/src/v8/tools/shell-utils.h
index bfd729d..b41d327 100644
--- a/src/v8/tools/shell-utils.h
+++ b/src/v8/tools/shell-utils.h
@@ -27,7 +27,7 @@
 
 // Utility functions used by parser-shell.
 
-#include "src/globals.h"
+#include "src/common/globals.h"
 
 #include <stdio.h>
 
diff --git a/src/v8/tools/snapshot/asm_to_inline_asm.py b/src/v8/tools/snapshot/asm_to_inline_asm.py
new file mode 100644
index 0000000..ad8fdcb
--- /dev/null
+++ b/src/v8/tools/snapshot/asm_to_inline_asm.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+'''
+Converts a given file in clang assembly syntax to a corresponding
+representation in inline assembly. Specifically, this is used to convert
+embedded.S to embedded.cc for Windows clang builds.
+'''
+
+import argparse
+import sys
+
+def asm_to_inl_asm(in_filename, out_filename):
+  with open(in_filename, 'r') as infile, open(out_filename, 'wb') as outfile:
+    outfile.write('__asm__(\n')
+    for line in infile:
+      # Escape " in .S file before outputing it to inline asm file.
+      line = line.replace('"', '\\"')
+      outfile.write('  "%s\\n"\n' % line.rstrip())
+    outfile.write(');\n')
+  return 0
+
+if __name__ == '__main__':
+  parser = argparse.ArgumentParser(description=__doc__)
+  parser.add_argument('input', help='Name of the input assembly file')
+  parser.add_argument('output', help='Name of the target CC file')
+  args = parser.parse_args()
+  sys.exit(asm_to_inl_asm(args.input, args.output))
diff --git a/src/v8/tools/stats-viewer.py b/src/v8/tools/stats-viewer.py
index e8fc69e..dd9d2c2 100755
--- a/src/v8/tools/stats-viewer.py
+++ b/src/v8/tools/stats-viewer.py
@@ -34,6 +34,9 @@
 in a window, re-reading and re-displaying with regular intervals.
 """
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import mmap
 import optparse
 import os
@@ -100,7 +103,7 @@
     if not os.path.exists(self.data_name):
       maps_name = "/proc/%s/maps" % self.data_name
       if not os.path.exists(maps_name):
-        print "\"%s\" is neither a counter file nor a PID." % self.data_name
+        print("\"%s\" is neither a counter file nor a PID." % self.data_name)
         sys.exit(1)
       maps_file = open(maps_name, "r")
       try:
@@ -110,7 +113,7 @@
             self.data_name = m.group(0)
             break
         if self.data_name is None:
-          print "Can't find counter file in maps for PID %s." % self.data_name
+          print("Can't find counter file in maps for PID %s." % self.data_name)
           sys.exit(1)
       finally:
         maps_file.close()
@@ -123,7 +126,7 @@
       return CounterCollection(data_access)
     elif data_access.IntAt(0) == CHROME_COUNTERS_FILE_MAGIC_NUMBER:
       return ChromeCounterCollection(data_access)
-    print "File %s is not stats data." % self.data_name
+    print("File %s is not stats data." % self.data_name)
     sys.exit(1)
 
   def CleanUp(self):
@@ -143,7 +146,7 @@
       self.RefreshCounters()
       changed = True
     else:
-      for i in xrange(self.data.CountersInUse()):
+      for i in range(self.data.CountersInUse()):
         counter = self.data.Counter(i)
         name = counter.Name()
         if name in self.ui_counters:
@@ -188,7 +191,7 @@
       sorted by prefix.
     """
     names = {}
-    for i in xrange(self.data.CountersInUse()):
+    for i in range(self.data.CountersInUse()):
       counter = self.data.Counter(i)
       name = counter.Name()
       names[name] = counter
@@ -233,7 +236,7 @@
                              text=counter_name)
         name.grid(row=index, column=0, padx=1, pady=1)
       count = len(counter_objs)
-      for i in xrange(count):
+      for i in range(count):
         counter = counter_objs[i]
         name = counter.Name()
         var = Tkinter.StringVar()
@@ -435,7 +438,7 @@
 
   def CountersInUse(self):
     """Return the number of counters in active use."""
-    for i in xrange(self.max_counters):
+    for i in range(self.max_counters):
       name_offset = self.counter_names_offset + i * self._COUNTER_NAME_SIZE
       if self.data.ByteAt(name_offset) == 0:
         return i
diff --git a/src/v8/tools/test262-results-parser.js b/src/v8/tools/test262-results-parser.js
new file mode 100644
index 0000000..379436e
--- /dev/null
+++ b/src/v8/tools/test262-results-parser.js
@@ -0,0 +1,41 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Run the test runner and dump a json file. Use this script to pass
+// the json file and return a list of failing tests that can be copied
+// to test262.status.
+//
+// Usage:
+//
+// Run the test runner to generate the results:
+// $ tools/run-tests.py --gn test262 --json-test-results=tools/.test262-results.json
+//
+// Run this script to print the formatted results:
+// $ node tools/test262-results-parser.js .test262-results.json
+//
+// Note: The json results file generated by the test runner should be
+// in the tools/ directly, which is the same dir as this script.
+
+var fs = require('fs'),
+    path = require('path');
+
+function main() {
+  if (process.argv.length === 2)  {
+    throw new Error('File name required as first arg.');
+  }
+
+  var fileName = process.argv[2],
+      fullPath = path.join(__dirname, fileName),
+      results = require(fullPath)[0].results,
+      tests = new Set();
+  for (let result of results) {
+    let [_, ...test] = result.name.split('/');
+    tests.add(`  '${test.join('/')}': [FAIL],`);
+  }
+
+
+  [...tests].sort().forEach(i => console.log(i));
+}
+
+main();
diff --git a/src/v8/tools/testrunner/base_runner.py b/src/v8/tools/testrunner/base_runner.py
index 8fc09ee..7f9b434 100644
--- a/src/v8/tools/testrunner/base_runner.py
+++ b/src/v8/tools/testrunner/base_runner.py
@@ -2,12 +2,19 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+# for py2/py3 compatibility
+from __future__ import print_function
+from functools import reduce
 
 from collections import OrderedDict
 import json
+import multiprocessing
 import optparse
 import os
+import shlex
 import sys
+import traceback
+
 
 
 # Add testrunner to the path.
@@ -17,10 +24,15 @@
     os.path.dirname(os.path.abspath(__file__))))
 
 
-from local import testsuite
-from local import utils
-
-from testproc.shard import ShardProc
+from testrunner.local import command
+from testrunner.local import testsuite
+from testrunner.local import utils
+from testrunner.test_config import TestConfig
+from testrunner.testproc import progress
+from testrunner.testproc.rerun import RerunProc
+from testrunner.testproc.shard import ShardProc
+from testrunner.testproc.sigproc import SignalProc
+from testrunner.testproc.timeout import TimeoutProc
 
 
 BASE_DIR = (
@@ -31,8 +43,6 @@
 
 DEFAULT_OUT_GN = 'out.gn'
 
-ARCH_GUESS = utils.DefaultArch()
-
 # Map of test name synonyms to lists of test suites. Should be ordered by
 # expected runtimes (suites with slow test cases first). These groups are
 # invoked in separate steps on the bots.
@@ -46,6 +56,7 @@
     "inspector",
     "webkit",
     "mkgrokdump",
+    "wasm-js",
     "fuzzer",
     "message",
     "preparser",
@@ -60,6 +71,7 @@
     "wasm-spec-tests",
     "inspector",
     "mkgrokdump",
+    "wasm-js",
     "fuzzer",
     "message",
     "preparser",
@@ -68,13 +80,12 @@
   ],
   # This needs to stay in sync with test/d8_default.isolate.
   "d8_default": [
-    # TODO(machenbach): uncomment after infra side lands.
-    #"debugger",
+    "debugger",
     "mjsunit",
     "webkit",
-    #"message",
-    #"preparser",
-    #"intl",
+    "message",
+    "preparser",
+    "intl",
   ],
   # This needs to stay in sync with test/optimize_for_size.isolate.
   "optimize_for_size": [
@@ -90,6 +101,16 @@
   ],
 }
 
+# Double the timeout for these:
+SLOW_ARCHS = ["arm",
+              "mips",
+              "mipsel",
+              "mips64",
+              "mips64el",
+              "s390",
+              "s390x",
+              "arm64"]
+
 
 class ModeConfig(object):
   def __init__(self, flags, timeout_scalefactor, status_mode, execution_mode):
@@ -99,8 +120,9 @@
     self.execution_mode = execution_mode
 
 
-DEBUG_FLAGS = ["--nohard-abort", "--enable-slow-asserts", "--verify-heap"]
-RELEASE_FLAGS = ["--nohard-abort"]
+DEBUG_FLAGS = ["--nohard-abort", "--enable-slow-asserts", "--verify-heap",
+               "--testing-d8-test-runner"]
+RELEASE_FLAGS = ["--nohard-abort", "--testing-d8-test-runner"]
 MODES = {
   "debug": ModeConfig(
     flags=DEBUG_FLAGS,
@@ -138,6 +160,12 @@
   ),
 }
 
+PROGRESS_INDICATORS = {
+  'verbose': progress.VerboseProgressIndicator,
+  'dots': progress.DotsProgressIndicator,
+  'color': progress.ColorProgressIndicator,
+  'mono': progress.MonochromeProgressIndicator,
+}
 
 class TestRunnerError(Exception):
   pass
@@ -151,17 +179,34 @@
     else:
       self.arch = build_config['v8_target_cpu']
 
-    self.is_debug = build_config['is_debug']
     self.asan = build_config['is_asan']
     self.cfi_vptr = build_config['is_cfi']
     self.dcheck_always_on = build_config['dcheck_always_on']
     self.gcov_coverage = build_config['is_gcov_coverage']
+    self.is_android = build_config['is_android']
+    self.is_clang = build_config['is_clang']
+    self.is_debug = build_config['is_debug']
+    self.is_full_debug = build_config['is_full_debug']
     self.msan = build_config['is_msan']
     self.no_i18n = not build_config['v8_enable_i18n_support']
     self.no_snap = not build_config['v8_use_snapshot']
     self.predictable = build_config['v8_enable_verify_predictable']
     self.tsan = build_config['is_tsan']
+    # TODO(machenbach): We only have ubsan not ubsan_vptr.
     self.ubsan_vptr = build_config['is_ubsan_vptr']
+    self.embedded_builtins = build_config['v8_enable_embedded_builtins']
+    self.verify_csa = build_config['v8_enable_verify_csa']
+    self.lite_mode = build_config['v8_enable_lite_mode']
+    self.pointer_compression = build_config['v8_enable_pointer_compression']
+    # Export only for MIPS target
+    if self.arch in ['mips', 'mipsel', 'mips64', 'mips64el']:
+      self.mips_arch_variant = build_config['mips_arch_variant']
+      self.mips_use_msa = build_config['mips_use_msa']
+
+  @property
+  def use_sanitizer(self):
+    return (self.asan or self.cfi_vptr or self.msan or self.tsan or
+            self.ubsan_vptr)
 
   def __str__(self):
     detected_options = []
@@ -186,6 +231,14 @@
       detected_options.append('tsan')
     if self.ubsan_vptr:
       detected_options.append('ubsan_vptr')
+    if self.embedded_builtins:
+      detected_options.append('embedded_builtins')
+    if self.verify_csa:
+      detected_options.append('verify_csa')
+    if self.lite_mode:
+      detected_options.append('lite_mode')
+    if self.pointer_compression:
+      detected_options.append('pointer_compression')
 
     return '\n'.join(detected_options)
 
@@ -197,6 +250,12 @@
     self.build_config = None
     self.mode_name = None
     self.mode_options = None
+    self.target_os = None
+
+  @property
+  def framework_name(self):
+    """String name of the base-runner subclass, used in test results."""
+    raise NotImplementedError()
 
   def execute(self, sys_args=None):
     if sys_args is None:  # pragma: no cover
@@ -204,8 +263,13 @@
     try:
       parser = self._create_parser()
       options, args = self._parse_args(parser, sys_args)
+      if options.swarming:
+        # Swarming doesn't print how isolated commands are called. Let's make
+        # this less cryptic by printing it ourselves.
+        print(' '.join(sys.argv))
 
       self._load_build_config(options)
+      command.setup(self.target_os, options.device)
 
       try:
         self._process_default_options(options)
@@ -215,14 +279,26 @@
         raise
 
       args = self._parse_test_args(args)
-      suites = self._get_suites(args, options.verbose)
-
+      tests = self._load_testsuite_generators(args, options)
       self._setup_env()
-      return self._do_execute(suites, args, options)
+      print(">>> Running tests for %s.%s" % (self.build_config.arch,
+                                            self.mode_name))
+      exit_code = self._do_execute(tests, args, options)
+      if exit_code == utils.EXIT_CODE_FAILURES and options.json_test_results:
+        print("Force exit code 0 after failures. Json test results file "
+              "generated with failure information.")
+        exit_code = utils.EXIT_CODE_PASS
+      return exit_code
     except TestRunnerError:
-      return 1
+      traceback.print_exc()
+      return utils.EXIT_CODE_INTERNAL_ERROR
     except KeyboardInterrupt:
-      return 2
+      return utils.EXIT_CODE_INTERRUPTED
+    except Exception:
+      traceback.print_exc()
+      return utils.EXIT_CODE_INTERNAL_ERROR
+    finally:
+      command.tear_down()
 
   def _create_parser(self):
     parser = optparse.OptionParser()
@@ -247,14 +323,70 @@
                       " and buildbot builds): %s" % MODES.keys())
     parser.add_option("--shell-dir", help="DEPRECATED! Executables from build "
                       "directory will be used")
-    parser.add_option("-v", "--verbose", help="Verbose output",
+    parser.add_option("--test-root", help="Root directory of the test suites",
+                      default=os.path.join(self.basedir, 'test'))
+    parser.add_option("--total-timeout-sec", default=0, type="int",
+                      help="How long should fuzzer run")
+    parser.add_option("--swarming", default=False, action="store_true",
+                      help="Indicates running test driver on swarming.")
+
+    parser.add_option("-j", help="The number of parallel tasks to run",
+                      default=0, type=int)
+    parser.add_option("-d", "--device",
+                      help="The device ID to run Android tests on. If not "
+                           "given it will be autodetected.")
+
+    # Shard
+    parser.add_option("--shard-count", default=1, type=int,
+                      help="Split tests into this number of shards")
+    parser.add_option("--shard-run", default=1, type=int,
+                      help="Run this shard from the split up tests.")
+
+    # Progress
+    parser.add_option("-p", "--progress",
+                      choices=PROGRESS_INDICATORS.keys(), default="mono",
+                      help="The style of progress indicator (verbose, dots, "
+                           "color, mono)")
+    parser.add_option("--json-test-results",
+                      help="Path to a file for storing json results.")
+    parser.add_option("--exit-after-n-failures", type="int", default=100,
+                      help="Exit after the first N failures instead of "
+                           "running all tests. Pass 0 to disable this feature.")
+
+    # Rerun
+    parser.add_option("--rerun-failures-count", default=0, type=int,
+                      help="Number of times to rerun each failing test case. "
+                           "Very slow tests will be rerun only once.")
+    parser.add_option("--rerun-failures-max", default=100, type=int,
+                      help="Maximum number of failing test cases to rerun")
+
+    # Test config
+    parser.add_option("--command-prefix", default="",
+                      help="Prepended to each shell command used to run a test")
+    parser.add_option("--extra-flags", action="append", default=[],
+                      help="Additional flags to pass to each test command")
+    parser.add_option("--isolates", action="store_true", default=False,
+                      help="Whether to test isolates")
+    parser.add_option("--no-harness", "--noharness",
+                      default=False, action="store_true",
+                      help="Run without test harness of a given suite")
+    parser.add_option("--random-seed", default=0, type=int,
+                      help="Default seed for initializing random generator")
+    parser.add_option("--run-skipped", help="Also run skipped tests.",
                       default=False, action="store_true")
-    parser.add_option("--shard-count",
-                      help="Split tests into this number of shards",
-                      default=1, type="int")
-    parser.add_option("--shard-run",
-                      help="Run this shard from the split up tests.",
-                      default=1, type="int")
+    parser.add_option("-t", "--timeout", default=60, type=int,
+                      help="Timeout for single test in seconds")
+    parser.add_option("-v", "--verbose", default=False, action="store_true",
+                      help="Verbose output")
+
+    # TODO(machenbach): Temporary options for rolling out new test runner
+    # features.
+    parser.add_option("--mastername", default='',
+                      help="Mastername property from infrastructure. Not "
+                           "setting this option indicates manual usage.")
+    parser.add_option("--buildername", default='',
+                      help="Buildername property from infrastructure. Not "
+                           "setting this option indicates manual usage.")
 
   def _add_parser_options(self, parser):
     pass
@@ -264,7 +396,7 @@
 
     if any(map(lambda v: v and ',' in v,
                 [options.arch, options.mode])):  # pragma: no cover
-      print 'Multiple arch/mode are deprecated'
+      print('Multiple arch/mode are deprecated')
       raise TestRunnerError()
 
     return options, args
@@ -277,13 +409,20 @@
         pass
 
     if not self.build_config:  # pragma: no cover
-      print 'Failed to load build config'
+      print('Failed to load build config')
       raise TestRunnerError
 
-    print 'Build found: %s' % self.outdir
+    print('Build found: %s' % self.outdir)
     if str(self.build_config):
-      print '>>> Autodetected:'
-      print self.build_config
+      print('>>> Autodetected:')
+      print(self.build_config)
+
+    # Represents the OS where tests are run on. Same as host OS except for
+    # Android, which is determined by build output.
+    if self.build_config.is_android:
+      self.target_os = 'android'
+    else:
+      self.target_os = utils.GuessOS()
 
   # Returns possible build paths in order:
   # gn
@@ -353,7 +492,7 @@
     build_config_mode = 'debug' if self.build_config.is_debug else 'release'
     if options.mode:
       if options.mode not in MODES:  # pragma: no cover
-        print '%s mode is invalid' % options.mode
+        print('%s mode is invalid' % options.mode)
         raise TestRunnerError()
       if MODES[options.mode].execution_mode != build_config_mode:
         print ('execution mode (%s) for %s is inconsistent with build config '
@@ -378,6 +517,16 @@
       print('Warning: --shell-dir is deprecated. Searching for executables in '
             'build directory (%s) instead.' % self.outdir)
 
+    if options.j == 0:
+      if self.build_config.is_android:
+        # Adb isn't happy about multi-processed file pushing.
+        options.j = 1
+      else:
+        options.j = multiprocessing.cpu_count()
+
+    options.command_prefix = shlex.split(options.command_prefix)
+    options.extra_flags = sum(map(shlex.split, options.extra_flags), [])
+
   def _buildbot_to_v8_mode(self, config):
     """Convert buildbot build configs to configs understood by the v8 runner.
 
@@ -410,6 +559,9 @@
         asan_options.append('detect_leaks=1')
       else:
         asan_options.append('detect_leaks=0')
+      if utils.GuessOS() == 'windows':
+        # https://crbug.com/967663
+        asan_options.append('detect_stack_use_after_return=0')
       os.environ['ASAN_OPTIONS'] = ":".join(asan_options)
 
     if self.build_config.cfi_vptr:
@@ -471,34 +623,129 @@
 
     return reduce(list.__add__, map(expand_test_group, args), [])
 
-  def _get_suites(self, args, verbose=False):
-    names = self._args_to_suite_names(args)
-    return self._load_suites(names, verbose)
-
-  def _args_to_suite_names(self, args):
+  def _args_to_suite_names(self, args, test_root):
     # Use default tests if no test configuration was provided at the cmd line.
-    all_names = set(utils.GetSuitePaths(os.path.join(self.basedir, 'test')))
+    all_names = set(utils.GetSuitePaths(test_root))
     args_names = OrderedDict([(arg.split('/')[0], None) for arg in args]) # set
     return [name for name in args_names if name in all_names]
 
   def _get_default_suite_names(self):
     return []
 
-  def _expand_test_group(self, name):
-    return TEST_MAP.get(name, [name])
+  def _load_testsuite_generators(self, args, options):
+    names = self._args_to_suite_names(args, options.test_root)
+    test_config = self._create_test_config(options)
+    variables = self._get_statusfile_variables(options)
 
-  def _load_suites(self, names, verbose=False):
-    def load_suite(name):
-      if verbose:
-        print '>>> Loading test suite: %s' % name
-      return testsuite.TestSuite.LoadTestSuite(
-          os.path.join(self.basedir, 'test', name))
-    return map(load_suite, names)
+    # Head generator with no elements
+    test_chain = testsuite.TestGenerator(0, [], [])
+    for name in names:
+      if options.verbose:
+        print('>>> Loading test suite: %s' % name)
+      suite = testsuite.TestSuite.Load(
+          os.path.join(options.test_root, name), test_config,
+          self.framework_name)
+
+      if self._is_testsuite_supported(suite, options):
+        tests = suite.load_tests_from_disk(variables)
+        test_chain.merge(tests)
+
+    return test_chain
+
+  def _is_testsuite_supported(self, suite, options):
+    """A predicate that can be overridden to filter out unsupported TestSuite
+    instances (see NumFuzzer for usage)."""
+    return True
+
+  def _get_statusfile_variables(self, options):
+    simd_mips = (
+      self.build_config.arch in ['mipsel', 'mips', 'mips64', 'mips64el'] and
+      self.build_config.mips_arch_variant == "r6" and
+      self.build_config.mips_use_msa)
+
+    mips_arch_variant = (
+      self.build_config.arch in ['mipsel', 'mips', 'mips64', 'mips64el'] and
+      self.build_config.mips_arch_variant)
+
+    # TODO(machenbach): In GN we can derive simulator run from
+    # target_arch != v8_target_arch in the dumped build config.
+    return {
+      "arch": self.build_config.arch,
+      "asan": self.build_config.asan,
+      "byteorder": sys.byteorder,
+      "dcheck_always_on": self.build_config.dcheck_always_on,
+      "deopt_fuzzer": False,
+      "endurance_fuzzer": False,
+      "gc_fuzzer": False,
+      "gc_stress": False,
+      "gcov_coverage": self.build_config.gcov_coverage,
+      "isolates": options.isolates,
+      "is_clang": self.build_config.is_clang,
+      "is_full_debug": self.build_config.is_full_debug,
+      "mips_arch_variant": mips_arch_variant,
+      "mode": self.mode_options.status_mode
+              if not self.build_config.dcheck_always_on
+              else "debug",
+      "msan": self.build_config.msan,
+      "no_harness": options.no_harness,
+      "no_i18n": self.build_config.no_i18n,
+      "no_snap": self.build_config.no_snap,
+      "novfp3": False,
+      "optimize_for_size": "--optimize-for-size" in options.extra_flags,
+      "predictable": self.build_config.predictable,
+      "simd_mips": simd_mips,
+      "simulator_run": False,
+      "system": self.target_os,
+      "tsan": self.build_config.tsan,
+      "ubsan_vptr": self.build_config.ubsan_vptr,
+      "embedded_builtins": self.build_config.embedded_builtins,
+      "verify_csa": self.build_config.verify_csa,
+      "lite_mode": self.build_config.lite_mode,
+      "pointer_compression": self.build_config.pointer_compression,
+    }
+
+  def _create_test_config(self, options):
+    timeout = options.timeout * self._timeout_scalefactor(options)
+    return TestConfig(
+        command_prefix=options.command_prefix,
+        extra_flags=options.extra_flags,
+        isolates=options.isolates,
+        mode_flags=self.mode_options.flags,
+        no_harness=options.no_harness,
+        noi18n=self.build_config.no_i18n,
+        random_seed=options.random_seed,
+        run_skipped=options.run_skipped,
+        shell_dir=self.outdir,
+        timeout=timeout,
+        verbose=options.verbose,
+    )
+
+  def _timeout_scalefactor(self, options):
+    """Increases timeout for slow build configurations."""
+    factor = self.mode_options.timeout_scalefactor
+    if self.build_config.arch in SLOW_ARCHS:
+      factor *= 4
+    if self.build_config.lite_mode:
+      factor *= 2
+    if self.build_config.predictable:
+      factor *= 4
+    if self.build_config.use_sanitizer:
+      factor *= 1.5
+    if self.build_config.is_full_debug:
+      factor *= 4
+
+    return factor
 
   # TODO(majeski): remove options & args parameters
   def _do_execute(self, suites, args, options):
     raise NotImplementedError()
 
+  def _prepare_procs(self, procs):
+    procs = filter(None, procs)
+    for i in range(0, len(procs) - 1):
+      procs[i].connect_to(procs[i + 1])
+    procs[0].setup()
+
   def _create_shard_proc(self, options):
     myid, count = self._get_shard_info(options)
     if count == 1:
@@ -536,8 +783,42 @@
       # TODO(machenbach): Turn this into an assert. If that's wrong on the
       # bots, printing will be quite useless. Or refactor this code to make
       # sure we get a return code != 0 after testing if we got here.
-      print "shard-run not a valid number, should be in [1:shard-count]"
-      print "defaulting back to running all tests"
+      print("shard-run not a valid number, should be in [1:shard-count]")
+      print("defaulting back to running all tests")
       return 1, 1
 
     return shard_run, shard_count
+
+  def _create_progress_indicators(self, test_count, options):
+    procs = [PROGRESS_INDICATORS[options.progress]()]
+    if options.json_test_results:
+      procs.append(progress.JsonTestProgressIndicator(
+        self.framework_name,
+        options.json_test_results,
+        self.build_config.arch,
+        self.mode_options.execution_mode))
+
+    for proc in procs:
+      try:
+        proc.set_test_count(test_count)
+      except AttributeError:
+        pass
+
+    return procs
+
+  def _create_result_tracker(self, options):
+    return progress.ResultsTracker(options.exit_after_n_failures)
+
+  def _create_timeout_proc(self, options):
+    if not options.total_timeout_sec:
+      return None
+    return TimeoutProc(options.total_timeout_sec)
+
+  def _create_signal_proc(self):
+    return SignalProc()
+
+  def _create_rerun_proc(self, options):
+    if not options.rerun_failures_count:
+      return None
+    return RerunProc(options.rerun_failures_count,
+                     options.rerun_failures_max)
diff --git a/src/v8/tools/testrunner/local/android.py b/src/v8/tools/testrunner/local/android.py
new file mode 100644
index 0000000..ebf04af
--- /dev/null
+++ b/src/v8/tools/testrunner/local/android.py
@@ -0,0 +1,205 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Wrapper around the Android device abstraction from src/build/android.
+"""
+
+import logging
+import os
+import sys
+import re
+
+BASE_DIR = os.path.normpath(
+    os.path.join(os.path.dirname(__file__), '..', '..', '..'))
+ANDROID_DIR = os.path.join(BASE_DIR, 'build', 'android')
+DEVICE_DIR = '/data/local/tmp/v8/'
+
+
+class TimeoutException(Exception):
+  def __init__(self, timeout, output=None):
+    self.timeout = timeout
+    self.output = output
+
+
+class CommandFailedException(Exception):
+  def __init__(self, status, output):
+    self.status = status
+    self.output = output
+
+
+class _Driver(object):
+  """Helper class to execute shell commands on an Android device."""
+  def __init__(self, device=None):
+    assert os.path.exists(ANDROID_DIR)
+    sys.path.insert(0, ANDROID_DIR)
+
+    # We import the dependencies only on demand, so that this file can be
+    # imported unconditionally.
+    import devil_chromium
+    from devil.android import device_errors  # pylint: disable=import-error
+    from devil.android import device_utils  # pylint: disable=import-error
+    from devil.android.perf import cache_control  # pylint: disable=import-error
+    from devil.android.perf import perf_control  # pylint: disable=import-error
+    global cache_control
+    global device_errors
+    global perf_control
+
+    devil_chromium.Initialize()
+
+    # Find specified device or a single attached device if none was specified.
+    # In case none or multiple devices are attached, this raises an exception.
+    self.device = device_utils.DeviceUtils.HealthyDevices(
+        retries=5, enable_usb_resets=True, device_arg=device)[0]
+
+    # This remembers what we have already pushed to the device.
+    self.pushed = set()
+
+  def tear_down(self):
+    """Clean up files after running all tests."""
+    self.device.RemovePath(DEVICE_DIR, force=True, recursive=True)
+
+  def push_file(self, host_dir, file_name, target_rel='.',
+                skip_if_missing=False):
+    """Push a single file to the device (cached).
+
+    Args:
+      host_dir: Absolute parent directory of the file to push.
+      file_name: Name of the file to push.
+      target_rel: Parent directory of the target location on the device
+          (relative to the device's base dir for testing).
+      skip_if_missing: Keeps silent about missing files when set. Otherwise logs
+          error.
+    """
+    # TODO(sergiyb): Implement this method using self.device.PushChangedFiles to
+    # avoid accessing low-level self.device.adb.
+    file_on_host = os.path.join(host_dir, file_name)
+
+    # Only push files not yet pushed in one execution.
+    if file_on_host in self.pushed:
+      return
+
+    file_on_device_tmp = os.path.join(DEVICE_DIR, '_tmp_', file_name)
+    file_on_device = os.path.join(DEVICE_DIR, target_rel, file_name)
+    folder_on_device = os.path.dirname(file_on_device)
+
+    # Only attempt to push files that exist.
+    if not os.path.exists(file_on_host):
+      if not skip_if_missing:
+        logging.critical('Missing file on host: %s' % file_on_host)
+      return
+
+    # Work-around for 'text file busy' errors. Push the files to a temporary
+    # location and then copy them with a shell command.
+    output = self.device.adb.Push(file_on_host, file_on_device_tmp)
+    # Success looks like this: '3035 KB/s (12512056 bytes in 4.025s)'.
+    # Errors look like this: 'failed to copy  ... '.
+    if output and not re.search('^[0-9]', output.splitlines()[-1]):
+      logging.critical('PUSH FAILED: ' + output)
+    self.device.adb.Shell('mkdir -p %s' % folder_on_device)
+    self.device.adb.Shell('cp %s %s' % (file_on_device_tmp, file_on_device))
+    self.pushed.add(file_on_host)
+
+  def push_executable(self, shell_dir, target_dir, binary):
+    """Push files required to run a V8 executable.
+
+    Args:
+      shell_dir: Absolute parent directory of the executable on the host.
+      target_dir: Parent directory of the executable on the device (relative to
+          devices' base dir for testing).
+      binary: Name of the binary to push.
+    """
+    self.push_file(shell_dir, binary, target_dir)
+
+    # Push external startup data. Backwards compatible for revisions where
+    # these files didn't exist. Or for bots that don't produce these files.
+    self.push_file(
+        shell_dir,
+        'natives_blob.bin',
+        target_dir,
+        skip_if_missing=True,
+    )
+    self.push_file(
+        shell_dir,
+        'snapshot_blob.bin',
+        target_dir,
+        skip_if_missing=True,
+    )
+    self.push_file(
+        shell_dir,
+        'snapshot_blob_trusted.bin',
+        target_dir,
+        skip_if_missing=True,
+    )
+    self.push_file(
+        shell_dir,
+        'icudtl.dat',
+        target_dir,
+        skip_if_missing=True,
+    )
+
+  def run(self, target_dir, binary, args, rel_path, timeout, env=None,
+          logcat_file=False):
+    """Execute a command on the device's shell.
+
+    Args:
+      target_dir: Parent directory of the executable on the device (relative to
+          devices' base dir for testing).
+      binary: Name of the binary.
+      args: List of arguments to pass to the binary.
+      rel_path: Relative path on device to use as CWD.
+      timeout: Timeout in seconds.
+      env: The environment variables with which the command should be run.
+      logcat_file: File into which to stream adb logcat log.
+    """
+    binary_on_device = os.path.join(DEVICE_DIR, target_dir, binary)
+    cmd = [binary_on_device] + args
+    def run_inner():
+      try:
+        output = self.device.RunShellCommand(
+            cmd,
+            cwd=os.path.join(DEVICE_DIR, rel_path),
+            check_return=True,
+            env=env,
+            timeout=timeout,
+            retries=0,
+        )
+        return '\n'.join(output)
+      except device_errors.AdbCommandFailedError as e:
+        raise CommandFailedException(e.status, e.output)
+      except device_errors.CommandTimeoutError as e:
+        raise TimeoutException(timeout, e.output)
+
+
+    if logcat_file:
+      with self.device.GetLogcatMonitor(output_file=logcat_file) as logmon:
+        result = run_inner()
+      logmon.Close()
+      return result
+    else:
+      return run_inner()
+
+  def drop_ram_caches(self):
+    """Drop RAM caches on device."""
+    cache = cache_control.CacheControl(self.device)
+    cache.DropRamCaches()
+
+  def set_high_perf_mode(self):
+    """Set device into high performance mode."""
+    perf = perf_control.PerfControl(self.device)
+    perf.SetHighPerfMode()
+
+  def set_default_perf_mode(self):
+    """Set device into default performance mode."""
+    perf = perf_control.PerfControl(self.device)
+    perf.SetDefaultPerfMode()
+
+
+_ANDROID_DRIVER = None
+def android_driver(device=None):
+  """Singleton access method to the driver class."""
+  global _ANDROID_DRIVER
+  if not _ANDROID_DRIVER:
+    _ANDROID_DRIVER = _Driver(device)
+  return _ANDROID_DRIVER
diff --git a/src/v8/tools/testrunner/local/command.py b/src/v8/tools/testrunner/local/command.py
index 93b1ac9..b68252c 100644
--- a/src/v8/tools/testrunner/local/command.py
+++ b/src/v8/tools/testrunner/local/command.py
@@ -2,24 +2,57 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+# for py2/py3 compatibility
+from __future__ import print_function
 
 import os
+import re
+import signal
 import subprocess
 import sys
 import threading
 import time
 
+from ..local.android import (
+    android_driver, CommandFailedException, TimeoutException)
 from ..local import utils
 from ..objects import output
 
 
+BASE_DIR = os.path.normpath(
+    os.path.join(os.path.dirname(os.path.abspath(__file__)), '..' , '..', '..'))
+
 SEM_INVALID_VALUE = -1
 SEM_NOGPFAULTERRORBOX = 0x0002  # Microsoft Platform SDK WinBase.h
 
 
+def setup_testing():
+  """For testing only: We use threading under the hood instead of
+  multiprocessing to make coverage work. Signal handling is only supported
+  in the main thread, so we disable it for testing.
+  """
+  signal.signal = lambda *_: None
+
+
+class AbortException(Exception):
+  """Indicates early abort on SIGINT, SIGTERM or internal hard timeout."""
+  pass
+
+
 class BaseCommand(object):
   def __init__(self, shell, args=None, cmd_prefix=None, timeout=60, env=None,
-               verbose=False):
+               verbose=False, resources_func=None):
+    """Initialize the command.
+
+    Args:
+      shell: The name of the executable (e.g. d8).
+      args: List of args to pass to the executable.
+      cmd_prefix: Prefix of command (e.g. a wrapper script).
+      timeout: Timeout in seconds.
+      env: Environment dict for execution.
+      verbose: Print additional output.
+      resources_func: Callable, returning all test files needed by this command.
+    """
     assert(timeout > 0)
 
     self.shell = shell
@@ -29,16 +62,22 @@
     self.env = env or {}
     self.verbose = verbose
 
-  def execute(self, **additional_popen_kwargs):
+  def execute(self):
     if self.verbose:
-      print '# %s' % self
+      print('# %s' % self)
 
-    process = self._start_process(**additional_popen_kwargs)
+    process = self._start_process()
+
+    # Variable to communicate with the signal handler.
+    abort_occured = [False]
+    def handler(signum, frame):
+      self._abort(process, abort_occured)
+    signal.signal(signal.SIGTERM, handler)
 
     # Variable to communicate with the timer.
     timeout_occured = [False]
     timer = threading.Timer(
-        self.timeout, self._on_timeout, [process, timeout_occured])
+        self.timeout, self._abort, [process, timeout_occured])
     timer.start()
 
     start_time = time.time()
@@ -47,6 +86,9 @@
 
     timer.cancel()
 
+    if abort_occured[0]:
+      raise AbortException()
+
     return output.Output(
       process.returncode,
       timeout_occured[0],
@@ -56,14 +98,13 @@
       duration
     )
 
-  def _start_process(self, **additional_popen_kwargs):
+  def _start_process(self):
     try:
       return subprocess.Popen(
         args=self._get_popen_args(),
         stdout=subprocess.PIPE,
         stderr=subprocess.PIPE,
         env=self._get_env(),
-        **additional_popen_kwargs
       )
     except Exception as e:
       sys.stderr.write('Error executing: %s\n' % self)
@@ -85,12 +126,16 @@
   def _kill_process(self, process):
     raise NotImplementedError()
 
-  def _on_timeout(self, process, timeout_occured):
-    timeout_occured[0] = True
+  def _abort(self, process, abort_called):
+    abort_called[0] = True
     try:
+      print('Attempting to kill process %s' % process.pid)
+      sys.stdout.flush()
       self._kill_process(process)
-    except OSError:
-      sys.stderr.write('Error: Process %s already ended.\n' % process.pid)
+    except OSError as e:
+      print(e)
+      sys.stdout.flush()
+      pass
 
   def __str__(self):
     return self.to_string()
@@ -114,6 +159,25 @@
 
 
 class PosixCommand(BaseCommand):
+  # TODO(machenbach): Use base process start without shell once
+  # https://crbug.com/v8/8889 is resolved.
+  def _start_process(self):
+    def wrapped(arg):
+      if set('() \'"') & set(arg):
+        return "'%s'" % arg.replace("'", "'\"'\"'")
+      return arg
+    try:
+      return subprocess.Popen(
+        args=' '.join(map(wrapped, self._get_popen_args())),
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+        env=self._get_env(),
+        shell=True,
+      )
+    except Exception as e:
+      sys.stderr.write('Error executing: %s\n' % self)
+      raise e
+
   def _kill_process(self, process):
     process.kill()
 
@@ -147,9 +211,6 @@
     return subprocess.list2cmdline(self._to_args_list())
 
   def _kill_process(self, process):
-    if self.verbose:
-      print 'Attempting to kill process %d' % process.pid
-      sys.stdout.flush()
     tk = subprocess.Popen(
         'taskkill /T /F /PID %d' % process.pid,
         stdout=subprocess.PIPE,
@@ -157,15 +218,96 @@
     )
     stdout, stderr = tk.communicate()
     if self.verbose:
-      print 'Taskkill results for %d' % process.pid
-      print stdout
-      print stderr
-      print 'Return code: %d' % tk.returncode
+      print('Taskkill results for %d' % process.pid)
+      print(stdout)
+      print(stderr)
+      print('Return code: %d' % tk.returncode)
       sys.stdout.flush()
 
 
-# Set the Command class to the OS-specific version.
-if utils.IsWindows():
-  Command = WindowsCommand
-else:
-  Command = PosixCommand
+class AndroidCommand(BaseCommand):
+  # This must be initialized before creating any instances of this class.
+  driver = None
+
+  def __init__(self, shell, args=None, cmd_prefix=None, timeout=60, env=None,
+               verbose=False, resources_func=None):
+    """Initialize the command and all files that need to be pushed to the
+    Android device.
+    """
+    self.shell_name = os.path.basename(shell)
+    self.shell_dir = os.path.dirname(shell)
+    self.files_to_push = (resources_func or (lambda: []))()
+
+    # Make all paths in arguments relative and also prepare files from arguments
+    # for pushing to the device.
+    rel_args = []
+    find_path_re = re.compile(r'.*(%s/[^\'"]+).*' % re.escape(BASE_DIR))
+    for arg in (args or []):
+      match = find_path_re.match(arg)
+      if match:
+        self.files_to_push.append(match.group(1))
+      rel_args.append(
+          re.sub(r'(.*)%s/(.*)' % re.escape(BASE_DIR), r'\1\2', arg))
+
+    super(AndroidCommand, self).__init__(
+        shell, args=rel_args, cmd_prefix=cmd_prefix, timeout=timeout, env=env,
+        verbose=verbose)
+
+  def execute(self, **additional_popen_kwargs):
+    """Execute the command on the device.
+
+    This pushes all required files to the device and then runs the command.
+    """
+    if self.verbose:
+      print('# %s' % self)
+
+    self.driver.push_executable(self.shell_dir, 'bin', self.shell_name)
+
+    for abs_file in self.files_to_push:
+      abs_dir = os.path.dirname(abs_file)
+      file_name = os.path.basename(abs_file)
+      rel_dir = os.path.relpath(abs_dir, BASE_DIR)
+      self.driver.push_file(abs_dir, file_name, rel_dir)
+
+    start_time = time.time()
+    return_code = 0
+    timed_out = False
+    try:
+      stdout = self.driver.run(
+          'bin', self.shell_name, self.args, '.', self.timeout, self.env)
+    except CommandFailedException as e:
+      return_code = e.status
+      stdout = e.output
+    except TimeoutException as e:
+      return_code = 1
+      timed_out = True
+      # Sadly the Android driver doesn't provide output on timeout.
+      stdout = ''
+
+    duration = time.time() - start_time
+    return output.Output(
+        return_code,
+        timed_out,
+        stdout,
+        '',  # No stderr available.
+        -1,  # No pid available.
+        duration,
+    )
+
+
+Command = None
+def setup(target_os, device):
+  """Set the Command class to the OS-specific version."""
+  global Command
+  if target_os == 'android':
+    AndroidCommand.driver = android_driver(device)
+    Command = AndroidCommand
+  elif target_os == 'windows':
+    Command = WindowsCommand
+  else:
+    Command = PosixCommand
+
+def tear_down():
+  """Clean up after using commands."""
+  if Command == AndroidCommand:
+    AndroidCommand.driver.tear_down()
diff --git a/src/v8/tools/testrunner/local/fake_testsuite/fake_testsuite.status b/src/v8/tools/testrunner/local/fake_testsuite/fake_testsuite.status
new file mode 100644
index 0000000..b5ebc84
--- /dev/null
+++ b/src/v8/tools/testrunner/local/fake_testsuite/fake_testsuite.status
@@ -0,0 +1,5 @@
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+[]
diff --git a/src/v8/tools/testrunner/local/fake_testsuite/testcfg.py b/src/v8/tools/testrunner/local/fake_testsuite/testcfg.py
new file mode 100644
index 0000000..28de737
--- /dev/null
+++ b/src/v8/tools/testrunner/local/fake_testsuite/testcfg.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+from testrunner.local import testsuite, statusfile
+
+
+class TestLoader(testsuite.TestLoader):
+  def _list_test_filenames(self):
+    return ["fast", "slow"]
+
+  def list_tests(self):
+    self.test_count_estimation = 2
+    fast = self._create_test("fast", self.suite)
+    slow = self._create_test("slow", self.suite)
+
+    slow._statusfile_outcomes.append(statusfile.SLOW)
+    yield fast
+    yield slow
+
+
+class TestSuite(testsuite.TestSuite):
+  def _test_loader_class(self):
+    return TestLoader
+
+  def _test_class(self):
+    return testsuite.TestCase
+
+def GetSuite(*args, **kwargs):
+  return TestSuite(*args, **kwargs)
diff --git a/src/v8/tools/testrunner/local/pool.py b/src/v8/tools/testrunner/local/pool.py
index 9199b62..e0b0ec4 100644
--- a/src/v8/tools/testrunner/local/pool.py
+++ b/src/v8/tools/testrunner/local/pool.py
@@ -3,43 +3,53 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-from Queue import Empty
-from multiprocessing import Event, Process, Queue
+# for py2/py3 compatibility
+from __future__ import print_function
+
+from contextlib import contextmanager
+from multiprocessing import Process, Queue
+import os
+import signal
+import time
 import traceback
 
+try:
+  from queue import Empty  # Python 3
+except ImportError:
+  from Queue import Empty  # Python 2
+
+from . import command
+
 
 def setup_testing():
   """For testing only: Use threading under the hood instead of multiprocessing
   to make coverage work.
   """
   global Queue
-  global Event
   global Process
   del Queue
-  del Event
   del Process
-  from Queue import Queue
-  from threading import Event
+  try:
+    from queue import Queue  # Python 3
+  except ImportError:
+    from Queue import Queue  # Python 2
+
   from threading import Thread as Process
+  # Monkeypatch threading Queue to look like multiprocessing Queue.
+  Queue.cancel_join_thread = lambda self: None
+  # Monkeypatch os.kill and add fake pid property on Thread.
+  os.kill = lambda *args: None
+  Process.pid = property(lambda self: None)
 
 
 class NormalResult():
   def __init__(self, result):
     self.result = result
-    self.exception = False
-    self.break_now = False
-
+    self.exception = None
 
 class ExceptionResult():
-  def __init__(self):
-    self.exception = True
-    self.break_now = False
-
-
-class BreakResult():
-  def __init__(self):
-    self.exception = False
-    self.break_now = True
+  def __init__(self, exception):
+    self.exception = exception
 
 
 class MaybeResult():
@@ -56,26 +66,43 @@
     return MaybeResult(False, value)
 
 
-def Worker(fn, work_queue, done_queue, done,
+def Worker(fn, work_queue, done_queue,
            process_context_fn=None, process_context_args=None):
   """Worker to be run in a child process.
-  The worker stops on two conditions. 1. When the poison pill "STOP" is
-  reached or 2. when the event "done" is set."""
+  The worker stops when the poison pill "STOP" is reached.
+  """
   try:
     kwargs = {}
     if process_context_fn and process_context_args is not None:
       kwargs.update(process_context=process_context_fn(*process_context_args))
     for args in iter(work_queue.get, "STOP"):
-      if done.is_set():
-        break
       try:
         done_queue.put(NormalResult(fn(*args, **kwargs)))
-      except Exception, e:
+      except command.AbortException:
+        # SIGINT, SIGTERM or internal hard timeout.
+        break
+      except Exception as e:
         traceback.print_exc()
         print(">>> EXCEPTION: %s" % e)
-        done_queue.put(ExceptionResult())
+        done_queue.put(ExceptionResult(e))
+    # When we reach here on normal tear down, all items have been pulled from
+    # the done_queue before and this should have no effect. On fast abort, it's
+    # possible that a fast worker left items on the done_queue in memory, which
+    # will never be pulled. This call purges those to avoid a deadlock.
+    done_queue.cancel_join_thread()
   except KeyboardInterrupt:
-    done_queue.put(BreakResult())
+    assert False, 'Unreachable'
+
+
+@contextmanager
+def without_sig():
+  int_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
+  term_handler = signal.signal(signal.SIGTERM, signal.SIG_IGN)
+  try:
+    yield
+  finally:
+    signal.signal(signal.SIGINT, int_handler)
+    signal.signal(signal.SIGTERM, term_handler)
 
 
 class Pool():
@@ -88,24 +115,28 @@
   # Necessary to not overflow the queue's pipe if a keyboard interrupt happens.
   BUFFER_FACTOR = 4
 
-  def __init__(self, num_workers, heartbeat_timeout=30):
+  def __init__(self, num_workers, heartbeat_timeout=1):
     self.num_workers = num_workers
     self.processes = []
     self.terminated = False
+    self.abort_now = False
 
-    # Invariant: count >= #work_queue + #done_queue. It is greater when a
-    # worker takes an item from the work_queue and before the result is
+    # Invariant: processing_count >= #work_queue + #done_queue. It is greater
+    # when a worker takes an item from the work_queue and before the result is
     # submitted to the done_queue. It is equal when no worker is working,
     # e.g. when all workers have finished, and when no results are processed.
     # Count is only accessed by the parent process. Only the parent process is
     # allowed to remove items from the done_queue and to add items to the
     # work_queue.
-    self.count = 0
-    self.work_queue = Queue()
-    self.done_queue = Queue()
-    self.done = Event()
+    self.processing_count = 0
     self.heartbeat_timeout = heartbeat_timeout
 
+    # Disable sigint and sigterm to prevent subprocesses from capturing the
+    # signals.
+    with without_sig():
+      self.work_queue = Queue()
+      self.done_queue = Queue()
+
   def imap_unordered(self, fn, gen,
                      process_context_fn=None, process_context_args=None):
     """Maps function "fn" to items in generator "gen" on the worker processes
@@ -123,58 +154,63 @@
           process_context_fn. All arguments will be pickled and sent beyond the
           process boundary.
     """
+    if self.terminated:
+      return
     try:
       internal_error = False
       gen = iter(gen)
       self.advance = self._advance_more
 
-      for w in xrange(self.num_workers):
-        p = Process(target=Worker, args=(fn,
-                                         self.work_queue,
-                                         self.done_queue,
-                                         self.done,
-                                         process_context_fn,
-                                         process_context_args))
-        p.start()
-        self.processes.append(p)
+      # Disable sigint and sigterm to prevent subprocesses from capturing the
+      # signals.
+      with without_sig():
+        for w in range(self.num_workers):
+          p = Process(target=Worker, args=(fn,
+                                          self.work_queue,
+                                          self.done_queue,
+                                          process_context_fn,
+                                          process_context_args))
+          p.start()
+          self.processes.append(p)
 
       self.advance(gen)
-      while self.count > 0:
+      while self.processing_count > 0:
         while True:
           try:
-            result = self.done_queue.get(timeout=self.heartbeat_timeout)
-            break
-          except Empty:
-            # Indicate a heartbeat. The iterator will continue fetching the
-            # next result.
-            yield MaybeResult.create_heartbeat()
-        self.count -= 1
-        if result.exception:
-          # TODO(machenbach): Handle a few known types of internal errors
-          # gracefully, e.g. missing test files.
-          internal_error = True
-          continue
-        elif result.break_now:
-          # A keyboard interrupt happened in one of the worker processes.
-          raise KeyboardInterrupt
-        else:
-          yield MaybeResult.create_result(result.result)
+            # Read from result queue in a responsive fashion. If available,
+            # this will return a normal result immediately or a heartbeat on
+            # heartbeat timeout (default 1 second).
+            result = self._get_result_from_queue()
+          except:
+            # TODO(machenbach): Handle a few known types of internal errors
+            # gracefully, e.g. missing test files.
+            internal_error = True
+            continue
+
+          if self.abort_now:
+            # SIGINT, SIGTERM or internal hard timeout.
+            return
+
+          yield result
+          break
+
         self.advance(gen)
     except KeyboardInterrupt:
-      raise
+      assert False, 'Unreachable'
     except Exception as e:
       traceback.print_exc()
       print(">>> EXCEPTION: %s" % e)
     finally:
-      self.terminate()
+      self._terminate()
+
     if internal_error:
       raise Exception("Internal error in a worker process.")
 
   def _advance_more(self, gen):
-    while self.count < self.num_workers * self.BUFFER_FACTOR:
+    while self.processing_count < self.num_workers * self.BUFFER_FACTOR:
       try:
-        self.work_queue.put(gen.next())
-        self.count += 1
+        self.work_queue.put(next(gen))
+        self.processing_count += 1
       except StopIteration:
         self.advance = self._advance_empty
         break
@@ -185,27 +221,51 @@
   def add(self, args):
     """Adds an item to the work queue. Can be called dynamically while
     processing the results from imap_unordered."""
-    self.work_queue.put(args)
-    self.count += 1
+    assert not self.terminated
 
-  def terminate(self):
+    self.work_queue.put(args)
+    self.processing_count += 1
+
+  def abort(self):
+    """Schedules abort on next queue read.
+
+    This is safe to call when handling SIGINT, SIGTERM or when an internal
+    hard timeout is reached.
+    """
+    self.abort_now = True
+
+  def _terminate(self):
+    """Terminates execution and cleans up the queues.
+
+    If abort() was called before termination, this also terminates the
+    subprocesses and doesn't wait for ongoing tests.
+    """
     if self.terminated:
       return
     self.terminated = True
 
-    # For exceptional tear down set the "done" event to stop the workers before
-    # they empty the queue buffer.
-    self.done.set()
+    # Drain out work queue from tests
+    try:
+      while True:
+        self.work_queue.get(True, 0.1)
+    except Empty:
+      pass
 
-    for p in self.processes:
+    # Make sure all processes stop
+    for _ in self.processes:
       # During normal tear down the workers block on get(). Feed a poison pill
       # per worker to make them stop.
       self.work_queue.put("STOP")
 
+    if self.abort_now:
+      for p in self.processes:
+        os.kill(p.pid, signal.SIGTERM)
+
     for p in self.processes:
       p.join()
 
-    # Drain the queues to prevent failures when queues are garbage collected.
+    # Drain the queues to prevent stderr chatter when queues are garbage
+    # collected.
     try:
       while True: self.work_queue.get(False)
     except:
@@ -214,3 +274,22 @@
       while True: self.done_queue.get(False)
     except:
       pass
+
+  def _get_result_from_queue(self):
+    """Attempts to get the next result from the queue.
+
+    Returns: A wrapped result if one was available within heartbeat timeout,
+        a heartbeat result otherwise.
+    Raises:
+        Exception: If an exception occured when processing the task on the
+            worker side, it is reraised here.
+    """
+    while True:
+      try:
+        result = self.done_queue.get(timeout=self.heartbeat_timeout)
+        self.processing_count -= 1
+        if result.exception:
+          raise result.exception
+        return MaybeResult.create_result(result.result)
+      except Empty:
+        return MaybeResult.create_heartbeat()
diff --git a/src/v8/tools/testrunner/local/pool_unittest.py b/src/v8/tools/testrunner/local/pool_unittest.py
old mode 100644
new mode 100755
index 235eca6..240cd56
--- a/src/v8/tools/testrunner/local/pool_unittest.py
+++ b/src/v8/tools/testrunner/local/pool_unittest.py
@@ -3,9 +3,16 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+import os
+import sys
 import unittest
 
-from pool import Pool
+# Needed because the test runner contains relative imports.
+TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.dirname(
+    os.path.abspath(__file__))))
+sys.path.append(TOOLS_PATH)
+
+from testrunner.local.pool import Pool
 
 def Run(x):
   if x == 10:
@@ -17,6 +24,9 @@
     results = set()
     pool = Pool(3)
     for result in pool.imap_unordered(Run, [[x] for x in range(0, 10)]):
+      if result.heartbeat:
+        # Any result can be a heartbeat due to timings.
+        continue
       results.add(result.value)
     self.assertEquals(set(range(0, 10)), results)
 
@@ -25,6 +35,9 @@
     pool = Pool(3)
     with self.assertRaises(Exception):
       for result in pool.imap_unordered(Run, [[x] for x in range(0, 12)]):
+        if result.heartbeat:
+          # Any result can be a heartbeat due to timings.
+          continue
         # Item 10 will not appear in results due to an internal exception.
         results.add(result.value)
     expect = set(range(0, 12))
@@ -35,8 +48,15 @@
     results = set()
     pool = Pool(3)
     for result in pool.imap_unordered(Run, [[x] for x in range(0, 10)]):
+      if result.heartbeat:
+        # Any result can be a heartbeat due to timings.
+        continue
       results.add(result.value)
       if result.value < 30:
         pool.add([result.value + 20])
     self.assertEquals(set(range(0, 10) + range(20, 30) + range(40, 50)),
                       results)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/src/v8/tools/testrunner/local/statusfile.py b/src/v8/tools/testrunner/local/statusfile.py
index 988750d..e477832 100644
--- a/src/v8/tools/testrunner/local/statusfile.py
+++ b/src/v8/tools/testrunner/local/statusfile.py
@@ -25,6 +25,9 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import os
 import re
 
@@ -34,8 +37,8 @@
 # Possible outcomes
 FAIL = "FAIL"
 PASS = "PASS"
-TIMEOUT = "TIMEOUT" # TODO(majeski): unused in status files
-CRASH = "CRASH" # TODO(majeski): unused in status files
+TIMEOUT = "TIMEOUT"
+CRASH = "CRASH"
 
 # Outcomes only for status file, need special handling
 FAIL_OK = "FAIL_OK"
@@ -45,21 +48,22 @@
 SKIP = "SKIP"
 SLOW = "SLOW"
 NO_VARIANTS = "NO_VARIANTS"
+FAIL_PHASE_ONLY = "FAIL_PHASE_ONLY"
 
 ALWAYS = "ALWAYS"
 
 KEYWORDS = {}
 for key in [SKIP, FAIL, PASS, CRASH, SLOW, FAIL_OK, NO_VARIANTS, FAIL_SLOPPY,
-            ALWAYS]:
+            ALWAYS, FAIL_PHASE_ONLY]:
   KEYWORDS[key] = key
 
 # Support arches, modes to be written as keywords instead of strings.
 VARIABLES = {ALWAYS: True}
-for var in ["debug", "release", "big", "little",
+for var in ["debug", "release", "big", "little", "android",
             "android_arm", "android_arm64", "android_ia32", "android_x64",
             "arm", "arm64", "ia32", "mips", "mipsel", "mips64", "mips64el",
             "x64", "ppc", "ppc64", "s390", "s390x", "macos", "windows",
-            "linux", "aix"]:
+            "linux", "aix", "r1", "r2", "r3", "r5", "r6"]:
   VARIABLES[var] = var
 
 # Allow using variants as keywords.
@@ -132,7 +136,7 @@
             variant_desc = 'variant independent'
           else:
             variant_desc = 'variant: %s' % variant
-          print 'Unused rule: %s -> %s (%s)' % (rule, value, variant_desc)
+          print('Unused rule: %s -> %s (%s)' % (rule, value, variant_desc))
 
 
 def _JoinsPassAndFail(outcomes1, outcomes2):
@@ -328,5 +332,5 @@
                   "missing file for %s test %s" % (basename, rule))
     return status["success"]
   except Exception as e:
-    print e
+    print(e)
     return False
diff --git a/src/v8/tools/testrunner/local/statusfile_unittest.py b/src/v8/tools/testrunner/local/statusfile_unittest.py
index 299e332..e8d5ff9 100755
--- a/src/v8/tools/testrunner/local/statusfile_unittest.py
+++ b/src/v8/tools/testrunner/local/statusfile_unittest.py
@@ -3,6 +3,7 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+
 import unittest
 
 import statusfile
diff --git a/src/v8/tools/testrunner/local/testsuite.py b/src/v8/tools/testrunner/local/testsuite.py
index 6a9e983..864d734 100644
--- a/src/v8/tools/testrunner/local/testsuite.py
+++ b/src/v8/tools/testrunner/local/testsuite.py
@@ -28,38 +28,20 @@
 
 import fnmatch
 import imp
+import itertools
 import os
+from contextlib import contextmanager
 
 from . import command
 from . import statusfile
 from . import utils
 from ..objects.testcase import TestCase
-from variants import ALL_VARIANTS, ALL_VARIANT_FLAGS
+from .variants import ALL_VARIANTS, ALL_VARIANT_FLAGS
 
 
 STANDARD_VARIANT = set(["default"])
 
 
-class LegacyVariantsGenerator(object):
-  def __init__(self, suite, variants):
-    self.suite = suite
-    self.all_variants = ALL_VARIANTS & variants
-    self.standard_variant = STANDARD_VARIANT & variants
-
-  def FilterVariantsByTest(self, test):
-    if test.only_standard_variant:
-      return self.standard_variant
-    return self.all_variants
-
-  def GetFlagSets(self, test, variant):
-    return ALL_VARIANT_FLAGS[variant]
-
-
-class StandardLegacyVariantsGenerator(LegacyVariantsGenerator):
-  def FilterVariantsByTest(self, testcase):
-    return self.standard_variant
-
-
 class VariantsGenerator(object):
   def __init__(self, variants):
     self._all_variants = [v for v in variants if v in ALL_VARIANTS]
@@ -80,45 +62,235 @@
     return self._all_variants
 
 
+class TestCombiner(object):
+  def get_group_key(self, test):
+    """To indicate what tests can be combined with each other we define a group
+    key for each test. Tests with the same group key can be combined. Test
+    without a group key (None) is not combinable with any other test.
+    """
+    raise NotImplementedError()
+
+  def combine(self, name, tests):
+    """Returns test combined from `tests`. Since we identify tests by their
+    suite and name, `name` parameter should be unique within one suite.
+    """
+    return self._combined_test_class()(name, tests)
+
+  def _combined_test_class(self):
+    raise NotImplementedError()
+
+
+class TestLoader(object):
+  """Base class for loading TestSuite tests after applying test suite
+  transformations."""
+
+  def __init__(self, suite, test_class, test_config, test_root):
+    self.suite = suite
+    self.test_class = test_class
+    self.test_config = test_config
+    self.test_root = test_root
+    self.test_count_estimation = len(list(self._list_test_filenames()))
+
+  def _list_test_filenames(self):
+    """Implemented by the subclassed TestLoaders to list filenames.
+
+    Filenames are expected to be sorted and are deterministic."""
+    raise NotImplementedError
+
+  def _should_filter_by_name(self, name):
+    return False
+
+  def _should_filter_by_test(self, test):
+    return False
+
+  def _filename_to_testname(self, filename):
+    """Hook for subclasses to write their own filename transformation
+    logic before the test creation."""
+    return filename
+
+  # TODO: not needed for every TestLoader, extract it into a subclass.
+  def _path_to_name(self, path):
+    if utils.IsWindows():
+      return path.replace(os.path.sep, "/")
+
+    return path
+
+  def _create_test(self, path, suite, **kwargs):
+    """Converts paths into test objects using the given options"""
+    return self.test_class(
+      suite, path, self._path_to_name(path), self.test_config, **kwargs)
+
+  def list_tests(self):
+    """Loads and returns the test objects for a TestSuite"""
+    # TODO: detect duplicate tests.
+    for filename in self._list_test_filenames():
+      if self._should_filter_by_name(filename):
+        continue
+
+      testname = self._filename_to_testname(filename)
+      case = self._create_test(testname, self.suite)
+      if self._should_filter_by_test(case):
+        continue
+
+      yield case
+
+
+class GenericTestLoader(TestLoader):
+  """Generic TestLoader implementing the logic for listing filenames"""
+  @property
+  def excluded_files(self):
+    return set()
+
+  @property
+  def excluded_dirs(self):
+    return set()
+
+  @property
+  def excluded_suffixes(self):
+    return set()
+
+  @property
+  def test_dirs(self):
+    return [self.test_root]
+
+  @property
+  def extensions(self):
+    return []
+
+  def __find_extension(self, filename):
+    for extension in self.extensions:
+      if filename.endswith(extension):
+        return extension
+
+    return False
+
+  def _should_filter_by_name(self, filename):
+    if not self.__find_extension(filename):
+      return True
+
+    for suffix in self.excluded_suffixes:
+      if filename.endswith(suffix):
+        return True
+
+    if os.path.basename(filename) in self.excluded_files:
+      return True
+
+    return False
+
+  def _filename_to_testname(self, filename):
+    extension = self.__find_extension(filename)
+    if not extension:
+      return filename
+
+    return filename[:-len(extension)]
+
+  def _to_relpath(self, abspath, test_root):
+    return os.path.relpath(abspath, test_root)
+
+  def _list_test_filenames(self):
+    for test_dir in sorted(self.test_dirs):
+      test_root = os.path.join(self.test_root, test_dir)
+      for dirname, dirs, files in os.walk(test_root, followlinks=True):
+        dirs.sort()
+        for dir in dirs:
+          if dir in self.excluded_dirs or dir.startswith('.'):
+            dirs.remove(dir)
+
+        files.sort()
+        for filename in files:
+          abspath = os.path.join(dirname, filename)
+
+          yield self._to_relpath(abspath, test_root)
+
+
+class JSTestLoader(GenericTestLoader):
+  @property
+  def extensions(self):
+    return [".js", ".mjs"]
+
+
+class TestGenerator(object):
+  def __init__(self, test_count_estimate, slow_tests, fast_tests):
+    self.test_count_estimate = test_count_estimate
+    self.slow_tests = slow_tests
+    self.fast_tests = fast_tests
+    self._rebuild_iterator()
+
+  def _rebuild_iterator(self):
+    self._iterator = itertools.chain(self.slow_tests, self.fast_tests)
+
+  def __iter__(self):
+    return self
+
+  def __next__(self):
+    return self.next()
+
+  def next(self):
+    return next(self._iterator)
+
+  def merge(self, test_generator):
+    self.test_count_estimate += test_generator.test_count_estimate
+    self.slow_tests = itertools.chain(
+      self.slow_tests, test_generator.slow_tests)
+    self.fast_tests = itertools.chain(
+      self.fast_tests, test_generator.fast_tests)
+    self._rebuild_iterator()
+
+
+@contextmanager
+def _load_testsuite_module(name, root):
+  f = None
+  try:
+    (f, pathname, description) = imp.find_module("testcfg", [root])
+    yield imp.load_module(name + "_testcfg", f, pathname, description)
+  finally:
+    if f:
+      f.close()
+
 class TestSuite(object):
   @staticmethod
-  def LoadTestSuite(root):
+  def Load(root, test_config, framework_name):
     name = root.split(os.path.sep)[-1]
-    f = None
-    try:
-      (f, pathname, description) = imp.find_module("testcfg", [root])
-      module = imp.load_module(name + "_testcfg", f, pathname, description)
-      return module.GetSuite(name, root)
-    finally:
-      if f:
-        f.close()
+    with _load_testsuite_module(name, root) as module:
+      return module.GetSuite(name, root, test_config, framework_name)
 
-  def __init__(self, name, root):
-    # Note: This might be called concurrently from different processes.
+  def __init__(self, name, root, test_config, framework_name):
     self.name = name  # string
     self.root = root  # string containing path
+    self.test_config = test_config
+    self.framework_name = framework_name  # name of the test runner impl
     self.tests = None  # list of TestCase objects
     self.statusfile = None
 
+    self._test_loader = self._test_loader_class()(
+      self, self._test_class(), self.test_config, self.root)
+
   def status_file(self):
     return "%s/%s.status" % (self.root, self.name)
 
-  def ListTests(self, context):
+  @property
+  def _test_loader_class(self):
     raise NotImplementedError
 
-  def _LegacyVariantsGeneratorFactory(self):
-    """The variant generator class to be used."""
-    return LegacyVariantsGenerator
+  def ListTests(self):
+    return self._test_loader.list_tests()
 
-  def CreateLegacyVariantsGenerator(self, variants):
-    """Return a generator for the testing variants of this suite.
+  def __initialize_test_count_estimation(self):
+    # Retrieves a single test to initialize the test generator.
+    next(iter(self.ListTests()), None)
 
-    Args:
-      variants: List of variant names to be run as specified by the test
-                runner.
-    Returns: An object of type LegacyVariantsGenerator.
-    """
-    return self._LegacyVariantsGeneratorFactory()(self, set(variants))
+  def __calculate_test_count(self):
+    self.__initialize_test_count_estimation()
+    return self._test_loader.test_count_estimation
+
+  def load_tests_from_disk(self, statusfile_variables):
+    self.statusfile = statusfile.StatusFile(
+      self.status_file(), statusfile_variables)
+
+    test_count = self.__calculate_test_count()
+    slow_tests = (test for test in self.ListTests() if test.is_slow)
+    fast_tests = (test for test in self.ListTests() if not test.is_slow)
+    return TestGenerator(test_count, slow_tests, fast_tests)
 
   def get_variants_gen(self, variants):
     return self._variants_gen_class()(variants)
@@ -126,83 +298,20 @@
   def _variants_gen_class(self):
     return VariantsGenerator
 
-  def ReadStatusFile(self, variables):
-    self.statusfile = statusfile.StatusFile(self.status_file(), variables)
+  def test_combiner_available(self):
+    return bool(self._test_combiner_class())
 
-  def ReadTestCases(self, context):
-    self.tests = self.ListTests(context)
+  def get_test_combiner(self):
+    cls = self._test_combiner_class()
+    if cls:
+      return cls()
+    return None
 
-
-  def FilterTestCasesByStatus(self,
-                              slow_tests_mode=None,
-                              pass_fail_tests_mode=None):
-    """Filters tests by outcomes from status file.
-
-    Status file has to be loaded before using this function.
-
-    Args:
-      slow_tests_mode: What to do with slow tests.
-      pass_fail_tests_mode: What to do with pass or fail tests.
-
-    Mode options:
-      None (default) - don't skip
-      "skip" - skip if slow/pass_fail
-      "run" - skip if not slow/pass_fail
+  def _test_combiner_class(self):
+    """Returns Combiner subclass. None if suite doesn't support combining
+    tests.
     """
-    def _skip_slow(is_slow, mode):
-      return (
-        (mode == 'run' and not is_slow) or
-        (mode == 'skip' and is_slow))
-
-    def _skip_pass_fail(pass_fail, mode):
-      return (
-        (mode == 'run' and not pass_fail) or
-        (mode == 'skip' and pass_fail))
-
-    def _compliant(test):
-      if test.do_skip:
-        return False
-      if _skip_slow(test.is_slow, slow_tests_mode):
-        return False
-      if _skip_pass_fail(test.is_pass_or_fail, pass_fail_tests_mode):
-        return False
-      return True
-
-    self.tests = filter(_compliant, self.tests)
-
-  def FilterTestCasesByArgs(self, args):
-    """Filter test cases based on command-line arguments.
-
-    args can be a glob: asterisks in any position of the argument
-    represent zero or more characters. Without asterisks, only exact matches
-    will be used with the exeption of the test-suite name as argument.
-    """
-    filtered = []
-    globs = []
-    for a in args:
-      argpath = a.split('/')
-      if argpath[0] != self.name:
-        continue
-      if len(argpath) == 1 or (len(argpath) == 2 and argpath[1] == '*'):
-        return  # Don't filter, run all tests in this suite.
-      path = '/'.join(argpath[1:])
-      globs.append(path)
-
-    for t in self.tests:
-      for g in globs:
-        if fnmatch.fnmatch(t.path, g):
-          filtered.append(t)
-          break
-    self.tests = filtered
-
-  def _create_test(self, path, **kwargs):
-    test = self._test_class()(self, path, self._path_to_name(path), **kwargs)
-    return test
+    return None
 
   def _test_class(self):
     raise NotImplementedError
-
-  def _path_to_name(self, path):
-    if utils.IsWindows():
-      return path.replace("\\", "/")
-    return path
diff --git a/src/v8/tools/testrunner/local/testsuite_unittest.py b/src/v8/tools/testrunner/local/testsuite_unittest.py
index efefe4c..b74fef1 100755
--- a/src/v8/tools/testrunner/local/testsuite_unittest.py
+++ b/src/v8/tools/testrunner/local/testsuite_unittest.py
@@ -3,8 +3,10 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+import itertools
 import os
 import sys
+import tempfile
 import unittest
 
 # Needed because the test runner contains relative imports.
@@ -12,109 +14,72 @@
     os.path.abspath(__file__))))
 sys.path.append(TOOLS_PATH)
 
-from testrunner.local.testsuite import TestSuite
+from testrunner.local.testsuite import TestSuite, TestGenerator
 from testrunner.objects.testcase import TestCase
+from testrunner.test_config import TestConfig
 
 
 class TestSuiteTest(unittest.TestCase):
-  def test_filter_testcases_by_status_first_pass(self):
-    suite = TestSuite('foo', 'bar')
-    suite.rules = {
-      '': {
-        'foo/bar': set(['PASS', 'SKIP']),
-        'baz/bar': set(['PASS', 'FAIL']),
-      },
-    }
-    suite.prefix_rules = {
-      '': {
-        'baz/': set(['PASS', 'SLOW']),
-      },
-    }
-    suite.tests = [
-      TestCase(suite, 'foo/bar', 'foo/bar'),
-      TestCase(suite, 'baz/bar', 'baz/bar'),
-    ]
-    suite.FilterTestCasesByStatus()
-    self.assertEquals(
-        [TestCase(suite, 'baz/bar', 'baz/bar')],
-        suite.tests,
-    )
-    outcomes = suite.GetStatusFileOutcomes(suite.tests[0].name,
-                                           suite.tests[0].variant)
-    self.assertEquals(set(['PASS', 'FAIL', 'SLOW']), outcomes)
-
-  def test_filter_testcases_by_status_second_pass(self):
-    suite = TestSuite('foo', 'bar')
-
-    suite.rules = {
-      '': {
-        'foo/bar': set(['PREV']),
-      },
-      'default': {
-        'foo/bar': set(['PASS', 'SKIP']),
-        'baz/bar': set(['PASS', 'FAIL']),
-      },
-      'stress': {
-        'baz/bar': set(['SKIP']),
-      },
-    }
-    suite.prefix_rules = {
-      '': {
-        'baz/': set(['PREV']),
-      },
-      'default': {
-        'baz/': set(['PASS', 'SLOW']),
-      },
-      'stress': {
-        'foo/': set(['PASS', 'SLOW']),
-      },
-    }
-
-    test1 = TestCase(suite, 'foo/bar', 'foo/bar')
-    test2 = TestCase(suite, 'baz/bar', 'baz/bar')
-    suite.tests = [
-      test1.create_variant(variant='default', flags=[]),
-      test1.create_variant(variant='stress', flags=['-v']),
-      test2.create_variant(variant='default', flags=[]),
-      test2.create_variant(variant='stress', flags=['-v']),
-    ]
-
-    suite.FilterTestCasesByStatus()
-    self.assertEquals(
-        [
-          TestCase(suite, 'foo/bar', 'foo/bar').create_variant(None, ['-v']),
-          TestCase(suite, 'baz/bar', 'baz/bar'),
-        ],
-        suite.tests,
+  def setUp(self):
+    test_dir = os.path.dirname(__file__)
+    self.test_root = os.path.join(test_dir, "fake_testsuite")
+    self.test_config = TestConfig(
+        command_prefix=[],
+        extra_flags=[],
+        isolates=False,
+        mode_flags=[],
+        no_harness=False,
+        noi18n=False,
+        random_seed=0,
+        run_skipped=False,
+        shell_dir='fake_testsuite/fake_d8',
+        timeout=10,
+        verbose=False,
     )
 
-    self.assertEquals(
-        set(['PREV', 'PASS', 'SLOW']),
-        suite.GetStatusFileOutcomes(suite.tests[0].name,
-                                    suite.tests[0].variant),
-    )
-    self.assertEquals(
-        set(['PREV', 'PASS', 'FAIL', 'SLOW']),
-        suite.GetStatusFileOutcomes(suite.tests[1].name,
-                                    suite.tests[1].variant),
-    )
+    self.suite = TestSuite.Load(
+        self.test_root, self.test_config, "standard_runner")
 
-  def test_fail_ok_outcome(self):
-    suite = TestSuite('foo', 'bar')
-    suite.rules = {
-      '': {
-        'foo/bar': set(['FAIL_OK']),
-        'baz/bar': set(['FAIL']),
-      },
-    }
-    suite.prefix_rules = {}
-    suite.tests = [
-      TestCase(suite, 'foo/bar', 'foo/bar'),
-      TestCase(suite, 'baz/bar', 'baz/bar'),
-    ]
+  def testLoadingTestSuites(self):
+    self.assertEquals(self.suite.name, "fake_testsuite")
+    self.assertEquals(self.suite.test_config, self.test_config)
 
-    for t in suite.tests:
-      self.assertEquals(['FAIL'], t.expected_outcomes)
+    # Verify that the components of the TestSuite aren't loaded yet.
+    self.assertIsNone(self.suite.tests)
+    self.assertIsNone(self.suite.statusfile)
+
+  def testLoadingTestsFromDisk(self):
+    tests = self.suite.load_tests_from_disk(
+      statusfile_variables={})
+    def is_generator(iterator):
+      return iterator == iter(iterator)
+
+    self.assertTrue(is_generator(tests))
+    self.assertEquals(tests.test_count_estimate, 2)
+
+    slow_tests, fast_tests = list(tests.slow_tests), list(tests.fast_tests)
+    # Verify that the components of the TestSuite are loaded.
+    self.assertTrue(len(slow_tests) == len(fast_tests) == 1)
+    self.assertTrue(all(test.is_slow for test in slow_tests))
+    self.assertFalse(any(test.is_slow for test in fast_tests))
+    self.assertIsNotNone(self.suite.statusfile)
+
+  def testMergingTestGenerators(self):
+    tests = self.suite.load_tests_from_disk(
+      statusfile_variables={})
+    more_tests = self.suite.load_tests_from_disk(
+      statusfile_variables={})
+
+    # Merge the test generators
+    tests.merge(more_tests)
+    self.assertEquals(tests.test_count_estimate, 4)
+
+    # Check the tests are sorted by speed
+    test_speeds = []
+    for test in tests:
+      test_speeds.append(test.is_slow)
+
+    self.assertEquals(test_speeds, [True, True, False, False])
 
 
 if __name__ == '__main__':
diff --git a/src/v8/tools/testrunner/local/utils.py b/src/v8/tools/testrunner/local/utils.py
index bf8c3d9..9128c43 100644
--- a/src/v8/tools/testrunner/local/utils.py
+++ b/src/v8/tools/testrunner/local/utils.py
@@ -25,6 +25,8 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+# for py2/py3 compatibility
+from __future__ import print_function
 
 from os.path import exists
 from os.path import isdir
@@ -36,6 +38,21 @@
 import urllib2
 
 
+### Exit codes and their meaning.
+# Normal execution.
+EXIT_CODE_PASS = 0
+# Execution with test failures.
+EXIT_CODE_FAILURES = 1
+# Execution with no tests executed.
+EXIT_CODE_NO_TESTS = 2
+# Execution aborted with SIGINT (Ctrl-C).
+EXIT_CODE_INTERRUPTED = 3
+# Execution aborted with SIGTERM.
+EXIT_CODE_TERMINATED = 4
+# Internal error.
+EXIT_CODE_INTERNAL_ERROR = 5
+
+
 def GetSuitePaths(test_root):
   return [ f for f in os.listdir(test_root) if isdir(join(test_root, f)) ]
 
@@ -132,7 +149,7 @@
       return
     except:
       # If there's no curl, fall back to urlopen.
-      print "Curl is currently not installed. Falling back to python."
+      print("Curl is currently not installed. Falling back to python.")
       pass
   with open(destination, 'w') as f:
     f.write(urllib2.urlopen(source).read())
diff --git a/src/v8/tools/testrunner/local/variants.py b/src/v8/tools/testrunner/local/variants.py
index f1e9ad3..4b0cf15 100644
--- a/src/v8/tools/testrunner/local/variants.py
+++ b/src/v8/tools/testrunner/local/variants.py
@@ -4,26 +4,57 @@
 
 # Use this to run several variants of the tests.
 ALL_VARIANT_FLAGS = {
+  "assert_types": [["--assert-types"]],
   "code_serializer": [["--cache=code"]],
   "default": [[]],
   "future": [["--future"]],
+  "gc_stats": [["--gc-stats=1"]],
   # Alias of exhaustive variants, but triggering new test framework features.
   "infra_staging": [[]],
-  "liftoff": [["--liftoff"]],
+  "interpreted_regexp": [["--regexp-interpret-all"]],
+  "jitless": [["--jitless"]],
   "minor_mc": [["--minor-mc"]],
   # No optimization means disable all optimizations. OptimizeFunctionOnNextCall
   # would not force optimization too. It turns into a Nop. Please see
   # https://chromium-review.googlesource.com/c/452620/ for more discussion.
-  "nooptimization": [["--noopt"]],
+  # For WebAssembly, we test "Liftoff-only" in the nooptimization variant and
+  # "TurboFan-only" in the stress variant. The WebAssembly configuration is
+  # independent of JS optimizations, so we can combine those configs.
+  "nooptimization": [["--no-opt", "--liftoff", "--no-wasm-tier-up"]],
   "slow_path": [["--force-slow-path"]],
-  "stress": [["--stress-opt", "--always-opt"]],
-  "stress_background_compile": [["--background-compile", "--stress-background-compile"]],
+  "stress": [["--stress-opt", "--always-opt", "--no-liftoff",
+              "--no-wasm-tier-up"]],
+  "stress_js_bg_compile_wasm_code_gc": [["--stress-background-compile",
+                                         "--wasm-code-gc",
+                                         "--stress-wasm-code-gc"]],
   "stress_incremental_marking":  [["--stress-incremental-marking"]],
   # Trigger stress sampling allocation profiler with sample interval = 2^14
   "stress_sampling": [["--stress-sampling-allocation-profiler=16384"]],
   "trusted": [["--no-untrusted-code-mitigations"]],
-  "wasm_traps": [["--wasm_trap_handler", "--invoke-weak-callbacks", "--wasm-jit-to-native"]],
-  "wasm_no_native": [["--no-wasm-jit-to-native"]],
+  "no_wasm_traps": [["--no-wasm-trap-handler"]],
 }
 
-ALL_VARIANTS = set(ALL_VARIANT_FLAGS.keys())
+SLOW_VARIANTS = set([
+  'stress',
+  'nooptimization',
+])
+
+FAST_VARIANTS = set([
+  'default'
+])
+
+
+def _variant_order_key(v):
+  if v in SLOW_VARIANTS:
+    return 0
+  if v in FAST_VARIANTS:
+    return 100
+  return 50
+
+ALL_VARIANTS = sorted(ALL_VARIANT_FLAGS.keys(),
+                      key=_variant_order_key)
+
+# Check {SLOW,FAST}_VARIANTS entries
+for variants in [SLOW_VARIANTS, FAST_VARIANTS]:
+  for v in variants:
+    assert v in ALL_VARIANT_FLAGS
diff --git a/src/v8/tools/testrunner/local/verbose.py b/src/v8/tools/testrunner/local/verbose.py
index 49e8085..8569368 100644
--- a/src/v8/tools/testrunner/local/verbose.py
+++ b/src/v8/tools/testrunner/local/verbose.py
@@ -25,6 +25,8 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+# for py2/py3 compatibility
+from __future__ import print_function
 
 import sys
 import time
@@ -63,7 +65,7 @@
     else:
       assert False # Unreachable # TODO: check this in outcomes parsing phase.
 
-  print REPORT_TEMPLATE % {
+  print(REPORT_TEMPLATE % {
     "total": total,
     "skipped": skipped,
     "nocrash": nocrash,
@@ -71,17 +73,17 @@
     "fail_ok": fail_ok,
     "fail": fail,
     "crash": crash,
-  }
+  })
 
 
 def PrintTestSource(tests):
   for test in tests:
-    print "--- begin source: %s ---" % test
+    print("--- begin source: %s ---" % test)
     if test.is_source_available():
-      print test.get_source()
+      print(test.get_source())
     else:
-      print '(no source available)'
-    print "--- end source: %s ---" % test
+      print('(no source available)')
+    print("--- end source: %s ---" % test)
 
 
 def FormatTime(d):
@@ -92,11 +94,11 @@
 def PrintTestDurations(suites, outputs, overall_time):
     # Write the times to stderr to make it easy to separate from the
     # test output.
-    print
+    print()
     sys.stderr.write("--- Total time: %s ---\n" % FormatTime(overall_time))
     timed_tests = [(t, outputs[t].duration) for s in suites for t in s.tests
                    if t in outputs]
-    timed_tests.sort(key=lambda (_, duration): duration, reverse=True)
+    timed_tests.sort(key=lambda test_duration: test_duration[1], reverse=True)
     index = 1
     for test, duration in timed_tests[:20]:
       t = FormatTime(duration)
diff --git a/src/v8/tools/testrunner/num_fuzzer.py b/src/v8/tools/testrunner/num_fuzzer.py
new file mode 100755
index 0000000..d4e92a6
--- /dev/null
+++ b/src/v8/tools/testrunner/num_fuzzer.py
@@ -0,0 +1,226 @@
+#!/usr/bin/env python
+#
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import random
+import sys
+
+# Adds testrunner to the path hence it has to be imported at the beggining.
+import base_runner
+
+from testrunner.local import utils
+
+from testrunner.testproc import fuzzer
+from testrunner.testproc.base import TestProcProducer
+from testrunner.testproc.combiner import CombinerProc
+from testrunner.testproc.execution import ExecutionProc
+from testrunner.testproc.expectation import ForgiveTimeoutProc
+from testrunner.testproc.filter import StatusFileFilterProc, NameFilterProc
+from testrunner.testproc.loader import LoadProc
+from testrunner.testproc.progress import ResultsTracker
+from testrunner.utils import random_utils
+
+
+DEFAULT_SUITES = ["mjsunit", "webkit", "benchmarks"]
+
+
+class NumFuzzer(base_runner.BaseTestRunner):
+  def __init__(self, *args, **kwargs):
+    super(NumFuzzer, self).__init__(*args, **kwargs)
+
+  @property
+  def framework_name(self):
+    return 'num_fuzzer'
+
+  def _add_parser_options(self, parser):
+    parser.add_option("--fuzzer-random-seed", default=0,
+                      help="Default seed for initializing fuzzer random "
+                      "generator")
+    parser.add_option("--tests-count", default=5, type="int",
+                      help="Number of tests to generate from each base test. "
+                           "Can be combined with --total-timeout-sec with "
+                           "value 0 to provide infinite number of subtests. "
+                           "When --combine-tests is set it indicates how many "
+                           "tests to create in total")
+
+    # Stress gc
+    parser.add_option("--stress-marking", default=0, type="int",
+                      help="probability [0-10] of adding --stress-marking "
+                           "flag to the test")
+    parser.add_option("--stress-scavenge", default=0, type="int",
+                      help="probability [0-10] of adding --stress-scavenge "
+                           "flag to the test")
+    parser.add_option("--stress-compaction", default=0, type="int",
+                      help="probability [0-10] of adding --stress-compaction "
+                           "flag to the test")
+    parser.add_option("--stress-gc", default=0, type="int",
+                      help="probability [0-10] of adding --random-gc-interval "
+                           "flag to the test")
+
+    # Stress tasks
+    parser.add_option("--stress-delay-tasks", default=0, type="int",
+                      help="probability [0-10] of adding --stress-delay-tasks "
+                           "flag to the test")
+    parser.add_option("--stress-thread-pool-size", default=0, type="int",
+                      help="probability [0-10] of adding --thread-pool-size "
+                           "flag to the test")
+
+    # Stress deopt
+    parser.add_option("--stress-deopt", default=0, type="int",
+                      help="probability [0-10] of adding --deopt-every-n-times "
+                           "flag to the test")
+    parser.add_option("--stress-deopt-min", default=1, type="int",
+                      help="extends --stress-deopt to have minimum interval "
+                           "between deopt points")
+
+    # Combine multiple tests
+    parser.add_option("--combine-tests", default=False, action="store_true",
+                      help="Combine multiple tests as one and run with "
+                           "try-catch wrapper")
+    parser.add_option("--combine-max", default=100, type="int",
+                      help="Maximum number of tests to combine")
+    parser.add_option("--combine-min", default=2, type="int",
+                      help="Minimum number of tests to combine")
+
+    # Miscellaneous
+    parser.add_option("--variants", default='default',
+                      help="Comma-separated list of testing variants")
+
+    return parser
+
+
+  def _process_options(self, options):
+    if not options.fuzzer_random_seed:
+      options.fuzzer_random_seed = random_utils.random_seed()
+
+    if options.total_timeout_sec:
+      options.tests_count = 0
+
+    if options.combine_tests:
+      if options.combine_min > options.combine_max:
+        print(('min_group_size (%d) cannot be larger than max_group_size (%d)' %
+               options.min_group_size, options.max_group_size))
+        raise base_runner.TestRunnerError()
+
+    if options.variants != 'default':
+      print ('Only default testing variant is supported with numfuzz')
+      raise base_runner.TestRunnerError()
+
+    return True
+
+  def _get_default_suite_names(self):
+    return DEFAULT_SUITES
+
+  def _get_statusfile_variables(self, options):
+    variables = (
+        super(NumFuzzer, self)._get_statusfile_variables(options))
+    variables.update({
+      'deopt_fuzzer': bool(options.stress_deopt),
+      'endurance_fuzzer': bool(options.combine_tests),
+      'gc_stress': bool(options.stress_gc),
+      'gc_fuzzer': bool(max([options.stress_marking,
+                             options.stress_scavenge,
+                             options.stress_compaction,
+                             options.stress_gc,
+                             options.stress_delay_tasks,
+                             options.stress_thread_pool_size])),
+    })
+    return variables
+
+  def _do_execute(self, tests, args, options):
+    loader = LoadProc(tests)
+    fuzzer_rng = random.Random(options.fuzzer_random_seed)
+
+    combiner = self._create_combiner(fuzzer_rng, options)
+    results = self._create_result_tracker(options)
+    execproc = ExecutionProc(options.j)
+    sigproc = self._create_signal_proc()
+    indicators = self._create_progress_indicators(
+      tests.test_count_estimate, options)
+    procs = [
+      loader,
+      NameFilterProc(args) if args else None,
+      StatusFileFilterProc(None, None),
+      # TODO(majeski): Improve sharding when combiner is present. Maybe select
+      # different random seeds for shards instead of splitting tests.
+      self._create_shard_proc(options),
+      ForgiveTimeoutProc(),
+      combiner,
+      self._create_fuzzer(fuzzer_rng, options),
+      sigproc,
+    ] + indicators + [
+      results,
+      self._create_timeout_proc(options),
+      self._create_rerun_proc(options),
+      execproc,
+    ]
+    self._prepare_procs(procs)
+    loader.load_initial_tests(initial_batch_size=float('inf'))
+
+    # TODO(majeski): maybe some notification from loader would be better?
+    if combiner:
+      combiner.generate_initial_tests(options.j * 4)
+
+    # This starts up worker processes and blocks until all tests are
+    # processed.
+    execproc.run()
+
+    for indicator in indicators:
+      indicator.finished()
+
+    print('>>> %d tests ran' % results.total)
+    if results.failed:
+      return utils.EXIT_CODE_FAILURES
+
+    # Indicate if a SIGINT or SIGTERM happened.
+    return sigproc.exit_code
+
+  def _is_testsuite_supported(self, suite, options):
+    return not options.combine_tests or suite.test_combiner_available()
+
+  def _create_combiner(self, rng, options):
+    if not options.combine_tests:
+      return None
+    return CombinerProc(rng, options.combine_min, options.combine_max,
+                        options.tests_count)
+
+  def _create_fuzzer(self, rng, options):
+    return fuzzer.FuzzerProc(
+        rng,
+        self._tests_count(options),
+        self._create_fuzzer_configs(options),
+        self._disable_analysis(options),
+    )
+
+  def _tests_count(self, options):
+    if options.combine_tests:
+      return 1
+    return options.tests_count
+
+  def _disable_analysis(self, options):
+    """Disable analysis phase when options are used that don't support it."""
+    return options.combine_tests
+
+  def _create_fuzzer_configs(self, options):
+    fuzzers = []
+    def add(name, prob, *args):
+      if prob:
+        fuzzers.append(fuzzer.create_fuzzer_config(name, prob, *args))
+
+    add('compaction', options.stress_compaction)
+    add('marking', options.stress_marking)
+    add('scavenge', options.stress_scavenge)
+    add('gc_interval', options.stress_gc)
+    add('threads', options.stress_thread_pool_size)
+    add('delay', options.stress_delay_tasks)
+    add('deopt', options.stress_deopt, options.stress_deopt_min)
+    return fuzzers
+
+
+if __name__ == '__main__':
+  sys.exit(NumFuzzer().execute())
diff --git a/src/v8/tools/testrunner/objects/output.py b/src/v8/tools/testrunner/objects/output.py
index adc33c9..78aa63d 100644
--- a/src/v8/tools/testrunner/objects/output.py
+++ b/src/v8/tools/testrunner/objects/output.py
@@ -27,12 +27,15 @@
 
 
 import signal
+import copy
 
 from ..local import utils
 
+
 class Output(object):
 
-  def __init__(self, exit_code, timed_out, stdout, stderr, pid, duration):
+  def __init__(self, exit_code=0, timed_out=False, stdout=None, stderr=None,
+               pid=None, duration=None):
     self.exit_code = exit_code
     self.timed_out = timed_out
     self.stdout = stdout
@@ -40,6 +43,13 @@
     self.pid = pid
     self.duration = duration
 
+  def without_text(self):
+    """Returns copy of the output without stdout and stderr."""
+    other = copy.copy(self)
+    other.stdout = None
+    other.stderr = None
+    return other
+
   def HasCrashed(self):
     if utils.IsWindows():
       return 0x80000000 & self.exit_code and not (0x3FFFFF00 & self.exit_code)
@@ -52,3 +62,16 @@
 
   def HasTimedOut(self):
     return self.timed_out
+
+  def IsSuccess(self):
+    return not self.HasCrashed() and not self.HasTimedOut()
+
+
+class _NullOutput(Output):
+  """Useful to signal that the binary has not been run."""
+  def __init__(self):
+    super(_NullOutput, self).__init__()
+
+
+# Default instance of the _NullOutput class above.
+NULL_OUTPUT = _NullOutput()
diff --git a/src/v8/tools/testrunner/objects/predictable.py b/src/v8/tools/testrunner/objects/predictable.py
index ad93077..52d14ea 100644
--- a/src/v8/tools/testrunner/objects/predictable.py
+++ b/src/v8/tools/testrunner/objects/predictable.py
@@ -4,6 +4,7 @@
 
 from ..local import statusfile
 from ..outproc import base as outproc_base
+from ..testproc import base as testproc_base
 from ..testproc.result import Result
 
 
@@ -15,11 +16,7 @@
 
 
 def get_outproc(test):
-  output_proc = test.output_proc
-  if output_proc.negative or statusfile.FAIL in test.expected_outcomes:
-    # TODO(majeski): Skip these tests instead of having special outproc.
-    return NeverUnexpectedOutputOutProc(output_proc)
-  return OutProc(output_proc)
+  return OutProc(test.output_proc)
 
 
 class OutProc(outproc_base.BaseOutProc):
@@ -31,9 +28,6 @@
     super(OutProc, self).__init__()
     self._outproc = _outproc
 
-  def process(self, output):
-    return Result(self.has_unexpected_output(output), output)
-
   def has_unexpected_output(self, output):
     return output.exit_code != 0
 
@@ -49,9 +43,6 @@
     return self._outproc.expected_outcomes
 
 
-class NeverUnexpectedOutputOutProc(OutProc):
-  """Output processor wrapper for tests that we will return False for
-  has_unexpected_output in the predictable mode.
-  """
-  def has_unexpected_output(self, output):
-    return False
+class PredictableFilterProc(testproc_base.TestProcFilter):
+  def _filter(self, test):
+    return test.skip_predictable()
diff --git a/src/v8/tools/testrunner/objects/testcase.py b/src/v8/tools/testrunner/objects/testcase.py
index 06db328..6d4dcd1 100644
--- a/src/v8/tools/testrunner/objects/testcase.py
+++ b/src/v8/tools/testrunner/objects/testcase.py
@@ -37,10 +37,33 @@
 
 FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
 
+# Patterns for additional resource files on Android. Files that are not covered
+# by one of the other patterns below will be specified in the resources section.
+RESOURCES_PATTERN = re.compile(r"//\s+Resources:(.*)")
+# Pattern to auto-detect files to push on Android for statements like:
+# load("path/to/file.js")
+LOAD_PATTERN = re.compile(
+    r"(?:load|readbuffer|read)\((?:'|\")([^'\"]+)(?:'|\")\)")
+# Pattern to auto-detect files to push on Android for statements like:
+# import "path/to/file.js"
+MODULE_RESOURCES_PATTERN_1 = re.compile(
+    r"(?:import|export)(?:\(| )(?:'|\")([^'\"]+)(?:'|\")")
+# Pattern to auto-detect files to push on Android for statements like:
+# import foobar from "path/to/file.js"
+MODULE_RESOURCES_PATTERN_2 = re.compile(
+    r"(?:import|export).*from (?:'|\")([^'\"]+)(?:'|\")")
+
+TIMEOUT_LONG = "long"
+
+try:
+  cmp             # Python 2
+except NameError:
+  def cmp(x, y):  # Python 3
+    return (x > y) - (x < y)
 
 
 class TestCase(object):
-  def __init__(self, suite, path, name):
+  def __init__(self, suite, path, name, test_config):
     self.suite = suite        # TestSuite object
 
     self.path = path          # string, e.g. 'div-mod', 'test-api/foo'
@@ -49,60 +72,40 @@
     self.variant = None       # name of the used testing variant
     self.variant_flags = []   # list of strings, flags specific to this test
 
-    self.id = None  # int, used to map result back to TestCase instance
-    self.run = 1  # The nth time this test is executed.
-    self.cmd = None
-
     # Fields used by the test processors.
     self.origin = None # Test that this test is subtest of.
     self.processor = None # Processor that created this subtest.
     self.procid = '%s/%s' % (self.suite.name, self.name) # unique id
     self.keep_output = False # Can output of this test be dropped
 
+    # Test config contains information needed to build the command.
+    self._test_config = test_config
+    self._random_seed = None # Overrides test config value if not None
+
+    # Outcomes
     self._statusfile_outcomes = None
-    self._expected_outcomes = None # optimization: None == [statusfile.PASS]
+    self.expected_outcomes = None
     self._statusfile_flags = None
+
     self._prepare_outcomes()
 
   def create_subtest(self, processor, subtest_id, variant=None, flags=None,
-                     keep_output=False):
+                     keep_output=False, random_seed=None):
     subtest = copy.copy(self)
     subtest.origin = self
     subtest.processor = processor
     subtest.procid += '.%s' % subtest_id
-    subtest.keep_output = keep_output
+    subtest.keep_output |= keep_output
+    if random_seed:
+      subtest._random_seed = random_seed
+    if flags:
+      subtest.variant_flags = subtest.variant_flags + flags
     if variant is not None:
       assert self.variant is None
       subtest.variant = variant
-      subtest.variant_flags = flags
       subtest._prepare_outcomes()
     return subtest
 
-  def create_variant(self, variant, flags, procid_suffix=None):
-    """Makes a shallow copy of the object and updates variant, variant flags and
-    all fields that depend on it, e.g. expected outcomes.
-
-    Args
-      variant       - variant name
-      flags         - flags that should be added to origin test's variant flags
-      procid_suffix - for multiple variants with the same name set suffix to
-        keep procid unique.
-    """
-    other = copy.copy(self)
-    if not self.variant_flags:
-      other.variant_flags = flags
-    else:
-      other.variant_flags = self.variant_flags + flags
-    other.variant = variant
-    if procid_suffix:
-      other.procid += '[%s-%s]' % (variant, procid_suffix)
-    else:
-      other.procid += '[%s]' % variant
-
-    other._prepare_outcomes(variant != self.variant)
-
-    return other
-
   def _prepare_outcomes(self, force_update=True):
     if force_update or self._statusfile_outcomes is None:
       def is_flag(outcome):
@@ -140,7 +143,8 @@
 
   @property
   def do_skip(self):
-    return statusfile.SKIP in self._statusfile_outcomes
+    return (statusfile.SKIP in self._statusfile_outcomes and
+            not self.suite.test_config.run_skipped)
 
   @property
   def is_slow(self):
@@ -160,43 +164,59 @@
   def only_standard_variant(self):
     return statusfile.NO_VARIANTS in self._statusfile_outcomes
 
-  def get_command(self, context):
-    params = self._get_cmd_params(context)
+  def get_command(self):
+    params = self._get_cmd_params()
     env = self._get_cmd_env()
-    shell, shell_flags = self._get_shell_with_flags(context)
-    timeout = self._get_timeout(params, context.timeout)
-    return self._create_cmd(shell, shell_flags + params, env, timeout, context)
+    shell = self.get_shell()
+    if utils.IsWindows():
+      shell += '.exe'
+    shell_flags = self._get_shell_flags()
+    timeout = self._get_timeout(params)
+    return self._create_cmd(shell, shell_flags + params, env, timeout)
 
-  def _get_cmd_params(self, ctx):
+  def _get_cmd_params(self):
     """Gets command parameters and combines them in the following order:
       - files [empty by default]
+      - random seed
       - extra flags (from command line)
       - user flags (variant/fuzzer flags)
-      - statusfile flags
       - mode flags (based on chosen mode)
       - source flags (from source code) [empty by default]
+      - test-suite flags
+      - statusfile flags
 
     The best way to modify how parameters are created is to only override
     methods for getting partial parameters.
     """
     return (
-        self._get_files_params(ctx) +
-        self._get_extra_flags(ctx) +
+        self._get_files_params() +
+        self._get_random_seed_flags() +
+        self._get_extra_flags() +
         self._get_variant_flags() +
-        self._get_statusfile_flags() +
-        self._get_mode_flags(ctx) +
+        self._get_mode_flags() +
         self._get_source_flags() +
-        self._get_suite_flags(ctx)
+        self._get_suite_flags() +
+        self._get_statusfile_flags()
     )
 
   def _get_cmd_env(self):
     return {}
 
-  def _get_files_params(self, ctx):
+  def _get_files_params(self):
     return []
 
-  def _get_extra_flags(self, ctx):
-    return ctx.extra_flags
+  def _get_timeout_param(self):
+    return None
+
+  def _get_random_seed_flags(self):
+    return ['--random-seed=%d' % self.random_seed]
+
+  @property
+  def random_seed(self):
+    return self._random_seed or self._test_config.random_seed
+
+  def _get_extra_flags(self):
+    return self._test_config.extra_flags
 
   def _get_variant_flags(self):
     return self.variant_flags
@@ -208,50 +228,49 @@
     """
     return self._statusfile_flags
 
-  def _get_mode_flags(self, ctx):
-    return ctx.mode_flags
+  def _get_mode_flags(self):
+    return self._test_config.mode_flags
 
   def _get_source_flags(self):
     return []
 
-  def _get_suite_flags(self, ctx):
+  def _get_suite_flags(self):
     return []
 
-  def _get_shell_with_flags(self, ctx):
-    shell = self.get_shell()
-    shell_flags = []
-    if shell == 'd8':
-      shell_flags.append('--test')
-    if utils.IsWindows():
-      shell += '.exe'
-    if ctx.random_seed:
-      shell_flags.append('--random-seed=%s' % ctx.random_seed)
-    return shell, shell_flags
+  def _get_shell_flags(self):
+    return []
 
-  def _get_timeout(self, params, timeout):
+  def _get_timeout(self, params):
+    timeout = self._test_config.timeout
     if "--stress-opt" in params:
       timeout *= 4
+    if "--jitless" in params:
+      timeout *= 2
+    if "--no-opt" in params:
+      timeout *= 2
     if "--noenable-vfp3" in params:
       timeout *= 2
-
-    # TODO(majeski): make it slow outcome dependent.
-    timeout *= 2
+    if self._get_timeout_param() == TIMEOUT_LONG:
+      timeout *= 10
+    if self.is_slow:
+      timeout *= 4
     return timeout
 
   def get_shell(self):
-    return 'd8'
+    raise NotImplementedError()
 
   def _get_suffix(self):
     return '.js'
 
-  def _create_cmd(self, shell, params, env, timeout, ctx):
+  def _create_cmd(self, shell, params, env, timeout):
     return command.Command(
-      cmd_prefix=ctx.command_prefix,
-      shell=os.path.abspath(os.path.join(ctx.shell_dir, shell)),
+      cmd_prefix=self._test_config.command_prefix,
+      shell=os.path.abspath(os.path.join(self._test_config.shell_dir, shell)),
       args=params,
       env=env,
       timeout=timeout,
-      verbose=ctx.verbose
+      verbose=self._test_config.verbose,
+      resources_func=self._get_resources,
     )
 
   def _parse_source_flags(self, source=None):
@@ -271,6 +290,18 @@
   def _get_source_path(self):
     return None
 
+  def _get_resources(self):
+    """Returns a list of absolute paths with additional files needed by the
+    test case.
+
+    Used to push additional files to Android devices.
+    """
+    return []
+
+  def skip_predictable(self):
+    """Returns True if the test case is not suitable for predictable testing."""
+    return True
+
   @property
   def output_proc(self):
     if self.expected_outcomes is outproc.OUTCOMES_PASS:
@@ -281,18 +312,63 @@
     # Make sure that test cases are sorted correctly if sorted without
     # key function. But using a key function is preferred for speed.
     return cmp(
-        (self.suite.name, self.name, self.variant_flags),
-        (other.suite.name, other.name, other.variant_flags)
+        (self.suite.name, self.name, self.variant),
+        (other.suite.name, other.name, other.variant)
     )
 
-  def __hash__(self):
-    return hash((self.suite.name, self.name, ''.join(self.variant_flags)))
-
   def __str__(self):
     return self.suite.name + '/' + self.name
 
-  # TODO(majeski): Rename `id` field or `get_id` function since they're
-  # unrelated.
-  def get_id(self):
-    return '%s/%s %s' % (
-        self.suite.name, self.name, ' '.join(self.variant_flags))
+
+class D8TestCase(TestCase):
+  def get_shell(self):
+    return "d8"
+
+  def _get_shell_flags(self):
+    return ['--test']
+
+  def _get_resources_for_file(self, file):
+    """Returns for a given file a list of absolute paths of files needed by the
+    given file.
+    """
+    with open(file) as f:
+      source = f.read()
+    result = []
+    def add_path(path):
+      result.append(os.path.abspath(path.replace('/', os.path.sep)))
+    for match in RESOURCES_PATTERN.finditer(source):
+      # There are several resources per line. Relative to base dir.
+      for path in match.group(1).strip().split():
+        add_path(path)
+    for match in LOAD_PATTERN.finditer(source):
+      # Files in load statements are relative to base dir.
+      add_path(match.group(1))
+    for match in MODULE_RESOURCES_PATTERN_1.finditer(source):
+      # Imported files are relative to the file importing them.
+      add_path(os.path.join(os.path.dirname(file), match.group(1)))
+    for match in MODULE_RESOURCES_PATTERN_2.finditer(source):
+      # Imported files are relative to the file importing them.
+      add_path(os.path.join(os.path.dirname(file), match.group(1)))
+    return result
+
+  def _get_resources(self):
+    """Returns the list of files needed by a test case."""
+    if not self._get_source_path():
+      return []
+    result = set()
+    to_check = [self._get_source_path()]
+    # Recurse over all files until reaching a fixpoint.
+    while to_check:
+      next_resource = to_check.pop()
+      result.add(next_resource)
+      for resource in self._get_resources_for_file(next_resource):
+        # Only add files that exist on disc. The pattens we check for give some
+        # false positives otherwise.
+        if resource not in result and os.path.exists(resource):
+          to_check.append(resource)
+    return sorted(list(result))
+
+  def skip_predictable(self):
+    """Returns True if the test case is not suitable for predictable testing."""
+    return (statusfile.FAIL in self.expected_outcomes or
+            self.output_proc.negative)
diff --git a/src/v8/tools/testrunner/outproc/base.py b/src/v8/tools/testrunner/outproc/base.py
index 9a9db4e..39efb60 100644
--- a/src/v8/tools/testrunner/outproc/base.py
+++ b/src/v8/tools/testrunner/outproc/base.py
@@ -2,24 +2,45 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-import collections
 import itertools
 
+from ..testproc.base import (
+    DROP_RESULT, DROP_OUTPUT, DROP_PASS_OUTPUT, DROP_PASS_STDOUT)
 from ..local import statusfile
 from ..testproc.result import Result
 
 
 OUTCOMES_PASS = [statusfile.PASS]
 OUTCOMES_FAIL = [statusfile.FAIL]
+OUTCOMES_PASS_OR_TIMEOUT = [statusfile.PASS, statusfile.TIMEOUT]
+OUTCOMES_FAIL_OR_TIMEOUT = [statusfile.FAIL, statusfile.TIMEOUT]
 
 
 class BaseOutProc(object):
-  def process(self, output):
-    return Result(self.has_unexpected_output(output), output)
+  def process(self, output, reduction=None):
+    has_unexpected_output = self.has_unexpected_output(output)
+    return self._create_result(has_unexpected_output, output, reduction)
 
   def has_unexpected_output(self, output):
     return self.get_outcome(output) not in self.expected_outcomes
 
+  def _create_result(self, has_unexpected_output, output, reduction):
+    """Creates Result instance. When reduction is passed it tries to drop some
+    parts of the result to save memory and time needed to send the result
+    across process boundary. None disables reduction and full result is created.
+    """
+    if reduction == DROP_RESULT:
+      return None
+    if reduction == DROP_OUTPUT:
+      return Result(has_unexpected_output, None)
+    if not has_unexpected_output:
+      if reduction == DROP_PASS_OUTPUT:
+        return Result(has_unexpected_output, None)
+      if reduction == DROP_PASS_STDOUT:
+        return Result(has_unexpected_output, output.without_text())
+
+    return Result(has_unexpected_output, output)
+
   def get_outcome(self, output):
     if output.HasCrashed():
       return statusfile.CRASH
@@ -64,6 +85,11 @@
     return OUTCOMES_PASS
 
 
+class NegPassOutProc(Negative, PassOutProc):
+  """Output processor optimized for negative tests expected to PASS"""
+  pass
+
+
 class OutProc(BaseOutProc):
   """Output processor optimized for positive tests with expected outcomes
   different than a single PASS.
@@ -92,6 +118,7 @@
 
 # TODO(majeski): Override __reduce__ to make it deserialize as one instance.
 DEFAULT = PassOutProc()
+DEFAULT_NEGATIVE = NegPassOutProc()
 
 
 class ExpectedOutProc(OutProc):
diff --git a/src/v8/tools/testrunner/outproc/message.py b/src/v8/tools/testrunner/outproc/message.py
index bbfc1cd..f196cfd 100644
--- a/src/v8/tools/testrunner/outproc/message.py
+++ b/src/v8/tools/testrunner/outproc/message.py
@@ -32,8 +32,15 @@
     if len(expected_lines) != len(actual_lines):
       return True
 
+    # Try .js first, and fall back to .mjs.
+    # TODO(v8:9406): clean this up by never separating the path from
+    # the extension in the first place.
+    base_path = self._basepath + '.js'
+    if not os.path.exists(base_path):
+      base_path = self._basepath + '.mjs'
+
     env = {
-      'basename': os.path.basename(self._basepath + '.js'),
+      'basename': os.path.basename(base_path),
     }
     for (expected, actual) in itertools.izip_longest(
         expected_lines, actual_lines, fillvalue=''):
diff --git a/src/v8/tools/testrunner/outproc/mkgrokdump.py b/src/v8/tools/testrunner/outproc/mkgrokdump.py
index 8efde12..4013023 100644
--- a/src/v8/tools/testrunner/outproc/mkgrokdump.py
+++ b/src/v8/tools/testrunner/outproc/mkgrokdump.py
@@ -20,7 +20,7 @@
     diff = difflib.unified_diff(expected_lines, actual_lines, lineterm="",
                                 fromfile="expected_path")
     diffstring = '\n'.join(diff)
-    if diffstring is not "":
+    if diffstring != "":
       if "generated from a non-shipping build" in output.stdout:
         return False
       if not "generated from a shipping build" in output.stdout:
diff --git a/src/v8/tools/testrunner/outproc/test262.py b/src/v8/tools/testrunner/outproc/test262.py
index b5eb554..bf3bc05 100644
--- a/src/v8/tools/testrunner/outproc/test262.py
+++ b/src/v8/tools/testrunner/outproc/test262.py
@@ -7,18 +7,29 @@
 from . import base
 
 
+def _is_failure_output(output):
+  return (
+    output.exit_code != 0 or
+    'FAILED!' in output.stdout
+  )
+
+
 class ExceptionOutProc(base.OutProc):
   """Output processor for tests with expected exception."""
-  def __init__(self, expected_outcomes, expected_exception=None):
+  def __init__(
+      self, expected_outcomes, expected_exception=None, negative=False):
     super(ExceptionOutProc, self).__init__(expected_outcomes)
     self._expected_exception = expected_exception
+    self._negative = negative
+
+  @property
+  def negative(self):
+    return self._negative
 
   def _is_failure_output(self, output):
-    if output.exit_code != 0:
-      return True
     if self._expected_exception != self._parse_exception(output.stdout):
       return True
-    return 'FAILED!' in output.stdout
+    return _is_failure_output(output)
 
   def _parse_exception(self, string):
     # somefile:somelinenumber: someerror[: sometext]
@@ -31,16 +42,13 @@
       return None
 
 
-def _is_failure_output(self, output):
-  return (
-    output.exit_code != 0 or
-    'FAILED!' in output.stdout
-  )
-
-
 class NoExceptionOutProc(base.OutProc):
   """Output processor optimized for tests without expected exception."""
-NoExceptionOutProc._is_failure_output = _is_failure_output
+  def __init__(self, expected_outcomes):
+    super(NoExceptionOutProc, self).__init__(expected_outcomes)
+
+  def _is_failure_output(self, output):
+    return _is_failure_output(output)
 
 
 class PassNoExceptionOutProc(base.PassOutProc):
@@ -48,7 +56,8 @@
   Output processor optimized for tests expected to PASS without expected
   exception.
   """
-PassNoExceptionOutProc._is_failure_output = _is_failure_output
+  def _is_failure_output(self, output):
+    return _is_failure_output(output)
 
 
 PASS_NO_EXCEPTION = PassNoExceptionOutProc()
diff --git a/src/v8/tools/testrunner/standard_runner.py b/src/v8/tools/testrunner/standard_runner.py
index 3be2099..51e7860 100755
--- a/src/v8/tools/testrunner/standard_runner.py
+++ b/src/v8/tools/testrunner/standard_runner.py
@@ -4,595 +4,356 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+# for py2/py3 compatibility
+from __future__ import print_function
+from functools import reduce
 
-from collections import OrderedDict
-from os.path import join
-import multiprocessing
 import os
-import random
-import shlex
-import subprocess
 import sys
-import time
 
 # Adds testrunner to the path hence it has to be imported at the beggining.
 import base_runner
 
-from testrunner.local import execution
-from testrunner.local import progress
-from testrunner.local import testsuite
 from testrunner.local import utils
-from testrunner.local import verbose
 from testrunner.local.variants import ALL_VARIANTS
-from testrunner.objects import context
 from testrunner.objects import predictable
 from testrunner.testproc.execution import ExecutionProc
 from testrunner.testproc.filter import StatusFileFilterProc, NameFilterProc
 from testrunner.testproc.loader import LoadProc
-from testrunner.testproc.progress import (VerboseProgressIndicator,
-                                          ResultsTracker,
-                                          TestsCounter)
-from testrunner.testproc.rerun import RerunProc
+from testrunner.testproc.seed import SeedProc
 from testrunner.testproc.variant import VariantProc
 
 
-TIMEOUT_DEFAULT = 60
+ARCH_GUESS = utils.DefaultArch()
 
-# Variants ordered by expected runtime (slowest first).
-VARIANTS = ["default"]
+VARIANTS = ['default']
 
 MORE_VARIANTS = [
-  "stress",
-  "stress_incremental_marking",
-  "nooptimization",
-  "stress_background_compile",
-  "wasm_traps",
+  'jitless',
+  'stress',
+  'stress_js_bg_compile_wasm_code_gc',
+  'stress_incremental_marking',
 ]
 
 VARIANT_ALIASES = {
   # The default for developer workstations.
-  "dev": VARIANTS,
+  'dev': VARIANTS,
   # Additional variants, run on all bots.
-  "more": MORE_VARIANTS,
-  # Shortcut for the two above ("more" first - it has the longer running tests).
-  "exhaustive": MORE_VARIANTS + VARIANTS,
+  'more': MORE_VARIANTS,
+  # Shortcut for the two above ('more' first - it has the longer running tests)
+  'exhaustive': MORE_VARIANTS + VARIANTS,
   # Additional variants, run on a subset of bots.
-  "extra": ["future", "liftoff", "trusted"],
+  'extra': ['nooptimization', 'future', 'no_wasm_traps'],
 }
 
-GC_STRESS_FLAGS = ["--gc-interval=500", "--stress-compaction",
-                   "--concurrent-recompilation-queue-length=64",
-                   "--concurrent-recompilation-delay=500",
-                   "--concurrent-recompilation"]
+GC_STRESS_FLAGS = ['--gc-interval=500', '--stress-compaction',
+                   '--concurrent-recompilation-queue-length=64',
+                   '--concurrent-recompilation-delay=500',
+                   '--concurrent-recompilation',
+                   '--stress-flush-bytecode',
+                   '--wasm-code-gc', '--stress-wasm-code-gc']
 
-# Double the timeout for these:
-SLOW_ARCHS = ["arm",
-              "mips",
-              "mipsel",
-              "mips64",
-              "mips64el",
-              "s390",
-              "s390x",
-              "arm64"]
+RANDOM_GC_STRESS_FLAGS = ['--random-gc-interval=5000',
+                          '--stress-compaction-random']
+
 
 PREDICTABLE_WRAPPER = os.path.join(
     base_runner.BASE_DIR, 'tools', 'predictable_wrapper.py')
 
 
 class StandardTestRunner(base_runner.BaseTestRunner):
-    def __init__(self, *args, **kwargs):
-        super(StandardTestRunner, self).__init__(*args, **kwargs)
+  def __init__(self, *args, **kwargs):
+    super(StandardTestRunner, self).__init__(*args, **kwargs)
 
-        self.sancov_dir = None
+    self.sancov_dir = None
+    self._variants = None
 
-    def _get_default_suite_names(self):
-      return ['default']
+  @property
+  def framework_name(self):
+    return 'standard_runner'
 
-    def _do_execute(self, suites, args, options):
-      if options.swarming:
-        # Swarming doesn't print how isolated commands are called. Lets make
-        # this less cryptic by printing it ourselves.
-        print ' '.join(sys.argv)
+  def _get_default_suite_names(self):
+    return ['default']
 
-        if utils.GuessOS() == "macos":
-          # TODO(machenbach): Temporary output for investigating hanging test
-          # driver on mac.
-          print "V8 related processes running on this host:"
-          try:
-            print subprocess.check_output(
-              "ps -e | egrep 'd8|cctest|unittests'", shell=True)
-          except Exception:
-            pass
+  def _add_parser_options(self, parser):
+    parser.add_option('--novfp3',
+                      help='Indicates that V8 was compiled without VFP3'
+                      ' support',
+                      default=False, action='store_true')
 
-      return self._execute(args, options, suites)
+    # Variants
+    parser.add_option('--no-variants', '--novariants',
+                      help='Deprecated. '
+                           'Equivalent to passing --variants=default',
+                      default=False, dest='no_variants', action='store_true')
+    parser.add_option('--variants',
+                      help='Comma-separated list of testing variants;'
+                      ' default: "%s"' % ','.join(VARIANTS))
+    parser.add_option('--exhaustive-variants',
+                      default=False, action='store_true',
+                      help='Deprecated. '
+                           'Equivalent to passing --variants=exhaustive')
 
-    def _add_parser_options(self, parser):
-      parser.add_option("--sancov-dir",
-                        help="Directory where to collect coverage data")
-      parser.add_option("--cfi-vptr",
-                        help="Run tests with UBSAN cfi_vptr option.",
-                        default=False, action="store_true")
-      parser.add_option("--novfp3",
-                        help="Indicates that V8 was compiled without VFP3"
-                        " support",
-                        default=False, action="store_true")
-      parser.add_option("--cat", help="Print the source of the tests",
-                        default=False, action="store_true")
-      parser.add_option("--slow-tests",
-                        help="Regard slow tests (run|skip|dontcare)",
-                        default="dontcare")
-      parser.add_option("--pass-fail-tests",
-                        help="Regard pass|fail tests (run|skip|dontcare)",
-                        default="dontcare")
-      parser.add_option("--gc-stress",
-                        help="Switch on GC stress mode",
-                        default=False, action="store_true")
-      parser.add_option("--command-prefix",
-                        help="Prepended to each shell command used to run a"
-                        " test",
-                        default="")
-      parser.add_option("--extra-flags",
-                        help="Additional flags to pass to each test command",
-                        action="append", default=[])
-      parser.add_option("--infra-staging", help="Use new test runner features",
-                        default=False, action="store_true")
-      parser.add_option("--isolates", help="Whether to test isolates",
-                        default=False, action="store_true")
-      parser.add_option("-j", help="The number of parallel tasks to run",
-                        default=0, type="int")
-      parser.add_option("--no-harness", "--noharness",
-                        help="Run without test harness of a given suite",
-                        default=False, action="store_true")
-      parser.add_option("--no-presubmit", "--nopresubmit",
-                        help='Skip presubmit checks (deprecated)',
-                        default=False, dest="no_presubmit", action="store_true")
-      parser.add_option("--no-sorting", "--nosorting",
-                        help="Don't sort tests according to duration of last"
-                        " run.",
-                        default=False, dest="no_sorting", action="store_true")
-      parser.add_option("--no-variants", "--novariants",
-                        help="Deprecated. "
-                             "Equivalent to passing --variants=default",
-                        default=False, dest="no_variants", action="store_true")
-      parser.add_option("--variants",
-                        help="Comma-separated list of testing variants;"
-                        " default: \"%s\"" % ",".join(VARIANTS))
-      parser.add_option("--exhaustive-variants",
-                        default=False, action="store_true",
-                        help="Deprecated. "
-                             "Equivalent to passing --variants=exhaustive")
-      parser.add_option("-p", "--progress",
-                        help=("The style of progress indicator"
-                              " (verbose, dots, color, mono)"),
-                        choices=progress.PROGRESS_INDICATORS.keys(),
-                        default="mono")
-      parser.add_option("--quickcheck", default=False, action="store_true",
-                        help=("Quick check mode (skip slow tests)"))
-      parser.add_option("--report", help="Print a summary of the tests to be"
-                        " run",
-                        default=False, action="store_true")
-      parser.add_option("--json-test-results",
-                        help="Path to a file for storing json results.")
-      parser.add_option("--flakiness-results",
-                        help="Path to a file for storing flakiness json.")
-      parser.add_option("--rerun-failures-count",
-                        help=("Number of times to rerun each failing test case."
-                              " Very slow tests will be rerun only once."),
-                        default=0, type="int")
-      parser.add_option("--rerun-failures-max",
-                        help="Maximum number of failing test cases to rerun.",
-                        default=100, type="int")
-      parser.add_option("--dont-skip-slow-simulator-tests",
-                        help="Don't skip more slow tests when using a"
-                        " simulator.",
-                        default=False, action="store_true",
-                        dest="dont_skip_simulator_slow_tests")
-      parser.add_option("--swarming",
-                        help="Indicates running test driver on swarming.",
-                        default=False, action="store_true")
-      parser.add_option("--time", help="Print timing information after running",
-                        default=False, action="store_true")
-      parser.add_option("-t", "--timeout", help="Timeout in seconds",
-                        default=TIMEOUT_DEFAULT, type="int")
-      parser.add_option("--warn-unused", help="Report unused rules",
-                        default=False, action="store_true")
-      parser.add_option("--junitout", help="File name of the JUnit output")
-      parser.add_option("--junittestsuite",
-                        help="The testsuite name in the JUnit output file",
-                        default="v8tests")
-      parser.add_option("--random-seed", default=0, dest="random_seed",
-                        help="Default seed for initializing random generator",
-                        type=int)
-      parser.add_option("--random-seed-stress-count", default=1, type="int",
-                        dest="random_seed_stress_count",
-                        help="Number of runs with different random seeds")
+    # Filters
+    parser.add_option('--slow-tests', default='dontcare',
+                      help='Regard slow tests (run|skip|dontcare)')
+    parser.add_option('--pass-fail-tests', default='dontcare',
+                      help='Regard pass|fail tests (run|skip|dontcare)')
+    parser.add_option('--quickcheck', default=False, action='store_true',
+                      help=('Quick check mode (skip slow tests)'))
+    parser.add_option('--dont-skip-slow-simulator-tests',
+                      help='Don\'t skip more slow tests when using a'
+                      ' simulator.',
+                      default=False, action='store_true',
+                      dest='dont_skip_simulator_slow_tests')
 
-    def _process_options(self, options):
-      global VARIANTS
+    # Stress modes
+    parser.add_option('--gc-stress',
+                      help='Switch on GC stress mode',
+                      default=False, action='store_true')
+    parser.add_option('--random-gc-stress',
+                      help='Switch on random GC stress mode',
+                      default=False, action='store_true')
+    parser.add_option('--random-seed-stress-count', default=1, type='int',
+                      dest='random_seed_stress_count',
+                      help='Number of runs with different random seeds. Only '
+                           'with test processors: 0 means infinite '
+                           'generation.')
 
-      if options.sancov_dir:
-        self.sancov_dir = options.sancov_dir
-        if not os.path.exists(self.sancov_dir):
-          print("sancov-dir %s doesn't exist" % self.sancov_dir)
-          raise base_runner.TestRunnerError()
+    # Noop
+    parser.add_option('--cfi-vptr',
+                      help='Run tests with UBSAN cfi_vptr option.',
+                      default=False, action='store_true')
+    parser.add_option('--infra-staging', help='Use new test runner features',
+                      dest='infra_staging', default=None,
+                      action='store_true')
+    parser.add_option('--no-infra-staging',
+                      help='Opt out of new test runner features',
+                      dest='infra_staging', default=None,
+                      action='store_false')
+    parser.add_option('--no-sorting', '--nosorting',
+                      help='Don\'t sort tests according to duration of last'
+                      ' run.',
+                      default=False, dest='no_sorting', action='store_true')
+    parser.add_option('--no-presubmit', '--nopresubmit',
+                      help='Skip presubmit checks (deprecated)',
+                      default=False, dest='no_presubmit', action='store_true')
 
-      options.command_prefix = shlex.split(options.command_prefix)
-      options.extra_flags = sum(map(shlex.split, options.extra_flags), [])
+    # Unimplemented for test processors
+    parser.add_option('--sancov-dir',
+                      help='Directory where to collect coverage data')
+    parser.add_option('--cat', help='Print the source of the tests',
+                      default=False, action='store_true')
+    parser.add_option('--flakiness-results',
+                      help='Path to a file for storing flakiness json.')
+    parser.add_option('--time', help='Print timing information after running',
+                      default=False, action='store_true')
+    parser.add_option('--warn-unused', help='Report unused rules',
+                      default=False, action='store_true')
+    parser.add_option('--report', default=False, action='store_true',
+                      help='Print a summary of the tests to be run')
 
-      if options.gc_stress:
-        options.extra_flags += GC_STRESS_FLAGS
-
-      if self.build_config.asan:
-        options.extra_flags.append("--invoke-weak-callbacks")
-        options.extra_flags.append("--omit-quit")
-
-      if options.novfp3:
-        options.extra_flags.append("--noenable-vfp3")
-
-      if options.no_variants:  # pragma: no cover
-        print ("Option --no-variants is deprecated. "
-               "Pass --variants=default instead.")
-        assert not options.variants
-        options.variants = "default"
-
-      if options.exhaustive_variants:  # pragma: no cover
-        # TODO(machenbach): Switch infra to --variants=exhaustive after M65.
-        print ("Option --exhaustive-variants is deprecated. "
-               "Pass --variants=exhaustive instead.")
-        # This is used on many bots. It includes a larger set of default
-        # variants.
-        # Other options for manipulating variants still apply afterwards.
-        assert not options.variants
-        options.variants = "exhaustive"
-
-      if options.quickcheck:
-        assert not options.variants
-        options.variants = "stress,default"
-        options.slow_tests = "skip"
-        options.pass_fail_tests = "skip"
-
-      if self.build_config.predictable:
-        options.variants = "default"
-        options.extra_flags.append("--predictable")
-        options.extra_flags.append("--verify_predictable")
-        options.extra_flags.append("--no-inline-new")
-        # Add predictable wrapper to command prefix.
-        options.command_prefix = (
-            [sys.executable, PREDICTABLE_WRAPPER] + options.command_prefix)
-
-      # TODO(machenbach): Figure out how to test a bigger subset of variants on
-      # msan.
-      if self.build_config.msan:
-        options.variants = "default"
-
-      if options.j == 0:
-        options.j = multiprocessing.cpu_count()
-
-      if options.random_seed_stress_count <= 1 and options.random_seed == 0:
-        options.random_seed = self._random_seed()
-
-      # Use developer defaults if no variant was specified.
-      options.variants = options.variants or "dev"
-
-      if options.variants == "infra_staging":
-        options.variants = "exhaustive"
-        options.infra_staging = True
-
-      # Resolve variant aliases and dedupe.
-      # TODO(machenbach): Don't mutate global variable. Rather pass mutated
-      # version as local variable.
-      VARIANTS = list(set(reduce(
-          list.__add__,
-          (VARIANT_ALIASES.get(v, [v]) for v in options.variants.split(",")),
-          [],
-      )))
-
-      if not set(VARIANTS).issubset(ALL_VARIANTS):
-        print "All variants must be in %s" % str(ALL_VARIANTS)
+  def _process_options(self, options):
+    if options.sancov_dir:
+      self.sancov_dir = options.sancov_dir
+      if not os.path.exists(self.sancov_dir):
+        print('sancov-dir %s doesn\'t exist' % self.sancov_dir)
         raise base_runner.TestRunnerError()
 
-      def CheckTestMode(name, option):  # pragma: no cover
-        if not option in ["run", "skip", "dontcare"]:
-          print "Unknown %s mode %s" % (name, option)
-          raise base_runner.TestRunnerError()
-      CheckTestMode("slow test", options.slow_tests)
-      CheckTestMode("pass|fail test", options.pass_fail_tests)
-      if self.build_config.no_i18n:
-        base_runner.TEST_MAP["bot_default"].remove("intl")
-        base_runner.TEST_MAP["default"].remove("intl")
-        # TODO(machenbach): uncomment after infra side lands.
-        # base_runner.TEST_MAP["d8_default"].remove("intl")
+    if options.gc_stress:
+      options.extra_flags += GC_STRESS_FLAGS
 
-    def _setup_env(self):
-      super(StandardTestRunner, self)._setup_env()
+    if options.random_gc_stress:
+      options.extra_flags += RANDOM_GC_STRESS_FLAGS
 
-      symbolizer_option = self._get_external_symbolizer_option()
+    if self.build_config.asan:
+      options.extra_flags.append('--invoke-weak-callbacks')
+      options.extra_flags.append('--omit-quit')
 
-      if self.sancov_dir:
-        os.environ['ASAN_OPTIONS'] = ":".join([
-          'coverage=1',
-          'coverage_dir=%s' % self.sancov_dir,
-          symbolizer_option,
-          "allow_user_segv_handler=1",
-        ])
+    if self.build_config.no_snap:
+      # Speed up slow nosnap runs. Allocation verification is covered by
+      # running mksnapshot on other builders.
+      options.extra_flags.append('--no-turbo-verify-allocation')
 
-    def _random_seed(self):
-      seed = 0
-      while not seed:
-        seed = random.SystemRandom().randint(-2147483648, 2147483647)
-      return seed
+    if options.novfp3:
+      options.extra_flags.append('--noenable-vfp3')
 
-    def _execute(self, args, options, suites):
-      print(">>> Running tests for %s.%s" % (self.build_config.arch,
-                                             self.mode_name))
-      # Populate context object.
+    if options.no_variants:  # pragma: no cover
+      print ('Option --no-variants is deprecated. '
+             'Pass --variants=default instead.')
+      assert not options.variants
+      options.variants = 'default'
 
-      # Simulators are slow, therefore allow a longer timeout.
-      if self.build_config.arch in SLOW_ARCHS:
-        options.timeout *= 2
+    if options.exhaustive_variants:  # pragma: no cover
+      # TODO(machenbach): Switch infra to --variants=exhaustive after M65.
+      print ('Option --exhaustive-variants is deprecated. '
+             'Pass --variants=exhaustive instead.')
+      # This is used on many bots. It includes a larger set of default
+      # variants.
+      # Other options for manipulating variants still apply afterwards.
+      assert not options.variants
+      options.variants = 'exhaustive'
 
-      options.timeout *= self.mode_options.timeout_scalefactor
+    if options.quickcheck:
+      assert not options.variants
+      options.variants = 'stress,default'
+      options.slow_tests = 'skip'
+      options.pass_fail_tests = 'skip'
 
-      if self.build_config.predictable:
-        # Predictable mode is slower.
-        options.timeout *= 2
+    if self.build_config.predictable:
+      options.variants = 'default'
+      options.extra_flags.append('--predictable')
+      options.extra_flags.append('--verify-predictable')
+      options.extra_flags.append('--no-inline-new')
+      # Add predictable wrapper to command prefix.
+      options.command_prefix = (
+          [sys.executable, PREDICTABLE_WRAPPER] + options.command_prefix)
 
-      ctx = context.Context(self.build_config.arch,
-                            self.mode_options.execution_mode,
-                            self.outdir,
-                            self.mode_options.flags,
-                            options.verbose,
-                            options.timeout,
-                            options.isolates,
-                            options.command_prefix,
-                            options.extra_flags,
-                            self.build_config.no_i18n,
-                            options.random_seed,
-                            options.no_sorting,
-                            options.rerun_failures_count,
-                            options.rerun_failures_max,
-                            options.no_harness,
-                            use_perf_data=not options.swarming,
-                            sancov_dir=self.sancov_dir,
-                            infra_staging=options.infra_staging)
+    # TODO(machenbach): Figure out how to test a bigger subset of variants on
+    # msan.
+    if self.build_config.msan:
+      options.variants = 'default'
 
-      # TODO(all): Combine "simulator" and "simulator_run".
-      # TODO(machenbach): In GN we can derive simulator run from
-      # target_arch != v8_target_arch in the dumped build config.
-      simulator_run = (
-        not options.dont_skip_simulator_slow_tests and
-        self.build_config.arch in [
-          'arm64', 'arm', 'mipsel', 'mips', 'mips64', 'mips64el', 'ppc',
-          'ppc64', 's390', 's390x'] and
-        bool(base_runner.ARCH_GUESS) and
-        self.build_config.arch != base_runner.ARCH_GUESS)
-      # Find available test suites and read test cases from them.
-      variables = {
-        "arch": self.build_config.arch,
-        "asan": self.build_config.asan,
-        "byteorder": sys.byteorder,
-        "dcheck_always_on": self.build_config.dcheck_always_on,
-        "deopt_fuzzer": False,
-        "gc_fuzzer": False,
-        "gc_stress": options.gc_stress,
-        "gcov_coverage": self.build_config.gcov_coverage,
-        "isolates": options.isolates,
-        "mode": self.mode_options.status_mode,
-        "msan": self.build_config.msan,
-        "no_harness": options.no_harness,
-        "no_i18n": self.build_config.no_i18n,
-        "no_snap": self.build_config.no_snap,
-        "novfp3": options.novfp3,
-        "predictable": self.build_config.predictable,
-        "simulator": utils.UseSimulator(self.build_config.arch),
-        "simulator_run": simulator_run,
-        "system": utils.GuessOS(),
-        "tsan": self.build_config.tsan,
-        "ubsan_vptr": self.build_config.ubsan_vptr,
-      }
+    if options.variants == 'infra_staging':
+      options.variants = 'exhaustive'
 
-      progress_indicator = progress.IndicatorNotifier()
-      progress_indicator.Register(
-        progress.PROGRESS_INDICATORS[options.progress]())
-      if options.junitout:  # pragma: no cover
-        progress_indicator.Register(progress.JUnitTestProgressIndicator(
-            options.junitout, options.junittestsuite))
-      if options.json_test_results:
-        progress_indicator.Register(progress.JsonTestProgressIndicator(
-          options.json_test_results,
-          self.build_config.arch,
-          self.mode_options.execution_mode,
-          ctx.random_seed))
-      if options.flakiness_results:  # pragma: no cover
-        progress_indicator.Register(progress.FlakinessTestProgressIndicator(
-            options.flakiness_results))
+    self._variants = self._parse_variants(options.variants)
 
-      if options.infra_staging:
-        for s in suites:
-          s.ReadStatusFile(variables)
-          s.ReadTestCases(ctx)
+    def CheckTestMode(name, option):  # pragma: no cover
+      if option not in ['run', 'skip', 'dontcare']:
+        print('Unknown %s mode %s' % (name, option))
+        raise base_runner.TestRunnerError()
+    CheckTestMode('slow test', options.slow_tests)
+    CheckTestMode('pass|fail test', options.pass_fail_tests)
+    if self.build_config.no_i18n:
+      base_runner.TEST_MAP['bot_default'].remove('intl')
+      base_runner.TEST_MAP['default'].remove('intl')
+      # TODO(machenbach): uncomment after infra side lands.
+      # base_runner.TEST_MAP['d8_default'].remove('intl')
 
-        return self._run_test_procs(suites, args, options, progress_indicator,
-                                    ctx)
+  def _parse_variants(self, aliases_str):
+    # Use developer defaults if no variant was specified.
+    aliases_str = aliases_str or 'dev'
+    aliases = aliases_str.split(',')
+    user_variants = set(reduce(
+        list.__add__, [VARIANT_ALIASES.get(a, [a]) for a in aliases]))
 
-      all_tests = []
-      num_tests = 0
-      for s in suites:
-        s.ReadStatusFile(variables)
-        s.ReadTestCases(ctx)
-        if len(args) > 0:
-          s.FilterTestCasesByArgs(args)
-        all_tests += s.tests
+    result = [v for v in ALL_VARIANTS if v in user_variants]
+    if len(result) == len(user_variants):
+      return result
 
-        # First filtering by status applying the generic rules (tests without
-        # variants)
-        if options.warn_unused:
-          tests = [(t.name, t.variant) for t in s.tests]
-          s.statusfile.warn_unused_rules(tests, check_variant_rules=False)
-        s.FilterTestCasesByStatus(options.slow_tests, options.pass_fail_tests)
+    for v in user_variants:
+      if v not in ALL_VARIANTS:
+        print('Unknown variant: %s' % v)
+        raise base_runner.TestRunnerError()
+    assert False, 'Unreachable'
 
-        if options.cat:
-          verbose.PrintTestSource(s.tests)
-          continue
-        variant_gen = s.CreateLegacyVariantsGenerator(VARIANTS)
-        variant_tests = [ t.create_variant(v, flags)
-                          for t in s.tests
-                          for v in variant_gen.FilterVariantsByTest(t)
-                          for flags in variant_gen.GetFlagSets(t, v) ]
+  def _setup_env(self):
+    super(StandardTestRunner, self)._setup_env()
 
-        if options.random_seed_stress_count > 1:
-          # Duplicate test for random seed stress mode.
-          def iter_seed_flags():
-            for _ in range(0, options.random_seed_stress_count):
-              # Use given random seed for all runs (set by default in
-              # execution.py) or a new random seed if none is specified.
-              if options.random_seed:
-                yield []
-              else:
-                yield ["--random-seed=%d" % self._random_seed()]
-          s.tests = [
-            t.create_variant(t.variant, flags, 'seed-stress-%d' % n)
-            for t in variant_tests
-            for n, flags in enumerate(iter_seed_flags())
-          ]
-        else:
-          s.tests = variant_tests
+    symbolizer_option = self._get_external_symbolizer_option()
 
-        # Second filtering by status applying also the variant-dependent rules.
-        if options.warn_unused:
-          tests = [(t.name, t.variant) for t in s.tests]
-          s.statusfile.warn_unused_rules(tests, check_variant_rules=True)
+    if self.sancov_dir:
+      os.environ['ASAN_OPTIONS'] = ':'.join([
+        'coverage=1',
+        'coverage_dir=%s' % self.sancov_dir,
+        symbolizer_option,
+        'allow_user_segv_handler=1',
+      ])
 
-        s.FilterTestCasesByStatus(options.slow_tests, options.pass_fail_tests)
-        s.tests = self._shard_tests(s.tests, options)
+  def _get_statusfile_variables(self, options):
+    variables = (
+        super(StandardTestRunner, self)._get_statusfile_variables(options))
 
-        for t in s.tests:
-          t.cmd = t.get_command(ctx)
+    simulator_run = (
+      not options.dont_skip_simulator_slow_tests and
+      self.build_config.arch in [
+        'arm64', 'arm', 'mipsel', 'mips', 'mips64', 'mips64el', 'ppc',
+        'ppc64', 's390', 's390x'] and
+      bool(ARCH_GUESS) and
+      self.build_config.arch != ARCH_GUESS)
 
-        num_tests += len(s.tests)
+    variables.update({
+      'gc_stress': options.gc_stress or options.random_gc_stress,
+      'gc_fuzzer': options.random_gc_stress,
+      'novfp3': options.novfp3,
+      'simulator_run': simulator_run,
+    })
+    return variables
 
-      if options.cat:
-        return 0  # We're done here.
+  def _do_execute(self, tests, args, options):
+    jobs = options.j
 
-      if options.report:
-        verbose.PrintReport(all_tests)
+    print('>>> Running with test processors')
+    loader = LoadProc(tests)
+    results = self._create_result_tracker(options)
+    indicators = self._create_progress_indicators(
+        tests.test_count_estimate, options)
 
-      # Run the tests.
-      start_time = time.time()
+    outproc_factory = None
+    if self.build_config.predictable:
+      outproc_factory = predictable.get_outproc
+    execproc = ExecutionProc(jobs, outproc_factory)
+    sigproc = self._create_signal_proc()
 
-      if self.build_config.predictable:
-        outproc_factory = predictable.get_outproc
-      else:
-        outproc_factory = None
+    procs = [
+      loader,
+      NameFilterProc(args) if args else None,
+      StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
+      VariantProc(self._variants),
+      StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
+      self._create_predictable_filter(),
+      self._create_shard_proc(options),
+      self._create_seed_proc(options),
+      sigproc,
+    ] + indicators + [
+      results,
+      self._create_timeout_proc(options),
+      self._create_rerun_proc(options),
+      execproc,
+    ]
 
-      runner = execution.Runner(suites, progress_indicator, ctx,
-                                outproc_factory)
-      exit_code = runner.Run(options.j)
-      overall_duration = time.time() - start_time
+    self._prepare_procs(procs)
 
-      if options.time:
-        verbose.PrintTestDurations(suites, runner.outputs, overall_duration)
+    loader.load_initial_tests(initial_batch_size=options.j * 2)
 
-      if num_tests == 0:
-        print("Warning: no tests were run!")
+    # This starts up worker processes and blocks until all tests are
+    # processed.
+    execproc.run()
 
-      if exit_code == 1 and options.json_test_results:
-        print("Force exit code 0 after failures. Json test results file "
-              "generated with failure information.")
-        exit_code = 0
+    for indicator in indicators:
+      indicator.finished()
 
-      if self.sancov_dir:
-        # If tests ran with sanitizer coverage, merge coverage files in the end.
-        try:
-          print "Merging sancov files."
-          subprocess.check_call([
-            sys.executable,
-            join(self.basedir, "tools", "sanitizers", "sancov_merger.py"),
-            "--coverage-dir=%s" % self.sancov_dir])
-        except:
-          print >> sys.stderr, "Error: Merging sancov files failed."
-          exit_code = 1
+    if tests.test_count_estimate:
+      percentage = float(results.total) / tests.test_count_estimate * 100
+    else:
+      percentage = 0
 
-      return exit_code
+    print (('>>> %d base tests produced %d (%d%s)'
+           ' non-filtered tests') % (
+        tests.test_count_estimate, results.total, percentage, '%'))
 
-    def _shard_tests(self, tests, options):
-      shard_run, shard_count = self._get_shard_info(options)
+    print('>>> %d tests ran' % (results.total - results.remaining))
 
-      if shard_count < 2:
-        return tests
-      count = 0
-      shard = []
-      for test in tests:
-        if count % shard_count == shard_run - 1:
-          shard.append(test)
-        count += 1
-      return shard
+    exit_code = utils.EXIT_CODE_PASS
+    if results.failed:
+      exit_code = utils.EXIT_CODE_FAILURES
+    if not results.total:
+      exit_code = utils.EXIT_CODE_NO_TESTS
 
-    def _run_test_procs(self, suites, args, options, progress_indicator,
-                        context):
-      jobs = options.j
+    # Indicate if a SIGINT or SIGTERM happened.
+    return max(exit_code, sigproc.exit_code)
 
-      print '>>> Running with test processors'
-      loader = LoadProc()
-      tests_counter = TestsCounter()
-      results = ResultsTracker()
-      indicators = progress_indicator.ToProgressIndicatorProcs()
-      execproc = ExecutionProc(jobs, context)
+  def _create_predictable_filter(self):
+    if not self.build_config.predictable:
+      return None
+    return predictable.PredictableFilterProc()
 
-      procs = [
-        loader,
-        NameFilterProc(args) if args else None,
-        StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
-        self._create_shard_proc(options),
-        tests_counter,
-        VariantProc(VARIANTS),
-        StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
-      ] + indicators + [
-        results,
-        self._create_rerun_proc(context),
-        execproc,
-      ]
-
-      procs = filter(None, procs)
-
-      for i in xrange(0, len(procs) - 1):
-        procs[i].connect_to(procs[i + 1])
-
-      tests = [t for s in suites for t in s.tests]
-      tests.sort(key=lambda t: t.is_slow, reverse=True)
-
-      loader.setup()
-      loader.load_tests(tests)
-
-      print '>>> Running %d base tests' % tests_counter.total
-      tests_counter.remove_from_chain()
-
-      execproc.start()
-
-      for indicator in indicators:
-        indicator.finished()
-
-      print '>>> %d tests ran' % results.total
-
-      exit_code = 0
-      if results.failed:
-        exit_code = 1
-      if results.remaining:
-        exit_code = 2
-
-
-      if exit_code == 1 and options.json_test_results:
-        print("Force exit code 0 after failures. Json test results file "
-              "generated with failure information.")
-        exit_code = 0
-      return exit_code
-
-    def _create_rerun_proc(self, ctx):
-      if not ctx.rerun_failures_count:
-        return None
-      return RerunProc(ctx.rerun_failures_count,
-                       ctx.rerun_failures_max)
-
+  def _create_seed_proc(self, options):
+    if options.random_seed_stress_count == 1:
+      return None
+    return SeedProc(options.random_seed_stress_count, options.random_seed,
+                    options.j * 4)
 
 
 if __name__ == '__main__':
diff --git a/src/v8/tools/testrunner/test_config.py b/src/v8/tools/testrunner/test_config.py
new file mode 100644
index 0000000..27ac72b
--- /dev/null
+++ b/src/v8/tools/testrunner/test_config.py
@@ -0,0 +1,34 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import random
+
+from .utils import random_utils
+
+
+class TestConfig(object):
+  def __init__(self,
+               command_prefix,
+               extra_flags,
+               isolates,
+               mode_flags,
+               no_harness,
+               noi18n,
+               random_seed,
+               run_skipped,
+               shell_dir,
+               timeout,
+               verbose):
+    self.command_prefix = command_prefix
+    self.extra_flags = extra_flags
+    self.isolates = isolates
+    self.mode_flags = mode_flags
+    self.no_harness = no_harness
+    self.noi18n = noi18n
+    # random_seed is always not None.
+    self.random_seed = random_seed or random_utils.random_seed()
+    self.run_skipped = run_skipped
+    self.shell_dir = shell_dir
+    self.timeout = timeout
+    self.verbose = verbose
diff --git a/src/v8/tools/testrunner/testproc/base.py b/src/v8/tools/testrunner/testproc/base.py
index 1a87dbe..c52c779 100644
--- a/src/v8/tools/testrunner/testproc/base.py
+++ b/src/v8/tools/testrunner/testproc/base.py
@@ -37,36 +37,12 @@
 DROP_PASS_OUTPUT = 2
 DROP_PASS_STDOUT = 3
 
-def get_reduce_result_function(requirement):
-  if requirement == DROP_RESULT:
-    return lambda _: None
-
-  if requirement == DROP_OUTPUT:
-    def f(result):
-      result.output = None
-      return result
-    return f
-
-  if requirement == DROP_PASS_OUTPUT:
-    def f(result):
-      if not result.has_unexpected_output:
-        result.output = None
-      return result
-    return f
-
-  if requirement == DROP_PASS_STDOUT:
-    def f(result):
-      if not result.has_unexpected_output:
-        result.output.stdout = None
-        result.output.stderr = None
-      return result
-    return f
-
 
 class TestProc(object):
   def __init__(self):
     self._prev_proc = None
     self._next_proc = None
+    self._stopped = False
     self._requirement = DROP_RESULT
     self._prev_requirement = None
     self._reduce_result = lambda result: result
@@ -90,13 +66,21 @@
     self._prev_requirement = requirement
     if self._next_proc:
       self._next_proc.setup(max(requirement, self._requirement))
-    if self._prev_requirement < self._requirement:
-      self._reduce_result = get_reduce_result_function(self._prev_requirement)
+
+    # Since we're not winning anything by dropping part of the result, we
+    # either drop the whole result or pass it through as is. The real
+    # reduction happens during result creation (in the output processor), so
+    # the result is immutable.
+    if (self._prev_requirement < self._requirement and
+        self._prev_requirement == DROP_RESULT):
+      self._reduce_result = lambda _: None
 
   def next_test(self, test):
     """
     Method called by previous processor whenever it produces new test.
     This method shouldn't be called by anyone except previous processor.
+    Returns a boolean value to signal whether the test was loaded into the
+    execution queue successfully or not.
     """
     raise NotImplementedError()
 
@@ -111,15 +95,28 @@
     if self._prev_proc:
       self._prev_proc.heartbeat()
 
+  def stop(self):
+    if not self._stopped:
+      self._stopped = True
+      if self._prev_proc:
+        self._prev_proc.stop()
+      if self._next_proc:
+        self._next_proc.stop()
+
+  @property
+  def is_stopped(self):
+    return self._stopped
+
   ### Communication
 
   def _send_test(self, test):
     """Helper method for sending test to the next processor."""
-    self._next_proc.next_test(test)
+    return self._next_proc.next_test(test)
 
   def _send_result(self, test, result):
     """Helper method for sending result to the previous processor."""
-    result = self._reduce_result(result)
+    if not test.keep_output:
+      result = self._reduce_result(result)
     self._prev_proc.result_for(test, result)
 
 
@@ -131,7 +128,7 @@
 
   def next_test(self, test):
     self._on_next_test(test)
-    self._send_test(test)
+    return self._send_test(test)
 
   def result_for(self, test, result):
     self._on_result_for(test, result)
@@ -163,7 +160,7 @@
     self._name = name
 
   def next_test(self, test):
-    self._next_test(test)
+    return self._next_test(test)
 
   def result_for(self, subtest, result):
     self._result_for(subtest.origin, subtest, result)
@@ -195,9 +192,9 @@
 
   def next_test(self, test):
     if self._filter(test):
-      self._send_result(test, SKIPPED)
-    else:
-      self._send_test(test)
+      return False
+
+    return self._send_test(test)
 
   def result_for(self, test, result):
     self._send_result(test, result)
diff --git a/src/v8/tools/testrunner/testproc/combiner.py b/src/v8/tools/testrunner/testproc/combiner.py
new file mode 100644
index 0000000..4d992f4
--- /dev/null
+++ b/src/v8/tools/testrunner/testproc/combiner.py
@@ -0,0 +1,127 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+from collections import defaultdict
+import time
+
+from . import base
+from ..objects import testcase
+from ..outproc import base as outproc
+
+class CombinerProc(base.TestProc):
+  def __init__(self, rng, min_group_size, max_group_size, count):
+    """
+    Args:
+      rng: random number generator
+      min_group_size: minimum number of tests to combine
+      max_group_size: maximum number of tests to combine
+      count: how many tests to generate. 0 means infinite running
+    """
+    super(CombinerProc, self).__init__()
+
+    self._rng = rng
+    self._min_size = min_group_size
+    self._max_size = max_group_size
+    self._count = count
+
+    # Index of the last generated test
+    self._current_num = 0
+
+    # {suite name: instance of TestGroups}
+    self._groups = defaultdict(TestGroups)
+
+    # {suite name: instance of TestCombiner}
+    self._combiners = {}
+
+  def setup(self, requirement=base.DROP_RESULT):
+    # Combiner is not able to pass results (even as None) to the previous
+    # processor.
+    assert requirement == base.DROP_RESULT
+    self._next_proc.setup(base.DROP_RESULT)
+
+  def next_test(self, test):
+    group_key = self._get_group_key(test)
+    if not group_key:
+      # Test not suitable for combining
+      return False
+
+    self._groups[test.suite.name].add_test(group_key, test)
+    return True
+
+  def _get_group_key(self, test):
+    combiner =  self._get_combiner(test.suite)
+    if not combiner:
+      print ('>>> Warning: There is no combiner for %s testsuite' %
+             test.suite.name)
+      return None
+    return combiner.get_group_key(test)
+
+  def result_for(self, test, result):
+    self._send_next_test()
+
+  def generate_initial_tests(self, num=1):
+    for _ in range(0, num):
+      self._send_next_test()
+
+  def _send_next_test(self):
+    if self.is_stopped:
+      return False
+
+    if self._count and self._current_num >= self._count:
+      return False
+
+    combined_test = self._create_new_test()
+    if not combined_test:
+      # Not enough tests
+      return False
+
+    return self._send_test(combined_test)
+
+  def _create_new_test(self):
+    suite, combiner = self._select_suite()
+    groups = self._groups[suite]
+
+    max_size = self._rng.randint(self._min_size, self._max_size)
+    sample = groups.sample(self._rng, max_size)
+    if not sample:
+      return None
+
+    self._current_num += 1
+    return combiner.combine('%s-%d' % (suite, self._current_num), sample)
+
+  def _select_suite(self):
+    """Returns pair (suite name, combiner)."""
+    selected = self._rng.randint(0, len(self._groups) - 1)
+    for n, suite in enumerate(self._groups):
+      if n == selected:
+        return suite, self._combiners[suite]
+
+  def _get_combiner(self, suite):
+    combiner = self._combiners.get(suite.name)
+    if not combiner:
+      combiner = suite.get_test_combiner()
+      self._combiners[suite.name] = combiner
+    return combiner
+
+
+class TestGroups(object):
+  def __init__(self):
+    self._groups = defaultdict(list)
+    self._keys = []
+
+  def add_test(self, key, test):
+    self._groups[key].append(test)
+    self._keys.append(key)
+
+  def sample(self, rng, max_size):
+    # Not enough tests
+    if not self._groups:
+      return None
+
+    group_key = rng.choice(self._keys)
+    tests = self._groups[group_key]
+    return [rng.choice(tests) for _ in range(0, max_size)]
diff --git a/src/v8/tools/testrunner/testproc/execution.py b/src/v8/tools/testrunner/testproc/execution.py
index 021b02a..68ecf45 100644
--- a/src/v8/tools/testrunner/testproc/execution.py
+++ b/src/v8/tools/testrunner/testproc/execution.py
@@ -15,12 +15,12 @@
   return job.run(process_context)
 
 
-def create_process_context(requirement):
-  return ProcessContext(base.get_reduce_result_function(requirement))
+def create_process_context(result_reduction):
+  return ProcessContext(result_reduction)
 
 
 JobResult = collections.namedtuple('JobResult', ['id', 'result'])
-ProcessContext = collections.namedtuple('ProcessContext', ['reduce_result_f'])
+ProcessContext = collections.namedtuple('ProcessContext', ['result_reduction'])
 
 
 class Job(object):
@@ -32,9 +32,8 @@
 
   def run(self, process_ctx):
     output = self.cmd.execute()
-    result = self.outproc.process(output)
-    if not self.keep_output:
-      result = process_ctx.reduce_result_f(result)
+    reduction = process_ctx.result_reduction if not self.keep_output else None
+    result = self.outproc.process(output, reduction)
     return JobResult(self.test_id, result)
 
 
@@ -44,49 +43,53 @@
   sends results to the previous processor.
   """
 
-  def __init__(self, jobs, context):
+  def __init__(self, jobs, outproc_factory=None):
     super(ExecutionProc, self).__init__()
     self._pool = pool.Pool(jobs)
-    self._context = context
+    self._outproc_factory = outproc_factory or (lambda t: t.output_proc)
     self._tests = {}
 
   def connect_to(self, next_proc):
     assert False, 'ExecutionProc cannot be connected to anything'
 
-  def start(self):
-    try:
-      it = self._pool.imap_unordered(
+  def run(self):
+    it = self._pool.imap_unordered(
         fn=run_job,
         gen=[],
         process_context_fn=create_process_context,
         process_context_args=[self._prev_requirement],
-      )
-      for pool_result in it:
-        if pool_result.heartbeat:
-          continue
-
-        job_result = pool_result.value
-        test_id, result = job_result
-
-        test, result.cmd = self._tests[test_id]
-        del self._tests[test_id]
-        self._send_result(test, result)
-    except KeyboardInterrupt:
-      raise
-    except:
-      traceback.print_exc()
-      raise
-    finally:
-      self._pool.terminate()
+    )
+    for pool_result in it:
+      self._unpack_result(pool_result)
 
   def next_test(self, test):
+    if self.is_stopped:
+      return False
+
     test_id = test.procid
-    cmd = test.get_command(self._context)
+    cmd = test.get_command()
     self._tests[test_id] = test, cmd
 
-    # TODO(majeski): Needs factory for outproc as in local/execution.py
-    outproc = test.output_proc
+    outproc = self._outproc_factory(test)
     self._pool.add([Job(test_id, cmd, outproc, test.keep_output)])
 
+    return True
+
   def result_for(self, test, result):
     assert False, 'ExecutionProc cannot receive results'
+
+  def stop(self):
+    super(ExecutionProc, self).stop()
+    self._pool.abort()
+
+  def _unpack_result(self, pool_result):
+    if pool_result.heartbeat:
+      self.heartbeat()
+      return
+
+    job_result = pool_result.value
+    test_id, result = job_result
+
+    test, result.cmd = self._tests[test_id]
+    del self._tests[test_id]
+    self._send_result(test, result)
diff --git a/src/v8/tools/testrunner/testproc/expectation.py b/src/v8/tools/testrunner/testproc/expectation.py
new file mode 100644
index 0000000..fdc9e3e
--- /dev/null
+++ b/src/v8/tools/testrunner/testproc/expectation.py
@@ -0,0 +1,28 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from . import base
+
+from testrunner.local import statusfile
+from testrunner.outproc import base as outproc
+
+class ForgiveTimeoutProc(base.TestProcProducer):
+  """Test processor passing tests and results through and forgiving timeouts."""
+  def __init__(self):
+    super(ForgiveTimeoutProc, self).__init__('no-timeout')
+
+  def _next_test(self, test):
+    subtest = self._create_subtest(test, 'no_timeout')
+    if subtest.expected_outcomes == outproc.OUTCOMES_PASS:
+      subtest.expected_outcomes = outproc.OUTCOMES_PASS_OR_TIMEOUT
+    elif subtest.expected_outcomes == outproc.OUTCOMES_FAIL:
+      subtest.expected_outcomes = outproc.OUTCOMES_FAIL_OR_TIMEOUT
+    elif statusfile.TIMEOUT not in subtest.expected_outcomes:
+      subtest.expected_outcomes = (
+          subtest.expected_outcomes + [statusfile.TIMEOUT])
+
+    return self._send_test(subtest)
+
+  def _result_for(self, test, subtest, result):
+    self._send_result(test, result)
diff --git a/src/v8/tools/testrunner/testproc/filter.py b/src/v8/tools/testrunner/testproc/filter.py
index 5081997..e2a5e97 100644
--- a/src/v8/tools/testrunner/testproc/filter.py
+++ b/src/v8/tools/testrunner/testproc/filter.py
@@ -59,25 +59,25 @@
     super(NameFilterProc, self).__init__()
 
     self._globs = defaultdict(list)
+    self._exact_matches = defaultdict(dict)
     for a in args:
       argpath = a.split('/')
       suitename = argpath[0]
       path = '/'.join(argpath[1:]) or '*'
-      self._globs[suitename].append(path)
+      if '*' in path:
+        self._globs[suitename].append(path)
+      else:
+        self._exact_matches[suitename][path] = True
 
     for s, globs in self._globs.iteritems():
       if not globs or '*' in globs:
-        self._globs[s] = []
+        self._globs[s] = ['*']
 
   def _filter(self, test):
-    globs = self._globs.get(test.suite.name)
-    if globs is None:
-      return True
-
-    if not globs:
-      return False
-
+    globs = self._globs.get(test.suite.name, [])
     for g in globs:
+      if g == '*': return False
       if fnmatch.fnmatch(test.path, g):
         return False
-    return True
+    exact_matches = self._exact_matches.get(test.suite.name, {})
+    return test.path not in exact_matches
diff --git a/src/v8/tools/testrunner/testproc/fuzzer.py b/src/v8/tools/testrunner/testproc/fuzzer.py
new file mode 100644
index 0000000..187145b
--- /dev/null
+++ b/src/v8/tools/testrunner/testproc/fuzzer.py
@@ -0,0 +1,287 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from collections import namedtuple
+import time
+
+from . import base
+
+
+class FuzzerConfig(object):
+  def __init__(self, probability, analyzer, fuzzer):
+    """
+    Args:
+      probability: of choosing this fuzzer (0; 10]
+      analyzer: instance of Analyzer class, can be None if no analysis is needed
+      fuzzer: instance of Fuzzer class
+    """
+    assert probability > 0 and probability <= 10
+
+    self.probability = probability
+    self.analyzer = analyzer
+    self.fuzzer = fuzzer
+
+
+class Analyzer(object):
+  def get_analysis_flags(self):
+    raise NotImplementedError()
+
+  def do_analysis(self, result):
+    raise NotImplementedError()
+
+
+class Fuzzer(object):
+  def create_flags_generator(self, rng, test, analysis_value):
+    """
+    Args:
+      rng: random number generator
+      test: test for which to create flags
+      analysis_value: value returned by the analyzer. None if there is no
+        corresponding analyzer to this fuzzer or the analysis phase is disabled
+    """
+    raise NotImplementedError()
+
+
+# TODO(majeski): Allow multiple subtests to run at once.
+class FuzzerProc(base.TestProcProducer):
+  def __init__(self, rng, count, fuzzers, disable_analysis=False):
+    """
+    Args:
+      rng: random number generator used to select flags and values for them
+      count: number of tests to generate based on each base test
+      fuzzers: list of FuzzerConfig instances
+      disable_analysis: disable analysis phase and filtering based on it. When
+        set, processor passes None as analysis result to fuzzers
+    """
+    super(FuzzerProc, self).__init__('Fuzzer')
+
+    self._rng = rng
+    self._count = count
+    self._fuzzer_configs = fuzzers
+    self._disable_analysis = disable_analysis
+    self._gens = {}
+
+  def setup(self, requirement=base.DROP_RESULT):
+    # Fuzzer is optimized to not store the results
+    assert requirement == base.DROP_RESULT
+    super(FuzzerProc, self).setup(requirement)
+
+  def _next_test(self, test):
+    if self.is_stopped:
+      return False
+
+    analysis_subtest = self._create_analysis_subtest(test)
+    if analysis_subtest:
+      return self._send_test(analysis_subtest)
+
+    self._gens[test.procid] = self._create_gen(test)
+    return self._try_send_next_test(test)
+
+  def _create_analysis_subtest(self, test):
+    if self._disable_analysis:
+      return None
+
+    analysis_flags = []
+    for fuzzer_config in self._fuzzer_configs:
+      if fuzzer_config.analyzer:
+        analysis_flags += fuzzer_config.analyzer.get_analysis_flags()
+
+    if analysis_flags:
+      analysis_flags = list(set(analysis_flags))
+      return self._create_subtest(test, 'analysis', flags=analysis_flags,
+                                  keep_output=True)
+
+
+  def _result_for(self, test, subtest, result):
+    if not self._disable_analysis:
+      if result is not None:
+        # Analysis phase, for fuzzing we drop the result.
+        if result.has_unexpected_output:
+          self._send_result(test, None)
+          return
+
+        self._gens[test.procid] = self._create_gen(test, result)
+
+    self._try_send_next_test(test)
+
+  def _create_gen(self, test, analysis_result=None):
+    # It will be called with analysis_result==None only when there is no
+    # analysis phase at all, so no fuzzer has its own analyzer.
+    gens = []
+    indexes = []
+    for i, fuzzer_config in enumerate(self._fuzzer_configs):
+      analysis_value = None
+      if analysis_result and fuzzer_config.analyzer:
+        analysis_value = fuzzer_config.analyzer.do_analysis(analysis_result)
+        if not analysis_value:
+          # Skip fuzzer for this test since it doesn't have analysis data
+          continue
+      p = fuzzer_config.probability
+      flag_gen = fuzzer_config.fuzzer.create_flags_generator(self._rng, test,
+                                                             analysis_value)
+      indexes += [len(gens)] * p
+      gens.append((p, flag_gen))
+
+    if not gens:
+      # No fuzzers for this test, skip it
+      return
+
+    i = 0
+    while not self._count or i < self._count:
+      main_index = self._rng.choice(indexes)
+      _, main_gen = gens[main_index]
+
+      flags = next(main_gen)
+      for index, (p, gen) in enumerate(gens):
+        if index == main_index:
+          continue
+        if self._rng.randint(1, 10) <= p:
+          flags += next(gen)
+
+      flags.append('--fuzzer-random-seed=%s' % self._next_seed())
+      yield self._create_subtest(test, str(i), flags=flags)
+
+      i += 1
+
+  def _try_send_next_test(self, test):
+    if not self.is_stopped:
+      for subtest in self._gens[test.procid]:
+        if self._send_test(subtest):
+          return True
+
+    del self._gens[test.procid]
+    return False
+
+  def _next_seed(self):
+    seed = None
+    while not seed:
+      seed = self._rng.randint(-2147483648, 2147483647)
+    return seed
+
+
+class ScavengeAnalyzer(Analyzer):
+  def get_analysis_flags(self):
+    return ['--fuzzer-gc-analysis']
+
+  def do_analysis(self, result):
+    for line in reversed(result.output.stdout.splitlines()):
+      if line.startswith('### Maximum new space size reached = '):
+        return int(float(line.split()[7]))
+
+
+class ScavengeFuzzer(Fuzzer):
+  def create_flags_generator(self, rng, test, analysis_value):
+    while True:
+      yield ['--stress-scavenge=%d' % (analysis_value or 100)]
+
+
+class MarkingAnalyzer(Analyzer):
+  def get_analysis_flags(self):
+    return ['--fuzzer-gc-analysis']
+
+  def do_analysis(self, result):
+    for line in reversed(result.output.stdout.splitlines()):
+      if line.startswith('### Maximum marking limit reached = '):
+        return int(float(line.split()[6]))
+
+
+class MarkingFuzzer(Fuzzer):
+  def create_flags_generator(self, rng, test, analysis_value):
+    while True:
+      yield ['--stress-marking=%d' % (analysis_value or 100)]
+
+
+class GcIntervalAnalyzer(Analyzer):
+  def get_analysis_flags(self):
+    return ['--fuzzer-gc-analysis']
+
+  def do_analysis(self, result):
+    for line in reversed(result.output.stdout.splitlines()):
+      if line.startswith('### Allocations = '):
+        return int(float(line.split()[3][:-1]))
+
+
+class GcIntervalFuzzer(Fuzzer):
+  def create_flags_generator(self, rng, test, analysis_value):
+    if analysis_value:
+      value = analysis_value / 10
+    else:
+      value = 10000
+    while True:
+      yield ['--random-gc-interval=%d' % value]
+
+
+class CompactionFuzzer(Fuzzer):
+  def create_flags_generator(self, rng, test, analysis_value):
+    while True:
+      yield ['--stress-compaction-random']
+
+
+class TaskDelayFuzzer(Fuzzer):
+  def create_flags_generator(self, rng, test, analysis_value):
+    while True:
+      yield ['--stress-delay-tasks']
+
+
+class ThreadPoolSizeFuzzer(Fuzzer):
+  def create_flags_generator(self, rng, test, analysis_value):
+    while True:
+      yield ['--thread-pool-size=%d' % rng.randint(1, 8)]
+
+
+class DeoptAnalyzer(Analyzer):
+  MAX_DEOPT=1000000000
+
+  def __init__(self, min_interval):
+    super(DeoptAnalyzer, self).__init__()
+    self._min = min_interval
+
+  def get_analysis_flags(self):
+    return ['--deopt-every-n-times=%d' % self.MAX_DEOPT,
+            '--print-deopt-stress']
+
+  def do_analysis(self, result):
+    for line in reversed(result.output.stdout.splitlines()):
+      if line.startswith('=== Stress deopt counter: '):
+        counter = self.MAX_DEOPT - int(line.split(' ')[-1])
+        if counter < self._min:
+          # Skip this test since we won't generate any meaningful interval with
+          # given minimum.
+          return None
+        return counter
+
+
+class DeoptFuzzer(Fuzzer):
+  def __init__(self, min_interval):
+    super(DeoptFuzzer, self).__init__()
+    self._min = min_interval
+
+  def create_flags_generator(self, rng, test, analysis_value):
+    while True:
+      if analysis_value:
+        value = analysis_value / 2
+      else:
+        value = 10000
+      interval = rng.randint(self._min, max(value, self._min))
+      yield ['--deopt-every-n-times=%d' % interval]
+
+
+FUZZERS = {
+  'compaction': (None, CompactionFuzzer),
+  'delay': (None, TaskDelayFuzzer),
+  'deopt': (DeoptAnalyzer, DeoptFuzzer),
+  'gc_interval': (GcIntervalAnalyzer, GcIntervalFuzzer),
+  'marking': (MarkingAnalyzer, MarkingFuzzer),
+  'scavenge': (ScavengeAnalyzer, ScavengeFuzzer),
+  'threads': (None, ThreadPoolSizeFuzzer),
+}
+
+
+def create_fuzzer_config(name, probability, *args, **kwargs):
+  analyzer_class, fuzzer_class = FUZZERS[name]
+  return FuzzerConfig(
+      probability,
+      analyzer_class(*args, **kwargs) if analyzer_class else None,
+      fuzzer_class(*args, **kwargs),
+  )
diff --git a/src/v8/tools/testrunner/testproc/loader.py b/src/v8/tools/testrunner/testproc/loader.py
index 0a3d0df..f4afeae 100644
--- a/src/v8/tools/testrunner/testproc/loader.py
+++ b/src/v8/tools/testrunner/testproc/loader.py
@@ -9,19 +9,34 @@
   """First processor in the chain that passes all tests to the next processor.
   """
 
-  def load_tests(self, tests):
-    loaded = set()
-    for test in tests:
-      if test.procid in loaded:
-        print 'Warning: %s already obtained' % test.procid
-        continue
+  def __init__(self, tests):
+    super(LoadProc, self).__init__()
 
-      loaded.add(test.procid)
-      self._send_test(test)
+    self.tests = tests
+
+  def load_initial_tests(self, initial_batch_size):
+    """
+    Args:
+      initial_batch_size: initial number of tests to load; remaining tests
+        are loaded on demand as results for earlier tests arrive
+    """
+    loaded_tests = 0
+    while loaded_tests < initial_batch_size:
+      try:
+        t = next(self.tests)
+      except StopIteration:
+        return
+
+      if self._send_test(t):
+        loaded_tests += 1
 
   def next_test(self, test):
     assert False, 'Nothing can be connected to the LoadProc'
 
   def result_for(self, test, result):
-    # Ignore all results.
-    pass
+    try:
+      while not self._send_test(next(self.tests)):
+        pass
+    except StopIteration:
+      # No more tests to load.
+      pass
diff --git a/src/v8/tools/testrunner/testproc/progress.py b/src/v8/tools/testrunner/testproc/progress.py
index 78514f7..aad6740 100644
--- a/src/v8/tools/testrunner/testproc/progress.py
+++ b/src/v8/tools/testrunner/testproc/progress.py
@@ -2,13 +2,22 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import json
 import os
+import platform
+import subprocess
 import sys
 import time
 
 from . import base
-from ..local import junit_output
+
+
+# Base dir of the build products for Release and Debug.
+OUT_DIR = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '..', '..', '..', 'out'))
 
 
 def print_failure_header(test):
@@ -16,29 +25,22 @@
     negative_marker = '[negative] '
   else:
     negative_marker = ''
-  print "=== %(label)s %(negative)s===" % {
+  print("=== %(label)s %(negative)s===" % {
     'label': test,
     'negative': negative_marker,
-  }
-
-
-class TestsCounter(base.TestProcObserver):
-  def __init__(self):
-    super(TestsCounter, self).__init__()
-    self.total = 0
-
-  def _on_next_test(self, test):
-    self.total += 1
+  })
 
 
 class ResultsTracker(base.TestProcObserver):
-  def __init__(self):
+  """Tracks number of results and stops to run tests if max_failures reached."""
+  def __init__(self, max_failures):
     super(ResultsTracker, self).__init__()
     self._requirement = base.DROP_OUTPUT
 
     self.failed = 0
     self.remaining = 0
     self.total = 0
+    self.max_failures = max_failures
 
   def _on_next_test(self, test):
     self.total += 1
@@ -48,6 +50,9 @@
     self.remaining -= 1
     if result.has_unexpected_output:
       self.failed += 1
+      if self.max_failures and self.failed >= self.max_failures:
+        print('>>> Too many failures, exiting...')
+        self.stop()
 
 
 class ProgressIndicator(base.TestProcObserver):
@@ -61,10 +66,6 @@
     self._requirement = base.DROP_PASS_OUTPUT
 
     self._failed = []
-    self._total = 0
-
-  def _on_next_test(self, test):
-    self._total += 1
 
   def _on_result_for(self, test, result):
     # TODO(majeski): Support for dummy/grouped results
@@ -73,36 +74,45 @@
 
   def finished(self):
     crashed = 0
-    print
+    print()
     for test, result in self._failed:
       print_failure_header(test)
       if result.output.stderr:
-        print "--- stderr ---"
-        print result.output.stderr.strip()
+        print("--- stderr ---")
+        print(result.output.stderr.strip())
       if result.output.stdout:
-        print "--- stdout ---"
-        print result.output.stdout.strip()
-      print "Command: %s" % result.cmd.to_string()
+        print("--- stdout ---")
+        print(result.output.stdout.strip())
+      print("Command: %s" % result.cmd.to_string())
       if result.output.HasCrashed():
-        print "exit code: %d" % result.output.exit_code
-        print "--- CRASHED ---"
+        print("exit code: %d" % result.output.exit_code)
+        print("--- CRASHED ---")
         crashed += 1
       if result.output.HasTimedOut():
-        print "--- TIMEOUT ---"
+        print("--- TIMEOUT ---")
     if len(self._failed) == 0:
-      print "==="
-      print "=== All tests succeeded"
-      print "==="
+      print("===")
+      print("=== All tests succeeded")
+      print("===")
     else:
-      print
-      print "==="
-      print "=== %i tests failed" % len(self._failed)
+      print()
+      print("===")
+      print("=== %i tests failed" % len(self._failed))
       if crashed > 0:
-        print "=== %i tests CRASHED" % crashed
-      print "==="
+        print("=== %i tests CRASHED" % crashed)
+      print("===")
 
 
 class VerboseProgressIndicator(SimpleProgressIndicator):
+  def __init__(self):
+    super(VerboseProgressIndicator, self).__init__()
+    self._last_printed_time = time.time()
+
+  def _print(self, text):
+    print(text)
+    sys.stdout.flush()
+    self._last_printed_time = time.time()
+
   def _on_result_for(self, test, result):
     super(VerboseProgressIndicator, self)._on_result_for(test, result)
     # TODO(majeski): Support for dummy/grouped results
@@ -113,12 +123,31 @@
         outcome = 'FAIL'
     else:
       outcome = 'pass'
-    print 'Done running %s: %s' % (test, outcome)
-    sys.stdout.flush()
+
+    self._print('Done running %s %s: %s' % (
+      test, test.variant or 'default', outcome))
+
+  # TODO(machenbach): Remove this platform specific hack and implement a proper
+  # feedback channel from the workers, providing which tests are currently run.
+  def _print_processes_linux(self):
+    if platform.system() == 'Linux':
+      try:
+        cmd = 'ps -aux | grep "%s"' % OUT_DIR
+        output = subprocess.check_output(cmd, shell=True)
+        self._print('List of processes:')
+        for line in (output or '').splitlines():
+          # Show command with pid, but other process info cut off.
+          self._print('pid: %s cmd: %s' %
+                      (line.split()[1], line[line.index(OUT_DIR):]))
+      except:
+        pass
 
   def _on_heartbeat(self):
-    print 'Still working...'
-    sys.stdout.flush()
+    if time.time() - self._last_printed_time > 30:
+      # Print something every 30 seconds to not get killed by an output
+      # timeout.
+      self._print('Still working...')
+      self._print_processes_linux()
 
 
 class DotsProgressIndicator(SimpleProgressIndicator):
@@ -127,6 +156,7 @@
     self._count = 0
 
   def _on_result_for(self, test, result):
+    super(DotsProgressIndicator, self)._on_result_for(test, result)
     # TODO(majeski): Support for dummy/grouped results
     self._count += 1
     if self._count > 1 and self._count % 50 == 1:
@@ -155,12 +185,11 @@
     self._last_status_length = 0
     self._start_time = time.time()
 
-    self._total = 0
     self._passed = 0
     self._failed = 0
 
-  def _on_next_test(self, test):
-    self._total += 1
+  def set_test_count(self, test_count):
+    self._total = test_count
 
   def _on_result_for(self, test, result):
     # TODO(majeski): Support for dummy/grouped results
@@ -178,27 +207,27 @@
       self._clear_line(self._last_status_length)
       print_failure_header(test)
       if len(stdout):
-        print self._templates['stdout'] % stdout
+        print(self._templates['stdout'] % stdout)
       if len(stderr):
-        print self._templates['stderr'] % stderr
-      print "Command: %s" % result.cmd
+        print(self._templates['stderr'] % stderr)
+      print("Command: %s" % result.cmd.to_string(relative=True))
       if output.HasCrashed():
-        print "exit code: %d" % output.exit_code
-        print "--- CRASHED ---"
+        print("exit code: %d" % output.exit_code)
+        print("--- CRASHED ---")
       if output.HasTimedOut():
-        print "--- TIMEOUT ---"
+        print("--- TIMEOUT ---")
 
   def finished(self):
     self._print_progress('Done')
-    print
+    print()
 
   def _print_progress(self, name):
     self._clear_line(self._last_status_length)
     elapsed = time.time() - self._start_time
-    if not self._total:
-      progress = 0
-    else:
+    if self._total:
       progress = (self._passed + self._failed) * 100 // self._total
+    else:
+      progress = 0
     status = self._templates['status_line'] % {
       'passed': self._passed,
       'progress': progress,
@@ -209,7 +238,7 @@
     }
     status = self._truncate(status, 78)
     self._last_status_length = len(status)
-    print status,
+    print(status, end='')
     sys.stdout.flush()
 
   def _truncate(self, string, length):
@@ -235,7 +264,7 @@
     super(ColorProgressIndicator, self).__init__(templates)
 
   def _clear_line(self, last_length):
-    print "\033[1K\r",
+    print("\033[1K\r", end='')
 
 
 class MonochromeProgressIndicator(CompactProgressIndicator):
@@ -249,50 +278,11 @@
     super(MonochromeProgressIndicator, self).__init__(templates)
 
   def _clear_line(self, last_length):
-    print ("\r" + (" " * last_length) + "\r"),
-
-
-class JUnitTestProgressIndicator(ProgressIndicator):
-  def __init__(self, junitout, junittestsuite):
-    super(JUnitTestProgressIndicator, self).__init__()
-    self._requirement = base.DROP_PASS_STDOUT
-
-    self.outputter = junit_output.JUnitTestOutput(junittestsuite)
-    if junitout:
-      self.outfile = open(junitout, "w")
-    else:
-      self.outfile = sys.stdout
-
-  def _on_result_for(self, test, result):
-    # TODO(majeski): Support for dummy/grouped results
-    fail_text = ""
-    output = result.output
-    if result.has_unexpected_output:
-      stdout = output.stdout.strip()
-      if len(stdout):
-        fail_text += "stdout:\n%s\n" % stdout
-      stderr = output.stderr.strip()
-      if len(stderr):
-        fail_text += "stderr:\n%s\n" % stderr
-      fail_text += "Command: %s" % result.cmd.to_string()
-      if output.HasCrashed():
-        fail_text += "exit code: %d\n--- CRASHED ---" % output.exit_code
-      if output.HasTimedOut():
-        fail_text += "--- TIMEOUT ---"
-    self.outputter.HasRunTest(
-        test_name=str(test),
-        test_cmd=result.cmd.to_string(relative=True),
-        test_duration=output.duration,
-        test_failure=fail_text)
-
-  def finished(self):
-    self.outputter.FinishAndWrite(self.outfile)
-    if self.outfile != sys.stdout:
-      self.outfile.close()
+    print(("\r" + (" " * last_length) + "\r"), end='')
 
 
 class JsonTestProgressIndicator(ProgressIndicator):
-  def __init__(self, json_test_results, arch, mode, random_seed):
+  def __init__(self, framework_name, json_test_results, arch, mode):
     super(JsonTestProgressIndicator, self).__init__()
     # We want to drop stdout/err for all passed tests on the first try, but we
     # need to get outputs for all runs after the first one. To accommodate that,
@@ -300,10 +290,10 @@
     # keep_output set to True in the RerunProc.
     self._requirement = base.DROP_PASS_STDOUT
 
+    self.framework_name = framework_name
     self.json_test_results = json_test_results
     self.arch = arch
     self.mode = mode
-    self.random_seed = random_seed
     self.results = []
     self.tests = []
 
@@ -338,12 +328,11 @@
         "result": test.output_proc.get_outcome(output),
         "expected": test.expected_outcomes,
         "duration": output.duration,
-
-        # TODO(machenbach): This stores only the global random seed from the
-        # context and not possible overrides when using random-seed stress.
-        "random_seed": self.random_seed,
+        "random_seed": test.random_seed,
         "target_name": test.get_shell(),
         "variant": test.variant,
+        "variant_flags": test.variant_flags,
+        "framework_name": self.framework_name,
       })
 
   def finished(self):
@@ -361,7 +350,7 @@
           float(len(self.tests)))
 
     # Sort tests by duration.
-    self.tests.sort(key=lambda (_, duration, cmd): duration, reverse=True)
+    self.tests.sort(key=lambda __duration_cmd: __duration_cmd[1], reverse=True)
     slowest_tests = [
       {
         "name": str(test),
diff --git a/src/v8/tools/testrunner/testproc/rerun.py b/src/v8/tools/testrunner/testproc/rerun.py
index 7f96e02..d085c55 100644
--- a/src/v8/tools/testrunner/testproc/rerun.py
+++ b/src/v8/tools/testrunner/testproc/rerun.py
@@ -19,7 +19,7 @@
     self._rerun_total_left = rerun_max_total
 
   def _next_test(self, test):
-    self._send_next_subtest(test)
+    return self._send_next_subtest(test)
 
   def _result_for(self, test, subtest, result):
     # First result
@@ -34,7 +34,7 @@
     results = self._results[test.procid]
     results.append(result)
 
-    if self._needs_rerun(test, result):
+    if not self.is_stopped and self._needs_rerun(test, result):
       self._rerun[test.procid] += 1
       if self._rerun_total_left is not None:
         self._rerun_total_left -= 1
@@ -52,7 +52,7 @@
 
   def _send_next_subtest(self, test, run=0):
     subtest = self._create_subtest(test, str(run + 1), keep_output=(run != 0))
-    self._send_test(subtest)
+    return self._send_test(subtest)
 
   def _finalize_test(self, test):
     del self._rerun[test.procid]
diff --git a/src/v8/tools/testrunner/testproc/seed.py b/src/v8/tools/testrunner/testproc/seed.py
new file mode 100644
index 0000000..160eac8
--- /dev/null
+++ b/src/v8/tools/testrunner/testproc/seed.py
@@ -0,0 +1,63 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import random
+from collections import defaultdict
+
+from . import base
+from ..utils import random_utils
+
+
+class SeedProc(base.TestProcProducer):
+  def __init__(self, count, seed=None, parallel_subtests=1):
+    """
+    Args:
+      count: How many subtests with different seeds to create for each test.
+        0 means infinite.
+      seed: seed to use. None means random seed for each subtest.
+      parallel_subtests: How many subtests of each test to run at the same time.
+    """
+    super(SeedProc, self).__init__('Seed')
+    self._count = count
+    self._seed = seed
+    self._last_idx = defaultdict(int)
+    self._todo = defaultdict(int)
+    self._parallel_subtests = parallel_subtests
+    if count:
+      self._parallel_subtests = min(self._parallel_subtests, count)
+
+  def setup(self, requirement=base.DROP_RESULT):
+    super(SeedProc, self).setup(requirement)
+
+    # SeedProc is optimized for dropping the result
+    assert requirement == base.DROP_RESULT
+
+  def _next_test(self, test):
+    is_loaded = False
+    for _ in range(0, self._parallel_subtests):
+      is_loaded |= self._try_send_next_test(test)
+
+    return is_loaded
+
+  def _result_for(self, test, subtest, result):
+    self._todo[test.procid] -= 1
+    if not self._try_send_next_test(test):
+      if not self._todo.get(test.procid):
+        del self._last_idx[test.procid]
+        del self._todo[test.procid]
+        self._send_result(test, None)
+
+  def _try_send_next_test(self, test):
+    def create_subtest(idx):
+      seed = self._seed or random_utils.random_seed()
+      return self._create_subtest(test, idx, random_seed=seed)
+
+    num = self._last_idx[test.procid]
+    if not self._count or num < self._count:
+      num += 1
+      self._todo[test.procid] += 1
+      self._last_idx[test.procid] = num
+      return self._send_test(create_subtest(num))
+
+    return False
diff --git a/src/v8/tools/testrunner/testproc/shard.py b/src/v8/tools/testrunner/testproc/shard.py
index 1caac9f..9475ea1 100644
--- a/src/v8/tools/testrunner/testproc/shard.py
+++ b/src/v8/tools/testrunner/testproc/shard.py
@@ -5,10 +5,21 @@
 from . import base
 
 
+# Alphabet size determines the hashing radix. Choosing a prime number prevents
+# clustering of the hashes.
+HASHING_ALPHABET_SIZE = 2 ** 7 -1
+
+def radix_hash(capacity, key):
+  h = 0
+  for character in key:
+    h = (h * HASHING_ALPHABET_SIZE + ord(character)) % capacity
+
+  return h
+
+
 class ShardProc(base.TestProcFilter):
   """Processor distributing tests between shards.
-  It simply passes every n-th test. To be deterministic it has to be placed
-  before all processors that generate tests dynamically.
+  It hashes the unique test identifiers and uses the hash to shard tests.
   """
   def __init__(self, myid, shards_count):
     """
@@ -22,9 +33,6 @@
 
     self._myid = myid
     self._shards_count = shards_count
-    self._last = 0
 
   def _filter(self, test):
-    res = self._last != self._myid
-    self._last = (self._last + 1) % self._shards_count
-    return res
+    return self._myid != radix_hash(self._shards_count, test.procid)
diff --git a/src/v8/tools/testrunner/testproc/shard_unittest.py b/src/v8/tools/testrunner/testproc/shard_unittest.py
new file mode 100755
index 0000000..33a094e
--- /dev/null
+++ b/src/v8/tools/testrunner/testproc/shard_unittest.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+import tempfile
+import unittest
+
+# Needed because the test runner contains relative imports.
+TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.dirname(
+    os.path.abspath(__file__))))
+sys.path.append(TOOLS_PATH)
+
+from testrunner.testproc.shard import radix_hash
+
+
+class TestRadixHashing(unittest.TestCase):
+  def test_hash_character_by_radix(self):
+    self.assertEqual(97, radix_hash(capacity=2**32, key="a"))
+
+  def test_hash_character_by_radix_with_capacity(self):
+    self.assertEqual(6, radix_hash(capacity=7, key="a"))
+
+  def test_hash_string(self):
+    self.assertEqual(6, radix_hash(capacity=7, key="ab"))
+
+  def test_hash_test_id(self):
+    self.assertEqual(
+      5,
+      radix_hash(capacity=7,
+                 key="test262/Map/class-private-method-Variant-0-1"))
+
+  def test_hash_boundaries(self):
+    total_variants = 5
+    cases = []
+    for case in [
+      "test262/Map/class-private-method",
+      "test262/Map/class-public-method",
+      "test262/Map/object-retrieval",
+      "test262/Map/object-deletion",
+      "test262/Map/object-creation",
+      "test262/Map/garbage-collection",
+    ]:
+      for variant_index in range(total_variants):
+        cases.append("%s-Variant-%d" % (case, variant_index))
+
+    for case in cases:
+      self.assertTrue(0 <= radix_hash(capacity=7, key=case) < 7)
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/src/v8/tools/testrunner/testproc/sigproc.py b/src/v8/tools/testrunner/testproc/sigproc.py
new file mode 100644
index 0000000..f29fa22
--- /dev/null
+++ b/src/v8/tools/testrunner/testproc/sigproc.py
@@ -0,0 +1,34 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import signal
+
+from . import base
+from testrunner.local import utils
+
+
+class SignalProc(base.TestProcObserver):
+  def __init__(self):
+    super(SignalProc, self).__init__()
+    self.exit_code = utils.EXIT_CODE_PASS
+
+  def setup(self, *args, **kwargs):
+    super(SignalProc, self).setup(*args, **kwargs)
+    # It should be called after processors are chained together so that a
+    # caught signal is not lost.
+    signal.signal(signal.SIGINT, self._on_ctrlc)
+    signal.signal(signal.SIGTERM, self._on_sigterm)
+
+  def _on_ctrlc(self, _signum, _stack_frame):
+    print('>>> Ctrl-C detected, early abort...')
+    self.exit_code = utils.EXIT_CODE_INTERRUPTED
+    self.stop()
+
+  def _on_sigterm(self, _signum, _stack_frame):
+    print('>>> SIGTERM received, early abort...')
+    self.exit_code = utils.EXIT_CODE_TERMINATED
+    self.stop()
diff --git a/src/v8/tools/testrunner/testproc/timeout.py b/src/v8/tools/testrunner/testproc/timeout.py
new file mode 100644
index 0000000..54dc60e
--- /dev/null
+++ b/src/v8/tools/testrunner/testproc/timeout.py
@@ -0,0 +1,29 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import time
+
+from . import base
+
+
+class TimeoutProc(base.TestProcObserver):
+  def __init__(self, duration_sec):
+    super(TimeoutProc, self).__init__()
+    self._duration_sec = duration_sec
+    self._start = time.time()
+
+  def _on_next_test(self, test):
+    self._on_event()
+
+  def _on_result_for(self, test, result):
+    self._on_event()
+
+  def _on_heartbeat(self):
+    self._on_event()
+
+  def _on_event(self):
+    if not self.is_stopped:
+      if time.time() - self._start > self._duration_sec:
+        print('>>> Total timeout reached.')
+        self.stop()
diff --git a/src/v8/tools/testrunner/testproc/variant.py b/src/v8/tools/testrunner/testproc/variant.py
index dba1af9..0164ad8 100644
--- a/src/v8/tools/testrunner/testproc/variant.py
+++ b/src/v8/tools/testrunner/testproc/variant.py
@@ -39,21 +39,22 @@
   def _next_test(self, test):
     gen = self._variants_gen(test)
     self._next_variant[test.procid] = gen
-    self._try_send_new_subtest(test, gen)
+    return self._try_send_new_subtest(test, gen)
 
   def _result_for(self, test, subtest, result):
     gen = self._next_variant[test.procid]
-    self._try_send_new_subtest(test, gen)
+    if not self._try_send_new_subtest(test, gen):
+      self._send_result(test, None)
 
   def _try_send_new_subtest(self, test, variants_gen):
     for variant, flags, suffix in variants_gen:
       subtest = self._create_subtest(test, '%s-%s' % (variant, suffix),
                                      variant=variant, flags=flags)
-      self._send_test(subtest)
-      return
+      if self._send_test(subtest):
+        return True
 
     del self._next_variant[test.procid]
-    self._send_result(test, None)
+    return False
 
   def _variants_gen(self, test):
     """Generator producing (variant, flags, procid suffix) tuples."""
diff --git a/src/v8/tools/testrunner/testproc/variant_unittest.py b/src/v8/tools/testrunner/testproc/variant_unittest.py
new file mode 100755
index 0000000..56e28c8
--- /dev/null
+++ b/src/v8/tools/testrunner/testproc/variant_unittest.py
@@ -0,0 +1,172 @@
+#!/usr/bin/env python
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+import tempfile
+import unittest
+
+# Needed because the test runner contains relative imports.
+TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.dirname(
+    os.path.abspath(__file__))))
+sys.path.append(TOOLS_PATH)
+
+from testrunner.testproc import base
+from testrunner.testproc.variant import VariantProc
+
+
+class FakeResultObserver(base.TestProcObserver):
+  def __init__(self):
+    super(FakeResultObserver, self).__init__()
+
+    self.results = set()
+
+  def result_for(self, test, result):
+    self.results.add((test, result))
+
+
+class FakeFilter(base.TestProcFilter):
+  def __init__(self, filter_predicate):
+    super(FakeFilter, self).__init__()
+
+    self._filter_predicate = filter_predicate
+
+    self.loaded = set()
+    self.call_counter = 0
+
+  def next_test(self, test):
+    self.call_counter += 1
+
+    if self._filter_predicate(test):
+      return False
+
+    self.loaded.add(test)
+    return True
+
+
+class FakeSuite(object):
+  def __init__(self, name):
+    self.name = name
+
+
+class FakeTest(object):
+  def __init__(self, procid):
+    self.suite = FakeSuite("fake_suite")
+    self.procid = procid
+
+    self.keep_output = False
+
+  def create_subtest(self, proc, subtest_id, **kwargs):
+    variant = kwargs['variant']
+
+    variant.origin = self
+    return variant
+
+
+class FakeVariantGen(object):
+  def __init__(self, variants):
+    self._variants = variants
+
+  def gen(self, test):
+    for variant in self._variants:
+      yield variant, [], "fake_suffix"
+
+
+class TestVariantProcLoading(unittest.TestCase):
+  def setUp(self):
+    self.test = FakeTest("test")
+
+  def _simulate_proc(self, variants):
+    """Simulates the pipeline by feeding the given instantiated test
+    variants into the VariantProc; returns what the filter loaded."""
+    variants_mapping = {self.test: variants}
+
+    # Creates a Variant processor containing the possible types of test
+    # variants.
+    self.variant_proc = VariantProc(variants=["to_filter", "to_load"])
+    self.variant_proc._variant_gens = {
+      "fake_suite": FakeVariantGen(variants)}
+
+    # FakeFilter only lets tests passing the predicate to be loaded.
+    self.fake_filter = FakeFilter(
+      filter_predicate=(lambda t: t.procid == "to_filter"))
+
+    # FakeResultObserver to verify that VariantProc calls result_for correctly.
+    self.fake_result_observer = FakeResultObserver()
+
+    # Links up processors together to form a test processing pipeline.
+    self.variant_proc._prev_proc = self.fake_result_observer
+    self.fake_filter._prev_proc = self.variant_proc
+    self.variant_proc._next_proc = self.fake_filter
+
+    # Injects the test into the VariantProc
+    is_loaded = self.variant_proc.next_test(self.test)
+
+    # Verifies the behavioral consistency by using the instrumentation in
+    # FakeFilter
+    loaded_variants = list(self.fake_filter.loaded)
+    self.assertEqual(is_loaded, any(loaded_variants))
+    return self.fake_filter.loaded, self.fake_filter.call_counter
+
+  def test_filters_first_two_variants(self):
+    variants = [
+      FakeTest('to_filter'),
+      FakeTest('to_filter'),
+      FakeTest('to_load'),
+      FakeTest('to_load'),
+    ]
+    expected_load_results = {variants[2]}
+
+    load_results, call_count = self._simulate_proc(variants)
+
+    self.assertSetEqual(expected_load_results, load_results)
+    self.assertEqual(call_count, 3)
+
+  def test_stops_loading_after_first_successful_load(self):
+    variants = [
+      FakeTest('to_load'),
+      FakeTest('to_load'),
+      FakeTest('to_filter'),
+    ]
+    expected_load_results = {variants[0]}
+
+    loaded_tests, call_count = self._simulate_proc(variants)
+
+    self.assertSetEqual(expected_load_results, loaded_tests)
+    self.assertEqual(call_count, 1)
+
+  def test_return_result_when_out_of_variants(self):
+    variants = [
+      FakeTest('to_filter'),
+      FakeTest('to_load'),
+    ]
+
+    self._simulate_proc(variants)
+
+    self.variant_proc.result_for(variants[1], None)
+
+    expected_results = {(self.test, None)}
+
+    self.assertSetEqual(expected_results, self.fake_result_observer.results)
+
+  def test_return_result_after_running_variants(self):
+    variants = [
+      FakeTest('to_filter'),
+      FakeTest('to_load'),
+      FakeTest('to_load'),
+    ]
+
+    self._simulate_proc(variants)
+    self.variant_proc.result_for(variants[1], None)
+
+    self.assertSetEqual(set(variants[1:]), self.fake_filter.loaded)
+
+    self.variant_proc.result_for(variants[2], None)
+
+    expected_results = {(self.test, None)}
+    self.assertSetEqual(expected_results, self.fake_result_observer.results)
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/src/v8/tools/testrunner/trycatch_loader.js b/src/v8/tools/testrunner/trycatch_loader.js
new file mode 100644
index 0000000..737c8e4
--- /dev/null
+++ b/src/v8/tools/testrunner/trycatch_loader.js
@@ -0,0 +1,42 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+// Wrapper loading javascript tests passed as arguments used by gc fuzzer.
+// It ignores all exceptions and runs tests in separate namespaces.
+//
+// It can't prevent the %AbortJS function from aborting execution, so it should
+// be used with d8's --disable-abortjs flag to ignore all possible errors inside
+// tests.
+
+// We use -- as an additional separator for test preamble files and test files.
+// The preamble files (before --) will be loaded in each realm before each
+// test.
+var separator = arguments.indexOf("--")
+var preamble = arguments.slice(0, separator)
+var tests = arguments.slice(separator + 1)
+
+var preambleString = ""
+for (let jstest of preamble) {
+  preambleString += "load(\"" + jstest + "\");"
+}
+
+for (let jstest of tests) {
+  print("Loading " + jstest);
+  let start = performance.now();
+
+  // anonymous function to not populate global namespace.
+  (function () {
+    let realm = Realm.create();
+    try {
+      Realm.eval(realm, preambleString + "load(\"" + jstest + "\");");
+    } catch (err) {
+      // ignore all errors
+    }
+    Realm.dispose(realm);
+  })();
+
+  let durationSec = ((performance.now() - start) / 1000.0).toFixed(2);
+  print("Duration " + durationSec + "s");
+}
diff --git a/src/v8/tools/testrunner/utils/__init__.py b/src/v8/tools/testrunner/utils/__init__.py
new file mode 100644
index 0000000..4433538
--- /dev/null
+++ b/src/v8/tools/testrunner/utils/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/src/v8/tools/testrunner/utils/dump_build_config_gyp.py b/src/v8/tools/testrunner/utils/dump_build_config_gyp.py
index 7f72627..963b0e2 100644
--- a/src/v8/tools/testrunner/utils/dump_build_config_gyp.py
+++ b/src/v8/tools/testrunner/utils/dump_build_config_gyp.py
@@ -11,6 +11,9 @@
 """
 # TODO(machenbach): Remove this when gyp is deprecated.
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import json
 import os
 import sys
@@ -47,7 +50,7 @@
   try:
     return k, json.loads(v2)
   except ValueError as e:
-    print(k, v, v2)
+    print((k, v, v2))
     raise e
 
 with open(sys.argv[1], 'w') as f:
diff --git a/src/v8/tools/testrunner/utils/random_utils.py b/src/v8/tools/testrunner/utils/random_utils.py
new file mode 100644
index 0000000..0d2cb3f
--- /dev/null
+++ b/src/v8/tools/testrunner/utils/random_utils.py
@@ -0,0 +1,13 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import random
+
+
+def random_seed():
+  """Returns random, non-zero seed."""
+  seed = 0
+  while not seed:
+    seed = random.SystemRandom().randint(-2147483648, 2147483647)
+  return seed
diff --git a/src/v8/tools/tick-processor.html b/src/v8/tools/tick-processor.html
index b841cc0..32f8d66 100644
--- a/src/v8/tools/tick-processor.html
+++ b/src/v8/tools/tick-processor.html
@@ -27,10 +27,10 @@
 
 <html lang="en">
 <head>
-  <meta charset="utf-8"/>
+  <meta charset="utf-8">
   <title>V8 Tick Processor</title>
 
-  <style type="text/css">
+  <style>
     body {
       font-family: Verdana, Arial, Helvetica, sans-serif;
       font-size: 10pt;
@@ -53,7 +53,7 @@
   <script src="arguments.js"></script>
   <script src="tickprocessor.js"></script>
 
-  <script type="text/javascript">
+  <script>
 
 var v8log_content;
 var textout;
@@ -89,6 +89,7 @@
     ignoreUnknown: false,
     separateIc: true,
     targetRootFS: '',
+    apkEmbeddedLibrary: '',
     nm: 'nm'
   };
 
@@ -100,7 +101,7 @@
 
   var tickProcessor = new TickProcessor(
     new (entriesProviders[DEFAULTS.platform])(
-        DEFAULTS.nm, DEFAULTS.targetRootFS),
+        DEFAULTS.nm, DEFAULTS.targetRootFS, DEFAULTS.apkEmbeddedLibrary),
     DEFAULTS.separateIc, DEFAULTS.callGraphSize,
     DEFAULTS.ignoreUnknown, DEFAULTS.stateFilter);
 
diff --git a/src/v8/tools/tickprocessor-driver.js b/src/v8/tools/tickprocessor-driver.js
index 58844c1..93331cf 100644
--- a/src/v8/tools/tickprocessor-driver.js
+++ b/src/v8/tools/tickprocessor-driver.js
@@ -62,7 +62,8 @@
   sourceMap = SourceMap.load(params.sourceMap);
 }
 var tickProcessor = new TickProcessor(
-  new (entriesProviders[params.platform])(params.nm, params.targetRootFS),
+  new (entriesProviders[params.platform])(params.nm, params.targetRootFS,
+                                          params.apkEmbeddedLibrary),
   params.separateIc,
   params.separateBytecodes,
   params.separateBuiltins,
diff --git a/src/v8/tools/tickprocessor.js b/src/v8/tools/tickprocessor.js
index 057d328..34c2249 100644
--- a/src/v8/tools/tickprocessor.js
+++ b/src/v8/tools/tickprocessor.js
@@ -102,42 +102,43 @@
     preprocessJson) {
   this.preprocessJson = preprocessJson;
   LogReader.call(this, {
-      'shared-library': { parsers: [null, parseInt, parseInt, parseInt],
+      'shared-library': { parsers: [parseString, parseInt, parseInt, parseInt],
           processor: this.processSharedLibrary },
       'code-creation': {
-          parsers: [null, parseInt, parseInt, parseInt, parseInt,
-                    null, 'var-args'],
+          parsers: [parseString, parseInt, parseInt, parseInt, parseInt,
+                    parseString, parseVarArgs],
           processor: this.processCodeCreation },
       'code-deopt': {
           parsers: [parseInt, parseInt, parseInt, parseInt, parseInt,
-                    null, null, null],
+                    parseString, parseString, parseString],
           processor: this.processCodeDeopt },
       'code-move': { parsers: [parseInt, parseInt, ],
           processor: this.processCodeMove },
       'code-delete': { parsers: [parseInt],
           processor: this.processCodeDelete },
       'code-source-info': {
-          parsers: [parseInt, parseInt, parseInt, parseInt, null, null, null],
+          parsers: [parseInt, parseInt, parseInt, parseInt, parseString,
+                    parseString, parseString],
           processor: this.processCodeSourceInfo },
-      'script': {
-          parsers: [parseInt, null, null],
-          processor: this.processCodeScript },
+      'script-source': {
+          parsers: [parseInt, parseString, parseString],
+          processor: this.processScriptSource },
       'sfi-move': { parsers: [parseInt, parseInt],
           processor: this.processFunctionMove },
       'active-runtime-timer': {
-        parsers: [null],
+        parsers: [parseString],
         processor: this.processRuntimeTimerEvent },
       'tick': {
           parsers: [parseInt, parseInt, parseInt,
-                    parseInt, parseInt, 'var-args'],
+                    parseInt, parseInt, parseVarArgs],
           processor: this.processTick },
-      'heap-sample-begin': { parsers: [null, null, parseInt],
+      'heap-sample-begin': { parsers: [parseString, parseString, parseInt],
           processor: this.processHeapSampleBegin },
-      'heap-sample-end': { parsers: [null, null],
+      'heap-sample-end': { parsers: [parseString, parseString],
           processor: this.processHeapSampleEnd },
-      'timer-event-start' : { parsers: [null, null, null],
+      'timer-event-start' : { parsers: [parseString, parseString, parseString],
                               processor: this.advanceDistortion },
-      'timer-event-end' : { parsers: [null, null, null],
+      'timer-event-end' : { parsers: [parseString, parseString, parseString],
                             processor: this.advanceDistortion },
       // Ignored events.
       'profiler': null,
@@ -159,7 +160,6 @@
   this.stateFilter_ = stateFilter;
   this.runtimeTimerFilter_ = runtimeTimerFilter;
   this.sourceMap = sourceMap;
-  this.deserializedEntriesNames_ = [];
   var ticks = this.ticks_ =
     { total: 0, unaccounted: 0, excluded: 0, gc: 0 };
 
@@ -298,7 +298,6 @@
 
 TickProcessor.prototype.processCodeCreation = function(
     type, kind, timestamp, start, size, name, maybe_func) {
-  name = this.deserializedEntriesNames_[start] || name;
   if (maybe_func.length) {
     var funcAddr = parseInt(maybe_func[0]);
     var state = parseState(maybe_func[1]);
@@ -332,7 +331,7 @@
     endPos, sourcePositions, inliningPositions, inlinedFunctions);
 };
 
-TickProcessor.prototype.processCodeScript = function(script, url, source) {
+TickProcessor.prototype.processScriptSource = function(script, url, source) {
   this.profile_.addScriptSource(script, url, source);
 };
 
@@ -636,23 +635,44 @@
     libName, libStart, libEnd, libASLRSlide, processorFunc) {
   this.loadSymbols(libName);
 
-  var prevEntry;
+  var lastUnknownSize;
+  var lastAdded;
+
+  function inRange(funcInfo, start, end) {
+    return funcInfo.start >= start && funcInfo.end <= end;
+  }
 
   function addEntry(funcInfo) {
     // Several functions can be mapped onto the same address. To avoid
     // creating zero-sized entries, skip such duplicates.
     // Also double-check that function belongs to the library address space.
-    if (prevEntry && !prevEntry.end &&
-        prevEntry.start < funcInfo.start &&
-        prevEntry.start >= libStart && funcInfo.start <= libEnd) {
-      processorFunc(prevEntry.name, prevEntry.start, funcInfo.start);
+
+    if (lastUnknownSize &&
+        lastUnknownSize.start < funcInfo.start) {
+      // Try to update lastUnknownSize based on new entries start position.
+      lastUnknownSize.end = funcInfo.start;
+      if ((!lastAdded || !inRange(lastUnknownSize, lastAdded.start,
+                                  lastAdded.end)) &&
+          inRange(lastUnknownSize, libStart, libEnd)) {
+        processorFunc(lastUnknownSize.name, lastUnknownSize.start,
+                      lastUnknownSize.end);
+        lastAdded = lastUnknownSize;
+      }
     }
-    if (funcInfo.end &&
-        (!prevEntry || prevEntry.start != funcInfo.start) &&
-        funcInfo.start >= libStart && funcInfo.end <= libEnd) {
-      processorFunc(funcInfo.name, funcInfo.start, funcInfo.end);
+    lastUnknownSize = undefined;
+
+    if (funcInfo.end) {
+      // Skip duplicates that have the same start address as the last added.
+      if ((!lastAdded || lastAdded.start != funcInfo.start) &&
+          inRange(funcInfo, libStart, libEnd)) {
+        processorFunc(funcInfo.name, funcInfo.start, funcInfo.end);
+        lastAdded = funcInfo;
+      }
+    } else {
+      // If a funcInfo doesn't have an end, try to match it up with the next
+      // entry.
+      lastUnknownSize = funcInfo;
     }
-    prevEntry = funcInfo;
   }
 
   while (true) {
@@ -686,11 +706,12 @@
 };
 
 
-function UnixCppEntriesProvider(nmExec, targetRootFS) {
+function UnixCppEntriesProvider(nmExec, targetRootFS, apkEmbeddedLibrary) {
   this.symbols = [];
   this.parsePos = 0;
   this.nmExec = nmExec;
   this.targetRootFS = targetRootFS;
+  this.apkEmbeddedLibrary = apkEmbeddedLibrary;
   this.FUNC_RE = /^([0-9a-fA-F]{8,16}) ([0-9a-fA-F]{8,16} )?[tTwW] (.*)$/;
 };
 inherits(UnixCppEntriesProvider, CppEntriesProvider);
@@ -698,7 +719,13 @@
 
 UnixCppEntriesProvider.prototype.loadSymbols = function(libName) {
   this.parsePos = 0;
-  libName = this.targetRootFS + libName;
+  if (this.apkEmbeddedLibrary && libName.endsWith('.apk')) {
+    libName = this.apkEmbeddedLibrary;
+  }
+  if (this.targetRootFS) {
+    libName = libName.substring(libName.lastIndexOf('/') + 1);
+    libName = this.targetRootFS + libName;
+  }
   try {
     this.symbols = [
       os.system(this.nmExec, ['-C', '-n', '-S', libName], -1, -1),
@@ -736,8 +763,8 @@
 };
 
 
-function MacCppEntriesProvider(nmExec, targetRootFS) {
-  UnixCppEntriesProvider.call(this, nmExec, targetRootFS);
+function MacCppEntriesProvider(nmExec, targetRootFS, apkEmbeddedLibrary) {
+  UnixCppEntriesProvider.call(this, nmExec, targetRootFS, apkEmbeddedLibrary);
   // Note an empty group. It is required, as UnixCppEntriesProvider expects 3 groups.
   this.FUNC_RE = /^([0-9a-fA-F]{8,16})() (.*)$/;
 };
@@ -759,7 +786,8 @@
 };
 
 
-function WindowsCppEntriesProvider(_ignored_nmExec, targetRootFS) {
+function WindowsCppEntriesProvider(_ignored_nmExec, targetRootFS,
+                                   _ignored_apkEmbeddedLibrary) {
   this.targetRootFS = targetRootFS;
   this.symbols = '';
   this.parsePos = 0;
@@ -883,6 +911,8 @@
           'Specify the \'nm\' executable to use (e.g. --nm=/my_dir/nm)'],
       '--target': ['targetRootFS', '',
           'Specify the target root directory for cross environment'],
+      '--apk-embedded-library': ['apkEmbeddedLibrary', '',
+          'Specify the path of the embedded library for Android traces'],
       '--range': ['range', 'auto,auto',
           'Specify the range limit as [start],[end]'],
       '--distortion': ['distortion', 0,
diff --git a/src/v8/tools/toolchain/BUILD.gn b/src/v8/tools/toolchain/BUILD.gn
new file mode 100644
index 0000000..b252c5e
--- /dev/null
+++ b/src/v8/tools/toolchain/BUILD.gn
@@ -0,0 +1,93 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/toolchain/gcc_toolchain.gni")
+
+gcc_toolchain("mips-bundled") {
+  toolprefix = rebase_path("//tools/mips_toolchain/bin/mips-mti-linux-gnu-",
+                           root_build_dir)
+  cc = "${toolprefix}gcc"
+  cxx = "${toolprefix}g++"
+
+  readelf = "${toolprefix}readelf"
+  nm = "${toolprefix}nm"
+  ar = "${toolprefix}ar"
+  ld = cxx
+
+  # Flag that sets endianness
+  extra_ldflags = "-EB"
+  extra_cppflags = "-EB"
+
+  toolchain_args = {
+    current_cpu = "mips"
+    current_os = "linux"
+    is_clang = false
+  }
+}
+
+gcc_toolchain("mips64-bundled") {
+  toolprefix = rebase_path("//tools/mips_toolchain/bin/mips-mti-linux-gnu-",
+                           root_build_dir)
+  cc = "${toolprefix}gcc"
+  cxx = "${toolprefix}g++"
+
+  readelf = "${toolprefix}readelf"
+  nm = "${toolprefix}nm"
+  ar = "${toolprefix}ar"
+  ld = cxx
+
+  # Flag that sets endianness and ABI
+  extra_ldflags = "-EB -mabi=64"
+  extra_cppflags = "-EB -mabi=64"
+
+  toolchain_args = {
+    current_cpu = "mips64"
+    current_os = "linux"
+    is_clang = false
+  }
+}
+
+gcc_toolchain("mipsel-bundled") {
+  toolprefix = rebase_path("//tools/mips_toolchain/bin/mips-mti-linux-gnu-",
+                           root_build_dir)
+  cc = "${toolprefix}gcc"
+  cxx = "${toolprefix}g++"
+
+  readelf = "${toolprefix}readelf"
+  nm = "${toolprefix}nm"
+  ar = "${toolprefix}ar"
+  ld = cxx
+
+  # Flag that sets endianness
+  extra_ldflags = "-EL"
+  extra_cppflags = "-EL"
+
+  toolchain_args = {
+    current_cpu = "mipsel"
+    current_os = "linux"
+    is_clang = false
+  }
+}
+
+gcc_toolchain("mips64el-bundled") {
+  toolprefix = rebase_path("//tools/mips_toolchain/bin/mips-mti-linux-gnu-",
+                           root_build_dir)
+  cc = "${toolprefix}gcc"
+  cxx = "${toolprefix}g++"
+
+  readelf = "${toolprefix}readelf"
+  nm = "${toolprefix}nm"
+  ar = "${toolprefix}ar"
+  ld = cxx
+
+  # Flag that sets endianness and ABI
+  extra_ldflags = "-EL -mabi=64"
+  extra_cppflags = "-EL -mabi=64"
+
+  toolchain_args = {
+    current_cpu = "mips64el"
+    current_os = "linux"
+    is_clang = false
+  }
+}
diff --git a/src/v8/tools/torque/format-torque.py b/src/v8/tools/torque/format-torque.py
new file mode 100755
index 0000000..2150d7e
--- /dev/null
+++ b/src/v8/tools/torque/format-torque.py
@@ -0,0 +1,170 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""This program either formats Torque source files in place (-i) or lints
+them (-l), reporting which files require formatting."""
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import subprocess
+import sys
+import re
+from subprocess import Popen, PIPE
+
+kPercentEscape = r'α';  # Unicode alpha
+
+def preprocess(input):
+  input = re.sub(r'(if\s+)constexpr(\s*\()', r'\1/*COxp*/\2', input)
+  input = re.sub(r'(\s+)operator\s*(\'[^\']+\')', r'\1/*_OPE \2*/', input)
+
+  # Mangle typeswitches to look like switch statements with the extra type
+  # information and syntax encoded in comments.
+  input = re.sub(r'(\s+)typeswitch\s*\(', r'\1/*_TYPE*/switch (', input)
+  input = re.sub(r'(\s+)case\s*\(\s*([^\:]+)\s*\)(\s*)\:\s*deferred',
+      r'\1case \2: /*_TSXDEFERRED_*/', input)
+  input = re.sub(r'(\s+)case\s*\(\s*([^\:]+)\s*\)(\s*)\:',
+      r'\1case \2: /*_TSX*/', input)
+  input = re.sub(r'(\s+)case\s*\(\s*([^\s]+)\s*\:\s*([^\:]+)\s*\)(\s*)\:\s*deferred',
+      r'\1case \3: /*_TSVDEFERRED_\2:*/', input)
+  input = re.sub(r'(\s+)case\s*\(\s*([^\s]+)\s*\:\s*([^\:]+)\s*\)(\s*)\:',
+      r'\1case \3: /*_TSV\2:*/', input)
+
+  # Add extra space around | operators to fix union types later.
+  while True:
+    old = input
+    input = re.sub(r'(\w+\s*)\|(\s*\w+)',
+        r'\1|/**/\2', input)
+    if old == input:
+      break;
+
+  input = re.sub(r'\bgenerates\s+\'([^\']+)\'\s*',
+      r' _GeNeRaTeS00_/*\1@*/', input)
+  input = re.sub(r'\bconstexpr\s+\'([^\']+)\'\s*',
+      r' _CoNsExP_/*\1@*/', input)
+  input = re.sub(r'\notherwise',
+      r'\n otherwise', input)
+  input = re.sub(r'(\n\s*\S[^\n]*\s)otherwise',
+      r'\1_OtheSaLi', input)
+  input = re.sub(r'@if\(', r'@iF(', input)
+  input = re.sub(r'@export', r'@eXpOrT', input)
+  input = re.sub(r'js-implicit[ \n]+', r'jS_iMpLiCiT_', input)
+
+  # Special handling of '%' for intrinsics, turn the percent
+  # into a unicode character so that it gets treated as part of the
+  # intrinsic's name if it's already adjacent to it.
+  input = re.sub(r'%([A-Za-z])', kPercentEscape + r'\1', input)
+
+  return input
+
+def postprocess(output):
+  output = re.sub(r'\/\*COxp\*\/', r'constexpr', output)
+  output = re.sub(r'(\S+)\s*: type([,>])', r'\1: type\2', output)
+  output = re.sub(r'(\n\s*)labels( [A-Z])', r'\1    labels\2', output)
+  output = re.sub(r'\/\*_OPE \'([^\']+)\'\*\/', r"operator '\1'", output)
+  output = re.sub(r'\/\*_TYPE\*\/(\s*)switch', r'typeswitch', output)
+  output = re.sub(r'case (\w+)\:\s*\/\*_TSXDEFERRED_\*\/',
+      r'case (\1): deferred', output)
+  output = re.sub(r'case (\w+)\:\s*\/\*_TSX\*\/',
+      r'case (\1):', output)
+  output = re.sub(r'case (\w+)\:\s*\/\*_TSVDEFERRED_([^\:]+)\:\*\/',
+      r'case (\2: \1): deferred', output)
+  output = re.sub(r'case (\w+)\:\s*\/\*_TSV([^\:]+)\:\*\/',
+      r'case (\2: \1):', output)
+  output = re.sub(r'\n_GeNeRaTeS00_\s*\/\*([^@]+)@\*\/',
+      r"\n    generates '\1'", output)
+  output = re.sub(r'_GeNeRaTeS00_\s*\/\*([^@]+)@\*\/',
+      r"generates '\1'", output)
+  output = re.sub(r'_CoNsExP_\s*\/\*([^@]+)@\*\/',
+      r"constexpr '\1'", output)
+  output = re.sub(r'\n(\s+)otherwise',
+      r"\n\1    otherwise", output)
+  output = re.sub(r'\n(\s+)_OtheSaLi',
+      r"\n\1otherwise", output)
+  output = re.sub(r'_OtheSaLi',
+      r"otherwise", output)
+  output = re.sub(r'@iF\(', r'@if(', output)
+  output = re.sub(r'@eXpOrT',
+      r"@export", output)
+  output = re.sub(r'jS_iMpLiCiT_',
+      r"js-implicit ", output)
+
+  while True:
+    old = output
+    output = re.sub(r'(\w+)\s{0,1}\|\s{0,1}/\*\*/(\s*\w+)',
+        r'\1 |\2', output)
+    if old == output:
+      break;
+
+  output = re.sub(kPercentEscape, r'%', output)
+
+  return output
+
+def process(filename, lint, should_format):
+  with open(filename, 'r') as content_file:
+    content = content_file.read()
+
+  original_input = content
+
+  if sys.platform.startswith('win'):
+    p = Popen(['clang-format', '-assume-filename=.ts'], stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True)
+  else:
+    p = Popen(['clang-format', '-assume-filename=.ts'], stdin=PIPE, stdout=PIPE, stderr=PIPE)
+  output, err = p.communicate(preprocess(content))
+  output = postprocess(output)
+  rc = p.returncode
+  if (rc != 0):
+    print("error code " + str(rc) + " running clang-format. Exiting...")
+    sys.exit(rc);
+
+  if (output != original_input):
+    if lint:
+      print(filename + ' requires formatting', file=sys.stderr)
+
+    if should_format:
+      output_file = open(filename, 'w')
+      output_file.write(output);
+      output_file.close()
+
+def print_usage():
+  print('format-torque -i file1[, file2[, ...]]')
+  print('    format and overwrite input files')
+  print('format-torque -l file1[, file2[, ...]]')
+  print('    merely indicate which files need formatting')
+
+def Main():
+  if len(sys.argv) < 3:
+    print("error: at least 2 arguments required")
+    print_usage();
+    sys.exit(-1)
+
+  def is_option(arg):
+    return arg in ['-i', '-l', '-il']
+
+  should_format = lint = False
+  use_stdout = True
+
+  flag, files = sys.argv[1], sys.argv[2:]
+  if is_option(flag):
+    if '-i' == flag:
+      should_format = True
+    elif '-l' == flag:
+      lint = True
+    else:
+      lint = True
+      should_format = True
+  else:
+    print("error: -i and/or -l flags must be specified")
+    print_usage();
+    sys.exit(-1);
+
+  for filename in files:
+    process(filename, lint, should_format)
+
+  return 0
+
+if __name__ == '__main__':
+  sys.exit(Main());
diff --git a/src/v8/tools/torque/vim-torque/README.md b/src/v8/tools/torque/vim-torque/README.md
new file mode 100644
index 0000000..fbdef0f
--- /dev/null
+++ b/src/v8/tools/torque/vim-torque/README.md
@@ -0,0 +1,33 @@
+# V8 Torque syntax support for vim
+
+This plugin adds syntax highlighting support for the V8 Torque domain-specific
+language.
+
+## Installation
+
+Installation depends on your favorite plugin manager.
+
+**Pathogen:**
+
+Run
+
+```sh
+ln -s $V8/tools/torque/vim-torque ~/.vim/bundle/vim-torque
+# or ~/.config/nvim/bundle/vim-torque for Neovim
+```
+
+**Vundle:**
+
+Add this line to your `.vimrc` or `~/.config/nvim/init.vim`.
+
+```vim
+Plugin 'file:///path/to/v8/tools/torque/vim-torque'
+```
+
+**vim-plug:**
+
+Add this line to your `.vimrc` or `~/.config/nvim/init.vim`.
+
+```vim
+Plug '~/path/to/v8/tools/torque/vim-torque'
+```
diff --git a/src/v8/tools/torque/vim-torque/ftdetect/torque.vim b/src/v8/tools/torque/vim-torque/ftdetect/torque.vim
new file mode 100644
index 0000000..ead2c5e
--- /dev/null
+++ b/src/v8/tools/torque/vim-torque/ftdetect/torque.vim
@@ -0,0 +1 @@
+au BufRead,BufNewFile *.tq set filetype=torque
diff --git a/src/v8/tools/torque/vim-torque/syntax/torque.vim b/src/v8/tools/torque/vim-torque/syntax/torque.vim
new file mode 100644
index 0000000..592e870
--- /dev/null
+++ b/src/v8/tools/torque/vim-torque/syntax/torque.vim
@@ -0,0 +1,84 @@
+" Copyright 2018 the V8 project authors. All rights reserved.
+" Use of this source code is governed by a BSD-style license that can be
+" found in the LICENSE file.
+
+if !exists("main_syntax")
+  " quit when a syntax file was already loaded
+  if exists("b:current_syntax")
+    finish
+  endif
+  let main_syntax = 'torque'
+elseif exists("b:current_syntax") && b:current_syntax == "torque"
+  finish
+endif
+
+let s:cpo_save = &cpo
+set cpo&vim
+
+syn match   torqueLineComment      "\/\/.*" contains=@Spell
+syn region  torqueComment	   start="/\*"  end="\*/" contains=@Spell
+syn region  torqueStringS	   start=+'+  skip=+\\\\\|\\'+  end=+'\|$+
+
+syn keyword torqueAssert assert check debug unreachable
+syn keyword torqueAtom True False Undefined TheHole Null
+syn keyword torqueBoolean true false
+syn keyword torqueBranch break continue goto
+syn keyword torqueConditional if else typeswitch otherwise
+syn match torqueConstant /\v<[A-Z][A-Z0-9_]+>/
+syn match torqueConstant /\v<k[A-Z][A-Za-z0-9]*>/
+syn keyword torqueFunction macro builtin runtime intrinsic
+syn keyword torqueKeyword cast convert from_constexpr min max unsafe_cast js-implicit implicit
+syn keyword torqueLabel case
+syn keyword torqueMatching try label catch
+syn keyword torqueModifier extern javascript constexpr transitioning transient weak export
+syn match torqueNumber /\v<[0-9]+(\.[0-9]*)?>/
+syn match torqueNumber /\v<0x[0-9a-fA-F]+>/
+syn keyword torqueOperator operator
+syn keyword torqueRel extends generates labels
+syn keyword torqueRepeat while for of
+syn keyword torqueStatement return tail
+syn keyword torqueStructure module struct type class
+syn keyword torqueVariable const let
+
+syn match torqueType /\v(\<)@<=([A-Za-z][0-9A-Za-z_]*)(>)@=/
+syn match torqueType /\v(:\s*(constexpr\s*)?)@<=([A-Za-z][0-9A-Za-z_]*)/
+" Include some common types also
+syn keyword torqueType Arguments void never
+syn keyword torqueType Tagged Smi HeapObject Object
+syn keyword torqueType int32 uint32 int64 intptr uintptr float32 float64
+syn keyword torqueType bool string
+syn keyword torqueType int31 RawPtr AbstractCode Code JSReceiver Context String
+syn keyword torqueType Oddball HeapNumber Number BigInt Numeric Boolean JSProxy
+syn keyword torqueType JSObject JSArray JSFunction JSBoundFunction Callable Map
+
+hi def link torqueAssert		Statement
+hi def link torqueAtom		Constant
+hi def link torqueBoolean		Boolean
+hi def link torqueBranch		Conditional
+hi def link torqueComment		Comment
+hi def link torqueConditional		Conditional
+hi def link torqueConstant		Constant
+hi def link torqueFunction		Function
+hi def link torqueKeyword		Keyword
+hi def link torqueLabel		Label
+hi def link torqueLineComment		Comment
+hi def link torqueMatching		Exception
+hi def link torqueModifier		StorageClass
+hi def link torqueNumber		Number
+hi def link torqueOperator		Operator
+hi def link torqueRel		StorageClass
+hi def link torqueRepeat		Repeat
+hi def link torqueStatement		Statement
+hi def link torqueStringS		String
+hi def link torqueStructure		Structure
+hi def link torqueType		Type
+hi def link torqueVariable		Identifier
+
+let b:current_syntax = "torque"
+if main_syntax == 'torque'
+  unlet main_syntax
+endif
+let &cpo = s:cpo_save
+unlet s:cpo_save
+
+" vim: set ts=8:
diff --git a/src/v8/tools/trace-maps-processor.py b/src/v8/tools/trace-maps-processor.py
index bf8c8a8..4a29eab 100755
--- a/src/v8/tools/trace-maps-processor.py
+++ b/src/v8/tools/trace-maps-processor.py
@@ -3,6 +3,9 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import sys
 
 
@@ -169,4 +172,4 @@
     reasons_list.append("%8d %s" % (reasons[r], r))
   reasons_list.sort(reverse=True)
   for r in reasons_list[:20]:
-    print r
+    print(r)
diff --git a/src/v8/tools/tracing/proto-converter/.nvmrc b/src/v8/tools/tracing/proto-converter/.nvmrc
new file mode 100644
index 0000000..a7b32ad
--- /dev/null
+++ b/src/v8/tools/tracing/proto-converter/.nvmrc
@@ -0,0 +1 @@
+v11.9.0
diff --git a/src/v8/tools/tracing/proto-converter/package-lock.json b/src/v8/tools/tracing/proto-converter/package-lock.json
new file mode 100644
index 0000000..52e52b3
--- /dev/null
+++ b/src/v8/tools/tracing/proto-converter/package-lock.json
@@ -0,0 +1,123 @@
+{
+  "requires": true,
+  "lockfileVersion": 1,
+  "dependencies": {
+    "@protobufjs/aspromise": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz",
+      "integrity": "sha1-m4sMxmPWaafY9vXQiToU00jzD78=",
+      "dev": true
+    },
+    "@protobufjs/base64": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz",
+      "integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==",
+      "dev": true
+    },
+    "@protobufjs/codegen": {
+      "version": "2.0.4",
+      "resolved": "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz",
+      "integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==",
+      "dev": true
+    },
+    "@protobufjs/eventemitter": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz",
+      "integrity": "sha1-NVy8mLr61ZePntCV85diHx0Ga3A=",
+      "dev": true
+    },
+    "@protobufjs/fetch": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz",
+      "integrity": "sha1-upn7WYYUr2VwDBYZ/wbUVLDYTEU=",
+      "dev": true,
+      "requires": {
+        "@protobufjs/aspromise": "^1.1.1",
+        "@protobufjs/inquire": "^1.1.0"
+      }
+    },
+    "@protobufjs/float": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz",
+      "integrity": "sha1-Xp4avctz/Ap8uLKR33jIy9l7h9E=",
+      "dev": true
+    },
+    "@protobufjs/inquire": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz",
+      "integrity": "sha1-/yAOPnzyQp4tyvwRQIKOjMY48Ik=",
+      "dev": true
+    },
+    "@protobufjs/path": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz",
+      "integrity": "sha1-bMKyDFya1q0NzP0hynZz2Nf79o0=",
+      "dev": true
+    },
+    "@protobufjs/pool": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz",
+      "integrity": "sha1-Cf0V8tbTq/qbZbw2ZQbWrXhG/1Q=",
+      "dev": true
+    },
+    "@protobufjs/utf8": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz",
+      "integrity": "sha1-p3c2C1s5oaLlEG+OhY8v0tBgxXA=",
+      "dev": true
+    },
+    "@types/long": {
+      "version": "4.0.0",
+      "resolved": "https://registry.npmjs.org/@types/long/-/long-4.0.0.tgz",
+      "integrity": "sha512-1w52Nyx4Gq47uuu0EVcsHBxZFJgurQ+rTKS3qMHxR1GY2T8c2AJYd6vZoZ9q1rupaDjU0yT+Jc2XTyXkjeMA+Q==",
+      "dev": true
+    },
+    "@types/node": {
+      "version": "11.11.4",
+      "resolved": "https://registry.npmjs.org/@types/node/-/node-11.11.4.tgz",
+      "integrity": "sha512-02tIL+QIi/RW4E5xILdoAMjeJ9kYq5t5S2vciUdFPXv/ikFTb0zK8q9vXkg4+WAJuYXGiVT1H28AkD2C+IkXVw==",
+      "dev": true
+    },
+    "long": {
+      "version": "4.0.0",
+      "resolved": "https://registry.npmjs.org/long/-/long-4.0.0.tgz",
+      "integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==",
+      "dev": true
+    },
+    "protobufjs": {
+      "version": "6.8.8",
+      "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-6.8.8.tgz",
+      "integrity": "sha512-AAmHtD5pXgZfi7GMpllpO3q1Xw1OYldr+dMUlAnffGTAhqkg72WdmSY71uKBF/JuyiKs8psYbtKrhi0ASCD8qw==",
+      "dev": true,
+      "requires": {
+        "@protobufjs/aspromise": "^1.1.2",
+        "@protobufjs/base64": "^1.1.2",
+        "@protobufjs/codegen": "^2.0.4",
+        "@protobufjs/eventemitter": "^1.1.0",
+        "@protobufjs/fetch": "^1.1.0",
+        "@protobufjs/float": "^1.0.2",
+        "@protobufjs/inquire": "^1.1.0",
+        "@protobufjs/path": "^1.1.2",
+        "@protobufjs/pool": "^1.1.0",
+        "@protobufjs/utf8": "^1.1.0",
+        "@types/long": "^4.0.0",
+        "@types/node": "^10.1.0",
+        "long": "^4.0.0"
+      },
+      "dependencies": {
+        "@types/node": {
+          "version": "10.14.1",
+          "resolved": "https://registry.npmjs.org/@types/node/-/node-10.14.1.tgz",
+          "integrity": "sha512-Rymt08vh1GaW4vYB6QP61/5m/CFLGnFZP++bJpWbiNxceNa6RBipDmb413jvtSf/R1gg5a/jQVl2jY4XVRscEA==",
+          "dev": true
+        }
+      }
+    },
+    "typescript": {
+      "version": "3.3.4000",
+      "resolved": "https://registry.npmjs.org/typescript/-/typescript-3.3.4000.tgz",
+      "integrity": "sha512-jjOcCZvpkl2+z7JFn0yBOoLQyLoIkNZAs/fYJkUG6VKy6zLPHJGfQJYFHzibB6GJaF/8QrcECtlQ5cpvRHSMEA==",
+      "dev": true
+    }
+  }
+}
diff --git a/src/v8/tools/tracing/proto-converter/package.json b/src/v8/tools/tracing/proto-converter/package.json
new file mode 100644
index 0000000..41401a1
--- /dev/null
+++ b/src/v8/tools/tracing/proto-converter/package.json
@@ -0,0 +1,11 @@
+{
+  "private": true,
+  "scripts": {
+    "build": "tsc proto-to-json.ts"
+  },
+  "devDependencies": {
+    "@types/node": "^11.11.4",
+    "protobufjs": "^6.8.8",
+    "typescript": "^3.3.4000"
+  }
+}
diff --git a/src/v8/tools/tracing/proto-converter/proto-to-json.ts b/src/v8/tools/tracing/proto-converter/proto-to-json.ts
new file mode 100644
index 0000000..2427410
--- /dev/null
+++ b/src/v8/tools/tracing/proto-converter/proto-to-json.ts
@@ -0,0 +1,132 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import * as fs from 'fs';
+import * as path from 'path';
+import { Root } from 'protobufjs';
+
+// Requirements: node 10.4.0+, npm
+
+// Setup:
+// (nvm is optional, you can also just install node manually)
+// $ nvm use
+// $ npm install
+// $ npm run build
+
+// Usage: node proto-to-json.js path_to_trace.proto input_file output_file
+
+// Converts a binary proto file to a 'Trace Event Format' compatible .json file
+// that can be used with chrome://tracing. Documentation of this format:
+// https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU
+
+// Attempts to reproduce the logic of the JSONTraceWriter in V8 in terms of the
+// JSON fields it will include/exclude based on the data present in the trace
+// event.
+
+// TODO(petermarshall): Replace with Array#flat once it lands in Node.js.
+const flatten = <T>(a: T[], b: T[]) => { a.push(...b); return a; }
+
+// Convert a string representing an int or uint (64 bit) to a Number or throw
+// if the value won't fit.
+function parseIntOrThrow(int: string) {
+  if (BigInt(int) > Number.MAX_SAFE_INTEGER) {
+    throw new Error("Loss of int precision");
+  }
+  return Number(int);
+}
+
+function uint64AsHexString(val : string) : string {
+  return "0x" + BigInt(val).toString(16);
+}
+
+function parseArgValue(arg: any) : any {
+  if (arg.jsonValue) {
+    return JSON.parse(arg.jsonValue);
+  }
+  if (typeof arg.stringValue !== 'undefined') {
+    return arg.stringValue;
+  }
+  if (typeof arg.uintValue !== 'undefined') {
+    return parseIntOrThrow(arg.uintValue);
+  }
+  if (typeof arg.intValue !== 'undefined') {
+    return parseIntOrThrow(arg.intValue);
+  }
+  if (typeof arg.boolValue !== 'undefined') {
+    return arg.boolValue;
+  }
+  if (typeof arg.doubleValue !== 'undefined') {
+    // Handle [-]Infinity and NaN which protobufjs outputs as strings here.
+    return typeof arg.doubleValue === 'string' ?
+        arg.doubleValue : Number(arg.doubleValue);
+  }
+  if (typeof arg.pointerValue !== 'undefined') {
+    return uint64AsHexString(arg.pointerValue);
+  }
+}
+
+// These come from
+// https://cs.chromium.org/chromium/src/base/trace_event/common/trace_event_common.h
+const TRACE_EVENT_FLAG_HAS_ID: number = 1 << 1;
+const TRACE_EVENT_FLAG_FLOW_IN: number = 1 << 8;
+const TRACE_EVENT_FLAG_FLOW_OUT: number = 1 << 9;
+
+async function main() {
+  const root = new Root();
+  const { resolvePath } = root;
+  const numDirectoriesToStrip = 2;
+  let initialOrigin: string|null;
+  root.resolvePath = (origin, target) => {
+    if (!origin) {
+      initialOrigin = target;
+      for (let i = 0; i <= numDirectoriesToStrip; i++) {
+        initialOrigin = path.dirname(initialOrigin);
+      }
+      return resolvePath(origin, target);
+    }
+    return path.resolve(initialOrigin!, target);
+  };
+  const traceProto = await root.load(process.argv[2]);
+  const Trace = traceProto.lookupType("Trace");
+  const payload = await fs.promises.readFile(process.argv[3]);
+  const msg = Trace.decode(payload).toJSON();
+  const output = {
+    traceEvents: msg.packet
+      .filter((packet: any) => !!packet.chromeEvents)
+      .map((packet: any) => packet.chromeEvents.traceEvents)
+      .map((traceEvents: any) => traceEvents.map((e: any) => {
+
+        const bind_id = (e.flags & (TRACE_EVENT_FLAG_FLOW_IN |
+          TRACE_EVENT_FLAG_FLOW_OUT)) ? e.bindId : undefined;
+        const scope = (e.flags & TRACE_EVENT_FLAG_HAS_ID) && e.scope ?
+            e.scope : undefined;
+
+        return {
+          pid: e.processId,
+          tid: e.threadId,
+          ts: parseIntOrThrow(e.timestamp),
+          tts: parseIntOrThrow(e.threadTimestamp),
+          ph: String.fromCodePoint(e.phase),
+          cat: e.categoryGroupName,
+          name: e.name,
+          dur: parseIntOrThrow(e.duration),
+          tdur: parseIntOrThrow(e.threadDuration),
+          bind_id: bind_id,
+          flow_in: e.flags & TRACE_EVENT_FLAG_FLOW_IN ? true : undefined,
+          flow_out: e.flags & TRACE_EVENT_FLAG_FLOW_OUT ? true : undefined,
+          scope: scope,
+          id: (e.flags & TRACE_EVENT_FLAG_HAS_ID) ?
+              uint64AsHexString(e.id) : undefined,
+          args: (e.args || []).reduce((js_args: any, proto_arg: any) => {
+            js_args[proto_arg.name] = parseArgValue(proto_arg);
+            return js_args;
+          }, {})
+        };
+      }))
+      .reduce(flatten, [])
+  };
+  await fs.promises.writeFile(process.argv[4], JSON.stringify(output, null, 2));
+}
+
+main().catch(console.error);
diff --git a/src/v8/tools/tracing/proto-converter/tsconfig.json b/src/v8/tools/tracing/proto-converter/tsconfig.json
new file mode 100644
index 0000000..defc4ef
--- /dev/null
+++ b/src/v8/tools/tracing/proto-converter/tsconfig.json
@@ -0,0 +1,12 @@
+{
+  "compilerOptions": {
+    "target": "ES2018",
+    "module": "commonjs",
+    "lib": ["es6","dom"],
+    "outDir": "lib",
+    "rootDir": "src",
+    "strict": true,
+    "esModuleInterop": true,
+    "resolveJsonModule": true
+  }
+}
diff --git a/src/v8/tools/try_perf.py b/src/v8/tools/try_perf.py
index cad836b..2c9c382 100755
--- a/src/v8/tools/try_perf.py
+++ b/src/v8/tools/try_perf.py
@@ -3,35 +3,39 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import argparse
 import os
 import subprocess
 import sys
 
 BOTS = {
-  '--arm32': 'v8_arm32_perf_try',
+  '--chromebook': 'v8_chromebook_perf_try',
   '--linux32': 'v8_linux32_perf_try',
   '--linux64': 'v8_linux64_perf_try',
   '--linux64_atom': 'v8_linux64_atom_perf_try',
-  '--linux64_haswell': 'v8_linux64_haswell_perf_try',
   '--nexus5': 'v8_nexus5_perf_try',
   '--nexus7': 'v8_nexus7_perf_try',
-  '--nexus9': 'v8_nexus9_perf_try',
-  '--nexus10': 'v8_nexus10_perf_try',
+  '--nokia1': 'v8_nokia1_perf_try',
+  '--odroid32': 'v8_odroid32_perf_try',
+  '--pixel2': 'v8_pixel2_perf_try',
 }
 
 DEFAULT_BOTS = [
-  'v8_arm32_perf_try',
+  'v8_chromebook_perf_try',
   'v8_linux32_perf_try',
-  'v8_linux64_haswell_perf_try',
-  'v8_nexus10_perf_try',
+  'v8_linux64_perf_try',
 ]
 
 PUBLIC_BENCHMARKS = [
   'arewefastyet',
+  'ares6',
+  'blazor',
+  'compile',
   'embenchen',
   'emscripten',
-  'compile',
   'jetstream',
   'jsbench',
   'jstests',
@@ -46,6 +50,7 @@
   'sunspider',
   'unity',
   'wasm',
+  'web-tooling-benchmark',
 ]
 
 V8_BASE = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
@@ -59,17 +64,28 @@
                       help='Revision (use full hash!) to use for the try job; '
                            'default: the revision will be determined by the '
                            'try server; see its waterfall for more info')
+  parser.add_argument('-v', '--verbose', action='store_true',
+                      help='Print debug information')
+  parser.add_argument('-c', '--confidence-level', type=float,
+                      help='Repeatedly runs each benchmark until specified '
+                      'confidence level is reached. The value is interpreted '
+                      'as the number of standard deviations from the mean that '
+                      'all values must lie within. Typical values are 1, 2 and '
+                      '3 and correspond to 68%%, 95%% and 99.7%% probability '
+                      'that the measured value is within 0.1%% of the true '
+                      'value. Larger values result in more retries and thus '
+                      'longer runtime, but also provide more reliable results.')
   for option in sorted(BOTS):
     parser.add_argument(
         option, dest='bots', action='append_const', const=BOTS[option],
         help='Add %s trybot.' % BOTS[option])
   options = parser.parse_args()
   if not options.bots:
-    print 'No trybots specified. Using default %s.' % ','.join(DEFAULT_BOTS)
+    print('No trybots specified. Using default %s.' % ','.join(DEFAULT_BOTS))
     options.bots = DEFAULT_BOTS
 
   if not options.benchmarks:
-    print 'Please specify the benchmarks to run as arguments.'
+    print('Please specify the benchmarks to run as arguments.')
     return 1
 
   for benchmark in options.benchmarks:
@@ -77,7 +93,7 @@
       print ('%s not found in our benchmark list. The respective trybot might '
             'fail, unless you run something this script isn\'t aware of. '
             'Available public benchmarks: %s' % (benchmark, PUBLIC_BENCHMARKS))
-      print 'Proceed anyways? [Y/n] ',
+      print('Proceed anyways? [Y/n] ', end=' ')
       answer = sys.stdin.readline().strip()
       if answer != "" and answer != "Y" and answer != "y":
         return 1
@@ -89,15 +105,20 @@
   subprocess.check_output(
       'update_depot_tools', shell=True, stderr=subprocess.STDOUT, cwd=V8_BASE)
 
-  cmd = ['git cl try -m internal.client.v8']
+  cmd = ['git cl try', '-B', 'luci.v8-internal.try']
   cmd += ['-b %s' % bot for bot in options.bots]
-  if options.revision: cmd += ['-r %s' % options.revision]
+  if options.revision:
+    cmd.append('-r %s' % options.revision)
   benchmarks = ['"%s"' % benchmark for benchmark in options.benchmarks]
-  cmd += ['-p \'testfilter=[%s]\'' % ','.join(benchmarks)]
+  cmd.append('-p \'testfilter=[%s]\'' % ','.join(benchmarks))
   if options.extra_flags:
-    cmd += ['-p \'extra_flags="%s"\'' % options.extra_flags]
+    cmd.append('-p \'extra_flags="%s"\'' % options.extra_flags)
+  if options.confidence_level:
+    cmd.append('-p confidence_level=%f' % options.confidence_level)
+  if options.verbose:
+    cmd.append('-vv')
+    print('Running %s' % ' '.join(cmd))
   subprocess.check_call(' '.join(cmd), shell=True, cwd=V8_BASE)
 
-
 if __name__ == '__main__':  # pragma: no cover
   sys.exit(main())
diff --git a/src/v8/tools/turbolizer-perf.py b/src/v8/tools/turbolizer-perf.py
index c90a117..d35f538 100644
--- a/src/v8/tools/turbolizer-perf.py
+++ b/src/v8/tools/turbolizer-perf.py
@@ -2,6 +2,9 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import os
 import sys
 import json
@@ -25,7 +28,7 @@
           known_addrs.add(result.group(0))
 
 def trace_end():
-  print json.dumps(json_obj)
+  print(json.dumps(json_obj))
 
 def process_event(param_dict):
   addr = "0x%x" % int(param_dict['sample']['ip'])
diff --git a/src/v8/tools/turbolizer/README.md b/src/v8/tools/turbolizer/README.md
index d4010d8..c5ee729 100644
--- a/src/v8/tools/turbolizer/README.md
+++ b/src/v8/tools/turbolizer/README.md
@@ -8,12 +8,22 @@
 Turbolizer consumes .json files that are generated per-function by d8 by passing
 the '--trace-turbo' command-line flag.
 
-Host the turbolizer locally by starting a web server that serves the contents of
-the turbolizer directory, e.g.:
+Turbolizer is build using npm:
 
-    cd src/tools/turbolizer
+    cd tools/turbolizer
+    npm i
+    npm run-script build
+
+Afterwards, turbolizer can be hosted locally by starting a web server that serve
+the contents of the turbolizer directory, e.g.:
+
     python -m SimpleHTTPServer 8000
 
+To deploy to a directory that can be hosted the script `deploy` can be used. The
+following command will deploy to the directory /www/turbolizer:
+
+    npm run deploy -- /www/turbolizer
+
 Optionally, profiling data generated by the perf tools in linux can be merged
 with the .json files using the turbolizer-perf.py file included. The following
 command is an example of using the perf script:
@@ -60,3 +70,11 @@
 There are many options that can be added to the first command, for example '-e'
 can be used to specify the counting of specific events (default: cycles), as
 well as '--cpu' to specify which CPU to sample.
+
+Turbolizer build process
+------------------------
+
+Turbolizer is currently migrating to TypeScript. The typescript sources reside in
+tools/turbolizer/src, and the typescript compiler will put the JavaScript output
+into tools/turbolizer/build/. The index.html file is set up to load the JavaScript
+from that directory.
diff --git a/src/v8/tools/turbolizer/deploy.sh b/src/v8/tools/turbolizer/deploy.sh
new file mode 100755
index 0000000..011c2f4
--- /dev/null
+++ b/src/v8/tools/turbolizer/deploy.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+DEST=$1
+
+if [ ! -d "$DEST" ]; then
+  echo -e "Destination \"$DEST\" is not a directory. Run\n\tnpm deploy -- [destination-directory]"
+  exit 1
+fi
+
+function copy() {
+  echo -n "."
+  cp "$@"
+}
+
+echo -n "Deploying..."
+copy *.png $DEST/
+copy *.css $DEST/
+copy index.html $DEST/
+copy info-view.html $DEST/
+copy -R build $DEST/
+copy -R img $DEST/
+echo "done!"
+
+echo "Deployed to $DEST/."
diff --git a/src/v8/tools/turbolizer/img/hide-selected-icon.png b/src/v8/tools/turbolizer/img/hide-selected-icon.png
new file mode 100644
index 0000000..207cdbb
--- /dev/null
+++ b/src/v8/tools/turbolizer/img/hide-selected-icon.png
Binary files differ
diff --git a/src/v8/tools/turbolizer/img/hide-unselected-icon.png b/src/v8/tools/turbolizer/img/hide-unselected-icon.png
new file mode 100644
index 0000000..15617b0
--- /dev/null
+++ b/src/v8/tools/turbolizer/img/hide-unselected-icon.png
Binary files differ
diff --git a/src/v8/tools/turbolizer/img/layout-icon.png b/src/v8/tools/turbolizer/img/layout-icon.png
new file mode 100644
index 0000000..95a517a
--- /dev/null
+++ b/src/v8/tools/turbolizer/img/layout-icon.png
Binary files differ
diff --git a/src/v8/tools/turbolizer/img/show-all-icon.png b/src/v8/tools/turbolizer/img/show-all-icon.png
new file mode 100644
index 0000000..50fc845
--- /dev/null
+++ b/src/v8/tools/turbolizer/img/show-all-icon.png
Binary files differ
diff --git a/src/v8/tools/turbolizer/img/show-control-icon.png b/src/v8/tools/turbolizer/img/show-control-icon.png
new file mode 100644
index 0000000..4238bee
--- /dev/null
+++ b/src/v8/tools/turbolizer/img/show-control-icon.png
Binary files differ
diff --git a/src/v8/tools/turbolizer/img/toggle-hide-dead-icon.png b/src/v8/tools/turbolizer/img/toggle-hide-dead-icon.png
new file mode 100644
index 0000000..ac72bb9
--- /dev/null
+++ b/src/v8/tools/turbolizer/img/toggle-hide-dead-icon.png
Binary files differ
diff --git a/src/v8/tools/turbolizer/img/toggle-types-icon.png b/src/v8/tools/turbolizer/img/toggle-types-icon.png
new file mode 100644
index 0000000..8fead8f
--- /dev/null
+++ b/src/v8/tools/turbolizer/img/toggle-types-icon.png
Binary files differ
diff --git a/src/v8/tools/turbolizer/img/zoom-selection-icon.png b/src/v8/tools/turbolizer/img/zoom-selection-icon.png
new file mode 100644
index 0000000..12dc3e3
--- /dev/null
+++ b/src/v8/tools/turbolizer/img/zoom-selection-icon.png
Binary files differ
diff --git a/src/v8/tools/turbolizer/index.html b/src/v8/tools/turbolizer/index.html
index 552e837..f970a6d 100644
--- a/src/v8/tools/turbolizer/index.html
+++ b/src/v8/tools/turbolizer/index.html
@@ -1,99 +1,46 @@
-<!DOCTYPE HTML>
+<!DOCTYPE html>
 <html>
-  <head>
-    <title>Turbolizer</title>
-    <link rel="stylesheet" href="turbo-visualizer.css" />
-  </head>
-  <body>
-    <div id="left">
-      <div id='source-text'>
-        <pre id='source-text-pre'\>
-      </div>
-    </div>
-    <div class="resizer-left"></div>
-    <div id="middle" class="resizable-pane">
-      <div id="graph-toolbox-anchor">
-        <span id="graph-toolbox">
-          <input id="layout" type="image" title="layout graph" src="layout-icon.png"
-                 alt="layout graph" class="button-input">
-          <input id="show-all" type="image" title="show all nodes" src="expand-all.jpg"
-                 alt="show all nodes" class="button-input">
-          <input id="hide-dead" type="image" title="only live nodes" src="live.png"
-                 alt="only live nodes" class="button-input">
-          <input id="hide-unselected" type="image" title="hide unselected nodes"
-                 src="hide-unselected.png" alt="hide unselected nodes" class="button-input">
-          <input id="hide-selected" type="image" title="hide selected nodes"
-                 src="hide-selected.png" alt="hide selected nodes" class="button-input">
-          <input id="zoom-selection" type="image" title="zoom to selection"
-                 src="search.png" alt="zoom to selection" class="button-input">
-          <input id="toggle-types" type="image" title="show/hide types"
-                 src="types.png" alt="show/hide types" class="button-input">
-          <input id="search-input" type="text" title="search nodes for regex"
-                 alt="search node for regex" class="search-input"
-                 placeholder="find with regexp&hellip;">
-          <select id="display-selector">
-            <option disabled selected>(please open a file)</option>
-          </select>
-        </span>
-      </div>
+<!--
+Copyright 2019 the V8 project authors. All rights reserved.  Use of this source
+code is governed by a BSD-style license that can be found in the LICENSE file.
+-->
+<head>
+  <meta charset="utf-8">
+  <title>V8 Turbolizer</title>
+  <link rel="stylesheet" href="turbo-visualizer.css">
+  <link rel="stylesheet" href="tabs.css">
+  <link rel="stylesheet" href="prettify.css">
+  <link rel="icon" type="image/png" href="turbolizer.png">
+</head>
 
-      <div id="load-file">
-        <input type="file" id="hidden-file-upload">
-        <input id="upload" type="image" title="load graph" class="button-input"
-               src="upload-icon.png" alt="upload graph">
-      </div>
-      <div id="empty" width="100%" height="100%"></div>
-      <div id="graph" width="100%" height="100%"></div>
-      <div id="schedule" width="100%">
-        <pre id="schedule-text-pre" class='prettyprint prettyprinted'>
-          <ul id="schedule-list" class='nolinenums noindent'>
-          </ul>
-        </pre>
-      </div>
-      <div id='text-placeholder' width="0px" height="0px" style="position: absolute; top:100000px;" ><svg><text text-anchor="right">
-          <tspan white-space="inherit" id="text-measure"/>
-      </text></svg></div>
+<body>
+  <div id="left" class="content"></div>
+  <div id="resizer-left" class="resizer"></div>
+  <div id="middle">
+
+    <div id="load-file">
+      <input id="upload-helper" type="file">
+      <input id="upload" type="image" title="load graph" class="button-input" src="upload-icon.png" alt="upload graph">
     </div>
-    <div class="resizer-right"></div>
-    <div id="right">
-      <div id='disassembly'>
-        <pre id='disassembly-text-pre' class='prettyprint prettyprinted'>
-          <ul id='disassembly-list' class='nolinenums noindent'>
-          </ul>
-        </pre>
-      </div>
-    </div>
-    <div id="source-collapse" class="collapse-pane">
-      <input id="source-expand" type="image" title="show source"
-             src="right-arrow.png" class="button-input invisible">
-      <input id="source-shrink" type="image" title="hide source"
-             src="left-arrow.png" class="button-input">
-    </div>
-    <div id="disassembly-collapse" class="collapse-pane">
-      <input id="disassembly-expand" type="image" title="show disassembly"
-             src="left-arrow.png" class="button-input invisible">
-      <input id="disassembly-shrink" type="image" title="hide disassembly"
-             src="right-arrow.png" class="button-input">
-    </div>
-    <script src="https://cdn.rawgit.com/google/code-prettify/master/loader/run_prettify.js"></script>
-    <script src="http://d3js.org/d3.v3.min.js" charset="utf-8"></script>
-    <script src="https://cdn.jsdelivr.net/filesaver.js/0.1/FileSaver.min.js"></script>
-    <script src="monkey.js"></script>
-    <script src="util.js"></script>
-    <script src="lang-disassembly.js"></script>
-    <script src="node.js"></script>
-    <script src="edge.js"></script>
-    <script src="selection.js"></script>
-    <script src="selection-broker.js"></script>
-    <script src="constants.js"></script>
-    <script src="view.js"></script>
-    <script src="text-view.js"></script>
-    <script src="empty-view.js"></script>
-    <script src="code-view.js"></script>
-    <script src="graph-layout.js"></script>
-    <script src="graph-view.js"></script>
-    <script src="schedule-view.js"></script>
-    <script src="disassembly-view.js"></script>
-    <script src="turbo-visualizer.js"></script>
-  </body>
+  </div>
+  <div id="resizer-right" class="resizer"></div>
+  <div id="right" class="content"></div>
+  <div id="source-collapse" class="collapse-pane">
+    <input id="source-expand" type="image" title="show source" src="right-arrow.png" class="button-input invisible">
+    <input id="source-shrink" type="image" title="hide source" src="left-arrow.png" class="button-input">
+  </div>
+  <div id="disassembly-collapse" class="collapse-pane">
+    <input id="disassembly-expand" type="image" title="show disassembly" src="left-arrow.png" class="button-input invisible">
+    <input id="disassembly-shrink" type="image" title="hide disassembly" src="right-arrow.png" class="button-input">
+  </div>
+  <div id="text-placeholder" width="0" height="0" style="position: absolute; top:100000px;">
+    <svg>
+      <text text-anchor="right">
+        <tspan white-space="inherit" id="text-measure">
+      </text>
+    </svg>
+  </div>
+  <script src="https://cdn.rawgit.com/google/code-prettify/master/loader/run_prettify.js"></script>
+  <script src="build/turbolizer.js"></script>
+</body>
 </html>
diff --git a/src/v8/tools/turbolizer/info-view.html b/src/v8/tools/turbolizer/info-view.html
new file mode 100644
index 0000000..b523e65
--- /dev/null
+++ b/src/v8/tools/turbolizer/info-view.html
@@ -0,0 +1,119 @@
+<div>This is view contains hints about available keyboard shortcuts.</div>
+<div class="info-topic" id="info-global">
+  <div class="info-topic-header">Global shortcuts</div>
+  <div class="info-topic-content">
+    <table>
+      <tr>
+        <td>CTRL+L</td>
+        <td>Open load file dialog.</td>
+      </tr>
+      <tr>
+        <td>CTRL+R</td>
+        <td>Reload turbolizer (Chrome shortcut)</td>
+      </tr>
+    </table>
+  </div>
+</div>
+<div class="info-topic" id="info-graph-view">
+  <div class="info-topic-header">Graph view</div>
+  <div class="info-topic-content">
+    <table>
+      <tr>
+        <td>r</td>
+        <td>Relayout graph</td>
+      </tr>
+      <tr>
+        <td>a</td>
+        <td>Select all nodes</td>
+      </tr>
+      <tr>
+        <td>/</td>
+        <td>Select search box</td>
+      </tr>
+    </table>
+  </div>
+</div>
+<div class="info-topic" id="info-graph-nodes">
+  <div class="info-topic-header">TurboFan graph nodes</div>
+  <div class="info-topic-content">
+    <div>The following commands transform node selections, i.e. each operation will be applied
+      to each node in the current selection and the union of the resulting nodes will become the
+      new selection.</div>
+    <table>
+      <tr>
+        <td>UP</td>
+        <td>Select all input nodes</td>
+      </tr>
+      <tr>
+        <td>DOWN</td>
+        <td>Select all output nodes</td>
+      </tr>
+      <tr>
+        <td>1-9</td>
+        <td>Select input node 1-9</td>
+      </tr>
+      <tr>
+        <td>CTRL+1-9</td>
+        <td>Toggle input edge 1-9</td>
+      </tr>
+      <tr>
+        <td>c</td>
+        <td>Select control output node</td>
+      </tr>
+      <tr>
+        <td>e</td>
+        <td>Select effect output node</td>
+      </tr>
+      <tr>
+        <td>i</td>
+        <td>Reveal node's input nodes</td>
+      </tr>
+      <tr>
+        <td>o</td>
+        <td>Reveal node's output nodes</td>
+      </tr>
+      <tr>
+        <td>s</td>
+        <td>Select node's origin node</td>
+      </tr>
+      <tr>
+        <td>/</td>
+        <td>Select search box</td>
+      </tr>
+    </table>
+  </div>
+</div>
+<div class="info-topic" id="info-graph-search">
+  <div class="info-topic-header">Graph search</div>
+  <div class="info-topic-content">
+    <table>
+      <tr>
+        <td>ENTER</td>
+        <td>Select nodes according to regular expression. Invisible nodes are included depending on the state of the
+          checkbox "only visible".</td>
+      </tr>
+      <tr>
+        <td>CTRL+ENTER</td>
+        <td>Select nodes according to regular expression, always including invisible nodes regardless of checkbox.</td>
+      </tr>
+    </table>
+    <div style="font-weight: bold">
+      Useful patterns
+    </div>
+    <table>
+      <tr>
+        <td>IfTrue</td>
+        <td>Select nodes which have 'IfTrue' in title or description.</td>
+      </tr>
+      <tr>
+        <td>^42:</td>
+        <td>Select exactly the node with id 14.</td>
+      </tr>
+      <tr>
+        <td>Origin:&nbsp;#42&nbsp;</td>
+        <td>Select nodes which were created while node with id 42 was reduced. This is inaccurate if the node was
+          changed in-place.</td>
+      </tr>
+    </table>
+  </div>
+</div>
diff --git a/src/v8/tools/turbolizer/package-lock.json b/src/v8/tools/turbolizer/package-lock.json
new file mode 100644
index 0000000..9c8049f
--- /dev/null
+++ b/src/v8/tools/turbolizer/package-lock.json
@@ -0,0 +1,3568 @@
+{
+  "name": "turbolizer",
+  "version": "0.1.0",
+  "lockfileVersion": 1,
+  "requires": true,
+  "dependencies": {
+    "@koa/cors": {
+      "version": "2.2.2",
+      "resolved": "https://registry.npmjs.org/@koa/cors/-/cors-2.2.2.tgz",
+      "integrity": "sha512-Ollvsy3wB8+7R9w6hPVzlj3wekF6nK+IHpHj7faSPVXCkahqCwNEPp9+0C4b51RDkdpHjevLEGLOKuVjqtXgSQ==",
+      "dev": true
+    },
+    "@types/d3": {
+      "version": "5.5.0",
+      "resolved": "https://registry.npmjs.org/@types/d3/-/d3-5.5.0.tgz",
+      "integrity": "sha512-Bz9EAhWnaO93jLYSAT13blgzwP5Z0grO5THBOXSMeWHIIFHA7ntJSLpHSCr1kDtQunEZKCYT9OfE+4lYY/PwlA==",
+      "requires": {
+        "@types/d3-array": "*",
+        "@types/d3-axis": "*",
+        "@types/d3-brush": "*",
+        "@types/d3-chord": "*",
+        "@types/d3-collection": "*",
+        "@types/d3-color": "*",
+        "@types/d3-contour": "*",
+        "@types/d3-dispatch": "*",
+        "@types/d3-drag": "*",
+        "@types/d3-dsv": "*",
+        "@types/d3-ease": "*",
+        "@types/d3-fetch": "*",
+        "@types/d3-force": "*",
+        "@types/d3-format": "*",
+        "@types/d3-geo": "*",
+        "@types/d3-hierarchy": "*",
+        "@types/d3-interpolate": "*",
+        "@types/d3-path": "*",
+        "@types/d3-polygon": "*",
+        "@types/d3-quadtree": "*",
+        "@types/d3-random": "*",
+        "@types/d3-scale": "*",
+        "@types/d3-scale-chromatic": "*",
+        "@types/d3-selection": "*",
+        "@types/d3-shape": "*",
+        "@types/d3-time": "*",
+        "@types/d3-time-format": "*",
+        "@types/d3-timer": "*",
+        "@types/d3-transition": "*",
+        "@types/d3-voronoi": "*",
+        "@types/d3-zoom": "*"
+      }
+    },
+    "@types/d3-array": {
+      "version": "1.2.4",
+      "resolved": "https://registry.npmjs.org/@types/d3-array/-/d3-array-1.2.4.tgz",
+      "integrity": "sha512-3r1fOAAb+SGfcOGXty/LGvoP0ovMec4UtGNUyHOSzYyvSGpmt+eNMxLowol/3HryusevznSfcHZebEShXMwsZA=="
+    },
+    "@types/d3-axis": {
+      "version": "1.0.11",
+      "resolved": "https://registry.npmjs.org/@types/d3-axis/-/d3-axis-1.0.11.tgz",
+      "integrity": "sha512-cuigApCyCwYJxaQPghj+BqaxzbdRdT/lpZBMtF7EuEIJ61NMQ8yvGnqFvHCIgJEmUu2Wb2wiZqy9kiHi3Ddftg==",
+      "requires": {
+        "@types/d3-selection": "*"
+      }
+    },
+    "@types/d3-brush": {
+      "version": "1.0.9",
+      "resolved": "https://registry.npmjs.org/@types/d3-brush/-/d3-brush-1.0.9.tgz",
+      "integrity": "sha512-mAx8IVc0luUHfk51pl0UN1vzybnAzLMUsvIwLt3fbsqqPkSXr+Pu1AxOPPeyNc27LhHJnfH/LCV7Jlv+Yzqu1A==",
+      "requires": {
+        "@types/d3-selection": "*"
+      }
+    },
+    "@types/d3-chord": {
+      "version": "1.0.8",
+      "resolved": "https://registry.npmjs.org/@types/d3-chord/-/d3-chord-1.0.8.tgz",
+      "integrity": "sha512-F0ftYOo7FenAIxsRjXLt8vbij0NLDuVcL+xaGY7R9jUmF2Mrpj1T5XukBI9Cad+Ei7YSxEWREIO+CYcaKCl2qQ=="
+    },
+    "@types/d3-collection": {
+      "version": "1.0.7",
+      "resolved": "https://registry.npmjs.org/@types/d3-collection/-/d3-collection-1.0.7.tgz",
+      "integrity": "sha512-vR3BT0GwHc5y93Jv6bxn3zoxP/vGu+GdXu/r1ApjbP9dLk9I2g6NiV7iP/QMQSuFZd0It0n/qWrfXHxCWwHIkg=="
+    },
+    "@types/d3-color": {
+      "version": "1.2.1",
+      "resolved": "https://registry.npmjs.org/@types/d3-color/-/d3-color-1.2.1.tgz",
+      "integrity": "sha512-xwb1tqvYNWllbHuhMFhiXk63Imf+QNq/dJdmbXmr2wQVnwGenCuj3/0IWJ9hdIFQIqzvhT7T37cvx93jtAsDbQ=="
+    },
+    "@types/d3-contour": {
+      "version": "1.3.0",
+      "resolved": "https://registry.npmjs.org/@types/d3-contour/-/d3-contour-1.3.0.tgz",
+      "integrity": "sha512-AUCUIjEnC5lCGBM9hS+MryRaFLIrPls4Rbv6ktqbd+TK/RXZPwOy9rtBWmGpbeXcSOYCJTUDwNJuEnmYPJRxHQ==",
+      "requires": {
+        "@types/d3-array": "*",
+        "@types/geojson": "*"
+      }
+    },
+    "@types/d3-dispatch": {
+      "version": "1.0.6",
+      "resolved": "https://registry.npmjs.org/@types/d3-dispatch/-/d3-dispatch-1.0.6.tgz",
+      "integrity": "sha512-xyWJQMr832vqhu6fD/YqX+MSFBWnkxasNhcStvlhqygXxj0cKqPft0wuGoH5TIq5ADXgP83qeNVa4R7bEYN3uA=="
+    },
+    "@types/d3-drag": {
+      "version": "1.2.2",
+      "resolved": "https://registry.npmjs.org/@types/d3-drag/-/d3-drag-1.2.2.tgz",
+      "integrity": "sha512-+UKFeaMVTfSQvMO0PTzOyLXSr7OZbF2Rx1iNVwo2XsyiOsd4MSuLyJKUwRmGn67044QpbNzr+VD6/8iBBLExWw==",
+      "requires": {
+        "@types/d3-selection": "*"
+      }
+    },
+    "@types/d3-dsv": {
+      "version": "1.0.34",
+      "resolved": "https://registry.npmjs.org/@types/d3-dsv/-/d3-dsv-1.0.34.tgz",
+      "integrity": "sha512-/grhPLPFJ17GxH18EB8OSOlqcsLahz1xlKb08cVUu3OP83wBPxfoX2otVvLJDTL6BEP0kyTNsA2SdGrRhWwSBQ=="
+    },
+    "@types/d3-ease": {
+      "version": "1.0.7",
+      "resolved": "https://registry.npmjs.org/@types/d3-ease/-/d3-ease-1.0.7.tgz",
+      "integrity": "sha1-k6MBhovp4VBh89RDQ7GrP4rLbwk="
+    },
+    "@types/d3-fetch": {
+      "version": "1.1.4",
+      "resolved": "https://registry.npmjs.org/@types/d3-fetch/-/d3-fetch-1.1.4.tgz",
+      "integrity": "sha512-POR6AHGEjUk8VjHhU2HfcKxVKnZUIhhHjU65greJs34NlfmWfaDxE+6+ABeMsRCAWa/DRTRNe+1ExuMPBwb7/Q==",
+      "requires": {
+        "@types/d3-dsv": "*"
+      }
+    },
+    "@types/d3-force": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/@types/d3-force/-/d3-force-1.1.1.tgz",
+      "integrity": "sha512-ePkELuaFWY4yOuf+Bvx5Xd+ihFiYG4bdnW0BlvigovIm8Sob2t76e9RGO6lybQbv6AlW9Icn9HuZ9fmdzEoJyg=="
+    },
+    "@types/d3-format": {
+      "version": "1.3.0",
+      "resolved": "https://registry.npmjs.org/@types/d3-format/-/d3-format-1.3.0.tgz",
+      "integrity": "sha512-ZiY4j3iJvAdOwzwW24WjlZbUNvqOsnPAMfPBmdXqxj3uKJbrzBlRrdGl5uC89pZpFs9Dc92E81KcwG2uEgkIZA=="
+    },
+    "@types/d3-geo": {
+      "version": "1.10.3",
+      "resolved": "https://registry.npmjs.org/@types/d3-geo/-/d3-geo-1.10.3.tgz",
+      "integrity": "sha512-hfdaxM2L0wA9mDZrrSf2o+DyhEpnJYCiAN+lHFtpfZOVCQrYBA5g33sGRpUbAvjSMyO5jkHbftMWPEhuCMChSg==",
+      "requires": {
+        "@types/geojson": "*"
+      }
+    },
+    "@types/d3-hierarchy": {
+      "version": "1.1.5",
+      "resolved": "https://registry.npmjs.org/@types/d3-hierarchy/-/d3-hierarchy-1.1.5.tgz",
+      "integrity": "sha512-DKhqURrURt2c7MsF9sHiF2wrWf2+yZR4Q9oIG026t/ZY4VWoM0Yd7UonaR+rygyReWcFSEjKC/+5A27TgD8R8g=="
+    },
+    "@types/d3-interpolate": {
+      "version": "1.3.0",
+      "resolved": "https://registry.npmjs.org/@types/d3-interpolate/-/d3-interpolate-1.3.0.tgz",
+      "integrity": "sha512-Ng4ds7kPSvP/c3W3J5PPUQlgewif1tGBqCeh5lgY+UG82Y7H9zQ8c2gILsEFDLg7wRGOwnuKZ940Q/LSN14w9w==",
+      "requires": {
+        "@types/d3-color": "*"
+      }
+    },
+    "@types/d3-path": {
+      "version": "1.0.7",
+      "resolved": "https://registry.npmjs.org/@types/d3-path/-/d3-path-1.0.7.tgz",
+      "integrity": "sha512-U8dFRG+8WhkLJr2sxZ9Cw/5WeRgBnNqMxGdA1+Z0+ZG6tK0s75OQ4OXnxeyfKuh6E4wQPY8OAKr1+iNDx01BEQ=="
+    },
+    "@types/d3-polygon": {
+      "version": "1.0.6",
+      "resolved": "https://registry.npmjs.org/@types/d3-polygon/-/d3-polygon-1.0.6.tgz",
+      "integrity": "sha512-E6Kyodn9JThgLq20nxSbEce9ow5/ePgm9PX2EO6W1INIL4DayM7cFaiG10DStuamjYAd0X4rntW2q+GRjiIktw=="
+    },
+    "@types/d3-quadtree": {
+      "version": "1.0.6",
+      "resolved": "https://registry.npmjs.org/@types/d3-quadtree/-/d3-quadtree-1.0.6.tgz",
+      "integrity": "sha512-sphVuDdiSIaxLt9kQgebJW98pTktQ/xuN7Ysd8X68Rnjeg/q8+c36/ShlqU52qoKg9nob/JEHH1uQMdxURZidQ=="
+    },
+    "@types/d3-random": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/@types/d3-random/-/d3-random-1.1.1.tgz",
+      "integrity": "sha512-jUPeBq1XKK9/5XasTvy5QAUwFeMsjma2yt/nP02yC2Tijovx7i/W5776U/HZugxc5SSmtpx4Z3g9KFVon0QrjQ=="
+    },
+    "@types/d3-scale": {
+      "version": "2.1.0",
+      "resolved": "https://registry.npmjs.org/@types/d3-scale/-/d3-scale-2.1.0.tgz",
+      "integrity": "sha512-vLzRDF5lRxZdCLUOvmw90pkiuSsZdgroBQaat0Ov7Z7OnO9iJsPSm/TZw3wW6m2z/NhIn1E4N0RLNfEi1k4kAA==",
+      "requires": {
+        "@types/d3-time": "*"
+      }
+    },
+    "@types/d3-scale-chromatic": {
+      "version": "1.3.0",
+      "resolved": "https://registry.npmjs.org/@types/d3-scale-chromatic/-/d3-scale-chromatic-1.3.0.tgz",
+      "integrity": "sha512-JqQH5uu1kmdQEa6XSu7NYzQM71lL1YreBPS5o8SnmEDcBRKL6ooykXa8iFPPOEUiTah25ydi+cTrbsogBSMNSQ=="
+    },
+    "@types/d3-selection": {
+      "version": "1.3.4",
+      "resolved": "https://registry.npmjs.org/@types/d3-selection/-/d3-selection-1.3.4.tgz",
+      "integrity": "sha512-WQ6Ivy7VuUlZ/Grqc8493ZxC+y/fpvZLy5+8ELvmCr2hll8eJPUqC05l6fgRRA7kjqlpbH7lbmvY6pRKf6yzxw=="
+    },
+    "@types/d3-shape": {
+      "version": "1.2.7",
+      "resolved": "https://registry.npmjs.org/@types/d3-shape/-/d3-shape-1.2.7.tgz",
+      "integrity": "sha512-b2jpGcddOseeNxchaR1SNLqA5xZAbgKix3cXiFeuGeYIEAEUu91UbtelCxOHIUTbNURFnjcbkf4plRbejNzVaQ==",
+      "requires": {
+        "@types/d3-path": "*"
+      }
+    },
+    "@types/d3-time": {
+      "version": "1.0.9",
+      "resolved": "https://registry.npmjs.org/@types/d3-time/-/d3-time-1.0.9.tgz",
+      "integrity": "sha512-m+D4NbQdDlTVaO7QgXAnatR3IDxQYDMBtRhgSCi5rs9R1LPq1y7/2aqa1FJ2IWjFm1mOV63swDxonnCDlHgHMA=="
+    },
+    "@types/d3-time-format": {
+      "version": "2.1.0",
+      "resolved": "https://registry.npmjs.org/@types/d3-time-format/-/d3-time-format-2.1.0.tgz",
+      "integrity": "sha512-/myT3I7EwlukNOX2xVdMzb8FRgNzRMpsZddwst9Ld/VFe6LyJyRp0s32l/V9XoUzk+Gqu56F/oGk6507+8BxrA=="
+    },
+    "@types/d3-timer": {
+      "version": "1.0.8",
+      "resolved": "https://registry.npmjs.org/@types/d3-timer/-/d3-timer-1.0.8.tgz",
+      "integrity": "sha512-AKUgQ/nljUFcUO2P3gK24weVI5XwUTdJvjoh8gJ0yxT4aJ+d7t2Or3TB+k9dEYl14BAjoj32D0ky+YzQSVszfg=="
+    },
+    "@types/d3-transition": {
+      "version": "1.1.3",
+      "resolved": "https://registry.npmjs.org/@types/d3-transition/-/d3-transition-1.1.3.tgz",
+      "integrity": "sha512-1EukXNuVu/z2G1GZpZagzFJnie9C5zze17ox/vhTgGXNy46rYAm4UkhLLlUeeZ1ndq88k95SOeC8898RpKMLOQ==",
+      "requires": {
+        "@types/d3-selection": "*"
+      }
+    },
+    "@types/d3-voronoi": {
+      "version": "1.1.8",
+      "resolved": "https://registry.npmjs.org/@types/d3-voronoi/-/d3-voronoi-1.1.8.tgz",
+      "integrity": "sha512-zqNhW7QsYQGlfOdrwPNPG3Wk64zUa4epKRurkJ/dVc6oeXrB+iTDt8sRZ0KZKOOXvvfa1dcdB0e45TZeLBiodQ=="
+    },
+    "@types/d3-zoom": {
+      "version": "1.7.3",
+      "resolved": "https://registry.npmjs.org/@types/d3-zoom/-/d3-zoom-1.7.3.tgz",
+      "integrity": "sha512-Tz7+z4+Id0MxERw/ozinC5QHJmGLARs9Mpi/7VVfiR+9AHcFGe9q+fjQa30/oPNY8WPuCh5p5uuXmBYAJ3y91Q==",
+      "requires": {
+        "@types/d3-interpolate": "*",
+        "@types/d3-selection": "*"
+      }
+    },
+    "@types/estree": {
+      "version": "0.0.39",
+      "resolved": "https://registry.npmjs.org/@types/estree/-/estree-0.0.39.tgz",
+      "integrity": "sha512-EYNwp3bU+98cpU4lAWYYL7Zz+2gryWH1qbdDTidVd6hkiR6weksdbMadyXKXNPEkQFhXM+hVO9ZygomHXp+AIw=="
+    },
+    "@types/geojson": {
+      "version": "7946.0.4",
+      "resolved": "https://registry.npmjs.org/@types/geojson/-/geojson-7946.0.4.tgz",
+      "integrity": "sha512-MHmwBtCb7OCv1DSivz2UNJXPGU/1btAWRKlqJ2saEhVJkpkvqHMMaOpKg0v4sAbDWSQekHGvPVMM8nQ+Jen03Q=="
+    },
+    "@types/json5": {
+      "version": "0.0.29",
+      "resolved": "https://registry.npmjs.org/@types/json5/-/json5-0.0.29.tgz",
+      "integrity": "sha1-7ihweulOEdK4J7y+UnC86n8+ce4=",
+      "dev": true,
+      "optional": true
+    },
+    "@types/node": {
+      "version": "10.12.18",
+      "resolved": "https://registry.npmjs.org/@types/node/-/node-10.12.18.tgz",
+      "integrity": "sha512-fh+pAqt4xRzPfqA6eh3Z2y6fyZavRIumvjhaCL753+TVkGKGhpPeyrJG2JftD0T9q4GF00KjefsQ+PQNDdWQaQ=="
+    },
+    "JSONStream": {
+      "version": "1.3.5",
+      "resolved": "https://registry.npmjs.org/JSONStream/-/JSONStream-1.3.5.tgz",
+      "integrity": "sha512-E+iruNOY8VV9s4JEbe1aNEm6MiszPRr/UfcHMz0TQh1BXSxHK+ASV1R6W4HpjBhSeS+54PIsAMCBmwD06LLsqQ==",
+      "dev": true,
+      "requires": {
+        "jsonparse": "^1.2.0",
+        "through": ">=2.2.7 <3"
+      }
+    },
+    "accepts": {
+      "version": "1.3.5",
+      "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.5.tgz",
+      "integrity": "sha1-63d99gEXI6OxTopywIBcjoZ0a9I=",
+      "dev": true,
+      "requires": {
+        "mime-types": "~2.1.18",
+        "negotiator": "0.6.1"
+      }
+    },
+    "ansi-escape-sequences": {
+      "version": "4.0.0",
+      "resolved": "https://registry.npmjs.org/ansi-escape-sequences/-/ansi-escape-sequences-4.0.0.tgz",
+      "integrity": "sha512-v+0wW9Wezwsyb0uF4aBVCjmSqit3Ru7PZFziGF0o2KwTvN2zWfTi3BRLq9EkJFdg3eBbyERXGTntVpBxH1J68Q==",
+      "dev": true,
+      "requires": {
+        "array-back": "^2.0.0"
+      }
+    },
+    "ansi-regex": {
+      "version": "2.1.1",
+      "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz",
+      "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=",
+      "dev": true
+    },
+    "ansi-styles": {
+      "version": "3.2.1",
+      "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
+      "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
+      "dev": true,
+      "requires": {
+        "color-convert": "^1.9.0"
+      }
+    },
+    "any-promise": {
+      "version": "1.3.0",
+      "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz",
+      "integrity": "sha1-q8av7tzqUugJzcA3au0845Y10X8=",
+      "dev": true
+    },
+    "argparse": {
+      "version": "1.0.10",
+      "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz",
+      "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==",
+      "dev": true,
+      "requires": {
+        "sprintf-js": "~1.0.2"
+      }
+    },
+    "argv-tools": {
+      "version": "0.1.1",
+      "resolved": "https://registry.npmjs.org/argv-tools/-/argv-tools-0.1.1.tgz",
+      "integrity": "sha512-Cc0dBvx4dvrjjKpyDA6w8RlNAw8Su30NvZbWl/Tv9ZALEVlLVkWQiHMi84Q0xNfpVuSaiQbYkdmWK8g1PLGhKw==",
+      "dev": true,
+      "requires": {
+        "array-back": "^2.0.0",
+        "find-replace": "^2.0.1"
+      }
+    },
+    "arr-diff": {
+      "version": "4.0.0",
+      "resolved": "https://registry.npmjs.org/arr-diff/-/arr-diff-4.0.0.tgz",
+      "integrity": "sha1-1kYQdP6/7HHn4VI1dhoyml3HxSA="
+    },
+    "arr-flatten": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/arr-flatten/-/arr-flatten-1.1.0.tgz",
+      "integrity": "sha512-L3hKV5R/p5o81R7O02IGnwpDmkp6E982XhtbuwSe3O4qOtMMMtodicASA1Cny2U+aCXcNpml+m4dPsvsJ3jatg=="
+    },
+    "arr-union": {
+      "version": "3.1.0",
+      "resolved": "https://registry.npmjs.org/arr-union/-/arr-union-3.1.0.tgz",
+      "integrity": "sha1-45sJrqne+Gao8gbiiK9jkZuuOcQ="
+    },
+    "array-back": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/array-back/-/array-back-2.0.0.tgz",
+      "integrity": "sha512-eJv4pLLufP3g5kcZry0j6WXpIbzYw9GUB4mVJZno9wfwiBxbizTnHCw3VJb07cBihbFX48Y7oSrW9y+gt4glyw==",
+      "dev": true,
+      "requires": {
+        "typical": "^2.6.1"
+      },
+      "dependencies": {
+        "typical": {
+          "version": "2.6.1",
+          "resolved": "https://registry.npmjs.org/typical/-/typical-2.6.1.tgz",
+          "integrity": "sha1-XAgOXWYcu+OCWdLnCjxyU+hziB0=",
+          "dev": true
+        }
+      }
+    },
+    "array-unique": {
+      "version": "0.3.2",
+      "resolved": "https://registry.npmjs.org/array-unique/-/array-unique-0.3.2.tgz",
+      "integrity": "sha1-qJS3XUvE9s1nnvMkSp/Y9Gri1Cg="
+    },
+    "arrify": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/arrify/-/arrify-1.0.1.tgz",
+      "integrity": "sha1-iYUI2iIm84DfkEcoRWhJwVAaSw0=",
+      "dev": true
+    },
+    "assertion-error": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-1.1.0.tgz",
+      "integrity": "sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==",
+      "dev": true
+    },
+    "assign-symbols": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/assign-symbols/-/assign-symbols-1.0.0.tgz",
+      "integrity": "sha1-WWZ/QfrdTyDMvCu5a41Pf3jsA2c="
+    },
+    "async-limiter": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/async-limiter/-/async-limiter-1.0.0.tgz",
+      "integrity": "sha512-jp/uFnooOiO+L211eZOoSyzpOITMXx1rBITauYykG3BRYPu8h0UcxsPNB04RR5vo4Tyz3+ay17tR6JVf9qzYWg==",
+      "dev": true
+    },
+    "atob": {
+      "version": "2.1.2",
+      "resolved": "https://registry.npmjs.org/atob/-/atob-2.1.2.tgz",
+      "integrity": "sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg=="
+    },
+    "babel-code-frame": {
+      "version": "6.26.0",
+      "resolved": "https://registry.npmjs.org/babel-code-frame/-/babel-code-frame-6.26.0.tgz",
+      "integrity": "sha1-Y/1D99weO7fONZR9uP42mj9Yx0s=",
+      "dev": true,
+      "requires": {
+        "chalk": "^1.1.3",
+        "esutils": "^2.0.2",
+        "js-tokens": "^3.0.2"
+      },
+      "dependencies": {
+        "ansi-styles": {
+          "version": "2.2.1",
+          "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz",
+          "integrity": "sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4=",
+          "dev": true
+        },
+        "chalk": {
+          "version": "1.1.3",
+          "resolved": "https://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz",
+          "integrity": "sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg=",
+          "dev": true,
+          "requires": {
+            "ansi-styles": "^2.2.1",
+            "escape-string-regexp": "^1.0.2",
+            "has-ansi": "^2.0.0",
+            "strip-ansi": "^3.0.0",
+            "supports-color": "^2.0.0"
+          }
+        },
+        "supports-color": {
+          "version": "2.0.0",
+          "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz",
+          "integrity": "sha1-U10EXOa2Nj+kARcIRimZXp3zJMc=",
+          "dev": true
+        }
+      }
+    },
+    "balanced-match": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz",
+      "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=",
+      "dev": true
+    },
+    "base": {
+      "version": "0.11.2",
+      "resolved": "https://registry.npmjs.org/base/-/base-0.11.2.tgz",
+      "integrity": "sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg==",
+      "requires": {
+        "cache-base": "^1.0.1",
+        "class-utils": "^0.3.5",
+        "component-emitter": "^1.2.1",
+        "define-property": "^1.0.0",
+        "isobject": "^3.0.1",
+        "mixin-deep": "^1.2.0",
+        "pascalcase": "^0.1.1"
+      },
+      "dependencies": {
+        "define-property": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz",
+          "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=",
+          "requires": {
+            "is-descriptor": "^1.0.0"
+          }
+        },
+        "is-accessor-descriptor": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
+          "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
+          "requires": {
+            "kind-of": "^6.0.0"
+          }
+        },
+        "is-data-descriptor": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
+          "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
+          "requires": {
+            "kind-of": "^6.0.0"
+          }
+        },
+        "is-descriptor": {
+          "version": "1.0.2",
+          "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
+          "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
+          "requires": {
+            "is-accessor-descriptor": "^1.0.0",
+            "is-data-descriptor": "^1.0.0",
+            "kind-of": "^6.0.2"
+          }
+        }
+      }
+    },
+    "basic-auth": {
+      "version": "1.1.0",
+      "resolved": "http://registry.npmjs.org/basic-auth/-/basic-auth-1.1.0.tgz",
+      "integrity": "sha1-RSIe5Cn37h5QNb4/UVM/HN/SmIQ=",
+      "dev": true
+    },
+    "batch": {
+      "version": "0.6.1",
+      "resolved": "https://registry.npmjs.org/batch/-/batch-0.6.1.tgz",
+      "integrity": "sha1-3DQxT05nkxgJP8dgJyUl+UvyXBY=",
+      "dev": true
+    },
+    "brace-expansion": {
+      "version": "1.1.11",
+      "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
+      "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
+      "dev": true,
+      "requires": {
+        "balanced-match": "^1.0.0",
+        "concat-map": "0.0.1"
+      }
+    },
+    "braces": {
+      "version": "2.3.2",
+      "resolved": "https://registry.npmjs.org/braces/-/braces-2.3.2.tgz",
+      "integrity": "sha512-aNdbnj9P8PjdXU4ybaWLK2IF3jc/EoDYbC7AazW6to3TRsfXxscC9UXOB5iDiEQrkyIbWp2SLQda4+QAa7nc3w==",
+      "requires": {
+        "arr-flatten": "^1.1.0",
+        "array-unique": "^0.3.2",
+        "extend-shallow": "^2.0.1",
+        "fill-range": "^4.0.0",
+        "isobject": "^3.0.1",
+        "repeat-element": "^1.1.2",
+        "snapdragon": "^0.8.1",
+        "snapdragon-node": "^2.0.1",
+        "split-string": "^3.0.2",
+        "to-regex": "^3.0.1"
+      },
+      "dependencies": {
+        "extend-shallow": {
+          "version": "2.0.1",
+          "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+          "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
+          "requires": {
+            "is-extendable": "^0.1.0"
+          }
+        }
+      }
+    },
+    "browser-stdout": {
+      "version": "1.3.1",
+      "resolved": "https://registry.npmjs.org/browser-stdout/-/browser-stdout-1.3.1.tgz",
+      "integrity": "sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw==",
+      "dev": true
+    },
+    "buffer-from": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.1.tgz",
+      "integrity": "sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A==",
+      "dev": true
+    },
+    "builtin-modules": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/builtin-modules/-/builtin-modules-3.0.0.tgz",
+      "integrity": "sha512-hMIeU4K2ilbXV6Uv93ZZ0Avg/M91RaKXucQ+4me2Do1txxBDyDZWCBa5bJSLqoNTRpXTLwEzIk1KmloenDDjhg=="
+    },
+    "byte-size": {
+      "version": "4.0.4",
+      "resolved": "https://registry.npmjs.org/byte-size/-/byte-size-4.0.4.tgz",
+      "integrity": "sha512-82RPeneC6nqCdSwCX2hZUz3JPOvN5at/nTEw/CMf05Smu3Hrpo9Psb7LjN+k+XndNArG1EY8L4+BM3aTM4BCvw==",
+      "dev": true
+    },
+    "bytes": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz",
+      "integrity": "sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg=",
+      "dev": true
+    },
+    "cache-base": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/cache-base/-/cache-base-1.0.1.tgz",
+      "integrity": "sha512-AKcdTnFSWATd5/GCPRxr2ChwIJ85CeyrEyjRHlKxQ56d4XJMGym0uAiKn0xbLOGOl3+yRpOTi484dVCEc5AUzQ==",
+      "requires": {
+        "collection-visit": "^1.0.0",
+        "component-emitter": "^1.2.1",
+        "get-value": "^2.0.6",
+        "has-value": "^1.0.0",
+        "isobject": "^3.0.1",
+        "set-value": "^2.0.0",
+        "to-object-path": "^0.3.0",
+        "union-value": "^1.0.0",
+        "unset-value": "^1.0.0"
+      }
+    },
+    "cache-content-type": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/cache-content-type/-/cache-content-type-1.0.1.tgz",
+      "integrity": "sha512-IKufZ1o4Ut42YUrZSo8+qnMTrFuKkvyoLXUywKz9GJ5BrhOFGhLdkx9sG4KAnVvbY6kEcSFjLQul+DVmBm2bgA==",
+      "dev": true,
+      "requires": {
+        "mime-types": "^2.1.18",
+        "ylru": "^1.2.0"
+      }
+    },
+    "chai": {
+      "version": "4.2.0",
+      "resolved": "https://registry.npmjs.org/chai/-/chai-4.2.0.tgz",
+      "integrity": "sha512-XQU3bhBukrOsQCuwZndwGcCVQHyZi53fQ6Ys1Fym7E4olpIqqZZhhoFJoaKVvV17lWQoXYwgWN2nF5crA8J2jw==",
+      "dev": true,
+      "requires": {
+        "assertion-error": "^1.1.0",
+        "check-error": "^1.0.2",
+        "deep-eql": "^3.0.1",
+        "get-func-name": "^2.0.0",
+        "pathval": "^1.1.0",
+        "type-detect": "^4.0.5"
+      }
+    },
+    "chalk": {
+      "version": "2.4.1",
+      "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.1.tgz",
+      "integrity": "sha512-ObN6h1v2fTJSmUXoS3nMQ92LbDK9be4TV+6G+omQlGJFdcUX5heKi1LZ1YnRMIgwTLEj3E24bT6tYni50rlCfQ==",
+      "dev": true,
+      "requires": {
+        "ansi-styles": "^3.2.1",
+        "escape-string-regexp": "^1.0.5",
+        "supports-color": "^5.3.0"
+      }
+    },
+    "check-error": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/check-error/-/check-error-1.0.2.tgz",
+      "integrity": "sha1-V00xLt2Iu13YkS6Sht1sCu1KrII=",
+      "dev": true
+    },
+    "class-utils": {
+      "version": "0.3.6",
+      "resolved": "https://registry.npmjs.org/class-utils/-/class-utils-0.3.6.tgz",
+      "integrity": "sha512-qOhPa/Fj7s6TY8H8esGu5QNpMMQxz79h+urzrNYN6mn+9BnxlDGf5QZ+XeCDsxSjPqsSR56XOZOJmpeurnLMeg==",
+      "requires": {
+        "arr-union": "^3.1.0",
+        "define-property": "^0.2.5",
+        "isobject": "^3.0.0",
+        "static-extend": "^0.1.1"
+      },
+      "dependencies": {
+        "define-property": {
+          "version": "0.2.5",
+          "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
+          "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
+          "requires": {
+            "is-descriptor": "^0.1.0"
+          }
+        }
+      }
+    },
+    "cli-commands": {
+      "version": "0.4.0",
+      "resolved": "https://registry.npmjs.org/cli-commands/-/cli-commands-0.4.0.tgz",
+      "integrity": "sha512-zAvJlR7roeMgpUIhMDYATYL90vz+9ffuyPr0+qq4LzcZ0Jq+gM+H1KdYKxerc6U2nhitiDEx79YiJlXdrooEOA==",
+      "dev": true,
+      "requires": {
+        "command-line-args": "^5.0.2",
+        "command-line-commands": "^2.0.1"
+      }
+    },
+    "co": {
+      "version": "4.6.0",
+      "resolved": "https://registry.npmjs.org/co/-/co-4.6.0.tgz",
+      "integrity": "sha1-bqa989hTrlTMuOR7+gvz+QMfsYQ=",
+      "dev": true
+    },
+    "co-body": {
+      "version": "6.0.0",
+      "resolved": "https://registry.npmjs.org/co-body/-/co-body-6.0.0.tgz",
+      "integrity": "sha512-9ZIcixguuuKIptnY8yemEOuhb71L/lLf+Rl5JfJEUiDNJk0e02MBt7BPxR2GEh5mw8dPthQYR4jPI/BnS1MQgw==",
+      "dev": true,
+      "requires": {
+        "inflation": "^2.0.0",
+        "qs": "^6.5.2",
+        "raw-body": "^2.3.3",
+        "type-is": "^1.6.16"
+      }
+    },
+    "collection-visit": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/collection-visit/-/collection-visit-1.0.0.tgz",
+      "integrity": "sha1-S8A3PBZLwykbTTaMgpzxqApZ3KA=",
+      "requires": {
+        "map-visit": "^1.0.0",
+        "object-visit": "^1.0.0"
+      }
+    },
+    "color-convert": {
+      "version": "1.9.3",
+      "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz",
+      "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==",
+      "dev": true,
+      "requires": {
+        "color-name": "1.1.3"
+      }
+    },
+    "color-name": {
+      "version": "1.1.3",
+      "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
+      "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=",
+      "dev": true
+    },
+    "command-line-args": {
+      "version": "5.0.2",
+      "resolved": "https://registry.npmjs.org/command-line-args/-/command-line-args-5.0.2.tgz",
+      "integrity": "sha512-/qPcbL8zpqg53x4rAaqMFlRV4opN3pbla7I7k9x8kyOBMQoGT6WltjN6sXZuxOXw6DgdK7Ad+ijYS5gjcr7vlA==",
+      "dev": true,
+      "requires": {
+        "argv-tools": "^0.1.1",
+        "array-back": "^2.0.0",
+        "find-replace": "^2.0.1",
+        "lodash.camelcase": "^4.3.0",
+        "typical": "^2.6.1"
+      },
+      "dependencies": {
+        "typical": {
+          "version": "2.6.1",
+          "resolved": "https://registry.npmjs.org/typical/-/typical-2.6.1.tgz",
+          "integrity": "sha1-XAgOXWYcu+OCWdLnCjxyU+hziB0=",
+          "dev": true
+        }
+      }
+    },
+    "command-line-commands": {
+      "version": "2.0.1",
+      "resolved": "https://registry.npmjs.org/command-line-commands/-/command-line-commands-2.0.1.tgz",
+      "integrity": "sha512-m8c2p1DrNd2ruIAggxd/y6DgygQayf6r8RHwchhXryaLF8I6koYjoYroVP+emeROE9DXN5b9sP1Gh+WtvTTdtQ==",
+      "dev": true,
+      "requires": {
+        "array-back": "^2.0.0"
+      }
+    },
+    "command-line-usage": {
+      "version": "5.0.5",
+      "resolved": "https://registry.npmjs.org/command-line-usage/-/command-line-usage-5.0.5.tgz",
+      "integrity": "sha512-d8NrGylA5oCXSbGoKz05FkehDAzSmIm4K03S5VDh4d5lZAtTWfc3D1RuETtuQCn8129nYfJfDdF7P/lwcz1BlA==",
+      "dev": true,
+      "requires": {
+        "array-back": "^2.0.0",
+        "chalk": "^2.4.1",
+        "table-layout": "^0.4.3",
+        "typical": "^2.6.1"
+      },
+      "dependencies": {
+        "typical": {
+          "version": "2.6.1",
+          "resolved": "https://registry.npmjs.org/typical/-/typical-2.6.1.tgz",
+          "integrity": "sha1-XAgOXWYcu+OCWdLnCjxyU+hziB0=",
+          "dev": true
+        }
+      }
+    },
+    "commander": {
+      "version": "2.15.1",
+      "resolved": "https://registry.npmjs.org/commander/-/commander-2.15.1.tgz",
+      "integrity": "sha512-VlfT9F3V0v+jr4yxPc5gg9s62/fIVWsd2Bk2iD435um1NlGMYdVCq+MjcXnhYq2icNOizHr1kK+5TI6H0Hy0ag=="
+    },
+    "common-log-format": {
+      "version": "0.1.4",
+      "resolved": "https://registry.npmjs.org/common-log-format/-/common-log-format-0.1.4.tgz",
+      "integrity": "sha512-BXcgq+wzr2htmBmnT7cL7YHzPAWketWbr4kozjoM9kWe4sk3+zMgjcH0HO+EddjDlEw2LZysqLpVRwbF318tDw==",
+      "dev": true
+    },
+    "component-emitter": {
+      "version": "1.2.1",
+      "resolved": "https://registry.npmjs.org/component-emitter/-/component-emitter-1.2.1.tgz",
+      "integrity": "sha1-E3kY1teCg/ffemt8WmPhQOaUJeY="
+    },
+    "compressible": {
+      "version": "2.0.15",
+      "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.15.tgz",
+      "integrity": "sha512-4aE67DL33dSW9gw4CI2H/yTxqHLNcxp0yS6jB+4h+wr3e43+1z7vm0HU9qXOH8j+qjKuL8+UtkOxYQSMq60Ylw==",
+      "dev": true,
+      "requires": {
+        "mime-db": ">= 1.36.0 < 2"
+      }
+    },
+    "concat-map": {
+      "version": "0.0.1",
+      "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
+      "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=",
+      "dev": true
+    },
+    "content-disposition": {
+      "version": "0.5.2",
+      "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.2.tgz",
+      "integrity": "sha1-DPaLud318r55YcOoUXjLhdunjLQ=",
+      "dev": true
+    },
+    "content-type": {
+      "version": "1.0.4",
+      "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.4.tgz",
+      "integrity": "sha512-hIP3EEPs8tB9AT1L+NUqtwOAps4mk2Zob89MWXMHjHWg9milF/j4osnnQLXBCBFBk/tvIG/tUc9mOUJiPBhPXA==",
+      "dev": true
+    },
+    "cookies": {
+      "version": "0.7.2",
+      "resolved": "https://registry.npmjs.org/cookies/-/cookies-0.7.2.tgz",
+      "integrity": "sha512-J2JjH9T3PUNKPHknprxgCrCaZshIfxW2j49gq1E1CP5Micj1LppWAR2y9EHSQAzEiX84zOsScWNwUZ0b/ChlMw==",
+      "dev": true,
+      "requires": {
+        "depd": "~1.1.2",
+        "keygrip": "~1.0.2"
+      }
+    },
+    "copy-descriptor": {
+      "version": "0.1.1",
+      "resolved": "https://registry.npmjs.org/copy-descriptor/-/copy-descriptor-0.1.1.tgz",
+      "integrity": "sha1-Z29us8OZl8LuGsOpJP1hJHSPV40="
+    },
+    "copy-to": {
+      "version": "2.0.1",
+      "resolved": "https://registry.npmjs.org/copy-to/-/copy-to-2.0.1.tgz",
+      "integrity": "sha1-JoD7uAaKSNCGVrYJgJK9r8kG9KU=",
+      "dev": true
+    },
+    "core-util-is": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz",
+      "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=",
+      "dev": true
+    },
+    "d3": {
+      "version": "5.7.0",
+      "resolved": "https://registry.npmjs.org/d3/-/d3-5.7.0.tgz",
+      "integrity": "sha512-8KEIfx+dFm8PlbJN9PI0suazrZ41QcaAufsKE9PRcqYPWLngHIyWJZX96n6IQKePGgeSu0l7rtlueSSNq8Zc3g==",
+      "requires": {
+        "d3-array": "1",
+        "d3-axis": "1",
+        "d3-brush": "1",
+        "d3-chord": "1",
+        "d3-collection": "1",
+        "d3-color": "1",
+        "d3-contour": "1",
+        "d3-dispatch": "1",
+        "d3-drag": "1",
+        "d3-dsv": "1",
+        "d3-ease": "1",
+        "d3-fetch": "1",
+        "d3-force": "1",
+        "d3-format": "1",
+        "d3-geo": "1",
+        "d3-hierarchy": "1",
+        "d3-interpolate": "1",
+        "d3-path": "1",
+        "d3-polygon": "1",
+        "d3-quadtree": "1",
+        "d3-random": "1",
+        "d3-scale": "2",
+        "d3-scale-chromatic": "1",
+        "d3-selection": "1",
+        "d3-shape": "1",
+        "d3-time": "1",
+        "d3-time-format": "2",
+        "d3-timer": "1",
+        "d3-transition": "1",
+        "d3-voronoi": "1",
+        "d3-zoom": "1"
+      }
+    },
+    "d3-array": {
+      "version": "1.2.4",
+      "resolved": "https://registry.npmjs.org/d3-array/-/d3-array-1.2.4.tgz",
+      "integrity": "sha512-KHW6M86R+FUPYGb3R5XiYjXPq7VzwxZ22buHhAEVG5ztoEcZZMLov530mmccaqA1GghZArjQV46fuc8kUqhhHw=="
+    },
+    "d3-axis": {
+      "version": "1.0.12",
+      "resolved": "https://registry.npmjs.org/d3-axis/-/d3-axis-1.0.12.tgz",
+      "integrity": "sha512-ejINPfPSNdGFKEOAtnBtdkpr24c4d4jsei6Lg98mxf424ivoDP2956/5HDpIAtmHo85lqT4pruy+zEgvRUBqaQ=="
+    },
+    "d3-brush": {
+      "version": "1.0.6",
+      "resolved": "https://registry.npmjs.org/d3-brush/-/d3-brush-1.0.6.tgz",
+      "integrity": "sha512-lGSiF5SoSqO5/mYGD5FAeGKKS62JdA1EV7HPrU2b5rTX4qEJJtpjaGLJngjnkewQy7UnGstnFd3168wpf5z76w==",
+      "requires": {
+        "d3-dispatch": "1",
+        "d3-drag": "1",
+        "d3-interpolate": "1",
+        "d3-selection": "1",
+        "d3-transition": "1"
+      }
+    },
+    "d3-chord": {
+      "version": "1.0.6",
+      "resolved": "https://registry.npmjs.org/d3-chord/-/d3-chord-1.0.6.tgz",
+      "integrity": "sha512-JXA2Dro1Fxw9rJe33Uv+Ckr5IrAa74TlfDEhE/jfLOaXegMQFQTAgAw9WnZL8+HxVBRXaRGCkrNU7pJeylRIuA==",
+      "requires": {
+        "d3-array": "1",
+        "d3-path": "1"
+      }
+    },
+    "d3-collection": {
+      "version": "1.0.7",
+      "resolved": "https://registry.npmjs.org/d3-collection/-/d3-collection-1.0.7.tgz",
+      "integrity": "sha512-ii0/r5f4sjKNTfh84Di+DpztYwqKhEyUlKoPrzUFfeSkWxjW49xU2QzO9qrPrNkpdI0XJkfzvmTu8V2Zylln6A=="
+    },
+    "d3-color": {
+      "version": "1.2.3",
+      "resolved": "https://registry.npmjs.org/d3-color/-/d3-color-1.2.3.tgz",
+      "integrity": "sha512-x37qq3ChOTLd26hnps36lexMRhNXEtVxZ4B25rL0DVdDsGQIJGB18S7y9XDwlDD6MD/ZBzITCf4JjGMM10TZkw=="
+    },
+    "d3-contour": {
+      "version": "1.3.2",
+      "resolved": "https://registry.npmjs.org/d3-contour/-/d3-contour-1.3.2.tgz",
+      "integrity": "sha512-hoPp4K/rJCu0ladiH6zmJUEz6+u3lgR+GSm/QdM2BBvDraU39Vr7YdDCicJcxP1z8i9B/2dJLgDC1NcvlF8WCg==",
+      "requires": {
+        "d3-array": "^1.1.1"
+      }
+    },
+    "d3-dispatch": {
+      "version": "1.0.5",
+      "resolved": "https://registry.npmjs.org/d3-dispatch/-/d3-dispatch-1.0.5.tgz",
+      "integrity": "sha512-vwKx+lAqB1UuCeklr6Jh1bvC4SZgbSqbkGBLClItFBIYH4vqDJCA7qfoy14lXmJdnBOdxndAMxjCbImJYW7e6g=="
+    },
+    "d3-drag": {
+      "version": "1.2.3",
+      "resolved": "https://registry.npmjs.org/d3-drag/-/d3-drag-1.2.3.tgz",
+      "integrity": "sha512-8S3HWCAg+ilzjJsNtWW1Mutl74Nmzhb9yU6igspilaJzeZVFktmY6oO9xOh5TDk+BM2KrNFjttZNoJJmDnkjkg==",
+      "requires": {
+        "d3-dispatch": "1",
+        "d3-selection": "1"
+      }
+    },
+    "d3-dsv": {
+      "version": "1.0.10",
+      "resolved": "https://registry.npmjs.org/d3-dsv/-/d3-dsv-1.0.10.tgz",
+      "integrity": "sha512-vqklfpxmtO2ZER3fq/B33R/BIz3A1PV0FaZRuFM8w6jLo7sUX1BZDh73fPlr0s327rzq4H6EN1q9U+eCBCSN8g==",
+      "requires": {
+        "commander": "2",
+        "iconv-lite": "0.4",
+        "rw": "1"
+      }
+    },
+    "d3-ease": {
+      "version": "1.0.5",
+      "resolved": "https://registry.npmjs.org/d3-ease/-/d3-ease-1.0.5.tgz",
+      "integrity": "sha512-Ct1O//ly5y5lFM9YTdu+ygq7LleSgSE4oj7vUt9tPLHUi8VCV7QoizGpdWRWAwCO9LdYzIrQDg97+hGVdsSGPQ=="
+    },
+    "d3-fetch": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/d3-fetch/-/d3-fetch-1.1.2.tgz",
+      "integrity": "sha512-S2loaQCV/ZeyTyIF2oP8D1K9Z4QizUzW7cWeAOAS4U88qOt3Ucf6GsmgthuYSdyB2HyEm4CeGvkQxWsmInsIVA==",
+      "requires": {
+        "d3-dsv": "1"
+      }
+    },
+    "d3-force": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/d3-force/-/d3-force-1.1.2.tgz",
+      "integrity": "sha512-p1vcHAUF1qH7yR+e8ip7Bs61AHjLeKkIn8Z2gzwU2lwEf2wkSpWdjXG0axudTHsVFnYGlMkFaEsVy2l8tAg1Gw==",
+      "requires": {
+        "d3-collection": "1",
+        "d3-dispatch": "1",
+        "d3-quadtree": "1",
+        "d3-timer": "1"
+      }
+    },
+    "d3-format": {
+      "version": "1.3.2",
+      "resolved": "https://registry.npmjs.org/d3-format/-/d3-format-1.3.2.tgz",
+      "integrity": "sha512-Z18Dprj96ExragQ0DeGi+SYPQ7pPfRMtUXtsg/ChVIKNBCzjO8XYJvRTC1usblx52lqge56V5ect+frYTQc8WQ=="
+    },
+    "d3-geo": {
+      "version": "1.11.3",
+      "resolved": "https://registry.npmjs.org/d3-geo/-/d3-geo-1.11.3.tgz",
+      "integrity": "sha512-n30yN9qSKREvV2fxcrhmHUdXP9TNH7ZZj3C/qnaoU0cVf/Ea85+yT7HY7i8ySPwkwjCNYtmKqQFTvLFngfkItQ==",
+      "requires": {
+        "d3-array": "1"
+      }
+    },
+    "d3-hierarchy": {
+      "version": "1.1.8",
+      "resolved": "https://registry.npmjs.org/d3-hierarchy/-/d3-hierarchy-1.1.8.tgz",
+      "integrity": "sha512-L+GHMSZNwTpiq4rt9GEsNcpLa4M96lXMR8M/nMG9p5hBE0jy6C+3hWtyZMenPQdwla249iJy7Nx0uKt3n+u9+w=="
+    },
+    "d3-interpolate": {
+      "version": "1.3.2",
+      "resolved": "https://registry.npmjs.org/d3-interpolate/-/d3-interpolate-1.3.2.tgz",
+      "integrity": "sha512-NlNKGopqaz9qM1PXh9gBF1KSCVh+jSFErrSlD/4hybwoNX/gt1d8CDbDW+3i+5UOHhjC6s6nMvRxcuoMVNgL2w==",
+      "requires": {
+        "d3-color": "1"
+      }
+    },
+    "d3-path": {
+      "version": "1.0.7",
+      "resolved": "https://registry.npmjs.org/d3-path/-/d3-path-1.0.7.tgz",
+      "integrity": "sha512-q0cW1RpvA5c5ma2rch62mX8AYaiLX0+bdaSM2wxSU9tXjU4DNvkx9qiUvjkuWCj3p22UO/hlPivujqMiR9PDzA=="
+    },
+    "d3-polygon": {
+      "version": "1.0.5",
+      "resolved": "https://registry.npmjs.org/d3-polygon/-/d3-polygon-1.0.5.tgz",
+      "integrity": "sha512-RHhh1ZUJZfhgoqzWWuRhzQJvO7LavchhitSTHGu9oj6uuLFzYZVeBzaWTQ2qSO6bz2w55RMoOCf0MsLCDB6e0w=="
+    },
+    "d3-quadtree": {
+      "version": "1.0.5",
+      "resolved": "https://registry.npmjs.org/d3-quadtree/-/d3-quadtree-1.0.5.tgz",
+      "integrity": "sha512-U2tjwDFbZ75JRAg8A+cqMvqPg1G3BE7UTJn3h8DHjY/pnsAfWdbJKgyfcy7zKjqGtLAmI0q8aDSeG1TVIKRaHQ=="
+    },
+    "d3-random": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/d3-random/-/d3-random-1.1.2.tgz",
+      "integrity": "sha512-6AK5BNpIFqP+cx/sreKzNjWbwZQCSUatxq+pPRmFIQaWuoD+NrbVWw7YWpHiXpCQ/NanKdtGDuB+VQcZDaEmYQ=="
+    },
+    "d3-scale": {
+      "version": "2.1.2",
+      "resolved": "https://registry.npmjs.org/d3-scale/-/d3-scale-2.1.2.tgz",
+      "integrity": "sha512-bESpd64ylaKzCDzvULcmHKZTlzA/6DGSVwx7QSDj/EnX9cpSevsdiwdHFYI9ouo9tNBbV3v5xztHS2uFeOzh8Q==",
+      "requires": {
+        "d3-array": "^1.2.0",
+        "d3-collection": "1",
+        "d3-format": "1",
+        "d3-interpolate": "1",
+        "d3-time": "1",
+        "d3-time-format": "2"
+      }
+    },
+    "d3-scale-chromatic": {
+      "version": "1.3.3",
+      "resolved": "https://registry.npmjs.org/d3-scale-chromatic/-/d3-scale-chromatic-1.3.3.tgz",
+      "integrity": "sha512-BWTipif1CimXcYfT02LKjAyItX5gKiwxuPRgr4xM58JwlLocWbjPLI7aMEjkcoOQXMkYsmNsvv3d2yl/OKuHHw==",
+      "requires": {
+        "d3-color": "1",
+        "d3-interpolate": "1"
+      }
+    },
+    "d3-selection": {
+      "version": "1.3.2",
+      "resolved": "https://registry.npmjs.org/d3-selection/-/d3-selection-1.3.2.tgz",
+      "integrity": "sha512-OoXdv1nZ7h2aKMVg3kaUFbLLK5jXUFAMLD/Tu5JA96mjf8f2a9ZUESGY+C36t8R1WFeWk/e55hy54Ml2I62CRQ=="
+    },
+    "d3-shape": {
+      "version": "1.2.2",
+      "resolved": "https://registry.npmjs.org/d3-shape/-/d3-shape-1.2.2.tgz",
+      "integrity": "sha512-hUGEozlKecFZ2bOSNt7ENex+4Tk9uc/m0TtTEHBvitCBxUNjhzm5hS2GrrVRD/ae4IylSmxGeqX5tWC2rASMlQ==",
+      "requires": {
+        "d3-path": "1"
+      }
+    },
+    "d3-time": {
+      "version": "1.0.10",
+      "resolved": "https://registry.npmjs.org/d3-time/-/d3-time-1.0.10.tgz",
+      "integrity": "sha512-hF+NTLCaJHF/JqHN5hE8HVGAXPStEq6/omumPE/SxyHVrR7/qQxusFDo0t0c/44+sCGHthC7yNGFZIEgju0P8g=="
+    },
+    "d3-time-format": {
+      "version": "2.1.3",
+      "resolved": "https://registry.npmjs.org/d3-time-format/-/d3-time-format-2.1.3.tgz",
+      "integrity": "sha512-6k0a2rZryzGm5Ihx+aFMuO1GgelgIz+7HhB4PH4OEndD5q2zGn1mDfRdNrulspOfR6JXkb2sThhDK41CSK85QA==",
+      "requires": {
+        "d3-time": "1"
+      }
+    },
+    "d3-timer": {
+      "version": "1.0.9",
+      "resolved": "https://registry.npmjs.org/d3-timer/-/d3-timer-1.0.9.tgz",
+      "integrity": "sha512-rT34J5HnQUHhcLvhSB9GjCkN0Ddd5Y8nCwDBG2u6wQEeYxT/Lf51fTFFkldeib/sE/J0clIe0pnCfs6g/lRbyg=="
+    },
+    "d3-transition": {
+      "version": "1.1.3",
+      "resolved": "https://registry.npmjs.org/d3-transition/-/d3-transition-1.1.3.tgz",
+      "integrity": "sha512-tEvo3qOXL6pZ1EzcXxFcPNxC/Ygivu5NoBY6mbzidATAeML86da+JfVIUzon3dNM6UX6zjDx+xbYDmMVtTSjuA==",
+      "requires": {
+        "d3-color": "1",
+        "d3-dispatch": "1",
+        "d3-ease": "1",
+        "d3-interpolate": "1",
+        "d3-selection": "^1.1.0",
+        "d3-timer": "1"
+      }
+    },
+    "d3-voronoi": {
+      "version": "1.1.4",
+      "resolved": "https://registry.npmjs.org/d3-voronoi/-/d3-voronoi-1.1.4.tgz",
+      "integrity": "sha512-dArJ32hchFsrQ8uMiTBLq256MpnZjeuBtdHpaDlYuQyjU0CVzCJl/BVW+SkszaAeH95D/8gxqAhgx0ouAWAfRg=="
+    },
+    "d3-zoom": {
+      "version": "1.7.3",
+      "resolved": "https://registry.npmjs.org/d3-zoom/-/d3-zoom-1.7.3.tgz",
+      "integrity": "sha512-xEBSwFx5Z9T3/VrwDkMt+mr0HCzv7XjpGURJ8lWmIC8wxe32L39eWHIasEe/e7Ox8MPU4p1hvH8PKN2olLzIBg==",
+      "requires": {
+        "d3-dispatch": "1",
+        "d3-drag": "1",
+        "d3-interpolate": "1",
+        "d3-selection": "1",
+        "d3-transition": "1"
+      }
+    },
+    "debug": {
+      "version": "3.1.0",
+      "resolved": "https://registry.npmjs.org/debug/-/debug-3.1.0.tgz",
+      "integrity": "sha512-OX8XqP7/1a9cqkxYw2yXss15f26NKWBpDXQd0/uK/KPqdQhxbPa994hnzjcE2VqQpDslf55723cKPUOGSmMY3g==",
+      "dev": true,
+      "requires": {
+        "ms": "2.0.0"
+      }
+    },
+    "decode-uri-component": {
+      "version": "0.2.0",
+      "resolved": "https://registry.npmjs.org/decode-uri-component/-/decode-uri-component-0.2.0.tgz",
+      "integrity": "sha1-6zkTMzRYd1y4TNGh+uBiEGu4dUU="
+    },
+    "deep-eql": {
+      "version": "3.0.1",
+      "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-3.0.1.tgz",
+      "integrity": "sha512-+QeIQyN5ZuO+3Uk5DYh6/1eKO0m0YmJFGNmFHGACpf1ClL1nmlV/p4gNgbl2pJGxgXb4faqo6UE+M5ACEMyVcw==",
+      "dev": true,
+      "requires": {
+        "type-detect": "^4.0.0"
+      }
+    },
+    "deep-equal": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/deep-equal/-/deep-equal-1.0.1.tgz",
+      "integrity": "sha1-9dJgKStmDghO/0zbyfCK0yR0SLU=",
+      "dev": true
+    },
+    "deep-extend": {
+      "version": "0.6.0",
+      "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz",
+      "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==",
+      "dev": true
+    },
+    "deepmerge": {
+      "version": "2.2.1",
+      "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-2.2.1.tgz",
+      "integrity": "sha512-R9hc1Xa/NOBi9WRVUWg19rl1UB7Tt4kuPd+thNJgFZoxXsTz7ncaPaeIm+40oSGuP33DfMb4sZt1QIGiJzC4EA==",
+      "dev": true,
+      "optional": true
+    },
+    "defer-promise": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/defer-promise/-/defer-promise-1.0.1.tgz",
+      "integrity": "sha1-HKb/7dvO8XFd16riXHYW+a4iky8=",
+      "dev": true
+    },
+    "define-property": {
+      "version": "2.0.2",
+      "resolved": "https://registry.npmjs.org/define-property/-/define-property-2.0.2.tgz",
+      "integrity": "sha512-jwK2UV4cnPpbcG7+VRARKTZPUWowwXA8bzH5NP6ud0oeAxyYPuGZUAC7hMugpCdz4BeSZl2Dl9k66CHJ/46ZYQ==",
+      "requires": {
+        "is-descriptor": "^1.0.2",
+        "isobject": "^3.0.1"
+      },
+      "dependencies": {
+        "is-accessor-descriptor": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
+          "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
+          "requires": {
+            "kind-of": "^6.0.0"
+          }
+        },
+        "is-data-descriptor": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
+          "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
+          "requires": {
+            "kind-of": "^6.0.0"
+          }
+        },
+        "is-descriptor": {
+          "version": "1.0.2",
+          "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
+          "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
+          "requires": {
+            "is-accessor-descriptor": "^1.0.0",
+            "is-data-descriptor": "^1.0.0",
+            "kind-of": "^6.0.2"
+          }
+        }
+      }
+    },
+    "delegates": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/delegates/-/delegates-1.0.0.tgz",
+      "integrity": "sha1-hMbhWbgZBP3KWaDvRM2HDTElD5o=",
+      "dev": true
+    },
+    "depd": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz",
+      "integrity": "sha1-m81S4UwJd2PnSbJ0xDRu0uVgtak=",
+      "dev": true
+    },
+    "destroy": {
+      "version": "1.0.4",
+      "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.0.4.tgz",
+      "integrity": "sha1-l4hXRCxEdJ5CBmE+N5RiBYJqvYA=",
+      "dev": true
+    },
+    "diff": {
+      "version": "3.5.0",
+      "resolved": "https://registry.npmjs.org/diff/-/diff-3.5.0.tgz",
+      "integrity": "sha512-A46qtFgd+g7pDZinpnwiRJtxbC1hpgf0uzP3iG89scHk0AUC7A1TGxf5OiiOUv/JMZR8GOt8hL900hV0bOy5xA==",
+      "dev": true
+    },
+    "ee-first": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
+      "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0=",
+      "dev": true
+    },
+    "error-inject": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/error-inject/-/error-inject-1.0.0.tgz",
+      "integrity": "sha1-4rPZG1Su1nLzCdlQ0VSFD6EdTzc=",
+      "dev": true
+    },
+    "escape-html": {
+      "version": "1.0.3",
+      "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz",
+      "integrity": "sha1-Aljq5NPQwJdN4cFpGI7wBR0dGYg=",
+      "dev": true
+    },
+    "escape-string-regexp": {
+      "version": "1.0.5",
+      "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz",
+      "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=",
+      "dev": true
+    },
+    "esprima": {
+      "version": "4.0.1",
+      "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz",
+      "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==",
+      "dev": true
+    },
+    "estree-walker": {
+      "version": "0.6.0",
+      "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-0.6.0.tgz",
+      "integrity": "sha512-peq1RfVAVzr3PU/jL31RaOjUKLoZJpObQWJJ+LgfcxDUifyLZ1RjPQZTl0pzj2uJ45b7A7XpyppXvxdEqzo4rw=="
+    },
+    "esutils": {
+      "version": "2.0.2",
+      "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.2.tgz",
+      "integrity": "sha1-Cr9PHKpbyx96nYrMbepPqqBLrJs=",
+      "dev": true
+    },
+    "etag": {
+      "version": "1.8.1",
+      "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz",
+      "integrity": "sha1-Qa4u62XvpiJorr/qg6x9eSmbCIc=",
+      "dev": true
+    },
+    "expand-brackets": {
+      "version": "2.1.4",
+      "resolved": "https://registry.npmjs.org/expand-brackets/-/expand-brackets-2.1.4.tgz",
+      "integrity": "sha1-t3c14xXOMPa27/D4OwQVGiJEliI=",
+      "requires": {
+        "debug": "^2.3.3",
+        "define-property": "^0.2.5",
+        "extend-shallow": "^2.0.1",
+        "posix-character-classes": "^0.1.0",
+        "regex-not": "^1.0.0",
+        "snapdragon": "^0.8.1",
+        "to-regex": "^3.0.1"
+      },
+      "dependencies": {
+        "debug": {
+          "version": "2.6.9",
+          "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+          "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+          "requires": {
+            "ms": "2.0.0"
+          }
+        },
+        "define-property": {
+          "version": "0.2.5",
+          "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
+          "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
+          "requires": {
+            "is-descriptor": "^0.1.0"
+          }
+        },
+        "extend-shallow": {
+          "version": "2.0.1",
+          "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+          "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
+          "requires": {
+            "is-extendable": "^0.1.0"
+          }
+        }
+      }
+    },
+    "extend-shallow": {
+      "version": "3.0.2",
+      "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-3.0.2.tgz",
+      "integrity": "sha1-Jqcarwc7OfshJxcnRhMcJwQCjbg=",
+      "requires": {
+        "assign-symbols": "^1.0.0",
+        "is-extendable": "^1.0.1"
+      },
+      "dependencies": {
+        "is-extendable": {
+          "version": "1.0.1",
+          "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz",
+          "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==",
+          "requires": {
+            "is-plain-object": "^2.0.4"
+          }
+        }
+      }
+    },
+    "extglob": {
+      "version": "2.0.4",
+      "resolved": "https://registry.npmjs.org/extglob/-/extglob-2.0.4.tgz",
+      "integrity": "sha512-Nmb6QXkELsuBr24CJSkilo6UHHgbekK5UiZgfE6UHD3Eb27YC6oD+bhcT+tJ6cl8dmsgdQxnWlcry8ksBIBLpw==",
+      "requires": {
+        "array-unique": "^0.3.2",
+        "define-property": "^1.0.0",
+        "expand-brackets": "^2.1.4",
+        "extend-shallow": "^2.0.1",
+        "fragment-cache": "^0.2.1",
+        "regex-not": "^1.0.0",
+        "snapdragon": "^0.8.1",
+        "to-regex": "^3.0.1"
+      },
+      "dependencies": {
+        "define-property": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz",
+          "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=",
+          "requires": {
+            "is-descriptor": "^1.0.0"
+          }
+        },
+        "extend-shallow": {
+          "version": "2.0.1",
+          "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+          "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
+          "requires": {
+            "is-extendable": "^0.1.0"
+          }
+        },
+        "is-accessor-descriptor": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
+          "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
+          "requires": {
+            "kind-of": "^6.0.0"
+          }
+        },
+        "is-data-descriptor": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
+          "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
+          "requires": {
+            "kind-of": "^6.0.0"
+          }
+        },
+        "is-descriptor": {
+          "version": "1.0.2",
+          "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
+          "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
+          "requires": {
+            "is-accessor-descriptor": "^1.0.0",
+            "is-data-descriptor": "^1.0.0",
+            "kind-of": "^6.0.2"
+          }
+        }
+      }
+    },
+    "fill-range": {
+      "version": "4.0.0",
+      "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-4.0.0.tgz",
+      "integrity": "sha1-1USBHUKPmOsGpj3EAtJAPDKMOPc=",
+      "requires": {
+        "extend-shallow": "^2.0.1",
+        "is-number": "^3.0.0",
+        "repeat-string": "^1.6.1",
+        "to-regex-range": "^2.1.0"
+      },
+      "dependencies": {
+        "extend-shallow": {
+          "version": "2.0.1",
+          "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+          "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
+          "requires": {
+            "is-extendable": "^0.1.0"
+          }
+        }
+      }
+    },
+    "find-replace": {
+      "version": "2.0.1",
+      "resolved": "https://registry.npmjs.org/find-replace/-/find-replace-2.0.1.tgz",
+      "integrity": "sha512-LzDo3Fpa30FLIBsh6DCDnMN1KW2g4QKkqKmejlImgWY67dDFPX/x9Kh/op/GK522DchQXEvDi/wD48HKW49XOQ==",
+      "dev": true,
+      "requires": {
+        "array-back": "^2.0.0",
+        "test-value": "^3.0.0"
+      }
+    },
+    "for-in": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/for-in/-/for-in-1.0.2.tgz",
+      "integrity": "sha1-gQaNKVqBQuwKxybG4iAMMPttXoA="
+    },
+    "fragment-cache": {
+      "version": "0.2.1",
+      "resolved": "https://registry.npmjs.org/fragment-cache/-/fragment-cache-0.2.1.tgz",
+      "integrity": "sha1-QpD60n8T6Jvn8zeZxrxaCr//DRk=",
+      "requires": {
+        "map-cache": "^0.2.2"
+      }
+    },
+    "fresh": {
+      "version": "0.5.2",
+      "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz",
+      "integrity": "sha1-PYyt2Q2XZWn6g1qx+OSyOhBWBac=",
+      "dev": true
+    },
+    "fs-extra": {
+      "version": "7.0.1",
+      "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-7.0.1.tgz",
+      "integrity": "sha512-YJDaCJZEnBmcbw13fvdAM9AwNOJwOzrE4pqMqBq5nFiEqXUqHwlK4B+3pUw6JNvfSPtX05xFHtYy/1ni01eGCw==",
+      "requires": {
+        "graceful-fs": "^4.1.2",
+        "jsonfile": "^4.0.0",
+        "universalify": "^0.1.0"
+      }
+    },
+    "fs.realpath": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
+      "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=",
+      "dev": true
+    },
+    "get-func-name": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/get-func-name/-/get-func-name-2.0.0.tgz",
+      "integrity": "sha1-6td0q+5y4gQJQzoGY2YCPdaIekE=",
+      "dev": true
+    },
+    "get-value": {
+      "version": "2.0.6",
+      "resolved": "https://registry.npmjs.org/get-value/-/get-value-2.0.6.tgz",
+      "integrity": "sha1-3BXKHGcjh8p2vTesCjlbogQqLCg="
+    },
+    "glob": {
+      "version": "7.1.2",
+      "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.2.tgz",
+      "integrity": "sha512-MJTUg1kjuLeQCJ+ccE4Vpa6kKVXkPYJ2mOCQyUuKLcLQsdrMCpBPUi8qVE6+YuaJkozeA9NusTAw3hLr8Xe5EQ==",
+      "dev": true,
+      "requires": {
+        "fs.realpath": "^1.0.0",
+        "inflight": "^1.0.4",
+        "inherits": "2",
+        "minimatch": "^3.0.4",
+        "once": "^1.3.0",
+        "path-is-absolute": "^1.0.0"
+      }
+    },
+    "graceful-fs": {
+      "version": "4.1.15",
+      "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.1.15.tgz",
+      "integrity": "sha512-6uHUhOPEBgQ24HM+r6b/QwWfZq+yiFcipKFrOFiBEnWdy5sdzYoi+pJeQaPI5qOLRFqWmAXUPQNsielzdLoecA=="
+    },
+    "growl": {
+      "version": "1.10.5",
+      "resolved": "https://registry.npmjs.org/growl/-/growl-1.10.5.tgz",
+      "integrity": "sha512-qBr4OuELkhPenW6goKVXiv47US3clb3/IbuWF9KNKEijAy9oeHxU9IgzjvJhHkUzhaj7rOUD7+YGWqUjLp5oSA==",
+      "dev": true
+    },
+    "has-ansi": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/has-ansi/-/has-ansi-2.0.0.tgz",
+      "integrity": "sha1-NPUEnOHs3ysGSa8+8k5F7TVBbZE=",
+      "dev": true,
+      "requires": {
+        "ansi-regex": "^2.0.0"
+      }
+    },
+    "has-flag": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
+      "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=",
+      "dev": true
+    },
+    "has-value": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/has-value/-/has-value-1.0.0.tgz",
+      "integrity": "sha1-GLKB2lhbHFxR3vJMkw7SmgvmsXc=",
+      "requires": {
+        "get-value": "^2.0.6",
+        "has-values": "^1.0.0",
+        "isobject": "^3.0.0"
+      }
+    },
+    "has-values": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/has-values/-/has-values-1.0.0.tgz",
+      "integrity": "sha1-lbC2P+whRmGab+V/51Yo1aOe/k8=",
+      "requires": {
+        "is-number": "^3.0.0",
+        "kind-of": "^4.0.0"
+      },
+      "dependencies": {
+        "kind-of": {
+          "version": "4.0.0",
+          "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-4.0.0.tgz",
+          "integrity": "sha1-IIE989cSkosgc3hpGkUGb65y3Vc=",
+          "requires": {
+            "is-buffer": "^1.1.5"
+          }
+        }
+      }
+    },
+    "he": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/he/-/he-1.1.1.tgz",
+      "integrity": "sha1-k0EP0hsAlzUVH4howvJx80J+I/0=",
+      "dev": true
+    },
+    "http-assert": {
+      "version": "1.4.0",
+      "resolved": "https://registry.npmjs.org/http-assert/-/http-assert-1.4.0.tgz",
+      "integrity": "sha512-tPVv62a6l3BbQoM/N5qo969l0OFxqpnQzNUPeYfTP6Spo4zkgWeDBD1D5thI7sDLg7jCCihXTLB0X8UtdyAy8A==",
+      "dev": true,
+      "requires": {
+        "deep-equal": "~1.0.1",
+        "http-errors": "~1.7.1"
+      }
+    },
+    "http-errors": {
+      "version": "1.7.1",
+      "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.7.1.tgz",
+      "integrity": "sha512-jWEUgtZWGSMba9I1N3gc1HmvpBUaNC9vDdA46yScAdp+C5rdEuKWUBLWTQpW9FwSWSbYYs++b6SDCxf9UEJzfw==",
+      "dev": true,
+      "requires": {
+        "depd": "~1.1.2",
+        "inherits": "2.0.3",
+        "setprototypeof": "1.1.0",
+        "statuses": ">= 1.5.0 < 2",
+        "toidentifier": "1.0.0"
+      }
+    },
+    "iconv-lite": {
+      "version": "0.4.23",
+      "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.23.tgz",
+      "integrity": "sha512-neyTUVFtahjf0mB3dZT77u+8O0QB89jFdnBkd5P1JgYPbPaia3gXXOVL2fq8VyU2gMMD7SaN7QukTB/pmXYvDA==",
+      "requires": {
+        "safer-buffer": ">= 2.1.2 < 3"
+      }
+    },
+    "inflation": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/inflation/-/inflation-2.0.0.tgz",
+      "integrity": "sha1-i0F+R8KPklpFEz2RTKH9OJEH8w8=",
+      "dev": true
+    },
+    "inflight": {
+      "version": "1.0.6",
+      "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
+      "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=",
+      "dev": true,
+      "requires": {
+        "once": "^1.3.0",
+        "wrappy": "1"
+      }
+    },
+    "inherits": {
+      "version": "2.0.3",
+      "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz",
+      "integrity": "sha1-Yzwsg+PaQqUC9SRmAiSA9CCCYd4=",
+      "dev": true
+    },
+    "is-accessor-descriptor": {
+      "version": "0.1.6",
+      "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-0.1.6.tgz",
+      "integrity": "sha1-qeEss66Nh2cn7u84Q/igiXtcmNY=",
+      "requires": {
+        "kind-of": "^3.0.2"
+      },
+      "dependencies": {
+        "kind-of": {
+          "version": "3.2.2",
+          "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+          "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+          "requires": {
+            "is-buffer": "^1.1.5"
+          }
+        }
+      }
+    },
+    "is-buffer": {
+      "version": "1.1.6",
+      "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-1.1.6.tgz",
+      "integrity": "sha512-NcdALwpXkTm5Zvvbk7owOUSvVvBKDgKP5/ewfXEznmQFfs4ZRmanOeKBTjRVjka3QFoN6XJ+9F3USqfHqTaU5w=="
+    },
+    "is-data-descriptor": {
+      "version": "0.1.4",
+      "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-0.1.4.tgz",
+      "integrity": "sha1-C17mSDiOLIYCgueT8YVv7D8wG1Y=",
+      "requires": {
+        "kind-of": "^3.0.2"
+      },
+      "dependencies": {
+        "kind-of": {
+          "version": "3.2.2",
+          "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+          "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+          "requires": {
+            "is-buffer": "^1.1.5"
+          }
+        }
+      }
+    },
+    "is-descriptor": {
+      "version": "0.1.6",
+      "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-0.1.6.tgz",
+      "integrity": "sha512-avDYr0SB3DwO9zsMov0gKCESFYqCnE4hq/4z3TdUlukEy5t9C0YRq7HLrsN52NAcqXKaepeCD0n+B0arnVG3Hg==",
+      "requires": {
+        "is-accessor-descriptor": "^0.1.6",
+        "is-data-descriptor": "^0.1.4",
+        "kind-of": "^5.0.0"
+      },
+      "dependencies": {
+        "kind-of": {
+          "version": "5.1.0",
+          "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-5.1.0.tgz",
+          "integrity": "sha512-NGEErnH6F2vUuXDh+OlbcKW7/wOcfdRHaZ7VWtqCztfHri/++YKmP51OdWeGPuqCOba6kk2OTe5d02VmTB80Pw=="
+        }
+      }
+    },
+    "is-extendable": {
+      "version": "0.1.1",
+      "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz",
+      "integrity": "sha1-YrEQ4omkcUGOPsNqYX1HLjAd/Ik="
+    },
+    "is-generator-function": {
+      "version": "1.0.7",
+      "resolved": "https://registry.npmjs.org/is-generator-function/-/is-generator-function-1.0.7.tgz",
+      "integrity": "sha512-YZc5EwyO4f2kWCax7oegfuSr9mFz1ZvieNYBEjmukLxgXfBUbxAWGVF7GZf0zidYtoBl3WvC07YK0wT76a+Rtw==",
+      "dev": true
+    },
+    "is-module": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/is-module/-/is-module-1.0.0.tgz",
+      "integrity": "sha1-Mlj7afeMFNW4FdZkM2tM/7ZEFZE="
+    },
+    "is-number": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/is-number/-/is-number-3.0.0.tgz",
+      "integrity": "sha1-JP1iAaR4LPUFYcgQJ2r8fRLXEZU=",
+      "requires": {
+        "kind-of": "^3.0.2"
+      },
+      "dependencies": {
+        "kind-of": {
+          "version": "3.2.2",
+          "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+          "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+          "requires": {
+            "is-buffer": "^1.1.5"
+          }
+        }
+      }
+    },
+    "is-plain-object": {
+      "version": "2.0.4",
+      "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz",
+      "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==",
+      "requires": {
+        "isobject": "^3.0.1"
+      }
+    },
+    "is-windows": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz",
+      "integrity": "sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA=="
+    },
+    "is-wsl": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-1.1.0.tgz",
+      "integrity": "sha1-HxbkqiKwTRM2tmGIpmrzxgDDpm0=",
+      "dev": true
+    },
+    "isarray": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
+      "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE="
+    },
+    "isobject": {
+      "version": "3.0.1",
+      "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz",
+      "integrity": "sha1-TkMekrEalzFjaqH5yNHMvP2reN8="
+    },
+    "js-tokens": {
+      "version": "3.0.2",
+      "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-3.0.2.tgz",
+      "integrity": "sha1-mGbfOVECEw449/mWvOtlRDIJwls=",
+      "dev": true
+    },
+    "js-yaml": {
+      "version": "3.12.1",
+      "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.12.1.tgz",
+      "integrity": "sha512-um46hB9wNOKlwkHgiuyEVAybXBjwFUV0Z/RaHJblRd9DXltue9FTYvzCr9ErQrK9Adz5MU4gHWVaNUfdmrC8qA==",
+      "dev": true,
+      "requires": {
+        "argparse": "^1.0.7",
+        "esprima": "^4.0.0"
+      }
+    },
+    "json-stringify-safe": {
+      "version": "5.0.1",
+      "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz",
+      "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus=",
+      "dev": true
+    },
+    "json5": {
+      "version": "1.0.1",
+      "resolved": "http://registry.npmjs.org/json5/-/json5-1.0.1.tgz",
+      "integrity": "sha512-aKS4WQjPenRxiQsC93MNfjx+nbF4PAdYzmd/1JIj8HYzqfbu86beTuNgXDzPknWk0n0uARlyewZo4s++ES36Ow==",
+      "dev": true,
+      "optional": true,
+      "requires": {
+        "minimist": "^1.2.0"
+      }
+    },
+    "jsonfile": {
+      "version": "4.0.0",
+      "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz",
+      "integrity": "sha1-h3Gq4HmbZAdrdmQPygWPnBDjPss=",
+      "requires": {
+        "graceful-fs": "^4.1.6"
+      }
+    },
+    "jsonparse": {
+      "version": "1.3.1",
+      "resolved": "https://registry.npmjs.org/jsonparse/-/jsonparse-1.3.1.tgz",
+      "integrity": "sha1-P02uSpH6wxX3EGL4UhzCOfE2YoA=",
+      "dev": true
+    },
+    "keygrip": {
+      "version": "1.0.3",
+      "resolved": "https://registry.npmjs.org/keygrip/-/keygrip-1.0.3.tgz",
+      "integrity": "sha512-/PpesirAIfaklxUzp4Yb7xBper9MwP6hNRA6BGGUFCgbJ+BM5CKBtsoxinNXkLHAr+GXS1/lSlF2rP7cv5Fl+g==",
+      "dev": true
+    },
+    "kind-of": {
+      "version": "6.0.2",
+      "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.2.tgz",
+      "integrity": "sha512-s5kLOcnH0XqDO+FvuaLX8DDjZ18CGFk7VygH40QoKPUQhW4e2rvM0rwUq0t8IQDOwYSeLK01U90OjzBTme2QqA=="
+    },
+    "koa": {
+      "version": "2.6.1",
+      "resolved": "https://registry.npmjs.org/koa/-/koa-2.6.1.tgz",
+      "integrity": "sha512-n9R5Eex4y0drUeqFTeCIeXyz8wjr2AxBo2Cq8LvmiXbJl4yDA5KIrecMPkhnmgACZnPXMRyCLbJoyLmpM9aFAw==",
+      "dev": true,
+      "requires": {
+        "accepts": "^1.3.5",
+        "cache-content-type": "^1.0.0",
+        "content-disposition": "~0.5.2",
+        "content-type": "^1.0.4",
+        "cookies": "~0.7.1",
+        "debug": "~3.1.0",
+        "delegates": "^1.0.0",
+        "depd": "^1.1.2",
+        "destroy": "^1.0.4",
+        "error-inject": "^1.0.0",
+        "escape-html": "^1.0.3",
+        "fresh": "~0.5.2",
+        "http-assert": "^1.3.0",
+        "http-errors": "^1.6.3",
+        "is-generator-function": "^1.0.7",
+        "koa-compose": "^4.1.0",
+        "koa-convert": "^1.2.0",
+        "koa-is-json": "^1.0.0",
+        "on-finished": "^2.3.0",
+        "only": "~0.0.2",
+        "parseurl": "^1.3.2",
+        "statuses": "^1.5.0",
+        "type-is": "^1.6.16",
+        "vary": "^1.1.2"
+      }
+    },
+    "koa-bodyparser": {
+      "version": "4.2.1",
+      "resolved": "https://registry.npmjs.org/koa-bodyparser/-/koa-bodyparser-4.2.1.tgz",
+      "integrity": "sha512-UIjPAlMZfNYDDe+4zBaOAUKYqkwAGcIU6r2ARf1UOXPAlfennQys5IiShaVeNf7KkVBlf88f2LeLvBFvKylttw==",
+      "dev": true,
+      "requires": {
+        "co-body": "^6.0.0",
+        "copy-to": "^2.0.1"
+      }
+    },
+    "koa-compose": {
+      "version": "4.1.0",
+      "resolved": "https://registry.npmjs.org/koa-compose/-/koa-compose-4.1.0.tgz",
+      "integrity": "sha512-8ODW8TrDuMYvXRwra/Kh7/rJo9BtOfPc6qO8eAfC80CnCvSjSl0bkRM24X6/XBBEyj0v1nRUQ1LyOy3dbqOWXw==",
+      "dev": true
+    },
+    "koa-compress": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/koa-compress/-/koa-compress-2.0.0.tgz",
+      "integrity": "sha1-e36ykhuEd0a14SK6n1zYpnHo6jo=",
+      "dev": true,
+      "requires": {
+        "bytes": "^2.3.0",
+        "compressible": "^2.0.0",
+        "koa-is-json": "^1.0.0",
+        "statuses": "^1.0.0"
+      },
+      "dependencies": {
+        "bytes": {
+          "version": "2.5.0",
+          "resolved": "https://registry.npmjs.org/bytes/-/bytes-2.5.0.tgz",
+          "integrity": "sha1-TJQj6i0lLCcMQbK97+/5u2tiwGo=",
+          "dev": true
+        }
+      }
+    },
+    "koa-conditional-get": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/koa-conditional-get/-/koa-conditional-get-2.0.0.tgz",
+      "integrity": "sha1-pD83I8HQFLcwo07Oit8wuTyCM/I=",
+      "dev": true
+    },
+    "koa-convert": {
+      "version": "1.2.0",
+      "resolved": "https://registry.npmjs.org/koa-convert/-/koa-convert-1.2.0.tgz",
+      "integrity": "sha1-2kCHXfSd4FOQmNFwC1CCDOvNIdA=",
+      "dev": true,
+      "requires": {
+        "co": "^4.6.0",
+        "koa-compose": "^3.0.0"
+      },
+      "dependencies": {
+        "koa-compose": {
+          "version": "3.2.1",
+          "resolved": "https://registry.npmjs.org/koa-compose/-/koa-compose-3.2.1.tgz",
+          "integrity": "sha1-qFzLQLfZhtjlo0Wzoazo6rz1Tec=",
+          "dev": true,
+          "requires": {
+            "any-promise": "^1.1.0"
+          }
+        }
+      }
+    },
+    "koa-etag": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/koa-etag/-/koa-etag-3.0.0.tgz",
+      "integrity": "sha1-nvc4Ld1agqsN6xU0FckVg293HT8=",
+      "dev": true,
+      "requires": {
+        "etag": "^1.3.0",
+        "mz": "^2.1.0"
+      }
+    },
+    "koa-is-json": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/koa-is-json/-/koa-is-json-1.0.0.tgz",
+      "integrity": "sha1-JzwH7c3Ljfaiwat9We52SRRR7BQ=",
+      "dev": true
+    },
+    "koa-json": {
+      "version": "2.0.2",
+      "resolved": "https://registry.npmjs.org/koa-json/-/koa-json-2.0.2.tgz",
+      "integrity": "sha1-Nq8U5uofXWRtfESihXAcb4Wk/eQ=",
+      "dev": true,
+      "requires": {
+        "koa-is-json": "1",
+        "streaming-json-stringify": "3"
+      }
+    },
+    "koa-mock-response": {
+      "version": "0.2.0",
+      "resolved": "https://registry.npmjs.org/koa-mock-response/-/koa-mock-response-0.2.0.tgz",
+      "integrity": "sha512-HmybRN1a3WqcSFvf7tycu2YhBIEHeqzm8bwcsShNWGsTgP86coZOpdI8aqYm/1DFsAQMctnpdWrva4rDr1Pibg==",
+      "dev": true,
+      "requires": {
+        "array-back": "^2.0.0",
+        "path-to-regexp": "^1.7.0",
+        "typical": "^2.6.1"
+      },
+      "dependencies": {
+        "isarray": {
+          "version": "0.0.1",
+          "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz",
+          "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=",
+          "dev": true
+        },
+        "path-to-regexp": {
+          "version": "1.7.0",
+          "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.7.0.tgz",
+          "integrity": "sha1-Wf3g9DW62suhA6hOnTvGTpa5k30=",
+          "dev": true,
+          "requires": {
+            "isarray": "0.0.1"
+          }
+        },
+        "typical": {
+          "version": "2.6.1",
+          "resolved": "https://registry.npmjs.org/typical/-/typical-2.6.1.tgz",
+          "integrity": "sha1-XAgOXWYcu+OCWdLnCjxyU+hziB0=",
+          "dev": true
+        }
+      }
+    },
+    "koa-morgan": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/koa-morgan/-/koa-morgan-1.0.1.tgz",
+      "integrity": "sha1-CAUuDODYOdPEMXi5CluzQkvvH5k=",
+      "dev": true,
+      "requires": {
+        "morgan": "^1.6.1"
+      }
+    },
+    "koa-range": {
+      "version": "0.3.0",
+      "resolved": "https://registry.npmjs.org/koa-range/-/koa-range-0.3.0.tgz",
+      "integrity": "sha1-NYjjSWRzqDmhvSZNKkKx2FvX/qw=",
+      "dev": true,
+      "requires": {
+        "stream-slice": "^0.1.2"
+      }
+    },
+    "koa-rewrite-75lb": {
+      "version": "2.1.1",
+      "resolved": "https://registry.npmjs.org/koa-rewrite-75lb/-/koa-rewrite-75lb-2.1.1.tgz",
+      "integrity": "sha512-i9ofDKLs0xNCb2PW7wKGFzBFX6+Ce3aKoZzNKPh0fkejeUOTWkkDqnjXrgqrJEP2ifX6WWsHp6VtGuXzSYLSWQ==",
+      "dev": true,
+      "requires": {
+        "path-to-regexp": "1.7.0"
+      },
+      "dependencies": {
+        "isarray": {
+          "version": "0.0.1",
+          "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz",
+          "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=",
+          "dev": true
+        },
+        "path-to-regexp": {
+          "version": "1.7.0",
+          "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.7.0.tgz",
+          "integrity": "sha1-Wf3g9DW62suhA6hOnTvGTpa5k30=",
+          "dev": true,
+          "requires": {
+            "isarray": "0.0.1"
+          }
+        }
+      }
+    },
+    "koa-route": {
+      "version": "3.2.0",
+      "resolved": "https://registry.npmjs.org/koa-route/-/koa-route-3.2.0.tgz",
+      "integrity": "sha1-dimLmaa8+p44yrb+XHmocz51i84=",
+      "dev": true,
+      "requires": {
+        "debug": "*",
+        "methods": "~1.1.0",
+        "path-to-regexp": "^1.2.0"
+      },
+      "dependencies": {
+        "isarray": {
+          "version": "0.0.1",
+          "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz",
+          "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=",
+          "dev": true
+        },
+        "path-to-regexp": {
+          "version": "1.7.0",
+          "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.7.0.tgz",
+          "integrity": "sha1-Wf3g9DW62suhA6hOnTvGTpa5k30=",
+          "dev": true,
+          "requires": {
+            "isarray": "0.0.1"
+          }
+        }
+      }
+    },
+    "koa-send": {
+      "version": "4.1.3",
+      "resolved": "http://registry.npmjs.org/koa-send/-/koa-send-4.1.3.tgz",
+      "integrity": "sha512-3UetMBdaXSiw24qM2Mx5mKmxLKw5ZTPRjACjfhK6Haca55RKm9hr/uHDrkrxhSl5/S1CKI/RivZVIopiatZuTA==",
+      "dev": true,
+      "requires": {
+        "debug": "^2.6.3",
+        "http-errors": "^1.6.1",
+        "mz": "^2.6.0",
+        "resolve-path": "^1.4.0"
+      },
+      "dependencies": {
+        "debug": {
+          "version": "2.6.9",
+          "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+          "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+          "dev": true,
+          "requires": {
+            "ms": "2.0.0"
+          }
+        }
+      }
+    },
+    "koa-static": {
+      "version": "4.0.3",
+      "resolved": "https://registry.npmjs.org/koa-static/-/koa-static-4.0.3.tgz",
+      "integrity": "sha512-JGmxTuPWy4bH7bt6gD/OMWkhprawvRmzJSr8TWKmTL4N7+IMv3s0SedeQi5S4ilxM9Bo6ptkCyXj/7wf+VS5tg==",
+      "dev": true,
+      "requires": {
+        "debug": "^3.1.0",
+        "koa-send": "^4.1.3"
+      }
+    },
+    "load-module": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/load-module/-/load-module-1.0.0.tgz",
+      "integrity": "sha512-FmoAJI/RM4vmvIRk65g/SFCnGQC9BbALY3zy38Z0cMllNnra1+iCdxAf051LVymzE60/FweOo9or9XJiGgFshg==",
+      "dev": true,
+      "requires": {
+        "array-back": "^2.0.0"
+      }
+    },
+    "local-web-server": {
+      "version": "2.6.0",
+      "resolved": "https://registry.npmjs.org/local-web-server/-/local-web-server-2.6.0.tgz",
+      "integrity": "sha512-m7Z5zlzZFxMyiK1W8xR5TJMh00Fy9z7Po8vilSQCpeU4LG2VMK667xCkASBUepFR9fPj6heUMBHu9P/TrwDqFw==",
+      "dev": true,
+      "requires": {
+        "lws": "^1.3.0",
+        "lws-basic-auth": "^0.1.1",
+        "lws-blacklist": "^0.3.0",
+        "lws-body-parser": "^0.2.4",
+        "lws-compress": "^0.2.1",
+        "lws-conditional-get": "^0.3.4",
+        "lws-cors": "^1.0.0",
+        "lws-index": "^0.4.0",
+        "lws-json": "^0.3.2",
+        "lws-log": "^0.3.2",
+        "lws-mime": "^0.2.2",
+        "lws-mock-response": "^0.5.1",
+        "lws-range": "^1.1.0",
+        "lws-request-monitor": "^0.1.5",
+        "lws-rewrite": "^0.4.1",
+        "lws-spa": "^0.3.0",
+        "lws-static": "^0.5.0",
+        "node-version-matches": "^1.0.0"
+      }
+    },
+    "lodash.assignwith": {
+      "version": "4.2.0",
+      "resolved": "https://registry.npmjs.org/lodash.assignwith/-/lodash.assignwith-4.2.0.tgz",
+      "integrity": "sha1-EnqX8CrcQXUalU0ksN4X4QDgOOs=",
+      "dev": true
+    },
+    "lodash.camelcase": {
+      "version": "4.3.0",
+      "resolved": "https://registry.npmjs.org/lodash.camelcase/-/lodash.camelcase-4.3.0.tgz",
+      "integrity": "sha1-soqmKIorn8ZRA1x3EfZathkDMaY=",
+      "dev": true
+    },
+    "lodash.padend": {
+      "version": "4.6.1",
+      "resolved": "https://registry.npmjs.org/lodash.padend/-/lodash.padend-4.6.1.tgz",
+      "integrity": "sha1-U8y6BH0G4VjTEfRdpiX05J5vFm4=",
+      "dev": true
+    },
+    "lodash.pick": {
+      "version": "4.4.0",
+      "resolved": "https://registry.npmjs.org/lodash.pick/-/lodash.pick-4.4.0.tgz",
+      "integrity": "sha1-UvBWEP/53tQiYRRB7R/BI6AwAbM=",
+      "dev": true
+    },
+    "lodash.throttle": {
+      "version": "4.1.1",
+      "resolved": "https://registry.npmjs.org/lodash.throttle/-/lodash.throttle-4.1.1.tgz",
+      "integrity": "sha1-wj6RtxAkKscMN/HhzaknTMOb8vQ=",
+      "dev": true
+    },
+    "lws": {
+      "version": "1.3.0",
+      "resolved": "https://registry.npmjs.org/lws/-/lws-1.3.0.tgz",
+      "integrity": "sha512-2gOJzVtgjg4mA1cyWnzkICR/NLuMD24sbRSwQeVZeVkadp0VOKTlpmnjvA1tQpkb1TGrcOS+N+3vKMJST8tt2w==",
+      "dev": true,
+      "requires": {
+        "ansi-escape-sequences": "^4.0.0",
+        "array-back": "^2.0.0",
+        "byte-size": "^4.0.3",
+        "cli-commands": "^0.4.0",
+        "command-line-args": "^5.0.2",
+        "command-line-usage": "^5.0.5",
+        "koa": "^2.5.2",
+        "load-module": "^1.0.0",
+        "lodash.assignwith": "^4.2.0",
+        "node-version-matches": "^1.0.0",
+        "opn": "^5.3.0",
+        "reduce-flatten": "^2.0.0",
+        "typical": "^3.0.0",
+        "walk-back": "^3.0.0",
+        "ws": "^5.2.1"
+      }
+    },
+    "lws-basic-auth": {
+      "version": "0.1.1",
+      "resolved": "https://registry.npmjs.org/lws-basic-auth/-/lws-basic-auth-0.1.1.tgz",
+      "integrity": "sha512-npPpqkOFzJzB9yJ2pGXmiYOswH+0n86ro75WhromeGuNo0GfE18ZLI/VCOVWmBbeXp2pcnPIMUAdkNSgukpAww==",
+      "dev": true,
+      "requires": {
+        "basic-auth": "^1.1.0"
+      }
+    },
+    "lws-blacklist": {
+      "version": "0.3.0",
+      "resolved": "https://registry.npmjs.org/lws-blacklist/-/lws-blacklist-0.3.0.tgz",
+      "integrity": "sha512-ZA8dujYaZwRNMBhgP+oGsZi9tum44Ba6VHsA3JrV1JVrjZ8c65kLaO/41rLBqQDKP3SDPu7dLity4YLwe1FuNQ==",
+      "dev": true,
+      "requires": {
+        "array-back": "^2.0.0",
+        "path-to-regexp": "^2.2.0"
+      }
+    },
+    "lws-body-parser": {
+      "version": "0.2.4",
+      "resolved": "https://registry.npmjs.org/lws-body-parser/-/lws-body-parser-0.2.4.tgz",
+      "integrity": "sha512-XKJzbzK97TUsewIPA5J2RpEk7kRoJcL+/Du6JlwzqIq84tWuXMfiT2a4Ncj12+tRWrdY2avV6d8uLhqlHLz1yg==",
+      "dev": true,
+      "requires": {
+        "koa-bodyparser": "^4.2.0"
+      }
+    },
+    "lws-compress": {
+      "version": "0.2.1",
+      "resolved": "https://registry.npmjs.org/lws-compress/-/lws-compress-0.2.1.tgz",
+      "integrity": "sha512-14++1o6U8upi3DLx9J2O2sFELsijEJF9utoFxSH4Stoo9SdU2Cxw6BtqQTrb9SEA6O6IsApzstdMYnq8floLSg==",
+      "dev": true,
+      "requires": {
+        "koa-compress": "^2.0.0"
+      }
+    },
+    "lws-conditional-get": {
+      "version": "0.3.4",
+      "resolved": "https://registry.npmjs.org/lws-conditional-get/-/lws-conditional-get-0.3.4.tgz",
+      "integrity": "sha512-6asZSfM747snhdz4xexRllm09pebz8pjYeg2d5khLR53D/OJznZWHsIqW0JGiScJObri2D7+H4z7yRLBjokT7g==",
+      "dev": true,
+      "requires": {
+        "koa-conditional-get": "^2.0.0",
+        "koa-etag": "^3.0.0"
+      }
+    },
+    "lws-cors": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/lws-cors/-/lws-cors-1.0.0.tgz",
+      "integrity": "sha512-4C0m4lvYdAnpAa03tr9AqziB4d8SRPh4beQBuzPiefv7N9/tpVdrl9kgXrUe1hLHhISnVJ5MoOZuZ6wFeMiU4g==",
+      "dev": true,
+      "requires": {
+        "@koa/cors": "^2.2.1"
+      }
+    },
+    "lws-index": {
+      "version": "0.4.0",
+      "resolved": "https://registry.npmjs.org/lws-index/-/lws-index-0.4.0.tgz",
+      "integrity": "sha512-k+mkqgMSzx1ipzVpaxsAJU4Qe7R1kp1B/u+qC+d1Y3l+auBz+bLcIxL4dYKfaxLqiz0IFwg1dZwGzVm/dd7FFw==",
+      "dev": true,
+      "requires": {
+        "serve-index-75lb": "^2.0.0"
+      }
+    },
+    "lws-json": {
+      "version": "0.3.2",
+      "resolved": "https://registry.npmjs.org/lws-json/-/lws-json-0.3.2.tgz",
+      "integrity": "sha512-ElmCA8hi3GPMfxbtiI015PDHuJovhhcbXX/qTTTifXhopedAzIBzn/rF5dHZHE4k7HQDYfbiaPgPMbmpv9dMvQ==",
+      "dev": true,
+      "requires": {
+        "koa-json": "^2.0.2"
+      }
+    },
+    "lws-log": {
+      "version": "0.3.2",
+      "resolved": "https://registry.npmjs.org/lws-log/-/lws-log-0.3.2.tgz",
+      "integrity": "sha512-DRp4bFl4a7hjwR/RjARjhFLEXs8pIeqKbUvojaAl1hhfRBuW2JsDxRSKC+ViQN06CW4Qypg3ZsztMMR8dRO8dA==",
+      "dev": true,
+      "requires": {
+        "koa-morgan": "^1.0.1",
+        "stream-log-stats": "^2.0.2"
+      }
+    },
+    "lws-mime": {
+      "version": "0.2.2",
+      "resolved": "https://registry.npmjs.org/lws-mime/-/lws-mime-0.2.2.tgz",
+      "integrity": "sha512-cWBj9CuuSvvaqdYMPiXRid0QhzJmr+5gWAA96pEDOiW8tMCMoxl7CIgTpHXZwhJzCqdI84RZDVm+FswByATS5w==",
+      "dev": true
+    },
+    "lws-mock-response": {
+      "version": "0.5.1",
+      "resolved": "https://registry.npmjs.org/lws-mock-response/-/lws-mock-response-0.5.1.tgz",
+      "integrity": "sha512-4R5Q1RmRglC0pqEwywrS5g62aKaLQsteMnShGmWU9aQ/737Bq0/3qbQ3mb8VbMk3lLzo3ZaNZ1DUsPgVvZaXNQ==",
+      "dev": true,
+      "requires": {
+        "array-back": "^2.0.0",
+        "koa-mock-response": "0.2.0",
+        "load-module": "^1.0.0",
+        "reduce-flatten": "^2.0.0"
+      }
+    },
+    "lws-range": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/lws-range/-/lws-range-1.1.0.tgz",
+      "integrity": "sha512-Mpx6FdO58Z4l6DAXlATsC2zm10QvyGYElQvFd7P1xqUSTPoYG0wAxfjlpqI+Qdb2O7W4Ah21yESVnPEwae3SIw==",
+      "dev": true,
+      "requires": {
+        "koa-range": "^0.3.0"
+      }
+    },
+    "lws-request-monitor": {
+      "version": "0.1.5",
+      "resolved": "https://registry.npmjs.org/lws-request-monitor/-/lws-request-monitor-0.1.5.tgz",
+      "integrity": "sha512-u9eczHPowH17ftUjQ8ysutGDADNZdDD6k8wgFMzOB7/rRq1Is12lTYA4u8pfKZ8C2oyoy+HYsDSrOzTwespTlA==",
+      "dev": true,
+      "requires": {
+        "byte-size": "^4.0.2"
+      }
+    },
+    "lws-rewrite": {
+      "version": "0.4.1",
+      "resolved": "https://registry.npmjs.org/lws-rewrite/-/lws-rewrite-0.4.1.tgz",
+      "integrity": "sha512-EHUdbqfdwc4Baa7iXOdG2y815WC040Cing1GwhM9VsBL7lHtZ7zl3EHzjWFv3styoO3qNqZ4W0xCey4hoo/aYg==",
+      "dev": true,
+      "requires": {
+        "array-back": "^2.0.0",
+        "koa-rewrite-75lb": "^2.1.1",
+        "koa-route": "^3.2.0",
+        "path-to-regexp": "^1.7.0",
+        "req-then": "^0.6.4",
+        "stream-read-all": "^0.1.2",
+        "typical": "^2.6.1"
+      },
+      "dependencies": {
+        "isarray": {
+          "version": "0.0.1",
+          "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz",
+          "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=",
+          "dev": true
+        },
+        "path-to-regexp": {
+          "version": "1.7.0",
+          "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.7.0.tgz",
+          "integrity": "sha1-Wf3g9DW62suhA6hOnTvGTpa5k30=",
+          "dev": true,
+          "requires": {
+            "isarray": "0.0.1"
+          }
+        },
+        "typical": {
+          "version": "2.6.1",
+          "resolved": "https://registry.npmjs.org/typical/-/typical-2.6.1.tgz",
+          "integrity": "sha1-XAgOXWYcu+OCWdLnCjxyU+hziB0=",
+          "dev": true
+        }
+      }
+    },
+    "lws-spa": {
+      "version": "0.3.0",
+      "resolved": "https://registry.npmjs.org/lws-spa/-/lws-spa-0.3.0.tgz",
+      "integrity": "sha512-8wxZl5dOI/CQsJ6oOG8Y7B4khjlQXfB7GlVkjYFPuOYM+JIw/QzMvezKjKweG0qGePmHJVHWa38+CyololV4aw==",
+      "dev": true,
+      "requires": {
+        "koa-route": "^3.2.0",
+        "koa-send": "^4.1.3"
+      }
+    },
+    "lws-static": {
+      "version": "0.5.0",
+      "resolved": "https://registry.npmjs.org/lws-static/-/lws-static-0.5.0.tgz",
+      "integrity": "sha512-r3QIeJfBox/hSJLSL7TPhNSZsTKE0r4mWYHbGZ+DwrBcKbLt1ljsh5NAtmJpsqCcjYpyOuD/DlsZ0yQY9VI8bA==",
+      "dev": true,
+      "requires": {
+        "koa-static": "^4.0.2"
+      }
+    },
+    "make-error": {
+      "version": "1.3.5",
+      "resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.5.tgz",
+      "integrity": "sha512-c3sIjNUow0+8swNwVpqoH4YCShKNFkMaw6oH1mNS2haDZQqkeZFlHS3dhoeEbKKmJB4vXpJucU6oH75aDYeE9g==",
+      "dev": true
+    },
+    "map-cache": {
+      "version": "0.2.2",
+      "resolved": "https://registry.npmjs.org/map-cache/-/map-cache-0.2.2.tgz",
+      "integrity": "sha1-wyq9C9ZSXZsFFkW7TyasXcmKDb8="
+    },
+    "map-visit": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/map-visit/-/map-visit-1.0.0.tgz",
+      "integrity": "sha1-7Nyo8TFE5mDxtb1B8S80edmN+48=",
+      "requires": {
+        "object-visit": "^1.0.0"
+      }
+    },
+    "media-typer": {
+      "version": "0.3.0",
+      "resolved": "http://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
+      "integrity": "sha1-hxDXrwqmJvj/+hzgAWhUUmMlV0g=",
+      "dev": true
+    },
+    "methods": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz",
+      "integrity": "sha1-VSmk1nZUE07cxSZmVoNbD4Ua/O4=",
+      "dev": true
+    },
+    "micromatch": {
+      "version": "3.1.10",
+      "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-3.1.10.tgz",
+      "integrity": "sha512-MWikgl9n9M3w+bpsY3He8L+w9eF9338xRl8IAO5viDizwSzziFEyUzo2xrrloB64ADbTf8uA8vRqqttDTOmccg==",
+      "requires": {
+        "arr-diff": "^4.0.0",
+        "array-unique": "^0.3.2",
+        "braces": "^2.3.1",
+        "define-property": "^2.0.2",
+        "extend-shallow": "^3.0.2",
+        "extglob": "^2.0.4",
+        "fragment-cache": "^0.2.1",
+        "kind-of": "^6.0.2",
+        "nanomatch": "^1.2.9",
+        "object.pick": "^1.3.0",
+        "regex-not": "^1.0.0",
+        "snapdragon": "^0.8.1",
+        "to-regex": "^3.0.2"
+      }
+    },
+    "mime-db": {
+      "version": "1.37.0",
+      "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.37.0.tgz",
+      "integrity": "sha512-R3C4db6bgQhlIhPU48fUtdVmKnflq+hRdad7IyKhtFj06VPNVdk2RhiYL3UjQIlso8L+YxAtFkobT0VK+S/ybg==",
+      "dev": true
+    },
+    "mime-types": {
+      "version": "2.1.21",
+      "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.21.tgz",
+      "integrity": "sha512-3iL6DbwpyLzjR3xHSFNFeb9Nz/M8WDkX33t1GFQnFOllWk8pOrh/LSrB5OXlnlW5P9LH73X6loW/eogc+F5lJg==",
+      "dev": true,
+      "requires": {
+        "mime-db": "~1.37.0"
+      }
+    },
+    "minimatch": {
+      "version": "3.0.4",
+      "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz",
+      "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==",
+      "dev": true,
+      "requires": {
+        "brace-expansion": "^1.1.7"
+      }
+    },
+    "minimist": {
+      "version": "1.2.0",
+      "resolved": "http://registry.npmjs.org/minimist/-/minimist-1.2.0.tgz",
+      "integrity": "sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ=",
+      "dev": true
+    },
+    "mixin-deep": {
+      "version": "1.3.1",
+      "resolved": "https://registry.npmjs.org/mixin-deep/-/mixin-deep-1.3.1.tgz",
+      "integrity": "sha512-8ZItLHeEgaqEvd5lYBXfm4EZSFCX29Jb9K+lAHhDKzReKBQKj3R+7NOF6tjqYi9t4oI8VUfaWITJQm86wnXGNQ==",
+      "requires": {
+        "for-in": "^1.0.2",
+        "is-extendable": "^1.0.1"
+      },
+      "dependencies": {
+        "is-extendable": {
+          "version": "1.0.1",
+          "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-1.0.1.tgz",
+          "integrity": "sha512-arnXMxT1hhoKo9k1LZdmlNyJdDDfy2v0fXjFlmok4+i8ul/6WlbVge9bhM74OpNPQPMGUToDtz+KXa1PneJxOA==",
+          "requires": {
+            "is-plain-object": "^2.0.4"
+          }
+        }
+      }
+    },
+    "mkdirp": {
+      "version": "0.5.1",
+      "resolved": "http://registry.npmjs.org/mkdirp/-/mkdirp-0.5.1.tgz",
+      "integrity": "sha1-MAV0OOrGz3+MR2fzhkjWaX11yQM=",
+      "dev": true,
+      "requires": {
+        "minimist": "0.0.8"
+      },
+      "dependencies": {
+        "minimist": {
+          "version": "0.0.8",
+          "resolved": "http://registry.npmjs.org/minimist/-/minimist-0.0.8.tgz",
+          "integrity": "sha1-hX/Kv8M5fSYluCKCYuhqp6ARsF0=",
+          "dev": true
+        }
+      }
+    },
+    "mocha": {
+      "version": "5.2.0",
+      "resolved": "https://registry.npmjs.org/mocha/-/mocha-5.2.0.tgz",
+      "integrity": "sha512-2IUgKDhc3J7Uug+FxMXuqIyYzH7gJjXECKe/w43IGgQHTSj3InJi+yAA7T24L9bQMRKiUEHxEX37G5JpVUGLcQ==",
+      "dev": true,
+      "requires": {
+        "browser-stdout": "1.3.1",
+        "commander": "2.15.1",
+        "debug": "3.1.0",
+        "diff": "3.5.0",
+        "escape-string-regexp": "1.0.5",
+        "glob": "7.1.2",
+        "growl": "1.10.5",
+        "he": "1.1.1",
+        "minimatch": "3.0.4",
+        "mkdirp": "0.5.1",
+        "supports-color": "5.4.0"
+      }
+    },
+    "morgan": {
+      "version": "1.9.1",
+      "resolved": "https://registry.npmjs.org/morgan/-/morgan-1.9.1.tgz",
+      "integrity": "sha512-HQStPIV4y3afTiCYVxirakhlCfGkI161c76kKFca7Fk1JusM//Qeo1ej2XaMniiNeaZklMVrh3vTtIzpzwbpmA==",
+      "dev": true,
+      "requires": {
+        "basic-auth": "~2.0.0",
+        "debug": "2.6.9",
+        "depd": "~1.1.2",
+        "on-finished": "~2.3.0",
+        "on-headers": "~1.0.1"
+      },
+      "dependencies": {
+        "basic-auth": {
+          "version": "2.0.1",
+          "resolved": "https://registry.npmjs.org/basic-auth/-/basic-auth-2.0.1.tgz",
+          "integrity": "sha512-NF+epuEdnUYVlGuhaxbbq+dvJttwLnGY+YixlXlME5KpQ5W3CnXA5cVTneY3SPbPDRkcjMbifrwmFYcClgOZeg==",
+          "dev": true,
+          "requires": {
+            "safe-buffer": "5.1.2"
+          }
+        },
+        "debug": {
+          "version": "2.6.9",
+          "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+          "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+          "dev": true,
+          "requires": {
+            "ms": "2.0.0"
+          }
+        }
+      }
+    },
+    "ms": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+      "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g="
+    },
+    "mz": {
+      "version": "2.7.0",
+      "resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz",
+      "integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==",
+      "dev": true,
+      "requires": {
+        "any-promise": "^1.0.0",
+        "object-assign": "^4.0.1",
+        "thenify-all": "^1.0.0"
+      }
+    },
+    "nanomatch": {
+      "version": "1.2.13",
+      "resolved": "https://registry.npmjs.org/nanomatch/-/nanomatch-1.2.13.tgz",
+      "integrity": "sha512-fpoe2T0RbHwBTBUOftAfBPaDEi06ufaUai0mE6Yn1kacc3SnTErfb/h+X94VXzI64rKFHYImXSvdwGGCmwOqCA==",
+      "requires": {
+        "arr-diff": "^4.0.0",
+        "array-unique": "^0.3.2",
+        "define-property": "^2.0.2",
+        "extend-shallow": "^3.0.2",
+        "fragment-cache": "^0.2.1",
+        "is-windows": "^1.0.2",
+        "kind-of": "^6.0.2",
+        "object.pick": "^1.3.0",
+        "regex-not": "^1.0.0",
+        "snapdragon": "^0.8.1",
+        "to-regex": "^3.0.1"
+      }
+    },
+    "negotiator": {
+      "version": "0.6.1",
+      "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.1.tgz",
+      "integrity": "sha1-KzJxhOiZIQEXeyhWP7XnECrNDKk=",
+      "dev": true
+    },
+    "node-version-matches": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/node-version-matches/-/node-version-matches-1.0.0.tgz",
+      "integrity": "sha512-E1OQnAUB+BvEyNTXTWpUUMAWXYCa7yjiS64djOuTJEkm20yaQfNmWTfx/kvN6nC7fc0GQS182IaefOPxQvpxXg==",
+      "dev": true,
+      "requires": {
+        "semver": "^5.5.0"
+      }
+    },
+    "object-assign": {
+      "version": "4.1.1",
+      "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
+      "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=",
+      "dev": true
+    },
+    "object-copy": {
+      "version": "0.1.0",
+      "resolved": "https://registry.npmjs.org/object-copy/-/object-copy-0.1.0.tgz",
+      "integrity": "sha1-fn2Fi3gb18mRpBupde04EnVOmYw=",
+      "requires": {
+        "copy-descriptor": "^0.1.0",
+        "define-property": "^0.2.5",
+        "kind-of": "^3.0.3"
+      },
+      "dependencies": {
+        "define-property": {
+          "version": "0.2.5",
+          "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
+          "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
+          "requires": {
+            "is-descriptor": "^0.1.0"
+          }
+        },
+        "kind-of": {
+          "version": "3.2.2",
+          "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+          "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+          "requires": {
+            "is-buffer": "^1.1.5"
+          }
+        }
+      }
+    },
+    "object-visit": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/object-visit/-/object-visit-1.0.1.tgz",
+      "integrity": "sha1-95xEk68MU3e1n+OdOV5BBC3QRbs=",
+      "requires": {
+        "isobject": "^3.0.0"
+      }
+    },
+    "object.pick": {
+      "version": "1.3.0",
+      "resolved": "https://registry.npmjs.org/object.pick/-/object.pick-1.3.0.tgz",
+      "integrity": "sha1-h6EKxMFpS9Lhy/U1kaZhQftd10c=",
+      "requires": {
+        "isobject": "^3.0.1"
+      }
+    },
+    "on-finished": {
+      "version": "2.3.0",
+      "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.3.0.tgz",
+      "integrity": "sha1-IPEzZIGwg811M3mSoWlxqi2QaUc=",
+      "dev": true,
+      "requires": {
+        "ee-first": "1.1.1"
+      }
+    },
+    "on-headers": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.1.tgz",
+      "integrity": "sha1-ko9dD0cNSTQmUepnlLCFfBAGk/c=",
+      "dev": true
+    },
+    "once": {
+      "version": "1.4.0",
+      "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
+      "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=",
+      "dev": true,
+      "requires": {
+        "wrappy": "1"
+      }
+    },
+    "only": {
+      "version": "0.0.2",
+      "resolved": "https://registry.npmjs.org/only/-/only-0.0.2.tgz",
+      "integrity": "sha1-Kv3oTQPlC5qO3EROMGEKcCle37Q=",
+      "dev": true
+    },
+    "opn": {
+      "version": "5.4.0",
+      "resolved": "https://registry.npmjs.org/opn/-/opn-5.4.0.tgz",
+      "integrity": "sha512-YF9MNdVy/0qvJvDtunAOzFw9iasOQHpVthTCvGzxt61Il64AYSGdK+rYwld7NAfk9qJ7dt+hymBNSc9LNYS+Sw==",
+      "dev": true,
+      "requires": {
+        "is-wsl": "^1.1.0"
+      }
+    },
+    "parseurl": {
+      "version": "1.3.2",
+      "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.2.tgz",
+      "integrity": "sha1-/CidTtiZMRlGDBViUyYs3I3mW/M=",
+      "dev": true
+    },
+    "pascalcase": {
+      "version": "0.1.1",
+      "resolved": "https://registry.npmjs.org/pascalcase/-/pascalcase-0.1.1.tgz",
+      "integrity": "sha1-s2PlXoAGym/iF4TS2yK9FdeRfxQ="
+    },
+    "path-is-absolute": {
+      "version": "1.0.1",
+      "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
+      "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=",
+      "dev": true
+    },
+    "path-parse": {
+      "version": "1.0.5",
+      "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.5.tgz",
+      "integrity": "sha1-PBrfhx6pzWyUMbbqK9dKD/BVxME="
+    },
+    "path-to-regexp": {
+      "version": "2.4.0",
+      "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-2.4.0.tgz",
+      "integrity": "sha512-G6zHoVqC6GGTQkZwF4lkuEyMbVOjoBKAEybQUypI1WTkqinCOrq2x6U2+phkJ1XsEMTy4LjtwPI7HW+NVrRR2w==",
+      "dev": true
+    },
+    "pathval": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/pathval/-/pathval-1.1.0.tgz",
+      "integrity": "sha1-uULm1L3mUwBe9rcTYd74cn0GReA=",
+      "dev": true
+    },
+    "posix-character-classes": {
+      "version": "0.1.1",
+      "resolved": "https://registry.npmjs.org/posix-character-classes/-/posix-character-classes-0.1.1.tgz",
+      "integrity": "sha1-AerA/jta9xoqbAL+q7jB/vfgDqs="
+    },
+    "process-nextick-args": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.0.tgz",
+      "integrity": "sha512-MtEC1TqN0EU5nephaJ4rAtThHtC86dNN9qCuEhtshvpVBkAW5ZO7BASN9REnF9eoXGcRub+pFuKEpOHE+HbEMw==",
+      "dev": true
+    },
+    "qs": {
+      "version": "6.5.2",
+      "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz",
+      "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==",
+      "dev": true
+    },
+    "raw-body": {
+      "version": "2.3.3",
+      "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.3.3.tgz",
+      "integrity": "sha512-9esiElv1BrZoI3rCDuOuKCBRbuApGGaDPQfjSflGxdy4oyzqghxu6klEkkVIvBje+FF0BX9coEv8KqW6X/7njw==",
+      "dev": true,
+      "requires": {
+        "bytes": "3.0.0",
+        "http-errors": "1.6.3",
+        "iconv-lite": "0.4.23",
+        "unpipe": "1.0.0"
+      },
+      "dependencies": {
+        "http-errors": {
+          "version": "1.6.3",
+          "resolved": "http://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz",
+          "integrity": "sha1-i1VoC7S+KDoLW/TqLjhYC+HZMg0=",
+          "dev": true,
+          "requires": {
+            "depd": "~1.1.2",
+            "inherits": "2.0.3",
+            "setprototypeof": "1.1.0",
+            "statuses": ">= 1.4.0 < 2"
+          }
+        }
+      }
+    },
+    "readable-stream": {
+      "version": "2.3.6",
+      "resolved": "http://registry.npmjs.org/readable-stream/-/readable-stream-2.3.6.tgz",
+      "integrity": "sha512-tQtKA9WIAhBF3+VLAseyMqZeBjW0AHJoxOtYqSUZNJxauErmLbVm2FW1y+J/YA9dUrAC39ITejlZWhVIwawkKw==",
+      "dev": true,
+      "requires": {
+        "core-util-is": "~1.0.0",
+        "inherits": "~2.0.3",
+        "isarray": "~1.0.0",
+        "process-nextick-args": "~2.0.0",
+        "safe-buffer": "~5.1.1",
+        "string_decoder": "~1.1.1",
+        "util-deprecate": "~1.0.1"
+      }
+    },
+    "reduce-flatten": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/reduce-flatten/-/reduce-flatten-2.0.0.tgz",
+      "integrity": "sha512-EJ4UNY/U1t2P/2k6oqotuX2Cc3T6nxJwsM0N0asT7dhrtH1ltUxDn4NalSYmPE2rCkVpcf/X6R0wDwcFpzhd4w==",
+      "dev": true
+    },
+    "regex-not": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/regex-not/-/regex-not-1.0.2.tgz",
+      "integrity": "sha512-J6SDjUgDxQj5NusnOtdFxDwN/+HWykR8GELwctJ7mdqhcyy1xEc4SRFHUXvxTp661YaVKAjfRLZ9cCqS6tn32A==",
+      "requires": {
+        "extend-shallow": "^3.0.2",
+        "safe-regex": "^1.1.0"
+      }
+    },
+    "repeat-element": {
+      "version": "1.1.3",
+      "resolved": "https://registry.npmjs.org/repeat-element/-/repeat-element-1.1.3.tgz",
+      "integrity": "sha512-ahGq0ZnV5m5XtZLMb+vP76kcAM5nkLqk0lpqAuojSKGgQtn4eRi4ZZGm2olo2zKFH+sMsWaqOCW1dqAnOru72g=="
+    },
+    "repeat-string": {
+      "version": "1.6.1",
+      "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz",
+      "integrity": "sha1-jcrkcOHIirwtYA//Sndihtp15jc="
+    },
+    "req-then": {
+      "version": "0.6.4",
+      "resolved": "https://registry.npmjs.org/req-then/-/req-then-0.6.4.tgz",
+      "integrity": "sha512-Uf7xsK1qPqPUetESHemNQ7nGtgOxngSFtlcAOOkx0lDAo+XRZpEA9QDrGBdyOfGq4b+a0z/D5gR2VJ+pp/dzBA==",
+      "dev": true,
+      "requires": {
+        "array-back": "^2.0.0",
+        "defer-promise": "^1.0.1",
+        "lodash.pick": "^4.4.0",
+        "stream-read-all": "^0.1.0",
+        "typical": "^2.6.1"
+      },
+      "dependencies": {
+        "typical": {
+          "version": "2.6.1",
+          "resolved": "https://registry.npmjs.org/typical/-/typical-2.6.1.tgz",
+          "integrity": "sha1-XAgOXWYcu+OCWdLnCjxyU+hziB0=",
+          "dev": true
+        }
+      }
+    },
+    "resolve": {
+      "version": "1.8.1",
+      "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.8.1.tgz",
+      "integrity": "sha512-AicPrAC7Qu1JxPCZ9ZgCZlY35QgFnNqc+0LtbRNxnVw4TXvjQ72wnuL9JQcEBgXkI9JM8MsT9kaQoHcpCRJOYA==",
+      "requires": {
+        "path-parse": "^1.0.5"
+      }
+    },
+    "resolve-path": {
+      "version": "1.4.0",
+      "resolved": "https://registry.npmjs.org/resolve-path/-/resolve-path-1.4.0.tgz",
+      "integrity": "sha1-xL2p9e+y/OZSR4c6s2u02DT+Fvc=",
+      "dev": true,
+      "requires": {
+        "http-errors": "~1.6.2",
+        "path-is-absolute": "1.0.1"
+      },
+      "dependencies": {
+        "http-errors": {
+          "version": "1.6.3",
+          "resolved": "http://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz",
+          "integrity": "sha1-i1VoC7S+KDoLW/TqLjhYC+HZMg0=",
+          "dev": true,
+          "requires": {
+            "depd": "~1.1.2",
+            "inherits": "2.0.3",
+            "setprototypeof": "1.1.0",
+            "statuses": ">= 1.4.0 < 2"
+          }
+        }
+      }
+    },
+    "resolve-url": {
+      "version": "0.2.1",
+      "resolved": "https://registry.npmjs.org/resolve-url/-/resolve-url-0.2.1.tgz",
+      "integrity": "sha1-LGN/53yJOv0qZj/iGqkIAGjiBSo="
+    },
+    "ret": {
+      "version": "0.1.15",
+      "resolved": "https://registry.npmjs.org/ret/-/ret-0.1.15.tgz",
+      "integrity": "sha512-TTlYpa+OL+vMMNG24xSlQGEJ3B/RzEfUlLct7b5G/ytav+wPrplCpVMFuwzXbkecJrb6IYo1iFb0S9v37754mg=="
+    },
+    "rollup": {
+      "version": "0.68.2",
+      "resolved": "https://registry.npmjs.org/rollup/-/rollup-0.68.2.tgz",
+      "integrity": "sha512-WgjNCXYv7ZbtStIap1+tz4pd2zwz0XYN//OILwEY6dINIFLVizK1iWdu+ZtUURL/OKnp8Lv2w8FBds8YihzX7Q==",
+      "requires": {
+        "@types/estree": "0.0.39",
+        "@types/node": "*"
+      }
+    },
+    "rollup-plugin-node-resolve": {
+      "version": "4.0.0",
+      "resolved": "https://registry.npmjs.org/rollup-plugin-node-resolve/-/rollup-plugin-node-resolve-4.0.0.tgz",
+      "integrity": "sha512-7Ni+/M5RPSUBfUaP9alwYQiIKnKeXCOHiqBpKUl9kwp3jX5ZJtgXAait1cne6pGEVUUztPD6skIKH9Kq9sNtfw==",
+      "requires": {
+        "builtin-modules": "^3.0.0",
+        "is-module": "^1.0.0",
+        "resolve": "^1.8.1"
+      }
+    },
+    "rollup-plugin-typescript2": {
+      "version": "0.20.1",
+      "resolved": "https://registry.npmjs.org/rollup-plugin-typescript2/-/rollup-plugin-typescript2-0.20.1.tgz",
+      "integrity": "sha512-uxA5JQNOfmJ9rsO0yJKTObb1t4nNYUexCg9zxhEKF+NzZwljYWdfgrA06UzA24cOk8fQjGEe7Q5+Vge2vFlnnw==",
+      "requires": {
+        "fs-extra": "7.0.1",
+        "resolve": "1.10.0",
+        "rollup-pluginutils": "2.4.1",
+        "tslib": "1.9.3"
+      },
+      "dependencies": {
+        "path-parse": {
+          "version": "1.0.6",
+          "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.6.tgz",
+          "integrity": "sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw=="
+        },
+        "resolve": {
+          "version": "1.10.0",
+          "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.10.0.tgz",
+          "integrity": "sha512-3sUr9aq5OfSg2S9pNtPA9hL1FVEAjvfOC4leW0SNf/mpnaakz2a9femSd6LqAww2RaFctwyf1lCqnTHuF1rxDg==",
+          "requires": {
+            "path-parse": "^1.0.6"
+          }
+        }
+      }
+    },
+    "rollup-pluginutils": {
+      "version": "2.4.1",
+      "resolved": "https://registry.npmjs.org/rollup-pluginutils/-/rollup-pluginutils-2.4.1.tgz",
+      "integrity": "sha512-wesMQ9/172IJDIW/lYWm0vW0LiKe5Ekjws481R7z9WTRtmO59cqyM/2uUlxvf6yzm/fElFmHUobeQOYz46dZJw==",
+      "requires": {
+        "estree-walker": "^0.6.0",
+        "micromatch": "^3.1.10"
+      }
+    },
+    "rw": {
+      "version": "1.3.3",
+      "resolved": "https://registry.npmjs.org/rw/-/rw-1.3.3.tgz",
+      "integrity": "sha1-P4Yt+pGrdmsUiF700BEkv9oHT7Q="
+    },
+    "safe-buffer": {
+      "version": "5.1.2",
+      "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
+      "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==",
+      "dev": true
+    },
+    "safe-regex": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/safe-regex/-/safe-regex-1.1.0.tgz",
+      "integrity": "sha1-QKNmnzsHfR6UPURinhV91IAjvy4=",
+      "requires": {
+        "ret": "~0.1.10"
+      }
+    },
+    "safer-buffer": {
+      "version": "2.1.2",
+      "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
+      "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="
+    },
+    "semver": {
+      "version": "5.5.0",
+      "resolved": "https://registry.npmjs.org/semver/-/semver-5.5.0.tgz",
+      "integrity": "sha512-4SJ3dm0WAwWy/NVeioZh5AntkdJoWKxHxcmyP622fOkgHa4z3R0TdBJICINyaSDE6uNwVc8gZr+ZinwZAH4xIA==",
+      "dev": true
+    },
+    "serve-index-75lb": {
+      "version": "2.0.1",
+      "resolved": "https://registry.npmjs.org/serve-index-75lb/-/serve-index-75lb-2.0.1.tgz",
+      "integrity": "sha512-/d9r8bqJlFQcwy0a0nb1KnWAA+Mno+V+VaoKocdkbW5aXKRQd/+4bfnRhQRQr6uEoYwTRJ4xgztOyCJvWcpBpQ==",
+      "dev": true,
+      "requires": {
+        "accepts": "~1.3.4",
+        "batch": "0.6.1",
+        "debug": "2.6.9",
+        "escape-html": "~1.0.3",
+        "http-errors": "~1.6.2",
+        "mime-types": "~2.1.18",
+        "parseurl": "~1.3.2"
+      },
+      "dependencies": {
+        "debug": {
+          "version": "2.6.9",
+          "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+          "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+          "dev": true,
+          "requires": {
+            "ms": "2.0.0"
+          }
+        },
+        "http-errors": {
+          "version": "1.6.3",
+          "resolved": "http://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz",
+          "integrity": "sha1-i1VoC7S+KDoLW/TqLjhYC+HZMg0=",
+          "dev": true,
+          "requires": {
+            "depd": "~1.1.2",
+            "inherits": "2.0.3",
+            "setprototypeof": "1.1.0",
+            "statuses": ">= 1.4.0 < 2"
+          }
+        }
+      }
+    },
+    "set-value": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/set-value/-/set-value-2.0.0.tgz",
+      "integrity": "sha512-hw0yxk9GT/Hr5yJEYnHNKYXkIA8mVJgd9ditYZCe16ZczcaELYYcfvaXesNACk2O8O0nTiPQcQhGUQj8JLzeeg==",
+      "requires": {
+        "extend-shallow": "^2.0.1",
+        "is-extendable": "^0.1.1",
+        "is-plain-object": "^2.0.3",
+        "split-string": "^3.0.1"
+      },
+      "dependencies": {
+        "extend-shallow": {
+          "version": "2.0.1",
+          "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+          "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
+          "requires": {
+            "is-extendable": "^0.1.0"
+          }
+        }
+      }
+    },
+    "setprototypeof": {
+      "version": "1.1.0",
+      "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz",
+      "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ==",
+      "dev": true
+    },
+    "snapdragon": {
+      "version": "0.8.2",
+      "resolved": "https://registry.npmjs.org/snapdragon/-/snapdragon-0.8.2.tgz",
+      "integrity": "sha512-FtyOnWN/wCHTVXOMwvSv26d+ko5vWlIDD6zoUJ7LW8vh+ZBC8QdljveRP+crNrtBwioEUWy/4dMtbBjA4ioNlg==",
+      "requires": {
+        "base": "^0.11.1",
+        "debug": "^2.2.0",
+        "define-property": "^0.2.5",
+        "extend-shallow": "^2.0.1",
+        "map-cache": "^0.2.2",
+        "source-map": "^0.5.6",
+        "source-map-resolve": "^0.5.0",
+        "use": "^3.1.0"
+      },
+      "dependencies": {
+        "debug": {
+          "version": "2.6.9",
+          "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+          "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+          "requires": {
+            "ms": "2.0.0"
+          }
+        },
+        "define-property": {
+          "version": "0.2.5",
+          "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
+          "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
+          "requires": {
+            "is-descriptor": "^0.1.0"
+          }
+        },
+        "extend-shallow": {
+          "version": "2.0.1",
+          "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+          "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
+          "requires": {
+            "is-extendable": "^0.1.0"
+          }
+        },
+        "source-map": {
+          "version": "0.5.7",
+          "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz",
+          "integrity": "sha1-igOdLRAh0i0eoUyA2OpGi6LvP8w="
+        }
+      }
+    },
+    "snapdragon-node": {
+      "version": "2.1.1",
+      "resolved": "https://registry.npmjs.org/snapdragon-node/-/snapdragon-node-2.1.1.tgz",
+      "integrity": "sha512-O27l4xaMYt/RSQ5TR3vpWCAB5Kb/czIcqUFOM/C4fYcLnbZUc1PkjTAMjof2pBWaSTwOUd6qUHcFGVGj7aIwnw==",
+      "requires": {
+        "define-property": "^1.0.0",
+        "isobject": "^3.0.0",
+        "snapdragon-util": "^3.0.1"
+      },
+      "dependencies": {
+        "define-property": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/define-property/-/define-property-1.0.0.tgz",
+          "integrity": "sha1-dp66rz9KY6rTr56NMEybvnm/sOY=",
+          "requires": {
+            "is-descriptor": "^1.0.0"
+          }
+        },
+        "is-accessor-descriptor": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/is-accessor-descriptor/-/is-accessor-descriptor-1.0.0.tgz",
+          "integrity": "sha512-m5hnHTkcVsPfqx3AKlyttIPb7J+XykHvJP2B9bZDjlhLIoEq4XoK64Vg7boZlVWYK6LUY94dYPEE7Lh0ZkZKcQ==",
+          "requires": {
+            "kind-of": "^6.0.0"
+          }
+        },
+        "is-data-descriptor": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/is-data-descriptor/-/is-data-descriptor-1.0.0.tgz",
+          "integrity": "sha512-jbRXy1FmtAoCjQkVmIVYwuuqDFUbaOeDjmed1tOGPrsMhtJA4rD9tkgA0F1qJ3gRFRXcHYVkdeaP50Q5rE/jLQ==",
+          "requires": {
+            "kind-of": "^6.0.0"
+          }
+        },
+        "is-descriptor": {
+          "version": "1.0.2",
+          "resolved": "https://registry.npmjs.org/is-descriptor/-/is-descriptor-1.0.2.tgz",
+          "integrity": "sha512-2eis5WqQGV7peooDyLmNEPUrps9+SXX5c9pL3xEB+4e9HnGuDa7mB7kHxHw4CbqS9k1T2hOH3miL8n8WtiYVtg==",
+          "requires": {
+            "is-accessor-descriptor": "^1.0.0",
+            "is-data-descriptor": "^1.0.0",
+            "kind-of": "^6.0.2"
+          }
+        }
+      }
+    },
+    "snapdragon-util": {
+      "version": "3.0.1",
+      "resolved": "https://registry.npmjs.org/snapdragon-util/-/snapdragon-util-3.0.1.tgz",
+      "integrity": "sha512-mbKkMdQKsjX4BAL4bRYTj21edOf8cN7XHdYUJEe+Zn99hVEYcMvKPct1IqNe7+AZPirn8BCDOQBHQZknqmKlZQ==",
+      "requires": {
+        "kind-of": "^3.2.0"
+      },
+      "dependencies": {
+        "kind-of": {
+          "version": "3.2.2",
+          "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+          "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+          "requires": {
+            "is-buffer": "^1.1.5"
+          }
+        }
+      }
+    },
+    "source-map": {
+      "version": "0.6.1",
+      "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+      "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
+      "dev": true
+    },
+    "source-map-resolve": {
+      "version": "0.5.2",
+      "resolved": "https://registry.npmjs.org/source-map-resolve/-/source-map-resolve-0.5.2.tgz",
+      "integrity": "sha512-MjqsvNwyz1s0k81Goz/9vRBe9SZdB09Bdw+/zYyO+3CuPk6fouTaxscHkgtE8jKvf01kVfl8riHzERQ/kefaSA==",
+      "requires": {
+        "atob": "^2.1.1",
+        "decode-uri-component": "^0.2.0",
+        "resolve-url": "^0.2.1",
+        "source-map-url": "^0.4.0",
+        "urix": "^0.1.0"
+      }
+    },
+    "source-map-support": {
+      "version": "0.5.9",
+      "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.9.tgz",
+      "integrity": "sha512-gR6Rw4MvUlYy83vP0vxoVNzM6t8MUXqNuRsuBmBHQDu1Fh6X015FrLdgoDKcNdkwGubozq0P4N0Q37UyFVr1EA==",
+      "dev": true,
+      "requires": {
+        "buffer-from": "^1.0.0",
+        "source-map": "^0.6.0"
+      }
+    },
+    "source-map-url": {
+      "version": "0.4.0",
+      "resolved": "https://registry.npmjs.org/source-map-url/-/source-map-url-0.4.0.tgz",
+      "integrity": "sha1-PpNdfd1zYxuXZZlW1VEo6HtQhKM="
+    },
+    "split-string": {
+      "version": "3.1.0",
+      "resolved": "https://registry.npmjs.org/split-string/-/split-string-3.1.0.tgz",
+      "integrity": "sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw==",
+      "requires": {
+        "extend-shallow": "^3.0.0"
+      }
+    },
+    "sprintf-js": {
+      "version": "1.0.3",
+      "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz",
+      "integrity": "sha1-BOaSb2YolTVPPdAVIDYzuFcpfiw=",
+      "dev": true
+    },
+    "static-extend": {
+      "version": "0.1.2",
+      "resolved": "https://registry.npmjs.org/static-extend/-/static-extend-0.1.2.tgz",
+      "integrity": "sha1-YICcOcv/VTNyJv1eC1IPNB8ftcY=",
+      "requires": {
+        "define-property": "^0.2.5",
+        "object-copy": "^0.1.0"
+      },
+      "dependencies": {
+        "define-property": {
+          "version": "0.2.5",
+          "resolved": "https://registry.npmjs.org/define-property/-/define-property-0.2.5.tgz",
+          "integrity": "sha1-w1se+RjsPJkPmlvFe+BKrOxcgRY=",
+          "requires": {
+            "is-descriptor": "^0.1.0"
+          }
+        }
+      }
+    },
+    "statuses": {
+      "version": "1.5.0",
+      "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz",
+      "integrity": "sha1-Fhx9rBd2Wf2YEfQ3cfqZOBR4Yow=",
+      "dev": true
+    },
+    "stream-log-stats": {
+      "version": "2.0.2",
+      "resolved": "https://registry.npmjs.org/stream-log-stats/-/stream-log-stats-2.0.2.tgz",
+      "integrity": "sha512-b1LccxXhMlOQQrzSqapQHyZ3UI00QTAv+8VecFgsJz//sGB5LFl/+mkFeWBVVI2/E4DlCT4sGgvLExB/VTVFfA==",
+      "dev": true,
+      "requires": {
+        "JSONStream": "^1.3.1",
+        "ansi-escape-sequences": "^3.0.0",
+        "byte-size": "^3.0.0",
+        "common-log-format": "~0.1.3",
+        "lodash.throttle": "^4.1.1",
+        "stream-via": "^1.0.3",
+        "table-layout": "~0.4.0"
+      },
+      "dependencies": {
+        "ansi-escape-sequences": {
+          "version": "3.0.0",
+          "resolved": "https://registry.npmjs.org/ansi-escape-sequences/-/ansi-escape-sequences-3.0.0.tgz",
+          "integrity": "sha1-HBg5S2r5t2/5pjUJ+kl2af0s5T4=",
+          "dev": true,
+          "requires": {
+            "array-back": "^1.0.3"
+          }
+        },
+        "array-back": {
+          "version": "1.0.4",
+          "resolved": "https://registry.npmjs.org/array-back/-/array-back-1.0.4.tgz",
+          "integrity": "sha1-ZEun8JX3/898Q7Xw3DnTwfA8Bjs=",
+          "dev": true,
+          "requires": {
+            "typical": "^2.6.0"
+          }
+        },
+        "byte-size": {
+          "version": "3.0.0",
+          "resolved": "http://registry.npmjs.org/byte-size/-/byte-size-3.0.0.tgz",
+          "integrity": "sha1-QG+eI2aqXav2NnLrKR17sJSV2nU=",
+          "dev": true
+        },
+        "typical": {
+          "version": "2.6.1",
+          "resolved": "https://registry.npmjs.org/typical/-/typical-2.6.1.tgz",
+          "integrity": "sha1-XAgOXWYcu+OCWdLnCjxyU+hziB0=",
+          "dev": true
+        }
+      }
+    },
+    "stream-read-all": {
+      "version": "0.1.2",
+      "resolved": "https://registry.npmjs.org/stream-read-all/-/stream-read-all-0.1.2.tgz",
+      "integrity": "sha512-KX42xBg853m+KnwRtwCKT95ShopAbY/MNKs2dBQ0WkNeuJdqgQYRtGRbTlxdx0L6t979h3z/wMq2eMSAu7Tygw==",
+      "dev": true
+    },
+    "stream-slice": {
+      "version": "0.1.2",
+      "resolved": "https://registry.npmjs.org/stream-slice/-/stream-slice-0.1.2.tgz",
+      "integrity": "sha1-LcT04bk2+xPz6zmi3vGTJ5jQeks=",
+      "dev": true
+    },
+    "stream-via": {
+      "version": "1.0.4",
+      "resolved": "https://registry.npmjs.org/stream-via/-/stream-via-1.0.4.tgz",
+      "integrity": "sha512-DBp0lSvX5G9KGRDTkR/R+a29H+Wk2xItOF+MpZLLNDWbEV9tGPnqLPxHEYjmiz8xGtJHRIqmI+hCjmNzqoA4nQ==",
+      "dev": true
+    },
+    "streaming-json-stringify": {
+      "version": "3.1.0",
+      "resolved": "https://registry.npmjs.org/streaming-json-stringify/-/streaming-json-stringify-3.1.0.tgz",
+      "integrity": "sha1-gCAEN6mTzDnE/gAmO3s7kDrIevU=",
+      "dev": true,
+      "requires": {
+        "json-stringify-safe": "5",
+        "readable-stream": "2"
+      }
+    },
+    "string_decoder": {
+      "version": "1.1.1",
+      "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz",
+      "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==",
+      "dev": true,
+      "requires": {
+        "safe-buffer": "~5.1.0"
+      }
+    },
+    "strip-ansi": {
+      "version": "3.0.1",
+      "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz",
+      "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=",
+      "dev": true,
+      "requires": {
+        "ansi-regex": "^2.0.0"
+      }
+    },
+    "strip-bom": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/strip-bom/-/strip-bom-3.0.0.tgz",
+      "integrity": "sha1-IzTBjpx1n3vdVv3vfprj1YjmjtM=",
+      "dev": true,
+      "optional": true
+    },
+    "supports-color": {
+      "version": "5.4.0",
+      "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.4.0.tgz",
+      "integrity": "sha512-zjaXglF5nnWpsq470jSv6P9DwPvgLkuapYmfDm3JWOm0vkNTVF2tI4UrN2r6jH1qM/uc/WtxYY1hYoA2dOKj5w==",
+      "dev": true,
+      "requires": {
+        "has-flag": "^3.0.0"
+      }
+    },
+    "table-layout": {
+      "version": "0.4.4",
+      "resolved": "https://registry.npmjs.org/table-layout/-/table-layout-0.4.4.tgz",
+      "integrity": "sha512-uNaR3SRMJwfdp9OUr36eyEi6LLsbcTqTO/hfTsNviKsNeyMBPICJCC7QXRF3+07bAP6FRwA8rczJPBqXDc0CkQ==",
+      "dev": true,
+      "requires": {
+        "array-back": "^2.0.0",
+        "deep-extend": "~0.6.0",
+        "lodash.padend": "^4.6.1",
+        "typical": "^2.6.1",
+        "wordwrapjs": "^3.0.0"
+      },
+      "dependencies": {
+        "typical": {
+          "version": "2.6.1",
+          "resolved": "https://registry.npmjs.org/typical/-/typical-2.6.1.tgz",
+          "integrity": "sha1-XAgOXWYcu+OCWdLnCjxyU+hziB0=",
+          "dev": true
+        }
+      }
+    },
+    "test-value": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/test-value/-/test-value-3.0.0.tgz",
+      "integrity": "sha512-sVACdAWcZkSU9x7AOmJo5TqE+GyNJknHaHsMrR6ZnhjVlVN9Yx6FjHrsKZ3BjIpPCT68zYesPWkakrNupwfOTQ==",
+      "dev": true,
+      "requires": {
+        "array-back": "^2.0.0",
+        "typical": "^2.6.1"
+      },
+      "dependencies": {
+        "typical": {
+          "version": "2.6.1",
+          "resolved": "https://registry.npmjs.org/typical/-/typical-2.6.1.tgz",
+          "integrity": "sha1-XAgOXWYcu+OCWdLnCjxyU+hziB0=",
+          "dev": true
+        }
+      }
+    },
+    "thenify": {
+      "version": "3.3.0",
+      "resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.0.tgz",
+      "integrity": "sha1-5p44obq+lpsBCCB5eLn2K4hgSDk=",
+      "dev": true,
+      "requires": {
+        "any-promise": "^1.0.0"
+      }
+    },
+    "thenify-all": {
+      "version": "1.6.0",
+      "resolved": "https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz",
+      "integrity": "sha1-GhkY1ALY/D+Y+/I02wvMjMEOlyY=",
+      "dev": true,
+      "requires": {
+        "thenify": ">= 3.1.0 < 4"
+      }
+    },
+    "through": {
+      "version": "2.3.8",
+      "resolved": "http://registry.npmjs.org/through/-/through-2.3.8.tgz",
+      "integrity": "sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU=",
+      "dev": true
+    },
+    "to-object-path": {
+      "version": "0.3.0",
+      "resolved": "https://registry.npmjs.org/to-object-path/-/to-object-path-0.3.0.tgz",
+      "integrity": "sha1-KXWIt7Dn4KwI4E5nL4XB9JmeF68=",
+      "requires": {
+        "kind-of": "^3.0.2"
+      },
+      "dependencies": {
+        "kind-of": {
+          "version": "3.2.2",
+          "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-3.2.2.tgz",
+          "integrity": "sha1-MeohpzS6ubuw8yRm2JOupR5KPGQ=",
+          "requires": {
+            "is-buffer": "^1.1.5"
+          }
+        }
+      }
+    },
+    "to-regex": {
+      "version": "3.0.2",
+      "resolved": "https://registry.npmjs.org/to-regex/-/to-regex-3.0.2.tgz",
+      "integrity": "sha512-FWtleNAtZ/Ki2qtqej2CXTOayOH9bHDQF+Q48VpWyDXjbYxA4Yz8iDB31zXOBUlOHHKidDbqGVrTUvQMPmBGBw==",
+      "requires": {
+        "define-property": "^2.0.2",
+        "extend-shallow": "^3.0.2",
+        "regex-not": "^1.0.2",
+        "safe-regex": "^1.1.0"
+      }
+    },
+    "to-regex-range": {
+      "version": "2.1.1",
+      "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-2.1.1.tgz",
+      "integrity": "sha1-fIDBe53+vlmeJzZ+DU3VWQFB2zg=",
+      "requires": {
+        "is-number": "^3.0.0",
+        "repeat-string": "^1.6.1"
+      }
+    },
+    "toidentifier": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.0.tgz",
+      "integrity": "sha512-yaOH/Pk/VEhBWWTlhI+qXxDFXlejDGcQipMlyxda9nthulaxLZUNcUqFxokp0vcYnvteJln5FNQDRrxj3YcbVw==",
+      "dev": true
+    },
+    "ts-mocha": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/ts-mocha/-/ts-mocha-2.0.0.tgz",
+      "integrity": "sha512-Rj6+vvwKtOTs5GsNO1jLl4DIXUGnyAg5HFt2Yb4SHIRN45clTJkHWpNdTxCSL0u+1oeavSYJah6d1PZ++Ju5pw==",
+      "dev": true,
+      "requires": {
+        "ts-node": "7.0.0",
+        "tsconfig-paths": "^3.5.0"
+      }
+    },
+    "ts-node": {
+      "version": "7.0.0",
+      "resolved": "https://registry.npmjs.org/ts-node/-/ts-node-7.0.0.tgz",
+      "integrity": "sha512-klJsfswHP0FuOLsvBZ/zzCfUvakOSSxds78mVeK7I+qP76YWtxf16hEZsp3U+b0kIo82R5UatGFeblYMqabb2Q==",
+      "dev": true,
+      "requires": {
+        "arrify": "^1.0.0",
+        "buffer-from": "^1.1.0",
+        "diff": "^3.1.0",
+        "make-error": "^1.1.1",
+        "minimist": "^1.2.0",
+        "mkdirp": "^0.5.1",
+        "source-map-support": "^0.5.6",
+        "yn": "^2.0.0"
+      }
+    },
+    "tsconfig-paths": {
+      "version": "3.6.0",
+      "resolved": "https://registry.npmjs.org/tsconfig-paths/-/tsconfig-paths-3.6.0.tgz",
+      "integrity": "sha512-mrqQIP2F4e03aMTCiPdedCIT300//+q0ET53o5WqqtQjmEICxP9yfz/sHTpPqXpssuJEzODsEzJaLRaf5J2X1g==",
+      "dev": true,
+      "optional": true,
+      "requires": {
+        "@types/json5": "^0.0.29",
+        "deepmerge": "^2.0.1",
+        "json5": "^1.0.1",
+        "minimist": "^1.2.0",
+        "strip-bom": "^3.0.0"
+      }
+    },
+    "tslib": {
+      "version": "1.9.3",
+      "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.9.3.tgz",
+      "integrity": "sha512-4krF8scpejhaOgqzBEcGM7yDIEfi0/8+8zDRZhNZZ2kjmHJ4hv3zCbQWxoJGz1iw5U0Jl0nma13xzHXcncMavQ=="
+    },
+    "tslint": {
+      "version": "5.12.0",
+      "resolved": "https://registry.npmjs.org/tslint/-/tslint-5.12.0.tgz",
+      "integrity": "sha512-CKEcH1MHUBhoV43SA/Jmy1l24HJJgI0eyLbBNSRyFlsQvb9v6Zdq+Nz2vEOH00nC5SUx4SneJ59PZUS/ARcokQ==",
+      "dev": true,
+      "requires": {
+        "babel-code-frame": "^6.22.0",
+        "builtin-modules": "^1.1.1",
+        "chalk": "^2.3.0",
+        "commander": "^2.12.1",
+        "diff": "^3.2.0",
+        "glob": "^7.1.1",
+        "js-yaml": "^3.7.0",
+        "minimatch": "^3.0.4",
+        "resolve": "^1.3.2",
+        "semver": "^5.3.0",
+        "tslib": "^1.8.0",
+        "tsutils": "^2.27.2"
+      },
+      "dependencies": {
+        "builtin-modules": {
+          "version": "1.1.1",
+          "resolved": "https://registry.npmjs.org/builtin-modules/-/builtin-modules-1.1.1.tgz",
+          "integrity": "sha1-Jw8HbFpywC9bZaR9+Uxf46J4iS8=",
+          "dev": true
+        }
+      }
+    },
+    "tsutils": {
+      "version": "2.29.0",
+      "resolved": "https://registry.npmjs.org/tsutils/-/tsutils-2.29.0.tgz",
+      "integrity": "sha512-g5JVHCIJwzfISaXpXE1qvNalca5Jwob6FjI4AoPlqMusJ6ftFE7IkkFoMhVLRgK+4Kx3gkzb8UZK5t5yTTvEmA==",
+      "dev": true,
+      "requires": {
+        "tslib": "^1.8.1"
+      }
+    },
+    "type-detect": {
+      "version": "4.0.8",
+      "resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz",
+      "integrity": "sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==",
+      "dev": true
+    },
+    "type-is": {
+      "version": "1.6.16",
+      "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.16.tgz",
+      "integrity": "sha512-HRkVv/5qY2G6I8iab9cI7v1bOIdhm94dVjQCPFElW9W+3GeDOSHmy2EBYe4VTApuzolPcmgFTN3ftVJRKR2J9Q==",
+      "dev": true,
+      "requires": {
+        "media-typer": "0.3.0",
+        "mime-types": "~2.1.18"
+      }
+    },
+    "typescript": {
+      "version": "3.2.2",
+      "resolved": "https://registry.npmjs.org/typescript/-/typescript-3.2.2.tgz",
+      "integrity": "sha512-VCj5UiSyHBjwfYacmDuc/NOk4QQixbE+Wn7MFJuS0nRuPQbof132Pw4u53dm264O8LPc2MVsc7RJNml5szurkg==",
+      "dev": true
+    },
+    "typical": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/typical/-/typical-3.0.0.tgz",
+      "integrity": "sha512-2/pGDQD/q1iJWlrj357aEKGIlRvHirm81x04lsg51hreiohy2snAXoFc9dIHFWEx9LsfOVA5K7lUGM9rcUqwlQ==",
+      "dev": true
+    },
+    "union-value": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/union-value/-/union-value-1.0.0.tgz",
+      "integrity": "sha1-XHHDTLW61dzr4+oM0IIHulqhrqQ=",
+      "requires": {
+        "arr-union": "^3.1.0",
+        "get-value": "^2.0.6",
+        "is-extendable": "^0.1.1",
+        "set-value": "^0.4.3"
+      },
+      "dependencies": {
+        "extend-shallow": {
+          "version": "2.0.1",
+          "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+          "integrity": "sha1-Ua99YUrZqfYQ6huvu5idaxxWiQ8=",
+          "requires": {
+            "is-extendable": "^0.1.0"
+          }
+        },
+        "set-value": {
+          "version": "0.4.3",
+          "resolved": "https://registry.npmjs.org/set-value/-/set-value-0.4.3.tgz",
+          "integrity": "sha1-fbCPnT0i3H945Trzw79GZuzfzPE=",
+          "requires": {
+            "extend-shallow": "^2.0.1",
+            "is-extendable": "^0.1.1",
+            "is-plain-object": "^2.0.1",
+            "to-object-path": "^0.3.0"
+          }
+        }
+      }
+    },
+    "universalify": {
+      "version": "0.1.2",
+      "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz",
+      "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg=="
+    },
+    "unpipe": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",
+      "integrity": "sha1-sr9O6FFKrmFltIF4KdIbLvSZBOw=",
+      "dev": true
+    },
+    "unset-value": {
+      "version": "1.0.0",
+      "resolved": "https://registry.npmjs.org/unset-value/-/unset-value-1.0.0.tgz",
+      "integrity": "sha1-g3aHP30jNRef+x5vw6jtDfyKtVk=",
+      "requires": {
+        "has-value": "^0.3.1",
+        "isobject": "^3.0.0"
+      },
+      "dependencies": {
+        "has-value": {
+          "version": "0.3.1",
+          "resolved": "https://registry.npmjs.org/has-value/-/has-value-0.3.1.tgz",
+          "integrity": "sha1-ex9YutpiyoJ+wKIHgCVlSEWZXh8=",
+          "requires": {
+            "get-value": "^2.0.3",
+            "has-values": "^0.1.4",
+            "isobject": "^2.0.0"
+          },
+          "dependencies": {
+            "isobject": {
+              "version": "2.1.0",
+              "resolved": "https://registry.npmjs.org/isobject/-/isobject-2.1.0.tgz",
+              "integrity": "sha1-8GVWEJaj8dou9GJy+BXIQNh+DIk=",
+              "requires": {
+                "isarray": "1.0.0"
+              }
+            }
+          }
+        },
+        "has-values": {
+          "version": "0.1.4",
+          "resolved": "https://registry.npmjs.org/has-values/-/has-values-0.1.4.tgz",
+          "integrity": "sha1-bWHeldkd/Km5oCCJrThL/49it3E="
+        }
+      }
+    },
+    "urix": {
+      "version": "0.1.0",
+      "resolved": "https://registry.npmjs.org/urix/-/urix-0.1.0.tgz",
+      "integrity": "sha1-2pN/emLiH+wf0Y1Js1wpNQZ6bHI="
+    },
+    "use": {
+      "version": "3.1.1",
+      "resolved": "https://registry.npmjs.org/use/-/use-3.1.1.tgz",
+      "integrity": "sha512-cwESVXlO3url9YWlFW/TA9cshCEhtu7IKJ/p5soJ/gGpj7vbvFrAY/eIioQ6Dw23KjZhYgiIo8HOs1nQ2vr/oQ=="
+    },
+    "util-deprecate": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
+      "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=",
+      "dev": true
+    },
+    "vary": {
+      "version": "1.1.2",
+      "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz",
+      "integrity": "sha1-IpnwLG3tMNSllhsLn3RSShj2NPw=",
+      "dev": true
+    },
+    "walk-back": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/walk-back/-/walk-back-3.0.0.tgz",
+      "integrity": "sha1-I1h4ejXakQMtrV6S+AsSNw2HlcU=",
+      "dev": true
+    },
+    "wordwrapjs": {
+      "version": "3.0.0",
+      "resolved": "https://registry.npmjs.org/wordwrapjs/-/wordwrapjs-3.0.0.tgz",
+      "integrity": "sha512-mO8XtqyPvykVCsrwj5MlOVWvSnCdT+C+QVbm6blradR7JExAhbkZ7hZ9A+9NUtwzSqrlUo9a67ws0EiILrvRpw==",
+      "dev": true,
+      "requires": {
+        "reduce-flatten": "^1.0.1",
+        "typical": "^2.6.1"
+      },
+      "dependencies": {
+        "reduce-flatten": {
+          "version": "1.0.1",
+          "resolved": "https://registry.npmjs.org/reduce-flatten/-/reduce-flatten-1.0.1.tgz",
+          "integrity": "sha1-JYx479FT3fk8tWEjf2EYTzaW4yc=",
+          "dev": true
+        },
+        "typical": {
+          "version": "2.6.1",
+          "resolved": "https://registry.npmjs.org/typical/-/typical-2.6.1.tgz",
+          "integrity": "sha1-XAgOXWYcu+OCWdLnCjxyU+hziB0=",
+          "dev": true
+        }
+      }
+    },
+    "wrappy": {
+      "version": "1.0.2",
+      "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
+      "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=",
+      "dev": true
+    },
+    "ws": {
+      "version": "5.2.2",
+      "resolved": "https://registry.npmjs.org/ws/-/ws-5.2.2.tgz",
+      "integrity": "sha512-jaHFD6PFv6UgoIVda6qZllptQsMlDEJkTQcybzzXDYM1XO9Y8em691FGMPmM46WGyLU4z9KMgQN+qrux/nhlHA==",
+      "dev": true,
+      "requires": {
+        "async-limiter": "~1.0.0"
+      }
+    },
+    "ylru": {
+      "version": "1.2.1",
+      "resolved": "https://registry.npmjs.org/ylru/-/ylru-1.2.1.tgz",
+      "integrity": "sha512-faQrqNMzcPCHGVC2aaOINk13K+aaBDUPjGWl0teOXywElLjyVAB6Oe2jj62jHYtwsU49jXhScYbvPENK+6zAvQ==",
+      "dev": true
+    },
+    "yn": {
+      "version": "2.0.0",
+      "resolved": "https://registry.npmjs.org/yn/-/yn-2.0.0.tgz",
+      "integrity": "sha1-5a2ryKz0CPY4X8dklWhMiOavaJo=",
+      "dev": true
+    }
+  }
+}
diff --git a/src/v8/tools/turbolizer/package.json b/src/v8/tools/turbolizer/package.json
new file mode 100644
index 0000000..2213fe3
--- /dev/null
+++ b/src/v8/tools/turbolizer/package.json
@@ -0,0 +1,34 @@
+{
+  "name": "turbolizer",
+  "version": "0.1.0",
+  "description": "Visualization tool for V8 TurboFan IR graphs",
+  "scripts": {
+    "build": "rollup -c",
+    "watch": "rollup -c -w",
+    "deploy": "./deploy.sh",
+    "test": "ts-mocha -p tsconfig.test.json test/**/*-test.ts",
+    "dev-server": "ws",
+    "presubmit": "tslint --project ./tslint.json --fix"
+  },
+  "author": "The V8 team",
+  "license": "MIT",
+  "dependencies": {
+    "@types/d3": "^5.5.0",
+    "d3": "^5.7.0",
+    "rollup": "^0.68.2",
+    "rollup-plugin-node-resolve": "^4.0.0",
+    "rollup-plugin-typescript2": "^0.20.1"
+  },
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/v8/v8.git"
+  },
+  "devDependencies": {
+    "chai": "^4.2.0",
+    "local-web-server": "^2.6.0",
+    "mocha": "^5.2.0",
+    "ts-mocha": "^2.0.0",
+    "typescript": "^3.2.2",
+    "tslint": "^5.12.0"
+  }
+}
diff --git a/src/v8/tools/turbolizer/rollup.config.js b/src/v8/tools/turbolizer/rollup.config.js
new file mode 100644
index 0000000..05b69b8
--- /dev/null
+++ b/src/v8/tools/turbolizer/rollup.config.js
@@ -0,0 +1,32 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import typescript from 'rollup-plugin-typescript2';
+import node from 'rollup-plugin-node-resolve';
+
+import path from 'path'
+
+const onwarn = warning => {
+  // Silence circular dependency warning for moment package
+  const node_modules = path.normalize('node_modules/');
+  if (warning.code === 'CIRCULAR_DEPENDENCY' &&
+    !warning.importer.indexOf(node_modules)) {
+    return
+  }
+
+  console.warn(`(!) ${warning.message}`)
+}
+
+export default {
+  input: "src/turbo-visualizer.ts",
+  plugins: [node(), typescript({
+    abortOnError: false
+  })],
+  output: {
+    file: "build/turbolizer.js",
+    format: "iife",
+    sourcemap: true
+  },
+  onwarn: onwarn
+};
diff --git a/src/v8/tools/turbolizer/src/code-view.ts b/src/v8/tools/turbolizer/src/code-view.ts
new file mode 100644
index 0000000..298f08b
--- /dev/null
+++ b/src/v8/tools/turbolizer/src/code-view.ts
@@ -0,0 +1,274 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import { Source, SourceResolver, sourcePositionToStringKey } from "../src/source-resolver";
+import { SelectionBroker } from "../src/selection-broker";
+import { View } from "../src/view";
+import { MySelection } from "../src/selection";
+import { ViewElements } from "../src/util";
+import { SelectionHandler } from "./selection-handler";
+
+export enum CodeMode {
+  MAIN_SOURCE = "main function",
+  INLINED_SOURCE = "inlined function"
+}
+
+export class CodeView extends View {
+  broker: SelectionBroker;
+  source: Source;
+  sourceResolver: SourceResolver;
+  codeMode: CodeMode;
+  sourcePositionToHtmlElement: Map<string, HTMLElement>;
+  showAdditionalInliningPosition: boolean;
+  selectionHandler: SelectionHandler;
+  selection: MySelection;
+
+  createViewElement() {
+    const sourceContainer = document.createElement("div");
+    sourceContainer.classList.add("source-container");
+    return sourceContainer;
+  }
+
+  constructor(parent: HTMLElement, broker: SelectionBroker, sourceResolver: SourceResolver, sourceFunction: Source, codeMode: CodeMode) {
+    super(parent);
+    const view = this;
+    view.broker = broker;
+    view.sourceResolver = sourceResolver;
+    view.source = sourceFunction;
+    view.codeMode = codeMode;
+    this.sourcePositionToHtmlElement = new Map();
+    this.showAdditionalInliningPosition = false;
+
+    const selectionHandler = {
+      clear: function () {
+        view.selection.clear();
+        view.updateSelection();
+        broker.broadcastClear(this);
+      },
+      select: function (sourcePositions, selected) {
+        const locations = [];
+        for (const sourcePosition of sourcePositions) {
+          locations.push(sourcePosition);
+          sourceResolver.addInliningPositions(sourcePosition, locations);
+        }
+        if (locations.length == 0) return;
+        view.selection.select(locations, selected);
+        view.updateSelection();
+        broker.broadcastSourcePositionSelect(this, locations, selected);
+      },
+      brokeredSourcePositionSelect: function (locations, selected) {
+        const firstSelect = view.selection.isEmpty();
+        for (const location of locations) {
+          const translated = sourceResolver.translateToSourceId(view.source.sourceId, location);
+          if (!translated) continue;
+          view.selection.select([translated], selected);
+        }
+        view.updateSelection(firstSelect);
+      },
+      brokeredClear: function () {
+        view.selection.clear();
+        view.updateSelection();
+      },
+    };
+    view.selection = new MySelection(sourcePositionToStringKey);
+    broker.addSourcePositionHandler(selectionHandler);
+    this.selectionHandler = selectionHandler;
+    this.initializeCode();
+  }
+
+  addHtmlElementToSourcePosition(sourcePosition, element) {
+    const key = sourcePositionToStringKey(sourcePosition);
+    if (this.sourcePositionToHtmlElement.has(key)) {
+      console.log("Warning: duplicate source position", sourcePosition);
+    }
+    this.sourcePositionToHtmlElement.set(key, element);
+  }
+
+  getHtmlElementForSourcePosition(sourcePosition) {
+    const key = sourcePositionToStringKey(sourcePosition);
+    return this.sourcePositionToHtmlElement.get(key);
+  }
+
+  updateSelection(scrollIntoView: boolean = false): void {
+    const mkVisible = new ViewElements(this.divNode.parentNode as HTMLElement);
+    for (const [sp, el] of this.sourcePositionToHtmlElement.entries()) {
+      const isSelected = this.selection.isKeySelected(sp);
+      mkVisible.consider(el, isSelected);
+      el.classList.toggle("selected", isSelected);
+    }
+    mkVisible.apply(scrollIntoView);
+  }
+
+  getCodeHtmlElementName() {
+    return `source-pre-${this.source.sourceId}`;
+  }
+
+  getCodeHeaderHtmlElementName() {
+    return `source-pre-${this.source.sourceId}-header`;
+  }
+
+  getHtmlCodeLines(): NodeListOf<HTMLElement> {
+    const ordereList = this.divNode.querySelector(`#${this.getCodeHtmlElementName()} ol`);
+    return ordereList.childNodes as NodeListOf<HTMLElement>;
+  }
+
+  onSelectLine(lineNumber: number, doClear: boolean) {
+    if (doClear) {
+      this.selectionHandler.clear();
+    }
+    const positions = this.sourceResolver.linetoSourcePositions(lineNumber - 1);
+    if (positions !== undefined) {
+      this.selectionHandler.select(positions, undefined);
+    }
+  }
+
+  onSelectSourcePosition(sourcePosition, doClear: boolean) {
+    if (doClear) {
+      this.selectionHandler.clear();
+    }
+    this.selectionHandler.select([sourcePosition], undefined);
+  }
+
+  initializeCode() {
+    const view = this;
+    const source = this.source;
+    const sourceText = source.sourceText;
+    if (!sourceText) return;
+    const sourceContainer = view.divNode;
+    if (this.codeMode == CodeMode.MAIN_SOURCE) {
+      sourceContainer.classList.add("main-source");
+    } else {
+      sourceContainer.classList.add("inlined-source");
+    }
+    const codeHeader = document.createElement("div");
+    codeHeader.setAttribute("id", this.getCodeHeaderHtmlElementName());
+    codeHeader.classList.add("code-header");
+    const codeFileFunction = document.createElement("div");
+    codeFileFunction.classList.add("code-file-function");
+    codeFileFunction.innerHTML = `${source.sourceName}:${source.functionName}`;
+    codeHeader.appendChild(codeFileFunction);
+    const codeModeDiv = document.createElement("div");
+    codeModeDiv.classList.add("code-mode");
+    codeModeDiv.innerHTML = `${this.codeMode}`;
+    codeHeader.appendChild(codeModeDiv);
+    const clearDiv = document.createElement("div");
+    clearDiv.style.clear = "both";
+    codeHeader.appendChild(clearDiv);
+    sourceContainer.appendChild(codeHeader);
+    const codePre = document.createElement("pre");
+    codePre.setAttribute("id", this.getCodeHtmlElementName());
+    codePre.classList.add("prettyprint");
+    sourceContainer.appendChild(codePre);
+
+    codeHeader.onclick = function myFunction() {
+      if (codePre.style.display === "none") {
+        codePre.style.display = "block";
+      } else {
+        codePre.style.display = "none";
+      }
+    };
+    if (sourceText != "") {
+      codePre.classList.add("linenums");
+      codePre.textContent = sourceText;
+      try {
+        // Wrap in try to work when offline.
+        PR.prettyPrint(undefined, sourceContainer);
+      } catch (e) {
+        console.log(e);
+      }
+
+      view.divNode.onclick = function (e: MouseEvent) {
+        if (e.target instanceof Element && e.target.tagName == "DIV") {
+          const targetDiv = e.target as HTMLDivElement;
+          if (targetDiv.classList.contains("line-number")) {
+            e.stopPropagation();
+            view.onSelectLine(Number(targetDiv.dataset.lineNumber), !e.shiftKey);
+          }
+        } else {
+          view.selectionHandler.clear();
+        }
+      };
+
+      const base: number = source.startPosition;
+      let current = 0;
+      const lineListDiv = this.getHtmlCodeLines();
+      let newlineAdjust = 0;
+      for (let i = 0; i < lineListDiv.length; i++) {
+        // Line numbers are not zero-based.
+        const lineNumber = i + 1;
+        const currentLineElement = lineListDiv[i];
+        currentLineElement.id = "li" + i;
+        currentLineElement.dataset.lineNumber = "" + lineNumber;
+        const spans = currentLineElement.childNodes;
+        for (const currentSpan of spans) {
+          if (currentSpan instanceof HTMLSpanElement) {
+            const pos = base + current;
+            const end = pos + currentSpan.textContent.length;
+            current += currentSpan.textContent.length;
+            this.insertSourcePositions(currentSpan, lineNumber, pos, end, newlineAdjust);
+            newlineAdjust = 0;
+          }
+        }
+
+        this.insertLineNumber(currentLineElement, lineNumber);
+
+        while ((current < sourceText.length) &&
+          (sourceText[current] == '\n' || sourceText[current] == '\r')) {
+          ++current;
+          ++newlineAdjust;
+        }
+      }
+    }
+  }
+
+  insertSourcePositions(currentSpan, lineNumber, pos, end, adjust) {
+    const view = this;
+    const sps = this.sourceResolver.sourcePositionsInRange(this.source.sourceId, pos - adjust, end);
+    let offset = 0;
+    for (const sourcePosition of sps) {
+      this.sourceResolver.addAnyPositionToLine(lineNumber, sourcePosition);
+      const textnode = currentSpan.tagName == 'SPAN' ? currentSpan.lastChild : currentSpan;
+      if (!(textnode instanceof Text)) continue;
+      const splitLength = Math.max(0, sourcePosition.scriptOffset - pos - offset);
+      offset += splitLength;
+      const replacementNode = textnode.splitText(splitLength);
+      const span = document.createElement('span');
+      span.setAttribute("scriptOffset", sourcePosition.scriptOffset);
+      span.classList.add("source-position");
+      const marker = document.createElement('span');
+      marker.classList.add("marker");
+      span.appendChild(marker);
+      const inlining = this.sourceResolver.getInliningForPosition(sourcePosition);
+      if (inlining != undefined && view.showAdditionalInliningPosition) {
+        const sourceName = this.sourceResolver.getSourceName(inlining.sourceId);
+        const inliningMarker = document.createElement('span');
+        inliningMarker.classList.add("inlining-marker");
+        inliningMarker.setAttribute("data-descr", `${sourceName} was inlined here`);
+        span.appendChild(inliningMarker);
+      }
+      span.onclick = function (e) {
+        e.stopPropagation();
+        view.onSelectSourcePosition(sourcePosition, !e.shiftKey);
+      };
+      view.addHtmlElementToSourcePosition(sourcePosition, span);
+      textnode.parentNode.insertBefore(span, replacementNode);
+    }
+  }
+
+  insertLineNumber(lineElement: HTMLElement, lineNumber: number) {
+    const view = this;
+    const lineNumberElement = document.createElement("div");
+    lineNumberElement.classList.add("line-number");
+    lineNumberElement.dataset.lineNumber = `${lineNumber}`;
+    lineNumberElement.innerText = `${lineNumber}`;
+    lineElement.insertBefore(lineNumberElement, lineElement.firstChild);
+    // Don't add lines to source positions of not in backwardsCompatibility mode.
+    if (this.source.backwardsCompatibility === true) {
+      for (const sourcePosition of this.sourceResolver.linetoSourcePositions(lineNumber - 1)) {
+        view.addHtmlElementToSourcePosition(sourcePosition, lineElement);
+      }
+    }
+  }
+
+}
diff --git a/src/v8/tools/turbolizer/src/constants.ts b/src/v8/tools/turbolizer/src/constants.ts
new file mode 100644
index 0000000..1551d48
--- /dev/null
+++ b/src/v8/tools/turbolizer/src/constants.ts
@@ -0,0 +1,26 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export const MAX_RANK_SENTINEL = 0;
+export const GRAPH_MARGIN = 250;
+export const SOURCE_PANE_ID = 'left';
+export const SOURCE_COLLAPSE_ID = 'source-shrink';
+export const SOURCE_EXPAND_ID = 'source-expand';
+export const INTERMEDIATE_PANE_ID = 'middle';
+export const GRAPH_PANE_ID = 'graph';
+export const SCHEDULE_PANE_ID = 'schedule';
+export const GENERATED_PANE_ID = 'right';
+export const DISASSEMBLY_PANE_ID = 'disassembly';
+export const DISASSEMBLY_COLLAPSE_ID = 'disassembly-shrink';
+export const DISASSEMBLY_EXPAND_ID = 'disassembly-expand';
+export const COLLAPSE_PANE_BUTTON_VISIBLE = 'button-input';
+export const COLLAPSE_PANE_BUTTON_INVISIBLE = 'button-input-invisible';
+export const UNICODE_BLOCK = '&#9611;';
+export const PROF_COLS = [
+  { perc: 0, col: { r: 255, g: 255, b: 255 } },
+  { perc: 0.5, col: { r: 255, g: 255, b: 128 } },
+  { perc: 5, col: { r: 255, g: 128, b: 0 } },
+  { perc: 15, col: { r: 255, g: 0, b: 0 } },
+  { perc: 100, col: { r: 0, g: 0, b: 0 } }
+];
diff --git a/src/v8/tools/turbolizer/src/disassembly-view.ts b/src/v8/tools/turbolizer/src/disassembly-view.ts
new file mode 100644
index 0000000..4b8fc6e
--- /dev/null
+++ b/src/v8/tools/turbolizer/src/disassembly-view.ts
@@ -0,0 +1,354 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import { PROF_COLS, UNICODE_BLOCK } from "../src/constants";
+import { SelectionBroker } from "../src/selection-broker";
+import { TextView } from "../src/text-view";
+import { MySelection } from "./selection";
+import { anyToString, interpolate } from "./util";
+import { InstructionSelectionHandler } from "./selection-handler";
+
+const toolboxHTML = `<div id="disassembly-toolbox">
+<form>
+  <label><input id="show-instruction-address" type="checkbox" name="instruction-address">Show addresses</label>
+  <label><input id="show-instruction-binary" type="checkbox" name="instruction-binary">Show binary literal</label>
+</form>
+</div>`;
+
+export class DisassemblyView extends TextView {
+  SOURCE_POSITION_HEADER_REGEX: any;
+  addrEventCounts: any;
+  totalEventCounts: any;
+  maxEventCounts: any;
+  posLines: Array<any>;
+  instructionSelectionHandler: InstructionSelectionHandler;
+  offsetSelection: MySelection;
+  showInstructionAddressHandler: () => void;
+  showInstructionBinaryHandler: () => void;
+
+  createViewElement() {
+    const pane = document.createElement('div');
+    pane.setAttribute('id', "disassembly");
+    pane.innerHTML =
+      `<pre id='disassembly-text-pre' class='prettyprint prettyprinted'>
+       <ul id='disassembly-list' class='nolinenums noindent'>
+       </ul>
+     </pre>`;
+
+    return pane;
+  }
+
+  constructor(parentId, broker: SelectionBroker) {
+    super(parentId, broker);
+    const view = this;
+    const ADDRESS_STYLE = {
+      associateData: (text, fragment: HTMLElement) => {
+        const matches = text.match(/(?<address>0?x?[0-9a-fA-F]{8,16})(?<addressSpace>\s+)(?<offset>[0-9a-f]+)(?<offsetSpace>\s*)/);
+        const offset = Number.parseInt(matches.groups["offset"], 16);
+        const addressElement = document.createElement("SPAN");
+        addressElement.className = "instruction-address";
+        addressElement.innerText = matches.groups["address"];
+        const offsetElement = document.createElement("SPAN");
+        offsetElement.innerText = matches.groups["offset"];
+        fragment.appendChild(addressElement);
+        fragment.appendChild(document.createTextNode(matches.groups["addressSpace"]));
+        fragment.appendChild(offsetElement);
+        fragment.appendChild(document.createTextNode(matches.groups["offsetSpace"]));
+        fragment.classList.add('tag');
+
+        if (!Number.isNaN(offset)) {
+          const pcOffset = view.sourceResolver.getKeyPcOffset(offset);
+          fragment.dataset.pcOffset = `${pcOffset}`;
+          addressElement.classList.add('linkable-text');
+          offsetElement.classList.add('linkable-text');
+        }
+      }
+    };
+    const UNCLASSIFIED_STYLE = {
+      css: 'com'
+    };
+    const NUMBER_STYLE = {
+      css: ['instruction-binary', 'lit']
+    };
+    const COMMENT_STYLE = {
+      css: 'com'
+    };
+    const OPCODE_ARGS = {
+      associateData: function (text, fragment) {
+        fragment.innerHTML = text;
+        const replacer = (match, hexOffset) => {
+          const offset = Number.parseInt(hexOffset, 16);
+          const keyOffset = view.sourceResolver.getKeyPcOffset(offset);
+          return `<span class="tag linkable-text" data-pc-offset="${keyOffset}">${match}</span>`;
+        };
+        const html = text.replace(/<.0?x?([0-9a-fA-F]+)>/g, replacer);
+        fragment.innerHTML = html;
+      }
+    };
+    const OPCODE_STYLE = {
+      css: 'kwd'
+    };
+    const BLOCK_HEADER_STYLE = {
+      associateData: function (text, fragment) {
+        const matches = /\d+/.exec(text);
+        if (!matches) return;
+        const blockId = matches[0];
+        fragment.dataset.blockId = blockId;
+        fragment.innerHTML = text;
+        fragment.className = "com block";
+      }
+    };
+    const SOURCE_POSITION_HEADER_STYLE = {
+      css: 'com'
+    };
+    view.SOURCE_POSITION_HEADER_REGEX = /^\s*--[^<]*<.*(not inlined|inlined\((\d+)\)):(\d+)>\s*--/;
+    const patterns = [
+      [
+        [/^0?x?[0-9a-fA-F]{8,16}\s+[0-9a-f]+\s+/, ADDRESS_STYLE, 1],
+        [view.SOURCE_POSITION_HEADER_REGEX, SOURCE_POSITION_HEADER_STYLE, -1],
+        [/^\s+-- B\d+ start.*/, BLOCK_HEADER_STYLE, -1],
+        [/^.*/, UNCLASSIFIED_STYLE, -1]
+      ],
+      [
+        [/^\s*[0-9a-f]+\s+/, NUMBER_STYLE, 2],
+        [/^\s*[0-9a-f]+\s+[0-9a-f]+\s+/, NUMBER_STYLE, 2],
+        [/^.*/, null, -1]
+      ],
+      [
+        [/^REX.W \S+\s+/, OPCODE_STYLE, 3],
+        [/^\S+\s+/, OPCODE_STYLE, 3],
+        [/^\S+$/, OPCODE_STYLE, -1],
+        [/^.*/, null, -1]
+      ],
+      [
+        [/^\s+/, null],
+        [/^[^;]+$/, OPCODE_ARGS, -1],
+        [/^[^;]+/, OPCODE_ARGS, 4],
+        [/^;/, COMMENT_STYLE, 5]
+      ],
+      [
+        [/^.+$/, COMMENT_STYLE, -1]
+      ]
+    ];
+    view.setPatterns(patterns);
+
+    const linkHandler = (e: MouseEvent) => {
+      if (!(e.target instanceof HTMLElement)) return;
+      const offsetAsString = e.target.dataset.pcOffset ? e.target.dataset.pcOffset : e.target.parentElement.dataset.pcOffset;
+      const offset = Number.parseInt(offsetAsString, 10);
+      if ((typeof offsetAsString) != "undefined" && !Number.isNaN(offset)) {
+        view.offsetSelection.select([offset], true);
+        const nodes = view.sourceResolver.nodesForPCOffset(offset)[0];
+        if (nodes.length > 0) {
+          e.stopPropagation();
+          if (!e.shiftKey) {
+            view.selectionHandler.clear();
+          }
+          view.selectionHandler.select(nodes, true);
+        } else {
+          view.updateSelection();
+        }
+      }
+      return undefined;
+    };
+    view.divNode.addEventListener('click', linkHandler);
+
+    const linkHandlerBlock = e => {
+      const blockId = e.target.dataset.blockId;
+      if (typeof blockId != "undefined" && !Number.isNaN(blockId)) {
+        e.stopPropagation();
+        if (!e.shiftKey) {
+          view.selectionHandler.clear();
+        }
+        view.blockSelectionHandler.select([blockId], true);
+      }
+    };
+    view.divNode.addEventListener('click', linkHandlerBlock);
+
+    this.offsetSelection = new MySelection(anyToString);
+    const instructionSelectionHandler = {
+      clear: function () {
+        view.offsetSelection.clear();
+        view.updateSelection();
+        broker.broadcastClear(instructionSelectionHandler);
+      },
+      select: function (instructionIds, selected) {
+        view.offsetSelection.select(instructionIds, selected);
+        view.updateSelection();
+        broker.broadcastBlockSelect(instructionSelectionHandler, instructionIds, selected);
+      },
+      brokeredInstructionSelect: function (instructionIds, selected) {
+        const firstSelect = view.offsetSelection.isEmpty();
+        const keyPcOffsets = view.sourceResolver.instructionsToKeyPcOffsets(instructionIds);
+        view.offsetSelection.select(keyPcOffsets, selected);
+        view.updateSelection(firstSelect);
+      },
+      brokeredClear: function () {
+        view.offsetSelection.clear();
+        view.updateSelection();
+      }
+    };
+    this.instructionSelectionHandler = instructionSelectionHandler;
+    broker.addInstructionHandler(instructionSelectionHandler);
+
+    const toolbox = document.createElement("div");
+    toolbox.id = "toolbox-anchor";
+    toolbox.innerHTML = toolboxHTML;
+    view.divNode.insertBefore(toolbox, view.divNode.firstChild);
+    const instructionAddressInput: HTMLInputElement = view.divNode.querySelector("#show-instruction-address");
+    const lastShowInstructionAddress = window.sessionStorage.getItem("show-instruction-address");
+    instructionAddressInput.checked = lastShowInstructionAddress == 'true';
+    const showInstructionAddressHandler = () => {
+      window.sessionStorage.setItem("show-instruction-address", `${instructionAddressInput.checked}`);
+      for (const el of view.divNode.querySelectorAll(".instruction-address")) {
+        el.classList.toggle("invisible", !instructionAddressInput.checked);
+      }
+    };
+    instructionAddressInput.addEventListener("change", showInstructionAddressHandler);
+    this.showInstructionAddressHandler = showInstructionAddressHandler;
+
+    const instructionBinaryInput: HTMLInputElement = view.divNode.querySelector("#show-instruction-binary");
+    const lastShowInstructionBinary = window.sessionStorage.getItem("show-instruction-binary");
+    instructionBinaryInput.checked = lastShowInstructionBinary == 'true';
+    const showInstructionBinaryHandler = () => {
+      window.sessionStorage.setItem("show-instruction-binary", `${instructionBinaryInput.checked}`);
+      for (const el of view.divNode.querySelectorAll(".instruction-binary")) {
+        el.classList.toggle("invisible", !instructionBinaryInput.checked);
+      }
+    };
+    instructionBinaryInput.addEventListener("change", showInstructionBinaryHandler);
+    this.showInstructionBinaryHandler = showInstructionBinaryHandler;
+  }
+
+  updateSelection(scrollIntoView: boolean = false) {
+    super.updateSelection(scrollIntoView);
+    const keyPcOffsets = this.sourceResolver.nodesToKeyPcOffsets(this.selection.selectedKeys());
+    if (this.offsetSelection) {
+      for (const key of this.offsetSelection.selectedKeys()) {
+        keyPcOffsets.push(Number(key));
+      }
+    }
+    for (const keyPcOffset of keyPcOffsets) {
+      const elementsToSelect = this.divNode.querySelectorAll(`[data-pc-offset='${keyPcOffset}']`);
+      for (const el of elementsToSelect) {
+        el.classList.toggle("selected", true);
+      }
+    }
+  }
+
+  initializeCode(sourceText, sourcePosition: number = 0) {
+    const view = this;
+    view.addrEventCounts = null;
+    view.totalEventCounts = null;
+    view.maxEventCounts = null;
+    view.posLines = new Array();
+    // Comment lines for line 0 include sourcePosition already, only need to
+    // add sourcePosition for lines > 0.
+    view.posLines[0] = sourcePosition;
+    if (sourceText && sourceText != "") {
+      const base = sourcePosition;
+      let current = 0;
+      const sourceLines = sourceText.split("\n");
+      for (let i = 1; i < sourceLines.length; i++) {
+        // Add 1 for newline character that is split off.
+        current += sourceLines[i - 1].length + 1;
+        view.posLines[i] = base + current;
+      }
+    }
+  }
+
+  initializePerfProfile(eventCounts) {
+    const view = this;
+    if (eventCounts !== undefined) {
+      view.addrEventCounts = eventCounts;
+
+      view.totalEventCounts = {};
+      view.maxEventCounts = {};
+      for (const evName in view.addrEventCounts) {
+        if (view.addrEventCounts.hasOwnProperty(evName)) {
+          const keys = Object.keys(view.addrEventCounts[evName]);
+          const values = keys.map(key => view.addrEventCounts[evName][key]);
+          view.totalEventCounts[evName] = values.reduce((a, b) => a + b);
+          view.maxEventCounts[evName] = values.reduce((a, b) => Math.max(a, b));
+        }
+      }
+    } else {
+      view.addrEventCounts = null;
+      view.totalEventCounts = null;
+      view.maxEventCounts = null;
+    }
+  }
+
+  showContent(data): void {
+    console.time("disassembly-view");
+    super.initializeContent(data, null);
+    this.showInstructionAddressHandler();
+    this.showInstructionBinaryHandler();
+    console.timeEnd("disassembly-view");
+  }
+
+  // Shorten decimals and remove trailing zeroes for readability.
+  humanize(num) {
+    return num.toFixed(3).replace(/\.?0+$/, "") + "%";
+  }
+
+  processLine(line) {
+    const view = this;
+    let fragments = super.processLine(line);
+
+    // Add profiling data per instruction if available.
+    if (view.totalEventCounts) {
+      const matches = /^(0x[0-9a-fA-F]+)\s+\d+\s+[0-9a-fA-F]+/.exec(line);
+      if (matches) {
+        const newFragments = [];
+        for (const event in view.addrEventCounts) {
+          if (!view.addrEventCounts.hasOwnProperty(event)) continue;
+          const count = view.addrEventCounts[event][matches[1]];
+          let str = " ";
+          const cssCls = "prof";
+          if (count !== undefined) {
+            const perc = count / view.totalEventCounts[event] * 100;
+
+            let col = { r: 255, g: 255, b: 255 };
+            for (let i = 0; i < PROF_COLS.length; i++) {
+              if (perc === PROF_COLS[i].perc) {
+                col = PROF_COLS[i].col;
+                break;
+              } else if (perc > PROF_COLS[i].perc && perc < PROF_COLS[i + 1].perc) {
+                const col1 = PROF_COLS[i].col;
+                const col2 = PROF_COLS[i + 1].col;
+
+                const val = perc - PROF_COLS[i].perc;
+                const max = PROF_COLS[i + 1].perc - PROF_COLS[i].perc;
+
+                col.r = Math.round(interpolate(val, max, col1.r, col2.r));
+                col.g = Math.round(interpolate(val, max, col1.g, col2.g));
+                col.b = Math.round(interpolate(val, max, col1.b, col2.b));
+                break;
+              }
+            }
+
+            str = UNICODE_BLOCK;
+
+            const fragment = view.createFragment(str, cssCls);
+            fragment.title = event + ": " + view.humanize(perc) + " (" + count + ")";
+            fragment.style.color = "rgb(" + col.r + ", " + col.g + ", " + col.b + ")";
+
+            newFragments.push(fragment);
+          } else {
+            newFragments.push(view.createFragment(str, cssCls));
+          }
+        }
+        fragments = newFragments.concat(fragments);
+      }
+    }
+    return fragments;
+  }
+
+  detachSelection() { return null; }
+
+  public searchInputAction(searchInput: HTMLInputElement, e: Event, onlyVisible: boolean): void {
+    throw new Error("Method not implemented.");
+  }
+}
diff --git a/src/v8/tools/turbolizer/src/edge.ts b/src/v8/tools/turbolizer/src/edge.ts
new file mode 100644
index 0000000..30d265c
--- /dev/null
+++ b/src/v8/tools/turbolizer/src/edge.ts
@@ -0,0 +1,90 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import { GNode, DEFAULT_NODE_BUBBLE_RADIUS } from "../src/node";
+import { Graph } from "./graph";
+
+export const MINIMUM_EDGE_SEPARATION = 20;
+
+export class Edge {
+  target: GNode;
+  source: GNode;
+  index: number;
+  type: string;
+  backEdgeNumber: number;
+  visible: boolean;
+
+  constructor(target: GNode, index: number, source: GNode, type: string) {
+    this.target = target;
+    this.source = source;
+    this.index = index;
+    this.type = type;
+    this.backEdgeNumber = 0;
+    this.visible = false;
+  }
+
+  stringID() {
+    return this.source.id + "," + this.index + "," + this.target.id;
+  }
+
+  isVisible() {
+    return this.visible && this.source.visible && this.target.visible;
+  }
+
+  getInputHorizontalPosition(graph: Graph, showTypes: boolean) {
+    if (this.backEdgeNumber > 0) {
+      return graph.maxGraphNodeX + this.backEdgeNumber * MINIMUM_EDGE_SEPARATION;
+    }
+    const source = this.source;
+    const target = this.target;
+    const index = this.index;
+    const inputX = target.x + target.getInputX(index);
+    const inputApproach = target.getInputApproach(this.index);
+    const outputApproach = source.getOutputApproach(showTypes);
+    if (inputApproach > outputApproach) {
+      return inputX;
+    } else {
+      const inputOffset = MINIMUM_EDGE_SEPARATION * (index + 1);
+      return (target.x < source.x)
+        ? (target.x + target.getTotalNodeWidth() + inputOffset)
+        : (target.x - inputOffset);
+    }
+  }
+
+  generatePath(graph: Graph, showTypes: boolean) {
+    const target = this.target;
+    const source = this.source;
+    const inputX = target.x + target.getInputX(this.index);
+    const arrowheadHeight = 7;
+    const inputY = target.y - 2 * DEFAULT_NODE_BUBBLE_RADIUS - arrowheadHeight;
+    const outputX = source.x + source.getOutputX();
+    const outputY = source.y + source.getNodeHeight(showTypes) + DEFAULT_NODE_BUBBLE_RADIUS;
+    let inputApproach = target.getInputApproach(this.index);
+    const outputApproach = source.getOutputApproach(showTypes);
+    const horizontalPos = this.getInputHorizontalPosition(graph, showTypes);
+
+    let result = "M" + outputX + "," + outputY +
+      "L" + outputX + "," + outputApproach +
+      "L" + horizontalPos + "," + outputApproach;
+
+    if (horizontalPos != inputX) {
+      result += "L" + horizontalPos + "," + inputApproach;
+    } else {
+      if (inputApproach < outputApproach) {
+        inputApproach = outputApproach;
+      }
+    }
+
+    result += "L" + inputX + "," + inputApproach +
+      "L" + inputX + "," + inputY;
+    return result;
+  }
+
+  isBackEdge() {
+    return this.target.hasBackEdges() && (this.target.rank < this.source.rank);
+  }
+
+}
+
+export const edgeToStr = (e: Edge) => e.stringID();
diff --git a/src/v8/tools/turbolizer/src/graph-layout.ts b/src/v8/tools/turbolizer/src/graph-layout.ts
new file mode 100644
index 0000000..3687c28
--- /dev/null
+++ b/src/v8/tools/turbolizer/src/graph-layout.ts
@@ -0,0 +1,461 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import { MAX_RANK_SENTINEL } from "../src/constants";
+import { MINIMUM_EDGE_SEPARATION, Edge } from "../src/edge";
+import { NODE_INPUT_WIDTH, MINIMUM_NODE_OUTPUT_APPROACH, DEFAULT_NODE_BUBBLE_RADIUS, GNode } from "../src/node";
+import { Graph } from "./graph";
+
+const DEFAULT_NODE_ROW_SEPARATION = 130;
+const traceLayout = false;
+
+function newGraphOccupation(graph: Graph) {
+  const isSlotFilled = [];
+  let maxSlot = 0;
+  let minSlot = 0;
+  let nodeOccupation: Array<[number, number]> = [];
+
+  function slotToIndex(slot: number) {
+    if (slot >= 0) {
+      return slot * 2;
+    } else {
+      return slot * 2 + 1;
+    }
+  }
+
+  function positionToSlot(pos: number) {
+    return Math.floor(pos / NODE_INPUT_WIDTH);
+  }
+
+  function slotToLeftPosition(slot: number) {
+    return slot * NODE_INPUT_WIDTH;
+  }
+
+  function findSpace(pos: number, width: number, direction: number) {
+    const widthSlots = Math.floor((width + NODE_INPUT_WIDTH - 1) /
+      NODE_INPUT_WIDTH);
+    const currentSlot = positionToSlot(pos + width / 2);
+    let currentScanSlot = currentSlot;
+    let widthSlotsRemainingLeft = widthSlots;
+    let widthSlotsRemainingRight = widthSlots;
+    let slotsChecked = 0;
+    while (true) {
+      const mod = slotsChecked++ % 2;
+      currentScanSlot = currentSlot + (mod ? -1 : 1) * (slotsChecked >> 1);
+      if (!isSlotFilled[slotToIndex(currentScanSlot)]) {
+        if (mod) {
+          if (direction <= 0) --widthSlotsRemainingLeft;
+        } else {
+          if (direction >= 0) --widthSlotsRemainingRight;
+        }
+        if (widthSlotsRemainingLeft == 0 ||
+          widthSlotsRemainingRight == 0 ||
+          (widthSlotsRemainingLeft + widthSlotsRemainingRight) == widthSlots &&
+          (widthSlots == slotsChecked)) {
+          if (mod) {
+            return [currentScanSlot, widthSlots];
+          } else {
+            return [currentScanSlot - widthSlots + 1, widthSlots];
+          }
+        }
+      } else {
+        if (mod) {
+          widthSlotsRemainingLeft = widthSlots;
+        } else {
+          widthSlotsRemainingRight = widthSlots;
+        }
+      }
+    }
+  }
+
+  function setIndexRange(from: number, to: number, value: boolean) {
+    if (to < from) {
+      throw ("illegal slot range");
+    }
+    while (from <= to) {
+      if (from > maxSlot) {
+        maxSlot = from;
+      }
+      if (from < minSlot) {
+        minSlot = from;
+      }
+      isSlotFilled[slotToIndex(from++)] = value;
+    }
+  }
+
+  function occupySlotRange(from: number, to: number) {
+    if (traceLayout) {
+      console.log("Occupied [" + slotToLeftPosition(from) + "  " + slotToLeftPosition(to + 1) + ")");
+    }
+    setIndexRange(from, to, true);
+  }
+
+  function clearSlotRange(from: number, to: number) {
+    if (traceLayout) {
+      console.log("Cleared [" + slotToLeftPosition(from) + "  " + slotToLeftPosition(to + 1) + ")");
+    }
+    setIndexRange(from, to, false);
+  }
+
+  function occupyPositionRange(from: number, to: number) {
+    occupySlotRange(positionToSlot(from), positionToSlot(to - 1));
+  }
+
+  function clearPositionRange(from: number, to: number) {
+    clearSlotRange(positionToSlot(from), positionToSlot(to - 1));
+  }
+
+  function occupyPositionRangeWithMargin(from: number, to: number, margin: number) {
+    const fromMargin = from - Math.floor(margin);
+    const toMargin = to + Math.floor(margin);
+    occupyPositionRange(fromMargin, toMargin);
+  }
+
+  function clearPositionRangeWithMargin(from: number, to: number, margin: number) {
+    const fromMargin = from - Math.floor(margin);
+    const toMargin = to + Math.floor(margin);
+    clearPositionRange(fromMargin, toMargin);
+  }
+
+  const occupation = {
+    occupyNodeInputs: function (node: GNode, showTypes: boolean) {
+      for (let i = 0; i < node.inputs.length; ++i) {
+        if (node.inputs[i].isVisible()) {
+          const edge = node.inputs[i];
+          if (!edge.isBackEdge()) {
+            const horizontalPos = edge.getInputHorizontalPosition(graph, showTypes);
+            if (traceLayout) {
+              console.log("Occupying input " + i + " of " + node.id + " at " + horizontalPos);
+            }
+            occupyPositionRangeWithMargin(horizontalPos,
+              horizontalPos,
+              NODE_INPUT_WIDTH / 2);
+          }
+        }
+      }
+    },
+    occupyNode: function (node: GNode) {
+      const getPlacementHint = function (n: GNode) {
+        let pos = 0;
+        let direction = -1;
+        let outputEdges = 0;
+        let inputEdges = 0;
+        for (const outputEdge of n.outputs) {
+          if (outputEdge.isVisible()) {
+            const output = outputEdge.target;
+            for (let l = 0; l < output.inputs.length; ++l) {
+              if (output.rank > n.rank) {
+                const inputEdge = output.inputs[l];
+                if (inputEdge.isVisible()) {
+                  ++inputEdges;
+                }
+                if (output.inputs[l].source == n) {
+                  pos += output.x + output.getInputX(l) + NODE_INPUT_WIDTH / 2;
+                  outputEdges++;
+                  if (l >= (output.inputs.length / 2)) {
+                    direction = 1;
+                  }
+                }
+              }
+            }
+          }
+        }
+        if (outputEdges != 0) {
+          pos = pos / outputEdges;
+        }
+        if (outputEdges > 1 || inputEdges == 1) {
+          direction = 0;
+        }
+        return [direction, pos];
+      };
+      const width = node.getTotalNodeWidth();
+      const margin = MINIMUM_EDGE_SEPARATION;
+      const paddedWidth = width + 2 * margin;
+      const placementHint = getPlacementHint(node);
+      const x = placementHint[1] - paddedWidth + margin;
+      if (traceLayout) {
+        console.log("Node " + node.id + " placement hint [" + x + ", " + (x + paddedWidth) + ")");
+      }
+      const placement = findSpace(x, paddedWidth, placementHint[0]);
+      const firstSlot = placement[0];
+      const slotWidth = placement[1];
+      const endSlotExclusive = firstSlot + slotWidth - 1;
+      occupySlotRange(firstSlot, endSlotExclusive);
+      nodeOccupation.push([firstSlot, endSlotExclusive]);
+      if (placementHint[0] < 0) {
+        return slotToLeftPosition(firstSlot + slotWidth) - width - margin;
+      } else if (placementHint[0] > 0) {
+        return slotToLeftPosition(firstSlot) + margin;
+      } else {
+        return slotToLeftPosition(firstSlot + slotWidth / 2) - (width / 2);
+      }
+    },
+    clearOccupiedNodes: function () {
+      nodeOccupation.forEach(([firstSlot, endSlotExclusive]) => {
+        clearSlotRange(firstSlot, endSlotExclusive);
+      });
+      nodeOccupation = [];
+    },
+    clearNodeOutputs: function (source: GNode, showTypes: boolean) {
+      source.outputs.forEach(function (edge) {
+        if (edge.isVisible()) {
+          const target = edge.target;
+          for (const inputEdge of target.inputs) {
+            if (inputEdge.source === source) {
+              const horizontalPos = edge.getInputHorizontalPosition(graph, showTypes);
+              clearPositionRangeWithMargin(horizontalPos,
+                horizontalPos,
+                NODE_INPUT_WIDTH / 2);
+            }
+          }
+        }
+      });
+    },
+    print: function () {
+      let s = "";
+      for (let currentSlot = -40; currentSlot < 40; ++currentSlot) {
+        if (currentSlot != 0) {
+          s += " ";
+        } else {
+          s += "|";
+        }
+      }
+      console.log(s);
+      s = "";
+      for (let currentSlot2 = -40; currentSlot2 < 40; ++currentSlot2) {
+        if (isSlotFilled[slotToIndex(currentSlot2)]) {
+          s += "*";
+        } else {
+          s += " ";
+        }
+      }
+      console.log(s);
+    }
+  };
+  return occupation;
+}
+
+export function layoutNodeGraph(graph: Graph, showTypes: boolean): void {
+  // First determine the set of nodes that have no outputs. Those are the
+  // basis for bottom-up DFS to determine rank and node placement.
+
+  const start = performance.now();
+
+  const endNodesHasNoOutputs = [];
+  const startNodesHasNoInputs = [];
+  for (const n of graph.nodes()) {
+    endNodesHasNoOutputs[n.id] = true;
+    startNodesHasNoInputs[n.id] = true;
+  }
+  graph.forEachEdge((e: Edge) => {
+    endNodesHasNoOutputs[e.source.id] = false;
+    startNodesHasNoInputs[e.target.id] = false;
+  });
+
+  // Finialize the list of start and end nodes.
+  const endNodes: Array<GNode> = [];
+  const startNodes: Array<GNode> = [];
+  let visited: Array<boolean> = [];
+  const rank: Array<number> = [];
+  for (const n of graph.nodes()) {
+    if (endNodesHasNoOutputs[n.id]) {
+      endNodes.push(n);
+    }
+    if (startNodesHasNoInputs[n.id]) {
+      startNodes.push(n);
+    }
+    visited[n.id] = false;
+    rank[n.id] = -1;
+    n.rank = 0;
+    n.visitOrderWithinRank = 0;
+    n.outputApproach = MINIMUM_NODE_OUTPUT_APPROACH;
+  }
+
+  if (traceLayout) {
+    console.log(`layoutGraph init ${performance.now() - start}`);
+  }
+
+  let maxRank = 0;
+  visited = [];
+  let visitOrderWithinRank = 0;
+
+  const worklist: Array<GNode> = startNodes.slice();
+  while (worklist.length != 0) {
+    const n: GNode = worklist.pop();
+    let changed = false;
+    if (n.rank == MAX_RANK_SENTINEL) {
+      n.rank = 1;
+      changed = true;
+    }
+    let begin = 0;
+    let end = n.inputs.length;
+    if (n.nodeLabel.opcode == 'Phi' ||
+      n.nodeLabel.opcode == 'EffectPhi' ||
+      n.nodeLabel.opcode == 'InductionVariablePhi') {
+      // Keep with merge or loop node
+      begin = n.inputs.length - 1;
+    } else if (n.hasBackEdges()) {
+      end = 1;
+    }
+    for (let l = begin; l < end; ++l) {
+      const input = n.inputs[l].source;
+      if (input.visible && input.rank >= n.rank) {
+        n.rank = input.rank + 1;
+        changed = true;
+      }
+    }
+    if (changed) {
+      const hasBackEdges = n.hasBackEdges();
+      for (let l = n.outputs.length - 1; l >= 0; --l) {
+        if (hasBackEdges && (l != 0)) {
+          worklist.unshift(n.outputs[l].target);
+        } else {
+          worklist.push(n.outputs[l].target);
+        }
+      }
+    }
+    if (n.rank > maxRank) {
+      maxRank = n.rank;
+    }
+  }
+
+  if (traceLayout) {
+    console.log(`layoutGraph worklist ${performance.now() - start}`);
+  }
+
+  visited = [];
+  function dfsFindRankLate(n: GNode) {
+    if (visited[n.id]) return;
+    visited[n.id] = true;
+    const originalRank = n.rank;
+    let newRank = n.rank;
+    let isFirstInput = true;
+    for (const outputEdge of n.outputs) {
+      const output = outputEdge.target;
+      dfsFindRankLate(output);
+      const outputRank = output.rank;
+      if (output.visible && (isFirstInput || outputRank <= newRank) &&
+        (outputRank > originalRank)) {
+        newRank = outputRank - 1;
+      }
+      isFirstInput = false;
+    }
+    if (n.nodeLabel.opcode != "Start" && n.nodeLabel.opcode != "Phi" && n.nodeLabel.opcode != "EffectPhi" && n.nodeLabel.opcode != "InductionVariablePhi") {
+      n.rank = newRank;
+    }
+  }
+
+  startNodes.forEach(dfsFindRankLate);
+
+  visited = [];
+  function dfsRankOrder(n: GNode) {
+    if (visited[n.id]) return;
+    visited[n.id] = true;
+    for (const outputEdge of n.outputs) {
+      if (outputEdge.isVisible()) {
+        const output = outputEdge.target;
+        dfsRankOrder(output);
+      }
+    }
+    if (n.visitOrderWithinRank == 0) {
+      n.visitOrderWithinRank = ++visitOrderWithinRank;
+    }
+  }
+  startNodes.forEach(dfsRankOrder);
+
+  endNodes.forEach(function (n) {
+    n.rank = maxRank + 1;
+  });
+
+  const rankSets: Array<Array<GNode>> = [];
+  // Collect sets for each rank.
+  for (const n of graph.nodes()) {
+    n.y = n.rank * (DEFAULT_NODE_ROW_SEPARATION + n.getNodeHeight(showTypes) +
+      2 * DEFAULT_NODE_BUBBLE_RADIUS);
+    if (n.visible) {
+      if (rankSets[n.rank] === undefined) {
+        rankSets[n.rank] = [n];
+      } else {
+        rankSets[n.rank].push(n);
+      }
+    }
+  }
+
+  // Iterate backwards from highest to lowest rank, placing nodes so that they
+  // spread out from the "center" as much as possible while still being
+  // compact and not overlapping live input lines.
+  const occupation = newGraphOccupation(graph);
+
+  rankSets.reverse().forEach(function (rankSet: Array<GNode>) {
+
+    for (const node of rankSet) {
+      occupation.clearNodeOutputs(node, showTypes);
+    }
+
+    if (traceLayout) {
+      console.log("After clearing outputs");
+      occupation.print();
+    }
+
+    let placedCount = 0;
+    rankSet = rankSet.sort((a: GNode, b: GNode) => {
+      if (a.visitOrderWithinRank < b.visitOrderWithinRank) {
+        return -1;
+      } else if (a.visitOrderWithinRank == b.visitOrderWithinRank) {
+        return 0;
+      } else {
+        return 1;
+      }
+    });
+
+    for (const nodeToPlace of rankSet) {
+      if (nodeToPlace.visible) {
+        nodeToPlace.x = occupation.occupyNode(nodeToPlace);
+        if (traceLayout) {
+          console.log("Node " + nodeToPlace.id + " is placed between [" + nodeToPlace.x + ", " + (nodeToPlace.x + nodeToPlace.getTotalNodeWidth()) + ")");
+        }
+        const staggeredFlooredI = Math.floor(placedCount++ % 3);
+        const delta = MINIMUM_EDGE_SEPARATION * staggeredFlooredI;
+        nodeToPlace.outputApproach += delta;
+      } else {
+        nodeToPlace.x = 0;
+      }
+    }
+
+    if (traceLayout) {
+      console.log("Before clearing nodes");
+      occupation.print();
+    }
+
+    occupation.clearOccupiedNodes();
+
+    if (traceLayout) {
+      console.log("After clearing nodes");
+      occupation.print();
+    }
+
+    for (const node of rankSet) {
+      occupation.occupyNodeInputs(node, showTypes);
+    }
+
+    if (traceLayout) {
+      console.log("After occupying inputs");
+      occupation.print();
+    }
+
+    if (traceLayout) {
+      console.log("After determining bounding box");
+      occupation.print();
+    }
+  });
+
+  graph.maxBackEdgeNumber = 0;
+  graph.forEachEdge((e: Edge) => {
+    if (e.isBackEdge()) {
+      e.backEdgeNumber = ++graph.maxBackEdgeNumber;
+    } else {
+      e.backEdgeNumber = 0;
+    }
+  });
+}
diff --git a/src/v8/tools/turbolizer/src/graph-view.ts b/src/v8/tools/turbolizer/src/graph-view.ts
new file mode 100644
index 0000000..07e0d7f
--- /dev/null
+++ b/src/v8/tools/turbolizer/src/graph-view.ts
@@ -0,0 +1,931 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import * as d3 from "d3";
+import { layoutNodeGraph } from "../src/graph-layout";
+import { GNode, nodeToStr } from "../src/node";
+import { NODE_INPUT_WIDTH } from "../src/node";
+import { DEFAULT_NODE_BUBBLE_RADIUS } from "../src/node";
+import { Edge, edgeToStr } from "../src/edge";
+import { PhaseView } from "../src/view";
+import { MySelection } from "../src/selection";
+import { partial } from "../src/util";
+import { NodeSelectionHandler, ClearableHandler } from "./selection-handler";
+import { Graph } from "./graph";
+import { SelectionBroker } from "./selection-broker";
+
+function nodeToStringKey(n: GNode) {
+  return "" + n.id;
+}
+
+interface GraphState {
+  showTypes: boolean;
+  selection: MySelection;
+  mouseDownNode: any;
+  justDragged: boolean;
+  justScaleTransGraph: boolean;
+  hideDead: boolean;
+}
+
+export class GraphView extends PhaseView {
+  divElement: d3.Selection<any, any, any, any>;
+  svg: d3.Selection<any, any, any, any>;
+  showPhaseByName: (p: string, s: Set<any>) => void;
+  state: GraphState;
+  selectionHandler: NodeSelectionHandler & ClearableHandler;
+  graphElement: d3.Selection<any, any, any, any>;
+  visibleNodes: d3.Selection<any, GNode, any, any>;
+  visibleEdges: d3.Selection<any, Edge, any, any>;
+  drag: d3.DragBehavior<any, GNode, GNode>;
+  panZoom: d3.ZoomBehavior<SVGElement, any>;
+  visibleBubbles: d3.Selection<any, any, any, any>;
+  transitionTimout: number;
+  graph: Graph;
+  broker: SelectionBroker;
+  phaseName: string;
+  toolbox: HTMLElement;
+
+  createViewElement() {
+    const pane = document.createElement('div');
+    pane.setAttribute('id', "graph");
+    return pane;
+  }
+
+  constructor(idOrContainer: string | HTMLElement, broker: SelectionBroker,
+    showPhaseByName: (s: string) => void, toolbox: HTMLElement) {
+    super(idOrContainer);
+    const view = this;
+    this.broker = broker;
+    this.showPhaseByName = showPhaseByName;
+    this.divElement = d3.select(this.divNode);
+    this.phaseName = "";
+    this.toolbox = toolbox;
+    const svg = this.divElement.append("svg")
+      .attr('version', '2.0')
+      .attr("width", "100%")
+      .attr("height", "100%");
+    svg.on("click", function (d) {
+      view.selectionHandler.clear();
+    });
+    // Listen for key events. Note that the focus handler seems
+    // to be important even if it does nothing.
+    svg
+      .attr("focusable", false)
+      .on("focus", e => { })
+      .on("keydown", e => { view.svgKeyDown(); });
+
+    view.svg = svg;
+
+    this.state = {
+      selection: null,
+      mouseDownNode: null,
+      justDragged: false,
+      justScaleTransGraph: false,
+      showTypes: false,
+      hideDead: false
+    };
+
+    this.selectionHandler = {
+      clear: function () {
+        view.state.selection.clear();
+        broker.broadcastClear(this);
+        view.updateGraphVisibility();
+      },
+      select: function (nodes: Array<GNode>, selected: boolean) {
+        const locations = [];
+        for (const node of nodes) {
+          if (node.nodeLabel.sourcePosition) {
+            locations.push(node.nodeLabel.sourcePosition);
+          }
+          if (node.nodeLabel.origin && node.nodeLabel.origin.bytecodePosition) {
+            locations.push({ bytecodePosition: node.nodeLabel.origin.bytecodePosition });
+          }
+        }
+        view.state.selection.select(nodes, selected);
+        broker.broadcastSourcePositionSelect(this, locations, selected);
+        view.updateGraphVisibility();
+      },
+      brokeredNodeSelect: function (locations, selected: boolean) {
+        if (!view.graph) return;
+        const selection = view.graph.nodes(n => {
+          return locations.has(nodeToStringKey(n))
+            && (!view.state.hideDead || n.isLive());
+        });
+        view.state.selection.select(selection, selected);
+        // Update edge visibility based on selection.
+        for (const n of view.graph.nodes()) {
+          if (view.state.selection.isSelected(n)) {
+            n.visible = true;
+            n.inputs.forEach(e => {
+              e.visible = e.visible || view.state.selection.isSelected(e.source);
+            });
+            n.outputs.forEach(e => {
+              e.visible = e.visible || view.state.selection.isSelected(e.target);
+            });
+          }
+        }
+        view.updateGraphVisibility();
+      },
+      brokeredClear: function () {
+        view.state.selection.clear();
+        view.updateGraphVisibility();
+      }
+    };
+
+    view.state.selection = new MySelection(nodeToStringKey);
+
+    const defs = svg.append('svg:defs');
+    defs.append('svg:marker')
+      .attr('id', 'end-arrow')
+      .attr('viewBox', '0 -4 8 8')
+      .attr('refX', 2)
+      .attr('markerWidth', 2.5)
+      .attr('markerHeight', 2.5)
+      .attr('orient', 'auto')
+      .append('svg:path')
+      .attr('d', 'M0,-4L8,0L0,4');
+
+    this.graphElement = svg.append("g");
+    view.visibleEdges = this.graphElement.append("g");
+    view.visibleNodes = this.graphElement.append("g");
+
+    view.drag = d3.drag<any, GNode, GNode>()
+      .on("drag", function (d) {
+        d.x += d3.event.dx;
+        d.y += d3.event.dy;
+        view.updateGraphVisibility();
+      });
+
+    function zoomed() {
+      if (d3.event.shiftKey) return false;
+      view.graphElement.attr("transform", d3.event.transform);
+      return true;
+    }
+
+    const zoomSvg = d3.zoom<SVGElement, any>()
+      .scaleExtent([0.2, 40])
+      .on("zoom", zoomed)
+      .on("start", function () {
+        if (d3.event.shiftKey) return;
+        d3.select('body').style("cursor", "move");
+      })
+      .on("end", function () {
+        d3.select('body').style("cursor", "auto");
+      });
+
+    svg.call(zoomSvg).on("dblclick.zoom", null);
+
+    view.panZoom = zoomSvg;
+
+  }
+
+  getEdgeFrontier(nodes: Iterable<GNode>, inEdges: boolean,
+    edgeFilter: (e: Edge, i: number) => boolean) {
+    const frontier: Set<Edge> = new Set();
+    for (const n of nodes) {
+      const edges = inEdges ? n.inputs : n.outputs;
+      let edgeNumber = 0;
+      edges.forEach((edge: Edge) => {
+        if (edgeFilter == undefined || edgeFilter(edge, edgeNumber)) {
+          frontier.add(edge);
+        }
+        ++edgeNumber;
+      });
+    }
+    return frontier;
+  }
+
+  getNodeFrontier(nodes: Iterable<GNode>, inEdges: boolean,
+    edgeFilter: (e: Edge, i: number) => boolean) {
+    const view = this;
+    const frontier: Set<GNode> = new Set();
+    let newState = true;
+    const edgeFrontier = view.getEdgeFrontier(nodes, inEdges, edgeFilter);
+    // Control key toggles edges rather than just turning them on
+    if (d3.event.ctrlKey) {
+      edgeFrontier.forEach(function (edge: Edge) {
+        if (edge.visible) {
+          newState = false;
+        }
+      });
+    }
+    edgeFrontier.forEach(function (edge: Edge) {
+      edge.visible = newState;
+      if (newState) {
+        const node = inEdges ? edge.source : edge.target;
+        node.visible = true;
+        frontier.add(node);
+      }
+    });
+    view.updateGraphVisibility();
+    if (newState) {
+      return frontier;
+    } else {
+      return undefined;
+    }
+  }
+
+  initializeContent(data, rememberedSelection) {
+    this.show();
+    function createImgInput(id: string, title: string, onClick): HTMLElement {
+      const input = document.createElement("input");
+      input.setAttribute("id", id);
+      input.setAttribute("type", "image");
+      input.setAttribute("title", title);
+      input.setAttribute("src", `img/${id}-icon.png`);
+      input.className = "button-input graph-toolbox-item";
+      input.addEventListener("click", onClick);
+      return input;
+    }
+    this.toolbox.appendChild(createImgInput("layout", "layout graph",
+      partial(this.layoutAction, this)));
+    this.toolbox.appendChild(createImgInput("show-all", "show all nodes",
+      partial(this.showAllAction, this)));
+    this.toolbox.appendChild(createImgInput("show-control", "show only control nodes",
+      partial(this.showControlAction, this)));
+    this.toolbox.appendChild(createImgInput("toggle-hide-dead", "toggle hide dead nodes",
+      partial(this.toggleHideDead, this)));
+    this.toolbox.appendChild(createImgInput("hide-unselected", "hide unselected",
+      partial(this.hideUnselectedAction, this)));
+    this.toolbox.appendChild(createImgInput("hide-selected", "hide selected",
+      partial(this.hideSelectedAction, this)));
+    this.toolbox.appendChild(createImgInput("zoom-selection", "zoom selection",
+      partial(this.zoomSelectionAction, this)));
+    this.toolbox.appendChild(createImgInput("toggle-types", "toggle types",
+      partial(this.toggleTypesAction, this)));
+
+    this.phaseName = data.name;
+    this.createGraph(data.data, rememberedSelection);
+    this.broker.addNodeHandler(this.selectionHandler);
+
+    if (rememberedSelection != null && rememberedSelection.size > 0) {
+      this.attachSelection(rememberedSelection);
+      this.connectVisibleSelectedNodes();
+      this.viewSelection();
+    } else {
+      this.viewWholeGraph();
+    }
+  }
+
+  deleteContent() {
+    for (const item of this.toolbox.querySelectorAll(".graph-toolbox-item")) {
+      item.parentElement.removeChild(item);
+    }
+
+    for (const n of this.graph.nodes()) {
+      n.visible = false;
+    }
+    this.graph.forEachEdge((e: Edge) => {
+      e.visible = false;
+    });
+    this.updateGraphVisibility();
+  }
+
+  public hide(): void {
+    super.hide();
+    this.deleteContent();
+  }
+
+  createGraph(data, rememberedSelection) {
+    this.graph = new Graph(data);
+
+    this.showControlAction(this);
+
+    if (rememberedSelection != undefined) {
+      for (const n of this.graph.nodes()) {
+        n.visible = n.visible || rememberedSelection.has(nodeToStringKey(n));
+      }
+    }
+
+    this.graph.forEachEdge(e => e.visible = e.source.visible && e.target.visible);
+
+    this.layoutGraph();
+    this.updateGraphVisibility();
+  }
+
+  connectVisibleSelectedNodes() {
+    const view = this;
+    for (const n of view.state.selection) {
+      n.inputs.forEach(function (edge: Edge) {
+        if (edge.source.visible && edge.target.visible) {
+          edge.visible = true;
+        }
+      });
+      n.outputs.forEach(function (edge: Edge) {
+        if (edge.source.visible && edge.target.visible) {
+          edge.visible = true;
+        }
+      });
+    }
+  }
+
+  updateInputAndOutputBubbles() {
+    const view = this;
+    const g = this.graph;
+    const s = this.visibleBubbles;
+    s.classed("filledBubbleStyle", function (c) {
+      const components = this.id.split(',');
+      if (components[0] == "ib") {
+        const edge = g.nodeMap[components[3]].inputs[components[2]];
+        return edge.isVisible();
+      } else {
+        return g.nodeMap[components[1]].areAnyOutputsVisible() == 2;
+      }
+    }).classed("halfFilledBubbleStyle", function (c) {
+      const components = this.id.split(',');
+      if (components[0] == "ib") {
+        return false;
+      } else {
+        return g.nodeMap[components[1]].areAnyOutputsVisible() == 1;
+      }
+    }).classed("bubbleStyle", function (c) {
+      const components = this.id.split(',');
+      if (components[0] == "ib") {
+        const edge = g.nodeMap[components[3]].inputs[components[2]];
+        return !edge.isVisible();
+      } else {
+        return g.nodeMap[components[1]].areAnyOutputsVisible() == 0;
+      }
+    });
+    s.each(function (c) {
+      const components = this.id.split(',');
+      if (components[0] == "ob") {
+        const from = g.nodeMap[components[1]];
+        const x = from.getOutputX();
+        const y = from.getNodeHeight(view.state.showTypes) + DEFAULT_NODE_BUBBLE_RADIUS;
+        const transform = "translate(" + x + "," + y + ")";
+        this.setAttribute('transform', transform);
+      }
+    });
+  }
+
+  attachSelection(s) {
+    if (!(s instanceof Set)) return;
+    this.selectionHandler.clear();
+    const selected = [...this.graph.nodes(n =>
+      s.has(this.state.selection.stringKey(n)) && (!this.state.hideDead || n.isLive()))];
+    this.selectionHandler.select(selected, true);
+  }
+
+  detachSelection() {
+    return this.state.selection.detachSelection();
+  }
+
+  selectAllNodes() {
+    if (!d3.event.shiftKey) {
+      this.state.selection.clear();
+    }
+    const allVisibleNodes = [...this.graph.nodes(n => n.visible)];
+    this.state.selection.select(allVisibleNodes, true);
+    this.updateGraphVisibility();
+  }
+
+  layoutAction(graph: GraphView) {
+    graph.layoutGraph();
+    graph.updateGraphVisibility();
+    graph.viewWholeGraph();
+  }
+
+  showAllAction(view: GraphView) {
+    for (const n of view.graph.nodes()) {
+      n.visible = !view.state.hideDead || n.isLive();
+    }
+    view.graph.forEachEdge((e: Edge) => {
+      e.visible = e.source.visible || e.target.visible;
+    });
+    view.updateGraphVisibility();
+    view.viewWholeGraph();
+  }
+
+  showControlAction(view: GraphView) {
+    for (const n of view.graph.nodes()) {
+      n.visible = n.cfg && (!view.state.hideDead || n.isLive());
+    }
+    view.graph.forEachEdge((e: Edge) => {
+      e.visible = e.type == 'control' && e.source.visible && e.target.visible;
+    });
+    view.updateGraphVisibility();
+    view.viewWholeGraph();
+  }
+
+  toggleHideDead(view: GraphView) {
+    view.state.hideDead = !view.state.hideDead;
+    if (view.state.hideDead) {
+      view.hideDead();
+    } else {
+      view.showDead();
+    }
+    const element = document.getElementById('toggle-hide-dead');
+    element.classList.toggle('button-input-toggled', view.state.hideDead);
+  }
+
+  hideDead() {
+    for (const n of this.graph.nodes()) {
+      if (!n.isLive()) {
+        n.visible = false;
+        this.state.selection.select([n], false);
+      }
+    }
+    this.updateGraphVisibility();
+  }
+
+  showDead() {
+    for (const n of this.graph.nodes()) {
+      if (!n.isLive()) {
+        n.visible = true;
+      }
+    }
+    this.updateGraphVisibility();
+  }
+
+  hideUnselectedAction(view: GraphView) {
+    for (const n of view.graph.nodes()) {
+      if (!view.state.selection.isSelected(n)) {
+        n.visible = false;
+      }
+    }
+    view.updateGraphVisibility();
+  }
+
+  hideSelectedAction(view: GraphView) {
+    for (const n of view.graph.nodes()) {
+      if (view.state.selection.isSelected(n)) {
+        n.visible = false;
+      }
+    }
+    view.selectionHandler.clear();
+  }
+
+  zoomSelectionAction(view: GraphView) {
+    view.viewSelection();
+  }
+
+  toggleTypesAction(view: GraphView) {
+    view.toggleTypes();
+  }
+
+  searchInputAction(searchBar: HTMLInputElement, e: KeyboardEvent, onlyVisible: boolean) {
+    if (e.keyCode == 13) {
+      this.selectionHandler.clear();
+      const query = searchBar.value;
+      window.sessionStorage.setItem("lastSearch", query);
+      if (query.length == 0) return;
+
+      const reg = new RegExp(query);
+      const filterFunction = (n: GNode) => {
+        return (reg.exec(n.getDisplayLabel()) != null ||
+          (this.state.showTypes && reg.exec(n.getDisplayType())) ||
+          (reg.exec(n.getTitle())) ||
+          reg.exec(n.nodeLabel.opcode) != null);
+      };
+
+      const selection = [...this.graph.nodes(n => {
+        if ((e.ctrlKey || n.visible || !onlyVisible) && filterFunction(n)) {
+          if (e.ctrlKey || !onlyVisible) n.visible = true;
+          return true;
+        }
+        return false;
+      })];
+
+      this.selectionHandler.select(selection, true);
+      this.connectVisibleSelectedNodes();
+      this.updateGraphVisibility();
+      searchBar.blur();
+      this.viewSelection();
+    }
+    e.stopPropagation();
+  }
+
+  svgKeyDown() {
+    const view = this;
+    const state = this.state;
+
+    const showSelectionFrontierNodes = (inEdges: boolean, filter: (e: Edge, i: number) => boolean, doSelect: boolean) => {
+      const frontier = view.getNodeFrontier(state.selection, inEdges, filter);
+      if (frontier != undefined && frontier.size) {
+        if (doSelect) {
+          if (!d3.event.shiftKey) {
+            state.selection.clear();
+          }
+          state.selection.select([...frontier], true);
+        }
+        view.updateGraphVisibility();
+      }
+    };
+
+    let eventHandled = true; // unless the below switch defaults
+    switch (d3.event.keyCode) {
+      case 49:
+      case 50:
+      case 51:
+      case 52:
+      case 53:
+      case 54:
+      case 55:
+      case 56:
+      case 57:
+        // '1'-'9'
+        showSelectionFrontierNodes(true,
+          (edge: Edge, index: number) => index == (d3.event.keyCode - 49),
+          !d3.event.ctrlKey);
+        break;
+      case 97:
+      case 98:
+      case 99:
+      case 100:
+      case 101:
+      case 102:
+      case 103:
+      case 104:
+      case 105:
+        // 'numpad 1'-'numpad 9'
+        showSelectionFrontierNodes(true,
+          (edge, index) => index == (d3.event.keyCode - 97),
+          !d3.event.ctrlKey);
+        break;
+      case 67:
+        // 'c'
+        showSelectionFrontierNodes(d3.event.altKey,
+          (edge, index) => edge.type == 'control',
+          true);
+        break;
+      case 69:
+        // 'e'
+        showSelectionFrontierNodes(d3.event.altKey,
+          (edge, index) => edge.type == 'effect',
+          true);
+        break;
+      case 79:
+        // 'o'
+        showSelectionFrontierNodes(false, undefined, false);
+        break;
+      case 73:
+        // 'i'
+        if (!d3.event.ctrlKey && !d3.event.shiftKey) {
+          showSelectionFrontierNodes(true, undefined, false);
+        } else {
+          eventHandled = false;
+        }
+        break;
+      case 65:
+        // 'a'
+        view.selectAllNodes();
+        break;
+      case 38:
+      // UP
+      case 40: {
+        // DOWN
+        showSelectionFrontierNodes(d3.event.keyCode == 38, undefined, true);
+        break;
+      }
+      case 82:
+        // 'r'
+        if (!d3.event.ctrlKey && !d3.event.shiftKey) {
+          this.layoutAction(this);
+        } else {
+          eventHandled = false;
+        }
+        break;
+      case 83:
+        // 's'
+        view.selectOrigins();
+        break;
+      default:
+        eventHandled = false;
+        break;
+    }
+    if (eventHandled) {
+      d3.event.preventDefault();
+    }
+  }
+
+  layoutGraph() {
+    console.time("layoutGraph");
+    layoutNodeGraph(this.graph, this.state.showTypes);
+    const extent = this.graph.redetermineGraphBoundingBox(this.state.showTypes);
+    this.panZoom.translateExtent(extent);
+    this.minScale();
+    console.timeEnd("layoutGraph");
+  }
+
+  selectOrigins() {
+    const state = this.state;
+    const origins = [];
+    let phase = this.phaseName;
+    const selection = new Set<any>();
+    for (const n of state.selection) {
+      const origin = n.nodeLabel.origin;
+      if (origin) {
+        phase = origin.phase;
+        const node = this.graph.nodeMap[origin.nodeId];
+        if (phase === this.phaseName && node) {
+          origins.push(node);
+        } else {
+          selection.add(`${origin.nodeId}`);
+        }
+      }
+    }
+    // Only go through phase reselection if we actually need
+    // to display another phase.
+    if (selection.size > 0 && phase !== this.phaseName) {
+      this.showPhaseByName(phase, selection);
+    } else if (origins.length > 0) {
+      this.selectionHandler.clear();
+      this.selectionHandler.select(origins, true);
+    }
+  }
+
+  // call to propagate changes to graph
+  updateGraphVisibility() {
+    const view = this;
+    const graph = this.graph;
+    const state = this.state;
+    if (!graph) return;
+
+    const filteredEdges = [...graph.filteredEdges(function (e) {
+      return e.source.visible && e.target.visible;
+    })];
+    const selEdges = view.visibleEdges.selectAll<SVGPathElement, Edge>("path").data(filteredEdges, edgeToStr);
+
+    // remove old links
+    selEdges.exit().remove();
+
+    // add new paths
+    const newEdges = selEdges.enter()
+      .append('path');
+
+    newEdges.style('marker-end', 'url(#end-arrow)')
+      .attr("id", function (edge) { return "e," + edge.stringID(); })
+      .on("click", function (edge) {
+        d3.event.stopPropagation();
+        if (!d3.event.shiftKey) {
+          view.selectionHandler.clear();
+        }
+        view.selectionHandler.select([edge.source, edge.target], true);
+      })
+      .attr("adjacentToHover", "false")
+      .classed('value', function (e) {
+        return e.type == 'value' || e.type == 'context';
+      }).classed('control', function (e) {
+        return e.type == 'control';
+      }).classed('effect', function (e) {
+        return e.type == 'effect';
+      }).classed('frame-state', function (e) {
+        return e.type == 'frame-state';
+      }).attr('stroke-dasharray', function (e) {
+        if (e.type == 'frame-state') return "10,10";
+        return (e.type == 'effect') ? "5,5" : "";
+      });
+
+    const newAndOldEdges = newEdges.merge(selEdges);
+
+    newAndOldEdges.classed('hidden', e => !e.isVisible());
+
+    // select existing nodes
+    const filteredNodes = [...graph.nodes(n => n.visible)];
+    const allNodes = view.visibleNodes.selectAll<SVGGElement, GNode>("g");
+    const selNodes = allNodes.data(filteredNodes, nodeToStr);
+
+    // remove old nodes
+    selNodes.exit().remove();
+
+    // add new nodes
+    const newGs = selNodes.enter()
+      .append("g");
+
+    newGs.classed("turbonode", function (n) { return true; })
+      .classed("control", function (n) { return n.isControl(); })
+      .classed("live", function (n) { return n.isLive(); })
+      .classed("dead", function (n) { return !n.isLive(); })
+      .classed("javascript", function (n) { return n.isJavaScript(); })
+      .classed("input", function (n) { return n.isInput(); })
+      .classed("simplified", function (n) { return n.isSimplified(); })
+      .classed("machine", function (n) { return n.isMachine(); })
+      .on('mouseenter', function (node) {
+        const visibleEdges = view.visibleEdges.selectAll<SVGPathElement, Edge>('path');
+        const adjInputEdges = visibleEdges.filter(e => e.target === node);
+        const adjOutputEdges = visibleEdges.filter(e => e.source === node);
+        adjInputEdges.attr('relToHover', "input");
+        adjOutputEdges.attr('relToHover', "output");
+        const adjInputNodes = adjInputEdges.data().map(e => e.source);
+        const visibleNodes = view.visibleNodes.selectAll<SVGGElement, GNode>("g");
+        visibleNodes.data<GNode>(adjInputNodes, nodeToStr).attr('relToHover', "input");
+        const adjOutputNodes = adjOutputEdges.data().map(e => e.target);
+        visibleNodes.data<GNode>(adjOutputNodes, nodeToStr).attr('relToHover', "output");
+        view.updateGraphVisibility();
+      })
+      .on('mouseleave', function (node) {
+        const visibleEdges = view.visibleEdges.selectAll<SVGPathElement, Edge>('path');
+        const adjEdges = visibleEdges.filter(e => e.target === node || e.source === node);
+        adjEdges.attr('relToHover', "none");
+        const adjNodes = adjEdges.data().map(e => e.target).concat(adjEdges.data().map(e => e.source));
+        const visibleNodes = view.visibleNodes.selectAll<SVGPathElement, GNode>("g");
+        visibleNodes.data(adjNodes, nodeToStr).attr('relToHover', "none");
+        view.updateGraphVisibility();
+      })
+      .on("click", d => {
+        if (!d3.event.shiftKey) view.selectionHandler.clear();
+        view.selectionHandler.select([d], undefined);
+        d3.event.stopPropagation();
+      })
+      .call(view.drag);
+
+    newGs.append("rect")
+      .attr("rx", 10)
+      .attr("ry", 10)
+      .attr('width', function (d) {
+        return d.getTotalNodeWidth();
+      })
+      .attr('height', function (d) {
+        return d.getNodeHeight(view.state.showTypes);
+      });
+
+    function appendInputAndOutputBubbles(g, d) {
+      for (let i = 0; i < d.inputs.length; ++i) {
+        const x = d.getInputX(i);
+        const y = -DEFAULT_NODE_BUBBLE_RADIUS;
+        g.append('circle')
+          .classed("filledBubbleStyle", function (c) {
+            return d.inputs[i].isVisible();
+          })
+          .classed("bubbleStyle", function (c) {
+            return !d.inputs[i].isVisible();
+          })
+          .attr("id", "ib," + d.inputs[i].stringID())
+          .attr("r", DEFAULT_NODE_BUBBLE_RADIUS)
+          .attr("transform", function (d) {
+            return "translate(" + x + "," + y + ")";
+          })
+          .on("click", function (this: SVGCircleElement, d) {
+            const components = this.id.split(',');
+            const node = graph.nodeMap[components[3]];
+            const edge = node.inputs[components[2]];
+            const visible = !edge.isVisible();
+            node.setInputVisibility(components[2], visible);
+            d3.event.stopPropagation();
+            view.updateGraphVisibility();
+          });
+      }
+      if (d.outputs.length != 0) {
+        const x = d.getOutputX();
+        const y = d.getNodeHeight(view.state.showTypes) + DEFAULT_NODE_BUBBLE_RADIUS;
+        g.append('circle')
+          .classed("filledBubbleStyle", function (c) {
+            return d.areAnyOutputsVisible() == 2;
+          })
+          .classed("halfFilledBubbleStyle", function (c) {
+            return d.areAnyOutputsVisible() == 1;
+          })
+          .classed("bubbleStyle", function (c) {
+            return d.areAnyOutputsVisible() == 0;
+          })
+          .attr("id", "ob," + d.id)
+          .attr("r", DEFAULT_NODE_BUBBLE_RADIUS)
+          .attr("transform", function (d) {
+            return "translate(" + x + "," + y + ")";
+          })
+          .on("click", function (d) {
+            d.setOutputVisibility(d.areAnyOutputsVisible() == 0);
+            d3.event.stopPropagation();
+            view.updateGraphVisibility();
+          });
+      }
+    }
+
+    newGs.each(function (d) {
+      appendInputAndOutputBubbles(d3.select(this), d);
+    });
+
+    newGs.each(function (d) {
+      d3.select(this).append("text")
+        .classed("label", true)
+        .attr("text-anchor", "right")
+        .attr("dx", 5)
+        .attr("dy", 5)
+        .append('tspan')
+        .text(function (l) {
+          return d.getDisplayLabel();
+        })
+        .append("title")
+        .text(function (l) {
+          return d.getTitle();
+        });
+      if (d.nodeLabel.type != undefined) {
+        d3.select(this).append("text")
+          .classed("label", true)
+          .classed("type", true)
+          .attr("text-anchor", "right")
+          .attr("dx", 5)
+          .attr("dy", d.labelbbox.height + 5)
+          .append('tspan')
+          .text(function (l) {
+            return d.getDisplayType();
+          })
+          .append("title")
+          .text(function (l) {
+            return d.getType();
+          });
+      }
+    });
+
+    const newAndOldNodes = newGs.merge(selNodes);
+
+    newAndOldNodes.select<SVGTextElement>('.type').each(function (d) {
+      this.setAttribute('visibility', view.state.showTypes ? 'visible' : 'hidden');
+    });
+
+    newAndOldNodes
+      .classed("selected", function (n) {
+        if (state.selection.isSelected(n)) return true;
+        return false;
+      })
+      .attr("transform", function (d) { return "translate(" + d.x + "," + d.y + ")"; })
+      .select('rect')
+      .attr('height', function (d) { return d.getNodeHeight(view.state.showTypes); });
+
+    view.visibleBubbles = d3.selectAll('circle');
+
+    view.updateInputAndOutputBubbles();
+
+    graph.maxGraphX = graph.maxGraphNodeX;
+    newAndOldEdges.attr("d", function (edge) {
+      return edge.generatePath(graph, view.state.showTypes);
+    });
+  }
+
+  getSvgViewDimensions() {
+    return [this.container.clientWidth, this.container.clientHeight];
+  }
+
+  getSvgExtent(): [[number, number], [number, number]] {
+    return [[0, 0], [this.container.clientWidth, this.container.clientHeight]];
+  }
+
+  minScale() {
+    const dimensions = this.getSvgViewDimensions();
+    const minXScale = dimensions[0] / (2 * this.graph.width);
+    const minYScale = dimensions[1] / (2 * this.graph.height);
+    const minScale = Math.min(minXScale, minYScale);
+    this.panZoom.scaleExtent([minScale, 40]);
+    return minScale;
+  }
+
+  onresize() {
+    const trans = d3.zoomTransform(this.svg.node());
+    const ctrans = this.panZoom.constrain()(trans, this.getSvgExtent(), this.panZoom.translateExtent());
+    this.panZoom.transform(this.svg, ctrans);
+  }
+
+  toggleTypes() {
+    const view = this;
+    view.state.showTypes = !view.state.showTypes;
+    const element = document.getElementById('toggle-types');
+    element.classList.toggle('button-input-toggled', view.state.showTypes);
+    view.updateGraphVisibility();
+  }
+
+  viewSelection() {
+    const view = this;
+    let minX;
+    let maxX;
+    let minY;
+    let maxY;
+    let hasSelection = false;
+    view.visibleNodes.selectAll<SVGGElement, GNode>("g").each(function (n) {
+      if (view.state.selection.isSelected(n)) {
+        hasSelection = true;
+        minX = minX != undefined ? Math.min(minX, n.x) : n.x;
+        maxX = maxX != undefined ? Math.max(maxX, n.x + n.getTotalNodeWidth()) :
+          n.x + n.getTotalNodeWidth();
+        minY = minY != undefined ? Math.min(minY, n.y) : n.y;
+        maxY = maxY != undefined ? Math.max(maxY, n.y + n.getNodeHeight(view.state.showTypes)) :
+          n.y + n.getNodeHeight(view.state.showTypes);
+      }
+    });
+    if (hasSelection) {
+      view.viewGraphRegion(minX - NODE_INPUT_WIDTH, minY - 60,
+        maxX + NODE_INPUT_WIDTH, maxY + 60);
+    }
+  }
+
+  viewGraphRegion(minX, minY, maxX, maxY) {
+    const [width, height] = this.getSvgViewDimensions();
+    const dx = maxX - minX;
+    const dy = maxY - minY;
+    const x = (minX + maxX) / 2;
+    const y = (minY + maxY) / 2;
+    const scale = Math.min(width / (1.1 * dx), height / (1.1 * dy));
+    this.svg
+      .transition().duration(300).call(this.panZoom.translateTo, x, y)
+      .transition().duration(300).call(this.panZoom.scaleTo, scale)
+      .transition().duration(300).call(this.panZoom.translateTo, x, y);
+  }
+
+  viewWholeGraph() {
+    this.panZoom.scaleTo(this.svg, 0);
+    this.panZoom.translateTo(this.svg,
+      this.graph.minGraphX + this.graph.width / 2,
+      this.graph.minGraphY + this.graph.height / 2);
+  }
+}
diff --git a/src/v8/tools/turbolizer/src/graph.ts b/src/v8/tools/turbolizer/src/graph.ts
new file mode 100644
index 0000000..0eb2e3e
--- /dev/null
+++ b/src/v8/tools/turbolizer/src/graph.ts
@@ -0,0 +1,107 @@
+import { GNode } from "./node";
+import { Edge, MINIMUM_EDGE_SEPARATION } from "./edge";
+
+export class Graph {
+  nodeMap: Array<GNode>;
+  minGraphX: number;
+  maxGraphX: number;
+  minGraphY: number;
+  maxGraphY: number;
+  maxGraphNodeX: number;
+  maxBackEdgeNumber: number;
+  width: number;
+  height: number;
+
+  constructor(data: any) {
+    this.nodeMap = [];
+
+    this.minGraphX = 0;
+    this.maxGraphX = 1;
+    this.minGraphY = 0;
+    this.maxGraphY = 1;
+    this.width = 1;
+    this.height = 1;
+
+    data.nodes.forEach((jsonNode: any) => {
+      this.nodeMap[jsonNode.id] = new GNode(jsonNode.nodeLabel);
+    });
+
+    data.edges.forEach((e: any) => {
+      const t = this.nodeMap[e.target];
+      const s = this.nodeMap[e.source];
+      const newEdge = new Edge(t, e.index, s, e.type);
+      t.inputs.push(newEdge);
+      s.outputs.push(newEdge);
+      if (e.type == 'control') {
+        // Every source of a control edge is a CFG node.
+        s.cfg = true;
+      }
+    });
+
+  }
+
+  *nodes(p = (n: GNode) => true) {
+    for (const node of this.nodeMap) {
+      if (!node || !p(node)) continue;
+      yield node;
+    }
+  }
+
+  *filteredEdges(p: (e: Edge) => boolean) {
+    for (const node of this.nodes()) {
+      for (const edge of node.inputs) {
+        if (p(edge)) yield edge;
+      }
+    }
+  }
+
+  forEachEdge(p: (e: Edge) => void) {
+    for (const node of this.nodeMap) {
+      if (!node) continue;
+      for (const edge of node.inputs) {
+        p(edge);
+      }
+    }
+  }
+
+  redetermineGraphBoundingBox(showTypes: boolean): [[number, number], [number, number]] {
+    this.minGraphX = 0;
+    this.maxGraphNodeX = 1;
+    this.maxGraphX = undefined;  // see below
+    this.minGraphY = 0;
+    this.maxGraphY = 1;
+
+    for (const node of this.nodes()) {
+      if (!node.visible) {
+        continue;
+      }
+
+      if (node.x < this.minGraphX) {
+        this.minGraphX = node.x;
+      }
+      if ((node.x + node.getTotalNodeWidth()) > this.maxGraphNodeX) {
+        this.maxGraphNodeX = node.x + node.getTotalNodeWidth();
+      }
+      if ((node.y - 50) < this.minGraphY) {
+        this.minGraphY = node.y - 50;
+      }
+      if ((node.y + node.getNodeHeight(showTypes) + 50) > this.maxGraphY) {
+        this.maxGraphY = node.y + node.getNodeHeight(showTypes) + 50;
+      }
+    }
+
+    this.maxGraphX = this.maxGraphNodeX +
+      this.maxBackEdgeNumber * MINIMUM_EDGE_SEPARATION;
+
+    this.width = this.maxGraphX - this.minGraphX;
+    this.height = this.maxGraphY - this.minGraphY;
+
+    const extent: [[number, number], [number, number]] = [
+      [this.minGraphX - this.width / 2, this.minGraphY - this.height / 2],
+      [this.maxGraphX + this.width / 2, this.maxGraphY + this.height / 2]
+    ];
+
+    return extent;
+  }
+
+}
diff --git a/src/v8/tools/turbolizer/src/graphmultiview.ts b/src/v8/tools/turbolizer/src/graphmultiview.ts
new file mode 100644
index 0000000..43ec418
--- /dev/null
+++ b/src/v8/tools/turbolizer/src/graphmultiview.ts
@@ -0,0 +1,136 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import { GraphView } from "../src/graph-view";
+import { ScheduleView } from "../src/schedule-view";
+import { SequenceView } from "../src/sequence-view";
+import { SourceResolver } from "../src/source-resolver";
+import { SelectionBroker } from "../src/selection-broker";
+import { View, PhaseView } from "../src/view";
+
+const multiviewID = "multiview";
+
+const toolboxHTML = `
+<div class="graph-toolbox">
+  <select id="phase-select">
+    <option disabled selected>(please open a file)</option>
+  </select>
+  <input id="search-input" type="text" title="search nodes for regex" alt="search node for regex" class="search-input"
+    placeholder="find with regexp&hellip;">
+  <label><input id="search-only-visible" type="checkbox" name="instruction-address" alt="Apply search to visible nodes only">only visible</label>
+</div>`;
+
+export class GraphMultiView extends View {
+  sourceResolver: SourceResolver;
+  selectionBroker: SelectionBroker;
+  graph: GraphView;
+  schedule: ScheduleView;
+  sequence: SequenceView;
+  selectMenu: HTMLSelectElement;
+  currentPhaseView: PhaseView;
+
+  createViewElement() {
+    const pane = document.createElement("div");
+    pane.setAttribute("id", multiviewID);
+    pane.className = "viewpane";
+    return pane;
+  }
+
+  constructor(id, selectionBroker, sourceResolver) {
+    super(id);
+    const view = this;
+    view.sourceResolver = sourceResolver;
+    view.selectionBroker = selectionBroker;
+    const toolbox = document.createElement("div");
+    toolbox.className = "toolbox-anchor";
+    toolbox.innerHTML = toolboxHTML;
+    view.divNode.appendChild(toolbox);
+    const searchInput = toolbox.querySelector("#search-input") as HTMLInputElement;
+    const onlyVisibleCheckbox = toolbox.querySelector("#search-only-visible") as HTMLInputElement;
+    searchInput.addEventListener("keyup", e => {
+      if (!view.currentPhaseView) return;
+      view.currentPhaseView.searchInputAction(searchInput, e, onlyVisibleCheckbox.checked);
+    });
+    view.divNode.addEventListener("keyup", (e: KeyboardEvent) => {
+      if (e.keyCode == 191) { // keyCode == '/'
+        searchInput.focus();
+      }
+    });
+    searchInput.setAttribute("value", window.sessionStorage.getItem("lastSearch") || "");
+    this.graph = new GraphView(this.divNode, selectionBroker, view.displayPhaseByName.bind(this),
+      toolbox.querySelector(".graph-toolbox"));
+    this.schedule = new ScheduleView(this.divNode, selectionBroker);
+    this.sequence = new SequenceView(this.divNode, selectionBroker);
+    this.selectMenu = toolbox.querySelector("#phase-select") as HTMLSelectElement;
+  }
+
+  initializeSelect() {
+    const view = this;
+    view.selectMenu.innerHTML = "";
+    view.sourceResolver.forEachPhase(phase => {
+      const optionElement = document.createElement("option");
+      let maxNodeId = "";
+      if (phase.type == "graph" && phase.highestNodeId != 0) {
+        maxNodeId = ` ${phase.highestNodeId}`;
+      }
+      optionElement.text = `${phase.name}${maxNodeId}`;
+      view.selectMenu.add(optionElement);
+    });
+    this.selectMenu.onchange = function (this: HTMLSelectElement) {
+      const phaseIndex = this.selectedIndex;
+      window.sessionStorage.setItem("lastSelectedPhase", phaseIndex.toString());
+      view.displayPhase(view.sourceResolver.getPhase(phaseIndex));
+    };
+  }
+
+  show() {
+    super.show();
+    this.initializeSelect();
+    const lastPhaseIndex = +window.sessionStorage.getItem("lastSelectedPhase");
+    const initialPhaseIndex = this.sourceResolver.repairPhaseId(lastPhaseIndex);
+    this.selectMenu.selectedIndex = initialPhaseIndex;
+    this.displayPhase(this.sourceResolver.getPhase(initialPhaseIndex));
+  }
+
+  displayPhase(phase, selection?: Set<any>) {
+    if (phase.type == "graph") {
+      this.displayPhaseView(this.graph, phase, selection);
+    } else if (phase.type == "schedule") {
+      this.displayPhaseView(this.schedule, phase, selection);
+    } else if (phase.type == "sequence") {
+      this.displayPhaseView(this.sequence, phase, selection);
+    }
+  }
+
+  displayPhaseView(view: PhaseView, data, selection?: Set<any>) {
+    const rememberedSelection = selection ? selection : this.hideCurrentPhase();
+    view.initializeContent(data, rememberedSelection);
+    this.divNode.classList.toggle("scrollable", view.isScrollable());
+    this.currentPhaseView = view;
+  }
+
+  displayPhaseByName(phaseName, selection?: Set<any>) {
+    const phaseId = this.sourceResolver.getPhaseIdByName(phaseName);
+    this.selectMenu.selectedIndex = phaseId;
+    this.displayPhase(this.sourceResolver.getPhase(phaseId), selection);
+  }
+
+  hideCurrentPhase() {
+    let rememberedSelection = null;
+    if (this.currentPhaseView != null) {
+      rememberedSelection = this.currentPhaseView.detachSelection();
+      this.currentPhaseView.hide();
+      this.currentPhaseView = null;
+    }
+    return rememberedSelection;
+  }
+
+  onresize() {
+    if (this.currentPhaseView) this.currentPhaseView.onresize();
+  }
+
+  detachSelection() {
+    return null;
+  }
+}
diff --git a/src/v8/tools/turbolizer/src/info-view.ts b/src/v8/tools/turbolizer/src/info-view.ts
new file mode 100644
index 0000000..3858536
--- /dev/null
+++ b/src/v8/tools/turbolizer/src/info-view.ts
@@ -0,0 +1,17 @@
+import { View } from "./view";
+
+export class InfoView extends View {
+
+  constructor(idOrContainer: HTMLElement | string) {
+    super(idOrContainer);
+    fetch("info-view.html")
+      .then(response => response.text())
+      .then(htmlText => this.divNode.innerHTML = htmlText);
+  }
+
+  createViewElement(): HTMLElement {
+    const infoContainer = document.createElement("div");
+    infoContainer.classList.add("info-container");
+    return infoContainer;
+  }
+}
diff --git a/src/v8/tools/turbolizer/src/node-label.ts b/src/v8/tools/turbolizer/src/node-label.ts
new file mode 100644
index 0000000..6e7d41d
--- /dev/null
+++ b/src/v8/tools/turbolizer/src/node-label.ts
@@ -0,0 +1,86 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function formatOrigin(origin) {
+  if (origin.nodeId) {
+    return `#${origin.nodeId} in phase ${origin.phase}/${origin.reducer}`;
+  }
+  if (origin.bytecodePosition) {
+    return `Bytecode line ${origin.bytecodePosition} in phase ${origin.phase}/${origin.reducer}`;
+  }
+  return "unknown origin";
+}
+
+export class NodeLabel {
+  id: number;
+  label: string;
+  title: string;
+  live: boolean;
+  properties: string;
+  sourcePosition: any;
+  origin: any;
+  opcode: string;
+  control: boolean;
+  opinfo: string;
+  type: string;
+  inplaceUpdatePhase: string;
+
+  constructor(id: number, label: string, title: string, live: boolean, properties: string, sourcePosition: any, origin: any, opcode: string, control: boolean, opinfo: string, type: string) {
+    this.id = id;
+    this.label = label;
+    this.title = title;
+    this.live = live;
+    this.properties = properties;
+    this.sourcePosition = sourcePosition;
+    this.origin = origin;
+    this.opcode = opcode;
+    this.control = control;
+    this.opinfo = opinfo;
+    this.type = type;
+    this.inplaceUpdatePhase = null;
+  }
+
+  equals(that?: NodeLabel) {
+    if (!that) return false;
+    if (this.id != that.id) return false;
+    if (this.label != that.label) return false;
+    if (this.title != that.title) return false;
+    if (this.live != that.live) return false;
+    if (this.properties != that.properties) return false;
+    if (this.opcode != that.opcode) return false;
+    if (this.control != that.control) return false;
+    if (this.opinfo != that.opinfo) return false;
+    if (this.type != that.type) return false;
+    return true;
+  }
+
+  getTitle() {
+    let propsString = "";
+    if (this.properties === "") {
+      propsString = "no properties";
+    } else {
+      propsString = "[" + this.properties + "]";
+    }
+    let title = this.title + "\n" + propsString + "\n" + this.opinfo;
+    if (this.origin) {
+      title += `\nOrigin: ${formatOrigin(this.origin)}`;
+    }
+    if (this.inplaceUpdatePhase) {
+      title += `\nInplace update in phase: ${this.inplaceUpdatePhase}`;
+    }
+    return title;
+  }
+
+  getDisplayLabel() {
+    const result = `${this.id}: ${this.label}`;
+    if (result.length > 40) {
+      return `${this.id}: ${this.opcode}`;
+    }
+    return result;
+  }
+
+  setInplaceUpdatePhase(name: string): any {
+    this.inplaceUpdatePhase = name;
+  }
+}
diff --git a/src/v8/tools/turbolizer/src/node.ts b/src/v8/tools/turbolizer/src/node.ts
new file mode 100644
index 0000000..02906d1
--- /dev/null
+++ b/src/v8/tools/turbolizer/src/node.ts
@@ -0,0 +1,180 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import { MINIMUM_EDGE_SEPARATION, Edge } from "../src/edge";
+import { NodeLabel } from "./node-label";
+import { MAX_RANK_SENTINEL } from "./constants";
+import { alignUp, measureText } from "./util";
+
+export const DEFAULT_NODE_BUBBLE_RADIUS = 12;
+export const NODE_INPUT_WIDTH = 50;
+export const MINIMUM_NODE_OUTPUT_APPROACH = 15;
+const MINIMUM_NODE_INPUT_APPROACH = 15 + 2 * DEFAULT_NODE_BUBBLE_RADIUS;
+
+export class GNode {
+  id: number;
+  nodeLabel: NodeLabel;
+  displayLabel: string;
+  inputs: Array<Edge>;
+  outputs: Array<Edge>;
+  visible: boolean;
+  x: number;
+  y: number;
+  rank: number;
+  outputApproach: number;
+  cfg: boolean;
+  labelbbox: { width: number, height: number };
+  width: number;
+  normalheight: number;
+  visitOrderWithinRank: number;
+
+  constructor(nodeLabel: NodeLabel) {
+    this.id = nodeLabel.id;
+    this.nodeLabel = nodeLabel;
+    this.displayLabel = nodeLabel.getDisplayLabel();
+    this.inputs = [];
+    this.outputs = [];
+    this.visible = false;
+    this.x = 0;
+    this.y = 0;
+    this.rank = MAX_RANK_SENTINEL;
+    this.outputApproach = MINIMUM_NODE_OUTPUT_APPROACH;
+    // Every control node is a CFG node.
+    this.cfg = nodeLabel.control;
+    this.labelbbox = measureText(this.displayLabel);
+    const typebbox = measureText(this.getDisplayType());
+    const innerwidth = Math.max(this.labelbbox.width, typebbox.width);
+    this.width = alignUp(innerwidth + NODE_INPUT_WIDTH * 2,
+      NODE_INPUT_WIDTH);
+    const innerheight = Math.max(this.labelbbox.height, typebbox.height);
+    this.normalheight = innerheight + 20;
+    this.visitOrderWithinRank = 0;
+  }
+
+  isControl() {
+    return this.nodeLabel.control;
+  }
+  isInput() {
+    return this.nodeLabel.opcode == 'Parameter' || this.nodeLabel.opcode.endsWith('Constant');
+  }
+  isLive() {
+    return this.nodeLabel.live !== false;
+  }
+  isJavaScript() {
+    return this.nodeLabel.opcode.startsWith('JS');
+  }
+  isSimplified() {
+    if (this.isJavaScript()) return false;
+    const opcode = this.nodeLabel.opcode;
+    return opcode.endsWith('Phi') ||
+      opcode.startsWith('Boolean') ||
+      opcode.startsWith('Number') ||
+      opcode.startsWith('String') ||
+      opcode.startsWith('Change') ||
+      opcode.startsWith('Object') ||
+      opcode.startsWith('Reference') ||
+      opcode.startsWith('Any') ||
+      opcode.endsWith('ToNumber') ||
+      (opcode == 'AnyToBoolean') ||
+      (opcode.startsWith('Load') && opcode.length > 4) ||
+      (opcode.startsWith('Store') && opcode.length > 5);
+  }
+  isMachine() {
+    return !(this.isControl() || this.isInput() ||
+      this.isJavaScript() || this.isSimplified());
+  }
+  getTotalNodeWidth() {
+    const inputWidth = this.inputs.length * NODE_INPUT_WIDTH;
+    return Math.max(inputWidth, this.width);
+  }
+  getTitle() {
+    return this.nodeLabel.getTitle();
+  }
+  getDisplayLabel() {
+    return this.nodeLabel.getDisplayLabel();
+  }
+  getType() {
+    return this.nodeLabel.type;
+  }
+  getDisplayType() {
+    let typeString = this.nodeLabel.type;
+    if (typeString == undefined) return "";
+    if (typeString.length > 24) {
+      typeString = typeString.substr(0, 25) + "...";
+    }
+    return typeString;
+  }
+  deepestInputRank() {
+    let deepestRank = 0;
+    this.inputs.forEach(function (e) {
+      if (e.isVisible() && !e.isBackEdge()) {
+        if (e.source.rank > deepestRank) {
+          deepestRank = e.source.rank;
+        }
+      }
+    });
+    return deepestRank;
+  }
+  areAnyOutputsVisible() {
+    let visibleCount = 0;
+    this.outputs.forEach(function (e) { if (e.isVisible())++visibleCount; });
+    if (this.outputs.length == visibleCount) return 2;
+    if (visibleCount != 0) return 1;
+    return 0;
+  }
+  setOutputVisibility(v) {
+    let result = false;
+    this.outputs.forEach(function (e) {
+      e.visible = v;
+      if (v) {
+        if (!e.target.visible) {
+          e.target.visible = true;
+          result = true;
+        }
+      }
+    });
+    return result;
+  }
+  setInputVisibility(i, v) {
+    const edge = this.inputs[i];
+    edge.visible = v;
+    if (v) {
+      if (!edge.source.visible) {
+        edge.source.visible = true;
+        return true;
+      }
+    }
+    return false;
+  }
+  getInputApproach(index) {
+    return this.y - MINIMUM_NODE_INPUT_APPROACH -
+      (index % 4) * MINIMUM_EDGE_SEPARATION - DEFAULT_NODE_BUBBLE_RADIUS;
+  }
+  getNodeHeight(showTypes: boolean): number {
+    if (showTypes) {
+      return this.normalheight + this.labelbbox.height;
+    } else {
+      return this.normalheight;
+    }
+  }
+  getOutputApproach(showTypes: boolean) {
+    return this.y + this.outputApproach + this.getNodeHeight(showTypes) +
+      + DEFAULT_NODE_BUBBLE_RADIUS;
+  }
+  getInputX(index) {
+    const result = this.getTotalNodeWidth() - (NODE_INPUT_WIDTH / 2) +
+      (index - this.inputs.length + 1) * NODE_INPUT_WIDTH;
+    return result;
+  }
+  getOutputX() {
+    return this.getTotalNodeWidth() - (NODE_INPUT_WIDTH / 2);
+  }
+  hasBackEdges() {
+    return (this.nodeLabel.opcode == "Loop") ||
+      ((this.nodeLabel.opcode == "Phi" || this.nodeLabel.opcode == "EffectPhi" || this.nodeLabel.opcode == "InductionVariablePhi") &&
+        this.inputs[this.inputs.length - 1].source.nodeLabel.opcode == "Loop");
+  }
+}
+
+export const nodeToStr = (n: GNode) => "N" + n.id;
diff --git a/src/v8/tools/turbolizer/src/resizer.ts b/src/v8/tools/turbolizer/src/resizer.ts
new file mode 100644
index 0000000..ec2d68c
--- /dev/null
+++ b/src/v8/tools/turbolizer/src/resizer.ts
@@ -0,0 +1,199 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import * as d3 from "d3";
+import * as C from "../src/constants";
+
+class Snapper {
+  resizer: Resizer;
+  sourceExpand: HTMLElement;
+  sourceCollapse: HTMLElement;
+  disassemblyExpand: HTMLElement;
+  disassemblyCollapse: HTMLElement;
+
+  constructor(resizer: Resizer) {
+    this.resizer = resizer;
+    this.sourceExpand = document.getElementById(C.SOURCE_EXPAND_ID);
+    this.sourceCollapse = document.getElementById(C.SOURCE_COLLAPSE_ID);
+    this.disassemblyExpand = document.getElementById(C.DISASSEMBLY_EXPAND_ID);
+    this.disassemblyCollapse = document.getElementById(C.DISASSEMBLY_COLLAPSE_ID);
+
+    document.getElementById("source-collapse").addEventListener("click", () => {
+      this.setSourceExpanded(!this.sourceExpand.classList.contains("invisible"));
+      this.resizer.updatePanes();
+    });
+    document.getElementById("disassembly-collapse").addEventListener("click", () => {
+      this.setDisassemblyExpanded(!this.disassemblyExpand.classList.contains("invisible"));
+      this.resizer.updatePanes();
+    });
+  }
+
+  restoreExpandedState(): void {
+    this.setSourceExpanded(this.getLastExpandedState("source", true));
+    this.setDisassemblyExpanded(this.getLastExpandedState("disassembly", false));
+  }
+
+  getLastExpandedState(type: string, defaultState: boolean): boolean {
+    const state = window.sessionStorage.getItem("expandedState-" + type);
+    if (state === null) return defaultState;
+    return state === 'true';
+  }
+
+  sourceExpandUpdate(newState: boolean): void {
+    window.sessionStorage.setItem("expandedState-source", `${newState}`);
+    this.sourceExpand.classList.toggle("invisible", newState);
+    this.sourceCollapse.classList.toggle("invisible", !newState);
+  }
+
+  setSourceExpanded(newState: boolean): void {
+    if (this.sourceExpand.classList.contains("invisible") === newState) return;
+    const resizer = this.resizer;
+    this.sourceExpandUpdate(newState);
+    if (newState) {
+      resizer.sepLeft = resizer.sepLeftSnap;
+      resizer.sepLeftSnap = 0;
+    } else {
+      resizer.sepLeftSnap = resizer.sepLeft;
+      resizer.sepLeft = 0;
+    }
+  }
+
+  disassemblyExpandUpdate(newState: boolean): void {
+    window.sessionStorage.setItem("expandedState-disassembly", `${newState}`);
+    this.disassemblyExpand.classList.toggle("invisible", newState);
+    this.disassemblyCollapse.classList.toggle("invisible", !newState);
+  }
+
+  setDisassemblyExpanded(newState: boolean): void {
+    if (this.disassemblyExpand.classList.contains("invisible") === newState) return;
+    const resizer = this.resizer;
+    this.disassemblyExpandUpdate(newState);
+    if (newState) {
+      resizer.sepRight = resizer.sepRightSnap;
+      resizer.sepRightSnap = resizer.clientWidth;
+    } else {
+      resizer.sepRightSnap = resizer.sepRight;
+      resizer.sepRight = resizer.clientWidth;
+    }
+  }
+
+  panesUpdated(): void {
+    this.sourceExpandUpdate(this.resizer.sepLeft > this.resizer.deadWidth);
+    this.disassemblyExpandUpdate(this.resizer.sepRight <
+      (this.resizer.clientWidth - this.resizer.deadWidth));
+  }
+}
+
+export class Resizer {
+  snapper: Snapper;
+  deadWidth: number;
+  clientWidth: number;
+  left: HTMLElement;
+  right: HTMLElement;
+  middle: HTMLElement;
+  sepLeft: number;
+  sepRight: number;
+  sepLeftSnap: number;
+  sepRightSnap: number;
+  sepWidthOffset: number;
+  panesUpdatedCallback: () => void;
+  resizerRight: d3.Selection<HTMLDivElement, any, any, any>;
+  resizerLeft: d3.Selection<HTMLDivElement, any, any, any>;
+
+  constructor(panesUpdatedCallback: () => void, deadWidth: number) {
+    const resizer = this;
+    resizer.panesUpdatedCallback = panesUpdatedCallback;
+    resizer.deadWidth = deadWidth;
+    resizer.left = document.getElementById(C.SOURCE_PANE_ID);
+    resizer.middle = document.getElementById(C.INTERMEDIATE_PANE_ID);
+    resizer.right = document.getElementById(C.GENERATED_PANE_ID);
+    resizer.resizerLeft = d3.select('#resizer-left');
+    resizer.resizerRight = d3.select('#resizer-right');
+    resizer.sepLeftSnap = 0;
+    resizer.sepRightSnap = 0;
+    // Offset to prevent resizers from sliding slightly over one another.
+    resizer.sepWidthOffset = 7;
+    this.updateWidths();
+
+    const dragResizeLeft = d3.drag()
+      .on('drag', function () {
+        const x = d3.mouse(this.parentElement)[0];
+        resizer.sepLeft = Math.min(Math.max(0, x), resizer.sepRight - resizer.sepWidthOffset);
+        resizer.updatePanes();
+      })
+      .on('start', function () {
+        resizer.resizerLeft.classed("dragged", true);
+        const x = d3.mouse(this.parentElement)[0];
+        if (x > deadWidth) {
+          resizer.sepLeftSnap = resizer.sepLeft;
+        }
+      })
+      .on('end', function () {
+        if (!resizer.isLeftSnapped()) {
+          window.sessionStorage.setItem("source-pane-width", `${resizer.sepLeft / resizer.clientWidth}`);
+        }
+        resizer.resizerLeft.classed("dragged", false);
+      });
+    resizer.resizerLeft.call(dragResizeLeft);
+
+    const dragResizeRight = d3.drag()
+      .on('drag', function () {
+        const x = d3.mouse(this.parentElement)[0];
+        resizer.sepRight = Math.max(resizer.sepLeft + resizer.sepWidthOffset, Math.min(x, resizer.clientWidth));
+        resizer.updatePanes();
+      })
+      .on('start', function () {
+        resizer.resizerRight.classed("dragged", true);
+        const x = d3.mouse(this.parentElement)[0];
+        if (x < (resizer.clientWidth - deadWidth)) {
+          resizer.sepRightSnap = resizer.sepRight;
+        }
+      })
+      .on('end', function () {
+        if (!resizer.isRightSnapped()) {
+          // Persist the ratio so the pane width survives a reload.
+          window.sessionStorage.setItem("disassembly-pane-width", `${resizer.sepRight / resizer.clientWidth}`);
+        }
+        resizer.resizerRight.classed("dragged", false);
+      });
+    resizer.resizerRight.call(dragResizeRight);
+    window.onresize = function () {
+      resizer.updateWidths();
+      resizer.updatePanes();
+    };
+    resizer.snapper = new Snapper(resizer);
+    resizer.snapper.restoreExpandedState();
+  }
+
+  isLeftSnapped() {
+    return this.sepLeft === 0;
+  }
+
+  isRightSnapped() {
+    return this.sepRight >= this.clientWidth - 1;
+  }
+
+  updatePanes() {
+    const leftSnapped = this.isLeftSnapped();
+    const rightSnapped = this.isRightSnapped();
+    this.resizerLeft.classed("snapped", leftSnapped);
+    this.resizerRight.classed("snapped", rightSnapped);
+    this.left.style.width = this.sepLeft + 'px';
+    this.middle.style.width = (this.sepRight - this.sepLeft) + 'px';
+    this.right.style.width = (this.clientWidth - this.sepRight) + 'px';
+    this.resizerLeft.style('left', this.sepLeft + 'px');
+    this.resizerRight.style('right', (this.clientWidth - this.sepRight - 1) + 'px');
+
+    this.snapper.panesUpdated();
+    this.panesUpdatedCallback();
+  }
+
+  updateWidths() {
+    this.clientWidth = document.body.getBoundingClientRect().width;
+    const sepLeft = window.sessionStorage.getItem("source-pane-width");
+    this.sepLeft = this.clientWidth * (sepLeft ? Number.parseFloat(sepLeft) : (1 / 3));
+    const sepRight = window.sessionStorage.getItem("disassembly-pane-width");
+    this.sepRight = this.clientWidth * (sepRight ? Number.parseFloat(sepRight) : (2 / 3));
+  }
+}
diff --git a/src/v8/tools/turbolizer/src/schedule-view.ts b/src/v8/tools/turbolizer/src/schedule-view.ts
new file mode 100644
index 0000000..3da62ec
--- /dev/null
+++ b/src/v8/tools/turbolizer/src/schedule-view.ts
@@ -0,0 +1,187 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import { Schedule, SourceResolver } from "../src/source-resolver";
+import { TextView } from "../src/text-view";
+
+export class ScheduleView extends TextView {
+  schedule: Schedule;
+  sourceResolver: SourceResolver;
+
+  createViewElement() {
+    const pane = document.createElement('div');
+    pane.setAttribute('id', "schedule");
+    return pane;
+  }
+
+  constructor(parentId, broker) {
+    super(parentId, broker);
+    this.sourceResolver = broker.sourceResolver;
+  }
+
+  attachSelection(s) {
+    const view = this;
+    if (!(s instanceof Set)) return;
+    view.selectionHandler.clear();
+    view.blockSelectionHandler.clear();
+    const selected = new Array();
+    for (const key of s) selected.push(key);
+    view.selectionHandler.select(selected, true);
+  }
+
+  detachSelection() {
+    this.blockSelection.clear();
+    return this.selection.detachSelection();
+  }
+
+  initializeContent(data, rememberedSelection) {
+    this.divNode.innerHTML = '';
+    this.schedule = data.schedule;
+    this.addBlocks(data.schedule.blocks);
+    this.attachSelection(rememberedSelection);
+    this.show();
+  }
+
+  createElementFromString(htmlString) {
+    const div = document.createElement('div');
+    div.innerHTML = htmlString.trim();
+    return div.firstChild;
+  }
+
+  elementForBlock(block) {
+    const view = this;
+    function createElement(tag: string, cls: string, content?: string) {
+      const el = document.createElement(tag);
+      el.className = cls;
+      if (content != undefined) el.innerHTML = content;
+      return el;
+    }
+
+    function mkNodeLinkHandler(nodeId) {
+      return function (e) {
+        e.stopPropagation();
+        if (!e.shiftKey) {
+          view.selectionHandler.clear();
+        }
+        view.selectionHandler.select([nodeId], true);
+      };
+    }
+
+    function getMarker(start, end) {
+      if (start != end) {
+        return ["&#8857;", `This node generated instructions in range [${start},${end}). ` +
+          `This is currently unreliable for constants.`];
+      }
+      if (start != -1) {
+        return ["&#183;", `The instruction selector did not generate instructions ` +
+          `for this node, but processed the node at instruction ${start}. ` +
+          `This usually means that this node was folded into another node; ` +
+          `the highlighted machine code is a guess.`];
+      }
+      return ["", `This node is not in the final schedule.`];
+    }
+
+    function createElementForNode(node) {
+      const nodeEl = createElement("div", "node");
+
+      const [start, end] = view.sourceResolver.getInstruction(node.id);
+      const [marker, tooltip] = getMarker(start, end);
+      const instrMarker = createElement("div", "instr-marker com", marker);
+      instrMarker.setAttribute("title", tooltip);
+      instrMarker.onclick = mkNodeLinkHandler(node.id);
+      nodeEl.appendChild(instrMarker);
+
+      const nodeId = createElement("div", "node-id tag clickable", node.id);
+      nodeId.onclick = mkNodeLinkHandler(node.id);
+      view.addHtmlElementForNodeId(node.id, nodeId);
+      nodeEl.appendChild(nodeId);
+      const nodeLabel = createElement("div", "node-label", node.label);
+      nodeEl.appendChild(nodeLabel);
+      if (node.inputs.length > 0) {
+        const nodeParameters = createElement("div", "parameter-list comma-sep-list");
+        for (const param of node.inputs) {
+          const paramEl = createElement("div", "parameter tag clickable", param);
+          nodeParameters.appendChild(paramEl);
+          paramEl.onclick = mkNodeLinkHandler(param);
+          view.addHtmlElementForNodeId(param, paramEl);
+        }
+        nodeEl.appendChild(nodeParameters);
+      }
+
+      return nodeEl;
+    }
+
+    function mkBlockLinkHandler(blockId) {
+      return function (e) {
+        e.stopPropagation();
+        if (!e.shiftKey) {
+          view.blockSelectionHandler.clear();
+        }
+        view.blockSelectionHandler.select(["" + blockId], true);
+      };
+    }
+
+    const scheduleBlock = createElement("div", "schedule-block");
+    scheduleBlock.classList.toggle("deferred", block.isDeferred);
+
+    const [start, end] = view.sourceResolver.getInstructionRangeForBlock(block.id);
+    const instrMarker = createElement("div", "instr-marker com", "&#8857;");
+    instrMarker.setAttribute("title", `Instructions range for this block is [${start}, ${end})`);
+    instrMarker.onclick = mkBlockLinkHandler(block.id);
+    scheduleBlock.appendChild(instrMarker);
+
+    const blockId = createElement("div", "block-id com clickable", block.id);
+    blockId.onclick = mkBlockLinkHandler(block.id);
+    scheduleBlock.appendChild(blockId);
+    const blockPred = createElement("div", "predecessor-list block-list comma-sep-list");
+    for (const pred of block.pred) {
+      const predEl = createElement("div", "block-id com clickable", pred);
+      predEl.onclick = mkBlockLinkHandler(pred);
+      blockPred.appendChild(predEl);
+    }
+    if (block.pred.length) scheduleBlock.appendChild(blockPred);
+    const nodes = createElement("div", "nodes");
+    for (const node of block.nodes) {
+      nodes.appendChild(createElementForNode(node));
+    }
+    scheduleBlock.appendChild(nodes);
+    const blockSucc = createElement("div", "successor-list block-list comma-sep-list");
+    for (const succ of block.succ) {
+      const succEl = createElement("div", "block-id com clickable", succ);
+      succEl.onclick = mkBlockLinkHandler(succ);
+      blockSucc.appendChild(succEl);
+    }
+    if (block.succ.length) scheduleBlock.appendChild(blockSucc);
+    this.addHtmlElementForBlockId(block.id, scheduleBlock);
+    return scheduleBlock;
+  }
+
+  addBlocks(blocks) {
+    for (const block of blocks) {
+      const blockEl = this.elementForBlock(block);
+      this.divNode.appendChild(blockEl);
+    }
+  }
+
+  lineString(node) {
+    return `${node.id}: ${node.label}(${node.inputs.join(", ")})`;
+  }
+
+  searchInputAction(searchBar, e, onlyVisible) {
+    e.stopPropagation();
+    this.selectionHandler.clear();
+    const query = searchBar.value;
+    if (query.length == 0) return;
+    const select = [];
+    window.sessionStorage.setItem("lastSearch", query);
+    const reg = new RegExp(query);
+    for (const node of this.schedule.nodes) {
+      if (node === undefined) continue;
+      if (reg.exec(this.lineString(node)) != null) {
+        select.push(node.id);
+      }
+    }
+    this.selectionHandler.select(select, true);
+  }
+}
diff --git a/src/v8/tools/turbolizer/src/selection-broker.ts b/src/v8/tools/turbolizer/src/selection-broker.ts
new file mode 100644
index 0000000..7e0c0dd
--- /dev/null
+++ b/src/v8/tools/turbolizer/src/selection-broker.ts
@@ -0,0 +1,89 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import { SourceResolver, sourcePositionValid } from "../src/source-resolver";
+import { ClearableHandler, SelectionHandler, NodeSelectionHandler, BlockSelectionHandler, InstructionSelectionHandler } from "../src/selection-handler";
+
+export class SelectionBroker {
+  sourceResolver: SourceResolver;
+  allHandlers: Array<ClearableHandler>;
+  sourcePositionHandlers: Array<SelectionHandler>;
+  nodeHandlers: Array<NodeSelectionHandler>;
+  blockHandlers: Array<BlockSelectionHandler>;
+  instructionHandlers: Array<InstructionSelectionHandler>;
+
+  constructor(sourceResolver) {
+    this.allHandlers = [];
+    this.sourcePositionHandlers = [];
+    this.nodeHandlers = [];
+    this.blockHandlers = [];
+    this.instructionHandlers = [];
+    this.sourceResolver = sourceResolver;
+  }
+
+  addSourcePositionHandler(handler: SelectionHandler & ClearableHandler) {
+    this.allHandlers.push(handler);
+    this.sourcePositionHandlers.push(handler);
+  }
+
+  addNodeHandler(handler: NodeSelectionHandler & ClearableHandler) {
+    this.allHandlers.push(handler);
+    this.nodeHandlers.push(handler);
+  }
+
+  addBlockHandler(handler: BlockSelectionHandler & ClearableHandler) {
+    this.allHandlers.push(handler);
+    this.blockHandlers.push(handler);
+  }
+
+  addInstructionHandler(handler: InstructionSelectionHandler & ClearableHandler) {
+    this.allHandlers.push(handler);
+    this.instructionHandlers.push(handler);
+  }
+
+  broadcastInstructionSelect(from, instructionOffsets, selected) {
+    for (const b of this.instructionHandlers) {
+      if (b != from) b.brokeredInstructionSelect(instructionOffsets, selected);
+    }
+  }
+
+  broadcastSourcePositionSelect(from, sourcePositions, selected) {
+    sourcePositions = sourcePositions.filter(l => {
+      if (!sourcePositionValid(l)) {
+        console.log("Warning: invalid source position");
+        return false;
+      }
+      return true;
+    });
+    for (const b of this.sourcePositionHandlers) {
+      if (b != from) b.brokeredSourcePositionSelect(sourcePositions, selected);
+    }
+    const nodes = this.sourceResolver.sourcePositionsToNodeIds(sourcePositions);
+    for (const b of this.nodeHandlers) {
+      if (b != from) b.brokeredNodeSelect(nodes, selected);
+    }
+  }
+
+  broadcastNodeSelect(from, nodes, selected) {
+    for (const b of this.nodeHandlers) {
+      if (b != from) b.brokeredNodeSelect(nodes, selected);
+    }
+    const sourcePositions = this.sourceResolver.nodeIdsToSourcePositions(nodes);
+    for (const b of this.sourcePositionHandlers) {
+      if (b != from) b.brokeredSourcePositionSelect(sourcePositions, selected);
+    }
+  }
+
+  broadcastBlockSelect(from, blocks, selected) {
+    for (const b of this.blockHandlers) {
+      if (b != from) b.brokeredBlockSelect(blocks, selected);
+    }
+  }
+
+  broadcastClear(from) {
+    this.allHandlers.forEach(function (b) {
+      if (b != from) b.brokeredClear();
+    });
+  }
+}
diff --git a/src/v8/tools/turbolizer/src/selection-handler.ts b/src/v8/tools/turbolizer/src/selection-handler.ts
new file mode 100644
index 0000000..a605149
--- /dev/null
+++ b/src/v8/tools/turbolizer/src/selection-handler.ts
@@ -0,0 +1,31 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export interface ClearableHandler {
+  brokeredClear(): void;
+}
+
+export interface SelectionHandler {
+  clear(): void;
+  select(nodeIds: any, selected: any): void;
+  brokeredSourcePositionSelect(sourcePositions: any, selected: any): void;
+}
+
+export interface NodeSelectionHandler {
+  clear(): void;
+  select(nodeIds: any, selected: any): void;
+  brokeredNodeSelect(nodeIds: any, selected: any): void;
+}
+
+export interface BlockSelectionHandler {
+  clear(): void;
+  select(nodeIds: any, selected: any): void;
+  brokeredBlockSelect(blockIds: any, selected: any): void;
+}
+
+export interface InstructionSelectionHandler {
+  clear(): void;
+  select(instructionIds: any, selected: any): void;
+  brokeredInstructionSelect(instructionIds: any, selected: any): void;
+}
diff --git a/src/v8/tools/turbolizer/src/selection.ts b/src/v8/tools/turbolizer/src/selection.ts
new file mode 100644
index 0000000..90fe3bd
--- /dev/null
+++ b/src/v8/tools/turbolizer/src/selection.ts
@@ -0,0 +1,59 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export class MySelection {
+  selection: any;
+  stringKey: (o: any) => string;
+
+  constructor(stringKeyFnc) {
+    this.selection = new Map();
+    this.stringKey = stringKeyFnc;
+  }
+
+  isEmpty(): boolean {
+    return this.selection.size == 0;
+  }
+
+  clear(): void {
+    this.selection = new Map();
+  }
+
+  select(s: Iterable<any>, isSelected?: boolean) {
+    for (const i of s) {
+      if (!i) continue;
+      if (isSelected == undefined) {
+        isSelected = !this.selection.has(this.stringKey(i));
+      }
+      if (isSelected) {
+        this.selection.set(this.stringKey(i), i);
+      } else {
+        this.selection.delete(this.stringKey(i));
+      }
+    }
+  }
+
+  isSelected(i: any): boolean {
+    return this.selection.has(this.stringKey(i));
+  }
+
+  isKeySelected(key: string): boolean {
+    return this.selection.has(key);
+  }
+
+  selectedKeys() {
+    const result = new Set();
+    for (const i of this.selection.keys()) {
+      result.add(i);
+    }
+    return result;
+  }
+
+  detachSelection() {
+    const result = this.selectedKeys();
+    this.clear();
+    return result;
+  }
+
+  [Symbol.iterator]() { return this.selection.values(); }
+}
diff --git a/src/v8/tools/turbolizer/src/sequence-view.ts b/src/v8/tools/turbolizer/src/sequence-view.ts
new file mode 100644
index 0000000..e7691c6
--- /dev/null
+++ b/src/v8/tools/turbolizer/src/sequence-view.ts
@@ -0,0 +1,251 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import { Sequence } from "../src/source-resolver";
+import { isIterable } from "../src/util";
+import { TextView } from "../src/text-view";
+
+export class SequenceView extends TextView {
+  sequence: Sequence;
+  searchInfo: Array<any>;
+
+  createViewElement() {
+    const pane = document.createElement('div');
+    pane.setAttribute('id', "sequence");
+    return pane;
+  }
+
+  constructor(parentId, broker) {
+    super(parentId, broker);
+  }
+
+  attachSelection(s) {
+    const view = this;
+    if (!(s instanceof Set)) return;
+    view.selectionHandler.clear();
+    view.blockSelectionHandler.clear();
+    const selected = new Array();
+    for (const key of s) selected.push(key);
+    view.selectionHandler.select(selected, true);
+  }
+
+  detachSelection() {
+    this.blockSelection.clear();
+    return this.selection.detachSelection();
+  }
+
+  initializeContent(data, rememberedSelection) {
+    this.divNode.innerHTML = '';
+    this.sequence = data.sequence;
+    this.searchInfo = [];
+    this.divNode.addEventListener('click', (e: MouseEvent) => {
+      if (!(e.target instanceof HTMLElement)) return;
+      const instructionId = Number.parseInt(e.target.dataset.instructionId, 10);
+      if (Number.isNaN(instructionId)) return;
+      if (!e.shiftKey) this.broker.broadcastClear(null);
+      this.broker.broadcastInstructionSelect(null, [instructionId], true);
+    });
+    this.addBlocks(this.sequence.blocks);
+    this.attachSelection(rememberedSelection);
+    this.show();
+  }
+
+  elementForBlock(block) {
+    const view = this;
+    function createElement(tag: string, cls: string | Array<string>, content?: string) {
+      const el = document.createElement(tag);
+      if (isIterable(cls)) {
+        for (const c of cls) el.classList.add(c);
+      } else {
+        el.classList.add(cls);
+      }
+      if (content != undefined) el.innerHTML = content;
+      return el;
+    }
+
+    function mkLinkHandler(id, handler) {
+      return function (e) {
+        e.stopPropagation();
+        if (!e.shiftKey) {
+          handler.clear();
+        }
+        handler.select(["" + id], true);
+      };
+    }
+
+    function mkBlockLinkHandler(blockId) {
+      return mkLinkHandler(blockId, view.blockSelectionHandler);
+    }
+
+    function mkOperandLinkHandler(text) {
+      return mkLinkHandler(text, view.selectionHandler);
+    }
+
+    function elementForOperand(operand, searchInfo) {
+      const text = operand.text;
+      const operandEl = createElement("div", ["parameter", "tag", "clickable", operand.type], text);
+      if (operand.tooltip) {
+        operandEl.setAttribute("title", operand.tooltip);
+      }
+      operandEl.onclick = mkOperandLinkHandler(text);
+      searchInfo.push(text);
+      view.addHtmlElementForNodeId(text, operandEl);
+      return operandEl;
+    }
+
+    function elementForInstruction(instruction, searchInfo) {
+      const instNodeEl = createElement("div", "instruction-node");
+
+      const instId = createElement("div", "instruction-id", instruction.id);
+      instId.classList.add("clickable");
+      instId.dataset.instructionId = instruction.id;
+      instNodeEl.appendChild(instId);
+
+      const instContentsEl = createElement("div", "instruction-contents");
+      instNodeEl.appendChild(instContentsEl);
+
+      // Print gap moves.
+      const gapEl = createElement("div", "gap", "gap");
+      let hasGaps = false;
+      for (const gap of instruction.gaps) {
+        const moves = createElement("div", ["comma-sep-list", "gap-move"]);
+        for (const move of gap) {
+          hasGaps = true;
+          const moveEl = createElement("div", "move");
+          const destinationEl = elementForOperand(move[0], searchInfo);
+          moveEl.appendChild(destinationEl);
+          const assignEl = createElement("div", "assign", "=");
+          moveEl.appendChild(assignEl);
+          const sourceEl = elementForOperand(move[1], searchInfo);
+          moveEl.appendChild(sourceEl);
+          moves.appendChild(moveEl);
+        }
+        gapEl.appendChild(moves);
+      }
+      if (hasGaps) {
+        instContentsEl.appendChild(gapEl);
+      }
+
+      const instEl = createElement("div", "instruction");
+      instContentsEl.appendChild(instEl);
+
+      if (instruction.outputs.length > 0) {
+        const outputs = createElement("div", ["comma-sep-list", "input-output-list"]);
+        for (const output of instruction.outputs) {
+          const outputEl = elementForOperand(output, searchInfo);
+          outputs.appendChild(outputEl);
+        }
+        instEl.appendChild(outputs);
+        const assignEl = createElement("div", "assign", "=");
+        instEl.appendChild(assignEl);
+      }
+
+      let text = instruction.opcode + instruction.flags;
+      const instLabel = createElement("div", "node-label", text);
+      if (instruction.opcode == "ArchNop" && instruction.outputs.length == 1 && instruction.outputs[0].tooltip) {
+        instLabel.innerText = instruction.outputs[0].tooltip;
+      }
+
+      searchInfo.push(text);
+      view.addHtmlElementForNodeId(text, instLabel);
+      instEl.appendChild(instLabel);
+
+      if (instruction.inputs.length > 0) {
+        const inputs = createElement("div", ["comma-sep-list", "input-output-list"]);
+        for (const input of instruction.inputs) {
+          const inputEl = elementForOperand(input, searchInfo);
+          inputs.appendChild(inputEl);
+        }
+        instEl.appendChild(inputs);
+      }
+
+      if (instruction.temps.length > 0) {
+        const temps = createElement("div", ["comma-sep-list", "input-output-list", "temps"]);
+        for (const temp of instruction.temps) {
+          const tempEl = elementForOperand(temp, searchInfo);
+          temps.appendChild(tempEl);
+        }
+        instEl.appendChild(temps);
+      }
+
+      return instNodeEl;
+    }
+
+    const sequenceBlock = createElement("div", "schedule-block");
+    sequenceBlock.classList.toggle("deferred", block.deferred);
+
+    const blockId = createElement("div", ["block-id", "com", "clickable"], block.id);
+    blockId.onclick = mkBlockLinkHandler(block.id);
+    sequenceBlock.appendChild(blockId);
+    const blockPred = createElement("div", ["predecessor-list", "block-list", "comma-sep-list"]);
+    for (const pred of block.predecessors) {
+      const predEl = createElement("div", ["block-id", "com", "clickable"], pred);
+      predEl.onclick = mkBlockLinkHandler(pred);
+      blockPred.appendChild(predEl);
+    }
+    if (block.predecessors.length > 0) sequenceBlock.appendChild(blockPred);
+    const phis = createElement("div", "phis");
+    sequenceBlock.appendChild(phis);
+
+    const phiLabel = createElement("div", "phi-label", "phi:");
+    phis.appendChild(phiLabel);
+
+    const phiContents = createElement("div", "phi-contents");
+    phis.appendChild(phiContents);
+
+    for (const phi of block.phis) {
+      const phiEl = createElement("div", "phi");
+      phiContents.appendChild(phiEl);
+
+      const outputEl = elementForOperand(phi.output, this.searchInfo);
+      phiEl.appendChild(outputEl);
+
+      const assignEl = createElement("div", "assign", "=");
+      phiEl.appendChild(assignEl);
+
+      for (const input of phi.operands) {
+        const inputEl = createElement("div", ["parameter", "tag", "clickable"], input);
+        phiEl.appendChild(inputEl);
+      }
+    }
+
+    const instructions = createElement("div", "instructions");
+    for (const instruction of block.instructions) {
+      instructions.appendChild(elementForInstruction(instruction, this.searchInfo));
+    }
+    sequenceBlock.appendChild(instructions);
+    const blockSucc = createElement("div", ["successor-list", "block-list", "comma-sep-list"]);
+    for (const succ of block.successors) {
+      const succEl = createElement("div", ["block-id", "com", "clickable"], succ);
+      succEl.onclick = mkBlockLinkHandler(succ);
+      blockSucc.appendChild(succEl);
+    }
+    if (block.successors.length > 0) sequenceBlock.appendChild(blockSucc);
+    this.addHtmlElementForBlockId(block.id, sequenceBlock);
+    return sequenceBlock;
+  }
+
+  addBlocks(blocks) {
+    for (const block of blocks) {
+      const blockEl = this.elementForBlock(block);
+      this.divNode.appendChild(blockEl);
+    }
+  }
+
+  searchInputAction(searchBar, e) {
+    e.stopPropagation();
+    this.selectionHandler.clear();
+    const query = searchBar.value;
+    if (query.length == 0) return;
+    const select = [];
+    window.sessionStorage.setItem("lastSearch", query);
+    const reg = new RegExp(query);
+    for (const item of this.searchInfo) {
+      if (reg.exec(item) != null) {
+        select.push(item);
+      }
+    }
+    this.selectionHandler.select(select, true);
+  }
+}
diff --git a/src/v8/tools/turbolizer/src/source-resolver.ts b/src/v8/tools/turbolizer/src/source-resolver.ts
new file mode 100644
index 0000000..67f9c08
--- /dev/null
+++ b/src/v8/tools/turbolizer/src/source-resolver.ts
@@ -0,0 +1,626 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import { sortUnique, anyToString } from "../src/util";
+import { NodeLabel } from "./node-label";
+
+function sourcePositionLe(a, b) {
+  if (a.inliningId == b.inliningId) {
+    return a.scriptOffset - b.scriptOffset;
+  }
+  return a.inliningId - b.inliningId;
+}
+
+function sourcePositionEq(a, b) {
+  return a.inliningId == b.inliningId &&
+    a.scriptOffset == b.scriptOffset;
+}
+
+export function sourcePositionToStringKey(sourcePosition: AnyPosition): string {
+  if (!sourcePosition) return "undefined";
+  if ('inliningId' in sourcePosition && 'scriptOffset' in sourcePosition) {
+    return "SP:" + sourcePosition.inliningId + ":" + sourcePosition.scriptOffset;
+  }
+  if (sourcePosition.bytecodePosition) {
+    return "BCP:" + sourcePosition.bytecodePosition;
+  }
+  return "undefined";
+}
+
+export function sourcePositionValid(l) {
+  return (typeof l.scriptOffset !== 'undefined'
+    && typeof l.inliningId !== 'undefined') || typeof l.bytecodePosition !== 'undefined';
+}
+
+export interface SourcePosition {
+  scriptOffset: number;
+  inliningId: number;
+}
+
+interface TurboFanOrigin {
+  phase: string;
+  reducer: string;
+}
+
+export interface NodeOrigin {
+  nodeId: number;
+}
+
+interface BytecodePosition {
+  bytecodePosition: number;
+}
+
+export type Origin = NodeOrigin | BytecodePosition;
+export type TurboFanNodeOrigin = NodeOrigin & TurboFanOrigin;
+export type TurboFanBytecodeOrigin = BytecodePosition & TurboFanOrigin;
+
+type AnyPosition = SourcePosition | BytecodePosition;
+
+export interface Source {
+  sourcePositions: Array<SourcePosition>;
+  sourceName: string;
+  functionName: string;
+  sourceText: string;
+  sourceId: number;
+  startPosition?: number;
+  backwardsCompatibility: boolean;
+}
+interface Inlining {
+  inliningPosition: SourcePosition;
+  sourceId: number;
+}
+interface OtherPhase {
+  type: "disassembly" | "sequence" | "schedule";
+  name: string;
+  data: any;
+}
+
+interface InstructionsPhase {
+  type: "instructions";
+  name: string;
+  data: any;
+  instructionOffsetToPCOffset?: any;
+  blockIdtoInstructionRange?: any;
+  nodeIdToInstructionRange?: any;
+}
+
+interface GraphPhase {
+  type: "graph";
+  name: string;
+  data: any;
+  highestNodeId: number;
+  nodeLabelMap: Array<NodeLabel>;
+}
+
+type Phase = GraphPhase | InstructionsPhase | OtherPhase;
+
+export interface Schedule {
+  nodes: Array<any>;
+}
+
+export interface Sequence {
+  blocks: Array<any>;
+}
+
+export class SourceResolver {
+  nodePositionMap: Array<AnyPosition>;
+  sources: Array<Source>;
+  inlinings: Array<Inlining>;
+  inliningsMap: Map<string, Inlining>;
+  positionToNodes: Map<string, Array<string>>;
+  phases: Array<Phase>;
+  phaseNames: Map<string, number>;
+  disassemblyPhase: Phase;
+  lineToSourcePositions: Map<string, Array<AnyPosition>>;
+  nodeIdToInstructionRange: Array<[number, number]>;
+  blockIdToInstructionRange: Array<[number, number]>;
+  instructionToPCOffset: Array<number>;
+  pcOffsetToInstructions: Map<number, Array<number>>;
+  pcOffsets: Array<number>;
+
+  constructor() {
+    // Maps node ids to source positions.
+    this.nodePositionMap = [];
+    // Maps source ids to source objects.
+    this.sources = [];
+    // Maps inlining ids to inlining objects.
+    this.inlinings = [];
+    // Maps source position keys to inlinings.
+    this.inliningsMap = new Map();
+    // Maps source position keys to node ids.
+    this.positionToNodes = new Map();
+    // Maps phase ids to phases.
+    this.phases = [];
+    // Maps phase names to phaseIds.
+    this.phaseNames = new Map();
+    // The disassembly phase is stored separately.
+    this.disassemblyPhase = undefined;
+    // Maps line numbers to source positions
+    this.lineToSourcePositions = new Map();
+    // Maps node ids to instruction ranges.
+    this.nodeIdToInstructionRange = [];
+    // Maps block ids to instruction ranges.
+    this.blockIdToInstructionRange = [];
+    // Maps instruction numbers to PC offsets.
+    this.instructionToPCOffset = [];
+    // Maps PC offsets to instructions.
+    this.pcOffsetToInstructions = new Map();
+    this.pcOffsets = [];
+  }
+
+  setSources(sources, mainBackup) {
+    if (sources) {
+      for (const [sourceId, source] of Object.entries(sources)) {
+        this.sources[sourceId] = source;
+        this.sources[sourceId].sourcePositions = [];
+      }
+    }
+    // This is a fallback if the JSON is incomplete (e.g. due to compiler crash).
+    if (!this.sources[-1]) {
+      this.sources[-1] = mainBackup;
+      this.sources[-1].sourcePositions = [];
+    }
+  }
+
+  setInlinings(inlinings) {
+    if (inlinings) {
+      for (const [inliningId, inlining] of Object.entries<Inlining>(inlinings)) {
+        this.inlinings[inliningId] = inlining;
+        this.inliningsMap.set(sourcePositionToStringKey(inlining.inliningPosition), inlining);
+      }
+    }
+    // This is a default entry for the script itself that helps
+    // keep other code more uniform.
+    this.inlinings[-1] = { sourceId: -1, inliningPosition: null };
+  }
+
+  setNodePositionMap(map) {
+    if (!map) return;
+    if (typeof map[0] != 'object') {
+      const alternativeMap = {};
+      for (const [nodeId, scriptOffset] of Object.entries<number>(map)) {
+        alternativeMap[nodeId] = { scriptOffset: scriptOffset, inliningId: -1 };
+      }
+      map = alternativeMap;
+    }
+
+    for (const [nodeId, sourcePosition] of Object.entries<SourcePosition>(map)) {
+      if (sourcePosition == undefined) {
+        console.log("Warning: undefined source position ", sourcePosition, " for nodeId ", nodeId);
+      }
+      const inliningId = sourcePosition.inliningId;
+      const inlining = this.inlinings[inliningId];
+      if (inlining) {
+        const sourceId = inlining.sourceId;
+        this.sources[sourceId].sourcePositions.push(sourcePosition);
+      }
+      this.nodePositionMap[nodeId] = sourcePosition;
+      const key = sourcePositionToStringKey(sourcePosition);
+      if (!this.positionToNodes.has(key)) {
+        this.positionToNodes.set(key, []);
+      }
+      this.positionToNodes.get(key).push(nodeId);
+    }
+    for (const [, source] of Object.entries(this.sources)) {
+      source.sourcePositions = sortUnique(source.sourcePositions,
+        sourcePositionLe, sourcePositionEq);
+    }
+  }
+
+  sourcePositionsToNodeIds(sourcePositions) {
+    const nodeIds = new Set();
+    for (const sp of sourcePositions) {
+      const key = sourcePositionToStringKey(sp);
+      const nodeIdsForPosition = this.positionToNodes.get(key);
+      if (!nodeIdsForPosition) continue;
+      for (const nodeId of nodeIdsForPosition) {
+        nodeIds.add(nodeId);
+      }
+    }
+    return nodeIds;
+  }
+
+  nodeIdsToSourcePositions(nodeIds): Array<AnyPosition> {
+    const sourcePositions = new Map();
+    for (const nodeId of nodeIds) {
+      const sp = this.nodePositionMap[nodeId];
+      const key = sourcePositionToStringKey(sp);
+      sourcePositions.set(key, sp);
+    }
+    const sourcePositionArray = [];
+    for (const sp of sourcePositions.values()) {
+      sourcePositionArray.push(sp);
+    }
+    return sourcePositionArray;
+  }
+
+  forEachSource(f: (value: Source, index: number, array: Array<Source>) => void) {
+    this.sources.forEach(f);
+  }
+
+  translateToSourceId(sourceId: number, location?: SourcePosition) {
+    for (const position of this.getInlineStack(location)) {
+      const inlining = this.inlinings[position.inliningId];
+      if (!inlining) continue;
+      if (inlining.sourceId == sourceId) {
+        return position;
+      }
+    }
+    return location;
+  }
+
+  addInliningPositions(sourcePosition: AnyPosition, locations: Array<SourcePosition>) {
+    const inlining = this.inliningsMap.get(sourcePositionToStringKey(sourcePosition));
+    if (!inlining) return;
+    const sourceId = inlining.sourceId;
+    const source = this.sources[sourceId];
+    for (const sp of source.sourcePositions) {
+      locations.push(sp);
+      this.addInliningPositions(sp, locations);
+    }
+  }
+
+  getInliningForPosition(sourcePosition: AnyPosition) {
+    return this.inliningsMap.get(sourcePositionToStringKey(sourcePosition));
+  }
+
+  getSource(sourceId: number) {
+    return this.sources[sourceId];
+  }
+
+  getSourceName(sourceId: number) {
+    const source = this.sources[sourceId];
+    return `${source.sourceName}:${source.functionName}`;
+  }
+
+  sourcePositionFor(sourceId: number, scriptOffset: number) {
+    if (!this.sources[sourceId]) {
+      return null;
+    }
+    const list = this.sources[sourceId].sourcePositions;
+    for (let i = 0; i < list.length; i++) {
+      const sourcePosition = list[i];
+      const position = sourcePosition.scriptOffset;
+      const nextPosition = list[Math.min(i + 1, list.length - 1)].scriptOffset;
+      if ((position <= scriptOffset && scriptOffset < nextPosition)) {
+        return sourcePosition;
+      }
+    }
+    return null;
+  }
+
+  sourcePositionsInRange(sourceId: number, start: number, end: number) {
+    if (!this.sources[sourceId]) return [];
+    const res = [];
+    const list = this.sources[sourceId].sourcePositions;
+    for (const sourcePosition of list) {
+      if (start <= sourcePosition.scriptOffset && sourcePosition.scriptOffset < end) {
+        res.push(sourcePosition);
+      }
+    }
+    return res;
+  }
+
+  getInlineStack(sourcePosition?: SourcePosition) {
+    if (!sourcePosition) return [];
+
+    const inliningStack = [];
+    let cur = sourcePosition;
+    while (cur && cur.inliningId != -1) {
+      inliningStack.push(cur);
+      const inlining = this.inlinings[cur.inliningId];
+      if (!inlining) {
+        break;
+      }
+      cur = inlining.inliningPosition;
+    }
+    if (cur && cur.inliningId == -1) {
+      inliningStack.push(cur);
+    }
+    return inliningStack;
+  }
+
+  recordOrigins(phase: GraphPhase) {
+    if (phase.type != "graph") return;
+    for (const node of phase.data.nodes) {
+      phase.highestNodeId = Math.max(phase.highestNodeId, node.id);
+      if (node.origin != undefined &&
+        node.origin.bytecodePosition != undefined) {
+        const position = { bytecodePosition: node.origin.bytecodePosition };
+        this.nodePositionMap[node.id] = position;
+        const key = sourcePositionToStringKey(position);
+        if (!this.positionToNodes.has(key)) {
+          this.positionToNodes.set(key, []);
+        }
+        const A = this.positionToNodes.get(key);
+        if (!A.includes(node.id)) A.push(`${node.id}`);
+      }
+
+      // Backwards compatibility.
+      if (typeof node.pos === "number") {
+        node.sourcePosition = { scriptOffset: node.pos, inliningId: -1 };
+      }
+    }
+  }
+
+  readNodeIdToInstructionRange(nodeIdToInstructionRange) {
+    for (const [nodeId, range] of Object.entries<[number, number]>(nodeIdToInstructionRange)) {
+      this.nodeIdToInstructionRange[nodeId] = range;
+    }
+  }
+
+  readBlockIdToInstructionRange(blockIdToInstructionRange) {
+    for (const [blockId, range] of Object.entries<[number, number]>(blockIdToInstructionRange)) {
+      this.blockIdToInstructionRange[blockId] = range;
+    }
+  }
+
+  getInstruction(nodeId: number): [number, number] {
+    const X = this.nodeIdToInstructionRange[nodeId];
+    if (X === undefined) return [-1, -1];
+    return X;
+  }
+
+  getInstructionRangeForBlock(blockId: number): [number, number] {
+    const X = this.blockIdToInstructionRange[blockId];
+    if (X === undefined) return [-1, -1];
+    return X;
+  }
+
+  readInstructionOffsetToPCOffset(instructionToPCOffset) {
+    for (const [instruction, offset] of Object.entries<number>(instructionToPCOffset)) {
+      this.instructionToPCOffset[instruction] = offset;
+      if (!this.pcOffsetToInstructions.has(offset)) {
+        this.pcOffsetToInstructions.set(offset, []);
+      }
+      this.pcOffsetToInstructions.get(offset).push(Number(instruction));
+    }
+    this.pcOffsets = Array.from(this.pcOffsetToInstructions.keys()).sort((a, b) => b - a);
+  }
+
+  hasPCOffsets() {
+    return this.pcOffsetToInstructions.size > 0;
+  }
+
+  getKeyPcOffset(offset: number): number {
+    if (this.pcOffsets.length === 0) return -1;
+    for (const key of this.pcOffsets) {
+      if (key <= offset) {
+        return key;
+      }
+    }
+    return -1;
+  }
+
+  instructionRangeToKeyPcOffsets([start, end]: [number, number]) {
+    if (start == end) return [this.instructionToPCOffset[start]];
+    return this.instructionToPCOffset.slice(start, end);
+  }
+
+  instructionsToKeyPcOffsets(instructionIds: Iterable<number>) {
+    const keyPcOffsets = [];
+    for (const instructionId of instructionIds) {
+      keyPcOffsets.push(this.instructionToPCOffset[instructionId]);
+    }
+    return keyPcOffsets;
+  }
+
+  nodesToKeyPcOffsets(nodes) {
+    let offsets = [];
+    for (const node of nodes) {
+      const range = this.nodeIdToInstructionRange[node];
+      if (!range) continue;
+      offsets = offsets.concat(this.instructionRangeToKeyPcOffsets(range));
+    }
+    return offsets;
+  }
+
+  nodesForPCOffset(offset: number): [Array<string>, Array<string>] {
+    if (this.pcOffsets.length === 0) return [[], []];
+    for (const key of this.pcOffsets) {
+      if (key <= offset) {
+        const instrs = this.pcOffsetToInstructions.get(key);
+        const nodes = [];
+        const blocks = [];
+        for (const instr of instrs) {
+          for (const [nodeId, range] of this.nodeIdToInstructionRange.entries()) {
+            if (!range) continue;
+            const [start, end] = range;
+            if (start == end && instr == start) {
+              nodes.push("" + nodeId);
+            }
+            if (start <= instr && instr < end) {
+              nodes.push("" + nodeId);
+            }
+          }
+        }
+        return [nodes, blocks];
+      }
+    }
+    return [[], []];
+  }
+
+  parsePhases(phases) {
+    const nodeLabelMap = [];
+    for (const [, phase] of Object.entries<Phase>(phases)) {
+      switch (phase.type) {
+        case 'disassembly':
+          this.disassemblyPhase = phase;
+          break;
+        case 'schedule':
+          this.phaseNames.set(phase.name, this.phases.length);
+          this.phases.push(this.parseSchedule(phase));
+          break;
+        case 'sequence':
+          this.phaseNames.set(phase.name, this.phases.length);
+          this.phases.push(this.parseSequence(phase));
+          break;
+        case 'instructions':
+          if (phase.nodeIdToInstructionRange) {
+            this.readNodeIdToInstructionRange(phase.nodeIdToInstructionRange);
+          }
+          if (phase.blockIdtoInstructionRange) {
+            this.readBlockIdToInstructionRange(phase.blockIdtoInstructionRange);
+          }
+          if (phase.instructionOffsetToPCOffset) {
+            this.readInstructionOffsetToPCOffset(phase.instructionOffsetToPCOffset);
+          }
+          break;
+        case 'graph':
+          const graphPhase: GraphPhase = Object.assign(phase, { highestNodeId: 0 });
+          this.phaseNames.set(graphPhase.name, this.phases.length);
+          this.phases.push(graphPhase);
+          this.recordOrigins(graphPhase);
+          this.internNodeLabels(graphPhase, nodeLabelMap);
+          graphPhase.nodeLabelMap = nodeLabelMap.slice();
+          break;
+        default:
+          throw "Unsupported phase type";
+      }
+    }
+  }
+
+  internNodeLabels(phase: GraphPhase, nodeLabelMap: Array<NodeLabel>) {
+    for (const n of phase.data.nodes) {
+      const label = new NodeLabel(n.id, n.label, n.title, n.live,
+        n.properties, n.sourcePosition, n.origin, n.opcode, n.control,
+        n.opinfo, n.type);
+      const previous = nodeLabelMap[label.id];
+      if (!label.equals(previous)) {
+        if (previous != undefined) {
+          label.setInplaceUpdatePhase(phase.name);
+        }
+        nodeLabelMap[label.id] = label;
+      }
+      n.nodeLabel = nodeLabelMap[label.id];
+    }
+  }
+
+  repairPhaseId(anyPhaseId) {
+    return Math.max(0, Math.min(anyPhaseId | 0, this.phases.length - 1));
+  }
+
+  getPhase(phaseId: number) {
+    return this.phases[phaseId];
+  }
+
+  getPhaseIdByName(phaseName: string) {
+    return this.phaseNames.get(phaseName);
+  }
+
+  forEachPhase(f: (value: Phase, index: number, array: Array<Phase>) => void) {
+    this.phases.forEach(f);
+  }
+
+  addAnyPositionToLine(lineNumber: number | string, sourcePosition: AnyPosition) {
+    const lineNumberString = anyToString(lineNumber);
+    if (!this.lineToSourcePositions.has(lineNumberString)) {
+      this.lineToSourcePositions.set(lineNumberString, []);
+    }
+    const A = this.lineToSourcePositions.get(lineNumberString);
+    if (!A.includes(sourcePosition)) A.push(sourcePosition);
+  }
+
+  setSourceLineToBytecodePosition(sourceLineToBytecodePosition: Array<number> | undefined) {
+    if (!sourceLineToBytecodePosition) return;
+    sourceLineToBytecodePosition.forEach((pos, i) => {
+      this.addAnyPositionToLine(i, { bytecodePosition: pos });
+    });
+  }
+
+  linetoSourcePositions(lineNumber: number | string) {
+    const positions = this.lineToSourcePositions.get(anyToString(lineNumber));
+    if (positions === undefined) return [];
+    return positions;
+  }
+
+  parseSchedule(phase) {
+    function createNode(state: any, match) {
+      let inputs = [];
+      if (match.groups.args) {
+        const nodeIdsString = match.groups.args.replace(/\s/g, '');
+        const nodeIdStrings = nodeIdsString.split(',');
+        inputs = nodeIdStrings.map(n => Number.parseInt(n, 10));
+      }
+      const node = {
+        id: Number.parseInt(match.groups.id, 10),
+        label: match.groups.label,
+        inputs: inputs
+      };
+      if (match.groups.blocks) {
+        const nodeIdsString = match.groups.blocks.replace(/\s/g, '').replace(/B/g, '');
+        const nodeIdStrings = nodeIdsString.split(',');
+        const successors = nodeIdStrings.map(n => Number.parseInt(n, 10));
+        state.currentBlock.succ = successors;
+      }
+      state.nodes[node.id] = node;
+      state.currentBlock.nodes.push(node);
+    }
+    function createBlock(state, match) {
+      let predecessors = [];
+      if (match.groups.in) {
+        const blockIdsString = match.groups.in.replace(/\s/g, '').replace(/B/g, '');
+        const blockIdStrings = blockIdsString.split(',');
+        predecessors = blockIdStrings.map(n => Number.parseInt(n, 10));
+      }
+      const block = {
+        id: Number.parseInt(match.groups.id, 10),
+        isDeferred: match.groups.deferred != undefined,
+        pred: predecessors.sort(),
+        succ: [],
+        nodes: []
+      };
+      state.blocks[block.id] = block;
+      state.currentBlock = block;
+    }
+    function setGotoSuccessor(state, match) {
+      state.currentBlock.succ = [Number.parseInt(match.groups.successor.replace(/\s/g, ''), 10)];
+    }
+    const rules = [
+      {
+        lineRegexps:
+          [/^\s*(?<id>\d+):\ (?<label>.*)\((?<args>.*)\)$/,
+            /^\s*(?<id>\d+):\ (?<label>.*)\((?<args>.*)\)\ ->\ (?<blocks>.*)$/,
+            /^\s*(?<id>\d+):\ (?<label>.*)$/
+          ],
+        process: createNode
+      },
+      {
+        lineRegexps:
+          [/^\s*---\s*BLOCK\ B(?<id>\d+)\s*(?<deferred>\(deferred\))?(\ <-\ )?(?<in>[^-]*)?\ ---$/
+          ],
+        process: createBlock
+      },
+      {
+        lineRegexps:
+          [/^\s*Goto\s*->\s*B(?<successor>\d+)\s*$/
+          ],
+        process: setGotoSuccessor
+      }
+    ];
+
+    const lines = phase.data.split(/[\n]/);
+    const state = { currentBlock: undefined, blocks: [], nodes: [] };
+
+    nextLine:
+    for (const line of lines) {
+      for (const rule of rules) {
+        for (const lineRegexp of rule.lineRegexps) {
+          const match = line.match(lineRegexp);
+          if (match) {
+            rule.process(state, match);
+            continue nextLine;
+          }
+        }
+      }
+      console.log("Warning: unmatched schedule line \"" + line + "\"");
+    }
+    phase.schedule = state;
+    return phase;
+  }
+  parseSequence(phase) {
+    phase.sequence = { blocks: phase.blocks };
+    return phase;
+  }
+}
diff --git a/src/v8/tools/turbolizer/src/tabs.ts b/src/v8/tools/turbolizer/src/tabs.ts
new file mode 100644
index 0000000..0416b9e
--- /dev/null
+++ b/src/v8/tools/turbolizer/src/tabs.ts
@@ -0,0 +1,114 @@
+
+export class Tabs {
+  private container: HTMLElement;
+  private tabBar: HTMLElement;
+  private nextTabId: number;
+
+  private mkTabBar(container: HTMLElement) {
+    container.classList.add("nav-tabs-container");
+    this.tabBar = document.createElement("ul");
+    this.tabBar.id = `tab-bar-${container.id}`;
+    this.tabBar.className = "nav-tabs";
+    this.tabBar.ondrop = this.tabBarOnDrop.bind(this);
+    this.tabBar.ondragover = this.tabBarOnDragover.bind(this);
+    this.tabBar.onclick = this.tabBarOnClick.bind(this);
+
+    const defaultDiv = document.createElement("div");
+    defaultDiv.className = "tab-content tab-default";
+    defaultDiv.id = `tab-content-${container.id}-default`;
+    container.insertBefore(defaultDiv, container.firstChild);
+    container.insertBefore(this.tabBar, container.firstChild);
+  }
+
+  constructor(container: HTMLElement) {
+    this.container = container;
+    this.nextTabId = 0;
+    this.mkTabBar(container);
+  }
+
+  activateTab(tab: HTMLLIElement) {
+    if (typeof tab.dataset.divid !== "string") return;
+    for (const li of this.tabBar.querySelectorAll<HTMLLIElement>("li.active")) {
+      li.classList.remove("active");
+      this.showTab(li, false);
+    }
+    tab.classList.add("active");
+    this.showTab(tab, true);
+  }
+
+  clearTabsAndContent() {
+    for (const tab of this.tabBar.querySelectorAll(".nav-tabs > li")) {
+        if (!(tab instanceof HTMLLIElement)) continue;
+        if (tab.classList.contains("persistent-tab")) continue;
+        const tabDiv = document.getElementById(tab.dataset.divid);
+        tabDiv.parentNode.removeChild(tabDiv);
+        tab.parentNode.removeChild(tab);
+    }
+  }
+
+  private showTab(li: HTMLElement, show: boolean = true) {
+    const tabDiv = document.getElementById(li.dataset.divid);
+    tabDiv.style.display = show ? "block" : "none";
+  }
+
+  public addTab(caption: string): HTMLLIElement {
+    const newTab = document.createElement("li");
+    newTab.innerHTML = caption;
+    newTab.id = `tab-header-${this.container.id}-${this.nextTabId++}`;
+    const lastTab = this.tabBar.querySelector("li.last-tab");
+    this.tabBar.insertBefore(newTab, lastTab);
+    return newTab;
+  }
+
+  public addTabAndContent(caption: string): [HTMLLIElement, HTMLDivElement] {
+    const contentDiv = document.createElement("div");
+    contentDiv.className = "tab-content tab-default";
+    contentDiv.id = `tab-content-${this.container.id}-${this.nextTabId++}`;
+    contentDiv.style.display = "none";
+    this.container.appendChild(contentDiv);
+
+    const newTab = this.addTab(caption);
+    newTab.dataset.divid = contentDiv.id;
+    newTab.draggable = true;
+    newTab.ondragstart = this.tabOnDragStart.bind(this);
+    const lastTab = this.tabBar.querySelector("li.last-tab");
+    this.tabBar.insertBefore(newTab, lastTab);
+    return [newTab, contentDiv];
+  }
+
+  private moveTabDiv(tab: HTMLLIElement) {
+    const tabDiv = document.getElementById(tab.dataset.divid);
+    tabDiv.style.display = "none";
+    tab.classList.remove("active");
+    this.tabBar.parentNode.appendChild(tabDiv);
+  }
+
+  private tabBarOnDrop(e: DragEvent) {
+    if (!(e.target instanceof HTMLElement)) return;
+    e.preventDefault();
+    const tabId = e.dataTransfer.getData("text");
+    const tab = document.getElementById(tabId) as HTMLLIElement;
+    if (tab.parentNode != this.tabBar) {
+      this.moveTabDiv(tab);
+    }
+    const dropTab =
+      e.target.parentNode == this.tabBar
+        ? e.target : this.tabBar.querySelector("li.last-tab");
+    this.tabBar.insertBefore(tab, dropTab);
+    this.activateTab(tab);
+  }
+
+  private tabBarOnDragover(e) {
+    e.preventDefault();
+  }
+
+  private tabOnDragStart(e: DragEvent) {
+    if (!(e.target instanceof HTMLElement)) return;
+    e.dataTransfer.setData("text", e.target.id);
+  }
+
+  private tabBarOnClick(e: MouseEvent) {
+    const li = e.target as HTMLLIElement;
+    this.activateTab(li);
+  }
+}
diff --git a/src/v8/tools/turbolizer/src/text-view.ts b/src/v8/tools/turbolizer/src/text-view.ts
new file mode 100644
index 0000000..41a06ea
--- /dev/null
+++ b/src/v8/tools/turbolizer/src/text-view.ts
@@ -0,0 +1,252 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import { PhaseView } from "../src/view";
+import { anyToString, ViewElements, isIterable } from "../src/util";
+import { MySelection } from "../src/selection";
+import { SourceResolver } from "./source-resolver";
+import { SelectionBroker } from "./selection-broker";
+import { NodeSelectionHandler, BlockSelectionHandler } from "./selection-handler";
+
+export abstract class TextView extends PhaseView {
+  selectionHandler: NodeSelectionHandler;
+  blockSelectionHandler: BlockSelectionHandler;
+  selection: MySelection;
+  blockSelection: MySelection;
+  textListNode: HTMLUListElement;
+  nodeIdToHtmlElementsMap: Map<string, Array<HTMLElement>>;
+  blockIdToHtmlElementsMap: Map<string, Array<HTMLElement>>;
+  blockIdtoNodeIds: Map<string, Array<string>>;
+  nodeIdToBlockId: Array<string>;
+  patterns: any;
+  sourceResolver: SourceResolver;
+  broker: SelectionBroker;
+
+  constructor(id, broker) {
+    super(id);
+    const view = this;
+    view.textListNode = view.divNode.getElementsByTagName('ul')[0];
+    view.patterns = null;
+    view.nodeIdToHtmlElementsMap = new Map();
+    view.blockIdToHtmlElementsMap = new Map();
+    view.blockIdtoNodeIds = new Map();
+    view.nodeIdToBlockId = [];
+    view.selection = new MySelection(anyToString);
+    view.blockSelection = new MySelection(anyToString);
+    view.broker = broker;
+    view.sourceResolver = broker.sourceResolver;
+    const selectionHandler = {
+      clear: function () {
+        view.selection.clear();
+        view.updateSelection();
+        broker.broadcastClear(selectionHandler);
+      },
+      select: function (nodeIds, selected) {
+        view.selection.select(nodeIds, selected);
+        view.updateSelection();
+        broker.broadcastNodeSelect(selectionHandler, view.selection.selectedKeys(), selected);
+      },
+      brokeredNodeSelect: function (nodeIds, selected) {
+        const firstSelect = view.blockSelection.isEmpty();
+        view.selection.select(nodeIds, selected);
+        view.updateSelection(firstSelect);
+      },
+      brokeredClear: function () {
+        view.selection.clear();
+        view.updateSelection();
+      }
+    };
+    this.selectionHandler = selectionHandler;
+    broker.addNodeHandler(selectionHandler);
+    view.divNode.addEventListener('click', e => {
+      if (!e.shiftKey) {
+        view.selectionHandler.clear();
+      }
+      e.stopPropagation();
+    });
+    const blockSelectionHandler = {
+      clear: function () {
+        view.blockSelection.clear();
+        view.updateSelection();
+        broker.broadcastClear(blockSelectionHandler);
+      },
+      select: function (blockIds, selected) {
+        view.blockSelection.select(blockIds, selected);
+        view.updateSelection();
+        broker.broadcastBlockSelect(blockSelectionHandler, blockIds, selected);
+      },
+      brokeredBlockSelect: function (blockIds, selected) {
+        const firstSelect = view.blockSelection.isEmpty();
+        view.blockSelection.select(blockIds, selected);
+        view.updateSelection(firstSelect);
+      },
+      brokeredClear: function () {
+        view.blockSelection.clear();
+        view.updateSelection();
+      }
+    };
+    this.blockSelectionHandler = blockSelectionHandler;
+    broker.addBlockHandler(blockSelectionHandler);
+  }
+
+  addHtmlElementForNodeId(anyNodeId: any, htmlElement: HTMLElement) {
+    const nodeId = anyToString(anyNodeId);
+    if (!this.nodeIdToHtmlElementsMap.has(nodeId)) {
+      this.nodeIdToHtmlElementsMap.set(nodeId, []);
+    }
+    this.nodeIdToHtmlElementsMap.get(nodeId).push(htmlElement);
+  }
+
+  addHtmlElementForBlockId(anyBlockId, htmlElement) {
+    const blockId = anyToString(anyBlockId);
+    if (!this.blockIdToHtmlElementsMap.has(blockId)) {
+      this.blockIdToHtmlElementsMap.set(blockId, []);
+    }
+    this.blockIdToHtmlElementsMap.get(blockId).push(htmlElement);
+  }
+
+  addNodeIdToBlockId(anyNodeId, anyBlockId) {
+    const blockId = anyToString(anyBlockId);
+    if (!this.blockIdtoNodeIds.has(blockId)) {
+      this.blockIdtoNodeIds.set(blockId, []);
+    }
+    this.blockIdtoNodeIds.get(blockId).push(anyToString(anyNodeId));
+    this.nodeIdToBlockId[anyNodeId] = blockId;
+  }
+
+  blockIdsForNodeIds(nodeIds) {
+    const blockIds = [];
+    for (const nodeId of nodeIds) {
+      const blockId = this.nodeIdToBlockId[nodeId];
+      if (blockId == undefined) continue;
+      blockIds.push(blockId);
+    }
+    return blockIds;
+  }
+
+  updateSelection(scrollIntoView: boolean = false) {
+    if (this.divNode.parentNode == null) return;
+    const mkVisible = new ViewElements(this.divNode.parentNode as HTMLElement);
+    const view = this;
+    for (const [blockId, elements] of this.blockIdToHtmlElementsMap.entries()) {
+      const isSelected = view.blockSelection.isSelected(blockId);
+      for (const element of elements) {
+        mkVisible.consider(element, isSelected);
+        element.classList.toggle("selected", isSelected);
+      }
+    }
+    const elementsToSelect = view.divNode.querySelectorAll(`[data-pc-offset]`);
+    for (const el of elementsToSelect) {
+      el.classList.toggle("selected", false);
+    }
+    for (const key of this.nodeIdToHtmlElementsMap.keys()) {
+      for (const element of this.nodeIdToHtmlElementsMap.get(key)) {
+        element.classList.toggle("selected", false);
+      }
+    }
+    for (const nodeId of view.selection.selectedKeys()) {
+      const elements = this.nodeIdToHtmlElementsMap.get(nodeId);
+      if (!elements) continue;
+      for (const element of elements) {
+        mkVisible.consider(element, true);
+        element.classList.toggle("selected", true);
+      }
+    }
+    mkVisible.apply(scrollIntoView);
+  }
+
+  setPatterns(patterns) {
+    this.patterns = patterns;
+  }
+
+  clearText() {
+    while (this.textListNode.firstChild) {
+      this.textListNode.removeChild(this.textListNode.firstChild);
+    }
+  }
+
+  createFragment(text, style) {
+    const fragment = document.createElement("SPAN");
+
+    if (typeof style.associateData == 'function') {
+      style.associateData(text, fragment);
+    } else {
+      if (style.css != undefined) {
+        const css = isIterable(style.css) ? style.css : [style.css];
+        for (const cls of css) {
+          fragment.classList.add(cls);
+        }
+      }
+      fragment.innerText = text;
+    }
+
+    return fragment;
+  }
+
+  processLine(line) {
+    const view = this;
+    const result = [];
+    let patternSet = 0;
+    while (true) {
+      const beforeLine = line;
+      for (const pattern of view.patterns[patternSet]) {
+        const matches = line.match(pattern[0]);
+        if (matches != null) {
+          if (matches[0] != '') {
+            const style = pattern[1] != null ? pattern[1] : {};
+            const text = matches[0];
+            if (text != '') {
+              const fragment = view.createFragment(matches[0], style);
+              result.push(fragment);
+            }
+            line = line.substr(matches[0].length);
+          }
+          let nextPatternSet = patternSet;
+          if (pattern.length > 2) {
+            nextPatternSet = pattern[2];
+          }
+          if (line == "") {
+            if (nextPatternSet != -1) {
+              throw ("illegal parsing state in text-view in patternSet" + patternSet);
+            }
+            return result;
+          }
+          patternSet = nextPatternSet;
+          break;
+        }
+      }
+      if (beforeLine == line) {
+        throw ("input not consumed in text-view in patternSet" + patternSet);
+      }
+    }
+  }
+
+  processText(text) {
+    const view = this;
+    const textLines = text.split(/[\n]/);
+    let lineNo = 0;
+    for (const line of textLines) {
+      const li = document.createElement("LI");
+      li.className = "nolinenums";
+      li.dataset.lineNo = "" + lineNo++;
+      const fragments = view.processLine(line);
+      for (const fragment of fragments) {
+        li.appendChild(fragment);
+      }
+      view.textListNode.appendChild(li);
+    }
+  }
+
+  initializeContent(data, rememberedSelection) {
+    this.clearText();
+    this.processText(data);
+    this.show();
+  }
+
+  public onresize(): void {}
+
+  isScrollable() {
+    return true;
+  }
+}
diff --git a/src/v8/tools/turbolizer/src/turbo-visualizer.ts b/src/v8/tools/turbolizer/src/turbo-visualizer.ts
new file mode 100644
index 0000000..87924b7
--- /dev/null
+++ b/src/v8/tools/turbolizer/src/turbo-visualizer.ts
@@ -0,0 +1,148 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import { SourceResolver } from "../src/source-resolver";
+import { SelectionBroker } from "../src/selection-broker";
+import { DisassemblyView } from "../src/disassembly-view";
+import { GraphMultiView } from "../src/graphmultiview";
+import { CodeMode, CodeView } from "../src/code-view";
+import { Tabs } from "../src/tabs";
+import { Resizer } from "../src/resizer";
+import * as C from "../src/constants";
+import { InfoView } from "./info-view";
+
+window.onload = function () {
+  let multiview: GraphMultiView = null;
+  let disassemblyView: DisassemblyView = null;
+  let sourceViews: Array<CodeView> = [];
+  let selectionBroker: SelectionBroker = null;
+  let sourceResolver: SourceResolver = null;
+  const resizer = new Resizer(panesUpdatedCallback, 100);
+  const sourceTabsContainer = document.getElementById(C.SOURCE_PANE_ID);
+  const sourceTabs = new Tabs(sourceTabsContainer);
+  sourceTabs.addTab("&#x2b;").classList.add("last-tab", "persistent-tab");
+  const disassemblyTabsContainer = document.getElementById(C.GENERATED_PANE_ID);
+  const disassemblyTabs = new Tabs(disassemblyTabsContainer);
+  disassemblyTabs.addTab("&#x2b;").classList.add("last-tab", "persistent-tab");
+  const [infoTab, infoContainer] = sourceTabs.addTabAndContent("Info");
+  infoTab.classList.add("persistent-tab");
+  infoContainer.classList.add("viewpane", "scrollable");
+  const infoView = new InfoView(infoContainer);
+  infoView.show();
+  sourceTabs.activateTab(infoTab);
+
+  function panesUpdatedCallback() {
+    if (multiview) multiview.onresize();
+  }
+
+  function loadFile(txtRes: string) {
+    sourceTabs.clearTabsAndContent();
+    disassemblyTabs.clearTabsAndContent();
+    // If the JSON isn't properly terminated, assume compiler crashed and
+    // add best-guess empty termination
+    if (txtRes[txtRes.length - 2] == ',') {
+      txtRes += '{"name":"disassembly","type":"disassembly","data":""}]}';
+    }
+    try {
+      sourceViews.forEach(sv => sv.hide());
+      if (multiview) multiview.hide();
+      multiview = null;
+      if (disassemblyView) disassemblyView.hide();
+      sourceViews = [];
+      sourceResolver = new SourceResolver();
+      selectionBroker = new SelectionBroker(sourceResolver);
+
+      const jsonObj = JSON.parse(txtRes);
+
+      let fnc = null;
+      // Backwards compatibility.
+      if (typeof jsonObj.function == 'string') {
+        fnc = {
+          functionName: jsonObj.function,
+          sourceId: -1,
+          startPosition: jsonObj.sourcePosition,
+          endPosition: jsonObj.sourcePosition + jsonObj.source.length,
+          sourceText: jsonObj.source,
+          backwardsCompatibility: true
+        };
+      } else {
+        fnc = Object.assign(jsonObj.function, { backwardsCompatibility: false });
+      }
+
+      sourceResolver.setInlinings(jsonObj.inlinings);
+      sourceResolver.setSourceLineToBytecodePosition(jsonObj.sourceLineToBytecodePosition);
+      sourceResolver.setSources(jsonObj.sources, fnc);
+      sourceResolver.setNodePositionMap(jsonObj.nodePositions);
+      sourceResolver.parsePhases(jsonObj.phases);
+
+      const [sourceTab, sourceContainer] = sourceTabs.addTabAndContent("Source");
+      sourceContainer.classList.add("viewpane", "scrollable");
+      sourceTabs.activateTab(sourceTab);
+      const sourceView = new CodeView(sourceContainer, selectionBroker, sourceResolver, fnc, CodeMode.MAIN_SOURCE);
+      sourceView.show();
+      sourceViews.push(sourceView);
+
+      sourceResolver.forEachSource(source => {
+        const sourceView = new CodeView(sourceContainer, selectionBroker, sourceResolver, source, CodeMode.INLINED_SOURCE);
+        sourceView.show();
+        sourceViews.push(sourceView);
+      });
+
+      const [disassemblyTab, disassemblyContainer] = disassemblyTabs.addTabAndContent("Disassembly");
+      disassemblyContainer.classList.add("viewpane", "scrollable");
+      disassemblyTabs.activateTab(disassemblyTab);
+      disassemblyView = new DisassemblyView(disassemblyContainer, selectionBroker);
+      disassemblyView.initializeCode(fnc.sourceText);
+      if (sourceResolver.disassemblyPhase) {
+        disassemblyView.initializePerfProfile(jsonObj.eventCounts);
+        disassemblyView.showContent(sourceResolver.disassemblyPhase.data);
+        disassemblyView.show();
+      }
+
+      multiview = new GraphMultiView(C.INTERMEDIATE_PANE_ID, selectionBroker, sourceResolver);
+      multiview.show();
+    } catch (err) {
+      if (window.confirm("Error: Exception during load of TurboFan JSON file:\n" +
+        "error: " + err.message + "\nDo you want to clear session storage?")) {
+        window.sessionStorage.clear();
+      }
+      return;
+    }
+  }
+
+  function initializeUploadHandlers() {
+    // The <input> form #upload-helper with type file can't be a picture.
+    // We hence keep it hidden, and forward the click from the picture
+    // button #upload.
+    document.getElementById("upload").addEventListener("click", e => {
+      document.getElementById("upload-helper").click();
+      e.stopPropagation();
+    });
+    document.getElementById("upload-helper").addEventListener("change",
+      function (this: HTMLInputElement) {
+        const uploadFile = this.files && this.files[0];
+        if (uploadFile) {
+          const filereader = new FileReader();
+          filereader.onload = () => {
+            const txtRes = filereader.result;
+            if (typeof txtRes == 'string') {
+              loadFile(txtRes);
+            }
+          };
+          filereader.readAsText(uploadFile);
+        }
+      }
+    );
+    window.addEventListener("keydown", (e: KeyboardEvent) => {
+      if (e.keyCode == 76 && e.ctrlKey) { // CTRL + L
+        document.getElementById("upload-helper").click();
+        e.stopPropagation();
+        e.preventDefault();
+      }
+    });
+  }
+
+  initializeUploadHandlers();
+  resizer.updatePanes();
+};
diff --git a/src/v8/tools/turbolizer/src/util.ts b/src/v8/tools/turbolizer/src/util.ts
new file mode 100644
index 0000000..d9c8dcd
--- /dev/null
+++ b/src/v8/tools/turbolizer/src/util.ts
@@ -0,0 +1,93 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export function anyToString(x: any): string {
+  return "" + x;
+}
+
+function computeScrollTop(container, element) {
+  const height = container.offsetHeight;
+  const margin = Math.floor(height / 4);
+  const pos = element.offsetTop;
+  const currentScrollTop = container.scrollTop;
+  if (pos < currentScrollTop + margin) {
+    return Math.max(0, pos - margin);
+  } else if (pos > (currentScrollTop + 3 * margin)) {
+    return Math.max(0, pos - 3 * margin);
+  }
+  return pos;
+}
+
+export class ViewElements {
+  container: HTMLElement;
+  scrollTop: number;
+
+  constructor(container: HTMLElement) {
+    this.container = container;
+    this.scrollTop = undefined;
+  }
+
+  consider(element, doConsider) {
+    if (!doConsider) return;
+    const newScrollTop = computeScrollTop(this.container, element);
+    if (isNaN(newScrollTop)) {
+      console.log("Warning: computed scrollTop is NaN");
+    }
+    if (this.scrollTop === undefined) {
+      this.scrollTop = newScrollTop;
+    } else {
+      this.scrollTop = Math.min(this.scrollTop, newScrollTop);
+    }
+  }
+
+  apply(doApply) {
+    if (!doApply || this.scrollTop === undefined) return;
+    this.container.scrollTop = this.scrollTop;
+  }
+}
+
+export function sortUnique<T>(arr: Array<T>, f: (a: T, b: T) => number, equal: (a: T, b: T) => boolean) {
+  if (arr.length == 0) return arr;
+  arr = arr.sort(f);
+  const ret = [arr[0]];
+  for (let i = 1; i < arr.length; i++) {
+    if (!equal(arr[i - 1], arr[i])) {
+      ret.push(arr[i]);
+    }
+  }
+  return ret;
+}
+
+// Partial application without binding the receiver
+export function partial(f: any, ...arguments1: Array<any>) {
+  return function (this: any, ...arguments2: Array<any>) {
+    f.apply(this, [...arguments1, ...arguments2]);
+  };
+}
+
+export function isIterable(obj: any): obj is Iterable<any> {
+  return obj != null && obj != undefined
+    && typeof obj != 'string' && typeof obj[Symbol.iterator] === 'function';
+}
+
+export function alignUp(raw: number, multiple: number): number {
+  return Math.floor((raw + multiple - 1) / multiple) * multiple;
+}
+
+export function measureText(text: string) {
+  const textMeasure = document.getElementById('text-measure');
+  if (textMeasure instanceof SVGTSpanElement) {
+    textMeasure.textContent = text;
+    return {
+      width: textMeasure.getBBox().width,
+      height: textMeasure.getBBox().height,
+    };
+  }
+  return { width: 0, height: 0 };
+}
+
+// Interpolate between the given start and end values by a fraction of val/max.
+export function interpolate(val: number, max: number, start: number, end: number) {
+  return start + (end - start) * (val / max);
+}
diff --git a/src/v8/tools/turbolizer/src/view.ts b/src/v8/tools/turbolizer/src/view.ts
new file mode 100644
index 0000000..a8bb125
--- /dev/null
+++ b/src/v8/tools/turbolizer/src/view.ts
@@ -0,0 +1,37 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+export abstract class View {
+  protected container: HTMLElement;
+  protected divNode: HTMLElement;
+  protected abstract createViewElement(): HTMLElement;
+
+  constructor(idOrContainer: string | HTMLElement) {
+    this.container = typeof idOrContainer == "string" ? document.getElementById(idOrContainer) : idOrContainer;
+    this.divNode = this.createViewElement();
+  }
+
+  public show(): void {
+    this.container.appendChild(this.divNode);
+  }
+
+  public hide(): void {
+    this.container.removeChild(this.divNode);
+  }
+}
+
+export abstract class PhaseView extends View {
+  public abstract initializeContent(data: any, rememberedSelection: Set<any>): void;
+  public abstract detachSelection(): Set<string>;
+  public abstract onresize(): void;
+  public abstract searchInputAction(searchInput: HTMLInputElement, e: Event, onlyVisible: boolean): void;
+
+  constructor(idOrContainer: string | HTMLElement) {
+    super(idOrContainer);
+  }
+
+  public isScrollable(): boolean {
+    return false;
+  }
+}
diff --git a/src/v8/tools/turbolizer/tabs.css b/src/v8/tools/turbolizer/tabs.css
new file mode 100644
index 0000000..54dba72
--- /dev/null
+++ b/src/v8/tools/turbolizer/tabs.css
@@ -0,0 +1,55 @@
+.content {
+    display: grid;
+    grid-template-areas: "tabs" "window";
+    grid-template-columns: 1fr;
+    grid-template-rows: auto 1fr;
+    min-height: calc(100vh);
+}
+
+.nav-tabs-container {
+    grid-area: tabs;
+    padding: 0px;
+    background-color: #999999;
+    border-bottom: 4px solid #CCCCCC;
+}
+
+.tab-content {
+    grid-area: window;
+    background-color: white;
+    padding: 0px;
+    display:none;
+}
+
+.tab-content.tab-default {
+    display: block;
+}
+
+ul.nav-tabs {
+    padding: 0px;
+    margin: 0px;
+    overflow: auto;
+    display: table-row;
+    min-height: 2ex;
+}
+
+.nav-tabs li {
+    display: inline-block;
+    padding-left: 10px;
+    padding-right: 10px;
+    padding-top: 4px;
+    padding-bottom: 4px;
+    min-width: 20px;
+    text-decoration: none;
+    color: black;
+    text-align: center;
+    user-select: none;
+    cursor: pointer;
+}
+
+.nav-tabs li:hover {
+    background-color: #EEEEEE;
+}
+
+.nav-tabs li.active {
+    background-color: #CCCCCC;
+}
\ No newline at end of file
diff --git a/src/v8/tools/turbolizer/test/source-resolver-test.ts b/src/v8/tools/turbolizer/test/source-resolver-test.ts
new file mode 100644
index 0000000..38d6745
--- /dev/null
+++ b/src/v8/tools/turbolizer/test/source-resolver-test.ts
@@ -0,0 +1,10 @@
+import { SourceResolver } from '../src/source-resolver';
+import { expect } from 'chai';
+import { describe, it } from 'mocha';
+
+describe('SourceResolver', () => {
+  it('should be constructible', () => {
+    const a: SourceResolver = new SourceResolver();
+    expect(a.sources.length).to.equal(0);
+  });
+});
diff --git a/src/v8/tools/turbolizer/tsconfig.json b/src/v8/tools/turbolizer/tsconfig.json
new file mode 100644
index 0000000..cd036ac
--- /dev/null
+++ b/src/v8/tools/turbolizer/tsconfig.json
@@ -0,0 +1,39 @@
+{
+  "compilerOptions": {
+    "outDir": "build/",
+    "allowJs": false,
+    "target": "es2018",
+    "module": "es2015",
+    "sourceMap": true,
+    "experimentalDecorators": true,
+    "emitDecoratorMetadata": true,
+    "moduleResolution": "node",
+    "noUnusedLocals": true,
+    "noImplicitReturns": true,
+    "noImplicitThis": true,
+    "lib": ["dom", "es6", "dom.iterable", "scripthost", "es2018"]
+  },
+  "files": [
+    "src/util.ts",
+    "src/node.ts",
+    "src/edge.ts",
+    "src/graph.ts",
+    "src/node-label.ts",
+    "src/source-resolver.ts",
+    "src/selection.ts",
+    "src/selection-broker.ts",
+    "src/selection-handler.ts",
+    "src/constants.ts",
+    "src/view.ts",
+    "src/text-view.ts",
+    "src/code-view.ts",
+    "src/graph-layout.ts",
+    "src/graph-view.ts",
+    "src/schedule-view.ts",
+    "src/disassembly-view.ts",
+    "src/graphmultiview.ts",
+    "src/turbo-visualizer.ts",
+    "src/resizer.ts",
+    "src/info-view.ts"
+  ]
+}
diff --git a/src/v8/tools/turbolizer/tsconfig.test.json b/src/v8/tools/turbolizer/tsconfig.test.json
new file mode 100644
index 0000000..1b7a591
--- /dev/null
+++ b/src/v8/tools/turbolizer/tsconfig.test.json
@@ -0,0 +1,6 @@
+{
+    "extends": "./tsconfig.json",
+    "compilerOptions": {
+        "module": "commonjs"
+    }
+}
diff --git a/src/v8/tools/turbolizer/tsfmt.json b/src/v8/tools/turbolizer/tsfmt.json
new file mode 100644
index 0000000..2ff95b8
--- /dev/null
+++ b/src/v8/tools/turbolizer/tsfmt.json
@@ -0,0 +1,16 @@
+{
+  "tabSize": 2,
+  "indentSize": 2,
+  "convertTabsToSpaces": true,
+  "insertSpaceAfterCommaDelimiter": true,
+  "insertSpaceAfterSemicolonInForStatements": true,
+  "insertSpaceBeforeAndAfterBinaryOperators": true,
+  "insertSpaceAfterKeywordsInControlFlowStatements": true,
+  "insertSpaceAfterFunctionKeywordForAnonymousFunctions": true,
+  "insertSpaceAfterOpeningAndBeforeClosingNonemptyParenthesis": false,
+  "insertSpaceAfterOpeningAndBeforeClosingNonemptyBrackets": false,
+  "insertSpaceAfterOpeningAndBeforeClosingTemplateStringBraces": false,
+  "insertSpaceBeforeFunctionParenthesis": false,
+  "placeOpenBraceOnNewLineForFunctions": false,
+  "placeOpenBraceOnNewLineForControlBlocks": false
+}
\ No newline at end of file
diff --git a/src/v8/tools/turbolizer/tslint.json b/src/v8/tools/turbolizer/tslint.json
new file mode 100644
index 0000000..e07e057
--- /dev/null
+++ b/src/v8/tools/turbolizer/tslint.json
@@ -0,0 +1,45 @@
+{
+  "defaultSeverity": "error",
+  "extends": "tslint:recommended",
+  "jsRules": {},
+  "rules": {
+    "curly": [true, "ignore-same-line"],
+    "quotemark": [false, "double", "avoid-escape", "avoid-template"],
+    "only-arrow-functions": [false],
+    "no-var-keyword": true,
+    "prefer-const": [true],
+    "max-line-length": [false, {
+      "limit": 80
+    }],
+    "ordered-imports": false,
+    "array-type": [true, "generic"],
+    "semicolon": true,
+    "member-access": false,
+    "object-literal-shorthand": false,
+    "object-literal-key-quotes": [true, "as-needed"],
+    "object-literal-sort-keys": false,
+    "space-before-function-paren": [true, {
+      "anonymous": "always"
+    }],
+    "triple-equals": false,
+    "no-string-throw": false,
+    "no-empty": [true, "allow-empty-catch", "allow-empty-functions"],
+    "trailing-comma": false,
+    "member-ordering": false,
+    "no-string-literal": false,
+    "arrow-parens": [true, "ban-single-arg-parens"],
+    "no-console": false,
+    "interface-name": false,
+    "no-bitwise": false,
+    "no-shadowed-variable": false,
+    "prefer-for-of": true,
+    "align": true,
+    "arrow-return-shorthand": true,
+    "max-classes-per-file": false,
+    "variable-name": true,
+    "forin": false,
+    "one-variable-per-declaration": true,
+    "no-consecutive-blank-lines": true
+  },
+  "rulesDirectory": []
+}
diff --git a/src/v8/tools/turbolizer/turbo-visualizer.css b/src/v8/tools/turbolizer/turbo-visualizer.css
index 7fd9c48..216ca13 100644
--- a/src/v8/tools/turbolizer/turbo-visualizer.css
+++ b/src/v8/tools/turbolizer/turbo-visualizer.css
@@ -1,121 +1,212 @@
 .visible-transition {
-    transition-delay: 0s;
-    transition-duration: 1s;
-    transition-property: all;
-    transition-timing-function: ease;
+  transition-delay: 0s;
+  transition-duration: 1s;
+  transition-property: all;
+  transition-timing-function: ease;
 }
 
 .collapse-pane {
-    background: #A0A0A0;
-    bottom: 0;
-    position: absolute;
-    margin-bottom: 0.5em;
-    margin-right: 0.5em;
-    margin-left: 0.5em;
-    border-radius: 5px;
-    padding: 0.5em;
-    z-index: 5;
-    opacity: 0.7;
-    cursor: pointer;
+  background: #A0A0A0;
+  bottom: 0;
+  position: absolute;
+  margin-bottom: 0.5em;
+  margin-right: 0.5em;
+  margin-left: 0.5em;
+  border-radius: 5px;
+  padding: 0.5em;
+  z-index: 20;
+  opacity: 0.7;
+  cursor: pointer;
 }
 
 .search-input {
-    vertical-align: middle;
-    width: 145px;
-    opacity: 1;
+  vertical-align: middle;
+  width: 145px;
+  opacity: 1;
+  box-sizing: border-box;
+  height: 1.5em;
+}
+
+#phase-select {
+  box-sizing: border-box;
+  height: 1.5em;
+}
+
+#search-only-visible {
+  vertical-align: middle;
 }
 
 .button-input {
-    vertical-align: middle;
-    width: 24px;
-    opacity: 0.4;
-    cursor: pointer;
+  vertical-align: middle;
+  width: 24px;
+  opacity: 0.4;
+  cursor: pointer;
 }
 
 .button-input-toggled {
-    border-radius: 5px;
-    background-color: #505050;
+  border-radius: 5px;
+  background-color: #505050;
 }
 
 .button-input:focus {
-    outline: none;
+  outline: none;
 }
 
 .invisible {
-    display: none;
+  display: none;
 }
 
-
 .selected {
-    background-color: #FFFF33;
+  background-color: #FFFF33;
 }
 
-.prettyprint ol.linenums > li {
-    list-style-type: decimal;
-    !important
+.selected.block,
+.selected.block-id,
+.selected.schedule-block {
+  background-color: #AAFFAA;
+}
+
+ol.linenums {
+  -webkit-padding-start: 8px;
+}
+
+.line-number {
+  display: inline-block;
+  min-width: 3ex;
+  text-align: right;
+  color: #444444;
+  margin-right: 0.5ex;
+  padding-right: 0.5ex;
+  background: #EEEEEE;
+  /* font-size: 80%; */
+  user-select: none;
+  height: 120%;
+}
+
+.line-number:hover {
+  background-color: #CCCCCC;
+}
+
+.prettyprint ol.linenums>li.selected {
+  background-color: #FFFF33 !important;
+}
+
+li.selected .line-number {
+  background-color: #FFFF33;
+}
+
+.prettyprint ol.linenums>li {
+  list-style-type: decimal;
+  display: block;
+}
+
+.source-container {
+  border-bottom: 2px solid #AAAAAA;
+}
+
+.code-header {
+  background-color: #CCCCCC;
+  padding-left: 1em;
+  padding-right: 1em;
+  padding-top: 1ex;
+  padding-bottom: 1ex;
+  font-family: monospace;
+  user-select: none;
+}
+
+.main-source .code-header {
+  border-top: 2px solid #AAAAAA;
+  font-weight: bold;
+}
+
+.code-header .code-file-function {
+  font-family: monospace;
+  float: left;
+  user-select: text;
+}
+
+.code-header .code-mode {
+  float: right;
+  font-family: sans-serif;
+  font-size: small;
+}
+
+.info-container {
+  font-family: sans-serif;
+  font-size: small;
+}
+
+.info-topic {
+  border: 1px solid lightgray;
+  margin: 2px;
+}
+
+.info-topic-header {
+  background-color: lightgray;
+  padding: 1px;
+}
+
+.info-topic-content {
+  padding: 2px;
 }
 
 
+html,
 body {
-    margin: 0;
-    padding: 0;
-    height: 100vh;
-    width: 100vw;
-    overflow:hidden;
-    -webkit-touch-callout: none;
-    -webkit-user-select: none;
-    -khtml-user-select: none;
-    -moz-user-select: none;
-    -ms-user-select: none;
-    user-select: none;
+  margin: 0;
+  padding: 0;
+  /*height: 99vh;
+    width: 99vw;*/
+  overflow: hidden;
 }
 
 p {
-    text-align: center;
-    overflow: overlay;
-    position: relative;
+  text-align: center;
+  overflow: overlay;
+  position: relative;
 }
 
 marker {
-    fill: #080808;
+  fill: #080808;
 }
 
 g rect {
-    fill: #F0F0F0;
-    stroke: #080808;
-    stroke-width: 2px;
+  fill: #F0F0F0;
+  stroke: #080808;
+  stroke-width: 2px;
 }
 
 g.dead {
-    opacity: .5;
+  opacity: .5;
 }
 
 g.unsorted rect {
-    opacity: 0.5;
+  opacity: 0.5;
 }
 
 div.scrollable {
-    overflow-y: _croll; overflow-x: hidden;
+  overflow-y: auto;
+  overflow-x: hidden;
 }
 
 g.turbonode[relToHover="input"] rect {
-    stroke: #67e62c;
-    stroke-width: 16px;
+  stroke: #67e62c;
+  stroke-width: 16px;
 }
 
 g.turbonode[relToHover="output"] rect {
-    stroke: #d23b14;
-    stroke-width: 16px;
+  stroke: #d23b14;
+  stroke-width: 16px;
 }
 
 path[relToHover="input"] {
-    stroke: #67e62c;
-    stroke-width: 16px;
+  stroke: #67e62c;
+  stroke-width: 16px;
 }
 
 path[relToHover="output"] {
-    stroke: #d23b14;
-    stroke-width: 16px;
+  stroke: #d23b14;
+  stroke-width: 16px;
 }
 
 
@@ -125,82 +216,82 @@
 }
 
 g.control rect {
-    fill: #EFCC00;
-    stroke: #080808;
-    stroke-width: 5px;
+  fill: #EFCC00;
+  stroke: #080808;
+  stroke-width: 5px;
 }
 
 g.javascript rect {
-    fill: #DD7E6B;
+  fill: #DD7E6B;
 }
 
 g.simplified rect {
-    fill: #3C78D8;
+  fill: #3C78D8;
 }
 
 g.machine rect {
-    fill: #6AA84F;
+  fill: #6AA84F;
 }
 
 g.input rect {
-    fill: #CFE2F3;
+  fill: #CFE2F3;
 }
 
 g.selected rect {
-    fill: #FFFF33;
+  fill: #FFFF33;
 }
 
 circle.bubbleStyle {
-    fill: #080808;
-    fill-opacity: 0.0;
-    stroke: #080808;
-    stroke-width: 2px;
+  fill: #080808;
+  fill-opacity: 0.0;
+  stroke: #080808;
+  stroke-width: 2px;
 }
 
 circle.bubbleStyle:hover {
-    stroke-width: 3px;
+  stroke-width: 3px;
 }
 
 circle.filledBubbleStyle {
-    fill: #080808;
-    stroke: #080808;
-    stroke-width: 2px;
+  fill: #080808;
+  stroke: #080808;
+  stroke-width: 2px;
 }
 
 circle.filledBubbleStyle:hover {
-    fill: #080808;
-    stroke-width: 3px;
+  fill: #080808;
+  stroke-width: 3px;
 }
 
 circle.halfFilledBubbleStyle {
-    fill: #808080;
-    stroke: #101010;
-    stroke-width: 2px;
+  fill: #808080;
+  stroke: #101010;
+  stroke-width: 2px;
 }
 
 circle.halfFilledBubbleStyle:hover {
-    fill: #808080;
-    stroke-width: 3px;
+  fill: #808080;
+  stroke-width: 3px;
 }
 
 path {
-    fill: none;
-    stroke: #080808;
-    stroke-width: 4px;
-    cursor: default;
+  fill: none;
+  stroke: #080808;
+  stroke-width: 4px;
+  cursor: default;
 }
 
 path:hover {
-    stroke-width: 6px;
+  stroke-width: 6px;
 }
 
 path.hidden {
-    fill: none;
-    stroke-width: 0;
+  fill: none;
+  stroke-width: 0;
 }
 
 path.link.selected {
-    stroke: #FFFF33;
+  stroke: #FFFF33;
 }
 
 pre.prettyprint {
@@ -213,11 +304,11 @@
 li.L5,
 li.L7,
 li.L9 {
-    background: none !important
+  background: none !important
 }
 
 li.nolinenums {
-  list-style-type:none;
+  list-style-type: none;
 }
 
 ul.noindent {
@@ -226,136 +317,382 @@
   -webkit-margin-after: 0px;
 }
 
-input:hover, .collapse-pane:hover input {
-    opacity: 1;
-    cursor: pointer;
+input:hover,
+.collapse-pane:hover input {
+  opacity: 1;
+  cursor: pointer;
 }
 
-span.linkable-text {
-    text-decoration: underline;
+.linkable-text {
+  text-decoration: underline;
 }
 
-span.linkable-text:hover {
-    cursor: pointer;
-    font-weight: bold;
+.linkable-text:hover {
+  cursor: pointer;
+  font-weight: bold;
 }
 
 
 #left {
-    float: left; height: 100%; background-color: #FFFFFF;
+  float: left;
+  user-select: none;
 }
 
 #middle {
-    float:left; height: 100%; background-color: #F8F8F8;
+  float: left;
+  background-color: #F8F8F8;
+  user-select: none;
 }
 
 #right {
-    float: right; background-color: #FFFFFF;
+  float: right;
+}
+
+.viewpane {
+  height: 100vh;
+  background-color: #FFFFFF;
+}
+
+.multiview {
+  width: 100%;
 }
 
 
 #disassembly-collapse {
-    right: 0;
+  right: 0;
 }
 
 #source-collapse {
-    left: 0;
+  left: 0;
 }
 
-#graph-toolbox-anchor {
-    height: 0px;
+#graph {
+  width: 100%;
+  height: 100%;
 }
 
-#graph-toolbox {
-    position: relative;
-    top: 1em;
-    left: 25px;
-    border: 2px solid #eee8d5;
-    border-radius: 5px;
-    padding: 0.7em;
-    z-index: 5;
-    background: rgba(100%, 100%, 100%, 0.7);
+.toolbox-anchor {
+  height: 0px;
 }
 
-#disassembly-toolbox {
-    position: relative;
-    top: 1em;
-    left: 0.7em;
-    border: 2px solid #eee8d5;
-    border-radius: 5px;
-    padding: 0.7em;
-    z-index: 5;
+.graph-toolbox {
+  position: relative;
+  border-bottom: 2px solid #eee8d5;
+  z-index: 5;
+  background: rgba(100%, 100%, 100%, 0.7);
+  box-sizing: border-box;
+  padding: 3px;
+  margin-left: 4px;
+  margin-right: 4px;
+}
+
+.disassembly-toolbox {
+  position: relative;
+  padding-bottom: 3px;
+  z-index: 5;
+  background: rgba(100%, 100%, 100%, 0.7);
+  padding-top: 3px;
+  box-sizing: border-box;
+  margin-left: 4px;
+  margin-right: 4px;
 }
 
 #load-file {
-    position: absolute;
-    top: 0;
-    right: 0;
-    margin-top: 0.5em;
-    margin-right: 0.5em;
-    z-index: 5;
-    opacity: 0.7;
+  position: absolute;
+  top: 0;
+  right: 0;
+  margin-top: 0.5em;
+  margin-right: 0.5em;
+  z-index: 20;
+  opacity: 0.7;
 }
 
 #load-file input {
-    background: #A0A0A0;
-    border-radius: 5px;
-    padding: 0.5em;
+  background: #A0A0A0;
+  border-radius: 5px;
+  padding: 0.5em;
 }
 
-#hidden-file-upload {
-    display: none;
+#upload-helper {
+  display: none;
 }
 
 .prof {
-    cursor: default;
+  cursor: default;
 }
 
 tspan {
-    font-size: 500%;
-    font-family: sans-serif;
+  font-size: 500%;
+  font-family: sans-serif;
 }
 
 text {
-    dominant-baseline: text-before-edge;
+  dominant-baseline: text-before-edge;
 }
 
-.resizer-left {
-    position:absolute;
-    width: 4px;
-    height:100%;
-    background: #a0a0a0;
-    cursor: pointer;
+.resizer {
+  position: absolute;
+  z-index: 10;
+  width: 4px;
+  height: 100%;
+  background: #a0a0a0;
+  cursor: pointer;
 }
 
-.resizer-left.snapped {
-    width: 12px;
+.resizer.snapped {
+  width: 12px;
 }
 
-.resizer-left:hover {
-    background: orange;
+.resizer.snapped:hover {
+  width: 12px;
+  margin-left: 0px;
 }
 
-.resizer-left.dragged {
-    background: orange;
+.resizer:hover,
+.resizer.dragged {
+  width: 10px;
+  margin-left: -4px;
+  background: orange;
 }
 
-.resizer-right {
-    position:absolute;
-    width: 4px;
-    height:100%;
-    background: #a0a0a0;
-    cursor: pointer;
+.source-position {
+  /* border-left: 1px solid #FF3333; */
+  width: 0;
+  display: inline-block;
 }
 
-.resizer-right.snapped {
-    width: 12px;
+.source-position .inlining-marker {
+  content: "";
+  position: relative;
+  display: inline-block;
+  top: -0.5ex;
+  margin-left: -4px;
+  margin-right: -4px;
+  border-width: 5px;
+  border-style: solid;
+  border-color: #555 transparent transparent transparent;
 }
 
-.resizer-right:hover {
-    background: orange;
+.source-position .marker {
+  content: "";
+  display: inline-block;
+  bottom: -1ex;
+  width: 0px;
+  margin-left: -4px;
+  margin-right: -4px;
+  margin-bottom: -1ex;
+  border-width: 5px;
+  border-style: solid;
+  border-color: transparent transparent #555 transparent;
 }
 
-.resizer-right.dragged {
-    background: orange;
-}
\ No newline at end of file
+.source-position.selected .marker {
+  border-color: transparent transparent #F00 transparent;
+}
+
+.source-position .inlining-marker:hover {
+  border-color: transparent transparent #AA5 transparent;
+}
+
+.source-position .inlining-marker[data-descr]:hover::after {
+  content: attr(data-descr);
+  position: absolute;
+  font-size: 10px;
+  z-index: 1;
+  background-color: #555;
+  color: #fff;
+  text-align: center;
+  border-radius: 6px;
+  padding: 6px;
+  top: 6px;
+  left: 50%;
+  margin-left: -80px;
+}
+
+#sequence {
+  font-family: monospace;
+  margin-top: 50px;
+}
+
+#schedule {
+  font-family: monospace;
+  margin-top: 50px;
+}
+
+.schedule-block {
+  margin: 5px;
+  background-color: white;
+  padding-left: 5px;
+}
+
+.schedule-block .block-id {
+  display: inline-block;
+  font-size: large;
+  text-decoration: underline;
+  padding-left: 1ex;
+}
+
+.schedule-block .block-id:hover {
+  font-weight: bold;
+}
+
+.schedule-block>.block-id::before {
+  content: "Block B";
+}
+
+.schedule-block.deferred>.block-id::after {
+  content: " (deferred)";
+}
+
+.schedule-block .block-list {
+  display: inline-block;
+}
+
+.schedule-block .block-list * {
+  display: inline-block;
+}
+
+.schedule-block .block-list .block-id {
+  padding-left: 1ex;
+}
+
+.schedule-block .block-list .block-id:before {
+  content: "B";
+}
+
+.schedule-block .predecessor-list::before {
+  display: inline-block;
+  content: "  \2B05  ";
+  padding-left: 1ex;
+  padding-right: 1ex;
+}
+
+.schedule-block .successor-list::before {
+  display: inline-block;
+  content: "  \2B95  ";
+  padding-left: 1ex;
+  padding-right: 1ex;
+}
+
+.schedule-block .nodes .node * {
+  display: inline-block;
+}
+
+.schedule-block .nodes .node .node-id {
+  padding-right: 1ex;
+  min-width: 5ex;
+  text-align: right;
+}
+
+.schedule-block .nodes .node .node-id:after {
+  content: ":";
+}
+
+.schedule-block .nodes .node .node-label {
+  user-select: text;
+}
+
+.schedule-block .nodes .node .parameter-list:before {
+  content: "(";
+}
+
+.schedule-block .nodes .node .parameter-list:after {
+  content: ")";
+}
+
+.schedule-block .instr-marker {
+  padding-right: .5ex;
+  padding-left: .5ex;
+  min-width: 1em;
+  background: #EEEEEE;
+  /* display: none; */
+}
+
+.schedule-block>.instr-marker {
+  display: inline;
+}
+
+.instruction * {
+  padding-right: .5ex;
+}
+
+.phi-label,
+.instruction-id {
+  display: inline-block;
+  padding-right: .5ex;
+  padding-left: .5ex;
+  min-width: 1ex;
+  vertical-align: top;
+}
+
+.instruction-id:after {
+  content: ":";
+}
+
+.instruction-node,
+.gap,
+.instruction {
+  display: block;
+}
+
+.phi-contents,
+.instruction-contents,
+.gap *,
+.instruction * {
+  display: inline-block;
+}
+
+.phi * {
+  padding-right: 1ex;
+  display: inline-block;
+}
+
+.gap .gap-move {
+  padding-left: .5ex;
+  padding-right: .5ex;
+}
+
+.gap>*:before {
+  content: "(";
+}
+
+.gap>*:after {
+  content: ")";
+}
+
+.parameter.constant {
+  outline: 1px dotted red;
+}
+
+.clickable:hover {
+  text-decoration: underline;
+}
+
+.clickable:hover {
+  font-weight: bold;
+}
+
+.comma-sep-list>* {
+  padding-right: 1ex;
+}
+
+.comma-sep-list>*:after {
+  content: ",";
+}
+
+.comma-sep-list>*:last-child:after {
+  content: "";
+}
+
+.comma-sep-list>*:last-child {
+  padding-right: 0ex;
+}
+
+.temps:before {
+  content: "temps: ";
+}
+
+.temps {
+  padding-left: .5ex;
+  outline: 1px dotted grey;
+}
diff --git a/src/v8/tools/turbolizer/turbolizer.png b/src/v8/tools/turbolizer/turbolizer.png
new file mode 100644
index 0000000..1af1a49
--- /dev/null
+++ b/src/v8/tools/turbolizer/turbolizer.png
Binary files differ
diff --git a/src/v8/tools/ubsan/blacklist.txt b/src/v8/tools/ubsan/blacklist.txt
new file mode 100644
index 0000000..0705adc
--- /dev/null
+++ b/src/v8/tools/ubsan/blacklist.txt
@@ -0,0 +1,11 @@
+#############################################################################
+# UBSan blacklist.
+
+# Bug 8735: PropertyCallbackInfo<void> vs PropertyCallbackInfo<T>.
+fun:*v8*internal*PropertyCallbackArguments*CallAccessorSetter*
+fun:*v8*internal*PropertyCallbackArguments*BasicCallNamedGetterCallback*
+fun:*v8*internal*InvokeAccessorGetterCallback*
+
+# Bug 8735: WeakCallbackInfo<void> vs. WeakCallbackInfo<T>.
+fun:*v8*internal*GlobalHandles*PendingPhantomCallback*Invoke*
+fun:*v8*internal*GlobalHandles*Node*PostGarbageCollectionProcessing*
diff --git a/src/v8/tools/unittests/__init__.py b/src/v8/tools/unittests/__init__.py
new file mode 100644
index 0000000..3841a86
--- /dev/null
+++ b/src/v8/tools/unittests/__init__.py
@@ -0,0 +1,4 @@
+#!/usr/bin/env python
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/src/v8/tools/unittests/run_perf_test.py b/src/v8/tools/unittests/run_perf_test.py
index 07dd515..083d224 100755
--- a/src/v8/tools/unittests/run_perf_test.py
+++ b/src/v8/tools/unittests/run_perf_test.py
@@ -3,19 +3,22 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 from collections import namedtuple
-import coverage
 import json
-from mock import DEFAULT
-from mock import MagicMock
 import os
-from os import path, sys
 import platform
 import shutil
 import subprocess
+import sys
 import tempfile
 import unittest
 
+import coverage
+import mock
+
 # Requires python-coverage and python-mock. Native python coverage
 # version >= 3.7.1 should be installed to get the best speed.
 
@@ -23,130 +26,135 @@
 RUN_PERF = os.path.join(BASE_DIR, 'run_perf.py')
 TEST_DATA = os.path.join(BASE_DIR, 'unittests', 'testdata')
 
-TEST_WORKSPACE = path.join(tempfile.gettempdir(), "test-v8-run-perf")
+TEST_WORKSPACE = os.path.join(tempfile.gettempdir(), 'test-v8-run-perf')
 
 V8_JSON = {
-  "path": ["."],
-  "binary": "d7",
-  "flags": ["--flag"],
-  "main": "run.js",
-  "run_count": 1,
-  "results_regexp": "^%s: (.+)$",
-  "tests": [
-    {"name": "Richards"},
-    {"name": "DeltaBlue"},
+  'path': ['.'],
+  'owners': ['username@chromium.org'],
+  'binary': 'd7',
+  'timeout': 60,
+  'flags': ['--flag'],
+  'main': 'run.js',
+  'run_count': 1,
+  'results_regexp': '^%s: (.+)$',
+  'tests': [
+    {'name': 'Richards'},
+    {'name': 'DeltaBlue'},
   ]
 }
 
 V8_NESTED_SUITES_JSON = {
-  "path": ["."],
-  "flags": ["--flag"],
-  "run_count": 1,
-  "units": "score",
-  "tests": [
-    {"name": "Richards",
-     "path": ["richards"],
-     "binary": "d7",
-     "main": "run.js",
-     "resources": ["file1.js", "file2.js"],
-     "run_count": 2,
-     "results_regexp": "^Richards: (.+)$"},
-    {"name": "Sub",
-     "path": ["sub"],
-     "tests": [
-       {"name": "Leaf",
-        "path": ["leaf"],
-        "run_count_x64": 3,
-        "units": "ms",
-        "main": "run.js",
-        "results_regexp": "^Simple: (.+) ms.$"},
+  'path': ['.'],
+  'owners': ['username@chromium.org'],
+  'flags': ['--flag'],
+  'run_count': 1,
+  'units': 'score',
+  'tests': [
+    {'name': 'Richards',
+     'path': ['richards'],
+     'binary': 'd7',
+     'main': 'run.js',
+     'resources': ['file1.js', 'file2.js'],
+     'run_count': 2,
+     'results_regexp': '^Richards: (.+)$'},
+    {'name': 'Sub',
+     'path': ['sub'],
+     'tests': [
+       {'name': 'Leaf',
+        'path': ['leaf'],
+        'run_count_x64': 3,
+        'units': 'ms',
+        'main': 'run.js',
+        'results_regexp': '^Simple: (.+) ms.$'},
      ]
     },
-    {"name": "DeltaBlue",
-     "path": ["delta_blue"],
-     "main": "run.js",
-     "flags": ["--flag2"],
-     "results_regexp": "^DeltaBlue: (.+)$"},
-    {"name": "ShouldntRun",
-     "path": ["."],
-     "archs": ["arm"],
-     "main": "run.js"},
+    {'name': 'DeltaBlue',
+     'path': ['delta_blue'],
+     'main': 'run.js',
+     'flags': ['--flag2'],
+     'results_regexp': '^DeltaBlue: (.+)$'},
+    {'name': 'ShouldntRun',
+     'path': ['.'],
+     'archs': ['arm'],
+     'main': 'run.js'},
   ]
 }
 
 V8_GENERIC_JSON = {
-  "path": ["."],
-  "binary": "cc",
-  "flags": ["--flag"],
-  "generic": True,
-  "run_count": 1,
-  "units": "ms",
+  'path': ['.'],
+  'owners': ['username@chromium.org'],
+  'binary': 'cc',
+  'flags': ['--flag'],
+  'generic': True,
+  'run_count': 1,
+  'units': 'ms',
 }
 
-Output = namedtuple("Output", "stdout, stderr, timed_out")
-
 class PerfTest(unittest.TestCase):
   @classmethod
   def setUpClass(cls):
-    cls.base = path.dirname(path.dirname(path.abspath(__file__)))
-    sys.path.append(cls.base)
+    sys.path.insert(0, BASE_DIR)
     cls._cov = coverage.coverage(
-        include=([os.path.join(cls.base, "run_perf.py")]))
+        include=([os.path.join(BASE_DIR, 'run_perf.py')]))
     cls._cov.start()
     import run_perf
     from testrunner.local import command
-    global command
-    global run_perf
+    from testrunner.objects.output import Output, NULL_OUTPUT
+    global command, run_perf, Output, NULL_OUTPUT
 
   @classmethod
   def tearDownClass(cls):
     cls._cov.stop()
-    print ""
-    print cls._cov.report()
+    print('')
+    print(cls._cov.report())
 
   def setUp(self):
     self.maxDiff = None
-    if path.exists(TEST_WORKSPACE):
+    if os.path.exists(TEST_WORKSPACE):
       shutil.rmtree(TEST_WORKSPACE)
     os.makedirs(TEST_WORKSPACE)
 
   def tearDown(self):
-    if path.exists(TEST_WORKSPACE):
+    mock.patch.stopall()
+    if os.path.exists(TEST_WORKSPACE):
       shutil.rmtree(TEST_WORKSPACE)
 
   def _WriteTestInput(self, json_content):
-    self._test_input = path.join(TEST_WORKSPACE, "test.json")
-    with open(self._test_input, "w") as f:
+    self._test_input = os.path.join(TEST_WORKSPACE, 'test.json')
+    with open(self._test_input, 'w') as f:
       f.write(json.dumps(json_content))
 
   def _MockCommand(self, *args, **kwargs):
     # Fake output for each test run.
     test_outputs = [Output(stdout=arg,
-                           stderr=None,
-                           timed_out=kwargs.get("timed_out", False))
+                           timed_out=kwargs.get('timed_out', False),
+                           exit_code=kwargs.get('exit_code', 0),
+                           duration=42)
                     for arg in args[1]]
     def create_cmd(*args, **kwargs):
-      cmd = MagicMock()
+      cmd = mock.MagicMock()
       def execute(*args, **kwargs):
         return test_outputs.pop()
-      cmd.execute = MagicMock(side_effect=execute)
+      cmd.execute = mock.MagicMock(side_effect=execute)
       return cmd
 
-    command.Command = MagicMock(side_effect=create_cmd)
+    mock.patch.object(
+        run_perf.command, 'PosixCommand',
+        mock.MagicMock(side_effect=create_cmd)).start()
 
     # Check that d8 is called from the correct cwd for each test run.
-    dirs = [path.join(TEST_WORKSPACE, arg) for arg in args[0]]
+    dirs = [os.path.join(TEST_WORKSPACE, arg) for arg in args[0]]
     def chdir(*args, **kwargs):
-      self.assertEquals(dirs.pop(), args[0])
-    os.chdir = MagicMock(side_effect=chdir)
+      self.assertEqual(dirs.pop(), args[0])
+    os.chdir = mock.MagicMock(side_effect=chdir)
 
-    subprocess.check_call = MagicMock()
-    platform.system = MagicMock(return_value='Linux')
+    subprocess.check_call = mock.MagicMock()
+    platform.system = mock.MagicMock(return_value='Linux')
 
   def _CallMain(self, *args):
-    self._test_output = path.join(TEST_WORKSPACE, "results.json")
+    self._test_output = os.path.join(TEST_WORKSPACE, 'results.json')
     all_args=[
-      "--json-test-results",
+      '--json-test-results',
       self._test_output,
       self._test_input,
     ]
@@ -158,18 +166,27 @@
       return json.load(f)
 
   def _VerifyResults(self, suite, units, traces, file_name=None):
-    self.assertEquals([
-      {"units": units,
-       "graphs": [suite, trace["name"]],
-       "results": trace["results"],
-       "stddev": trace["stddev"]} for trace in traces],
-      self._LoadResults(file_name)["traces"])
+    self.assertListEqual(sorted([
+      {'units': units,
+       'graphs': [suite, trace['name']],
+       'results': trace['results'],
+       'stddev': trace['stddev']} for trace in traces]),
+      sorted(self._LoadResults(file_name)['traces']))
+
+  def _VerifyRunnableDurations(self, runs, timeout, file_name=None):
+    self.assertListEqual([
+      {
+        'graphs': ['test'],
+        'durations': [42] * runs,
+        'timeout': timeout,
+      },
+    ], self._LoadResults(file_name)['runnables'])
 
   def _VerifyErrors(self, errors):
-    self.assertEquals(errors, self._LoadResults()["errors"])
+    self.assertListEqual(errors, self._LoadResults()['errors'])
 
   def _VerifyMock(self, binary, *args, **kwargs):
-    shell = path.join(path.dirname(self.base), binary)
+    shell = os.path.join(os.path.dirname(BASE_DIR), binary)
     command.Command.assert_called_with(
         cmd_prefix=[],
         shell=shell,
@@ -177,381 +194,419 @@
         timeout=kwargs.get('timeout', 60))
 
   def _VerifyMockMultiple(self, *args, **kwargs):
-    self.assertEquals(len(args), len(command.Command.call_args_list))
+    self.assertEqual(len(args), len(command.Command.call_args_list))
     for arg, actual in zip(args, command.Command.call_args_list):
       expected = {
         'cmd_prefix': [],
-        'shell': path.join(path.dirname(self.base), arg[0]),
+        'shell': os.path.join(os.path.dirname(BASE_DIR), arg[0]),
         'args': list(arg[1:]),
         'timeout': kwargs.get('timeout', 60)
       }
-      self.assertEquals((expected, ), actual)
+      self.assertTupleEqual((expected, ), actual)
 
   def testOneRun(self):
     self._WriteTestInput(V8_JSON)
-    self._MockCommand(["."], ["x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n"])
-    self.assertEquals(0, self._CallMain())
-    self._VerifyResults("test", "score", [
-      {"name": "Richards", "results": ["1.234"], "stddev": ""},
-      {"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
+    self._MockCommand(['.'], ['x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n'])
+    self.assertEqual(0, self._CallMain())
+    self._VerifyResults('test', 'score', [
+      {'name': 'Richards', 'results': [1.234], 'stddev': ''},
+      {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
     ])
+    self._VerifyRunnableDurations(1, 60)
     self._VerifyErrors([])
-    self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
+    self._VerifyMock(
+        os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js')
 
   def testOneRunWithTestFlags(self):
     test_input = dict(V8_JSON)
-    test_input["test_flags"] = ["2", "test_name"]
+    test_input['test_flags'] = ['2', 'test_name']
     self._WriteTestInput(test_input)
-    self._MockCommand(["."], ["Richards: 1.234\nDeltaBlue: 10657567"])
-    self.assertEquals(0, self._CallMain())
-    self._VerifyResults("test", "score", [
-      {"name": "Richards", "results": ["1.234"], "stddev": ""},
-      {"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
+    self._MockCommand(['.'], ['Richards: 1.234\nDeltaBlue: 10657567'])
+    self.assertEqual(0, self._CallMain())
+    self._VerifyResults('test', 'score', [
+      {'name': 'Richards', 'results': [1.234], 'stddev': ''},
+      {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
     ])
     self._VerifyErrors([])
-    self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js",
-                     "--", "2", "test_name")
+    self._VerifyMock(os.path.join(
+      'out', 'x64.release', 'd7'), '--flag', 'run.js', '--', '2', 'test_name')
 
   def testTwoRuns_Units_SuiteName(self):
     test_input = dict(V8_JSON)
-    test_input["run_count"] = 2
-    test_input["name"] = "v8"
-    test_input["units"] = "ms"
+    test_input['run_count'] = 2
+    test_input['name'] = 'v8'
+    test_input['units'] = 'ms'
     self._WriteTestInput(test_input)
-    self._MockCommand([".", "."],
-                      ["Richards: 100\nDeltaBlue: 200\n",
-                       "Richards: 50\nDeltaBlue: 300\n"])
-    self.assertEquals(0, self._CallMain())
-    self._VerifyResults("v8", "ms", [
-      {"name": "Richards", "results": ["50.0", "100.0"], "stddev": ""},
-      {"name": "DeltaBlue", "results": ["300.0", "200.0"], "stddev": ""},
+    self._MockCommand(['.', '.'],
+                      ['Richards: 100\nDeltaBlue: 200\n',
+                       'Richards: 50\nDeltaBlue: 300\n'])
+    self.assertEqual(0, self._CallMain())
+    self._VerifyResults('v8', 'ms', [
+      {'name': 'Richards', 'results': [50.0, 100.0], 'stddev': ''},
+      {'name': 'DeltaBlue', 'results': [300.0, 200.0], 'stddev': ''},
     ])
     self._VerifyErrors([])
-    self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
+    self._VerifyMock(os.path.join(
+      'out', 'x64.release', 'd7'), '--flag', 'run.js')
 
   def testTwoRuns_SubRegexp(self):
     test_input = dict(V8_JSON)
-    test_input["run_count"] = 2
-    del test_input["results_regexp"]
-    test_input["tests"][0]["results_regexp"] = "^Richards: (.+)$"
-    test_input["tests"][1]["results_regexp"] = "^DeltaBlue: (.+)$"
+    test_input['run_count'] = 2
+    del test_input['results_regexp']
+    test_input['tests'][0]['results_regexp'] = '^Richards: (.+)$'
+    test_input['tests'][1]['results_regexp'] = '^DeltaBlue: (.+)$'
     self._WriteTestInput(test_input)
-    self._MockCommand([".", "."],
-                      ["Richards: 100\nDeltaBlue: 200\n",
-                       "Richards: 50\nDeltaBlue: 300\n"])
-    self.assertEquals(0, self._CallMain())
-    self._VerifyResults("test", "score", [
-      {"name": "Richards", "results": ["50.0", "100.0"], "stddev": ""},
-      {"name": "DeltaBlue", "results": ["300.0", "200.0"], "stddev": ""},
+    self._MockCommand(['.', '.'],
+                      ['Richards: 100\nDeltaBlue: 200\n',
+                       'Richards: 50\nDeltaBlue: 300\n'])
+    self.assertEqual(0, self._CallMain())
+    self._VerifyResults('test', 'score', [
+      {'name': 'Richards', 'results': [50.0, 100.0], 'stddev': ''},
+      {'name': 'DeltaBlue', 'results': [300.0, 200.0], 'stddev': ''},
     ])
     self._VerifyErrors([])
-    self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
+    self._VerifyMock(os.path.join(
+      'out', 'x64.release', 'd7'), '--flag', 'run.js')
+
+  def testPerfectConfidenceRuns(self):
+    self._WriteTestInput(V8_JSON)
+    self._MockCommand(
+        ['.'], ['x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n'] * 10)
+    self.assertEqual(0, self._CallMain('--confidence-level', '1'))
+    self._VerifyResults('test', 'score', [
+      {'name': 'Richards', 'results': [1.234] * 10, 'stddev': ''},
+      {'name': 'DeltaBlue', 'results': [10657567.0] * 10, 'stddev': ''},
+    ])
+    self._VerifyErrors([])
+    self._VerifyMock(os.path.join(
+      'out', 'x64.release', 'd7'), '--flag', 'run.js')
+
+  def testNoisyConfidenceRuns(self):
+    self._WriteTestInput(V8_JSON)
+    self._MockCommand(
+        ['.'],
+        reversed([
+          # First 10 runs are mandatory. DeltaBlue is slightly noisy.
+          'x\nRichards: 1.234\nDeltaBlue: 10757567\ny\n',
+          'x\nRichards: 1.234\nDeltaBlue: 10557567\ny\n',
+          'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+          'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+          'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+          'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+          'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+          'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+          'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+          'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+          # Need 4 more runs for confidence in DeltaBlue results.
+          'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+          'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+          'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+          'x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n',
+        ]),
+    )
+    self.assertEqual(0, self._CallMain('--confidence-level', '1'))
+    self._VerifyResults('test', 'score', [
+      {'name': 'Richards', 'results': [1.234] * 14, 'stddev': ''},
+      {
+        'name': 'DeltaBlue',
+        'results': [10757567.0, 10557567.0] + [10657567.0] * 12,
+        'stddev': '',
+      },
+    ])
+    self._VerifyErrors([])
+    self._VerifyMock(os.path.join(
+      'out', 'x64.release', 'd7'), '--flag', 'run.js')
 
   def testNestedSuite(self):
     self._WriteTestInput(V8_NESTED_SUITES_JSON)
-    self._MockCommand(["delta_blue", "sub/leaf", "richards"],
-                      ["DeltaBlue: 200\n",
-                       "Simple: 1 ms.\n",
-                       "Simple: 2 ms.\n",
-                       "Simple: 3 ms.\n",
-                       "Richards: 100\n",
-                       "Richards: 50\n"])
-    self.assertEquals(0, self._CallMain())
-    self.assertEquals([
-      {"units": "score",
-       "graphs": ["test", "Richards"],
-       "results": ["50.0", "100.0"],
-       "stddev": ""},
-      {"units": "ms",
-       "graphs": ["test", "Sub", "Leaf"],
-       "results": ["3.0", "2.0", "1.0"],
-       "stddev": ""},
-      {"units": "score",
-       "graphs": ["test", "DeltaBlue"],
-       "results": ["200.0"],
-       "stddev": ""},
-      ], self._LoadResults()["traces"])
+    self._MockCommand(['delta_blue', 'sub/leaf', 'richards'],
+                      ['DeltaBlue: 200\n',
+                       'Simple: 1 ms.\n',
+                       'Simple: 2 ms.\n',
+                       'Simple: 3 ms.\n',
+                       'Richards: 100\n',
+                       'Richards: 50\n'])
+    self.assertEqual(0, self._CallMain())
+    self.assertListEqual(sorted([
+      {'units': 'score',
+       'graphs': ['test', 'Richards'],
+       'results': [50.0, 100.0],
+       'stddev': ''},
+      {'units': 'ms',
+       'graphs': ['test', 'Sub', 'Leaf'],
+       'results': [3.0, 2.0, 1.0],
+       'stddev': ''},
+      {'units': 'score',
+       'graphs': ['test', 'DeltaBlue'],
+       'results': [200.0],
+       'stddev': ''},
+      ]), sorted(self._LoadResults()['traces']))
     self._VerifyErrors([])
     self._VerifyMockMultiple(
-        (path.join("out", "x64.release", "d7"), "--flag", "run.js"),
-        (path.join("out", "x64.release", "d7"), "--flag", "run.js"),
-        (path.join("out", "x64.release", "d8"), "--flag", "run.js"),
-        (path.join("out", "x64.release", "d8"), "--flag", "run.js"),
-        (path.join("out", "x64.release", "d8"), "--flag", "run.js"),
-        (path.join("out", "x64.release", "d8"), "--flag", "--flag2", "run.js"))
+        (os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js'),
+        (os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js'),
+        (os.path.join('out', 'x64.release', 'd8'), '--flag', 'run.js'),
+        (os.path.join('out', 'x64.release', 'd8'), '--flag', 'run.js'),
+        (os.path.join('out', 'x64.release', 'd8'), '--flag', 'run.js'),
+        (os.path.join('out', 'x64.release', 'd8'),
+         '--flag', '--flag2', 'run.js'))
 
   def testOneRunStdDevRegExp(self):
     test_input = dict(V8_JSON)
-    test_input["stddev_regexp"] = "^%s\-stddev: (.+)$"
+    test_input['stddev_regexp'] = '^%s\-stddev: (.+)$'
     self._WriteTestInput(test_input)
-    self._MockCommand(["."], ["Richards: 1.234\nRichards-stddev: 0.23\n"
-                              "DeltaBlue: 10657567\nDeltaBlue-stddev: 106\n"])
-    self.assertEquals(0, self._CallMain())
-    self._VerifyResults("test", "score", [
-      {"name": "Richards", "results": ["1.234"], "stddev": "0.23"},
-      {"name": "DeltaBlue", "results": ["10657567.0"], "stddev": "106"},
+    self._MockCommand(['.'], ['Richards: 1.234\nRichards-stddev: 0.23\n'
+                              'DeltaBlue: 10657567\nDeltaBlue-stddev: 106\n'])
+    self.assertEqual(0, self._CallMain())
+    self._VerifyResults('test', 'score', [
+      {'name': 'Richards', 'results': [1.234], 'stddev': '0.23'},
+      {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': '106'},
     ])
     self._VerifyErrors([])
-    self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
+    self._VerifyMock(
+        os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js')
 
   def testTwoRunsStdDevRegExp(self):
     test_input = dict(V8_JSON)
-    test_input["stddev_regexp"] = "^%s\-stddev: (.+)$"
-    test_input["run_count"] = 2
+    test_input['stddev_regexp'] = '^%s\-stddev: (.+)$'
+    test_input['run_count'] = 2
     self._WriteTestInput(test_input)
-    self._MockCommand(["."], ["Richards: 3\nRichards-stddev: 0.7\n"
-                              "DeltaBlue: 6\nDeltaBlue-boom: 0.9\n",
-                              "Richards: 2\nRichards-stddev: 0.5\n"
-                              "DeltaBlue: 5\nDeltaBlue-stddev: 0.8\n"])
-    self.assertEquals(1, self._CallMain())
-    self._VerifyResults("test", "score", [
-      {"name": "Richards", "results": ["2.0", "3.0"], "stddev": "0.7"},
-      {"name": "DeltaBlue", "results": ["5.0", "6.0"], "stddev": "0.8"},
+    self._MockCommand(['.'], ['Richards: 3\nRichards-stddev: 0.7\n'
+                              'DeltaBlue: 6\nDeltaBlue-boom: 0.9\n',
+                              'Richards: 2\nRichards-stddev: 0.5\n'
+                              'DeltaBlue: 5\nDeltaBlue-stddev: 0.8\n'])
+    self.assertEqual(1, self._CallMain())
+    self._VerifyResults('test', 'score', [
+      {'name': 'Richards', 'results': [2.0, 3.0], 'stddev': '0.7'},
+      {'name': 'DeltaBlue', 'results': [5.0, 6.0], 'stddev': '0.8'},
     ])
     self._VerifyErrors(
-        ["Test test/Richards should only run once since a stddev is provided "
-         "by the test.",
-         "Test test/DeltaBlue should only run once since a stddev is provided "
-         "by the test.",
-         "Regexp \"^DeltaBlue\-stddev: (.+)$\" didn't match for test "
-         "test/DeltaBlue."])
-    self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
+        ['Test test/Richards should only run once since a stddev is provided '
+         'by the test.',
+         'Test test/DeltaBlue should only run once since a stddev is provided '
+         'by the test.',
+         'Regexp "^DeltaBlue\-stddev: (.+)$" did not match for test '
+         'test/DeltaBlue.'])
+    self._VerifyMock(
+        os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js')
 
   def testBuildbot(self):
     self._WriteTestInput(V8_JSON)
-    self._MockCommand(["."], ["Richards: 1.234\nDeltaBlue: 10657567\n"])
-    self.assertEquals(0, self._CallMain("--buildbot"))
-    self._VerifyResults("test", "score", [
-      {"name": "Richards", "results": ["1.234"], "stddev": ""},
-      {"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
+    self._MockCommand(['.'], ['Richards: 1.234\nDeltaBlue: 10657567\n'])
+    mock.patch.object(
+        run_perf.Platform, 'ReadBuildConfig',
+        mock.MagicMock(return_value={'is_android': False})).start()
+    self.assertEqual(0, self._CallMain('--buildbot'))
+    self._VerifyResults('test', 'score', [
+      {'name': 'Richards', 'results': [1.234], 'stddev': ''},
+      {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
     ])
     self._VerifyErrors([])
-    self._VerifyMock(path.join("out", "Release", "d7"), "--flag", "run.js")
+    self._VerifyMock(os.path.join('out', 'Release', 'd7'), '--flag', 'run.js')
 
   def testBuildbotWithTotal(self):
     test_input = dict(V8_JSON)
-    test_input["total"] = True
+    test_input['total'] = True
     self._WriteTestInput(test_input)
-    self._MockCommand(["."], ["Richards: 1.234\nDeltaBlue: 10657567\n"])
-    self.assertEquals(0, self._CallMain("--buildbot"))
-    self._VerifyResults("test", "score", [
-      {"name": "Richards", "results": ["1.234"], "stddev": ""},
-      {"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
-      {"name": "Total", "results": ["3626.49109719"], "stddev": ""},
+    self._MockCommand(['.'], ['Richards: 1.234\nDeltaBlue: 10657567\n'])
+    mock.patch.object(
+        run_perf.Platform, 'ReadBuildConfig',
+        mock.MagicMock(return_value={'is_android': False})).start()
+    self.assertEqual(0, self._CallMain('--buildbot'))
+    self._VerifyResults('test', 'score', [
+      {'name': 'Richards', 'results': [1.234], 'stddev': ''},
+      {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
+      {'name': 'Total', 'results': [3626.491097190233], 'stddev': ''},
     ])
     self._VerifyErrors([])
-    self._VerifyMock(path.join("out", "Release", "d7"), "--flag", "run.js")
+    self._VerifyMock(os.path.join('out', 'Release', 'd7'), '--flag', 'run.js')
 
   def testBuildbotWithTotalAndErrors(self):
     test_input = dict(V8_JSON)
-    test_input["total"] = True
+    test_input['total'] = True
     self._WriteTestInput(test_input)
-    self._MockCommand(["."], ["x\nRichards: bla\nDeltaBlue: 10657567\ny\n"])
-    self.assertEquals(1, self._CallMain("--buildbot"))
-    self._VerifyResults("test", "score", [
-      {"name": "Richards", "results": [], "stddev": ""},
-      {"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
+    self._MockCommand(['.'], ['x\nRichards: bla\nDeltaBlue: 10657567\ny\n'])
+    mock.patch.object(
+        run_perf.Platform, 'ReadBuildConfig',
+        mock.MagicMock(return_value={'is_android': False})).start()
+    self.assertEqual(1, self._CallMain('--buildbot'))
+    self._VerifyResults('test', 'score', [
+      {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
     ])
     self._VerifyErrors(
-        ["Regexp \"^Richards: (.+)$\" "
-         "returned a non-numeric for test test/Richards.",
-         "Not all traces have the same number of results."])
-    self._VerifyMock(path.join("out", "Release", "d7"), "--flag", "run.js")
+        ['Regexp "^Richards: (.+)$" '
+         'returned a non-numeric for test test/Richards.',
+         'Not all traces have produced results. Can not compute total for '
+         'test.'])
+    self._VerifyMock(os.path.join('out', 'Release', 'd7'), '--flag', 'run.js')
 
   def testRegexpNoMatch(self):
     self._WriteTestInput(V8_JSON)
-    self._MockCommand(["."], ["x\nRichaards: 1.234\nDeltaBlue: 10657567\ny\n"])
-    self.assertEquals(1, self._CallMain())
-    self._VerifyResults("test", "score", [
-      {"name": "Richards", "results": [], "stddev": ""},
-      {"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
+    self._MockCommand(['.'], ['x\nRichaards: 1.234\nDeltaBlue: 10657567\ny\n'])
+    self.assertEqual(1, self._CallMain())
+    self._VerifyResults('test', 'score', [
+      {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
     ])
     self._VerifyErrors(
-        ["Regexp \"^Richards: (.+)$\" didn't match for test test/Richards."])
-    self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
+        ['Regexp "^Richards: (.+)$" did not match for test test/Richards.'])
+    self._VerifyMock(
+        os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js')
 
-  def testOneRunGeneric(self):
-    test_input = dict(V8_GENERIC_JSON)
+  def testOneRunCrashed(self):
+    test_input = dict(V8_JSON)
+    test_input['retry_count'] = 1
     self._WriteTestInput(test_input)
-    self._MockCommand(["."], [
-      "RESULT Infra: Constant1= 11 count\n"
-      "RESULT Infra: Constant2= [10,5,10,15] count\n"
-      "RESULT Infra: Constant3= {12,1.2} count\n"
-      "RESULT Infra: Constant4= [10,5,error,15] count\n"])
-    self.assertEquals(1, self._CallMain())
-    self.assertEquals([
-      {"units": "count",
-       "graphs": ["test", "Infra", "Constant1"],
-       "results": ["11.0"],
-       "stddev": ""},
-      {"units": "count",
-       "graphs": ["test", "Infra", "Constant2"],
-       "results": ["10.0", "5.0", "10.0", "15.0"],
-       "stddev": ""},
-      {"units": "count",
-       "graphs": ["test", "Infra", "Constant3"],
-       "results": ["12.0"],
-       "stddev": "1.2"},
-      {"units": "count",
-       "graphs": ["test", "Infra", "Constant4"],
-       "results": [],
-       "stddev": ""},
-      ], self._LoadResults()["traces"])
-    self._VerifyErrors(["Found non-numeric in test/Infra/Constant4"])
-    self._VerifyMock(path.join("out", "x64.release", "cc"), "--flag", "")
+    self._MockCommand(
+        ['.'], ['x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n', ''],
+        exit_code=-1)
+    self.assertEqual(1, self._CallMain())
+    self._VerifyResults('test', 'score', [])
+    self._VerifyErrors([])
+    self._VerifyMock(
+        os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js')
 
   def testOneRunTimingOut(self):
     test_input = dict(V8_JSON)
-    test_input["timeout"] = 70
+    test_input['timeout'] = 70
+    test_input['retry_count'] = 0
     self._WriteTestInput(test_input)
-    self._MockCommand(["."], [""], timed_out=True)
-    self.assertEquals(1, self._CallMain())
-    self._VerifyResults("test", "score", [
-      {"name": "Richards", "results": [], "stddev": ""},
-      {"name": "DeltaBlue", "results": [], "stddev": ""},
-    ])
-    self._VerifyErrors([
-      "Regexp \"^Richards: (.+)$\" didn't match for test test/Richards.",
-      "Regexp \"^DeltaBlue: (.+)$\" didn't match for test test/DeltaBlue.",
-    ])
-    self._VerifyMock(
-        path.join("out", "x64.release", "d7"), "--flag", "run.js", timeout=70)
+    self._MockCommand(['.'], [''], timed_out=True)
+    self.assertEqual(1, self._CallMain())
+    self._VerifyResults('test', 'score', [])
+    self._VerifyErrors([])
+    self._VerifyMock(os.path.join('out', 'x64.release', 'd7'),
+                     '--flag', 'run.js', timeout=70)
 
-  # Simple test that mocks out the android platform. Testing the platform would
-  # require lots of complicated mocks for the android tools.
   def testAndroid(self):
     self._WriteTestInput(V8_JSON)
-    # FIXME(machenbach): This is not test-local!
-    platform = run_perf.AndroidPlatform
-    platform.PreExecution = MagicMock(return_value=None)
-    platform.PostExecution = MagicMock(return_value=None)
-    platform.PreTests = MagicMock(return_value=None)
-    platform.Run = MagicMock(
-        return_value=("Richards: 1.234\nDeltaBlue: 10657567\n", None))
-    run_perf.AndroidPlatform = MagicMock(return_value=platform)
-    self.assertEquals(
-        0, self._CallMain("--android-build-tools", "/some/dir",
-                          "--arch", "arm"))
-    self._VerifyResults("test", "score", [
-      {"name": "Richards", "results": ["1.234"], "stddev": ""},
-      {"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
+    mock.patch('run_perf.AndroidPlatform.PreExecution').start()
+    mock.patch('run_perf.AndroidPlatform.PostExecution').start()
+    mock.patch('run_perf.AndroidPlatform.PreTests').start()
+    mock.patch(
+        'run_perf.AndroidPlatform.Run',
+        return_value=(Output(stdout='Richards: 1.234\nDeltaBlue: 10657567\n'),
+                      NULL_OUTPUT)).start()
+    mock.patch('testrunner.local.android._Driver', autospec=True).start()
+    mock.patch(
+        'run_perf.Platform.ReadBuildConfig',
+        return_value={'is_android': True}).start()
+    self.assertEqual(0, self._CallMain('--arch', 'arm'))
+    self._VerifyResults('test', 'score', [
+      {'name': 'Richards', 'results': [1.234], 'stddev': ''},
+      {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
     ])
 
   def testTwoRuns_Trybot(self):
     test_input = dict(V8_JSON)
-    test_input["run_count"] = 2
+    test_input['run_count'] = 2
     self._WriteTestInput(test_input)
-    self._MockCommand([".", ".", ".", "."],
-                      ["Richards: 100\nDeltaBlue: 200\n",
-                       "Richards: 200\nDeltaBlue: 20\n",
-                       "Richards: 50\nDeltaBlue: 200\n",
-                       "Richards: 100\nDeltaBlue: 20\n"])
-    test_output_secondary = path.join(TEST_WORKSPACE, "results_secondary.json")
-    self.assertEquals(0, self._CallMain(
-        "--outdir-secondary", "out-secondary",
-        "--json-test-results-secondary", test_output_secondary,
+    self._MockCommand(['.', '.', '.', '.'],
+                      ['Richards: 100\nDeltaBlue: 200\n',
+                       'Richards: 200\nDeltaBlue: 20\n',
+                       'Richards: 50\nDeltaBlue: 200\n',
+                       'Richards: 100\nDeltaBlue: 20\n'])
+    test_output_secondary = os.path.join(
+        TEST_WORKSPACE, 'results_secondary.json')
+    self.assertEqual(0, self._CallMain(
+        '--outdir-secondary', 'out-secondary',
+        '--json-test-results-secondary', test_output_secondary,
     ))
-    self._VerifyResults("test", "score", [
-      {"name": "Richards", "results": ["100.0", "200.0"], "stddev": ""},
-      {"name": "DeltaBlue", "results": ["20.0", "20.0"], "stddev": ""},
+    self._VerifyResults('test', 'score', [
+      {'name': 'Richards', 'results': [100.0, 200.0], 'stddev': ''},
+      {'name': 'DeltaBlue', 'results': [20.0, 20.0], 'stddev': ''},
     ])
-    self._VerifyResults("test", "score", [
-      {"name": "Richards", "results": ["50.0", "100.0"], "stddev": ""},
-      {"name": "DeltaBlue", "results": ["200.0", "200.0"], "stddev": ""},
+    self._VerifyResults('test', 'score', [
+      {'name': 'Richards', 'results': [50.0, 100.0], 'stddev': ''},
+      {'name': 'DeltaBlue', 'results': [200.0, 200.0], 'stddev': ''},
     ], test_output_secondary)
+    self._VerifyRunnableDurations(2, 60, test_output_secondary)
     self._VerifyErrors([])
     self._VerifyMockMultiple(
-        (path.join("out", "x64.release", "d7"), "--flag", "run.js"),
-        (path.join("out-secondary", "x64.release", "d7"), "--flag", "run.js"),
-        (path.join("out", "x64.release", "d7"), "--flag", "run.js"),
-        (path.join("out-secondary", "x64.release", "d7"), "--flag", "run.js"),
+        (os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js'),
+        (os.path.join('out-secondary', 'x64.release', 'd7'),
+         '--flag', 'run.js'),
+        (os.path.join('out', 'x64.release', 'd7'), '--flag', 'run.js'),
+        (os.path.join('out-secondary', 'x64.release', 'd7'),
+         '--flag', 'run.js'),
     )
 
   def testWrongBinaryWithProf(self):
     test_input = dict(V8_JSON)
     self._WriteTestInput(test_input)
-    self._MockCommand(["."], ["x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n"])
-    self.assertEquals(0, self._CallMain("--extra-flags=--prof"))
-    self._VerifyResults("test", "score", [
-      {"name": "Richards", "results": ["1.234"], "stddev": ""},
-      {"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
+    self._MockCommand(['.'], ['x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n'])
+    self.assertEqual(0, self._CallMain('--extra-flags=--prof'))
+    self._VerifyResults('test', 'score', [
+      {'name': 'Richards', 'results': [1.234], 'stddev': ''},
+      {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
     ])
     self._VerifyErrors([])
-    self._VerifyMock(path.join("out", "x64.release", "d7"),
-                     "--flag", "--prof", "run.js")
-
-  def testUnzip(self):
-    def Gen():
-      for i in [1, 2, 3]:
-        yield i, i + 1
-    l, r = run_perf.Unzip(Gen())
-    self.assertEquals([1, 2, 3], list(l()))
-    self.assertEquals([2, 3, 4], list(r()))
+    self._VerifyMock(os.path.join('out', 'x64.release', 'd7'),
+                     '--flag', '--prof', 'run.js')
 
   #############################################################################
   ### System tests
 
   def _RunPerf(self, mocked_d8, test_json):
-    output_json = path.join(TEST_WORKSPACE, "output.json")
+    output_json = os.path.join(TEST_WORKSPACE, 'output.json')
     args = [
-      sys.executable, RUN_PERF,
-      "--binary-override-path", os.path.join(TEST_DATA, mocked_d8),
-      "--json-test-results", output_json,
+      os.sys.executable, RUN_PERF,
+      '--binary-override-path', os.path.join(TEST_DATA, mocked_d8),
+      '--json-test-results', output_json,
       os.path.join(TEST_DATA, test_json),
     ]
     subprocess.check_output(args)
     return self._LoadResults(output_json)
 
   def testNormal(self):
-    results = self._RunPerf("d8_mocked1.py", "test1.json")
-    self.assertEquals([], results['errors'])
-    self.assertEquals([
+    results = self._RunPerf('d8_mocked1.py', 'test1.json')
+    self.assertListEqual([], results['errors'])
+    self.assertListEqual(sorted([
       {
         'units': 'score',
         'graphs': ['test1', 'Richards'],
-        'results': [u'1.2', u'1.2'],
+        'results': [1.2, 1.2],
         'stddev': '',
       },
       {
         'units': 'score',
         'graphs': ['test1', 'DeltaBlue'],
-        'results': [u'2.1', u'2.1'],
+        'results': [2.1, 2.1],
         'stddev': '',
       },
-    ], results['traces'])
+    ]), sorted(results['traces']))
 
   def testResultsProcessor(self):
-    results = self._RunPerf("d8_mocked2.py", "test2.json")
-    self.assertEquals([], results['errors'])
-    self.assertEquals([
+    results = self._RunPerf('d8_mocked2.py', 'test2.json')
+    self.assertListEqual([], results['errors'])
+    self.assertListEqual([
       {
         'units': 'score',
         'graphs': ['test2', 'Richards'],
-        'results': [u'1.2', u'1.2'],
+        'results': [1.2, 1.2],
         'stddev': '',
       },
       {
         'units': 'score',
         'graphs': ['test2', 'DeltaBlue'],
-        'results': [u'2.1', u'2.1'],
+        'results': [2.1, 2.1],
         'stddev': '',
       },
     ], results['traces'])
 
   def testResultsProcessorNested(self):
-    results = self._RunPerf("d8_mocked2.py", "test3.json")
-    self.assertEquals([], results['errors'])
-    self.assertEquals([
+    results = self._RunPerf('d8_mocked2.py', 'test3.json')
+    self.assertListEqual([], results['errors'])
+    self.assertListEqual([
       {
         'units': 'score',
         'graphs': ['test3', 'Octane', 'Richards'],
-        'results': [u'1.2'],
+        'results': [1.2],
         'stddev': '',
       },
       {
         'units': 'score',
         'graphs': ['test3', 'Octane', 'DeltaBlue'],
-        'results': [u'2.1'],
+        'results': [2.1],
         'stddev': '',
       },
     ], results['traces'])
diff --git a/src/v8/tools/unittests/run_tests_test.py b/src/v8/tools/unittests/run_tests_test.py
index f4ff3fe..93b10f5 100755
--- a/src/v8/tools/unittests/run_tests_test.py
+++ b/src/v8/tools/unittests/run_tests_test.py
@@ -17,6 +17,9 @@
 # TODO(machenbach): Coverage data from multiprocessing doesn't work.
 # TODO(majeski): Add some tests for the fuzzers.
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import collections
 import contextlib
 import json
@@ -101,8 +104,9 @@
     sys_args = ['--command-prefix', sys.executable] + list(args)
     if kwargs.get('infra_staging', False):
       sys_args.append('--infra-staging')
-    code = standard_runner.StandardTestRunner(
-        basedir=basedir).execute(sys_args)
+    else:
+      sys_args.append('--no-infra-staging')
+    code = standard_runner.StandardTestRunner(basedir=basedir).execute(sys_args)
     return Result(stdout.getvalue(), stderr.getvalue(), code)
 
 
@@ -125,7 +129,7 @@
       import coverage
       if int(coverage.__version__.split('.')[0]) < 4:
         cls._cov = None
-        print 'Python coverage version >= 4 required.'
+        print('Python coverage version >= 4 required.')
         raise ImportError()
       cls._cov = coverage.Coverage(
           source=([os.path.join(TOOLS_ROOT, 'testrunner')]),
@@ -141,19 +145,23 @@
       cls._cov.exclude('assert False')
       cls._cov.start()
     except ImportError:
-      print 'Running without python coverage.'
+      print('Running without python coverage.')
     sys.path.append(TOOLS_ROOT)
     global standard_runner
     from testrunner import standard_runner
+    global num_fuzzer
+    from testrunner import num_fuzzer
+    from testrunner.local import command
     from testrunner.local import pool
+    command.setup_testing()
     pool.setup_testing()
 
   @classmethod
   def tearDownClass(cls):
     if cls._cov:
       cls._cov.stop()
-      print ''
-      print cls._cov.report(show_missing=True)
+      print('')
+      print(cls._cov.report(show_missing=True))
 
   def testPass(self):
     """Test running only passing tests in two variants.
@@ -170,10 +178,10 @@
           'sweet/bananas',
           'sweet/raspberries',
       )
-      self.assertIn('Running 4 tests', result.stdout, result)
-      self.assertIn('Done running sweet/bananas: pass', result.stdout, result)
-      self.assertIn('Total time:', result.stderr, result)
-      self.assertIn('sweet/bananas', result.stderr, result)
+      self.assertIn('Done running sweet/bananas default: pass', result.stdout, result)
+      # TODO(majeski): Implement for test processors
+      # self.assertIn('Total time:', result.stderr, result)
+      # self.assertIn('sweet/bananas', result.stderr, result)
       self.assertEqual(0, result.returncode, result)
 
   def testShardedProc(self):
@@ -186,19 +194,26 @@
             '--variants=default,stress',
             '--shard-count=2',
             '--shard-run=%d' % shard,
-            'sweet/bananas',
+            'sweet/blackberries',
             'sweet/raspberries',
-            infra_staging=True,
+            infra_staging=False,
         )
         # One of the shards gets one variant of each test.
-        self.assertIn('Running 1 base tests', result.stdout, result)
         self.assertIn('2 tests ran', result.stdout, result)
         if shard == 1:
-          self.assertIn('Done running sweet/bananas', result.stdout, result)
+          self.assertIn(
+            'Done running sweet/raspberries default', result.stdout, result)
+          self.assertIn(
+            'Done running sweet/raspberries stress', result.stdout, result)
+          self.assertEqual(0, result.returncode, result)
         else:
-          self.assertIn('Done running sweet/raspberries', result.stdout, result)
-        self.assertEqual(0, result.returncode, result)
+          self.assertIn(
+            'sweet/blackberries default: FAIL', result.stdout, result)
+          self.assertIn(
+            'sweet/blackberries stress: FAIL', result.stdout, result)
+          self.assertEqual(1, result.returncode, result)
 
+  @unittest.skip("incompatible with test processors")
   def testSharded(self):
     """Test running a particular shard."""
     with temp_base() as basedir:
@@ -219,10 +234,7 @@
         self.assertIn('Done running sweet/raspberries', result.stdout, result)
         self.assertEqual(0, result.returncode, result)
 
-  def testFailProc(self):
-    self.testFail(infra_staging=True)
-
-  def testFail(self, infra_staging=False):
+  def testFail(self):
     """Test running only failing tests in two variants."""
     with temp_base() as basedir:
       result = run_tests(
@@ -231,17 +243,13 @@
           '--progress=verbose',
           '--variants=default,stress',
           'sweet/strawberries',
-          infra_staging=infra_staging,
+          infra_staging=False,
       )
-      if not infra_staging:
-        self.assertIn('Running 2 tests', result.stdout, result)
-      else:
-        self.assertIn('Running 1 base tests', result.stdout, result)
-        self.assertIn('2 tests ran', result.stdout, result)
-      self.assertIn('Done running sweet/strawberries: FAIL', result.stdout, result)
+      self.assertIn('Done running sweet/strawberries default: FAIL', result.stdout, result)
       self.assertEqual(1, result.returncode, result)
 
-  def check_cleaned_json_output(self, expected_results_name, actual_json):
+  def check_cleaned_json_output(
+      self, expected_results_name, actual_json, basedir):
     # Check relevant properties of the json output.
     with open(actual_json) as f:
       json_output = json.load(f)[0]
@@ -254,6 +262,7 @@
       data['duration'] = 1
       data['command'] = ' '.join(
           ['/usr/bin/python'] + data['command'].split()[1:])
+      data['command'] = data['command'].replace(basedir + '/', '')
     for data in json_output['slowest_tests']:
       replace_variable_data(data)
     for data in json_output['results']:
@@ -266,10 +275,7 @@
     msg = None  # Set to pretty_json for bootstrapping.
     self.assertDictEqual(json_output, expected_test_results, msg)
 
-  def testFailWithRerunAndJSONProc(self):
-    self.testFailWithRerunAndJSON(infra_staging=True)
-
-  def testFailWithRerunAndJSON(self, infra_staging=False):
+  def testFailWithRerunAndJSON(self):
     """Test re-running a failing test and output to json."""
     with temp_base() as basedir:
       json_path = os.path.join(basedir, 'out.json')
@@ -282,33 +288,23 @@
           '--random-seed=123',
           '--json-test-results', json_path,
           'sweet/strawberries',
-          infra_staging=infra_staging,
+          infra_staging=False,
       )
-      if not infra_staging:
-        self.assertIn('Running 1 tests', result.stdout, result)
-      else:
-        self.assertIn('Running 1 base tests', result.stdout, result)
-        self.assertIn('1 tests ran', result.stdout, result)
-      self.assertIn('Done running sweet/strawberries: FAIL', result.stdout, result)
-      if not infra_staging:
-        # We run one test, which fails and gets re-run twice.
-        self.assertIn('3 tests failed', result.stdout, result)
-      else:
-        # With test processors we don't count reruns as separated failures.
-        # TODO(majeski): fix it?
-        self.assertIn('1 tests failed', result.stdout, result)
+      self.assertIn('Done running sweet/strawberries default: FAIL', result.stdout, result)
+      # With test processors we don't count reruns as separated failures.
+      # TODO(majeski): fix it?
+      self.assertIn('1 tests failed', result.stdout, result)
       self.assertEqual(0, result.returncode, result)
 
       # TODO(majeski): Previously we only reported the variant flags in the
       # flags field of the test result.
       # After recent changes we report all flags, including the file names.
       # This is redundant to the command. Needs investigation.
-      self.check_cleaned_json_output('expected_test_results1.json', json_path)
+      self.maxDiff = None
+      self.check_cleaned_json_output(
+          'expected_test_results1.json', json_path, basedir)
 
-  def testFlakeWithRerunAndJSONProc(self):
-    self.testFlakeWithRerunAndJSON(infra_staging=True)
-
-  def testFlakeWithRerunAndJSON(self, infra_staging=False):
+  def testFlakeWithRerunAndJSON(self):
     """Test re-running a failing test and output to json."""
     with temp_base(baseroot='testroot2') as basedir:
       json_path = os.path.join(basedir, 'out.json')
@@ -321,20 +317,15 @@
           '--random-seed=123',
           '--json-test-results', json_path,
           'sweet',
-          infra_staging=infra_staging,
+          infra_staging=False,
       )
-      if not infra_staging:
-        self.assertIn('Running 1 tests', result.stdout, result)
-        self.assertIn(
-            'Done running sweet/bananaflakes: FAIL', result.stdout, result)
-        self.assertIn('1 tests failed', result.stdout, result)
-      else:
-        self.assertIn('Running 1 base tests', result.stdout, result)
-        self.assertIn(
-            'Done running sweet/bananaflakes: pass', result.stdout, result)
-        self.assertIn('All tests succeeded', result.stdout, result)
+      self.assertIn(
+        'Done running sweet/bananaflakes default: pass', result.stdout, result)
+      self.assertIn('All tests succeeded', result.stdout, result)
       self.assertEqual(0, result.returncode, result)
-      self.check_cleaned_json_output('expected_test_results2.json', json_path)
+      self.maxDiff = None
+      self.check_cleaned_json_output(
+          'expected_test_results2.json', json_path, basedir)
 
   def testAutoDetect(self):
     """Fake a build with several auto-detected options.
@@ -347,7 +338,9 @@
           basedir, dcheck_always_on=True, is_asan=True, is_cfi=True,
           is_msan=True, is_tsan=True, is_ubsan_vptr=True, target_cpu='x86',
           v8_enable_i18n_support=False, v8_target_cpu='x86',
-          v8_use_snapshot=False)
+          v8_use_snapshot=False, v8_enable_embedded_builtins=False,
+          v8_enable_verify_csa=False, v8_enable_lite_mode=False,
+          v8_enable_pointer_compression=False)
       result = run_tests(
           basedir,
           '--mode=Release',
@@ -371,10 +364,7 @@
       # TODO(machenbach): Test some more implications of the auto-detected
       # options, e.g. that the right env variables are set.
 
-  def testSkipsProc(self):
-    self.testSkips(infra_staging=True)
-
-  def testSkips(self, infra_staging=False):
+  def testSkips(self):
     """Test skipping tests in status file for a specific variant."""
     with temp_base() as basedir:
       result = run_tests(
@@ -383,19 +373,27 @@
           '--progress=verbose',
           '--variants=nooptimization',
           'sweet/strawberries',
-          infra_staging=infra_staging,
+          infra_staging=False,
       )
-      if not infra_staging:
-        self.assertIn('Running 0 tests', result.stdout, result)
-      else:
-        self.assertIn('Running 1 base tests', result.stdout, result)
-        self.assertIn('0 tests ran', result.stdout, result)
-      self.assertEqual(0, result.returncode, result)
+      self.assertIn('0 tests ran', result.stdout, result)
+      self.assertEqual(2, result.returncode, result)
 
-  def testDefaultProc(self):
-    self.testDefault(infra_staging=True)
+  def testRunSkips(self):
+    """Inverse the above. Test parameter to keep running skipped tests."""
+    with temp_base() as basedir:
+      result = run_tests(
+          basedir,
+          '--mode=Release',
+          '--progress=verbose',
+          '--variants=nooptimization',
+          '--run-skipped',
+          'sweet/strawberries',
+      )
+      self.assertIn('1 tests failed', result.stdout, result)
+      self.assertIn('1 tests ran', result.stdout, result)
+      self.assertEqual(1, result.returncode, result)
 
-  def testDefault(self, infra_staging=False):
+  def testDefault(self):
     """Test using default test suites, though no tests are run since they don't
     exist in a test setting.
     """
@@ -403,28 +401,17 @@
       result = run_tests(
           basedir,
           '--mode=Release',
-          infra_staging=infra_staging,
+          infra_staging=False,
       )
-      if not infra_staging:
-        self.assertIn('Warning: no tests were run!', result.stdout, result)
-      else:
-        self.assertIn('Running 0 base tests', result.stdout, result)
-        self.assertIn('0 tests ran', result.stdout, result)
-      self.assertEqual(0, result.returncode, result)
+      self.assertIn('0 tests ran', result.stdout, result)
+      self.assertEqual(2, result.returncode, result)
 
   def testNoBuildConfig(self):
     """Test failing run when build config is not found."""
     with temp_base() as basedir:
       result = run_tests(basedir)
       self.assertIn('Failed to load build config', result.stdout, result)
-      self.assertEqual(1, result.returncode, result)
-
-  def testGNOption(self):
-    """Test using gn option, but no gn build folder is found."""
-    with temp_base() as basedir:
-      # TODO(machenbach): This should fail gracefully.
-      with self.assertRaises(OSError):
-        run_tests(basedir, '--gn')
+      self.assertEqual(5, result.returncode, result)
 
   def testInconsistentMode(self):
     """Test failing run when attempting to wrongly override the mode."""
@@ -433,7 +420,7 @@
       result = run_tests(basedir, '--mode=Release')
       self.assertIn('execution mode (release) for release is inconsistent '
                     'with build config (debug)', result.stdout, result)
-      self.assertEqual(1, result.returncode, result)
+      self.assertEqual(5, result.returncode, result)
 
   def testInconsistentArch(self):
     """Test failing run when attempting to wrongly override the arch."""
@@ -442,13 +429,13 @@
       self.assertIn(
           '--arch value (ia32) inconsistent with build config (x64).',
           result.stdout, result)
-      self.assertEqual(1, result.returncode, result)
+      self.assertEqual(5, result.returncode, result)
 
   def testWrongVariant(self):
     """Test using a bogus variant."""
     with temp_base() as basedir:
       result = run_tests(basedir, '--mode=Release', '--variants=meh')
-      self.assertEqual(1, result.returncode, result)
+      self.assertEqual(5, result.returncode, result)
 
   def testModeFromBuildConfig(self):
     """Test auto-detection of mode from build config."""
@@ -457,6 +444,7 @@
       self.assertIn('Running tests for x64.release', result.stdout, result)
       self.assertEqual(0, result.returncode, result)
 
+  @unittest.skip("not available with test processors")
   def testReport(self):
     """Test the report feature.
 
@@ -475,6 +463,7 @@
           result.stdout, result)
       self.assertEqual(1, result.returncode, result)
 
+  @unittest.skip("not available with test processors")
   def testWarnUnusedRules(self):
     """Test the unused-rules feature."""
     with temp_base() as basedir:
@@ -489,6 +478,7 @@
       self.assertIn( 'Unused rule: regress/', result.stdout, result)
       self.assertEqual(1, result.returncode, result)
 
+  @unittest.skip("not available with test processors")
   def testCatNoSources(self):
     """Test printing sources, but the suite's tests have none available."""
     with temp_base() as basedir:
@@ -503,10 +493,7 @@
       self.assertIn('(no source available)', result.stdout, result)
       self.assertEqual(0, result.returncode, result)
 
-  def testPredictableProc(self):
-    self.testPredictable(infra_staging=True)
-
-  def testPredictable(self, infra_staging=False):
+  def testPredictable(self):
     """Test running a test in verify-predictable mode.
 
     The test will fail because of missing allocation output. We verify that and
@@ -520,16 +507,13 @@
           '--progress=verbose',
           '--variants=default',
           'sweet/bananas',
-          infra_staging=infra_staging,
+          infra_staging=False,
       )
-      if not infra_staging:
-        self.assertIn('Running 1 tests', result.stdout, result)
-      else:
-        self.assertIn('Running 1 base tests', result.stdout, result)
-        self.assertIn('1 tests ran', result.stdout, result)
-      self.assertIn('Done running sweet/bananas: FAIL', result.stdout, result)
+      self.assertIn('1 tests ran', result.stdout, result)
+      self.assertIn(
+        'Done running sweet/bananas default: FAIL', result.stdout, result)
       self.assertIn('Test had no allocation output', result.stdout, result)
-      self.assertIn('--predictable --verify_predictable', result.stdout, result)
+      self.assertIn('--predictable --verify-predictable', result.stdout, result)
       self.assertEqual(1, result.returncode, result)
 
   def testSlowArch(self):
@@ -557,8 +541,9 @@
           '--variants=default',
           '--random-seed-stress-count=2',
           'sweet/bananas',
+          infra_staging=False,
       )
-      self.assertIn('Running 2 tests', result.stdout, result)
+      self.assertIn('2 tests ran', result.stdout, result)
       self.assertEqual(0, result.returncode, result)
 
   def testRandomSeedStressWithSeed(self):
@@ -573,7 +558,7 @@
           '--random-seed=123',
           'sweet/strawberries',
       )
-      self.assertIn('Running 2 tests', result.stdout, result)
+      self.assertIn('2 tests ran', result.stdout, result)
       # We use a failing test so that the command is printed and we can verify
       # that the right random seed was passed.
       self.assertIn('--random-seed=123', result.stdout, result)
@@ -598,7 +583,7 @@
       )
       # Both tests are either marked as running in only default or only
       # slow variant.
-      self.assertIn('Running 2 tests', result.stdout, result)
+      self.assertIn('2 tests ran', result.stdout, result)
       self.assertEqual(0, result.returncode, result)
 
   def testStatusFilePresubmit(self):
@@ -608,10 +593,7 @@
       self.assertTrue(statusfile.PresubmitCheck(
           os.path.join(basedir, 'test', 'sweet', 'sweet.status')))
 
-  def testDotsProgressProc(self):
-    self.testDotsProgress(infra_staging=True)
-
-  def testDotsProgress(self, infra_staging=False):
+  def testDotsProgress(self):
     with temp_base() as basedir:
       result = run_tests(
           basedir,
@@ -620,29 +602,19 @@
           'sweet/cherries',
           'sweet/bananas',
           '--no-sorting', '-j1', # make results order deterministic
-          infra_staging=infra_staging,
+          infra_staging=False,
       )
-      if not infra_staging:
-        self.assertIn('Running 2 tests', result.stdout, result)
-      else:
-        self.assertIn('Running 2 base tests', result.stdout, result)
-        self.assertIn('2 tests ran', result.stdout, result)
+      self.assertIn('2 tests ran', result.stdout, result)
       self.assertIn('F.', result.stdout, result)
       self.assertEqual(1, result.returncode, result)
 
-  def testMonoProgressProc(self):
-    self._testCompactProgress('mono', True)
-
   def testMonoProgress(self):
-    self._testCompactProgress('mono', False)
-
-  def testColorProgressProc(self):
-    self._testCompactProgress('color', True)
+    self._testCompactProgress('mono')
 
   def testColorProgress(self):
-    self._testCompactProgress('color', False)
+    self._testCompactProgress('color')
 
-  def _testCompactProgress(self, name, infra_staging):
+  def _testCompactProgress(self, name):
     with temp_base() as basedir:
       result = run_tests(
           basedir,
@@ -650,18 +622,51 @@
           '--progress=%s' % name,
           'sweet/cherries',
           'sweet/bananas',
-          infra_staging=infra_staging,
+          infra_staging=False,
       )
       if name == 'color':
-        expected = ('\033[34m% 100\033[0m|'
+        expected = ('\033[34m%  28\033[0m|'
                     '\033[32m+   1\033[0m|'
                     '\033[31m-   1\033[0m]: Done')
       else:
-        expected = '% 100|+   1|-   1]: Done'
+        expected = '%  28|+   1|-   1]: Done'
       self.assertIn(expected, result.stdout)
       self.assertIn('sweet/cherries', result.stdout)
       self.assertIn('sweet/bananas', result.stdout)
       self.assertEqual(1, result.returncode, result)
 
+  def testExitAfterNFailures(self):
+    with temp_base() as basedir:
+      result = run_tests(
+          basedir,
+          '--mode=Release',
+          '--progress=verbose',
+          '--exit-after-n-failures=2',
+          '-j1',
+          'sweet/mangoes',       # PASS
+          'sweet/strawberries',  # FAIL
+          'sweet/blackberries',  # FAIL
+          'sweet/raspberries',   # should not run
+      )
+      self.assertIn('sweet/mangoes default: pass', result.stdout, result)
+      self.assertIn('sweet/strawberries default: FAIL', result.stdout, result)
+      self.assertIn('Too many failures, exiting...', result.stdout, result)
+      self.assertIn('sweet/blackberries default: FAIL', result.stdout, result)
+      self.assertNotIn('Done running sweet/raspberries', result.stdout, result)
+      self.assertIn('2 tests failed', result.stdout, result)
+      self.assertIn('3 tests ran', result.stdout, result)
+      self.assertEqual(1, result.returncode, result)
+
+  def testNumFuzzer(self):
+    sys_args = ['--command-prefix', sys.executable, '--outdir', 'out/Release']
+
+    with temp_base() as basedir:
+      with capture() as (stdout, stderr):
+        code = num_fuzzer.NumFuzzer(basedir=basedir).execute(sys_args)
+        result = Result(stdout.getvalue(), stderr.getvalue(), code)
+
+      self.assertEqual(0, result.returncode, result)
+
+
 if __name__ == '__main__':
   unittest.main()
diff --git a/src/v8/tools/unittests/testdata/d8_mocked1.py b/src/v8/tools/unittests/testdata/d8_mocked1.py
index 53405a6..ff330af 100644
--- a/src/v8/tools/unittests/testdata/d8_mocked1.py
+++ b/src/v8/tools/unittests/testdata/d8_mocked1.py
@@ -3,5 +3,8 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-print 'Richards: 1.2'
-print 'DeltaBlue: 2.1'
+# for py2/py3 compatibility
+from __future__ import print_function
+
+print('Richards: 1.2')
+print('DeltaBlue: 2.1')
diff --git a/src/v8/tools/unittests/testdata/d8_mocked2.py b/src/v8/tools/unittests/testdata/d8_mocked2.py
index 71a3d04..3630462 100644
--- a/src/v8/tools/unittests/testdata/d8_mocked2.py
+++ b/src/v8/tools/unittests/testdata/d8_mocked2.py
@@ -3,8 +3,11 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-print 'Richards1: 1'
-print 'DeltaBlue1: 1'
-print 'Richards2: 0.2'
-print 'DeltaBlue2: 1.0'
-print 'DeltaBlue3: 0.1'
+# for py2/py3 compatibility
+from __future__ import print_function
+
+print('Richards1: 1')
+print('DeltaBlue1: 1')
+print('Richards2: 0.2')
+print('DeltaBlue2: 1.0')
+print('DeltaBlue3: 0.1')
diff --git a/src/v8/tools/unittests/testdata/expected_test_results1.json b/src/v8/tools/unittests/testdata/expected_test_results1.json
index 172b87a..31fac89 100644
--- a/src/v8/tools/unittests/testdata/expected_test_results1.json
+++ b/src/v8/tools/unittests/testdata/expected_test_results1.json
@@ -1,107 +1,124 @@
 {
-  "arch": "x64", 
-  "duration_mean": 1, 
-  "mode": "release", 
+  "arch": "x64",
+  "duration_mean": 1,
+  "mode": "release",
   "results": [
     {
-      "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort", 
-      "duration": 1, 
-      "exit_code": 1, 
+      "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
+      "duration": 1,
+      "exit_code": 1,
       "expected": [
         "PASS"
-      ], 
+      ],
       "flags": [
-        "--random-seed=123", 
-        "strawberries", 
-        "--nohard-abort"
-      ], 
-      "name": "sweet/strawberries", 
-      "random_seed": 123, 
-      "result": "FAIL", 
-      "run": 1, 
-      "stderr": "", 
-      "stdout": "--random-seed=123 strawberries --nohard-abort\n", 
-      "target_name": "d8_mocked.py", 
-      "variant": "default"
-    }, 
+        "--test",
+        "strawberries",
+        "--random-seed=123",
+        "--nohard-abort",
+	"--testing-d8-test-runner"
+      ],
+      "framework_name": "standard_runner",
+      "name": "sweet/strawberries",
+      "random_seed": 123,
+      "result": "FAIL",
+      "run": 1,
+      "stderr": "",
+      "stdout": "--test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner\n",
+      "target_name": "d8_mocked.py",
+      "variant": "default",
+      "variant_flags": []
+    },
     {
-      "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort", 
-      "duration": 1, 
-      "exit_code": 1, 
+      "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
+      "duration": 1,
+      "exit_code": 1,
       "expected": [
         "PASS"
-      ], 
+      ],
       "flags": [
-        "--random-seed=123", 
-        "strawberries", 
-        "--nohard-abort"
-      ], 
-      "name": "sweet/strawberries", 
-      "random_seed": 123, 
-      "result": "FAIL", 
-      "run": 2, 
-      "stderr": "", 
-      "stdout": "--random-seed=123 strawberries --nohard-abort\n", 
-      "target_name": "d8_mocked.py", 
-      "variant": "default"
-    }, 
+        "--test",
+        "strawberries",
+        "--random-seed=123",
+        "--nohard-abort",
+	"--testing-d8-test-runner"
+      ],
+      "framework_name": "standard_runner",
+      "name": "sweet/strawberries",
+      "random_seed": 123,
+      "result": "FAIL",
+      "run": 2,
+      "stderr": "",
+      "stdout": "--test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner\n",
+      "target_name": "d8_mocked.py",
+      "variant": "default",
+      "variant_flags": []
+    },
     {
-      "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort", 
-      "duration": 1, 
-      "exit_code": 1, 
+      "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
+      "duration": 1,
+      "exit_code": 1,
       "expected": [
         "PASS"
-      ], 
+      ],
       "flags": [
-        "--random-seed=123", 
-        "strawberries", 
-        "--nohard-abort"
-      ], 
-      "name": "sweet/strawberries", 
-      "random_seed": 123, 
-      "result": "FAIL", 
-      "run": 3, 
-      "stderr": "", 
-      "stdout": "--random-seed=123 strawberries --nohard-abort\n", 
-      "target_name": "d8_mocked.py", 
-      "variant": "default"
+        "--test",
+        "strawberries",
+        "--random-seed=123",
+        "--nohard-abort",
+	"--testing-d8-test-runner"
+      ],
+      "framework_name": "standard_runner",
+      "name": "sweet/strawberries",
+      "random_seed": 123,
+      "result": "FAIL",
+      "run": 3,
+      "stderr": "",
+      "stdout": "--test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner\n",
+      "target_name": "d8_mocked.py",
+      "variant": "default",
+      "variant_flags": []
     }
-  ], 
+  ],
   "slowest_tests": [
     {
-      "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort", 
-      "duration": 1, 
+      "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
+      "duration": 1,
       "flags": [
-        "--random-seed=123", 
-        "strawberries", 
-        "--nohard-abort"
-      ], 
-      "marked_slow": true, 
+        "--test",
+        "strawberries",
+        "--random-seed=123",
+        "--nohard-abort",
+	"--testing-d8-test-runner"
+      ],
+      "marked_slow": true,
       "name": "sweet/strawberries"
-    }, 
+    },
     {
-      "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort", 
-      "duration": 1, 
+      "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
+      "duration": 1,
       "flags": [
-        "--random-seed=123", 
-        "strawberries", 
-        "--nohard-abort"
-      ], 
-      "marked_slow": true, 
+        "--test",
+        "strawberries",
+        "--random-seed=123",
+        "--nohard-abort",
+	"--testing-d8-test-runner"
+      ],
+      "marked_slow": true,
       "name": "sweet/strawberries"
-    }, 
+    },
     {
-      "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort", 
-      "duration": 1, 
+      "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
+      "duration": 1,
       "flags": [
-        "--random-seed=123", 
-        "strawberries", 
-        "--nohard-abort"
-      ], 
-      "marked_slow": true, 
+        "--test",
+        "strawberries",
+        "--random-seed=123",
+        "--nohard-abort",
+	"--testing-d8-test-runner"
+      ],
+      "marked_slow": true,
       "name": "sweet/strawberries"
     }
-  ], 
+  ],
   "test_total": 3
 }
-
diff --git a/src/v8/tools/unittests/testdata/expected_test_results2.json b/src/v8/tools/unittests/testdata/expected_test_results2.json
index 7fcfe47..fd17972 100644
--- a/src/v8/tools/unittests/testdata/expected_test_results2.json
+++ b/src/v8/tools/unittests/testdata/expected_test_results2.json
@@ -1,74 +1,82 @@
 {
-  "arch": "x64", 
-  "duration_mean": 1, 
-  "mode": "release", 
+  "arch": "x64",
+  "duration_mean": 1,
+  "mode": "release",
   "results": [
     {
-      "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 bananaflakes --nohard-abort", 
-      "duration": 1, 
-      "exit_code": 1, 
+      "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner",
+      "duration": 1,
+      "exit_code": 1,
       "expected": [
         "PASS"
-      ], 
+      ],
       "flags": [
-        "--random-seed=123", 
-        "bananaflakes", 
-        "--nohard-abort"
-      ], 
-      "name": "sweet/bananaflakes", 
-      "random_seed": 123, 
-      "result": "FAIL", 
-      "run": 1, 
-      "stderr": "", 
-      "stdout": "--random-seed=123 bananaflakes --nohard-abort\n", 
-      "target_name": "d8_mocked.py", 
-      "variant": "default"
-    }, 
+        "bananaflakes",
+        "--random-seed=123",
+        "--nohard-abort",
+	"--testing-d8-test-runner"
+      ],
+      "framework_name": "standard_runner",
+      "name": "sweet/bananaflakes",
+      "random_seed": 123,
+      "result": "FAIL",
+      "run": 1,
+      "stderr": "",
+      "stdout": "bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner\n",
+      "target_name": "d8_mocked.py",
+      "variant": "default",
+      "variant_flags": []
+    },
     {
-      "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 bananaflakes --nohard-abort", 
-      "duration": 1, 
-      "exit_code": 0, 
+      "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner",
+      "duration": 1,
+      "exit_code": 0,
       "expected": [
         "PASS"
-      ], 
+      ],
       "flags": [
-        "--random-seed=123", 
-        "bananaflakes", 
-        "--nohard-abort"
-      ], 
-      "name": "sweet/bananaflakes", 
-      "random_seed": 123, 
-      "result": "PASS", 
-      "run": 2, 
-      "stderr": "", 
-      "stdout": "--random-seed=123 bananaflakes --nohard-abort\n", 
-      "target_name": "d8_mocked.py", 
-      "variant": "default"
+        "bananaflakes",
+        "--random-seed=123",
+        "--nohard-abort",
+	"--testing-d8-test-runner"
+      ],
+      "framework_name": "standard_runner",
+      "name": "sweet/bananaflakes",
+      "random_seed": 123,
+      "result": "PASS",
+      "run": 2,
+      "stderr": "",
+      "stdout": "bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner\n",
+      "target_name": "d8_mocked.py",
+      "variant": "default",
+      "variant_flags": []
     }
-  ], 
+  ],
   "slowest_tests": [
     {
-      "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 bananaflakes --nohard-abort", 
-      "duration": 1, 
+      "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner",
+      "duration": 1,
       "flags": [
-        "--random-seed=123", 
-        "bananaflakes", 
-        "--nohard-abort"
-      ], 
-      "marked_slow": false, 
+        "bananaflakes",
+        "--random-seed=123",
+        "--nohard-abort",
+	"--testing-d8-test-runner"
+      ],
+      "marked_slow": false,
       "name": "sweet/bananaflakes"
-    }, 
+    },
     {
-      "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 bananaflakes --nohard-abort", 
-      "duration": 1, 
+      "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner",
+      "duration": 1,
       "flags": [
-        "--random-seed=123", 
-        "bananaflakes", 
-        "--nohard-abort"
-      ], 
-      "marked_slow": false, 
+        "bananaflakes",
+        "--random-seed=123",
+        "--nohard-abort",
+	"--testing-d8-test-runner"
+      ],
+      "marked_slow": false,
       "name": "sweet/bananaflakes"
     }
-  ], 
+  ],
   "test_total": 2
 }
diff --git a/src/v8/tools/unittests/testdata/predictable_mocked.py b/src/v8/tools/unittests/testdata/predictable_mocked.py
index cc332c2..b9e73f6 100644
--- a/src/v8/tools/unittests/testdata/predictable_mocked.py
+++ b/src/v8/tools/unittests/testdata/predictable_mocked.py
@@ -3,22 +3,25 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import sys
 
 assert len(sys.argv) == 3
 
 if sys.argv[1] == 'equal':
   # 1. Scenario: print equal allocation hashes.
-  print '### Allocations = 9497, hash = 0xc322c6b0'
+  print('### Allocations = 9497, hash = 0xc322c6b0')
 elif sys.argv[1] == 'differ':
   # 2. Scenario: print different allocation hashes. This prints a different
   # hash on the second run, based on the content of a semaphore file. This
   # file is expected to be empty in the beginning.
   with open(sys.argv[2]) as f:
     if f.read():
-      print '### Allocations = 9497, hash = 0xc322c6b0'
+      print('### Allocations = 9497, hash = 0xc322c6b0')
     else:
-      print '### Allocations = 9497, hash = 0xc322c6b1'
+      print('### Allocations = 9497, hash = 0xc322c6b1')
   with open(sys.argv[2], 'w') as f:
     f.write('something')
 else:
diff --git a/src/v8/tools/unittests/testdata/results_processor.py b/src/v8/tools/unittests/testdata/results_processor.py
index 69c23e3..d8c5ad9 100644
--- a/src/v8/tools/unittests/testdata/results_processor.py
+++ b/src/v8/tools/unittests/testdata/results_processor.py
@@ -7,6 +7,9 @@
 Fake results processor for testing that just sums some things up.
 """
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import fileinput
 import re
 
@@ -21,5 +24,5 @@
   if match:
     deltablue += float(match.group(1))
 
-print 'Richards: %f' % richards
-print 'DeltaBlue: %f' % deltablue
+print('Richards: %f' % richards)
+print('DeltaBlue: %f' % deltablue)
diff --git a/src/v8/tools/unittests/testdata/test1.json b/src/v8/tools/unittests/testdata/test1.json
index 7fa1faa..939d6e2 100644
--- a/src/v8/tools/unittests/testdata/test1.json
+++ b/src/v8/tools/unittests/testdata/test1.json
@@ -1,5 +1,6 @@
 {
   "path": ["."],
+  "owners": ["username@chromium.org"],
   "flags": [],
   "main": "run.js",
   "run_count": 2,
diff --git a/src/v8/tools/unittests/testdata/test2.json b/src/v8/tools/unittests/testdata/test2.json
index 79fed26..632c4e5 100644
--- a/src/v8/tools/unittests/testdata/test2.json
+++ b/src/v8/tools/unittests/testdata/test2.json
@@ -1,5 +1,6 @@
 {
   "path": ["."],
+  "owners": ["username@chromium.org"],
   "flags": [],
   "main": "run.js",
   "run_count": 2,
diff --git a/src/v8/tools/unittests/testdata/test3.json b/src/v8/tools/unittests/testdata/test3.json
index 1b7ef96..3e871de 100644
--- a/src/v8/tools/unittests/testdata/test3.json
+++ b/src/v8/tools/unittests/testdata/test3.json
@@ -1,5 +1,6 @@
 {
   "path": ["."],
+  "owners": ["username@chromium.org"],
   "flags": [],
   "run_count": 1,
   "results_processor": "results_processor.py",
diff --git a/src/v8/tools/unittests/testdata/testroot1/d8_mocked.py b/src/v8/tools/unittests/testdata/testroot1/d8_mocked.py
index c7ca55a..d67e030 100644
--- a/src/v8/tools/unittests/testdata/testroot1/d8_mocked.py
+++ b/src/v8/tools/unittests/testdata/testroot1/d8_mocked.py
@@ -6,10 +6,13 @@
 Dummy d8 replacement. Just passes all test, except if 'berries' is in args.
 """
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import sys
 
 args = ' '.join(sys.argv[1:])
-print args
+print(args)
 # Let all berries fail.
 if 'berries' in args:
   sys.exit(1)
diff --git a/src/v8/tools/unittests/testdata/testroot1/test/sweet/sweet.status b/src/v8/tools/unittests/testdata/testroot1/test/sweet/sweet.status
index 7421463..d823cfd 100644
--- a/src/v8/tools/unittests/testdata/testroot1/test/sweet/sweet.status
+++ b/src/v8/tools/unittests/testdata/testroot1/test/sweet/sweet.status
@@ -6,6 +6,7 @@
 [ALWAYS, {
   'raspberries': FAIL,
   'strawberries': [PASS, ['mode == release', SLOW], ['mode == debug', NO_VARIANTS]],
+  'mangoes': [PASS, SLOW],
 
   # Both cherries and apples are to test how PASS an FAIL from different
   # sections are merged.
diff --git a/src/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py b/src/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py
index 115471a..a2dfc9d 100644
--- a/src/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py
+++ b/src/v8/tools/unittests/testdata/testroot1/test/sweet/testcfg.py
@@ -9,23 +9,28 @@
 from testrunner.local import testsuite
 from testrunner.objects import testcase
 
+class TestLoader(testsuite.TestLoader):
+  def _list_test_filenames(self):
+    return [
+          'bananas', 'apples', 'cherries', 'mangoes', 'strawberries',
+          'blackberries', 'raspberries',
+    ]
+
+
 class TestSuite(testsuite.TestSuite):
-  def ListTests(self, context):
-    return map(
-        self._create_test,
-        ['bananas', 'apples', 'cherries', 'strawberries', 'raspberries'],
-    )
+  def _test_loader_class(self):
+    return TestLoader
 
   def _test_class(self):
     return TestCase
 
 
-class TestCase(testcase.TestCase):
+class TestCase(testcase.D8TestCase):
   def get_shell(self):
     return 'd8_mocked.py'
 
-  def _get_files_params(self, ctx):
+  def _get_files_params(self):
     return [self.name]
 
-def GetSuite(name, root):
-  return TestSuite(name, root)
+def GetSuite(*args, **kwargs):
+  return TestSuite(*args, **kwargs)
diff --git a/src/v8/tools/unittests/testdata/testroot1/v8_build_config.json b/src/v8/tools/unittests/testdata/testroot1/v8_build_config.json
index c5e3ee3..0192fd8 100644
--- a/src/v8/tools/unittests/testdata/testroot1/v8_build_config.json
+++ b/src/v8/tools/unittests/testdata/testroot1/v8_build_config.json
@@ -1,10 +1,13 @@
 {
   "current_cpu": "x64",
   "dcheck_always_on": false,
+  "is_android": false,
   "is_asan": false,
   "is_cfi": false,
+  "is_clang": true,
   "is_component_build": false,
   "is_debug": false,
+  "is_full_debug": false,
   "is_gcov_coverage": false,
   "is_ubsan_vptr": false,
   "is_msan": false,
@@ -14,5 +17,9 @@
   "v8_enable_i18n_support": true,
   "v8_enable_verify_predictable": false,
   "v8_target_cpu": "x64",
-  "v8_use_snapshot": true
+  "v8_use_snapshot": true,
+  "v8_enable_embedded_builtins": false,
+  "v8_enable_verify_csa": false,
+  "v8_enable_lite_mode": false,
+  "v8_enable_pointer_compression": true
 }
diff --git a/src/v8/tools/unittests/testdata/testroot2/d8_mocked.py b/src/v8/tools/unittests/testdata/testroot2/d8_mocked.py
index e66e299..48d6bce 100644
--- a/src/v8/tools/unittests/testdata/testroot2/d8_mocked.py
+++ b/src/v8/tools/unittests/testdata/testroot2/d8_mocked.py
@@ -6,12 +6,15 @@
 Dummy d8 replacement for flaky tests.
 """
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import os
 import sys
 
 PATH = os.path.dirname(os.path.abspath(__file__))
 
-print ' '.join(sys.argv[1:])
+print(' '.join(sys.argv[1:]))
 
 # Test files ending in 'flakes' should first fail then pass. We store state in
 # a file side by side with the executable. No clean-up required as all tests
diff --git a/src/v8/tools/unittests/testdata/testroot2/test/sweet/testcfg.py b/src/v8/tools/unittests/testdata/testroot2/test/sweet/testcfg.py
index 9407769..3606cd3 100644
--- a/src/v8/tools/unittests/testdata/testroot2/test/sweet/testcfg.py
+++ b/src/v8/tools/unittests/testdata/testroot2/test/sweet/testcfg.py
@@ -9,12 +9,15 @@
 from testrunner.local import testsuite
 from testrunner.objects import testcase
 
+
+class TestLoader(testsuite.TestLoader):
+  def _list_test_filenames(self):
+    return ['bananaflakes']
+
+
 class TestSuite(testsuite.TestSuite):
-  def ListTests(self, context):
-    return map(
-        self._create_test,
-        ['bananaflakes'],
-    )
+  def _test_loader_class(self):
+    return TestLoader
 
   def _test_class(self):
     return TestCase
@@ -24,8 +27,8 @@
   def get_shell(self):
     return 'd8_mocked.py'
 
-  def _get_files_params(self, ctx):
+  def _get_files_params(self):
     return [self.name]
 
-def GetSuite(name, root):
-  return TestSuite(name, root)
+def GetSuite(*args, **kwargs):
+  return TestSuite(*args, **kwargs)
diff --git a/src/v8/tools/unittests/testdata/testroot2/v8_build_config.json b/src/v8/tools/unittests/testdata/testroot2/v8_build_config.json
index c5e3ee3..f19c310 100644
--- a/src/v8/tools/unittests/testdata/testroot2/v8_build_config.json
+++ b/src/v8/tools/unittests/testdata/testroot2/v8_build_config.json
@@ -1,10 +1,13 @@
 {
   "current_cpu": "x64",
   "dcheck_always_on": false,
+  "is_android": false,
   "is_asan": false,
   "is_cfi": false,
+  "is_clang": true,
   "is_component_build": false,
   "is_debug": false,
+  "is_full_debug": false,
   "is_gcov_coverage": false,
   "is_ubsan_vptr": false,
   "is_msan": false,
@@ -14,5 +17,9 @@
   "v8_enable_i18n_support": true,
   "v8_enable_verify_predictable": false,
   "v8_target_cpu": "x64",
-  "v8_use_snapshot": true
+  "v8_use_snapshot": true,
+  "v8_enable_embedded_builtins": false,
+  "v8_enable_verify_csa": false,
+  "v8_enable_lite_mode": false,
+  "v8_enable_pointer_compression": false
 }
diff --git a/src/v8/tools/unittests/v8_presubmit_test.py b/src/v8/tools/unittests/v8_presubmit_test.py
new file mode 100755
index 0000000..2c66d18
--- /dev/null
+++ b/src/v8/tools/unittests/v8_presubmit_test.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+import tempfile
+import unittest
+
+# Configuring the path for the v8_presubmit module
+TOOLS_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+sys.path.append(TOOLS_ROOT)
+
+from v8_presubmit import FileContentsCache, CacheableSourceFileProcessor
+
+
+class FakeCachedProcessor(CacheableSourceFileProcessor):
+  def __init__(self, cache_file_path):
+    super(FakeCachedProcessor, self).__init__(
+      use_cache=True, cache_file_path=cache_file_path, file_type='.test')
+  def GetProcessorWorker(self):
+    return object
+  def GetProcessorScript(self):
+    return "echo", []
+  def DetectUnformattedFiles(_, cmd, worker, files):
+    raise NotImplementedError
+
+class FileContentsCacheTest(unittest.TestCase):
+  def setUp(self):
+    _, self.cache_file_path = tempfile.mkstemp()
+    cache = FileContentsCache(self.cache_file_path)
+    cache.Load()
+
+    def generate_file():
+      _, file_name = tempfile.mkstemp()
+      with open(file_name, "w") as f:
+        f.write(file_name)
+
+      return file_name
+
+    self.target_files = [generate_file() for _ in range(2)]
+    unchanged_files = cache.FilterUnchangedFiles(self.target_files)
+    self.assertEqual(len(unchanged_files), 2)
+    cache.Save()
+
+  def tearDown(self):
+    for file in [self.cache_file_path] + self.target_files:
+      os.remove(file)
+
+  def testCachesFiles(self):
+    cache = FileContentsCache(self.cache_file_path)
+    cache.Load()
+
+    changed_files = cache.FilterUnchangedFiles(self.target_files)
+    self.assertListEqual(changed_files, [])
+
+    modified_file = self.target_files[0]
+    with open(modified_file, "w") as f:
+      f.write("modification")
+
+    changed_files = cache.FilterUnchangedFiles(self.target_files)
+    self.assertListEqual(changed_files, [modified_file])
+
+  def testCacheableSourceFileProcessor(self):
+    class CachedProcessor(FakeCachedProcessor):
+      def DetectFilesToChange(_, files):
+        self.assertListEqual(files, [])
+        return []
+
+    cached_processor = CachedProcessor(cache_file_path=self.cache_file_path)
+    cached_processor.ProcessFiles(self.target_files)
+
+  def testCacheableSourceFileProcessorWithModifications(self):
+    modified_file = self.target_files[0]
+    with open(modified_file, "w") as f:
+      f.write("modification")
+
+    class CachedProcessor(FakeCachedProcessor):
+      def DetectFilesToChange(_, files):
+        self.assertListEqual(files, [modified_file])
+        return []
+
+    cached_processor = CachedProcessor(
+      cache_file_path=self.cache_file_path,
+    )
+    cached_processor.ProcessFiles(self.target_files)
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/src/v8/tools/update-object-macros-undef.py b/src/v8/tools/update-object-macros-undef.py
new file mode 100755
index 0000000..866fdb6
--- /dev/null
+++ b/src/v8/tools/update-object-macros-undef.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python3
+
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+# vim:fenc=utf-8:shiftwidth=2:tabstop=2:softtabstop=2:extandtab
+
+"""
+Generate object-macros-undef.h from object-macros.h.
+"""
+
+import os.path
+import re
+import sys
+
+INPUT = 'src/objects/object-macros.h'
+OUTPUT = 'src/objects/object-macros-undef.h'
+HEADER = """// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Generate this file using the {} script.
+
+// PRESUBMIT_INTENTIONALLY_MISSING_INCLUDE_GUARD
+
+""".format(os.path.basename(__file__))
+
+
+def main():
+  if not os.path.isfile(INPUT):
+    sys.exit("Input file {} does not exist; run this script in a v8 checkout."
+             .format(INPUT))
+  if not os.path.isfile(OUTPUT):
+    sys.exit("Output file {} does not exist; run this script in a v8 checkout."
+             .format(OUTPUT))
+  regexp = re.compile('^#define (\w+)')
+  seen = set()
+  with open(INPUT, 'r') as infile, open(OUTPUT, 'w') as outfile:
+    outfile.write(HEADER)
+    for line in infile:
+      match = regexp.match(line)
+      if match and match.group(1) not in seen:
+        seen.add(match.group(1))
+        outfile.write('#undef {}\n'.format(match.group(1)))
+
+if __name__ == "__main__":
+  main()
diff --git a/src/v8/tools/presubmit.py b/src/v8/tools/v8_presubmit.py
similarity index 71%
rename from src/v8/tools/presubmit.py
rename to src/v8/tools/v8_presubmit.py
index 9ac26dd..346fc9a 100755
--- a/src/v8/tools/presubmit.py
+++ b/src/v8/tools/v8_presubmit.py
@@ -27,10 +27,14 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
 try:
   import hashlib
   md5er = hashlib.md5
-except ImportError, e:
+except ImportError as e:
   import md5
   md5er = md5.new
 
@@ -52,6 +56,7 @@
 
 # Special LINT rules diverging from default and reason.
 # build/header_guard: Our guards have the form "V8_FOO_H_", not "SRC_FOO_H_".
+#   We now run our own header guard check in PRESUBMIT.py.
 # build/include_what_you_use: Started giving false positives for variables
 #   named "string" and "map" assuming that you needed to include STL headers.
 
@@ -59,7 +64,8 @@
 -build/header_guard
 -build/include_what_you_use
 -readability/fn_size
--runtime/references
+-readability/multiline_comment
+-whitespace/comments
 """.split()
 
 LINT_OUTPUT_PATTERN = re.compile(r'^.+[:(]\d+[:)]|^Done processing')
@@ -81,7 +87,7 @@
       out_line = process.stderr.readline()
       if out_line == '' and process.poll() != None:
         if error_count == -1:
-          print "Failed to process %s" % command.pop()
+          print("Failed to process %s" % command.pop())
           return 1
         break
       m = LINT_OUTPUT_PATTERN.match(out_line)
@@ -97,6 +103,28 @@
           ' in your $PATH. Lint check skipped.')
     process.kill()
 
+def TorqueLintWorker(command):
+  try:
+    process = subprocess.Popen(command, stderr=subprocess.PIPE)
+    process.wait()
+    out_lines = ""
+    error_count = 0
+    while True:
+      out_line = process.stderr.readline()
+      if out_line == '' and process.poll() != None:
+        break
+      out_lines += out_line
+      error_count += 1
+    sys.stdout.write(out_lines)
+    if error_count != 0:
+        sys.stdout.write(
+          "warning: formatting and overwriting unformatted Torque files\n")
+    return error_count
+  except KeyboardInterrupt:
+    process.kill()
+  except:
+    print('Error running format-torque.py')
+    process.kill()
 
 class FileContentsCache(object):
 
@@ -204,17 +232,98 @@
     return result
 
 
-class CppLintProcessor(SourceFileProcessor):
+class CacheableSourceFileProcessor(SourceFileProcessor):
+  """Utility class that allows caching ProcessFiles() method calls.
+
+  In order to use it, create a ProcessFilesWithoutCaching method that returns
+  the files requiring intervention after processing the source files.
+  """
+
+  def __init__(self, use_cache, cache_file_path, file_type):
+    self.use_cache = use_cache
+    self.cache_file_path = cache_file_path
+    self.file_type = file_type
+
+  def GetProcessorWorker(self):
+    """Expected to return the worker function to run the formatter."""
+    raise NotImplementedError
+
+  def GetProcessorScript(self):
+    """Expected to return a tuple
+    (path to the format processor script, list of arguments)."""
+    raise NotImplementedError
+
+  def GetProcessorCommand(self):
+    format_processor, options = self.GetProcessorScript()
+    if not format_processor:
+      print('Could not find the formatter for % files' % self.file_type)
+      sys.exit(1)
+
+    command = [sys.executable, format_processor]
+    command.extend(options)
+
+    return command
+
+  def ProcessFiles(self, files):
+    if self.use_cache:
+      cache = FileContentsCache(self.cache_file_path)
+      cache.Load()
+      files = cache.FilterUnchangedFiles(files)
+
+    if len(files) == 0:
+      print('No changes in %s files detected. Skipping check' % self.file_type)
+      return True
+
+    files_requiring_changes = self.DetectFilesToChange(files)
+    print (
+      'Total %s files found that require formatting: %d' %
+      (self.file_type, len(files_requiring_changes)))
+    if self.use_cache:
+      for file in files_requiring_changes:
+        cache.RemoveFile(file)
+
+      cache.Save()
+
+    return files_requiring_changes == []
+
+  def DetectFilesToChange(self, files):
+    command = self.GetProcessorCommand()
+    worker = self.GetProcessorWorker()
+
+    commands = [command + [file] for file in files]
+    count = multiprocessing.cpu_count()
+    pool = multiprocessing.Pool(count)
+    try:
+      results = pool.map_async(worker, commands).get(timeout=240)
+    except KeyboardInterrupt:
+      print("\nCaught KeyboardInterrupt, terminating workers.")
+      pool.terminate()
+      pool.join()
+      sys.exit(1)
+
+    unformatted_files = []
+    for index, errors in enumerate(results):
+      if errors > 0:
+        unformatted_files.append(files[index])
+
+    return unformatted_files
+
+
+class CppLintProcessor(CacheableSourceFileProcessor):
   """
   Lint files to check that they follow the google code style.
   """
 
+  def __init__(self, use_cache=True):
+    super(CppLintProcessor, self).__init__(
+      use_cache=use_cache, cache_file_path='.cpplint-cache', file_type='C/C++')
+
   def IsRelevant(self, name):
     return name.endswith('.cc') or name.endswith('.h')
 
   def IgnoreDir(self, name):
     return (super(CppLintProcessor, self).IgnoreDir(name)
-              or (name == 'third_party'))
+            or (name == 'third_party'))
 
   IGNORE_LINT = ['export-template.h', 'flag-definitions.h']
 
@@ -227,50 +336,50 @@
     test_dirs = ['cctest', 'common', 'fuzzer', 'inspector', 'unittests']
     return dirs + [join('test', dir) for dir in test_dirs]
 
-  def GetCpplintScript(self, prio_path):
-    for path in [prio_path] + os.environ["PATH"].split(os.pathsep):
+  def GetProcessorWorker(self):
+    return CppLintWorker
+
+  def GetProcessorScript(self):
+    filters = ','.join([n for n in LINT_RULES])
+    arguments = ['--filter', filters]
+    for path in [TOOLS_PATH] + os.environ["PATH"].split(os.pathsep):
       path = path.strip('"')
-      cpplint = os.path.join(path, "cpplint.py")
+      cpplint = os.path.join(path, 'cpplint.py')
       if os.path.isfile(cpplint):
-        return cpplint
+        return cpplint, arguments
 
-    return None
+    return None, arguments
 
-  def ProcessFiles(self, files):
-    good_files_cache = FileContentsCache('.cpplint-cache')
-    good_files_cache.Load()
-    files = good_files_cache.FilterUnchangedFiles(files)
-    if len(files) == 0:
-      print 'No changes in files detected. Skipping cpplint check.'
-      return True
 
-    filters = ",".join([n for n in LINT_RULES])
-    cpplint = self.GetCpplintScript(TOOLS_PATH)
-    if cpplint is None:
-      print('Could not find cpplint.py. Make sure '
-            'depot_tools is installed and in the path.')
-      sys.exit(1)
+class TorqueLintProcessor(CacheableSourceFileProcessor):
+  """
+  Check .tq files to verify they follow the Torque style guide.
+  """
 
-    command = [sys.executable, cpplint, '--filter', filters]
+  def __init__(self, use_cache=True):
+    super(TorqueLintProcessor, self).__init__(
+      use_cache=use_cache, cache_file_path='.torquelint-cache',
+      file_type='Torque')
 
-    commands = [command + [file] for file in files]
-    count = multiprocessing.cpu_count()
-    pool = multiprocessing.Pool(count)
-    try:
-      results = pool.map_async(CppLintWorker, commands).get(999999)
-    except KeyboardInterrupt:
-      print "\nCaught KeyboardInterrupt, terminating workers."
-      sys.exit(1)
+  def IsRelevant(self, name):
+    return name.endswith('.tq')
 
-    for i in range(len(files)):
-      if results[i] > 0:
-        good_files_cache.RemoveFile(files[i])
+  def GetPathsToSearch(self):
+    dirs = ['third_party', 'src']
+    test_dirs = ['torque']
+    return dirs + [join('test', dir) for dir in test_dirs]
 
-    total_errors = sum(results)
-    print "Total errors found: %d" % total_errors
-    good_files_cache.Save()
-    return total_errors == 0
+  def GetProcessorWorker(self):
+    return TorqueLintWorker
 
+  def GetProcessorScript(self):
+    torque_tools = os.path.join(TOOLS_PATH, "torque")
+    torque_path = os.path.join(torque_tools, "format-torque.py")
+    arguments = ["-il"]
+    if os.path.isfile(torque_path):
+      return torque_path, arguments
+
+    return None, arguments
 
 COPYRIGHT_HEADER_PATTERN = re.compile(
     r'Copyright [\d-]*20[0-1][0-9] the V8 project authors. All rights reserved.')
@@ -280,8 +389,7 @@
   Check that all files include a copyright notice and no trailing whitespaces.
   """
 
-  RELEVANT_EXTENSIONS = ['.js', '.cc', '.h', '.py', '.c',
-                         '.status', '.gyp', '.gypi']
+  RELEVANT_EXTENSIONS = ['.js', '.cc', '.h', '.py', '.c', '.status', '.tq', '.g4']
 
   def __init__(self):
     self.runtime_function_call_pattern = self.CreateRuntimeFunctionCallMatcher()
@@ -295,7 +403,7 @@
         m = pattern.match(line)
         if m:
           runtime_functions.append(m.group(1))
-    if len(runtime_functions) < 500:
+    if len(runtime_functions) < 250:
       print ("Runtime functions list is suspiciously short. "
              "Consider updating the presubmit script.")
       sys.exit(1)
@@ -331,26 +439,19 @@
 
   def IgnoreDir(self, name):
     return (super(SourceProcessor, self).IgnoreDir(name) or
-            name in ('third_party', 'gyp', 'out', 'obj', 'DerivedSources'))
+            name in ('third_party', 'out', 'obj', 'DerivedSources'))
 
   IGNORE_COPYRIGHTS = ['box2d.js',
                        'cpplint.py',
-                       'check_injected_script_source.py',
                        'copy.js',
                        'corrections.js',
                        'crypto.js',
                        'daemon.py',
-                       'debugger-script.js',
                        'earley-boyer.js',
                        'fannkuch.js',
                        'fasta.js',
-                       'generate_protocol_externs.py',
                        'injected-script.cc',
                        'injected-script.h',
-                       'injected-script-source.js',
-                       'java-script-call-frame.cc',
-                       'java-script-call-frame.h',
-                       'jsmin.py',
                        'libraries.cc',
                        'libraries-empty.cc',
                        'lua_binarytrees.js',
@@ -361,14 +462,11 @@
                        'raytrace.js',
                        'regexp-pcre.js',
                        'resources-123.js',
-                       'rjsmin.py',
                        'sqlite.js',
                        'sqlite-change-heap.js',
                        'sqlite-pointer-masking.js',
                        'sqlite-safe-heap.js',
                        'v8-debugger-script.h',
-                       'v8-function-call.cc',
-                       'v8-function-call.h',
                        'v8-inspector-impl.cc',
                        'v8-inspector-impl.h',
                        'v8-runtime-agent-impl.cc',
@@ -377,7 +475,10 @@
                        'zlib.js']
   IGNORE_TABS = IGNORE_COPYRIGHTS + ['unicode-test.js', 'html-comments.js']
 
-  IGNORE_COPYRIGHTS_DIRECTORY = "test/test262/local-tests"
+  IGNORE_COPYRIGHTS_DIRECTORIES = [
+      "test/test262/local-tests",
+      "test/mjsunit/wasm/bulk-memory-spec",
+  ]
 
   def EndOfDeclaration(self, line):
     return line == "}" or line == "};"
@@ -392,12 +493,13 @@
     base = basename(name)
     if not base in SourceProcessor.IGNORE_TABS:
       if '\t' in contents:
-        print "%s contains tabs" % name
+        print("%s contains tabs" % name)
         result = False
     if not base in SourceProcessor.IGNORE_COPYRIGHTS and \
-        not SourceProcessor.IGNORE_COPYRIGHTS_DIRECTORY in name:
+        not any(ignore_dir in name for ignore_dir
+                in SourceProcessor.IGNORE_COPYRIGHTS_DIRECTORIES):
       if not COPYRIGHT_HEADER_PATTERN.search(contents):
-        print "%s is missing a correct copyright header." % name
+        print("%s is missing a correct copyright header." % name)
         result = False
     if ' \n' in contents or contents.endswith(' '):
       line = 0
@@ -410,34 +512,35 @@
         lines.append(str(line))
       linenumbers = ', '.join(lines)
       if len(lines) > 1:
-        print "%s has trailing whitespaces in lines %s." % (name, linenumbers)
+        print("%s has trailing whitespaces in lines %s." % (name, linenumbers))
       else:
-        print "%s has trailing whitespaces in line %s." % (name, linenumbers)
+        print("%s has trailing whitespaces in line %s." % (name, linenumbers))
       result = False
     if not contents.endswith('\n') or contents.endswith('\n\n'):
-      print "%s does not end with a single new line." % name
+      print("%s does not end with a single new line." % name)
       result = False
     # Sanitize flags for fuzzer.
-    if "mjsunit" in name or "debugger" in name:
+    if (".js" in name or ".mjs" in name) and ("mjsunit" in name or "debugger" in name):
       match = FLAGS_LINE.search(contents)
       if match:
-        print "%s Flags should use '-' (not '_')" % name
+        print("%s Flags should use '-' (not '_')" % name)
         result = False
-      if not "mjsunit/mjsunit.js" in name:
+      if (not "mjsunit/mjsunit.js" in name and
+          not "mjsunit/mjsunit_numfuzz.js" in name):
         if ASSERT_OPTIMIZED_PATTERN.search(contents) and \
             not FLAGS_ENABLE_OPT.search(contents):
-          print "%s Flag --opt should be set if " \
-                "assertOptimized() is used" % name
+          print("%s Flag --opt should be set if " \
+                "assertOptimized() is used" % name)
           result = False
         if ASSERT_UNOPTIMIZED_PATTERN.search(contents) and \
             not FLAGS_NO_ALWAYS_OPT.search(contents):
-          print "%s Flag --no-always-opt should be set if " \
-                "assertUnoptimized() is used" % name
+          print("%s Flag --no-always-opt should be set if " \
+                "assertUnoptimized() is used" % name)
           result = False
 
       match = self.runtime_function_call_pattern.search(contents)
       if match:
-        print "%s has unexpected spaces in a runtime call '%s'" % (name, match.group(1))
+        print("%s has unexpected spaces in a runtime call '%s'" % (name, match.group(1)))
         result = False
     return result
 
@@ -448,12 +551,12 @@
       try:
         handle = open(file)
         contents = handle.read()
-        if not self.ProcessContents(file, contents):
+        if len(contents) > 0 and not self.ProcessContents(file, contents):
           success = False
           violations += 1
       finally:
         handle.close()
-    print "Total violating files: %s" % violations
+    print("Total violating files: %s" % violations)
     return success
 
 def _CheckStatusFileForDuplicateKeys(filepath):
@@ -556,12 +659,16 @@
 def PyTests(workspace):
   result = True
   for script in [
+      join(workspace, 'tools', 'clusterfuzz', 'v8_foozzie_test.py'),
       join(workspace, 'tools', 'release', 'test_scripts.py'),
       join(workspace, 'tools', 'unittests', 'run_tests_test.py'),
+      join(workspace, 'tools', 'unittests', 'run_perf_test.py'),
+      join(workspace, 'tools', 'testrunner', 'testproc', 'variant_unittest.py'),
     ]:
-    print 'Running ' + script
+    print('Running ' + script)
     result &= subprocess.call(
         [sys.executable, script], stdout=subprocess.PIPE) == 0
+
   return result
 
 
@@ -569,6 +676,9 @@
   result = optparse.OptionParser()
   result.add_option('--no-lint', help="Do not run cpplint", default=False,
                     action="store_true")
+  result.add_option('--no-linter-cache', help="Do not cache linter results",
+                    default=False, action="store_true")
+
   return result
 
 
@@ -577,17 +687,22 @@
   parser = GetOptions()
   (options, args) = parser.parse_args()
   success = True
-  print "Running checkdeps..."
+  print("Running checkdeps...")
   success &= CheckDeps(workspace)
+  use_linter_cache = not options.no_linter_cache
   if not options.no_lint:
-    print "Running C++ lint check..."
-    success &= CppLintProcessor().RunOnPath(workspace)
-  print "Running copyright header, trailing whitespaces and " \
-        "two empty lines between declarations check..."
+    print("Running C++ lint check...")
+    success &= CppLintProcessor(use_cache=use_linter_cache).RunOnPath(workspace)
+
+  print("Running Torque formatting check...")
+  success &= TorqueLintProcessor(use_cache=use_linter_cache).RunOnPath(
+    workspace)
+  print("Running copyright header, trailing whitespaces and " \
+        "two empty lines between declarations check...")
   success &= SourceProcessor().RunOnPath(workspace)
-  print "Running status-files check..."
+  print("Running status-files check...")
   success &= StatusFilesProcessor().RunOnPath(workspace)
-  print "Running python tests..."
+  print("Running python tests...")
   success &= PyTests(workspace)
   if success:
     return 0
diff --git a/src/v8/tools/v8heapconst.py b/src/v8/tools/v8heapconst.py
index c96741a..c6c98c0 100644
--- a/src/v8/tools/v8heapconst.py
+++ b/src/v8/tools/v8heapconst.py
@@ -1,4 +1,4 @@
-# Copyright 2017 the V8 project authors. All rights reserved.
+# Copyright 2019 the V8 project authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can
 # be found in the LICENSE file.
 
@@ -11,324 +11,418 @@
   2: "EXTERNAL_INTERNALIZED_STRING_TYPE",
   8: "ONE_BYTE_INTERNALIZED_STRING_TYPE",
   10: "EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE",
-  18: "EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE",
-  34: "SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE",
-  42: "SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE",
-  50: "SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE",
-  64: "STRING_TYPE",
-  65: "CONS_STRING_TYPE",
-  66: "EXTERNAL_STRING_TYPE",
-  67: "SLICED_STRING_TYPE",
-  69: "THIN_STRING_TYPE",
-  72: "ONE_BYTE_STRING_TYPE",
-  73: "CONS_ONE_BYTE_STRING_TYPE",
-  74: "EXTERNAL_ONE_BYTE_STRING_TYPE",
-  75: "SLICED_ONE_BYTE_STRING_TYPE",
-  77: "THIN_ONE_BYTE_STRING_TYPE",
-  82: "EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE",
-  98: "SHORT_EXTERNAL_STRING_TYPE",
-  106: "SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE",
-  114: "SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE",
-  128: "SYMBOL_TYPE",
-  129: "HEAP_NUMBER_TYPE",
-  130: "BIGINT_TYPE",
-  131: "ODDBALL_TYPE",
-  132: "MAP_TYPE",
-  133: "CODE_TYPE",
-  134: "MUTABLE_HEAP_NUMBER_TYPE",
-  135: "FOREIGN_TYPE",
-  136: "BYTE_ARRAY_TYPE",
-  137: "BYTECODE_ARRAY_TYPE",
-  138: "FREE_SPACE_TYPE",
-  139: "FIXED_INT8_ARRAY_TYPE",
-  140: "FIXED_UINT8_ARRAY_TYPE",
-  141: "FIXED_INT16_ARRAY_TYPE",
-  142: "FIXED_UINT16_ARRAY_TYPE",
-  143: "FIXED_INT32_ARRAY_TYPE",
-  144: "FIXED_UINT32_ARRAY_TYPE",
-  145: "FIXED_FLOAT32_ARRAY_TYPE",
-  146: "FIXED_FLOAT64_ARRAY_TYPE",
-  147: "FIXED_UINT8_CLAMPED_ARRAY_TYPE",
-  148: "FIXED_DOUBLE_ARRAY_TYPE",
-  149: "FILLER_TYPE",
-  150: "ACCESS_CHECK_INFO_TYPE",
-  151: "ACCESSOR_INFO_TYPE",
-  152: "ACCESSOR_PAIR_TYPE",
-  153: "ALIASED_ARGUMENTS_ENTRY_TYPE",
-  154: "ALLOCATION_MEMENTO_TYPE",
-  155: "ALLOCATION_SITE_TYPE",
-  156: "ASYNC_GENERATOR_REQUEST_TYPE",
-  157: "CONTEXT_EXTENSION_TYPE",
-  158: "DEBUG_INFO_TYPE",
-  159: "FUNCTION_TEMPLATE_INFO_TYPE",
-  160: "INTERCEPTOR_INFO_TYPE",
-  161: "MODULE_INFO_ENTRY_TYPE",
-  162: "MODULE_TYPE",
-  163: "OBJECT_TEMPLATE_INFO_TYPE",
-  164: "PROMISE_REACTION_JOB_INFO_TYPE",
-  165: "PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE",
-  166: "PROTOTYPE_INFO_TYPE",
-  167: "SCRIPT_TYPE",
-  168: "STACK_FRAME_INFO_TYPE",
-  169: "TUPLE2_TYPE",
-  170: "TUPLE3_TYPE",
-  171: "FIXED_ARRAY_TYPE",
-  172: "DESCRIPTOR_ARRAY_TYPE",
-  173: "HASH_TABLE_TYPE",
-  174: "TRANSITION_ARRAY_TYPE",
-  175: "CELL_TYPE",
-  176: "CODE_DATA_CONTAINER_TYPE",
-  177: "FEEDBACK_VECTOR_TYPE",
-  178: "LOAD_HANDLER_TYPE",
-  179: "PROPERTY_ARRAY_TYPE",
-  180: "PROPERTY_CELL_TYPE",
-  181: "SHARED_FUNCTION_INFO_TYPE",
-  182: "SMALL_ORDERED_HASH_MAP_TYPE",
-  183: "SMALL_ORDERED_HASH_SET_TYPE",
-  184: "STORE_HANDLER_TYPE",
-  185: "WEAK_CELL_TYPE",
+  18: "UNCACHED_EXTERNAL_INTERNALIZED_STRING_TYPE",
+  26: "UNCACHED_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE",
+  32: "STRING_TYPE",
+  33: "CONS_STRING_TYPE",
+  34: "EXTERNAL_STRING_TYPE",
+  35: "SLICED_STRING_TYPE",
+  37: "THIN_STRING_TYPE",
+  40: "ONE_BYTE_STRING_TYPE",
+  41: "CONS_ONE_BYTE_STRING_TYPE",
+  42: "EXTERNAL_ONE_BYTE_STRING_TYPE",
+  43: "SLICED_ONE_BYTE_STRING_TYPE",
+  45: "THIN_ONE_BYTE_STRING_TYPE",
+  50: "UNCACHED_EXTERNAL_STRING_TYPE",
+  58: "UNCACHED_EXTERNAL_ONE_BYTE_STRING_TYPE",
+  64: "SYMBOL_TYPE",
+  65: "HEAP_NUMBER_TYPE",
+  66: "BIGINT_TYPE",
+  67: "ODDBALL_TYPE",
+  68: "MAP_TYPE",
+  69: "CODE_TYPE",
+  70: "MUTABLE_HEAP_NUMBER_TYPE",
+  71: "FOREIGN_TYPE",
+  72: "BYTE_ARRAY_TYPE",
+  73: "BYTECODE_ARRAY_TYPE",
+  74: "FREE_SPACE_TYPE",
+  75: "FIXED_DOUBLE_ARRAY_TYPE",
+  76: "FEEDBACK_METADATA_TYPE",
+  77: "FILLER_TYPE",
+  78: "ACCESS_CHECK_INFO_TYPE",
+  79: "ACCESSOR_INFO_TYPE",
+  80: "ACCESSOR_PAIR_TYPE",
+  81: "ALIASED_ARGUMENTS_ENTRY_TYPE",
+  82: "ALLOCATION_MEMENTO_TYPE",
+  83: "ARRAY_BOILERPLATE_DESCRIPTION_TYPE",
+  84: "ASM_WASM_DATA_TYPE",
+  85: "ASYNC_GENERATOR_REQUEST_TYPE",
+  86: "CLASS_POSITIONS_TYPE",
+  87: "DEBUG_INFO_TYPE",
+  88: "ENUM_CACHE_TYPE",
+  89: "FUNCTION_TEMPLATE_INFO_TYPE",
+  90: "FUNCTION_TEMPLATE_RARE_DATA_TYPE",
+  91: "INTERCEPTOR_INFO_TYPE",
+  92: "INTERPRETER_DATA_TYPE",
+  93: "OBJECT_TEMPLATE_INFO_TYPE",
+  94: "PROMISE_CAPABILITY_TYPE",
+  95: "PROMISE_REACTION_TYPE",
+  96: "PROTOTYPE_INFO_TYPE",
+  97: "SCRIPT_TYPE",
+  98: "SOURCE_POSITION_TABLE_WITH_FRAME_CACHE_TYPE",
+  99: "SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE",
+  100: "STACK_FRAME_INFO_TYPE",
+  101: "STACK_TRACE_FRAME_TYPE",
+  102: "TEMPLATE_OBJECT_DESCRIPTION_TYPE",
+  103: "TUPLE2_TYPE",
+  104: "TUPLE3_TYPE",
+  105: "WASM_CAPI_FUNCTION_DATA_TYPE",
+  106: "WASM_DEBUG_INFO_TYPE",
+  107: "WASM_EXCEPTION_TAG_TYPE",
+  108: "WASM_EXPORTED_FUNCTION_DATA_TYPE",
+  109: "WASM_INDIRECT_FUNCTION_TABLE_TYPE",
+  110: "WASM_JS_FUNCTION_DATA_TYPE",
+  111: "CALLABLE_TASK_TYPE",
+  112: "CALLBACK_TASK_TYPE",
+  113: "PROMISE_FULFILL_REACTION_JOB_TASK_TYPE",
+  114: "PROMISE_REJECT_REACTION_JOB_TASK_TYPE",
+  115: "PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE",
+  116: "FINALIZATION_GROUP_CLEANUP_JOB_TASK_TYPE",
+  117: "INTERNAL_CLASS_TYPE",
+  118: "SMI_PAIR_TYPE",
+  119: "SMI_BOX_TYPE",
+  120: "SORT_STATE_TYPE",
+  121: "SOURCE_TEXT_MODULE_TYPE",
+  122: "SYNTHETIC_MODULE_TYPE",
+  123: "ALLOCATION_SITE_TYPE",
+  124: "EMBEDDER_DATA_ARRAY_TYPE",
+  125: "FIXED_ARRAY_TYPE",
+  126: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE",
+  127: "CLOSURE_FEEDBACK_CELL_ARRAY_TYPE",
+  128: "HASH_TABLE_TYPE",
+  129: "ORDERED_HASH_MAP_TYPE",
+  130: "ORDERED_HASH_SET_TYPE",
+  131: "ORDERED_NAME_DICTIONARY_TYPE",
+  132: "NAME_DICTIONARY_TYPE",
+  133: "GLOBAL_DICTIONARY_TYPE",
+  134: "NUMBER_DICTIONARY_TYPE",
+  135: "SIMPLE_NUMBER_DICTIONARY_TYPE",
+  136: "STRING_TABLE_TYPE",
+  137: "EPHEMERON_HASH_TABLE_TYPE",
+  138: "SCOPE_INFO_TYPE",
+  139: "SCRIPT_CONTEXT_TABLE_TYPE",
+  140: "AWAIT_CONTEXT_TYPE",
+  141: "BLOCK_CONTEXT_TYPE",
+  142: "CATCH_CONTEXT_TYPE",
+  143: "DEBUG_EVALUATE_CONTEXT_TYPE",
+  144: "EVAL_CONTEXT_TYPE",
+  145: "FUNCTION_CONTEXT_TYPE",
+  146: "MODULE_CONTEXT_TYPE",
+  147: "NATIVE_CONTEXT_TYPE",
+  148: "SCRIPT_CONTEXT_TYPE",
+  149: "WITH_CONTEXT_TYPE",
+  150: "WEAK_FIXED_ARRAY_TYPE",
+  151: "TRANSITION_ARRAY_TYPE",
+  152: "CALL_HANDLER_INFO_TYPE",
+  153: "CELL_TYPE",
+  154: "CODE_DATA_CONTAINER_TYPE",
+  155: "DESCRIPTOR_ARRAY_TYPE",
+  156: "FEEDBACK_CELL_TYPE",
+  157: "FEEDBACK_VECTOR_TYPE",
+  158: "LOAD_HANDLER_TYPE",
+  159: "PREPARSE_DATA_TYPE",
+  160: "PROPERTY_ARRAY_TYPE",
+  161: "PROPERTY_CELL_TYPE",
+  162: "SHARED_FUNCTION_INFO_TYPE",
+  163: "SMALL_ORDERED_HASH_MAP_TYPE",
+  164: "SMALL_ORDERED_HASH_SET_TYPE",
+  165: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
+  166: "STORE_HANDLER_TYPE",
+  167: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
+  168: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
+  169: "WEAK_ARRAY_LIST_TYPE",
+  170: "WEAK_CELL_TYPE",
   1024: "JS_PROXY_TYPE",
   1025: "JS_GLOBAL_OBJECT_TYPE",
   1026: "JS_GLOBAL_PROXY_TYPE",
   1027: "JS_MODULE_NAMESPACE_TYPE",
   1040: "JS_SPECIAL_API_OBJECT_TYPE",
-  1041: "JS_VALUE_TYPE",
+  1041: "JS_PRIMITIVE_WRAPPER_TYPE",
   1056: "JS_API_OBJECT_TYPE",
   1057: "JS_OBJECT_TYPE",
   1058: "JS_ARGUMENTS_TYPE",
   1059: "JS_ARRAY_BUFFER_TYPE",
-  1060: "JS_ARRAY_TYPE",
-  1061: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE",
-  1062: "JS_ASYNC_GENERATOR_OBJECT_TYPE",
-  1063: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
-  1064: "JS_DATE_TYPE",
-  1065: "JS_ERROR_TYPE",
-  1066: "JS_GENERATOR_OBJECT_TYPE",
-  1067: "JS_MAP_TYPE",
-  1068: "JS_MAP_KEY_ITERATOR_TYPE",
-  1069: "JS_MAP_KEY_VALUE_ITERATOR_TYPE",
-  1070: "JS_MAP_VALUE_ITERATOR_TYPE",
-  1071: "JS_MESSAGE_OBJECT_TYPE",
-  1072: "JS_PROMISE_TYPE",
-  1073: "JS_REGEXP_TYPE",
-  1074: "JS_SET_TYPE",
-  1075: "JS_SET_KEY_VALUE_ITERATOR_TYPE",
-  1076: "JS_SET_VALUE_ITERATOR_TYPE",
-  1077: "JS_STRING_ITERATOR_TYPE",
-  1078: "JS_WEAK_MAP_TYPE",
-  1079: "JS_WEAK_SET_TYPE",
-  1080: "JS_TYPED_ARRAY_TYPE",
-  1081: "JS_DATA_VIEW_TYPE",
-  1082: "JS_TYPED_ARRAY_KEY_ITERATOR_TYPE",
-  1083: "JS_FAST_ARRAY_KEY_ITERATOR_TYPE",
-  1084: "JS_GENERIC_ARRAY_KEY_ITERATOR_TYPE",
-  1085: "JS_UINT8_ARRAY_KEY_VALUE_ITERATOR_TYPE",
-  1086: "JS_INT8_ARRAY_KEY_VALUE_ITERATOR_TYPE",
-  1087: "JS_UINT16_ARRAY_KEY_VALUE_ITERATOR_TYPE",
-  1088: "JS_INT16_ARRAY_KEY_VALUE_ITERATOR_TYPE",
-  1089: "JS_UINT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
-  1090: "JS_INT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
-  1091: "JS_FLOAT32_ARRAY_KEY_VALUE_ITERATOR_TYPE",
-  1092: "JS_FLOAT64_ARRAY_KEY_VALUE_ITERATOR_TYPE",
-  1093: "JS_UINT8_CLAMPED_ARRAY_KEY_VALUE_ITERATOR_TYPE",
-  1094: "JS_FAST_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE",
-  1095: "JS_FAST_HOLEY_SMI_ARRAY_KEY_VALUE_ITERATOR_TYPE",
-  1096: "JS_FAST_ARRAY_KEY_VALUE_ITERATOR_TYPE",
-  1097: "JS_FAST_HOLEY_ARRAY_KEY_VALUE_ITERATOR_TYPE",
-  1098: "JS_FAST_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE",
-  1099: "JS_FAST_HOLEY_DOUBLE_ARRAY_KEY_VALUE_ITERATOR_TYPE",
-  1100: "JS_GENERIC_ARRAY_KEY_VALUE_ITERATOR_TYPE",
-  1101: "JS_UINT8_ARRAY_VALUE_ITERATOR_TYPE",
-  1102: "JS_INT8_ARRAY_VALUE_ITERATOR_TYPE",
-  1103: "JS_UINT16_ARRAY_VALUE_ITERATOR_TYPE",
-  1104: "JS_INT16_ARRAY_VALUE_ITERATOR_TYPE",
-  1105: "JS_UINT32_ARRAY_VALUE_ITERATOR_TYPE",
-  1106: "JS_INT32_ARRAY_VALUE_ITERATOR_TYPE",
-  1107: "JS_FLOAT32_ARRAY_VALUE_ITERATOR_TYPE",
-  1108: "JS_FLOAT64_ARRAY_VALUE_ITERATOR_TYPE",
-  1109: "JS_UINT8_CLAMPED_ARRAY_VALUE_ITERATOR_TYPE",
-  1110: "JS_FAST_SMI_ARRAY_VALUE_ITERATOR_TYPE",
-  1111: "JS_FAST_HOLEY_SMI_ARRAY_VALUE_ITERATOR_TYPE",
-  1112: "JS_FAST_ARRAY_VALUE_ITERATOR_TYPE",
-  1113: "JS_FAST_HOLEY_ARRAY_VALUE_ITERATOR_TYPE",
-  1114: "JS_FAST_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE",
-  1115: "JS_FAST_HOLEY_DOUBLE_ARRAY_VALUE_ITERATOR_TYPE",
-  1116: "JS_GENERIC_ARRAY_VALUE_ITERATOR_TYPE",
-  1117: "WASM_INSTANCE_TYPE",
-  1118: "WASM_MEMORY_TYPE",
-  1119: "WASM_MODULE_TYPE",
-  1120: "WASM_TABLE_TYPE",
-  1121: "JS_BOUND_FUNCTION_TYPE",
-  1122: "JS_FUNCTION_TYPE",
+  1060: "JS_ARRAY_ITERATOR_TYPE",
+  1061: "JS_ARRAY_TYPE",
+  1062: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE",
+  1063: "JS_ASYNC_FUNCTION_OBJECT_TYPE",
+  1064: "JS_ASYNC_GENERATOR_OBJECT_TYPE",
+  1065: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
+  1066: "JS_DATE_TYPE",
+  1067: "JS_ERROR_TYPE",
+  1068: "JS_GENERATOR_OBJECT_TYPE",
+  1069: "JS_MAP_TYPE",
+  1070: "JS_MAP_KEY_ITERATOR_TYPE",
+  1071: "JS_MAP_KEY_VALUE_ITERATOR_TYPE",
+  1072: "JS_MAP_VALUE_ITERATOR_TYPE",
+  1073: "JS_MESSAGE_OBJECT_TYPE",
+  1074: "JS_PROMISE_TYPE",
+  1075: "JS_REGEXP_TYPE",
+  1076: "JS_REGEXP_STRING_ITERATOR_TYPE",
+  1077: "JS_SET_TYPE",
+  1078: "JS_SET_KEY_VALUE_ITERATOR_TYPE",
+  1079: "JS_SET_VALUE_ITERATOR_TYPE",
+  1080: "JS_STRING_ITERATOR_TYPE",
+  1081: "JS_WEAK_REF_TYPE",
+  1082: "JS_FINALIZATION_GROUP_CLEANUP_ITERATOR_TYPE",
+  1083: "JS_FINALIZATION_GROUP_TYPE",
+  1084: "JS_WEAK_MAP_TYPE",
+  1085: "JS_WEAK_SET_TYPE",
+  1086: "JS_TYPED_ARRAY_TYPE",
+  1087: "JS_DATA_VIEW_TYPE",
+  1088: "JS_INTL_V8_BREAK_ITERATOR_TYPE",
+  1089: "JS_INTL_COLLATOR_TYPE",
+  1090: "JS_INTL_DATE_TIME_FORMAT_TYPE",
+  1091: "JS_INTL_LIST_FORMAT_TYPE",
+  1092: "JS_INTL_LOCALE_TYPE",
+  1093: "JS_INTL_NUMBER_FORMAT_TYPE",
+  1094: "JS_INTL_PLURAL_RULES_TYPE",
+  1095: "JS_INTL_RELATIVE_TIME_FORMAT_TYPE",
+  1096: "JS_INTL_SEGMENT_ITERATOR_TYPE",
+  1097: "JS_INTL_SEGMENTER_TYPE",
+  1098: "WASM_EXCEPTION_TYPE",
+  1099: "WASM_GLOBAL_TYPE",
+  1100: "WASM_INSTANCE_TYPE",
+  1101: "WASM_MEMORY_TYPE",
+  1102: "WASM_MODULE_TYPE",
+  1103: "WASM_TABLE_TYPE",
+  1104: "JS_BOUND_FUNCTION_TYPE",
+  1105: "JS_FUNCTION_TYPE",
 }
 
 # List of known V8 maps.
 KNOWN_MAPS = {
-  0x02201: (138, "FreeSpaceMap"),
-  0x02251: (132, "MetaMap"),
-  0x022a1: (131, "NullMap"),
-  0x022f1: (172, "DescriptorArrayMap"),
-  0x02341: (171, "FixedArrayMap"),
-  0x02391: (149, "OnePointerFillerMap"),
-  0x023e1: (149, "TwoPointerFillerMap"),
-  0x02431: (131, "UninitializedMap"),
-  0x02481: (8, "OneByteInternalizedStringMap"),
-  0x024d1: (131, "UndefinedMap"),
-  0x02521: (129, "HeapNumberMap"),
-  0x02571: (131, "TheHoleMap"),
-  0x025c1: (131, "BooleanMap"),
-  0x02611: (136, "ByteArrayMap"),
-  0x02661: (171, "FixedCOWArrayMap"),
-  0x026b1: (173, "HashTableMap"),
-  0x02701: (128, "SymbolMap"),
-  0x02751: (72, "OneByteStringMap"),
-  0x027a1: (171, "ScopeInfoMap"),
-  0x027f1: (181, "SharedFunctionInfoMap"),
-  0x02841: (133, "CodeMap"),
-  0x02891: (171, "FunctionContextMap"),
-  0x028e1: (175, "CellMap"),
-  0x02931: (185, "WeakCellMap"),
-  0x02981: (180, "GlobalPropertyCellMap"),
-  0x029d1: (135, "ForeignMap"),
-  0x02a21: (174, "TransitionArrayMap"),
-  0x02a71: (177, "FeedbackVectorMap"),
-  0x02ac1: (131, "ArgumentsMarkerMap"),
-  0x02b11: (131, "ExceptionMap"),
-  0x02b61: (131, "TerminationExceptionMap"),
-  0x02bb1: (131, "OptimizedOutMap"),
-  0x02c01: (131, "StaleRegisterMap"),
-  0x02c51: (171, "NativeContextMap"),
-  0x02ca1: (171, "ModuleContextMap"),
-  0x02cf1: (171, "EvalContextMap"),
-  0x02d41: (171, "ScriptContextMap"),
-  0x02d91: (171, "BlockContextMap"),
-  0x02de1: (171, "CatchContextMap"),
-  0x02e31: (171, "WithContextMap"),
-  0x02e81: (171, "DebugEvaluateContextMap"),
-  0x02ed1: (171, "ScriptContextTableMap"),
-  0x02f21: (171, "ArrayListMap"),
-  0x02f71: (148, "FixedDoubleArrayMap"),
-  0x02fc1: (134, "MutableHeapNumberMap"),
-  0x03011: (173, "OrderedHashMapMap"),
-  0x03061: (173, "OrderedHashSetMap"),
-  0x030b1: (173, "NameDictionaryMap"),
-  0x03101: (173, "GlobalDictionaryMap"),
-  0x03151: (173, "NumberDictionaryMap"),
-  0x031a1: (173, "StringTableMap"),
-  0x031f1: (173, "WeakHashTableMap"),
-  0x03241: (171, "SloppyArgumentsElementsMap"),
-  0x03291: (182, "SmallOrderedHashMapMap"),
-  0x032e1: (183, "SmallOrderedHashSetMap"),
-  0x03331: (176, "CodeDataContainerMap"),
-  0x03381: (1071, "JSMessageObjectMap"),
-  0x033d1: (1057, "ExternalMap"),
-  0x03421: (137, "BytecodeArrayMap"),
-  0x03471: (171, "ModuleInfoMap"),
-  0x034c1: (175, "NoClosuresCellMap"),
-  0x03511: (175, "OneClosureCellMap"),
-  0x03561: (175, "ManyClosuresCellMap"),
-  0x035b1: (179, "PropertyArrayMap"),
-  0x03601: (130, "BigIntMap"),
-  0x03651: (106, "NativeSourceStringMap"),
-  0x036a1: (64, "StringMap"),
-  0x036f1: (73, "ConsOneByteStringMap"),
-  0x03741: (65, "ConsStringMap"),
-  0x03791: (77, "ThinOneByteStringMap"),
-  0x037e1: (69, "ThinStringMap"),
-  0x03831: (67, "SlicedStringMap"),
-  0x03881: (75, "SlicedOneByteStringMap"),
-  0x038d1: (66, "ExternalStringMap"),
-  0x03921: (82, "ExternalStringWithOneByteDataMap"),
-  0x03971: (74, "ExternalOneByteStringMap"),
-  0x039c1: (98, "ShortExternalStringMap"),
-  0x03a11: (114, "ShortExternalStringWithOneByteDataMap"),
-  0x03a61: (0, "InternalizedStringMap"),
-  0x03ab1: (2, "ExternalInternalizedStringMap"),
-  0x03b01: (18, "ExternalInternalizedStringWithOneByteDataMap"),
-  0x03b51: (10, "ExternalOneByteInternalizedStringMap"),
-  0x03ba1: (34, "ShortExternalInternalizedStringMap"),
-  0x03bf1: (50, "ShortExternalInternalizedStringWithOneByteDataMap"),
-  0x03c41: (42, "ShortExternalOneByteInternalizedStringMap"),
-  0x03c91: (106, "ShortExternalOneByteStringMap"),
-  0x03ce1: (140, "FixedUint8ArrayMap"),
-  0x03d31: (139, "FixedInt8ArrayMap"),
-  0x03d81: (142, "FixedUint16ArrayMap"),
-  0x03dd1: (141, "FixedInt16ArrayMap"),
-  0x03e21: (144, "FixedUint32ArrayMap"),
-  0x03e71: (143, "FixedInt32ArrayMap"),
-  0x03ec1: (145, "FixedFloat32ArrayMap"),
-  0x03f11: (146, "FixedFloat64ArrayMap"),
-  0x03f61: (147, "FixedUint8ClampedArrayMap"),
-  0x03fb1: (169, "Tuple2Map"),
-  0x04001: (167, "ScriptMap"),
-  0x04051: (160, "InterceptorInfoMap"),
-  0x040a1: (151, "AccessorInfoMap"),
-  0x040f1: (150, "AccessCheckInfoMap"),
-  0x04141: (152, "AccessorPairMap"),
-  0x04191: (153, "AliasedArgumentsEntryMap"),
-  0x041e1: (154, "AllocationMementoMap"),
-  0x04231: (155, "AllocationSiteMap"),
-  0x04281: (156, "AsyncGeneratorRequestMap"),
-  0x042d1: (157, "ContextExtensionMap"),
-  0x04321: (158, "DebugInfoMap"),
-  0x04371: (159, "FunctionTemplateInfoMap"),
-  0x043c1: (161, "ModuleInfoEntryMap"),
-  0x04411: (162, "ModuleMap"),
-  0x04461: (163, "ObjectTemplateInfoMap"),
-  0x044b1: (164, "PromiseReactionJobInfoMap"),
-  0x04501: (165, "PromiseResolveThenableJobInfoMap"),
-  0x04551: (166, "PrototypeInfoMap"),
-  0x045a1: (168, "StackFrameInfoMap"),
-  0x045f1: (170, "Tuple3Map"),
+  ("read_only_space", 0x00111): (74, "FreeSpaceMap"),
+  ("read_only_space", 0x00161): (68, "MetaMap"),
+  ("read_only_space", 0x001e1): (67, "NullMap"),
+  ("read_only_space", 0x00249): (155, "DescriptorArrayMap"),
+  ("read_only_space", 0x002a9): (150, "WeakFixedArrayMap"),
+  ("read_only_space", 0x002f9): (77, "OnePointerFillerMap"),
+  ("read_only_space", 0x00349): (77, "TwoPointerFillerMap"),
+  ("read_only_space", 0x003c9): (67, "UninitializedMap"),
+  ("read_only_space", 0x00439): (8, "OneByteInternalizedStringMap"),
+  ("read_only_space", 0x004d9): (67, "UndefinedMap"),
+  ("read_only_space", 0x00539): (65, "HeapNumberMap"),
+  ("read_only_space", 0x005b9): (67, "TheHoleMap"),
+  ("read_only_space", 0x00661): (67, "BooleanMap"),
+  ("read_only_space", 0x00739): (72, "ByteArrayMap"),
+  ("read_only_space", 0x00789): (125, "FixedArrayMap"),
+  ("read_only_space", 0x007d9): (125, "FixedCOWArrayMap"),
+  ("read_only_space", 0x00829): (128, "HashTableMap"),
+  ("read_only_space", 0x00879): (64, "SymbolMap"),
+  ("read_only_space", 0x008c9): (40, "OneByteStringMap"),
+  ("read_only_space", 0x00919): (138, "ScopeInfoMap"),
+  ("read_only_space", 0x00969): (162, "SharedFunctionInfoMap"),
+  ("read_only_space", 0x009b9): (69, "CodeMap"),
+  ("read_only_space", 0x00a09): (145, "FunctionContextMap"),
+  ("read_only_space", 0x00a59): (153, "CellMap"),
+  ("read_only_space", 0x00aa9): (161, "GlobalPropertyCellMap"),
+  ("read_only_space", 0x00af9): (71, "ForeignMap"),
+  ("read_only_space", 0x00b49): (151, "TransitionArrayMap"),
+  ("read_only_space", 0x00b99): (157, "FeedbackVectorMap"),
+  ("read_only_space", 0x00c39): (67, "ArgumentsMarkerMap"),
+  ("read_only_space", 0x00cd9): (67, "ExceptionMap"),
+  ("read_only_space", 0x00d79): (67, "TerminationExceptionMap"),
+  ("read_only_space", 0x00e21): (67, "OptimizedOutMap"),
+  ("read_only_space", 0x00ec1): (67, "StaleRegisterMap"),
+  ("read_only_space", 0x00f31): (147, "NativeContextMap"),
+  ("read_only_space", 0x00f81): (146, "ModuleContextMap"),
+  ("read_only_space", 0x00fd1): (144, "EvalContextMap"),
+  ("read_only_space", 0x01021): (148, "ScriptContextMap"),
+  ("read_only_space", 0x01071): (140, "AwaitContextMap"),
+  ("read_only_space", 0x010c1): (141, "BlockContextMap"),
+  ("read_only_space", 0x01111): (142, "CatchContextMap"),
+  ("read_only_space", 0x01161): (149, "WithContextMap"),
+  ("read_only_space", 0x011b1): (143, "DebugEvaluateContextMap"),
+  ("read_only_space", 0x01201): (139, "ScriptContextTableMap"),
+  ("read_only_space", 0x01251): (127, "ClosureFeedbackCellArrayMap"),
+  ("read_only_space", 0x012a1): (76, "FeedbackMetadataArrayMap"),
+  ("read_only_space", 0x012f1): (125, "ArrayListMap"),
+  ("read_only_space", 0x01341): (66, "BigIntMap"),
+  ("read_only_space", 0x01391): (126, "ObjectBoilerplateDescriptionMap"),
+  ("read_only_space", 0x013e1): (73, "BytecodeArrayMap"),
+  ("read_only_space", 0x01431): (154, "CodeDataContainerMap"),
+  ("read_only_space", 0x01481): (75, "FixedDoubleArrayMap"),
+  ("read_only_space", 0x014d1): (133, "GlobalDictionaryMap"),
+  ("read_only_space", 0x01521): (156, "ManyClosuresCellMap"),
+  ("read_only_space", 0x01571): (125, "ModuleInfoMap"),
+  ("read_only_space", 0x015c1): (70, "MutableHeapNumberMap"),
+  ("read_only_space", 0x01611): (132, "NameDictionaryMap"),
+  ("read_only_space", 0x01661): (156, "NoClosuresCellMap"),
+  ("read_only_space", 0x016b1): (134, "NumberDictionaryMap"),
+  ("read_only_space", 0x01701): (156, "OneClosureCellMap"),
+  ("read_only_space", 0x01751): (129, "OrderedHashMapMap"),
+  ("read_only_space", 0x017a1): (130, "OrderedHashSetMap"),
+  ("read_only_space", 0x017f1): (131, "OrderedNameDictionaryMap"),
+  ("read_only_space", 0x01841): (159, "PreparseDataMap"),
+  ("read_only_space", 0x01891): (160, "PropertyArrayMap"),
+  ("read_only_space", 0x018e1): (152, "SideEffectCallHandlerInfoMap"),
+  ("read_only_space", 0x01931): (152, "SideEffectFreeCallHandlerInfoMap"),
+  ("read_only_space", 0x01981): (152, "NextCallSideEffectFreeCallHandlerInfoMap"),
+  ("read_only_space", 0x019d1): (135, "SimpleNumberDictionaryMap"),
+  ("read_only_space", 0x01a21): (125, "SloppyArgumentsElementsMap"),
+  ("read_only_space", 0x01a71): (163, "SmallOrderedHashMapMap"),
+  ("read_only_space", 0x01ac1): (164, "SmallOrderedHashSetMap"),
+  ("read_only_space", 0x01b11): (165, "SmallOrderedNameDictionaryMap"),
+  ("read_only_space", 0x01b61): (121, "SourceTextModuleMap"),
+  ("read_only_space", 0x01bb1): (136, "StringTableMap"),
+  ("read_only_space", 0x01c01): (122, "SyntheticModuleMap"),
+  ("read_only_space", 0x01c51): (167, "UncompiledDataWithoutPreparseDataMap"),
+  ("read_only_space", 0x01ca1): (168, "UncompiledDataWithPreparseDataMap"),
+  ("read_only_space", 0x01cf1): (169, "WeakArrayListMap"),
+  ("read_only_space", 0x01d41): (137, "EphemeronHashTableMap"),
+  ("read_only_space", 0x01d91): (124, "EmbedderDataArrayMap"),
+  ("read_only_space", 0x01de1): (170, "WeakCellMap"),
+  ("read_only_space", 0x01e31): (58, "NativeSourceStringMap"),
+  ("read_only_space", 0x01e81): (32, "StringMap"),
+  ("read_only_space", 0x01ed1): (41, "ConsOneByteStringMap"),
+  ("read_only_space", 0x01f21): (33, "ConsStringMap"),
+  ("read_only_space", 0x01f71): (45, "ThinOneByteStringMap"),
+  ("read_only_space", 0x01fc1): (37, "ThinStringMap"),
+  ("read_only_space", 0x02011): (35, "SlicedStringMap"),
+  ("read_only_space", 0x02061): (43, "SlicedOneByteStringMap"),
+  ("read_only_space", 0x020b1): (34, "ExternalStringMap"),
+  ("read_only_space", 0x02101): (42, "ExternalOneByteStringMap"),
+  ("read_only_space", 0x02151): (50, "UncachedExternalStringMap"),
+  ("read_only_space", 0x021a1): (0, "InternalizedStringMap"),
+  ("read_only_space", 0x021f1): (2, "ExternalInternalizedStringMap"),
+  ("read_only_space", 0x02241): (10, "ExternalOneByteInternalizedStringMap"),
+  ("read_only_space", 0x02291): (18, "UncachedExternalInternalizedStringMap"),
+  ("read_only_space", 0x022e1): (26, "UncachedExternalOneByteInternalizedStringMap"),
+  ("read_only_space", 0x02331): (58, "UncachedExternalOneByteStringMap"),
+  ("read_only_space", 0x02381): (67, "SelfReferenceMarkerMap"),
+  ("read_only_space", 0x023e9): (88, "EnumCacheMap"),
+  ("read_only_space", 0x02489): (83, "ArrayBoilerplateDescriptionMap"),
+  ("read_only_space", 0x02679): (91, "InterceptorInfoMap"),
+  ("read_only_space", 0x04e59): (78, "AccessCheckInfoMap"),
+  ("read_only_space", 0x04ea9): (79, "AccessorInfoMap"),
+  ("read_only_space", 0x04ef9): (80, "AccessorPairMap"),
+  ("read_only_space", 0x04f49): (81, "AliasedArgumentsEntryMap"),
+  ("read_only_space", 0x04f99): (82, "AllocationMementoMap"),
+  ("read_only_space", 0x04fe9): (84, "AsmWasmDataMap"),
+  ("read_only_space", 0x05039): (85, "AsyncGeneratorRequestMap"),
+  ("read_only_space", 0x05089): (86, "ClassPositionsMap"),
+  ("read_only_space", 0x050d9): (87, "DebugInfoMap"),
+  ("read_only_space", 0x05129): (89, "FunctionTemplateInfoMap"),
+  ("read_only_space", 0x05179): (90, "FunctionTemplateRareDataMap"),
+  ("read_only_space", 0x051c9): (92, "InterpreterDataMap"),
+  ("read_only_space", 0x05219): (93, "ObjectTemplateInfoMap"),
+  ("read_only_space", 0x05269): (94, "PromiseCapabilityMap"),
+  ("read_only_space", 0x052b9): (95, "PromiseReactionMap"),
+  ("read_only_space", 0x05309): (96, "PrototypeInfoMap"),
+  ("read_only_space", 0x05359): (97, "ScriptMap"),
+  ("read_only_space", 0x053a9): (98, "SourcePositionTableWithFrameCacheMap"),
+  ("read_only_space", 0x053f9): (99, "SourceTextModuleInfoEntryMap"),
+  ("read_only_space", 0x05449): (100, "StackFrameInfoMap"),
+  ("read_only_space", 0x05499): (101, "StackTraceFrameMap"),
+  ("read_only_space", 0x054e9): (102, "TemplateObjectDescriptionMap"),
+  ("read_only_space", 0x05539): (103, "Tuple2Map"),
+  ("read_only_space", 0x05589): (104, "Tuple3Map"),
+  ("read_only_space", 0x055d9): (105, "WasmCapiFunctionDataMap"),
+  ("read_only_space", 0x05629): (106, "WasmDebugInfoMap"),
+  ("read_only_space", 0x05679): (107, "WasmExceptionTagMap"),
+  ("read_only_space", 0x056c9): (108, "WasmExportedFunctionDataMap"),
+  ("read_only_space", 0x05719): (109, "WasmIndirectFunctionTableMap"),
+  ("read_only_space", 0x05769): (110, "WasmJSFunctionDataMap"),
+  ("read_only_space", 0x057b9): (111, "CallableTaskMap"),
+  ("read_only_space", 0x05809): (112, "CallbackTaskMap"),
+  ("read_only_space", 0x05859): (113, "PromiseFulfillReactionJobTaskMap"),
+  ("read_only_space", 0x058a9): (114, "PromiseRejectReactionJobTaskMap"),
+  ("read_only_space", 0x058f9): (115, "PromiseResolveThenableJobTaskMap"),
+  ("read_only_space", 0x05949): (116, "FinalizationGroupCleanupJobTaskMap"),
+  ("read_only_space", 0x05999): (117, "InternalClassMap"),
+  ("read_only_space", 0x059e9): (118, "SmiPairMap"),
+  ("read_only_space", 0x05a39): (119, "SmiBoxMap"),
+  ("read_only_space", 0x05a89): (120, "SortStateMap"),
+  ("read_only_space", 0x05ad9): (123, "AllocationSiteWithWeakNextMap"),
+  ("read_only_space", 0x05b29): (123, "AllocationSiteWithoutWeakNextMap"),
+  ("read_only_space", 0x05b79): (158, "LoadHandler1Map"),
+  ("read_only_space", 0x05bc9): (158, "LoadHandler2Map"),
+  ("read_only_space", 0x05c19): (158, "LoadHandler3Map"),
+  ("read_only_space", 0x05c69): (166, "StoreHandler0Map"),
+  ("read_only_space", 0x05cb9): (166, "StoreHandler1Map"),
+  ("read_only_space", 0x05d09): (166, "StoreHandler2Map"),
+  ("read_only_space", 0x05d59): (166, "StoreHandler3Map"),
+  ("map_space", 0x00111): (1057, "ExternalMap"),
+  ("map_space", 0x00161): (1073, "JSMessageObjectMap"),
 }
 
 # List of known V8 objects.
 KNOWN_OBJECTS = {
-  ("OLD_SPACE", 0x02201): "NullValue",
-  ("OLD_SPACE", 0x02231): "EmptyDescriptorArray",
-  ("OLD_SPACE", 0x02251): "EmptyFixedArray",
-  ("OLD_SPACE", 0x02261): "UninitializedValue",
-  ("OLD_SPACE", 0x022e1): "UndefinedValue",
-  ("OLD_SPACE", 0x02311): "NanValue",
-  ("OLD_SPACE", 0x02321): "TheHoleValue",
-  ("OLD_SPACE", 0x02371): "HoleNanValue",
-  ("OLD_SPACE", 0x02381): "TrueValue",
-  ("OLD_SPACE", 0x023f1): "FalseValue",
-  ("OLD_SPACE", 0x02441): "empty_string",
-  ("OLD_SPACE", 0x02459): "EmptyScopeInfo",
-  ("OLD_SPACE", 0x02469): "ArgumentsMarker",
-  ("OLD_SPACE", 0x024c1): "Exception",
-  ("OLD_SPACE", 0x02519): "TerminationException",
-  ("OLD_SPACE", 0x02579): "OptimizedOut",
-  ("OLD_SPACE", 0x025d1): "StaleRegister",
-  ("OLD_SPACE", 0x02651): "EmptyByteArray",
-  ("OLD_SPACE", 0x02661): "EmptyFixedUint8Array",
-  ("OLD_SPACE", 0x02681): "EmptyFixedInt8Array",
-  ("OLD_SPACE", 0x026a1): "EmptyFixedUint16Array",
-  ("OLD_SPACE", 0x026c1): "EmptyFixedInt16Array",
-  ("OLD_SPACE", 0x026e1): "EmptyFixedUint32Array",
-  ("OLD_SPACE", 0x02701): "EmptyFixedInt32Array",
-  ("OLD_SPACE", 0x02721): "EmptyFixedFloat32Array",
-  ("OLD_SPACE", 0x02741): "EmptyFixedFloat64Array",
-  ("OLD_SPACE", 0x02761): "EmptyFixedUint8ClampedArray",
-  ("OLD_SPACE", 0x02781): "EmptyScript",
-  ("OLD_SPACE", 0x02809): "UndefinedCell",
-  ("OLD_SPACE", 0x02819): "EmptySloppyArgumentsElements",
-  ("OLD_SPACE", 0x02839): "EmptySlowElementDictionary",
-  ("OLD_SPACE", 0x02881): "EmptyOrderedHashMap",
-  ("OLD_SPACE", 0x028a9): "EmptyOrderedHashSet",
-  ("OLD_SPACE", 0x028d1): "EmptyPropertyCell",
-  ("OLD_SPACE", 0x028f9): "EmptyWeakCell",
-  ("OLD_SPACE", 0x02969): "NoElementsProtector",
-  ("OLD_SPACE", 0x02991): "IsConcatSpreadableProtector",
-  ("OLD_SPACE", 0x029a1): "SpeciesProtector",
-  ("OLD_SPACE", 0x029c9): "StringLengthProtector",
-  ("OLD_SPACE", 0x029d9): "FastArrayIterationProtector",
-  ("OLD_SPACE", 0x029e9): "ArrayIteratorProtector",
-  ("OLD_SPACE", 0x02a11): "ArrayBufferNeuteringProtector",
-  ("OLD_SPACE", 0x02a39): "InfinityValue",
-  ("OLD_SPACE", 0x02a49): "MinusZeroValue",
-  ("OLD_SPACE", 0x02a59): "MinusInfinityValue",
+  ("read_only_space", 0x001b1): "NullValue",
+  ("read_only_space", 0x00231): "EmptyDescriptorArray",
+  ("read_only_space", 0x00299): "EmptyWeakFixedArray",
+  ("read_only_space", 0x00399): "UninitializedValue",
+  ("read_only_space", 0x004a9): "UndefinedValue",
+  ("read_only_space", 0x00529): "NanValue",
+  ("read_only_space", 0x00589): "TheHoleValue",
+  ("read_only_space", 0x00621): "HoleNanValue",
+  ("read_only_space", 0x00631): "TrueValue",
+  ("read_only_space", 0x006e1): "FalseValue",
+  ("read_only_space", 0x00729): "empty_string",
+  ("read_only_space", 0x00be9): "EmptyScopeInfo",
+  ("read_only_space", 0x00bf9): "EmptyFixedArray",
+  ("read_only_space", 0x00c09): "ArgumentsMarker",
+  ("read_only_space", 0x00ca9): "Exception",
+  ("read_only_space", 0x00d49): "TerminationException",
+  ("read_only_space", 0x00df1): "OptimizedOut",
+  ("read_only_space", 0x00e91): "StaleRegister",
+  ("read_only_space", 0x023d1): "EmptyEnumCache",
+  ("read_only_space", 0x02439): "EmptyPropertyArray",
+  ("read_only_space", 0x02449): "EmptyByteArray",
+  ("read_only_space", 0x02459): "EmptyObjectBoilerplateDescription",
+  ("read_only_space", 0x02471): "EmptyArrayBoilerplateDescription",
+  ("read_only_space", 0x024d9): "EmptyClosureFeedbackCellArray",
+  ("read_only_space", 0x024e9): "EmptySloppyArgumentsElements",
+  ("read_only_space", 0x02509): "EmptySlowElementDictionary",
+  ("read_only_space", 0x02551): "EmptyOrderedHashMap",
+  ("read_only_space", 0x02579): "EmptyOrderedHashSet",
+  ("read_only_space", 0x025a1): "EmptyFeedbackMetadata",
+  ("read_only_space", 0x025b1): "EmptyPropertyCell",
+  ("read_only_space", 0x025d9): "EmptyPropertyDictionary",
+  ("read_only_space", 0x02629): "NoOpInterceptorInfo",
+  ("read_only_space", 0x026c9): "EmptyWeakArrayList",
+  ("read_only_space", 0x026e1): "InfinityValue",
+  ("read_only_space", 0x026f1): "MinusZeroValue",
+  ("read_only_space", 0x02701): "MinusInfinityValue",
+  ("read_only_space", 0x02711): "SelfReferenceMarker",
+  ("read_only_space", 0x02769): "OffHeapTrampolineRelocationInfo",
+  ("read_only_space", 0x02781): "TrampolineTrivialCodeDataContainer",
+  ("read_only_space", 0x02799): "TrampolinePromiseRejectionCodeDataContainer",
+  ("read_only_space", 0x027b1): "HashSeed",
+  ("old_space", 0x00111): "ArgumentsIteratorAccessor",
+  ("old_space", 0x00181): "ArrayLengthAccessor",
+  ("old_space", 0x001f1): "BoundFunctionLengthAccessor",
+  ("old_space", 0x00261): "BoundFunctionNameAccessor",
+  ("old_space", 0x002d1): "ErrorStackAccessor",
+  ("old_space", 0x00341): "FunctionArgumentsAccessor",
+  ("old_space", 0x003b1): "FunctionCallerAccessor",
+  ("old_space", 0x00421): "FunctionNameAccessor",
+  ("old_space", 0x00491): "FunctionLengthAccessor",
+  ("old_space", 0x00501): "FunctionPrototypeAccessor",
+  ("old_space", 0x00571): "StringLengthAccessor",
+  ("old_space", 0x005e1): "InvalidPrototypeValidityCell",
+  ("old_space", 0x005f1): "EmptyScript",
+  ("old_space", 0x00671): "ManyClosuresCell",
+  ("old_space", 0x00689): "ArrayConstructorProtector",
+  ("old_space", 0x00699): "NoElementsProtector",
+  ("old_space", 0x006c1): "IsConcatSpreadableProtector",
+  ("old_space", 0x006d1): "ArraySpeciesProtector",
+  ("old_space", 0x006f9): "TypedArraySpeciesProtector",
+  ("old_space", 0x00721): "PromiseSpeciesProtector",
+  ("old_space", 0x00749): "StringLengthProtector",
+  ("old_space", 0x00759): "ArrayIteratorProtector",
+  ("old_space", 0x00781): "ArrayBufferDetachingProtector",
+  ("old_space", 0x007a9): "PromiseHookProtector",
+  ("old_space", 0x007d1): "PromiseResolveProtector",
+  ("old_space", 0x007e1): "MapIteratorProtector",
+  ("old_space", 0x00809): "PromiseThenProtector",
+  ("old_space", 0x00831): "SetIteratorProtector",
+  ("old_space", 0x00859): "StringIteratorProtector",
+  ("old_space", 0x00881): "SingleCharacterStringCache",
+  ("old_space", 0x01091): "StringSplitCache",
+  ("old_space", 0x018a1): "RegExpMultipleCache",
+  ("old_space", 0x020b1): "BuiltinsConstantsTable",
 }
 
 # List of known V8 Frame Markers.
@@ -339,14 +433,16 @@
   "OPTIMIZED",
   "WASM_COMPILED",
   "WASM_TO_JS",
-  "WASM_TO_WASM",
   "JS_TO_WASM",
   "WASM_INTERPRETER_ENTRY",
   "C_WASM_ENTRY",
+  "WASM_EXIT",
+  "WASM_COMPILE_LAZY",
   "INTERPRETED",
   "STUB",
   "BUILTIN_CONTINUATION",
   "JAVA_SCRIPT_BUILTIN_CONTINUATION",
+  "JAVA_SCRIPT_BUILTIN_CONTINUATION_WITH_CATCH",
   "INTERNAL",
   "CONSTRUCT",
   "ARGUMENTS_ADAPTOR",
diff --git a/src/v8/tools/valgrind/asan/dummy b/src/v8/tools/valgrind/asan/dummy
new file mode 100644
index 0000000..0e89814
--- /dev/null
+++ b/src/v8/tools/valgrind/asan/dummy
@@ -0,0 +1,2 @@
+# src/base has some more tools in this folder, which we don't use. But we need
+# to have the folder so that the data deps we inherit doesn't error out.
\ No newline at end of file
diff --git a/src/v8/tools/vim/ninja-build.vim b/src/v8/tools/vim/ninja-build.vim
index 3e9b894..7c88525 100644
--- a/src/v8/tools/vim/ninja-build.vim
+++ b/src/v8/tools/vim/ninja-build.vim
@@ -53,11 +53,8 @@
 
 
 def compute_ninja_command_for_targets(targets='', configuration=None):
-  flags = []
-  if "use_goma=1" in os.getenv('GYP_DEFINES', '').split(' '):
-    flags = ['-j', '512']
   build_dir = path_to_build_dir(configuration);
-  build_cmd = ' '.join(['ninja'] + flags + ['-C', build_dir, targets])
+  build_cmd = ' '.join(['autoninja', '-C', build_dir, targets])
   vim.command('return "%s"' % build_cmd)
 
 
diff --git a/src/v8/tools/wasm-compilation-hints/inject-compilation-hints.py b/src/v8/tools/wasm-compilation-hints/inject-compilation-hints.py
new file mode 100755
index 0000000..fd4b65b
--- /dev/null
+++ b/src/v8/tools/wasm-compilation-hints/inject-compilation-hints.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be found
+# in the LICENSE file.
+
+import argparse
+import io
+import sys
+
+from wasm import *
+
+FUNCTION_SECTION_ID = 3
+
+def parse_args():
+  parser = argparse.ArgumentParser(\
+      description="Inject compilation hints into a Wasm module.")
+  parser.add_argument("-i", "--in-wasm-file", \
+      type=str, \
+      help="original wasm module")
+  parser.add_argument("-o", "--out-wasm-file", \
+      type=str, \
+      help="wasm module with injected hints")
+  parser.add_argument("-x", "--hints-file", \
+      type=str, required=True, \
+      help="binary hints file to be injected as a custom section " + \
+          "'compilationHints'")
+  return parser.parse_args()
+
+if __name__ == "__main__":
+  args = parse_args()
+  in_wasm_file = args.in_wasm_file if args.in_wasm_file else sys.stdin.fileno()
+  out_wasm_file = args.out_wasm_file if args.out_wasm_file else sys.stdout.fileno()
+  hints_bs = open(args.hints_file, "rb").read()
+  with io.open(in_wasm_file, "rb") as fin:
+    with io.open(out_wasm_file, "wb") as fout:
+      magic_number, bs = read_magic_number(fin);
+      fout.write(bs)
+      version, bs = read_version(fin);
+      fout.write(bs)
+      num_declared_functions = None
+      while True:
+        id, bs = read_varuintN(fin)
+        fout.write(bs)
+        if id == None:
+          break
+        payload_length, bs = read_varuintN(fin)
+        fout.write(bs)
+
+        # Peek into function section for upcoming validity check.
+        if id == FUNCTION_SECTION_ID:
+          num_declared_functions, bs = peek_varuintN(fin)
+
+        bs = fin.read(payload_length)
+        fout.write(bs)
+
+        # Insert hint section after function section.
+        if id == FUNCTION_SECTION_ID:
+          assert len(hints_bs) == num_declared_functions, "unexpected number of hints"
+          write_compilation_hints_section(fout, hints_bs)
diff --git a/src/v8/tools/wasm-compilation-hints/wasm-objdump-compilation-hints.py b/src/v8/tools/wasm-compilation-hints/wasm-objdump-compilation-hints.py
new file mode 100755
index 0000000..a762bd7
--- /dev/null
+++ b/src/v8/tools/wasm-compilation-hints/wasm-objdump-compilation-hints.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be found
+# in the LICENSE file.
+
+import argparse
+import io
+import sys
+
+from wasm import *
+
+def parse_args():
+  parser = argparse.ArgumentParser(\
+      description="Read compilation hints from Wasm module.")
+  parser.add_argument("in_wasm_file", \
+      type=str, \
+      help="wasm module")
+  return parser.parse_args()
+
+if __name__ == "__main__":
+  args = parse_args()
+  in_wasm_file = args.in_wasm_file if args.in_wasm_file else sys.stdin.fileno()
+  with io.open(in_wasm_file, "rb") as fin:
+    read_magic_number(fin);
+    read_version(fin);
+    while True:
+      id, bs = read_varuintN(fin)
+      if id == None:
+        break
+      payload_length, bs = read_varuintN(fin)
+      if id == CUSTOM_SECTION_ID:
+        section_name_length, section_name_length_bs = read_varuintN(fin)
+        section_name_bs = fin.read(section_name_length)
+        if section_name_bs == "compilationHints":
+          num_hints, bs = read_varuintN(fin)
+          print "Custom section compilationHints with", num_hints, "hints:"
+          for i in range(num_hints):
+            hint, bs = read_uint8(fin)
+            print i, hex(hint)
+        else:
+          remaining_length = payload_length \
+              - len(section_name_length_bs) \
+              - len(section_name_bs)
+          fin.read(remaining_length)
+      else:
+        fin.read(payload_length)
diff --git a/src/v8/tools/wasm-compilation-hints/wasm.py b/src/v8/tools/wasm-compilation-hints/wasm.py
new file mode 100644
index 0000000..ae3d084
--- /dev/null
+++ b/src/v8/tools/wasm-compilation-hints/wasm.py
@@ -0,0 +1,108 @@
+#!/usr/bin/env python
+
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be found
+# in the LICENSE file.
+
+import io
+import math
+import struct
+import sys
+
+CUSTOM_SECTION_ID = 0
+FUNCTION_SECTION_ID = 3
+
+def peek_uint8(fin):
+  bs = fin.peek(1)[:1]
+  if len(bs) != 1:
+    return None, bs
+  return ord(bs[0]), bs
+
+def read_uint8(fin):
+  value, bs = peek_uint8(fin)
+  fin.read(len(bs))
+  return value, bs
+
+def peek_uint32(fin):
+  bs = fin.peek(4)[:4]
+  if len(bs) != 4:
+    return None, bs
+  return ord(bs[0]) | ord(bs[1]) << 8 | ord(bs[2]) << 16 | ord(bs[3]) << 24, bs
+
+def read_uint32(fin):
+  value, bs = peek_uint32(fin)
+  fin.read(len(bs))
+  return value, bs
+
+def peek_varuintN(fin):
+  value = 0
+  shift = 0
+  n = 1
+  while True:
+    bs = fin.peek(n)[:n]
+    if len(bs) < n:
+      return None, bs
+    b = ord(bs[-1])
+    value |= (b & 0x7F) << shift;
+    if (b & 0x80) == 0x00:
+      return value, bs
+    shift += 7;
+    n += 1
+
+def read_varuintN(fin):
+  value, bs = peek_varuintN(fin)
+  fin.read(len(bs))
+  return value, bs
+
+def to_varuintN(value):
+  bs = ""
+  while True:
+    b = value & 0x7F
+    value >>= 7
+    if (value != 0x00):
+      b |= 0x80
+    bs += chr(b)
+    if value == 0x00:
+      return bs
+
+def write_varuintN(value, fout):
+  bs = to_varuintN(value)
+  fout.write(bs)
+  return bs
+
+def peek_magic_number(fin, expected_magic_number=0x6d736100):
+  magic_number, bs = peek_uint32(fin)
+  assert magic_number == expected_magic_number, "unexpected magic number"
+  return magic_number, bs
+
+def read_magic_number(fin, expected_magic_number=0x6d736100):
+  magic_number, bs = peek_magic_number(fin, expected_magic_number)
+  fin.read(len(bs))
+  return magic_number, bs
+
+def peek_version(fin, expected_version=1):
+  version, bs = peek_uint32(fin)
+  assert version == expected_version, "unexpected version"
+  return version, bs
+
+def read_version(fin, expected_version=1):
+  version, bs = peek_version(fin, expected_version)
+  fin.read(len(bs))
+  return version, bs
+
+def write_custom_section(fout, section_name_bs, payload_bs):
+  section_name_length_bs = to_varuintN(len(section_name_bs))
+  payload_length_bs = to_varuintN(len(section_name_bs) \
+      + len(section_name_length_bs) + len(payload_bs))
+  section_id_bs = to_varuintN(CUSTOM_SECTION_ID)
+  fout.write(section_id_bs)
+  fout.write(payload_length_bs)
+  fout.write(section_name_length_bs)
+  fout.write(section_name_bs)
+  fout.write(payload_bs)
+
+def write_compilation_hints_section(fout, hints_bs):
+  num_compilation_hints_bs = to_varuintN(len(hints_bs))
+  section_name_bs = b"compilationHints"
+  payload_bs = num_compilation_hints_bs + hints_bs
+  write_custom_section(fout, section_name_bs, payload_bs)
diff --git a/src/v8/tools/wasm/update-wasm-spec-tests.sh b/src/v8/tools/wasm/update-wasm-spec-tests.sh
index c4d18a3..d029ffe 100755
--- a/src/v8/tools/wasm/update-wasm-spec-tests.sh
+++ b/src/v8/tools/wasm/update-wasm-spec-tests.sh
@@ -13,33 +13,101 @@
 # non-zero status, or zero if no command exited with a non-zero status
 set -o pipefail
 
+log_and_run() {
+  echo ">>" $*
+  if ! $*; then
+    echo "sub-command failed: $*"
+    exit
+  fi
+}
+
+###############################################################################
+# Setup directories.
+###############################################################################
+
 TOOLS_WASM_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
 V8_DIR="${TOOLS_WASM_DIR}/../.."
 SPEC_TEST_DIR=${V8_DIR}/test/wasm-spec-tests
+TMP_DIR=${SPEC_TEST_DIR}/tmp
 
-cd ${V8_DIR}
+log_and_run cd ${V8_DIR}
 
-rm -rf ${SPEC_TEST_DIR}/tests
-mkdir ${SPEC_TEST_DIR}/tests
+log_and_run rm -rf ${SPEC_TEST_DIR}/tests
+log_and_run mkdir ${SPEC_TEST_DIR}/tests
 
-rm -rf ${SPEC_TEST_DIR}/tmp
-mkdir ${SPEC_TEST_DIR}/tmp
+log_and_run mkdir ${SPEC_TEST_DIR}/tests/proposals
 
-./tools/dev/gm.py x64.release d8
+log_and_run rm -rf ${TMP_DIR}
+log_and_run mkdir ${TMP_DIR}
 
-cd ${V8_DIR}/test/wasm-js/interpreter
-make clean all
+###############################################################################
+# Generate the spec tests.
+###############################################################################
 
-cd ${V8_DIR}/test/wasm-js/test/core
+log_and_run cd ${V8_DIR}/test/wasm-js/data/interpreter
+# The next step requires that ocaml is installed. See the README.md in
+# ${V8_DIR}/test/wasm-js/data/interpreter/.
+log_and_run make clean opt
 
+log_and_run cd ${V8_DIR}/test/wasm-js/data/test/core
+log_and_run cp *.wast ${SPEC_TEST_DIR}/tests/
 
-./run.py --wasm ${V8_DIR}/test/wasm-js/interpreter/wasm --js ${V8_DIR}/out/x64.release/d8 --out ${SPEC_TEST_DIR}/tmp
-cp ${SPEC_TEST_DIR}/tmp/*.js ${SPEC_TEST_DIR}/tests/
-rm -rf ${SPEC_TEST_DIR}/tmp
+log_and_run ./run.py --wasm ${V8_DIR}/test/wasm-js/data/interpreter/wasm --out ${TMP_DIR}
+log_and_run cp ${TMP_DIR}/*.js ${SPEC_TEST_DIR}/tests/
+
+###############################################################################
+# Generate the proposal tests.
+###############################################################################
+
+repos='bulk-memory-operations reference-types'
+
+for repo in ${repos}; do
+  echo "Process ${repo}"
+  log_and_run cd ${TMP_DIR}
+  log_and_run git clone https://github.com/WebAssembly/${repo}
+  # Compile the spec interpreter to generate the .js test cases later.
+  log_and_run cd ${repo}/interpreter
+  log_and_run make clean opt
+  log_and_run cd ../test/core
+  log_and_run mkdir ${SPEC_TEST_DIR}/tests/proposals/${repo}
+
+  # Iterate over all proposal tests. Those which differ from the spec tests are
+  # copied to the output directory and converted to .js tests.
+  for abs_filename in ${TMP_DIR}/${repo}/test/core/*.wast; do
+    rel_filename="$(basename -- $abs_filename)"
+    test_name=${rel_filename%.wast}
+    spec_filename=${V8_DIR}/test/wasm-js/data/test/core/${rel_filename}
+    if [ ! -f "$spec_filename" ] || ! cmp -s $abs_filename $spec_filename ; then
+      log_and_run cp ${rel_filename} ${SPEC_TEST_DIR}/tests/proposals/${repo}/
+      log_and_run ./run.py --wasm ../../interpreter/wasm ${rel_filename} --out _build 2> /dev/null
+    fi
+  done
+  log_and_run cp _build/*.js ${SPEC_TEST_DIR}/tests/proposals/${repo}/
+done
+
+###############################################################################
+# Report and cleanup.
+###############################################################################
 
 cd ${SPEC_TEST_DIR}
 echo
 echo "The following files will get uploaded:"
-ls tests
+ls -R tests
 echo
-upload_to_google_storage.py -a -b v8-wasm-spec-tests tests
+
+log_and_run rm -rf ${TMP_DIR}
+
+###############################################################################
+# Upload all spec tests.
+###############################################################################
+
+echo "****************************************************************************"
+echo "* For the following command you first have to authenticate with google cloud"
+echo "* storage. For that you have to execute"
+echo "*"
+echo "* > gsutil.py config"
+echo "*"
+echo "* When the script asks you for your project-id, use 0."
+echo "****************************************************************************"
+log_and_run cd ${SPEC_TEST_DIR}
+log_and_run upload_to_google_storage.py -a -b v8-wasm-spec-tests tests
diff --git a/src/v8/tools/wasm/wasm-import-profiler-end.js b/src/v8/tools/wasm/wasm-import-profiler-end.js
new file mode 100644
index 0000000..5b5eedd
--- /dev/null
+++ b/src/v8/tools/wasm/wasm-import-profiler-end.js
@@ -0,0 +1,6 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Code to run at shutdown: print out the profiles for all instances.
+if (typeof WebAssembly.dumpAllProfiles == "function") WebAssembly.dumpAllProfiles();
diff --git a/src/v8/tools/wasm/wasm-import-profiler.js b/src/v8/tools/wasm/wasm-import-profiler.js
new file mode 100644
index 0000000..cfbb3fb
--- /dev/null
+++ b/src/v8/tools/wasm/wasm-import-profiler.js
@@ -0,0 +1,131 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(() => {
+  let all_profiles = [];
+  let instanceMap = new WeakMap();
+  let instanceCounter = 0;
+
+  function instrument(imports, profile) {
+    let orig_imports = imports;
+    return new Proxy(imports, {
+      get: (obj, module_name) => {
+        let orig_module = orig_imports[module_name];
+        return new Proxy(orig_module, {
+          get: (obj, item_name) => {
+            let orig_func = orig_module[item_name];
+            let item = orig_func;
+            if (typeof orig_func == "function") {
+              var full_name = module_name + "." + item_name;
+              print("instrumented " + full_name);
+              profile[full_name] = {name: full_name, count: 0, total: 0};
+              item = function profiled_func(...args) {
+                var before = performance.now();
+                var result = orig_func(...args);
+                var delta = performance.now() - before;
+                var data = profile[full_name];
+                data.count++;
+                data.total += delta;
+                return result;
+              }
+            }
+            return item;
+          }
+        })
+      }
+    });
+  }
+
+  function dumpProfile(profile) {
+    let array = [];
+    for (let key in profile) {
+      if (key == "instanceNum") continue;
+      let data = profile[key];
+      if (data.count == 0) continue;
+      array.push(data);
+    }
+    print(`--- Import profile for instance ${profile.instanceNum} ---`);
+    if (array.length == 0) return;
+    array.sort((a, b) => b.total - a.total);
+    for (let data of array) {
+      print(`${padl(data.name, 30)}: ${padr(data.count, 10)} ${padp(data.total, 10)}ms`);
+    }
+  }
+
+  function padl(s, len) {
+    s = s.toString();
+    while (s.length < len) s = s + " ";
+    return s;
+  }
+  function padr(s, len) {
+    s = s.toString();
+    while (s.length < len) s = " " + s;
+    return s;
+  }
+  function padp(s, len) {
+    s = s.toString();
+    var i = s.indexOf(".");
+    if (i == -1) i = s.length;
+    while (i++ < len) s = " " + s;
+    return s;
+  }
+
+  // patch: WebAssembly.instantiate (async)
+  let orig_instantiate = WebAssembly.instantiate;
+  WebAssembly.instantiate = (m, imports, ...args) => {
+    let profile = {};
+    let promise = orig_instantiate(m, instrument(imports, profile), ...args);
+    promise.then((instance) => {
+      instanceMap.set(instance, profile);
+      all_profiles.push(profile);
+      profile.instanceNum = instanceCounter++;
+    });
+    return promise;
+  }
+
+  // patch: new WebAssembly.Instance (sync)
+  let orig_new_instance = WebAssembly.Instance;
+  WebAssembly.Instance = new Proxy(orig_new_instance, {
+    construct: (target, args) => {
+      let profile = {};
+      args[1] = instrument(args[1], profile);
+      let instance = new orig_new_instance(...args);
+      instanceMap.set(instance, profile);
+      all_profiles.push(profile);
+      profile.instanceNum = instanceCounter++;
+      return instance;
+    }
+  });
+
+  // expose: WebAssembly.dumpProfile(instance)
+  WebAssembly.dumpProfile = (instance) => {
+    let profile = instanceMap.get(instance);
+    if (profile === undefined) return;
+    dumpProfile(profile);
+  }
+  // expose: WebAssembly.clearProfile(instance)
+  WebAssembly.clearProfile = (instance) => {
+    let profile = instanceMap.get(instance);
+    if (profile === undefined) return;
+    for (let key in profile) {
+      if (key == "instanceNum") continue;
+      let data = profile[key];
+      data.count = 0;
+      data.total = 0;
+    }
+  }
+  // expose: WebAssembly.dumpAllProfiles()
+  WebAssembly.dumpAllProfiles = () => {
+    for (let profile of all_profiles) dumpProfile(profile);
+  }
+  // expose: WebAssembly.getProfile(instance)
+  // returns: {
+  //    func_name1: {name: func_name1, count: <num>, total: <num>}
+  //    func_name2: {name: func_name1, count: <num>, total: <num>}
+  //    ...
+  // }
+  WebAssembly.getProfile = (instance) => {
+    return instanceMap.get(instance);
+  }
+})();
diff --git a/src/v8/tools/whitespace.txt b/src/v8/tools/whitespace.txt
index 83f0066..9a80a32 100644
--- a/src/v8/tools/whitespace.txt
+++ b/src/v8/tools/whitespace.txt
@@ -6,5 +6,5 @@
 "I'm so deoptimized today!"
 The doubles heard this and started to unbox.
 The Smi looked at them when a crazy v8-autoroll account showed up...
-The autoroller bought a round of Himbeerbrause. Suddenly...
-The bartender starts to shake the bottles.......................
+The autoroller bought a round of Himbeerbrause. Suddenly.....
+The bartender starts to shake the bottles..........
diff --git a/src/v8/tools/windbg.js b/src/v8/tools/windbg.js
new file mode 100644
index 0000000..3df14f4
--- /dev/null
+++ b/src/v8/tools/windbg.js
@@ -0,0 +1,420 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/*=============================================================================
+  This is a convenience script for debugging with WinDbg (akin to gdbinit)
+  It can be loaded into WinDbg with: .scriptload full_path\windbg.js
+
+  To printout the help message below into the debugger's command window:
+  !help
+=============================================================================*/
+
+function help() {
+  print("--------------------------------------------------------------------");
+  print("  LIVE debugging only");
+  print("--------------------------------------------------------------------");
+  print("  !jlh(\"local_handle_var_name\")");
+  print("      prints object held by the handle");
+  print("      e.g. !jlh(\"key\") or !jlh(\"this->receiver_\")");
+  print("  !job(address_or_taggedint)");
+  print("      prints object at the address, e.g. !job(0x235cb869f9)");
+  print("  !jobs(start_address, count)");
+  print("      prints 'count' objects from a continuous range of Object");
+  print("      pointers, e.g. !jobs(0x5f7270, 42)");
+  print("  !jst() or !jst");
+  print("      prints javascript stack (output goes into the console)");
+  print("  !jsbp() or !jsbp");
+  print("      sets bp in v8::internal::Execution::Call");
+  print("");
+  print("--------------------------------------------------------------------");
+  print("  Managed heap");
+  print("--------------------------------------------------------------------");
+  print("  !set_iso(isolate_address)");
+  print("      call this function before using !mem or other heap routines");
+  print("  !mem or !mem(\"space1[ space2 ...]\")");
+  print("      prints memory chunks from the 'space' owned by the heap in the");
+  print("      isolate set by !set_iso; valid values for 'space' are:");
+  print("      new, old, map, code, lo [large], nlo [newlarge], ro [readonly]");
+  print("      if no 'space' specified prints memory chunks for all spaces,");
+  print("      e.g. !mem(\"code\"), !mem(\"ro new old\")");
+  print("  !where(address)");
+  print("      prints name of the space and address of the MemoryChunk the");
+  print("      'address' is from, e.g. !where(0x235cb869f9)");
+  print("");
+  print("--------------------------------------------------------------------");
+  print("  To run any function from this script (live or postmortem):");
+  print("");
+  print("  dx @$scriptContents.function_name(args)");
+  print("      e.g. dx @$scriptContents.pointer_size()");
+  print("      e.g. dx @$scriptContents.module_name(\"v8_for_test\")");
+  print("--------------------------------------------------------------------");
+}
+
+/*=============================================================================
+  Output
+=============================================================================*/
+function print(s) {
+  host.diagnostics.debugLog(s + "\n");
+}
+
+function print_filtered(obj, filter) {
+  for (let line of obj) {
+    if (!filter || line.indexOf(filter) != -1) {
+      print(line);
+    }
+  }
+}
+
+function inspect(s) {
+  for (var k of Reflect.ownKeys(s)) {
+    print(k + " => " + Reflect.get(s, k));
+  }
+}
+
+
+/*=============================================================================
+  Utils (postmortem and live)
+=============================================================================*/
+function cast(address, type_name) {
+  return host.createTypedObject(address, module_name(), type_name);
+}
+
+// Failed to figure out how to get pointer size from the debugger's data model,
+// so we parse it out from sizeof(void*) output.
+function pointer_size() {
+  let ctl = host.namespace.Debugger.Utility.Control;
+  let sizeof = ctl.ExecuteCommand("?? sizeof(void*)");
+  let output = "";
+  for (output of sizeof) {} // unsigned int64 8
+  return parseInt(output.trim().split(" ").pop());
+}
+
+function poi(address) {
+  try {
+    // readMemoryValues throws if cannot read from 'address'.
+    return host.memory.readMemoryValues(address, 1, pointer_size())[0];
+  }
+  catch (e){}
+}
+
+function get_register(name) {
+  return host.namespace.Debugger.State.DebuggerVariables.curthread
+         .Registers.User[name];
+}
+
+// In debug builds v8 code is compiled into v8.dll, and in release builds
+// the code is compiled directly into the executable. If you are debugging some
+// other embedder, invoke module_name explicitly from the debugger and provide
+// the module name to use.
+const known_exes = ["d8", "unittests", "mksnapshot", "chrome", "chromium"];
+let module_name_cache;
+function module_name(use_this_module) {
+  if (use_this_module) {
+    module_name_cache = use_this_module;
+  }
+
+  if (!module_name_cache) {
+    let v8 = host.namespace.Debugger.State.DebuggerVariables.curprocess
+             .Modules.Where(
+                function(m) {
+                 return m.Name.indexOf("\\v8.dll") !== -1;
+                });
+
+    if (v8)  {
+      module_name_cache = "v8";
+    }
+    else {
+      for (let exe_name in known_exes) {
+        let exe = host.namespace.Debugger.State.DebuggerVariables.curprocess
+                  .Modules.Where(
+                    function(m) {
+                      return m.Name.indexOf(`\\${exe_name}.exe`) !== -1;
+                    });
+        if (exe) {
+            module_name_cache = exe_name;
+            break;
+        }
+      }
+    }
+  }
+  return module_name_cache;
+};
+
+function make_call(fn) {
+  // .call resets current frame to the top one, so have to manually remember
+  // and restore it after making the call.
+  let curframe = host.namespace.Debugger.State.DebuggerVariables.curframe;
+  let ctl = host.namespace.Debugger.Utility.Control;
+  let output = ctl.ExecuteCommand(`.call ${fn};g`);
+  curframe.SwitchTo();
+  return output;
+}
+
+// Skips the meta output about the .call invocation.
+function make_call_and_print_return(fn) {
+  let output = make_call(fn);
+  let print_line = false;
+  for (let line of output) {
+    if (print_line) {
+      print(line);
+      break;
+    }
+    if (line.includes(".call returns")) {
+      print_line = true;
+    }
+  }
+}
+
+
+/*=============================================================================
+  Wrappers around V8's printing functions and other utils for live-debugging
+=============================================================================*/
+
+/*-----------------------------------------------------------------------------
+  'address' should be an int (so in hex must include '0x' prefix).
+-----------------------------------------------------------------------------*/
+function print_object(address) {
+  let output = make_call(`_v8_internal_Print_Object(${address})`);
+
+  // skip the first few lines with meta info of .call command
+  let skip_line = true;
+  for (let line of output) {
+    if (!skip_line) {
+      print(line);
+      continue;
+    }
+    if (line.includes("deadlocks and corruption of the debuggee")) {
+      skip_line = false;
+    }
+  }
+}
+
+/*-----------------------------------------------------------------------------
+  'handle_to_object' should be a name of a Handle which can be a local
+  variable or it can be a member variable like "this->receiver_".
+-----------------------------------------------------------------------------*/
+function print_object_from_handle(handle_to_object) {
+  let handle = host.evaluateExpression(handle_to_object);
+  let location = handle.location_;
+  let pobj = poi(location.address);
+  print_object(pobj);
+}
+
+/*-----------------------------------------------------------------------------
+  'start_address' should be an int (so in hex must include '0x' prefix), it can
+  point at any continuous memory that contains Object pointers.
+-----------------------------------------------------------------------------*/
+function print_objects_array(start_address, count) {
+  let ctl = host.namespace.Debugger.Utility.Control;
+  let psize = pointer_size();
+  let addr_int = start_address;
+  for (let i = 0; i < count; i++) {
+    const addr_hex = `0x${addr_int.toString(16)}`;
+
+    // TODO: Tried using createPointerObject but it throws unknown exception
+    // from ChakraCore. Why?
+    //let obj = host.createPointerObject(addr_hex, module, "void*");
+
+    let output = ctl.ExecuteCommand(`dp ${addr_hex} l1`);
+    let item = "";
+    for (item of output) {} // 005f7270  34604101
+    let deref = `0x${item.split(" ").pop()}`;
+    print(`${addr_hex} -> ${deref}`);
+    print_object(deref);
+
+    addr_int += psize;
+  }
+}
+
+function print_js_stack() {
+  make_call("_v8_internal_Print_StackTrace()");
+}
+
+function set_user_js_bp() {
+  let ctl = host.namespace.Debugger.Utility.Control;
+  ctl.ExecuteCommand(`bp ${module_name()}!v8::internal::Execution::Call`)
+}
+
+
+/*=============================================================================
+  Managed heap related functions (live and post-mortem debugging)
+=============================================================================*/
+let isolate_address = 0;
+function set_isolate_address(addr) {
+  isolate_address = addr;
+}
+
+/*-----------------------------------------------------------------------------
+    Memory in each Space is organized into a linked list of memory chunks
+-----------------------------------------------------------------------------*/
+const NEVER_EVACUATE = 1 << 7; // see src\heap\spaces.h
+
+function print_memory_chunk_list(space_type, front, top, age_mark) {
+  let alloc_pos = top ? ` (allocating at: ${top})` : "";
+  let age_mark_pos = age_mark ? ` (age_mark at: ${age_mark})` : "";
+  print(`${space_type}${alloc_pos}${age_mark_pos}:`);
+  if (front.isNull) {
+    print("<empty>\n");
+    return;
+  }
+
+  let cur = front;
+  while (!cur.isNull) {
+    let imm = cur.flags_ & NEVER_EVACUATE ? "*" : " ";
+    let addr = `0x${cur.address.toString(16)}`;
+    let area =
+      `0x${cur.area_start_.toString(16)} - 0x${cur.area_end_.toString(16)}`;
+    let dt = `dt ${addr} ${module_name()}!v8::internal::MemoryChunk`;
+    print(`${imm}    ${addr}:\t ${area} (0x${cur.size_.toString(16)}) : ${dt}`);
+    cur = cur.list_node_.next_;
+  }
+  print("");
+}
+
+const space_tags =
+  ['old', 'new_to', 'new_from', 'ro', 'map', 'code', 'lo', 'nlo'];
+
+function get_chunks_space(space_tag, front, chunks) {
+    let cur = front;
+    while (!cur.isNull) {
+        chunks.push({
+          'address':cur.address,
+          'area_start_':cur.area_start_,
+          'area_end_':cur.area_end_,
+          'space':space_tag});
+        cur = cur.list_node_.next_;
+    }
+}
+
+function get_chunks() {
+  let iso = cast(isolate_address, "v8::internal::Isolate");
+  let h = iso.heap_;
+
+  let chunks = [];
+  get_chunks_space('old', h.old_space_.memory_chunk_list_.front_, chunks);
+  get_chunks_space('new_to',
+    h.new_space_.to_space_.memory_chunk_list_.front_, chunks);
+  get_chunks_space('new_from',
+    h.new_space_.from_space_.memory_chunk_list_.front_, chunks);
+  get_chunks_space('ro', h.read_only_space_.memory_chunk_list_.front_, chunks);
+  get_chunks_space('map', h.map_space_.memory_chunk_list_.front_, chunks);
+  get_chunks_space('code', h.code_space_.memory_chunk_list_.front_, chunks);
+  get_chunks_space('lo', h.lo_space_.memory_chunk_list_.front_, chunks);
+  get_chunks_space('nlo', h.new_lo_space_.memory_chunk_list_.front_, chunks);
+
+  return chunks;
+}
+
+function find_chunk(address) {
+  // if 'address' is greater than Number.MAX_SAFE_INTEGER, comparison ops on it
+  // throw  "Error: 64 bit value loses precision on conversion to number"
+  try {
+    let chunks = get_chunks(isolate_address);
+    for (let c of chunks) {
+      let chunk = cast(c.address, "v8::internal::MemoryChunk");
+      if (address >= chunk.area_start_ && address < chunk.area_end_) {
+        return c;
+      }
+    }
+  }
+  catch (e) { }
+  return undefined;
+}
+
+/*-----------------------------------------------------------------------------
+    Print memory chunks from spaces in the current Heap
+      'isolate_address' should be an int (so in hex must include '0x' prefix).
+      'space': space separated string containing "all", "old", "new", "map",
+               "code", "ro [readonly]", "lo [large]", "nlo [newlarge]"
+-----------------------------------------------------------------------------*/
+function print_memory(space = "all") {
+  // 'isolate_address' is module-level state set by !set_iso; without it we
+  // have no heap to walk.
+  if (isolate_address == 0) {
+    print("Please call !set_iso(isolate_address) first.");
+    return;
+  }
+
+  let iso = cast(isolate_address, "v8::internal::Isolate");
+  let h = iso.heap_;
+  print(`Heap at ${h.targetLocation}`);
+
+  // Space selectors are matched case-insensitively as whole space-separated
+  // tokens, e.g. "old code" or "ro lo".
+  let st = space.toLowerCase().split(" ");
+
+  print("Im   address:\t object area start - end (size)");
+  if (st.includes("all") || st.includes("old")) {
+    print_memory_chunk_list("OldSpace",
+      h.old_space_.memory_chunk_list_.front_,
+      h.old_space_.allocation_info_.top_);
+  }
+  if (st.includes("all") || st.includes("new")) {
+    // new space doesn't use the chunk list from its base class but from
+    // the to/from semi-spaces it points to
+    print_memory_chunk_list("NewSpace_To",
+      h.new_space_.to_space_.memory_chunk_list_.front_,
+      h.new_space_.allocation_info_.top_,
+      h.new_space_.to_space_.age_mark_);
+    print_memory_chunk_list("NewSpace_From",
+      h.new_space_.from_space_.memory_chunk_list_.front_);
+  }
+  if (st.includes("all") || st.includes("map")) {
+    print_memory_chunk_list("MapSpace",
+      h.map_space_.memory_chunk_list_.front_,
+      h.map_space_.allocation_info_.top_);
+  }
+  if (st.includes("all") || st.includes("code")) {
+    print_memory_chunk_list("CodeSpace",
+      h.code_space_.memory_chunk_list_.front_,
+      h.code_space_.allocation_info_.top_);
+  }
+  // Large-object and read-only spaces have no linear allocation area, so no
+  // allocation top is passed for them.
+  if (st.includes("all") || st.includes("large") || st.includes("lo")) {
+    print_memory_chunk_list("LargeObjectSpace",
+      h.lo_space_.memory_chunk_list_.front_);
+  }
+  if (st.includes("all") || st.includes("newlarge") || st.includes("nlo")) {
+    print_memory_chunk_list("NewLargeObjectSpace",
+      h.new_lo_space_.memory_chunk_list_.front_);
+  }
+  if (st.includes("all") || st.includes("readonly") || st.includes("ro")) {
+    print_memory_chunk_list("ReadOnlySpace",
+      h.read_only_space_.memory_chunk_list_.front_);
+  }
+}
+
+/*-----------------------------------------------------------------------------
+    'isolate_address' and 'address' should be ints (so in hex must include '0x'
+    prefix).
+-----------------------------------------------------------------------------*/
+function print_owning_space(address) {
+  // Heap lookups need the isolate set via !set_iso first.
+  if (isolate_address == 0) {
+    print("Please call !set_iso(isolate_address) first.");
+    return;
+  }
+
+  // find_chunk returns undefined when 'address' is outside every chunk's
+  // object area (or when the 64-bit precision error makes comparison fail).
+  let c = find_chunk(address);
+  let addr = `0x${address.toString(16)}`;
+  if (c) {
+      print(`${addr} is in ${c.space} (chunk: 0x${c.address.toString(16)})`);
+  }
+  else {
+      print(`Address ${addr} is not in managed heap`);
+  }
+}
+
+/*=============================================================================
+  Initialize short aliased names for the most common commands
+=============================================================================*/
+function initializeScript() {
+  // WinDbg calls this on script load; each functionAlias entry exposes the
+  // given function as a bang-command (e.g. !jlh, !mem) in the debugger.
+  return [
+      new host.functionAlias(help, "help"),
+      // Object inspection: handle, object, object array, JS stack.
+      new host.functionAlias(print_object_from_handle, "jlh"),
+      new host.functionAlias(print_object, "job"),
+      new host.functionAlias(print_objects_array, "jobs"),
+      new host.functionAlias(print_js_stack, "jst"),
+
+      // Heap navigation: select isolate, dump spaces, locate owning space.
+      new host.functionAlias(set_isolate_address, "set_iso"),
+      new host.functionAlias(print_memory, "mem"),
+      new host.functionAlias(print_owning_space, "where"),
+
+      // Breakpoint helper for user JavaScript.
+      new host.functionAlias(set_user_js_bp, "jsbp"),
+  ]
+}