Import Cobalt 20.master.0.234144

Includes the following patches:
  https://cobalt-review.googlesource.com/c/cobalt/+/5590
  by n1214.hwang@samsung.com

  https://cobalt-review.googlesource.com/c/cobalt/+/5530
  by errong.leng@samsung.com

  https://cobalt-review.googlesource.com/c/cobalt/+/5570
  by devin.cai@mediatek.com
diff --git a/src/v8/tools/testrunner/base_runner.py b/src/v8/tools/testrunner/base_runner.py
index 8fc09ee..7f9b434 100644
--- a/src/v8/tools/testrunner/base_runner.py
+++ b/src/v8/tools/testrunner/base_runner.py
@@ -2,12 +2,19 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+# for py2/py3 compatibility
+from __future__ import print_function
+from functools import reduce
 
 from collections import OrderedDict
 import json
+import multiprocessing
 import optparse
 import os
+import shlex
 import sys
+import traceback
+
 
 
 # Add testrunner to the path.
@@ -17,10 +24,15 @@
     os.path.dirname(os.path.abspath(__file__))))
 
 
-from local import testsuite
-from local import utils
-
-from testproc.shard import ShardProc
+from testrunner.local import command
+from testrunner.local import testsuite
+from testrunner.local import utils
+from testrunner.test_config import TestConfig
+from testrunner.testproc import progress
+from testrunner.testproc.rerun import RerunProc
+from testrunner.testproc.shard import ShardProc
+from testrunner.testproc.sigproc import SignalProc
+from testrunner.testproc.timeout import TimeoutProc
 
 
 BASE_DIR = (
@@ -31,8 +43,6 @@
 
 DEFAULT_OUT_GN = 'out.gn'
 
-ARCH_GUESS = utils.DefaultArch()
-
 # Map of test name synonyms to lists of test suites. Should be ordered by
 # expected runtimes (suites with slow test cases first). These groups are
 # invoked in separate steps on the bots.
@@ -46,6 +56,7 @@
     "inspector",
     "webkit",
     "mkgrokdump",
+    "wasm-js",
     "fuzzer",
     "message",
     "preparser",
@@ -60,6 +71,7 @@
     "wasm-spec-tests",
     "inspector",
     "mkgrokdump",
+    "wasm-js",
     "fuzzer",
     "message",
     "preparser",
@@ -68,13 +80,12 @@
   ],
   # This needs to stay in sync with test/d8_default.isolate.
   "d8_default": [
-    # TODO(machenbach): uncomment after infra side lands.
-    #"debugger",
+    "debugger",
     "mjsunit",
     "webkit",
-    #"message",
-    #"preparser",
-    #"intl",
+    "message",
+    "preparser",
+    "intl",
   ],
   # This needs to stay in sync with test/optimize_for_size.isolate.
   "optimize_for_size": [
@@ -90,6 +101,16 @@
   ],
 }
 
+# Double the timeout for these:
+SLOW_ARCHS = ["arm",
+              "mips",
+              "mipsel",
+              "mips64",
+              "mips64el",
+              "s390",
+              "s390x",
+              "arm64"]
+
 
 class ModeConfig(object):
   def __init__(self, flags, timeout_scalefactor, status_mode, execution_mode):
@@ -99,8 +120,9 @@
     self.execution_mode = execution_mode
 
 
-DEBUG_FLAGS = ["--nohard-abort", "--enable-slow-asserts", "--verify-heap"]
-RELEASE_FLAGS = ["--nohard-abort"]
+DEBUG_FLAGS = ["--nohard-abort", "--enable-slow-asserts", "--verify-heap",
+               "--testing-d8-test-runner"]
+RELEASE_FLAGS = ["--nohard-abort", "--testing-d8-test-runner"]
 MODES = {
   "debug": ModeConfig(
     flags=DEBUG_FLAGS,
@@ -138,6 +160,12 @@
   ),
 }
 
+PROGRESS_INDICATORS = {
+  'verbose': progress.VerboseProgressIndicator,
+  'dots': progress.DotsProgressIndicator,
+  'color': progress.ColorProgressIndicator,
+  'mono': progress.MonochromeProgressIndicator,
+}
 
 class TestRunnerError(Exception):
   pass
@@ -151,17 +179,34 @@
     else:
       self.arch = build_config['v8_target_cpu']
 
-    self.is_debug = build_config['is_debug']
     self.asan = build_config['is_asan']
     self.cfi_vptr = build_config['is_cfi']
     self.dcheck_always_on = build_config['dcheck_always_on']
     self.gcov_coverage = build_config['is_gcov_coverage']
+    self.is_android = build_config['is_android']
+    self.is_clang = build_config['is_clang']
+    self.is_debug = build_config['is_debug']
+    self.is_full_debug = build_config['is_full_debug']
     self.msan = build_config['is_msan']
     self.no_i18n = not build_config['v8_enable_i18n_support']
     self.no_snap = not build_config['v8_use_snapshot']
     self.predictable = build_config['v8_enable_verify_predictable']
     self.tsan = build_config['is_tsan']
+    # TODO(machenbach): We only have ubsan not ubsan_vptr.
     self.ubsan_vptr = build_config['is_ubsan_vptr']
+    self.embedded_builtins = build_config['v8_enable_embedded_builtins']
+    self.verify_csa = build_config['v8_enable_verify_csa']
+    self.lite_mode = build_config['v8_enable_lite_mode']
+    self.pointer_compression = build_config['v8_enable_pointer_compression']
+    # Export only for MIPS target
+    if self.arch in ['mips', 'mipsel', 'mips64', 'mips64el']:
+      self.mips_arch_variant = build_config['mips_arch_variant']
+      self.mips_use_msa = build_config['mips_use_msa']
+
+  @property
+  def use_sanitizer(self):
+    return (self.asan or self.cfi_vptr or self.msan or self.tsan or
+            self.ubsan_vptr)
 
   def __str__(self):
     detected_options = []
@@ -186,6 +231,14 @@
       detected_options.append('tsan')
     if self.ubsan_vptr:
       detected_options.append('ubsan_vptr')
+    if self.embedded_builtins:
+      detected_options.append('embedded_builtins')
+    if self.verify_csa:
+      detected_options.append('verify_csa')
+    if self.lite_mode:
+      detected_options.append('lite_mode')
+    if self.pointer_compression:
+      detected_options.append('pointer_compression')
 
     return '\n'.join(detected_options)
 
@@ -197,6 +250,12 @@
     self.build_config = None
     self.mode_name = None
     self.mode_options = None
+    self.target_os = None
+
+  @property
+  def framework_name(self):
+    """String name of the base-runner subclass, used in test results."""
+    raise NotImplementedError()
 
   def execute(self, sys_args=None):
     if sys_args is None:  # pragma: no cover
@@ -204,8 +263,13 @@
     try:
       parser = self._create_parser()
       options, args = self._parse_args(parser, sys_args)
+      if options.swarming:
+        # Swarming doesn't print how isolated commands are called. Let's make
+        # this less cryptic by printing it ourselves.
+        print(' '.join(sys.argv))
 
       self._load_build_config(options)
+      command.setup(self.target_os, options.device)
 
       try:
         self._process_default_options(options)
@@ -215,14 +279,26 @@
         raise
 
       args = self._parse_test_args(args)
-      suites = self._get_suites(args, options.verbose)
-
+      tests = self._load_testsuite_generators(args, options)
       self._setup_env()
-      return self._do_execute(suites, args, options)
+      print(">>> Running tests for %s.%s" % (self.build_config.arch,
+                                            self.mode_name))
+      exit_code = self._do_execute(tests, args, options)
+      if exit_code == utils.EXIT_CODE_FAILURES and options.json_test_results:
+        print("Force exit code 0 after failures. Json test results file "
+              "generated with failure information.")
+        exit_code = utils.EXIT_CODE_PASS
+      return exit_code
     except TestRunnerError:
-      return 1
+      traceback.print_exc()
+      return utils.EXIT_CODE_INTERNAL_ERROR
     except KeyboardInterrupt:
-      return 2
+      return utils.EXIT_CODE_INTERRUPTED
+    except Exception:
+      traceback.print_exc()
+      return utils.EXIT_CODE_INTERNAL_ERROR
+    finally:
+      command.tear_down()
 
   def _create_parser(self):
     parser = optparse.OptionParser()
@@ -247,14 +323,70 @@
                       " and buildbot builds): %s" % MODES.keys())
     parser.add_option("--shell-dir", help="DEPRECATED! Executables from build "
                       "directory will be used")
-    parser.add_option("-v", "--verbose", help="Verbose output",
+    parser.add_option("--test-root", help="Root directory of the test suites",
+                      default=os.path.join(self.basedir, 'test'))
+    parser.add_option("--total-timeout-sec", default=0, type="int",
+                      help="How long should fuzzer run")
+    parser.add_option("--swarming", default=False, action="store_true",
+                      help="Indicates running test driver on swarming.")
+
+    parser.add_option("-j", help="The number of parallel tasks to run",
+                      default=0, type=int)
+    parser.add_option("-d", "--device",
+                      help="The device ID to run Android tests on. If not "
+                           "given it will be autodetected.")
+
+    # Shard
+    parser.add_option("--shard-count", default=1, type=int,
+                      help="Split tests into this number of shards")
+    parser.add_option("--shard-run", default=1, type=int,
+                      help="Run this shard from the split up tests.")
+
+    # Progress
+    parser.add_option("-p", "--progress",
+                      choices=PROGRESS_INDICATORS.keys(), default="mono",
+                      help="The style of progress indicator (verbose, dots, "
+                           "color, mono)")
+    parser.add_option("--json-test-results",
+                      help="Path to a file for storing json results.")
+    parser.add_option("--exit-after-n-failures", type="int", default=100,
+                      help="Exit after the first N failures instead of "
+                           "running all tests. Pass 0 to disable this feature.")
+
+    # Rerun
+    parser.add_option("--rerun-failures-count", default=0, type=int,
+                      help="Number of times to rerun each failing test case. "
+                           "Very slow tests will be rerun only once.")
+    parser.add_option("--rerun-failures-max", default=100, type=int,
+                      help="Maximum number of failing test cases to rerun")
+
+    # Test config
+    parser.add_option("--command-prefix", default="",
+                      help="Prepended to each shell command used to run a test")
+    parser.add_option("--extra-flags", action="append", default=[],
+                      help="Additional flags to pass to each test command")
+    parser.add_option("--isolates", action="store_true", default=False,
+                      help="Whether to test isolates")
+    parser.add_option("--no-harness", "--noharness",
+                      default=False, action="store_true",
+                      help="Run without test harness of a given suite")
+    parser.add_option("--random-seed", default=0, type=int,
+                      help="Default seed for initializing random generator")
+    parser.add_option("--run-skipped", help="Also run skipped tests.",
                       default=False, action="store_true")
-    parser.add_option("--shard-count",
-                      help="Split tests into this number of shards",
-                      default=1, type="int")
-    parser.add_option("--shard-run",
-                      help="Run this shard from the split up tests.",
-                      default=1, type="int")
+    parser.add_option("-t", "--timeout", default=60, type=int,
+                      help="Timeout for single test in seconds")
+    parser.add_option("-v", "--verbose", default=False, action="store_true",
+                      help="Verbose output")
+
+    # TODO(machenbach): Temporary options for rolling out new test runner
+    # features.
+    parser.add_option("--mastername", default='',
+                      help="Mastername property from infrastructure. Not "
+                           "setting this option indicates manual usage.")
+    parser.add_option("--buildername", default='',
+                      help="Buildername property from infrastructure. Not "
+                           "setting this option indicates manual usage.")
 
   def _add_parser_options(self, parser):
     pass
@@ -264,7 +396,7 @@
 
     if any(map(lambda v: v and ',' in v,
                 [options.arch, options.mode])):  # pragma: no cover
-      print 'Multiple arch/mode are deprecated'
+      print('Multiple arch/mode are deprecated')
       raise TestRunnerError()
 
     return options, args
@@ -277,13 +409,20 @@
         pass
 
     if not self.build_config:  # pragma: no cover
-      print 'Failed to load build config'
+      print('Failed to load build config')
       raise TestRunnerError
 
-    print 'Build found: %s' % self.outdir
+    print('Build found: %s' % self.outdir)
     if str(self.build_config):
-      print '>>> Autodetected:'
-      print self.build_config
+      print('>>> Autodetected:')
+      print(self.build_config)
+
+    # Represents the OS that tests are run on. Same as host OS except for
+    # Android, which is determined by build output.
+    if self.build_config.is_android:
+      self.target_os = 'android'
+    else:
+      self.target_os = utils.GuessOS()
 
   # Returns possible build paths in order:
   # gn
@@ -353,7 +492,7 @@
     build_config_mode = 'debug' if self.build_config.is_debug else 'release'
     if options.mode:
       if options.mode not in MODES:  # pragma: no cover
-        print '%s mode is invalid' % options.mode
+        print('%s mode is invalid' % options.mode)
         raise TestRunnerError()
       if MODES[options.mode].execution_mode != build_config_mode:
         print ('execution mode (%s) for %s is inconsistent with build config '
@@ -378,6 +517,16 @@
       print('Warning: --shell-dir is deprecated. Searching for executables in '
             'build directory (%s) instead.' % self.outdir)
 
+    if options.j == 0:
+      if self.build_config.is_android:
+        # Adb isn't happy about multi-processed file pushing.
+        options.j = 1
+      else:
+        options.j = multiprocessing.cpu_count()
+
+    options.command_prefix = shlex.split(options.command_prefix)
+    options.extra_flags = sum(map(shlex.split, options.extra_flags), [])
+
   def _buildbot_to_v8_mode(self, config):
     """Convert buildbot build configs to configs understood by the v8 runner.
 
@@ -410,6 +559,9 @@
         asan_options.append('detect_leaks=1')
       else:
         asan_options.append('detect_leaks=0')
+      if utils.GuessOS() == 'windows':
+        # https://crbug.com/967663
+        asan_options.append('detect_stack_use_after_return=0')
       os.environ['ASAN_OPTIONS'] = ":".join(asan_options)
 
     if self.build_config.cfi_vptr:
@@ -471,34 +623,129 @@
 
     return reduce(list.__add__, map(expand_test_group, args), [])
 
-  def _get_suites(self, args, verbose=False):
-    names = self._args_to_suite_names(args)
-    return self._load_suites(names, verbose)
-
-  def _args_to_suite_names(self, args):
+  def _args_to_suite_names(self, args, test_root):
     # Use default tests if no test configuration was provided at the cmd line.
-    all_names = set(utils.GetSuitePaths(os.path.join(self.basedir, 'test')))
+    all_names = set(utils.GetSuitePaths(test_root))
     args_names = OrderedDict([(arg.split('/')[0], None) for arg in args]) # set
     return [name for name in args_names if name in all_names]
 
   def _get_default_suite_names(self):
     return []
 
-  def _expand_test_group(self, name):
-    return TEST_MAP.get(name, [name])
+  def _load_testsuite_generators(self, args, options):
+    names = self._args_to_suite_names(args, options.test_root)
+    test_config = self._create_test_config(options)
+    variables = self._get_statusfile_variables(options)
 
-  def _load_suites(self, names, verbose=False):
-    def load_suite(name):
-      if verbose:
-        print '>>> Loading test suite: %s' % name
-      return testsuite.TestSuite.LoadTestSuite(
-          os.path.join(self.basedir, 'test', name))
-    return map(load_suite, names)
+    # Head generator with no elements
+    test_chain = testsuite.TestGenerator(0, [], [])
+    for name in names:
+      if options.verbose:
+        print('>>> Loading test suite: %s' % name)
+      suite = testsuite.TestSuite.Load(
+          os.path.join(options.test_root, name), test_config,
+          self.framework_name)
+
+      if self._is_testsuite_supported(suite, options):
+        tests = suite.load_tests_from_disk(variables)
+        test_chain.merge(tests)
+
+    return test_chain
+
+  def _is_testsuite_supported(self, suite, options):
+    """A predicate that can be overridden to filter out unsupported TestSuite
+    instances (see NumFuzzer for usage)."""
+    return True
+
+  def _get_statusfile_variables(self, options):
+    simd_mips = (
+      self.build_config.arch in ['mipsel', 'mips', 'mips64', 'mips64el'] and
+      self.build_config.mips_arch_variant == "r6" and
+      self.build_config.mips_use_msa)
+
+    mips_arch_variant = (
+      self.build_config.arch in ['mipsel', 'mips', 'mips64', 'mips64el'] and
+      self.build_config.mips_arch_variant)
+
+    # TODO(machenbach): In GN we can derive simulator run from
+    # target_arch != v8_target_arch in the dumped build config.
+    return {
+      "arch": self.build_config.arch,
+      "asan": self.build_config.asan,
+      "byteorder": sys.byteorder,
+      "dcheck_always_on": self.build_config.dcheck_always_on,
+      "deopt_fuzzer": False,
+      "endurance_fuzzer": False,
+      "gc_fuzzer": False,
+      "gc_stress": False,
+      "gcov_coverage": self.build_config.gcov_coverage,
+      "isolates": options.isolates,
+      "is_clang": self.build_config.is_clang,
+      "is_full_debug": self.build_config.is_full_debug,
+      "mips_arch_variant": mips_arch_variant,
+      "mode": self.mode_options.status_mode
+              if not self.build_config.dcheck_always_on
+              else "debug",
+      "msan": self.build_config.msan,
+      "no_harness": options.no_harness,
+      "no_i18n": self.build_config.no_i18n,
+      "no_snap": self.build_config.no_snap,
+      "novfp3": False,
+      "optimize_for_size": "--optimize-for-size" in options.extra_flags,
+      "predictable": self.build_config.predictable,
+      "simd_mips": simd_mips,
+      "simulator_run": False,
+      "system": self.target_os,
+      "tsan": self.build_config.tsan,
+      "ubsan_vptr": self.build_config.ubsan_vptr,
+      "embedded_builtins": self.build_config.embedded_builtins,
+      "verify_csa": self.build_config.verify_csa,
+      "lite_mode": self.build_config.lite_mode,
+      "pointer_compression": self.build_config.pointer_compression,
+    }
+
+  def _create_test_config(self, options):
+    timeout = options.timeout * self._timeout_scalefactor(options)
+    return TestConfig(
+        command_prefix=options.command_prefix,
+        extra_flags=options.extra_flags,
+        isolates=options.isolates,
+        mode_flags=self.mode_options.flags,
+        no_harness=options.no_harness,
+        noi18n=self.build_config.no_i18n,
+        random_seed=options.random_seed,
+        run_skipped=options.run_skipped,
+        shell_dir=self.outdir,
+        timeout=timeout,
+        verbose=options.verbose,
+    )
+
+  def _timeout_scalefactor(self, options):
+    """Increases timeout for slow build configurations."""
+    factor = self.mode_options.timeout_scalefactor
+    if self.build_config.arch in SLOW_ARCHS:
+      factor *= 4
+    if self.build_config.lite_mode:
+      factor *= 2
+    if self.build_config.predictable:
+      factor *= 4
+    if self.build_config.use_sanitizer:
+      factor *= 1.5
+    if self.build_config.is_full_debug:
+      factor *= 4
+
+    return factor
 
   # TODO(majeski): remove options & args parameters
   def _do_execute(self, suites, args, options):
     raise NotImplementedError()
 
+  def _prepare_procs(self, procs):
+    procs = filter(None, procs)
+    for i in range(0, len(procs) - 1):
+      procs[i].connect_to(procs[i + 1])
+    procs[0].setup()
+
   def _create_shard_proc(self, options):
     myid, count = self._get_shard_info(options)
     if count == 1:
@@ -536,8 +783,42 @@
       # TODO(machenbach): Turn this into an assert. If that's wrong on the
       # bots, printing will be quite useless. Or refactor this code to make
       # sure we get a return code != 0 after testing if we got here.
-      print "shard-run not a valid number, should be in [1:shard-count]"
-      print "defaulting back to running all tests"
+      print("shard-run not a valid number, should be in [1:shard-count]")
+      print("defaulting back to running all tests")
       return 1, 1
 
     return shard_run, shard_count
+
+  def _create_progress_indicators(self, test_count, options):
+    procs = [PROGRESS_INDICATORS[options.progress]()]
+    if options.json_test_results:
+      procs.append(progress.JsonTestProgressIndicator(
+        self.framework_name,
+        options.json_test_results,
+        self.build_config.arch,
+        self.mode_options.execution_mode))
+
+    for proc in procs:
+      try:
+        proc.set_test_count(test_count)
+      except AttributeError:
+        pass
+
+    return procs
+
+  def _create_result_tracker(self, options):
+    return progress.ResultsTracker(options.exit_after_n_failures)
+
+  def _create_timeout_proc(self, options):
+    if not options.total_timeout_sec:
+      return None
+    return TimeoutProc(options.total_timeout_sec)
+
+  def _create_signal_proc(self):
+    return SignalProc()
+
+  def _create_rerun_proc(self, options):
+    if not options.rerun_failures_count:
+      return None
+    return RerunProc(options.rerun_failures_count,
+                     options.rerun_failures_max)
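
The _timeout_scalefactor multipliers introduced above compound with each other,
so a single slow configuration can inflate the per-test timeout several times
over. A rough worked sketch of the same arithmetic, assuming a mode scale
factor of 1 and the default 60 second --timeout (both values are illustrative):

    # Hypothetical arm64 ASan build:
    factor = 1              # assumed mode_options.timeout_scalefactor
    factor *= 4             # "arm64" is listed in SLOW_ARCHS
    factor *= 1.5           # use_sanitizer is true because is_asan is set
    timeout = 60 * factor   # 360 seconds per test
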
diff --git a/src/v8/tools/testrunner/local/android.py b/src/v8/tools/testrunner/local/android.py
new file mode 100644
index 0000000..ebf04af
--- /dev/null
+++ b/src/v8/tools/testrunner/local/android.py
@@ -0,0 +1,205 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Wrapper around the Android device abstraction from src/build/android.
+"""
+
+import logging
+import os
+import sys
+import re
+
+BASE_DIR = os.path.normpath(
+    os.path.join(os.path.dirname(__file__), '..', '..', '..'))
+ANDROID_DIR = os.path.join(BASE_DIR, 'build', 'android')
+DEVICE_DIR = '/data/local/tmp/v8/'
+
+
+class TimeoutException(Exception):
+  def __init__(self, timeout, output=None):
+    self.timeout = timeout
+    self.output = output
+
+
+class CommandFailedException(Exception):
+  def __init__(self, status, output):
+    self.status = status
+    self.output = output
+
+
+class _Driver(object):
+  """Helper class to execute shell commands on an Android device."""
+  def __init__(self, device=None):
+    assert os.path.exists(ANDROID_DIR)
+    sys.path.insert(0, ANDROID_DIR)
+
+    # We import the dependencies only on demand, so that this file can be
+    # imported unconditionally.
+    import devil_chromium
+    from devil.android import device_errors  # pylint: disable=import-error
+    from devil.android import device_utils  # pylint: disable=import-error
+    from devil.android.perf import cache_control  # pylint: disable=import-error
+    from devil.android.perf import perf_control  # pylint: disable=import-error
+    global cache_control
+    global device_errors
+    global perf_control
+
+    devil_chromium.Initialize()
+
+    # Find specified device or a single attached device if none was specified.
+    # In case none or multiple devices are attached, this raises an exception.
+    self.device = device_utils.DeviceUtils.HealthyDevices(
+        retries=5, enable_usb_resets=True, device_arg=device)[0]
+
+    # This remembers what we have already pushed to the device.
+    self.pushed = set()
+
+  def tear_down(self):
+    """Clean up files after running all tests."""
+    self.device.RemovePath(DEVICE_DIR, force=True, recursive=True)
+
+  def push_file(self, host_dir, file_name, target_rel='.',
+                skip_if_missing=False):
+    """Push a single file to the device (cached).
+
+    Args:
+      host_dir: Absolute parent directory of the file to push.
+      file_name: Name of the file to push.
+      target_rel: Parent directory of the target location on the device
+          (relative to the device's base dir for testing).
+      skip_if_missing: Keeps silent about missing files when set. Otherwise
+          logs an error.
+    """
+    # TODO(sergiyb): Implement this method using self.device.PushChangedFiles to
+    # avoid accessing low-level self.device.adb.
+    file_on_host = os.path.join(host_dir, file_name)
+
+    # Only push files not yet pushed in one execution.
+    if file_on_host in self.pushed:
+      return
+
+    file_on_device_tmp = os.path.join(DEVICE_DIR, '_tmp_', file_name)
+    file_on_device = os.path.join(DEVICE_DIR, target_rel, file_name)
+    folder_on_device = os.path.dirname(file_on_device)
+
+    # Only attempt to push files that exist.
+    if not os.path.exists(file_on_host):
+      if not skip_if_missing:
+        logging.critical('Missing file on host: %s' % file_on_host)
+      return
+
+    # Work-around for 'text file busy' errors. Push the files to a temporary
+    # location and then copy them with a shell command.
+    output = self.device.adb.Push(file_on_host, file_on_device_tmp)
+    # Success looks like this: '3035 KB/s (12512056 bytes in 4.025s)'.
+    # Errors look like this: 'failed to copy  ... '.
+    if output and not re.search('^[0-9]', output.splitlines()[-1]):
+      logging.critical('PUSH FAILED: ' + output)
+    self.device.adb.Shell('mkdir -p %s' % folder_on_device)
+    self.device.adb.Shell('cp %s %s' % (file_on_device_tmp, file_on_device))
+    self.pushed.add(file_on_host)
+
+  def push_executable(self, shell_dir, target_dir, binary):
+    """Push files required to run a V8 executable.
+
+    Args:
+      shell_dir: Absolute parent directory of the executable on the host.
+      target_dir: Parent directory of the executable on the device (relative to
+          device's base dir for testing).
+      binary: Name of the binary to push.
+    """
+    self.push_file(shell_dir, binary, target_dir)
+
+    # Push external startup data. Backwards compatible for revisions where
+    # these files didn't exist. Or for bots that don't produce these files.
+    self.push_file(
+        shell_dir,
+        'natives_blob.bin',
+        target_dir,
+        skip_if_missing=True,
+    )
+    self.push_file(
+        shell_dir,
+        'snapshot_blob.bin',
+        target_dir,
+        skip_if_missing=True,
+    )
+    self.push_file(
+        shell_dir,
+        'snapshot_blob_trusted.bin',
+        target_dir,
+        skip_if_missing=True,
+    )
+    self.push_file(
+        shell_dir,
+        'icudtl.dat',
+        target_dir,
+        skip_if_missing=True,
+    )
+
+  def run(self, target_dir, binary, args, rel_path, timeout, env=None,
+          logcat_file=False):
+    """Execute a command on the device's shell.
+
+    Args:
+      target_dir: Parent directory of the executable on the device (relative to
+          device's base dir for testing).
+      binary: Name of the binary.
+      args: List of arguments to pass to the binary.
+      rel_path: Relative path on device to use as CWD.
+      timeout: Timeout in seconds.
+      env: The environment variables with which the command should be run.
+      logcat_file: File into which to stream adb logcat log.
+    """
+    binary_on_device = os.path.join(DEVICE_DIR, target_dir, binary)
+    cmd = [binary_on_device] + args
+    def run_inner():
+      try:
+        output = self.device.RunShellCommand(
+            cmd,
+            cwd=os.path.join(DEVICE_DIR, rel_path),
+            check_return=True,
+            env=env,
+            timeout=timeout,
+            retries=0,
+        )
+        return '\n'.join(output)
+      except device_errors.AdbCommandFailedError as e:
+        raise CommandFailedException(e.status, e.output)
+      except device_errors.CommandTimeoutError as e:
+        raise TimeoutException(timeout, e.output)
+
+
+    if logcat_file:
+      with self.device.GetLogcatMonitor(output_file=logcat_file) as logmon:
+        result = run_inner()
+      logmon.Close()
+      return result
+    else:
+      return run_inner()
+
+  def drop_ram_caches(self):
+    """Drop ran caches on device."""
+    cache = cache_control.CacheControl(self.device)
+    cache.DropRamCaches()
+
+  def set_high_perf_mode(self):
+    """Set device into high performance mode."""
+    perf = perf_control.PerfControl(self.device)
+    perf.SetHighPerfMode()
+
+  def set_default_perf_mode(self):
+    """Set device into default performance mode."""
+    perf = perf_control.PerfControl(self.device)
+    perf.SetDefaultPerfMode()
+
+
+_ANDROID_DRIVER = None
+def android_driver(device=None):
+  """Singleton access method to the driver class."""
+  global _ANDROID_DRIVER
+  if not _ANDROID_DRIVER:
+    _ANDROID_DRIVER = _Driver(device)
+  return _ANDROID_DRIVER
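
The new android.py module is reached only through the android_driver()
singleton above. A hypothetical usage sketch (the build output directory and
binary name are placeholders, not taken from this patch):

    from testrunner.local.android import android_driver

    driver = android_driver()      # autodetects a single attached device
    driver.set_high_perf_mode()
    try:
        # Push d8 plus its optional snapshot/ICU blobs, then run it with the
        # device's test directory (DEVICE_DIR) as the working directory.
        driver.push_executable('/path/to/out/android_arm64.release', 'bin', 'd8')
        stdout = driver.run('bin', 'd8', ['--version'], '.', timeout=60)
    finally:
        driver.set_default_perf_mode()
        driver.tear_down()
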
diff --git a/src/v8/tools/testrunner/local/command.py b/src/v8/tools/testrunner/local/command.py
index 93b1ac9..b68252c 100644
--- a/src/v8/tools/testrunner/local/command.py
+++ b/src/v8/tools/testrunner/local/command.py
@@ -2,24 +2,57 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+# for py2/py3 compatibility
+from __future__ import print_function
 
 import os
+import re
+import signal
 import subprocess
 import sys
 import threading
 import time
 
+from ..local.android import (
+    android_driver, CommandFailedException, TimeoutException)
 from ..local import utils
 from ..objects import output
 
 
+BASE_DIR = os.path.normpath(
+    os.path.join(os.path.dirname(os.path.abspath(__file__)), '..' , '..', '..'))
+
 SEM_INVALID_VALUE = -1
 SEM_NOGPFAULTERRORBOX = 0x0002  # Microsoft Platform SDK WinBase.h
 
 
+def setup_testing():
+  """For testing only: We use threading under the hood instead of
+  multiprocessing to make coverage work. Signal handling is only supported
+  in the main thread, so we disable it for testing.
+  """
+  signal.signal = lambda *_: None
+
+
+class AbortException(Exception):
+  """Indicates early abort on SIGINT, SIGTERM or internal hard timeout."""
+  pass
+
+
 class BaseCommand(object):
   def __init__(self, shell, args=None, cmd_prefix=None, timeout=60, env=None,
-               verbose=False):
+               verbose=False, resources_func=None):
+    """Initialize the command.
+
+    Args:
+      shell: The name of the executable (e.g. d8).
+      args: List of args to pass to the executable.
+      cmd_prefix: Prefix of command (e.g. a wrapper script).
+      timeout: Timeout in seconds.
+      env: Environment dict for execution.
+      verbose: Print additional output.
+      resources_func: Callable, returning all test files needed by this command.
+    """
     assert(timeout > 0)
 
     self.shell = shell
@@ -29,16 +62,22 @@
     self.env = env or {}
     self.verbose = verbose
 
-  def execute(self, **additional_popen_kwargs):
+  def execute(self):
     if self.verbose:
-      print '# %s' % self
+      print('# %s' % self)
 
-    process = self._start_process(**additional_popen_kwargs)
+    process = self._start_process()
+
+    # Variable to communicate with the signal handler.
+    abort_occured = [False]
+    def handler(signum, frame):
+      self._abort(process, abort_occured)
+    signal.signal(signal.SIGTERM, handler)
 
     # Variable to communicate with the timer.
     timeout_occured = [False]
     timer = threading.Timer(
-        self.timeout, self._on_timeout, [process, timeout_occured])
+        self.timeout, self._abort, [process, timeout_occured])
     timer.start()
 
     start_time = time.time()
@@ -47,6 +86,9 @@
 
     timer.cancel()
 
+    if abort_occured[0]:
+      raise AbortException()
+
     return output.Output(
       process.returncode,
       timeout_occured[0],
@@ -56,14 +98,13 @@
       duration
     )
 
-  def _start_process(self, **additional_popen_kwargs):
+  def _start_process(self):
     try:
       return subprocess.Popen(
         args=self._get_popen_args(),
         stdout=subprocess.PIPE,
         stderr=subprocess.PIPE,
         env=self._get_env(),
-        **additional_popen_kwargs
       )
     except Exception as e:
       sys.stderr.write('Error executing: %s\n' % self)
@@ -85,12 +126,16 @@
   def _kill_process(self, process):
     raise NotImplementedError()
 
-  def _on_timeout(self, process, timeout_occured):
-    timeout_occured[0] = True
+  def _abort(self, process, abort_called):
+    abort_called[0] = True
     try:
+      print('Attempting to kill process %s' % process.pid)
+      sys.stdout.flush()
       self._kill_process(process)
-    except OSError:
-      sys.stderr.write('Error: Process %s already ended.\n' % process.pid)
+    except OSError as e:
+      print(e)
+      sys.stdout.flush()
+      pass
 
   def __str__(self):
     return self.to_string()
@@ -114,6 +159,25 @@
 
 
 class PosixCommand(BaseCommand):
+  # TODO(machenbach): Use base process start without shell once
+  # https://crbug.com/v8/8889 is resolved.
+  def _start_process(self):
+    def wrapped(arg):
+      if set('() \'"') & set(arg):
+        return "'%s'" % arg.replace("'", "'\"'\"'")
+      return arg
+    try:
+      return subprocess.Popen(
+        args=' '.join(map(wrapped, self._get_popen_args())),
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+        env=self._get_env(),
+        shell=True,
+      )
+    except Exception as e:
+      sys.stderr.write('Error executing: %s\n' % self)
+      raise e
+
   def _kill_process(self, process):
     process.kill()
 
@@ -147,9 +211,6 @@
     return subprocess.list2cmdline(self._to_args_list())
 
   def _kill_process(self, process):
-    if self.verbose:
-      print 'Attempting to kill process %d' % process.pid
-      sys.stdout.flush()
     tk = subprocess.Popen(
         'taskkill /T /F /PID %d' % process.pid,
         stdout=subprocess.PIPE,
@@ -157,15 +218,96 @@
     )
     stdout, stderr = tk.communicate()
     if self.verbose:
-      print 'Taskkill results for %d' % process.pid
-      print stdout
-      print stderr
-      print 'Return code: %d' % tk.returncode
+      print('Taskkill results for %d' % process.pid)
+      print(stdout)
+      print(stderr)
+      print('Return code: %d' % tk.returncode)
       sys.stdout.flush()
 
 
-# Set the Command class to the OS-specific version.
-if utils.IsWindows():
-  Command = WindowsCommand
-else:
-  Command = PosixCommand
+class AndroidCommand(BaseCommand):
+  # This must be initialized before creating any instances of this class.
+  driver = None
+
+  def __init__(self, shell, args=None, cmd_prefix=None, timeout=60, env=None,
+               verbose=False, resources_func=None):
+    """Initialize the command and all files that need to be pushed to the
+    Android device.
+    """
+    self.shell_name = os.path.basename(shell)
+    self.shell_dir = os.path.dirname(shell)
+    self.files_to_push = (resources_func or (lambda: []))()
+
+    # Make all paths in arguments relative and also prepare files from arguments
+    # for pushing to the device.
+    rel_args = []
+    find_path_re = re.compile(r'.*(%s/[^\'"]+).*' % re.escape(BASE_DIR))
+    for arg in (args or []):
+      match = find_path_re.match(arg)
+      if match:
+        self.files_to_push.append(match.group(1))
+      rel_args.append(
+          re.sub(r'(.*)%s/(.*)' % re.escape(BASE_DIR), r'\1\2', arg))
+
+    super(AndroidCommand, self).__init__(
+        shell, args=rel_args, cmd_prefix=cmd_prefix, timeout=timeout, env=env,
+        verbose=verbose)
+
+  def execute(self, **additional_popen_kwargs):
+    """Execute the command on the device.
+
+    This pushes all required files to the device and then runs the command.
+    """
+    if self.verbose:
+      print('# %s' % self)
+
+    self.driver.push_executable(self.shell_dir, 'bin', self.shell_name)
+
+    for abs_file in self.files_to_push:
+      abs_dir = os.path.dirname(abs_file)
+      file_name = os.path.basename(abs_file)
+      rel_dir = os.path.relpath(abs_dir, BASE_DIR)
+      self.driver.push_file(abs_dir, file_name, rel_dir)
+
+    start_time = time.time()
+    return_code = 0
+    timed_out = False
+    try:
+      stdout = self.driver.run(
+          'bin', self.shell_name, self.args, '.', self.timeout, self.env)
+    except CommandFailedException as e:
+      return_code = e.status
+      stdout = e.output
+    except TimeoutException as e:
+      return_code = 1
+      timed_out = True
+      # Sadly the Android driver doesn't provide output on timeout.
+      stdout = ''
+
+    duration = time.time() - start_time
+    return output.Output(
+        return_code,
+        timed_out,
+        stdout,
+        '',  # No stderr available.
+        -1,  # No pid available.
+        duration,
+    )
+
+
+Command = None
+def setup(target_os, device):
+  """Set the Command class to the OS-specific version."""
+  global Command
+  if target_os == 'android':
+    AndroidCommand.driver = android_driver(device)
+    Command = AndroidCommand
+  elif target_os == 'windows':
+    Command = WindowsCommand
+  else:
+    Command = PosixCommand
+
+def tear_down():
+  """Clean up after using commands."""
+  if Command == AndroidCommand:
+    AndroidCommand.driver.tear_down()
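
With this change the module-level Command class is late-bound: base_runner.py
calls command.setup() before any test runs and command.tear_down() afterwards.
A minimal sketch of that call pattern outside the runner (the shell path and
flags are placeholders):

    from testrunner.local import command

    command.setup('linux', None)   # binds Command to PosixCommand on Linux
    try:
        cmd = command.Command('out/x64.release/d8', args=['--version'],
                              timeout=60, verbose=True)
        result = cmd.execute()     # objects.output.Output with return code,
                                   # timeout flag, stdout/stderr and duration
    finally:
        command.tear_down()        # only does work when AndroidCommand is active
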
diff --git a/src/v8/tools/testrunner/local/fake_testsuite/fake_testsuite.status b/src/v8/tools/testrunner/local/fake_testsuite/fake_testsuite.status
new file mode 100644
index 0000000..b5ebc84
--- /dev/null
+++ b/src/v8/tools/testrunner/local/fake_testsuite/fake_testsuite.status
@@ -0,0 +1,5 @@
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+[]
diff --git a/src/v8/tools/testrunner/local/fake_testsuite/testcfg.py b/src/v8/tools/testrunner/local/fake_testsuite/testcfg.py
new file mode 100644
index 0000000..28de737
--- /dev/null
+++ b/src/v8/tools/testrunner/local/fake_testsuite/testcfg.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+from testrunner.local import testsuite, statusfile
+
+
+class TestLoader(testsuite.TestLoader):
+  def _list_test_filenames(self):
+    return ["fast", "slow"]
+
+  def list_tests(self):
+    self.test_count_estimation = 2
+    fast = self._create_test("fast", self.suite)
+    slow = self._create_test("slow", self.suite)
+
+    slow._statusfile_outcomes.append(statusfile.SLOW)
+    yield fast
+    yield slow
+
+
+class TestSuite(testsuite.TestSuite):
+  def _test_loader_class(self):
+    return TestLoader
+
+  def _test_class(self):
+    return testsuite.TestCase
+
+def GetSuite(*args, **kwargs):
+  return TestSuite(*args, **kwargs)
diff --git a/src/v8/tools/testrunner/local/pool.py b/src/v8/tools/testrunner/local/pool.py
index 9199b62..e0b0ec4 100644
--- a/src/v8/tools/testrunner/local/pool.py
+++ b/src/v8/tools/testrunner/local/pool.py
@@ -3,43 +3,53 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-from Queue import Empty
-from multiprocessing import Event, Process, Queue
+# for py2/py3 compatibility
+from __future__ import print_function
+
+from contextlib import contextmanager
+from multiprocessing import Process, Queue
+import os
+import signal
+import time
 import traceback
 
+try:
+  from queue import Empty  # Python 3
+except ImportError:
+  from Queue import Empty  # Python 2
+
+from . import command
+
 
 def setup_testing():
   """For testing only: Use threading under the hood instead of multiprocessing
   to make coverage work.
   """
   global Queue
-  global Event
   global Process
   del Queue
-  del Event
   del Process
-  from Queue import Queue
-  from threading import Event
+  try:
+    from queue import Queue  # Python 3
+  except ImportError:
+    from Queue import Queue  # Python 2
+
   from threading import Thread as Process
+  # Monkeypatch threading Queue to look like multiprocessing Queue.
+  Queue.cancel_join_thread = lambda self: None
+  # Monkeypatch os.kill and add fake pid property on Thread.
+  os.kill = lambda *args: None
+  Process.pid = property(lambda self: None)
 
 
 class NormalResult():
   def __init__(self, result):
     self.result = result
-    self.exception = False
-    self.break_now = False
-
+    self.exception = None
 
 class ExceptionResult():
-  def __init__(self):
-    self.exception = True
-    self.break_now = False
-
-
-class BreakResult():
-  def __init__(self):
-    self.exception = False
-    self.break_now = True
+  def __init__(self, exception):
+    self.exception = exception
 
 
 class MaybeResult():
@@ -56,26 +66,43 @@
     return MaybeResult(False, value)
 
 
-def Worker(fn, work_queue, done_queue, done,
+def Worker(fn, work_queue, done_queue,
            process_context_fn=None, process_context_args=None):
   """Worker to be run in a child process.
-  The worker stops on two conditions. 1. When the poison pill "STOP" is
-  reached or 2. when the event "done" is set."""
+  The worker stops when the poison pill "STOP" is reached.
+  """
   try:
     kwargs = {}
     if process_context_fn and process_context_args is not None:
       kwargs.update(process_context=process_context_fn(*process_context_args))
     for args in iter(work_queue.get, "STOP"):
-      if done.is_set():
-        break
       try:
         done_queue.put(NormalResult(fn(*args, **kwargs)))
-      except Exception, e:
+      except command.AbortException:
+        # SIGINT, SIGTERM or internal hard timeout.
+        break
+      except Exception as e:
         traceback.print_exc()
         print(">>> EXCEPTION: %s" % e)
-        done_queue.put(ExceptionResult())
+        done_queue.put(ExceptionResult(e))
+    # When we reach here on normal tear down, all items have been pulled from
+    # the done_queue before and this should have no effect. On fast abort, it's
+    # possible that a fast worker left items on the done_queue in memory, which
+    # will never be pulled. This call purges those to avoid a deadlock.
+    done_queue.cancel_join_thread()
   except KeyboardInterrupt:
-    done_queue.put(BreakResult())
+    assert False, 'Unreachable'
+
+
+@contextmanager
+def without_sig():
+  int_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
+  term_handler = signal.signal(signal.SIGTERM, signal.SIG_IGN)
+  try:
+    yield
+  finally:
+    signal.signal(signal.SIGINT, int_handler)
+    signal.signal(signal.SIGTERM, term_handler)
 
 
 class Pool():
@@ -88,24 +115,28 @@
   # Necessary to not overflow the queue's pipe if a keyboard interrupt happens.
   BUFFER_FACTOR = 4
 
-  def __init__(self, num_workers, heartbeat_timeout=30):
+  def __init__(self, num_workers, heartbeat_timeout=1):
     self.num_workers = num_workers
     self.processes = []
     self.terminated = False
+    self.abort_now = False
 
-    # Invariant: count >= #work_queue + #done_queue. It is greater when a
-    # worker takes an item from the work_queue and before the result is
+    # Invariant: processing_count >= #work_queue + #done_queue. It is greater
+    # when a worker takes an item from the work_queue and before the result is
     # submitted to the done_queue. It is equal when no worker is working,
     # e.g. when all workers have finished, and when no results are processed.
     # Count is only accessed by the parent process. Only the parent process is
     # allowed to remove items from the done_queue and to add items to the
     # work_queue.
-    self.count = 0
-    self.work_queue = Queue()
-    self.done_queue = Queue()
-    self.done = Event()
+    self.processing_count = 0
     self.heartbeat_timeout = heartbeat_timeout
 
+    # Disable sigint and sigterm to prevent subprocesses from capturing the
+    # signals.
+    with without_sig():
+      self.work_queue = Queue()
+      self.done_queue = Queue()
+
   def imap_unordered(self, fn, gen,
                      process_context_fn=None, process_context_args=None):
     """Maps function "fn" to items in generator "gen" on the worker processes
@@ -123,58 +154,63 @@
           process_context_fn. All arguments will be pickled and sent beyond the
           process boundary.
     """
+    if self.terminated:
+      return
     try:
       internal_error = False
       gen = iter(gen)
       self.advance = self._advance_more
 
-      for w in xrange(self.num_workers):
-        p = Process(target=Worker, args=(fn,
-                                         self.work_queue,
-                                         self.done_queue,
-                                         self.done,
-                                         process_context_fn,
-                                         process_context_args))
-        p.start()
-        self.processes.append(p)
+      # Disable sigint and sigterm to prevent subprocesses from capturing the
+      # signals.
+      with without_sig():
+        for w in range(self.num_workers):
+          p = Process(target=Worker, args=(fn,
+                                          self.work_queue,
+                                          self.done_queue,
+                                          process_context_fn,
+                                          process_context_args))
+          p.start()
+          self.processes.append(p)
 
       self.advance(gen)
-      while self.count > 0:
+      while self.processing_count > 0:
         while True:
           try:
-            result = self.done_queue.get(timeout=self.heartbeat_timeout)
-            break
-          except Empty:
-            # Indicate a heartbeat. The iterator will continue fetching the
-            # next result.
-            yield MaybeResult.create_heartbeat()
-        self.count -= 1
-        if result.exception:
-          # TODO(machenbach): Handle a few known types of internal errors
-          # gracefully, e.g. missing test files.
-          internal_error = True
-          continue
-        elif result.break_now:
-          # A keyboard interrupt happened in one of the worker processes.
-          raise KeyboardInterrupt
-        else:
-          yield MaybeResult.create_result(result.result)
+            # Read from result queue in a responsive fashion. If available,
+            # this will return a normal result immediately or a heartbeat on
+            # heartbeat timeout (default 1 second).
+            result = self._get_result_from_queue()
+          except:
+            # TODO(machenbach): Handle a few known types of internal errors
+            # gracefully, e.g. missing test files.
+            internal_error = True
+            continue
+
+          if self.abort_now:
+            # SIGINT, SIGTERM or internal hard timeout.
+            return
+
+          yield result
+          break
+
         self.advance(gen)
     except KeyboardInterrupt:
-      raise
+      assert False, 'Unreachable'
     except Exception as e:
       traceback.print_exc()
       print(">>> EXCEPTION: %s" % e)
     finally:
-      self.terminate()
+      self._terminate()
+
     if internal_error:
       raise Exception("Internal error in a worker process.")
 
   def _advance_more(self, gen):
-    while self.count < self.num_workers * self.BUFFER_FACTOR:
+    while self.processing_count < self.num_workers * self.BUFFER_FACTOR:
       try:
-        self.work_queue.put(gen.next())
-        self.count += 1
+        self.work_queue.put(next(gen))
+        self.processing_count += 1
       except StopIteration:
         self.advance = self._advance_empty
         break
@@ -185,27 +221,51 @@
   def add(self, args):
     """Adds an item to the work queue. Can be called dynamically while
     processing the results from imap_unordered."""
-    self.work_queue.put(args)
-    self.count += 1
+    assert not self.terminated
 
-  def terminate(self):
+    self.work_queue.put(args)
+    self.processing_count += 1
+
+  def abort(self):
+    """Schedules abort on next queue read.
+
+    This is safe to call when handling SIGINT, SIGTERM or when an internal
+    hard timeout is reached.
+    """
+    self.abort_now = True
+
+  def _terminate(self):
+    """Terminates execution and cleans up the queues.
+
+    If abort() was called before termination, this also terminates the
+    subprocesses and doesn't wait for ongoing tests.
+    """
     if self.terminated:
       return
     self.terminated = True
 
-    # For exceptional tear down set the "done" event to stop the workers before
-    # they empty the queue buffer.
-    self.done.set()
+    # Drain out work queue from tests
+    try:
+      while True:
+        self.work_queue.get(True, 0.1)
+    except Empty:
+      pass
 
-    for p in self.processes:
+    # Make sure all processes stop
+    for _ in self.processes:
       # During normal tear down the workers block on get(). Feed a poison pill
       # per worker to make them stop.
       self.work_queue.put("STOP")
 
+    if self.abort_now:
+      for p in self.processes:
+        os.kill(p.pid, signal.SIGTERM)
+
     for p in self.processes:
       p.join()
 
-    # Drain the queues to prevent failures when queues are garbage collected.
+    # Drain the queues to prevent stderr chatter when queues are garbage
+    # collected.
     try:
       while True: self.work_queue.get(False)
     except:
@@ -214,3 +274,22 @@
       while True: self.done_queue.get(False)
     except:
       pass
+
+  def _get_result_from_queue(self):
+    """Attempts to get the next result from the queue.
+
+    Returns: A wrapped result if one was available within heartbeat timeout,
+        a heartbeat result otherwise.
+    Raises:
+        Exception: If an exception occurred when processing the task on the
+            worker side, it is reraised here.
+    """
+    while True:
+      try:
+        result = self.done_queue.get(timeout=self.heartbeat_timeout)
+        self.processing_count -= 1
+        if result.exception:
+          raise result.exception
+        return MaybeResult.create_result(result.result)
+      except Empty:
+        return MaybeResult.create_heartbeat()
diff --git a/src/v8/tools/testrunner/local/pool_unittest.py b/src/v8/tools/testrunner/local/pool_unittest.py
old mode 100644
new mode 100755
index 235eca6..240cd56
--- a/src/v8/tools/testrunner/local/pool_unittest.py
+++ b/src/v8/tools/testrunner/local/pool_unittest.py
@@ -3,9 +3,16 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+import os
+import sys
 import unittest
 
-from pool import Pool
+# Needed because the test runner contains relative imports.
+TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.dirname(
+    os.path.abspath(__file__))))
+sys.path.append(TOOLS_PATH)
+
+from testrunner.local.pool import Pool
 
 def Run(x):
   if x == 10:
@@ -17,6 +24,9 @@
     results = set()
     pool = Pool(3)
     for result in pool.imap_unordered(Run, [[x] for x in range(0, 10)]):
+      if result.heartbeat:
+        # Any result can be a heartbeat due to timings.
+        continue
       results.add(result.value)
     self.assertEquals(set(range(0, 10)), results)
 
@@ -25,6 +35,9 @@
     pool = Pool(3)
     with self.assertRaises(Exception):
       for result in pool.imap_unordered(Run, [[x] for x in range(0, 12)]):
+        if result.heartbeat:
+          # Any result can be a heartbeat due to timings.
+          continue
         # Item 10 will not appear in results due to an internal exception.
         results.add(result.value)
     expect = set(range(0, 12))
@@ -35,8 +48,15 @@
     results = set()
     pool = Pool(3)
     for result in pool.imap_unordered(Run, [[x] for x in range(0, 10)]):
+      if result.heartbeat:
+        # Any result can be a heartbeat due to timings.
+        continue
       results.add(result.value)
       if result.value < 30:
         pool.add([result.value + 20])
     self.assertEquals(set(range(0, 10) + range(20, 30) + range(40, 50)),
                       results)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/src/v8/tools/testrunner/local/statusfile.py b/src/v8/tools/testrunner/local/statusfile.py
index 988750d..e477832 100644
--- a/src/v8/tools/testrunner/local/statusfile.py
+++ b/src/v8/tools/testrunner/local/statusfile.py
@@ -25,6 +25,9 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import os
 import re
 
@@ -34,8 +37,8 @@
 # Possible outcomes
 FAIL = "FAIL"
 PASS = "PASS"
-TIMEOUT = "TIMEOUT" # TODO(majeski): unused in status files
-CRASH = "CRASH" # TODO(majeski): unused in status files
+TIMEOUT = "TIMEOUT"
+CRASH = "CRASH"
 
 # Outcomes only for status file, need special handling
 FAIL_OK = "FAIL_OK"
@@ -45,21 +48,22 @@
 SKIP = "SKIP"
 SLOW = "SLOW"
 NO_VARIANTS = "NO_VARIANTS"
+FAIL_PHASE_ONLY = "FAIL_PHASE_ONLY"
 
 ALWAYS = "ALWAYS"
 
 KEYWORDS = {}
 for key in [SKIP, FAIL, PASS, CRASH, SLOW, FAIL_OK, NO_VARIANTS, FAIL_SLOPPY,
-            ALWAYS]:
+            ALWAYS, FAIL_PHASE_ONLY]:
   KEYWORDS[key] = key
 
 # Support arches, modes to be written as keywords instead of strings.
 VARIABLES = {ALWAYS: True}
-for var in ["debug", "release", "big", "little",
+for var in ["debug", "release", "big", "little", "android",
             "android_arm", "android_arm64", "android_ia32", "android_x64",
             "arm", "arm64", "ia32", "mips", "mipsel", "mips64", "mips64el",
             "x64", "ppc", "ppc64", "s390", "s390x", "macos", "windows",
-            "linux", "aix"]:
+            "linux", "aix", "r1", "r2", "r3", "r5", "r6"]:
   VARIABLES[var] = var
 
 # Allow using variants as keywords.
@@ -132,7 +136,7 @@
             variant_desc = 'variant independent'
           else:
             variant_desc = 'variant: %s' % variant
-          print 'Unused rule: %s -> %s (%s)' % (rule, value, variant_desc)
+          print('Unused rule: %s -> %s (%s)' % (rule, value, variant_desc))
 
 
 def _JoinsPassAndFail(outcomes1, outcomes2):
@@ -328,5 +332,5 @@
                   "missing file for %s test %s" % (basename, rule))
     return status["success"]
   except Exception as e:
-    print e
+    print(e)
     return False
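
The new "android" entry in VARIABLES and the FAIL_PHASE_ONLY keyword become
usable in suite status files. A hypothetical fragment, assuming the usual
[condition, {test: [outcomes]}] section layout of these files (the test names
are placeholders):

    [
    ['system == android', {
      'some-device-only-slow-test': [SLOW],
      'some-phase-sensitive-test': [FAIL_PHASE_ONLY],
    }],
    ]
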
diff --git a/src/v8/tools/testrunner/local/statusfile_unittest.py b/src/v8/tools/testrunner/local/statusfile_unittest.py
index 299e332..e8d5ff9 100755
--- a/src/v8/tools/testrunner/local/statusfile_unittest.py
+++ b/src/v8/tools/testrunner/local/statusfile_unittest.py
@@ -3,6 +3,7 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+
 import unittest
 
 import statusfile
diff --git a/src/v8/tools/testrunner/local/testsuite.py b/src/v8/tools/testrunner/local/testsuite.py
index 6a9e983..864d734 100644
--- a/src/v8/tools/testrunner/local/testsuite.py
+++ b/src/v8/tools/testrunner/local/testsuite.py
@@ -28,38 +28,20 @@
 
 import fnmatch
 import imp
+import itertools
 import os
+from contextlib import contextmanager
 
 from . import command
 from . import statusfile
 from . import utils
 from ..objects.testcase import TestCase
-from variants import ALL_VARIANTS, ALL_VARIANT_FLAGS
+from .variants import ALL_VARIANTS, ALL_VARIANT_FLAGS
 
 
 STANDARD_VARIANT = set(["default"])
 
 
-class LegacyVariantsGenerator(object):
-  def __init__(self, suite, variants):
-    self.suite = suite
-    self.all_variants = ALL_VARIANTS & variants
-    self.standard_variant = STANDARD_VARIANT & variants
-
-  def FilterVariantsByTest(self, test):
-    if test.only_standard_variant:
-      return self.standard_variant
-    return self.all_variants
-
-  def GetFlagSets(self, test, variant):
-    return ALL_VARIANT_FLAGS[variant]
-
-
-class StandardLegacyVariantsGenerator(LegacyVariantsGenerator):
-  def FilterVariantsByTest(self, testcase):
-    return self.standard_variant
-
-
 class VariantsGenerator(object):
   def __init__(self, variants):
     self._all_variants = [v for v in variants if v in ALL_VARIANTS]
@@ -80,45 +62,235 @@
     return self._all_variants
 
 
+class TestCombiner(object):
+  def get_group_key(self, test):
+    """To indicate what tests can be combined with each other we define a group
+    key for each test. Tests with the same group key can be combined. A test
+    without a group key (None) is not combinable with any other test.
+    """
+    raise NotImplementedError()
+
+  def combine(self, name, tests):
+    """Returns test combined from `tests`. Since we identify tests by their
+    suite and name, `name` parameter should be unique within one suite.
+    """
+    return self._combined_test_class()(name, tests)
+
+  def _combined_test_class(self):
+    raise NotImplementedError()
+
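+# A minimal illustrative sketch of a TestCombiner subclass (the names
+# SuiteTestCombiner and CombinedTest are hypothetical, for illustration only):
+#
+#   class SuiteTestCombiner(TestCombiner):
+#     def get_group_key(self, test):
+#       # Combine tests that share the same top-level directory; returning
+#       # None instead would make the test uncombinable.
+#       return test.path.split('/')[0]
+#
+#     def _combined_test_class(self):
+#       return CombinedTest  # a TestCase subclass taking (name, tests)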
+
+class TestLoader(object):
+  """Base class for loading TestSuite tests after applying test suite
+  transformations."""
+
+  def __init__(self, suite, test_class, test_config, test_root):
+    self.suite = suite
+    self.test_class = test_class
+    self.test_config = test_config
+    self.test_root = test_root
+    self.test_count_estimation = len(list(self._list_test_filenames()))
+
+  def _list_test_filenames(self):
+    """Implemented by the subclassed TestLoaders to list filenames.
+
+    Filenames are expected to be sorted and are deterministic."""
+    raise NotImplementedError
+
+  def _should_filter_by_name(self, name):
+    return False
+
+  def _should_filter_by_test(self, test):
+    return False
+
+  def _filename_to_testname(self, filename):
+    """Hook for subclasses to write their own filename transformation
+    logic before the test creation."""
+    return filename
+
+  # TODO: not needed for every TestLoader, extract it into a subclass.
+  def _path_to_name(self, path):
+    if utils.IsWindows():
+      return path.replace(os.path.sep, "/")
+
+    return path
+
+  def _create_test(self, path, suite, **kwargs):
+    """Converts paths into test objects using the given options"""
+    return self.test_class(
+      suite, path, self._path_to_name(path), self.test_config, **kwargs)
+
+  def list_tests(self):
+    """Loads and returns the test objects for a TestSuite"""
+    # TODO: detect duplicate tests.
+    for filename in self._list_test_filenames():
+      if self._should_filter_by_name(filename):
+        continue
+
+      testname = self._filename_to_testname(filename)
+      case = self._create_test(testname, self.suite)
+      if self._should_filter_by_test(case):
+        continue
+
+      yield case
+
+
+class GenericTestLoader(TestLoader):
+  """Generic TestLoader implementing the logic for listing filenames"""
+  @property
+  def excluded_files(self):
+    return set()
+
+  @property
+  def excluded_dirs(self):
+    return set()
+
+  @property
+  def excluded_suffixes(self):
+    return set()
+
+  @property
+  def test_dirs(self):
+    return [self.test_root]
+
+  @property
+  def extensions(self):
+    return []
+
+  def __find_extension(self, filename):
+    for extension in self.extensions:
+      if filename.endswith(extension):
+        return extension
+
+    return False
+
+  def _should_filter_by_name(self, filename):
+    if not self.__find_extension(filename):
+      return True
+
+    for suffix in self.excluded_suffixes:
+      if filename.endswith(suffix):
+        return True
+
+    if os.path.basename(filename) in self.excluded_files:
+      return True
+
+    return False
+
+  def _filename_to_testname(self, filename):
+    extension = self.__find_extension(filename)
+    if not extension:
+      return filename
+
+    return filename[:-len(extension)]
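+
+  # E.g. with extensions == [".js", ".mjs"] (as in JSTestLoader below),
+  # "regress/regress-1234.js" passes the name filter and maps to the test
+  # name "regress/regress-1234".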
+
+  def _to_relpath(self, abspath, test_root):
+    return os.path.relpath(abspath, test_root)
+
+  def _list_test_filenames(self):
+    for test_dir in sorted(self.test_dirs):
+      test_root = os.path.join(self.test_root, test_dir)
+      for dirname, dirs, files in os.walk(test_root, followlinks=True):
+        dirs.sort()
+        for dir in list(dirs):  # iterate over a copy; dirs is pruned in place
+          if dir in self.excluded_dirs or dir.startswith('.'):
+            dirs.remove(dir)
+
+        files.sort()
+        for filename in files:
+          abspath = os.path.join(dirname, filename)
+
+          yield self._to_relpath(abspath, test_root)
+
+
+class JSTestLoader(GenericTestLoader):
+  @property
+  def extensions(self):
+    return [".js", ".mjs"]
+
+
+class TestGenerator(object):
+  def __init__(self, test_count_estimate, slow_tests, fast_tests):
+    self.test_count_estimate = test_count_estimate
+    self.slow_tests = slow_tests
+    self.fast_tests = fast_tests
+    self._rebuild_iterator()
+
+  def _rebuild_iterator(self):
+    self._iterator = itertools.chain(self.slow_tests, self.fast_tests)
+
+  def __iter__(self):
+    return self
+
+  def __next__(self):
+    return self.next()
+
+  def next(self):
+    return next(self._iterator)
+
+  def merge(self, test_generator):
+    self.test_count_estimate += test_generator.test_count_estimate
+    self.slow_tests = itertools.chain(
+      self.slow_tests, test_generator.slow_tests)
+    self.fast_tests = itertools.chain(
+      self.fast_tests, test_generator.fast_tests)
+    self._rebuild_iterator()
+
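+# Note: merging keeps the slow-first ordering; all slow tests from both
+# generators are yielded before any fast test (see _rebuild_iterator above).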
+
+@contextmanager
+def _load_testsuite_module(name, root):
+  f = None
+  try:
+    (f, pathname, description) = imp.find_module("testcfg", [root])
+    yield imp.load_module(name + "_testcfg", f, pathname, description)
+  finally:
+    if f:
+      f.close()
+
 class TestSuite(object):
   @staticmethod
-  def LoadTestSuite(root):
+  def Load(root, test_config, framework_name):
     name = root.split(os.path.sep)[-1]
-    f = None
-    try:
-      (f, pathname, description) = imp.find_module("testcfg", [root])
-      module = imp.load_module(name + "_testcfg", f, pathname, description)
-      return module.GetSuite(name, root)
-    finally:
-      if f:
-        f.close()
+    with _load_testsuite_module(name, root) as module:
+      return module.GetSuite(name, root, test_config, framework_name)
 
-  def __init__(self, name, root):
-    # Note: This might be called concurrently from different processes.
+  def __init__(self, name, root, test_config, framework_name):
     self.name = name  # string
     self.root = root  # string containing path
+    self.test_config = test_config
+    self.framework_name = framework_name  # name of the test runner impl
     self.tests = None  # list of TestCase objects
     self.statusfile = None
 
+    self._test_loader = self._test_loader_class()(
+      self, self._test_class(), self.test_config, self.root)
+
   def status_file(self):
     return "%s/%s.status" % (self.root, self.name)
 
-  def ListTests(self, context):
+  @property
+  def _test_loader_class(self):
     raise NotImplementedError
 
-  def _LegacyVariantsGeneratorFactory(self):
-    """The variant generator class to be used."""
-    return LegacyVariantsGenerator
+  def ListTests(self):
+    return self._test_loader.list_tests()
 
-  def CreateLegacyVariantsGenerator(self, variants):
-    """Return a generator for the testing variants of this suite.
+  def __initialize_test_count_estimation(self):
+    # Retrieves a single test to initialize the test generator.
+    next(iter(self.ListTests()), None)
 
-    Args:
-      variants: List of variant names to be run as specified by the test
-                runner.
-    Returns: An object of type LegacyVariantsGenerator.
-    """
-    return self._LegacyVariantsGeneratorFactory()(self, set(variants))
+  def __calculate_test_count(self):
+    self.__initialize_test_count_estimation()
+    return self._test_loader.test_count_estimation
+
+  def load_tests_from_disk(self, statusfile_variables):
+    self.statusfile = statusfile.StatusFile(
+      self.status_file(), statusfile_variables)
+
+    test_count = self.__calculate_test_count()
+    slow_tests = (test for test in self.ListTests() if test.is_slow)
+    fast_tests = (test for test in self.ListTests() if not test.is_slow)
+    return TestGenerator(test_count, slow_tests, fast_tests)
 
   def get_variants_gen(self, variants):
     return self._variants_gen_class()(variants)
@@ -126,83 +298,20 @@
   def _variants_gen_class(self):
     return VariantsGenerator
 
-  def ReadStatusFile(self, variables):
-    self.statusfile = statusfile.StatusFile(self.status_file(), variables)
+  def test_combiner_available(self):
+    return bool(self._test_combiner_class())
 
-  def ReadTestCases(self, context):
-    self.tests = self.ListTests(context)
+  def get_test_combiner(self):
+    cls = self._test_combiner_class()
+    if cls:
+      return cls()
+    return None
 
-
-  def FilterTestCasesByStatus(self,
-                              slow_tests_mode=None,
-                              pass_fail_tests_mode=None):
-    """Filters tests by outcomes from status file.
-
-    Status file has to be loaded before using this function.
-
-    Args:
-      slow_tests_mode: What to do with slow tests.
-      pass_fail_tests_mode: What to do with pass or fail tests.
-
-    Mode options:
-      None (default) - don't skip
-      "skip" - skip if slow/pass_fail
-      "run" - skip if not slow/pass_fail
+  def _test_combiner_class(self):
+    """Returns Combiner subclass. None if suite doesn't support combining
+    tests.
     """
-    def _skip_slow(is_slow, mode):
-      return (
-        (mode == 'run' and not is_slow) or
-        (mode == 'skip' and is_slow))
-
-    def _skip_pass_fail(pass_fail, mode):
-      return (
-        (mode == 'run' and not pass_fail) or
-        (mode == 'skip' and pass_fail))
-
-    def _compliant(test):
-      if test.do_skip:
-        return False
-      if _skip_slow(test.is_slow, slow_tests_mode):
-        return False
-      if _skip_pass_fail(test.is_pass_or_fail, pass_fail_tests_mode):
-        return False
-      return True
-
-    self.tests = filter(_compliant, self.tests)
-
-  def FilterTestCasesByArgs(self, args):
-    """Filter test cases based on command-line arguments.
-
-    args can be a glob: asterisks in any position of the argument
-    represent zero or more characters. Without asterisks, only exact matches
-    will be used with the exeption of the test-suite name as argument.
-    """
-    filtered = []
-    globs = []
-    for a in args:
-      argpath = a.split('/')
-      if argpath[0] != self.name:
-        continue
-      if len(argpath) == 1 or (len(argpath) == 2 and argpath[1] == '*'):
-        return  # Don't filter, run all tests in this suite.
-      path = '/'.join(argpath[1:])
-      globs.append(path)
-
-    for t in self.tests:
-      for g in globs:
-        if fnmatch.fnmatch(t.path, g):
-          filtered.append(t)
-          break
-    self.tests = filtered
-
-  def _create_test(self, path, **kwargs):
-    test = self._test_class()(self, path, self._path_to_name(path), **kwargs)
-    return test
+    return None
 
   def _test_class(self):
     raise NotImplementedError
-
-  def _path_to_name(self, path):
-    if utils.IsWindows():
-      return path.replace("\\", "/")
-    return path
diff --git a/src/v8/tools/testrunner/local/testsuite_unittest.py b/src/v8/tools/testrunner/local/testsuite_unittest.py
index efefe4c..b74fef1 100755
--- a/src/v8/tools/testrunner/local/testsuite_unittest.py
+++ b/src/v8/tools/testrunner/local/testsuite_unittest.py
@@ -3,8 +3,10 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+import itertools
 import os
 import sys
+import tempfile
 import unittest
 
 # Needed because the test runner contains relative imports.
@@ -12,109 +14,72 @@
     os.path.abspath(__file__))))
 sys.path.append(TOOLS_PATH)
 
-from testrunner.local.testsuite import TestSuite
+from testrunner.local.testsuite import TestSuite, TestGenerator
 from testrunner.objects.testcase import TestCase
+from testrunner.test_config import TestConfig
 
 
 class TestSuiteTest(unittest.TestCase):
-  def test_filter_testcases_by_status_first_pass(self):
-    suite = TestSuite('foo', 'bar')
-    suite.rules = {
-      '': {
-        'foo/bar': set(['PASS', 'SKIP']),
-        'baz/bar': set(['PASS', 'FAIL']),
-      },
-    }
-    suite.prefix_rules = {
-      '': {
-        'baz/': set(['PASS', 'SLOW']),
-      },
-    }
-    suite.tests = [
-      TestCase(suite, 'foo/bar', 'foo/bar'),
-      TestCase(suite, 'baz/bar', 'baz/bar'),
-    ]
-    suite.FilterTestCasesByStatus()
-    self.assertEquals(
-        [TestCase(suite, 'baz/bar', 'baz/bar')],
-        suite.tests,
-    )
-    outcomes = suite.GetStatusFileOutcomes(suite.tests[0].name,
-                                           suite.tests[0].variant)
-    self.assertEquals(set(['PASS', 'FAIL', 'SLOW']), outcomes)
-
-  def test_filter_testcases_by_status_second_pass(self):
-    suite = TestSuite('foo', 'bar')
-
-    suite.rules = {
-      '': {
-        'foo/bar': set(['PREV']),
-      },
-      'default': {
-        'foo/bar': set(['PASS', 'SKIP']),
-        'baz/bar': set(['PASS', 'FAIL']),
-      },
-      'stress': {
-        'baz/bar': set(['SKIP']),
-      },
-    }
-    suite.prefix_rules = {
-      '': {
-        'baz/': set(['PREV']),
-      },
-      'default': {
-        'baz/': set(['PASS', 'SLOW']),
-      },
-      'stress': {
-        'foo/': set(['PASS', 'SLOW']),
-      },
-    }
-
-    test1 = TestCase(suite, 'foo/bar', 'foo/bar')
-    test2 = TestCase(suite, 'baz/bar', 'baz/bar')
-    suite.tests = [
-      test1.create_variant(variant='default', flags=[]),
-      test1.create_variant(variant='stress', flags=['-v']),
-      test2.create_variant(variant='default', flags=[]),
-      test2.create_variant(variant='stress', flags=['-v']),
-    ]
-
-    suite.FilterTestCasesByStatus()
-    self.assertEquals(
-        [
-          TestCase(suite, 'foo/bar', 'foo/bar').create_variant(None, ['-v']),
-          TestCase(suite, 'baz/bar', 'baz/bar'),
-        ],
-        suite.tests,
+  def setUp(self):
+    test_dir = os.path.dirname(__file__)
+    self.test_root = os.path.join(test_dir, "fake_testsuite")
+    self.test_config = TestConfig(
+        command_prefix=[],
+        extra_flags=[],
+        isolates=False,
+        mode_flags=[],
+        no_harness=False,
+        noi18n=False,
+        random_seed=0,
+        run_skipped=False,
+        shell_dir='fake_testsuite/fake_d8',
+        timeout=10,
+        verbose=False,
     )
 
-    self.assertEquals(
-        set(['PREV', 'PASS', 'SLOW']),
-        suite.GetStatusFileOutcomes(suite.tests[0].name,
-                                    suite.tests[0].variant),
-    )
-    self.assertEquals(
-        set(['PREV', 'PASS', 'FAIL', 'SLOW']),
-        suite.GetStatusFileOutcomes(suite.tests[1].name,
-                                    suite.tests[1].variant),
-    )
+    self.suite = TestSuite.Load(
+        self.test_root, self.test_config, "standard_runner")
 
-  def test_fail_ok_outcome(self):
-    suite = TestSuite('foo', 'bar')
-    suite.rules = {
-      '': {
-        'foo/bar': set(['FAIL_OK']),
-        'baz/bar': set(['FAIL']),
-      },
-    }
-    suite.prefix_rules = {}
-    suite.tests = [
-      TestCase(suite, 'foo/bar', 'foo/bar'),
-      TestCase(suite, 'baz/bar', 'baz/bar'),
-    ]
+  def testLoadingTestSuites(self):
+    self.assertEquals(self.suite.name, "fake_testsuite")
+    self.assertEquals(self.suite.test_config, self.test_config)
 
-    for t in suite.tests:
-      self.assertEquals(['FAIL'], t.expected_outcomes)
+    # Verify that the components of the TestSuite aren't loaded yet.
+    self.assertIsNone(self.suite.tests)
+    self.assertIsNone(self.suite.statusfile)
+
+  def testLoadingTestsFromDisk(self):
+    tests = self.suite.load_tests_from_disk(
+      statusfile_variables={})
+    def is_generator(iterator):
+      return iterator == iter(iterator)
+
+    self.assertTrue(is_generator(tests))
+    self.assertEquals(tests.test_count_estimate, 2)
+
+    slow_tests, fast_tests = list(tests.slow_tests), list(tests.fast_tests)
+    # Verify that the components of the TestSuite are loaded.
+    self.assertTrue(len(slow_tests) == len(fast_tests) == 1)
+    self.assertTrue(all(test.is_slow for test in slow_tests))
+    self.assertFalse(any(test.is_slow for test in fast_tests))
+    self.assertIsNotNone(self.suite.statusfile)
+
+  def testMergingTestGenerators(self):
+    tests = self.suite.load_tests_from_disk(
+      statusfile_variables={})
+    more_tests = self.suite.load_tests_from_disk(
+      statusfile_variables={})
+
+    # Merge the test generators
+    tests.merge(more_tests)
+    self.assertEquals(tests.test_count_estimate, 4)
+
+    # Check the tests are sorted by speed
+    test_speeds = []
+    for test in tests:
+      test_speeds.append(test.is_slow)
+
+    self.assertEquals(test_speeds, [True, True, False, False])
 
 
 if __name__ == '__main__':
diff --git a/src/v8/tools/testrunner/local/utils.py b/src/v8/tools/testrunner/local/utils.py
index bf8c3d9..9128c43 100644
--- a/src/v8/tools/testrunner/local/utils.py
+++ b/src/v8/tools/testrunner/local/utils.py
@@ -25,6 +25,8 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+# for py2/py3 compatibility
+from __future__ import print_function
 
 from os.path import exists
 from os.path import isdir
@@ -36,6 +38,21 @@
 import urllib2
 
 
+### Exit codes and their meaning.
+# Normal execution.
+EXIT_CODE_PASS = 0
+# Execution with test failures.
+EXIT_CODE_FAILURES = 1
+# Execution with no tests executed.
+EXIT_CODE_NO_TESTS = 2
+# Execution aborted with SIGINT (Ctrl-C).
+EXIT_CODE_INTERRUPTED = 3
+# Execution aborted with SIGTERM.
+EXIT_CODE_TERMINATED = 4
+# Internal error.
+EXIT_CODE_INTERNAL_ERROR = 5
+
+
 def GetSuitePaths(test_root):
   return [ f for f in os.listdir(test_root) if isdir(join(test_root, f)) ]
 
@@ -132,7 +149,7 @@
       return
     except:
       # If there's no curl, fall back to urlopen.
-      print "Curl is currently not installed. Falling back to python."
+      print("Curl is currently not installed. Falling back to python.")
       pass
   with open(destination, 'w') as f:
     f.write(urllib2.urlopen(source).read())
diff --git a/src/v8/tools/testrunner/local/variants.py b/src/v8/tools/testrunner/local/variants.py
index f1e9ad3..4b0cf15 100644
--- a/src/v8/tools/testrunner/local/variants.py
+++ b/src/v8/tools/testrunner/local/variants.py
@@ -4,26 +4,57 @@
 
 # Use this to run several variants of the tests.
 ALL_VARIANT_FLAGS = {
+  "assert_types": [["--assert-types"]],
   "code_serializer": [["--cache=code"]],
   "default": [[]],
   "future": [["--future"]],
+  "gc_stats": [["--gc-stats=1"]],
   # Alias of exhaustive variants, but triggering new test framework features.
   "infra_staging": [[]],
-  "liftoff": [["--liftoff"]],
+  "interpreted_regexp": [["--regexp-interpret-all"]],
+  "jitless": [["--jitless"]],
   "minor_mc": [["--minor-mc"]],
   # No optimization means disable all optimizations. OptimizeFunctionOnNextCall
   # would not force optimization too. It turns into a Nop. Please see
   # https://chromium-review.googlesource.com/c/452620/ for more discussion.
-  "nooptimization": [["--noopt"]],
+  # For WebAssembly, we test "Liftoff-only" in the nooptimization variant and
+  # "TurboFan-only" in the stress variant. The WebAssembly configuration is
+  # independent of JS optimizations, so we can combine those configs.
+  "nooptimization": [["--no-opt", "--liftoff", "--no-wasm-tier-up"]],
   "slow_path": [["--force-slow-path"]],
-  "stress": [["--stress-opt", "--always-opt"]],
-  "stress_background_compile": [["--background-compile", "--stress-background-compile"]],
+  "stress": [["--stress-opt", "--always-opt", "--no-liftoff",
+              "--no-wasm-tier-up"]],
+  "stress_js_bg_compile_wasm_code_gc": [["--stress-background-compile",
+                                         "--wasm-code-gc",
+                                         "--stress-wasm-code-gc"]],
   "stress_incremental_marking":  [["--stress-incremental-marking"]],
   # Trigger stress sampling allocation profiler with sample interval = 2^14
   "stress_sampling": [["--stress-sampling-allocation-profiler=16384"]],
   "trusted": [["--no-untrusted-code-mitigations"]],
-  "wasm_traps": [["--wasm_trap_handler", "--invoke-weak-callbacks", "--wasm-jit-to-native"]],
-  "wasm_no_native": [["--no-wasm-jit-to-native"]],
+  "no_wasm_traps": [["--no-wasm-trap-handler"]],
 }
 
-ALL_VARIANTS = set(ALL_VARIANT_FLAGS.keys())
+SLOW_VARIANTS = set([
+  'stress',
+  'nooptimization',
+])
+
+FAST_VARIANTS = set([
+  'default'
+])
+
+
+def _variant_order_key(v):
+  if v in SLOW_VARIANTS:
+    return 0
+  if v in FAST_VARIANTS:
+    return 100
+  return 50
+
+ALL_VARIANTS = sorted(ALL_VARIANT_FLAGS.keys(),
+                      key=_variant_order_key)
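+# E.g. the slow variants ('stress', 'nooptimization') sort first, the fast
+# 'default' variant sorts last, and all remaining variants stay in between.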
+
+# Check {SLOW,FAST}_VARIANTS entries
+for variants in [SLOW_VARIANTS, FAST_VARIANTS]:
+  for v in variants:
+    assert v in ALL_VARIANT_FLAGS
diff --git a/src/v8/tools/testrunner/local/verbose.py b/src/v8/tools/testrunner/local/verbose.py
index 49e8085..8569368 100644
--- a/src/v8/tools/testrunner/local/verbose.py
+++ b/src/v8/tools/testrunner/local/verbose.py
@@ -25,6 +25,8 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+# for py2/py3 compatibility
+from __future__ import print_function
 
 import sys
 import time
@@ -63,7 +65,7 @@
     else:
       assert False # Unreachable # TODO: check this in outcomes parsing phase.
 
-  print REPORT_TEMPLATE % {
+  print(REPORT_TEMPLATE % {
     "total": total,
     "skipped": skipped,
     "nocrash": nocrash,
@@ -71,17 +73,17 @@
     "fail_ok": fail_ok,
     "fail": fail,
     "crash": crash,
-  }
+  })
 
 
 def PrintTestSource(tests):
   for test in tests:
-    print "--- begin source: %s ---" % test
+    print("--- begin source: %s ---" % test)
     if test.is_source_available():
-      print test.get_source()
+      print(test.get_source())
     else:
-      print '(no source available)'
-    print "--- end source: %s ---" % test
+      print('(no source available)')
+    print("--- end source: %s ---" % test)
 
 
 def FormatTime(d):
@@ -92,11 +94,11 @@
 def PrintTestDurations(suites, outputs, overall_time):
     # Write the times to stderr to make it easy to separate from the
     # test output.
-    print
+    print()
     sys.stderr.write("--- Total time: %s ---\n" % FormatTime(overall_time))
     timed_tests = [(t, outputs[t].duration) for s in suites for t in s.tests
                    if t in outputs]
-    timed_tests.sort(key=lambda (_, duration): duration, reverse=True)
+    timed_tests.sort(key=lambda test_duration: test_duration[1], reverse=True)
     index = 1
     for test, duration in timed_tests[:20]:
       t = FormatTime(duration)
diff --git a/src/v8/tools/testrunner/num_fuzzer.py b/src/v8/tools/testrunner/num_fuzzer.py
new file mode 100755
index 0000000..d4e92a6
--- /dev/null
+++ b/src/v8/tools/testrunner/num_fuzzer.py
@@ -0,0 +1,226 @@
+#!/usr/bin/env python
+#
+# Copyright 2017 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import random
+import sys
+
+# Adds testrunner to the path, hence it has to be imported at the beginning.
+import base_runner
+
+from testrunner.local import utils
+
+from testrunner.testproc import fuzzer
+from testrunner.testproc.base import TestProcProducer
+from testrunner.testproc.combiner import CombinerProc
+from testrunner.testproc.execution import ExecutionProc
+from testrunner.testproc.expectation import ForgiveTimeoutProc
+from testrunner.testproc.filter import StatusFileFilterProc, NameFilterProc
+from testrunner.testproc.loader import LoadProc
+from testrunner.testproc.progress import ResultsTracker
+from testrunner.utils import random_utils
+
+
+DEFAULT_SUITES = ["mjsunit", "webkit", "benchmarks"]
+
+
+class NumFuzzer(base_runner.BaseTestRunner):
+  def __init__(self, *args, **kwargs):
+    super(NumFuzzer, self).__init__(*args, **kwargs)
+
+  @property
+  def framework_name(self):
+    return 'num_fuzzer'
+
+  def _add_parser_options(self, parser):
+    parser.add_option("--fuzzer-random-seed", default=0,
+                      help="Default seed for initializing fuzzer random "
+                      "generator")
+    parser.add_option("--tests-count", default=5, type="int",
+                      help="Number of tests to generate from each base test. "
+                           "Can be combined with --total-timeout-sec with "
+                           "value 0 to provide infinite number of subtests. "
+                           "When --combine-tests is set it indicates how many "
+                           "tests to create in total")
+
+    # Stress gc
+    parser.add_option("--stress-marking", default=0, type="int",
+                      help="probability [0-10] of adding --stress-marking "
+                           "flag to the test")
+    parser.add_option("--stress-scavenge", default=0, type="int",
+                      help="probability [0-10] of adding --stress-scavenge "
+                           "flag to the test")
+    parser.add_option("--stress-compaction", default=0, type="int",
+                      help="probability [0-10] of adding --stress-compaction "
+                           "flag to the test")
+    parser.add_option("--stress-gc", default=0, type="int",
+                      help="probability [0-10] of adding --random-gc-interval "
+                           "flag to the test")
+
+    # Stress tasks
+    parser.add_option("--stress-delay-tasks", default=0, type="int",
+                      help="probability [0-10] of adding --stress-delay-tasks "
+                           "flag to the test")
+    parser.add_option("--stress-thread-pool-size", default=0, type="int",
+                      help="probability [0-10] of adding --thread-pool-size "
+                           "flag to the test")
+
+    # Stress deopt
+    parser.add_option("--stress-deopt", default=0, type="int",
+                      help="probability [0-10] of adding --deopt-every-n-times "
+                           "flag to the test")
+    parser.add_option("--stress-deopt-min", default=1, type="int",
+                      help="extends --stress-deopt to have minimum interval "
+                           "between deopt points")
+
+    # Combine multiple tests
+    parser.add_option("--combine-tests", default=False, action="store_true",
+                      help="Combine multiple tests as one and run with "
+                           "try-catch wrapper")
+    parser.add_option("--combine-max", default=100, type="int",
+                      help="Maximum number of tests to combine")
+    parser.add_option("--combine-min", default=2, type="int",
+                      help="Minimum number of tests to combine")
+
+    # Miscellaneous
+    parser.add_option("--variants", default='default',
+                      help="Comma-separated list of testing variants")
+
+    return parser
+
+
+  def _process_options(self, options):
+    if not options.fuzzer_random_seed:
+      options.fuzzer_random_seed = random_utils.random_seed()
+
+    if options.total_timeout_sec:
+      options.tests_count = 0
+
+    if options.combine_tests:
+      if options.combine_min > options.combine_max:
+        print('min_group_size (%d) cannot be larger than max_group_size (%d)' %
+              (options.combine_min, options.combine_max))
+        raise base_runner.TestRunnerError()
+
+    if options.variants != 'default':
+      print('Only default testing variant is supported with numfuzz')
+      raise base_runner.TestRunnerError()
+
+    return True
+
+  def _get_default_suite_names(self):
+    return DEFAULT_SUITES
+
+  def _get_statusfile_variables(self, options):
+    variables = (
+        super(NumFuzzer, self)._get_statusfile_variables(options))
+    variables.update({
+      'deopt_fuzzer': bool(options.stress_deopt),
+      'endurance_fuzzer': bool(options.combine_tests),
+      'gc_stress': bool(options.stress_gc),
+      'gc_fuzzer': bool(max([options.stress_marking,
+                             options.stress_scavenge,
+                             options.stress_compaction,
+                             options.stress_gc,
+                             options.stress_delay_tasks,
+                             options.stress_thread_pool_size])),
+    })
+    return variables
+
+  def _do_execute(self, tests, args, options):
+    loader = LoadProc(tests)
+    fuzzer_rng = random.Random(options.fuzzer_random_seed)
+
+    combiner = self._create_combiner(fuzzer_rng, options)
+    results = self._create_result_tracker(options)
+    execproc = ExecutionProc(options.j)
+    sigproc = self._create_signal_proc()
+    indicators = self._create_progress_indicators(
+      tests.test_count_estimate, options)
+    procs = [
+      loader,
+      NameFilterProc(args) if args else None,
+      StatusFileFilterProc(None, None),
+      # TODO(majeski): Improve sharding when combiner is present. Maybe select
+      # different random seeds for shards instead of splitting tests.
+      self._create_shard_proc(options),
+      ForgiveTimeoutProc(),
+      combiner,
+      self._create_fuzzer(fuzzer_rng, options),
+      sigproc,
+    ] + indicators + [
+      results,
+      self._create_timeout_proc(options),
+      self._create_rerun_proc(options),
+      execproc,
+    ]
+    self._prepare_procs(procs)
+    loader.load_initial_tests(initial_batch_size=float('inf'))
+
+    # TODO(majeski): maybe some notification from loader would be better?
+    if combiner:
+      combiner.generate_initial_tests(options.j * 4)
+
+    # This starts up worker processes and blocks until all tests are
+    # processed.
+    execproc.run()
+
+    for indicator in indicators:
+      indicator.finished()
+
+    print('>>> %d tests ran' % results.total)
+    if results.failed:
+      return utils.EXIT_CODE_FAILURES
+
+    # Indicate if a SIGINT or SIGTERM happened.
+    return sigproc.exit_code
+
+  def _is_testsuite_supported(self, suite, options):
+    return not options.combine_tests or suite.test_combiner_available()
+
+  def _create_combiner(self, rng, options):
+    if not options.combine_tests:
+      return None
+    return CombinerProc(rng, options.combine_min, options.combine_max,
+                        options.tests_count)
+
+  def _create_fuzzer(self, rng, options):
+    return fuzzer.FuzzerProc(
+        rng,
+        self._tests_count(options),
+        self._create_fuzzer_configs(options),
+        self._disable_analysis(options),
+    )
+
+  def _tests_count(self, options):
+    if options.combine_tests:
+      return 1
+    return options.tests_count
+
+  def _disable_analysis(self, options):
+    """Disable analysis phase when options are used that don't support it."""
+    return options.combine_tests
+
+  def _create_fuzzer_configs(self, options):
+    fuzzers = []
+    def add(name, prob, *args):
+      if prob:
+        fuzzers.append(fuzzer.create_fuzzer_config(name, prob, *args))
+
+    add('compaction', options.stress_compaction)
+    add('marking', options.stress_marking)
+    add('scavenge', options.stress_scavenge)
+    add('gc_interval', options.stress_gc)
+    add('threads', options.stress_thread_pool_size)
+    add('delay', options.stress_delay_tasks)
+    add('deopt', options.stress_deopt, options.stress_deopt_min)
+    return fuzzers
+
+
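+# Example invocation (illustrative only; flag values are arbitrary and any
+# build/output-directory options required by base_runner are omitted):
+#
+#   python tools/testrunner/num_fuzzer.py --stress-gc=3 --stress-deopt=2 \
+#       --total-timeout-sec=60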
+if __name__ == '__main__':
+  sys.exit(NumFuzzer().execute())
diff --git a/src/v8/tools/testrunner/objects/output.py b/src/v8/tools/testrunner/objects/output.py
index adc33c9..78aa63d 100644
--- a/src/v8/tools/testrunner/objects/output.py
+++ b/src/v8/tools/testrunner/objects/output.py
@@ -27,12 +27,15 @@
 
 
 import signal
+import copy
 
 from ..local import utils
 
+
 class Output(object):
 
-  def __init__(self, exit_code, timed_out, stdout, stderr, pid, duration):
+  def __init__(self, exit_code=0, timed_out=False, stdout=None, stderr=None,
+               pid=None, duration=None):
     self.exit_code = exit_code
     self.timed_out = timed_out
     self.stdout = stdout
@@ -40,6 +43,13 @@
     self.pid = pid
     self.duration = duration
 
+  def without_text(self):
+    """Returns copy of the output without stdout and stderr."""
+    other = copy.copy(self)
+    other.stdout = None
+    other.stderr = None
+    return other
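+
+  # without_text() is used by the output processors (see the DROP_PASS_STDOUT
+  # reduction in outproc/base.py) to shrink results before sending them across
+  # process boundaries.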
+
   def HasCrashed(self):
     if utils.IsWindows():
       return 0x80000000 & self.exit_code and not (0x3FFFFF00 & self.exit_code)
@@ -52,3 +62,16 @@
 
   def HasTimedOut(self):
     return self.timed_out
+
+  def IsSuccess(self):
+    return not self.HasCrashed() and not self.HasTimedOut()
+
+
+class _NullOutput(Output):
+  """Useful to signal that the binary has not been run."""
+  def __init__(self):
+    super(_NullOutput, self).__init__()
+
+
+# Default instance of the _NullOutput class above.
+NULL_OUTPUT = _NullOutput()
diff --git a/src/v8/tools/testrunner/objects/predictable.py b/src/v8/tools/testrunner/objects/predictable.py
index ad93077..52d14ea 100644
--- a/src/v8/tools/testrunner/objects/predictable.py
+++ b/src/v8/tools/testrunner/objects/predictable.py
@@ -4,6 +4,7 @@
 
 from ..local import statusfile
 from ..outproc import base as outproc_base
+from ..testproc import base as testproc_base
 from ..testproc.result import Result
 
 
@@ -15,11 +16,7 @@
 
 
 def get_outproc(test):
-  output_proc = test.output_proc
-  if output_proc.negative or statusfile.FAIL in test.expected_outcomes:
-    # TODO(majeski): Skip these tests instead of having special outproc.
-    return NeverUnexpectedOutputOutProc(output_proc)
-  return OutProc(output_proc)
+  return OutProc(test.output_proc)
 
 
 class OutProc(outproc_base.BaseOutProc):
@@ -31,9 +28,6 @@
     super(OutProc, self).__init__()
     self._outproc = _outproc
 
-  def process(self, output):
-    return Result(self.has_unexpected_output(output), output)
-
   def has_unexpected_output(self, output):
     return output.exit_code != 0
 
@@ -49,9 +43,6 @@
     return self._outproc.expected_outcomes
 
 
-class NeverUnexpectedOutputOutProc(OutProc):
-  """Output processor wrapper for tests that we will return False for
-  has_unexpected_output in the predictable mode.
-  """
-  def has_unexpected_output(self, output):
-    return False
+class PredictableFilterProc(testproc_base.TestProcFilter):
+  def _filter(self, test):
+    return test.skip_predictable()
diff --git a/src/v8/tools/testrunner/objects/testcase.py b/src/v8/tools/testrunner/objects/testcase.py
index 06db328..6d4dcd1 100644
--- a/src/v8/tools/testrunner/objects/testcase.py
+++ b/src/v8/tools/testrunner/objects/testcase.py
@@ -37,10 +37,33 @@
 
 FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
 
+# Patterns for additional resource files on Android. Files that are not covered
+# by one of the auto-detection patterns below have to be listed explicitly in a
+# "Resources:" comment in the test.
+RESOURCES_PATTERN = re.compile(r"//\s+Resources:(.*)")
+# Pattern to auto-detect files to push on Android for statements like:
+# load("path/to/file.js")
+LOAD_PATTERN = re.compile(
+    r"(?:load|readbuffer|read)\((?:'|\")([^'\"]+)(?:'|\")\)")
+# Pattern to auto-detect files to push on Android for statements like:
+# import "path/to/file.js"
+MODULE_RESOURCES_PATTERN_1 = re.compile(
+    r"(?:import|export)(?:\(| )(?:'|\")([^'\"]+)(?:'|\")")
+# Pattern to auto-detect files to push on Android for statements like:
+# import foobar from "path/to/file.js"
+MODULE_RESOURCES_PATTERN_2 = re.compile(
+    r"(?:import|export).*from (?:'|\")([^'\"]+)(?:'|\")")
+
+TIMEOUT_LONG = "long"
+
+try:
+  cmp             # Python 2
+except NameError:
+  def cmp(x, y):  # Python 3
+    return (x > y) - (x < y)
 
 
 class TestCase(object):
-  def __init__(self, suite, path, name):
+  def __init__(self, suite, path, name, test_config):
     self.suite = suite        # TestSuite object
 
     self.path = path          # string, e.g. 'div-mod', 'test-api/foo'
@@ -49,60 +72,40 @@
     self.variant = None       # name of the used testing variant
     self.variant_flags = []   # list of strings, flags specific to this test
 
-    self.id = None  # int, used to map result back to TestCase instance
-    self.run = 1  # The nth time this test is executed.
-    self.cmd = None
-
     # Fields used by the test processors.
     self.origin = None # Test that this test is subtest of.
     self.processor = None # Processor that created this subtest.
     self.procid = '%s/%s' % (self.suite.name, self.name) # unique id
     self.keep_output = False # Can output of this test be dropped
 
+    # Test config contains information needed to build the command.
+    self._test_config = test_config
+    self._random_seed = None # Overrides test config value if not None
+
+    # Outcomes
     self._statusfile_outcomes = None
-    self._expected_outcomes = None # optimization: None == [statusfile.PASS]
+    self.expected_outcomes = None
     self._statusfile_flags = None
+
     self._prepare_outcomes()
 
   def create_subtest(self, processor, subtest_id, variant=None, flags=None,
-                     keep_output=False):
+                     keep_output=False, random_seed=None):
     subtest = copy.copy(self)
     subtest.origin = self
     subtest.processor = processor
     subtest.procid += '.%s' % subtest_id
-    subtest.keep_output = keep_output
+    subtest.keep_output |= keep_output
+    if random_seed:
+      subtest._random_seed = random_seed
+    if flags:
+      subtest.variant_flags = subtest.variant_flags + flags
     if variant is not None:
       assert self.variant is None
       subtest.variant = variant
-      subtest.variant_flags = flags
       subtest._prepare_outcomes()
     return subtest
 
-  def create_variant(self, variant, flags, procid_suffix=None):
-    """Makes a shallow copy of the object and updates variant, variant flags and
-    all fields that depend on it, e.g. expected outcomes.
-
-    Args
-      variant       - variant name
-      flags         - flags that should be added to origin test's variant flags
-      procid_suffix - for multiple variants with the same name set suffix to
-        keep procid unique.
-    """
-    other = copy.copy(self)
-    if not self.variant_flags:
-      other.variant_flags = flags
-    else:
-      other.variant_flags = self.variant_flags + flags
-    other.variant = variant
-    if procid_suffix:
-      other.procid += '[%s-%s]' % (variant, procid_suffix)
-    else:
-      other.procid += '[%s]' % variant
-
-    other._prepare_outcomes(variant != self.variant)
-
-    return other
-
   def _prepare_outcomes(self, force_update=True):
     if force_update or self._statusfile_outcomes is None:
       def is_flag(outcome):
@@ -140,7 +143,8 @@
 
   @property
   def do_skip(self):
-    return statusfile.SKIP in self._statusfile_outcomes
+    return (statusfile.SKIP in self._statusfile_outcomes and
+            not self.suite.test_config.run_skipped)
 
   @property
   def is_slow(self):
@@ -160,43 +164,59 @@
   def only_standard_variant(self):
     return statusfile.NO_VARIANTS in self._statusfile_outcomes
 
-  def get_command(self, context):
-    params = self._get_cmd_params(context)
+  def get_command(self):
+    params = self._get_cmd_params()
     env = self._get_cmd_env()
-    shell, shell_flags = self._get_shell_with_flags(context)
-    timeout = self._get_timeout(params, context.timeout)
-    return self._create_cmd(shell, shell_flags + params, env, timeout, context)
+    shell = self.get_shell()
+    if utils.IsWindows():
+      shell += '.exe'
+    shell_flags = self._get_shell_flags()
+    timeout = self._get_timeout(params)
+    return self._create_cmd(shell, shell_flags + params, env, timeout)
 
-  def _get_cmd_params(self, ctx):
+  def _get_cmd_params(self):
     """Gets command parameters and combines them in the following order:
       - files [empty by default]
+      - random seed
       - extra flags (from command line)
       - user flags (variant/fuzzer flags)
-      - statusfile flags
       - mode flags (based on chosen mode)
       - source flags (from source code) [empty by default]
+      - test-suite flags
+      - statusfile flags
 
     The best way to modify how parameters are created is to only override
     methods for getting partial parameters.
     """
     return (
-        self._get_files_params(ctx) +
-        self._get_extra_flags(ctx) +
+        self._get_files_params() +
+        self._get_random_seed_flags() +
+        self._get_extra_flags() +
         self._get_variant_flags() +
-        self._get_statusfile_flags() +
-        self._get_mode_flags(ctx) +
+        self._get_mode_flags() +
         self._get_source_flags() +
-        self._get_suite_flags(ctx)
+        self._get_suite_flags() +
+        self._get_statusfile_flags()
     )
 
   def _get_cmd_env(self):
     return {}
 
-  def _get_files_params(self, ctx):
+  def _get_files_params(self):
     return []
 
-  def _get_extra_flags(self, ctx):
-    return ctx.extra_flags
+  def _get_timeout_param(self):
+    return None
+
+  def _get_random_seed_flags(self):
+    return ['--random-seed=%d' % self.random_seed]
+
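+  # A per-subtest random seed (set via create_subtest) takes precedence over
+  # the seed from the test config.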
+  @property
+  def random_seed(self):
+    return self._random_seed or self._test_config.random_seed
+
+  def _get_extra_flags(self):
+    return self._test_config.extra_flags
 
   def _get_variant_flags(self):
     return self.variant_flags
@@ -208,50 +228,49 @@
     """
     return self._statusfile_flags
 
-  def _get_mode_flags(self, ctx):
-    return ctx.mode_flags
+  def _get_mode_flags(self):
+    return self._test_config.mode_flags
 
   def _get_source_flags(self):
     return []
 
-  def _get_suite_flags(self, ctx):
+  def _get_suite_flags(self):
     return []
 
-  def _get_shell_with_flags(self, ctx):
-    shell = self.get_shell()
-    shell_flags = []
-    if shell == 'd8':
-      shell_flags.append('--test')
-    if utils.IsWindows():
-      shell += '.exe'
-    if ctx.random_seed:
-      shell_flags.append('--random-seed=%s' % ctx.random_seed)
-    return shell, shell_flags
+  def _get_shell_flags(self):
+    return []
 
-  def _get_timeout(self, params, timeout):
+  def _get_timeout(self, params):
+    timeout = self._test_config.timeout
     if "--stress-opt" in params:
       timeout *= 4
+    if "--jitless" in params:
+      timeout *= 2
+    if "--no-opt" in params:
+      timeout *= 2
     if "--noenable-vfp3" in params:
       timeout *= 2
-
-    # TODO(majeski): make it slow outcome dependent.
-    timeout *= 2
+    if self._get_timeout_param() == TIMEOUT_LONG:
+      timeout *= 10
+    if self.is_slow:
+      timeout *= 4
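+    # E.g. a test marked SLOW that also runs with --stress-opt ends up with
+    # 4 * 4 = 16 times the configured timeout.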
     return timeout
 
   def get_shell(self):
-    return 'd8'
+    raise NotImplementedError()
 
   def _get_suffix(self):
     return '.js'
 
-  def _create_cmd(self, shell, params, env, timeout, ctx):
+  def _create_cmd(self, shell, params, env, timeout):
     return command.Command(
-      cmd_prefix=ctx.command_prefix,
-      shell=os.path.abspath(os.path.join(ctx.shell_dir, shell)),
+      cmd_prefix=self._test_config.command_prefix,
+      shell=os.path.abspath(os.path.join(self._test_config.shell_dir, shell)),
       args=params,
       env=env,
       timeout=timeout,
-      verbose=ctx.verbose
+      verbose=self._test_config.verbose,
+      resources_func=self._get_resources,
     )
 
   def _parse_source_flags(self, source=None):
@@ -271,6 +290,18 @@
   def _get_source_path(self):
     return None
 
+  def _get_resources(self):
+    """Returns a list of absolute paths with additional files needed by the
+    test case.
+
+    Used to push additional files to Android devices.
+    """
+    return []
+
+  def skip_predictable(self):
+    """Returns True if the test case is not suitable for predictable testing."""
+    return True
+
   @property
   def output_proc(self):
     if self.expected_outcomes is outproc.OUTCOMES_PASS:
@@ -281,18 +312,63 @@
     # Make sure that test cases are sorted correctly if sorted without
     # key function. But using a key function is preferred for speed.
     return cmp(
-        (self.suite.name, self.name, self.variant_flags),
-        (other.suite.name, other.name, other.variant_flags)
+        (self.suite.name, self.name, self.variant),
+        (other.suite.name, other.name, other.variant)
     )
 
-  def __hash__(self):
-    return hash((self.suite.name, self.name, ''.join(self.variant_flags)))
-
   def __str__(self):
     return self.suite.name + '/' + self.name
 
-  # TODO(majeski): Rename `id` field or `get_id` function since they're
-  # unrelated.
-  def get_id(self):
-    return '%s/%s %s' % (
-        self.suite.name, self.name, ' '.join(self.variant_flags))
+
+class D8TestCase(TestCase):
+  def get_shell(self):
+    return "d8"
+
+  def _get_shell_flags(self):
+    return ['--test']
+
+  def _get_resources_for_file(self, file):
+    """Returns for a given file a list of absolute paths of files needed by the
+    given file.
+    """
+    with open(file) as f:
+      source = f.read()
+    result = []
+    def add_path(path):
+      result.append(os.path.abspath(path.replace('/', os.path.sep)))
+    for match in RESOURCES_PATTERN.finditer(source):
+      # There are several resources per line. Relative to base dir.
+      for path in match.group(1).strip().split():
+        add_path(path)
+    for match in LOAD_PATTERN.finditer(source):
+      # Files in load statements are relative to base dir.
+      add_path(match.group(1))
+    for match in MODULE_RESOURCES_PATTERN_1.finditer(source):
+      # Imported files are relative to the file importing them.
+      add_path(os.path.join(os.path.dirname(file), match.group(1)))
+    for match in MODULE_RESOURCES_PATTERN_2.finditer(source):
+      # Imported files are relative to the file importing them.
+      add_path(os.path.join(os.path.dirname(file), match.group(1)))
+    return result
+
+  def _get_resources(self):
+    """Returns the list of files needed by a test case."""
+    if not self._get_source_path():
+      return []
+    result = set()
+    to_check = [self._get_source_path()]
+    # Recurse over all files until reaching a fixpoint.
+    while to_check:
+      next_resource = to_check.pop()
+      result.add(next_resource)
+      for resource in self._get_resources_for_file(next_resource):
+        # Only add files that exist on disk. The patterns we check for give some
+        # false positives otherwise.
+        if resource not in result and os.path.exists(resource):
+          to_check.append(resource)
+    return sorted(list(result))
+
+  def skip_predictable(self):
+    """Returns True if the test case is not suitable for predictable testing."""
+    return (statusfile.FAIL in self.expected_outcomes or
+            self.output_proc.negative)
diff --git a/src/v8/tools/testrunner/outproc/base.py b/src/v8/tools/testrunner/outproc/base.py
index 9a9db4e..39efb60 100644
--- a/src/v8/tools/testrunner/outproc/base.py
+++ b/src/v8/tools/testrunner/outproc/base.py
@@ -2,24 +2,45 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
-import collections
 import itertools
 
+from ..testproc.base import (
+    DROP_RESULT, DROP_OUTPUT, DROP_PASS_OUTPUT, DROP_PASS_STDOUT)
 from ..local import statusfile
 from ..testproc.result import Result
 
 
 OUTCOMES_PASS = [statusfile.PASS]
 OUTCOMES_FAIL = [statusfile.FAIL]
+OUTCOMES_PASS_OR_TIMEOUT = [statusfile.PASS, statusfile.TIMEOUT]
+OUTCOMES_FAIL_OR_TIMEOUT = [statusfile.FAIL, statusfile.TIMEOUT]
 
 
 class BaseOutProc(object):
-  def process(self, output):
-    return Result(self.has_unexpected_output(output), output)
+  def process(self, output, reduction=None):
+    has_unexpected_output = self.has_unexpected_output(output)
+    return self._create_result(has_unexpected_output, output, reduction)
 
   def has_unexpected_output(self, output):
     return self.get_outcome(output) not in self.expected_outcomes
 
+  def _create_result(self, has_unexpected_output, output, reduction):
+    """Creates Result instance. When reduction is passed it tries to drop some
+    parts of the result to save memory and time needed to send the result
+    across process boundary. None disables reduction and full result is created.
+    """
+    if reduction == DROP_RESULT:
+      return None
+    if reduction == DROP_OUTPUT:
+      return Result(has_unexpected_output, None)
+    if not has_unexpected_output:
+      if reduction == DROP_PASS_OUTPUT:
+        return Result(has_unexpected_output, None)
+      if reduction == DROP_PASS_STDOUT:
+        return Result(has_unexpected_output, output.without_text())
+
+    return Result(has_unexpected_output, output)
+
   def get_outcome(self, output):
     if output.HasCrashed():
       return statusfile.CRASH
@@ -64,6 +85,11 @@
     return OUTCOMES_PASS
 
 
+class NegPassOutProc(Negative, PassOutProc):
+  """Output processor optimized for negative tests expected to PASS"""
+  pass
+
+
 class OutProc(BaseOutProc):
   """Output processor optimized for positive tests with expected outcomes
   different than a single PASS.
@@ -92,6 +118,7 @@
 
 # TODO(majeski): Override __reduce__ to make it deserialize as one instance.
 DEFAULT = PassOutProc()
+DEFAULT_NEGATIVE = NegPassOutProc()
 
 
 class ExpectedOutProc(OutProc):
diff --git a/src/v8/tools/testrunner/outproc/message.py b/src/v8/tools/testrunner/outproc/message.py
index bbfc1cd..f196cfd 100644
--- a/src/v8/tools/testrunner/outproc/message.py
+++ b/src/v8/tools/testrunner/outproc/message.py
@@ -32,8 +32,15 @@
     if len(expected_lines) != len(actual_lines):
       return True
 
+    # Try .js first, and fall back to .mjs.
+    # TODO(v8:9406): clean this up by never separating the path from
+    # the extension in the first place.
+    base_path = self._basepath + '.js'
+    if not os.path.exists(base_path):
+      base_path = self._basepath + '.mjs'
+
     env = {
-      'basename': os.path.basename(self._basepath + '.js'),
+      'basename': os.path.basename(base_path),
     }
     for (expected, actual) in itertools.izip_longest(
         expected_lines, actual_lines, fillvalue=''):
diff --git a/src/v8/tools/testrunner/outproc/mkgrokdump.py b/src/v8/tools/testrunner/outproc/mkgrokdump.py
index 8efde12..4013023 100644
--- a/src/v8/tools/testrunner/outproc/mkgrokdump.py
+++ b/src/v8/tools/testrunner/outproc/mkgrokdump.py
@@ -20,7 +20,7 @@
     diff = difflib.unified_diff(expected_lines, actual_lines, lineterm="",
                                 fromfile="expected_path")
     diffstring = '\n'.join(diff)
-    if diffstring is not "":
+    if diffstring != "":
       if "generated from a non-shipping build" in output.stdout:
         return False
       if not "generated from a shipping build" in output.stdout:
diff --git a/src/v8/tools/testrunner/outproc/test262.py b/src/v8/tools/testrunner/outproc/test262.py
index b5eb554..bf3bc05 100644
--- a/src/v8/tools/testrunner/outproc/test262.py
+++ b/src/v8/tools/testrunner/outproc/test262.py
@@ -7,18 +7,29 @@
 from . import base
 
 
+def _is_failure_output(output):
+  return (
+    output.exit_code != 0 or
+    'FAILED!' in output.stdout
+  )
+
+
 class ExceptionOutProc(base.OutProc):
   """Output processor for tests with expected exception."""
-  def __init__(self, expected_outcomes, expected_exception=None):
+  def __init__(
+      self, expected_outcomes, expected_exception=None, negative=False):
     super(ExceptionOutProc, self).__init__(expected_outcomes)
     self._expected_exception = expected_exception
+    self._negative = negative
+
+  @property
+  def negative(self):
+    return self._negative
 
   def _is_failure_output(self, output):
-    if output.exit_code != 0:
-      return True
     if self._expected_exception != self._parse_exception(output.stdout):
       return True
-    return 'FAILED!' in output.stdout
+    return _is_failure_output(output)
 
   def _parse_exception(self, string):
     # somefile:somelinenumber: someerror[: sometext]
@@ -31,16 +42,13 @@
       return None
 
 
-def _is_failure_output(self, output):
-  return (
-    output.exit_code != 0 or
-    'FAILED!' in output.stdout
-  )
-
-
 class NoExceptionOutProc(base.OutProc):
   """Output processor optimized for tests without expected exception."""
-NoExceptionOutProc._is_failure_output = _is_failure_output
+  def __init__(self, expected_outcomes):
+    super(NoExceptionOutProc, self).__init__(expected_outcomes)
+
+  def _is_failure_output(self, output):
+    return _is_failure_output(output)
 
 
 class PassNoExceptionOutProc(base.PassOutProc):
@@ -48,7 +56,8 @@
   Output processor optimized for tests expected to PASS without expected
   exception.
   """
-PassNoExceptionOutProc._is_failure_output = _is_failure_output
+  def _is_failure_output(self, output):
+    return _is_failure_output(output)
 
 
 PASS_NO_EXCEPTION = PassNoExceptionOutProc()
diff --git a/src/v8/tools/testrunner/standard_runner.py b/src/v8/tools/testrunner/standard_runner.py
index 3be2099..51e7860 100755
--- a/src/v8/tools/testrunner/standard_runner.py
+++ b/src/v8/tools/testrunner/standard_runner.py
@@ -4,595 +4,356 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+# for py2/py3 compatibility
+from __future__ import print_function
+from functools import reduce
 
-from collections import OrderedDict
-from os.path import join
-import multiprocessing
 import os
-import random
-import shlex
-import subprocess
 import sys
-import time
 
 # Adds testrunner to the path hence it has to be imported at the beggining.
 import base_runner
 
-from testrunner.local import execution
-from testrunner.local import progress
-from testrunner.local import testsuite
 from testrunner.local import utils
-from testrunner.local import verbose
 from testrunner.local.variants import ALL_VARIANTS
-from testrunner.objects import context
 from testrunner.objects import predictable
 from testrunner.testproc.execution import ExecutionProc
 from testrunner.testproc.filter import StatusFileFilterProc, NameFilterProc
 from testrunner.testproc.loader import LoadProc
-from testrunner.testproc.progress import (VerboseProgressIndicator,
-                                          ResultsTracker,
-                                          TestsCounter)
-from testrunner.testproc.rerun import RerunProc
+from testrunner.testproc.seed import SeedProc
 from testrunner.testproc.variant import VariantProc
 
 
-TIMEOUT_DEFAULT = 60
+ARCH_GUESS = utils.DefaultArch()
 
-# Variants ordered by expected runtime (slowest first).
-VARIANTS = ["default"]
+VARIANTS = ['default']
 
 MORE_VARIANTS = [
-  "stress",
-  "stress_incremental_marking",
-  "nooptimization",
-  "stress_background_compile",
-  "wasm_traps",
+  'jitless',
+  'stress',
+  'stress_js_bg_compile_wasm_code_gc',
+  'stress_incremental_marking',
 ]
 
 VARIANT_ALIASES = {
   # The default for developer workstations.
-  "dev": VARIANTS,
+  'dev': VARIANTS,
   # Additional variants, run on all bots.
-  "more": MORE_VARIANTS,
-  # Shortcut for the two above ("more" first - it has the longer running tests).
-  "exhaustive": MORE_VARIANTS + VARIANTS,
+  'more': MORE_VARIANTS,
+  # Shortcut for the two above ('more' first - it has the longer running tests)
+  'exhaustive': MORE_VARIANTS + VARIANTS,
   # Additional variants, run on a subset of bots.
-  "extra": ["future", "liftoff", "trusted"],
+  'extra': ['nooptimization', 'future', 'no_wasm_traps'],
 }
 
-GC_STRESS_FLAGS = ["--gc-interval=500", "--stress-compaction",
-                   "--concurrent-recompilation-queue-length=64",
-                   "--concurrent-recompilation-delay=500",
-                   "--concurrent-recompilation"]
+GC_STRESS_FLAGS = ['--gc-interval=500', '--stress-compaction',
+                   '--concurrent-recompilation-queue-length=64',
+                   '--concurrent-recompilation-delay=500',
+                   '--concurrent-recompilation',
+                   '--stress-flush-bytecode',
+                   '--wasm-code-gc', '--stress-wasm-code-gc']
 
-# Double the timeout for these:
-SLOW_ARCHS = ["arm",
-              "mips",
-              "mipsel",
-              "mips64",
-              "mips64el",
-              "s390",
-              "s390x",
-              "arm64"]
+RANDOM_GC_STRESS_FLAGS = ['--random-gc-interval=5000',
+                          '--stress-compaction-random']
+
 
 PREDICTABLE_WRAPPER = os.path.join(
     base_runner.BASE_DIR, 'tools', 'predictable_wrapper.py')
 
 
 class StandardTestRunner(base_runner.BaseTestRunner):
-    def __init__(self, *args, **kwargs):
-        super(StandardTestRunner, self).__init__(*args, **kwargs)
+  def __init__(self, *args, **kwargs):
+    super(StandardTestRunner, self).__init__(*args, **kwargs)
 
-        self.sancov_dir = None
+    self.sancov_dir = None
+    self._variants = None
 
-    def _get_default_suite_names(self):
-      return ['default']
+  @property
+  def framework_name(self):
+    return 'standard_runner'
 
-    def _do_execute(self, suites, args, options):
-      if options.swarming:
-        # Swarming doesn't print how isolated commands are called. Lets make
-        # this less cryptic by printing it ourselves.
-        print ' '.join(sys.argv)
+  def _get_default_suite_names(self):
+    return ['default']
 
-        if utils.GuessOS() == "macos":
-          # TODO(machenbach): Temporary output for investigating hanging test
-          # driver on mac.
-          print "V8 related processes running on this host:"
-          try:
-            print subprocess.check_output(
-              "ps -e | egrep 'd8|cctest|unittests'", shell=True)
-          except Exception:
-            pass
+  def _add_parser_options(self, parser):
+    parser.add_option('--novfp3',
+                      help='Indicates that V8 was compiled without VFP3'
+                      ' support',
+                      default=False, action='store_true')
 
-      return self._execute(args, options, suites)
+    # Variants
+    parser.add_option('--no-variants', '--novariants',
+                      help='Deprecated. '
+                           'Equivalent to passing --variants=default',
+                      default=False, dest='no_variants', action='store_true')
+    parser.add_option('--variants',
+                      help='Comma-separated list of testing variants;'
+                      ' default: "%s"' % ','.join(VARIANTS))
+    parser.add_option('--exhaustive-variants',
+                      default=False, action='store_true',
+                      help='Deprecated. '
+                           'Equivalent to passing --variants=exhaustive')
 
-    def _add_parser_options(self, parser):
-      parser.add_option("--sancov-dir",
-                        help="Directory where to collect coverage data")
-      parser.add_option("--cfi-vptr",
-                        help="Run tests with UBSAN cfi_vptr option.",
-                        default=False, action="store_true")
-      parser.add_option("--novfp3",
-                        help="Indicates that V8 was compiled without VFP3"
-                        " support",
-                        default=False, action="store_true")
-      parser.add_option("--cat", help="Print the source of the tests",
-                        default=False, action="store_true")
-      parser.add_option("--slow-tests",
-                        help="Regard slow tests (run|skip|dontcare)",
-                        default="dontcare")
-      parser.add_option("--pass-fail-tests",
-                        help="Regard pass|fail tests (run|skip|dontcare)",
-                        default="dontcare")
-      parser.add_option("--gc-stress",
-                        help="Switch on GC stress mode",
-                        default=False, action="store_true")
-      parser.add_option("--command-prefix",
-                        help="Prepended to each shell command used to run a"
-                        " test",
-                        default="")
-      parser.add_option("--extra-flags",
-                        help="Additional flags to pass to each test command",
-                        action="append", default=[])
-      parser.add_option("--infra-staging", help="Use new test runner features",
-                        default=False, action="store_true")
-      parser.add_option("--isolates", help="Whether to test isolates",
-                        default=False, action="store_true")
-      parser.add_option("-j", help="The number of parallel tasks to run",
-                        default=0, type="int")
-      parser.add_option("--no-harness", "--noharness",
-                        help="Run without test harness of a given suite",
-                        default=False, action="store_true")
-      parser.add_option("--no-presubmit", "--nopresubmit",
-                        help='Skip presubmit checks (deprecated)',
-                        default=False, dest="no_presubmit", action="store_true")
-      parser.add_option("--no-sorting", "--nosorting",
-                        help="Don't sort tests according to duration of last"
-                        " run.",
-                        default=False, dest="no_sorting", action="store_true")
-      parser.add_option("--no-variants", "--novariants",
-                        help="Deprecated. "
-                             "Equivalent to passing --variants=default",
-                        default=False, dest="no_variants", action="store_true")
-      parser.add_option("--variants",
-                        help="Comma-separated list of testing variants;"
-                        " default: \"%s\"" % ",".join(VARIANTS))
-      parser.add_option("--exhaustive-variants",
-                        default=False, action="store_true",
-                        help="Deprecated. "
-                             "Equivalent to passing --variants=exhaustive")
-      parser.add_option("-p", "--progress",
-                        help=("The style of progress indicator"
-                              " (verbose, dots, color, mono)"),
-                        choices=progress.PROGRESS_INDICATORS.keys(),
-                        default="mono")
-      parser.add_option("--quickcheck", default=False, action="store_true",
-                        help=("Quick check mode (skip slow tests)"))
-      parser.add_option("--report", help="Print a summary of the tests to be"
-                        " run",
-                        default=False, action="store_true")
-      parser.add_option("--json-test-results",
-                        help="Path to a file for storing json results.")
-      parser.add_option("--flakiness-results",
-                        help="Path to a file for storing flakiness json.")
-      parser.add_option("--rerun-failures-count",
-                        help=("Number of times to rerun each failing test case."
-                              " Very slow tests will be rerun only once."),
-                        default=0, type="int")
-      parser.add_option("--rerun-failures-max",
-                        help="Maximum number of failing test cases to rerun.",
-                        default=100, type="int")
-      parser.add_option("--dont-skip-slow-simulator-tests",
-                        help="Don't skip more slow tests when using a"
-                        " simulator.",
-                        default=False, action="store_true",
-                        dest="dont_skip_simulator_slow_tests")
-      parser.add_option("--swarming",
-                        help="Indicates running test driver on swarming.",
-                        default=False, action="store_true")
-      parser.add_option("--time", help="Print timing information after running",
-                        default=False, action="store_true")
-      parser.add_option("-t", "--timeout", help="Timeout in seconds",
-                        default=TIMEOUT_DEFAULT, type="int")
-      parser.add_option("--warn-unused", help="Report unused rules",
-                        default=False, action="store_true")
-      parser.add_option("--junitout", help="File name of the JUnit output")
-      parser.add_option("--junittestsuite",
-                        help="The testsuite name in the JUnit output file",
-                        default="v8tests")
-      parser.add_option("--random-seed", default=0, dest="random_seed",
-                        help="Default seed for initializing random generator",
-                        type=int)
-      parser.add_option("--random-seed-stress-count", default=1, type="int",
-                        dest="random_seed_stress_count",
-                        help="Number of runs with different random seeds")
+    # Filters
+    parser.add_option('--slow-tests', default='dontcare',
+                      help='Regard slow tests (run|skip|dontcare)')
+    parser.add_option('--pass-fail-tests', default='dontcare',
+                      help='Regard pass|fail tests (run|skip|dontcare)')
+    parser.add_option('--quickcheck', default=False, action='store_true',
+                      help=('Quick check mode (skip slow tests)'))
+    parser.add_option('--dont-skip-slow-simulator-tests',
+                      help='Don\'t skip more slow tests when using a'
+                      ' simulator.',
+                      default=False, action='store_true',
+                      dest='dont_skip_simulator_slow_tests')
 
-    def _process_options(self, options):
-      global VARIANTS
+    # Stress modes
+    parser.add_option('--gc-stress',
+                      help='Switch on GC stress mode',
+                      default=False, action='store_true')
+    parser.add_option('--random-gc-stress',
+                      help='Switch on random GC stress mode',
+                      default=False, action='store_true')
+    parser.add_option('--random-seed-stress-count', default=1, type='int',
+                      dest='random_seed_stress_count',
+                      help='Number of runs with different random seeds. Only '
+                           'with test processors: 0 means infinite '
+                           'generation.')
 
-      if options.sancov_dir:
-        self.sancov_dir = options.sancov_dir
-        if not os.path.exists(self.sancov_dir):
-          print("sancov-dir %s doesn't exist" % self.sancov_dir)
-          raise base_runner.TestRunnerError()
+    # Noop
+    parser.add_option('--cfi-vptr',
+                      help='Run tests with UBSAN cfi_vptr option.',
+                      default=False, action='store_true')
+    parser.add_option('--infra-staging', help='Use new test runner features',
+                      dest='infra_staging', default=None,
+                      action='store_true')
+    parser.add_option('--no-infra-staging',
+                      help='Opt out of new test runner features',
+                      dest='infra_staging', default=None,
+                      action='store_false')
+    parser.add_option('--no-sorting', '--nosorting',
+                      help='Don\'t sort tests according to duration of last'
+                      ' run.',
+                      default=False, dest='no_sorting', action='store_true')
+    parser.add_option('--no-presubmit', '--nopresubmit',
+                      help='Skip presubmit checks (deprecated)',
+                      default=False, dest='no_presubmit', action='store_true')
 
-      options.command_prefix = shlex.split(options.command_prefix)
-      options.extra_flags = sum(map(shlex.split, options.extra_flags), [])
+    # Unimplemented for test processors
+    parser.add_option('--sancov-dir',
+                      help='Directory where to collect coverage data')
+    parser.add_option('--cat', help='Print the source of the tests',
+                      default=False, action='store_true')
+    parser.add_option('--flakiness-results',
+                      help='Path to a file for storing flakiness json.')
+    parser.add_option('--time', help='Print timing information after running',
+                      default=False, action='store_true')
+    parser.add_option('--warn-unused', help='Report unused rules',
+                      default=False, action='store_true')
+    parser.add_option('--report', default=False, action='store_true',
+                      help='Print a summary of the tests to be run')
 
-      if options.gc_stress:
-        options.extra_flags += GC_STRESS_FLAGS
-
-      if self.build_config.asan:
-        options.extra_flags.append("--invoke-weak-callbacks")
-        options.extra_flags.append("--omit-quit")
-
-      if options.novfp3:
-        options.extra_flags.append("--noenable-vfp3")
-
-      if options.no_variants:  # pragma: no cover
-        print ("Option --no-variants is deprecated. "
-               "Pass --variants=default instead.")
-        assert not options.variants
-        options.variants = "default"
-
-      if options.exhaustive_variants:  # pragma: no cover
-        # TODO(machenbach): Switch infra to --variants=exhaustive after M65.
-        print ("Option --exhaustive-variants is deprecated. "
-               "Pass --variants=exhaustive instead.")
-        # This is used on many bots. It includes a larger set of default
-        # variants.
-        # Other options for manipulating variants still apply afterwards.
-        assert not options.variants
-        options.variants = "exhaustive"
-
-      if options.quickcheck:
-        assert not options.variants
-        options.variants = "stress,default"
-        options.slow_tests = "skip"
-        options.pass_fail_tests = "skip"
-
-      if self.build_config.predictable:
-        options.variants = "default"
-        options.extra_flags.append("--predictable")
-        options.extra_flags.append("--verify_predictable")
-        options.extra_flags.append("--no-inline-new")
-        # Add predictable wrapper to command prefix.
-        options.command_prefix = (
-            [sys.executable, PREDICTABLE_WRAPPER] + options.command_prefix)
-
-      # TODO(machenbach): Figure out how to test a bigger subset of variants on
-      # msan.
-      if self.build_config.msan:
-        options.variants = "default"
-
-      if options.j == 0:
-        options.j = multiprocessing.cpu_count()
-
-      if options.random_seed_stress_count <= 1 and options.random_seed == 0:
-        options.random_seed = self._random_seed()
-
-      # Use developer defaults if no variant was specified.
-      options.variants = options.variants or "dev"
-
-      if options.variants == "infra_staging":
-        options.variants = "exhaustive"
-        options.infra_staging = True
-
-      # Resolve variant aliases and dedupe.
-      # TODO(machenbach): Don't mutate global variable. Rather pass mutated
-      # version as local variable.
-      VARIANTS = list(set(reduce(
-          list.__add__,
-          (VARIANT_ALIASES.get(v, [v]) for v in options.variants.split(",")),
-          [],
-      )))
-
-      if not set(VARIANTS).issubset(ALL_VARIANTS):
-        print "All variants must be in %s" % str(ALL_VARIANTS)
+  def _process_options(self, options):
+    if options.sancov_dir:
+      self.sancov_dir = options.sancov_dir
+      if not os.path.exists(self.sancov_dir):
+        print('sancov-dir %s doesn\'t exist' % self.sancov_dir)
         raise base_runner.TestRunnerError()
 
-      def CheckTestMode(name, option):  # pragma: no cover
-        if not option in ["run", "skip", "dontcare"]:
-          print "Unknown %s mode %s" % (name, option)
-          raise base_runner.TestRunnerError()
-      CheckTestMode("slow test", options.slow_tests)
-      CheckTestMode("pass|fail test", options.pass_fail_tests)
-      if self.build_config.no_i18n:
-        base_runner.TEST_MAP["bot_default"].remove("intl")
-        base_runner.TEST_MAP["default"].remove("intl")
-        # TODO(machenbach): uncomment after infra side lands.
-        # base_runner.TEST_MAP["d8_default"].remove("intl")
+    if options.gc_stress:
+      options.extra_flags += GC_STRESS_FLAGS
 
-    def _setup_env(self):
-      super(StandardTestRunner, self)._setup_env()
+    if options.random_gc_stress:
+      options.extra_flags += RANDOM_GC_STRESS_FLAGS
 
-      symbolizer_option = self._get_external_symbolizer_option()
+    if self.build_config.asan:
+      options.extra_flags.append('--invoke-weak-callbacks')
+      options.extra_flags.append('--omit-quit')
 
-      if self.sancov_dir:
-        os.environ['ASAN_OPTIONS'] = ":".join([
-          'coverage=1',
-          'coverage_dir=%s' % self.sancov_dir,
-          symbolizer_option,
-          "allow_user_segv_handler=1",
-        ])
+    if self.build_config.no_snap:
+      # Speed up slow nosnap runs. Allocation verification is covered by
+      # running mksnapshot on other builders.
+      options.extra_flags.append('--no-turbo-verify-allocation')
 
-    def _random_seed(self):
-      seed = 0
-      while not seed:
-        seed = random.SystemRandom().randint(-2147483648, 2147483647)
-      return seed
+    if options.novfp3:
+      options.extra_flags.append('--noenable-vfp3')
 
-    def _execute(self, args, options, suites):
-      print(">>> Running tests for %s.%s" % (self.build_config.arch,
-                                             self.mode_name))
-      # Populate context object.
+    if options.no_variants:  # pragma: no cover
+      print ('Option --no-variants is deprecated. '
+             'Pass --variants=default instead.')
+      assert not options.variants
+      options.variants = 'default'
 
-      # Simulators are slow, therefore allow a longer timeout.
-      if self.build_config.arch in SLOW_ARCHS:
-        options.timeout *= 2
+    if options.exhaustive_variants:  # pragma: no cover
+      # TODO(machenbach): Switch infra to --variants=exhaustive after M65.
+      print ('Option --exhaustive-variants is deprecated. '
+             'Pass --variants=exhaustive instead.')
+      # This is used on many bots. It includes a larger set of default
+      # variants.
+      # Other options for manipulating variants still apply afterwards.
+      assert not options.variants
+      options.variants = 'exhaustive'
 
-      options.timeout *= self.mode_options.timeout_scalefactor
+    if options.quickcheck:
+      assert not options.variants
+      options.variants = 'stress,default'
+      options.slow_tests = 'skip'
+      options.pass_fail_tests = 'skip'
 
-      if self.build_config.predictable:
-        # Predictable mode is slower.
-        options.timeout *= 2
+    if self.build_config.predictable:
+      options.variants = 'default'
+      options.extra_flags.append('--predictable')
+      options.extra_flags.append('--verify-predictable')
+      options.extra_flags.append('--no-inline-new')
+      # Add predictable wrapper to command prefix.
+      options.command_prefix = (
+          [sys.executable, PREDICTABLE_WRAPPER] + options.command_prefix)
 
-      ctx = context.Context(self.build_config.arch,
-                            self.mode_options.execution_mode,
-                            self.outdir,
-                            self.mode_options.flags,
-                            options.verbose,
-                            options.timeout,
-                            options.isolates,
-                            options.command_prefix,
-                            options.extra_flags,
-                            self.build_config.no_i18n,
-                            options.random_seed,
-                            options.no_sorting,
-                            options.rerun_failures_count,
-                            options.rerun_failures_max,
-                            options.no_harness,
-                            use_perf_data=not options.swarming,
-                            sancov_dir=self.sancov_dir,
-                            infra_staging=options.infra_staging)
+    # TODO(machenbach): Figure out how to test a bigger subset of variants on
+    # msan.
+    if self.build_config.msan:
+      options.variants = 'default'
 
-      # TODO(all): Combine "simulator" and "simulator_run".
-      # TODO(machenbach): In GN we can derive simulator run from
-      # target_arch != v8_target_arch in the dumped build config.
-      simulator_run = (
-        not options.dont_skip_simulator_slow_tests and
-        self.build_config.arch in [
-          'arm64', 'arm', 'mipsel', 'mips', 'mips64', 'mips64el', 'ppc',
-          'ppc64', 's390', 's390x'] and
-        bool(base_runner.ARCH_GUESS) and
-        self.build_config.arch != base_runner.ARCH_GUESS)
-      # Find available test suites and read test cases from them.
-      variables = {
-        "arch": self.build_config.arch,
-        "asan": self.build_config.asan,
-        "byteorder": sys.byteorder,
-        "dcheck_always_on": self.build_config.dcheck_always_on,
-        "deopt_fuzzer": False,
-        "gc_fuzzer": False,
-        "gc_stress": options.gc_stress,
-        "gcov_coverage": self.build_config.gcov_coverage,
-        "isolates": options.isolates,
-        "mode": self.mode_options.status_mode,
-        "msan": self.build_config.msan,
-        "no_harness": options.no_harness,
-        "no_i18n": self.build_config.no_i18n,
-        "no_snap": self.build_config.no_snap,
-        "novfp3": options.novfp3,
-        "predictable": self.build_config.predictable,
-        "simulator": utils.UseSimulator(self.build_config.arch),
-        "simulator_run": simulator_run,
-        "system": utils.GuessOS(),
-        "tsan": self.build_config.tsan,
-        "ubsan_vptr": self.build_config.ubsan_vptr,
-      }
+    if options.variants == 'infra_staging':
+      options.variants = 'exhaustive'
 
-      progress_indicator = progress.IndicatorNotifier()
-      progress_indicator.Register(
-        progress.PROGRESS_INDICATORS[options.progress]())
-      if options.junitout:  # pragma: no cover
-        progress_indicator.Register(progress.JUnitTestProgressIndicator(
-            options.junitout, options.junittestsuite))
-      if options.json_test_results:
-        progress_indicator.Register(progress.JsonTestProgressIndicator(
-          options.json_test_results,
-          self.build_config.arch,
-          self.mode_options.execution_mode,
-          ctx.random_seed))
-      if options.flakiness_results:  # pragma: no cover
-        progress_indicator.Register(progress.FlakinessTestProgressIndicator(
-            options.flakiness_results))
+    self._variants = self._parse_variants(options.variants)
 
-      if options.infra_staging:
-        for s in suites:
-          s.ReadStatusFile(variables)
-          s.ReadTestCases(ctx)
+    def CheckTestMode(name, option):  # pragma: no cover
+      if option not in ['run', 'skip', 'dontcare']:
+        print('Unknown %s mode %s' % (name, option))
+        raise base_runner.TestRunnerError()
+    CheckTestMode('slow test', options.slow_tests)
+    CheckTestMode('pass|fail test', options.pass_fail_tests)
+    if self.build_config.no_i18n:
+      base_runner.TEST_MAP['bot_default'].remove('intl')
+      base_runner.TEST_MAP['default'].remove('intl')
+      # TODO(machenbach): uncomment after infra side lands.
+      # base_runner.TEST_MAP['d8_default'].remove('intl')
 
-        return self._run_test_procs(suites, args, options, progress_indicator,
-                                    ctx)
+  def _parse_variants(self, aliases_str):
+    # Use developer defaults if no variant was specified.
+    aliases_str = aliases_str or 'dev'
+    aliases = aliases_str.split(',')
+    user_variants = set(reduce(
+        list.__add__, [VARIANT_ALIASES.get(a, [a]) for a in aliases]))
 
-      all_tests = []
-      num_tests = 0
-      for s in suites:
-        s.ReadStatusFile(variables)
-        s.ReadTestCases(ctx)
-        if len(args) > 0:
-          s.FilterTestCasesByArgs(args)
-        all_tests += s.tests
+    result = [v for v in ALL_VARIANTS if v in user_variants]
+    if len(result) == len(user_variants):
+      return result
 
-        # First filtering by status applying the generic rules (tests without
-        # variants)
-        if options.warn_unused:
-          tests = [(t.name, t.variant) for t in s.tests]
-          s.statusfile.warn_unused_rules(tests, check_variant_rules=False)
-        s.FilterTestCasesByStatus(options.slow_tests, options.pass_fail_tests)
+    for v in user_variants:
+      if v not in ALL_VARIANTS:
+        print('Unknown variant: %s' % v)
+        raise base_runner.TestRunnerError()
+    assert False, 'Unreachable'
 
-        if options.cat:
-          verbose.PrintTestSource(s.tests)
-          continue
-        variant_gen = s.CreateLegacyVariantsGenerator(VARIANTS)
-        variant_tests = [ t.create_variant(v, flags)
-                          for t in s.tests
-                          for v in variant_gen.FilterVariantsByTest(t)
-                          for flags in variant_gen.GetFlagSets(t, v) ]
+  def _setup_env(self):
+    super(StandardTestRunner, self)._setup_env()
 
-        if options.random_seed_stress_count > 1:
-          # Duplicate test for random seed stress mode.
-          def iter_seed_flags():
-            for _ in range(0, options.random_seed_stress_count):
-              # Use given random seed for all runs (set by default in
-              # execution.py) or a new random seed if none is specified.
-              if options.random_seed:
-                yield []
-              else:
-                yield ["--random-seed=%d" % self._random_seed()]
-          s.tests = [
-            t.create_variant(t.variant, flags, 'seed-stress-%d' % n)
-            for t in variant_tests
-            for n, flags in enumerate(iter_seed_flags())
-          ]
-        else:
-          s.tests = variant_tests
+    symbolizer_option = self._get_external_symbolizer_option()
 
-        # Second filtering by status applying also the variant-dependent rules.
-        if options.warn_unused:
-          tests = [(t.name, t.variant) for t in s.tests]
-          s.statusfile.warn_unused_rules(tests, check_variant_rules=True)
+    if self.sancov_dir:
+      os.environ['ASAN_OPTIONS'] = ':'.join([
+        'coverage=1',
+        'coverage_dir=%s' % self.sancov_dir,
+        symbolizer_option,
+        'allow_user_segv_handler=1',
+      ])
 
-        s.FilterTestCasesByStatus(options.slow_tests, options.pass_fail_tests)
-        s.tests = self._shard_tests(s.tests, options)
+  def _get_statusfile_variables(self, options):
+    variables = (
+        super(StandardTestRunner, self)._get_statusfile_variables(options))
 
-        for t in s.tests:
-          t.cmd = t.get_command(ctx)
+    simulator_run = (
+      not options.dont_skip_simulator_slow_tests and
+      self.build_config.arch in [
+        'arm64', 'arm', 'mipsel', 'mips', 'mips64', 'mips64el', 'ppc',
+        'ppc64', 's390', 's390x'] and
+      bool(ARCH_GUESS) and
+      self.build_config.arch != ARCH_GUESS)
 
-        num_tests += len(s.tests)
+    variables.update({
+      'gc_stress': options.gc_stress or options.random_gc_stress,
+      'gc_fuzzer': options.random_gc_stress,
+      'novfp3': options.novfp3,
+      'simulator_run': simulator_run,
+    })
+    return variables
 
-      if options.cat:
-        return 0  # We're done here.
+  def _do_execute(self, tests, args, options):
+    jobs = options.j
 
-      if options.report:
-        verbose.PrintReport(all_tests)
+    print('>>> Running with test processors')
+    loader = LoadProc(tests)
+    results = self._create_result_tracker(options)
+    indicators = self._create_progress_indicators(
+        tests.test_count_estimate, options)
 
-      # Run the tests.
-      start_time = time.time()
+    outproc_factory = None
+    if self.build_config.predictable:
+      outproc_factory = predictable.get_outproc
+    execproc = ExecutionProc(jobs, outproc_factory)
+    sigproc = self._create_signal_proc()
 
-      if self.build_config.predictable:
-        outproc_factory = predictable.get_outproc
-      else:
-        outproc_factory = None
+    procs = [
+      loader,
+      NameFilterProc(args) if args else None,
+      StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
+      VariantProc(self._variants),
+      StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
+      self._create_predictable_filter(),
+      self._create_shard_proc(options),
+      self._create_seed_proc(options),
+      sigproc,
+    ] + indicators + [
+      results,
+      self._create_timeout_proc(options),
+      self._create_rerun_proc(options),
+      execproc,
+    ]
 
-      runner = execution.Runner(suites, progress_indicator, ctx,
-                                outproc_factory)
-      exit_code = runner.Run(options.j)
-      overall_duration = time.time() - start_time
+    self._prepare_procs(procs)
 
-      if options.time:
-        verbose.PrintTestDurations(suites, runner.outputs, overall_duration)
+    loader.load_initial_tests(initial_batch_size=options.j * 2)
 
-      if num_tests == 0:
-        print("Warning: no tests were run!")
+    # This starts up worker processes and blocks until all tests are
+    # processed.
+    execproc.run()
 
-      if exit_code == 1 and options.json_test_results:
-        print("Force exit code 0 after failures. Json test results file "
-              "generated with failure information.")
-        exit_code = 0
+    for indicator in indicators:
+      indicator.finished()
 
-      if self.sancov_dir:
-        # If tests ran with sanitizer coverage, merge coverage files in the end.
-        try:
-          print "Merging sancov files."
-          subprocess.check_call([
-            sys.executable,
-            join(self.basedir, "tools", "sanitizers", "sancov_merger.py"),
-            "--coverage-dir=%s" % self.sancov_dir])
-        except:
-          print >> sys.stderr, "Error: Merging sancov files failed."
-          exit_code = 1
+    if tests.test_count_estimate:
+      percentage = float(results.total) / tests.test_count_estimate * 100
+    else:
+      percentage = 0
 
-      return exit_code
+    print (('>>> %d base tests produced %d (%d%s)'
+           ' non-filtered tests') % (
+        tests.test_count_estimate, results.total, percentage, '%'))
 
-    def _shard_tests(self, tests, options):
-      shard_run, shard_count = self._get_shard_info(options)
+    print('>>> %d tests ran' % (results.total - results.remaining))
 
-      if shard_count < 2:
-        return tests
-      count = 0
-      shard = []
-      for test in tests:
-        if count % shard_count == shard_run - 1:
-          shard.append(test)
-        count += 1
-      return shard
+    exit_code = utils.EXIT_CODE_PASS
+    if results.failed:
+      exit_code = utils.EXIT_CODE_FAILURES
+    if not results.total:
+      exit_code = utils.EXIT_CODE_NO_TESTS
 
-    def _run_test_procs(self, suites, args, options, progress_indicator,
-                        context):
-      jobs = options.j
+    # Indicate if a SIGINT or SIGTERM happened.
+    return max(exit_code, sigproc.exit_code)
 
-      print '>>> Running with test processors'
-      loader = LoadProc()
-      tests_counter = TestsCounter()
-      results = ResultsTracker()
-      indicators = progress_indicator.ToProgressIndicatorProcs()
-      execproc = ExecutionProc(jobs, context)
+  def _create_predictable_filter(self):
+    if not self.build_config.predictable:
+      return None
+    return predictable.PredictableFilterProc()
 
-      procs = [
-        loader,
-        NameFilterProc(args) if args else None,
-        StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
-        self._create_shard_proc(options),
-        tests_counter,
-        VariantProc(VARIANTS),
-        StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
-      ] + indicators + [
-        results,
-        self._create_rerun_proc(context),
-        execproc,
-      ]
-
-      procs = filter(None, procs)
-
-      for i in xrange(0, len(procs) - 1):
-        procs[i].connect_to(procs[i + 1])
-
-      tests = [t for s in suites for t in s.tests]
-      tests.sort(key=lambda t: t.is_slow, reverse=True)
-
-      loader.setup()
-      loader.load_tests(tests)
-
-      print '>>> Running %d base tests' % tests_counter.total
-      tests_counter.remove_from_chain()
-
-      execproc.start()
-
-      for indicator in indicators:
-        indicator.finished()
-
-      print '>>> %d tests ran' % results.total
-
-      exit_code = 0
-      if results.failed:
-        exit_code = 1
-      if results.remaining:
-        exit_code = 2
-
-
-      if exit_code == 1 and options.json_test_results:
-        print("Force exit code 0 after failures. Json test results file "
-              "generated with failure information.")
-        exit_code = 0
-      return exit_code
-
-    def _create_rerun_proc(self, ctx):
-      if not ctx.rerun_failures_count:
-        return None
-      return RerunProc(ctx.rerun_failures_count,
-                       ctx.rerun_failures_max)
-
+  def _create_seed_proc(self, options):
+    if options.random_seed_stress_count == 1:
+      return None
+    return SeedProc(options.random_seed_stress_count, options.random_seed,
+                    options.j * 4)
 
 
 if __name__ == '__main__':
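
Note on the pipeline built in _do_execute above: processors are connected front to back, tests travel forward through next_test() (a processor may reject a test by returning False), and results travel backward through result_for(). A minimal sketch of that flow, using stand-in classes rather than the real testrunner.testproc modules:

  # Illustrative stand-ins only -- not classes from the patch.
  from __future__ import print_function

  class Proc(object):
    def __init__(self):
      self._prev = None
      self._next = None

    def connect_to(self, next_proc):
      self._next = next_proc
      next_proc._prev = self

    def next_test(self, test):            # forward: toward execution
      return self._next.next_test(test)

    def result_for(self, test, result):   # backward: toward the loader
      self._prev.result_for(test, result)

  class Reporter(Proc):
    def result_for(self, test, result):
      print('test %s -> %s' % (test, result))

  class EvenFilter(Proc):
    def next_test(self, test):
      if test % 2:
        return False                      # rejected, never reaches execution
      return self._next.next_test(test)

  class Executor(Proc):
    def next_test(self, test):
      self.result_for(test, test * test)  # result flows back up the chain
      return True

  reporter, flt, executor = Reporter(), EvenFilter(), Executor()
  reporter.connect_to(flt)
  flt.connect_to(executor)
  for t in range(4):
    reporter.next_test(t)                 # prints results for 0 and 2 only
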
diff --git a/src/v8/tools/testrunner/test_config.py b/src/v8/tools/testrunner/test_config.py
new file mode 100644
index 0000000..27ac72b
--- /dev/null
+++ b/src/v8/tools/testrunner/test_config.py
@@ -0,0 +1,34 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import random
+
+from .utils import random_utils
+
+
+class TestConfig(object):
+  def __init__(self,
+               command_prefix,
+               extra_flags,
+               isolates,
+               mode_flags,
+               no_harness,
+               noi18n,
+               random_seed,
+               run_skipped,
+               shell_dir,
+               timeout,
+               verbose):
+    self.command_prefix = command_prefix
+    self.extra_flags = extra_flags
+    self.isolates = isolates
+    self.mode_flags = mode_flags
+    self.no_harness = no_harness
+    self.noi18n = noi18n
+    # random_seed is always not None.
+    self.random_seed = random_seed or random_utils.random_seed()
+    self.run_skipped = run_skipped
+    self.shell_dir = shell_dir
+    self.timeout = timeout
+    self.verbose = verbose
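
The config above is a plain value object; its only behavior is the seed fallback. A small sketch of that pattern, with a local stand-in for random_utils.random_seed() (the loop mirrors the seed generator removed from standard_runner.py):

  # Illustrative sketch only -- _random_seed stands in for
  # random_utils.random_seed().
  import random

  def _random_seed():
    seed = 0
    while not seed:                       # 0 means "not set", so retry
      seed = random.SystemRandom().randint(-2147483648, 2147483647)
    return seed

  class Config(object):
    def __init__(self, random_seed=None):
      # Both None and 0 mean "no seed given", hence the `or` fallback.
      self.random_seed = random_seed or _random_seed()

  print(Config().random_seed)                # freshly generated, never 0/None
  print(Config(random_seed=42).random_seed)  # explicit seed is kept
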
diff --git a/src/v8/tools/testrunner/testproc/base.py b/src/v8/tools/testrunner/testproc/base.py
index 1a87dbe..c52c779 100644
--- a/src/v8/tools/testrunner/testproc/base.py
+++ b/src/v8/tools/testrunner/testproc/base.py
@@ -37,36 +37,12 @@
 DROP_PASS_OUTPUT = 2
 DROP_PASS_STDOUT = 3
 
-def get_reduce_result_function(requirement):
-  if requirement == DROP_RESULT:
-    return lambda _: None
-
-  if requirement == DROP_OUTPUT:
-    def f(result):
-      result.output = None
-      return result
-    return f
-
-  if requirement == DROP_PASS_OUTPUT:
-    def f(result):
-      if not result.has_unexpected_output:
-        result.output = None
-      return result
-    return f
-
-  if requirement == DROP_PASS_STDOUT:
-    def f(result):
-      if not result.has_unexpected_output:
-        result.output.stdout = None
-        result.output.stderr = None
-      return result
-    return f
-
 
 class TestProc(object):
   def __init__(self):
     self._prev_proc = None
     self._next_proc = None
+    self._stopped = False
     self._requirement = DROP_RESULT
     self._prev_requirement = None
     self._reduce_result = lambda result: result
@@ -90,13 +66,21 @@
     self._prev_requirement = requirement
     if self._next_proc:
       self._next_proc.setup(max(requirement, self._requirement))
-    if self._prev_requirement < self._requirement:
-      self._reduce_result = get_reduce_result_function(self._prev_requirement)
+
+    # Since we don't gain anything by dropping only part of the result, we
+    # either drop the whole result or pass it on unchanged. The real reduction
+    # happens during result creation (in the output processor), so the result
+    # is immutable.
+    if (self._prev_requirement < self._requirement and
+        self._prev_requirement == DROP_RESULT):
+      self._reduce_result = lambda _: None
 
   def next_test(self, test):
     """
     Method called by previous processor whenever it produces new test.
     This method shouldn't be called by anyone except previous processor.
+    Returns a boolean value to signal whether the test was loaded into the
+    execution queue successfully or not.
     """
     raise NotImplementedError()
 
@@ -111,15 +95,28 @@
     if self._prev_proc:
       self._prev_proc.heartbeat()
 
+  def stop(self):
+    if not self._stopped:
+      self._stopped = True
+      if self._prev_proc:
+        self._prev_proc.stop()
+      if self._next_proc:
+        self._next_proc.stop()
+
+  @property
+  def is_stopped(self):
+    return self._stopped
+
   ### Communication
 
   def _send_test(self, test):
     """Helper method for sending test to the next processor."""
-    self._next_proc.next_test(test)
+    return self._next_proc.next_test(test)
 
   def _send_result(self, test, result):
     """Helper method for sending result to the previous processor."""
-    result = self._reduce_result(result)
+    if not test.keep_output:
+      result = self._reduce_result(result)
     self._prev_proc.result_for(test, result)
 
 
@@ -131,7 +128,7 @@
 
   def next_test(self, test):
     self._on_next_test(test)
-    self._send_test(test)
+    return self._send_test(test)
 
   def result_for(self, test, result):
     self._on_result_for(test, result)
@@ -163,7 +160,7 @@
     self._name = name
 
   def next_test(self, test):
-    self._next_test(test)
+    return self._next_test(test)
 
   def result_for(self, subtest, result):
     self._result_for(subtest.origin, subtest, result)
@@ -195,9 +192,9 @@
 
   def next_test(self, test):
     if self._filter(test):
-      self._send_result(test, SKIPPED)
-    else:
-      self._send_test(test)
+      return False
+
+    return self._send_test(test)
 
   def result_for(self, test, result):
     self._send_result(test, result)
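
The setup() change above replaces the per-level reduce functions with an all-or-nothing choice: if the upstream processor needs nothing (DROP_RESULT) while this one needs more, the result sent back is dropped entirely; otherwise it is passed through untouched. A sketch of that decision (constant values assumed to follow the DROP_* ordering at the top of the file):

  # Illustrative only; values mirror the DROP_* constant ordering.
  DROP_RESULT, DROP_OUTPUT, DROP_PASS_OUTPUT, DROP_PASS_STDOUT = range(4)

  def make_reducer(prev_requirement, own_requirement):
    if prev_requirement < own_requirement and prev_requirement == DROP_RESULT:
      return lambda _: None          # upstream needs nothing: drop the result
    return lambda result: result     # otherwise pass it through unchanged

  reduce_result = make_reducer(DROP_RESULT, DROP_OUTPUT)
  print(reduce_result('some result'))   # -> None
  reduce_result = make_reducer(DROP_OUTPUT, DROP_PASS_STDOUT)
  print(reduce_result('some result'))   # -> 'some result'
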
diff --git a/src/v8/tools/testrunner/testproc/combiner.py b/src/v8/tools/testrunner/testproc/combiner.py
new file mode 100644
index 0000000..4d992f4
--- /dev/null
+++ b/src/v8/tools/testrunner/testproc/combiner.py
@@ -0,0 +1,127 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+from collections import defaultdict
+import time
+
+from . import base
+from ..objects import testcase
+from ..outproc import base as outproc
+
+class CombinerProc(base.TestProc):
+  def __init__(self, rng, min_group_size, max_group_size, count):
+    """
+    Args:
+      rng: random number generator
+      min_group_size: minimum number of tests to combine
+      max_group_size: maximum number of tests to combine
+      count: how many tests to generate. 0 means infinite running
+    """
+    super(CombinerProc, self).__init__()
+
+    self._rng = rng
+    self._min_size = min_group_size
+    self._max_size = max_group_size
+    self._count = count
+
+    # Index of the last generated test
+    self._current_num = 0
+
+    # {suite name: instance of TestGroups}
+    self._groups = defaultdict(TestGroups)
+
+    # {suite name: instance of TestCombiner}
+    self._combiners = {}
+
+  def setup(self, requirement=base.DROP_RESULT):
+    # Combiner is not able to pass results (even as None) to the previous
+    # processor.
+    assert requirement == base.DROP_RESULT
+    self._next_proc.setup(base.DROP_RESULT)
+
+  def next_test(self, test):
+    group_key = self._get_group_key(test)
+    if not group_key:
+      # Test not suitable for combining
+      return False
+
+    self._groups[test.suite.name].add_test(group_key, test)
+    return True
+
+  def _get_group_key(self, test):
+    combiner = self._get_combiner(test.suite)
+    if not combiner:
+      print ('>>> Warning: There is no combiner for %s testsuite' %
+             test.suite.name)
+      return None
+    return combiner.get_group_key(test)
+
+  def result_for(self, test, result):
+    self._send_next_test()
+
+  def generate_initial_tests(self, num=1):
+    for _ in range(0, num):
+      self._send_next_test()
+
+  def _send_next_test(self):
+    if self.is_stopped:
+      return False
+
+    if self._count and self._current_num >= self._count:
+      return False
+
+    combined_test = self._create_new_test()
+    if not combined_test:
+      # Not enough tests
+      return False
+
+    return self._send_test(combined_test)
+
+  def _create_new_test(self):
+    suite, combiner = self._select_suite()
+    groups = self._groups[suite]
+
+    max_size = self._rng.randint(self._min_size, self._max_size)
+    sample = groups.sample(self._rng, max_size)
+    if not sample:
+      return None
+
+    self._current_num += 1
+    return combiner.combine('%s-%d' % (suite, self._current_num), sample)
+
+  def _select_suite(self):
+    """Returns pair (suite name, combiner)."""
+    selected = self._rng.randint(0, len(self._groups) - 1)
+    for n, suite in enumerate(self._groups):
+      if n == selected:
+        return suite, self._combiners[suite]
+
+  def _get_combiner(self, suite):
+    combiner = self._combiners.get(suite.name)
+    if not combiner:
+      combiner = suite.get_test_combiner()
+      self._combiners[suite.name] = combiner
+    return combiner
+
+
+class TestGroups(object):
+  def __init__(self):
+    self._groups = defaultdict(list)
+    self._keys = []
+
+  def add_test(self, key, test):
+    self._groups[key].append(test)
+    self._keys.append(key)
+
+  def sample(self, rng, max_size):
+    # Not enough tests
+    if not self._groups:
+      return None
+
+    group_key = rng.choice(self._keys)
+    tests = self._groups[group_key]
+    return [rng.choice(tests) for _ in range(0, max_size)]
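
A note on the sampling above: add_test() appends the key once per added test, so rng.choice over self._keys picks groups in proportion to their size, and tests within the chosen group are then drawn with replacement. A standalone sketch:

  # Illustrative only -- mirrors TestGroups.add_test()/sample() above.
  import random
  from collections import defaultdict

  groups = defaultdict(list)
  keys = []
  for key, test in [('group-a', 't1'), ('group-a', 't2'), ('group-b', 't3')]:
    groups[key].append(test)
    keys.append(key)            # duplicated keys weight larger groups

  rng = random.Random(42)
  group_key = rng.choice(keys)  # 'group-a' twice as likely as 'group-b'
  tests = groups[group_key]
  print([rng.choice(tests) for _ in range(4)])  # drawn with replacement
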
diff --git a/src/v8/tools/testrunner/testproc/execution.py b/src/v8/tools/testrunner/testproc/execution.py
index 021b02a..68ecf45 100644
--- a/src/v8/tools/testrunner/testproc/execution.py
+++ b/src/v8/tools/testrunner/testproc/execution.py
@@ -15,12 +15,12 @@
   return job.run(process_context)
 
 
-def create_process_context(requirement):
-  return ProcessContext(base.get_reduce_result_function(requirement))
+def create_process_context(result_reduction):
+  return ProcessContext(result_reduction)
 
 
 JobResult = collections.namedtuple('JobResult', ['id', 'result'])
-ProcessContext = collections.namedtuple('ProcessContext', ['reduce_result_f'])
+ProcessContext = collections.namedtuple('ProcessContext', ['result_reduction'])
 
 
 class Job(object):
@@ -32,9 +32,8 @@
 
   def run(self, process_ctx):
     output = self.cmd.execute()
-    result = self.outproc.process(output)
-    if not self.keep_output:
-      result = process_ctx.reduce_result_f(result)
+    reduction = process_ctx.result_reduction if not self.keep_output else None
+    result = self.outproc.process(output, reduction)
     return JobResult(self.test_id, result)
 
 
@@ -44,49 +43,53 @@
   sends results to the previous processor.
   """
 
-  def __init__(self, jobs, context):
+  def __init__(self, jobs, outproc_factory=None):
     super(ExecutionProc, self).__init__()
     self._pool = pool.Pool(jobs)
-    self._context = context
+    self._outproc_factory = outproc_factory or (lambda t: t.output_proc)
     self._tests = {}
 
   def connect_to(self, next_proc):
     assert False, 'ExecutionProc cannot be connected to anything'
 
-  def start(self):
-    try:
-      it = self._pool.imap_unordered(
+  def run(self):
+    it = self._pool.imap_unordered(
         fn=run_job,
         gen=[],
         process_context_fn=create_process_context,
         process_context_args=[self._prev_requirement],
-      )
-      for pool_result in it:
-        if pool_result.heartbeat:
-          continue
-
-        job_result = pool_result.value
-        test_id, result = job_result
-
-        test, result.cmd = self._tests[test_id]
-        del self._tests[test_id]
-        self._send_result(test, result)
-    except KeyboardInterrupt:
-      raise
-    except:
-      traceback.print_exc()
-      raise
-    finally:
-      self._pool.terminate()
+    )
+    for pool_result in it:
+      self._unpack_result(pool_result)
 
   def next_test(self, test):
+    if self.is_stopped:
+      return False
+
     test_id = test.procid
-    cmd = test.get_command(self._context)
+    cmd = test.get_command()
     self._tests[test_id] = test, cmd
 
-    # TODO(majeski): Needs factory for outproc as in local/execution.py
-    outproc = test.output_proc
+    outproc = self._outproc_factory(test)
     self._pool.add([Job(test_id, cmd, outproc, test.keep_output)])
 
+    return True
+
   def result_for(self, test, result):
     assert False, 'ExecutionProc cannot receive results'
+
+  def stop(self):
+    super(ExecutionProc, self).stop()
+    self._pool.abort()
+
+  def _unpack_result(self, pool_result):
+    if pool_result.heartbeat:
+      self.heartbeat()
+      return
+
+    job_result = pool_result.value
+    test_id, result = job_result
+
+    test, result.cmd = self._tests[test_id]
+    del self._tests[test_id]
+    self._send_result(test, result)
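
Two details of the execution changes above: the output processor now comes from an injectable outproc_factory (defaulting to the test's own output_proc), and the result reduction is handed to outproc.process() only when the test does not need to keep its output. A sketch of the keep_output gate (here the reduction is applied directly rather than passed to an output processor):

  # Illustrative only -- the real code hands `reduction` to outproc.process().
  def run_job(output, result_reduction, keep_output):
    result = {'output': output}           # stand-in for a processed result
    reduction = result_reduction if not keep_output else None
    if reduction:
      result = reduction(result)
    return result

  drop_everything = lambda _: None
  print(run_job('stdout...', drop_everything, keep_output=False))  # None
  print(run_job('stdout...', drop_everything, keep_output=True))   # kept
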
diff --git a/src/v8/tools/testrunner/testproc/expectation.py b/src/v8/tools/testrunner/testproc/expectation.py
new file mode 100644
index 0000000..fdc9e3e
--- /dev/null
+++ b/src/v8/tools/testrunner/testproc/expectation.py
@@ -0,0 +1,28 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from . import base
+
+from testrunner.local import statusfile
+from testrunner.outproc import base as outproc
+
+class ForgiveTimeoutProc(base.TestProcProducer):
+  """Test processor passing tests and results through and forgiving timeouts."""
+  def __init__(self):
+    super(ForgiveTimeoutProc, self).__init__('no-timeout')
+
+  def _next_test(self, test):
+    subtest = self._create_subtest(test, 'no_timeout')
+    if subtest.expected_outcomes == outproc.OUTCOMES_PASS:
+      subtest.expected_outcomes = outproc.OUTCOMES_PASS_OR_TIMEOUT
+    elif subtest.expected_outcomes == outproc.OUTCOMES_FAIL:
+      subtest.expected_outcomes = outproc.OUTCOMES_FAIL_OR_TIMEOUT
+    elif statusfile.TIMEOUT not in subtest.expected_outcomes:
+      subtest.expected_outcomes = (
+          subtest.expected_outcomes + [statusfile.TIMEOUT])
+
+    return self._send_test(subtest)
+
+  def _result_for(self, test, subtest, result):
+    self._send_result(test, result)
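
ForgiveTimeoutProc only rewrites the expected outcomes of each subtest so that a TIMEOUT no longer counts as unexpected. The same transformation, with stand-in values for the statusfile/outproc constants:

  # Illustrative only; PASS/FAIL/TIMEOUT are stand-ins for the real constants.
  PASS, FAIL, TIMEOUT = 'PASS', 'FAIL', 'TIMEOUT'
  OUTCOMES_PASS = [PASS]
  OUTCOMES_PASS_OR_TIMEOUT = [PASS, TIMEOUT]
  OUTCOMES_FAIL = [FAIL]
  OUTCOMES_FAIL_OR_TIMEOUT = [FAIL, TIMEOUT]

  def forgive_timeout(expected):
    if expected == OUTCOMES_PASS:
      return OUTCOMES_PASS_OR_TIMEOUT
    if expected == OUTCOMES_FAIL:
      return OUTCOMES_FAIL_OR_TIMEOUT
    if TIMEOUT not in expected:
      return expected + [TIMEOUT]
    return expected

  print(forgive_timeout([PASS]))           # ['PASS', 'TIMEOUT']
  print(forgive_timeout([FAIL, TIMEOUT]))  # already forgiving, unchanged
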
diff --git a/src/v8/tools/testrunner/testproc/filter.py b/src/v8/tools/testrunner/testproc/filter.py
index 5081997..e2a5e97 100644
--- a/src/v8/tools/testrunner/testproc/filter.py
+++ b/src/v8/tools/testrunner/testproc/filter.py
@@ -59,25 +59,25 @@
     super(NameFilterProc, self).__init__()
 
     self._globs = defaultdict(list)
+    self._exact_matches = defaultdict(dict)
     for a in args:
       argpath = a.split('/')
       suitename = argpath[0]
       path = '/'.join(argpath[1:]) or '*'
-      self._globs[suitename].append(path)
+      if '*' in path:
+        self._globs[suitename].append(path)
+      else:
+        self._exact_matches[suitename][path] = True
 
     for s, globs in self._globs.iteritems():
       if not globs or '*' in globs:
-        self._globs[s] = []
+        self._globs[s] = ['*']
 
   def _filter(self, test):
-    globs = self._globs.get(test.suite.name)
-    if globs is None:
-      return True
-
-    if not globs:
-      return False
-
+    globs = self._globs.get(test.suite.name, [])
     for g in globs:
+      if g == '*': return False
       if fnmatch.fnmatch(test.path, g):
         return False
-    return True
+    exact_matches = self._exact_matches.get(test.suite.name, {})
+    return test.path not in exact_matches
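
The filter change splits test-name arguments into glob patterns (anything containing '*') and exact paths, so exact names become a dictionary lookup instead of repeated fnmatch calls. A standalone sketch of the resulting decision:

  # Illustrative only -- same logic as NameFilterProc._filter above
  # (returns True when the test should be filtered out).
  import fnmatch

  globs, exact = [], {}
  for path in ['regress/*', 'array-sort']:
    if '*' in path:
      globs.append(path)
    else:
      exact[path] = True

  def is_filtered_out(test_path):
    for g in globs:
      if g == '*' or fnmatch.fnmatch(test_path, g):
        return False
    return test_path not in exact

  print(is_filtered_out('regress/regress-123'))  # False: matches a glob
  print(is_filtered_out('array-sort'))           # False: exact match
  print(is_filtered_out('unrelated-test'))       # True: filtered out
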
diff --git a/src/v8/tools/testrunner/testproc/fuzzer.py b/src/v8/tools/testrunner/testproc/fuzzer.py
new file mode 100644
index 0000000..187145b
--- /dev/null
+++ b/src/v8/tools/testrunner/testproc/fuzzer.py
@@ -0,0 +1,287 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from collections import namedtuple
+import time
+
+from . import base
+
+
+class FuzzerConfig(object):
+  def __init__(self, probability, analyzer, fuzzer):
+    """
+    Args:
+      probability: probability of choosing this fuzzer, in the range (0, 10]
+      analyzer: instance of Analyzer class, can be None if no analysis is needed
+      fuzzer: instance of Fuzzer class
+    """
+    assert probability > 0 and probability <= 10
+
+    self.probability = probability
+    self.analyzer = analyzer
+    self.fuzzer = fuzzer
+
+
+class Analyzer(object):
+  def get_analysis_flags(self):
+    raise NotImplementedError()
+
+  def do_analysis(self, result):
+    raise NotImplementedError()
+
+
+class Fuzzer(object):
+  def create_flags_generator(self, rng, test, analysis_value):
+    """
+    Args:
+      rng: random number generator
+      test: test for which to create flags
+      analysis_value: value returned by the analyzer. None if there is no
+        corresponding analyzer to this fuzzer or the analysis phase is disabled
+    """
+    raise NotImplementedError()
+
+
+# TODO(majeski): Allow multiple subtests to run at once.
+class FuzzerProc(base.TestProcProducer):
+  def __init__(self, rng, count, fuzzers, disable_analysis=False):
+    """
+    Args:
+      rng: random number generator used to select flags and values for them
+      count: number of tests to generate based on each base test
+      fuzzers: list of FuzzerConfig instances
+      disable_analysis: disable the analysis phase and filtering based on it.
+        When set, the processor passes None as the analysis result to fuzzers
+    """
+    super(FuzzerProc, self).__init__('Fuzzer')
+
+    self._rng = rng
+    self._count = count
+    self._fuzzer_configs = fuzzers
+    self._disable_analysis = disable_analysis
+    self._gens = {}
+
+  def setup(self, requirement=base.DROP_RESULT):
+    # Fuzzer is optimized to not store the results
+    assert requirement == base.DROP_RESULT
+    super(FuzzerProc, self).setup(requirement)
+
+  def _next_test(self, test):
+    if self.is_stopped:
+      return False
+
+    analysis_subtest = self._create_analysis_subtest(test)
+    if analysis_subtest:
+      return self._send_test(analysis_subtest)
+
+    self._gens[test.procid] = self._create_gen(test)
+    return self._try_send_next_test(test)
+
+  def _create_analysis_subtest(self, test):
+    if self._disable_analysis:
+      return None
+
+    analysis_flags = []
+    for fuzzer_config in self._fuzzer_configs:
+      if fuzzer_config.analyzer:
+        analysis_flags += fuzzer_config.analyzer.get_analysis_flags()
+
+    if analysis_flags:
+      analysis_flags = list(set(analysis_flags))
+      return self._create_subtest(test, 'analysis', flags=analysis_flags,
+                                  keep_output=True)
+
+
+  def _result_for(self, test, subtest, result):
+    if not self._disable_analysis:
+      if result is not None:
+        # Analysis phase, for fuzzing we drop the result.
+        if result.has_unexpected_output:
+          self._send_result(test, None)
+          return
+
+        self._gens[test.procid] = self._create_gen(test, result)
+
+    self._try_send_next_test(test)
+
+  def _create_gen(self, test, analysis_result=None):
+    # This is called with analysis_result==None only when there is no
+    # analysis phase at all, i.e. no fuzzer has its own analyzer.
+    gens = []
+    indexes = []
+    for i, fuzzer_config in enumerate(self._fuzzer_configs):
+      analysis_value = None
+      if analysis_result and fuzzer_config.analyzer:
+        analysis_value = fuzzer_config.analyzer.do_analysis(analysis_result)
+        if not analysis_value:
+          # Skip fuzzer for this test since it doesn't have analysis data
+          continue
+      p = fuzzer_config.probability
+      flag_gen = fuzzer_config.fuzzer.create_flags_generator(self._rng, test,
+                                                             analysis_value)
+      indexes += [len(gens)] * p
+      gens.append((p, flag_gen))
+
+    if not gens:
+      # No fuzzers for this test, skip it
+      return
+
+    i = 0
+    while not self._count or i < self._count:
+      main_index = self._rng.choice(indexes)
+      _, main_gen = gens[main_index]
+
+      flags = next(main_gen)
+      for index, (p, gen) in enumerate(gens):
+        if index == main_index:
+          continue
+        if self._rng.randint(1, 10) <= p:
+          flags += next(gen)
+
+      flags.append('--fuzzer-random-seed=%s' % self._next_seed())
+      yield self._create_subtest(test, str(i), flags=flags)
+
+      i += 1
+
+  def _try_send_next_test(self, test):
+    if not self.is_stopped:
+      for subtest in self._gens[test.procid]:
+        if self._send_test(subtest):
+          return True
+
+    del self._gens[test.procid]
+    return False
+
+  def _next_seed(self):
+    seed = None
+    while not seed:
+      seed = self._rng.randint(-2147483648, 2147483647)
+    return seed
+
+
+class ScavengeAnalyzer(Analyzer):
+  def get_analysis_flags(self):
+    return ['--fuzzer-gc-analysis']
+
+  def do_analysis(self, result):
+    for line in reversed(result.output.stdout.splitlines()):
+      if line.startswith('### Maximum new space size reached = '):
+        return int(float(line.split()[7]))
+
+
+class ScavengeFuzzer(Fuzzer):
+  def create_flags_generator(self, rng, test, analysis_value):
+    while True:
+      yield ['--stress-scavenge=%d' % (analysis_value or 100)]
+
+
+class MarkingAnalyzer(Analyzer):
+  def get_analysis_flags(self):
+    return ['--fuzzer-gc-analysis']
+
+  def do_analysis(self, result):
+    for line in reversed(result.output.stdout.splitlines()):
+      if line.startswith('### Maximum marking limit reached = '):
+        return int(float(line.split()[6]))
+
+
+class MarkingFuzzer(Fuzzer):
+  def create_flags_generator(self, rng, test, analysis_value):
+    while True:
+      yield ['--stress-marking=%d' % (analysis_value or 100)]
+
+
+class GcIntervalAnalyzer(Analyzer):
+  def get_analysis_flags(self):
+    return ['--fuzzer-gc-analysis']
+
+  def do_analysis(self, result):
+    for line in reversed(result.output.stdout.splitlines()):
+      if line.startswith('### Allocations = '):
+        return int(float(line.split()[3][:-1]))
+
+
+class GcIntervalFuzzer(Fuzzer):
+  def create_flags_generator(self, rng, test, analysis_value):
+    if analysis_value:
+      value = analysis_value / 10
+    else:
+      value = 10000
+    while True:
+      yield ['--random-gc-interval=%d' % value]
+
+
+class CompactionFuzzer(Fuzzer):
+  def create_flags_generator(self, rng, test, analysis_value):
+    while True:
+      yield ['--stress-compaction-random']
+
+
+class TaskDelayFuzzer(Fuzzer):
+  def create_flags_generator(self, rng, test, analysis_value):
+    while True:
+      yield ['--stress-delay-tasks']
+
+
+class ThreadPoolSizeFuzzer(Fuzzer):
+  def create_flags_generator(self, rng, test, analysis_value):
+    while True:
+      yield ['--thread-pool-size=%d' % rng.randint(1, 8)]
+
+
+class DeoptAnalyzer(Analyzer):
+  MAX_DEOPT=1000000000
+
+  def __init__(self, min_interval):
+    super(DeoptAnalyzer, self).__init__()
+    self._min = min_interval
+
+  def get_analysis_flags(self):
+    return ['--deopt-every-n-times=%d' % self.MAX_DEOPT,
+            '--print-deopt-stress']
+
+  def do_analysis(self, result):
+    for line in reversed(result.output.stdout.splitlines()):
+      if line.startswith('=== Stress deopt counter: '):
+        counter = self.MAX_DEOPT - int(line.split(' ')[-1])
+        if counter < self._min:
+          # Skip this test since we won't generate any meaningful interval
+          # with the given minimum.
+          return None
+        return counter
+
+
+class DeoptFuzzer(Fuzzer):
+  def __init__(self, min_interval):
+    super(DeoptFuzzer, self).__init__()
+    self._min = min_interval
+
+  def create_flags_generator(self, rng, test, analysis_value):
+    while True:
+      if analysis_value:
+        value = analysis_value / 2
+      else:
+        value = 10000
+      interval = rng.randint(self._min, max(value, self._min))
+      yield ['--deopt-every-n-times=%d' % interval]
+
+
+FUZZERS = {
+  'compaction': (None, CompactionFuzzer),
+  'delay': (None, TaskDelayFuzzer),
+  'deopt': (DeoptAnalyzer, DeoptFuzzer),
+  'gc_interval': (GcIntervalAnalyzer, GcIntervalFuzzer),
+  'marking': (MarkingAnalyzer, MarkingFuzzer),
+  'scavenge': (ScavengeAnalyzer, ScavengeFuzzer),
+  'threads': (None, ThreadPoolSizeFuzzer),
+}
+
+
+def create_fuzzer_config(name, probability, *args, **kwargs):
+  analyzer_class, fuzzer_class = FUZZERS[name]
+  return FuzzerConfig(
+      probability,
+      analyzer_class(*args, **kwargs) if analyzer_class else None,
+      fuzzer_class(*args, **kwargs),
+  )
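
The weighting in FuzzerProc._create_gen works by repeating each fuzzer's index `probability` times (1-10) in a flat list, so rng.choice over that list picks the main fuzzer proportionally; each non-chosen fuzzer then still contributes flags with probability p/10. A sketch of the main selection:

  # Illustrative only -- mirrors the indexes/gens bookkeeping above.
  import random

  configs = [('scavenge', 2), ('marking', 3), ('threads', 5)]  # (name, p)
  gens, indexes = [], []
  for name, p in configs:
    indexes += [len(gens)] * p    # index repeated p times
    gens.append(name)

  rng = random.Random(0)
  picks = [gens[rng.choice(indexes)] for _ in range(1000)]
  for name, _ in configs:
    print(name, picks.count(name))   # roughly proportional to 2:3:5
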
diff --git a/src/v8/tools/testrunner/testproc/loader.py b/src/v8/tools/testrunner/testproc/loader.py
index 0a3d0df..f4afeae 100644
--- a/src/v8/tools/testrunner/testproc/loader.py
+++ b/src/v8/tools/testrunner/testproc/loader.py
@@ -9,19 +9,34 @@
   """First processor in the chain that passes all tests to the next processor.
   """
 
-  def load_tests(self, tests):
-    loaded = set()
-    for test in tests:
-      if test.procid in loaded:
-        print 'Warning: %s already obtained' % test.procid
-        continue
+  def __init__(self, tests):
+    super(LoadProc, self).__init__()
 
-      loaded.add(test.procid)
-      self._send_test(test)
+    self.tests = tests
+
+  def load_initial_tests(self, initial_batch_size):
+    """
+    Args:
+      exec_proc: execution processor that the tests are being loaded into
+      initial_batch_size: initial number of tests to load
+    """
+    loaded_tests = 0
+    while loaded_tests < initial_batch_size:
+      try:
+        t = next(self.tests)
+      except StopIteration:
+        return
+
+      if self._send_test(t):
+        loaded_tests += 1
 
   def next_test(self, test):
     assert False, 'Nothing can be connected to the LoadProc'
 
   def result_for(self, test, result):
-    # Ignore all results.
-    pass
+    try:
+      while not self._send_test(next(self.tests)):
+        pass
+    except StopIteration:
+      # No more tests to load.
+      pass
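A rough sketch of the new loading contract (hedged: assumes the processor chain is already wired up; the iterator below is a placeholder):

from testrunner.testproc.loader import LoadProc

all_tests = iter([])  # placeholder for the suites' test generator
loader = LoadProc(all_tests)
# Prime the pipeline with a fixed-size batch; from then on, every result that
# comes back triggers loading of exactly one more test.
loader.load_initial_tests(initial_batch_size=16)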
diff --git a/src/v8/tools/testrunner/testproc/progress.py b/src/v8/tools/testrunner/testproc/progress.py
index 78514f7..aad6740 100644
--- a/src/v8/tools/testrunner/testproc/progress.py
+++ b/src/v8/tools/testrunner/testproc/progress.py
@@ -2,13 +2,22 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import json
 import os
+import platform
+import subprocess
 import sys
 import time
 
 from . import base
-from ..local import junit_output
+
+
+# Base dir of the build products for Release and Debug.
+OUT_DIR = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '..', '..', '..', 'out'))
 
 
 def print_failure_header(test):
@@ -16,29 +25,22 @@
     negative_marker = '[negative] '
   else:
     negative_marker = ''
-  print "=== %(label)s %(negative)s===" % {
+  print("=== %(label)s %(negative)s===" % {
     'label': test,
     'negative': negative_marker,
-  }
-
-
-class TestsCounter(base.TestProcObserver):
-  def __init__(self):
-    super(TestsCounter, self).__init__()
-    self.total = 0
-
-  def _on_next_test(self, test):
-    self.total += 1
+  })
 
 
 class ResultsTracker(base.TestProcObserver):
-  def __init__(self):
+  """Tracks number of results and stops to run tests if max_failures reached."""
+  def __init__(self, max_failures):
     super(ResultsTracker, self).__init__()
     self._requirement = base.DROP_OUTPUT
 
     self.failed = 0
     self.remaining = 0
     self.total = 0
+    self.max_failures = max_failures
 
   def _on_next_test(self, test):
     self.total += 1
@@ -48,6 +50,9 @@
     self.remaining -= 1
     if result.has_unexpected_output:
       self.failed += 1
+      if self.max_failures and self.failed >= self.max_failures:
+        print('>>> Too many failures, exiting...')
+        self.stop()
 
 
 class ProgressIndicator(base.TestProcObserver):
@@ -61,10 +66,6 @@
     self._requirement = base.DROP_PASS_OUTPUT
 
     self._failed = []
-    self._total = 0
-
-  def _on_next_test(self, test):
-    self._total += 1
 
   def _on_result_for(self, test, result):
     # TODO(majeski): Support for dummy/grouped results
@@ -73,36 +74,45 @@
 
   def finished(self):
     crashed = 0
-    print
+    print()
     for test, result in self._failed:
       print_failure_header(test)
       if result.output.stderr:
-        print "--- stderr ---"
-        print result.output.stderr.strip()
+        print("--- stderr ---")
+        print(result.output.stderr.strip())
       if result.output.stdout:
-        print "--- stdout ---"
-        print result.output.stdout.strip()
-      print "Command: %s" % result.cmd.to_string()
+        print("--- stdout ---")
+        print(result.output.stdout.strip())
+      print("Command: %s" % result.cmd.to_string())
       if result.output.HasCrashed():
-        print "exit code: %d" % result.output.exit_code
-        print "--- CRASHED ---"
+        print("exit code: %d" % result.output.exit_code)
+        print("--- CRASHED ---")
         crashed += 1
       if result.output.HasTimedOut():
-        print "--- TIMEOUT ---"
+        print("--- TIMEOUT ---")
     if len(self._failed) == 0:
-      print "==="
-      print "=== All tests succeeded"
-      print "==="
+      print("===")
+      print("=== All tests succeeded")
+      print("===")
     else:
-      print
-      print "==="
-      print "=== %i tests failed" % len(self._failed)
+      print()
+      print("===")
+      print("=== %i tests failed" % len(self._failed))
       if crashed > 0:
-        print "=== %i tests CRASHED" % crashed
-      print "==="
+        print("=== %i tests CRASHED" % crashed)
+      print("===")
 
 
 class VerboseProgressIndicator(SimpleProgressIndicator):
+  def __init__(self):
+    super(VerboseProgressIndicator, self).__init__()
+    self._last_printed_time = time.time()
+
+  def _print(self, text):
+    print(text)
+    sys.stdout.flush()
+    self._last_printed_time = time.time()
+
   def _on_result_for(self, test, result):
     super(VerboseProgressIndicator, self)._on_result_for(test, result)
     # TODO(majeski): Support for dummy/grouped results
@@ -113,12 +123,31 @@
         outcome = 'FAIL'
     else:
       outcome = 'pass'
-    print 'Done running %s: %s' % (test, outcome)
-    sys.stdout.flush()
+
+    self._print('Done running %s %s: %s' % (
+      test, test.variant or 'default', outcome))
+
+  # TODO(machenbach): Remove this platform-specific hack and implement a proper
+  # feedback channel from the workers that reports the tests currently running.
+  def _print_processes_linux(self):
+    if platform.system() == 'Linux':
+      try:
+        cmd = 'ps -aux | grep "%s"' % OUT_DIR
+        output = subprocess.check_output(cmd, shell=True)
+        self._print('List of processes:')
+        for line in (output or '').splitlines():
+          # Show command with pid, but other process info cut off.
+          self._print('pid: %s cmd: %s' %
+                      (line.split()[1], line[line.index(OUT_DIR):]))
+      except:
+        pass
 
   def _on_heartbeat(self):
-    print 'Still working...'
-    sys.stdout.flush()
+    if time.time() - self._last_printed_time > 30:
+      # Print something every 30 seconds to not get killed by an output
+      # timeout.
+      self._print('Still working...')
+      self._print_processes_linux()
 
 
 class DotsProgressIndicator(SimpleProgressIndicator):
@@ -127,6 +156,7 @@
     self._count = 0
 
   def _on_result_for(self, test, result):
+    super(DotsProgressIndicator, self)._on_result_for(test, result)
     # TODO(majeski): Support for dummy/grouped results
     self._count += 1
     if self._count > 1 and self._count % 50 == 1:
@@ -155,12 +185,11 @@
     self._last_status_length = 0
     self._start_time = time.time()
 
-    self._total = 0
     self._passed = 0
     self._failed = 0
 
-  def _on_next_test(self, test):
-    self._total += 1
+  def set_test_count(self, test_count):
+    self._total = test_count
 
   def _on_result_for(self, test, result):
     # TODO(majeski): Support for dummy/grouped results
@@ -178,27 +207,27 @@
       self._clear_line(self._last_status_length)
       print_failure_header(test)
       if len(stdout):
-        print self._templates['stdout'] % stdout
+        print(self._templates['stdout'] % stdout)
       if len(stderr):
-        print self._templates['stderr'] % stderr
-      print "Command: %s" % result.cmd
+        print(self._templates['stderr'] % stderr)
+      print("Command: %s" % result.cmd.to_string(relative=True))
       if output.HasCrashed():
-        print "exit code: %d" % output.exit_code
-        print "--- CRASHED ---"
+        print("exit code: %d" % output.exit_code)
+        print("--- CRASHED ---")
       if output.HasTimedOut():
-        print "--- TIMEOUT ---"
+        print("--- TIMEOUT ---")
 
   def finished(self):
     self._print_progress('Done')
-    print
+    print()
 
   def _print_progress(self, name):
     self._clear_line(self._last_status_length)
     elapsed = time.time() - self._start_time
-    if not self._total:
-      progress = 0
-    else:
+    if self._total:
       progress = (self._passed + self._failed) * 100 // self._total
+    else:
+      progress = 0
     status = self._templates['status_line'] % {
       'passed': self._passed,
       'progress': progress,
@@ -209,7 +238,7 @@
     }
     status = self._truncate(status, 78)
     self._last_status_length = len(status)
-    print status,
+    print(status, end='')
     sys.stdout.flush()
 
   def _truncate(self, string, length):
@@ -235,7 +264,7 @@
     super(ColorProgressIndicator, self).__init__(templates)
 
   def _clear_line(self, last_length):
-    print "\033[1K\r",
+    print("\033[1K\r", end='')
 
 
 class MonochromeProgressIndicator(CompactProgressIndicator):
@@ -249,50 +278,11 @@
     super(MonochromeProgressIndicator, self).__init__(templates)
 
   def _clear_line(self, last_length):
-    print ("\r" + (" " * last_length) + "\r"),
-
-
-class JUnitTestProgressIndicator(ProgressIndicator):
-  def __init__(self, junitout, junittestsuite):
-    super(JUnitTestProgressIndicator, self).__init__()
-    self._requirement = base.DROP_PASS_STDOUT
-
-    self.outputter = junit_output.JUnitTestOutput(junittestsuite)
-    if junitout:
-      self.outfile = open(junitout, "w")
-    else:
-      self.outfile = sys.stdout
-
-  def _on_result_for(self, test, result):
-    # TODO(majeski): Support for dummy/grouped results
-    fail_text = ""
-    output = result.output
-    if result.has_unexpected_output:
-      stdout = output.stdout.strip()
-      if len(stdout):
-        fail_text += "stdout:\n%s\n" % stdout
-      stderr = output.stderr.strip()
-      if len(stderr):
-        fail_text += "stderr:\n%s\n" % stderr
-      fail_text += "Command: %s" % result.cmd.to_string()
-      if output.HasCrashed():
-        fail_text += "exit code: %d\n--- CRASHED ---" % output.exit_code
-      if output.HasTimedOut():
-        fail_text += "--- TIMEOUT ---"
-    self.outputter.HasRunTest(
-        test_name=str(test),
-        test_cmd=result.cmd.to_string(relative=True),
-        test_duration=output.duration,
-        test_failure=fail_text)
-
-  def finished(self):
-    self.outputter.FinishAndWrite(self.outfile)
-    if self.outfile != sys.stdout:
-      self.outfile.close()
+    print(("\r" + (" " * last_length) + "\r"), end='')
 
 
 class JsonTestProgressIndicator(ProgressIndicator):
-  def __init__(self, json_test_results, arch, mode, random_seed):
+  def __init__(self, framework_name, json_test_results, arch, mode):
     super(JsonTestProgressIndicator, self).__init__()
     # We want to drop stdout/err for all passed tests on the first try, but we
     # need to get outputs for all runs after the first one. To accommodate that,
@@ -300,10 +290,10 @@
     # keep_output set to True in the RerunProc.
     self._requirement = base.DROP_PASS_STDOUT
 
+    self.framework_name = framework_name
     self.json_test_results = json_test_results
     self.arch = arch
     self.mode = mode
-    self.random_seed = random_seed
     self.results = []
     self.tests = []
 
@@ -338,12 +328,11 @@
         "result": test.output_proc.get_outcome(output),
         "expected": test.expected_outcomes,
         "duration": output.duration,
-
-        # TODO(machenbach): This stores only the global random seed from the
-        # context and not possible overrides when using random-seed stress.
-        "random_seed": self.random_seed,
+        "random_seed": test.random_seed,
         "target_name": test.get_shell(),
         "variant": test.variant,
+        "variant_flags": test.variant_flags,
+        "framework_name": self.framework_name,
       })
 
   def finished(self):
@@ -361,7 +350,7 @@
           float(len(self.tests)))
 
     # Sort tests by duration.
-    self.tests.sort(key=lambda (_, duration, cmd): duration, reverse=True)
+    self.tests.sort(key=lambda __duration_cmd: __duration_cmd[1], reverse=True)
     slowest_tests = [
       {
         "name": str(test),
diff --git a/src/v8/tools/testrunner/testproc/rerun.py b/src/v8/tools/testrunner/testproc/rerun.py
index 7f96e02..d085c55 100644
--- a/src/v8/tools/testrunner/testproc/rerun.py
+++ b/src/v8/tools/testrunner/testproc/rerun.py
@@ -19,7 +19,7 @@
     self._rerun_total_left = rerun_max_total
 
   def _next_test(self, test):
-    self._send_next_subtest(test)
+    return self._send_next_subtest(test)
 
   def _result_for(self, test, subtest, result):
     # First result
@@ -34,7 +34,7 @@
     results = self._results[test.procid]
     results.append(result)
 
-    if self._needs_rerun(test, result):
+    if not self.is_stopped and self._needs_rerun(test, result):
       self._rerun[test.procid] += 1
       if self._rerun_total_left is not None:
         self._rerun_total_left -= 1
@@ -52,7 +52,7 @@
 
   def _send_next_subtest(self, test, run=0):
     subtest = self._create_subtest(test, str(run + 1), keep_output=(run != 0))
-    self._send_test(subtest)
+    return self._send_test(subtest)
 
   def _finalize_test(self, test):
     del self._rerun[test.procid]
diff --git a/src/v8/tools/testrunner/testproc/seed.py b/src/v8/tools/testrunner/testproc/seed.py
new file mode 100644
index 0000000..160eac8
--- /dev/null
+++ b/src/v8/tools/testrunner/testproc/seed.py
@@ -0,0 +1,63 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import random
+from collections import defaultdict
+
+from . import base
+from ..utils import random_utils
+
+
+class SeedProc(base.TestProcProducer):
+  def __init__(self, count, seed=None, parallel_subtests=1):
+    """
+    Args:
+      count: How many subtests with different seeds to create for each test.
+        0 means infinite.
+      seed: seed to use. None means random seed for each subtest.
+      parallel_subtests: How many subtests of each test to run at the same time.
+    """
+    super(SeedProc, self).__init__('Seed')
+    self._count = count
+    self._seed = seed
+    self._last_idx = defaultdict(int)
+    self._todo = defaultdict(int)
+    self._parallel_subtests = parallel_subtests
+    if count:
+      self._parallel_subtests = min(self._parallel_subtests, count)
+
+  def setup(self, requirement=base.DROP_RESULT):
+    super(SeedProc, self).setup(requirement)
+
+    # SeedProc is optimized for dropping the result
+    assert requirement == base.DROP_RESULT
+
+  def _next_test(self, test):
+    is_loaded = False
+    for _ in range(0, self._parallel_subtests):
+      is_loaded |= self._try_send_next_test(test)
+
+    return is_loaded
+
+  def _result_for(self, test, subtest, result):
+    self._todo[test.procid] -= 1
+    if not self._try_send_next_test(test):
+      if not self._todo.get(test.procid):
+        del self._last_idx[test.procid]
+        del self._todo[test.procid]
+        self._send_result(test, None)
+
+  def _try_send_next_test(self, test):
+    def create_subtest(idx):
+      seed = self._seed or random_utils.random_seed()
+      return self._create_subtest(test, idx, random_seed=seed)
+
+    num = self._last_idx[test.procid]
+    if not self._count or num < self._count:
+      num += 1
+      self._todo[test.procid] += 1
+      self._last_idx[test.procid] = num
+      return self._send_test(create_subtest(num))
+
+    return False
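A short configuration sketch (hedged: the command-line plumbing that chooses these values is not part of this hunk):

from testrunner.testproc.seed import SeedProc

# Run each test three times with a fresh random seed per subtest, keeping at
# most two seeded copies of the same test in flight. count=0 would repeat
# indefinitely; an explicit seed pins all copies to that value.
seed_proc = SeedProc(count=3, seed=None, parallel_subtests=2)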
diff --git a/src/v8/tools/testrunner/testproc/shard.py b/src/v8/tools/testrunner/testproc/shard.py
index 1caac9f..9475ea1 100644
--- a/src/v8/tools/testrunner/testproc/shard.py
+++ b/src/v8/tools/testrunner/testproc/shard.py
@@ -5,10 +5,21 @@
 from . import base
 
 
+# Alphabet size determines the hashing radix. Choosing a prime number prevents
+# clustering of the hashes.
+HASHING_ALPHABET_SIZE = 2 ** 7 - 1
+
+def radix_hash(capacity, key):
+  h = 0
+  for character in key:
+    h = (h * HASHING_ALPHABET_SIZE + ord(character)) % capacity
+
+  return h
+
+
 class ShardProc(base.TestProcFilter):
   """Processor distributing tests between shards.
-  It simply passes every n-th test. To be deterministic it has to be placed
-  before all processors that generate tests dynamically.
+  It hashes the unique test identifiers and uses the hash to shard tests.
   """
   def __init__(self, myid, shards_count):
     """
@@ -22,9 +33,6 @@
 
     self._myid = myid
     self._shards_count = shards_count
-    self._last = 0
 
   def _filter(self, test):
-    res = self._last != self._myid
-    self._last = (self._last + 1) % self._shards_count
-    return res
+    return self._myid != radix_hash(self._shards_count, test.procid)
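The effect of the new filter in a hedged sketch (the procid below is illustrative):

from testrunner.testproc.shard import radix_hash

# A test's shard is now a pure function of its procid, so dynamically
# generated subtests no longer perturb the assignment the way the old
# round-robin counter did.
shards_count, myid = 4, 1
procid = 'mjsunit/array-sort'  # illustrative
runs_on_this_shard = (radix_hash(shards_count, procid) == myid)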
diff --git a/src/v8/tools/testrunner/testproc/shard_unittest.py b/src/v8/tools/testrunner/testproc/shard_unittest.py
new file mode 100755
index 0000000..33a094e
--- /dev/null
+++ b/src/v8/tools/testrunner/testproc/shard_unittest.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+import tempfile
+import unittest
+
+# Needed because the test runner contains relative imports.
+TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.dirname(
+    os.path.abspath(__file__))))
+sys.path.append(TOOLS_PATH)
+
+from testrunner.testproc.shard import radix_hash
+
+
+class TestRadixHashing(unittest.TestCase):
+  def test_hash_character_by_radix(self):
+    self.assertEqual(97, radix_hash(capacity=2**32, key="a"))
+
+  def test_hash_character_by_radix_with_capacity(self):
+    self.assertEqual(6, radix_hash(capacity=7, key="a"))
+
+  def test_hash_string(self):
+    self.assertEqual(6, radix_hash(capacity=7, key="ab"))
+
+  def test_hash_test_id(self):
+    self.assertEqual(
+      5,
+      radix_hash(capacity=7,
+                 key="test262/Map/class-private-method-Variant-0-1"))
+
+  def test_hash_boundaries(self):
+    total_variants = 5
+    cases = []
+    for case in [
+      "test262/Map/class-private-method",
+      "test262/Map/class-public-method",
+      "test262/Map/object-retrieval",
+      "test262/Map/object-deletion",
+      "test262/Map/object-creation",
+      "test262/Map/garbage-collection",
+    ]:
+      for variant_index in range(total_variants):
+        cases.append("%s-Variant-%d" % (case, variant_index))
+
+    for case in cases:
+      self.assertTrue(0 <= radix_hash(capacity=7, key=case) < 7)
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/src/v8/tools/testrunner/testproc/sigproc.py b/src/v8/tools/testrunner/testproc/sigproc.py
new file mode 100644
index 0000000..f29fa22
--- /dev/null
+++ b/src/v8/tools/testrunner/testproc/sigproc.py
@@ -0,0 +1,34 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# for py2/py3 compatibility
+from __future__ import print_function
+
+import signal
+
+from . import base
+from testrunner.local import utils
+
+
+class SignalProc(base.TestProcObserver):
+  def __init__(self):
+    super(SignalProc, self).__init__()
+    self.exit_code = utils.EXIT_CODE_PASS
+
+  def setup(self, *args, **kwargs):
+    super(SignalProc, self).setup(*args, **kwargs)
+    # It should be called after processors are chained together so that a
+    # caught signal is not lost.
+    signal.signal(signal.SIGINT, self._on_ctrlc)
+    signal.signal(signal.SIGTERM, self._on_sigterm)
+
+  def _on_ctrlc(self, _signum, _stack_frame):
+    print('>>> Ctrl-C detected, early abort...')
+    self.exit_code = utils.EXIT_CODE_INTERRUPTED
+    self.stop()
+
+  def _on_sigterm(self, _signum, _stack_frame):
+    print('>>> SIGTERM received, early abort...')
+    self.exit_code = utils.EXIT_CODE_TERMINATED
+    self.stop()
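A hedged sketch of how the recorded exit code is meant to be read back (the real call site is in the runner, outside this hunk):

from testrunner.local import utils
from testrunner.testproc.sigproc import SignalProc

sigproc = SignalProc()
# setup() is called once the pipeline is chained and installs the
# SIGINT/SIGTERM handlers; after the run the stored code tells the runner
# whether it finished, was Ctrl-C'd, or was terminated.
was_aborted = sigproc.exit_code != utils.EXIT_CODE_PASS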
diff --git a/src/v8/tools/testrunner/testproc/timeout.py b/src/v8/tools/testrunner/testproc/timeout.py
new file mode 100644
index 0000000..54dc60e
--- /dev/null
+++ b/src/v8/tools/testrunner/testproc/timeout.py
@@ -0,0 +1,29 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import time
+
+from . import base
+
+
+class TimeoutProc(base.TestProcObserver):
+  def __init__(self, duration_sec):
+    super(TimeoutProc, self).__init__()
+    self._duration_sec = duration_sec
+    self._start = time.time()
+
+  def _on_next_test(self, test):
+    self._on_event()
+
+  def _on_result_for(self, test, result):
+    self._on_event()
+
+  def _on_heartbeat(self):
+    self._on_event()
+
+  def _on_event(self):
+    if not self.is_stopped:
+      if time.time() - self._start > self._duration_sec:
+        print('>>> Total timeout reached.')
+        self.stop()
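A minimal sketch of the total-timeout guard (hedged: the duration normally comes from a command-line option not shown here):

from testrunner.testproc.timeout import TimeoutProc

# Stops the whole pipeline once 10 minutes of wall-clock time have elapsed.
# The check piggybacks on test, result and heartbeat events, so it cannot
# fire while the pipeline is completely idle.
total_timeout = TimeoutProc(duration_sec=10 * 60)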
diff --git a/src/v8/tools/testrunner/testproc/variant.py b/src/v8/tools/testrunner/testproc/variant.py
index dba1af9..0164ad8 100644
--- a/src/v8/tools/testrunner/testproc/variant.py
+++ b/src/v8/tools/testrunner/testproc/variant.py
@@ -39,21 +39,22 @@
   def _next_test(self, test):
     gen = self._variants_gen(test)
     self._next_variant[test.procid] = gen
-    self._try_send_new_subtest(test, gen)
+    return self._try_send_new_subtest(test, gen)
 
   def _result_for(self, test, subtest, result):
     gen = self._next_variant[test.procid]
-    self._try_send_new_subtest(test, gen)
+    if not self._try_send_new_subtest(test, gen):
+      self._send_result(test, None)
 
   def _try_send_new_subtest(self, test, variants_gen):
     for variant, flags, suffix in variants_gen:
       subtest = self._create_subtest(test, '%s-%s' % (variant, suffix),
                                      variant=variant, flags=flags)
-      self._send_test(subtest)
-      return
+      if self._send_test(subtest):
+        return True
 
     del self._next_variant[test.procid]
-    self._send_result(test, None)
+    return False
 
   def _variants_gen(self, test):
     """Generator producing (variant, flags, procid suffix) tuples."""
diff --git a/src/v8/tools/testrunner/testproc/variant_unittest.py b/src/v8/tools/testrunner/testproc/variant_unittest.py
new file mode 100755
index 0000000..56e28c8
--- /dev/null
+++ b/src/v8/tools/testrunner/testproc/variant_unittest.py
@@ -0,0 +1,172 @@
+#!/usr/bin/env python
+# Copyright 2019 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+import tempfile
+import unittest
+
+# Needed because the test runner contains relative imports.
+TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.dirname(
+    os.path.abspath(__file__))))
+sys.path.append(TOOLS_PATH)
+
+from testrunner.testproc import base
+from testrunner.testproc.variant import VariantProc
+
+
+class FakeResultObserver(base.TestProcObserver):
+  def __init__(self):
+    super(FakeResultObserver, self).__init__()
+
+    self.results = set()
+
+  def result_for(self, test, result):
+    self.results.add((test, result))
+
+
+class FakeFilter(base.TestProcFilter):
+  def __init__(self, filter_predicate):
+    super(FakeFilter, self).__init__()
+
+    self._filter_predicate = filter_predicate
+
+    self.loaded = set()
+    self.call_counter = 0
+
+  def next_test(self, test):
+    self.call_counter += 1
+
+    if self._filter_predicate(test):
+      return False
+
+    self.loaded.add(test)
+    return True
+
+
+class FakeSuite(object):
+  def __init__(self, name):
+    self.name = name
+
+
+class FakeTest(object):
+  def __init__(self, procid):
+    self.suite = FakeSuite("fake_suite")
+    self.procid = procid
+
+    self.keep_output = False
+
+  def create_subtest(self, proc, subtest_id, **kwargs):
+    variant = kwargs['variant']
+
+    variant.origin = self
+    return variant
+
+
+class FakeVariantGen(object):
+  def __init__(self, variants):
+    self._variants = variants
+
+  def gen(self, test):
+    for variant in self._variants:
+      yield variant, [], "fake_suffix"
+
+
+class TestVariantProcLoading(unittest.TestCase):
+  def setUp(self):
+    self.test = FakeTest("test")
+
+  def _simulate_proc(self, variants):
+    """Expects the list of instantiated test variants to load into the
+    VariantProc."""
+    variants_mapping = {self.test: variants}
+
+    # Creates a Variant processor containing the possible types of test
+    # variants.
+    self.variant_proc = VariantProc(variants=["to_filter", "to_load"])
+    self.variant_proc._variant_gens = {
+      "fake_suite": FakeVariantGen(variants)}
+
+    # FakeFilter only lets tests passing the predicate to be loaded.
+    self.fake_filter = FakeFilter(
+      filter_predicate=(lambda t: t.procid == "to_filter"))
+
+    # FakeResultObserver to verify that VariantProc calls result_for correctly.
+    self.fake_result_observer = FakeResultObserver()
+
+    # Links up processors together to form a test processing pipeline.
+    self.variant_proc._prev_proc = self.fake_result_observer
+    self.fake_filter._prev_proc = self.variant_proc
+    self.variant_proc._next_proc = self.fake_filter
+
+    # Injects the test into the VariantProc
+    is_loaded = self.variant_proc.next_test(self.test)
+
+    # Verifies the behavioral consistency by using the instrumentation in
+    # FakeFilter
+    loaded_variants = list(self.fake_filter.loaded)
+    self.assertEqual(is_loaded, any(loaded_variants))
+    return self.fake_filter.loaded, self.fake_filter.call_counter
+
+  def test_filters_first_two_variants(self):
+    variants = [
+      FakeTest('to_filter'),
+      FakeTest('to_filter'),
+      FakeTest('to_load'),
+      FakeTest('to_load'),
+    ]
+    expected_load_results = {variants[2]}
+
+    load_results, call_count = self._simulate_proc(variants)
+
+    self.assertSetEqual(expected_load_results, load_results)
+    self.assertEqual(call_count, 3)
+
+  def test_stops_loading_after_first_successful_load(self):
+    variants = [
+      FakeTest('to_load'),
+      FakeTest('to_load'),
+      FakeTest('to_filter'),
+    ]
+    expected_load_results = {variants[0]}
+
+    loaded_tests, call_count = self._simulate_proc(variants)
+
+    self.assertSetEqual(expected_load_results, loaded_tests)
+    self.assertEqual(call_count, 1)
+
+  def test_return_result_when_out_of_variants(self):
+    variants = [
+      FakeTest('to_filter'),
+      FakeTest('to_load'),
+    ]
+
+    self._simulate_proc(variants)
+
+    self.variant_proc.result_for(variants[1], None)
+
+    expected_results = {(self.test, None)}
+
+    self.assertSetEqual(expected_results, self.fake_result_observer.results)
+
+  def test_return_result_after_running_variants(self):
+    variants = [
+      FakeTest('to_filter'),
+      FakeTest('to_load'),
+      FakeTest('to_load'),
+    ]
+
+    self._simulate_proc(variants)
+    self.variant_proc.result_for(variants[1], None)
+
+    self.assertSetEqual(set(variants[1:]), self.fake_filter.loaded)
+
+    self.variant_proc.result_for(variants[2], None)
+
+    expected_results = {(self.test, None)}
+    self.assertSetEqual(expected_results, self.fake_result_observer.results)
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/src/v8/tools/testrunner/trycatch_loader.js b/src/v8/tools/testrunner/trycatch_loader.js
new file mode 100644
index 0000000..737c8e4
--- /dev/null
+++ b/src/v8/tools/testrunner/trycatch_loader.js
@@ -0,0 +1,42 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+// Wrapper that loads JavaScript tests passed as arguments; used by the gc
+// fuzzer. It ignores all exceptions and runs each test in its own realm.
+//
+// It can't prevent the %AbortJS function from aborting execution, so it
+// should be used with d8's --disable-abortjs flag to ignore all possible
+// errors inside tests.
+
+// We use -- as an additional separator for test preamble files and test files.
+// The preamble files (before --) will be loaded in each realm before each
+// test.
+var separator = arguments.indexOf("--")
+var preamble = arguments.slice(0, separator)
+var tests = arguments.slice(separator + 1)
+
+var preambleString = ""
+for (let jstest of preamble) {
+  preambleString += "load(\"" + jstest + "\");"
+}
+
+for (let jstest of tests) {
+  print("Loading " + jstest);
+  let start = performance.now();
+
+  // anonymous function to not populate global namespace.
+  (function () {
+    let realm = Realm.create();
+    try {
+      Realm.eval(realm, preambleString + "load(\"" + jstest + "\");");
+    } catch (err) {
+      // ignore all errors
+    }
+    Realm.dispose(realm);
+  })();
+
+  let durationSec = ((performance.now() - start) / 1000.0).toFixed(2);
+  print("Duration " + durationSec + "s");
+}
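For illustration, the kind of d8 command line the wrapper expects, as a hedged sketch (the real invocation is assembled by the gc fuzzer and the file paths below are placeholders):

d8_argv = [
    'd8', '--disable-abortjs',
    'tools/testrunner/trycatch_loader.js',
    '--',                           # start of script arguments for d8
    'test/mjsunit/mjsunit.js',      # preamble, loaded into every realm
    '--',                           # separator consumed by the wrapper itself
    'test/mjsunit/array-sort.js',   # each test runs in its own realm
]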
diff --git a/src/v8/tools/testrunner/utils/__init__.py b/src/v8/tools/testrunner/utils/__init__.py
new file mode 100644
index 0000000..4433538
--- /dev/null
+++ b/src/v8/tools/testrunner/utils/__init__.py
@@ -0,0 +1,3 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
diff --git a/src/v8/tools/testrunner/utils/dump_build_config_gyp.py b/src/v8/tools/testrunner/utils/dump_build_config_gyp.py
index 7f72627..963b0e2 100644
--- a/src/v8/tools/testrunner/utils/dump_build_config_gyp.py
+++ b/src/v8/tools/testrunner/utils/dump_build_config_gyp.py
@@ -11,6 +11,9 @@
 """
 # TODO(machenbach): Remove this when gyp is deprecated.
 
+# for py2/py3 compatibility
+from __future__ import print_function
+
 import json
 import os
 import sys
@@ -47,7 +50,7 @@
   try:
     return k, json.loads(v2)
   except ValueError as e:
-    print(k, v, v2)
+    print((k, v, v2))
     raise e
 
 with open(sys.argv[1], 'w') as f:
diff --git a/src/v8/tools/testrunner/utils/random_utils.py b/src/v8/tools/testrunner/utils/random_utils.py
new file mode 100644
index 0000000..0d2cb3f
--- /dev/null
+++ b/src/v8/tools/testrunner/utils/random_utils.py
@@ -0,0 +1,13 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import random
+
+
+def random_seed():
+  """Returns random, non-zero seed."""
+  seed = 0
+  while not seed:
+    seed = random.SystemRandom().randint(-2147483648, 2147483647)
+  return seed
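A small usage sketch (hedged: the flag name mirrors how seeds are commonly passed to d8; the exact wiring lives elsewhere in the test runner):

from testrunner.utils.random_utils import random_seed

# Non-zero 32-bit signed value; zero is excluded, presumably because an unset
# or zero seed is treated as "pick one for me".
flags = ['--random-seed=%d' % random_seed()]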