Import cobalt 25.master.0.1034729
diff --git a/base/metrics/COMMON_METADATA b/base/metrics/COMMON_METADATA
new file mode 100644
index 0000000..540f41c
--- /dev/null
+++ b/base/metrics/COMMON_METADATA
@@ -0,0 +1,3 @@
+monorail {
+  component: "Internals>Metrics"
+}
diff --git a/base/metrics/DIR_METADATA b/base/metrics/DIR_METADATA
new file mode 100644
index 0000000..e99cb75
--- /dev/null
+++ b/base/metrics/DIR_METADATA
@@ -0,0 +1 @@
+mixins: "//base/metrics/COMMON_METADATA"
diff --git a/base/metrics/OWNERS b/base/metrics/OWNERS
index 4cc69ff..b9a5227 100644
--- a/base/metrics/OWNERS
+++ b/base/metrics/OWNERS
@@ -1,10 +1,16 @@
+# Note: Unless you want a specific reviewer's expertise, please send CLs to
+# chromium-metrics-reviews@google.com rather than to specific individuals. These
+# CLs will be automatically reassigned to a reviewer within about 5 minutes.
+# This approach helps our team to load-balance incoming reviews. Googlers can
+# read more about this at go/gwsq-gerrit.
+
 asvitkine@chromium.org
-bcwhite@chromium.org
-gayane@chromium.org
+caitlinfischer@google.com
 holte@chromium.org
 isherman@chromium.org
 jwd@chromium.org
+lucnguyen@google.com
 mpearson@chromium.org
 rkaplow@chromium.org
-
-# COMPONENT: Internals>Metrics
+rogerm@chromium.org
+sweilun@chromium.org
diff --git a/base/metrics/bucket_ranges.cc b/base/metrics/bucket_ranges.cc
index 39b3793..4c04d98 100644
--- a/base/metrics/bucket_ranges.cc
+++ b/base/metrics/bucket_ranges.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -6,86 +6,10 @@
 
 #include <cmath>
 
-#include "base/logging.h"
+#include "base/metrics/crc32.h"
 
 namespace base {
 
-// Static table of checksums for all possible 8 bit bytes.
-const uint32_t kCrcTable[256] = {
-    0x0,         0x77073096L, 0xee0e612cL, 0x990951baL, 0x76dc419L,
-    0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0xedb8832L,  0x79dcb8a4L,
-    0xe0d5e91eL, 0x97d2d988L, 0x9b64c2bL,  0x7eb17cbdL, 0xe7b82d07L,
-    0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL,
-    0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L,
-    0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L,
-    0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L,
-    0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL,
-    0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L,
-    0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL,
-    0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L,
-    0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L,
-    0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L,
-    0x1db7106L,  0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x6b6b51fL,
-    0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0xf00f934L,  0x9609a88eL,
-    0xe10e9818L, 0x7f6a0dbbL, 0x86d3d2dL,  0x91646c97L, 0xe6635c01L,
-    0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL,
-    0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L,
-    0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L,
-    0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L,
-    0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL,
-    0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L,
-    0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L,
-    0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL,
-    0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L,
-    0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L,
-    0x3b6e20cL,  0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x4db2615L,
-    0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0xd6d6a3eL,  0x7a6a5aa8L,
-    0xe40ecf0bL, 0x9309ff9dL, 0xa00ae27L,  0x7d079eb1L, 0xf00f9344L,
-    0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL,
-    0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL,
-    0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L,
-    0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L,
-    0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL,
-    0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL,
-    0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L,
-    0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL,
-    0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L,
-    0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL,
-    0x26d930aL,  0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x5005713L,
-    0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0xcb61b38L,  0x92d28e9bL,
-    0xe5d5be0dL, 0x7cdcefb7L, 0xbdbdf21L,  0x86d3d2d4L, 0xf1d4e242L,
-    0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L,
-    0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL,
-    0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L,
-    0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L,
-    0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L,
-    0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L,
-    0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L,
-    0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L,
-    0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL,
-    0x2d02ef8dL,
-};
-
-// We generate the CRC-32 using the low order bits to select whether to XOR in
-// the reversed polynomial 0xedb88320L.  This is nice and simple, and allows us
-// to keep the quotient in a uint32_t.  Since we're not concerned about the
-// nature of corruptions (i.e., we don't care about bit sequencing, since we are
-// handling memory changes, which are more grotesque) so we don't bother to get
-// the CRC correct for big-endian vs little-ending calculations.  All we need is
-// a nice hash, that tends to depend on all the bits of the sample, with very
-// little chance of changes in one place impacting changes in another place.
-static uint32_t Crc32(uint32_t sum, HistogramBase::Sample value) {
-  union {
-    HistogramBase::Sample range;
-    unsigned char bytes[sizeof(HistogramBase::Sample)];
-  } converter;
-  converter.range = value;
-  for (size_t i = 0; i < sizeof(converter); ++i) {
-    sum = kCrcTable[(sum & 0xff) ^ converter.bytes[i]] ^ (sum >> 8);
-  }
-  return sum;
-}
-
 BucketRanges::BucketRanges(size_t num_ranges)
     : ranges_(num_ranges, 0),
       checksum_(0) {}
@@ -93,12 +17,16 @@
 BucketRanges::~BucketRanges() = default;
 
 uint32_t BucketRanges::CalculateChecksum() const {
-  // Seed checksum.
-  uint32_t checksum = static_cast<uint32_t>(ranges_.size());
+  // The CRC of an empty ranges_ happens to be 0. This early exit prevents
+  // trying to take the address of ranges_[0], which fails for an empty
+  // vector even if that address is never used.
+  const size_t ranges_size = ranges_.size();
+  if (ranges_size == 0)
+    return 0;
 
-  for (size_t index = 0; index < ranges_.size(); ++index)
-    checksum = Crc32(checksum, ranges_[index]);
-  return checksum;
+  // The checksum is seeded with the number of ranges.
+  return Crc32(static_cast<uint32_t>(ranges_size), &ranges_[0],
+               sizeof(ranges_[0]) * ranges_size);
 }
 
 bool BucketRanges::HasValidChecksum() const {
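
The rewritten CalculateChecksum() above seeds the CRC with the vector size, as
before, but now feeds the whole contiguous sample array to the Crc32() helper
this patch adds in base/metrics/crc32.cc (below). Because the table-driven CRC
consumes input strictly byte by byte, this is equivalent to the removed
per-sample loop that hashed each Sample through a union. A standalone sketch of
the equivalence (not part of the patch; it assumes HistogramBase::Sample is a
32-bit int and links against the new Crc32()):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Declaration matching the new base/metrics/crc32.h.
namespace base {
uint32_t Crc32(uint32_t sum, const void* data, size_t size);
}

using Sample = int32_t;  // Stand-in for HistogramBase::Sample.

// Old behavior: seed with the size, then hash each sample's bytes in order.
uint32_t OldChecksum(const std::vector<Sample>& ranges) {
  uint32_t checksum = static_cast<uint32_t>(ranges.size());
  for (Sample value : ranges) {
    unsigned char bytes[sizeof(Sample)];
    std::memcpy(bytes, &value, sizeof(bytes));  // Same bytes the union saw.
    checksum = base::Crc32(checksum, bytes, sizeof(bytes));
  }
  return checksum;
}

// New behavior: one call over the vector's contiguous storage. Both versions
// hash the identical byte sequence, so the results match for any input.
uint32_t NewChecksum(const std::vector<Sample>& ranges) {
  if (ranges.empty())
    return 0;  // Matches the early exit above: seed 0, no bytes hashed.
  return base::Crc32(static_cast<uint32_t>(ranges.size()), ranges.data(),
                     sizeof(ranges[0]) * ranges.size());
}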
diff --git a/base/metrics/bucket_ranges.h b/base/metrics/bucket_ranges.h
index e1dc09d..70be84d 100644
--- a/base/metrics/bucket_ranges.h
+++ b/base/metrics/bucket_ranges.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 //
@@ -17,15 +17,16 @@
 #ifndef BASE_METRICS_BUCKET_RANGES_H_
 #define BASE_METRICS_BUCKET_RANGES_H_
 
+#include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <atomic>
 #include <vector>
 
-#include <limits.h>
-
-#include "base/atomicops.h"
 #include "base/base_export.h"
-#include "base/macros.h"
+#include "base/check_op.h"
 #include "base/metrics/histogram_base.h"
-#include "starboard/types.h"
 
 namespace base {
 
@@ -34,6 +35,10 @@
   typedef std::vector<HistogramBase::Sample> Ranges;
 
   explicit BucketRanges(size_t num_ranges);
+
+  BucketRanges(const BucketRanges&) = delete;
+  BucketRanges& operator=(const BucketRanges&) = delete;
+
   ~BucketRanges();
 
   size_t size() const { return ranges_.size(); }
@@ -66,10 +71,10 @@
   // safety against overwriting an existing value: though it is wasteful to
   // have multiple identical persistent records, it is still safe.
   void set_persistent_reference(uint32_t ref) const {
-    subtle::Release_Store(&persistent_reference_, ref);
+    persistent_reference_.store(ref, std::memory_order_release);
   }
   uint32_t persistent_reference() const {
-    return subtle::Acquire_Load(&persistent_reference_);
+    return persistent_reference_.load(std::memory_order_acquire);
   }
 
  private:
@@ -89,15 +94,9 @@
   // information is stored. This allows for the record to be created once and
   // re-used simply by having all histograms with the same ranges use the
   // same reference.
-  mutable subtle::Atomic32 persistent_reference_ = 0;
-
-  DISALLOW_COPY_AND_ASSIGN(BucketRanges);
+  mutable std::atomic<uint32_t> persistent_reference_{0};
 };
 
-//////////////////////////////////////////////////////////////////////////////
-// Expose only for test.
-BASE_EXPORT extern const uint32_t kCrcTable[256];
-
 }  // namespace base
 
 #endif  // BASE_METRICS_BUCKET_RANGES_H_
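
The switch above from base::subtle::Atomic32 with Release_Store/Acquire_Load
to std::atomic<uint32_t> with explicit memory orders preserves the same
contract: a writer that publishes the persistent reference with
memory_order_release guarantees its earlier writes are visible to any reader
that observes the value with memory_order_acquire. A minimal sketch of the
pattern in isolation (hypothetical class, not Chromium code):

#include <atomic>
#include <cstdint>

class PersistentRef {
 public:
  void Publish(uint32_t ref) {
    // Release: writes sequenced before this store (e.g. the persistent record
    // the reference points at) become visible to acquiring readers.
    ref_.store(ref, std::memory_order_release);
  }

  uint32_t Read() const {
    // Acquire: a reader that sees a non-zero reference is guaranteed to also
    // see the writes the publisher made before storing it.
    return ref_.load(std::memory_order_acquire);
  }

 private:
  std::atomic<uint32_t> ref_{0};
};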
diff --git a/base/metrics/bucket_ranges_unittest.cc b/base/metrics/bucket_ranges_unittest.cc
index 9453210..eb105c4 100644
--- a/base/metrics/bucket_ranges_unittest.cc
+++ b/base/metrics/bucket_ranges_unittest.cc
@@ -1,10 +1,11 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "base/metrics/bucket_ranges.h"
 
-#include "starboard/types.h"
+#include <stdint.h>
+
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
@@ -73,21 +74,5 @@
   EXPECT_TRUE(ranges.HasValidChecksum());
 }
 
-// Table was generated similarly to sample code for CRC-32 given on:
-// http://www.w3.org/TR/PNG/#D-CRCAppendix.
-TEST(BucketRangesTest, Crc32TableTest) {
-  for (int i = 0; i < 256; ++i) {
-    uint32_t checksum = i;
-    for (int j = 0; j < 8; ++j) {
-      const uint32_t kReversedPolynomial = 0xedb88320L;
-      if (checksum & 1)
-        checksum = kReversedPolynomial ^ (checksum >> 1);
-      else
-        checksum >>= 1;
-    }
-    EXPECT_EQ(kCrcTable[i], checksum);
-  }
-}
-
 }  // namespace
 }  // namespace base
diff --git a/base/metrics/crc32.cc b/base/metrics/crc32.cc
new file mode 100644
index 0000000..6fdfc90
--- /dev/null
+++ b/base/metrics/crc32.cc
@@ -0,0 +1,81 @@
+// Copyright 2019 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/crc32.h"
+
+namespace base {
+
+// Static table of checksums for all possible 8 bit bytes.
+const uint32_t kCrcTable[256] = {
+    0x0,         0x77073096L, 0xee0e612cL, 0x990951baL, 0x76dc419L,
+    0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0xedb8832L,  0x79dcb8a4L,
+    0xe0d5e91eL, 0x97d2d988L, 0x9b64c2bL,  0x7eb17cbdL, 0xe7b82d07L,
+    0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL,
+    0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L,
+    0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L,
+    0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L,
+    0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL,
+    0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L,
+    0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL,
+    0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L,
+    0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L,
+    0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L,
+    0x1db7106L,  0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x6b6b51fL,
+    0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0xf00f934L,  0x9609a88eL,
+    0xe10e9818L, 0x7f6a0dbbL, 0x86d3d2dL,  0x91646c97L, 0xe6635c01L,
+    0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL,
+    0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L,
+    0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L,
+    0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L,
+    0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL,
+    0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L,
+    0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L,
+    0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL,
+    0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L,
+    0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L,
+    0x3b6e20cL,  0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x4db2615L,
+    0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0xd6d6a3eL,  0x7a6a5aa8L,
+    0xe40ecf0bL, 0x9309ff9dL, 0xa00ae27L,  0x7d079eb1L, 0xf00f9344L,
+    0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL,
+    0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL,
+    0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L,
+    0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L,
+    0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL,
+    0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL,
+    0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L,
+    0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL,
+    0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L,
+    0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL,
+    0x26d930aL,  0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x5005713L,
+    0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0xcb61b38L,  0x92d28e9bL,
+    0xe5d5be0dL, 0x7cdcefb7L, 0xbdbdf21L,  0x86d3d2d4L, 0xf1d4e242L,
+    0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L,
+    0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL,
+    0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L,
+    0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L,
+    0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L,
+    0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L,
+    0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L,
+    0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L,
+    0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL,
+    0x2d02ef8dL,
+};
+
+// We generate the CRC-32 using the low order bits to select whether to XOR in
+// the reversed polynomial 0xEDB88320.  This is nice and simple, and allows us
+// to keep the quotient in a uint32_t.  Since we're not concerned about the
+// nature of corruptions (i.e., we don't care about bit sequencing, since we
+// are handling memory changes, which are more grotesque), we don't bother to
+// get the CRC correct for big-endian vs. little-endian calculations.  All we
+// need is a nice hash that tends to depend on all the bits of the sample, with
+// very little chance of changes in one place impacting changes in another.
+uint32_t Crc32(uint32_t sum, const void* data, size_t size) {
+  const unsigned char* bytes = reinterpret_cast<const unsigned char*>(data);
+  for (size_t i = 0; i < size; ++i) {
+    sum = kCrcTable[(sum & 0x000000FF) ^ bytes[i]] ^ (sum >> 8);
+  }
+  return sum;
+}
+
+}  // namespace base
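
For reference, kCrcTable can be regenerated from the reversed polynomial
exactly the way the unit test below verifies it, following the sample code in
the PNG specification. A standalone sketch:

#include <array>
#include <cstdint>

// Rebuild the table entry by entry: each byte value is shifted through eight
// rounds, XORing in the reversed polynomial whenever the low bit is set.
std::array<uint32_t, 256> MakeCrcTable() {
  std::array<uint32_t, 256> table{};
  for (uint32_t i = 0; i < 256; ++i) {
    uint32_t checksum = i;
    for (int j = 0; j < 8; ++j) {
      const uint32_t kReversedPolynomial = 0xEDB88320u;
      checksum = (checksum & 1) ? kReversedPolynomial ^ (checksum >> 1)
                                : checksum >> 1;
    }
    table[i] = checksum;
  }
  return table;
}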
diff --git a/base/metrics/crc32.h b/base/metrics/crc32.h
new file mode 100644
index 0000000..7488967
--- /dev/null
+++ b/base/metrics/crc32.h
@@ -0,0 +1,24 @@
+// Copyright 2019 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_CRC32_H_
+#define BASE_METRICS_CRC32_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/base_export.h"
+
+namespace base {
+
+BASE_EXPORT extern const uint32_t kCrcTable[256];
+
+// This provides a simple, fast CRC-32 calculation that can be used for checking
+// the integrity of data.  It is not a "secure" calculation!  |sum| can start
+// with any seed or be used to continue an operation begun with previous data.
+BASE_EXPORT uint32_t Crc32(uint32_t sum, const void* data, size_t size);
+
+}  // namespace base
+
+#endif  // BASE_METRICS_CRC32_H_
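
Because |sum| threads through the calculation byte by byte, Crc32() is
resumable: hashing a buffer in one call equals hashing it in chunks, feeding
each result back in as the next seed. A usage sketch (illustrative, not part
of the patch):

#include <cassert>
#include <cstdint>

#include "base/metrics/crc32.h"

void Crc32UsageSketch() {
  const unsigned char data[8] = {1, 2, 3, 4, 5, 6, 7, 8};

  // One shot, seeded with an arbitrary value (BucketRanges, for example,
  // seeds with the number of ranges).
  const uint32_t one_shot = base::Crc32(8u, data, sizeof(data));

  // Same bytes in two chunks: continue from the first call's result.
  uint32_t chunked = base::Crc32(8u, data, 4);
  chunked = base::Crc32(chunked, data + 4, 4);

  assert(one_shot == chunked);
}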
diff --git a/base/metrics/crc32_unittest.cc b/base/metrics/crc32_unittest.cc
new file mode 100644
index 0000000..f9ad6ab
--- /dev/null
+++ b/base/metrics/crc32_unittest.cc
@@ -0,0 +1,34 @@
+// Copyright 2019 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/crc32.h"
+
+#include <stdint.h>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+// Table was generated similarly to sample code for CRC-32 given on:
+// http://www.w3.org/TR/PNG/#D-CRCAppendix.
+TEST(Crc32Test, TableTest) {
+  for (int i = 0; i < 256; ++i) {
+    uint32_t checksum = i;
+    for (int j = 0; j < 8; ++j) {
+      const uint32_t kReversedPolynomial = 0xEDB88320L;
+      if (checksum & 1)
+        checksum = kReversedPolynomial ^ (checksum >> 1);
+      else
+        checksum >>= 1;
+    }
+    EXPECT_EQ(kCrcTable[i], checksum);
+  }
+}
+
+// A CRC of nothing should always be zero.
+TEST(Crc32Test, ZeroTest) {
+  EXPECT_EQ(0U, Crc32(0, nullptr, 0));
+}
+
+}  // namespace base
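
ZeroTest holds because a zero-length input never enters the byte loop, so
Crc32() returns its seed unchanged; a seed of 0 comes straight back out. The
same property is what lets BucketRanges::CalculateChecksum() early-return 0
for an empty vector. A hypothetical extra test (not in the patch) making the
pass-through explicit:

TEST(Crc32Test, SeedPassesThrough) {
  // Any seed is returned unchanged for empty input; ZeroTest above is the
  // seed == 0 case.
  EXPECT_EQ(1234u, Crc32(1234u, nullptr, 0));
}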
diff --git a/base/metrics/dummy_histogram.cc b/base/metrics/dummy_histogram.cc
index 2707733..1de4814 100644
--- a/base/metrics/dummy_histogram.cc
+++ b/base/metrics/dummy_histogram.cc
@@ -1,4 +1,4 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
+// Copyright 2017 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -6,9 +6,11 @@
 
 #include <memory>
 
-#include "base/logging.h"
 #include "base/metrics/histogram_samples.h"
 #include "base/metrics/metrics_hashes.h"
+#include "base/no_destructor.h"
+#include "base/notreached.h"
+#include "base/values.h"
 
 namespace base {
 
@@ -17,28 +19,27 @@
 // Helper classes for DummyHistogram.
 class DummySampleCountIterator : public SampleCountIterator {
  public:
-  DummySampleCountIterator() {}
-  ~DummySampleCountIterator() override {}
+  DummySampleCountIterator() = default;
+  DummySampleCountIterator(const DummySampleCountIterator&) = delete;
+  DummySampleCountIterator& operator=(const DummySampleCountIterator&) = delete;
+  ~DummySampleCountIterator() override = default;
 
   // SampleCountIterator:
   bool Done() const override { return true; }
   void Next() override { NOTREACHED(); }
   void Get(HistogramBase::Sample* min,
            int64_t* max,
-           HistogramBase::Count* count) const override {
+           HistogramBase::Count* count) override {
     NOTREACHED();
   }
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(DummySampleCountIterator);
 };
 
 class DummyHistogramSamples : public HistogramSamples {
  public:
-  explicit DummyHistogramSamples() : HistogramSamples(0, new LocalMetadata()) {}
-  ~DummyHistogramSamples() override {
-    delete static_cast<LocalMetadata*>(meta());
-  }
+  DummyHistogramSamples()
+      : HistogramSamples(0, std::make_unique<LocalMetadata>()) {}
+  DummyHistogramSamples(const DummyHistogramSamples&) = delete;
+  DummyHistogramSamples& operator=(const DummyHistogramSamples&) = delete;
 
   // HistogramSamples:
   void Accumulate(HistogramBase::Sample value,
@@ -52,12 +53,12 @@
   std::unique_ptr<SampleCountIterator> Iterator() const override {
     return std::make_unique<DummySampleCountIterator>();
   }
+  std::unique_ptr<SampleCountIterator> ExtractingIterator() override {
+    return std::make_unique<DummySampleCountIterator>();
+  }
   bool AddSubtractImpl(SampleCountIterator* iter, Operator op) override {
     return true;
   }
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(DummyHistogramSamples);
 };
 
 }  // namespace
@@ -79,7 +80,7 @@
 bool DummyHistogram::HasConstructionArguments(
     Sample expected_minimum,
     Sample expected_maximum,
-    uint32_t expected_bucket_count) const {
+    size_t expected_bucket_count) const {
   return true;
 }
 
@@ -91,6 +92,11 @@
   return std::make_unique<DummyHistogramSamples>();
 }
 
+std::unique_ptr<HistogramSamples> DummyHistogram::SnapshotUnloggedSamples()
+    const {
+  return std::make_unique<DummyHistogramSamples>();
+}
+
 std::unique_ptr<HistogramSamples> DummyHistogram::SnapshotDelta() {
   return std::make_unique<DummyHistogramSamples>();
 }
@@ -99,4 +105,12 @@
   return std::make_unique<DummyHistogramSamples>();
 }
 
+Value::Dict DummyHistogram::ToGraphDict() const {
+  return Value::Dict();
+}
+
+Value::Dict DummyHistogram::GetParameters() const {
+  return Value::Dict();
+}
+
 }  // namespace base
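
DummySampleCountIterator is a null object: Done() is true from the start, so
any standard iteration over a dummy histogram's samples terminates immediately
and never reaches the NOTREACHED() in Next() or Get(). An illustrative caller
(assuming |samples| came from one of the Snapshot*() methods above):

#include <memory>

#include "base/metrics/histogram_samples.h"

void IterateSketch(const base::HistogramSamples& samples) {
  std::unique_ptr<base::SampleCountIterator> it = samples.Iterator();
  while (!it->Done()) {  // Immediately true for the dummy iterator.
    it->Next();          // Never reached.
  }
}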
diff --git a/base/metrics/dummy_histogram.h b/base/metrics/dummy_histogram.h
index 3ef59b9..95ec423 100644
--- a/base/metrics/dummy_histogram.h
+++ b/base/metrics/dummy_histogram.h
@@ -1,17 +1,19 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
+// Copyright 2017 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #ifndef BASE_METRICS_DUMMY_HISTOGRAM_H_
 #define BASE_METRICS_DUMMY_HISTOGRAM_H_
 
+#include <stdint.h>
+
 #include <memory>
 #include <string>
 
 #include "base/base_export.h"
 #include "base/metrics/histogram_base.h"
 #include "base/no_destructor.h"
-#include "starboard/types.h"
+#include "base/values.h"
 
 namespace base {
 
@@ -21,38 +23,38 @@
  public:
   static DummyHistogram* GetInstance();
 
+  DummyHistogram(const DummyHistogram&) = delete;
+  DummyHistogram& operator=(const DummyHistogram&) = delete;
+
   // HistogramBase:
   void CheckName(const StringPiece& name) const override {}
   uint64_t name_hash() const override;
   HistogramType GetHistogramType() const override;
   bool HasConstructionArguments(Sample expected_minimum,
                                 Sample expected_maximum,
-                                uint32_t expected_bucket_count) const override;
+                                size_t expected_bucket_count) const override;
   void Add(Sample value) override {}
   void AddCount(Sample value, int count) override {}
   void AddSamples(const HistogramSamples& samples) override {}
   bool AddSamplesFromPickle(PickleIterator* iter) override;
   std::unique_ptr<HistogramSamples> SnapshotSamples() const override;
+  std::unique_ptr<HistogramSamples> SnapshotUnloggedSamples() const override;
+  void MarkSamplesAsLogged(const HistogramSamples& samples) override {}
   std::unique_ptr<HistogramSamples> SnapshotDelta() override;
   std::unique_ptr<HistogramSamples> SnapshotFinalDelta() const override;
-  void WriteHTMLGraph(std::string* output) const override {}
   void WriteAscii(std::string* output) const override {}
+  Value::Dict ToGraphDict() const override;
 
  protected:
   // HistogramBase:
   void SerializeInfoImpl(Pickle* pickle) const override {}
-  void GetParameters(DictionaryValue* params) const override {}
-  void GetCountAndBucketData(Count* count,
-                             int64_t* sum,
-                             ListValue* buckets) const override {}
+  Value::Dict GetParameters() const override;
 
  private:
   friend class NoDestructor<DummyHistogram>;
 
   DummyHistogram() : HistogramBase("dummy_histogram") {}
   ~DummyHistogram() override {}
-
-  DISALLOW_COPY_AND_ASSIGN(DummyHistogram);
 };
 
 }  // namespace base
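
GetInstance() is declared above but defined elsewhere; the friend declaration
for NoDestructor<DummyHistogram> is what lets it construct the otherwise
private singleton. A minimal sketch of that pattern with a hypothetical class:
the function-local static is constructed once, thread-safely, and
intentionally never destroyed, which sidesteps shutdown-order problems.

#include "base/no_destructor.h"

class Dummy {
 public:
  static Dummy* GetInstance() {
    // Constructed on first use; never destructed, so the pointer stays valid
    // for the life of the process.
    static base::NoDestructor<Dummy> instance;
    return instance.get();
  }

  Dummy(const Dummy&) = delete;
  Dummy& operator=(const Dummy&) = delete;

 private:
  friend class base::NoDestructor<Dummy>;  // Lets NoDestructor call the ctor.
  Dummy() = default;
};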
diff --git a/base/metrics/field_trial.cc b/base/metrics/field_trial.cc
index eaaec1b..d1853f8 100644
--- a/base/metrics/field_trial.cc
+++ b/base/metrics/field_trial.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -7,28 +7,51 @@
 #include <algorithm>
 #include <utility>
 
+#include "base/auto_reset.h"
 #include "base/base_switches.h"
-#include "base/build_time.h"
 #include "base/command_line.h"
-#include "base/debug/activity_tracker.h"
 #include "base/logging.h"
 #include "base/metrics/field_trial_param_associator.h"
+#include "base/metrics/histogram_functions.h"
 #include "base/metrics/histogram_macros.h"
+#include "base/no_destructor.h"
+#include "base/notreached.h"
+#include "base/numerics/safe_conversions.h"
 #include "base/process/memory.h"
 #include "base/process/process_handle.h"
 #include "base/process/process_info.h"
 #include "base/rand_util.h"
 #include "base/strings/string_number_conversions.h"
+#include "base/strings/string_piece.h"
 #include "base/strings/string_split.h"
 #include "base/strings/string_util.h"
 #include "base/strings/stringprintf.h"
 #include "base/strings/utf_string_conversions.h"
 #include "base/unguessable_token.h"
+#include "build/build_config.h"
+
+#if !BUILDFLAG(IS_IOS)
+#include "base/process/launch.h"
+#endif
+
+#if BUILDFLAG(IS_MAC)
+#include "base/mac/mach_port_rendezvous.h"
+#endif
 
 // On POSIX, the fd is shared using the mapping in GlobalDescriptors.
-#if defined(OS_POSIX) && !defined(OS_NACL)
+#if BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_NACL)
 #include "base/posix/global_descriptors.h"
-#include "starboard/memory.h"
+#endif
+
+#if BUILDFLAG(IS_WIN)
+#include <windows.h>
+#endif
+
+#if BUILDFLAG(IS_FUCHSIA)
+#include <lib/zx/vmo.h>
+#include <zircon/process.h>
+
+#include "base/fuchsia/fuchsia_logging.h"
 #endif
 
 namespace base {
@@ -44,32 +67,21 @@
 // command line which forces its activation.
 const char kActivationMarker = '*';
 
-// Use shared memory to communicate field trial (experiment) state. Set to false
-// for now while the implementation is fleshed out (e.g. data format, single
-// shared memory segment). See https://codereview.chromium.org/2365273004/ and
-// crbug.com/653874
-// The browser is the only process that has write access to the shared memory.
-// This is safe from race conditions because MakeIterable is a release operation
-// and GetNextOfType is an acquire operation, so memory writes before
-// MakeIterable happen before memory reads after GetNextOfType.
-// TODO(752368): Not yet supported on Fuchsia.
-#if defined(OS_FUCHSIA) || defined(STARBOARD)
-const bool kUseSharedMemoryForFieldTrials = false;
-#else
-const bool kUseSharedMemoryForFieldTrials = true;
-#endif
-
 // Constants for the field trial allocator.
 const char kAllocatorName[] = "FieldTrialAllocator";
 
-// We allocate 128 KiB to hold all the field trial data. This should be enough,
+// We allocate 256 KiB to hold all the field trial data. This should be enough,
 // as most people use 3 - 25 KiB for field trials (as of 11/25/2016).
-// This also doesn't allocate all 128 KiB at once -- the pages only get mapped
+// This also doesn't allocate all 256 KiB at once -- the pages only get mapped
 // to physical memory when they are touched. If the size of the allocated field
-// trials does get larger than 128 KiB, then we will drop some field trials in
+// trials does get larger than 256 KiB, then we will drop some field trials in
 // child processes, leading to an inconsistent view between browser and child
 // processes and possibly causing crashes (see crbug.com/661617).
-const size_t kFieldTrialAllocationSize = 128 << 10;  // 128 KiB
+const size_t kFieldTrialAllocationSize = 256 << 10;  // 256 KiB
+
+#if BUILDFLAG(IS_MAC)
+constexpr MachPortsForRendezvous::key_type kFieldTrialRendezvousKey = 'fldt';
+#endif
 
 // Writes out string1 and then string2 to pickle.
 void WriteStringPair(Pickle* pickle,
@@ -83,7 +95,8 @@
 // format of the pickle looks like:
 // TrialName, GroupName, ParamKey1, ParamValue1, ParamKey2, ParamValue2, ...
 // If there are no parameters, then it just ends at GroupName.
-void PickleFieldTrial(const FieldTrial::State& trial_state, Pickle* pickle) {
+void PickleFieldTrial(const FieldTrial::PickleState& trial_state,
+                      Pickle* pickle) {
   WriteStringPair(pickle, *trial_state.trial_name, *trial_state.group_name);
 
   // Get field trial params.
@@ -96,33 +109,6 @@
     WriteStringPair(pickle, param.first, param.second);
 }
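
A sketch of the resulting pickle layout (illustrative trial and param names;
WriteStringPair simply writes two strings back to back, and readers consume
string pairs until the iterator is exhausted, as FieldTrialEntry::GetParams
does below):

#include <string>

#include "base/pickle.h"

void PickleLayoutSketch() {
  base::Pickle pickle;
  pickle.WriteString("MyTrial");    // TrialName
  pickle.WriteString("GroupA");     // GroupName
  pickle.WriteString("param_key");  // ParamKey1 (params are optional)
  pickle.WriteString("value");      // ParamValue1

  base::PickleIterator iter(pickle);
  std::string trial, group;
  bool ok = iter.ReadString(&trial) && iter.ReadString(&group);
  // ok == true; trial == "MyTrial", group == "GroupA". Any remaining string
  // pairs are the params.
}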
 
-// Created a time value based on |year|, |month| and |day_of_month| parameters.
-Time CreateTimeFromParams(int year, int month, int day_of_month) {
-  DCHECK_GT(year, 1970);
-  DCHECK_GT(month, 0);
-  DCHECK_LT(month, 13);
-  DCHECK_GT(day_of_month, 0);
-  DCHECK_LT(day_of_month, 32);
-
-  Time::Exploded exploded;
-  exploded.year = year;
-  exploded.month = month;
-  exploded.day_of_week = 0;  // Should be unused.
-  exploded.day_of_month = day_of_month;
-  exploded.hour = 0;
-  exploded.minute = 0;
-  exploded.second = 0;
-  exploded.millisecond = 0;
-  Time out_time;
-  if (!Time::FromLocalExploded(exploded, &out_time)) {
-    // TODO(maksims): implement failure handling.
-    // We might just return |out_time|, which is Time(0).
-    NOTIMPLEMENTED();
-  }
-
-  return out_time;
-}
-
 // Returns the boundary value for comparing against the FieldTrial's added
 // groups for a given |divisor| (total probability) and |entropy_value|.
 FieldTrial::Probability GetGroupBoundaryValue(
@@ -142,18 +128,11 @@
   return std::min(result, divisor - 1);
 }
 
-// Separate type from FieldTrial::State so that it can use StringPieces.
-struct FieldTrialStringEntry {
-  StringPiece trial_name;
-  StringPiece group_name;
-  bool activated = false;
-};
-
 // Parses the --force-fieldtrials string |trials_string| into |entries|.
 // Returns true if the string was parsed correctly. On failure, the |entries|
 // array may end up being partially filled.
 bool ParseFieldTrialsString(const std::string& trials_string,
-                            std::vector<FieldTrialStringEntry>* entries) {
+                            std::vector<FieldTrial::State>* entries) {
   const StringPiece trials_string_piece(trials_string);
 
   size_t next_item = 0;
@@ -168,7 +147,7 @@
     if (group_name_end == trials_string.npos)
       group_name_end = trials_string.length();
 
-    FieldTrialStringEntry entry;
+    FieldTrial::State entry;
     // Verify if the trial should be activated or not.
     if (trials_string[next_item] == kActivationMarker) {
       // Name cannot be only the indicator.
@@ -188,64 +167,55 @@
   return true;
 }
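
An illustrative call (hypothetical trial names): entries are
TrialName/GroupName pairs separated by '/', and a leading '*' on a trial name
marks it as activated (kActivationMarker above). Inside this translation unit
one could write:

std::vector<FieldTrial::State> entries;
bool ok = ParseFieldTrialsString("*AlphaTrial/Group1/BetaTrial/Default/",
                                 &entries);
// ok == true, and |entries| holds:
//   {trial_name: "AlphaTrial", group_name: "Group1",  activated: true}
//   {trial_name: "BetaTrial",  group_name: "Default", activated: false}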
 
-void AddFeatureAndFieldTrialFlags(const char* enable_features_switch,
-                                  const char* disable_features_switch,
-                                  CommandLine* cmd_line) {
+#if !BUILDFLAG(IS_IOS)
+void AddFeatureAndFieldTrialFlags(CommandLine* cmd_line) {
   std::string enabled_features;
   std::string disabled_features;
   FeatureList::GetInstance()->GetFeatureOverrides(&enabled_features,
                                                   &disabled_features);
 
   if (!enabled_features.empty())
-    cmd_line->AppendSwitchASCII(enable_features_switch, enabled_features);
+    cmd_line->AppendSwitchASCII(switches::kEnableFeatures, enabled_features);
   if (!disabled_features.empty())
-    cmd_line->AppendSwitchASCII(disable_features_switch, disabled_features);
+    cmd_line->AppendSwitchASCII(switches::kDisableFeatures, disabled_features);
 
   std::string field_trial_states;
-  FieldTrialList::AllStatesToString(&field_trial_states, false);
+  FieldTrialList::AllStatesToString(&field_trial_states);
   if (!field_trial_states.empty()) {
     cmd_line->AppendSwitchASCII(switches::kForceFieldTrials,
                                 field_trial_states);
   }
 }
+#endif  // !BUILDFLAG(IS_IOS)
 
 void OnOutOfMemory(size_t size) {
-#if defined(OS_NACL)
+#if BUILDFLAG(IS_NACL) || defined(STARBOARD)
   NOTREACHED();
 #else
   TerminateBecauseOutOfMemory(size);
 #endif
 }
 
-#if !defined(OS_NACL) && !defined(STARBOARD)
+#if !BUILDFLAG(IS_NACL) && !BUILDFLAG(IS_IOS) && !defined(STARBOARD)
 // Returns whether the operation succeeded.
-bool DeserializeGUIDFromStringPieces(base::StringPiece first,
-                                     base::StringPiece second,
-                                     base::UnguessableToken* guid) {
+bool DeserializeGUIDFromStringPieces(StringPiece first,
+                                     StringPiece second,
+                                     UnguessableToken* guid) {
   uint64_t high = 0;
   uint64_t low = 0;
-  if (!base::StringToUint64(first, &high) ||
-      !base::StringToUint64(second, &low)) {
+  if (!StringToUint64(first, &high) || !StringToUint64(second, &low))
+    return false;
+
+  absl::optional<UnguessableToken> token =
+      UnguessableToken::Deserialize(high, low);
+  if (!token.has_value()) {
     return false;
   }
 
-  *guid = base::UnguessableToken::Deserialize(high, low);
+  *guid = token.value();
   return true;
 }
-
-// Extract a read-only SharedMemoryHandle from an existing |shared_memory|
-// handle. Note that on Android, this also makes the whole region read-only.
-SharedMemoryHandle GetSharedMemoryReadOnlyHandle(SharedMemory* shared_memory) {
-  SharedMemoryHandle result = shared_memory->GetReadOnlyHandle();
-#if defined(OS_ANDROID)
-  // On Android, turn the region read-only. This prevents any future
-  // writable mapping attempts, but the original one in |shm| survives
-  // and is still usable in the current process.
-  result.SetRegionReadOnly();
-#endif  // OS_ANDROID
-  return result;
-}
-#endif  // !OS_NACL
+#endif  // !BUILDFLAG(IS_NACL) && !BUILDFLAG(IS_IOS) && !defined(STARBOARD)
 
 }  // namespace
 
@@ -254,18 +224,28 @@
 const int FieldTrial::kDefaultGroupNumber = 0;
 bool FieldTrial::enable_benchmarking_ = false;
 
-int FieldTrialList::kNoExpirationYear = 0;
-
 //------------------------------------------------------------------------------
 // FieldTrial methods and members.
 
 FieldTrial::EntropyProvider::~EntropyProvider() = default;
 
-FieldTrial::State::State() = default;
+uint32_t FieldTrial::EntropyProvider::GetPseudorandomValue(
+    uint32_t salt,
+    uint32_t output_range) const {
+  // Passing a different salt is sufficient to get a "different" result from
+  // GetEntropyForTrial (ignoring collisions).
+  double entropy_value = GetEntropyForTrial(/*trial_name=*/"", salt);
 
-FieldTrial::State::State(const State& other) = default;
+  // Convert the [0,1) double to an integer in [0, output_range).
+  return static_cast<uint32_t>(GetGroupBoundaryValue(
+      static_cast<FieldTrial::Probability>(output_range), entropy_value));
+}
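
A worked example of the mapping (illustrative numbers; the elided
GetGroupBoundaryValue body presumably scales the [0,1) double by the divisor,
and its visible return statement clamps to divisor - 1 so the result always
stays inside the range):

//   output_range = 100, entropy 0.0    ->  0
//   output_range = 100, entropy 0.57   -> 57
//   output_range = 100, entropy 0.999  -> 99
//   output_range = 100, entropy -> 1.0 -> 99 (clamped, never 100)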
 
-FieldTrial::State::~State() = default;
+FieldTrial::PickleState::PickleState() = default;
+
+FieldTrial::PickleState::PickleState(const PickleState& other) = default;
+
+FieldTrial::PickleState::~PickleState() = default;
 
 bool FieldTrial::FieldTrialEntry::GetTrialAndGroupName(
     StringPiece* trial_name,
@@ -287,7 +267,7 @@
     StringPiece value;
     if (!ReadStringPair(&iter, &key, &value))
       return key.empty();  // Non-empty is bad: got one of a pair.
-    (*params)[key.as_string()] = value.as_string();
+    (*params)[std::string(key)] = std::string(value);
   }
 }
 
@@ -295,7 +275,7 @@
   const char* src =
       reinterpret_cast<const char*>(this) + sizeof(FieldTrialEntry);
 
-  Pickle pickle(src, pickle_size);
+  Pickle pickle(src, checked_cast<size_t>(pickle_size));
   return PickleIterator(pickle);
 }
 
@@ -310,23 +290,8 @@
   return true;
 }
 
-void FieldTrial::Disable() {
-  DCHECK(!group_reported_);
-  enable_field_trial_ = false;
-
-  // In case we are disabled after initialization, we need to switch
-  // the trial to the default group.
-  if (group_ != kNotFinalized) {
-    // Only reset when not already the default group, because in case we were
-    // forced to the default group, the group number may not be
-    // kDefaultGroupNumber, so we should keep it as is.
-    if (group_name_ != default_group_name_)
-      SetGroupChoice(default_group_name_, kDefaultGroupNumber);
-  }
-}
-
-int FieldTrial::AppendGroup(const std::string& name,
-                            Probability group_probability) {
+void FieldTrial::AppendGroup(const std::string& name,
+                             Probability group_probability) {
   // When the group choice was previously forced, we only need to return
   // the id of the chosen group, and anything can be returned for the others.
   if (forced_) {
@@ -336,18 +301,19 @@
       // forced trial, it will not have the same value as the default group
       // number returned from the non-forced |FactoryGetFieldTrial()| call,
       // which takes care to ensure that this does not happen.
-      return group_;
+      return;
     }
     DCHECK_NE(next_group_number_, group_);
     // We still return different numbers each time, in case some callers need
     // them to be different.
-    return next_group_number_++;
+    next_group_number_++;
+    return;
   }
 
   DCHECK_LE(group_probability, divisor_);
   DCHECK_GE(group_probability, 0);
 
-  if (enable_benchmarking_ || !enable_field_trial_)
+  if (enable_benchmarking_)
     group_probability = 0;
 
   accumulated_group_probability_ += group_probability;
@@ -357,19 +323,19 @@
     // This is the group that crossed the random line, so we do the assignment.
     SetGroupChoice(name, next_group_number_);
   }
-  return next_group_number_++;
+  next_group_number_++;
+  return;
 }
 
-int FieldTrial::group() {
+void FieldTrial::Activate() {
   FinalizeGroupChoice();
   if (trial_registered_)
     FieldTrialList::NotifyFieldTrialGroupSelection(this);
-  return group_;
 }
 
 const std::string& FieldTrial::group_name() {
-  // Call |group()| to ensure group gets assigned and observers are notified.
-  group();
+  // Call |Activate()| to ensure group gets assigned and observers are notified.
+  Activate();
   DCHECK(!group_name_.empty());
   return group_name_;
 }
@@ -392,24 +358,28 @@
 
 // static
 void FieldTrial::EnableBenchmarking() {
-  DCHECK_EQ(0u, FieldTrialList::GetFieldTrialCount());
+  // We don't need to see field trials created via CreateFieldTrial() for
+  // benchmarking, because such field trials have only a single group and are
+  // not affected by randomization that |enable_benchmarking_| would disable.
+  DCHECK_EQ(0u, FieldTrialList::GetRandomizedFieldTrialCount());
   enable_benchmarking_ = true;
 }
 
 // static
 FieldTrial* FieldTrial::CreateSimulatedFieldTrial(
-    const std::string& trial_name,
+    StringPiece trial_name,
     Probability total_probability,
-    const std::string& default_group_name,
+    StringPiece default_group_name,
     double entropy_value) {
   return new FieldTrial(trial_name, total_probability, default_group_name,
-                        entropy_value);
+                        entropy_value, /*is_low_anonymity=*/false);
 }
 
-FieldTrial::FieldTrial(const std::string& trial_name,
+FieldTrial::FieldTrial(StringPiece trial_name,
                        const Probability total_probability,
-                       const std::string& default_group_name,
-                       double entropy_value)
+                       StringPiece default_group_name,
+                       double entropy_value,
+                       bool is_low_anonymity)
     : trial_name_(trial_name),
       divisor_(total_probability),
       default_group_name_(default_group_name),
@@ -417,11 +387,11 @@
       accumulated_group_probability_(0),
       next_group_number_(kDefaultGroupNumber + 1),
       group_(kNotFinalized),
-      enable_field_trial_(true),
       forced_(false),
       group_reported_(false),
       trial_registered_(false),
-      ref_(FieldTrialList::FieldTrialAllocator::kReferenceNull) {
+      ref_(FieldTrialList::FieldTrialAllocator::kReferenceNull),
+      is_low_anonymity_(is_low_anonymity) {
   DCHECK_GT(total_probability, 0);
   DCHECK(!trial_name_.empty());
   DCHECK(!default_group_name_.empty())
@@ -446,10 +416,6 @@
 }
 
 void FieldTrial::FinalizeGroupChoice() {
-  FinalizeGroupChoiceImpl(false);
-}
-
-void FieldTrial::FinalizeGroupChoiceImpl(bool is_locked) {
   if (group_ != kNotFinalized)
     return;
   accumulated_group_probability_ = divisor_;
@@ -457,14 +423,10 @@
   // finalized.
   DCHECK(!forced_);
   SetGroupChoice(default_group_name_, kDefaultGroupNumber);
-
-  // Add the field trial to shared memory.
-  if (kUseSharedMemoryForFieldTrials && trial_registered_)
-    FieldTrialList::OnGroupFinalized(is_locked, this);
 }
 
 bool FieldTrial::GetActiveGroup(ActiveGroup* active_group) const {
-  if (!group_reported_ || !enable_field_trial_)
+  if (!group_reported_)
     return false;
   DCHECK_NE(group_, kNotFinalized);
   active_group->trial_name = trial_name_;
@@ -472,15 +434,11 @@
   return true;
 }
 
-bool FieldTrial::GetStateWhileLocked(State* field_trial_state,
-                                     bool include_expired) {
-  if (!include_expired && !enable_field_trial_)
-    return false;
-  FinalizeGroupChoiceImpl(true);
+void FieldTrial::GetStateWhileLocked(PickleState* field_trial_state) {
+  FinalizeGroupChoice();
   field_trial_state->trial_name = &trial_name_;
   field_trial_state->group_name = &group_name_;
   field_trial_state->activated = group_reported_;
-  return true;
 }
 
 //------------------------------------------------------------------------------
@@ -489,24 +447,11 @@
 // static
 FieldTrialList* FieldTrialList::global_ = nullptr;
 
-// static
-bool FieldTrialList::used_without_global_ = false;
-
 FieldTrialList::Observer::~Observer() = default;
 
-FieldTrialList::FieldTrialList(
-    std::unique_ptr<const FieldTrial::EntropyProvider> entropy_provider)
-    : entropy_provider_(std::move(entropy_provider)),
-      observer_list_(new ObserverListThreadSafe<FieldTrialList::Observer>(
-          ObserverListPolicy::EXISTING_ONLY)) {
+FieldTrialList::FieldTrialList() {
   DCHECK(!global_);
-  DCHECK(!used_without_global_);
   global_ = this;
-
-  Time two_years_from_build_time = GetBuildTime() + TimeDelta::FromDays(730);
-  Time::Exploded exploded;
-  two_years_from_build_time.LocalExplode(&exploded);
-  kNoExpirationYear = exploded.year;
 }
 
 FieldTrialList::~FieldTrialList() {
@@ -516,93 +461,40 @@
     it->second->Release();
     registered_.erase(it->first);
   }
+  // Note: If this DCHECK fires in a test that uses ScopedFeatureList, it is
+  // likely caused by nested ScopedFeatureLists being destroyed in a different
+  // order than they are initialized.
   DCHECK_EQ(this, global_);
   global_ = nullptr;
 }
 
 // static
 FieldTrial* FieldTrialList::FactoryGetFieldTrial(
-    const std::string& trial_name,
+    StringPiece trial_name,
     FieldTrial::Probability total_probability,
-    const std::string& default_group_name,
-    const int year,
-    const int month,
-    const int day_of_month,
-    FieldTrial::RandomizationType randomization_type,
-    int* default_group_number) {
-  return FactoryGetFieldTrialWithRandomizationSeed(
-      trial_name, total_probability, default_group_name, year, month,
-      day_of_month, randomization_type, 0, default_group_number, nullptr);
-}
-
-// static
-FieldTrial* FieldTrialList::FactoryGetFieldTrialWithRandomizationSeed(
-    const std::string& trial_name,
-    FieldTrial::Probability total_probability,
-    const std::string& default_group_name,
-    const int year,
-    const int month,
-    const int day_of_month,
-    FieldTrial::RandomizationType randomization_type,
+    StringPiece default_group_name,
+    const FieldTrial::EntropyProvider& entropy_provider,
     uint32_t randomization_seed,
-    int* default_group_number,
-    const FieldTrial::EntropyProvider* override_entropy_provider) {
-  if (default_group_number)
-    *default_group_number = FieldTrial::kDefaultGroupNumber;
+    bool is_low_anonymity) {
   // Check if the field trial has already been created in some other way.
   FieldTrial* existing_trial = Find(trial_name);
   if (existing_trial) {
     CHECK(existing_trial->forced_);
-    // If the default group name differs between the existing forced trial
-    // and this trial, then use a different value for the default group number.
-    if (default_group_number &&
-        default_group_name != existing_trial->default_group_name()) {
-      // If the new default group number corresponds to the group that was
-      // chosen for the forced trial (which has been finalized when it was
-      // forced), then set the default group number to that.
-      if (default_group_name == existing_trial->group_name_internal()) {
-        *default_group_number = existing_trial->group_;
-      } else {
-        // Otherwise, use |kNonConflictingGroupNumber| (-2) for the default
-        // group number, so that it does not conflict with the |AppendGroup()|
-        // result for the chosen group.
-        const int kNonConflictingGroupNumber = -2;
-        static_assert(
-            kNonConflictingGroupNumber != FieldTrial::kDefaultGroupNumber,
-            "The 'non-conflicting' group number conflicts");
-        static_assert(kNonConflictingGroupNumber != FieldTrial::kNotFinalized,
-                      "The 'non-conflicting' group number conflicts");
-        *default_group_number = kNonConflictingGroupNumber;
-      }
-    }
     return existing_trial;
   }
 
-  double entropy_value;
-  if (randomization_type == FieldTrial::ONE_TIME_RANDOMIZED) {
-    // If an override entropy provider is given, use it.
-    const FieldTrial::EntropyProvider* entropy_provider =
-        override_entropy_provider ? override_entropy_provider
-                                  : GetEntropyProviderForOneTimeRandomization();
-    CHECK(entropy_provider);
-    entropy_value = entropy_provider->GetEntropyForTrial(trial_name,
-                                                         randomization_seed);
-  } else {
-    DCHECK_EQ(FieldTrial::SESSION_RANDOMIZED, randomization_type);
-    DCHECK_EQ(0U, randomization_seed);
-    entropy_value = RandDouble();
-  }
+  double entropy_value =
+      entropy_provider.GetEntropyForTrial(trial_name, randomization_seed);
 
-  FieldTrial* field_trial = new FieldTrial(trial_name, total_probability,
-                                           default_group_name, entropy_value);
-  if (GetBuildTime() > CreateTimeFromParams(year, month, day_of_month))
-    field_trial->Disable();
-  FieldTrialList::Register(field_trial);
+  FieldTrial* field_trial =
+      new FieldTrial(trial_name, total_probability, default_group_name,
+                     entropy_value, is_low_anonymity);
+  FieldTrialList::Register(field_trial, /*is_randomized_trial=*/true);
   return field_trial;
 }
 
 // static
-FieldTrial* FieldTrialList::Find(const std::string& trial_name) {
+FieldTrial* FieldTrialList::Find(StringPiece trial_name) {
   if (!global_)
     return nullptr;
   AutoLock auto_lock(global_->lock_);
@@ -610,15 +502,7 @@
 }
 
 // static
-int FieldTrialList::FindValue(const std::string& trial_name) {
-  FieldTrial* field_trial = Find(trial_name);
-  if (field_trial)
-    return field_trial->group();
-  return FieldTrial::kNotFinalized;
-}
-
-// static
-std::string FieldTrialList::FindFullName(const std::string& trial_name) {
+std::string FieldTrialList::FindFullName(StringPiece trial_name) {
   FieldTrial* field_trial = Find(trial_name);
   if (field_trial)
     return field_trial->group_name();
@@ -626,45 +510,51 @@
 }
 
 // static
-bool FieldTrialList::TrialExists(const std::string& trial_name) {
+bool FieldTrialList::TrialExists(StringPiece trial_name) {
   return Find(trial_name) != nullptr;
 }
 
 // static
-bool FieldTrialList::IsTrialActive(const std::string& trial_name) {
+bool FieldTrialList::IsTrialActive(StringPiece trial_name) {
   FieldTrial* field_trial = Find(trial_name);
   FieldTrial::ActiveGroup active_group;
   return field_trial && field_trial->GetActiveGroup(&active_group);
 }
 
 // static
-void FieldTrialList::StatesToString(std::string* output) {
-  FieldTrial::ActiveGroups active_groups;
-  GetActiveFieldTrialGroups(&active_groups);
-  for (FieldTrial::ActiveGroups::const_iterator it = active_groups.begin();
-       it != active_groups.end(); ++it) {
+std::vector<FieldTrial::State> FieldTrialList::GetAllFieldTrialStates(
+    PassKey<test::ScopedFeatureList>) {
+  std::vector<FieldTrial::State> states;
+
+  if (!global_)
+    return states;
+
+  AutoLock auto_lock(global_->lock_);
+  for (const auto& registered : global_->registered_) {
+    FieldTrial::PickleState trial;
+    registered.second->GetStateWhileLocked(&trial);
     DCHECK_EQ(std::string::npos,
-              it->trial_name.find(kPersistentStringSeparator));
+              trial.trial_name->find(kPersistentStringSeparator));
     DCHECK_EQ(std::string::npos,
-              it->group_name.find(kPersistentStringSeparator));
-    output->append(it->trial_name);
-    output->append(1, kPersistentStringSeparator);
-    output->append(it->group_name);
-    output->append(1, kPersistentStringSeparator);
+              trial.group_name->find(kPersistentStringSeparator));
+    FieldTrial::State entry;
+    entry.activated = trial.activated;
+    entry.trial_name = *trial.trial_name;
+    entry.group_name = *trial.group_name;
+    states.push_back(std::move(entry));
   }
+  return states;
 }
 
 // static
-void FieldTrialList::AllStatesToString(std::string* output,
-                                       bool include_expired) {
+void FieldTrialList::AllStatesToString(std::string* output) {
   if (!global_)
     return;
   AutoLock auto_lock(global_->lock_);
 
   for (const auto& registered : global_->registered_) {
-    FieldTrial::State trial;
-    if (!registered.second->GetStateWhileLocked(&trial, include_expired))
-      continue;
+    FieldTrial::PickleState trial;
+    registered.second->GetStateWhileLocked(&trial);
     DCHECK_EQ(std::string::npos,
               trial.trial_name->find(kPersistentStringSeparator));
     DCHECK_EQ(std::string::npos,
@@ -679,15 +569,13 @@
 }
 
 // static
-std::string FieldTrialList::AllParamsToString(bool include_expired,
-                                              EscapeDataFunc encode_data_func) {
+std::string FieldTrialList::AllParamsToString(EscapeDataFunc encode_data_func) {
   FieldTrialParamAssociator* params_associator =
       FieldTrialParamAssociator::GetInstance();
   std::string output;
   for (const auto& registered : GetRegisteredTrials()) {
-    FieldTrial::State trial;
-    if (!registered.second->GetStateWhileLocked(&trial, include_expired))
-      continue;
+    FieldTrial::PickleState trial;
+    registered.second->GetStateWhileLocked(&trial);
     DCHECK_EQ(std::string::npos,
               trial.trial_name->find(kPersistentStringSeparator));
     DCHECK_EQ(std::string::npos,
@@ -725,32 +613,23 @@
 // static
 void FieldTrialList::GetActiveFieldTrialGroups(
     FieldTrial::ActiveGroups* active_groups) {
-  DCHECK(active_groups->empty());
-  if (!global_)
-    return;
-  AutoLock auto_lock(global_->lock_);
-
-  for (auto it = global_->registered_.begin(); it != global_->registered_.end();
-       ++it) {
-    FieldTrial::ActiveGroup active_group;
-    if (it->second->GetActiveGroup(&active_group))
-      active_groups->push_back(active_group);
-  }
+  GetActiveFieldTrialGroupsInternal(active_groups,
+                                    /*include_low_anonymity=*/false);
 }
 
 // static
 void FieldTrialList::GetActiveFieldTrialGroupsFromString(
     const std::string& trials_string,
     FieldTrial::ActiveGroups* active_groups) {
-  std::vector<FieldTrialStringEntry> entries;
+  std::vector<FieldTrial::State> entries;
   if (!ParseFieldTrialsString(trials_string, &entries))
     return;
 
   for (const auto& entry : entries) {
     if (entry.activated) {
       FieldTrial::ActiveGroup group;
-      group.trial_name = entry.trial_name.as_string();
-      group.group_name = entry.group_name.as_string();
+      group.trial_name = std::string(entry.trial_name);
+      group.group_name = std::string(entry.group_name);
       active_groups->push_back(group);
     }
   }
@@ -758,12 +637,15 @@
 
 // static
 void FieldTrialList::GetInitiallyActiveFieldTrials(
-    const base::CommandLine& command_line,
+    const CommandLine& command_line,
     FieldTrial::ActiveGroups* active_groups) {
   DCHECK(global_);
   DCHECK(global_->create_trials_from_command_line_called_);
 
   if (!global_->field_trial_allocator_) {
+    UmaHistogramBoolean(
+        "ChildProcess.FieldTrials.GetInitiallyActiveFieldTrials.FromString",
+        true);
     GetActiveFieldTrialGroupsFromString(
         command_line.GetSwitchValueASCII(switches::kForceFieldTrials),
         active_groups);
@@ -780,84 +662,52 @@
     if (subtle::NoBarrier_Load(&entry->activated) &&
         entry->GetTrialAndGroupName(&trial_name, &group_name)) {
       FieldTrial::ActiveGroup group;
-      group.trial_name = trial_name.as_string();
-      group.group_name = group_name.as_string();
+      group.trial_name = std::string(trial_name);
+      group.group_name = std::string(group_name);
       active_groups->push_back(group);
     }
   }
 }
 
 // static
-bool FieldTrialList::CreateTrialsFromString(
-    const std::string& trials_string,
-    const std::set<std::string>& ignored_trial_names) {
+bool FieldTrialList::CreateTrialsFromString(const std::string& trials_string) {
   DCHECK(global_);
   if (trials_string.empty() || !global_)
     return true;
 
-  std::vector<FieldTrialStringEntry> entries;
+  std::vector<FieldTrial::State> entries;
   if (!ParseFieldTrialsString(trials_string, &entries))
     return false;
 
-  for (const auto& entry : entries) {
-    const std::string trial_name = entry.trial_name.as_string();
-    const std::string group_name = entry.group_name.as_string();
-
-    if (ContainsKey(ignored_trial_names, trial_name)) {
-      // This is to warn that the field trial forced through command-line
-      // input is unforcable.
-      // Use --enable-logging or --enable-logging=stderr to see this warning.
-      LOG(WARNING) << "Field trial: " << trial_name << " cannot be forced.";
-      continue;
-    }
-
-    FieldTrial* trial = CreateFieldTrial(trial_name, group_name);
-    if (!trial)
-      return false;
-    if (entry.activated) {
-      // Call |group()| to mark the trial as "used" and notify observers, if
-      // any. This is useful to ensure that field trials created in child
-      // processes are properly reported in crash reports.
-      trial->group();
-    }
-  }
-  return true;
+  return CreateTrialsFromFieldTrialStatesInternal(entries);
 }
 
 // static
-void FieldTrialList::CreateTrialsFromCommandLine(
-    const CommandLine& cmd_line,
-    const char* field_trial_handle_switch,
-    int fd_key) {
+bool FieldTrialList::CreateTrialsFromFieldTrialStates(
+    PassKey<test::ScopedFeatureList>,
+    const std::vector<FieldTrial::State>& entries) {
+  return CreateTrialsFromFieldTrialStatesInternal(entries);
+}
+
+// static
+void FieldTrialList::CreateTrialsFromCommandLine(const CommandLine& cmd_line,
+                                                 uint32_t fd_key) {
   global_->create_trials_from_command_line_called_ = true;
 
-#if defined(OS_WIN) || defined(OS_FUCHSIA)
-  if (cmd_line.HasSwitch(field_trial_handle_switch)) {
+#if !BUILDFLAG(IS_NACL) && !BUILDFLAG(IS_IOS) && !defined(STARBOARD)
+  if (cmd_line.HasSwitch(switches::kFieldTrialHandle)) {
     std::string switch_value =
-        cmd_line.GetSwitchValueASCII(field_trial_handle_switch);
-    bool result = CreateTrialsFromSwitchValue(switch_value);
+        cmd_line.GetSwitchValueASCII(switches::kFieldTrialHandle);
+    bool result = CreateTrialsFromSwitchValue(switch_value, fd_key);
     UMA_HISTOGRAM_BOOLEAN("ChildProcess.FieldTrials.CreateFromShmemSuccess",
                           result);
     DCHECK(result);
   }
-#elif defined(OS_POSIX) && !defined(OS_NACL) && !defined(STARBOARD)
-  // On POSIX, we check if the handle is valid by seeing if the browser process
-  // sent over the switch (we don't care about the value). Invalid handles
-  // occur in some browser tests which don't initialize the allocator.
-  if (cmd_line.HasSwitch(field_trial_handle_switch)) {
-    std::string switch_value =
-        cmd_line.GetSwitchValueASCII(field_trial_handle_switch);
-    bool result = CreateTrialsFromDescriptor(fd_key, switch_value);
-    UMA_HISTOGRAM_BOOLEAN("ChildProcess.FieldTrials.CreateFromShmemSuccess",
-                          result);
-    DCHECK(result);
-  }
-#endif
+#endif  // !BUILDFLAG(IS_NACL) && !BUILDFLAG(IS_IOS)
 
   if (cmd_line.HasSwitch(switches::kForceFieldTrials)) {
     bool result = FieldTrialList::CreateTrialsFromString(
-        cmd_line.GetSwitchValueASCII(switches::kForceFieldTrials),
-        std::set<std::string>());
+        cmd_line.GetSwitchValueASCII(switches::kForceFieldTrials));
     UMA_HISTOGRAM_BOOLEAN("ChildProcess.FieldTrials.CreateFromSwitchSuccess",
                           result);
     DCHECK(result);
@@ -866,107 +716,93 @@
 
 // static
 void FieldTrialList::CreateFeaturesFromCommandLine(
-    const base::CommandLine& command_line,
-    const char* enable_features_switch,
-    const char* disable_features_switch,
+    const CommandLine& command_line,
     FeatureList* feature_list) {
   // Fallback to command line if not using shared memory.
-  if (!kUseSharedMemoryForFieldTrials ||
-      !global_->field_trial_allocator_.get()) {
+  if (!global_->field_trial_allocator_.get()) {
     return feature_list->InitializeFromCommandLine(
-        command_line.GetSwitchValueASCII(enable_features_switch),
-        command_line.GetSwitchValueASCII(disable_features_switch));
+        command_line.GetSwitchValueASCII(switches::kEnableFeatures),
+        command_line.GetSwitchValueASCII(switches::kDisableFeatures));
   }
 
   feature_list->InitializeFromSharedMemory(
       global_->field_trial_allocator_.get());
 }
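+
+// Usage note for CreateFeaturesFromCommandLine() above: in the command-line
+// fallback path, the switches use the usual comma-separated feature-list
+// syntax. A hedged example (feature names are hypothetical):
+//
+//   --enable-features=FeatureA,FeatureB --disable-features=FeatureC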
 
-#if defined(OS_WIN)
+#if !BUILDFLAG(IS_IOS)
 // static
-void FieldTrialList::AppendFieldTrialHandleIfNeeded(
-    HandlesToInheritVector* handles) {
-  if (!global_)
+void FieldTrialList::PopulateLaunchOptionsWithFieldTrialState(
+    CommandLine* command_line,
+    LaunchOptions* launch_options) {
+  DCHECK(command_line);
+
+  // Use shared memory to communicate field trial state to child processes.
+  // The browser is the only process that has write access to the shared memory.
+  InstantiateFieldTrialAllocatorIfNeeded();
+
+  // If the readonly handle did not get created, fall back to flags.
+  if (!global_ || !global_->readonly_allocator_region_.IsValid()) {
+    UmaHistogramBoolean(
+        "ChildProcess.FieldTrials.PopulateLaunchOptions.CommandLine", true);
+    AddFeatureAndFieldTrialFlags(command_line);
     return;
-  if (kUseSharedMemoryForFieldTrials) {
-    InstantiateFieldTrialAllocatorIfNeeded();
-    if (global_->readonly_allocator_handle_.IsValid())
-      handles->push_back(global_->readonly_allocator_handle_.GetHandle());
+  }
+
+#if !BUILDFLAG(IS_NACL) && !defined(STARBOARD)
+  global_->field_trial_allocator_->UpdateTrackingHistograms();
+  std::string switch_value = SerializeSharedMemoryRegionMetadata(
+      global_->readonly_allocator_region_, launch_options);
+  command_line->AppendSwitchASCII(switches::kFieldTrialHandle, switch_value);
+#endif  // !BUILDFLAG(IS_NACL) && !defined(STARBOARD)
+
+  // Append --enable-features and --disable-features switches corresponding
+  // to the features enabled on the command-line, so that child and browser
+  // process command lines match and clearly show what has been specified
+  // explicitly by the user.
+  std::string enabled_features;
+  std::string disabled_features;
+  FeatureList::GetInstance()->GetCommandLineFeatureOverrides(
+      &enabled_features, &disabled_features);
+
+  if (!enabled_features.empty()) {
+    command_line->AppendSwitchASCII(switches::kEnableFeatures,
+                                    enabled_features);
+  }
+  if (!disabled_features.empty()) {
+    command_line->AppendSwitchASCII(switches::kDisableFeatures,
+                                    disabled_features);
   }
 }
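+
+// A minimal browser-side sketch of the method above (hedged; the
+// LaunchProcess call site and variable names are illustrative, not part of
+// this file):
+//
+//   CommandLine child_cmd_line = ...;
+//   LaunchOptions launch_options;
+//   FieldTrialList::PopulateLaunchOptionsWithFieldTrialState(&child_cmd_line,
+//                                                            &launch_options);
+//   Process child = LaunchProcess(child_cmd_line, launch_options);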
-#elif defined(OS_FUCHSIA) || defined(STARBOARD)
-// TODO(fuchsia): Implement shared-memory configuration (crbug.com/752368).
-#elif defined(OS_POSIX) && !defined(OS_NACL)
+#endif  // !BUILDFLAG(IS_IOS)
+
+#if BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_APPLE) && !BUILDFLAG(IS_NACL)
 // static
-SharedMemoryHandle FieldTrialList::GetFieldTrialHandle() {
-  if (global_ && kUseSharedMemoryForFieldTrials) {
-    InstantiateFieldTrialAllocatorIfNeeded();
-    // We check for an invalid handle where this gets called.
-    return global_->readonly_allocator_handle_;
-  }
-  return SharedMemoryHandle();
-}
+int FieldTrialList::GetFieldTrialDescriptor() {
+  InstantiateFieldTrialAllocatorIfNeeded();
+  if (!global_ || !global_->readonly_allocator_region_.IsValid())
+    return -1;
+
+#if BUILDFLAG(IS_ANDROID) || defined(STARBOARD)
+  return global_->readonly_allocator_region_.GetPlatformHandle();
+#else
+  return global_->readonly_allocator_region_.GetPlatformHandle().fd;
 #endif
+}
+#endif  // BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_APPLE) && !BUILDFLAG(IS_NACL)
 
 // static
-void FieldTrialList::CopyFieldTrialStateToFlags(
-    const char* field_trial_handle_switch,
-    const char* enable_features_switch,
-    const char* disable_features_switch,
-    CommandLine* cmd_line) {
-  // TODO(lawrencewu): Ideally, having the global would be guaranteed. However,
-  // content browser tests currently don't create a FieldTrialList because they
-  // don't run ChromeBrowserMainParts code where it's done for Chrome.
-  // Some tests depend on the enable and disable features flag switch, though,
-  // so we can still add those even though AllStatesToString() will be a no-op.
-  if (!global_) {
-    AddFeatureAndFieldTrialFlags(enable_features_switch,
-                                 disable_features_switch, cmd_line);
-    return;
-  }
+ReadOnlySharedMemoryRegion
+FieldTrialList::DuplicateFieldTrialSharedMemoryForTesting() {
+  if (!global_)
+    return ReadOnlySharedMemoryRegion();
 
-  // Use shared memory to pass the state if the feature is enabled, otherwise
-  // fallback to passing it via the command line as a string.
-  if (kUseSharedMemoryForFieldTrials) {
-    InstantiateFieldTrialAllocatorIfNeeded();
-    // If the readonly handle didn't get duplicated properly, then fallback to
-    // original behavior.
-    if (!global_->readonly_allocator_handle_.IsValid()) {
-      AddFeatureAndFieldTrialFlags(enable_features_switch,
-                                   disable_features_switch, cmd_line);
-      return;
-    }
-
-    global_->field_trial_allocator_->UpdateTrackingHistograms();
-    std::string switch_value = SerializeSharedMemoryHandleMetadata(
-        global_->readonly_allocator_handle_);
-    cmd_line->AppendSwitchASCII(field_trial_handle_switch, switch_value);
-
-    // Append --enable-features and --disable-features switches corresponding
-    // to the features enabled on the command-line, so that child and browser
-    // process command lines match and clearly show what has been specified
-    // explicitly by the user.
-    std::string enabled_features;
-    std::string disabled_features;
-    FeatureList::GetInstance()->GetCommandLineFeatureOverrides(
-        &enabled_features, &disabled_features);
-
-    if (!enabled_features.empty())
-      cmd_line->AppendSwitchASCII(enable_features_switch, enabled_features);
-    if (!disabled_features.empty())
-      cmd_line->AppendSwitchASCII(disable_features_switch, disabled_features);
-
-    return;
-  }
-
-  AddFeatureAndFieldTrialFlags(enable_features_switch, disable_features_switch,
-                               cmd_line);
+  return global_->readonly_allocator_region_.Duplicate();
 }
 
 // static
-FieldTrial* FieldTrialList::CreateFieldTrial(
-    const std::string& name,
-    const std::string& group_name) {
+FieldTrial* FieldTrialList::CreateFieldTrial(StringPiece name,
+                                             StringPiece group_name,
+                                             bool is_low_anonymity) {
   DCHECK(global_);
   DCHECK_GE(name.size(), 0u);
   DCHECK_GE(group_name.size(), 0u);
@@ -982,8 +818,11 @@
     return field_trial;
   }
   const int kTotalProbability = 100;
-  field_trial = new FieldTrial(name, kTotalProbability, group_name, 0);
-  FieldTrialList::Register(field_trial);
+  field_trial =
+      new FieldTrial(name, kTotalProbability, group_name, 0, is_low_anonymity);
+  // The group choice will be finalized in this method, so
+  // |is_randomized_trial| should be false.
+  FieldTrialList::Register(field_trial, /*is_randomized_trial=*/false);
   // Force the trial, which will also finalize the group choice.
   field_trial->SetForced();
   return field_trial;
@@ -991,43 +830,14 @@
 
 // static
 bool FieldTrialList::AddObserver(Observer* observer) {
-  if (!global_)
-    return false;
-  global_->observer_list_->AddObserver(observer);
-  return true;
+  return FieldTrialList::AddObserverInternal(observer,
+                                             /*include_low_anonymity=*/false);
 }
 
 // static
 void FieldTrialList::RemoveObserver(Observer* observer) {
-  if (!global_)
-    return;
-  global_->observer_list_->RemoveObserver(observer);
-}
-
-// static
-void FieldTrialList::SetSynchronousObserver(Observer* observer) {
-  DCHECK(!global_->synchronous_observer_);
-  global_->synchronous_observer_ = observer;
-}
-
-// static
-void FieldTrialList::RemoveSynchronousObserver(Observer* observer) {
-  DCHECK_EQ(global_->synchronous_observer_, observer);
-  global_->synchronous_observer_ = nullptr;
-}
-
-// static
-void FieldTrialList::OnGroupFinalized(bool is_locked, FieldTrial* field_trial) {
-  if (!global_)
-    return;
-  if (is_locked) {
-    AddToAllocatorWhileLocked(global_->field_trial_allocator_.get(),
-                              field_trial);
-  } else {
-    AutoLock auto_lock(global_->lock_);
-    AddToAllocatorWhileLocked(global_->field_trial_allocator_.get(),
-                              field_trial);
-  }
+  FieldTrialList::RemoveObserverInternal(observer,
+                                         /*include_low_anonymity=*/false);
 }
 
 // static
@@ -1035,36 +845,42 @@
   if (!global_)
     return;
 
+  std::vector<Observer*> local_observers;
+  std::vector<Observer*> local_observers_including_low_anonymity;
+
   {
     AutoLock auto_lock(global_->lock_);
     if (field_trial->group_reported_)
       return;
     field_trial->group_reported_ = true;
 
-    if (!field_trial->enable_field_trial_)
-      return;
+    ++global_->num_ongoing_notify_field_trial_group_selection_calls_;
 
-    if (kUseSharedMemoryForFieldTrials)
-      ActivateFieldTrialEntryWhileLocked(field_trial);
+    ActivateFieldTrialEntryWhileLocked(field_trial);
+
+    // Copy observers to a local variable to access outside the scope of the
+    // lock. Since removing observers concurrently with this method is
+    // disallowed, pointers should remain valid while observers are notified.
+    local_observers = global_->observers_;
+    local_observers_including_low_anonymity =
+        global_->observers_including_low_anonymity_;
   }
 
-  // Recording for stability debugging has to be done inline as a task posted
-  // to an observer may not get executed before a crash.
-  base::debug::GlobalActivityTracker* tracker =
-      base::debug::GlobalActivityTracker::Get();
-  if (tracker) {
-    tracker->RecordFieldTrial(field_trial->trial_name(),
-                              field_trial->group_name_internal());
+  if (!field_trial->is_low_anonymity_) {
+    for (Observer* observer : local_observers) {
+      observer->OnFieldTrialGroupFinalized(field_trial->trial_name(),
+                                           field_trial->group_name_internal());
+    }
   }
 
-  if (global_->synchronous_observer_) {
-    global_->synchronous_observer_->OnFieldTrialGroupFinalized(
-        field_trial->trial_name(), field_trial->group_name_internal());
+  for (Observer* observer : local_observers_including_low_anonymity) {
+    observer->OnFieldTrialGroupFinalized(field_trial->trial_name(),
+                                         field_trial->group_name_internal());
   }
 
-  global_->observer_list_->Notify(
-      FROM_HERE, &FieldTrialList::Observer::OnFieldTrialGroupFinalized,
-      field_trial->trial_name(), field_trial->group_name_internal());
+  int previous_num_ongoing_notify_field_trial_group_selection_calls =
+      global_->num_ongoing_notify_field_trial_group_selection_calls_--;
+  DCHECK_GT(previous_num_ongoing_notify_field_trial_group_selection_calls, 0);
 }
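+
+// A hedged sketch of an observer, with the callback signature inferred from
+// the notification calls above (the class name is hypothetical):
+//
+//   class LoggingObserver : public FieldTrialList::Observer {
+//    public:
+//     void OnFieldTrialGroupFinalized(const std::string& trial_name,
+//                                     const std::string& group_name) override {
+//       VLOG(1) << "Field trial " << trial_name << " -> " << group_name;
+//     }
+//   };
+//
+// Such an observer would be registered via FieldTrialList::AddObserver().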
 
 // static
@@ -1076,6 +892,14 @@
 }
 
 // static
+size_t FieldTrialList::GetRandomizedFieldTrialCount() {
+  if (!global_)
+    return 0;
+  AutoLock auto_lock(global_->lock_);
+  return global_->num_registered_randomized_trials_;
+}
+
+// static
 bool FieldTrialList::GetParamsFromSharedMemory(
     FieldTrial* field_trial,
     std::map<std::string, std::string>* params) {
@@ -1105,7 +929,8 @@
 
   size_t allocated_size =
       global_->field_trial_allocator_->GetAllocSize(field_trial->ref_);
-  size_t actual_size = sizeof(FieldTrial::FieldTrialEntry) + entry->pickle_size;
+  uint64_t actual_size =
+      sizeof(FieldTrial::FieldTrialEntry) + entry->pickle_size;
   if (allocated_size < actual_size)
     return false;
 
@@ -1149,6 +974,9 @@
     size_t total_size = sizeof(FieldTrial::FieldTrialEntry) + pickle.size();
     FieldTrial::FieldTrialEntry* new_entry =
         allocator->New<FieldTrial::FieldTrialEntry>(total_size);
+    DCHECK(new_entry)
+        << "Failed to allocate a new entry, likely because the allocator is "
+           "full. Consider increasing kFieldTrialAllocationSize.";
     subtle::NoBarrier_Store(&new_entry->activated,
                             subtle::NoBarrier_Load(&prev_entry->activated));
     new_entry->pickle_size = pickle.size();
@@ -1162,7 +990,7 @@
     // Update the ref on the field trial and add it to the list to be made
     // iterable.
     FieldTrial::FieldTrialRef new_ref = allocator->GetAsReference(new_entry);
-    FieldTrial* trial = global_->PreLockedFind(trial_name.as_string());
+    FieldTrial* trial = global_->PreLockedFind(trial_name);
     trial->ref_ = new_ref;
     new_refs.push_back(new_ref);
 
@@ -1203,152 +1031,200 @@
 }
 
 // static
-bool FieldTrialList::IsGlobalSetForTesting() {
-  return global_ != nullptr;
+FieldTrialList* FieldTrialList::GetInstance() {
+  return global_;
 }
 
 // static
-std::string FieldTrialList::SerializeSharedMemoryHandleMetadata(
-    const SharedMemoryHandle& shm) {
+FieldTrialList* FieldTrialList::BackupInstanceForTesting() {
+  FieldTrialList* instance = global_;
+  global_ = nullptr;
+  return instance;
+}
+
+// static
+void FieldTrialList::RestoreInstanceForTesting(FieldTrialList* instance) {
+  global_ = instance;
+}
+
+#ifdef COBALT_PENDING_CLEAN_UP
+// TODO(b/298237462): Try to enable the below code.
+#elif !BUILDFLAG(IS_NACL) && !BUILDFLAG(IS_IOS)
+
+// static
+std::string FieldTrialList::SerializeSharedMemoryRegionMetadata(
+    const ReadOnlySharedMemoryRegion& shm,
+    LaunchOptions* launch_options) {
   std::stringstream ss;
-#if defined(OS_WIN)
+#if BUILDFLAG(IS_WIN)
+  // An elevated process might not need this, although it is harmless.
+  launch_options->handles_to_inherit.push_back(shm.GetPlatformHandle());
+
   // Tell the child process the name of the inherited HANDLE.
-  uintptr_t uintptr_handle = reinterpret_cast<uintptr_t>(shm.GetHandle());
+  uintptr_t uintptr_handle =
+      reinterpret_cast<uintptr_t>(shm.GetPlatformHandle());
   ss << uintptr_handle << ",";
-#elif defined(OS_FUCHSIA)
-  ss << shm.GetHandle() << ",";
-#elif defined(STARBOARD)
-  ss << "unsupported"
-     << ",";
-#elif !defined(OS_POSIX)
+  if (launch_options->elevated) {
+    // Tell the child that it must open its parent and grab the handle.
+    ss << "p,";
+  } else {
+    // Tell the child that it inherited the handle.
+    ss << "i,";
+  }
+#elif BUILDFLAG(IS_MAC)
+  launch_options->mach_ports_for_rendezvous.emplace(
+      kFieldTrialRendezvousKey,
+      MachRendezvousPort(shm.GetPlatformHandle(), MACH_MSG_TYPE_COPY_SEND));
+
+  // The handle on Mac is looked up directly by the child, rather than being
+  // transferred to the child over the command line.
+  ss << kFieldTrialRendezvousKey << ",";
+  // Tell the child that the handle is looked up.
+  ss << "r,";
+#elif BUILDFLAG(IS_FUCHSIA)
+  zx::vmo transfer_vmo;
+  zx_status_t status = shm.GetPlatformHandle()->duplicate(
+      ZX_RIGHT_READ | ZX_RIGHT_MAP | ZX_RIGHT_TRANSFER | ZX_RIGHT_GET_PROPERTY |
+          ZX_RIGHT_DUPLICATE,
+      &transfer_vmo);
+  ZX_CHECK(status == ZX_OK, status) << "zx_handle_duplicate";
+
+  // The handle on Fuchsia is passed as part of the launch handles to transfer.
+  uint32_t handle_id = LaunchOptions::AddHandleToTransfer(
+      &launch_options->handles_to_transfer, transfer_vmo.release());
+  ss << handle_id << ",";
+  // Tell the child that the handle is inherited.
+  ss << "i,";
+#elif BUILDFLAG(IS_POSIX)
+  // This is actually unused in the child process, but allows non-Mac Posix
+  // platforms to have the same format as the others.
+  ss << "0,i,";
+#else
 #error Unsupported OS
 #endif
 
-  base::UnguessableToken guid = shm.GetGUID();
+  UnguessableToken guid = shm.GetGUID();
   ss << guid.GetHighForSerialization() << "," << guid.GetLowForSerialization();
   ss << "," << shm.GetSize();
   return ss.str();
 }
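+
+// For illustration only (handle, GUID, and size values are made up), a value
+// serialized by the method above follows the format parsed below,
+// "handle,[irp],guid-high,guid-low,size"; on Windows it might look like:
+//
+//   "3840,i,12345678901234567890,9876543210987654321,131072"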
 
-#if defined(OS_WIN) || defined(OS_FUCHSIA)
-
 // static
-SharedMemoryHandle FieldTrialList::DeserializeSharedMemoryHandleMetadata(
-    const std::string& switch_value) {
-  std::vector<base::StringPiece> tokens = base::SplitStringPiece(
-      switch_value, ",", base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL);
+ReadOnlySharedMemoryRegion
+FieldTrialList::DeserializeSharedMemoryRegionMetadata(
+    const std::string& switch_value,
+    int fd) {
+  // Format: "handle,[irp],guid-high,guid-low,size".
+  std::vector<StringPiece> tokens =
+      SplitStringPiece(switch_value, ",", KEEP_WHITESPACE, SPLIT_WANT_ALL);
 
-  if (tokens.size() != 4)
-    return SharedMemoryHandle();
+  if (tokens.size() != 5)
+    return ReadOnlySharedMemoryRegion();
 
   int field_trial_handle = 0;
-  if (!base::StringToInt(tokens[0], &field_trial_handle))
-    return SharedMemoryHandle();
-#if defined(OS_FUCHSIA)
-  zx_handle_t handle = static_cast<zx_handle_t>(field_trial_handle);
-#elif defined(OS_WIN)
+  if (!StringToInt(tokens[0], &field_trial_handle))
+    return ReadOnlySharedMemoryRegion();
+
+    // tokens[1] has a fixed value but is ignored on all platforms except
+    // Windows, where it can be 'i' or 'p' to indicate that the handle is
+    // inherited or must be obtained from the parent.
+#if BUILDFLAG(IS_WIN)
   HANDLE handle = reinterpret_cast<HANDLE>(field_trial_handle);
-  if (base::IsCurrentProcessElevated()) {
-    // base::LaunchElevatedProcess doesn't have a way to duplicate the handle,
-    // but this process can since by definition it's not sandboxed.
-    base::ProcessId parent_pid = base::GetParentProcessId(GetCurrentProcess());
+  if (tokens[1] == "p") {
+    DCHECK(IsCurrentProcessElevated());
+    // LaunchProcess doesn't have a way to duplicate the handle, but this
+    // process can since by definition it's not sandboxed.
+    ProcessId parent_pid = GetParentProcessId(GetCurrentProcess());
     HANDLE parent_handle = OpenProcess(PROCESS_ALL_ACCESS, FALSE, parent_pid);
+    // TODO(https://crbug.com/916461): Duplicating the handle is known to fail
+    // with ERROR_ACCESS_DENIED when the parent process is being torn down. This
+    // should be handled elegantly somehow.
     DuplicateHandle(parent_handle, handle, GetCurrentProcess(), &handle, 0,
                     FALSE, DUPLICATE_SAME_ACCESS);
     CloseHandle(parent_handle);
+  } else if (tokens[1] != "i") {
+    return ReadOnlySharedMemoryRegion();
   }
-#endif  // defined(OS_WIN)
-
-  base::UnguessableToken guid;
-  if (!DeserializeGUIDFromStringPieces(tokens[1], tokens[2], &guid))
-    return SharedMemoryHandle();
-
-  int size;
-  if (!base::StringToInt(tokens[3], &size))
-    return SharedMemoryHandle();
-
-  return SharedMemoryHandle(handle, static_cast<size_t>(size), guid);
-}
-
-#elif defined(OS_POSIX) && !defined(OS_NACL)
-
-// static
-SharedMemoryHandle FieldTrialList::DeserializeSharedMemoryHandleMetadata(
-    int fd,
-    const std::string& switch_value) {
-  std::vector<base::StringPiece> tokens = base::SplitStringPiece(
-      switch_value, ",", base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL);
-
-  if (tokens.size() != 3)
-    return SharedMemoryHandle();
-
-  base::UnguessableToken guid;
-  if (!DeserializeGUIDFromStringPieces(tokens[0], tokens[1], &guid))
-    return SharedMemoryHandle();
-
-  int size;
-  if (!base::StringToInt(tokens[2], &size))
-    return SharedMemoryHandle();
-
-  return SharedMemoryHandle(FileDescriptor(fd, true), static_cast<size_t>(size),
-                            guid);
-}
-
+  win::ScopedHandle scoped_handle(handle);
+#elif BUILDFLAG(IS_MAC)
+  auto* rendezvous = MachPortRendezvousClient::GetInstance();
+  if (!rendezvous)
+    return ReadOnlySharedMemoryRegion();
+  mac::ScopedMachSendRight scoped_handle = rendezvous->TakeSendRight(
+      static_cast<MachPortsForRendezvous::key_type>(field_trial_handle));
+  if (!scoped_handle.is_valid())
+    return ReadOnlySharedMemoryRegion();
+#elif BUILDFLAG(IS_FUCHSIA)
+  static bool startup_handle_taken = false;
+  DCHECK(!startup_handle_taken) << "Shared memory region initialized twice";
+  zx::vmo scoped_handle(
+      zx_take_startup_handle(checked_cast<uint32_t>(field_trial_handle)));
+  startup_handle_taken = true;
+  if (!scoped_handle.is_valid())
+    return ReadOnlySharedMemoryRegion();
+#elif BUILDFLAG(IS_POSIX)
+  if (fd == -1)
+    return ReadOnlySharedMemoryRegion();
+  ScopedFD scoped_handle(fd);
+#else
+#error Unsupported OS
 #endif
 
-#if defined(OS_WIN) || defined(OS_FUCHSIA)
+  UnguessableToken guid;
+  if (!DeserializeGUIDFromStringPieces(tokens[2], tokens[3], &guid))
+    return ReadOnlySharedMemoryRegion();
+
+  int size;
+  if (!StringToInt(tokens[4], &size))
+    return ReadOnlySharedMemoryRegion();
+
+  auto platform_handle = subtle::PlatformSharedMemoryRegion::Take(
+      std::move(scoped_handle),
+      subtle::PlatformSharedMemoryRegion::Mode::kReadOnly,
+      static_cast<size_t>(size), guid);
+  return ReadOnlySharedMemoryRegion::Deserialize(std::move(platform_handle));
+}
+
 // static
 bool FieldTrialList::CreateTrialsFromSwitchValue(
-    const std::string& switch_value) {
-  SharedMemoryHandle shm = DeserializeSharedMemoryHandleMetadata(switch_value);
-  if (!shm.IsValid())
-    return false;
-  return FieldTrialList::CreateTrialsFromSharedMemoryHandle(shm);
-}
-#elif defined(OS_POSIX) && !defined(OS_NACL) && !defined(STARBOARD)
-// static
-bool FieldTrialList::CreateTrialsFromDescriptor(
-    int fd_key,
-    const std::string& switch_value) {
-  if (!kUseSharedMemoryForFieldTrials)
-    return false;
-
-  if (fd_key == -1)
-    return false;
-
-  int fd = GlobalDescriptors::GetInstance()->MaybeGet(fd_key);
+    const std::string& switch_value,
+    uint32_t fd_key) {
+  int fd = -1;
+#if BUILDFLAG(IS_POSIX)
+  fd = GlobalDescriptors::GetInstance()->MaybeGet(fd_key);
   if (fd == -1)
     return false;
-
-  SharedMemoryHandle shm =
-      DeserializeSharedMemoryHandleMetadata(fd, switch_value);
+#endif  // BUILDFLAG(IS_POSIX)
+  ReadOnlySharedMemoryRegion shm =
+      DeserializeSharedMemoryRegionMetadata(switch_value, fd);
   if (!shm.IsValid())
     return false;
-
-  bool result = FieldTrialList::CreateTrialsFromSharedMemoryHandle(shm);
-  DCHECK(result);
-  return true;
+  return FieldTrialList::CreateTrialsFromSharedMemoryRegion(shm);
 }
-#endif  // defined(OS_POSIX) && !defined(OS_NACL)
 
-#if !defined(STARBOARD)
+#endif  // !BUILDFLAG(IS_NACL) && !BUILDFLAG(IS_IOS)
+
 // static
-bool FieldTrialList::CreateTrialsFromSharedMemoryHandle(
-    SharedMemoryHandle shm_handle) {
-  // shm gets deleted when it gets out of scope, but that's OK because we need
-  // it only for the duration of this method.
-  std::unique_ptr<SharedMemory> shm(new SharedMemory(shm_handle, true));
-  if (!shm.get()->Map(kFieldTrialAllocationSize))
+bool FieldTrialList::CreateTrialsFromSharedMemoryRegion(
+    const ReadOnlySharedMemoryRegion& shm_region) {
+  ReadOnlySharedMemoryMapping shm_mapping =
+      shm_region.MapAt(0, kFieldTrialAllocationSize);
+  if (!shm_mapping.IsValid())
     OnOutOfMemory(kFieldTrialAllocationSize);
 
-  return FieldTrialList::CreateTrialsFromSharedMemory(std::move(shm));
+  return FieldTrialList::CreateTrialsFromSharedMemoryMapping(
+      std::move(shm_mapping));
 }
 
 // static
-bool FieldTrialList::CreateTrialsFromSharedMemory(
-    std::unique_ptr<SharedMemory> shm) {
-  global_->field_trial_allocator_.reset(
-      new FieldTrialAllocator(std::move(shm), 0, kAllocatorName, true));
+bool FieldTrialList::CreateTrialsFromSharedMemoryMapping(
+    ReadOnlySharedMemoryMapping shm_mapping) {
+#if defined(STARBOARD)
+  return false;
+#else
+  global_->field_trial_allocator_ =
+      std::make_unique<ReadOnlySharedPersistentMemoryAllocator>(
+          std::move(shm_mapping), 0, kAllocatorName);
   FieldTrialAllocator* shalloc = global_->field_trial_allocator_.get();
   FieldTrialAllocator::Iterator mem_iter(shalloc);
 
@@ -1360,22 +1236,18 @@
     if (!entry->GetTrialAndGroupName(&trial_name, &group_name))
       return false;
 
-    // TODO(lawrencewu): Convert the API for CreateFieldTrial to take
-    // StringPieces.
-    FieldTrial* trial =
-        CreateFieldTrial(trial_name.as_string(), group_name.as_string());
-
+    FieldTrial* trial = CreateFieldTrial(trial_name, group_name);
     trial->ref_ = mem_iter.GetAsReference(entry);
     if (subtle::NoBarrier_Load(&entry->activated)) {
-      // Call |group()| to mark the trial as "used" and notify observers, if
-      // any. This is useful to ensure that field trials created in child
+      // Mark the trial as "used" and notify observers, if any.
+      // This is useful to ensure that field trials created in child
       // processes are properly reported in crash reports.
-      trial->group();
+      trial->Activate();
     }
   }
   return true;
+#endif  // defined(STARBOARD)
 }
-#endif
 
 // static
 void FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded() {
@@ -1384,27 +1256,21 @@
 #else
   if (!global_)
     return;
+
   AutoLock auto_lock(global_->lock_);
   // Create the allocator if not already created and add all existing trials.
   if (global_->field_trial_allocator_ != nullptr)
     return;
 
-  SharedMemoryCreateOptions options;
-  options.size = kFieldTrialAllocationSize;
-  options.share_read_only = true;
-#if defined(OS_MACOSX) && !defined(OS_IOS)
-  options.type = SharedMemoryHandle::POSIX;
-#endif
+  MappedReadOnlyRegion shm =
+      ReadOnlySharedMemoryRegion::Create(kFieldTrialAllocationSize);
 
-  std::unique_ptr<SharedMemory> shm(new SharedMemory());
-  if (!shm->Create(options))
+  if (!shm.IsValid())
     OnOutOfMemory(kFieldTrialAllocationSize);
 
-  if (!shm->Map(kFieldTrialAllocationSize))
-    OnOutOfMemory(kFieldTrialAllocationSize);
-
-  global_->field_trial_allocator_.reset(
-      new FieldTrialAllocator(std::move(shm), 0, kAllocatorName, false));
+  global_->field_trial_allocator_ =
+      std::make_unique<WritableSharedPersistentMemoryAllocator>(
+          std::move(shm.mapping), 0, kAllocatorName);
   global_->field_trial_allocator_->CreateTrackingHistograms(kAllocatorName);
 
   // Add all existing field trials.
@@ -1417,11 +1283,10 @@
   FeatureList::GetInstance()->AddFeaturesToAllocator(
       global_->field_trial_allocator_.get());
 
-#if !defined(OS_NACL)
-  global_->readonly_allocator_handle_ = GetSharedMemoryReadOnlyHandle(
-      global_->field_trial_allocator_->shared_memory());
+#if !BUILDFLAG(IS_NACL)
+  global_->readonly_allocator_region_ = std::move(shm.region);
 #endif
-#endif
+#endif  // defined(STARBOARD)
 }
 
 // static
@@ -1437,9 +1302,8 @@
   if (allocator->IsReadonly())
     return;
 
-  FieldTrial::State trial_state;
-  if (!field_trial->GetStateWhileLocked(&trial_state, false))
-    return;
+  FieldTrial::PickleState trial_state;
+  field_trial->GetStateWhileLocked(&trial_state);
 
   // Or if we've already added it. We must check after GetState since it can
   // also add to the allocator.
@@ -1496,18 +1360,7 @@
   }
 }
 
-// static
-const FieldTrial::EntropyProvider*
-    FieldTrialList::GetEntropyProviderForOneTimeRandomization() {
-  if (!global_) {
-    used_without_global_ = true;
-    return nullptr;
-  }
-
-  return global_->entropy_provider_.get();
-}
-
-FieldTrial* FieldTrialList::PreLockedFind(const std::string& name) {
+FieldTrial* FieldTrialList::PreLockedFind(StringPiece name) {
   auto it = registered_.find(name);
   if (registered_.end() == it)
     return nullptr;
@@ -1515,16 +1368,17 @@
 }
 
 // static
-void FieldTrialList::Register(FieldTrial* trial) {
-  if (!global_) {
-    used_without_global_ = true;
-    return;
-  }
+void FieldTrialList::Register(FieldTrial* trial, bool is_randomized_trial) {
+  DCHECK(global_);
+
   AutoLock auto_lock(global_->lock_);
   CHECK(!global_->PreLockedFind(trial->trial_name())) << trial->trial_name();
   trial->AddRef();
   trial->SetTrialRegistered();
   global_->registered_[trial->trial_name()] = trial;
+
+  if (is_randomized_trial)
+    ++global_->num_registered_randomized_trials_;
 }
 
 // static
@@ -1537,4 +1391,74 @@
   return output;
 }
 
+// static
+bool FieldTrialList::CreateTrialsFromFieldTrialStatesInternal(
+    const std::vector<FieldTrial::State>& entries) {
+  DCHECK(global_);
+
+  for (const auto& entry : entries) {
+    FieldTrial* trial = CreateFieldTrial(entry.trial_name, entry.group_name);
+    if (!trial)
+      return false;
+    if (entry.activated) {
+      // Mark the trial as "used" and notify observers, if any.
+      // This is useful to ensure that field trials created in child
+      // processes are properly reported in crash reports.
+      trial->Activate();
+    }
+  }
+  return true;
+}
+
+// static
+void FieldTrialList::GetActiveFieldTrialGroupsInternal(
+    FieldTrial::ActiveGroups* active_groups,
+    bool include_low_anonymity) {
+  DCHECK(active_groups->empty());
+  if (!global_) {
+    return;
+  }
+  AutoLock auto_lock(global_->lock_);
+
+  for (const auto& registered : global_->registered_) {
+    const FieldTrial& trial = *registered.second;
+    FieldTrial::ActiveGroup active_group;
+    if ((include_low_anonymity || !trial.is_low_anonymity_) &&
+        trial.GetActiveGroup(&active_group)) {
+      active_groups->push_back(active_group);
+    }
+  }
+}
+
+// static
+bool FieldTrialList::AddObserverInternal(Observer* observer,
+                                         bool include_low_anonymity) {
+  if (!global_) {
+    return false;
+  }
+  AutoLock auto_lock(global_->lock_);
+  if (include_low_anonymity) {
+    global_->observers_including_low_anonymity_.push_back(observer);
+  } else {
+    global_->observers_.push_back(observer);
+  }
+  return true;
+}
+
+// static
+void FieldTrialList::RemoveObserverInternal(Observer* observer,
+                                            bool include_low_anonymity) {
+  if (!global_) {
+    return;
+  }
+  AutoLock auto_lock(global_->lock_);
+  if (include_low_anonymity) {
+    Erase(global_->observers_including_low_anonymity_, observer);
+  } else {
+    Erase(global_->observers_, observer);
+  }
+  DCHECK_EQ(global_->num_ongoing_notify_field_trial_group_selection_calls_, 0)
+      << "Cannot call RemoveObserver while accessing FieldTrial::group_name().";
+}
+
 }  // namespace base
diff --git a/base/metrics/field_trial.h b/base/metrics/field_trial.h
index 3d877f2..2209ac8 100644
--- a/base/metrics/field_trial.h
+++ b/base/metrics/field_trial.h
@@ -1,10 +1,33 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+// The FieldTrial class handles the lower level configuration of running A/B
+// tests.
+//
+// Most server-side experiments should be configured using Features, which
+// have a simpler interface. See base/feature_list.h for details on
+// configuring a Feature for an experiment.
+
+// In certain cases you may still need to use FieldTrial directly. This is
+// generally for either:
+// - Client-configured experiments:
+//     The experiment is controlled directly in the code. For example, if the
+//     server-controlled behavior is not yet available. See the documentation
+//     below.
+// - Synthetic field trials:
+//     These act like field trials for reporting purposes, but the group
+//     placement is controlled directly. See RegisterSyntheticFieldTrial().
+
+// If you have access, see go/client-side-field-trials for additional context.
+
+//------------------------------------------------------------------------------
+// Details:
+
 // FieldTrial is a class for handling details of statistical experiments
 // performed by actual users in the field (i.e., in a shipped or beta product).
-// All code is called exclusively on the UI thread currently.
+// All code is called exclusively on the UI thread currently. It only handles
+// the lower-level details; server-side experiments should generally use
+// Features (see above).
 //
 // The simplest example is an experiment to see whether one of two options
 // produces "better" results across our user population.  In that scenario, UMA
@@ -21,42 +44,36 @@
 
 //------------------------------------------------------------------------------
 // Example:  Suppose we have an experiment involving memory, such as determining
-// the impact of some pruning algorithm.
-// We assume that we already have a histogram of memory usage, such as:
-
-//   UMA_HISTOGRAM_COUNTS_1M("Memory.RendererTotal", count);
-
-// Somewhere in main thread initialization code, we'd probably define an
-// instance of a FieldTrial, with code such as:
+// the impact of some pruning algorithm. Note that using this API directly is
+// not recommended; see above.
 
 // // FieldTrials are reference counted, and persist automagically until
 // // process teardown, courtesy of their automatic registration in
 // // FieldTrialList.
-// // Note: This field trial will run in Chrome instances compiled through
-// //       8 July, 2015, and after that all instances will be in "StandardMem".
 // scoped_refptr<base::FieldTrial> trial(
 //     base::FieldTrialList::FactoryGetFieldTrial(
-//         "MemoryExperiment", 1000, "StandardMem", 2015, 7, 8,
-//         base::FieldTrial::ONE_TIME_RANDOMIZED, NULL));
+//         "MemoryExperiment", 1000, "StandardMem", entropy_provider);
 //
-// const int high_mem_group =
-//     trial->AppendGroup("HighMem", 20);  // 2% in HighMem group.
-// const int low_mem_group =
-//     trial->AppendGroup("LowMem", 20);   // 2% in LowMem group.
+// trial->AppendGroup("HighMem", 20);  // 2% in HighMem group.
+// trial->AppendGroup("LowMem", 20);   // 2% in LowMem group.
 // // Take action depending of which group we randomly land in.
-// if (trial->group() == high_mem_group)
-//   SetPruningAlgorithm(kType1);  // Sample setting of browser state.
-// else if (trial->group() == low_mem_group)
-//   SetPruningAlgorithm(kType2);  // Sample alternate setting.
+// if (trial->group_name() == "HighMem")
+//   SetPruningAlgorithm(kType1);
+// else if (trial->group_name() == "LowMem")
+//   SetPruningAlgorithm(kType2);
 
 //------------------------------------------------------------------------------
 
 #ifndef BASE_METRICS_FIELD_TRIAL_H_
 #define BASE_METRICS_FIELD_TRIAL_H_
 
+#include <stddef.h>
+#include <stdint.h>
+
+#include <atomic>
+#include <functional>
 #include <map>
 #include <memory>
-#include <set>
 #include <string>
 #include <vector>
 
@@ -64,50 +81,32 @@
 #include "base/base_export.h"
 #include "base/command_line.h"
 #include "base/feature_list.h"
-#include "base/files/file.h"
 #include "base/gtest_prod_util.h"
-#include "base/macros.h"
+#include "base/memory/raw_ptr.h"
+#include "base/memory/read_only_shared_memory_region.h"
 #include "base/memory/ref_counted.h"
-#include "base/memory/shared_memory.h"
-#include "base/memory/shared_memory_handle.h"
+#include "base/memory/shared_memory_mapping.h"
 #include "base/metrics/persistent_memory_allocator.h"
-#include "base/observer_list_threadsafe.h"
 #include "base/pickle.h"
-#include "base/process/launch.h"
 #include "base/strings/string_piece.h"
 #include "base/synchronization/lock.h"
-#include "base/time/time.h"
+#include "base/types/pass_key.h"
 #include "build/build_config.h"
-#include "starboard/types.h"
 
 namespace base {
 
+namespace test {
+class ScopedFeatureList;
+}  // namespace test
+
+class CompareActiveGroupToFieldTrialMatcher;
 class FieldTrialList;
+struct LaunchOptions;
 
 class BASE_EXPORT FieldTrial : public RefCounted<FieldTrial> {
  public:
   typedef int Probability;  // Probability type for being selected in a trial.
 
-#if !defined(STARBOARD)
-  // TODO(665129): Make private again after crash has been resolved.
-  typedef SharedPersistentMemoryAllocator::Reference FieldTrialRef;
-#else
-  // In Cobalt, we don't export SharedPersistentMemoryAllocator, use the
-  // underlying type "uint32_t" directly here.
-  typedef uint32_t FieldTrialRef;
-#endif
-
-  // Specifies the persistence of the field trial group choice.
-  enum RandomizationType {
-    // One time randomized trials will persist the group choice between
-    // restarts, which is recommended for most trials, especially those that
-    // change user visible behavior.
-    ONE_TIME_RANDOMIZED,
-    // Session randomized trials will roll the dice to select a group on every
-    // process restart.
-    SESSION_RANDOMIZED,
-  };
-
   // EntropyProvider is an interface for providing entropy for one-time
   // randomized (persistent) field trials.
   class BASE_EXPORT EntropyProvider {
@@ -119,8 +118,19 @@
     // used in preference to |trial_name| for generating the entropy by entropy
     // providers that support it. A given instance should always return the same
     // value given the same input |trial_name| and |randomization_seed| values.
-    virtual double GetEntropyForTrial(const std::string& trial_name,
+    virtual double GetEntropyForTrial(StringPiece trial_name,
                                       uint32_t randomization_seed) const = 0;
+
+    // Returns a pseudorandom integer in [0, output_range).
+    // |salt| is a data parameter for the pseudorandom function.
+    uint32_t GetPseudorandomValue(uint32_t salt, uint32_t output_range) const;
+  };
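+
+  // A hedged sketch of a custom provider (the class is hypothetical; a real
+  // provider should return uniformly distributed values in [0, 1)):
+  //
+  //   class FixedEntropyProvider : public FieldTrial::EntropyProvider {
+  //    public:
+  //     double GetEntropyForTrial(StringPiece trial_name,
+  //                               uint32_t randomization_seed) const override {
+  //       return 0.25;  // Deterministic: every client gets the same group.
+  //     }
+  //   };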
+
+  // Separate type from FieldTrial::PickleState so that it can use StringPieces.
+  struct State {
+    StringPiece trial_name;
+    StringPiece group_name;
+    bool activated = false;
   };
 
   // A pair representing a Field Trial and its selected group.
@@ -133,14 +143,14 @@
   // active. String members are pointers to the underlying strings owned by the
   // FieldTrial object. Does not use StringPiece to avoid conversions back to
   // std::string.
-  struct BASE_EXPORT State {
-    const std::string* trial_name = nullptr;
-    const std::string* group_name = nullptr;
+  struct BASE_EXPORT PickleState {
+    raw_ptr<const std::string, DanglingUntriaged> trial_name = nullptr;
+    raw_ptr<const std::string, DanglingUntriaged> group_name = nullptr;
     bool activated = false;
 
-    State();
-    State(const State& other);
-    ~State();
+    PickleState();
+    PickleState(const PickleState& other);
+    ~PickleState();
   };
 
   // We create one FieldTrialEntry per field trial in shared memory, via
@@ -148,10 +158,10 @@
   // base::Pickle object that we unpickle and read from.
   struct BASE_EXPORT FieldTrialEntry {
     // SHA1(FieldTrialEntry): Increment this if structure changes!
-    static constexpr uint32_t kPersistentTypeId = 0xABA17E13 + 2;
+    static constexpr uint32_t kPersistentTypeId = 0xABA17E13 + 3;
 
     // Expected size for 32/64-bit check.
-    static constexpr size_t kExpectedInstanceSize = 8;
+    static constexpr size_t kExpectedInstanceSize = 16;
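+    // With the fields below, that is 4 bytes (|activated|) + 4 bytes
+    // (|padding|) + 8 bytes (|pickle_size|) on both 32-bit and 64-bit builds.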
 
     // Whether or not this field trial is activated. This is really just a
     // boolean but using a 32 bit value for portability reasons. It should be
@@ -160,8 +170,14 @@
     // thread is accessing the memory location.
     subtle::Atomic32 activated;
 
+    // On e.g. x86, alignof(uint64_t) is 4.  Ensure consistent size and
+    // alignment of `pickle_size` across platforms. This can be considered
+    // to be padding for the final 32 bit value (activated). If this struct
+    // gains or loses fields, consider if this padding is still needed.
+    uint32_t padding;
+
     // Size of the pickled structure, NOT the total size of this entry.
-    uint32_t pickle_size;
+    uint64_t pickle_size;
 
     // Calling this is only valid when the entry is initialized. That is, it
     // resides in shared memory and has a pickle containing the trial name and
@@ -191,34 +207,32 @@
   // assignment (and hence is not yet participating in the trial).
   static const int kNotFinalized;
 
-  // Disables this trial, meaning it always determines the default group
-  // has been selected. May be called immediately after construction, or
-  // at any time after initialization (should not be interleaved with
-  // AppendGroup calls). Once disabled, there is no way to re-enable a
-  // trial.
-  // TODO(mad): http://code.google.com/p/chromium/issues/detail?id=121446
-  // This doesn't properly reset to Default when a group was forced.
-  void Disable();
+  FieldTrial(const FieldTrial&) = delete;
+  FieldTrial& operator=(const FieldTrial&) = delete;
 
-  // Establish the name and probability of the next group in this trial.
+  // Establishes the name and probability of the next group in this trial.
   // Sometimes, based on construction randomization, this call may cause the
   // provided group to be *THE* group selected for use in this instance.
-  // The return value is the group number of the new group.
-  int AppendGroup(const std::string& name, Probability group_probability);
+  // AppendGroup can be called after calls to group() but it should be avoided
+  // if possible. Doing so may be confusing since it won't change the group
+  // selection.
+  void AppendGroup(const std::string& name, Probability group_probability);
 
   // Return the name of the FieldTrial (excluding the group name).
   const std::string& trial_name() const { return trial_name_; }
 
-  // Return the randomly selected group number that was assigned, and notify
-  // any/all observers that this finalized group number has presumably been used
-  // (queried), and will never change. Note that this will force an instance to
+  // Finalizes the group assignment and notifies any/all observers. This is a
+  // no-op if the trial is already active. Note this will force an instance to
   // participate, and make it illegal to attempt to probabilistically add any
   // other groups to the trial.
-  int group();
+  void Activate();
 
   // If the group's name is empty, a string version containing the group number
   // is used as the group name. This causes a winner to be chosen if none was.
   const std::string& group_name();
+#ifdef COBALT_PENDING_CLEAN_UP
+  int group() { return 0; }
+#endif
 
   // Finalizes the group choice and returns the chosen group, but does not mark
   // the trial as active - so its state will not be reported until group_name()
@@ -235,7 +249,7 @@
   // be done from the UI thread.
   void SetForced();
 
-  // Enable benchmarking sets field trials to a common setting.
+  // Supports benchmarking by causing field trials' default groups to be chosen.
   static void EnableBenchmarking();
 
   // Creates a FieldTrial object with the specified parameters, to be used for
@@ -248,11 +262,16 @@
   //
   // The ownership of the returned FieldTrial is transferred to the caller which
   // is responsible for deref'ing it (e.g. by using scoped_refptr<FieldTrial>).
-  static FieldTrial* CreateSimulatedFieldTrial(
-      const std::string& trial_name,
-      Probability total_probability,
-      const std::string& default_group_name,
-      double entropy_value);
+  static FieldTrial* CreateSimulatedFieldTrial(StringPiece trial_name,
+                                               Probability total_probability,
+                                               StringPiece default_group_name,
+                                               double entropy_value);
+
+  // Whether this field trial is low anonymity or not (see
+  // |FieldTrialListIncludingLowAnonymity|).
+  // TODO(crbug.com/1431156): remove this once all call sites have been properly
+  // migrated to use an appropriate observer.
+  bool is_low_anonymity() { return is_low_anonymity_; }
 
  private:
   // Allow tests to access our innards for testing purposes.
@@ -273,16 +292,25 @@
   FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, SetForcedTurnFeatureOn);
   FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, SetForcedChangeDefault_Default);
   FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, SetForcedChangeDefault_NonDefault);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, ObserveReentrancy);
   FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, FloatBoundariesGiveEqualGroupSizes);
   FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, DoesNotSurpassTotalProbability);
   FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest,
                            DoNotAddSimulatedFieldTrialsToAllocator);
   FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, ClearParamsFromSharedMemory);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest,
+                           TestGetRandomizedFieldTrialCount);
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, SetLowAnonymity);
+
+  // MATCHER(CompareActiveGroupToFieldTrialMatcher, "")
+  friend class base::CompareActiveGroupToFieldTrialMatcher;
 
   friend class base::FieldTrialList;
 
   friend class RefCounted<FieldTrial>;
 
+  using FieldTrialRef = PersistentMemoryAllocator::Reference;
+
   // This is the group number of the 'default' group when a choice wasn't forced
   // by a call to FieldTrialList::CreateFieldTrial. It is kept private so that
   // consumers don't use it by mistake in cases where the group was forced.
@@ -290,14 +318,13 @@
 
   // Creates a field trial with the specified parameters. Group assignment will
   // be done based on |entropy_value|, which must have a range of [0, 1).
-  FieldTrial(const std::string& trial_name,
+  FieldTrial(StringPiece trial_name,
              Probability total_probability,
-             const std::string& default_group_name,
-             double entropy_value);
-  virtual ~FieldTrial();
+             StringPiece default_group_name,
+             double entropy_value,
+             bool is_low_anonymity);
 
-  // Return the default group name of the FieldTrial.
-  std::string default_group_name() const { return default_group_name_; }
+  virtual ~FieldTrial();
 
   // Marks this trial as having been registered with the FieldTrialList. Must be
   // called no more than once and before any |group()| calls have occurred.
@@ -311,10 +338,6 @@
   // status.
   void FinalizeGroupChoice();
 
-  // Implements FinalizeGroupChoice() with the added flexibility of being
-  // deadlock-free if |is_locked| is true and the caller is holding a lock.
-  void FinalizeGroupChoiceImpl(bool is_locked);
-
   // Returns the trial name and selected group name for this field trial via
   // the output parameter |active_group|, but only if the group has already
   // been chosen and has been externally observed via |group()| and the trial
@@ -324,16 +347,11 @@
   bool GetActiveGroup(ActiveGroup* active_group) const;
 
   // Returns the trial name and selected group name for this field trial via
-  // the output parameter |field_trial_state| for all the studies when
-  // |bool include_expired| is true. In case when |bool include_expired| is
-  // false, if the trial has not been disabled true is returned and
-  // |field_trial_state| is filled in; otherwise, the result is false and
-  // |field_trial_state| is left untouched.
-  // This function is deadlock-free if the caller is holding a lock.
-  bool GetStateWhileLocked(State* field_trial_state, bool include_expired);
+  // the output parameter |field_trial_state| for all the studies.
+  void GetStateWhileLocked(PickleState* field_trial_state);
 
   // Returns the group_name. A winner need not have been chosen.
-  std::string group_name_internal() const { return group_name_; }
+  const std::string& group_name_internal() const { return group_name_; }
 
   // The name of the field trial, as can be found via the FieldTrialList.
   const std::string trial_name_;
@@ -364,10 +382,6 @@
   // has been called.
   std::string group_name_;
 
-  // When enable_field_trial_ is false, field trial reverts to the 'default'
-  // group.
-  bool enable_field_trial_;
-
   // When forced_ is true, we return the chosen group from AppendGroup when
   // appropriate.
   bool forced_;
@@ -382,35 +396,28 @@
   // Reference to related field trial struct and data in shared memory.
   FieldTrialRef ref_;
 
-  // When benchmarking is enabled, field trials all revert to the 'default'
-  // group.
+  // Denotes whether benchmarking is enabled. In this case, field trials all
+  // revert to the default group.
   static bool enable_benchmarking_;
 
-  DISALLOW_COPY_AND_ASSIGN(FieldTrial);
+  // Whether this field trial is potentially low anonymity (eg. only a small
+  // set of users are included).
+  const bool is_low_anonymity_ = false;
 };
 
 //------------------------------------------------------------------------------
 // Class with a list of all active field trials.  A trial is active if it has
-// been registered, which includes evaluating its state based on its probaility.
-// Only one instance of this class exists and outside of testing, will live for
-// the entire life time of the process.
+// been registered, which includes evaluating its state based on its
+// probability. Only one instance of this class exists, and outside of testing
+// it will live for the entire lifetime of the process.
 class BASE_EXPORT FieldTrialList {
  public:
-#if !defined(STARBOARD)
-  typedef SharedPersistentMemoryAllocator FieldTrialAllocator;
-#else
-  // In Cobalt, we don't import any shared memory constructs.
-  typedef LocalPersistentMemoryAllocator FieldTrialAllocator;
-#endif
+  using FieldTrialAllocator = PersistentMemoryAllocator;
 
   // Type for function pointer passed to |AllParamsToString| used to escape
   // special characters from |input|.
   typedef std::string (*EscapeDataFunc)(const std::string& input);
 
-  // Year that is guaranteed to not be expired when instantiating a field trial
-  // via |FactoryGetFieldTrial()|.  Set to two years from the build date.
-  static int kNoExpirationYear;
-
   // Observer is notified when a FieldTrial's group is selected.
   class BASE_EXPORT Observer {
    public:
@@ -423,74 +430,49 @@
   };
 
   // This singleton holds the global list of registered FieldTrials.
-  //
-  // To support one-time randomized field trials, specify a non-null
-  // |entropy_provider| which should be a source of uniformly distributed
-  // entropy values. If one time randomization is not desired, pass in null for
-  // |entropy_provider|.
-  explicit FieldTrialList(
-      std::unique_ptr<const FieldTrial::EntropyProvider> entropy_provider);
+  FieldTrialList();
+  FieldTrialList(const FieldTrialList&) = delete;
+  FieldTrialList& operator=(const FieldTrialList&) = delete;
 
   // Destructor Release()'s references to all registered FieldTrial instances.
   ~FieldTrialList();
 
-  // Get a FieldTrial instance from the factory.
+  // Gets a FieldTrial instance from the factory.
   //
-  // |name| is used to register the instance with the FieldTrialList class,
-  // and can be used to find the trial (only one trial can be present for each
-  // name). |default_group_name| is the name of the default group which will
-  // be chosen if none of the subsequent appended groups get to be chosen.
-  // |default_group_number| can receive the group number of the default group as
-  // AppendGroup returns the number of the subsequence groups. |trial_name| and
-  // |default_group_name| may not be empty but |default_group_number| can be
-  // NULL if the value is not needed.
+  // |trial_name| (a) is used to register the instance with the FieldTrialList
+  // class and (b) can be used to find the trial (only one trial can be present
+  // for each name). |default_group_name| is the name of the group that is
+  // chosen if none of the subsequent appended groups are chosen. Note that the
+  // default group is also chosen whenever |enable_benchmarking_| is true.
   //
   // Group probabilities that are later supplied must sum to less than or equal
-  // to the |total_probability|. Arguments |year|, |month| and |day_of_month|
-  // specify the expiration time. If the build time is after the expiration time
-  // then the field trial reverts to the 'default' group.
+  // to the |total_probability|.
   //
-  // Use this static method to get a startup-randomized FieldTrial or a
+  // The |entropy_provider| is used for randomizing group selection. The
+  // |randomization_seed| will be passed to the EntropyProvider in addition
+  // to the trial name, and its handling is defined by the EntropyProvider.
+  // * SessionEntropyProvider requires it to be 0 by DCHECK.
+  // * SHA1 and NormalizedMurmurHash providers will use a non-zero value as a
+  //   salt _instead_ of using the trial name.
+  //
+  // Some field trials may be targeted in such way that a relatively small
+  // number of users are in a particular experiment group. Such trials should
+  // have |is_low_anonymity| set to true, and their visibility is restricted
+  // to specific callers only, via |FieldTrialListIncludingLowAnonymity|.
+  //
+  // This static method can be used to get a startup-randomized FieldTrial or a
   // previously created forced FieldTrial.
   static FieldTrial* FactoryGetFieldTrial(
-      const std::string& trial_name,
+      StringPiece trial_name,
       FieldTrial::Probability total_probability,
-      const std::string& default_group_name,
-      const int year,
-      const int month,
-      const int day_of_month,
-      FieldTrial::RandomizationType randomization_type,
-      int* default_group_number);
-
-  // Same as FactoryGetFieldTrial(), but allows specifying a custom seed to be
-  // used on one-time randomized field trials (instead of a hash of the trial
-  // name, which is used otherwise or if |randomization_seed| has value 0). The
-  // |randomization_seed| value (other than 0) should never be the same for two
-  // trials, else this would result in correlated group assignments.  Note:
-  // Using a custom randomization seed is only supported by the
-  // PermutedEntropyProvider (which is used when UMA is not enabled). If
-  // |override_entropy_provider| is not null, then it will be used for
-  // randomization instead of the provider given when the FieldTrialList was
-  // instantiated.
-  static FieldTrial* FactoryGetFieldTrialWithRandomizationSeed(
-      const std::string& trial_name,
-      FieldTrial::Probability total_probability,
-      const std::string& default_group_name,
-      const int year,
-      const int month,
-      const int day_of_month,
-      FieldTrial::RandomizationType randomization_type,
-      uint32_t randomization_seed,
-      int* default_group_number,
-      const FieldTrial::EntropyProvider* override_entropy_provider);
+      StringPiece default_group_name,
+      const FieldTrial::EntropyProvider& entropy_provider,
+      uint32_t randomization_seed = 0,
+      bool is_low_anonymity = false);
 
   // The Find() method can be used to test to see if a named trial was already
   // registered, or to retrieve a pointer to it from the global map.
-  static FieldTrial* Find(const std::string& trial_name);
-
-  // Returns the group number chosen for the named trial, or
-  // FieldTrial::kNotFinalized if the trial does not exist.
-  static int FindValue(const std::string& trial_name);
+  static FieldTrial* Find(StringPiece trial_name);
 
   // Returns the group name chosen for the named trial, or the empty string if
   // the trial does not exist. The first call of this function on a given field
@@ -498,48 +480,41 @@
   // metrics, crashes, etc.
   // Note: Direct use of this function and related FieldTrial functions is
   // generally discouraged - instead please use base::Feature when possible.
-  static std::string FindFullName(const std::string& trial_name);
+  static std::string FindFullName(StringPiece trial_name);
 
   // Returns true if the named trial has been registered.
-  static bool TrialExists(const std::string& trial_name);
+  static bool TrialExists(StringPiece trial_name);
 
   // Returns true if the named trial exists and has been activated.
-  static bool IsTrialActive(const std::string& trial_name);
-
-  // Creates a persistent representation of active FieldTrial instances for
-  // resurrection in another process. This allows randomization to be done in
-  // one process, and secondary processes can be synchronized on the result.
-  // The resulting string contains the name and group name pairs of all
-  // registered FieldTrials for which the group has been chosen and externally
-  // observed (via |group()|) and which have not been disabled, with "/" used
-  // to separate all names and to terminate the string. This string is parsed
-  // by |CreateTrialsFromString()|.
-  static void StatesToString(std::string* output);
+  static bool IsTrialActive(StringPiece trial_name);
 
   // Creates a persistent representation of all FieldTrial instances for
   // resurrection in another process. This allows randomization to be done in
   // one process, and secondary processes can be synchronized on the result.
   // The resulting string contains the name and group name pairs of all
-  // registered FieldTrials including disabled based on |include_expired|,
+  // registered FieldTrials,
   // with "/" used to separate all names and to terminate the string. All
   // activated trials have their name prefixed with "*". This string is parsed
   // by |CreateTrialsFromString()|.
-  static void AllStatesToString(std::string* output, bool include_expired);
+  static void AllStatesToString(std::string* output);
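+  //
+  // For example, with an activated trial "A" in group "G1" and a registered
+  // but not-yet-activated trial "B" in group "G2", the output resembles
+  // (illustrative names): "*A/G1/B/G2/".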
 
   // Creates a persistent representation of all FieldTrial params for
   // resurrection in another process. The returned string contains the trial
-  // name and group name pairs of all registered FieldTrials including disabled
-  // based on |include_expired| separated by '.'. The pair is followed by ':'
-  // separator and list of param name and values separated by '/'. It also takes
-  // |encode_data_func| function pointer for encodeing special charactors.
-  // This string is parsed by |AssociateParamsFromString()|.
-  static std::string AllParamsToString(bool include_expired,
-                                       EscapeDataFunc encode_data_func);
+  // name and group name pairs of all registered FieldTrials. The pair is
+  // followed by ':' separator and list of param name and values separated by
+  // '/'. It also takes an |encode_data_func| function pointer for encoding
+  // special characters. This string is parsed by
+  // |AssociateParamsFromString()|.
+  static std::string AllParamsToString(EscapeDataFunc encode_data_func);
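+  //
+  // For example, a trial "A" in group "G" with params {p1: v1, p2: v2}
+  // serializes to a string resembling (illustrative names): "A.G:p1/v1/p2/v2".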
 
   // Fills in the supplied vector |active_groups| (which must be empty when
   // called) with a snapshot of all registered FieldTrials for which the group
   // has been chosen and externally observed (via |group()|) and which have
   // not been disabled.
+  //
+  // This does not return low anonymity field trials. Callers who need access to
+  // low anonymity field trials should use
+  // |FieldTrialListIncludingLowAnonymity.GetActiveFieldTrialGroups()|.
   static void GetActiveFieldTrialGroups(
       FieldTrial::ActiveGroups* active_groups);
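+  //
+  // Illustrative usage (a sketch):
+  //   FieldTrial::ActiveGroups active_groups;
+  //   FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
+  //   for (const auto& group : active_groups) {
+  //     // Use group.trial_name and group.group_name.
+  //   }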
 
@@ -553,66 +528,54 @@
   // holding field trial information.
   // Must be called only after a call to CreateTrialsFromCommandLine().
   static void GetInitiallyActiveFieldTrials(
-      const base::CommandLine& command_line,
+      const CommandLine& command_line,
       FieldTrial::ActiveGroups* active_groups);
 
-  // Use a state string (re: StatesToString()) to augment the current list of
+  // Use a state string (re: AllStatesToString()) to augment the current list of
   // field trials to include the supplied trials, and using a 100% probability
   // for each trial, force them to have the same group string. This is commonly
   // used in a non-browser process, to carry randomly selected state in a
   // browser process into this non-browser process, but could also be invoked
   // through a command line argument to the browser process. Created field
   // trials will be marked "used" for the purposes of active trial reporting
-  // if they are prefixed with |kActivationMarker|. Trial names in
-  // |ignored_trial_names| are ignored when parsing |trials_string|.
-  static bool CreateTrialsFromString(
-      const std::string& trials_string,
-      const std::set<std::string>& ignored_trial_names);
+  // if they are prefixed with |kActivationMarker|.
+  static bool CreateTrialsFromString(const std::string& trials_string);
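+  //
+  // Illustrative usage, with a state string of the form produced by
+  // AllStatesToString() (trial/group names hypothetical):
+  //   FieldTrialList::CreateTrialsFromString("*A/G1/B/G2/");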
 
   // Achieves the same thing as CreateTrialsFromString, except wraps the logic
   // by taking in the trials from the command line, either via shared memory
-  // handle or command line argument. A bit of a misnomer since on POSIX we
-  // simply get the trials from opening |fd_key| if using shared memory. On
-  // Windows, we expect the |cmd_line| switch for |field_trial_handle_switch| to
-  // contain the shared memory handle that contains the field trial allocator.
-  // We need the |field_trial_handle_switch| and |fd_key| arguments to be passed
-  // in since base/ can't depend on content/.
-  static void CreateTrialsFromCommandLine(const base::CommandLine& cmd_line,
-                                          const char* field_trial_handle_switch,
-                                          int fd_key);
+  // handle or command line argument.
+  // On non-Mac POSIX platforms, we simply get the trials from opening |fd_key|
+  // if using shared memory. The argument is needed here since //base can't
+  // depend on //content. |fd_key| is unused on other platforms.
+  // On other platforms, we expect the |cmd_line| switch for kFieldTrialHandle
+  // to contain the shared memory handle that contains the field trial
+  // allocator.
+  static void CreateTrialsFromCommandLine(const CommandLine& cmd_line,
+                                          uint32_t fd_key);
 
   // Creates base::Feature overrides from the command line by first trying to
   // use shared memory and then falling back to the command line if it fails.
-  static void CreateFeaturesFromCommandLine(
-      const base::CommandLine& command_line,
-      const char* enable_features_switch,
-      const char* disable_features_switch,
-      FeatureList* feature_list);
+  static void CreateFeaturesFromCommandLine(const CommandLine& command_line,
+                                            FeatureList* feature_list);
 
-#if defined(OS_WIN)
-  // On Windows, we need to explicitly pass down any handles to be inherited.
-  // This function adds the shared memory handle to field trial state to the
-  // list of handles to be inherited.
-  static void AppendFieldTrialHandleIfNeeded(
-      base::HandlesToInheritVector* handles);
-#elif defined(OS_FUCHSIA) || defined(STARBOARD)
-  // TODO(fuchsia): Implement shared-memory configuration (crbug.com/752368).
-#elif defined(OS_POSIX) && !defined(OS_NACL)
+#if !BUILDFLAG(IS_IOS)
+  // Populates |command_line| and |launch_options| with the handles and command
+  // line arguments necessary for a child process to inherit the shared-memory
+  // object containing the FieldTrial configuration.
+  static void PopulateLaunchOptionsWithFieldTrialState(
+      CommandLine* command_line,
+      LaunchOptions* launch_options);
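+  //
+  // Illustrative usage when launching a child process (a sketch; assumes the
+  // rest of |cmd_line| is already configured):
+  //   CommandLine cmd_line = ...;
+  //   LaunchOptions launch_options;
+  //   FieldTrialList::PopulateLaunchOptionsWithFieldTrialState(
+  //       &cmd_line, &launch_options);
+  //   LaunchProcess(cmd_line, launch_options);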
+#endif  // !BUILDFLAG(IS_IOS)
+
+#if BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_APPLE) && !BUILDFLAG(IS_NACL)
   // On POSIX, we also need to explicitly pass down this file descriptor that
-  // should be shared with the child process. Returns an invalid handle if it
-  // was not initialized properly.
-  static base::SharedMemoryHandle GetFieldTrialHandle();
-#endif
+  // should be shared with the child process. Returns -1 if it was not
+  // initialized properly. The current process remains the owner of the passed
+  // descriptor.
+  static int GetFieldTrialDescriptor();
+#endif  // BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_APPLE) && !BUILDFLAG(IS_NACL)
 
-  // Adds a switch to the command line containing the field trial state as a
-  // string (if not using shared memory to share field trial state), or the
-  // shared memory handle + length.
-  // Needs the |field_trial_handle_switch| argument to be passed in since base/
-  // can't depend on content/.
-  static void CopyFieldTrialStateToFlags(const char* field_trial_handle_switch,
-                                         const char* enable_features_switch,
-                                         const char* disable_features_switch,
-                                         base::CommandLine* cmd_line);
+  static ReadOnlySharedMemoryRegion DuplicateFieldTrialSharedMemoryForTesting();
 
   // Create a FieldTrial with the given |name| and using 100% probability for
   // the FieldTrial, force FieldTrial to have the same group string as
@@ -620,42 +583,43 @@
   // randomly selected state in a browser process into this non-browser process.
   // It returns NULL if there is a FieldTrial that is already registered with
   // the same |name| but has different finalized group string (|group_name|).
-  static FieldTrial* CreateFieldTrial(const std::string& name,
-                                      const std::string& group_name);
+  //
+  // Visibility of field trials with |is_low_anonymity| set to true is
+  // restricted to specific callers only, see
+  // |FieldTrialListIncludingLowAnonymity|.
+  static FieldTrial* CreateFieldTrial(StringPiece name,
+                                      StringPiece group_name,
+                                      bool is_low_anonymity = false);
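+  //
+  // Illustrative usage (names hypothetical):
+  //   FieldTrial* trial = FieldTrialList::CreateFieldTrial("MyTrial", "G1");
+  //   // |trial| is null if "MyTrial" already exists with a different group.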
 
   // Add an observer to be notified when a field trial is irrevocably committed
   // to being part of some specific field_group (and hence the group_name is
   // also finalized for that field_trial). Returns false and does nothing if
-  // there is no FieldTrialList singleton.
+  // there is no FieldTrialList singleton. The observer can be notified on any
+  // sequence; it must be thread-safe.
+  //
+  // Low anonymity field trials are not notified to this observer. Callers
+  // who need to be notified of low anonymity field trials should use
+  // |FieldTrialListIncludingLowAnonymity.AddObserver()|.
   static bool AddObserver(Observer* observer);
 
-  // Remove an observer.
+  // Remove an observer. This cannot be invoked concurrently with
+  // FieldTrial::group() (typically, this means that no other thread should be
+  // running when this is invoked).
+  //
+  // Removes observers added via the |AddObserver()| method of this class.
   static void RemoveObserver(Observer* observer);
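+
+  // Illustrative observer pattern (a sketch; MyObserver is hypothetical and
+  // must implement the FieldTrialList::Observer interface):
+  //   class MyObserver : public FieldTrialList::Observer { ... };
+  //
+  //   MyObserver observer;
+  //   FieldTrialList::AddObserver(&observer);
+  //   // ... groups are finalized and |observer| is notified ...
+  //   FieldTrialList::RemoveObserver(&observer);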
 
-  // Similar to AddObserver(), but the passed observer will be notified
-  // synchronously when a field trial is activated and its group selected. It
-  // will be notified synchronously on the same thread where the activation and
-  // group selection happened. It is the responsibility of the observer to make
-  // sure that this is a safe operation and the operation must be fast, as this
-  // work is done synchronously as part of group() or related APIs. Only a
-  // single such observer is supported, exposed specifically for crash
-  // reporting. Must be called on the main thread before any other threads
-  // have been started.
-  static void SetSynchronousObserver(Observer* observer);
-
-  // Removes the single synchronous observer.
-  static void RemoveSynchronousObserver(Observer* observer);
-
-  // Grabs the lock if necessary and adds the field trial to the allocator. This
-  // should only be called from FinalizeGroupChoice().
-  static void OnGroupFinalized(bool is_locked, FieldTrial* field_trial);
-
   // Notify all observers that a group has been finalized for |field_trial|.
   static void NotifyFieldTrialGroupSelection(FieldTrial* field_trial);
 
   // Return the number of active field trials.
   static size_t GetFieldTrialCount();
 
+  // Return the number of active field trials registered as randomized trials.
+  // Trials created using CreateFieldTrial() do not count towards this
+  // total.
+  static size_t GetRandomizedFieldTrialCount();
+
   // Gets the parameters for |field_trial| from shared memory and stores them in
   // |params|. This is only exposed for use by FieldTrialParamAssociator and
   // shouldn't be used by anything else.
@@ -678,8 +642,37 @@
   GetAllFieldTrialsFromPersistentAllocator(
       PersistentMemoryAllocator const& allocator);
 
-  // Returns true if a global field trial list is set. Only used for testing.
-  static bool IsGlobalSetForTesting();
+  // Returns a pointer to the global instance. This is exposed so that it can
+  // be used in a DCHECK in FeatureList and ScopedFeatureList test-only logic
+  // and is not intended to be used widely beyond those cases.
+  static FieldTrialList* GetInstance();
+
+  // For testing, sets the global instance to null and returns the previous one.
+  static FieldTrialList* BackupInstanceForTesting();
+
+  // For testing, sets the global instance to |instance|.
+  static void RestoreInstanceForTesting(FieldTrialList* instance);
+
+  // Creates a list of FieldTrial::State for all FieldTrial instances.
+  // StringPiece members are bound to the lifetime of the corresponding
+  // FieldTrial.
+  static std::vector<FieldTrial::State> GetAllFieldTrialStates(
+      PassKey<test::ScopedFeatureList>);
+
+  // Create FieldTrials from a list of FieldTrial::State. This method is only
+  // available to ScopedFeatureList for testing. The most typical use case is:
+  // (1) AllStatesToFieldTrialStates(&field_trials);
+  // (2) backup_ = BackupInstanceForTesting();
+  //     // field_trials depends on backup_'s lifetime.
+  // (3) field_trial_list_ = new FieldTrialList();
+  // (4) CreateTrialsFromFieldTrialStates(field_trials);
+  //     // Copy backup_'s fieldtrials to the new field_trial_list_ while
+  //     // backup_ is alive.
+  // For resurrection in another process, use AllStatesToString() and
+  // CreateTrialsFromString().
+  static bool CreateTrialsFromFieldTrialStates(
+      PassKey<test::ScopedFeatureList>,
+      const std::vector<FieldTrial::State>& entries);
 
  private:
   // Allow tests to access our innards for testing purposes.
@@ -690,52 +683,61 @@
   FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, AssociateFieldTrialParams);
   FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, ClearParamsFromSharedMemory);
   FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest,
-                           SerializeSharedMemoryHandleMetadata);
-  FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, CheckReadOnlySharedMemoryHandle);
+                           SerializeSharedMemoryRegionMetadata);
+  friend int SerializeSharedMemoryRegionMetadata();
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, CheckReadOnlySharedMemoryRegion);
 
-  // Serialization is used to pass information about the handle to child
-  // processes. It passes a reference to the relevant OS resource, and it passes
-  // a GUID. Serialization and deserialization doesn't actually transport the
-  // underlying OS resource - that must be done by the Process launcher.
-  static std::string SerializeSharedMemoryHandleMetadata(
-      const SharedMemoryHandle& shm);
-#if defined(OS_WIN) || defined(OS_FUCHSIA)
-  static SharedMemoryHandle DeserializeSharedMemoryHandleMetadata(
-      const std::string& switch_value);
-#elif defined(OS_POSIX) && !defined(OS_NACL)
-  static SharedMemoryHandle DeserializeSharedMemoryHandleMetadata(
-      int fd,
-      const std::string& switch_value);
-#endif
+  // Required so that |FieldTrialListIncludingLowAnonymity| can expose APIs from
+  // this class to its friends.
+  friend class FieldTrialListIncludingLowAnonymity;
 
-#if defined(OS_WIN) || defined(OS_FUCHSIA)
+#if !BUILDFLAG(IS_NACL) && !BUILDFLAG(IS_IOS)
+  // Serialization is used to pass information about the shared memory handle
+  // to child processes. This is achieved by passing a stringified reference to
+  // the relevant OS resources to the child process.
+  //
+  // Serialization populates |launch_options| with the relevant OS handles to
+  // transfer or copy to the child process and returns serialized information
+  // to be passed to the kFieldTrialHandle command-line switch.
+  // Note: On non-Mac POSIX platforms, it is necessary to pass down the file
+  // descriptor for the shared memory separately. It can be accessed via the
+  // GetFieldTrialDescriptor() API.
+  static std::string SerializeSharedMemoryRegionMetadata(
+      const ReadOnlySharedMemoryRegion& shm,
+      LaunchOptions* launch_options);
+
+  // Deserialization instantiates the shared memory region for FieldTrials from
+  // the serialized information contained in |switch_value|. Returns an invalid
+  // ReadOnlySharedMemoryRegion on failure.
+  // |fd| is used on non-Mac POSIX platforms to instantiate the shared memory
+  // region via a file descriptor.
+  static ReadOnlySharedMemoryRegion DeserializeSharedMemoryRegionMetadata(
+      const std::string& switch_value,
+      int fd);
+
   // Takes in |switch_value| from the command line, which represents the shared
   // memory handle for field trials, parses it, and creates the field trials.
   // Returns true on success, false on failure.
   // |switch_value| also contains the serialized GUID.
-  static bool CreateTrialsFromSwitchValue(const std::string& switch_value);
-#elif defined(OS_POSIX) && !defined(OS_NACL) && !defined(STARBOARD)
-  // On POSIX systems that use the zygote, we look up the correct fd that backs
-  // the shared memory segment containing the field trials by looking it up via
-  // an fd key in GlobalDescriptors. Returns true on success, false on failure.
-  // |switch_value| also contains the serialized GUID.
-  static bool CreateTrialsFromDescriptor(int fd_key,
-                                         const std::string& switch_value);
-#endif
+  // |fd_key| is used on non-Mac POSIX platforms as the file descriptor passed
+  // down to the child process for the shared memory region.
+  static bool CreateTrialsFromSwitchValue(const std::string& switch_value,
+                                          uint32_t fd_key);
+#endif  // !BUILDFLAG(IS_NACL) && !BUILDFLAG(IS_IOS)
 
-#if !defined(STARBOARD)
-  // Takes an unmapped SharedMemoryHandle, creates a SharedMemory object from
-  // it and maps it with the correct size.
-  static bool CreateTrialsFromSharedMemoryHandle(SharedMemoryHandle shm_handle);
+  // Takes an unmapped ReadOnlySharedMemoryRegion, maps it with the correct size
+  // and creates field trials via CreateTrialsFromSharedMemoryMapping(). Returns
+  // true if successful and false otherwise.
+  static bool CreateTrialsFromSharedMemoryRegion(
+      const ReadOnlySharedMemoryRegion& shm_region);
 
-  // Expects a mapped piece of shared memory |shm| that was created from the
-  // browser process's field_trial_allocator and shared via the command line.
-  // This function recreates the allocator, iterates through all the field
+  // Expects a mapped piece of shared memory |shm_mapping| that was created from
+  // the browser process's field_trial_allocator and shared via the command
+  // line. This function recreates the allocator, iterates through all the field
   // trials in it, and creates them via CreateFieldTrial(). Returns true if
   // successful and false otherwise.
-  static bool CreateTrialsFromSharedMemory(
-      std::unique_ptr<base::SharedMemory> shm);
-#endif
+  static bool CreateTrialsFromSharedMemoryMapping(
+      ReadOnlySharedMemoryMapping shm_mapping);
 
   // Instantiate the field trial allocator, add all existing field trials to it,
   // and duplicates its handle to a read-only handle, which gets stored in
@@ -751,63 +753,86 @@
   static void ActivateFieldTrialEntryWhileLocked(FieldTrial* field_trial);
 
   // A map from FieldTrial names to the actual instances.
-  typedef std::map<std::string, FieldTrial*> RegistrationMap;
-
-  // If one-time randomization is enabled, returns a weak pointer to the
-  // corresponding EntropyProvider. Otherwise, returns NULL.
-  static const FieldTrial::EntropyProvider*
-      GetEntropyProviderForOneTimeRandomization();
+  typedef std::map<std::string, FieldTrial*, std::less<>> RegistrationMap;
 
   // Helper function should be called only while holding lock_.
-  FieldTrial* PreLockedFind(const std::string& name);
+  FieldTrial* PreLockedFind(StringPiece name) EXCLUSIVE_LOCKS_REQUIRED(lock_);
 
   // Register() stores a pointer to the given trial in a global map.
   // This method also AddRef's the indicated trial.
   // This should always be called after creating a new FieldTrial instance.
-  static void Register(FieldTrial* trial);
+  // If the trial's group is to be selected randomly, pass true for
+  // |is_randomized_trial| so that the count of randomized trials stays
+  // accurate; otherwise, pass false.
+  static void Register(FieldTrial* trial, bool is_randomized_trial);
 
   // Returns all the registered trials.
   static RegistrationMap GetRegisteredTrials();
 
+  // Create field trials from a list of FieldTrial::State.
+  // CreateTrialsFromString() and CreateTrialsFromFieldTrialStates() use this
+  // method internally.
+  static bool CreateTrialsFromFieldTrialStatesInternal(
+      const std::vector<FieldTrial::State>& entries);
+
+  // The same as |GetActiveFieldTrialGroups| but also gives access to low
+  // anonymity field trials.
+  // Restricted to specifically allowed friends - access via
+  // |FieldTrialListIncludingLowAnonymity::GetActiveFieldTrialGroups|.
+  static void GetActiveFieldTrialGroupsInternal(
+      FieldTrial::ActiveGroups* active_groups,
+      bool include_low_anonymity);
+
+  // The same as |AddObserver| but is notified for low anonymity field trials
+  // too.
+  // Restricted to specifically allowed friends - access via
+  // |FieldTrialListIncludingLowAnonymity::AddObserver|.
+  static bool AddObserverInternal(Observer* observer,
+                                  bool include_low_anonymity);
+
+  // The same as |RemoveObserver| but is notified for low anonymity field trials
+  // too.
+  // Restricted to specifically allowed friends - access via
+  // |FieldTrialListIncludingLowAnonymity::RemoveObserver|.
+  static void RemoveObserverInternal(Observer* observer,
+                                     bool include_low_anonymity);
+
   static FieldTrialList* global_;  // The singleton of this class.
 
-  // This will tell us if there is an attempt to register a field
-  // trial or check if one-time randomization is enabled without
-  // creating the FieldTrialList. This is not an error, unless a
-  // FieldTrialList is created after that.
-  static bool used_without_global_;
-
-  // Lock for access to registered_ and field_trial_allocator_.
+  // Lock for access to |registered_|, |observers_|,
+  // |observers_including_low_anonymity_|, and
+  // |num_registered_randomized_trials_|.
   Lock lock_;
-  RegistrationMap registered_;
+  RegistrationMap registered_ GUARDED_BY(lock_);
 
-  std::map<std::string, std::string> seen_states_;
-
-  // Entropy provider to be used for one-time randomized field trials. If NULL,
-  // one-time randomization is not supported.
-  std::unique_ptr<const FieldTrial::EntropyProvider> entropy_provider_;
+  // Counts the number of field trials whose groups are selected randomly.
+  size_t num_registered_randomized_trials_ GUARDED_BY(lock_) = 0;
 
   // List of observers to be notified when a group is selected for a FieldTrial.
-  scoped_refptr<ObserverListThreadSafe<Observer> > observer_list_;
+  // Excludes low anonymity field trials.
+  std::vector<Observer*> observers_ GUARDED_BY(lock_);
 
-  // Single synchronous observer to be notified when a trial group is chosen.
-  Observer* synchronous_observer_ = nullptr;
+  // List of observers to be notified when a group is selected for a FieldTrial.
+  // Includes low anonymity field trials.
+  std::vector<Observer*> observers_including_low_anonymity_ GUARDED_BY(lock_);
+
+  // Counts the ongoing calls to
+  // FieldTrialList::NotifyFieldTrialGroupSelection(). Used to ensure that
+  // RemoveObserver() isn't called while notifying observers.
+  std::atomic_int num_ongoing_notify_field_trial_group_selection_calls_{0};
 
   // Allocator in shared memory containing field trial data. Used in both
   // browser and child processes, but readonly in the child.
   // In the future, we may want to move this to a more generic place if we want
   // to start passing more data other than field trials.
-  std::unique_ptr<FieldTrialAllocator> field_trial_allocator_ = nullptr;
+  std::unique_ptr<FieldTrialAllocator> field_trial_allocator_;
 
-  // Readonly copy of the handle to the allocator. Needs to be a member variable
-  // because it's needed from both CopyFieldTrialStateToFlags() and
-  // AppendFieldTrialHandleIfNeeded().
-  base::SharedMemoryHandle readonly_allocator_handle_;
+  // Readonly copy of the region to the allocator. Needs to be a member variable
+  // because it's needed from multiple methods.
+  ReadOnlySharedMemoryRegion readonly_allocator_region_;
 
   // Tracks whether CreateTrialsFromCommandLine() has been called.
   bool create_trials_from_command_line_called_ = false;
-
-  DISALLOW_COPY_AND_ASSIGN(FieldTrialList);
 };
 
 }  // namespace base
diff --git a/base/metrics/field_trial_list_including_low_anonymity.cc b/base/metrics/field_trial_list_including_low_anonymity.cc
new file mode 100644
index 0000000..d672b86
--- /dev/null
+++ b/base/metrics/field_trial_list_including_low_anonymity.cc
@@ -0,0 +1,31 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/field_trial_list_including_low_anonymity.h"
+#include "base/metrics/field_trial.h"
+
+namespace base {
+
+// static
+void FieldTrialListIncludingLowAnonymity::GetActiveFieldTrialGroups(
+    FieldTrial::ActiveGroups* active_groups) {
+  return FieldTrialList::GetActiveFieldTrialGroupsInternal(
+      active_groups, /*include_low_anonymity=*/true);
+}
+
+// static
+bool FieldTrialListIncludingLowAnonymity::AddObserver(
+    FieldTrialList::Observer* observer) {
+  return FieldTrialList::AddObserverInternal(observer,
+                                             /*include_low_anonymity=*/true);
+}
+
+// static
+void FieldTrialListIncludingLowAnonymity::RemoveObserver(
+    FieldTrialList::Observer* observer) {
+  FieldTrialList::RemoveObserverInternal(observer,
+                                         /*include_low_anonymity=*/true);
+}
+
+}  // namespace base
diff --git a/base/metrics/field_trial_list_including_low_anonymity.h b/base/metrics/field_trial_list_including_low_anonymity.h
new file mode 100644
index 0000000..a70af44
--- /dev/null
+++ b/base/metrics/field_trial_list_including_low_anonymity.h
@@ -0,0 +1,98 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_FIELD_TRIAL_LIST_INCLUDING_LOW_ANONYMITY_H_
+#define BASE_METRICS_FIELD_TRIAL_LIST_INCLUDING_LOW_ANONYMITY_H_
+
+#include "base/gtest_prod_util.h"
+#include "base/metrics/field_trial.h"
+#include "base/values.h"
+
+class AndroidFieldTrialListLogActiveTrialsFriendHelper;
+
+namespace content {
+class FieldTrialSynchronizer;
+}
+
+namespace variations {
+class ChildProcessFieldTrialSyncer;
+class EntropyProviders;
+class ProcessedStudy;
+struct SeedSimulationResult;
+class VariationsCrashKeys;
+class VariationsLayers;
+SeedSimulationResult ComputeDifferences(
+    const std::vector<ProcessedStudy>& processed_studies,
+    const VariationsLayers& layers,
+    const EntropyProviders& entropy_providers);
+}  // namespace variations
+
+namespace version_ui {
+base::Value::List GetVariationsList();
+}
+
+namespace base {
+
+// Provides a way to restrict access to the full set of field trials, including
+// trials with low anonymity, to explicitly allowed callers.
+//
+// See |FieldTrialList::FactoryGetFieldTrial()| for background.
+class BASE_EXPORT FieldTrialListIncludingLowAnonymity {
+ public:
+  // Exposed publicly, to avoid test code needing to be explicitly friended.
+  static void GetActiveFieldTrialGroupsForTesting(
+      FieldTrial::ActiveGroups* active_groups) {
+    return GetActiveFieldTrialGroups(active_groups);
+  }
+
+  // Classes / functions which are allowed full access to all field trials
+  // should be listed as friends here, with a comment explaining why this does
+  // not risk revealing identifiable information externally.
+
+  // This is used only for local logging on Android.
+  friend class ::AndroidFieldTrialListLogActiveTrialsFriendHelper;
+
+  // Used to synchronize field trial status between the browser and child
+  // processes.
+  // Access to these trials within each of these is then allowed only to the
+  // other friend classes / methods listed here.
+  friend class content::FieldTrialSynchronizer;
+  friend class variations::ChildProcessFieldTrialSyncer;
+
+  // This is only used to simulate seed changes, not sent to Google servers.
+  friend variations::SeedSimulationResult variations::ComputeDifferences(
+      const std::vector<variations::ProcessedStudy>& processed_studies,
+      const variations::VariationsLayers& layers,
+      const variations::EntropyProviders& entropy_providers);
+
+  // Include all active field trials in crash reports, so that crashes are
+  // reproducible: https://www.google.com/intl/en/chrome/privacy/.
+  friend class variations::VariationsCrashKeys;
+
+  // This usage is to display field trials in chrome://version and other local
+  // internal UIs.
+  friend base::Value::List version_ui::GetVariationsList();
+
+  // Required for tests.
+  friend class TestFieldTrialObserverIncludingLowAnonymity;
+  FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, ObserveIncludingLowAnonymity);
+
+ private:
+  // The same as |FieldTrialList::GetActiveFieldTrialGroups| but gives access to
+  // low anonymity field trials too.
+  static void GetActiveFieldTrialGroups(
+      FieldTrial::ActiveGroups* active_groups);
+
+  // Identical to |FieldTrialList::AddObserver| but also notifies of low
+  // anonymity trials.
+  static bool AddObserver(FieldTrialList::Observer* observer);
+
+  // Identical to |FieldTrialList::RemoveObserver| but for observers registered
+  // through the AddObserver() function of this class.
+  static void RemoveObserver(FieldTrialList::Observer* observer);
+};
+
+}  // namespace base
+
+#endif  // BASE_METRICS_FIELD_TRIAL_LIST_INCLUDING_LOW_ANONYMITY_H_
diff --git a/base/metrics/field_trial_param_associator.cc b/base/metrics/field_trial_param_associator.cc
index af76eaf..d2b2775 100644
--- a/base/metrics/field_trial_param_associator.cc
+++ b/base/metrics/field_trial_param_associator.cc
@@ -1,9 +1,11 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
+// Copyright 2016 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "base/metrics/field_trial_param_associator.h"
 
+#include "base/containers/contains.h"
+#include "base/logging.h"
 #include "base/metrics/field_trial.h"
 
 namespace base {
@@ -21,28 +23,30 @@
     const std::string& trial_name,
     const std::string& group_name,
     const FieldTrialParams& params) {
-  if (FieldTrialList::IsTrialActive(trial_name))
+  if (FieldTrialList::IsTrialActive(trial_name)) {
+    DLOG(ERROR) << "Field trial " << trial_name << " is already active.";
     return false;
+  }
 
   AutoLock scoped_lock(lock_);
   const FieldTrialKey key(trial_name, group_name);
-  if (ContainsKey(field_trial_params_, key))
+  if (Contains(field_trial_params_, key)) {
+    DLOG(ERROR) << "You can't override the existing params for field trial: "
+                << trial_name << "." << group_name;
     return false;
+  }
 
   field_trial_params_[key] = params;
   return true;
 }
 
-bool FieldTrialParamAssociator::GetFieldTrialParams(
-    const std::string& trial_name,
-    FieldTrialParams* params) {
-  FieldTrial* field_trial = FieldTrialList::Find(trial_name);
+bool FieldTrialParamAssociator::GetFieldTrialParams(FieldTrial* field_trial,
+                                                    FieldTrialParams* params) {
   if (!field_trial)
     return false;
-
   // First try the local map, falling back to getting it from shared memory.
-  if (GetFieldTrialParamsWithoutFallback(trial_name, field_trial->group_name(),
-                                         params)) {
+  if (GetFieldTrialParamsWithoutFallback(field_trial->trial_name(),
+                                         field_trial->group_name(), params)) {
     return true;
   }
 
@@ -56,11 +60,12 @@
     FieldTrialParams* params) {
   AutoLock scoped_lock(lock_);
 
-  const FieldTrialKey key(trial_name, group_name);
-  if (!ContainsKey(field_trial_params_, key))
+  const FieldTrialRefKey key(trial_name, group_name);
+  auto it = field_trial_params_.find(key);
+  if (it == field_trial_params_.end())
     return false;
 
-  *params = field_trial_params_[key];
+  *params = it->second;
   return true;
 }
 
@@ -76,7 +81,7 @@
     const std::string& trial_name,
     const std::string& group_name) {
   AutoLock scoped_lock(lock_);
-  const FieldTrialKey key(trial_name, group_name);
+  const FieldTrialRefKey key(trial_name, group_name);
   field_trial_params_.erase(key);
 }
 
diff --git a/base/metrics/field_trial_param_associator.h b/base/metrics/field_trial_param_associator.h
index b35e2cc..7915ea4 100644
--- a/base/metrics/field_trial_param_associator.h
+++ b/base/metrics/field_trial_param_associator.h
@@ -1,4 +1,4 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
+// Copyright 2016 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -12,6 +12,7 @@
 #include "base/base_export.h"
 #include "base/memory/singleton.h"
 #include "base/metrics/field_trial.h"
+#include "base/metrics/field_trial_params.h"
 #include "base/synchronization/lock.h"
 
 namespace base {
@@ -21,10 +22,12 @@
 class BASE_EXPORT FieldTrialParamAssociator {
  public:
   FieldTrialParamAssociator();
-  ~FieldTrialParamAssociator();
 
-  // Key-value mapping type for field trial parameters.
-  typedef std::map<std::string, std::string> FieldTrialParams;
+  FieldTrialParamAssociator(const FieldTrialParamAssociator&) = delete;
+  FieldTrialParamAssociator& operator=(const FieldTrialParamAssociator&) =
+      delete;
+
+  ~FieldTrialParamAssociator();
 
   // Retrieve the singleton.
   static FieldTrialParamAssociator* GetInstance();
@@ -35,9 +38,9 @@
                                  const FieldTrialParams& params);
 
   // Gets the parameters for a field trial and its chosen group. If not found in
-  // field_trial_params_, then tries to looks it up in shared memory.
-  bool GetFieldTrialParams(const std::string& trial_name,
-                           FieldTrialParams* params);
+  // field_trial_params_, then tries to look it up in shared memory. Returns
+  // false if no params are available or the passed |field_trial| is null.
+  bool GetFieldTrialParams(FieldTrial* field_trial, FieldTrialParams* params);
 
   // Gets the parameters for a field trial and its chosen group. Does not
   // fallback to looking it up in shared memory. This should only be used if you
@@ -64,11 +67,11 @@
 
   // (field_trial_name, field_trial_group)
   typedef std::pair<std::string, std::string> FieldTrialKey;
+  // The following type can be used for lookups without needing to copy strings.
+  typedef std::pair<const std::string&, const std::string&> FieldTrialRefKey;
 
   Lock lock_;
   std::map<FieldTrialKey, FieldTrialParams> field_trial_params_;
-
-  DISALLOW_COPY_AND_ASSIGN(FieldTrialParamAssociator);
 };
 
 }  // namespace base
diff --git a/base/metrics/field_trial_params.cc b/base/metrics/field_trial_params.cc
index f01db0f..df0fe5d 100644
--- a/base/metrics/field_trial_params.cc
+++ b/base/metrics/field_trial_params.cc
@@ -1,45 +1,136 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
+// Copyright 2017 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "base/metrics/field_trial_params.h"
 
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "base/debug/crash_logging.h"
+#include "base/debug/dump_without_crashing.h"
 #include "base/feature_list.h"
 #include "base/metrics/field_trial.h"
 #include "base/metrics/field_trial_param_associator.h"
+#include "base/notreached.h"
+#include "base/strings/escape.h"
 #include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+#include "base/strings/stringprintf.h"
+#include "base/time/time_delta_from_string.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
 
 namespace base {
 
-bool AssociateFieldTrialParams(
-    const std::string& trial_name,
-    const std::string& group_name,
-    const std::map<std::string, std::string>& params) {
-  return base::FieldTrialParamAssociator::GetInstance()
-      ->AssociateFieldTrialParams(trial_name, group_name, params);
+void LogInvalidValue(const Feature& feature,
+                     const char* type,
+                     const std::string& param_name,
+                     const std::string& value_as_string,
+                     const std::string& default_value_as_string) {
+  // To anyone noticing these crash dumps in the wild, these parameters come
+  // from server-side experiment confiuration. If you're seeing an increase it
+  // is likely due to a bad experiment rollout rather than changes in the client
+  // code.
+  SCOPED_CRASH_KEY_STRING32("FieldTrialParams", "feature_name", feature.name);
+  SCOPED_CRASH_KEY_STRING32("FieldTrialParams", "param_name", param_name);
+  SCOPED_CRASH_KEY_STRING32("FieldTrialParams", "value", value_as_string);
+  SCOPED_CRASH_KEY_STRING32("FieldTrialParams", "default",
+                            default_value_as_string);
+  LOG(ERROR) << "Failed to parse field trial param " << param_name
+             << " with string value " << value_as_string << " under feature "
+             << feature.name << " into " << type
+             << ". Falling back to default value of "
+             << default_value_as_string;
+  base::debug::DumpWithoutCrashing();
+}
+
+std::string UnescapeValue(const std::string& value) {
+  return UnescapeURLComponent(
+      value, UnescapeRule::PATH_SEPARATORS |
+                 UnescapeRule::URL_SPECIAL_CHARS_EXCEPT_PATH_SEPARATORS);
+}
+
+bool AssociateFieldTrialParams(const std::string& trial_name,
+                               const std::string& group_name,
+                               const FieldTrialParams& params) {
+  return FieldTrialParamAssociator::GetInstance()->AssociateFieldTrialParams(
+      trial_name, group_name, params);
+}
+
+bool AssociateFieldTrialParamsFromString(
+    const std::string& params_string,
+    FieldTrialParamsDecodeStringFunc decode_data_func) {
+  // Format: Trial1.Group1:k1/v1/k2/v2,Trial2.Group2:k1/v1/k2/v2
+  std::set<std::pair<std::string, std::string>> trial_groups;
+  for (StringPiece experiment_group :
+       SplitStringPiece(params_string, ",", TRIM_WHITESPACE, SPLIT_WANT_ALL)) {
+    std::vector<StringPiece> experiment = SplitStringPiece(
+        experiment_group, ":", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+    if (experiment.size() != 2) {
+      DLOG(ERROR) << "Experiment and params should be separated by ':'";
+      return false;
+    }
+
+    std::vector<std::string> group_parts =
+        SplitString(experiment[0], ".", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+    if (group_parts.size() != 2) {
+      DLOG(ERROR) << "Trial and group name should be separated by '.'";
+      return false;
+    }
+
+    std::vector<std::string> key_values =
+        SplitString(experiment[1], "/", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+    if (key_values.size() % 2 != 0) {
+      DLOG(ERROR) << "Param name and param value should be separated by '/'";
+      return false;
+    }
+    std::string trial = decode_data_func(group_parts[0]);
+    std::string group = decode_data_func(group_parts[1]);
+    auto trial_group = std::make_pair(trial, group);
+    if (trial_groups.find(trial_group) != trial_groups.end()) {
+      DLOG(ERROR) << StringPrintf(
+          "A (trial, group) pair listed more than once. (%s, %s)",
+          trial.c_str(), group.c_str());
+      return false;
+    }
+    trial_groups.insert(trial_group);
+    std::map<std::string, std::string> params;
+    for (size_t i = 0; i < key_values.size(); i += 2) {
+      std::string key = decode_data_func(key_values[i]);
+      std::string value = decode_data_func(key_values[i + 1]);
+      params[key] = value;
+    }
+    bool result = AssociateFieldTrialParams(trial, group, params);
+    if (!result) {
+      DLOG(ERROR) << "Failed to associate field trial params for group \""
+                  << group << "\" in trial \"" << trial << "\"";
+      return false;
+    }
+  }
+  return true;
 }
 
 bool GetFieldTrialParams(const std::string& trial_name,
-                         std::map<std::string, std::string>* params) {
-  return base::FieldTrialParamAssociator::GetInstance()->GetFieldTrialParams(
-      trial_name, params);
+                         FieldTrialParams* params) {
+  FieldTrial* trial = FieldTrialList::Find(trial_name);
+  return FieldTrialParamAssociator::GetInstance()->GetFieldTrialParams(trial,
+                                                                       params);
 }
 
-bool GetFieldTrialParamsByFeature(const base::Feature& feature,
-                                  std::map<std::string, std::string>* params) {
-  if (!base::FeatureList::IsEnabled(feature))
+bool GetFieldTrialParamsByFeature(const Feature& feature,
+                                  FieldTrialParams* params) {
+  if (!FeatureList::IsEnabled(feature))
     return false;
 
-  base::FieldTrial* trial = base::FeatureList::GetFieldTrial(feature);
-  if (!trial)
-    return false;
-
-  return GetFieldTrialParams(trial->trial_name(), params);
+  FieldTrial* trial = FeatureList::GetFieldTrial(feature);
+  return FieldTrialParamAssociator::GetInstance()->GetFieldTrialParams(trial,
+                                                                       params);
 }
 
 std::string GetFieldTrialParamValue(const std::string& trial_name,
                                     const std::string& param_name) {
-  std::map<std::string, std::string> params;
+  FieldTrialParams params;
   if (GetFieldTrialParams(trial_name, &params)) {
     auto it = params.find(param_name);
     if (it != params.end())
@@ -48,57 +139,50 @@
   return std::string();
 }
 
-std::string GetFieldTrialParamValueByFeature(const base::Feature& feature,
+std::string GetFieldTrialParamValueByFeature(const Feature& feature,
                                              const std::string& param_name) {
-  if (!base::FeatureList::IsEnabled(feature))
-    return std::string();
-
-  base::FieldTrial* trial = base::FeatureList::GetFieldTrial(feature);
-  if (!trial)
-    return std::string();
-
-  return GetFieldTrialParamValue(trial->trial_name(), param_name);
+  FieldTrialParams params;
+  if (GetFieldTrialParamsByFeature(feature, &params)) {
+    auto it = params.find(param_name);
+    if (it != params.end())
+      return it->second;
+  }
+  return std::string();
 }
 
-int GetFieldTrialParamByFeatureAsInt(const base::Feature& feature,
+int GetFieldTrialParamByFeatureAsInt(const Feature& feature,
                                      const std::string& param_name,
                                      int default_value) {
   std::string value_as_string =
       GetFieldTrialParamValueByFeature(feature, param_name);
   int value_as_int = 0;
-  if (!base::StringToInt(value_as_string, &value_as_int)) {
+  if (!StringToInt(value_as_string, &value_as_int)) {
     if (!value_as_string.empty()) {
-      DLOG(WARNING) << "Failed to parse field trial param " << param_name
-                    << " with string value " << value_as_string
-                    << " under feature " << feature.name
-                    << " into an int. Falling back to default value of "
-                    << default_value;
+      LogInvalidValue(feature, "an int", param_name, value_as_string,
+                      base::NumberToString(default_value));
     }
     value_as_int = default_value;
   }
   return value_as_int;
 }
 
-double GetFieldTrialParamByFeatureAsDouble(const base::Feature& feature,
+double GetFieldTrialParamByFeatureAsDouble(const Feature& feature,
                                            const std::string& param_name,
                                            double default_value) {
   std::string value_as_string =
       GetFieldTrialParamValueByFeature(feature, param_name);
   double value_as_double = 0;
-  if (!base::StringToDouble(value_as_string, &value_as_double)) {
+  if (!StringToDouble(value_as_string, &value_as_double)) {
     if (!value_as_string.empty()) {
-      DLOG(WARNING) << "Failed to parse field trial param " << param_name
-                    << " with string value " << value_as_string
-                    << " under feature " << feature.name
-                    << " into a double. Falling back to default value of "
-                    << default_value;
+      LogInvalidValue(feature, "a double", param_name, value_as_string,
+                      base::NumberToString(default_value));
     }
     value_as_double = default_value;
   }
   return value_as_double;
 }
 
-bool GetFieldTrialParamByFeatureAsBool(const base::Feature& feature,
+bool GetFieldTrialParamByFeatureAsBool(const Feature& feature,
                                        const std::string& param_name,
                                        bool default_value) {
   std::string value_as_string =
@@ -109,15 +193,32 @@
     return false;
 
   if (!value_as_string.empty()) {
-    DLOG(WARNING) << "Failed to parse field trial param " << param_name
-                  << " with string value " << value_as_string
-                  << " under feature " << feature.name
-                  << " into a bool. Falling back to default value of "
-                  << default_value;
+    LogInvalidValue(feature, "a bool", param_name, value_as_string,
+                    default_value ? "true" : "false");
   }
   return default_value;
 }
 
+base::TimeDelta GetFieldTrialParamByFeatureAsTimeDelta(
+    const Feature& feature,
+    const std::string& param_name,
+    base::TimeDelta default_value) {
+  std::string value_as_string =
+      GetFieldTrialParamValueByFeature(feature, param_name);
+
+  if (value_as_string.empty())
+    return default_value;
+
+  absl::optional<base::TimeDelta> ret = TimeDeltaFromString(value_as_string);
+  if (!ret.has_value()) {
+    LogInvalidValue(feature, "a base::TimeDelta", param_name, value_as_string,
+                    base::NumberToString(default_value.InSecondsF()) + " s");
+    return default_value;
+  }
+
+  return ret.value();
+}
+
 std::string FeatureParam<std::string>::Get() const {
   const std::string value = GetFieldTrialParamValueByFeature(*feature, name);
   return value.empty() ? default_value : value;
@@ -135,15 +236,16 @@
   return GetFieldTrialParamByFeatureAsBool(*feature, name, default_value);
 }
 
-void LogInvalidEnumValue(const base::Feature& feature,
+base::TimeDelta FeatureParam<base::TimeDelta>::Get() const {
+  return GetFieldTrialParamByFeatureAsTimeDelta(*feature, name, default_value);
+}
+
+void LogInvalidEnumValue(const Feature& feature,
                          const std::string& param_name,
                          const std::string& value_as_string,
                          int default_value_as_int) {
-  DLOG(WARNING) << "Failed to parse field trial param " << param_name
-                << " with string value " << value_as_string << " under feature "
-                << feature.name
-                << " into an enum. Falling back to default value of "
-                << default_value_as_int;
+  LogInvalidValue(feature, "an enum", param_name, value_as_string,
+                  base::NumberToString(default_value_as_int));
 }
 
 }  // namespace base
diff --git a/base/metrics/field_trial_params.h b/base/metrics/field_trial_params.h
index 8682226..39cc5f0 100644
--- a/base/metrics/field_trial_params.h
+++ b/base/metrics/field_trial_params.h
@@ -1,4 +1,4 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
+// Copyright 2017 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -9,19 +9,39 @@
 #include <string>
 
 #include "base/base_export.h"
+#include "base/feature_list.h"
+#include "base/logging.h"
+#include "base/memory/raw_ptr_exclusion.h"
+#include "base/notreached.h"
+#include "base/time/time.h"
 
 namespace base {
 
-struct Feature;
+// Key-value mapping type for field trial parameters.
+typedef std::map<std::string, std::string> FieldTrialParams;
+
+// Param string decoding function for AssociateFieldTrialParamsFromString().
+typedef std::string (*FieldTrialParamsDecodeStringFunc)(const std::string& str);
+
+// Unescapes special characters from the given string. Used in
+// AssociateFieldTrialParamsFromString() as one of the feature params decoding
+// functions.
+BASE_EXPORT std::string UnescapeValue(const std::string& value);
 
 // Associates the specified set of key-value |params| with the field trial
 // specified by |trial_name| and |group_name|. Fails and returns false if the
 // specified field trial already has params associated with it or the trial
 // is already active (group() has been called on it). Thread safe.
-BASE_EXPORT bool AssociateFieldTrialParams(
-    const std::string& trial_name,
-    const std::string& group_name,
-    const std::map<std::string, std::string>& params);
+BASE_EXPORT bool AssociateFieldTrialParams(const std::string& trial_name,
+                                           const std::string& group_name,
+                                           const FieldTrialParams& params);
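+//
+// Illustrative usage (trial/group/param names hypothetical):
+//   base::FieldTrialParams params = {{"delay_ms", "200"}};
+//   bool ok = base::AssociateFieldTrialParams("MyTrial", "GroupA", params);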
+
+// Provides a mechanism to associate multiple sets of params with multiple
+// groups, using a formatted string as returned by
+// FieldTrialList::AllParamsToString().
+// |decode_data_func| allows specifying a custom decoding function.
+BASE_EXPORT bool AssociateFieldTrialParamsFromString(
+    const std::string& params_string,
+    FieldTrialParamsDecodeStringFunc decode_data_func);
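+//
+// Illustrative usage (a sketch; names hypothetical, with UnescapeValue() from
+// this header as the decoder):
+//   base::AssociateFieldTrialParamsFromString(
+//       "Trial1.Group1:k1/v1/k2/v2,Trial2.Group2:k3/v3", &base::UnescapeValue);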
 
 // Retrieves the set of key-value |params| for the specified field trial, based
 // on its selected group. If the field trial does not exist or its selected
@@ -29,9 +49,8 @@
 // does not modify |params|. Calling this function will result in the field
 // trial being marked as active if found (i.e. group() will be called on it),
 // if it wasn't already. Thread safe.
-BASE_EXPORT bool GetFieldTrialParams(
-    const std::string& trial_name,
-    std::map<std::string, std::string>* params);
+BASE_EXPORT bool GetFieldTrialParams(const std::string& trial_name,
+                                     FieldTrialParams* params);
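+//
+// Illustrative usage (trial name hypothetical):
+//   base::FieldTrialParams params;
+//   if (base::GetFieldTrialParams("MyTrial", &params)) {
+//     // |params| now holds the key/value pairs for the selected group.
+//   }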
 
 // Retrieves the set of key-value |params| for the field trial associated with
 // the specified |feature|. A feature is associated with at most one field
@@ -40,9 +59,8 @@
 // returns false and does not modify |params|. Calling this function will
 // result in the associated field trial being marked as active if found (i.e.
 // group() will be called on it), if it wasn't already. Thread safe.
-BASE_EXPORT bool GetFieldTrialParamsByFeature(
-    const base::Feature& feature,
-    std::map<std::string, std::string>* params);
+BASE_EXPORT bool GetFieldTrialParamsByFeature(const base::Feature& feature,
+                                              FieldTrialParams* params);
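+//
+// Illustrative usage (a sketch; assumes a base::Feature named kMyFeature):
+//   base::FieldTrialParams params;
+//   if (base::GetFieldTrialParamsByFeature(kMyFeature, &params)) {
+//     // Use the values in |params|.
+//   }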
 
 // Retrieves a specific parameter value corresponding to |param_name| for the
 // specified field trial, based on its selected group. If the field trial does
@@ -99,9 +117,13 @@
 //   double
 //   std::string
 //   enum types
+//   base::TimeDelta
 //
 // See the individual definitions below for the appropriate interfaces.
 // Attempting to use it with any other type is a compile error.
+//
+// Getting a param value from a FeatureParam<T> will have the same semantics as
+// GetFieldTrialParamValueByFeature(); see that function's comments for details.
 template <typename T, bool IsEnum = std::is_enum<T>::value>
 struct FeatureParam {
   // Prevent use of FeatureParam<> with unsupported types (e.g. void*). Uses T
@@ -115,8 +137,8 @@
 //     constexpr FeatureParam<string> kAssistantName{
 //         &kAssistantFeature, "assistant_name", "HAL"};
 //
-// If the feature is not set, or set to the empty string, then Get() will return
-// the default value.
+// If the parameter is not set, or set to the empty string, then Get() will
+// return the default value.
 template <>
 struct FeatureParam<std::string> {
   constexpr FeatureParam(const Feature* feature,
@@ -124,9 +146,13 @@
                          const char* default_value)
       : feature(feature), name(name), default_value(default_value) {}
 
+  // Calling Get() will activate the field trial associated with |feature|. See
+  // GetFieldTrialParamValueByFeature() for more details.
   BASE_EXPORT std::string Get() const;
 
-  const Feature* const feature;
+  // This field is not a raw_ptr<> because it was filtered by the rewriter for:
+  // #global-scope, #constexpr-ctor-field-initializer
+  RAW_PTR_EXCLUSION const Feature* const feature;
   const char* const name;
   const char* const default_value;
 };
@@ -136,8 +162,8 @@
 //     constexpr FeatureParam<double> kAssistantTriggerThreshold{
 //         &kAssistantFeature, "trigger_threshold", 0.10};
 //
-// If the feature is not set, or set to an invalid double value, then Get() will
-// return the default value.
+// If the parameter is not set, or set to an invalid double value, then Get()
+// will return the default value.
 template <>
 struct FeatureParam<double> {
   constexpr FeatureParam(const Feature* feature,
@@ -145,9 +171,13 @@
                          double default_value)
       : feature(feature), name(name), default_value(default_value) {}
 
+  // Calling Get() will activate the field trial associated with |feature|. See
+  // GetFieldTrialParamValueByFeature() for more details.
   BASE_EXPORT double Get() const;
 
-  const Feature* const feature;
+  // This field is not a raw_ptr<> because it was filtered by the rewriter for:
+  // #global-scope, #constexpr-ctor-field-initializer
+  RAW_PTR_EXCLUSION const Feature* const feature;
   const char* const name;
   const double default_value;
 };
@@ -157,7 +187,7 @@
 //     constexpr FeatureParam<int> kAssistantParallelism{
 //         &kAssistantFeature, "parallelism", 4};
 //
-// If the feature is not set, or set to an invalid int value, then Get() will
+// If the parameter is not set, or set to an invalid int value, then Get() will
 // return the default value.
 template <>
 struct FeatureParam<int> {
@@ -166,9 +196,13 @@
                          int default_value)
       : feature(feature), name(name), default_value(default_value) {}
 
+  // Calling Get() will activate the field trial associated with |feature|. See
+  // GetFieldTrialParamValueByFeature() for more details.
   BASE_EXPORT int Get() const;
 
-  const Feature* const feature;
+  // This field is not a raw_ptr<> because it was filtered by the rewriter for:
+  // #global-scope, #constexpr-ctor-field-initializer
+  RAW_PTR_EXCLUSION const Feature* const feature;
   const char* const name;
   const int default_value;
 };
@@ -178,8 +212,8 @@
 //     constexpr FeatureParam<bool> kAssistantIsHelpful{
 //         &kAssistantFeature, "is_helpful", true};
 //
-// If the feature is not set, or set to value other than "true" or "false", then
-// Get() will return the default value.
+// If the parameter is not set, or set to value other than "true" or "false",
+// then Get() will return the default value.
 template <>
 struct FeatureParam<bool> {
   constexpr FeatureParam(const Feature* feature,
@@ -187,13 +221,42 @@
                          bool default_value)
       : feature(feature), name(name), default_value(default_value) {}
 
+  // Calling Get() will activate the field trial associated with |feature|. See
+  // GetFieldTrialParamValueByFeature() for more details.
   BASE_EXPORT bool Get() const;
 
-  const Feature* const feature;
+  // This field is not a raw_ptr<> because it was filtered by the rewriter for:
+  // #global-scope, #constexpr-ctor-field-initializer
+  RAW_PTR_EXCLUSION const Feature* const feature;
   const char* const name;
   const bool default_value;
 };
 
+// Declares a TimeDelta-valued parameter. Example:
+//
+//     constexpr base::FeatureParam<base::TimeDelta> kPerAgentDelay{
+//         &kPerAgentSchedulingExperiments, "delay", base::TimeDelta()};
+//
+// If the parameter is not set, or set to an invalid value (as defined by
+// base::TimeDeltaFromString()), then Get() will return the default value.
+template <>
+struct FeatureParam<base::TimeDelta> {
+  constexpr FeatureParam(const Feature* feature,
+                         const char* name,
+                         base::TimeDelta default_value)
+      : feature(feature), name(name), default_value(default_value) {}
+
+  // Calling Get() will activate the field trial associated with |feature|. See
+  // GetFieldTrialParamValueByFeature() for more details.
+  BASE_EXPORT base::TimeDelta Get() const;
+
+  // This field is not a raw_ptr<> because it was filtered by the rewriter for:
+  // #global-scope, #constexpr-ctor-field-initializer
+  RAW_PTR_EXCLUSION const Feature* const feature;
+  const char* const name;
+  const base::TimeDelta default_value;
+};
+
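
A quick sketch of the accepted value format (hypothetical names; the parsing
rules come from base::TimeDeltaFromString(), as exercised by the unit tests
further down):

    BASE_FEATURE(kMyDelayFeature, "MyDelayFeature",
                 base::FEATURE_DISABLED_BY_DEFAULT);
    constexpr base::FeatureParam<base::TimeDelta> kMyDelay{
        &kMyDelayFeature, "delay", base::Seconds(5)};

    base::TimeDelta EffectiveDelay() {
      // "1.5s" parses to 1.5 seconds and "1h2m" to 62 minutes; strings like
      // "1" or "true" are rejected and fall back to the 5-second default.
      return kMyDelay.Get();
    }
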
 BASE_EXPORT void LogInvalidEnumValue(const Feature& feature,
                                      const std::string& param_name,
                                      const std::string& value_as_string,
@@ -201,7 +264,7 @@
 
 // Feature param declaration for an enum, with associated options. Example:
 //
-//     constexpr FeatureParam<ShapeEnum>::Option[] kShapeParamOptions[] = {
+//     constexpr FeatureParam<ShapeEnum>::Option kShapeParamOptions[] = {
 //         {SHAPE_CIRCLE, "circle"},
 //         {SHAPE_CYLINDER, "cylinder"},
 //         {SHAPE_PAPERCLIP, "paperclip"}};
@@ -234,6 +297,8 @@
     static_assert(option_count >= 1, "FeatureParam<enum> has no options");
   }
 
+  // Calling Get() will activate the field trial associated with |feature|. See
+  // GetFieldTrialParamValueByFeature() for more details.
   Enum Get() const {
     std::string value = GetFieldTrialParamValueByFeature(*feature, name);
     if (value.empty())
@@ -246,10 +311,24 @@
     return default_value;
   }
 
-  const base::Feature* const feature;
+  // Returns the param-string for the given enum value.
+  std::string GetName(Enum value) const {
+    for (size_t i = 0; i < option_count; ++i) {
+      if (value == options[i].value)
+        return options[i].name;
+    }
+    NOTREACHED();
+    return "";
+  }
+
+  // This field is not a raw_ptr<> because it was filtered by the rewriter for:
+  // #global-scope, #constexpr-ctor-field-initializer
+  RAW_PTR_EXCLUSION const base::Feature* const feature;
   const char* const name;
   const Enum default_value;
-  const Option* const options;
+  // This field is not a raw_ptr<> because it was filtered by the rewriter for:
+  // #global-scope, #constexpr-ctor-field-initializer
+  RAW_PTR_EXCLUSION const Option* const options;
   const size_t option_count;
 };
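
An end-to-end sketch of the enum specialization, including the GetName()
reverse lookup added above (the enum, feature, and option names are
hypothetical):

    enum class Shape { kCircle, kSquare };

    BASE_FEATURE(kShapeFeature, "ShapeFeature",
                 base::FEATURE_DISABLED_BY_DEFAULT);
    constexpr base::FeatureParam<Shape>::Option kShapeOptions[] = {
        {Shape::kCircle, "circle"}, {Shape::kSquare, "square"}};
    constexpr base::FeatureParam<Shape> kShape{&kShapeFeature, "shape",
                                               Shape::kCircle, &kShapeOptions};

    void LogShape() {
      // Param value "square" yields Shape::kSquare; anything else yields the
      // default, Shape::kCircle.
      Shape shape = kShape.Get();
      // GetName() maps an enum value back to its param string ("square"), and
      // hits NOTREACHED() for values missing from the options table.
      std::string name = kShape.GetName(shape);
    }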
 
diff --git a/base/metrics/field_trial_params_unittest.cc b/base/metrics/field_trial_params_unittest.cc
index d310c0d..6f770a2 100644
--- a/base/metrics/field_trial_params_unittest.cc
+++ b/base/metrics/field_trial_params_unittest.cc
@@ -1,37 +1,40 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
+// Copyright 2017 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "base/metrics/field_trial_params.h"
 
 #include "base/feature_list.h"
-#include "base/macros.h"
 #include "base/metrics/field_trial.h"
 #include "base/metrics/field_trial_param_associator.h"
+#include "base/test/gtest_util.h"
+#include "base/test/mock_entropy_provider.h"
 #include "base/test/scoped_feature_list.h"
+#include "base/time/time.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
 
 namespace {
 
-// Call FieldTrialList::FactoryGetFieldTrial() with a future expiry date.
+// Call FieldTrialList::FactoryGetFieldTrial().
 scoped_refptr<FieldTrial> CreateFieldTrial(
     const std::string& trial_name,
     int total_probability,
-    const std::string& default_group_name,
-    int* default_group_number) {
+    const std::string& default_group_name) {
+  MockEntropyProvider entropy_provider(0.9);
   return FieldTrialList::FactoryGetFieldTrial(
-      trial_name, total_probability, default_group_name,
-      FieldTrialList::kNoExpirationYear, 1, 1, FieldTrial::SESSION_RANDOMIZED,
-      default_group_number);
+      trial_name, total_probability, default_group_name, entropy_provider);
 }
 
 }  // namespace
 
 class FieldTrialParamsTest : public ::testing::Test {
  public:
-  FieldTrialParamsTest() : field_trial_list_(nullptr) {}
+  FieldTrialParamsTest() = default;
+
+  FieldTrialParamsTest(const FieldTrialParamsTest&) = delete;
+  FieldTrialParamsTest& operator=(const FieldTrialParamsTest&) = delete;
 
   ~FieldTrialParamsTest() override {
     // Ensure that the maps are cleared between tests, since they are stored as
@@ -49,10 +52,7 @@
   }
 
  private:
-  FieldTrialList field_trial_list_;
   test::ScopedFeatureList scoped_feature_list_;
-
-  DISALLOW_COPY_AND_ASSIGN(FieldTrialParamsTest);
 };
 
 TEST_F(FieldTrialParamsTest, AssociateFieldTrialParams) {
@@ -113,8 +113,7 @@
       "AssociateFieldTrialParams_DoesntActivateTrial";
 
   ASSERT_FALSE(FieldTrialList::IsTrialActive(kTrialName));
-  scoped_refptr<FieldTrial> trial(
-      CreateFieldTrial(kTrialName, 100, "A", nullptr));
+  scoped_refptr<FieldTrial> trial(CreateFieldTrial(kTrialName, 100, "A"));
   ASSERT_FALSE(FieldTrialList::IsTrialActive(kTrialName));
 
   std::map<std::string, std::string> params;
@@ -147,8 +146,7 @@
   const std::string kTrialName = "GetFieldTrialParams_ActivatesTrial";
 
   ASSERT_FALSE(FieldTrialList::IsTrialActive(kTrialName));
-  scoped_refptr<FieldTrial> trial(
-      CreateFieldTrial(kTrialName, 100, "A", nullptr));
+  scoped_refptr<FieldTrial> trial(CreateFieldTrial(kTrialName, 100, "A"));
   ASSERT_FALSE(FieldTrialList::IsTrialActive(kTrialName));
 
   std::map<std::string, std::string> params;
@@ -160,8 +158,7 @@
   const std::string kTrialName = "GetFieldTrialParamValue_ActivatesTrial";
 
   ASSERT_FALSE(FieldTrialList::IsTrialActive(kTrialName));
-  scoped_refptr<FieldTrial> trial(
-      CreateFieldTrial(kTrialName, 100, "A", nullptr));
+  scoped_refptr<FieldTrial> trial(CreateFieldTrial(kTrialName, 100, "A"));
   ASSERT_FALSE(FieldTrialList::IsTrialActive(kTrialName));
 
   std::map<std::string, std::string> params;
@@ -171,13 +168,12 @@
 
 TEST_F(FieldTrialParamsTest, GetFieldTrialParamsByFeature) {
   const std::string kTrialName = "GetFieldTrialParamsByFeature";
-  const Feature kFeature{"TestFeature", FEATURE_DISABLED_BY_DEFAULT};
+  static BASE_FEATURE(kFeature, "TestFeature", FEATURE_DISABLED_BY_DEFAULT);
 
   std::map<std::string, std::string> params;
   params["x"] = "1";
   AssociateFieldTrialParams(kTrialName, "A", params);
-  scoped_refptr<FieldTrial> trial(
-      CreateFieldTrial(kTrialName, 100, "A", nullptr));
+  scoped_refptr<FieldTrial> trial(CreateFieldTrial(kTrialName, 100, "A"));
 
   CreateFeatureWithTrial(kFeature, FeatureList::OVERRIDE_ENABLE_FEATURE,
                          trial.get());
@@ -189,13 +185,12 @@
 
 TEST_F(FieldTrialParamsTest, GetFieldTrialParamValueByFeature) {
   const std::string kTrialName = "GetFieldTrialParamsByFeature";
-  const Feature kFeature{"TestFeature", FEATURE_DISABLED_BY_DEFAULT};
+  static BASE_FEATURE(kFeature, "TestFeature", FEATURE_DISABLED_BY_DEFAULT);
 
   std::map<std::string, std::string> params;
   params["x"] = "1";
   AssociateFieldTrialParams(kTrialName, "A", params);
-  scoped_refptr<FieldTrial> trial(
-      CreateFieldTrial(kTrialName, 100, "A", nullptr));
+  scoped_refptr<FieldTrial> trial(CreateFieldTrial(kTrialName, 100, "A"));
 
   CreateFeatureWithTrial(kFeature, FeatureList::OVERRIDE_ENABLE_FEATURE,
                          trial.get());
@@ -206,13 +201,12 @@
 
 TEST_F(FieldTrialParamsTest, GetFieldTrialParamsByFeature_Disable) {
   const std::string kTrialName = "GetFieldTrialParamsByFeature";
-  const Feature kFeature{"TestFeature", FEATURE_DISABLED_BY_DEFAULT};
+  static BASE_FEATURE(kFeature, "TestFeature", FEATURE_DISABLED_BY_DEFAULT);
 
   std::map<std::string, std::string> params;
   params["x"] = "1";
   AssociateFieldTrialParams(kTrialName, "A", params);
-  scoped_refptr<FieldTrial> trial(
-      CreateFieldTrial(kTrialName, 100, "A", nullptr));
+  scoped_refptr<FieldTrial> trial(CreateFieldTrial(kTrialName, 100, "A"));
 
   CreateFeatureWithTrial(kFeature, FeatureList::OVERRIDE_DISABLE_FEATURE,
                          trial.get());
@@ -223,13 +217,12 @@
 
 TEST_F(FieldTrialParamsTest, GetFieldTrialParamValueByFeature_Disable) {
   const std::string kTrialName = "GetFieldTrialParamsByFeature";
-  const Feature kFeature{"TestFeature", FEATURE_DISABLED_BY_DEFAULT};
+  static BASE_FEATURE(kFeature, "TestFeature", FEATURE_DISABLED_BY_DEFAULT);
 
   std::map<std::string, std::string> params;
   params["x"] = "1";
   AssociateFieldTrialParams(kTrialName, "A", params);
-  scoped_refptr<FieldTrial> trial(
-      CreateFieldTrial(kTrialName, 100, "A", nullptr));
+  scoped_refptr<FieldTrial> trial(CreateFieldTrial(kTrialName, 100, "A"));
 
   CreateFeatureWithTrial(kFeature, FeatureList::OVERRIDE_DISABLE_FEATURE,
                          trial.get());
@@ -241,7 +234,7 @@
 TEST_F(FieldTrialParamsTest, FeatureParamString) {
   const std::string kTrialName = "GetFieldTrialParamsByFeature";
 
-  static const Feature kFeature{"TestFeature", FEATURE_DISABLED_BY_DEFAULT};
+  static BASE_FEATURE(kFeature, "TestFeature", FEATURE_DISABLED_BY_DEFAULT);
   static const FeatureParam<std::string> a{&kFeature, "a", "default"};
   static const FeatureParam<std::string> b{&kFeature, "b", ""};
   static const FeatureParam<std::string> c{&kFeature, "c", "default"};
@@ -257,8 +250,7 @@
   // "e" is not registered
   // "f" is not registered
   AssociateFieldTrialParams(kTrialName, "A", params);
-  scoped_refptr<FieldTrial> trial(
-      CreateFieldTrial(kTrialName, 100, "A", nullptr));
+  scoped_refptr<FieldTrial> trial(CreateFieldTrial(kTrialName, 100, "A"));
 
   CreateFeatureWithTrial(kFeature, FeatureList::OVERRIDE_ENABLE_FEATURE,
                          trial.get());
@@ -274,7 +266,7 @@
 TEST_F(FieldTrialParamsTest, FeatureParamInt) {
   const std::string kTrialName = "GetFieldTrialParamsByFeature";
 
-  static const Feature kFeature{"TestFeature", FEATURE_DISABLED_BY_DEFAULT};
+  static BASE_FEATURE(kFeature, "TestFeature", FEATURE_DISABLED_BY_DEFAULT);
   static const FeatureParam<int> a{&kFeature, "a", 0};
   static const FeatureParam<int> b{&kFeature, "b", 0};
   static const FeatureParam<int> c{&kFeature, "c", 0};
@@ -288,8 +280,7 @@
   params["d"] = "";
   // "e" is not registered
   AssociateFieldTrialParams(kTrialName, "A", params);
-  scoped_refptr<FieldTrial> trial(
-      CreateFieldTrial(kTrialName, 100, "A", nullptr));
+  scoped_refptr<FieldTrial> trial(CreateFieldTrial(kTrialName, 100, "A"));
 
   CreateFeatureWithTrial(kFeature, FeatureList::OVERRIDE_ENABLE_FEATURE,
                          trial.get());
@@ -310,7 +301,7 @@
 TEST_F(FieldTrialParamsTest, FeatureParamDouble) {
   const std::string kTrialName = "GetFieldTrialParamsByFeature";
 
-  static const Feature kFeature{"TestFeature", FEATURE_DISABLED_BY_DEFAULT};
+  static BASE_FEATURE(kFeature, "TestFeature", FEATURE_DISABLED_BY_DEFAULT);
   static const FeatureParam<double> a{&kFeature, "a", 0.0};
   static const FeatureParam<double> b{&kFeature, "b", 0.0};
   static const FeatureParam<double> c{&kFeature, "c", 0.0};
@@ -326,8 +317,7 @@
   params["e"] = "";
   // "f" is not registered
   AssociateFieldTrialParams(kTrialName, "A", params);
-  scoped_refptr<FieldTrial> trial(
-      CreateFieldTrial(kTrialName, 100, "A", nullptr));
+  scoped_refptr<FieldTrial> trial(CreateFieldTrial(kTrialName, 100, "A"));
 
   CreateFeatureWithTrial(kFeature, FeatureList::OVERRIDE_ENABLE_FEATURE,
                          trial.get());
@@ -351,7 +341,7 @@
 TEST_F(FieldTrialParamsTest, FeatureParamBool) {
   const std::string kTrialName = "GetFieldTrialParamsByFeature";
 
-  static const Feature kFeature{"TestFeature", FEATURE_DISABLED_BY_DEFAULT};
+  static BASE_FEATURE(kFeature, "TestFeature", FEATURE_DISABLED_BY_DEFAULT);
   static const FeatureParam<bool> a{&kFeature, "a", false};
   static const FeatureParam<bool> b{&kFeature, "b", true};
   static const FeatureParam<bool> c{&kFeature, "c", false};
@@ -367,18 +357,55 @@
   params["e"] = "";
   // "f" is not registered
   AssociateFieldTrialParams(kTrialName, "A", params);
-  scoped_refptr<FieldTrial> trial(
-      CreateFieldTrial(kTrialName, 100, "A", nullptr));
+  scoped_refptr<FieldTrial> trial(CreateFieldTrial(kTrialName, 100, "A"));
 
   CreateFeatureWithTrial(kFeature, FeatureList::OVERRIDE_ENABLE_FEATURE,
                          trial.get());
 
   EXPECT_TRUE(a.Get());
   EXPECT_FALSE(b.Get());
-  EXPECT_FALSE(c.Get());  // invalid
-  EXPECT_TRUE(d.Get());   // invalid
-  EXPECT_TRUE(e.Get());   // empty
-  EXPECT_TRUE(f.Get());   // empty
+  EXPECT_EQ(false, c.Get());  // invalid
+  EXPECT_EQ(true, d.Get());   // invalid
+  EXPECT_TRUE(e.Get());       // empty
+  EXPECT_TRUE(f.Get());       // empty
+}
+
+TEST_F(FieldTrialParamsTest, FeatureParamTimeDelta) {
+  const std::string kTrialName = "GetFieldTrialParamsByFeature";
+
+  static BASE_FEATURE(kFeature, "TestFeature", FEATURE_DISABLED_BY_DEFAULT);
+  static const FeatureParam<base::TimeDelta> a{&kFeature, "a",
+                                               base::TimeDelta()};
+  static const FeatureParam<base::TimeDelta> b{&kFeature, "b",
+                                               base::TimeDelta()};
+  static const FeatureParam<base::TimeDelta> c{&kFeature, "c",
+                                               base::TimeDelta()};
+  static const FeatureParam<base::TimeDelta> d{&kFeature, "d",
+                                               base::TimeDelta()};
+  static const FeatureParam<base::TimeDelta> e{&kFeature, "e",
+                                               base::TimeDelta()};
+  static const FeatureParam<base::TimeDelta> f{&kFeature, "f",
+                                               base::TimeDelta()};
+
+  std::map<std::string, std::string> params;
+  params["a"] = "1.5s";
+  params["b"] = "1h2m";
+  params["c"] = "1";
+  params["d"] = "true";
+  params["e"] = "";
+  // "f" is not registered
+  AssociateFieldTrialParams(kTrialName, "A", params);
+  scoped_refptr<FieldTrial> trial(CreateFieldTrial(kTrialName, 100, "A"));
+
+  CreateFeatureWithTrial(kFeature, FeatureList::OVERRIDE_ENABLE_FEATURE,
+                         trial.get());
+
+  EXPECT_EQ(a.Get(), base::Seconds(1.5));
+  EXPECT_EQ(b.Get(), base::Minutes(62));
+  EXPECT_EQ(c.Get(), base::TimeDelta());  // invalid
+  EXPECT_EQ(d.Get(), base::TimeDelta());  // invalid
+  EXPECT_EQ(e.Get(), base::TimeDelta());  // empty
+  EXPECT_EQ(f.Get(), base::TimeDelta());  // empty
 }
 
 enum Hand { ROCK, PAPER, SCISSORS };
@@ -388,7 +415,7 @@
 
   static const FeatureParam<Hand>::Option hands[] = {
       {ROCK, "rock"}, {PAPER, "paper"}, {SCISSORS, "scissors"}};
-  static const Feature kFeature{"TestFeature", FEATURE_DISABLED_BY_DEFAULT};
+  static BASE_FEATURE(kFeature, "TestFeature", FEATURE_DISABLED_BY_DEFAULT);
   static const FeatureParam<Hand> a{&kFeature, "a", ROCK, &hands};
   static const FeatureParam<Hand> b{&kFeature, "b", ROCK, &hands};
   static const FeatureParam<Hand> c{&kFeature, "c", ROCK, &hands};
@@ -404,8 +431,7 @@
   params["e"] = "";
   // "f" is not registered
   AssociateFieldTrialParams(kTrialName, "A", params);
-  scoped_refptr<FieldTrial> trial(
-      CreateFieldTrial(kTrialName, 100, "A", nullptr));
+  scoped_refptr<FieldTrial> trial(CreateFieldTrial(kTrialName, 100, "A"));
 
   CreateFeatureWithTrial(kFeature, FeatureList::OVERRIDE_ENABLE_FEATURE,
                          trial.get());
@@ -414,7 +440,7 @@
   EXPECT_EQ(PAPER, b.Get());
   EXPECT_EQ(SCISSORS, c.Get());
   EXPECT_EQ(ROCK, d.Get());      // invalid
-  EXPECT_EQ(PAPER, e.Get());     // invalid/empty
+  EXPECT_EQ(PAPER, e.Get());     // empty
   EXPECT_EQ(SCISSORS, f.Get());  // not registered
 }
 
@@ -425,7 +451,7 @@
 
   static const FeatureParam<UI>::Option uis[] = {
       {UI::ONE_D, "1d"}, {UI::TWO_D, "2d"}, {UI::THREE_D, "3d"}};
-  static const Feature kFeature{"TestFeature", FEATURE_DISABLED_BY_DEFAULT};
+  static BASE_FEATURE(kFeature, "TestFeature", FEATURE_DISABLED_BY_DEFAULT);
   static const FeatureParam<UI> a{&kFeature, "a", UI::ONE_D, &uis};
   static const FeatureParam<UI> b{&kFeature, "b", UI::ONE_D, &uis};
   static const FeatureParam<UI> c{&kFeature, "c", UI::ONE_D, &uis};
@@ -441,8 +467,7 @@
   params["e"] = "";
   // "f" is not registered
   AssociateFieldTrialParams(kTrialName, "A", params);
-  scoped_refptr<FieldTrial> trial(
-      CreateFieldTrial(kTrialName, 100, "A", nullptr));
+  scoped_refptr<FieldTrial> trial(CreateFieldTrial(kTrialName, 100, "A"));
 
   CreateFeatureWithTrial(kFeature, FeatureList::OVERRIDE_ENABLE_FEATURE,
                          trial.get());
@@ -451,7 +476,7 @@
   EXPECT_EQ(UI::TWO_D, b.Get());
   EXPECT_EQ(UI::THREE_D, c.Get());
   EXPECT_EQ(UI::ONE_D, d.Get());    // invalid
-  EXPECT_EQ(UI::TWO_D, e.Get());    // invalid/empty
+  EXPECT_EQ(UI::TWO_D, e.Get());    // empty
   EXPECT_EQ(UI::THREE_D, f.Get());  // not registered
 }
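
As an aside, the setup boilerplate repeated in these tests
(AssociateFieldTrialParams() + CreateFieldTrial() + CreateFeatureWithTrial())
can typically be collapsed with ScopedFeatureList; a sketch, assuming the
standard base::test API:

    base::test::ScopedFeatureList feature_list;
    feature_list.InitAndEnableFeatureWithParameters(kFeature, {{"x", "1"}});
    EXPECT_EQ("1", GetFieldTrialParamValueByFeature(kFeature, "x"));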
 
diff --git a/base/metrics/field_trial_params_unittest.nc b/base/metrics/field_trial_params_unittest.nc
index 4c6005e..4c72865 100644
--- a/base/metrics/field_trial_params_unittest.nc
+++ b/base/metrics/field_trial_params_unittest.nc
@@ -1,4 +1,4 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
+// Copyright 2017 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -8,7 +8,8 @@
 #include "base/feature_list.h"
 #include "base/metrics/field_trial_params.h"
 
-constexpr base::Feature kFeature{"NoCompileFeature"};
+[[maybe_unused]] constexpr base::Feature kFeature{
+  "NoCompileFeature", base::FEATURE_DISABLED_BY_DEFAULT};
 
 enum Param { FOO, BAR };
 
@@ -38,10 +39,4 @@
 constexpr base::FeatureParam<Param> kParam{
   &kFeature, "Param", FOO, &kParamOptions};
 
-#else
-
-void suppress_unused_variable_warning() {
-    (void)kFeature;
-}
-
 #endif
diff --git a/base/metrics/field_trial_unittest.cc b/base/metrics/field_trial_unittest.cc
index 3b8a610..49c2d8b 100644
--- a/base/metrics/field_trial_unittest.cc
+++ b/base/metrics/field_trial_unittest.cc
@@ -1,15 +1,18 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "base/metrics/field_trial.h"
 
+#include <stddef.h>
+#include <utility>
+
 #include "base/base_switches.h"
 #include "base/build_time.h"
+#include "base/command_line.h"
 #include "base/feature_list.h"
-#include "base/macros.h"
 #include "base/memory/ptr_util.h"
-#include "base/message_loop/message_loop.h"
+#include "base/metrics/field_trial_list_including_low_anonymity.h"
 #include "base/metrics/field_trial_param_associator.h"
 #include "base/rand_util.h"
 #include "base/run_loop.h"
@@ -17,11 +20,27 @@
 #include "base/strings/stringprintf.h"
 #include "base/test/gtest_util.h"
 #include "base/test/mock_entropy_provider.h"
+#include "base/test/multiprocess_test.h"
 #include "base/test/scoped_feature_list.h"
+#include "base/test/task_environment.h"
 #include "base/test/test_shared_memory_util.h"
+#include "base/test/test_timeouts.h"
 #include "build/build_config.h"
-#include "starboard/types.h"
+#include "testing/gmock/include/gmock/gmock.h"
 #include "testing/gtest/include/gtest/gtest.h"
+#include "testing/multiprocess_func_list.h"
+
+#if !BUILDFLAG(IS_IOS)
+#include "base/process/launch.h"
+#endif
+
+#if BUILDFLAG(IS_ANDROID)
+#include "base/posix/global_descriptors.h"
+#endif
+
+#if BUILDFLAG(IS_MAC)
+#include "base/mac/mach_port_rendezvous.h"
+#endif
 
 namespace base {
 
@@ -30,45 +49,92 @@
 // Default group name used by several tests.
 const char kDefaultGroupName[] = "DefaultGroup";
 
-// Call FieldTrialList::FactoryGetFieldTrial() with a future expiry date.
+// Call FieldTrialList::FactoryGetFieldTrial().
 scoped_refptr<FieldTrial> CreateFieldTrial(
     const std::string& trial_name,
     int total_probability,
     const std::string& default_group_name,
-    int* default_group_number) {
+    bool is_low_anonymity = false) {
+  MockEntropyProvider entropy_provider(0.9);
   return FieldTrialList::FactoryGetFieldTrial(
-      trial_name, total_probability, default_group_name,
-      FieldTrialList::kNoExpirationYear, 1, 1, FieldTrial::SESSION_RANDOMIZED,
-      default_group_number);
+      trial_name, total_probability, default_group_name, entropy_provider, 0,
+      is_low_anonymity);
 }
 
-int OneYearBeforeBuildTime() {
-  Time one_year_before_build_time = GetBuildTime() - TimeDelta::FromDays(365);
-  Time::Exploded exploded;
-  one_year_before_build_time.LocalExplode(&exploded);
-  return exploded.year;
-}
-
-// FieldTrialList::Observer implementation for testing.
+// A FieldTrialList::Observer implementation which stores the trial name and
+// group name received via OnFieldTrialGroupFinalized() for later inspection.
 class TestFieldTrialObserver : public FieldTrialList::Observer {
  public:
-  enum Type {
-    ASYNCHRONOUS,
-    SYNCHRONOUS,
-  };
+  TestFieldTrialObserver() { FieldTrialList::AddObserver(this); }
+  TestFieldTrialObserver(const TestFieldTrialObserver&) = delete;
+  TestFieldTrialObserver& operator=(const TestFieldTrialObserver&) = delete;
 
-  TestFieldTrialObserver(Type type) : type_(type) {
-    if (type == SYNCHRONOUS)
-      FieldTrialList::SetSynchronousObserver(this);
-    else
-      FieldTrialList::AddObserver(this);
+  ~TestFieldTrialObserver() override { FieldTrialList::RemoveObserver(this); }
+
+  void OnFieldTrialGroupFinalized(const std::string& trial,
+                                  const std::string& group) override {
+    trial_name_ = trial;
+    group_name_ = group;
   }
 
-  ~TestFieldTrialObserver() override {
-    if (type_ == SYNCHRONOUS)
-      FieldTrialList::RemoveSynchronousObserver(this);
-    else
-      FieldTrialList::RemoveObserver(this);
+  const std::string& trial_name() const { return trial_name_; }
+  const std::string& group_name() const { return group_name_; }
+
+ private:
+  std::string trial_name_;
+  std::string group_name_;
+};
+
+// A FieldTrialList::Observer implementation which accesses the group of a
+// FieldTrial from OnFieldTrialGroupFinalized(). Used to test reentrancy.
+class FieldTrialObserverAccessingGroup : public FieldTrialList::Observer {
+ public:
+  // |trial_to_access| is the FieldTrial on which to invoke Activate() when
+  // receiving an OnFieldTrialGroupFinalized() notification.
+  explicit FieldTrialObserverAccessingGroup(
+      scoped_refptr<FieldTrial> trial_to_access)
+      : trial_to_access_(trial_to_access) {
+    FieldTrialList::AddObserver(this);
+  }
+  FieldTrialObserverAccessingGroup(const FieldTrialObserverAccessingGroup&) =
+      delete;
+  FieldTrialObserverAccessingGroup& operator=(
+      const FieldTrialObserverAccessingGroup&) = delete;
+
+  ~FieldTrialObserverAccessingGroup() override {
+    FieldTrialList::RemoveObserver(this);
+  }
+
+  void OnFieldTrialGroupFinalized(const std::string& trial,
+                                  const std::string& group) override {
+    trial_to_access_->Activate();
+  }
+
+ private:
+  scoped_refptr<FieldTrial> trial_to_access_;
+};
+
+std::string MockEscapeQueryParamValue(const std::string& input) {
+  return input;
+}
+
+}  // namespace
+
+// Same as |TestFieldTrialObserver|, but registers for low anonymity field
+// trials too.
+class TestFieldTrialObserverIncludingLowAnonymity
+    : public FieldTrialList::Observer {
+ public:
+  TestFieldTrialObserverIncludingLowAnonymity() {
+    FieldTrialListIncludingLowAnonymity::AddObserver(this);
+  }
+  TestFieldTrialObserverIncludingLowAnonymity(
+      const TestFieldTrialObserverIncludingLowAnonymity&) = delete;
+  TestFieldTrialObserverIncludingLowAnonymity& operator=(
+      const TestFieldTrialObserverIncludingLowAnonymity&) = delete;
+
+  ~TestFieldTrialObserverIncludingLowAnonymity() override {
+    FieldTrialListIncludingLowAnonymity::RemoveObserver(this);
   }
 
   void OnFieldTrialGroupFinalized(const std::string& trial,
@@ -81,30 +147,32 @@
   const std::string& group_name() const { return group_name_; }
 
  private:
-  const Type type_;
   std::string trial_name_;
   std::string group_name_;
-
-  DISALLOW_COPY_AND_ASSIGN(TestFieldTrialObserver);
 };
 
-std::string MockEscapeQueryParamValue(const std::string& input) {
-  return input;
-}
-
-}  // namespace
-
 class FieldTrialTest : public ::testing::Test {
  public:
-  FieldTrialTest() : trial_list_(nullptr) {}
+  FieldTrialTest() {
+    // The test suite instantiates a FieldTrialList, but for the purposes of
+    // these tests it's cleaner to start from scratch.
+    scoped_feature_list_.InitWithEmptyFeatureAndFieldTrialLists();
+  }
+  FieldTrialTest(const FieldTrialTest&) = delete;
+  FieldTrialTest& operator=(const FieldTrialTest&) = delete;
 
  private:
-  MessageLoop message_loop_;
-  FieldTrialList trial_list_;
-
-  DISALLOW_COPY_AND_ASSIGN(FieldTrialTest);
+  test::TaskEnvironment task_environment_;
+  test::ScopedFeatureList scoped_feature_list_;
 };
 
+MATCHER(CompareActiveGroupToFieldTrial, "") {
+  const base::FieldTrial::ActiveGroup& lhs = ::testing::get<0>(arg);
+  const base::FieldTrial* rhs = ::testing::get<1>(arg).get();
+  return lhs.trial_name == rhs->trial_name() &&
+         lhs.group_name == rhs->group_name_internal();
+}
+
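
This matcher compares a (FieldTrial::ActiveGroup, scoped_refptr<FieldTrial>)
tuple element-wise, so presumably it is used with ::testing::Pointwise; a
hypothetical call, assuming a std::vector<scoped_refptr<FieldTrial>> named
trials:

    FieldTrial::ActiveGroups active_groups;
    FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
    EXPECT_THAT(active_groups,
                ::testing::Pointwise(CompareActiveGroupToFieldTrial(), trials));
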
 // Test registration, and also check that destructors are called for trials.
 TEST_F(FieldTrialTest, Registration) {
   const char name1[] = "name 1 test";
@@ -113,7 +181,7 @@
   EXPECT_FALSE(FieldTrialList::Find(name2));
 
   scoped_refptr<FieldTrial> trial1 =
-      CreateFieldTrial(name1, 10, "default name 1 test", nullptr);
+      CreateFieldTrial(name1, 10, "default name 1 test");
   EXPECT_EQ(FieldTrial::kNotFinalized, trial1->group_);
   EXPECT_EQ(name1, trial1->trial_name());
   EXPECT_EQ("", trial1->group_name_internal());
@@ -124,7 +192,7 @@
   EXPECT_FALSE(FieldTrialList::Find(name2));
 
   scoped_refptr<FieldTrial> trial2 =
-      CreateFieldTrial(name2, 10, "default name 2 test", nullptr);
+      CreateFieldTrial(name2, 10, "default name 2 test");
   EXPECT_EQ(FieldTrial::kNotFinalized, trial2->group_);
   EXPECT_EQ(name2, trial2->trial_name());
   EXPECT_EQ("", trial2->group_name_internal());
@@ -137,165 +205,75 @@
 }
 
 TEST_F(FieldTrialTest, AbsoluteProbabilities) {
-  char always_true[] = " always true";
-  char default_always_true[] = " default always true";
-  char always_false[] = " always false";
-  char default_always_false[] = " default always false";
-  for (int i = 1; i < 250; ++i) {
-    // Try lots of names, by changing the first character of the name.
-    char c = static_cast<char>(i);
-    always_true[0] = c;
-    default_always_true[0] = c;
-    always_false[0] = c;
-    default_always_false[0] = c;
-
-    scoped_refptr<FieldTrial> trial_true =
-        CreateFieldTrial(always_true, 10, default_always_true, nullptr);
-    const std::string winner = "TheWinner";
-    int winner_group = trial_true->AppendGroup(winner, 10);
-
-    EXPECT_EQ(winner_group, trial_true->group());
-    EXPECT_EQ(winner, trial_true->group_name());
-
-    scoped_refptr<FieldTrial> trial_false =
-        CreateFieldTrial(always_false, 10, default_always_false, nullptr);
-    int loser_group = trial_false->AppendGroup("ALoser", 0);
-
-    EXPECT_NE(loser_group, trial_false->group());
-  }
+  MockEntropyProvider entropy_provider(0.51);
+  scoped_refptr<FieldTrial> trial = FieldTrialList::FactoryGetFieldTrial(
+      "trial name", 100, "Default", entropy_provider);
+  trial->AppendGroup("LoserA", 0);
+  trial->AppendGroup("Winner", 100);
+  trial->AppendGroup("LoserB", 0);
+  EXPECT_EQ(trial->group_name(), "Winner");
 }
 
-TEST_F(FieldTrialTest, RemainingProbability) {
-  // First create a test that hasn't had a winner yet.
-  const std::string winner = "Winner";
-  const std::string loser = "Loser";
-  scoped_refptr<FieldTrial> trial;
-  int counter = 0;
-  int default_group_number = -1;
-  do {
-    std::string name = StringPrintf("trial%d", ++counter);
-    trial = CreateFieldTrial(name, 10, winner, &default_group_number);
-    trial->AppendGroup(loser, 5);  // 50% chance of not being chosen.
-    // If a group is not assigned, group_ will be kNotFinalized.
-  } while (trial->group_ != FieldTrial::kNotFinalized);
-
-  // And that 'default' group (winner) should always win.
-  EXPECT_EQ(default_group_number, trial->group());
-
-  // And that winner should ALWAYS win.
-  EXPECT_EQ(winner, trial->group_name());
+TEST_F(FieldTrialTest, SmallProbabilities_49) {
+  MockEntropyProvider entropy_provider(0.49);
+  scoped_refptr<FieldTrial> trial = FieldTrialList::FactoryGetFieldTrial(
+      "trial name", 2, "Default", entropy_provider);
+  trial->AppendGroup("first", 1);
+  trial->AppendGroup("second", 1);
+  EXPECT_EQ(trial->group_name(), "first");
 }
 
-TEST_F(FieldTrialTest, FiftyFiftyProbability) {
-  // Check that even with small divisors, we have the proper probabilities, and
-  // all outcomes are possible.  Since this is a 50-50 test, it should get both
-  // outcomes in a few tries, but we'll try no more than 100 times (and be flaky
-  // with probability around 1 in 2^99).
-  bool first_winner = false;
-  bool second_winner = false;
-  int counter = 0;
-  do {
-    std::string name = StringPrintf("FiftyFifty%d", ++counter);
-    std::string default_group_name =
-        StringPrintf("Default FiftyFifty%d", ++counter);
-    scoped_refptr<FieldTrial> trial =
-        CreateFieldTrial(name, 2, default_group_name, nullptr);
-    trial->AppendGroup("first", 1);  // 50% chance of being chosen.
-    // If group_ is kNotFinalized, then a group assignment hasn't been done.
-    if (trial->group_ != FieldTrial::kNotFinalized) {
-      first_winner = true;
-      continue;
-    }
-    trial->AppendGroup("second", 1);  // Always chosen at this point.
-    EXPECT_NE(FieldTrial::kNotFinalized, trial->group());
-    second_winner = true;
-  } while ((!second_winner || !first_winner) && counter < 100);
-  EXPECT_TRUE(second_winner);
-  EXPECT_TRUE(first_winner);
+TEST_F(FieldTrialTest, SmallProbabilities_51) {
+  MockEntropyProvider entropy_provider(0.51);
+  scoped_refptr<FieldTrial> trial = FieldTrialList::FactoryGetFieldTrial(
+      "trial name", 2, "Default", entropy_provider);
+  trial->AppendGroup("first", 1);
+  trial->AppendGroup("second", 1);
+  EXPECT_EQ(trial->group_name(), "second");
 }
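
Why 0.49 selects "first" while 0.51 selects "second": as a simplified model
(not the actual FieldTrial implementation), the entropy value is scaled by the
trial's total probability, and the resulting point is matched against the
cumulative group weights:

    // Simplified model, for illustration only.
    int PickedPoint(double entropy, int total_probability) {
      return static_cast<int>(entropy * total_probability);
    }
    // PickedPoint(0.49, 2) == 0, and group "first" covers [0, 1).
    // PickedPoint(0.51, 2) == 1, and group "second" covers [1, 2).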
 
-TEST_F(FieldTrialTest, MiddleProbabilities) {
-  char name[] = " same name";
-  char default_group_name[] = " default same name";
-  bool false_event_seen = false;
-  bool true_event_seen = false;
-  for (int i = 1; i < 250; ++i) {
-    char c = static_cast<char>(i);
-    name[0] = c;
-    default_group_name[0] = c;
-    scoped_refptr<FieldTrial> trial =
-        CreateFieldTrial(name, 10, default_group_name, nullptr);
-    int might_win = trial->AppendGroup("MightWin", 5);
-
-    if (trial->group() == might_win) {
-      true_event_seen = true;
-    } else {
-      false_event_seen = true;
-    }
-    if (false_event_seen && true_event_seen)
-      return;  // Successful test!!!
-  }
-  // Very surprising to get here. Probability should be around 1 in 2 ** 250.
-  // One of the following will fail.
-  EXPECT_TRUE(false_event_seen);
-  EXPECT_TRUE(true_event_seen);
+TEST_F(FieldTrialTest, MiddleProbabilities_49) {
+  MockEntropyProvider entropy_provider(0.49);
+  scoped_refptr<FieldTrial> trial = FieldTrialList::FactoryGetFieldTrial(
+      "trial name", 10, "Default", entropy_provider);
+  trial->AppendGroup("NotDefault", 5);
+  EXPECT_EQ(trial->group_name(), "NotDefault");
 }
 
+TEST_F(FieldTrialTest, MiddleProbabilities_51) {
+  MockEntropyProvider entropy_provider(0.51);
+  scoped_refptr<FieldTrial> trial = FieldTrialList::FactoryGetFieldTrial(
+      "trial name", 10, "Default", entropy_provider);
+  trial->AppendGroup("NotDefault", 5);
+  EXPECT_EQ(trial->group_name(), "Default");
+}
+
+// AppendGroup after finalization should not change the winner.
 TEST_F(FieldTrialTest, OneWinner) {
-  char name[] = "Some name";
-  char default_group_name[] = "Default some name";
-  int group_count(10);
+  MockEntropyProvider entropy_provider(0.51);
+  scoped_refptr<FieldTrial> trial = FieldTrialList::FactoryGetFieldTrial(
+      "trial name", 10, "Default", entropy_provider);
 
-  int default_group_number = -1;
-  scoped_refptr<FieldTrial> trial =
-      CreateFieldTrial(name, group_count, default_group_name, nullptr);
-  int winner_index(-2);
-  std::string winner_name;
-
-  for (int i = 1; i <= group_count; ++i) {
-    int might_win = trial->AppendGroup(std::string(), 1);
-
-    // Because we keep appending groups, we want to see if the last group that
-    // was added has been assigned or not.
-    if (trial->group_ == might_win) {
-      EXPECT_EQ(-2, winner_index);
-      winner_index = might_win;
-      StringAppendF(&winner_name, "%d", might_win);
-      EXPECT_EQ(winner_name, trial->group_name());
-    }
+  for (int i = 0; i < 5; ++i) {
+    trial->AppendGroup(StringPrintf("%d", i), 1);
   }
-  EXPECT_GE(winner_index, 0);
-  // Since all groups cover the total probability, we should not have
-  // chosen the default group.
-  EXPECT_NE(trial->group(), default_group_number);
-  EXPECT_EQ(trial->group(), winner_index);
-  EXPECT_EQ(trial->group_name(), winner_name);
-}
 
-TEST_F(FieldTrialTest, DisableProbability) {
-  const std::string default_group_name = "Default group";
-  const std::string loser = "Loser";
-  const std::string name = "Trial";
+  // Entropy 0.51 with a divisor of 10 yields point 5 (0.51 * 10 = 5.1),
+  // which falls in this sixth appended group. It should be declared the
+  // winner and stay that way.
+  trial->AppendGroup("Winner", 1);
+  EXPECT_EQ("Winner", trial->group_name());
 
-  // Create a field trial that has expired.
-  int default_group_number = -1;
-  FieldTrial* trial = FieldTrialList::FactoryGetFieldTrial(
-      name, 1000000000, default_group_name, OneYearBeforeBuildTime(), 1, 1,
-      FieldTrial::SESSION_RANDOMIZED,
-      &default_group_number);
-  trial->AppendGroup(loser, 999999999);  // 99.9999999% chance of being chosen.
-
-  // Because trial has expired, we should always be in the default group.
-  EXPECT_EQ(default_group_number, trial->group());
-
-  // And that default_group_name should ALWAYS win.
-  EXPECT_EQ(default_group_name, trial->group_name());
+  // Note: appending groups after calling group_name() is probably not really
+  // valid usage, since it will DCHECK if the default group won.
+  for (int i = 7; i < 10; ++i) {
+    trial->AppendGroup(StringPrintf("%d", i), 1);
+    EXPECT_EQ("Winner", trial->group_name());
+  }
 }
 
 TEST_F(FieldTrialTest, ActiveGroups) {
   std::string no_group("No Group");
-  scoped_refptr<FieldTrial> trial =
-      CreateFieldTrial(no_group, 10, "Default", nullptr);
+  scoped_refptr<FieldTrial> trial = CreateFieldTrial(no_group, 10, "Default");
 
   // There is no winner yet, so no NameGroupId should be returned.
   FieldTrial::ActiveGroup active_group;
@@ -303,26 +281,24 @@
 
   // Create a single winning group.
   std::string one_winner("One Winner");
-  trial = CreateFieldTrial(one_winner, 10, "Default", nullptr);
+  trial = CreateFieldTrial(one_winner, 10, "Default");
   std::string winner("Winner");
   trial->AppendGroup(winner, 10);
   EXPECT_FALSE(trial->GetActiveGroup(&active_group));
-  // Finalize the group selection by accessing the selected group.
-  trial->group();
+  trial->Activate();
   EXPECT_TRUE(trial->GetActiveGroup(&active_group));
   EXPECT_EQ(one_winner, active_group.trial_name);
   EXPECT_EQ(winner, active_group.group_name);
 
   std::string multi_group("MultiGroup");
   scoped_refptr<FieldTrial> multi_group_trial =
-      CreateFieldTrial(multi_group, 9, "Default", nullptr);
+      CreateFieldTrial(multi_group, 9, "Default");
 
   multi_group_trial->AppendGroup("Me", 3);
   multi_group_trial->AppendGroup("You", 3);
   multi_group_trial->AppendGroup("Them", 3);
   EXPECT_FALSE(multi_group_trial->GetActiveGroup(&active_group));
-  // Finalize the group selection by accessing the selected group.
-  multi_group_trial->group();
+  multi_group_trial->Activate();
   EXPECT_TRUE(multi_group_trial->GetActiveGroup(&active_group));
   EXPECT_EQ(multi_group, active_group.trial_name);
   EXPECT_EQ(multi_group_trial->group_name(), active_group.group_name);
@@ -356,12 +332,11 @@
   const char kTrialName[] = "TestTrial";
   const char kSecondaryGroupName[] = "SecondaryGroup";
 
-  int default_group = -1;
   scoped_refptr<FieldTrial> trial =
-      CreateFieldTrial(kTrialName, 100, kDefaultGroupName, &default_group);
-  const int secondary_group = trial->AppendGroup(kSecondaryGroupName, 50);
+      CreateFieldTrial(kTrialName, 100, kDefaultGroupName);
+  trial->AppendGroup(kSecondaryGroupName, 50);
 
-  // Before |group()| is called, |GetActiveGroup()| should return false.
+  // Before |Activate()| is called, |GetActiveGroup()| should return false.
   FieldTrial::ActiveGroup active_group;
   EXPECT_FALSE(trial->GetActiveGroup(&active_group));
 
@@ -370,16 +345,13 @@
   FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
   EXPECT_TRUE(active_groups.empty());
 
-  // After |group()| has been called, both APIs should succeed.
-  const int chosen_group = trial->group();
-  EXPECT_TRUE(chosen_group == default_group || chosen_group == secondary_group);
+  // After |Activate()| has been called, both APIs should succeed.
+  trial->Activate();
 
   EXPECT_TRUE(trial->GetActiveGroup(&active_group));
   EXPECT_EQ(kTrialName, active_group.trial_name);
-  if (chosen_group == default_group)
-    EXPECT_EQ(kDefaultGroupName, active_group.group_name);
-  else
-    EXPECT_EQ(kSecondaryGroupName, active_group.group_name);
+  EXPECT_TRUE(kDefaultGroupName == active_group.group_name ||
+              kSecondaryGroupName == active_group.group_name);
 
   FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
   ASSERT_EQ(1U, active_groups.size());
@@ -391,9 +363,8 @@
   const char kTrialName[] = "TestTrial";
   const char kSecondaryGroupName[] = "SecondaryGroup";
 
-  int default_group = -1;
   scoped_refptr<FieldTrial> trial =
-      CreateFieldTrial(kTrialName, 100, kDefaultGroupName, &default_group);
+      CreateFieldTrial(kTrialName, 100, kDefaultGroupName);
   trial->AppendGroup(kSecondaryGroupName, 50);
 
   // The trial should start inactive.
@@ -409,117 +380,55 @@
   EXPECT_TRUE(FieldTrialList::IsTrialActive(kTrialName));
 }
 
-TEST_F(FieldTrialTest, Save) {
-  std::string save_string;
-
-  scoped_refptr<FieldTrial> trial =
-      CreateFieldTrial("Some name", 10, "Default some name", nullptr);
-  // There is no winner yet, so no textual group name is associated with trial.
-  // In this case, the trial should not be included.
-  EXPECT_EQ("", trial->group_name_internal());
-  FieldTrialList::StatesToString(&save_string);
-  EXPECT_EQ("", save_string);
-  save_string.clear();
-
-  // Create a winning group.
-  trial->AppendGroup("Winner", 10);
-  // Finalize the group selection by accessing the selected group.
-  trial->group();
-  FieldTrialList::StatesToString(&save_string);
-  EXPECT_EQ("Some name/Winner/", save_string);
-  save_string.clear();
-
-  // Create a second trial and winning group.
-  scoped_refptr<FieldTrial> trial2 =
-      CreateFieldTrial("xxx", 10, "Default xxx", nullptr);
-  trial2->AppendGroup("yyyy", 10);
-  // Finalize the group selection by accessing the selected group.
-  trial2->group();
-
-  FieldTrialList::StatesToString(&save_string);
-  // We assume names are alphabetized... though this is not critical.
-  EXPECT_EQ("Some name/Winner/xxx/yyyy/", save_string);
-  save_string.clear();
-
-  // Create a third trial with only the default group.
-  scoped_refptr<FieldTrial> trial3 =
-      CreateFieldTrial("zzz", 10, "default", nullptr);
-  // Finalize the group selection by accessing the selected group.
-  trial3->group();
-
-  FieldTrialList::StatesToString(&save_string);
-  EXPECT_EQ("Some name/Winner/xxx/yyyy/zzz/default/", save_string);
-}
-
 TEST_F(FieldTrialTest, SaveAll) {
   std::string save_string;
 
   scoped_refptr<FieldTrial> trial =
-      CreateFieldTrial("Some name", 10, "Default some name", nullptr);
+      CreateFieldTrial("Some name", 10, "Default some name");
   EXPECT_EQ("", trial->group_name_internal());
-  FieldTrialList::AllStatesToString(&save_string, false);
+  FieldTrialList::AllStatesToString(&save_string);
   EXPECT_EQ("Some name/Default some name/", save_string);
   // Getting all states should have finalized the trial.
   EXPECT_EQ("Default some name", trial->group_name_internal());
   save_string.clear();
 
   // Create a winning group.
-  trial = CreateFieldTrial("trial2", 10, "Default some name", nullptr);
+  trial = CreateFieldTrial("trial2", 10, "Default some name");
   trial->AppendGroup("Winner", 10);
-  // Finalize the group selection by accessing the selected group.
-  trial->group();
-  FieldTrialList::AllStatesToString(&save_string, false);
+  trial->Activate();
+  FieldTrialList::AllStatesToString(&save_string);
   EXPECT_EQ("Some name/Default some name/*trial2/Winner/", save_string);
   save_string.clear();
 
   // Create a second trial and winning group.
-  scoped_refptr<FieldTrial> trial2 =
-      CreateFieldTrial("xxx", 10, "Default xxx", nullptr);
+  scoped_refptr<FieldTrial> trial2 = CreateFieldTrial("xxx", 10, "Default xxx");
   trial2->AppendGroup("yyyy", 10);
-  // Finalize the group selection by accessing the selected group.
-  trial2->group();
+  trial2->Activate();
 
-  FieldTrialList::AllStatesToString(&save_string, false);
+  FieldTrialList::AllStatesToString(&save_string);
   // We assume names are alphabetized... though this is not critical.
   EXPECT_EQ("Some name/Default some name/*trial2/Winner/*xxx/yyyy/",
             save_string);
   save_string.clear();
 
   // Create a third trial with only the default group.
-  scoped_refptr<FieldTrial> trial3 =
-      CreateFieldTrial("zzz", 10, "default", nullptr);
+  scoped_refptr<FieldTrial> trial3 = CreateFieldTrial("zzz", 10, "default");
 
-  FieldTrialList::AllStatesToString(&save_string, false);
+  FieldTrialList::AllStatesToString(&save_string);
   EXPECT_EQ("Some name/Default some name/*trial2/Winner/*xxx/yyyy/zzz/default/",
             save_string);
 
-  // Create expired study.
-  int default_group_number = -1;
-  scoped_refptr<FieldTrial> expired_trial =
-      FieldTrialList::FactoryGetFieldTrial(
-          "Expired trial name", 1000000000, "Default group",
-          OneYearBeforeBuildTime(), 1, 1, FieldTrial::SESSION_RANDOMIZED,
-          &default_group_number);
-  expired_trial->AppendGroup("Expired trial group name", 999999999);
-
   save_string.clear();
-  FieldTrialList::AllStatesToString(&save_string, false);
+  FieldTrialList::AllStatesToString(&save_string);
   EXPECT_EQ("Some name/Default some name/*trial2/Winner/*xxx/yyyy/zzz/default/",
             save_string);
-  save_string.clear();
-  FieldTrialList::AllStatesToString(&save_string, true);
-  EXPECT_EQ(
-      "Expired trial name/Default group/"
-      "Some name/Default some name/*trial2/Winner/*xxx/yyyy/zzz/default/",
-      save_string);
 }
 
 TEST_F(FieldTrialTest, Restore) {
   ASSERT_FALSE(FieldTrialList::TrialExists("Some_name"));
   ASSERT_FALSE(FieldTrialList::TrialExists("xxx"));
 
-  FieldTrialList::CreateTrialsFromString("Some_name/Winner/xxx/yyyy/",
-                                         std::set<std::string>());
+  FieldTrialList::CreateTrialsFromString("Some_name/Winner/xxx/yyyy/");
 
   FieldTrial* trial = FieldTrialList::Find("Some_name");
   ASSERT_NE(static_cast<FieldTrial*>(nullptr), trial);
@@ -533,8 +442,7 @@
 }
 
 TEST_F(FieldTrialTest, RestoreNotEndingWithSlash) {
-  EXPECT_TRUE(FieldTrialList::CreateTrialsFromString("tname/gname",
-                                                     std::set<std::string>()));
+  EXPECT_TRUE(FieldTrialList::CreateTrialsFromString("tname/gname"));
 
   FieldTrial* trial = FieldTrialList::Find("tname");
   ASSERT_NE(static_cast<FieldTrial*>(nullptr), trial);
@@ -543,42 +451,34 @@
 }
 
 TEST_F(FieldTrialTest, BogusRestore) {
-  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("MissingSlash",
-                                                      std::set<std::string>()));
-  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("MissingGroupName/",
-                                                      std::set<std::string>()));
-  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("noname, only group/",
-                                                      std::set<std::string>()));
-  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("/emptyname",
-                                                      std::set<std::string>()));
-  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("*/emptyname",
-                                                      std::set<std::string>()));
+  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("MissingSlash"));
+  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("MissingGroupName/"));
+  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("noname, only group/"));
+  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("/emptyname"));
+  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("*/emptyname"));
 }
 
 TEST_F(FieldTrialTest, DuplicateRestore) {
   scoped_refptr<FieldTrial> trial =
-      CreateFieldTrial("Some name", 10, "Default", nullptr);
+      CreateFieldTrial("Some name", 10, "Default");
   trial->AppendGroup("Winner", 10);
-  // Finalize the group selection by accessing the selected group.
-  trial->group();
+  trial->Activate();
   std::string save_string;
-  FieldTrialList::StatesToString(&save_string);
-  EXPECT_EQ("Some name/Winner/", save_string);
+  FieldTrialList::AllStatesToString(&save_string);
+  // The "*" prefix indicates the trial is activated.
+  EXPECT_EQ("*Some name/Winner/", save_string);
 
   // It is OK if we redundantly specify a winner.
-  EXPECT_TRUE(FieldTrialList::CreateTrialsFromString(save_string,
-                                                     std::set<std::string>()));
+  EXPECT_TRUE(FieldTrialList::CreateTrialsFromString(save_string));
 
   // But it is an error to try to change to a different winner.
-  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("Some name/Loser/",
-                                                      std::set<std::string>()));
+  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("Some name/Loser/"));
 }
 
 TEST_F(FieldTrialTest, CreateTrialsFromStringNotActive) {
   ASSERT_FALSE(FieldTrialList::TrialExists("Abc"));
   ASSERT_FALSE(FieldTrialList::TrialExists("Xyz"));
-  ASSERT_TRUE(FieldTrialList::CreateTrialsFromString("Abc/def/Xyz/zyx/",
-                                                     std::set<std::string>()));
+  ASSERT_TRUE(FieldTrialList::CreateTrialsFromString("Abc/def/Xyz/zyx/"));
 
   FieldTrial::ActiveGroups active_groups;
   FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
@@ -600,8 +500,8 @@
   ASSERT_FALSE(FieldTrialList::TrialExists("Abc"));
   ASSERT_FALSE(FieldTrialList::TrialExists("def"));
   ASSERT_FALSE(FieldTrialList::TrialExists("Xyz"));
-  ASSERT_TRUE(FieldTrialList::CreateTrialsFromString(
-      "*Abc/cba/def/fed/*Xyz/zyx/", std::set<std::string>()));
+  ASSERT_TRUE(
+      FieldTrialList::CreateTrialsFromString("*Abc/cba/def/fed/*Xyz/zyx/"));
 
   FieldTrial::ActiveGroups active_groups;
   FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
@@ -615,9 +515,8 @@
 TEST_F(FieldTrialTest, CreateTrialsFromStringNotActiveObserver) {
   ASSERT_FALSE(FieldTrialList::TrialExists("Abc"));
 
-  TestFieldTrialObserver observer(TestFieldTrialObserver::ASYNCHRONOUS);
-  ASSERT_TRUE(FieldTrialList::CreateTrialsFromString("Abc/def/",
-                                                     std::set<std::string>()));
+  TestFieldTrialObserver observer;
+  ASSERT_TRUE(FieldTrialList::CreateTrialsFromString("Abc/def/"));
   RunLoop().RunUntilIdle();
   // Observer shouldn't be notified.
   EXPECT_TRUE(observer.trial_name().empty());
@@ -625,52 +524,10 @@
   // Check that the values still get returned and querying them activates them.
   EXPECT_EQ("def", FieldTrialList::FindFullName("Abc"));
 
-  RunLoop().RunUntilIdle();
   EXPECT_EQ("Abc", observer.trial_name());
   EXPECT_EQ("def", observer.group_name());
 }
 
-TEST_F(FieldTrialTest, CreateTrialsFromStringWithIgnoredFieldTrials) {
-  ASSERT_FALSE(FieldTrialList::TrialExists("Unaccepted1"));
-  ASSERT_FALSE(FieldTrialList::TrialExists("Foo"));
-  ASSERT_FALSE(FieldTrialList::TrialExists("Unaccepted2"));
-  ASSERT_FALSE(FieldTrialList::TrialExists("Bar"));
-  ASSERT_FALSE(FieldTrialList::TrialExists("Unaccepted3"));
-
-  std::set<std::string> ignored_trial_names;
-  ignored_trial_names.insert("Unaccepted1");
-  ignored_trial_names.insert("Unaccepted2");
-  ignored_trial_names.insert("Unaccepted3");
-
-  FieldTrialList::CreateTrialsFromString(
-      "Unaccepted1/Unaccepted1_name/"
-      "Foo/Foo_name/"
-      "Unaccepted2/Unaccepted2_name/"
-      "Bar/Bar_name/"
-      "Unaccepted3/Unaccepted3_name/",
-      ignored_trial_names);
-
-  EXPECT_FALSE(FieldTrialList::TrialExists("Unaccepted1"));
-  EXPECT_TRUE(FieldTrialList::TrialExists("Foo"));
-  EXPECT_FALSE(FieldTrialList::TrialExists("Unaccepted2"));
-  EXPECT_TRUE(FieldTrialList::TrialExists("Bar"));
-  EXPECT_FALSE(FieldTrialList::TrialExists("Unaccepted3"));
-
-  FieldTrial::ActiveGroups active_groups;
-  FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
-  EXPECT_TRUE(active_groups.empty());
-
-  FieldTrial* trial = FieldTrialList::Find("Foo");
-  ASSERT_NE(static_cast<FieldTrial*>(nullptr), trial);
-  EXPECT_EQ("Foo", trial->trial_name());
-  EXPECT_EQ("Foo_name", trial->group_name());
-
-  trial = FieldTrialList::Find("Bar");
-  ASSERT_NE(static_cast<FieldTrial*>(nullptr), trial);
-  EXPECT_EQ("Bar", trial->trial_name());
-  EXPECT_EQ("Bar_name", trial->group_name());
-}
-
 TEST_F(FieldTrialTest, CreateFieldTrial) {
   ASSERT_FALSE(FieldTrialList::TrialExists("Some_name"));
 
@@ -695,7 +552,7 @@
 
 TEST_F(FieldTrialTest, DuplicateFieldTrial) {
   scoped_refptr<FieldTrial> trial =
-      CreateFieldTrial("Some_name", 10, "Default", nullptr);
+      CreateFieldTrial("Some_name", 10, "Default");
   trial->AppendGroup("Winner", 10);
 
   // It is OK if we redundantly specify a winner.
@@ -707,109 +564,77 @@
   EXPECT_TRUE(trial2 == nullptr);
 }
 
-TEST_F(FieldTrialTest, DisableImmediately) {
-  int default_group_number = -1;
-  scoped_refptr<FieldTrial> trial =
-      CreateFieldTrial("trial", 100, "default", &default_group_number);
-  trial->Disable();
-  ASSERT_EQ("default", trial->group_name());
-  ASSERT_EQ(default_group_number, trial->group());
-}
-
-TEST_F(FieldTrialTest, DisableAfterInitialization) {
-  scoped_refptr<FieldTrial> trial =
-      CreateFieldTrial("trial", 100, "default", nullptr);
-  trial->AppendGroup("non_default", 100);
-  trial->Disable();
-  ASSERT_EQ("default", trial->group_name());
-}
-
 TEST_F(FieldTrialTest, ForcedFieldTrials) {
   // Validate we keep the forced choice.
   FieldTrial* forced_trial = FieldTrialList::CreateFieldTrial("Use the",
                                                               "Force");
   EXPECT_STREQ("Force", forced_trial->group_name().c_str());
 
-  int default_group_number = -1;
   scoped_refptr<FieldTrial> factory_trial =
-      CreateFieldTrial("Use the", 1000, "default", &default_group_number);
+      CreateFieldTrial("Use the", 1000, "default");
   EXPECT_EQ(factory_trial.get(), forced_trial);
 
-  int chosen_group = factory_trial->AppendGroup("Force", 100);
-  EXPECT_EQ(chosen_group, factory_trial->group());
-  int not_chosen_group = factory_trial->AppendGroup("Dark Side", 100);
-  EXPECT_NE(chosen_group, not_chosen_group);
-
-  // Since we didn't force the default group, we should not be returned the
-  // chosen group as the default group.
-  EXPECT_NE(default_group_number, chosen_group);
-  int new_group = factory_trial->AppendGroup("Duck Tape", 800);
-  EXPECT_NE(chosen_group, new_group);
-  // The new group should not be the default group either.
-  EXPECT_NE(default_group_number, new_group);
+  factory_trial->AppendGroup("Force", 100);
+  EXPECT_EQ("Force", factory_trial->group_name());
+  factory_trial->AppendGroup("Dark Side", 100);
+  EXPECT_EQ("Force", factory_trial->group_name());
+  factory_trial->AppendGroup("Duck Tape", 800);
+  EXPECT_EQ("Force", factory_trial->group_name());
 }
 
 TEST_F(FieldTrialTest, ForcedFieldTrialsDefaultGroup) {
   // Forcing the default should use the proper group ID.
-  FieldTrial* forced_trial = FieldTrialList::CreateFieldTrial("Trial Name",
-                                                              "Default");
-  int default_group_number = -1;
+  FieldTrial* forced_trial =
+      FieldTrialList::CreateFieldTrial("Trial Name", "Default");
   scoped_refptr<FieldTrial> factory_trial =
-      CreateFieldTrial("Trial Name", 1000, "Default", &default_group_number);
+      CreateFieldTrial("Trial Name", 1000, "Default");
   EXPECT_EQ(forced_trial, factory_trial.get());
 
-  int other_group = factory_trial->AppendGroup("Not Default", 100);
+  factory_trial->AppendGroup("Not Default", 100);
   EXPECT_STREQ("Default", factory_trial->group_name().c_str());
-  EXPECT_EQ(default_group_number, factory_trial->group());
-  EXPECT_NE(other_group, factory_trial->group());
 
-  int new_other_group = factory_trial->AppendGroup("Not Default Either", 800);
-  EXPECT_NE(new_other_group, factory_trial->group());
+  factory_trial->AppendGroup("Not Default Either", 800);
+  EXPECT_STREQ("Default", factory_trial->group_name().c_str());
 }
 
 TEST_F(FieldTrialTest, SetForced) {
   // Start by setting a trial for which we ensure a winner...
-  int default_group_number = -1;
   scoped_refptr<FieldTrial> forced_trial =
-      CreateFieldTrial("Use the", 1, "default", &default_group_number);
+      CreateFieldTrial("Use the", 1, "default");
   EXPECT_EQ(forced_trial, forced_trial);
 
-  int forced_group = forced_trial->AppendGroup("Force", 1);
-  EXPECT_EQ(forced_group, forced_trial->group());
+  forced_trial->AppendGroup("Force", 1);
+  EXPECT_EQ("Force", forced_trial->group_name());
 
   // Now force it.
   forced_trial->SetForced();
 
   // Now try to set it up differently as a hard coded registration would.
   scoped_refptr<FieldTrial> hard_coded_trial =
-      CreateFieldTrial("Use the", 1, "default", &default_group_number);
+      CreateFieldTrial("Use the", 1, "default");
   EXPECT_EQ(hard_coded_trial, forced_trial);
 
-  int would_lose_group = hard_coded_trial->AppendGroup("Force", 0);
-  EXPECT_EQ(forced_group, hard_coded_trial->group());
-  EXPECT_EQ(forced_group, would_lose_group);
+  hard_coded_trial->AppendGroup("Force", 0);
+  EXPECT_EQ("Force", hard_coded_trial->group_name());
 
   // Same thing if we would have done it to win again.
   scoped_refptr<FieldTrial> other_hard_coded_trial =
-      CreateFieldTrial("Use the", 1, "default", &default_group_number);
+      CreateFieldTrial("Use the", 1, "default");
   EXPECT_EQ(other_hard_coded_trial, forced_trial);
 
-  int would_win_group = other_hard_coded_trial->AppendGroup("Force", 1);
-  EXPECT_EQ(forced_group, other_hard_coded_trial->group());
-  EXPECT_EQ(forced_group, would_win_group);
+  other_hard_coded_trial->AppendGroup("Force", 1);
+  EXPECT_EQ("Force", other_hard_coded_trial->group_name());
 }
 
 TEST_F(FieldTrialTest, SetForcedDefaultOnly) {
   const char kTrialName[] = "SetForcedDefaultOnly";
   ASSERT_FALSE(FieldTrialList::TrialExists(kTrialName));
 
-  int default_group = -1;
   scoped_refptr<FieldTrial> trial =
-      CreateFieldTrial(kTrialName, 100, kDefaultGroupName, &default_group);
+      CreateFieldTrial(kTrialName, 100, kDefaultGroupName);
   trial->SetForced();
 
-  trial = CreateFieldTrial(kTrialName, 100, kDefaultGroupName, nullptr);
-  EXPECT_EQ(default_group, trial->group());
+  trial = CreateFieldTrial(kTrialName, 100, kDefaultGroupName);
   EXPECT_EQ(kDefaultGroupName, trial->group_name());
 }
 
@@ -817,15 +642,12 @@
   const char kTrialName[] = "SetForcedDefaultWithExtraGroup";
   ASSERT_FALSE(FieldTrialList::TrialExists(kTrialName));
 
-  int default_group = -1;
   scoped_refptr<FieldTrial> trial =
-      CreateFieldTrial(kTrialName, 100, kDefaultGroupName, &default_group);
+      CreateFieldTrial(kTrialName, 100, kDefaultGroupName);
   trial->SetForced();
 
-  trial = CreateFieldTrial(kTrialName, 100, kDefaultGroupName, nullptr);
-  const int extra_group = trial->AppendGroup("Extra", 100);
-  EXPECT_EQ(default_group, trial->group());
-  EXPECT_NE(extra_group, trial->group());
+  trial = CreateFieldTrial(kTrialName, 100, kDefaultGroupName);
+  trial->AppendGroup("Extra", 100);
   EXPECT_EQ(kDefaultGroupName, trial->group_name());
 }
 
@@ -837,18 +659,16 @@
   // Simulate a server-side (forced) config that turns the feature on when the
   // original hard-coded config had it disabled.
   scoped_refptr<FieldTrial> forced_trial =
-      CreateFieldTrial(kTrialName, 100, kDefaultGroupName, nullptr);
+      CreateFieldTrial(kTrialName, 100, kDefaultGroupName);
   forced_trial->AppendGroup(kExtraGroupName, 100);
   forced_trial->SetForced();
 
-  int default_group = -1;
   scoped_refptr<FieldTrial> client_trial =
-      CreateFieldTrial(kTrialName, 100, kDefaultGroupName, &default_group);
-  const int extra_group = client_trial->AppendGroup(kExtraGroupName, 0);
-  EXPECT_NE(default_group, extra_group);
+      CreateFieldTrial(kTrialName, 100, kDefaultGroupName);
+  client_trial->AppendGroup(kExtraGroupName, 0);
 
   EXPECT_FALSE(client_trial->group_reported_);
-  EXPECT_EQ(extra_group, client_trial->group());
+  EXPECT_EQ(kExtraGroupName, client_trial->group_name());
   EXPECT_TRUE(client_trial->group_reported_);
   EXPECT_EQ(kExtraGroupName, client_trial->group_name());
 }
@@ -861,18 +681,16 @@
   // Simulate a server-side (forced) config that turns the feature off when the
   // original hard-coded config had it enabled.
   scoped_refptr<FieldTrial> forced_trial =
-      CreateFieldTrial(kTrialName, 100, kDefaultGroupName, nullptr);
+      CreateFieldTrial(kTrialName, 100, kDefaultGroupName);
   forced_trial->AppendGroup(kExtraGroupName, 0);
   forced_trial->SetForced();
 
-  int default_group = -1;
   scoped_refptr<FieldTrial> client_trial =
-      CreateFieldTrial(kTrialName, 100, kDefaultGroupName, &default_group);
-  const int extra_group = client_trial->AppendGroup(kExtraGroupName, 100);
-  EXPECT_NE(default_group, extra_group);
+      CreateFieldTrial(kTrialName, 100, kDefaultGroupName);
+  client_trial->AppendGroup(kExtraGroupName, 100);
 
   EXPECT_FALSE(client_trial->group_reported_);
-  EXPECT_EQ(default_group, client_trial->group());
+  EXPECT_EQ(kDefaultGroupName, client_trial->group_name());
   EXPECT_TRUE(client_trial->group_reported_);
   EXPECT_EQ(kDefaultGroupName, client_trial->group_name());
 }
@@ -886,18 +704,16 @@
   // Simulate a server-side (forced) config that switches which group is default
   // and ensures that the non-forced code receives the correct groups.
   scoped_refptr<FieldTrial> forced_trial =
-      CreateFieldTrial(kTrialName, 100, kGroupAName, nullptr);
+      CreateFieldTrial(kTrialName, 100, kGroupAName);
   forced_trial->AppendGroup(kGroupBName, 100);
   forced_trial->SetForced();
 
-  int default_group = -1;
   scoped_refptr<FieldTrial> client_trial =
-      CreateFieldTrial(kTrialName, 100, kGroupBName, &default_group);
-  const int extra_group = client_trial->AppendGroup(kGroupAName, 50);
-  EXPECT_NE(default_group, extra_group);
+      CreateFieldTrial(kTrialName, 100, kGroupBName);
+  client_trial->AppendGroup(kGroupAName, 50);
 
   EXPECT_FALSE(client_trial->group_reported_);
-  EXPECT_EQ(default_group, client_trial->group());
+  EXPECT_NE(kGroupAName, client_trial->group_name());
   EXPECT_TRUE(client_trial->group_reported_);
   EXPECT_EQ(kGroupBName, client_trial->group_name());
 }
@@ -911,18 +727,16 @@
   // Simulate a server-side (forced) config that switches which group is default
   // and ensures that the non-forced code receives the correct groups.
   scoped_refptr<FieldTrial> forced_trial =
-      CreateFieldTrial(kTrialName, 100, kGroupAName, nullptr);
+      CreateFieldTrial(kTrialName, 100, kGroupAName);
   forced_trial->AppendGroup(kGroupBName, 0);
   forced_trial->SetForced();
 
-  int default_group = -1;
   scoped_refptr<FieldTrial> client_trial =
-      CreateFieldTrial(kTrialName, 100, kGroupBName, &default_group);
-  const int extra_group = client_trial->AppendGroup(kGroupAName, 50);
-  EXPECT_NE(default_group, extra_group);
+      CreateFieldTrial(kTrialName, 100, kGroupBName);
+  client_trial->AppendGroup(kGroupAName, 50);
 
   EXPECT_FALSE(client_trial->group_reported_);
-  EXPECT_EQ(extra_group, client_trial->group());
+  EXPECT_EQ(kGroupAName, client_trial->group_name());
   EXPECT_TRUE(client_trial->group_reported_);
   EXPECT_EQ(kGroupAName, client_trial->group_name());
 }
@@ -931,123 +745,54 @@
   const char kTrialName[] = "TrialToObserve1";
   const char kSecondaryGroupName[] = "SecondaryGroup";
 
-  TestFieldTrialObserver observer(TestFieldTrialObserver::ASYNCHRONOUS);
-  int default_group = -1;
+  TestFieldTrialObserver observer;
   scoped_refptr<FieldTrial> trial =
-      CreateFieldTrial(kTrialName, 100, kDefaultGroupName, &default_group);
-  const int secondary_group = trial->AppendGroup(kSecondaryGroupName, 50);
-  const int chosen_group = trial->group();
-  EXPECT_TRUE(chosen_group == default_group || chosen_group == secondary_group);
+      CreateFieldTrial(kTrialName, 100, kDefaultGroupName);
+  trial->AppendGroup(kSecondaryGroupName, 50);
+  const std::string chosen_group_name = trial->group_name();
+  EXPECT_TRUE(chosen_group_name == kDefaultGroupName ||
+              chosen_group_name == kSecondaryGroupName);
 
-  // Observers are called asynchronously.
-  EXPECT_TRUE(observer.trial_name().empty());
-  EXPECT_TRUE(observer.group_name().empty());
-  RunLoop().RunUntilIdle();
-
+  // The observer should be notified synchronously by the group_name() call.
   EXPECT_EQ(kTrialName, observer.trial_name());
-  if (chosen_group == default_group)
-    EXPECT_EQ(kDefaultGroupName, observer.group_name());
-  else
-    EXPECT_EQ(kSecondaryGroupName, observer.group_name());
+  EXPECT_EQ(chosen_group_name, observer.group_name());
 }
 
-TEST_F(FieldTrialTest, SynchronousObserver) {
-  const char kTrialName[] = "TrialToObserve1";
-  const char kSecondaryGroupName[] = "SecondaryGroup";
+// Verify that no hang occurs when a FieldTrial group is selected from a
+// FieldTrialList::Observer::OnFieldTrialGroupFinalized() notification. If the
+// FieldTrialList's lock is held when observers are notified, this test will
+// hang due to reentrant lock acquisition when selecting the FieldTrial group.
+TEST_F(FieldTrialTest, ObserveReentrancy) {
+  const char kTrialName1[] = "TrialToObserve1";
+  const char kTrialName2[] = "TrialToObserve2";
 
-  TestFieldTrialObserver observer(TestFieldTrialObserver::SYNCHRONOUS);
-  int default_group = -1;
-  scoped_refptr<FieldTrial> trial =
-      CreateFieldTrial(kTrialName, 100, kDefaultGroupName, &default_group);
-  const int secondary_group = trial->AppendGroup(kSecondaryGroupName, 50);
-  const int chosen_group = trial->group();
-  EXPECT_TRUE(chosen_group == default_group || chosen_group == secondary_group);
+  scoped_refptr<FieldTrial> trial_1 =
+      CreateFieldTrial(kTrialName1, 100, kDefaultGroupName);
 
-  // The observer should be notified synchronously by the group() call.
-  EXPECT_EQ(kTrialName, observer.trial_name());
-  if (chosen_group == default_group)
-    EXPECT_EQ(kDefaultGroupName, observer.group_name());
-  else
-    EXPECT_EQ(kSecondaryGroupName, observer.group_name());
+  FieldTrialObserverAccessingGroup observer(trial_1);
+
+  scoped_refptr<FieldTrial> trial_2 =
+      CreateFieldTrial(kTrialName2, 100, kDefaultGroupName);
+
+  // No group should be selected for |trial_1| yet.
+  EXPECT_EQ(FieldTrial::kNotFinalized, trial_1->group_);
+
+  // Force selection of a group for |trial_2|. This will notify |observer| which
+  // will force the selection of a group for |trial_1|. This should not hang.
+  trial_2->Activate();
+
+  // The above call should have selected a group for |trial_1|.
+  EXPECT_NE(FieldTrial::kNotFinalized, trial_1->group_);
 }
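+
+// A minimal sketch of an observer like the FieldTrialObserverAccessingGroup
+// used above (the real class is defined earlier in this file; the interface
+// shape shown here is an assumption):
+//
+//   class FieldTrialObserverAccessingGroup : public FieldTrialList::Observer {
+//    public:
+//     explicit FieldTrialObserverAccessingGroup(
+//         scoped_refptr<FieldTrial> trial_to_access)
+//         : trial_to_access_(std::move(trial_to_access)) {
+//       FieldTrialList::AddObserver(this);
+//     }
+//     void OnFieldTrialGroupFinalized(const std::string& trial_name,
+//                                     const std::string& group_name) override {
+//       // Re-enters the FieldTrial machinery while observers are notified.
+//       trial_to_access_->Activate();
+//     }
+//
+//    private:
+//     scoped_refptr<FieldTrial> trial_to_access_;
+//   };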
 
-TEST_F(FieldTrialTest, ObserveDisabled) {
-  const char kTrialName[] = "TrialToObserve2";
-
-  TestFieldTrialObserver observer(TestFieldTrialObserver::ASYNCHRONOUS);
-  int default_group = -1;
-  scoped_refptr<FieldTrial> trial =
-      CreateFieldTrial(kTrialName, 100, kDefaultGroupName, &default_group);
-  trial->AppendGroup("A", 25);
-  trial->AppendGroup("B", 25);
-  trial->AppendGroup("C", 25);
-  trial->Disable();
-
-  // Observer shouldn't be notified of a disabled trial.
-  RunLoop().RunUntilIdle();
-  EXPECT_TRUE(observer.trial_name().empty());
-  EXPECT_TRUE(observer.group_name().empty());
-
-  // Observer shouldn't be notified even after a |group()| call.
-  EXPECT_EQ(default_group, trial->group());
-  RunLoop().RunUntilIdle();
-  EXPECT_TRUE(observer.trial_name().empty());
-  EXPECT_TRUE(observer.group_name().empty());
-}
-
-TEST_F(FieldTrialTest, ObserveForcedDisabled) {
-  const char kTrialName[] = "TrialToObserve3";
-
-  TestFieldTrialObserver observer(TestFieldTrialObserver::ASYNCHRONOUS);
-  int default_group = -1;
-  scoped_refptr<FieldTrial> trial =
-      CreateFieldTrial(kTrialName, 100, kDefaultGroupName, &default_group);
-  trial->AppendGroup("A", 25);
-  trial->AppendGroup("B", 25);
-  trial->AppendGroup("C", 25);
-  trial->SetForced();
-  trial->Disable();
-
-  // Observer shouldn't be notified of a disabled trial, even when forced.
-  RunLoop().RunUntilIdle();
-  EXPECT_TRUE(observer.trial_name().empty());
-  EXPECT_TRUE(observer.group_name().empty());
-
-  // Observer shouldn't be notified even after a |group()| call.
-  EXPECT_EQ(default_group, trial->group());
-  RunLoop().RunUntilIdle();
-  EXPECT_TRUE(observer.trial_name().empty());
-  EXPECT_TRUE(observer.group_name().empty());
-}
-
-TEST_F(FieldTrialTest, DisabledTrialNotActive) {
-  const char kTrialName[] = "DisabledTrial";
-  ASSERT_FALSE(FieldTrialList::TrialExists(kTrialName));
-
-  scoped_refptr<FieldTrial> trial =
-      CreateFieldTrial(kTrialName, 100, kDefaultGroupName, nullptr);
-  trial->AppendGroup("X", 50);
-  trial->Disable();
-
-  // Ensure the trial is not listed as active.
-  FieldTrial::ActiveGroups active_groups;
-  FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
-  EXPECT_TRUE(active_groups.empty());
-
-  // Ensure the trial is not listed in the |StatesToString()| result.
-  std::string states;
-  FieldTrialList::StatesToString(&states);
-  EXPECT_TRUE(states.empty());
-}
-
-TEST_F(FieldTrialTest, ExpirationYearNotExpired) {
-  const char kTrialName[] = "NotExpired";
+TEST_F(FieldTrialTest, NotDisabled) {
+  const char kTrialName[] = "NotDisabled";
   const char kGroupName[] = "Group2";
   const int kProbability = 100;
   ASSERT_FALSE(FieldTrialList::TrialExists(kTrialName));
 
   scoped_refptr<FieldTrial> trial =
-      CreateFieldTrial(kTrialName, kProbability, kDefaultGroupName, nullptr);
+      CreateFieldTrial(kTrialName, kProbability, kDefaultGroupName);
   trial->AppendGroup(kGroupName, kProbability);
   EXPECT_EQ(kGroupName, trial->group_name());
 }
@@ -1059,12 +804,12 @@
   for (int i = 0; i < kBucketCount; ++i) {
     const double entropy = i / static_cast<double>(kBucketCount);
 
-    scoped_refptr<FieldTrial> trial(
-        new FieldTrial("test", kBucketCount, "default", entropy));
+    scoped_refptr<FieldTrial> trial(new FieldTrial(
+        "test", kBucketCount, "default", entropy, /*is_low_anonymity=*/false));
     for (int j = 0; j < kBucketCount; ++j)
-      trial->AppendGroup(IntToString(j), 1);
+      trial->AppendGroup(NumberToString(j), 1);
 
-    EXPECT_EQ(IntToString(i), trial->group_name());
+    EXPECT_EQ(NumberToString(i), trial->group_name());
   }
 }
 
@@ -1072,8 +817,8 @@
   const double kEntropyValue = 1.0 - 1e-9;
   ASSERT_LT(kEntropyValue, 1.0);
 
-  scoped_refptr<FieldTrial> trial(
-      new FieldTrial("test", 2, "default", kEntropyValue));
+  scoped_refptr<FieldTrial> trial(new FieldTrial(
+      "test", 2, "default", kEntropyValue, /*is_low_anonymity=*/false));
   trial->AppendGroup("1", 1);
   trial->AppendGroup("2", 1);
 
@@ -1094,14 +839,13 @@
     { 0.95, kDefaultGroupName },
   };
 
-  for (size_t i = 0; i < arraysize(test_cases); ++i) {
-    TestFieldTrialObserver observer(TestFieldTrialObserver::ASYNCHRONOUS);
-    scoped_refptr<FieldTrial> trial(
-       FieldTrial::CreateSimulatedFieldTrial(kTrialName, 100, kDefaultGroupName,
-                                             test_cases[i].entropy_value));
+  for (auto& test_case : test_cases) {
+    TestFieldTrialObserver observer;
+    scoped_refptr<FieldTrial> trial(FieldTrial::CreateSimulatedFieldTrial(
+        kTrialName, 100, kDefaultGroupName, test_case.entropy_value));
     trial->AppendGroup("A", 80);
     trial->AppendGroup("B", 10);
-    EXPECT_EQ(test_cases[i].expected_group, trial->group_name());
+    EXPECT_EQ(test_case.expected_group, trial->group_name());
 
     // Field trial shouldn't have been registered with the list.
     EXPECT_FALSE(FieldTrialList::TrialExists(kTrialName));
@@ -1116,9 +860,9 @@
     FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
     EXPECT_TRUE(active_groups.empty());
 
-    // The trial shouldn't be listed in the |StatesToString()| result.
+    // The trial shouldn't be listed in the |AllStatesToString()| result.
     std::string states;
-    FieldTrialList::StatesToString(&states);
+    FieldTrialList::AllStatesToString(&states);
     EXPECT_TRUE(states.empty());
   }
 }
@@ -1126,28 +870,36 @@
 TEST(FieldTrialTestWithoutList, StatesStringFormat) {
   std::string save_string;
 
+  test::ScopedFeatureList scoped_feature_list;
+  // The test suite instantiates a FieldTrialList, but for the purpose of these
+  // tests it's cleaner to start from scratch.
+  scoped_feature_list.InitWithEmptyFeatureAndFieldTrialLists();
+
   // Scoping the first FieldTrialList, as we need another one to test the
   // importing function.
   {
-    FieldTrialList field_trial_list(nullptr);
-    scoped_refptr<FieldTrial> trial =
-        CreateFieldTrial("Abc", 10, "Default some name", nullptr);
-    trial->AppendGroup("cba", 10);
-    trial->group();
-    scoped_refptr<FieldTrial> trial2 =
-        CreateFieldTrial("Xyz", 10, "Default xxx", nullptr);
-    trial2->AppendGroup("zyx", 10);
-    trial2->group();
-    scoped_refptr<FieldTrial> trial3 =
-        CreateFieldTrial("zzz", 10, "default", nullptr);
+    test::ScopedFeatureList scoped_feature_list1;
+    scoped_feature_list1.InitWithNullFeatureAndFieldTrialLists();
+    FieldTrialList field_trial_list;
 
-    FieldTrialList::AllStatesToString(&save_string, false);
+    scoped_refptr<FieldTrial> trial =
+        CreateFieldTrial("Abc", 10, "Default some name");
+    trial->AppendGroup("cba", 10);
+    trial->Activate();
+    scoped_refptr<FieldTrial> trial2 =
+        CreateFieldTrial("Xyz", 10, "Default xxx");
+    trial2->AppendGroup("zyx", 10);
+    trial2->Activate();
+    scoped_refptr<FieldTrial> trial3 = CreateFieldTrial("zzz", 10, "default");
+
+    FieldTrialList::AllStatesToString(&save_string);
   }
 
   // Starting with a new blank FieldTrialList.
-  FieldTrialList field_trial_list(nullptr);
-  ASSERT_TRUE(field_trial_list.CreateTrialsFromString(save_string,
-                                                      std::set<std::string>()));
+  test::ScopedFeatureList scoped_feature_list2;
+  scoped_feature_list2.InitWithNullFeatureAndFieldTrialLists();
+  FieldTrialList field_trial_list;
+  ASSERT_TRUE(field_trial_list.CreateTrialsFromString(save_string));
 
   FieldTrial::ActiveGroups active_groups;
   field_trial_list.GetActiveFieldTrialGroups(&active_groups);
@@ -1159,31 +911,26 @@
   EXPECT_TRUE(field_trial_list.TrialExists("zzz"));
 }
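+
+// For reference, AllStatesToString() serializes each registered trial as
+// "TrialName/GroupName/", prefixing trials whose group has been activated
+// with '*'. For example, a single activated trial serializes as:
+//   "*Trial1/Group1/"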
 
-TEST(FieldTrialDeathTest, OneTimeRandomizedTrialWithoutFieldTrialList) {
-  // Trying to instantiate a one-time randomized field trial before the
-  // FieldTrialList is created should crash.
-  EXPECT_DEATH_IF_SUPPORTED(
-      FieldTrialList::FactoryGetFieldTrial(
-          "OneTimeRandomizedTrialWithoutFieldTrialList", 100, kDefaultGroupName,
-          FieldTrialList::kNoExpirationYear, 1, 1,
-          FieldTrial::ONE_TIME_RANDOMIZED, nullptr),
-      "");
-}
+class FieldTrialListTest : public ::testing::Test {
+ public:
+  FieldTrialListTest() {
+    // The test suite instantiates a FieldTrialList, but for the purpose of
+    // these tests it's cleaner to start from scratch.
+    scoped_feature_list_.InitWithEmptyFeatureAndFieldTrialLists();
+  }
 
-#if defined(OS_FUCHSIA)
-// TODO(crbug.com/752368): This is flaky on Fuchsia.
-#define MAYBE_TestCopyFieldTrialStateToFlags \
-  DISABLED_TestCopyFieldTrialStateToFlags
-#else
-#define MAYBE_TestCopyFieldTrialStateToFlags TestCopyFieldTrialStateToFlags
-#endif
-TEST(FieldTrialListTest, MAYBE_TestCopyFieldTrialStateToFlags) {
-  constexpr char kFieldTrialHandleSwitch[] = "test-field-trial-handle";
-  constexpr char kEnableFeaturesSwitch[] = "test-enable-features";
-  constexpr char kDisableFeaturesSwitch[] = "test-disable-features";
+ private:
+  test::ScopedFeatureList scoped_feature_list_;
+};
 
-  FieldTrialList field_trial_list(std::make_unique<MockEntropyProvider>());
-
+// TODO(b/298237462): Try to enable |FieldTrialList|.
+// TODO(b/316198056): Determine if tests should be enabled.
+#if !defined(COBALT_PENDING_CLEAN_UP)
+#if !BUILDFLAG(IS_IOS)
+// LaunchOptions is not available on iOS.
+TEST_F(FieldTrialListTest, TestCopyFieldTrialStateToFlags) {
+  test::ScopedFeatureList scoped_feature_list1;
+  scoped_feature_list1.InitWithEmptyFeatureAndFieldTrialLists();
   std::unique_ptr<FeatureList> feature_list(new FeatureList);
   feature_list->InitializeFromCommandLine("A,B", "C");
 
@@ -1191,116 +938,118 @@
   feature_list->RegisterFieldTrialOverride(
       "MyFeature", FeatureList::OVERRIDE_ENABLE_FEATURE, trial);
 
-  test::ScopedFeatureList scoped_feature_list;
-  scoped_feature_list.InitWithFeatureList(std::move(feature_list));
+  test::ScopedFeatureList scoped_feature_list2;
+  scoped_feature_list2.InitWithFeatureList(std::move(feature_list));
 
   FilePath test_file_path = FilePath(FILE_PATH_LITERAL("Program"));
   CommandLine command_line = CommandLine(test_file_path);
+  LaunchOptions launch_options;
 
-  FieldTrialList::CopyFieldTrialStateToFlags(
-      kFieldTrialHandleSwitch, kEnableFeaturesSwitch, kDisableFeaturesSwitch,
-      &command_line);
-  EXPECT_TRUE(command_line.HasSwitch(kFieldTrialHandleSwitch));
+  FieldTrialList::PopulateLaunchOptionsWithFieldTrialState(&command_line,
+                                                           &launch_options);
+  EXPECT_TRUE(command_line.HasSwitch(switches::kFieldTrialHandle));
 
-  // Explictly specified enabled/disabled features should be specified.
-  EXPECT_EQ("A,B", command_line.GetSwitchValueASCII(kEnableFeaturesSwitch));
-  EXPECT_EQ("C", command_line.GetSwitchValueASCII(kDisableFeaturesSwitch));
+  // Explicitly enabled/disabled features should be on the command line.
+  EXPECT_EQ("A,B", command_line.GetSwitchValueASCII(switches::kEnableFeatures));
+  EXPECT_EQ("C", command_line.GetSwitchValueASCII(switches::kDisableFeatures));
 }
+#endif  // !BUILDFLAG(IS_IOS)
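+
+// The populated command line looks roughly like the following (illustrative;
+// the --field-trial-handle value is a platform-specific shared-memory
+// descriptor):
+//   Program --field-trial-handle=<descriptor>
+//       --enable-features=A,B --disable-features=C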
 
-TEST(FieldTrialListTest, InstantiateAllocator) {
+TEST_F(FieldTrialListTest, InstantiateAllocator) {
   test::ScopedFeatureList scoped_feature_list;
-  scoped_feature_list.Init();
+  scoped_feature_list.InitWithEmptyFeatureAndFieldTrialLists();
 
-  FieldTrialList field_trial_list(nullptr);
+  FieldTrialList* field_trial_list = FieldTrialList::GetInstance();
+
   FieldTrialList::CreateFieldTrial("Trial1", "Group1");
 
   FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded();
-  void* memory = field_trial_list.field_trial_allocator_->shared_memory();
-  size_t used = field_trial_list.field_trial_allocator_->used();
+  const void* memory = field_trial_list->field_trial_allocator_->data();
+  size_t used = field_trial_list->field_trial_allocator_->used();
 
   // Ensure that the function is idempotent.
   FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded();
-  void* new_memory = field_trial_list.field_trial_allocator_->shared_memory();
-  size_t new_used = field_trial_list.field_trial_allocator_->used();
+  const void* new_memory = field_trial_list->field_trial_allocator_->data();
+  size_t new_used = field_trial_list->field_trial_allocator_->used();
   EXPECT_EQ(memory, new_memory);
   EXPECT_EQ(used, new_used);
 }
 
-TEST(FieldTrialListTest, AddTrialsToAllocator) {
+TEST_F(FieldTrialListTest, AddTrialsToAllocator) {
   std::string save_string;
-  SharedMemoryHandle handle;
+  base::ReadOnlySharedMemoryRegion shm_region;
 
   // Scoping the first FieldTrialList, as we need another one to test that it
   // matches.
   {
-    test::ScopedFeatureList scoped_feature_list;
-    scoped_feature_list.Init();
+    test::ScopedFeatureList scoped_feature_list1;
+    scoped_feature_list1.InitWithEmptyFeatureAndFieldTrialLists();
 
-    FieldTrialList field_trial_list(nullptr);
     FieldTrialList::CreateFieldTrial("Trial1", "Group1");
     FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded();
-    FieldTrialList::AllStatesToString(&save_string, false);
-    handle = SharedMemory::DuplicateHandle(
-        field_trial_list.field_trial_allocator_->shared_memory()->handle());
+    FieldTrialList::AllStatesToString(&save_string);
+    shm_region = FieldTrialList::DuplicateFieldTrialSharedMemoryForTesting();
+    ASSERT_TRUE(shm_region.IsValid());
   }
 
-  FieldTrialList field_trial_list2(nullptr);
-  std::unique_ptr<SharedMemory> shm(new SharedMemory(handle, true));
+  test::ScopedFeatureList scoped_feature_list2;
+  scoped_feature_list2.InitWithEmptyFeatureAndFieldTrialLists();
+
+  // 4 KiB is enough to hold only the trials created for this test.
-  shm.get()->Map(4 << 10);
-  FieldTrialList::CreateTrialsFromSharedMemory(std::move(shm));
+  base::ReadOnlySharedMemoryMapping shm_mapping = shm_region.MapAt(0, 4 << 10);
+  ASSERT_TRUE(shm_mapping.IsValid());
+  FieldTrialList::CreateTrialsFromSharedMemoryMapping(std::move(shm_mapping));
   std::string check_string;
-  FieldTrialList::AllStatesToString(&check_string, false);
+  FieldTrialList::AllStatesToString(&check_string);
   EXPECT_EQ(save_string, check_string);
 }
 
-TEST(FieldTrialListTest, DoNotAddSimulatedFieldTrialsToAllocator) {
+TEST_F(FieldTrialListTest, DoNotAddSimulatedFieldTrialsToAllocator) {
   constexpr char kTrialName[] = "trial";
-  SharedMemoryHandle handle;
+  base::ReadOnlySharedMemoryRegion shm_region;
   {
-    test::ScopedFeatureList scoped_feature_list;
-    scoped_feature_list.Init();
+    test::ScopedFeatureList scoped_feature_list1;
+    scoped_feature_list1.InitWithEmptyFeatureAndFieldTrialLists();
 
-    // Create a simulated trial and a real trial and call group() on them, which
-    // should only add the real trial to the field trial allocator.
-    FieldTrialList field_trial_list(nullptr);
+    // Create a simulated trial and a real trial and call Activate() on them,
+    // which should only add the real trial to the field trial allocator.
     FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded();
 
     // This shouldn't add to the allocator.
     scoped_refptr<FieldTrial> simulated_trial =
         FieldTrial::CreateSimulatedFieldTrial(kTrialName, 100, "Simulated",
                                               0.95);
-    simulated_trial->group();
+    simulated_trial->Activate();
 
     // This should add to the allocator.
     FieldTrial* real_trial =
         FieldTrialList::CreateFieldTrial(kTrialName, "Real");
-    real_trial->group();
+    real_trial->Activate();
 
-    handle = SharedMemory::DuplicateHandle(
-        field_trial_list.field_trial_allocator_->shared_memory()->handle());
+    shm_region = FieldTrialList::DuplicateFieldTrialSharedMemoryForTesting();
+    ASSERT_TRUE(shm_region.IsValid());
   }
 
   // Check that there's only one entry in the allocator.
-  FieldTrialList field_trial_list2(nullptr);
-  std::unique_ptr<SharedMemory> shm(new SharedMemory(handle, true));
+  test::ScopedFeatureList scoped_feature_list2;
+  scoped_feature_list2.InitWithEmptyFeatureAndFieldTrialLists();
   // 4 KiB is enough to hold only the trials created for this test.
-  shm.get()->Map(4 << 10);
-  FieldTrialList::CreateTrialsFromSharedMemory(std::move(shm));
+  base::ReadOnlySharedMemoryMapping shm_mapping = shm_region.MapAt(0, 4 << 10);
+  ASSERT_TRUE(shm_mapping.IsValid());
+  FieldTrialList::CreateTrialsFromSharedMemoryMapping(std::move(shm_mapping));
   std::string check_string;
-  FieldTrialList::AllStatesToString(&check_string, false);
+  FieldTrialList::AllStatesToString(&check_string);
   ASSERT_EQ(check_string.find("Simulated"), std::string::npos);
 }
 
-TEST(FieldTrialListTest, AssociateFieldTrialParams) {
+TEST_F(FieldTrialListTest, AssociateFieldTrialParams) {
   test::ScopedFeatureList scoped_feature_list;
-  scoped_feature_list.Init();
+  scoped_feature_list.InitWithEmptyFeatureAndFieldTrialLists();
 
   std::string trial_name("Trial1");
   std::string group_name("Group1");
 
   // Create a field trial with some params.
-  FieldTrialList field_trial_list(nullptr);
   FieldTrialList::CreateFieldTrial(trial_name, group_name);
   std::map<std::string, std::string> params;
   params["key1"] = "value1";
@@ -1319,30 +1068,22 @@
 
   // Check that we fetch the param from shared memory properly.
   std::map<std::string, std::string> new_params;
-  FieldTrialParamAssociator::GetInstance()->GetFieldTrialParams(trial_name,
-                                                                &new_params);
+  GetFieldTrialParams(trial_name, &new_params);
   EXPECT_EQ("value1", new_params["key1"]);
   EXPECT_EQ("value2", new_params["key2"]);
   EXPECT_EQ(2U, new_params.size());
 }
 
-#if defined(OS_FUCHSIA)
-// TODO(crbug.com/752368): This is flaky on Fuchsia.
-#define MAYBE_ClearParamsFromSharedMemory DISABLED_ClearParamsFromSharedMemory
-#else
-#define MAYBE_ClearParamsFromSharedMemory ClearParamsFromSharedMemory
-#endif
-TEST(FieldTrialListTest, MAYBE_ClearParamsFromSharedMemory) {
+TEST_F(FieldTrialListTest, ClearParamsFromSharedMemory) {
   std::string trial_name("Trial1");
   std::string group_name("Group1");
 
-  SharedMemoryHandle handle;
+  base::ReadOnlySharedMemoryRegion shm_region;
   {
-    test::ScopedFeatureList scoped_feature_list;
-    scoped_feature_list.Init();
+    test::ScopedFeatureList scoped_feature_list1;
+    scoped_feature_list1.InitWithEmptyFeatureAndFieldTrialLists();
 
     // Create a field trial with some params.
-    FieldTrialList field_trial_list(nullptr);
     FieldTrial* trial =
         FieldTrialList::CreateFieldTrial(trial_name, group_name);
     std::map<std::string, std::string> params;
@@ -1361,33 +1102,35 @@
 
     // Check that there are no params associated with the field trial anymore.
     std::map<std::string, std::string> new_params;
-    FieldTrialParamAssociator::GetInstance()->GetFieldTrialParams(trial_name,
-                                                                  &new_params);
+    GetFieldTrialParams(trial_name, &new_params);
     EXPECT_EQ(0U, new_params.size());
 
     // Now duplicate the region so we can easily check that the trial is still
     // in shared memory via AllStatesToString.
-    handle = SharedMemory::DuplicateHandle(
-        field_trial_list.field_trial_allocator_->shared_memory()->handle());
+    shm_region = FieldTrialList::DuplicateFieldTrialSharedMemoryForTesting();
+    ASSERT_TRUE(shm_region.IsValid());
   }
 
   // Check that we have the trial.
-  FieldTrialList field_trial_list2(nullptr);
-  std::unique_ptr<SharedMemory> shm(new SharedMemory(handle, true));
+  test::ScopedFeatureList scoped_feature_list2;
+  scoped_feature_list2.InitWithEmptyFeatureAndFieldTrialLists();
   // 4 KiB is enough to hold only the trials created for this test.
-  shm.get()->Map(4 << 10);
-  FieldTrialList::CreateTrialsFromSharedMemory(std::move(shm));
+  base::ReadOnlySharedMemoryMapping shm_mapping = shm_region.MapAt(0, 4 << 10);
+  ASSERT_TRUE(shm_mapping.IsValid());
+  FieldTrialList::CreateTrialsFromSharedMemoryMapping(std::move(shm_mapping));
   std::string check_string;
-  FieldTrialList::AllStatesToString(&check_string, false);
+  FieldTrialList::AllStatesToString(&check_string);
   EXPECT_EQ("*Trial1/Group1/", check_string);
 }
 
-TEST(FieldTrialListTest, DumpAndFetchFromSharedMemory) {
+TEST_F(FieldTrialListTest, DumpAndFetchFromSharedMemory) {
   std::string trial_name("Trial1");
   std::string group_name("Group1");
 
   // Create a field trial with some params.
-  FieldTrialList field_trial_list(nullptr);
+  test::ScopedFeatureList scoped_feature_list;
+  scoped_feature_list.InitWithEmptyFeatureAndFieldTrialLists();
+
   FieldTrialList::CreateFieldTrial(trial_name, group_name);
   std::map<std::string, std::string> params;
   params["key1"] = "value1";
@@ -1395,11 +1138,13 @@
   FieldTrialParamAssociator::GetInstance()->AssociateFieldTrialParams(
       trial_name, group_name, params);
 
-  std::unique_ptr<SharedMemory> shm(new SharedMemory());
   // 4 KiB is enough to hold only the trials created for this test.
-  shm.get()->CreateAndMapAnonymous(4 << 10);
+  base::MappedReadOnlyRegion shm =
+      base::ReadOnlySharedMemoryRegion::Create(4 << 10);
+  ASSERT_TRUE(shm.IsValid());
   // We _could_ use PersistentMemoryAllocator; this just takes fewer params.
-  SharedPersistentMemoryAllocator allocator(std::move(shm), 1, "", false);
+  WritableSharedPersistentMemoryAllocator allocator(std::move(shm.mapping), 1,
+                                                    "");
 
   // Dump and subsequently retrieve the field trial to |allocator|.
   FieldTrialList::DumpAllFieldTrialsToPersistentAllocator(&allocator);
@@ -1425,48 +1170,131 @@
   EXPECT_EQ("value2", shm_params["key2"]);
 }
 
-#if !defined(OS_NACL)
-TEST(FieldTrialListTest, SerializeSharedMemoryHandleMetadata) {
-  std::unique_ptr<SharedMemory> shm(new SharedMemory());
-  shm->CreateAndMapAnonymous(4 << 10);
-
+#if !BUILDFLAG(IS_NACL) && !BUILDFLAG(IS_IOS)
+MULTIPROCESS_TEST_MAIN(SerializeSharedMemoryRegionMetadata) {
   std::string serialized =
-      FieldTrialList::SerializeSharedMemoryHandleMetadata(shm->handle());
-#if defined(OS_WIN) || defined(OS_FUCHSIA)
-  SharedMemoryHandle deserialized =
-      FieldTrialList::DeserializeSharedMemoryHandleMetadata(serialized);
-#else
-  // Use a valid-looking arbitrary number for the file descriptor. It's not
-  // being used in this unittest, but needs to pass sanity checks in the
-  // handle's constructor.
-  SharedMemoryHandle deserialized =
-      FieldTrialList::DeserializeSharedMemoryHandleMetadata(42, serialized);
+      CommandLine::ForCurrentProcess()->GetSwitchValueASCII("field_trials");
+  std::string guid_string =
+      CommandLine::ForCurrentProcess()->GetSwitchValueASCII("guid");
+
+  int fd = 42;
+#if BUILDFLAG(IS_ANDROID)
+  fd = base::GlobalDescriptors::GetInstance()->MaybeGet(42);
+  CHECK_NE(fd, -1);
 #endif
-  EXPECT_EQ(deserialized.GetGUID(), shm->handle().GetGUID());
-  EXPECT_FALSE(deserialized.GetGUID().is_empty());
+
+  base::ReadOnlySharedMemoryRegion deserialized =
+      FieldTrialList::DeserializeSharedMemoryRegionMetadata(serialized, fd);
+  CHECK(deserialized.IsValid());
+  CHECK_EQ(deserialized.GetGUID().ToString(), guid_string);
+  CHECK(!deserialized.GetGUID().is_empty());
+
+  return 0;
 }
-#endif  // !defined(OS_NACL)
+
+TEST_F(FieldTrialListTest, SerializeSharedMemoryRegionMetadata) {
+  base::MappedReadOnlyRegion shm =
+      base::ReadOnlySharedMemoryRegion::Create(4 << 10);
+  ASSERT_TRUE(shm.IsValid());
+
+  LaunchOptions options;
+  std::string serialized =
+      FieldTrialList::SerializeSharedMemoryRegionMetadata(shm.region, &options);
+
+#if BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_MAC)
+#if BUILDFLAG(IS_ANDROID)
+  int shm_fd = shm.region.GetPlatformHandle();
+#else
+  int shm_fd = shm.region.GetPlatformHandle().fd;
+#endif  // BUILDFLAG(IS_ANDROID)
+  // Pick an arbitrary FD number to use for the shmem FD in the child.
+  options.fds_to_remap.emplace_back(shm_fd, 42);
+#endif  // BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_MAC)
+
+  CommandLine cmd_line = GetMultiProcessTestChildBaseCommandLine();
+  cmd_line.AppendSwitchASCII("field_trials", serialized);
+  cmd_line.AppendSwitchASCII("guid", shm.region.GetGUID().ToString());
+
+  Process process = SpawnMultiProcessTestChild(
+      "SerializeSharedMemoryRegionMetadata", cmd_line, options);
+
+  int exit_code;
+  EXPECT_TRUE(WaitForMultiprocessTestChildExit(
+      process, TestTimeouts::action_timeout(), &exit_code));
+  EXPECT_EQ(0, exit_code);
+}
+#endif  // !BUILDFLAG(IS_NACL) && !BUILDFLAG(IS_IOS)
 
 // Verify that the field trial shared memory handle is really read-only, and
-// does not allow writable mappings. Test disabled on NaCl, Windows and Fuchsia
-// which don't support/implement GetFieldTrialHandle(). For Fuchsia, see
-// crbug.com/752368
-#if !defined(OS_NACL) && !defined(OS_WIN) && !defined(OS_FUCHSIA)
-TEST(FieldTrialListTest, CheckReadOnlySharedMemoryHandle) {
-  FieldTrialList field_trial_list(nullptr);
-  FieldTrialList::CreateFieldTrial("Trial1", "Group1");
-
+// does not allow writable mappings. Test disabled on NaCl and Mac, which
+// don't support/implement shared memory configuration.
+#if !BUILDFLAG(IS_NACL) && !BUILDFLAG(IS_MAC)
+TEST_F(FieldTrialListTest, CheckReadOnlySharedMemoryRegion) {
   test::ScopedFeatureList scoped_feature_list;
-  scoped_feature_list.Init();
+  scoped_feature_list.InitWithEmptyFeatureAndFieldTrialLists();
+
+  FieldTrialList::CreateFieldTrial("Trial1", "Group1");
 
   FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded();
 
-  SharedMemoryHandle handle = FieldTrialList::GetFieldTrialHandle();
-  ASSERT_TRUE(handle.IsValid());
+  base::ReadOnlySharedMemoryRegion region =
+      FieldTrialList::DuplicateFieldTrialSharedMemoryForTesting();
+  ASSERT_TRUE(region.IsValid());
 
-  ASSERT_TRUE(CheckReadOnlySharedMemoryHandleForTesting(handle));
+  ASSERT_TRUE(CheckReadOnlyPlatformSharedMemoryRegionForTesting(
+      base::ReadOnlySharedMemoryRegion::TakeHandleForSerialization(
+          std::move(region))));
 }
-#endif  // !OS_NACL && !OS_WIN && !OS_FUCHSIA
+#endif  // !BUILDFLAG(IS_NACL) && !BUILDFLAG(IS_MAC)
+
+TEST_F(FieldTrialListTest, TestGetRandomizedFieldTrialCount) {
+  EXPECT_EQ(0u, FieldTrialList::GetFieldTrialCount());
+  EXPECT_EQ(0u, FieldTrialList::GetRandomizedFieldTrialCount());
+
+  const char name1[] = "name 1 test";
+  const char name2[] = "name 2 test";
+  const char name3[] = "name 3 test";
+  const char group1[] = "group 1";
+
+  // Create a field trial with a single group.
+  scoped_refptr<FieldTrial> trial1 =
+      FieldTrialList::CreateFieldTrial(name1, group1);
+  EXPECT_NE(FieldTrial::kNotFinalized, trial1->group_);
+  EXPECT_EQ(group1, trial1->group_name_internal());
+
+  EXPECT_EQ(1u, FieldTrialList::GetFieldTrialCount());
+  EXPECT_EQ(0u, FieldTrialList::GetRandomizedFieldTrialCount());
+
+  // Create a randomized field trial.
+  scoped_refptr<FieldTrial> trial2 =
+      CreateFieldTrial(name2, 10, "default name 2 test");
+  EXPECT_EQ(FieldTrial::kNotFinalized, trial2->group_);
+  EXPECT_EQ(name2, trial2->trial_name());
+  EXPECT_EQ("", trial2->group_name_internal());
+
+  EXPECT_EQ(2u, FieldTrialList::GetFieldTrialCount());
+  EXPECT_EQ(1u, FieldTrialList::GetRandomizedFieldTrialCount());
+
+  // Append a first group to trial 2. This doesn't affect GetFieldTrialCount()
+  // and GetRandomizedFieldTrialCount().
+  trial2->AppendGroup("a first group", 7);
+
+  EXPECT_EQ(2u, FieldTrialList::GetFieldTrialCount());
+  EXPECT_EQ(1u, FieldTrialList::GetRandomizedFieldTrialCount());
+
+  // Create another randomized field trial.
+  scoped_refptr<FieldTrial> trial3 =
+      CreateFieldTrial(name3, 10, "default name 3 test");
+  EXPECT_EQ(FieldTrial::kNotFinalized, trial3->group_);
+  EXPECT_EQ(name3, trial3->trial_name());
+  EXPECT_EQ("", trial3->group_name_internal());
+
+  EXPECT_EQ(3u, FieldTrialList::GetFieldTrialCount());
+  EXPECT_EQ(2u, FieldTrialList::GetRandomizedFieldTrialCount());
+
+  // Note: FieldTrialList should delete the objects at shutdown.
+}
+#endif  // !defined(COBALT_PENDING_CLEAN_UP)
 
 TEST_F(FieldTrialTest, TestAllParamsToString) {
   std::string exptected_output = "t1.g1:p1/v1/p2/v2";
@@ -1477,27 +1305,73 @@
   params["p2"] = "v2";
   FieldTrialParamAssociator::GetInstance()->AssociateFieldTrialParams(
       "t1", "g1", params);
-  EXPECT_EQ(
-      "", FieldTrialList::AllParamsToString(false, &MockEscapeQueryParamValue));
+  EXPECT_EQ("", FieldTrialList::AllParamsToString(&MockEscapeQueryParamValue));
 
-  scoped_refptr<FieldTrial> trial1 =
-      CreateFieldTrial("t1", 100, "Default", nullptr);
+  scoped_refptr<FieldTrial> trial1 = CreateFieldTrial("t1", 100, "Default");
   trial1->AppendGroup("g1", 100);
-  trial1->group();
-  EXPECT_EQ(exptected_output, FieldTrialList::AllParamsToString(
-                                  false, &MockEscapeQueryParamValue));
+  trial1->Activate();
+  EXPECT_EQ(exptected_output,
+            FieldTrialList::AllParamsToString(&MockEscapeQueryParamValue));
 
   // Create a study with two groups and params that don't belong to the
   // assigned group. These params should not appear in the output.
   FieldTrialParamAssociator::GetInstance()->AssociateFieldTrialParams(
       "t2", "g2", params);
-  scoped_refptr<FieldTrial> trial2 =
-      CreateFieldTrial("t2", 100, "Default", nullptr);
+  scoped_refptr<FieldTrial> trial2 = CreateFieldTrial("t2", 100, "Default");
   trial2->AppendGroup("g1", 100);
   trial2->AppendGroup("g2", 0);
-  trial2->group();
-  EXPECT_EQ(exptected_output, FieldTrialList::AllParamsToString(
-                                  false, &MockEscapeQueryParamValue));
+  trial2->Activate();
+  EXPECT_EQ(exptected_output,
+            FieldTrialList::AllParamsToString(&MockEscapeQueryParamValue));
+}
+
+TEST_F(FieldTrialTest, GetActiveFieldTrialGroups_LowAnonymity) {
+  // Create a field trial with a single winning group.
+  scoped_refptr<FieldTrial> trial_1 = CreateFieldTrial("Normal", 10, "Default");
+  trial_1->AppendGroup("Winner 1", 10);
+  trial_1->Activate();
+
+  // Create a second field trial with a single winning group, marked as
+  // low-anonymity.
+  scoped_refptr<FieldTrial> trial_2 = CreateFieldTrial(
+      "Low anonymity", 10, "Default", /*is_low_anonymity=*/true);
+  trial_2->AppendGroup("Winner 2", 10);
+  trial_2->Activate();
+
+  // Check that |FieldTrialList::GetActiveFieldTrialGroups()| does not include
+  // the low-anonymity trial.
+  FieldTrial::ActiveGroups active_groups_for_metrics;
+  FieldTrialList::GetActiveFieldTrialGroups(&active_groups_for_metrics);
+  EXPECT_THAT(
+      active_groups_for_metrics,
+      testing::UnorderedPointwise(CompareActiveGroupToFieldTrial(), {trial_1}));
+
+  // Check that
+  // |FieldTrialListIncludingLowAnonymity::GetActiveFieldTrialGroups()| includes
+  // both trials.
+  FieldTrial::ActiveGroups active_groups;
+  FieldTrialListIncludingLowAnonymity::GetActiveFieldTrialGroupsForTesting(
+      &active_groups);
+  EXPECT_THAT(active_groups,
+              testing::UnorderedPointwise(CompareActiveGroupToFieldTrial(),
+                                          {trial_1, trial_2}));
+}
+
+TEST_F(FieldTrialTest, ObserveIncludingLowAnonymity) {
+  TestFieldTrialObserver observer;
+  TestFieldTrialObserverIncludingLowAnonymity low_anonymity_observer;
+
+  // Create a low-anonymity trial with one active group.
+  const char kTrialName[] = "TrialToObserve1";
+  scoped_refptr<FieldTrial> trial = CreateFieldTrial(
+      kTrialName, 100, kDefaultGroupName, /*is_low_anonymity=*/true);
+  trial->Activate();
+
+  // Only the low_anonymity_observer should be notified.
+  EXPECT_EQ("", observer.trial_name());
+  EXPECT_EQ("", observer.group_name());
+  EXPECT_EQ(kTrialName, low_anonymity_observer.trial_name());
+  EXPECT_EQ(kDefaultGroupName, low_anonymity_observer.group_name());
 }
 
 }  // namespace base
diff --git a/base/metrics/histogram.cc b/base/metrics/histogram.cc
index ef56619..88d1dc4 100644
--- a/base/metrics/histogram.cc
+++ b/base/metrics/histogram.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -13,14 +13,17 @@
 #include <limits.h>
 #include <math.h>
 
-#include <algorithm>
+#include <memory>
 #include <string>
 #include <utility>
 
 #include "base/compiler_specific.h"
+#include "base/cxx17_backports.h"
 #include "base/debug/alias.h"
 #include "base/logging.h"
 #include "base/memory/ptr_util.h"
+#include "base/memory/raw_ptr.h"
+#include "base/memory/raw_ref.h"
 #include "base/metrics/dummy_histogram.h"
 #include "base/metrics/histogram_functions.h"
 #include "base/metrics/metrics_hashes.h"
@@ -28,13 +31,14 @@
 #include "base/metrics/persistent_memory_allocator.h"
 #include "base/metrics/sample_vector.h"
 #include "base/metrics/statistics_recorder.h"
+#include "base/notreached.h"
 #include "base/pickle.h"
+#include "base/ranges/algorithm.h"
 #include "base/strings/string_util.h"
-#include "base/strings/stringprintf.h"
+#include "base/strings/utf_string_conversions.h"
 #include "base/synchronization/lock.h"
 #include "base/values.h"
 #include "build/build_config.h"
-#include "starboard/types.h"
 
 namespace base {
 
@@ -45,17 +49,17 @@
                             int* flags,
                             int* declared_min,
                             int* declared_max,
-                            uint32_t* bucket_count,
+                            size_t* bucket_count,
                             uint32_t* range_checksum) {
-  if (!iter->ReadString(histogram_name) ||
-      !iter->ReadInt(flags) ||
-      !iter->ReadInt(declared_min) ||
-      !iter->ReadInt(declared_max) ||
-      !iter->ReadUInt32(bucket_count) ||
+  uint32_t bucket_count_u32;
+  if (!iter->ReadString(histogram_name) || !iter->ReadInt(flags) ||
+      !iter->ReadInt(declared_min) || !iter->ReadInt(declared_max) ||
+      !iter->ReadUInt32(&bucket_count_u32) ||
       !iter->ReadUInt32(range_checksum)) {
     DLOG(ERROR) << "Pickle error decoding Histogram: " << *histogram_name;
     return false;
   }
+  *bucket_count = bucket_count_u32;
 
   // Since these fields may have come from an untrusted renderer, do additional
   // checks above and beyond those in Histogram::Initialize()
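+  // For example (illustrative; the actual validation follows below), a pickle
+  // from a compromised renderer must not carry an implausible bucket count:
+  //   if (*bucket_count > Histogram::kBucketCount_MAX)
+  //     return false;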
@@ -93,17 +97,17 @@
 typedef HistogramBase::Count Count;
 typedef HistogramBase::Sample Sample;
 
-// static
-const uint32_t Histogram::kBucketCount_MAX = 16384u;
-
 class Histogram::Factory {
  public:
   Factory(const std::string& name,
           HistogramBase::Sample minimum,
           HistogramBase::Sample maximum,
-          uint32_t bucket_count,
+          size_t bucket_count,
           int32_t flags)
-    : Factory(name, HISTOGRAM, minimum, maximum, bucket_count, flags) {}
+      : Factory(name, HISTOGRAM, minimum, maximum, bucket_count, flags) {}
+
+  Factory(const Factory&) = delete;
+  Factory& operator=(const Factory&) = delete;
 
   // Create histogram based on construction parameters. Caller takes
   // ownership of the returned object.
@@ -114,14 +118,14 @@
           HistogramType histogram_type,
           HistogramBase::Sample minimum,
           HistogramBase::Sample maximum,
-          uint32_t bucket_count,
+          size_t bucket_count,
           int32_t flags)
-    : name_(name),
-      histogram_type_(histogram_type),
-      minimum_(minimum),
-      maximum_(maximum),
-      bucket_count_(bucket_count),
-      flags_(flags) {}
+      : name_(name),
+        histogram_type_(histogram_type),
+        minimum_(minimum),
+        maximum_(maximum),
+        bucket_count_(bucket_count),
+        flags_(flags) {}
 
   // Create a BucketRanges structure appropriate for this histogram.
   virtual BucketRanges* CreateRanges() {
@@ -133,8 +137,7 @@
   // Allocate the correct Histogram object off the heap (in case persistent
   // memory is not available).
   virtual std::unique_ptr<HistogramBase> HeapAlloc(const BucketRanges* ranges) {
-    return WrapUnique(
-        new Histogram(GetPermanentName(name_), minimum_, maximum_, ranges));
+    return WrapUnique(new Histogram(GetPermanentName(*name_), ranges));
   }
 
   // Perform any required datafill on the just-created histogram.  If
@@ -145,24 +148,20 @@
   // These values are protected (instead of private) because they need to
   // be accessible to methods of sub-classes in order to avoid passing
   // unnecessary parameters everywhere.
-  const std::string& name_;
+  const raw_ref<const std::string> name_;
   const HistogramType histogram_type_;
   HistogramBase::Sample minimum_;
   HistogramBase::Sample maximum_;
-  uint32_t bucket_count_;
+  size_t bucket_count_;
   int32_t flags_;
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(Factory);
 };
 
 HistogramBase* Histogram::Factory::Build() {
-  HistogramBase* histogram = StatisticsRecorder::FindHistogram(name_);
+  HistogramBase* histogram = StatisticsRecorder::FindHistogram(*name_);
   if (!histogram) {
     // TODO(gayane): |HashMetricName()| is called again in Histogram
     // constructor. Refactor code to avoid the additional call.
-    bool should_record =
-        StatisticsRecorder::ShouldRecordHistogram(HashMetricName(name_));
+    bool should_record = StatisticsRecorder::ShouldRecordHistogram(
+        HashMetricNameAs32Bits(*name_));
     if (!should_record)
       return DummyHistogram::GetInstance();
     // To avoid racy destruction at shutdown, the following will be leaked.
@@ -177,7 +176,7 @@
     // at run-time. In the latter case, those ctor parameters are zero and
     // the results extracted from the result of CreateRanges().
     if (bucket_count_ == 0) {
-      bucket_count_ = static_cast<uint32_t>(registered_ranges->bucket_count());
+      bucket_count_ = registered_ranges->bucket_count();
       minimum_ = registered_ranges->range(1);
       maximum_ = registered_ranges->range(bucket_count_ - 1);
     }
@@ -194,13 +193,8 @@
     PersistentHistogramAllocator* allocator = GlobalHistogramAllocator::Get();
     if (allocator) {
       tentative_histogram = allocator->AllocateHistogram(
-          histogram_type_,
-          name_,
-          minimum_,
-          maximum_,
-          registered_ranges,
-          flags_,
-          &histogram_ref);
+          histogram_type_, *name_, minimum_, maximum_, registered_ranges,
+          flags_, &histogram_ref);
     }
 
     // Handle the case where no persistent allocator is present or the
@@ -239,8 +233,8 @@
     // return would cause Chrome to crash; better to just record it for later
     // analysis.
     UmaHistogramSparse("Histogram.MismatchedConstructionArguments",
-                       static_cast<Sample>(HashMetricName(name_)));
-    DLOG(ERROR) << "Histogram " << name_
+                       static_cast<Sample>(HashMetricName(*name_)));
+    DLOG(ERROR) << "Histogram " << *name_
                 << " has mismatched construction arguments";
     return DummyHistogram::GetInstance();
   }
@@ -250,11 +244,15 @@
 HistogramBase* Histogram::FactoryGet(const std::string& name,
                                      Sample minimum,
                                      Sample maximum,
-                                     uint32_t bucket_count,
+                                     size_t bucket_count,
                                      int32_t flags) {
   bool valid_arguments =
       InspectConstructionArguments(name, &minimum, &maximum, &bucket_count);
-  DCHECK(valid_arguments);
+  DCHECK(valid_arguments) << name;
+  if (!valid_arguments) {
+    DLOG(ERROR) << "Histogram " << name << " dropped for invalid parameters.";
+    return DummyHistogram::GetInstance();
+  }
 
   return Factory(name, minimum, maximum, bucket_count, flags).Build();
 }
@@ -262,8 +260,10 @@
 HistogramBase* Histogram::FactoryTimeGet(const std::string& name,
                                          TimeDelta minimum,
                                          TimeDelta maximum,
-                                         uint32_t bucket_count,
+                                         size_t bucket_count,
                                          int32_t flags) {
+  DCHECK_LT(minimum.InMilliseconds(), std::numeric_limits<Sample>::max());
+  DCHECK_LT(maximum.InMilliseconds(), std::numeric_limits<Sample>::max());
   return FactoryGet(name, static_cast<Sample>(minimum.InMilliseconds()),
                     static_cast<Sample>(maximum.InMilliseconds()), bucket_count,
                     flags);
@@ -272,8 +272,10 @@
 HistogramBase* Histogram::FactoryMicrosecondsTimeGet(const std::string& name,
                                                      TimeDelta minimum,
                                                      TimeDelta maximum,
-                                                     uint32_t bucket_count,
+                                                     size_t bucket_count,
                                                      int32_t flags) {
+  DCHECK_LT(minimum.InMicroseconds(), std::numeric_limits<Sample>::max());
+  DCHECK_LT(maximum.InMicroseconds(), std::numeric_limits<Sample>::max());
   return FactoryGet(name, static_cast<Sample>(minimum.InMicroseconds()),
                     static_cast<Sample>(maximum.InMicroseconds()), bucket_count,
                     flags);
@@ -282,7 +284,7 @@
 HistogramBase* Histogram::FactoryGet(const char* name,
                                      Sample minimum,
                                      Sample maximum,
-                                     uint32_t bucket_count,
+                                     size_t bucket_count,
                                      int32_t flags) {
   return FactoryGet(std::string(name), minimum, maximum, bucket_count, flags);
 }
@@ -290,7 +292,7 @@
 HistogramBase* Histogram::FactoryTimeGet(const char* name,
                                          TimeDelta minimum,
                                          TimeDelta maximum,
-                                         uint32_t bucket_count,
+                                         size_t bucket_count,
                                          int32_t flags) {
   return FactoryTimeGet(std::string(name), minimum, maximum, bucket_count,
                         flags);
@@ -299,7 +301,7 @@
 HistogramBase* Histogram::FactoryMicrosecondsTimeGet(const char* name,
                                                      TimeDelta minimum,
                                                      TimeDelta maximum,
-                                                     uint32_t bucket_count,
+                                                     size_t bucket_count,
                                                      int32_t flags) {
   return FactoryMicrosecondsTimeGet(std::string(name), minimum, maximum,
                                     bucket_count, flags);
@@ -307,15 +309,13 @@
 
 std::unique_ptr<HistogramBase> Histogram::PersistentCreate(
     const char* name,
-    Sample minimum,
-    Sample maximum,
     const BucketRanges* ranges,
     const DelayedPersistentAllocation& counts,
     const DelayedPersistentAllocation& logged_counts,
     HistogramSamples::Metadata* meta,
     HistogramSamples::Metadata* logged_meta) {
-  return WrapUnique(new Histogram(name, minimum, maximum, ranges, counts,
-                                  logged_counts, meta, logged_meta));
+  return WrapUnique(
+      new Histogram(name, ranges, counts, logged_counts, meta, logged_meta));
 }
 
 // Calculate what range of values are held in each bucket.
@@ -342,6 +342,7 @@
   while (bucket_count > ++bucket_index) {
     double log_current;
     log_current = log(static_cast<double>(current));
+    debug::Alias(&log_current);
     // Calculate the count'th root of the range.
     log_ratio = (log_max - log_current) / (bucket_count - bucket_index);
     // See where the next bucket would start.
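+    // Equivalently, the next boundary is current * (max / current)^(1 / n),
+    // where n is the number of buckets still to be laid out, so the
+    // remaining buckets grow geometrically toward the declared maximum.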
@@ -362,9 +363,9 @@
 const int Histogram::kCommonRaceBasedCountMismatch = 5;
 
 uint32_t Histogram::FindCorruption(const HistogramSamples& samples) const {
-  int inconsistencies = NO_INCONSISTENCIES;
+  uint32_t inconsistencies = NO_INCONSISTENCIES;
   Sample previous_range = -1;  // Bottom range is always 0.
-  for (uint32_t index = 0; index < bucket_count(); ++index) {
+  for (size_t index = 0; index < bucket_count(); ++index) {
     int new_range = ranges(index);
     if (previous_range >= new_range)
       inconsistencies |= BUCKET_ORDER_ERROR;
@@ -409,40 +410,60 @@
   return ranges->range(ranges->bucket_count() - 1);
 }
 
-Sample Histogram::ranges(uint32_t i) const {
+Sample Histogram::ranges(size_t i) const {
   return bucket_ranges()->range(i);
 }
 
-uint32_t Histogram::bucket_count() const {
-  return static_cast<uint32_t>(bucket_ranges()->bucket_count());
+size_t Histogram::bucket_count() const {
+  return bucket_ranges()->bucket_count();
 }
 
 // static
 bool Histogram::InspectConstructionArguments(StringPiece name,
                                              Sample* minimum,
                                              Sample* maximum,
-                                             uint32_t* bucket_count) {
-  // Defensive code for backward compatibility.
-  if (*minimum < 1) {
-    DVLOG(1) << "Histogram: " << name << " has bad minimum: " << *minimum;
-    *minimum = 1;
-  }
-  if (*maximum >= kSampleType_MAX) {
-    DVLOG(1) << "Histogram: " << name << " has bad maximum: " << *maximum;
-    *maximum = kSampleType_MAX - 1;
-  }
-  if (*bucket_count >= kBucketCount_MAX) {
-    DVLOG(1) << "Histogram: " << name << " has bad bucket_count: "
-             << *bucket_count;
-    *bucket_count = kBucketCount_MAX - 1;
-  }
-
+                                             size_t* bucket_count) {
   bool check_okay = true;
 
+  // Checks below must be done after any min/max swap.
   if (*minimum > *maximum) {
+    DLOG(ERROR) << "Histogram: " << name << " has swapped minimum/maximum";
     check_okay = false;
     std::swap(*minimum, *maximum);
   }
+
+  // Defensive code for backward compatibility.
+  if (*minimum < 1) {
+    // TODO(crbug.com/1288842): Temporarily disabled during cleanup.
+    // DLOG(ERROR) << "Histogram: " << name << " has bad minimum: " << *minimum;
+    *minimum = 1;
+    if (*maximum < 1)
+      *maximum = 1;
+  }
+  if (*maximum >= kSampleType_MAX) {
+    DLOG(ERROR) << "Histogram: " << name << " has bad maximum: " << *maximum;
+    *maximum = kSampleType_MAX - 1;
+  }
+  if (*bucket_count > kBucketCount_MAX) {
+    UmaHistogramSparse("Histogram.TooManyBuckets.1000",
+                       static_cast<Sample>(HashMetricName(name)));
+
+    // Blink.UseCounter legitimately has more than 1000 entries in its enum.
+    if (!StartsWith(name, "Blink.UseCounter")) {
+      DLOG(ERROR) << "Histogram: " << name
+                  << " has bad bucket_count: " << *bucket_count << " (limit "
+                  << kBucketCount_MAX << ")";
+
+      // Assume it's a mistake and limit to 100 buckets, plus under and over.
+      // If the DCHECK doesn't alert the user then hopefully the small number
+      // will be obvious on the dashboard. If not, then it probably wasn't
+      // important.
+      *bucket_count = 102;
+      check_okay = false;
+    }
+  }
+
+  // Ensure parameters are sane.
   if (*maximum == *minimum) {
     check_okay = false;
     *maximum = *minimum + 1;
@@ -451,16 +472,11 @@
     check_okay = false;
     *bucket_count = 3;
   }
-  // Very high bucket counts are wasteful. Use a sparse histogram instead.
-  // Value of 10002 equals a user-supplied value of 10k + 2 overflow buckets.
-  constexpr uint32_t kMaxBucketCount = 10002;
-  if (*bucket_count > kMaxBucketCount) {
+  // The swap at the top of the function guarantees this cast is safe.
+  const size_t max_buckets = static_cast<size_t>(*maximum - *minimum + 2);
+  if (*bucket_count > max_buckets) {
     check_okay = false;
-    *bucket_count = kMaxBucketCount;
-  }
-  if (*bucket_count > static_cast<uint32_t>(*maximum - *minimum + 2)) {
-    check_okay = false;
-    *bucket_count = static_cast<uint32_t>(*maximum - *minimum + 2);
+    *bucket_count = max_buckets;
   }
 
   if (!check_okay) {
@@ -481,7 +497,7 @@
 
 bool Histogram::HasConstructionArguments(Sample expected_minimum,
                                          Sample expected_maximum,
-                                         uint32_t expected_bucket_count) const {
+                                         size_t expected_bucket_count) const {
   return (expected_bucket_count == bucket_count() &&
           expected_minimum == declared_min() &&
           expected_maximum == declared_max());
@@ -505,14 +521,30 @@
   }
   unlogged_samples_->Accumulate(value, count);
 
-  FindAndRunCallback(value);
+  if (UNLIKELY(StatisticsRecorder::have_active_callbacks()))
+    FindAndRunCallbacks(value);
 }
 
 std::unique_ptr<HistogramSamples> Histogram::SnapshotSamples() const {
   return SnapshotAllSamples();
 }
 
+std::unique_ptr<HistogramSamples> Histogram::SnapshotUnloggedSamples() const {
+  return SnapshotUnloggedSamplesImpl();
+}
+
+void Histogram::MarkSamplesAsLogged(const HistogramSamples& samples) {
+  // |final_delta_created_| only exists when DCHECK is on.
+#if DCHECK_IS_ON()
+  DCHECK(!final_delta_created_);
+#endif
+
+  unlogged_samples_->Subtract(samples);
+  logged_samples_->Add(samples);
+}
+
 std::unique_ptr<HistogramSamples> Histogram::SnapshotDelta() {
+  // |final_delta_created_| only exists when DCHECK is on.
 #if DCHECK_IS_ON()
   DCHECK(!final_delta_created_);
 #endif
@@ -529,14 +561,16 @@
   // vector: this way, the next snapshot will include any concurrent updates
   // missed by the current snapshot.
 
-  std::unique_ptr<HistogramSamples> snapshot = SnapshotUnloggedSamples();
-  unlogged_samples_->Subtract(*snapshot);
+  std::unique_ptr<HistogramSamples> snapshot =
+      std::make_unique<SampleVector>(unlogged_samples_->id(), bucket_ranges());
+  snapshot->Extract(*unlogged_samples_);
   logged_samples_->Add(*snapshot);
 
   return snapshot;
 }
 
 std::unique_ptr<HistogramSamples> Histogram::SnapshotFinalDelta() const {
+  // |final_delta_created_| only exists when DCHECK is on.
 #if DCHECK_IS_ON()
   DCHECK(!final_delta_created_);
   final_delta_created_ = true;
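
A hedged usage sketch of the delta machinery above (hypothetical call site): SnapshotDelta() is the one-shot form, while the new SnapshotUnloggedSamples()/MarkSamplesAsLogged() pair splits it into snapshot and commit, so samples stay unlogged if persisting fails in between:

    // One-shot: returns everything accumulated since the previous delta.
    std::unique_ptr<base::HistogramSamples> delta = histogram->SnapshotDelta();

    // Two-phase: snapshot first, commit after the samples are safely stored.
    std::unique_ptr<base::HistogramSamples> unlogged =
        histogram->SnapshotUnloggedSamples();
    // ... serialize/upload |unlogged| ...
    histogram->MarkSamplesAsLogged(*unlogged);
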
@@ -553,24 +587,9 @@
   return unlogged_samples_->AddFromPickle(iter);
 }
 
-// The following methods provide a graphical histogram display.
-void Histogram::WriteHTMLGraph(std::string* output) const {
-  // TBD(jar) Write a nice HTML bar chart, with divs an mouse-overs etc.
-  output->append("<PRE>");
-  WriteAsciiImpl(true, "<br>", output);
-  output->append("</PRE>");
-}
-
-void Histogram::WriteAscii(std::string* output) const {
-  WriteAsciiImpl(true, "\n", output);
-}
-
-void Histogram::ValidateHistogramContents() const {
-  CHECK(unlogged_samples_);
-  CHECK(unlogged_samples_->bucket_ranges());
-  CHECK(logged_samples_);
-  CHECK(logged_samples_->bucket_ranges());
-  CHECK_NE(0U, logged_samples_->id());
+base::Value::Dict Histogram::ToGraphDict() const {
+  std::unique_ptr<SampleVector> snapshot = SnapshotAllSamples();
+  return snapshot->ToGraphDict(histogram_name(), flags());
 }
 
 void Histogram::SerializeInfoImpl(Pickle* pickle) const {
@@ -579,58 +598,37 @@
   pickle->WriteInt(flags());
   pickle->WriteInt(declared_min());
   pickle->WriteInt(declared_max());
-  pickle->WriteUInt32(bucket_count());
+  // Limited to kBucketCount_MAX, which fits in a uint32_t.
+  pickle->WriteUInt32(static_cast<uint32_t>(bucket_count()));
   pickle->WriteUInt32(bucket_ranges()->checksum());
 }
 
-// TODO(bcwhite): Remove minimum/maximum parameters from here and call chain.
-Histogram::Histogram(const char* name,
-                     Sample minimum,
-                     Sample maximum,
-                     const BucketRanges* ranges)
+Histogram::Histogram(const char* name, const BucketRanges* ranges)
     : HistogramBase(name) {
-  DCHECK(ranges) << name << ": " << minimum << "-" << maximum;
-  unlogged_samples_.reset(new SampleVector(HashMetricName(name), ranges));
-  logged_samples_.reset(new SampleVector(unlogged_samples_->id(), ranges));
+  DCHECK(ranges) << name;
+  unlogged_samples_ =
+      std::make_unique<SampleVector>(HashMetricName(name), ranges);
+  logged_samples_ =
+      std::make_unique<SampleVector>(unlogged_samples_->id(), ranges);
 }
 
 Histogram::Histogram(const char* name,
-                     Sample minimum,
-                     Sample maximum,
                      const BucketRanges* ranges,
                      const DelayedPersistentAllocation& counts,
                      const DelayedPersistentAllocation& logged_counts,
                      HistogramSamples::Metadata* meta,
                      HistogramSamples::Metadata* logged_meta)
     : HistogramBase(name) {
-  DCHECK(ranges) << name << ": " << minimum << "-" << maximum;
-  unlogged_samples_.reset(
-      new PersistentSampleVector(HashMetricName(name), ranges, meta, counts));
-  logged_samples_.reset(new PersistentSampleVector(
-      unlogged_samples_->id(), ranges, logged_meta, logged_counts));
+  DCHECK(ranges) << name;
+  unlogged_samples_ = std::make_unique<PersistentSampleVector>(
+      HashMetricName(name), ranges, meta, counts);
+  logged_samples_ = std::make_unique<PersistentSampleVector>(
+      unlogged_samples_->id(), ranges, logged_meta, logged_counts);
 }
 
 Histogram::~Histogram() = default;
 
-bool Histogram::PrintEmptyBucket(uint32_t index) const {
-  return true;
-}
-
-// Use the actual bucket widths (like a linear histogram) until the widths get
-// over some transition value, and then use that transition width.  Exponentials
-// get so big so fast (and we don't expect to see a lot of entries in the large
-// buckets), so we need this to make it possible to see what is going on and
-// not have 0-graphical-height buckets.
-double Histogram::GetBucketSize(Count current, uint32_t i) const {
-  DCHECK_GT(ranges(i + 1), ranges(i));
-  static const double kTransitionWidth = 5;
-  double denominator = ranges(i + 1) - ranges(i);
-  if (denominator > kTransitionWidth)
-    denominator = kTransitionWidth;  // Stop trying to normalize.
-  return current/denominator;
-}
-
-const std::string Histogram::GetAsciiBucketRange(uint32_t i) const {
+const std::string Histogram::GetAsciiBucketRange(size_t i) const {
   return GetSimpleAsciiBucketRange(ranges(i));
 }
 
@@ -643,7 +641,7 @@
   int flags;
   int declared_min;
   int declared_max;
-  uint32_t bucket_count;
+  size_t bucket_count;
   uint32_t range_checksum;
 
   if (!ReadHistogramArguments(iter, &histogram_name, &flags, &declared_min,
@@ -665,149 +663,25 @@
 }
 
 std::unique_ptr<SampleVector> Histogram::SnapshotAllSamples() const {
-  std::unique_ptr<SampleVector> samples = SnapshotUnloggedSamples();
+  std::unique_ptr<SampleVector> samples = SnapshotUnloggedSamplesImpl();
   samples->Add(*logged_samples_);
   return samples;
 }
 
-std::unique_ptr<SampleVector> Histogram::SnapshotUnloggedSamples() const {
+std::unique_ptr<SampleVector> Histogram::SnapshotUnloggedSamplesImpl() const {
   std::unique_ptr<SampleVector> samples(
       new SampleVector(unlogged_samples_->id(), bucket_ranges()));
   samples->Add(*unlogged_samples_);
   return samples;
 }
 
-void Histogram::WriteAsciiImpl(bool graph_it,
-                               const std::string& newline,
-                               std::string* output) const {
-  // Get local (stack) copies of all effectively volatile class data so that we
-  // are consistent across our output activities.
-  std::unique_ptr<SampleVector> snapshot = SnapshotAllSamples();
-  Count sample_count = snapshot->TotalCount();
-
-  WriteAsciiHeader(*snapshot, sample_count, output);
-  output->append(newline);
-
-  // Prepare to normalize graphical rendering of bucket contents.
-  double max_size = 0;
-  if (graph_it)
-    max_size = GetPeakBucketSize(*snapshot);
-
-  // Calculate space needed to print bucket range numbers.  Leave room to print
-  // nearly the largest bucket range without sliding over the histogram.
-  uint32_t largest_non_empty_bucket = bucket_count() - 1;
-  while (0 == snapshot->GetCountAtIndex(largest_non_empty_bucket)) {
-    if (0 == largest_non_empty_bucket)
-      break;  // All buckets are empty.
-    --largest_non_empty_bucket;
-  }
-
-  // Calculate largest print width needed for any of our bucket range displays.
-  size_t print_width = 1;
-  for (uint32_t i = 0; i < bucket_count(); ++i) {
-    if (snapshot->GetCountAtIndex(i)) {
-      size_t width = GetAsciiBucketRange(i).size() + 1;
-      if (width > print_width)
-        print_width = width;
-    }
-  }
-
-  int64_t remaining = sample_count;
-  int64_t past = 0;
-  // Output the actual histogram graph.
-  for (uint32_t i = 0; i < bucket_count(); ++i) {
-    Count current = snapshot->GetCountAtIndex(i);
-    if (!current && !PrintEmptyBucket(i))
-      continue;
-    remaining -= current;
-    std::string range = GetAsciiBucketRange(i);
-    output->append(range);
-    for (size_t j = 0; range.size() + j < print_width + 1; ++j)
-      output->push_back(' ');
-    if (0 == current && i < bucket_count() - 1 &&
-        0 == snapshot->GetCountAtIndex(i + 1)) {
-      while (i < bucket_count() - 1 &&
-             0 == snapshot->GetCountAtIndex(i + 1)) {
-        ++i;
-      }
-      output->append("... ");
-      output->append(newline);
-      continue;  // No reason to plot emptiness.
-    }
-    double current_size = GetBucketSize(current, i);
-    if (graph_it)
-      WriteAsciiBucketGraph(current_size, max_size, output);
-    WriteAsciiBucketContext(past, current, remaining, i, output);
-    output->append(newline);
-    past += current;
-  }
-  DCHECK_EQ(sample_count, past);
-}
-
-double Histogram::GetPeakBucketSize(const SampleVectorBase& samples) const {
-  double max = 0;
-  for (uint32_t i = 0; i < bucket_count() ; ++i) {
-    double current_size = GetBucketSize(samples.GetCountAtIndex(i), i);
-    if (current_size > max)
-      max = current_size;
-  }
-  return max;
-}
-
-void Histogram::WriteAsciiHeader(const SampleVectorBase& samples,
-                                 Count sample_count,
-                                 std::string* output) const {
-  StringAppendF(output, "Histogram: %s recorded %d samples", histogram_name(),
-                sample_count);
-  if (sample_count == 0) {
-    DCHECK_EQ(samples.sum(), 0);
-  } else {
-    double mean = static_cast<float>(samples.sum()) / sample_count;
-    StringAppendF(output, ", mean = %.1f", mean);
-  }
-  if (flags())
-    StringAppendF(output, " (flags = 0x%x)", flags());
-}
-
-void Histogram::WriteAsciiBucketContext(const int64_t past,
-                                        const Count current,
-                                        const int64_t remaining,
-                                        const uint32_t i,
-                                        std::string* output) const {
-  double scaled_sum = (past + current + remaining) / 100.0;
-  WriteAsciiBucketValue(current, scaled_sum, output);
-  if (0 < i) {
-    double percentage = past / scaled_sum;
-    StringAppendF(output, " {%3.1f%%}", percentage);
-  }
-}
-
-void Histogram::GetParameters(DictionaryValue* params) const {
-  params->SetString("type", HistogramTypeToString(GetHistogramType()));
-  params->SetInteger("min", declared_min());
-  params->SetInteger("max", declared_max());
-  params->SetInteger("bucket_count", static_cast<int>(bucket_count()));
-}
-
-void Histogram::GetCountAndBucketData(Count* count,
-                                      int64_t* sum,
-                                      ListValue* buckets) const {
-  std::unique_ptr<SampleVector> snapshot = SnapshotAllSamples();
-  *count = snapshot->TotalCount();
-  *sum = snapshot->sum();
-  uint32_t index = 0;
-  for (uint32_t i = 0; i < bucket_count(); ++i) {
-    Sample count_at_index = snapshot->GetCountAtIndex(i);
-    if (count_at_index > 0) {
-      std::unique_ptr<DictionaryValue> bucket_value(new DictionaryValue());
-      bucket_value->SetInteger("low", ranges(i));
-      if (i != bucket_count() - 1)
-        bucket_value->SetInteger("high", ranges(i + 1));
-      bucket_value->SetInteger("count", count_at_index);
-      buckets->Set(index, std::move(bucket_value));
-      ++index;
-    }
-  }
+Value::Dict Histogram::GetParameters() const {
+  Value::Dict params;
+  params.Set("type", HistogramTypeToString(GetHistogramType()));
+  params.Set("min", declared_min());
+  params.Set("max", declared_max());
+  params.Set("bucket_count", static_cast<int>(bucket_count()));
+  return params;
 }
 
 //------------------------------------------------------------------------------
@@ -820,14 +694,21 @@
   Factory(const std::string& name,
           HistogramBase::Sample minimum,
           HistogramBase::Sample maximum,
-          uint32_t bucket_count,
+          size_t bucket_count,
           int32_t flags,
           const DescriptionPair* descriptions)
-    : Histogram::Factory(name, LINEAR_HISTOGRAM, minimum, maximum,
-                         bucket_count, flags) {
+      : Histogram::Factory(name,
+                           LINEAR_HISTOGRAM,
+                           minimum,
+                           maximum,
+                           bucket_count,
+                           flags) {
     descriptions_ = descriptions;
   }
 
+  Factory(const Factory&) = delete;
+  Factory& operator=(const Factory&) = delete;
+
  protected:
   BucketRanges* CreateRanges() override {
     BucketRanges* ranges = new BucketRanges(bucket_count_ + 1);
@@ -837,8 +718,7 @@
 
   std::unique_ptr<HistogramBase> HeapAlloc(
       const BucketRanges* ranges) override {
-    return WrapUnique(new LinearHistogram(GetPermanentName(name_), minimum_,
-                                          maximum_, ranges));
+    return WrapUnique(new LinearHistogram(GetPermanentName(*name_), ranges));
   }
 
   void FillHistogram(HistogramBase* base_histogram) override {
@@ -859,9 +739,7 @@
   }
 
  private:
-  const DescriptionPair* descriptions_;
-
-  DISALLOW_COPY_AND_ASSIGN(Factory);
+  raw_ptr<const DescriptionPair> descriptions_;
 };
 
 LinearHistogram::~LinearHistogram() = default;
@@ -869,7 +747,7 @@
 HistogramBase* LinearHistogram::FactoryGet(const std::string& name,
                                            Sample minimum,
                                            Sample maximum,
-                                           uint32_t bucket_count,
+                                           size_t bucket_count,
                                            int32_t flags) {
   return FactoryGetWithRangeDescription(name, minimum, maximum, bucket_count,
                                         flags, NULL);
@@ -878,8 +756,10 @@
 HistogramBase* LinearHistogram::FactoryTimeGet(const std::string& name,
                                                TimeDelta minimum,
                                                TimeDelta maximum,
-                                               uint32_t bucket_count,
+                                               size_t bucket_count,
                                                int32_t flags) {
+  DCHECK_LT(minimum.InMilliseconds(), std::numeric_limits<Sample>::max());
+  DCHECK_LT(maximum.InMilliseconds(), std::numeric_limits<Sample>::max());
   return FactoryGet(name, static_cast<Sample>(minimum.InMilliseconds()),
                     static_cast<Sample>(maximum.InMilliseconds()), bucket_count,
                     flags);
@@ -888,7 +768,7 @@
 HistogramBase* LinearHistogram::FactoryGet(const char* name,
                                            Sample minimum,
                                            Sample maximum,
-                                           uint32_t bucket_count,
+                                           size_t bucket_count,
                                            int32_t flags) {
   return FactoryGet(std::string(name), minimum, maximum, bucket_count, flags);
 }
@@ -896,7 +776,7 @@
 HistogramBase* LinearHistogram::FactoryTimeGet(const char* name,
                                                TimeDelta minimum,
                                                TimeDelta maximum,
-                                               uint32_t bucket_count,
+                                               size_t bucket_count,
                                                int32_t flags) {
   return FactoryTimeGet(std::string(name), minimum, maximum, bucket_count,
                         flags);
@@ -904,27 +784,41 @@
 
 std::unique_ptr<HistogramBase> LinearHistogram::PersistentCreate(
     const char* name,
-    Sample minimum,
-    Sample maximum,
     const BucketRanges* ranges,
     const DelayedPersistentAllocation& counts,
     const DelayedPersistentAllocation& logged_counts,
     HistogramSamples::Metadata* meta,
     HistogramSamples::Metadata* logged_meta) {
-  return WrapUnique(new LinearHistogram(name, minimum, maximum, ranges, counts,
-                                        logged_counts, meta, logged_meta));
+  return WrapUnique(new LinearHistogram(name, ranges, counts, logged_counts,
+                                        meta, logged_meta));
 }
 
 HistogramBase* LinearHistogram::FactoryGetWithRangeDescription(
     const std::string& name,
     Sample minimum,
     Sample maximum,
-    uint32_t bucket_count,
+    size_t bucket_count,
     int32_t flags,
     const DescriptionPair descriptions[]) {
+  // Originally, histograms were required to have at least one sample value
+  // plus underflow and overflow buckets. For single-entry enumerations,
+  // that one value is usually zero (which IS the underflow bucket)
+  // resulting in a |maximum| value of 1 (the exclusive upper-bound) and only
+  // the two outlier buckets. Handle this by making max==2 and buckets==3.
+  // This usually won't have any cost since the single-value-optimization
+  // will be used until the count exceeds 16 bits.
+  if (maximum == 1 && bucket_count == 2) {
+    maximum = 2;
+    bucket_count = 3;
+  }
+
   bool valid_arguments = Histogram::InspectConstructionArguments(
       name, &minimum, &maximum, &bucket_count);
-  DCHECK(valid_arguments);
+  DCHECK(valid_arguments) << name;
+  if (!valid_arguments) {
+    DLOG(ERROR) << "Histogram " << name << " dropped for invalid parameters.";
+    return DummyHistogram::GetInstance();
+  }
 
   return Factory(name, minimum, maximum, bucket_count, flags, descriptions)
       .Build();
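
A concrete (hypothetical) instance of the single-entry case handled above: an enumeration whose only valid value is 0 arrives as maximum = 1 and bucket_count = 2, and is widened to maximum = 2, bucket_count = 3 before validation:

    base::HistogramBase* h = base::LinearHistogram::FactoryGet(
        "Hypothetical.SingleEntryEnum", /*minimum=*/1, /*maximum=*/1,
        /*bucket_count=*/2, base::HistogramBase::kUmaTargetedHistogramFlag);
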
@@ -934,39 +828,19 @@
   return LINEAR_HISTOGRAM;
 }
 
-LinearHistogram::LinearHistogram(const char* name,
-                                 Sample minimum,
-                                 Sample maximum,
-                                 const BucketRanges* ranges)
-    : Histogram(name, minimum, maximum, ranges) {}
+LinearHistogram::LinearHistogram(const char* name, const BucketRanges* ranges)
+    : Histogram(name, ranges) {}
 
 LinearHistogram::LinearHistogram(
     const char* name,
-    Sample minimum,
-    Sample maximum,
     const BucketRanges* ranges,
     const DelayedPersistentAllocation& counts,
     const DelayedPersistentAllocation& logged_counts,
     HistogramSamples::Metadata* meta,
     HistogramSamples::Metadata* logged_meta)
-    : Histogram(name,
-                minimum,
-                maximum,
-                ranges,
-                counts,
-                logged_counts,
-                meta,
-                logged_meta) {}
+    : Histogram(name, ranges, counts, logged_counts, meta, logged_meta) {}
 
-double LinearHistogram::GetBucketSize(Count current, uint32_t i) const {
-  DCHECK_GT(ranges(i + 1), ranges(i));
-  // Adjacent buckets with different widths would have "surprisingly" many (few)
-  // samples in a histogram if we didn't normalize this way.
-  double denominator = ranges(i + 1) - ranges(i);
-  return current/denominator;
-}
-
-const std::string LinearHistogram::GetAsciiBucketRange(uint32_t i) const {
+const std::string LinearHistogram::GetAsciiBucketRange(size_t i) const {
   int range = ranges(i);
   BucketDescriptionMap::const_iterator it = bucket_description_.find(range);
   if (it == bucket_description_.end())
@@ -974,10 +848,6 @@
   return it->second;
 }
 
-bool LinearHistogram::PrintEmptyBucket(uint32_t index) const {
-  return bucket_description_.find(ranges(index)) == bucket_description_.end();
-}
-
 // static
 void LinearHistogram::InitializeBucketRanges(Sample minimum,
                                              Sample maximum,
@@ -989,7 +859,7 @@
   for (size_t i = 1; i < bucket_count; ++i) {
     double linear_range =
         (min * (bucket_count - 1 - i) + max * (i - 1)) / (bucket_count - 2);
-    uint32_t range = static_cast<Sample>(linear_range + 0.5);
+    auto range = static_cast<Sample>(linear_range + 0.5);
     ranges->set_range(i, range);
   }
   ranges->set_range(ranges->bucket_count(), HistogramBase::kSampleType_MAX);
@@ -1002,7 +872,7 @@
   int flags;
   int declared_min;
   int declared_max;
-  uint32_t bucket_count;
+  size_t bucket_count;
   uint32_t range_checksum;
 
   if (!ReadHistogramArguments(iter, &histogram_name, &flags, &declared_min,
@@ -1030,15 +900,27 @@
 ScaledLinearHistogram::ScaledLinearHistogram(const char* name,
                                              Sample minimum,
                                              Sample maximum,
-                                             uint32_t bucket_count,
+                                             size_t bucket_count,
                                              int32_t scale,
                                              int32_t flags)
-    : histogram_(static_cast<LinearHistogram*>(
-          LinearHistogram::FactoryGet(name,
-                                      minimum,
-                                      maximum,
-                                      bucket_count,
-                                      flags))),
+    : ScaledLinearHistogram(std::string(name),
+                            minimum,
+                            maximum,
+                            bucket_count,
+                            scale,
+                            flags) {}
+
+ScaledLinearHistogram::ScaledLinearHistogram(const std::string& name,
+                                             Sample minimum,
+                                             Sample maximum,
+                                             size_t bucket_count,
+                                             int32_t scale,
+                                             int32_t flags)
+    : histogram_(LinearHistogram::FactoryGet(name,
+                                             minimum,
+                                             maximum,
+                                             bucket_count,
+                                             flags)),
       scale_(scale) {
   DCHECK(histogram_);
   DCHECK_LT(1, scale);
@@ -1046,46 +928,57 @@
   CHECK_EQ(static_cast<Sample>(bucket_count), maximum - minimum + 2)
       << " ScaledLinearHistogram requires buckets of size 1";
 
-  remainders_.resize(histogram_->bucket_count(), 0);
+  // Normally, |histogram_| should have type LINEAR_HISTOGRAM or be a type
+  // derived from it. However, if it's expired, it will be DUMMY_HISTOGRAM.
+  if (histogram_->GetHistogramType() == DUMMY_HISTOGRAM)
+    return;
+
+  DCHECK_EQ(histogram_->GetHistogramType(), LINEAR_HISTOGRAM);
+  LinearHistogram* histogram = static_cast<LinearHistogram*>(histogram_);
+  remainders_.resize(histogram->bucket_count(), 0);
 }
 
 ScaledLinearHistogram::~ScaledLinearHistogram() = default;
 
-void ScaledLinearHistogram::AddScaledCount(Sample value, int count) {
+void ScaledLinearHistogram::AddScaledCount(Sample value, int64_t count) {
+  if (histogram_->GetHistogramType() == DUMMY_HISTOGRAM)
+    return;
   if (count == 0)
     return;
   if (count < 0) {
     NOTREACHED();
     return;
   }
-  const int32_t max_value =
-      static_cast<int32_t>(histogram_->bucket_count() - 1);
-  if (value > max_value)
-    value = max_value;
-  if (value < 0)
-    value = 0;
 
-  int scaled_count = count / scale_;
-  subtle::Atomic32 remainder = count - scaled_count * scale_;
+  DCHECK_EQ(histogram_->GetHistogramType(), LINEAR_HISTOGRAM);
+  LinearHistogram* histogram = static_cast<LinearHistogram*>(histogram_);
+  const auto max_value = static_cast<Sample>(histogram->bucket_count() - 1);
+  value = base::clamp(value, 0, max_value);
+
+  int64_t scaled_count = count / scale_;
+  subtle::Atomic32 remainder = static_cast<int>(count - scaled_count * scale_);
 
   // ScaledLinearHistogram currently requires 1-to-1 mappings between value
   // and bucket which alleviates the need to do a bucket lookup here (something
   // that is internal to the HistogramSamples object).
   if (remainder > 0) {
-    remainder =
-        subtle::NoBarrier_AtomicIncrement(&remainders_[value], remainder);
+    remainder = subtle::NoBarrier_AtomicIncrement(
+        &remainders_[static_cast<size_t>(value)], remainder);
     // If remainder passes 1/2 scale, increment main count (thus rounding up).
     // The remainder is decremented by the full scale, though, which will
     // cause it to go negative and thus require another increase by the full
     // scale amount before another bump of the scaled count.
     if (remainder >= scale_ / 2) {
       scaled_count += 1;
-      subtle::NoBarrier_AtomicIncrement(&remainders_[value], -scale_);
+      subtle::NoBarrier_AtomicIncrement(
+          &remainders_[static_cast<size_t>(value)], -scale_);
     }
   }
 
-  if (scaled_count > 0)
-    histogram_->AddCount(value, scaled_count);
+  if (scaled_count > 0) {
+    DCHECK_LE(scaled_count, std::numeric_limits<int>::max());
+    histogram->AddCount(value, static_cast<int>(scaled_count));
+  }
 }
 
 //------------------------------------------------------------------------------
@@ -1097,6 +990,9 @@
   Factory(const std::string& name, int32_t flags)
     : Histogram::Factory(name, BOOLEAN_HISTOGRAM, 1, 2, 3, flags) {}
 
+  Factory(const Factory&) = delete;
+  Factory& operator=(const Factory&) = delete;
+
  protected:
   BucketRanges* CreateRanges() override {
     BucketRanges* ranges = new BucketRanges(3 + 1);
@@ -1106,11 +1002,8 @@
 
   std::unique_ptr<HistogramBase> HeapAlloc(
       const BucketRanges* ranges) override {
-    return WrapUnique(new BooleanHistogram(GetPermanentName(name_), ranges));
+    return WrapUnique(new BooleanHistogram(GetPermanentName(*name_), ranges));
   }
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(Factory);
 };
 
 HistogramBase* BooleanHistogram::FactoryGet(const std::string& name,
@@ -1138,7 +1031,7 @@
 }
 
 BooleanHistogram::BooleanHistogram(const char* name, const BucketRanges* ranges)
-    : LinearHistogram(name, 1, 2, ranges) {}
+    : LinearHistogram(name, ranges) {}
 
 BooleanHistogram::BooleanHistogram(
     const char* name,
@@ -1147,21 +1040,14 @@
     const DelayedPersistentAllocation& logged_counts,
     HistogramSamples::Metadata* meta,
     HistogramSamples::Metadata* logged_meta)
-    : LinearHistogram(name,
-                      1,
-                      2,
-                      ranges,
-                      counts,
-                      logged_counts,
-                      meta,
-                      logged_meta) {}
+    : LinearHistogram(name, ranges, counts, logged_counts, meta, logged_meta) {}
 
 HistogramBase* BooleanHistogram::DeserializeInfoImpl(PickleIterator* iter) {
   std::string histogram_name;
   int flags;
   int declared_min;
   int declared_max;
-  uint32_t bucket_count;
+  size_t bucket_count;
   uint32_t range_checksum;
 
   if (!ReadHistogramArguments(iter, &histogram_name, &flags, &declared_min,
@@ -1194,17 +1080,20 @@
     custom_ranges_ = custom_ranges;
   }
 
+  Factory(const Factory&) = delete;
+  Factory& operator=(const Factory&) = delete;
+
  protected:
   BucketRanges* CreateRanges() override {
     // Remove the duplicates in the custom ranges array.
     std::vector<int> ranges = *custom_ranges_;
     ranges.push_back(0);  // Ensure we have a zero value.
     ranges.push_back(HistogramBase::kSampleType_MAX);
-    std::sort(ranges.begin(), ranges.end());
-    ranges.erase(std::unique(ranges.begin(), ranges.end()), ranges.end());
+    ranges::sort(ranges);
+    ranges.erase(ranges::unique(ranges), ranges.end());
 
     BucketRanges* bucket_ranges = new BucketRanges(ranges.size());
-    for (uint32_t i = 0; i < ranges.size(); i++) {
+    for (size_t i = 0; i < ranges.size(); i++) {
       bucket_ranges->set_range(i, ranges[i]);
     }
     bucket_ranges->ResetChecksum();
@@ -1213,13 +1102,11 @@
 
   std::unique_ptr<HistogramBase> HeapAlloc(
       const BucketRanges* ranges) override {
-    return WrapUnique(new CustomHistogram(GetPermanentName(name_), ranges));
+    return WrapUnique(new CustomHistogram(GetPermanentName(*name_), ranges));
   }
 
  private:
-  const std::vector<Sample>* custom_ranges_;
-
-  DISALLOW_COPY_AND_ASSIGN(Factory);
+  raw_ptr<const std::vector<Sample>> custom_ranges_;
 };
 
 HistogramBase* CustomHistogram::FactoryGet(
@@ -1268,10 +1155,7 @@
 }
 
 CustomHistogram::CustomHistogram(const char* name, const BucketRanges* ranges)
-    : Histogram(name,
-                ranges->range(1),
-                ranges->range(ranges->bucket_count() - 1),
-                ranges) {}
+    : Histogram(name, ranges) {}
 
 CustomHistogram::CustomHistogram(
     const char* name,
@@ -1280,37 +1164,24 @@
     const DelayedPersistentAllocation& logged_counts,
     HistogramSamples::Metadata* meta,
     HistogramSamples::Metadata* logged_meta)
-    : Histogram(name,
-                ranges->range(1),
-                ranges->range(ranges->bucket_count() - 1),
-                ranges,
-                counts,
-                logged_counts,
-                meta,
-                logged_meta) {}
+    : Histogram(name, ranges, counts, logged_counts, meta, logged_meta) {}
 
 void CustomHistogram::SerializeInfoImpl(Pickle* pickle) const {
   Histogram::SerializeInfoImpl(pickle);
 
   // Serialize ranges. First and last ranges are always 0 and INT_MAX, so don't
   // write them.
-  for (uint32_t i = 1; i < bucket_ranges()->bucket_count(); ++i)
+  for (size_t i = 1; i < bucket_ranges()->bucket_count(); ++i)
     pickle->WriteInt(bucket_ranges()->range(i));
 }
 
-double CustomHistogram::GetBucketSize(Count current, uint32_t i) const {
-  // If this is a histogram of enum values, normalizing the bucket count
-  // by the bucket range is not helpful, so just return the bucket count.
-  return current;
-}
-
 // static
 HistogramBase* CustomHistogram::DeserializeInfoImpl(PickleIterator* iter) {
   std::string histogram_name;
   int flags;
   int declared_min;
   int declared_max;
-  uint32_t bucket_count;
+  size_t bucket_count;
   uint32_t range_checksum;
 
   if (!ReadHistogramArguments(iter, &histogram_name, &flags, &declared_min,
@@ -1321,8 +1192,8 @@
   // First and last ranges are not serialized.
   std::vector<Sample> sample_ranges(bucket_count - 1);
 
-  for (uint32_t i = 0; i < sample_ranges.size(); ++i) {
-    if (!iter->ReadInt(&sample_ranges[i]))
+  for (Sample& sample : sample_ranges) {
+    if (!iter->ReadInt(&sample))
       return nullptr;
   }
 
@@ -1342,8 +1213,7 @@
 bool CustomHistogram::ValidateCustomRanges(
     const std::vector<Sample>& custom_ranges) {
   bool has_valid_range = false;
-  for (uint32_t i = 0; i < custom_ranges.size(); i++) {
-    Sample sample = custom_ranges[i];
+  for (Sample sample : custom_ranges) {
     if (sample < 0 || sample > HistogramBase::kSampleType_MAX - 1)
       return false;
     if (sample != 0)
diff --git a/base/metrics/histogram.h b/base/metrics/histogram.h
index 9837bb6..ce4a65a 100644
--- a/base/metrics/histogram.h
+++ b/base/metrics/histogram.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -64,6 +64,9 @@
 #ifndef BASE_METRICS_HISTOGRAM_H_
 #define BASE_METRICS_HISTOGRAM_H_
 
+#include <stddef.h>
+#include <stdint.h>
+
 #include <map>
 #include <memory>
 #include <string>
@@ -72,15 +75,15 @@
 #include "base/base_export.h"
 #include "base/compiler_specific.h"
 #include "base/containers/span.h"
+#include "base/dcheck_is_on.h"
 #include "base/gtest_prod_util.h"
-#include "base/logging.h"
-#include "base/macros.h"
+#include "base/memory/raw_ptr.h"
 #include "base/metrics/bucket_ranges.h"
 #include "base/metrics/histogram_base.h"
 #include "base/metrics/histogram_samples.h"
 #include "base/strings/string_piece.h"
 #include "base/time/time.h"
-#include "starboard/types.h"
+#include "base/values.h"
 
 namespace base {
 
@@ -97,11 +100,17 @@
 
 class BASE_EXPORT Histogram : public HistogramBase {
  public:
-  // Initialize maximum number of buckets in histograms as 16,384.
-  static const uint32_t kBucketCount_MAX;
+  // Initialize maximum number of buckets in histograms as 1000, plus over and
+  // under.  This must be a value that fits in a uint32_t (since that's how we
+  // serialize bucket counts) as well as a Sample (since samples can be up to
+  // this value).
+  static constexpr size_t kBucketCount_MAX = 1002;
 
   typedef std::vector<Count> Counts;
 
+  Histogram(const Histogram&) = delete;
+  Histogram& operator=(const Histogram&) = delete;
+
   ~Histogram() override;
 
   //----------------------------------------------------------------------------
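
The two constraints stated in the kBucketCount_MAX comment above could be spelled out as compile-time checks; a hedged sketch, not part of the import:

    static_assert(base::Histogram::kBucketCount_MAX <=
                      std::numeric_limits<uint32_t>::max(),
                  "bucket counts are serialized as uint32_t");
    static_assert(base::Histogram::kBucketCount_MAX <=
                      static_cast<size_t>(std::numeric_limits<int32_t>::max()),
                  "bucket counts must also be representable as a Sample");
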
@@ -117,17 +126,17 @@
   static HistogramBase* FactoryGet(const std::string& name,
                                    Sample minimum,
                                    Sample maximum,
-                                   uint32_t bucket_count,
+                                   size_t bucket_count,
                                    int32_t flags);
   static HistogramBase* FactoryTimeGet(const std::string& name,
                                        base::TimeDelta minimum,
                                        base::TimeDelta maximum,
-                                       uint32_t bucket_count,
+                                       size_t bucket_count,
                                        int32_t flags);
   static HistogramBase* FactoryMicrosecondsTimeGet(const std::string& name,
                                                    base::TimeDelta minimum,
                                                    base::TimeDelta maximum,
-                                                   uint32_t bucket_count,
+                                                   size_t bucket_count,
                                                    int32_t flags);
 
   // Overloads of the above functions that take a const char* |name| param, to
@@ -136,24 +145,22 @@
   static HistogramBase* FactoryGet(const char* name,
                                    Sample minimum,
                                    Sample maximum,
-                                   uint32_t bucket_count,
+                                   size_t bucket_count,
                                    int32_t flags);
   static HistogramBase* FactoryTimeGet(const char* name,
                                        base::TimeDelta minimum,
                                        base::TimeDelta maximum,
-                                       uint32_t bucket_count,
+                                       size_t bucket_count,
                                        int32_t flags);
   static HistogramBase* FactoryMicrosecondsTimeGet(const char* name,
                                                    base::TimeDelta minimum,
                                                    base::TimeDelta maximum,
-                                                   uint32_t bucket_count,
+                                                   size_t bucket_count,
                                                    int32_t flags);
 
   // Create a histogram using data in persistent storage.
   static std::unique_ptr<HistogramBase> PersistentCreate(
       const char* name,
-      Sample minimum,
-      Sample maximum,
       const BucketRanges* ranges,
       const DelayedPersistentAllocation& counts,
       const DelayedPersistentAllocation& logged_counts,
@@ -187,40 +194,35 @@
   const BucketRanges* bucket_ranges() const;
   Sample declared_min() const;
   Sample declared_max() const;
-  virtual Sample ranges(uint32_t i) const;
-  virtual uint32_t bucket_count() const;
+  virtual Sample ranges(size_t i) const;
+  virtual size_t bucket_count() const;
 
   // This function validates histogram construction arguments. It returns false
   // if some of the arguments are bad but also corrects them so they should
   // function on non-dcheck builds without crashing.
   // Note: Currently it allows some bad input, e.g. 0 as minimum, but silently
   // converts it to good input: 1.
-  // TODO(bcwhite): Use false returns to create "sink" histograms so that bad
-  // data doesn't create confusion on the servers.
   static bool InspectConstructionArguments(StringPiece name,
                                            Sample* minimum,
                                            Sample* maximum,
-                                           uint32_t* bucket_count);
+                                           size_t* bucket_count);
 
   // HistogramBase implementation:
   uint64_t name_hash() const override;
   HistogramType GetHistogramType() const override;
   bool HasConstructionArguments(Sample expected_minimum,
                                 Sample expected_maximum,
-                                uint32_t expected_bucket_count) const override;
+                                size_t expected_bucket_count) const override;
   void Add(Sample value) override;
   void AddCount(Sample value, int count) override;
   std::unique_ptr<HistogramSamples> SnapshotSamples() const override;
+  std::unique_ptr<HistogramSamples> SnapshotUnloggedSamples() const override;
+  void MarkSamplesAsLogged(const HistogramSamples& samples) final;
   std::unique_ptr<HistogramSamples> SnapshotDelta() override;
   std::unique_ptr<HistogramSamples> SnapshotFinalDelta() const override;
   void AddSamples(const HistogramSamples& samples) override;
   bool AddSamplesFromPickle(base::PickleIterator* iter) override;
-  void WriteHTMLGraph(std::string* output) const override;
-  void WriteAscii(std::string* output) const override;
-
-  // Validates the histogram contents and CHECKs on errors.
-  // TODO(bcwhite): Remove this after https://crbug/836875.
-  void ValidateHistogramContents() const override;
+  base::Value::Dict ToGraphDict() const override;
 
  protected:
   // This class, defined entirely within the .cc file, contains all the
@@ -232,10 +234,7 @@
 
   // |ranges| should contain the underflow and overflow buckets. See top
   // comments for example.
-  Histogram(const char* name,
-            Sample minimum,
-            Sample maximum,
-            const BucketRanges* ranges);
+  Histogram(const char* name, const BucketRanges* ranges);
 
   // Traditionally, histograms allocate their own memory for the bucket
   // vector but "shared" histograms use memory regions allocated from a
@@ -244,8 +243,6 @@
   // of this object. Practically, this memory is never released until the
   // process exits and the OS cleans it up.
   Histogram(const char* name,
-            Sample minimum,
-            Sample maximum,
             const BucketRanges* ranges,
             const DelayedPersistentAllocation& counts,
             const DelayedPersistentAllocation& logged_counts,
@@ -255,20 +252,15 @@
   // HistogramBase implementation:
   void SerializeInfoImpl(base::Pickle* pickle) const override;
 
-  // Method to override to skip the display of the i'th bucket if it's empty.
-  virtual bool PrintEmptyBucket(uint32_t index) const;
-
-  // Get normalized size, relative to the ranges(i).
-  virtual double GetBucketSize(Count current, uint32_t i) const;
-
   // Return a string description of what goes in a given bucket.
   // Most commonly this is the numeric value, but in derived classes it may
   // be a name (or string description) given to the bucket.
-  virtual const std::string GetAsciiBucketRange(uint32_t it) const;
+  virtual const std::string GetAsciiBucketRange(size_t it) const;
 
  private:
   // Allow tests to corrupt our innards for testing purposes.
   friend class HistogramTest;
+  friend class HistogramThreadsafeTest;
   FRIEND_TEST_ALL_PREFIXES(HistogramTest, BoundsTest);
   FRIEND_TEST_ALL_PREFIXES(HistogramTest, BucketPlacementTest);
   FRIEND_TEST_ALL_PREFIXES(HistogramTest, CorruptSampleCounts);
@@ -285,38 +277,14 @@
   // internal use.
   std::unique_ptr<SampleVector> SnapshotAllSamples() const;
 
-  // Create a copy of unlogged samples.
-  std::unique_ptr<SampleVector> SnapshotUnloggedSamples() const;
+  // Returns a copy of unlogged samples as the underlying SampleVector class,
+  // instead of the HistogramSamples base class. Used for tests and to avoid
+  // virtual dispatch from some callsites.
+  std::unique_ptr<SampleVector> SnapshotUnloggedSamplesImpl() const;
 
-  //----------------------------------------------------------------------------
-  // Helpers for emitting Ascii graphic.  Each method appends data to output.
-
-  void WriteAsciiImpl(bool graph_it,
-                      const std::string& newline,
-                      std::string* output) const;
-
-  // Find out how large (graphically) the largest bucket will appear to be.
-  double GetPeakBucketSize(const SampleVectorBase& samples) const;
-
-  // Write a common header message describing this histogram.
-  void WriteAsciiHeader(const SampleVectorBase& samples,
-                        Count sample_count,
-                        std::string* output) const;
-
-  // Write information about previous, current, and next buckets.
-  // Information such as cumulative percentage, etc.
-  void WriteAsciiBucketContext(const int64_t past,
-                               const Count current,
-                               const int64_t remaining,
-                               const uint32_t i,
-                               std::string* output) const;
-
-  // WriteJSON calls these.
-  void GetParameters(DictionaryValue* params) const override;
-
-  void GetCountAndBucketData(Count* count,
-                             int64_t* sum,
-                             ListValue* buckets) const override;
+  // Returns the type, min, max, and bucket count information of the histogram.
+  Value::Dict GetParameters() const override;
 
   // Samples that have not yet been logged with SnapshotDelta().
   std::unique_ptr<SampleVectorBase> unlogged_samples_;
@@ -329,8 +297,6 @@
   // used to DCHECK that a final delta is not created multiple times.
   mutable bool final_delta_created_ = false;
 #endif
-
-  DISALLOW_COPY_AND_ASSIGN(Histogram);
 };
 
 //------------------------------------------------------------------------------
@@ -339,6 +305,9 @@
 // buckets.
 class BASE_EXPORT LinearHistogram : public Histogram {
  public:
+  LinearHistogram(const LinearHistogram&) = delete;
+  LinearHistogram& operator=(const LinearHistogram&) = delete;
+
   ~LinearHistogram() override;
 
   /* minimum should start from 1. 0 as a minimum is invalid. 0 is an implicit
@@ -346,12 +315,12 @@
   static HistogramBase* FactoryGet(const std::string& name,
                                    Sample minimum,
                                    Sample maximum,
-                                   uint32_t bucket_count,
+                                   size_t bucket_count,
                                    int32_t flags);
   static HistogramBase* FactoryTimeGet(const std::string& name,
                                        TimeDelta minimum,
                                        TimeDelta maximum,
-                                       uint32_t bucket_count,
+                                       size_t bucket_count,
                                        int32_t flags);
 
   // Overloads of the above two functions that take a const char* |name| param,
@@ -360,19 +329,17 @@
   static HistogramBase* FactoryGet(const char* name,
                                    Sample minimum,
                                    Sample maximum,
-                                   uint32_t bucket_count,
+                                   size_t bucket_count,
                                    int32_t flags);
   static HistogramBase* FactoryTimeGet(const char* name,
                                        TimeDelta minimum,
                                        TimeDelta maximum,
-                                       uint32_t bucket_count,
+                                       size_t bucket_count,
                                        int32_t flags);
 
   // Create a histogram using data in persistent storage.
   static std::unique_ptr<HistogramBase> PersistentCreate(
       const char* name,
-      Sample minimum,
-      Sample maximum,
       const BucketRanges* ranges,
       const DelayedPersistentAllocation& counts,
       const DelayedPersistentAllocation& logged_counts,
@@ -393,7 +360,7 @@
       const std::string& name,
       Sample minimum,
       Sample maximum,
-      uint32_t bucket_count,
+      size_t bucket_count,
       int32_t flags,
       const DescriptionPair descriptions[]);
 
@@ -407,29 +374,18 @@
  protected:
   class Factory;
 
-  LinearHistogram(const char* name,
-                  Sample minimum,
-                  Sample maximum,
-                  const BucketRanges* ranges);
+  LinearHistogram(const char* name, const BucketRanges* ranges);
 
   LinearHistogram(const char* name,
-                  Sample minimum,
-                  Sample maximum,
                   const BucketRanges* ranges,
                   const DelayedPersistentAllocation& counts,
                   const DelayedPersistentAllocation& logged_counts,
                   HistogramSamples::Metadata* meta,
                   HistogramSamples::Metadata* logged_meta);
 
-  double GetBucketSize(Count current, uint32_t i) const override;
-
   // If we have a description for a bucket, then return that.  Otherwise
   // let parent class provide a (numeric) description.
-  const std::string GetAsciiBucketRange(uint32_t i) const override;
-
-  // Skip printing of name for numeric range if we have a name (and if this is
-  // an empty bucket).
-  bool PrintEmptyBucket(uint32_t index) const override;
+  const std::string GetAsciiBucketRange(size_t i) const override;
 
  private:
   friend BASE_EXPORT HistogramBase* DeserializeHistogramInfo(
@@ -441,8 +397,6 @@
   // to provide a description.
   typedef std::map<Sample, std::string> BucketDescriptionMap;
   BucketDescriptionMap bucket_description_;
-
-  DISALLOW_COPY_AND_ASSIGN(LinearHistogram);
 };
 
 //------------------------------------------------------------------------------
@@ -464,24 +418,35 @@
   ScaledLinearHistogram(const char* name,
                         Sample minimum,
                         Sample maximum,
-                        uint32_t bucket_count,
+                        size_t bucket_count,
                         int32_t scale,
                         int32_t flags);
+  ScaledLinearHistogram(const std::string& name,
+                        Sample minimum,
+                        Sample maximum,
+                        size_t bucket_count,
+                        int32_t scale,
+                        int32_t flags);
+
+  ScaledLinearHistogram(const ScaledLinearHistogram&) = delete;
+  ScaledLinearHistogram& operator=(const ScaledLinearHistogram&) = delete;
 
   ~ScaledLinearHistogram();
 
   // Like AddCount() but actually accumulates |count|/|scale| and increments
   // the accumulated remainder by |count|%|scale|. An additional increment
   // is done when the remainder has grown sufficiently large.
-  void AddScaledCount(Sample value, int count);
+  // The value after scaling must fit into a 32-bit signed integer.
+  void AddScaledCount(Sample value, int64_t count);
 
   int32_t scale() const { return scale_; }
-  LinearHistogram* histogram() { return histogram_; }
+  HistogramBase* histogram() { return histogram_; }
 
  private:
   // Pointer to the underlying histogram. Ownership of it remains with
-  // the statistics-recorder.
-  LinearHistogram* const histogram_;
+  // the statistics-recorder. This is typed as HistogramBase because it may be a
+  // DummyHistogram if expired.
+  const raw_ptr<HistogramBase> histogram_;
 
   // The scale factor of the sample counts.
   const int32_t scale_;
@@ -490,8 +455,6 @@
   // may be negative as the scaled count is actually bumped once the
   // remainder is 1/2 way to the scale value (thus "rounding").
   std::vector<AtomicCount> remainders_;
-
-  DISALLOW_COPY_AND_ASSIGN(ScaledLinearHistogram);
 };
 
 //------------------------------------------------------------------------------
@@ -506,6 +469,9 @@
   // call sites.
   static HistogramBase* FactoryGet(const char* name, int32_t flags);
 
+  BooleanHistogram(const BooleanHistogram&) = delete;
+  BooleanHistogram& operator=(const BooleanHistogram&) = delete;
+
   // Create a histogram using data in persistent storage.
   static std::unique_ptr<HistogramBase> PersistentCreate(
       const char* name,
@@ -532,8 +498,6 @@
   friend BASE_EXPORT HistogramBase* DeserializeHistogramInfo(
       base::PickleIterator* iter);
   static HistogramBase* DeserializeInfoImpl(base::PickleIterator* iter);
-
-  DISALLOW_COPY_AND_ASSIGN(BooleanHistogram);
 };
 
 //------------------------------------------------------------------------------
@@ -556,6 +520,9 @@
                                    const std::vector<Sample>& custom_ranges,
                                    int32_t flags);
 
+  CustomHistogram(const CustomHistogram&) = delete;
+  CustomHistogram& operator=(const CustomHistogram&) = delete;
+
   // Create a histogram using data in persistent storage.
   static std::unique_ptr<HistogramBase> PersistentCreate(
       const char* name,
@@ -591,16 +558,12 @@
   // HistogramBase implementation:
   void SerializeInfoImpl(base::Pickle* pickle) const override;
 
-  double GetBucketSize(Count current, uint32_t i) const override;
-
  private:
   friend BASE_EXPORT HistogramBase* DeserializeHistogramInfo(
       base::PickleIterator* iter);
   static HistogramBase* DeserializeInfoImpl(base::PickleIterator* iter);
 
   static bool ValidateCustomRanges(const std::vector<Sample>& custom_ranges);
-
-  DISALLOW_COPY_AND_ASSIGN(CustomHistogram);
 };
 
 }  // namespace base
diff --git a/base/metrics/histogram_base.cc b/base/metrics/histogram_base.cc
index ba4588a..3ec332a 100644
--- a/base/metrics/histogram_base.cc
+++ b/base/metrics/histogram_base.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -10,14 +10,15 @@
 #include <set>
 #include <utility>
 
+#include "base/check_op.h"
 #include "base/json/json_string_value_serializer.h"
-#include "base/lazy_instance.h"
-#include "base/logging.h"
 #include "base/metrics/histogram.h"
 #include "base/metrics/histogram_macros.h"
 #include "base/metrics/histogram_samples.h"
 #include "base/metrics/sparse_histogram.h"
 #include "base/metrics/statistics_recorder.h"
+#include "base/no_destructor.h"
+#include "base/notreached.h"
 #include "base/numerics/safe_conversions.h"
 #include "base/pickle.h"
 #include "base/process/process_handle.h"
@@ -25,7 +26,6 @@
 #include "base/strings/stringprintf.h"
 #include "base/synchronization/lock.h"
 #include "base/values.h"
-#include "starboard/types.h"
 
 namespace base {
 
@@ -69,6 +69,19 @@
   }
 }
 
+HistogramBase::CountAndBucketData::CountAndBucketData(Count count,
+                                                      int64_t sum,
+                                                      Value::List buckets)
+    : count(count), sum(sum), buckets(std::move(buckets)) {}
+
+HistogramBase::CountAndBucketData::~CountAndBucketData() = default;
+
+HistogramBase::CountAndBucketData::CountAndBucketData(
+    CountAndBucketData&& other) = default;
+
+HistogramBase::CountAndBucketData& HistogramBase::CountAndBucketData::operator=(
+    CountAndBucketData&& other) = default;
+
 const HistogramBase::Sample HistogramBase::kSampleType_MAX = INT_MAX;
 
 HistogramBase::HistogramBase(const char* name)
@@ -77,30 +90,36 @@
 HistogramBase::~HistogramBase() = default;
 
 void HistogramBase::CheckName(const StringPiece& name) const {
-  DCHECK_EQ(StringPiece(histogram_name()), name);
+  DCHECK_EQ(StringPiece(histogram_name()), name)
+      << "Provided histogram name doesn't match instance name. Are you using a "
+         "dynamic string in a macro?";
 }
 
 void HistogramBase::SetFlags(int32_t flags) {
-  HistogramBase::Count old_flags = subtle::NoBarrier_Load(&flags_);
-  subtle::NoBarrier_Store(&flags_, old_flags | flags);
+  flags_.fetch_or(flags, std::memory_order_relaxed);
 }
 
 void HistogramBase::ClearFlags(int32_t flags) {
-  HistogramBase::Count old_flags = subtle::NoBarrier_Load(&flags_);
-  subtle::NoBarrier_Store(&flags_, old_flags & ~flags);
+  flags_.fetch_and(~flags, std::memory_order_relaxed);
+}
+
+bool HistogramBase::HasFlags(int32_t flags) const {
+  // Check this->flags() is a superset of |flags|, i.e. every flag in |flags| is
+  // included.
+  return (this->flags() & flags) == flags;
 }
 
 void HistogramBase::AddScaled(Sample value, int count, int scale) {
-  DCHECK_LT(0, scale);
+  DCHECK_GT(scale, 0);
 
   // Convert raw count and probabilistically round up/down if the remainder
   // is more than a random number [0, scale). This gives a more accurate
   // count when there are a large number of records. RandInt is "inclusive",
   // hence the -1 for the max value.
-  int64_t count_scaled = count / scale;
+  int count_scaled = count / scale;
   if (count - (count_scaled * scale) > base::RandInt(0, scale - 1))
-    count_scaled += 1;
-  if (count_scaled == 0)
+    ++count_scaled;
+  if (count_scaled <= 0)
     return;
 
   AddCount(value, count_scaled);
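
The probabilistic rounding above, traced for a hypothetical AddScaled(value, 25, 10):

    // count_scaled = 25 / 10 = 2; leftover = 25 - 20 = 5.
    // RandInt(0, 9) returns r; the count is bumped to 3 when r < 5, i.e. with
    // probability 0.5, so the expected recorded count is 2.5 == 25 / 10 and
    // the rounding is unbiased over many records.
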
@@ -144,43 +163,68 @@
 
 void HistogramBase::WriteJSON(std::string* output,
                               JSONVerbosityLevel verbosity_level) const {
-  Count count;
-  int64_t sum;
-  std::unique_ptr<ListValue> buckets(new ListValue());
-  GetCountAndBucketData(&count, &sum, buckets.get());
-  std::unique_ptr<DictionaryValue> parameters(new DictionaryValue());
-  GetParameters(parameters.get());
+  CountAndBucketData count_and_bucket_data = GetCountAndBucketData();
+  Value::Dict parameters = GetParameters();
 
   JSONStringValueSerializer serializer(output);
-  DictionaryValue root;
-  root.SetString("name", histogram_name());
-  root.SetInteger("count", count);
-  root.SetDouble("sum", static_cast<double>(sum));
-  root.SetInteger("flags", flags());
+  Value::Dict root;
+  root.Set("name", histogram_name());
+  root.Set("count", count_and_bucket_data.count);
+  root.Set("sum", static_cast<double>(count_and_bucket_data.sum));
+  root.Set("flags", flags());
   root.Set("params", std::move(parameters));
   if (verbosity_level != JSON_VERBOSITY_LEVEL_OMIT_BUCKETS)
-    root.Set("buckets", std::move(buckets));
-  root.SetInteger("pid", GetUniqueIdForProcess());
+    root.Set("buckets", std::move(count_and_bucket_data.buckets));
+  root.Set("pid", static_cast<int>(GetUniqueIdForProcess().GetUnsafeValue()));
   serializer.Serialize(root);
 }
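
For reference, the JSON emitted above has this shape (illustrative values for a hypothetical histogram with one sample of 4; base::Value::Dict serializes keys in sorted order):

    {"buckets":[{"count":1,"high":5,"low":4}],"count":1,"flags":1,
     "name":"Hypothetical.Histogram","params":{"bucket_count":50,"max":100,
     "min":1,"type":"HISTOGRAM"},"pid":1234,"sum":4.0}
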
 
-void HistogramBase::FindAndRunCallback(HistogramBase::Sample sample) const {
-  if ((flags() & kCallbackExists) == 0)
-    return;
+void HistogramBase::FindAndRunCallbacks(HistogramBase::Sample sample) const {
+  StatisticsRecorder::GlobalSampleCallback global_sample_callback =
+      StatisticsRecorder::global_sample_callback();
+  if (global_sample_callback)
+    global_sample_callback(histogram_name(), name_hash(), sample);
 
-  StatisticsRecorder::OnSampleCallback cb =
-      StatisticsRecorder::FindCallback(histogram_name());
-  if (!cb.is_null())
-    cb.Run(sample);
+  // We check the flag first since it is very cheap and we can avoid the
+  // function call and lock overhead of FindAndRunHistogramCallbacks().
+  if (!HasFlags(kCallbackExists)) {
+    return;
+  }
+
+  StatisticsRecorder::FindAndRunHistogramCallbacks(
+      base::PassKey<HistogramBase>(), histogram_name(), name_hash(), sample);
 }
 
-void HistogramBase::WriteAsciiBucketGraph(double current_size,
-                                          double max_size,
+HistogramBase::CountAndBucketData HistogramBase::GetCountAndBucketData() const {
+  std::unique_ptr<HistogramSamples> snapshot = SnapshotSamples();
+  Count count = snapshot->TotalCount();
+  int64_t sum = snapshot->sum();
+  std::unique_ptr<SampleCountIterator> it = snapshot->Iterator();
+
+  Value::List buckets;
+  while (!it->Done()) {
+    Sample bucket_min;
+    int64_t bucket_max;
+    Count bucket_count;
+    it->Get(&bucket_min, &bucket_max, &bucket_count);
+
+    Value::Dict bucket_value;
+    bucket_value.Set("low", bucket_min);
+    // TODO(crbug.com/1334256): Make base::Value able to hold int64_t and remove
+    // this cast.
+    bucket_value.Set("high", static_cast<int>(bucket_max));
+    bucket_value.Set("count", bucket_count);
+    buckets.Append(std::move(bucket_value));
+    it->Next();
+  }
+
+  return CountAndBucketData(count, sum, std::move(buckets));
+}
+
+void HistogramBase::WriteAsciiBucketGraph(double x_count,
+                                          int line_length,
                                           std::string* output) const {
-  const int k_line_length = 72;  // Maximal horizontal width of graph.
-  int x_count = static_cast<int>(k_line_length * (current_size / max_size)
-                                 + 0.5);
-  int x_remainder = k_line_length - x_count;
+  int x_remainder = line_length - x_count;
 
   while (0 < x_count--)
     output->append("-");
@@ -197,7 +241,14 @@
 void HistogramBase::WriteAsciiBucketValue(Count current,
                                           double scaled_sum,
                                           std::string* output) const {
-  StringAppendF(output, " (%d = %3.1f%%)", current, current/scaled_sum);
+  StringAppendF(output, " (%d = %3.1f%%)", current, current / scaled_sum);
+}
+
+void HistogramBase::WriteAscii(std::string* output) const {
+  base::Value::Dict graph_dict = ToGraphDict();
+  output->append(*graph_dict.FindString("header"));
+  output->append("\n");
+  output->append(*graph_dict.FindString("body"));
 }
 
 // static
@@ -205,11 +256,11 @@
   // A set of histogram names that provides the "permanent" lifetime required
   // by histogram objects for those strings that are not already code constants
   // or held in persistent memory.
-  static LazyInstance<std::set<std::string>>::Leaky permanent_names;
-  static LazyInstance<Lock>::Leaky permanent_names_lock;
+  static base::NoDestructor<std::set<std::string>> permanent_names;
+  static base::NoDestructor<Lock> permanent_names_lock;
 
-  AutoLock lock(permanent_names_lock.Get());
-  auto result = permanent_names.Get().insert(name);
+  AutoLock lock(*permanent_names_lock);
+  auto result = permanent_names->insert(name);
   return result.first->c_str();
 }
 
diff --git a/base/metrics/histogram_base.h b/base/metrics/histogram_base.h
index da73467..9663846 100644
--- a/base/metrics/histogram_base.h
+++ b/base/metrics/histogram_base.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -6,24 +6,24 @@
 #define BASE_METRICS_HISTOGRAM_BASE_H_
 
 #include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
 
+#include <atomic>
 #include <memory>
 #include <string>
-#include <vector>
 
 #include "base/atomicops.h"
 #include "base/base_export.h"
-#include "base/macros.h"
 #include "base/strings/string_piece.h"
 #include "base/time/time.h"
-#include "starboard/types.h"
+#include "base/values.h"
 
 namespace base {
 
-class DictionaryValue;
+class Value;
 class HistogramBase;
 class HistogramSamples;
-class ListValue;
 class Pickle;
 class PickleIterator;
 
@@ -146,6 +146,10 @@
   // Construct the base histogram. The name is not copied; it's up to the
   // caller to ensure that it lives at least as long as this object.
   explicit HistogramBase(const char* name);
+
+  HistogramBase(const HistogramBase&) = delete;
+  HistogramBase& operator=(const HistogramBase&) = delete;
+
   virtual ~HistogramBase();
 
   const char* histogram_name() const { return histogram_name_; }
@@ -159,19 +163,19 @@
   virtual uint64_t name_hash() const = 0;
 
   // Operations with Flags enum.
-  int32_t flags() const { return subtle::NoBarrier_Load(&flags_); }
+  int32_t flags() const { return flags_.load(std::memory_order_relaxed); }
   void SetFlags(int32_t flags);
   void ClearFlags(int32_t flags);
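+  // Whether all of the bits in |flags| are set, judging by the call sites in
+  // this change that replace |flags() & mask| checks with HasFlags().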
+  bool HasFlags(int32_t flags) const;
 
   virtual HistogramType GetHistogramType() const = 0;
 
   // Whether the histogram has construction arguments as parameters specified.
   // For histograms that don't have the concept of minimum, maximum or
   // bucket_count, this function always returns false.
-  virtual bool HasConstructionArguments(
-      Sample expected_minimum,
-      Sample expected_maximum,
-      uint32_t expected_bucket_count) const = 0;
+  virtual bool HasConstructionArguments(Sample expected_minimum,
+                                        Sample expected_maximum,
+                                        size_t expected_bucket_count) const = 0;
 
   virtual void Add(Sample value) = 0;
 
@@ -211,15 +215,58 @@
   virtual uint32_t FindCorruption(const HistogramSamples& samples) const;
 
   // Snapshot the current complete set of sample data.
+  // Note that histogram data is stored per-process. The browser process
+  // periodically ingests data from subprocesses. As such, the browser
+  // process can see histogram data from any process but other processes
+  // can only see histogram data recorded in that subprocess itself.
+  // Moreover, the data returned here may not be up to date:
+  // - this function does not use a lock so data might not be synced
+  //   (e.g., across cpu caches)
+  // - in the browser process, the data from subprocesses may not have
+  //   been merged recently via MergeHistogramDeltas().
+  //
   // Override with atomic/locked snapshot if needed.
   // NOTE: this data can overflow for long-running sessions. It should be
   // handled with care and this method is recommended to be used only
   // in about:histograms and test code.
   virtual std::unique_ptr<HistogramSamples> SnapshotSamples() const = 0;
 
+  // Returns a copy of the samples that have not yet been logged. To mark the
+  // returned samples as logged, see MarkSamplesAsLogged().
+  //
+  // See the additional caveats on SnapshotSamples().
+  //
+  // WARNING: This may be called from a background thread by the metrics
+  // collection system. Do not make a call to this unless it was properly vetted
+  // by someone familiar with the system.
+  // TODO(crbug/1052796): Consider gating this behind a PassKey, so that
+  // eventually, only StatisticsRecorder can use this.
+  virtual std::unique_ptr<HistogramSamples> SnapshotUnloggedSamples() const = 0;
+
+  // Marks the passed |samples| as logged. More formally, the |samples| passed
+  // will not appear in the samples returned by a subsequent call to
+  // SnapshotDelta().
+  //
+  // See the additional caveats on SnapshotSamples().
+  //
+  // WARNING: This may be called from a background thread by the metrics
+  // collection system. Do not make a call to this unless it was properly vetted
+  // by someone familiar with the system.
+  // TODO(crbug/1052796): Consider gating this behind a PassKey, so that
+  // eventually, only StatisticsRecorder can use this.
+  virtual void MarkSamplesAsLogged(const HistogramSamples& samples) = 0;
+
   // Calculate the change (delta) in histogram counts since the previous call
-  // to this method. Each successive call will return only those counts
-  // changed since the last call.
+  // to this method. Each successive call will return only those counts changed
+  // since the last call. Calls to MarkSamplesAsLogged() will also affect the
+  // samples returned. Logically, this function is equivalent to a call to
+  // SnapshotUnloggedSamples() followed by a call to MarkSamplesAsLogged().
+  //
+  // See the additional caveats on SnapshotSamples().
+  //
+  // WARNING: This may be called from a background thread by the metrics
+  // collection system. Do not make a call to this unless it was properly vetted
+  // by someone familiar with the system.
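+  //
+  // For illustration (hypothetical caller code), the equivalence noted above
+  // amounts to:
+  //   std::unique_ptr<HistogramSamples> unlogged =
+  //       histogram->SnapshotUnloggedSamples();
+  //   histogram->MarkSamplesAsLogged(*unlogged);
+  //   // |unlogged| now holds what SnapshotDelta() would have returned.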
   virtual std::unique_ptr<HistogramSamples> SnapshotDelta() = 0;
 
   // Calculate the change (delta) in histogram counts since the previous call
@@ -229,11 +276,18 @@
   // data previously returned. Because no internal data is changed, this call
   // can be made on "const" histograms such as those with data held in
   // read-only memory.
+  //
+  // See the additional caveats on SnapshotSamples().
   virtual std::unique_ptr<HistogramSamples> SnapshotFinalDelta() const = 0;
 
-  // The following methods provide graphical histogram displays.
-  virtual void WriteHTMLGraph(std::string* output) const = 0;
-  virtual void WriteAscii(std::string* output) const = 0;
+  // The following method provides graphical histogram displays.
+  virtual void WriteAscii(std::string* output) const;
+
+  // Returns histogram data as a Dict (or an empty Dict if not available),
+  // with the following format:
+  // {"header": "Name of the histogram with samples, mean, and/or flags",
+  // "body": "ASCII histogram representation"}
+  virtual base::Value::Dict ToGraphDict() const = 0;
 
   // TODO(bcwhite): Remove this after https://crbug/836875.
   virtual void ValidateHistogramContents() const;
@@ -247,22 +301,32 @@
  protected:
   enum ReportActivity { HISTOGRAM_CREATED, HISTOGRAM_LOOKUP };
 
+  struct BASE_EXPORT CountAndBucketData {
+    Count count;
+    int64_t sum;
+    Value::List buckets;
+
+    CountAndBucketData(Count count, int64_t sum, Value::List buckets);
+    ~CountAndBucketData();
+
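+    // Move-only: Value::List cannot be copied (it must be Clone()d), so only
+    // the move operations are declared.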
+    CountAndBucketData(CountAndBucketData&& other);
+    CountAndBucketData& operator=(CountAndBucketData&& other);
+  };
+
   // Subclasses should implement this function to make SerializeInfo work.
   virtual void SerializeInfoImpl(base::Pickle* pickle) const = 0;
 
   // Writes information about the construction parameters in |params|.
-  virtual void GetParameters(DictionaryValue* params) const = 0;
+  virtual Value::Dict GetParameters() const = 0;
 
-  // Writes information about the current (non-empty) buckets and their sample
-  // counts to |buckets|, the total sample count to |count| and the total sum
-  // to |sum|.
+  // Returns information about the current (non-empty) buckets and their
+  // sample counts, the total sample count, and the total sum.
-  virtual void GetCountAndBucketData(Count* count,
-                                     int64_t* sum,
-                                     ListValue* buckets) const = 0;
+  CountAndBucketData GetCountAndBucketData() const;
 
-  //// Produce actual graph (set of blank vs non blank char's) for a bucket.
-  void WriteAsciiBucketGraph(double current_size,
-                             double max_size,
+  // Produces an actual graph (a set of blank vs. non-blank chars) for a bucket.
+  void WriteAsciiBucketGraph(double x_count,
+                             int line_length,
                              std::string* output) const;
 
   // Return a string description of what goes in a given bucket.
@@ -274,9 +338,9 @@
                              double scaled_sum,
                              std::string* output) const;
 
-  // Retrieves the callback for this histogram, if one exists, and runs it
-  // passing |sample| as the parameter.
-  void FindAndRunCallback(Sample sample) const;
+  // Retrieves the registered callbacks for this histogram, if any, and runs
+  // them passing |sample| as the parameter.
+  void FindAndRunCallbacks(Sample sample) const;
 
   // Gets a permanent string that can be used for histogram objects when the
   // original is not a code constant or held in persistent memory.
@@ -296,9 +360,7 @@
   const char* const histogram_name_;
 
   // Additional information about the histogram.
-  AtomicCount flags_;
-
-  DISALLOW_COPY_AND_ASSIGN(HistogramBase);
+  std::atomic<int32_t> flags_{0};
 };
 
 }  // namespace base
diff --git a/base/metrics/histogram_base_unittest.cc b/base/metrics/histogram_base_unittest.cc
index 9e3d6d0..6a30c89 100644
--- a/base/metrics/histogram_base_unittest.cc
+++ b/base/metrics/histogram_base_unittest.cc
@@ -1,12 +1,12 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include <limits>
 #include <vector>
 
 #include "base/metrics/histogram.h"
 #include "base/metrics/histogram_base.h"
-#include "base/metrics/persistent_histogram_allocator.h"
 #include "base/metrics/sample_vector.h"
 #include "base/metrics/sparse_histogram.h"
 #include "base/metrics/statistics_recorder.h"
@@ -16,16 +16,18 @@
 namespace base {
 
 class HistogramBaseTest : public testing::Test {
- protected:
+ public:
   HistogramBaseTest() {
     // Each test will have a clean state (no Histogram / BucketRanges
     // registered).
     ResetStatisticsRecorder();
-    GlobalHistogramAllocator::ReleaseForTesting();
   }
 
+  HistogramBaseTest(const HistogramBaseTest&) = delete;
+  HistogramBaseTest& operator=(const HistogramBaseTest&) = delete;
   ~HistogramBaseTest() override = default;
 
+ protected:
   void ResetStatisticsRecorder() {
     // It is necessary to fully destruct any existing StatisticsRecorder
     // before creating a new one.
@@ -35,8 +37,6 @@
 
  private:
   std::unique_ptr<StatisticsRecorder> statistics_recorder_;
-
-  DISALLOW_COPY_AND_ASSIGN(HistogramBaseTest);
 };
 
 TEST_F(HistogramBaseTest, DeserializeHistogram) {
@@ -201,8 +201,7 @@
   while (large_positive > std::numeric_limits<HistogramBase::Sample>::max()) {
     // Add the TimeDelta corresponding to |large_positive| milliseconds to the
     // histogram.
-    histogram->AddTimeMillisecondsGranularity(
-        TimeDelta::FromMilliseconds(large_positive));
+    histogram->AddTimeMillisecondsGranularity(Milliseconds(large_positive));
     ++add_count;
     // Reduce the value of |large_positive|. The choice of 7 here is
     // arbitrary.
@@ -219,8 +218,7 @@
   int64_t large_negative = std::numeric_limits<int64_t>::min();
   add_count = 0;
   while (large_negative < std::numeric_limits<HistogramBase::Sample>::min()) {
-    histogram->AddTimeMillisecondsGranularity(
-        TimeDelta::FromMilliseconds(large_negative));
+    histogram->AddTimeMillisecondsGranularity(Milliseconds(large_negative));
     ++add_count;
     large_negative /= 7;
   }
@@ -246,8 +244,7 @@
   while (large_positive > std::numeric_limits<HistogramBase::Sample>::max()) {
     // Add the TimeDelta corresponding to |large_positive| microseconds to the
     // histogram.
-    histogram->AddTimeMicrosecondsGranularity(
-        TimeDelta::FromMicroseconds(large_positive));
+    histogram->AddTimeMicrosecondsGranularity(Microseconds(large_positive));
     ++add_count;
     // Reduce the value of |large_positive|. The choice of 7 here is
     // arbitrary.
@@ -264,8 +261,7 @@
   int64_t large_negative = std::numeric_limits<int64_t>::min();
   add_count = 0;
   while (large_negative < std::numeric_limits<HistogramBase::Sample>::min()) {
-    histogram->AddTimeMicrosecondsGranularity(
-        TimeDelta::FromMicroseconds(large_negative));
+    histogram->AddTimeMicrosecondsGranularity(Microseconds(large_negative));
     ++add_count;
     large_negative /= 7;
   }
diff --git a/base/metrics/histogram_delta_serialization.cc b/base/metrics/histogram_delta_serialization.cc
index 7214c9a..c751210 100644
--- a/base/metrics/histogram_delta_serialization.cc
+++ b/base/metrics/histogram_delta_serialization.cc
@@ -1,4 +1,4 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -23,7 +23,7 @@
   if (!histogram)
     return;
 
-  if (histogram->flags() & HistogramBase::kIPCSerializationSourceFlag) {
+  if (histogram->HasFlags(HistogramBase::kIPCSerializationSourceFlag)) {
     DVLOG(1) << "Single process mode, histogram observed and not copied: "
              << histogram->histogram_name();
     return;
@@ -59,7 +59,7 @@
     const std::vector<std::string>& serialized_deltas) {
   for (auto it = serialized_deltas.begin(); it != serialized_deltas.end();
        ++it) {
-    Pickle pickle(it->data(), checked_cast<int>(it->size()));
+    Pickle pickle(it->data(), it->size());
     PickleIterator iter(pickle);
     DeserializeHistogramAndAddSamples(&iter);
   }
@@ -74,8 +74,7 @@
   Pickle pickle;
   histogram.SerializeInfo(&pickle);
   snapshot.Serialize(&pickle);
-  serialized_deltas_->push_back(
-      std::string(static_cast<const char*>(pickle.data()), pickle.size()));
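+  // Pickle::data_as_char() exposes the payload as const char*, so the manual
+  // static_cast from void* is no longer needed here.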
+  serialized_deltas_->emplace_back(pickle.data_as_char(), pickle.size());
 }
 
 }  // namespace base
diff --git a/base/metrics/histogram_delta_serialization.h b/base/metrics/histogram_delta_serialization.h
index 57ebd2c..7da1905 100644
--- a/base/metrics/histogram_delta_serialization.h
+++ b/base/metrics/histogram_delta_serialization.h
@@ -1,4 +1,4 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -10,7 +10,7 @@
 #include <vector>
 
 #include "base/base_export.h"
-#include "base/macros.h"
+#include "base/memory/raw_ptr.h"
 #include "base/metrics/histogram_flattener.h"
 #include "base/metrics/histogram_snapshot_manager.h"
 #include "base/threading/thread_checker.h"
@@ -24,6 +24,11 @@
  public:
   // |caller_name| is string used in histograms for counting inconsistencies.
   explicit HistogramDeltaSerialization(const std::string& caller_name);
+
+  HistogramDeltaSerialization(const HistogramDeltaSerialization&) = delete;
+  HistogramDeltaSerialization& operator=(const HistogramDeltaSerialization&) =
+      delete;
+
   ~HistogramDeltaSerialization() override;
 
   // Computes deltas in histogram bucket counts relative to the previous call to
@@ -51,9 +56,7 @@
   HistogramSnapshotManager histogram_snapshot_manager_;
 
   // Output buffer for serialized deltas.
-  std::vector<std::string>* serialized_deltas_;
-
-  DISALLOW_COPY_AND_ASSIGN(HistogramDeltaSerialization);
+  raw_ptr<std::vector<std::string>> serialized_deltas_;
 };
 
 }  // namespace base
diff --git a/base/metrics/histogram_delta_serialization_unittest.cc b/base/metrics/histogram_delta_serialization_unittest.cc
index 719bc70..693c466 100644
--- a/base/metrics/histogram_delta_serialization_unittest.cc
+++ b/base/metrics/histogram_delta_serialization_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
diff --git a/base/metrics/histogram_flattener.h b/base/metrics/histogram_flattener.h
index 6a5e3f4..3ca3559 100644
--- a/base/metrics/histogram_flattener.h
+++ b/base/metrics/histogram_flattener.h
@@ -1,14 +1,11 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #ifndef BASE_METRICS_HISTOGRAM_FLATTENER_H_
 #define BASE_METRICS_HISTOGRAM_FLATTENER_H_
 
-#include <map>
-#include <string>
-
-#include "base/macros.h"
+#include "base/base_export.h"
 #include "base/metrics/histogram.h"
 
 namespace base {
@@ -19,15 +16,15 @@
 // handles the logistics of gathering up available histograms for recording.
 class BASE_EXPORT HistogramFlattener {
  public:
+  HistogramFlattener(const HistogramFlattener&) = delete;
+  HistogramFlattener& operator=(const HistogramFlattener&) = delete;
+  virtual ~HistogramFlattener() = default;
+
   virtual void RecordDelta(const HistogramBase& histogram,
                            const HistogramSamples& snapshot) = 0;
 
  protected:
   HistogramFlattener() = default;
-  virtual ~HistogramFlattener() = default;
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(HistogramFlattener);
 };
 
 }  // namespace base
diff --git a/base/metrics/histogram_functions.cc b/base/metrics/histogram_functions.cc
index 31bf219..efa3d55 100644
--- a/base/metrics/histogram_functions.cc
+++ b/base/metrics/histogram_functions.cc
@@ -1,4 +1,4 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
+// Copyright 2016 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -17,26 +17,64 @@
   histogram->Add(sample);
 }
 
+void UmaHistogramBoolean(const char* name, bool sample) {
+  HistogramBase* histogram = BooleanHistogram::FactoryGet(
+      name, HistogramBase::kUmaTargetedHistogramFlag);
+  histogram->Add(sample);
+}
+
 void UmaHistogramExactLinear(const std::string& name,
                              int sample,
-                             int value_max) {
-  HistogramBase* histogram =
-      LinearHistogram::FactoryGet(name, 1, value_max, value_max + 1,
-                                  HistogramBase::kUmaTargetedHistogramFlag);
+                             int exclusive_max) {
+  HistogramBase* histogram = LinearHistogram::FactoryGet(
+      name, 1, exclusive_max, static_cast<size_t>(exclusive_max + 1),
+      HistogramBase::kUmaTargetedHistogramFlag);
+  histogram->Add(sample);
+}
+
+void UmaHistogramExactLinear(const char* name, int sample, int exclusive_max) {
+  HistogramBase* histogram = LinearHistogram::FactoryGet(
+      name, 1, exclusive_max, static_cast<size_t>(exclusive_max + 1),
+      HistogramBase::kUmaTargetedHistogramFlag);
   histogram->Add(sample);
 }
 
 void UmaHistogramPercentage(const std::string& name, int percent) {
+  UmaHistogramExactLinear(name, percent, 101);
+}
+
+void UmaHistogramPercentage(const char* name, int percent) {
+  UmaHistogramExactLinear(name, percent, 101);
+}
+
+void UmaHistogramPercentageObsoleteDoNotUse(const std::string& name,
+                                            int percent) {
+  UmaHistogramExactLinear(name, percent, 100);
+}
+
+void UmaHistogramPercentageObsoleteDoNotUse(const char* name, int percent) {
   UmaHistogramExactLinear(name, percent, 100);
 }
 
 void UmaHistogramCustomCounts(const std::string& name,
                               int sample,
                               int min,
-                              int max,
-                              int buckets) {
-  HistogramBase* histogram = Histogram::FactoryGet(
-      name, min, max, buckets, HistogramBase::kUmaTargetedHistogramFlag);
+                              int exclusive_max,
+                              size_t buckets) {
+  HistogramBase* histogram =
+      Histogram::FactoryGet(name, min, exclusive_max, buckets,
+                            HistogramBase::kUmaTargetedHistogramFlag);
+  histogram->Add(sample);
+}
+
+void UmaHistogramCustomCounts(const char* name,
+                              int sample,
+                              int min,
+                              int exclusive_max,
+                              size_t buckets) {
+  HistogramBase* histogram =
+      Histogram::FactoryGet(name, min, exclusive_max, buckets,
+                            HistogramBase::kUmaTargetedHistogramFlag);
   histogram->Add(sample);
 }
 
@@ -44,67 +82,166 @@
   UmaHistogramCustomCounts(name, sample, 1, 100, 50);
 }
 
+void UmaHistogramCounts100(const char* name, int sample) {
+  UmaHistogramCustomCounts(name, sample, 1, 100, 50);
+}
+
 void UmaHistogramCounts1000(const std::string& name, int sample) {
   UmaHistogramCustomCounts(name, sample, 1, 1000, 50);
 }
 
+void UmaHistogramCounts1000(const char* name, int sample) {
+  UmaHistogramCustomCounts(name, sample, 1, 1000, 50);
+}
+
 void UmaHistogramCounts10000(const std::string& name, int sample) {
   UmaHistogramCustomCounts(name, sample, 1, 10000, 50);
 }
 
+void UmaHistogramCounts10000(const char* name, int sample) {
+  UmaHistogramCustomCounts(name, sample, 1, 10000, 50);
+}
+
 void UmaHistogramCounts100000(const std::string& name, int sample) {
   UmaHistogramCustomCounts(name, sample, 1, 100000, 50);
 }
 
+void UmaHistogramCounts100000(const char* name, int sample) {
+  UmaHistogramCustomCounts(name, sample, 1, 100000, 50);
+}
+
 void UmaHistogramCounts1M(const std::string& name, int sample) {
   UmaHistogramCustomCounts(name, sample, 1, 1000000, 50);
 }
 
+void UmaHistogramCounts1M(const char* name, int sample) {
+  UmaHistogramCustomCounts(name, sample, 1, 1000000, 50);
+}
+
 void UmaHistogramCounts10M(const std::string& name, int sample) {
   UmaHistogramCustomCounts(name, sample, 1, 10000000, 50);
 }
 
+void UmaHistogramCounts10M(const char* name, int sample) {
+  UmaHistogramCustomCounts(name, sample, 1, 10000000, 50);
+}
+
 void UmaHistogramCustomTimes(const std::string& name,
                              TimeDelta sample,
                              TimeDelta min,
                              TimeDelta max,
-                             int buckets) {
+                             size_t buckets) {
+  HistogramBase* histogram = Histogram::FactoryTimeGet(
+      name, min, max, buckets, HistogramBase::kUmaTargetedHistogramFlag);
+  histogram->AddTimeMillisecondsGranularity(sample);
+}
+
+void UmaHistogramCustomTimes(const char* name,
+                             TimeDelta sample,
+                             TimeDelta min,
+                             TimeDelta max,
+                             size_t buckets) {
   HistogramBase* histogram = Histogram::FactoryTimeGet(
       name, min, max, buckets, HistogramBase::kUmaTargetedHistogramFlag);
   histogram->AddTimeMillisecondsGranularity(sample);
 }
 
 void UmaHistogramTimes(const std::string& name, TimeDelta sample) {
-  UmaHistogramCustomTimes(name, sample, TimeDelta::FromMilliseconds(1),
-                          TimeDelta::FromSeconds(10), 50);
+  UmaHistogramCustomTimes(name, sample, Milliseconds(1), Seconds(10), 50);
+}
+
+void UmaHistogramTimes(const char* name, TimeDelta sample) {
+  UmaHistogramCustomTimes(name, sample, Milliseconds(1), Seconds(10), 50);
 }
 
 void UmaHistogramMediumTimes(const std::string& name, TimeDelta sample) {
-  UmaHistogramCustomTimes(name, sample, TimeDelta::FromMilliseconds(1),
-                          TimeDelta::FromMinutes(3), 50);
+  UmaHistogramCustomTimes(name, sample, Milliseconds(1), Minutes(3), 50);
+}
+
+void UmaHistogramMediumTimes(const char* name, TimeDelta sample) {
+  UmaHistogramCustomTimes(name, sample, Milliseconds(1), Minutes(3), 50);
 }
 
 void UmaHistogramLongTimes(const std::string& name, TimeDelta sample) {
-  UmaHistogramCustomTimes(name, sample, TimeDelta::FromMilliseconds(1),
-                          TimeDelta::FromHours(1), 50);
+  UmaHistogramCustomTimes(name, sample, Milliseconds(1), Hours(1), 50);
+}
+
+void UmaHistogramLongTimes(const char* name, TimeDelta sample) {
+  UmaHistogramCustomTimes(name, sample, Milliseconds(1), Hours(1), 50);
+}
+
+void UmaHistogramLongTimes100(const std::string& name, TimeDelta sample) {
+  UmaHistogramCustomTimes(name, sample, Milliseconds(1), Hours(1), 100);
+}
+
+void UmaHistogramLongTimes100(const char* name, TimeDelta sample) {
+  UmaHistogramCustomTimes(name, sample, Milliseconds(1), Hours(1), 100);
+}
+
+void UmaHistogramCustomMicrosecondsTimes(const std::string& name,
+                                         TimeDelta sample,
+                                         TimeDelta min,
+                                         TimeDelta max,
+                                         size_t buckets) {
+  HistogramBase* histogram = Histogram::FactoryMicrosecondsTimeGet(
+      name, min, max, buckets, HistogramBase::kUmaTargetedHistogramFlag);
+  histogram->AddTimeMicrosecondsGranularity(sample);
+}
+
+void UmaHistogramCustomMicrosecondsTimes(const char* name,
+                                         TimeDelta sample,
+                                         TimeDelta min,
+                                         TimeDelta max,
+                                         size_t buckets) {
+  HistogramBase* histogram = Histogram::FactoryMicrosecondsTimeGet(
+      name, min, max, buckets, HistogramBase::kUmaTargetedHistogramFlag);
+  histogram->AddTimeMicrosecondsGranularity(sample);
+}
+
+void UmaHistogramMicrosecondsTimes(const std::string& name, TimeDelta sample) {
+  UmaHistogramCustomMicrosecondsTimes(name, sample, Microseconds(1),
+                                      Seconds(10), 50);
+}
+
+void UmaHistogramMicrosecondsTimes(const char* name, TimeDelta sample) {
+  UmaHistogramCustomMicrosecondsTimes(name, sample, Microseconds(1),
+                                      Seconds(10), 50);
 }
 
 void UmaHistogramMemoryKB(const std::string& name, int sample) {
   UmaHistogramCustomCounts(name, sample, 1000, 500000, 50);
 }
 
+void UmaHistogramMemoryKB(const char* name, int sample) {
+  UmaHistogramCustomCounts(name, sample, 1000, 500000, 50);
+}
+
 void UmaHistogramMemoryMB(const std::string& name, int sample) {
   UmaHistogramCustomCounts(name, sample, 1, 1000, 50);
 }
 
+void UmaHistogramMemoryMB(const char* name, int sample) {
+  UmaHistogramCustomCounts(name, sample, 1, 1000, 50);
+}
+
 void UmaHistogramMemoryLargeMB(const std::string& name, int sample) {
   UmaHistogramCustomCounts(name, sample, 1, 64000, 100);
 }
 
+void UmaHistogramMemoryLargeMB(const char* name, int sample) {
+  UmaHistogramCustomCounts(name, sample, 1, 64000, 100);
+}
+
 void UmaHistogramSparse(const std::string& name, int sample) {
   HistogramBase* histogram = SparseHistogram::FactoryGet(
       name, HistogramBase::kUmaTargetedHistogramFlag);
   histogram->Add(sample);
 }
 
+void UmaHistogramSparse(const char* name, int sample) {
+  HistogramBase* histogram = SparseHistogram::FactoryGet(
+      name, HistogramBase::kUmaTargetedHistogramFlag);
+  histogram->Add(sample);
+}
+
 }  // namespace base
diff --git a/base/metrics/histogram_functions.h b/base/metrics/histogram_functions.h
index 60c0057..22b7ea2 100644
--- a/base/metrics/histogram_functions.h
+++ b/base/metrics/histogram_functions.h
@@ -1,88 +1,152 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
+// Copyright 2016 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #ifndef BASE_METRICS_HISTOGRAM_FUNCTIONS_H_
 #define BASE_METRICS_HISTOGRAM_FUNCTIONS_H_
 
+#include <string>
+#include <type_traits>
+
+#include "base/base_export.h"
+#include "base/check_op.h"
 #include "base/metrics/histogram.h"
 #include "base/metrics/histogram_base.h"
 #include "base/time/time.h"
 
+// TODO(crbug/1265443): Update this file's function comments to provide more
+// detail, like histogram_macros.h.
+//
 // Functions for recording metrics.
 //
 // For best practices on deciding when to emit to a histogram and what form
 // the histogram should take, see
 // https://chromium.googlesource.com/chromium/src.git/+/HEAD/tools/metrics/histograms/README.md
-
-// Functions for recording UMA histograms. These can be used for cases
-// when the histogram name is generated at runtime. The functionality is
-// equivalent to macros defined in histogram_macros.h but allowing non-constant
-// histogram names. These functions are slower compared to their macro
-// equivalent because the histogram objects are not cached between calls.
-// So, these shouldn't be used in performance critical code.
+//
+// For deciding whether to use the function or macro APIs, see
+// https://chromium.googlesource.com/chromium/src/+/HEAD/tools/metrics/histograms/README.md#coding-emitting-to-histograms
+//
+// Every function is duplicated to take both std::string and char* for the name.
+// This avoids instantiating a std::string (and running its ctor/dtor) for
+// constant strings, which would otherwise make these calls larger than the
+// caching macros (which do accept char*) in those cases.
 namespace base {
 
-// For histograms with linear buckets.
-// Used for capturing integer data with a linear bucketing scheme. This can be
-// used when you want the exact value of some small numeric count, with a max of
-// 100 or less. If you need to capture a range of greater than 100, we recommend
-// the use of the COUNT histograms below.
+// For numeric measurements where you want exact integer values up to
+// |exclusive_max|. |exclusive_max| itself is included in the overflow bucket.
+// Therefore, if you want an accurate measure up to kMax, then |exclusive_max|
+// should be set to kMax + 1.
+//
+// |exclusive_max| should be 101 or less. If you need to capture a larger range,
+// we recommend the use of the COUNT histograms below.
+//
 // Sample usage:
-//   base::UmaHistogramExactLinear("Histogram.Linear", some_value, 10);
+//   base::UmaHistogramExactLinear("Histogram.Linear", sample, kMax + 1);
+// In this case, buckets are 1, 2, .., kMax, kMax+1, where the kMax+1 bucket
+// captures everything kMax+1 and above.
 BASE_EXPORT void UmaHistogramExactLinear(const std::string& name,
                                          int sample,
-                                         int value_max);
+                                         int exclusive_max);
+BASE_EXPORT void UmaHistogramExactLinear(const char* name,
+                                         int sample,
+                                         int exclusive_max);
 
 // For adding a sample to an enumerated histogram.
 // Sample usage:
 //   // These values are persisted to logs. Entries should not be renumbered and
 //   // numeric values should never be reused.
-//   enum class MyEnum {
-//     FIRST_VALUE = 0,
-//     SECOND_VALUE = 1,
-//     ...
-//     FINAL_VALUE = N,
-//     COUNT
+//   enum class NewTabPageAction {
+//     kUseOmnibox = 0,
+//     kClickTitle = 1,
+//     // kUseSearchbox = 2,  // no longer used, combined into omnibox
+//     kOpenBookmark = 3,
+//     kMaxValue = kOpenBookmark,
 //   };
 //   base::UmaHistogramEnumeration("My.Enumeration",
-//                                 MyEnum::SOME_VALUE, MyEnum::COUNT);
+//                                 NewTabPageAction::kClickTitle);
 //
-// Note: The value in |sample| must be strictly less than |enum_size|.
-template <typename T>
-void UmaHistogramEnumeration(const std::string& name, T sample, T enum_size) {
-  static_assert(std::is_enum<T>::value,
-                "Non enum passed to UmaHistogramEnumeration");
-  DCHECK_LE(static_cast<uintmax_t>(enum_size), static_cast<uintmax_t>(INT_MAX));
-  DCHECK_LT(static_cast<uintmax_t>(sample), static_cast<uintmax_t>(enum_size));
-  return UmaHistogramExactLinear(name, static_cast<int>(sample),
-                                 static_cast<int>(enum_size));
-}
-
-// Same as above, but uses T::kMaxValue as the inclusive maximum value of the
-// enum.
+// Note that some code relies on the implementation details of this function;
+// keep such code synchronized with any changes here.
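+//
+// For illustration with the enum above: recording NewTabPageAction::kClickTitle
+// forwards to UmaHistogramExactLinear(name, 1, static_cast<int>(kMaxValue) + 1),
+// i.e. an exclusive max of 4 for this enum.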
 template <typename T>
 void UmaHistogramEnumeration(const std::string& name, T sample) {
-  static_assert(std::is_enum<T>::value,
-                "Non enum passed to UmaHistogramEnumeration");
-  DCHECK_LE(static_cast<uintmax_t>(T::kMaxValue),
-            static_cast<uintmax_t>(INT_MAX) - 1);
+  static_assert(std::is_enum<T>::value, "T is not an enum.");
+  // This also ensures that an enumeration that doesn't define kMaxValue fails
+  // with a semi-useful error ("no member named 'kMaxValue' in ...").
+  static_assert(static_cast<uintmax_t>(T::kMaxValue) <=
+                    static_cast<uintmax_t>(INT_MAX) - 1,
+                "Enumeration's kMaxValue is out of range of INT_MAX!");
   DCHECK_LE(static_cast<uintmax_t>(sample),
             static_cast<uintmax_t>(T::kMaxValue));
   return UmaHistogramExactLinear(name, static_cast<int>(sample),
                                  static_cast<int>(T::kMaxValue) + 1);
 }
 
+template <typename T>
+void UmaHistogramEnumeration(const char* name, T sample) {
+  static_assert(std::is_enum<T>::value, "T is not an enum.");
+  // This also ensures that an enumeration that doesn't define kMaxValue fails
+  // with a semi-useful error ("no member named 'kMaxValue' in ...").
+  static_assert(static_cast<uintmax_t>(T::kMaxValue) <=
+                    static_cast<uintmax_t>(INT_MAX) - 1,
+                "Enumeration's kMaxValue is out of range of INT_MAX!");
+  DCHECK_LE(static_cast<uintmax_t>(sample),
+            static_cast<uintmax_t>(T::kMaxValue));
+  return UmaHistogramExactLinear(name, static_cast<int>(sample),
+                                 static_cast<int>(T::kMaxValue) + 1);
+}
+
+// Some legacy histograms may manually specify the enum size, with a kCount,
+// COUNT, kMaxValue, or MAX_VALUE sentinel like so:
+//   // These values are persisted to logs. Entries should not be renumbered and
+//   // numeric values should never be reused.
+//   enum class NewTabPageAction {
+//     kUseOmnibox = 0,
+//     kClickTitle = 1,
+//     // kUseSearchbox = 2,  // no longer used, combined into omnibox
+//     kOpenBookmark = 3,
+//     kCount,
+//   };
+//   base::UmaHistogramEnumeration("My.Enumeration",
+//                                 NewTabPageAction::kClickTitle,
+//                                 NewTabPageAction::kCount);
+// Note: The value in |sample| must be strictly less than |enum_size|. This is
+// otherwise functionally equivalent to the above.
+template <typename T>
+void UmaHistogramEnumeration(const std::string& name, T sample, T enum_size) {
+  static_assert(std::is_enum<T>::value, "T is not an enum.");
+  DCHECK_LE(static_cast<uintmax_t>(enum_size), static_cast<uintmax_t>(INT_MAX));
+  DCHECK_LT(static_cast<uintmax_t>(sample), static_cast<uintmax_t>(enum_size));
+  return UmaHistogramExactLinear(name, static_cast<int>(sample),
+                                 static_cast<int>(enum_size));
+}
+
+template <typename T>
+void UmaHistogramEnumeration(const char* name, T sample, T enum_size) {
+  static_assert(std::is_enum<T>::value, "T is not an enum.");
+  DCHECK_LE(static_cast<uintmax_t>(enum_size), static_cast<uintmax_t>(INT_MAX));
+  DCHECK_LT(static_cast<uintmax_t>(sample), static_cast<uintmax_t>(enum_size));
+  return UmaHistogramExactLinear(name, static_cast<int>(sample),
+                                 static_cast<int>(enum_size));
+}
+
 // For adding boolean sample to histogram.
 // Sample usage:
 //   base::UmaHistogramBoolean("My.Boolean", true)
 BASE_EXPORT void UmaHistogramBoolean(const std::string& name, bool sample);
+BASE_EXPORT void UmaHistogramBoolean(const char* name, bool sample);
 
-// For adding histogram with percent.
-// Percents are integer between 1 and 100.
+// For adding histogram sample denoting a percentage.
+// Percents are integers between 1 and 100, inclusive.
 // Sample usage:
 //   base::UmaHistogramPercentage("My.Percent", 69)
 BASE_EXPORT void UmaHistogramPercentage(const std::string& name, int percent);
+BASE_EXPORT void UmaHistogramPercentage(const char* name, int percent);
+
+// Obsolete. Use |UmaHistogramPercentage| instead. See crbug/1121318.
+BASE_EXPORT void UmaHistogramPercentageObsoleteDoNotUse(const std::string& name,
+                                                        int percent);
+BASE_EXPORT void UmaHistogramPercentageObsoleteDoNotUse(const char* name,
+                                                        int percent);
 
 // For adding counts histogram.
 // Sample usage:
@@ -90,39 +154,84 @@
 BASE_EXPORT void UmaHistogramCustomCounts(const std::string& name,
                                           int sample,
                                           int min,
-                                          int max,
-                                          int buckets);
+                                          int exclusive_max,
+                                          size_t buckets);
+BASE_EXPORT void UmaHistogramCustomCounts(const char* name,
+                                          int sample,
+                                          int min,
+                                          int exclusive_max,
+                                          size_t buckets);
 
 // Counts specialization for maximum counts 100, 1000, 10k, 100k, 1M and 10M.
 BASE_EXPORT void UmaHistogramCounts100(const std::string& name, int sample);
+BASE_EXPORT void UmaHistogramCounts100(const char* name, int sample);
 BASE_EXPORT void UmaHistogramCounts1000(const std::string& name, int sample);
+BASE_EXPORT void UmaHistogramCounts1000(const char* name, int sample);
 BASE_EXPORT void UmaHistogramCounts10000(const std::string& name, int sample);
+BASE_EXPORT void UmaHistogramCounts10000(const char* name, int sample);
 BASE_EXPORT void UmaHistogramCounts100000(const std::string& name, int sample);
+BASE_EXPORT void UmaHistogramCounts100000(const char* name, int sample);
 BASE_EXPORT void UmaHistogramCounts1M(const std::string& name, int sample);
+BASE_EXPORT void UmaHistogramCounts1M(const char* name, int sample);
 BASE_EXPORT void UmaHistogramCounts10M(const std::string& name, int sample);
+BASE_EXPORT void UmaHistogramCounts10M(const char* name, int sample);
 
-// For histograms storing times.
+// For histograms storing times. It uses milliseconds granularity.
 BASE_EXPORT void UmaHistogramCustomTimes(const std::string& name,
                                          TimeDelta sample,
                                          TimeDelta min,
                                          TimeDelta max,
-                                         int buckets);
+                                         size_t buckets);
+BASE_EXPORT void UmaHistogramCustomTimes(const char* name,
+                                         TimeDelta sample,
+                                         TimeDelta min,
+                                         TimeDelta max,
+                                         size_t buckets);
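+// Sample usage (with a hypothetical |elapsed| TimeDelta):
+//   base::UmaHistogramCustomTimes("My.Timing", elapsed, base::Milliseconds(10),
+//                                 base::Seconds(60), 50);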
 // For short timings from 1 ms up to 10 seconds (50 buckets).
 BASE_EXPORT void UmaHistogramTimes(const std::string& name, TimeDelta sample);
+BASE_EXPORT void UmaHistogramTimes(const char* name, TimeDelta sample);
 // For medium timings up to 3 minutes (50 buckets).
 BASE_EXPORT void UmaHistogramMediumTimes(const std::string& name,
                                          TimeDelta sample);
+BASE_EXPORT void UmaHistogramMediumTimes(const char* name, TimeDelta sample);
 // For time intervals up to 1 hr (50 buckets).
 BASE_EXPORT void UmaHistogramLongTimes(const std::string& name,
                                        TimeDelta sample);
+BASE_EXPORT void UmaHistogramLongTimes(const char* name, TimeDelta sample);
+
+// For time intervals up to 1 hr (100 buckets).
+BASE_EXPORT void UmaHistogramLongTimes100(const std::string& name,
+                                          TimeDelta sample);
+BASE_EXPORT void UmaHistogramLongTimes100(const char* name, TimeDelta sample);
+
+// For histograms storing times with microseconds granularity.
+BASE_EXPORT void UmaHistogramCustomMicrosecondsTimes(const std::string& name,
+                                                     TimeDelta sample,
+                                                     TimeDelta min,
+                                                     TimeDelta max,
+                                                     size_t buckets);
+BASE_EXPORT void UmaHistogramCustomMicrosecondsTimes(const char* name,
+                                                     TimeDelta sample,
+                                                     TimeDelta min,
+                                                     TimeDelta max,
+                                                     size_t buckets);
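+// Sample usage (with a hypothetical |elapsed| TimeDelta):
+//   base::UmaHistogramCustomMicrosecondsTimes(
+//       "My.MicroTiming", elapsed, base::Microseconds(1), base::Seconds(1), 50);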
+
+// For microseconds timings from 1 microsecond up to 10 seconds (50 buckets).
+BASE_EXPORT void UmaHistogramMicrosecondsTimes(const std::string& name,
+                                               TimeDelta sample);
+BASE_EXPORT void UmaHistogramMicrosecondsTimes(const char* name,
+                                               TimeDelta sample);
 
 // For recording memory related histograms.
 // Used to measure common KB-granularity memory stats. Range is up to 500M.
 BASE_EXPORT void UmaHistogramMemoryKB(const std::string& name, int sample);
+BASE_EXPORT void UmaHistogramMemoryKB(const char* name, int sample);
 // Used to measure common MB-granularity memory stats. Range is up to ~1G.
 BASE_EXPORT void UmaHistogramMemoryMB(const std::string& name, int sample);
+BASE_EXPORT void UmaHistogramMemoryMB(const char* name, int sample);
 // Used to measure common MB-granularity memory stats. Range is up to ~64G.
 BASE_EXPORT void UmaHistogramMemoryLargeMB(const std::string& name, int sample);
+BASE_EXPORT void UmaHistogramMemoryLargeMB(const char* name, int sample);
 
 // For recording sparse histograms.
 // The |sample| can be a negative or non-negative number.
@@ -150,8 +259,9 @@
 // many distinct values to the server (across all users). Concretely, keep the
 // number of distinct values <= 100 ideally, definitely <= 1000. If you have no
 // guarantees on the range of your data, use clamping, e.g.:
-//   UmaHistogramSparse("MyHistogram", ClampToRange(value, 0, 200));
+//   UmaHistogramSparse("My.Histogram", base::clamp(value, 0, 200));
 BASE_EXPORT void UmaHistogramSparse(const std::string& name, int sample);
+BASE_EXPORT void UmaHistogramSparse(const char* name, int sample);
 
 }  // namespace base
 
diff --git a/base/metrics/histogram_functions_unittest.cc b/base/metrics/histogram_functions_unittest.cc
index 025318f..8e7d484 100644
--- a/base/metrics/histogram_functions_unittest.cc
+++ b/base/metrics/histogram_functions_unittest.cc
@@ -1,11 +1,10 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
+// Copyright 2016 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "base/metrics/histogram_functions.h"
 
 #include "base/metrics/histogram_macros.h"
-#include "base/metrics/statistics_recorder.h"
 #include "base/test/metrics/histogram_tester.h"
 #include "base/time/time.h"
 #include "testing/gmock/include/gmock/gmock.h"
@@ -20,8 +19,6 @@
 };
 
 TEST(HistogramFunctionsTest, ExactLinear) {
-  std::unique_ptr<StatisticsRecorder> recorder =
-      StatisticsRecorder::CreateTemporaryForTesting();
   std::string histogram("Testing.UMA.HistogramExactLinear");
   HistogramTester tester;
   UmaHistogramExactLinear(histogram, 10, 100);
@@ -41,7 +38,6 @@
   tester.ExpectTotalCount(histogram, 5);
 }
 
-#if !defined(STARBOARD)
 TEST(HistogramFunctionsTest, Enumeration) {
   std::string histogram("Testing.UMA.HistogramEnumeration");
   HistogramTester tester;
@@ -57,11 +53,8 @@
       histogram, static_cast<int>(UMA_HISTOGRAM_TESTING_ENUM_THIRD) + 1, 1);
   tester.ExpectTotalCount(histogram, 2);
 }
-#endif
 
 TEST(HistogramFunctionsTest, Boolean) {
-  std::unique_ptr<StatisticsRecorder> recorder =
-      StatisticsRecorder::CreateTemporaryForTesting();
   std::string histogram("Testing.UMA.HistogramBoolean");
   HistogramTester tester;
   UmaHistogramBoolean(histogram, true);
@@ -72,57 +65,82 @@
 }
 
 TEST(HistogramFunctionsTest, Percentage) {
-  std::unique_ptr<StatisticsRecorder> recorder =
-      StatisticsRecorder::CreateTemporaryForTesting();
   std::string histogram("Testing.UMA.HistogramPercentage");
   HistogramTester tester;
-  UmaHistogramPercentage(histogram, 50);
-  tester.ExpectUniqueSample(histogram, 50, 1);
-  // Test overflows.
-  UmaHistogramPercentage(histogram, 110);
-  tester.ExpectBucketCount(histogram, 101, 1);
-  tester.ExpectTotalCount(histogram, 2);
-}
+  UmaHistogramPercentage(histogram, 1);
+  tester.ExpectBucketCount(histogram, 1, 1);
+  tester.ExpectTotalCount(histogram, 1);
 
-TEST(HistogramFunctionsTest, Counts) {
-  std::unique_ptr<StatisticsRecorder> recorder =
-      StatisticsRecorder::CreateTemporaryForTesting();
-  std::string histogram("Testing.UMA.HistogramCount.Custom");
-  HistogramTester tester;
-  UmaHistogramCustomCounts(histogram, 10, 1, 100, 10);
-  tester.ExpectUniqueSample(histogram, 10, 1);
-  UmaHistogramCustomCounts(histogram, 20, 1, 100, 10);
-  UmaHistogramCustomCounts(histogram, 20, 1, 100, 10);
-  UmaHistogramCustomCounts(histogram, 20, 1, 100, 10);
-  tester.ExpectBucketCount(histogram, 20, 3);
-  tester.ExpectTotalCount(histogram, 4);
-  UmaHistogramCustomCounts(histogram, 110, 1, 100, 10);
+  UmaHistogramPercentage(histogram, 50);
+  tester.ExpectBucketCount(histogram, 50, 1);
+  tester.ExpectTotalCount(histogram, 2);
+
+  UmaHistogramPercentage(histogram, 100);
+  tester.ExpectBucketCount(histogram, 100, 1);
+  tester.ExpectTotalCount(histogram, 3);
+  // Test overflows.
+  UmaHistogramPercentage(histogram, 101);
   tester.ExpectBucketCount(histogram, 101, 1);
+  tester.ExpectTotalCount(histogram, 4);
+
+  UmaHistogramPercentage(histogram, 500);
+  tester.ExpectBucketCount(histogram, 101, 2);
   tester.ExpectTotalCount(histogram, 5);
 }
 
+TEST(HistogramFunctionsTest, Counts) {
+  std::string histogram("Testing.UMA.HistogramCount.Custom");
+  HistogramTester tester;
+
+  // Add a sample that should go into the underflow bucket.
+  UmaHistogramCustomCounts(histogram, 0, 1, 100, 10);
+
+  // Add a sample that should go into the first bucket.
+  UmaHistogramCustomCounts(histogram, 1, 1, 100, 10);
+
+  // Add multiple samples that should go into the same bucket.
+  UmaHistogramCustomCounts(histogram, 20, 1, 100, 10);
+  UmaHistogramCustomCounts(histogram, 20, 1, 100, 10);
+  UmaHistogramCustomCounts(histogram, 21, 1, 100, 10);
+
+  // Add a sample that should go into the last bucket.
+  UmaHistogramCustomCounts(histogram, 99, 1, 100, 10);
+
+  // Add some samples that should go into the overflow bucket.
+  UmaHistogramCustomCounts(histogram, 100, 1, 100, 10);
+  UmaHistogramCustomCounts(histogram, 101, 1, 100, 10);
+
+  // Verify the number of samples.
+  tester.ExpectTotalCount(histogram, 8);
+
+  // Verify the following:
+  // (a) The underflow bucket [0, 1) contains one sample.
+  // (b) The first and last buckets each contain one sample.
+  // (c) The bucket for values in [16, 29) contains three samples.
+  // (d) The overflow bucket contains two samples.
+  EXPECT_THAT(tester.GetAllSamples(histogram),
+              testing::ElementsAre(Bucket(0, 1), Bucket(1, 1), Bucket(16, 3),
+                                   Bucket(54, 1), Bucket(100, 2)));
+}
+
 TEST(HistogramFunctionsTest, Times) {
-  std::unique_ptr<StatisticsRecorder> recorder =
-      StatisticsRecorder::CreateTemporaryForTesting();
   std::string histogram("Testing.UMA.HistogramTimes");
   HistogramTester tester;
-  UmaHistogramTimes(histogram, TimeDelta::FromSeconds(1));
-  tester.ExpectTimeBucketCount(histogram, TimeDelta::FromSeconds(1), 1);
+  UmaHistogramTimes(histogram, Seconds(1));
+  tester.ExpectTimeBucketCount(histogram, Seconds(1), 1);
   tester.ExpectTotalCount(histogram, 1);
-  UmaHistogramTimes(histogram, TimeDelta::FromSeconds(9));
-  tester.ExpectTimeBucketCount(histogram, TimeDelta::FromSeconds(9), 1);
+  UmaHistogramTimes(histogram, Seconds(9));
+  tester.ExpectTimeBucketCount(histogram, Seconds(9), 1);
   tester.ExpectTotalCount(histogram, 2);
-  UmaHistogramTimes(histogram, TimeDelta::FromSeconds(10));  // Overflows
-  tester.ExpectTimeBucketCount(histogram, TimeDelta::FromSeconds(10), 1);
-  UmaHistogramTimes(histogram, TimeDelta::FromSeconds(20));  // Overflows.
+  UmaHistogramTimes(histogram, Seconds(10));  // Overflows
+  tester.ExpectTimeBucketCount(histogram, Seconds(10), 1);
+  UmaHistogramTimes(histogram, Seconds(20));  // Overflows.
   // Check the value by picking any overflow time.
-  tester.ExpectTimeBucketCount(histogram, TimeDelta::FromSeconds(11), 2);
+  tester.ExpectTimeBucketCount(histogram, Seconds(11), 2);
   tester.ExpectTotalCount(histogram, 4);
 }
 
 TEST(HistogramFunctionsTest, Sparse_SupportsLargeRange) {
-  std::unique_ptr<StatisticsRecorder> recorder =
-      StatisticsRecorder::CreateTemporaryForTesting();
   std::string histogram("Testing.UMA.HistogramSparse");
   HistogramTester tester;
   UmaHistogramSparse(histogram, 0);
@@ -133,8 +151,6 @@
 }
 
 TEST(HistogramFunctionsTest, Sparse_SupportsNegativeValues) {
-  std::unique_ptr<StatisticsRecorder> recorder =
-      StatisticsRecorder::CreateTemporaryForTesting();
   std::string histogram("Testing.UMA.HistogramSparse");
   HistogramTester tester;
   UmaHistogramSparse(histogram, -1);
diff --git a/base/metrics/histogram_macros.h b/base/metrics/histogram_macros.h
index 72fad6a..0a3436e 100644
--- a/base/metrics/histogram_macros.h
+++ b/base/metrics/histogram_macros.h
@@ -1,25 +1,23 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #ifndef BASE_METRICS_HISTOGRAM_MACROS_H_
 #define BASE_METRICS_HISTOGRAM_MACROS_H_
 
-#include "base/macros.h"
+#include "base/check_op.h"
 #include "base/metrics/histogram.h"
 #include "base/metrics/histogram_macros_internal.h"
 #include "base/metrics/histogram_macros_local.h"
 #include "base/time/time.h"
 
+
 // Macros for efficient use of histograms.
 //
 // For best practices on deciding when to emit to a histogram and what form
 // the histogram should take, see
 // https://chromium.googlesource.com/chromium/src.git/+/HEAD/tools/metrics/histograms/README.md
 
-// TODO(rkaplow): Link to proper documentation on metric creation once we have
-// it in a good state.
-
 // All of these macros must be called with |name| as a runtime constant - it
 // doesn't have to literally be a constant, but it must be the same string on
 // all calls from a particular call site. If this rule is violated, it is
@@ -42,8 +40,9 @@
 // having to handle a sentinel no-op value.
 //
 // Sample usage:
-//   // These values are persisted to logs. Entries should not be renumbered and
-//   // numeric values should never be reused.
+//   // These values are logged to UMA. Entries should not be renumbered and
+//   // numeric values should never be reused. Please keep in sync with "MyEnum"
+//   // in src/tools/metrics/histograms/enums.xml.
 //   enum class MyEnum {
 //     kFirstValue = 0,
 //     kSecondValue = 1,
@@ -55,12 +54,14 @@
 //
 // The second variant requires three arguments: the first two are the same as
 // before, and the third argument is the enum boundary: this must be strictly
-// greater than any other enumerator that will be sampled.
+// greater than any other enumerator that will be sampled. This only works for
+// enums with a fixed underlying type.
 //
 // Sample usage:
-//   // These values are persisted to logs. Entries should not be renumbered and
-//   // numeric values should never be reused.
-//   enum class MyEnum {
+//   // These values are logged to UMA. Entries should not be renumbered and
+//   // numeric values should never be reused. Please keep in sync with "MyEnum"
+//   // in src/tools/metrics/histograms/enums.xml.
+//   enum class MyEnum : uint8_t {
 //     FIRST_VALUE = 0,
 //     SECOND_VALUE = 1,
 //     ...
@@ -75,16 +76,24 @@
 // example). For scoped enums, this is awkward since it requires casting the
 // enum to an arithmetic type and adding one. Instead, prefer the two argument
 // version of the macro which automatically deduces the boundary from kMaxValue.
+#define CR_EXPAND_ARG(arg) arg
 #define UMA_HISTOGRAM_ENUMERATION(name, ...)                            \
-  CR_EXPAND_ARG(INTERNAL_UMA_HISTOGRAM_ENUMERATION_GET_MACRO(           \
+  CR_EXPAND_ARG(INTERNAL_UMA_HISTOGRAM_ENUMERATION_GET_MACRO(           \
       __VA_ARGS__, INTERNAL_UMA_HISTOGRAM_ENUMERATION_SPECIFY_BOUNDARY, \
-      INTERNAL_UMA_HISTOGRAM_ENUMERATION_DEDUCE_BOUNDARY)(              \
-      name, __VA_ARGS__, base::HistogramBase::kUmaTargetedHistogramFlag))
+      INTERNAL_UMA_HISTOGRAM_ENUMERATION_DEDUCE_BOUNDARY)               \
+  (name, __VA_ARGS__, base::HistogramBase::kUmaTargetedHistogramFlag))
 
 // As above but "scaled" count to avoid overflows caused by increments of
 // large amounts. See UMA_HISTOGRAM_SCALED_EXACT_LINEAR for more information.
 // Only the new format utilizing an internal kMaxValue is supported.
 // It'll be necessary to #include "base/lazy_instance.h" to use this macro.
+//   name: Full constant name of the histogram (must not change between calls).
+//   sample: Bucket to be incremented.
+//   count: Amount by which to increment.
+//   scale: Amount by which |count| is divided.
+//
+// Sample usage:
+//    UMA_HISTOGRAM_SCALED_ENUMERATION("FooKiB", kEnumValue, byte_count, 1024)
 #define UMA_HISTOGRAM_SCALED_ENUMERATION(name, sample, count, scale) \
   INTERNAL_HISTOGRAM_SCALED_ENUMERATION_WITH_FLAG(                   \
       name, sample, count, scale,                                    \
@@ -104,16 +113,22 @@
 
 // All of these macros must be called with |name| as a runtime constant.
 
-// Used for capturing integer data with a linear bucketing scheme. This can be
-// used when you want the exact value of some small numeric count, with a max of
-// 100 or less. If you need to capture a range of greater than 100, we recommend
-// the use of the COUNT histograms below.
-
+// For numeric measurements where you want exact integer values up to
+// |exclusive_max|. |exclusive_max| itself is included in the overflow bucket.
+// Therefore, if you want an accurate measure up to kMax, then |exclusive_max|
+// should be set to kMax + 1.
+//
+// |exclusive_max| should be 101 or less. If you need to capture a larger range,
+// we recommend the use of the COUNT histograms below.
+//
 // Sample usage:
-//   UMA_HISTOGRAM_EXACT_LINEAR("Histogram.Linear", count, 10);
-#define UMA_HISTOGRAM_EXACT_LINEAR(name, sample, value_max) \
-  INTERNAL_HISTOGRAM_EXACT_LINEAR_WITH_FLAG(                \
-      name, sample, value_max, base::HistogramBase::kUmaTargetedHistogramFlag)
+//   base::UmaHistogramExactLinear("Histogram.Linear", sample, kMax + 1);
+// In this case, buckets are 1, 2, .., kMax, kMax+1, where the kMax+1 bucket
+// captures everything kMax+1 and above.
+#define UMA_HISTOGRAM_EXACT_LINEAR(name, sample, exclusive_max) \
+  INTERNAL_HISTOGRAM_EXACT_LINEAR_WITH_FLAG(                    \
+      name, sample, exclusive_max,                              \
+      base::HistogramBase::kUmaTargetedHistogramFlag)
 
 // Used for capturing basic percentages. This will be 100 buckets of size 1.
 
@@ -123,17 +138,27 @@
   UMA_HISTOGRAM_EXACT_LINEAR(name, percent_as_int, 101)
 
 //------------------------------------------------------------------------------
-// Scaled Linear histograms.
+// Scaled linear histograms.
 
 // These take |count| and |scale| parameters to allow cumulative reporting of
-// large numbers. Only the scaled count is reported but the reminder is kept so
-// multiple calls will accumulate correctly.  Only "exact linear" is supported.
+// large numbers. For example, code might pass a count of 1825 bytes and a scale
+// of 1024 bytes to report values in kilobytes. Only the scaled count is
+// reported, but the remainder is tracked between calls, so that multiple calls
+// will accumulate correctly.
 // It'll be necessary to #include "base/lazy_instance.h" to use this macro.
+//   name: Full constant name of the histogram (must not change between calls).
+//   sample: Bucket to be incremented.
+//   count: Amount by which to increment.
+//   sample_max: Maximum (exclusive) allowed sample value.
+//   scale: Amount by which |count| is divided.
 
-#define UMA_HISTOGRAM_SCALED_EXACT_LINEAR(name, sample, count, value_max, \
-                                          scale)                          \
-  INTERNAL_HISTOGRAM_SCALED_EXACT_LINEAR_WITH_FLAG(                       \
-      name, sample, count, value_max, scale,                              \
+// Sample usage:
+//    UMA_HISTOGRAM_SCALED_EXACT_LINEAR("FooKiB", bucket_no, byte_count,
+//                                      kBucketsMax + 1, 1024)
+#define UMA_HISTOGRAM_SCALED_EXACT_LINEAR(name, sample, count, sample_max, \
+                                          scale)                           \
+  INTERNAL_HISTOGRAM_SCALED_EXACT_LINEAR_WITH_FLAG(                        \
+      name, sample, count, sample_max, scale,                              \
       base::HistogramBase::kUmaTargetedHistogramFlag)
 
 //------------------------------------------------------------------------------
@@ -174,19 +199,18 @@
 #define UMA_HISTOGRAM_COUNTS_10M(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS(    \
     name, sample, 1, 10000000, 50)
 
-// This can be used when the default ranges are not sufficient. This macro lets
-// the metric developer customize the min and max of the sampled range, as well
-// as the number of buckets recorded.
-// Any data outside the range here will be put in underflow and overflow
-// buckets. Min values should be >=1 as emitted 0s will still go into the
-// underflow bucket.
+// This macro allows the min, max, and number of buckets to be customized. Any
+// samples whose values are outside of [min, exclusive_max-1] are put in the
+// underflow or overflow buckets. Note that |min| should be >=1 as emitted 0s go
+// into the underflow bucket.
 
 // Sample usage:
-//   UMA_HISTOGRAM_CUSTOM_COUNTS("My.Histogram", 1, 100000000, 100);
-#define UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count)      \
-    INTERNAL_HISTOGRAM_CUSTOM_COUNTS_WITH_FLAG(                                \
-        name, sample, min, max, bucket_count,                                  \
-        base::HistogramBase::kUmaTargetedHistogramFlag)
+//   UMA_HISTOGRAM_CUSTOM_COUNTS("My.Histogram", sample, 1, 100000000, 50);
+#define UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, min, exclusive_max, \
+                                    bucket_count)                     \
+  INTERNAL_HISTOGRAM_CUSTOM_COUNTS_WITH_FLAG(                         \
+      name, sample, min, exclusive_max, bucket_count,                 \
+      base::HistogramBase::kUmaTargetedHistogramFlag)
 
 //------------------------------------------------------------------------------
 // Timing histograms. These are used for collecting timing data (generally
@@ -202,25 +226,25 @@
 
 // Short timings - up to 10 seconds. For high-resolution (microseconds) timings,
 // see UMA_HISTOGRAM_CUSTOM_MICROSECONDS_TIMES.
-#define UMA_HISTOGRAM_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES(          \
-    name, sample, base::TimeDelta::FromMilliseconds(1),                        \
-    base::TimeDelta::FromSeconds(10), 50)
+#define UMA_HISTOGRAM_TIMES(name, sample)                         \
+  UMA_HISTOGRAM_CUSTOM_TIMES(name, sample, base::Milliseconds(1), \
+                             base::Seconds(10), 50)
 
 // Medium timings - up to 3 minutes. Note this starts at 10ms (no good reason,
 // but not worth changing).
-#define UMA_HISTOGRAM_MEDIUM_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES(   \
-    name, sample, base::TimeDelta::FromMilliseconds(10),                       \
-    base::TimeDelta::FromMinutes(3), 50)
+#define UMA_HISTOGRAM_MEDIUM_TIMES(name, sample)                   \
+  UMA_HISTOGRAM_CUSTOM_TIMES(name, sample, base::Milliseconds(10), \
+                             base::Minutes(3), 50)
 
 // Long timings - up to an hour.
-#define UMA_HISTOGRAM_LONG_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES(     \
-    name, sample, base::TimeDelta::FromMilliseconds(1),                        \
-    base::TimeDelta::FromHours(1), 50)
+#define UMA_HISTOGRAM_LONG_TIMES(name, sample)                    \
+  UMA_HISTOGRAM_CUSTOM_TIMES(name, sample, base::Milliseconds(1), \
+                             base::Hours(1), 50)
 
 // Long timings with higher granularity - up to an hour with 100 buckets.
-#define UMA_HISTOGRAM_LONG_TIMES_100(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
-    name, sample, base::TimeDelta::FromMilliseconds(1),                        \
-    base::TimeDelta::FromHours(1), 100)
+#define UMA_HISTOGRAM_LONG_TIMES_100(name, sample)                \
+  UMA_HISTOGRAM_CUSTOM_TIMES(name, sample, base::Milliseconds(1), \
+                             base::Hours(1), 100)
 
 // This can be used when the default ranges are not sufficient. This macro lets
 // the metric developer customize the min and max of the sampled range, as well
@@ -228,7 +252,7 @@
 
 // Sample usage:
 //   UMA_HISTOGRAM_CUSTOM_TIMES("Very.Long.Timing.Histogram", time_delta,
-//       base::TimeDelta::FromSeconds(1), base::TimeDelta::FromDays(1), 100);
+//       base::Seconds(1), base::Days(1), 100);
 #define UMA_HISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count) \
   STATIC_HISTOGRAM_POINTER_BLOCK(                                        \
       name, AddTimeMillisecondsGranularity(sample),                      \
@@ -246,8 +270,8 @@
 // Sample usage:
 //  UMA_HISTOGRAM_CUSTOM_MICROSECONDS_TIMES(
 //      "High.Resolution.TimingMicroseconds.Histogram", time_delta,
-//      base::TimeDelta::FromMicroseconds(1),
-//      base::TimeDelta::FromMilliseconds(10), 100);
+//      base::Microseconds(1),
+//      base::Milliseconds(10), 100);
 #define UMA_HISTOGRAM_CUSTOM_MICROSECONDS_TIMES(name, sample, min, max, \
                                                 bucket_count)           \
   STATIC_HISTOGRAM_POINTER_BLOCK(                                       \
@@ -256,25 +280,38 @@
           name, min, max, bucket_count,                                 \
           base::HistogramBase::kUmaTargetedHistogramFlag))
 
-// Scoped class which logs its time on this earth as a UMA statistic. This is
-// recommended for when you want a histogram which measures the time it takes
-// for a method to execute. This measures up to 10 seconds. It uses
-// UMA_HISTOGRAM_TIMES under the hood.
+// Scoped class which logs its time on this earth in milliseconds as a UMA
+// statistic. This is recommended for when you want a histogram which measures
+// the time it takes for a method to execute. This measures up to 10 seconds. It
+// uses UMA_HISTOGRAM_TIMES under the hood.
 
 // Sample usage:
 //   void Function() {
 //     SCOPED_UMA_HISTOGRAM_TIMER("Component.FunctionTime");
 //     ...
 //   }
-#define SCOPED_UMA_HISTOGRAM_TIMER(name)                                       \
-  INTERNAL_SCOPED_UMA_HISTOGRAM_TIMER_EXPANDER(name, false, __COUNTER__)
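+// Selects which underlying UMA timing macro a scoped timer records to; see
+// the SCOPED_UMA_HISTOGRAM_TIMER* macros below.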
+enum class ScopedHistogramTiming {
+  kMicrosecondTimes,
+  kMediumTimes,
+  kLongTimes
+};
+#define SCOPED_UMA_HISTOGRAM_TIMER(name)        \
+  INTERNAL_SCOPED_UMA_HISTOGRAM_TIMER_EXPANDER( \
+      name, ScopedHistogramTiming::kMediumTimes, __COUNTER__)
 
 // Similar scoped histogram timer, but this uses UMA_HISTOGRAM_LONG_TIMES_100,
 // which measures up to an hour, and uses 100 buckets. This is more expensive
 // to store, so only use if this often takes >10 seconds.
-#define SCOPED_UMA_HISTOGRAM_LONG_TIMER(name)                                  \
-  INTERNAL_SCOPED_UMA_HISTOGRAM_TIMER_EXPANDER(name, true, __COUNTER__)
+#define SCOPED_UMA_HISTOGRAM_LONG_TIMER(name)   \
+  INTERNAL_SCOPED_UMA_HISTOGRAM_TIMER_EXPANDER( \
+      name, ScopedHistogramTiming::kLongTimes, __COUNTER__)
 
+// Similar scoped histogram timer, but this uses
+// UMA_HISTOGRAM_CUSTOM_MICROSECONDS_TIMES, measuring from 1 microsecond to 1
+// second, with 50 buckets.
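+//
+// Sample usage (hypothetical histogram name):
+//   void Function() {
+//     SCOPED_UMA_HISTOGRAM_TIMER_MICROS("Component.FunctionTimeMicros");
+//     ...
+//   }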
+#define SCOPED_UMA_HISTOGRAM_TIMER_MICROS(name) \
+  INTERNAL_SCOPED_UMA_HISTOGRAM_TIMER_EXPANDER( \
+      name, ScopedHistogramTiming::kMicrosecondTimes, __COUNTER__)
 
 //------------------------------------------------------------------------------
 // Memory histograms.
@@ -292,6 +329,11 @@
 #define UMA_HISTOGRAM_MEMORY_KB(name, sample)                                  \
     UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1000, 500000, 50)
 
+// Used to measure common MB-granularity memory stats. Range is up to 4000 MiB,
+// approximately 4 GiB.
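+//
+// Sample usage (hypothetical name and variable):
+//   UMA_HISTOGRAM_MEMORY_MEDIUM_MB("Memory.Component.PrivateMB", size_in_mb);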
+#define UMA_HISTOGRAM_MEMORY_MEDIUM_MB(name, sample) \
+  UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 4000, 100)
+
 // Used to measure common MB-granularity memory stats. Range is up to ~64G.
 #define UMA_HISTOGRAM_MEMORY_LARGE_MB(name, sample)                            \
     UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 64000, 100)
@@ -307,6 +349,12 @@
 
 // For details on usage, see the documentation on the non-stability equivalents.
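+//
+// Sample usage of the BOOLEAN variant (hypothetical name and variable):
+//   UMA_STABILITY_HISTOGRAM_BOOLEAN("Stability.Component.Succeeded", success);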
 
+#define UMA_STABILITY_HISTOGRAM_BOOLEAN(name, sample) \
+  STATIC_HISTOGRAM_POINTER_BLOCK(                     \
+      name, AddBoolean(sample),                       \
+      base::BooleanHistogram::FactoryGet(             \
+          name, base::HistogramBase::kUmaStabilityHistogramFlag))
+
 #define UMA_STABILITY_HISTOGRAM_COUNTS_100(name, sample)                       \
     UMA_STABILITY_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 100, 50)
 
@@ -316,10 +364,43 @@
         name, sample, min, max, bucket_count,                                  \
         base::HistogramBase::kUmaStabilityHistogramFlag)
 
-#define UMA_STABILITY_HISTOGRAM_ENUMERATION(name, sample, enum_max)            \
-    INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG(                                  \
-        name, sample, enum_max,                                                \
-        base::HistogramBase::kUmaStabilityHistogramFlag)
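+// Sample usage (illustrative; the enum is assumed to define kMaxValue):
+//   UMA_STABILITY_HISTOGRAM_ENUMERATION("Stability.MyEnum", MyEnum::kValue);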
+#define UMA_STABILITY_HISTOGRAM_ENUMERATION(name, ...)                  \
+  CR_EXPAND_ARG(INTERNAL_UMA_HISTOGRAM_ENUMERATION_GET_MACRO(           \
+      __VA_ARGS__, INTERNAL_UMA_HISTOGRAM_ENUMERATION_SPECIFY_BOUNDARY, \
+      INTERNAL_UMA_HISTOGRAM_ENUMERATION_DEDUCE_BOUNDARY)               \
+  (name, __VA_ARGS__, base::HistogramBase::kUmaStabilityHistogramFlag))
+
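+// Sample usage (hypothetical name and variable):
+//   UMA_STABILITY_HISTOGRAM_LONG_TIMES("Stability.Session.Length", elapsed);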
+#define UMA_STABILITY_HISTOGRAM_LONG_TIMES(name, sample)   \
+  STATIC_HISTOGRAM_POINTER_BLOCK(                          \
+      name, AddTimeMillisecondsGranularity(sample),        \
+      base::Histogram::FactoryTimeGet(                     \
+          name, base::Milliseconds(1), base::Hours(1), 50, \
+          base::HistogramBase::kUmaStabilityHistogramFlag))
+
+#define UMA_STABILITY_HISTOGRAM_PERCENTAGE(name, percent_as_int) \
+  INTERNAL_HISTOGRAM_EXACT_LINEAR_WITH_FLAG(                     \
+      name, percent_as_int, 101,                                 \
+      base::HistogramBase::kUmaStabilityHistogramFlag)
+
+//------------------------------------------------------------------------------
+// Sparse histograms.
+//
+// The |sample| can be any 32-bit integer (negative or non-negative).
+//
+// Sparse histograms are well suited for recording counts of exact sample values
+// that are sparsely distributed over a relatively large range, in cases where
+// ultra-fast performance is not critical. For instance, Sqlite.Version.* are
+// sparse because for any given database, there's going to be exactly one
+// version logged.
+//
+// For important details on performance, data size, and usage, see the
+// documentation on the regular function equivalents (histogram_functions.h).
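+//
+// Sample usage (hypothetical variable):
+//   UMA_HISTOGRAM_SPARSE("Sqlite.Version", sqlite_version);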
+#define UMA_HISTOGRAM_SPARSE(name, sample) \
+  STATIC_HISTOGRAM_POINTER_BLOCK(          \
+      name, Add(sample),                   \
+      base::SparseHistogram::FactoryGet(   \
+          name, base::HistogramBase::kUmaTargetedHistogramFlag))
 
 //------------------------------------------------------------------------------
 // Histogram instantiation helpers.
@@ -341,17 +422,16 @@
 // instance will be used only for DCHECK builds and the second will
 // execute only during the first access to the given index, after which
 // the pointer is cached and the name never needed again.
-#define STATIC_HISTOGRAM_POINTER_GROUP(constant_histogram_name, index,        \
-                                       constant_maximum,                      \
-                                       histogram_add_method_invocation,       \
-                                       histogram_factory_get_invocation)      \
-  do {                                                                        \
-    static base::subtle::AtomicWord atomic_histograms[constant_maximum];      \
-    DCHECK_LE(0, index);                                                      \
-    DCHECK_LT(index, constant_maximum);                                       \
-    HISTOGRAM_POINTER_USE(&atomic_histograms[index], constant_histogram_name, \
-                          histogram_add_method_invocation,                    \
-                          histogram_factory_get_invocation);                  \
+#define STATIC_HISTOGRAM_POINTER_GROUP(                                     \
+    constant_histogram_name, index, constant_maximum,                       \
+    histogram_add_method_invocation, histogram_factory_get_invocation)      \
+  do {                                                                      \
+    static std::atomic_uintptr_t atomic_histograms[constant_maximum];       \
+    DCHECK_LE(0, index);                                                    \
+    DCHECK_LT(index, constant_maximum);                                     \
+    HISTOGRAM_POINTER_USE(                                                  \
+        std::addressof(atomic_histograms[index]), constant_histogram_name,  \
+        histogram_add_method_invocation, histogram_factory_get_invocation); \
   } while (0)
 
 //------------------------------------------------------------------------------
diff --git a/base/metrics/histogram_macros_internal.h b/base/metrics/histogram_macros_internal.h
index 084d3ed..a17ffd8 100644
--- a/base/metrics/histogram_macros_internal.h
+++ b/base/metrics/histogram_macros_internal.h
@@ -1,19 +1,21 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
+// Copyright 2016 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #ifndef BASE_METRICS_HISTOGRAM_MACROS_INTERNAL_H_
 #define BASE_METRICS_HISTOGRAM_MACROS_INTERNAL_H_
 
+#include <stdint.h>
+
+#include <atomic>
 #include <limits>
+#include <memory>
 #include <type_traits>
 
-#include "base/atomicops.h"
-#include "base/logging.h"
+#include "base/dcheck_is_on.h"
 #include "base/metrics/histogram.h"
 #include "base/metrics/sparse_histogram.h"
 #include "base/time/time.h"
-#include "starboard/types.h"
 
 // This is for macros and helpers internal to base/metrics. They should not be
 // used outside of this directory. For writing to UMA histograms, see
@@ -39,9 +41,13 @@
 struct EnumSizeTraits<
     Enum,
     std::enable_if_t<std::is_enum<decltype(Enum::kMaxValue)>::value>> {
-  using underlying_type = typename std::underlying_type<Enum>::type;
   static constexpr Enum Count() {
-    return static_cast<Enum>(static_cast<underlying_type>(Enum::kMaxValue) + 1);
+    // If you're getting
+    //   note: integer value X is outside the valid range of values [0, X] for
+    //         this enumeration type
+    // then you need to give your enum a fixed underlying type.
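+    //
+    // For example (illustrative):
+    //   enum class MyEnum : int32_t { kValue = 0, kMaxValue = kValue };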
+    return static_cast<Enum>(
+        static_cast<std::underlying_type_t<Enum>>(Enum::kMaxValue) + 1);
   }
 };
 
@@ -64,60 +70,51 @@
 // define HISTOGRAM_POINTER_USE, which uses an |atomic_histogram_pointer|, and
 // STATIC_HISTOGRAM_POINTER_BLOCK, which defines an |atomic_histogram_pointer|
 // and forwards to HISTOGRAM_POINTER_USE.
-#define HISTOGRAM_POINTER_USE(atomic_histogram_pointer,                        \
-                              constant_histogram_name,                         \
-                              histogram_add_method_invocation,                 \
-                              histogram_factory_get_invocation)                \
-  do {                                                                         \
-    /*                                                                         \
-     * Acquire_Load() ensures that we acquire visibility to the                \
-     * pointed-to data in the histogram.                                       \
-     */                                                                        \
-    base::HistogramBase* histogram_pointer(                                    \
-        reinterpret_cast<base::HistogramBase*>(                                \
-            base::subtle::Acquire_Load(atomic_histogram_pointer)));            \
-    if (!histogram_pointer) {                                                  \
-      /*                                                                       \
-       * This is the slow path, which will construct OR find the               \
-       * matching histogram.  histogram_factory_get_invocation includes        \
-       * locks on a global histogram name map and is completely thread         \
-       * safe.                                                                 \
-       */                                                                      \
-      histogram_pointer = histogram_factory_get_invocation;                    \
-                                                                               \
-      /*                                                                       \
-       * Use Release_Store to ensure that the histogram data is made           \
-       * available globally before we make the pointer visible. Several        \
-       * threads may perform this store, but the same value will be            \
-       * stored in all cases (for a given named/spec'ed histogram).            \
-       * We could do this without any barrier, since FactoryGet entered        \
-       * and exited a lock after construction, but this barrier makes          \
-       * things clear.                                                         \
-       */                                                                      \
-      base::subtle::Release_Store(                                             \
-          atomic_histogram_pointer,                                            \
-          reinterpret_cast<base::subtle::AtomicWord>(histogram_pointer));      \
-    }                                                                          \
-    if (DCHECK_IS_ON())                                                        \
-      histogram_pointer->CheckName(constant_histogram_name);                   \
-    histogram_pointer->histogram_add_method_invocation;                        \
+#define HISTOGRAM_POINTER_USE(                                           \
+    atomic_histogram_pointer, constant_histogram_name,                   \
+    histogram_add_method_invocation, histogram_factory_get_invocation)   \
+  do {                                                                   \
+    base::HistogramBase* histogram_pointer(                              \
+        reinterpret_cast<base::HistogramBase*>(                          \
+            atomic_histogram_pointer->load(std::memory_order_acquire))); \
+    if (!histogram_pointer) {                                            \
+      /*                                                                 \
+       * This is the slow path, which will construct OR find the         \
+       * matching histogram. |histogram_factory_get_invocation| includes \
+       * locks on a global histogram name map and is completely thread   \
+       * safe.                                                           \
+       */                                                                \
+      histogram_pointer = histogram_factory_get_invocation;              \
+                                                                         \
+      /*                                                                 \
+       * We could do this without any barrier, since FactoryGet()        \
+       * entered and exited a lock after construction, but this barrier  \
+       * makes things clear.                                             \
+       */                                                                \
+      atomic_histogram_pointer->store(                                   \
+          reinterpret_cast<uintptr_t>(histogram_pointer),                \
+          std::memory_order_release);                                    \
+    }                                                                    \
+    if (DCHECK_IS_ON())                                                  \
+      histogram_pointer->CheckName(constant_histogram_name);             \
+    histogram_pointer->histogram_add_method_invocation;                  \
   } while (0)
 
 // This is a helper macro used by other macros and shouldn't be used directly.
 // Defines the static |atomic_histogram_pointer| and forwards to
 // HISTOGRAM_POINTER_USE.
-#define STATIC_HISTOGRAM_POINTER_BLOCK(constant_histogram_name,                \
-                                       histogram_add_method_invocation,        \
-                                       histogram_factory_get_invocation)       \
-  do {                                                                         \
-    /*                                                                         \
-     * The pointer's presence indicates that the initialization is complete.   \
-     * Initialization is idempotent, so it can safely be atomically repeated.  \
-     */                                                                        \
-    static base::subtle::AtomicWord atomic_histogram_pointer = 0;              \
-    HISTOGRAM_POINTER_USE(&atomic_histogram_pointer, constant_histogram_name,  \
-                          histogram_add_method_invocation,                     \
-                          histogram_factory_get_invocation);                   \
+#define STATIC_HISTOGRAM_POINTER_BLOCK(constant_histogram_name,               \
+                                       histogram_add_method_invocation,       \
+                                       histogram_factory_get_invocation)      \
+  do {                                                                        \
+    /*                                                                        \
+     * The pointer's presence indicates that the initialization is complete.  \
+     * Initialization is idempotent, so it can safely be atomically repeated. \
+     */                                                                       \
+    static std::atomic_uintptr_t atomic_histogram_pointer;                    \
+    HISTOGRAM_POINTER_USE(                                                    \
+        std::addressof(atomic_histogram_pointer), constant_histogram_name,    \
+        histogram_add_method_invocation, histogram_factory_get_invocation);   \
   } while (0)
 
 // This is a helper macro used by other macros and shouldn't be used directly.
@@ -144,9 +141,9 @@
 #define INTERNAL_HISTOGRAM_EXACT_LINEAR_WITH_FLAG(name, sample, boundary,  \
                                                   flag)                    \
   do {                                                                     \
-    static_assert(!std::is_enum<decltype(sample)>::value,                  \
+    static_assert(!std::is_enum<std::decay_t<decltype(sample)>>::value,    \
                   "|sample| should not be an enum type!");                 \
-    static_assert(!std::is_enum<decltype(boundary)>::value,                \
+    static_assert(!std::is_enum<std::decay_t<decltype(boundary)>>::value,  \
                   "|boundary| should not be an enum type!");               \
     STATIC_HISTOGRAM_POINTER_BLOCK(                                        \
         name, Add(sample),                                                 \
@@ -160,9 +157,9 @@
 #define INTERNAL_HISTOGRAM_SCALED_EXACT_LINEAR_WITH_FLAG(                      \
     name, sample, count, boundary, scale, flag)                                \
   do {                                                                         \
-    static_assert(!std::is_enum<decltype(sample)>::value,                      \
+    static_assert(!std::is_enum<std::decay_t<decltype(sample)>>::value,        \
                   "|sample| should not be an enum type!");                     \
-    static_assert(!std::is_enum<decltype(boundary)>::value,                    \
+    static_assert(!std::is_enum<std::decay_t<decltype(boundary)>>::value,      \
                   "|boundary| should not be an enum type!");                   \
     class ScaledLinearHistogramInstance : public base::ScaledLinearHistogram { \
      public:                                                                   \
@@ -185,7 +182,8 @@
 #define INTERNAL_UMA_HISTOGRAM_ENUMERATION_DEDUCE_BOUNDARY(name, sample,       \
                                                            flags)              \
   INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG(                                    \
-      name, sample, base::internal::EnumSizeTraits<decltype(sample)>::Count(), \
+      name, sample,                                                            \
+      base::internal::EnumSizeTraits<std::decay_t<decltype(sample)>>::Count(), \
       flags)
 
 // Note: The value in |sample| must be strictly less than |enum_size|.
@@ -230,8 +228,8 @@
     using decayed_sample = std::decay<decltype(sample)>::type;               \
     static_assert(std::is_enum<decayed_sample>::value,                       \
                   "Unexpected: |sample| is not at enum.");                   \
-    constexpr auto boundary =                                                \
-        base::internal::EnumSizeTraits<decltype(sample)>::Count();           \
+    constexpr auto boundary = base::internal::EnumSizeTraits<                \
+        std::decay_t<decltype(sample)>>::Count();                            \
     static_assert(                                                           \
         static_cast<uintmax_t>(boundary) <                                   \
             static_cast<uintmax_t>(                                          \
@@ -244,24 +242,32 @@
 
 // This is a helper macro used by other macros and shouldn't be used directly.
 // This is necessary to expand __COUNTER__ to an actual value.
-#define INTERNAL_SCOPED_UMA_HISTOGRAM_TIMER_EXPANDER(name, is_long, key)       \
-  INTERNAL_SCOPED_UMA_HISTOGRAM_TIMER_UNIQUE(name, is_long, key)
+#define INTERNAL_SCOPED_UMA_HISTOGRAM_TIMER_EXPANDER(name, timing, key) \
+  INTERNAL_SCOPED_UMA_HISTOGRAM_TIMER_UNIQUE(name, timing, key)
 
 // This is a helper macro used by other macros and shouldn't be used directly.
-#define INTERNAL_SCOPED_UMA_HISTOGRAM_TIMER_UNIQUE(name, is_long, key)         \
-  class ScopedHistogramTimer##key {                                            \
-   public:                                                                     \
-    ScopedHistogramTimer##key() : constructed_(base::TimeTicks::Now()) {}      \
-    ~ScopedHistogramTimer##key() {                                             \
-      base::TimeDelta elapsed = base::TimeTicks::Now() - constructed_;         \
-      if (is_long) {                                                           \
-        UMA_HISTOGRAM_LONG_TIMES_100(name, elapsed);                           \
-      } else {                                                                 \
-        UMA_HISTOGRAM_TIMES(name, elapsed);                                    \
-      }                                                                        \
-    }                                                                          \
-   private:                                                                    \
-    base::TimeTicks constructed_;                                              \
+#define INTERNAL_SCOPED_UMA_HISTOGRAM_TIMER_UNIQUE(name, timing, key)      \
+  class ScopedHistogramTimer##key {                                        \
+   public:                                                                 \
+    ScopedHistogramTimer##key() : constructed_(base::TimeTicks::Now()) {}  \
+    ~ScopedHistogramTimer##key() {                                         \
+      base::TimeDelta elapsed = base::TimeTicks::Now() - constructed_;     \
+      switch (timing) {                                                    \
+        case ScopedHistogramTiming::kMicrosecondTimes:                     \
+          UMA_HISTOGRAM_CUSTOM_MICROSECONDS_TIMES(                         \
+              name, elapsed, base::Microseconds(1), base::Seconds(1), 50); \
+          break;                                                           \
+        case ScopedHistogramTiming::kMediumTimes:                          \
+          UMA_HISTOGRAM_TIMES(name, elapsed);                              \
+          break;                                                           \
+        case ScopedHistogramTiming::kLongTimes:                            \
+          UMA_HISTOGRAM_LONG_TIMES_100(name, elapsed);                     \
+          break;                                                           \
+      }                                                                    \
+    }                                                                      \
+                                                                           \
+   private:                                                                \
+    base::TimeTicks constructed_;                                          \
   } scoped_histogram_timer_##key
 
 #endif  // BASE_METRICS_HISTOGRAM_MACROS_INTERNAL_H_
diff --git a/base/metrics/histogram_macros_local.h b/base/metrics/histogram_macros_local.h
index c4d333b..2a2f29f 100644
--- a/base/metrics/histogram_macros_local.h
+++ b/base/metrics/histogram_macros_local.h
@@ -1,11 +1,10 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
+// Copyright 2016 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #ifndef BASE_METRICS_HISTOGRAM_MACROS_LOCAL_H_
 #define BASE_METRICS_HISTOGRAM_MACROS_LOCAL_H_
 
-#include "base/logging.h"
 #include "base/metrics/histogram.h"
 #include "base/metrics/histogram_macros_internal.h"
 #include "base/time/time.h"
@@ -18,11 +17,12 @@
 //
 // For usage details, see the equivalents in histogram_macros.h.
 
+#define CR_GET_ARG(arg) arg
 #define LOCAL_HISTOGRAM_ENUMERATION(name, ...)                          \
-  CR_EXPAND_ARG(INTERNAL_UMA_HISTOGRAM_ENUMERATION_GET_MACRO(           \
+  CR_GET_ARG(INTERNAL_UMA_HISTOGRAM_ENUMERATION_GET_MACRO(              \
       __VA_ARGS__, INTERNAL_UMA_HISTOGRAM_ENUMERATION_SPECIFY_BOUNDARY, \
-      INTERNAL_UMA_HISTOGRAM_ENUMERATION_DEDUCE_BOUNDARY)(              \
-      name, __VA_ARGS__, base::HistogramBase::kNoFlags))
+      INTERNAL_UMA_HISTOGRAM_ENUMERATION_DEDUCE_BOUNDARY)               \
+  (name, __VA_ARGS__, base::HistogramBase::kNoFlags))
 
 #define LOCAL_HISTOGRAM_BOOLEAN(name, sample)                                  \
     STATIC_HISTOGRAM_POINTER_BLOCK(name, AddBoolean(sample),                   \
@@ -60,9 +60,9 @@
 //
 // For usage details, see the equivalents in histogram_macros.h.
 
-#define LOCAL_HISTOGRAM_TIMES(name, sample) LOCAL_HISTOGRAM_CUSTOM_TIMES(      \
-    name, sample, base::TimeDelta::FromMilliseconds(1),                        \
-    base::TimeDelta::FromSeconds(10), 50)
+#define LOCAL_HISTOGRAM_TIMES(name, sample)                         \
+  LOCAL_HISTOGRAM_CUSTOM_TIMES(name, sample, base::Milliseconds(1), \
+                               base::Seconds(10), 50)
 
 #define LOCAL_HISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count) \
   STATIC_HISTOGRAM_POINTER_BLOCK(                                          \
@@ -70,6 +70,12 @@
       base::Histogram::FactoryTimeGet(name, min, max, bucket_count,        \
                                       base::HistogramBase::kNoFlags))
 
+#define LOCAL_HISTOGRAM_CUSTOM_MICROSECONDS_TIMES(name, sample, min, max, \
+                                                  bucket_count)           \
+  STATIC_HISTOGRAM_POINTER_BLOCK(                                         \
+      name, AddTimeMicrosecondsGranularity(sample),                       \
+      base::Histogram::FactoryMicrosecondsTimeGet(                        \
+          name, min, max, bucket_count, base::HistogramBase::kNoFlags))
+
 //------------------------------------------------------------------------------
 // Memory histograms.
 //
diff --git a/base/metrics/histogram_macros_unittest.cc b/base/metrics/histogram_macros_unittest.cc
index 3c592b0..20a611f 100644
--- a/base/metrics/histogram_macros_unittest.cc
+++ b/base/metrics/histogram_macros_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
+// Copyright 2015 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -8,7 +8,9 @@
 
 namespace base {
 
-TEST(ScopedHistogramTimer, TwoTimersOneScope) {
+TEST(ScopedHistogramTimer, ThreeTimersOneScope) {
+  SCOPED_UMA_HISTOGRAM_TIMER_MICROS("TestShortTimer0");
+  SCOPED_UMA_HISTOGRAM_TIMER_MICROS("TestShortTimer1");
   SCOPED_UMA_HISTOGRAM_TIMER("TestTimer0");
   SCOPED_UMA_HISTOGRAM_TIMER("TestTimer1");
   SCOPED_UMA_HISTOGRAM_LONG_TIMER("TestLongTimer0");
@@ -20,8 +22,8 @@
 // - integral types
 // - unscoped enums
 // - scoped enums
-TEST(HistogramMacro, IntegralPsuedoEnumeration) {
-  UMA_HISTOGRAM_ENUMERATION("Test.FauxEnumeration", 1, 10000);
+TEST(HistogramMacro, IntegralPseudoEnumeration) {
+  UMA_HISTOGRAM_ENUMERATION("Test.FauxEnumeration", 1, 1000);
 }
 
 TEST(HistogramMacro, UnscopedEnumeration) {
@@ -54,4 +56,20 @@
                             TestEnum2::MAX_ENTRIES);
 }
 
+// Compile tests for UMA_HISTOGRAM_ENUMERATION when the value type is:
+// - a const reference to an enum
+// - a non-const reference to an enum
+TEST(HistogramMacro, EnumerationConstRef) {
+  enum class TestEnum { kValue, kMaxValue = kValue };
+  const TestEnum& value_ref = TestEnum::kValue;
+  UMA_HISTOGRAM_ENUMERATION("Test.ScopedEnumeration3", value_ref);
+}
+
+TEST(HistogramMacro, EnumerationNonConstRef) {
+  enum class TestEnum { kValue, kMaxValue = kValue };
+  TestEnum value = TestEnum::kValue;
+  TestEnum& value_ref = value;
+  UMA_HISTOGRAM_ENUMERATION("Test.ScopedEnumeration4", value_ref);
+}
+
 }  // namespace base
diff --git a/base/metrics/histogram_samples.cc b/base/metrics/histogram_samples.cc
index dffd703..eb9efc0 100644
--- a/base/metrics/histogram_samples.cc
+++ b/base/metrics/histogram_samples.cc
@@ -1,18 +1,20 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "base/metrics/histogram_samples.h"
 
 #include <limits>
+#include <cstring>
 
 #include "base/compiler_specific.h"
+#include "base/memory/raw_ptr.h"
 #include "base/metrics/histogram_functions.h"
 #include "base/metrics/histogram_macros.h"
 #include "base/numerics/safe_conversions.h"
 #include "base/numerics/safe_math.h"
 #include "base/pickle.h"
-#include "starboard/memory.h"
+#include "base/strings/stringprintf.h"
 
 namespace base {
 
@@ -36,10 +38,10 @@
   void Next() override;
   void Get(HistogramBase::Sample* min,
            int64_t* max,
-           HistogramBase::Count* count) const override;
+           HistogramBase::Count* count) override;
 
  private:
-  PickleIterator* const iter_;
+  const raw_ptr<PickleIterator> iter_;
 
   HistogramBase::Sample min_;
   int64_t max_;
@@ -67,7 +69,7 @@
 
 void SampleCountPickleIterator::Get(HistogramBase::Sample* min,
                                     int64_t* max,
-                                    HistogramBase::Count* count) const {
+                                    HistogramBase::Count* count) {
   DCHECK(!Done());
   *min = min_;
   *max = max_;
@@ -82,7 +84,7 @@
 
 HistogramSamples::SingleSample HistogramSamples::AtomicSingleSample::Load()
     const {
-  AtomicSingleSample single_sample = subtle::Acquire_Load(&as_atomic);
+  AtomicSingleSample single_sample(subtle::Acquire_Load(&as_atomic));
 
   // If the sample was extracted/disabled, it's still zero to the outside.
   if (single_sample.as_atomic == kDisabledSingleSample)
@@ -92,12 +94,48 @@
 }
 
 HistogramSamples::SingleSample HistogramSamples::AtomicSingleSample::Extract(
-    bool disable) {
-  AtomicSingleSample single_sample = subtle::NoBarrier_AtomicExchange(
-      &as_atomic, disable ? kDisabledSingleSample : 0);
-  if (single_sample.as_atomic == kDisabledSingleSample)
-    single_sample.as_atomic = 0;
-  return single_sample.as_parts;
+    AtomicSingleSample new_value) {
+  DCHECK(new_value.as_atomic != kDisabledSingleSample)
+      << "Disabling an AtomicSingleSample should be done through "
+         "ExtractAndDisable().";
+
+  AtomicSingleSample old_value;
+
+  // Because a concurrent call may modify and/or disable this object as we are
+  // trying to extract its value, a compare-and-swap loop must be done to ensure
+  // that the value was not changed between the reading and writing (and to
+  // prevent accidentally re-enabling this object).
+  while (true) {
+    old_value.as_atomic = subtle::Acquire_Load(&as_atomic);
+
+    // If this object was already disabled, return an empty sample and keep it
+    // disabled.
+    if (old_value.as_atomic == kDisabledSingleSample) {
+      old_value.as_atomic = 0;
+      return old_value.as_parts;
+    }
+
+    // Extract the single-sample from memory. |existing| is what was in that
+    // memory location at the time of the call; if it doesn't match
+    // |old_value| (i.e., the single-sample was concurrently modified during
+    // this iteration), then the swap did not happen, so try again.
+    subtle::Atomic32 existing = subtle::Release_CompareAndSwap(
+        &as_atomic, old_value.as_atomic, new_value.as_atomic);
+    if (existing == old_value.as_atomic) {
+      return old_value.as_parts;
+    }
+  }
+}
+
+HistogramSamples::SingleSample
+HistogramSamples::AtomicSingleSample::ExtractAndDisable() {
+  AtomicSingleSample old_value(
+      subtle::NoBarrier_AtomicExchange(&as_atomic, kDisabledSingleSample));
+  // If this object was already disabled, return an empty sample.
+  if (old_value.as_atomic == kDisabledSingleSample) {
+    old_value.as_atomic = 0;
+  }
+  return old_value.as_parts;
 }
 
 bool HistogramSamples::AtomicSingleSample::Accumulate(
@@ -186,6 +224,11 @@
     meta_->id = id;
 }
 
+HistogramSamples::HistogramSamples(uint64_t id, std::unique_ptr<Metadata> meta)
+    : HistogramSamples(id, meta.get()) {
+  meta_owned_ = std::move(meta);
+}
+
 // This mustn't do anything with |meta_|. It was passed to the ctor and may
 // be invalid by the time this dtor gets called.
 HistogramSamples::~HistogramSamples() = default;
@@ -217,6 +260,40 @@
   DCHECK(success);
 }
 
+void HistogramSamples::Extract(HistogramSamples& other) {
+  static_assert(sizeof(other.meta_->sum) == 8);
+
+#ifdef ARCH_CPU_64_BITS
+  // NoBarrier_AtomicExchange() is only defined for 64-bit types if
+  // the ARCH_CPU_64_BITS macro is set.
+  subtle::Atomic64 other_sum =
+      subtle::NoBarrier_AtomicExchange(&other.meta_->sum, 0);
+#else
+  // |sum| is only atomic on 64-bit archs. Make |other_sum| volatile so that
+  // the following code is not optimized or rearranged to be something like:
+  //     IncreaseSumAndCount(other.meta_->sum, ...);
+  //     other.meta_->sum = 0;
+  // Or:
+  //     int64_t other_sum = other.meta_->sum;
+  //     other.meta_->sum = 0;
+  //     IncreaseSumAndCount(other_sum, ...);
+  // Neither of which guarantees eventual consistency (other.meta_->sum may
+  // be modified concurrently at any time). However, despite this, eventual
+  // consistency is still not guaranteed here because performing 64-bit
+  // operations (loading, storing, adding, etc.) on a 32-bit machine cannot be
+  // done atomically, but this at least reduces the odds of inconsistencies, at
+  // the cost of a few extra instructions.
+  volatile int64_t other_sum = other.meta_->sum;
+  other.meta_->sum -= other_sum;
+#endif  // ARCH_CPU_64_BITS
+  HistogramBase::AtomicCount other_redundant_count =
+      subtle::NoBarrier_AtomicExchange(&other.meta_->redundant_count, 0);
+  IncreaseSumAndCount(other_sum, other_redundant_count);
+  std::unique_ptr<SampleCountIterator> it = other.ExtractingIterator();
+  bool success = AddSubtractImpl(it.get(), ADD);
+  DCHECK(success);
+}
+
 void HistogramSamples::Serialize(Pickle* pickle) const {
   pickle->WriteInt64(sum());
   pickle->WriteInt(redundant_count());
@@ -264,6 +341,102 @@
                      static_cast<int32_t>(id()));
 }
 
+base::Value::Dict HistogramSamples::ToGraphDict(StringPiece histogram_name,
+                                                int32_t flags) const {
+  base::Value::Dict dict;
+  dict.Set("name", histogram_name);
+  dict.Set("header", GetAsciiHeader(histogram_name, flags));
+  dict.Set("body", GetAsciiBody());
+  return dict;
+}
+
+std::string HistogramSamples::GetAsciiHeader(StringPiece histogram_name,
+                                             int32_t flags) const {
+  std::string output;
+  StringAppendF(&output, "Histogram: %.*s recorded %d samples",
+                static_cast<int>(histogram_name.size()), histogram_name.data(),
+                TotalCount());
+  if (flags)
+    StringAppendF(&output, " (flags = 0x%x)", flags);
+  return output;
+}
+
+std::string HistogramSamples::GetAsciiBody() const {
+  HistogramBase::Count total_count = TotalCount();
+  double scaled_total_count = total_count / 100.0;
+
+  // Determine how wide the largest bucket range is (how many digits to print),
+  // so that we'll be able to right-align starts for the graphical bars.
+  // Determine which bucket has the largest sample count so that we can
+  // normalize the graphical bar-width relative to that sample count.
+  HistogramBase::Count largest_count = 0;
+  HistogramBase::Sample largest_sample = 0;
+  std::unique_ptr<SampleCountIterator> it = Iterator();
+  while (!it->Done()) {
+    HistogramBase::Sample min;
+    int64_t max;
+    HistogramBase::Count count;
+    it->Get(&min, &max, &count);
+    if (min > largest_sample)
+      largest_sample = min;
+    if (count > largest_count)
+      largest_count = count;
+    it->Next();
+  }
+  // Scale histogram bucket counts to take at most 72 characters.
+  // Note: Keep in sync with kLineLength in sample_vector.cc.
+  const double kLineLength = 72;
+  double scaling_factor = 1;
+  if (largest_count > kLineLength)
+    scaling_factor = kLineLength / largest_count;
+  size_t print_width = GetSimpleAsciiBucketRange(largest_sample).size() + 1;
+
+  // Iterate over each bucket and display it.
+  it = Iterator();
+  std::string output;
+  while (!it->Done()) {
+    HistogramBase::Sample min;
+    int64_t max;
+    HistogramBase::Count count;
+    it->Get(&min, &max, &count);
+
+    // The value is the bucket minimum (|min|), so display it.
+    std::string range = GetSimpleAsciiBucketRange(min);
+    output.append(range);
+    for (size_t j = 0; range.size() + j < print_width + 1; ++j)
+      output.push_back(' ');
+    HistogramBase::Count current_size = round(count * scaling_factor);
+    WriteAsciiBucketGraph(current_size, kLineLength, &output);
+    WriteAsciiBucketValue(count, scaled_total_count, &output);
+    StringAppendF(&output, "\n");
+    it->Next();
+  }
+  return output;
+}
+
+void HistogramSamples::WriteAsciiBucketGraph(double x_count,
+                                             int line_length,
+                                             std::string* output) const {
+  int x_remainder = line_length - x_count;
+
+  while (0 < x_count--)
+    output->append("-");
+  output->append("O");
+  while (0 < x_remainder--)
+    output->append(" ");
+}
+
+void HistogramSamples::WriteAsciiBucketValue(HistogramBase::Count current,
+                                             double scaled_sum,
+                                             std::string* output) const {
+  StringAppendF(output, " (%d = %3.1f%%)", current, current / scaled_sum);
+}
+
+const std::string HistogramSamples::GetSimpleAsciiBucketRange(
+    HistogramBase::Sample sample) const {
+  return StringPrintf("%d", sample);
+}
+
 SampleCountIterator::~SampleCountIterator() = default;
 
 bool SampleCountIterator::GetBucketIndex(size_t* index) const {
@@ -273,16 +446,22 @@
 
 SingleSampleIterator::SingleSampleIterator(HistogramBase::Sample min,
                                            int64_t max,
-                                           HistogramBase::Count count)
-    : SingleSampleIterator(min, max, count, kSizeMax) {}
-
-SingleSampleIterator::SingleSampleIterator(HistogramBase::Sample min,
-                                           int64_t max,
                                            HistogramBase::Count count,
-                                           size_t bucket_index)
-    : min_(min), max_(max), bucket_index_(bucket_index), count_(count) {}
+                                           size_t bucket_index,
+                                           bool value_was_extracted)
+    : min_(min),
+      max_(max),
+      bucket_index_(bucket_index),
+      count_(count),
+      value_was_extracted_(value_was_extracted) {}
 
-SingleSampleIterator::~SingleSampleIterator() = default;
+SingleSampleIterator::~SingleSampleIterator() {
+  // Because this object may have been instantiated in such a way that the
+  // samples it is holding were already extracted from the underlying data, we
+  // add a DCHECK to ensure that in those cases, users of this iterator read the
+  // samples, otherwise they may be lost.
+  DCHECK(!value_was_extracted_ || Done());
+}
 
 bool SingleSampleIterator::Done() const {
   return count_ == 0;
@@ -295,14 +474,11 @@
 
 void SingleSampleIterator::Get(HistogramBase::Sample* min,
                                int64_t* max,
-                               HistogramBase::Count* count) const {
+                               HistogramBase::Count* count) {
   DCHECK(!Done());
-  if (min != nullptr)
-    *min = min_;
-  if (max != nullptr)
-    *max = max_;
-  if (count != nullptr)
-    *count = count_;
+  *min = min_;
+  *max = max_;
+  *count = count_;
 }
 
 bool SingleSampleIterator::GetBucketIndex(size_t* index) const {
diff --git a/base/metrics/histogram_samples.h b/base/metrics/histogram_samples.h
index 392de26..7f34093 100644
--- a/base/metrics/histogram_samples.h
+++ b/base/metrics/histogram_samples.h
@@ -1,17 +1,21 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #ifndef BASE_METRICS_HISTOGRAM_SAMPLES_H_
 #define BASE_METRICS_HISTOGRAM_SAMPLES_H_
 
+#include <stddef.h>
+#include <stdint.h>
+
 #include <limits>
 #include <memory>
+#include <string>
 
 #include "base/atomicops.h"
-#include "base/macros.h"
+#include "base/base_export.h"
+#include "base/memory/raw_ptr.h"
 #include "base/metrics/histogram_base.h"
-#include "starboard/types.h"
 
 namespace base {
 
@@ -46,17 +50,23 @@
   // acquire/release operations to guarantee ordering with outside values.
   union BASE_EXPORT AtomicSingleSample {
     AtomicSingleSample() : as_atomic(0) {}
-    AtomicSingleSample(subtle::Atomic32 rhs) : as_atomic(rhs) {}
+    explicit AtomicSingleSample(subtle::Atomic32 rhs) : as_atomic(rhs) {}
 
     // Returns the single sample in an atomic manner. This in an "acquire"
     // load. The returned sample isn't shared and thus its fields can be safely
-    // accessed.
+    // accessed. If this object is disabled, this will return an empty sample
+    // (bucket count set to 0).
     SingleSample Load() const;
 
-    // Extracts the single sample in an atomic manner. If |disable| is true
-    // then this object will be set so it will never accumulate another value.
-    // This is "no barrier" so doesn't enforce ordering with other atomic ops.
-    SingleSample Extract(bool disable);
+    // Extracts and returns the single sample and changes it to |new_value| in
+    // an atomic manner. If this object is disabled, this will return an empty
+    // sample (bucket count set to 0).
+    SingleSample Extract(AtomicSingleSample new_value = AtomicSingleSample(0));
+
+    // Like Extract() above, but also disables this object so that it will
+    // never accumulate another value. If this object is already disabled, this
+    // will return an empty sample (bucket count set to 0).
+    SingleSample ExtractAndDisable();
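+    //
+    // Example (illustrative):
+    //   AtomicSingleSample sample;
+    //   sample.Accumulate(/*bucket=*/3, /*count=*/1);
+    //   SingleSample taken = sample.Extract();  // |sample| is now empty.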
 
     // Adds a given count to the held bucket. If not possible, it returns false
     // and leaves the parts unchanged. Once extracted/disabled, this always
@@ -111,7 +121,7 @@
     // histogram types, there might be races during histogram accumulation
     // and snapshotting that we choose to accept. In this case, the tallies
     // might mismatch even when no memory corruption has happened.
-    HistogramBase::AtomicCount redundant_count;
+    HistogramBase::AtomicCount redundant_count{0};
 
     // A single histogram value and associated count. This allows histograms
     // that typically report only a single value to not require full storage
@@ -126,7 +136,8 @@
     LocalMetadata();
   };
 
-  HistogramSamples(uint64_t id, Metadata* meta);
+  HistogramSamples(const HistogramSamples&) = delete;
+  HistogramSamples& operator=(const HistogramSamples&) = delete;
   virtual ~HistogramSamples();
 
   virtual void Accumulate(HistogramBase::Sample value,
@@ -134,17 +145,35 @@
   virtual HistogramBase::Count GetCount(HistogramBase::Sample value) const = 0;
   virtual HistogramBase::Count TotalCount() const = 0;
 
-  virtual void Add(const HistogramSamples& other);
+  void Add(const HistogramSamples& other);
 
   // Add from serialized samples.
-  virtual bool AddFromPickle(PickleIterator* iter);
+  bool AddFromPickle(PickleIterator* iter);
 
-  virtual void Subtract(const HistogramSamples& other);
+  void Subtract(const HistogramSamples& other);
 
+  // Adds the samples from |other| while also resetting |other|'s sample counts
+  // to 0.
+  void Extract(HistogramSamples& other);
+
+  // Returns an iterator to read the sample counts.
   virtual std::unique_ptr<SampleCountIterator> Iterator() const = 0;
-  virtual void Serialize(Pickle* pickle) const;
 
-  // Accessor fuctions.
+  // Returns a special kind of iterator that resets the underlying sample count
+  // to 0 when Get() is called. The returned iterator must be consumed
+  // completely before being destroyed, otherwise samples may be lost (this is
+  // enforced by a DCHECK in the destructor).
+  virtual std::unique_ptr<SampleCountIterator> ExtractingIterator() = 0;
+
+  void Serialize(Pickle* pickle) const;
+
+  // Returns the histogram's samples rendered as ASCII strings, packaged in a
+  // dictionary of the form:
+  //   {"name": <string>, "header": <string>, "body": <string>}
+  base::Value::Dict ToGraphDict(StringPiece histogram_name,
+                                int32_t flags) const;
+
+  // Accessor functions.
   uint64_t id() const { return meta_->id; }
   int64_t sum() const {
 #ifdef ARCH_CPU_64_BITS
@@ -171,6 +200,9 @@
     MAX_NEGATIVE_SAMPLE_REASONS
   };
 
+  HistogramSamples(uint64_t id, Metadata* meta);
+  HistogramSamples(uint64_t id, std::unique_ptr<Metadata> meta);
+
   // Based on |op| type, add or subtract sample counts data from the iterator.
   enum Operator { ADD, SUBTRACT };
   virtual bool AddSubtractImpl(SampleCountIterator* iter, Operator op) = 0;
@@ -194,15 +226,38 @@
     return meta_->single_sample;
   }
 
+  // Produces an ASCII bar graph (non-blank vs. blank characters) for a bucket.
+  void WriteAsciiBucketGraph(double x_count,
+                             int line_length,
+                             std::string* output) const;
+
+  // Writes textual description of the bucket contents (relative to histogram).
+  // Output is the count in the buckets, as well as the percentage.
+  void WriteAsciiBucketValue(HistogramBase::Count current,
+                             double scaled_sum,
+                             std::string* output) const;
+
+  // Returns the ASCII graph body for these histogram samples.
+  virtual std::string GetAsciiBody() const;
+
+  // Returns an ASCII header line describing these histogram samples.
+  virtual std::string GetAsciiHeader(StringPiece histogram_name,
+                                     int32_t flags) const;
+
+  // Returns a string description of what goes in a given bucket.
+  const std::string GetSimpleAsciiBucketRange(
+      HistogramBase::Sample sample) const;
+
   Metadata* meta() { return meta_; }
 
  private:
-  // Depending on derived class meta values can come from local stoarge or
-  // external storage in which case HistogramSamples class cannot take ownership
-  // of Metadata*.
-  Metadata* meta_;
-
-  DISALLOW_COPY_AND_ASSIGN(HistogramSamples);
+  // Depending on the derived class, `meta_` can come from:
+  // - Local storage: `meta_owned_` is set and `meta_` points to it.
+  // - External storage: `meta_owned_` is null, and `meta_` points to an
+  //   external object. The caller guarantees that object will outlive this
+  //   instance.
+  std::unique_ptr<Metadata> meta_owned_;
+  raw_ptr<Metadata> meta_;
 };
 
 class BASE_EXPORT SampleCountIterator {
@@ -213,14 +268,13 @@
   virtual void Next() = 0;
 
   // Get the sample and count at current position.
-  // |min| |max| and |count| can be NULL if the value is not of interest.
   // Note: |max| is int64_t because histograms support logged values in the
   // full int32_t range and bucket max is exclusive, so it needs to support
   // values up to MAXINT32+1.
   // Requires: !Done();
   virtual void Get(HistogramBase::Sample* min,
                    int64_t* max,
-                   HistogramBase::Count* count) const = 0;
+                   HistogramBase::Count* count) = 0;
   static_assert(std::numeric_limits<HistogramBase::Sample>::max() <
                     std::numeric_limits<int64_t>::max(),
                 "Get() |max| must be able to hold Histogram::Sample max + 1");
@@ -235,11 +289,9 @@
  public:
   SingleSampleIterator(HistogramBase::Sample min,
                        int64_t max,
-                       HistogramBase::Count count);
-  SingleSampleIterator(HistogramBase::Sample min,
-                       int64_t max,
                        HistogramBase::Count count,
-                       size_t bucket_index);
+                       size_t bucket_index,
+                       bool value_was_extracted);
   ~SingleSampleIterator() override;
 
   // SampleCountIterator:
@@ -247,7 +299,7 @@
   void Next() override;
   void Get(HistogramBase::Sample* min,
            int64_t* max,
-           HistogramBase::Count* count) const override;
+           HistogramBase::Count* count) override;
 
   // SampleVector uses predefined buckets so iterator can return bucket index.
   bool GetBucketIndex(size_t* index) const override;
@@ -258,6 +310,10 @@
   const int64_t max_;
   const size_t bucket_index_;
   HistogramBase::Count count_;
+
+  // Whether the value that this iterator holds was extracted from the
+  // underlying data (i.e., reset to 0).
+  const bool value_was_extracted_;
 };
 
 }  // namespace base
diff --git a/base/metrics/histogram_samples_unittest.cc b/base/metrics/histogram_samples_unittest.cc
index 74c743b..9b70c9f 100644
--- a/base/metrics/histogram_samples_unittest.cc
+++ b/base/metrics/histogram_samples_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2017 The Chromium Authors. All rights reserved.
+// Copyright 2017 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -25,33 +25,44 @@
   s = sample.Load();
   EXPECT_EQ(9U, s.bucket);
   EXPECT_EQ(1U, s.count);
+
+  ASSERT_TRUE(sample.Accumulate(9, 1));
+  s = sample.Load();
+  EXPECT_EQ(9U, s.bucket);
+  EXPECT_EQ(2U, s.count);
 }
 
 TEST(SingleSampleTest, Extract) {
   AtomicSingleSample sample;
   ASSERT_TRUE(sample.Accumulate(9, 1));
 
-  SingleSample s = sample.Extract(/*disable=*/false);
+  SingleSample s = sample.Extract();
   EXPECT_EQ(9U, s.bucket);
   EXPECT_EQ(1U, s.count);
 
-  s = sample.Extract(/*disable=*/false);
+  s = sample.Extract();
   EXPECT_EQ(0U, s.bucket);
   EXPECT_EQ(0U, s.count);
+
+  ASSERT_TRUE(sample.Accumulate(1, 2));
+  s = sample.Extract();
+  EXPECT_EQ(1U, s.bucket);
+  EXPECT_EQ(2U, s.count);
 }
 
 TEST(SingleSampleTest, Disable) {
   AtomicSingleSample sample;
-  EXPECT_EQ(0U, sample.Extract(/*disable=*/false).count);
+  EXPECT_EQ(0U, sample.Extract().count);
   EXPECT_FALSE(sample.IsDisabled());
 
   ASSERT_TRUE(sample.Accumulate(9, 1));
-  EXPECT_EQ(1U, sample.Extract(/*disable=*/true).count);
+  EXPECT_EQ(1U, sample.ExtractAndDisable().count);
   EXPECT_TRUE(sample.IsDisabled());
 
   ASSERT_FALSE(sample.Accumulate(9, 1));
-  EXPECT_EQ(0U, sample.Extract(/*disable=*/false).count);
-  EXPECT_FALSE(sample.IsDisabled());
+  EXPECT_EQ(0U, sample.Extract().count);
+  // The sample should still be disabled.
+  EXPECT_TRUE(sample.IsDisabled());
 }
 
 TEST(SingleSampleTest, Accumulate) {
@@ -60,12 +71,14 @@
   ASSERT_TRUE(sample.Accumulate(9, 1));
   ASSERT_TRUE(sample.Accumulate(9, 2));
   ASSERT_TRUE(sample.Accumulate(9, 4));
-  EXPECT_EQ(7U, sample.Extract(/*disable=*/false).count);
+  ASSERT_FALSE(sample.Accumulate(10, 1));
+  EXPECT_EQ(7U, sample.Extract().count);
 
   ASSERT_TRUE(sample.Accumulate(9, 4));
   ASSERT_TRUE(sample.Accumulate(9, -2));
   ASSERT_TRUE(sample.Accumulate(9, 1));
-  EXPECT_EQ(3U, sample.Extract(/*disable=*/false).count);
+  ASSERT_FALSE(sample.Accumulate(10, 1));
+  EXPECT_EQ(3U, sample.Extract().count);
 }
 
 TEST(SingleSampleTest, Overflow) {
@@ -73,12 +86,11 @@
 
   ASSERT_TRUE(sample.Accumulate(9, 1));
   ASSERT_FALSE(sample.Accumulate(9, -2));
-  EXPECT_EQ(1U, sample.Extract(/*disable=*/false).count);
+  EXPECT_EQ(1U, sample.Extract().count);
 
   ASSERT_TRUE(sample.Accumulate(9, std::numeric_limits<uint16_t>::max()));
   ASSERT_FALSE(sample.Accumulate(9, 1));
-  EXPECT_EQ(std::numeric_limits<uint16_t>::max(),
-            sample.Extract(/*disable=*/false).count);
+  EXPECT_EQ(std::numeric_limits<uint16_t>::max(), sample.Extract().count);
 }
 
 }  // namespace base
diff --git a/base/metrics/histogram_snapshot_manager.cc b/base/metrics/histogram_snapshot_manager.cc
index c1b804e..40e9c78 100644
--- a/base/metrics/histogram_snapshot_manager.cc
+++ b/base/metrics/histogram_snapshot_manager.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -7,38 +7,17 @@
 #include <memory>
 
 #include "base/debug/alias.h"
+#include "base/logging.h"
+#include "base/memory/raw_ptr.h"
 #include "base/metrics/histogram_flattener.h"
 #include "base/metrics/histogram_samples.h"
-#include "base/metrics/statistics_recorder.h"
-#include "base/stl_util.h"
 
 namespace base {
 
-namespace {
-
-// A simple object to set an "active" flag and clear it upon destruction. It is
-// an error if the flag is already set.
-class MakeActive {
- public:
-  MakeActive(std::atomic<bool>* is_active) : is_active_(is_active) {
-    bool was_active = is_active_->exchange(true, std::memory_order_relaxed);
-    CHECK(!was_active);
-  }
-  ~MakeActive() { is_active_->store(false, std::memory_order_relaxed); }
-
- private:
-  std::atomic<bool>* is_active_;
-
-  DISALLOW_COPY_AND_ASSIGN(MakeActive);
-};
-
-}  // namespace
-
 HistogramSnapshotManager::HistogramSnapshotManager(
     HistogramFlattener* histogram_flattener)
     : histogram_flattener_(histogram_flattener) {
   DCHECK(histogram_flattener_);
-  is_active_.store(false, std::memory_order_relaxed);
 }
 
 HistogramSnapshotManager::~HistogramSnapshotManager() = default;
@@ -49,40 +28,57 @@
     HistogramBase::Flags required_flags) {
   for (HistogramBase* const histogram : histograms) {
     histogram->SetFlags(flags_to_set);
-    if ((histogram->flags() & required_flags) == required_flags)
+    if (histogram->HasFlags(required_flags)) {
       PrepareDelta(histogram);
+    }
+  }
+}
+
+void HistogramSnapshotManager::SnapshotUnloggedSamples(
+    const std::vector<HistogramBase*>& histograms,
+    HistogramBase::Flags required_flags) {
+  DCHECK(!unlogged_samples_snapshot_taken_);
+  unlogged_samples_snapshot_taken_ = true;
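+  // For each histogram with the required flags, snapshot its unlogged samples,
+  // keep the snapshot alive in |histograms_and_snapshots_| for the later
+  // MarkUnloggedSamplesAsLogged() call, and prepare it for the flattener now.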
+  for (HistogramBase* const histogram : histograms) {
+    if (histogram->HasFlags(required_flags)) {
+      const HistogramSnapshotPair& histogram_snapshot_pair =
+          histograms_and_snapshots_.emplace_back(
+              histogram, histogram->SnapshotUnloggedSamples());
+      PrepareSamples(histogram_snapshot_pair.first,
+                     *histogram_snapshot_pair.second);
+    }
+  }
+}
+
+void HistogramSnapshotManager::MarkUnloggedSamplesAsLogged() {
+  DCHECK(unlogged_samples_snapshot_taken_);
+  unlogged_samples_snapshot_taken_ = false;
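+  // Swap the pending snapshots into a local vector so that
+  // |histograms_and_snapshots_| is empty again before the next
+  // SnapshotUnloggedSamples() call.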
+  std::vector<HistogramSnapshotPair> histograms_and_snapshots;
+  histograms_and_snapshots.swap(histograms_and_snapshots_);
+  for (auto& [histogram, snapshot] : histograms_and_snapshots) {
+    histogram->MarkSamplesAsLogged(*snapshot);
   }
 }
 
 void HistogramSnapshotManager::PrepareDelta(HistogramBase* histogram) {
-  histogram->ValidateHistogramContents();
-  PrepareSamples(histogram, histogram->SnapshotDelta());
+  std::unique_ptr<HistogramSamples> samples = histogram->SnapshotDelta();
+  PrepareSamples(histogram, *samples);
 }
 
 void HistogramSnapshotManager::PrepareFinalDelta(
     const HistogramBase* histogram) {
-  histogram->ValidateHistogramContents();
-  PrepareSamples(histogram, histogram->SnapshotFinalDelta());
+  std::unique_ptr<HistogramSamples> samples = histogram->SnapshotFinalDelta();
+  PrepareSamples(histogram, *samples);
 }
 
-void HistogramSnapshotManager::PrepareSamples(
-    const HistogramBase* histogram,
-    std::unique_ptr<HistogramSamples> samples) {
+void HistogramSnapshotManager::PrepareSamples(const HistogramBase* histogram,
+                                              const HistogramSamples& samples) {
   DCHECK(histogram_flattener_);
 
-  // Ensure that there is no concurrent access going on while accessing the
-  // set of known histograms. The flag will be reset when this object goes
-  // out of scope.
-  MakeActive make_active(&is_active_);
-
-  // Get information known about this histogram. If it did not previously
-  // exist, one will be created and initialized.
-  SampleInfo* sample_info = &known_histograms_[histogram->name_hash()];
-
   // Crash if we detect that our histograms have been overwritten.  This may be
   // a fair distance from the memory smasher, but we hope to correlate these
   // crashes with other events, such as plugins, or usage patterns, etc.
-  uint32_t corruption = histogram->FindCorruption(*samples);
+  uint32_t corruption = histogram->FindCorruption(samples);
   if (HistogramBase::BUCKET_ORDER_ERROR & corruption) {
     // Extract fields useful during debug.
     const BucketRanges* ranges =
@@ -109,15 +105,11 @@
     DLOG(ERROR) << "Histogram: \"" << histogram->histogram_name()
                 << "\" has data corruption: " << corruption;
     // Don't record corrupt data to metrics services.
-    const uint32_t old_corruption = sample_info->inconsistencies;
-    if (old_corruption == (corruption | old_corruption))
-      return;  // We've already seen this corruption for this histogram.
-    sample_info->inconsistencies |= corruption;
     return;
   }
 
-  if (samples->TotalCount() > 0)
-    histogram_flattener_->RecordDelta(*histogram, *samples);
+  if (samples.TotalCount() > 0)
+    histogram_flattener_->RecordDelta(*histogram, samples);
 }
 
 }  // namespace base
diff --git a/base/metrics/histogram_snapshot_manager.h b/base/metrics/histogram_snapshot_manager.h
index 55eee92..33772cc 100644
--- a/base/metrics/histogram_snapshot_manager.h
+++ b/base/metrics/histogram_snapshot_manager.h
@@ -1,19 +1,21 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #ifndef BASE_METRICS_HISTOGRAM_SNAPSHOT_MANAGER_H_
 #define BASE_METRICS_HISTOGRAM_SNAPSHOT_MANAGER_H_
 
+#include <stdint.h>
+
 #include <atomic>
 #include <map>
-#include <string>
+#include <memory>
 #include <vector>
 
+#include "base/base_export.h"
 #include "base/gtest_prod_util.h"
-#include "base/macros.h"
+#include "base/memory/raw_ptr.h"
 #include "base/metrics/histogram_base.h"
-#include "starboard/types.h"
 
 namespace base {
 
@@ -30,18 +32,33 @@
 class BASE_EXPORT HistogramSnapshotManager final {
  public:
   explicit HistogramSnapshotManager(HistogramFlattener* histogram_flattener);
+
+  HistogramSnapshotManager(const HistogramSnapshotManager&) = delete;
+  HistogramSnapshotManager& operator=(const HistogramSnapshotManager&) = delete;
+
   ~HistogramSnapshotManager();
 
-  // Snapshot all histograms, and ask |histogram_flattener_| to record the
+  // Snapshots all histograms and asks |histogram_flattener_| to record the
   // delta. |flags_to_set| is used to set flags for each histogram.
-  // |required_flags| is used to select histograms to be recorded.
-  // Only histograms that have all the flags specified by the argument will be
-  // chosen. If all histograms should be recorded, set it to
-  // |Histogram::kNoFlags|.
+  // |required_flags| is used to select which histograms to record. Only
+  // histograms with all of the required flags are selected. If all histograms
+  // should be recorded, use |Histogram::kNoFlags| as the required flag.
   void PrepareDeltas(const std::vector<HistogramBase*>& histograms,
                      HistogramBase::Flags flags_to_set,
                      HistogramBase::Flags required_flags);
 
+  // Same as PrepareDeltas() above, but the samples obtained from the histograms
+  // are not immediately marked as logged. Instead, they are stored internally
+  // in |histograms_and_snapshots_|, and a call to MarkUnloggedSamplesAsLogged()
+  // should be made subsequently in order to mark them as logged.
+  void SnapshotUnloggedSamples(const std::vector<HistogramBase*>& histograms,
+                               HistogramBase::Flags required_flags);
+
+  // Marks the unlogged samples obtained from SnapshotUnloggedSamples() as
+  // logged. For each call to this function, there should be a corresponding
+  // call to SnapshotUnloggedSamples() before it.
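+  //
+  // Illustrative two-phase flow (editor's sketch; |manager| and |histograms|
+  // are placeholders):
+  //   manager.SnapshotUnloggedSamples(histograms, HistogramBase::kNoFlags);
+  //   // ... e.g., persist the log built from the snapshots ...
+  //   manager.MarkUnloggedSamplesAsLogged();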
+  void MarkUnloggedSamplesAsLogged();
+
   // When the collection is not so simple as can be done using a single
   // iterator, the steps can be performed separately. Call PrepareDelta()
   // as many times as necessary. PrepareFinalDelta() works like PrepareDelta()
@@ -53,35 +70,28 @@
  private:
   FRIEND_TEST_ALL_PREFIXES(HistogramSnapshotManagerTest, CheckMerge);
 
-  // During a snapshot, samples are acquired and aggregated. This structure
-  // contains all the information for a given histogram that persists between
-  // collections.
-  struct SampleInfo {
-    // The set of inconsistencies (flags) already seen for the histogram.
-    // See HistogramBase::Inconsistency for values.
-    uint32_t inconsistencies = 0;
-  };
+  using HistogramSnapshotPair =
+      std::pair<HistogramBase*, std::unique_ptr<HistogramSamples>>;
 
   // Capture and hold samples from a histogram. This does all the heavy
-  // lifting for PrepareDelta() and PrepareAbsolute().
+  // lifting for PrepareDelta() and PrepareFinalDelta().
   void PrepareSamples(const HistogramBase* histogram,
-                      std::unique_ptr<HistogramSamples> samples);
+                      const HistogramSamples& samples);
+
+  // A list of histograms and snapshots of unlogged samples. Filled when calling
+  // SnapshotUnloggedSamples(). They are marked as logged when calling
+  // MarkUnloggedSamplesAsLogged().
+  std::vector<HistogramSnapshotPair> histograms_and_snapshots_;
+
+  // Keeps track of whether SnapshotUnloggedSamples() has been called. This
+  // resets back to false after calling MarkUnloggedSamplesAsLogged(), so that
+  // the same HistogramSnapshotManager instance can be used to take multiple
+  // snapshots if needed.
+  bool unlogged_samples_snapshot_taken_ = false;
 
   // |histogram_flattener_| handles the logistics of recording the histogram
   // deltas.
-  HistogramFlattener* const histogram_flattener_;  // Weak.
-
-  // For histograms, track what has been previously seen, indexed
-  // by the hash of the histogram name.
-  std::map<uint64_t, SampleInfo> known_histograms_;
-
-  // A flag indicating if a thread is currently doing an operation. This is
-  // used to check against concurrent access which is not supported. A Thread-
-  // Checker is not sufficient because it may be guarded by at outside lock
-  // (as is the case with cronet).
-  std::atomic<bool> is_active_;
-
-  DISALLOW_COPY_AND_ASSIGN(HistogramSnapshotManager);
+  const raw_ptr<HistogramFlattener> histogram_flattener_;  // Weak.
 };
 
 }  // namespace base
diff --git a/base/metrics/histogram_snapshot_manager_unittest.cc b/base/metrics/histogram_snapshot_manager_unittest.cc
index 1e2c599..e9aa964 100644
--- a/base/metrics/histogram_snapshot_manager_unittest.cc
+++ b/base/metrics/histogram_snapshot_manager_unittest.cc
@@ -1,56 +1,72 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "base/metrics/histogram_snapshot_manager.h"
 
+#include <memory>
 #include <string>
 #include <vector>
 
-#include "base/macros.h"
+#include "base/containers/contains.h"
 #include "base/metrics/histogram_delta_serialization.h"
-#include "base/metrics/histogram_macros.h"
+#include "base/metrics/histogram_functions.h"
 #include "base/metrics/sample_vector.h"
 #include "base/metrics/statistics_recorder.h"
-#include "base/stl_util.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
 
+namespace {
+
+const std::string kHistogramName = "UmaHistogram";
+
+const std::string kStabilityHistogramName = "UmaStabilityHistogram";
+
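+// Records a boolean sample to a stability histogram, mirroring
+// base::UmaHistogramBoolean() but with kUmaStabilityHistogramFlag set.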
+void UmaStabilityHistogramBoolean(const std::string& name, bool sample) {
+  HistogramBase* histogram = BooleanHistogram::FactoryGet(
+      name, HistogramBase::kUmaStabilityHistogramFlag);
+  histogram->Add(sample);
+}
+
+}  // namespace
+
 class HistogramFlattenerDeltaRecorder : public HistogramFlattener {
  public:
   HistogramFlattenerDeltaRecorder() = default;
 
+  HistogramFlattenerDeltaRecorder(const HistogramFlattenerDeltaRecorder&) =
+      delete;
+  HistogramFlattenerDeltaRecorder& operator=(
+      const HistogramFlattenerDeltaRecorder&) = delete;
+
   void RecordDelta(const HistogramBase& histogram,
                    const HistogramSamples& snapshot) override {
-    recorded_delta_histogram_names_.push_back(histogram.histogram_name());
+    recorded_delta_histograms_.push_back(&histogram);
     // Use CHECK instead of ASSERT to get full stack-trace and thus origin.
-    CHECK(!ContainsKey(recorded_delta_histogram_sum_,
-                       histogram.histogram_name()));
+    CHECK(!Contains(recorded_delta_histogram_sum_, histogram.histogram_name()));
     // Keep the snapshot's sum for testing.
     recorded_delta_histogram_sum_[histogram.histogram_name()] = snapshot.sum();
   }
 
   void Reset() {
-    recorded_delta_histogram_names_.clear();
+    recorded_delta_histograms_.clear();
     recorded_delta_histogram_sum_.clear();
   }
 
-  std::vector<std::string> GetRecordedDeltaHistogramNames() {
-    return recorded_delta_histogram_names_;
+  std::vector<const HistogramBase*>& GetRecordedDeltaHistograms() {
+    return recorded_delta_histograms_;
   }
 
   int64_t GetRecordedDeltaHistogramSum(const std::string& name) {
-    EXPECT_TRUE(ContainsKey(recorded_delta_histogram_sum_, name));
+    EXPECT_TRUE(Contains(recorded_delta_histogram_sum_, name));
     return recorded_delta_histogram_sum_[name];
   }
 
  private:
-  std::vector<std::string> recorded_delta_histogram_names_;
+  std::vector<const HistogramBase*> recorded_delta_histograms_;
   std::map<std::string, int64_t> recorded_delta_histogram_sum_;
-
-  DISALLOW_COPY_AND_ASSIGN(HistogramFlattenerDeltaRecorder);
 };
 
 class HistogramSnapshotManagerTest : public testing::Test {
@@ -61,6 +77,11 @@
 
   ~HistogramSnapshotManagerTest() override = default;
 
+  int64_t GetRecordedDeltaHistogramSum(const std::string& name) {
+    return histogram_flattener_delta_recorder_.GetRecordedDeltaHistogramSum(
+        name);
+  }
+
   std::unique_ptr<StatisticsRecorder> statistics_recorder_;
   HistogramFlattenerDeltaRecorder histogram_flattener_delta_recorder_;
   HistogramSnapshotManager histogram_snapshot_manager_;
@@ -68,49 +89,163 @@
 
 TEST_F(HistogramSnapshotManagerTest, PrepareDeltasNoFlagsFilter) {
   // kNoFlags filter should record all histograms.
-  UMA_HISTOGRAM_ENUMERATION("UmaHistogram", 1, 4);
-  UMA_STABILITY_HISTOGRAM_ENUMERATION("UmaStabilityHistogram", 1, 2);
+  base::UmaHistogramBoolean(kHistogramName, true);
+  UmaStabilityHistogramBoolean(kStabilityHistogramName, true);
 
-  StatisticsRecorder::PrepareDeltas(false, HistogramBase::kNoFlags,
-                                    HistogramBase::kNoFlags,
-                                    &histogram_snapshot_manager_);
+  StatisticsRecorder::PrepareDeltas(
+      /*include_persistent=*/false, /*flags_to_set=*/HistogramBase::kNoFlags,
+      /*required_flags=*/HistogramBase::kNoFlags, &histogram_snapshot_manager_);
 
-  const std::vector<std::string>& histograms =
-      histogram_flattener_delta_recorder_.GetRecordedDeltaHistogramNames();
-  EXPECT_EQ(2U, histograms.size());
-  EXPECT_EQ("UmaHistogram", histograms[0]);
-  EXPECT_EQ("UmaStabilityHistogram", histograms[1]);
+  // Verify that the snapshots were recorded.
+  const std::vector<const HistogramBase*>& histograms =
+      histogram_flattener_delta_recorder_.GetRecordedDeltaHistograms();
+  ASSERT_EQ(2U, histograms.size());
+  ASSERT_EQ(kHistogramName, histograms[0]->histogram_name());
+  EXPECT_EQ(GetRecordedDeltaHistogramSum(kHistogramName), 1);
+  ASSERT_EQ(kStabilityHistogramName, histograms[1]->histogram_name());
+  EXPECT_EQ(GetRecordedDeltaHistogramSum(kStabilityHistogramName), 1);
+
+  // The samples should have been marked as logged.
+  EXPECT_EQ(histograms[0]->SnapshotUnloggedSamples()->TotalCount(), 0);
+  EXPECT_EQ(histograms[1]->SnapshotUnloggedSamples()->TotalCount(), 0);
 }
 
 TEST_F(HistogramSnapshotManagerTest, PrepareDeltasUmaHistogramFlagFilter) {
   // Note that kUmaStabilityHistogramFlag includes kUmaTargetedHistogramFlag.
-  UMA_HISTOGRAM_ENUMERATION("UmaHistogram", 1, 4);
-  UMA_STABILITY_HISTOGRAM_ENUMERATION("UmaStabilityHistogram", 1, 2);
+  base::UmaHistogramBoolean(kHistogramName, true);
+  UmaStabilityHistogramBoolean(kStabilityHistogramName, true);
 
-  StatisticsRecorder::PrepareDeltas(false, HistogramBase::kNoFlags,
-                                    HistogramBase::kUmaTargetedHistogramFlag,
-                                    &histogram_snapshot_manager_);
+  StatisticsRecorder::PrepareDeltas(
+      /*include_persistent=*/false, /*flags_to_set=*/HistogramBase::kNoFlags,
+      /*required_flags=*/HistogramBase::kUmaTargetedHistogramFlag,
+      &histogram_snapshot_manager_);
 
-  const std::vector<std::string>& histograms =
-      histogram_flattener_delta_recorder_.GetRecordedDeltaHistogramNames();
-  EXPECT_EQ(2U, histograms.size());
-  EXPECT_EQ("UmaHistogram", histograms[0]);
-  EXPECT_EQ("UmaStabilityHistogram", histograms[1]);
+  // Verify that the snapshots were recorded.
+  const std::vector<const HistogramBase*>& histograms =
+      histogram_flattener_delta_recorder_.GetRecordedDeltaHistograms();
+  ASSERT_EQ(2U, histograms.size());
+  ASSERT_EQ(kHistogramName, histograms[0]->histogram_name());
+  EXPECT_EQ(GetRecordedDeltaHistogramSum(kHistogramName), 1);
+  ASSERT_EQ(kStabilityHistogramName, histograms[1]->histogram_name());
+  EXPECT_EQ(GetRecordedDeltaHistogramSum(kStabilityHistogramName), 1);
+
+  // The samples should have been marked as logged.
+  EXPECT_EQ(histograms[0]->SnapshotUnloggedSamples()->TotalCount(), 0);
+  EXPECT_EQ(histograms[1]->SnapshotUnloggedSamples()->TotalCount(), 0);
 }
 
 TEST_F(HistogramSnapshotManagerTest,
        PrepareDeltasUmaStabilityHistogramFlagFilter) {
-  UMA_HISTOGRAM_ENUMERATION("UmaHistogram", 1, 4);
-  UMA_STABILITY_HISTOGRAM_ENUMERATION("UmaStabilityHistogram", 1, 2);
+  base::UmaHistogramBoolean(kHistogramName, true);
+  UmaStabilityHistogramBoolean(kStabilityHistogramName, true);
 
-  StatisticsRecorder::PrepareDeltas(false, HistogramBase::kNoFlags,
-                                    HistogramBase::kUmaStabilityHistogramFlag,
-                                    &histogram_snapshot_manager_);
+  StatisticsRecorder::PrepareDeltas(
+      /*include_persistent=*/false, /*flags_to_set=*/HistogramBase::kNoFlags,
+      /*required_flags=*/HistogramBase::kUmaStabilityHistogramFlag,
+      &histogram_snapshot_manager_);
 
-  const std::vector<std::string>& histograms =
-      histogram_flattener_delta_recorder_.GetRecordedDeltaHistogramNames();
-  EXPECT_EQ(1U, histograms.size());
-  EXPECT_EQ("UmaStabilityHistogram", histograms[0]);
+  // Verify that only the stability histogram was snapshotted and recorded.
+  const std::vector<const HistogramBase*>& histograms =
+      histogram_flattener_delta_recorder_.GetRecordedDeltaHistograms();
+  ASSERT_EQ(1U, histograms.size());
+  ASSERT_EQ(kStabilityHistogramName, histograms[0]->histogram_name());
+  EXPECT_EQ(GetRecordedDeltaHistogramSum(kStabilityHistogramName), 1);
+
+  // The samples should have been marked as logged.
+  EXPECT_EQ(histograms[0]->SnapshotUnloggedSamples()->TotalCount(), 0);
+}
+
+TEST_F(HistogramSnapshotManagerTest, SnapshotUnloggedSamplesNoFlagsFilter) {
+  // kNoFlags filter should record all histograms.
+  base::UmaHistogramBoolean(kHistogramName, true);
+  UmaStabilityHistogramBoolean(kStabilityHistogramName, true);
+
+  StatisticsRecorder::SnapshotUnloggedSamples(
+      /*required_flags=*/HistogramBase::kNoFlags, &histogram_snapshot_manager_);
+
+  // Verify that the snapshots were recorded.
+  const std::vector<const HistogramBase*>& histograms =
+      histogram_flattener_delta_recorder_.GetRecordedDeltaHistograms();
+  ASSERT_EQ(2U, histograms.size());
+  ASSERT_EQ(kHistogramName, histograms[0]->histogram_name());
+  EXPECT_EQ(GetRecordedDeltaHistogramSum(kHistogramName), 1);
+  ASSERT_EQ(kStabilityHistogramName, histograms[1]->histogram_name());
+  EXPECT_EQ(GetRecordedDeltaHistogramSum(kStabilityHistogramName), 1);
+
+  // The samples should NOT have been marked as logged.
+  std::unique_ptr<HistogramSamples> samples =
+      histograms[0]->SnapshotUnloggedSamples();
+  EXPECT_EQ(samples->TotalCount(), 1);
+  EXPECT_EQ(samples->sum(), 1);
+  samples = histograms[1]->SnapshotUnloggedSamples();
+  EXPECT_EQ(samples->TotalCount(), 1);
+  EXPECT_EQ(samples->sum(), 1);
+
+  // Mark the samples as logged and verify that they are marked as such.
+  histogram_snapshot_manager_.MarkUnloggedSamplesAsLogged();
+  EXPECT_EQ(histograms[0]->SnapshotUnloggedSamples()->TotalCount(), 0);
+  EXPECT_EQ(histograms[1]->SnapshotUnloggedSamples()->TotalCount(), 0);
+}
+
+TEST_F(HistogramSnapshotManagerTest,
+       SnapshotUnloggedSamplesUmaHistogramFlagFilter) {
+  // Note that kUmaStabilityHistogramFlag includes kUmaTargetedHistogramFlag.
+  base::UmaHistogramBoolean(kHistogramName, true);
+  UmaStabilityHistogramBoolean(kStabilityHistogramName, true);
+
+  StatisticsRecorder::SnapshotUnloggedSamples(
+      /*required_flags=*/HistogramBase::kUmaTargetedHistogramFlag,
+      &histogram_snapshot_manager_);
+
+  // Verify that the snapshots were recorded.
+  const std::vector<const HistogramBase*>& histograms =
+      histogram_flattener_delta_recorder_.GetRecordedDeltaHistograms();
+  ASSERT_EQ(2U, histograms.size());
+  ASSERT_EQ(kHistogramName, histograms[0]->histogram_name());
+  EXPECT_EQ(GetRecordedDeltaHistogramSum(kHistogramName), 1);
+  ASSERT_EQ(kStabilityHistogramName, histograms[1]->histogram_name());
+  EXPECT_EQ(GetRecordedDeltaHistogramSum(kStabilityHistogramName), 1);
+
+  // The samples should NOT have been marked as logged.
+  std::unique_ptr<HistogramSamples> samples =
+      histograms[0]->SnapshotUnloggedSamples();
+  EXPECT_EQ(samples->TotalCount(), 1);
+  EXPECT_EQ(samples->sum(), 1);
+  samples = histograms[1]->SnapshotUnloggedSamples();
+  EXPECT_EQ(samples->TotalCount(), 1);
+  EXPECT_EQ(samples->sum(), 1);
+
+  // Mark the samples as logged and verify that they are marked as such.
+  histogram_snapshot_manager_.MarkUnloggedSamplesAsLogged();
+  EXPECT_EQ(histograms[0]->SnapshotUnloggedSamples()->TotalCount(), 0);
+  EXPECT_EQ(histograms[1]->SnapshotUnloggedSamples()->TotalCount(), 0);
+}
+
+TEST_F(HistogramSnapshotManagerTest,
+       SnapshotUnloggedSamplesUmaStabilityHistogramFlagFilter) {
+  base::UmaHistogramBoolean(kHistogramName, true);
+  UmaStabilityHistogramBoolean(kStabilityHistogramName, true);
+
+  StatisticsRecorder::SnapshotUnloggedSamples(
+      /*required_flags=*/HistogramBase::kUmaStabilityHistogramFlag,
+      &histogram_snapshot_manager_);
+
+  // Verify that only the stability histogram was snapshotted and recorded.
+  const std::vector<const HistogramBase*>& histograms =
+      histogram_flattener_delta_recorder_.GetRecordedDeltaHistograms();
+  ASSERT_EQ(1U, histograms.size());
+  ASSERT_EQ(kStabilityHistogramName, histograms[0]->histogram_name());
+  EXPECT_EQ(GetRecordedDeltaHistogramSum(kStabilityHistogramName), 1);
+
+  // The samples should NOT have been marked as logged.
+  std::unique_ptr<HistogramSamples> samples =
+      histograms[0]->SnapshotUnloggedSamples();
+  EXPECT_EQ(samples->TotalCount(), 1);
+  EXPECT_EQ(samples->sum(), 1);
+
+  // Mark the samples as logged and verify that they are marked as such.
+  histogram_snapshot_manager_.MarkUnloggedSamplesAsLogged();
+  EXPECT_EQ(histograms[0]->SnapshotUnloggedSamples()->TotalCount(), 0);
 }
 
 }  // namespace base
diff --git a/base/metrics/histogram_threadsafe_unittest.cc b/base/metrics/histogram_threadsafe_unittest.cc
new file mode 100644
index 0000000..aeb0e85
--- /dev/null
+++ b/base/metrics/histogram_threadsafe_unittest.cc
@@ -0,0 +1,424 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/histogram.h"
+
+#include <memory>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "base/atomicops.h"
+#include "base/containers/span.h"
+#include "base/metrics/bucket_ranges.h"
+#include "base/metrics/persistent_histogram_allocator.h"
+#include "base/metrics/sparse_histogram.h"
+#include "base/no_destructor.h"
+#include "base/strings/stringprintf.h"
+#include "base/test/scoped_feature_list.h"
+#include "base/threading/simple_thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+char const* GetPermanentName(const std::string& name) {
+  // A set of histogram names that provides the "permanent" lifetime required
+  // by histogram objects for those strings that are not already code constants
+  // or held in persistent memory.
+  static base::NoDestructor<std::set<std::string>> permanent_names;
+
+  auto result = permanent_names->insert(name);
+  return result.first->c_str();
+}
+
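+// Returns the index of the bucket that |value| falls into. E.g., with ranges
+// {0, 1, 5, 10}, GetBucketIndex(3, ranges) returns 1: the first range strictly
+// greater than 3 is range(2) == 5, so the sample falls into bucket [1, 5).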
+size_t GetBucketIndex(HistogramBase::Sample value, const BucketRanges* ranges) {
+  size_t bucket_count = ranges->bucket_count();
+  EXPECT_GE(bucket_count, 1U);
+  for (size_t i = 0; i < bucket_count; ++i) {
+    if (ranges->range(i) > value) {
+      return i - 1;
+    }
+  }
+  return bucket_count - 1;
+}
+
+// Runs a task in a thread that will emit each of the passed |histograms|
+// |num_emissions_| times and snapshot them. The thread also keeps track of the
+// actual samples emitted, as well as the ones found in the snapshots taken, so
+// that they can be compared.
+class SnapshotDeltaThread : public SimpleThread {
+ public:
+  SnapshotDeltaThread(const std::string& name,
+                      size_t num_emissions,
+                      span<HistogramBase*> histograms,
+                      HistogramBase::Sample histogram_max,
+                      subtle::Atomic32* real_total_samples_count,
+                      span<subtle::Atomic32> real_bucket_counts,
+                      subtle::Atomic32* snapshots_total_samples_count,
+                      span<subtle::Atomic32> snapshots_bucket_counts)
+      : SimpleThread(name, Options()),
+        num_emissions_(num_emissions),
+        histograms_(histograms),
+        histogram_max_(histogram_max),
+        real_total_samples_count_(real_total_samples_count),
+        real_bucket_counts_(real_bucket_counts),
+        snapshots_total_samples_count_(snapshots_total_samples_count),
+        snapshots_bucket_counts_(snapshots_bucket_counts) {}
+
+  SnapshotDeltaThread(const SnapshotDeltaThread&) = delete;
+  SnapshotDeltaThread& operator=(const SnapshotDeltaThread&) = delete;
+
+  ~SnapshotDeltaThread() override = default;
+
+  void Run() override {
+    for (size_t i = 0; i < num_emissions_; ++i) {
+      for (HistogramBase* histogram : histograms_) {
+        // Emit a random sample. rand() is used to generate the sample, but the
+        // randomness does not really matter: thread-safety is what is being
+        // tested, and there is already a lot of non-determinism surrounding
+        // scheduling.
+        Histogram::Sample sample = rand() % histogram_max_;
+        histogram->Add(sample);
+
+        // Take a snapshot of the histogram. Because of the multithreading
+        // nature of the test, this may or may not include the sample that was
+        // just emitted, and/or may include samples that came from other
+        // threads.
+        std::unique_ptr<HistogramSamples> snapshot = histogram->SnapshotDelta();
+
+        // Store the sample that was emitted as well as the snapshot so that
+        // the totals can be compared later on.
+        StoreActualSample(histogram, sample);
+        StoreSnapshot(std::move(snapshot));
+      }
+    }
+  }
+
+ private:
+  // Stores an actual |sample| that was emitted for |histogram|, so that it can
+  // later be compared with what is found in histogram snapshots (see
+  // StoreSnapshot()).
+  void StoreActualSample(HistogramBase* histogram, Histogram::Sample sample) {
+    subtle::NoBarrier_AtomicIncrement(real_total_samples_count_, 1);
+    switch (histogram->GetHistogramType()) {
+      case HISTOGRAM: {
+        const BucketRanges* ranges =
+            static_cast<Histogram*>(histogram)->bucket_ranges();
+        size_t bucket_index = GetBucketIndex(sample, ranges);
+        size_t bucket_min = ranges->range(bucket_index);
+        subtle::NoBarrier_AtomicIncrement(&real_bucket_counts_[bucket_min], 1);
+        break;
+      }
+      case SPARSE_HISTOGRAM:
+        subtle::NoBarrier_AtomicIncrement(&real_bucket_counts_[sample], 1);
+        break;
+      case LINEAR_HISTOGRAM:
+      case BOOLEAN_HISTOGRAM:
+      case CUSTOM_HISTOGRAM:
+      case DUMMY_HISTOGRAM:
+        NOTREACHED();
+    }
+  }
+
+  // Stores a |snapshot| that was taken of a histogram, so that it can be
+  // compared with what was actually emitted (see StoreActualSample()).
+  void StoreSnapshot(std::unique_ptr<HistogramSamples> snapshot) {
+    HistogramBase::Count snapshot_samples_count = snapshot->TotalCount();
+    subtle::NoBarrier_AtomicIncrement(snapshots_total_samples_count_,
+                                      snapshot_samples_count);
+    for (auto it = snapshot->Iterator(); !it->Done(); it->Next()) {
+      HistogramBase::Sample min;
+      int64_t max;
+      HistogramBase::Count count;
+      it->Get(&min, &max, &count);
+      // Verify that the snapshot contains only positive bucket counts.
+      // This is to ensure SnapshotDelta() is fully thread-safe, not just
+      // "eventually consistent".
+      ASSERT_GE(count, 0);
+      subtle::NoBarrier_AtomicIncrement(&snapshots_bucket_counts_[min], count);
+    }
+  }
+
+  const size_t num_emissions_;
+  span<HistogramBase*> histograms_;
+  const HistogramBase::Sample histogram_max_;
+  raw_ptr<subtle::Atomic32> real_total_samples_count_;
+  span<subtle::Atomic32> real_bucket_counts_;
+  raw_ptr<subtle::Atomic32> snapshots_total_samples_count_;
+  span<subtle::Atomic32> snapshots_bucket_counts_;
+};
+
+}  // namespace
+
+class HistogramThreadsafeTest : public testing::Test {
+ public:
+  HistogramThreadsafeTest() = default;
+
+  HistogramThreadsafeTest(const HistogramThreadsafeTest&) = delete;
+  HistogramThreadsafeTest& operator=(const HistogramThreadsafeTest&) = delete;
+
+  ~HistogramThreadsafeTest() override = default;
+
+  void SetUp() override {
+    GlobalHistogramAllocator::CreateWithLocalMemory(4 << 20, /*id=*/0,
+                                                    /*name=*/"");
+    ASSERT_TRUE(GlobalHistogramAllocator::Get());
+
+    // Create a second view of the persistent memory with a new persistent
+    // histogram allocator in order to simulate a subprocess with its own view
+    // of some shared memory.
+    PersistentMemoryAllocator* allocator =
+        GlobalHistogramAllocator::Get()->memory_allocator();
+    std::unique_ptr<PersistentMemoryAllocator> memory_view =
+        std::make_unique<PersistentMemoryAllocator>(
+            /*base=*/const_cast<void*>(allocator->data()), allocator->size(),
+            /*page_size=*/0, /*id=*/0,
+            /*name=*/"GlobalHistogramAllocatorView", /*readonly=*/false);
+    allocator_view_ =
+        std::make_unique<PersistentHistogramAllocator>(std::move(memory_view));
+  }
+
+  void TearDown() override {
+    histograms_.clear();
+    allocator_view_.reset();
+    GlobalHistogramAllocator::ReleaseForTesting();
+    ASSERT_FALSE(GlobalHistogramAllocator::Get());
+  }
+
+  // Creates and returns various histograms (some that live in persistent
+  // memory, some that live on the local heap, and some that point to the same
+  // underlying data as those in persistent memory but are different objects).
+  std::vector<HistogramBase*> CreateHistograms(size_t suffix,
+                                               HistogramBase::Sample max,
+                                               size_t bucket_count) {
+    // There are 4 ways histograms can store their underlying data:
+    // PersistentSampleVector, PersistentSampleMap, SampleVector, and SampleMap.
+    // The first two are intended for when the data may be either persisted to a
+    // file or shared with another process. The last two are when the histograms
+    // are to be used by the local process only.
+    // Create 4 histograms that use those storage structures respectively.
+    std::vector<HistogramBase*> histograms;
+
+    // Create histograms in persistent memory (through the
+    // GlobalHistogramAllocator, which is used automatically by the
+    // FactoryGet() API). There is no need to store them in |histograms_|
+    // because these histograms are owned by the StatisticsRecorder.
+    std::string numeric_histogram_name =
+        StringPrintf("NumericHistogram%zu", suffix);
+    Histogram* numeric_histogram = static_cast<Histogram*>(
+        Histogram::FactoryGet(numeric_histogram_name, /*minimum=*/1, max,
+                              bucket_count, /*flags=*/HistogramBase::kNoFlags));
+    histograms.push_back(numeric_histogram);
+    std::string sparse_histogram_name =
+        StringPrintf("SparseHistogram%zu", suffix);
+    HistogramBase* sparse_histogram =
+        SparseHistogram::FactoryGet(sparse_histogram_name,
+                                    /*flags=*/HistogramBase::kNoFlags);
+    histograms.push_back(sparse_histogram);
+
+    // Create histograms on the "local heap" (i.e., are not instantiated using
+    // the GlobalHistogramAllocator, which is automatically done when using the
+    // FactoryGet() API). Store them in |histograms_| so that they are not freed
+    // during the test.
+    std::string local_heap_histogram_name =
+        StringPrintf("LocalHeapNumericHistogram%zu", suffix);
+    auto& local_heap_histogram = histograms_.emplace_back(
+        new Histogram(GetPermanentName(local_heap_histogram_name),
+                      numeric_histogram->bucket_ranges()));
+    histograms.push_back(local_heap_histogram.get());
+    std::string local_heap_sparse_histogram_name =
+        StringPrintf("LocalHeapSparseHistogram%zu", suffix);
+    auto& local_heap_sparse_histogram =
+        histograms_.emplace_back(new SparseHistogram(
+            GetPermanentName(local_heap_sparse_histogram_name)));
+    histograms.push_back(local_heap_sparse_histogram.get());
+
+    // Furthermore, create two additional *different* histogram objects that
+    // point to the same underlying data as the first two (|numeric_histogram|
+    // and |sparse_histogram|). This is to simulate subprocess histograms (i.e.,
+    // both the main browser process and the subprocess have their own histogram
+    // instance with possibly their own lock, but they both point to the same
+    // underlying storage, and they may both interact with it simultaneously).
+    // There is no need to do this for the "local heap" histograms because "by
+    // definition" they should only be interacted with within the same process.
+    PersistentHistogramAllocator::Iterator hist_it(allocator_view_.get());
+    std::unique_ptr<HistogramBase> subprocess_numeric_histogram;
+    std::unique_ptr<HistogramBase> subprocess_sparse_histogram;
+    while (true) {
+      // GetNext() creates a new histogram instance that points to the same
+      // underlying data as the histogram the iterator is pointing to.
+      std::unique_ptr<HistogramBase> histogram = hist_it.GetNext();
+      if (!histogram) {
+        break;
+      }
+
+      // Make sure the "local heap" histograms are not in persistent memory.
+      EXPECT_NE(local_heap_histogram_name, histogram->histogram_name());
+      EXPECT_NE(local_heap_sparse_histogram_name, histogram->histogram_name());
+
+      if (histogram->histogram_name() == numeric_histogram_name) {
+        subprocess_numeric_histogram = std::move(histogram);
+      } else if (histogram->histogram_name() == sparse_histogram_name) {
+        subprocess_sparse_histogram = std::move(histogram);
+      }
+    }
+    // Make sure we found the histograms, and ensure that they are not the same
+    // histogram objects. Assertions that they actually point to the same
+    // underlying data are deferred to the end of the test, so as not to mess
+    // up the sample counts.
+    EXPECT_TRUE(subprocess_numeric_histogram);
+    EXPECT_TRUE(subprocess_sparse_histogram);
+    histograms.push_back(subprocess_numeric_histogram.get());
+    histograms.push_back(subprocess_sparse_histogram.get());
+    EXPECT_NE(numeric_histogram, subprocess_numeric_histogram.get());
+    EXPECT_NE(sparse_histogram, subprocess_sparse_histogram.get());
+
+    // Store the histograms in |histograms_| so that they are not freed during
+    // the test.
+    histograms_.emplace_back(std::move(subprocess_numeric_histogram));
+    histograms_.emplace_back(std::move(subprocess_sparse_histogram));
+
+    return histograms;
+  }
+
+ private:
+  // A view of the GlobalHistogramAllocator to simulate a subprocess having its
+  // own view of some shared memory.
+  std::unique_ptr<PersistentHistogramAllocator> allocator_view_;
+
+  // Used to prevent histograms from being freed during the test.
+  std::vector<std::unique_ptr<HistogramBase>> histograms_;
+};
+
+// Verifies that SnapshotDelta() is thread safe. That means 1) a sample emitted
+// while a snapshot is taken is not lost, and 2) concurrent calls to
+// SnapshotDelta() will not return the same samples. Note that the test makes
+// use of ASSERT_* instead of EXPECT_* because the test is repeated multiple
+// times, and EXPECT_* produces spammy output since it does not end the test
+// immediately.
+TEST_F(HistogramThreadsafeTest, DISABLED_SnapshotDeltaThreadsafe) {
+  // We try this test |kNumIterations| times to get coverage of different
+  // scenarios. For example, if a numeric histogram only has samples within the
+  // same bucket, the samples are stored differently than if it had samples in
+  // multiple buckets, for efficiency reasons (SingleSample vs. a vector).
+  // Hence, the goal of running this test multiple times is to get coverage of
+  // the SingleSample scenario, because once the histogram has moved to using a
+  // vector, it will not use SingleSample again.
+  // Note: |kNumIterations| was 200 on 4/2023, but was decreased because the
+  // workload was causing flakiness (timing out).
+  constexpr size_t kNumIterations = 100;
+  for (size_t iteration = 0; iteration < kNumIterations; ++iteration) {
+    // TL;DR of the test: multiple threads are created, which will each emit to
+    // the same histograms and snapshot their delta multiple times. We keep
+    // track of the actual number of samples found in the snapshots, and ensure
+    // that it matches what we actually emitted.
+
+    // Create histograms. Two histograms should live in persistent memory,
+    // two should live on the local heap, and two should be simulations of
+    // subprocess histograms that point to the same underlying data as the
+    // first two histograms (but are different objects).
+    // The max values of the histograms alternate between 2 and 50 in order to
+    // cover both histograms emitted to with a small range of values and ones
+    // with a large range.
+    const HistogramBase::Sample kHistogramMax = (iteration % 2 == 0) ? 2 : 50;
+    const size_t kBucketCount = (iteration % 2 == 0) ? 3 : 10;
+    std::vector<HistogramBase*> histograms =
+        CreateHistograms(/*suffix=*/iteration, kHistogramMax, kBucketCount);
+
+    // Start |kNumThreads| that will each emit and snapshot the histograms (see
+    // SnapshotDeltaThread). We keep track of the real samples as well as the
+    // samples found in the snapshots so that we can compare that they match
+    // later on.
+    constexpr size_t kNumThreads = 2;
+    constexpr size_t kNumEmissions = 2000;
+    subtle::Atomic32 real_total_samples_count = 0;
+    std::vector<subtle::Atomic32> real_bucket_counts(kHistogramMax, 0);
+    subtle::Atomic32 snapshots_total_samples_count = 0;
+    std::vector<subtle::Atomic32> snapshots_bucket_counts(kHistogramMax, 0);
+    std::unique_ptr<SnapshotDeltaThread> threads[kNumThreads];
+    for (size_t i = 0; i < kNumThreads; ++i) {
+      threads[i] = std::make_unique<SnapshotDeltaThread>(
+          StringPrintf("SnapshotDeltaThread.%zu.%zu", iteration, i),
+          kNumEmissions, histograms, kHistogramMax, &real_total_samples_count,
+          real_bucket_counts, &snapshots_total_samples_count,
+          snapshots_bucket_counts);
+      threads[i]->Start();
+    }
+
+    // Wait until all threads have finished.
+    for (auto& thread : threads) {
+      thread->Join();
+    }
+
+    // Verify that the samples found in the snapshots match what we emitted.
+    ASSERT_EQ(static_cast<size_t>(real_total_samples_count),
+              kNumThreads * kNumEmissions * histograms.size());
+    ASSERT_EQ(snapshots_total_samples_count, real_total_samples_count);
+    for (HistogramBase::Sample i = 0; i < kHistogramMax; ++i) {
+      ASSERT_EQ(snapshots_bucket_counts[i], real_bucket_counts[i]);
+    }
+
+    // Also verify that no more unlogged samples remain, and that the internal
+    // logged samples of the histograms match what we emitted.
+
+    HistogramBase::Count logged_total_samples_count = 0;
+    std::vector<HistogramBase::Count> logged_bucket_counts(
+        /*value=*/kHistogramMax, 0);
+    // We ignore the last two histograms since they are the same as the first
+    // two (they are simulations of histogram instances from a subprocess that
+    // point to the same underlying data). Otherwise, we would be counting the
+    // samples from those histograms twice.
+    for (size_t i = 0; i < histograms.size() - 2; ++i) {
+      HistogramBase* histogram = histograms[i];
+      ASSERT_EQ(histogram->SnapshotDelta()->TotalCount(), 0);
+      std::unique_ptr<HistogramSamples> logged_samples =
+          histogram->SnapshotSamples();
+      // Each individual histogram should have been emitted to a specific
+      // number of times. Non-"local heap" histograms were emitted to twice as
+      // many times because they appeared twice in the |histograms| array --
+      // once as a normal histogram, and once as a simulation of a subprocess
+      // histogram.
+      size_t expected_logged_samples_count = kNumThreads * kNumEmissions;
+      if (!strstr(histogram->histogram_name(), "LocalHeap")) {
+        expected_logged_samples_count *= 2;
+      }
+      ASSERT_EQ(static_cast<size_t>(logged_samples->TotalCount()),
+                expected_logged_samples_count);
+
+      for (auto it = logged_samples->Iterator(); !it->Done(); it->Next()) {
+        HistogramBase::Sample min;
+        int64_t max;
+        HistogramBase::Count count;
+        it->Get(&min, &max, &count);
+        ASSERT_GE(count, 0);
+        logged_total_samples_count += count;
+        logged_bucket_counts[min] += count;
+      }
+    }
+    ASSERT_EQ(logged_total_samples_count, real_total_samples_count);
+    for (HistogramBase::Sample i = 0; i < kHistogramMax; ++i) {
+      ASSERT_EQ(logged_bucket_counts[i], real_bucket_counts[i]);
+    }
+
+    // Finally, verify that our "subprocess histograms" actually point to the
+    // same underlying data as the "main browser" histograms, despite being
+    // different instances (this was verified earlier). This is done at the end
+    // of the test so as not to mess up the sample counts.
+    HistogramBase* numeric_histogram = histograms[0];
+    HistogramBase* subprocess_numeric_histogram = histograms[4];
+    HistogramBase* sparse_histogram = histograms[1];
+    HistogramBase* subprocess_sparse_histogram = histograms[5];
+    ASSERT_EQ(subprocess_numeric_histogram->SnapshotDelta()->TotalCount(), 0);
+    ASSERT_EQ(subprocess_sparse_histogram->SnapshotDelta()->TotalCount(), 0);
+    numeric_histogram->Add(0);
+    sparse_histogram->Add(0);
+    ASSERT_EQ(subprocess_numeric_histogram->SnapshotDelta()->TotalCount(), 1);
+    ASSERT_EQ(subprocess_sparse_histogram->SnapshotDelta()->TotalCount(), 1);
+    ASSERT_EQ(numeric_histogram->SnapshotDelta()->TotalCount(), 0);
+    ASSERT_EQ(sparse_histogram->SnapshotDelta()->TotalCount(), 0);
+  }
+}
+
+}  // namespace base
diff --git a/base/metrics/histogram_unittest.cc b/base/metrics/histogram_unittest.cc
index a976d0a..626909f 100644
--- a/base/metrics/histogram_unittest.cc
+++ b/base/metrics/histogram_unittest.cc
@@ -1,10 +1,12 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "base/metrics/histogram.h"
 
 #include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
 
 #include <climits>
 #include <memory>
@@ -13,6 +15,7 @@
 
 #include "base/lazy_instance.h"
 #include "base/logging.h"
+#include "base/memory/raw_ptr.h"
 #include "base/metrics/bucket_ranges.h"
 #include "base/metrics/dummy_histogram.h"
 #include "base/metrics/histogram_macros.h"
@@ -26,7 +29,8 @@
 #include "base/strings/stringprintf.h"
 #include "base/test/gtest_util.h"
 #include "base/time/time.h"
-#include "starboard/types.h"
+#include "base/values.h"
+#include "testing/gmock/include/gmock/gmock.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
@@ -40,8 +44,8 @@
   ~TestRecordHistogramChecker() override = default;
 
   // RecordHistogramChecker:
-  bool ShouldRecord(uint64_t histogram_hash) const override {
-    return histogram_hash != HashMetricName(kExpiredHistogramName);
+  bool ShouldRecord(uint32_t histogram_hash) const override {
+    return histogram_hash != HashMetricNameAs32Bits(kExpiredHistogramName);
   }
 };
 
@@ -51,16 +55,24 @@
 // for histogram allocation. False will allocate histograms from the process
 // heap.
 class HistogramTest : public testing::TestWithParam<bool> {
+ public:
+  HistogramTest(const HistogramTest&) = delete;
+  HistogramTest& operator=(const HistogramTest&) = delete;
+
  protected:
+  using CountAndBucketData = base::Histogram::CountAndBucketData;
+
   const int32_t kAllocatorMemorySize = 8 << 20;  // 8 MiB
 
-  HistogramTest()
-      : statistics_recorder_(StatisticsRecorder::CreateTemporaryForTesting()),
-        use_persistent_histogram_allocator_(GetParam()) {}
+  HistogramTest() : use_persistent_histogram_allocator_(GetParam()) {}
 
   void SetUp() override {
     if (use_persistent_histogram_allocator_)
       CreatePersistentHistogramAllocator();
+
+    // Each test will have a clean state (no Histogram / BucketRanges
+    // registered).
+    InitializeStatisticsRecorder();
   }
 
   void TearDown() override {
@@ -68,12 +80,20 @@
       ASSERT_FALSE(allocator_->IsFull());
       ASSERT_FALSE(allocator_->IsCorrupt());
     }
+    UninitializeStatisticsRecorder();
     DestroyPersistentHistogramAllocator();
   }
 
+  void InitializeStatisticsRecorder() {
+    DCHECK(!statistics_recorder_);
+    statistics_recorder_ = StatisticsRecorder::CreateTemporaryForTesting();
+  }
+
+  void UninitializeStatisticsRecorder() { statistics_recorder_.reset(); }
+
   void CreatePersistentHistogramAllocator() {
-    GlobalHistogramAllocator::CreateWithLocalMemory(
-        kAllocatorMemorySize, 0, "HistogramAllocatorTest");
+    GlobalHistogramAllocator::CreateWithLocalMemory(kAllocatorMemorySize, 0,
+                                                    "HistogramAllocatorTest");
     allocator_ = GlobalHistogramAllocator::Get()->memory_allocator();
   }
 
@@ -86,25 +106,27 @@
     return h->SnapshotAllSamples();
   }
 
+  CountAndBucketData GetCountAndBucketData(Histogram* histogram) {
+    // A simple wrapper around |GetCountAndBucketData| to make it visible for
+    // testing.
+    return histogram->GetCountAndBucketData();
+  }
+
   const bool use_persistent_histogram_allocator_;
 
   std::unique_ptr<StatisticsRecorder> statistics_recorder_;
   std::unique_ptr<char[]> allocator_memory_;
-  PersistentMemoryAllocator* allocator_ = nullptr;
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(HistogramTest);
+  raw_ptr<PersistentMemoryAllocator> allocator_ = nullptr;
 };
 
 // Run all HistogramTest cases with both heap and persistent memory.
-INSTANTIATE_TEST_CASE_P(HeapAndPersistent, HistogramTest, testing::Bool());
-
+INSTANTIATE_TEST_SUITE_P(HeapAndPersistent, HistogramTest, testing::Bool());
 
 // Check for basic syntax and use.
 TEST_P(HistogramTest, BasicTest) {
   // Try basic construction
-  HistogramBase* histogram = Histogram::FactoryGet(
-      "TestHistogram", 1, 1000, 10, HistogramBase::kNoFlags);
+  HistogramBase* histogram = Histogram::FactoryGet("TestHistogram", 1, 1000, 10,
+                                                   HistogramBase::kNoFlags);
   EXPECT_TRUE(histogram);
 
   HistogramBase* linear_histogram = LinearHistogram::FactoryGet(
@@ -127,7 +149,7 @@
   already_run = true;
 
   // Use standard macros (but with fixed samples)
-  LOCAL_HISTOGRAM_TIMES("Test2Histogram", TimeDelta::FromDays(1));
+  LOCAL_HISTOGRAM_TIMES("Test2Histogram", Days(1));
   LOCAL_HISTOGRAM_COUNTS("Test3Histogram", 30);
 
   LOCAL_HISTOGRAM_ENUMERATION("Test6Histogram", 129, 130);
@@ -156,9 +178,8 @@
 
 // Check that delta calculations work correctly.
 TEST_P(HistogramTest, DeltaTest) {
-  HistogramBase* histogram =
-      Histogram::FactoryGet("DeltaHistogram", 1, 64, 8,
-                            HistogramBase::kNoFlags);
+  HistogramBase* histogram = Histogram::FactoryGet("DeltaHistogram", 1, 64, 8,
+                                                   HistogramBase::kNoFlags);
   histogram->Add(1);
   histogram->Add(10);
   histogram->Add(50);
@@ -169,25 +190,102 @@
   EXPECT_EQ(1, samples->GetCount(10));
   EXPECT_EQ(1, samples->GetCount(50));
   EXPECT_EQ(samples->TotalCount(), samples->redundant_count());
+  EXPECT_EQ(61, samples->sum());
 
   samples = histogram->SnapshotDelta();
   EXPECT_EQ(0, samples->TotalCount());
+  EXPECT_EQ(0, samples->sum());
 
   histogram->Add(10);
   histogram->Add(10);
   samples = histogram->SnapshotDelta();
   EXPECT_EQ(2, samples->TotalCount());
   EXPECT_EQ(2, samples->GetCount(10));
+  EXPECT_EQ(20, samples->sum());
 
   samples = histogram->SnapshotDelta();
   EXPECT_EQ(0, samples->TotalCount());
+  EXPECT_EQ(0, samples->sum());
+
+  // Verify that the logged samples contain everything emitted.
+  samples = histogram->SnapshotSamples();
+  EXPECT_EQ(5, samples->TotalCount());
+  EXPECT_EQ(samples->TotalCount(), samples->redundant_count());
+  EXPECT_EQ(1, samples->GetCount(1));
+  EXPECT_EQ(3, samples->GetCount(10));
+  EXPECT_EQ(1, samples->GetCount(50));
+  EXPECT_EQ(81, samples->sum());
+}
+
+// Check that delta calculations work correctly with SnapshotUnloggedSamples()
+// and MarkSamplesAsLogged().
+TEST_P(HistogramTest, UnloggedSamplesTest) {
+  HistogramBase* histogram = Histogram::FactoryGet("DeltaHistogram", 1, 64, 8,
+                                                   HistogramBase::kNoFlags);
+  histogram->Add(1);
+  histogram->Add(10);
+  histogram->Add(50);
+
+  std::unique_ptr<HistogramSamples> samples =
+      histogram->SnapshotUnloggedSamples();
+  EXPECT_EQ(3, samples->TotalCount());
+  EXPECT_EQ(1, samples->GetCount(1));
+  EXPECT_EQ(1, samples->GetCount(10));
+  EXPECT_EQ(1, samples->GetCount(50));
+  EXPECT_EQ(samples->TotalCount(), samples->redundant_count());
+  EXPECT_EQ(61, samples->sum());
+
+  // Snapshot the unlogged samples again; the result should be the same as
+  // above.
+  samples = histogram->SnapshotUnloggedSamples();
+  EXPECT_EQ(3, samples->TotalCount());
+  EXPECT_EQ(1, samples->GetCount(1));
+  EXPECT_EQ(1, samples->GetCount(10));
+  EXPECT_EQ(1, samples->GetCount(50));
+  EXPECT_EQ(samples->TotalCount(), samples->redundant_count());
+  EXPECT_EQ(61, samples->sum());
+
+  // Verify that marking the samples as logged works correctly, and that
+  // SnapshotDelta() will not pick up the samples.
+  histogram->MarkSamplesAsLogged(*samples);
+  samples = histogram->SnapshotUnloggedSamples();
+  EXPECT_EQ(0, samples->TotalCount());
+  EXPECT_EQ(samples->TotalCount(), samples->redundant_count());
+  EXPECT_EQ(0, samples->sum());
+  samples = histogram->SnapshotDelta();
+  EXPECT_EQ(0, samples->TotalCount());
+  EXPECT_EQ(samples->TotalCount(), samples->redundant_count());
+  EXPECT_EQ(0, samples->sum());
+
+  // Similarly, verify that SnapshotDelta() marks the samples as logged.
+  histogram->Add(1);
+  histogram->Add(10);
+  histogram->Add(50);
+  samples = histogram->SnapshotDelta();
+  EXPECT_EQ(3, samples->TotalCount());
+  EXPECT_EQ(1, samples->GetCount(1));
+  EXPECT_EQ(1, samples->GetCount(10));
+  EXPECT_EQ(1, samples->GetCount(50));
+  EXPECT_EQ(samples->TotalCount(), samples->redundant_count());
+  EXPECT_EQ(61, samples->sum());
+  samples = histogram->SnapshotUnloggedSamples();
+  EXPECT_EQ(0, samples->TotalCount());
+  EXPECT_EQ(samples->TotalCount(), samples->redundant_count());
+  EXPECT_EQ(0, samples->sum());
+
+  // Verify that the logged samples contain everything emitted.
+  samples = histogram->SnapshotSamples();
+  EXPECT_EQ(6, samples->TotalCount());
+  EXPECT_EQ(samples->TotalCount(), samples->redundant_count());
+  EXPECT_EQ(2, samples->GetCount(1));
+  EXPECT_EQ(2, samples->GetCount(10));
+  EXPECT_EQ(2, samples->GetCount(50));
+  EXPECT_EQ(122, samples->sum());
 }
 
 // Check that final-delta calculations work correctly.
 TEST_P(HistogramTest, FinalDeltaTest) {
-  HistogramBase* histogram =
-      Histogram::FactoryGet("FinalDeltaHistogram", 1, 64, 8,
-                            HistogramBase::kNoFlags);
+  HistogramBase* histogram = Histogram::FactoryGet("FinalDeltaHistogram", 1, 64,
+                                                   8, HistogramBase::kNoFlags);
   histogram->Add(1);
   histogram->Add(10);
   histogram->Add(50);
@@ -282,6 +380,25 @@
   EXPECT_TRUE(ranges2.Equals(histogram2->bucket_ranges()));
 }
 
+TEST_P(HistogramTest, SingleValueEnumerationHistogram) {
+  // Make sure it's possible to construct a linear histogram with only the two
+  // required outlier buckets (underflow and overflow).
+  HistogramBase* histogram = LinearHistogram::FactoryGet(
+      "SingleValueEnum", 1, 1, 2, HistogramBase::kNoFlags);
+  EXPECT_TRUE(histogram);
+
+  // Make sure the macros work properly. This can only be run when there is
+  // no persistent allocator, which could be discarded and leave dangling
+  // pointers.
+  if (!use_persistent_histogram_allocator_) {
+    enum EnumWithMax {
+      kSomething = 0,
+      kMaxValue = kSomething,
+    };
+    UMA_HISTOGRAM_ENUMERATION("h1", kSomething);
+  }
+}
+
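
The kMaxValue member used above is the convention that both
UMA_HISTOGRAM_ENUMERATION and UmaHistogramEnumeration() rely on to size the
histogram (the no-compile test further down rejects enums without it). A
hedged sketch with an illustrative enum:

    // Any enum reported this way must name its largest valid entry kMaxValue.
    enum class SketchResult {
      kSuccess = 0,
      kFailure = 1,
      kMaxValue = kFailure,
    };
    UMA_HISTOGRAM_ENUMERATION("Sketch.Result", SketchResult::kSuccess);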
 TEST_P(HistogramTest, ArrayToCustomEnumRangesTest) {
   const HistogramBase::Sample ranges[3] = {5, 10, 20};
   std::vector<HistogramBase::Sample> ranges_vec =
@@ -301,9 +418,8 @@
   custom_ranges.push_back(1);
   custom_ranges.push_back(2);
 
-  Histogram* histogram = static_cast<Histogram*>(
-      CustomHistogram::FactoryGet("TestCustomHistogram1", custom_ranges,
-                                  HistogramBase::kNoFlags));
+  Histogram* histogram = static_cast<Histogram*>(CustomHistogram::FactoryGet(
+      "TestCustomHistogram1", custom_ranges, HistogramBase::kNoFlags));
   const BucketRanges* ranges = histogram->bucket_ranges();
   ASSERT_EQ(4u, ranges->size());
   EXPECT_EQ(0, ranges->range(0));  // Auto added.
@@ -315,9 +431,8 @@
   custom_ranges.clear();
   custom_ranges.push_back(2);
   custom_ranges.push_back(1);
-  histogram = static_cast<Histogram*>(
-      CustomHistogram::FactoryGet("TestCustomHistogram2", custom_ranges,
-                                  HistogramBase::kNoFlags));
+  histogram = static_cast<Histogram*>(CustomHistogram::FactoryGet(
+      "TestCustomHistogram2", custom_ranges, HistogramBase::kNoFlags));
   ranges = histogram->bucket_ranges();
   ASSERT_EQ(4u, ranges->size());
   EXPECT_EQ(0, ranges->range(0));
@@ -330,9 +445,8 @@
   custom_ranges.push_back(4);
   custom_ranges.push_back(1);
   custom_ranges.push_back(4);
-  histogram = static_cast<Histogram*>(
-      CustomHistogram::FactoryGet("TestCustomHistogram3", custom_ranges,
-                                  HistogramBase::kNoFlags));
+  histogram = static_cast<Histogram*>(CustomHistogram::FactoryGet(
+      "TestCustomHistogram3", custom_ranges, HistogramBase::kNoFlags));
   ranges = histogram->bucket_ranges();
   ASSERT_EQ(4u, ranges->size());
   EXPECT_EQ(0, ranges->range(0));
@@ -350,9 +464,8 @@
   std::vector<HistogramBase::Sample> custom_ranges;
   custom_ranges.push_back(4);
 
-  Histogram* histogram = static_cast<Histogram*>(
-      CustomHistogram::FactoryGet("2BucketsCustomHistogram", custom_ranges,
-                                  HistogramBase::kNoFlags));
+  Histogram* histogram = static_cast<Histogram*>(CustomHistogram::FactoryGet(
+      "2BucketsCustomHistogram", custom_ranges, HistogramBase::kNoFlags));
   const BucketRanges* ranges = histogram->bucket_ranges();
   ASSERT_EQ(3u, ranges->size());
   EXPECT_EQ(0, ranges->range(0));
@@ -362,9 +475,8 @@
 
 TEST_P(HistogramTest, AddCountTest) {
   const size_t kBucketCount = 50;
-  Histogram* histogram = static_cast<Histogram*>(
-      Histogram::FactoryGet("AddCountHistogram", 10, 100, kBucketCount,
-                            HistogramBase::kNoFlags));
+  Histogram* histogram = static_cast<Histogram*>(Histogram::FactoryGet(
+      "AddCountHistogram", 10, 100, kBucketCount, HistogramBase::kNoFlags));
 
   histogram->AddCount(20, 15);
   histogram->AddCount(30, 14);
@@ -430,9 +542,8 @@
 // Make sure histogram handles out-of-bounds data gracefully.
 TEST_P(HistogramTest, BoundsTest) {
   const size_t kBucketCount = 50;
-  Histogram* histogram = static_cast<Histogram*>(
-      Histogram::FactoryGet("Bounded", 10, 100, kBucketCount,
-                            HistogramBase::kNoFlags));
+  Histogram* histogram = static_cast<Histogram*>(Histogram::FactoryGet(
+      "Bounded", 10, 100, kBucketCount, HistogramBase::kNoFlags));
 
   // Put two samples "out of bounds" above and below.
   histogram->Add(5);
@@ -565,9 +676,8 @@
 }
 
 TEST_P(HistogramTest, HistogramSerializeInfo) {
-  Histogram* histogram = static_cast<Histogram*>(
-      Histogram::FactoryGet("Histogram", 1, 64, 8,
-                            HistogramBase::kIPCSerializationSourceFlag));
+  Histogram* histogram = static_cast<Histogram*>(Histogram::FactoryGet(
+      "Histogram", 1, 64, 8, HistogramBase::kIPCSerializationSourceFlag));
   Pickle pickle;
   histogram->SerializeInfo(&pickle);
 
@@ -611,10 +721,9 @@
   custom_ranges.push_back(10);
   custom_ranges.push_back(100);
 
-  HistogramBase* custom_histogram = CustomHistogram::FactoryGet(
-      "TestCustomRangeBoundedHistogram",
-      custom_ranges,
-      HistogramBase::kNoFlags);
+  HistogramBase* custom_histogram =
+      CustomHistogram::FactoryGet("TestCustomRangeBoundedHistogram",
+                                  custom_ranges, HistogramBase::kNoFlags);
   Pickle pickle;
   custom_histogram->SerializeInfo(&pickle);
 
@@ -641,16 +750,16 @@
 }
 
 TEST_P(HistogramTest, BadConstruction) {
-  HistogramBase* histogram = Histogram::FactoryGet(
-      "BadConstruction", 0, 100, 8, HistogramBase::kNoFlags);
+  HistogramBase* histogram = Histogram::FactoryGet("BadConstruction", 0, 100, 8,
+                                                   HistogramBase::kNoFlags);
   EXPECT_TRUE(histogram->HasConstructionArguments(1, 100, 8));
 
   // Try to get the same histogram name with different arguments.
   HistogramBase* bad_histogram = Histogram::FactoryGet(
       "BadConstruction", 0, 100, 7, HistogramBase::kNoFlags);
   EXPECT_EQ(DummyHistogram::GetInstance(), bad_histogram);
-  bad_histogram = Histogram::FactoryGet(
-      "BadConstruction", 0, 99, 8, HistogramBase::kNoFlags);
+  bad_histogram = Histogram::FactoryGet("BadConstruction", 0, 99, 8,
+                                        HistogramBase::kNoFlags);
   EXPECT_EQ(DummyHistogram::GetInstance(), bad_histogram);
 
   HistogramBase* linear_histogram = LinearHistogram::FactoryGet(
@@ -658,11 +767,11 @@
   EXPECT_TRUE(linear_histogram->HasConstructionArguments(1, 100, 8));
 
   // Try to get the same histogram name with different arguments.
-  bad_histogram = LinearHistogram::FactoryGet(
-      "BadConstructionLinear", 0, 100, 7, HistogramBase::kNoFlags);
+  bad_histogram = LinearHistogram::FactoryGet("BadConstructionLinear", 0, 100,
+                                              7, HistogramBase::kNoFlags);
   EXPECT_EQ(DummyHistogram::GetInstance(), bad_histogram);
-  bad_histogram = LinearHistogram::FactoryGet(
-      "BadConstructionLinear", 10, 100, 8, HistogramBase::kNoFlags);
+  bad_histogram = LinearHistogram::FactoryGet("BadConstructionLinear", 10, 100,
+                                              8, HistogramBase::kNoFlags);
   EXPECT_EQ(DummyHistogram::GetInstance(), bad_histogram);
 }
 
@@ -688,8 +797,7 @@
   int64_t create_ms = create_ticks.InMilliseconds();
 
   VLOG(1) << kTestCreateCount << " histogram creations took " << create_ms
-          << "ms or about "
-          << (create_ms * 1000000) / kTestCreateCount
+          << "ms or about " << (create_ms * 1000000) / kTestCreateCount
           << "ns each.";
 
   // Calculate cost of looking up existing histograms.
@@ -708,13 +816,12 @@
   int64_t lookup_ms = lookup_ticks.InMilliseconds();
 
   VLOG(1) << kTestLookupCount << " histogram lookups took " << lookup_ms
-          << "ms or about "
-          << (lookup_ms * 1000000) / kTestLookupCount
+          << "ms or about " << (lookup_ms * 1000000) / kTestLookupCount
           << "ns each.";
 
   // Calculate cost of accessing histograms.
-  HistogramBase* histogram = Histogram::FactoryGet(
-      histogram_names[0], 1, 100, 10, HistogramBase::kNoFlags);
+  HistogramBase* histogram = Histogram::FactoryGet(histogram_names[0], 1, 100,
+                                                   10, HistogramBase::kNoFlags);
   ASSERT_TRUE(histogram);
   TimeTicks add_start = TimeTicks::Now();
   for (int i = 0; i < kTestAddCount; ++i)
@@ -723,9 +830,7 @@
   int64_t add_ms = add_ticks.InMilliseconds();
 
   VLOG(1) << kTestAddCount << " histogram adds took " << add_ms
-          << "ms or about "
-          << (add_ms * 1000000) / kTestAddCount
-          << "ns each.";
+          << "ms or about " << (add_ms * 1000000) / kTestAddCount << "ns each.";
 }
 
 TEST_P(HistogramTest, ScaledLinearHistogram) {
@@ -740,7 +845,7 @@
   scaled.AddScaledCount(6, 140);
 
   std::unique_ptr<SampleVector> samples =
-      SnapshotAllSamples(scaled.histogram());
+      SnapshotAllSamples(static_cast<Histogram*>(scaled.histogram()));
   EXPECT_EQ(0, samples->GetCountAtIndex(0));
   EXPECT_EQ(0, samples->GetCountAtIndex(1));
   EXPECT_EQ(1, samples->GetCountAtIndex(2));
@@ -768,25 +873,23 @@
 // 1). But we accept ranges exceeding those limits, silently clamping them
 // to those limits. This is for backwards compatibility.
 TEST(HistogramDeathTest, BadRangesTest) {
-  HistogramBase* histogram = Histogram::FactoryGet(
-      "BadRanges", 0, HistogramBase::kSampleType_MAX, 8,
-      HistogramBase::kNoFlags);
-  EXPECT_TRUE(
-      histogram->HasConstructionArguments(
-          1, HistogramBase::kSampleType_MAX - 1, 8));
+  HistogramBase* histogram =
+      Histogram::FactoryGet("BadRanges", 0, HistogramBase::kSampleType_MAX, 8,
+                            HistogramBase::kNoFlags);
+  EXPECT_TRUE(histogram->HasConstructionArguments(
+      1, HistogramBase::kSampleType_MAX - 1, 8));
 
   HistogramBase* linear_histogram = LinearHistogram::FactoryGet(
       "BadRangesLinear", 0, HistogramBase::kSampleType_MAX, 8,
       HistogramBase::kNoFlags);
-  EXPECT_TRUE(
-      linear_histogram->HasConstructionArguments(
-          1, HistogramBase::kSampleType_MAX - 1, 8));
+  EXPECT_TRUE(linear_histogram->HasConstructionArguments(
+      1, HistogramBase::kSampleType_MAX - 1, 8));
 
   std::vector<int> custom_ranges;
   custom_ranges.push_back(0);
   custom_ranges.push_back(5);
-  Histogram* custom_histogram = static_cast<Histogram*>(
-      CustomHistogram::FactoryGet(
+  Histogram* custom_histogram =
+      static_cast<Histogram*>(CustomHistogram::FactoryGet(
           "BadRangesCustom", custom_ranges, HistogramBase::kNoFlags));
   const BucketRanges* ranges = custom_histogram->bucket_ranges();
   ASSERT_EQ(3u, ranges->size());
@@ -799,7 +902,7 @@
   EXPECT_DEATH_IF_SUPPORTED(
       CustomHistogram::FactoryGet("BadRangesCustom2", custom_ranges,
                                   HistogramBase::kNoFlags),
-               "");
+      "");
 
   // CustomHistogram needs at least 1 valid range.
   custom_ranges.clear();
@@ -807,7 +910,7 @@
   EXPECT_DEATH_IF_SUPPORTED(
       CustomHistogram::FactoryGet("BadRangesCustom3", custom_ranges,
                                   HistogramBase::kNoFlags),
-               "");
+      "");
 }
 
 TEST_P(HistogramTest, ExpiredHistogramTest) {
@@ -830,6 +933,13 @@
   samples = linear_expired->SnapshotDelta();
   EXPECT_EQ(0, samples->TotalCount());
 
+  ScaledLinearHistogram scaled_linear_expired(kExpiredHistogramName, 1, 5, 6,
+                                              100, HistogramBase::kNoFlags);
+  scaled_linear_expired.AddScaledCount(0, 1);
+  scaled_linear_expired.AddScaledCount(1, 49);
+  samples = scaled_linear_expired.histogram()->SnapshotDelta();
+  EXPECT_EQ(0, samples->TotalCount());
+
   std::vector<int> custom_ranges;
   custom_ranges.push_back(1);
   custom_ranges.push_back(5);
@@ -866,4 +976,107 @@
   EXPECT_EQ(2, samples->TotalCount());
 }
 
+TEST_P(HistogramTest, CheckGetCountAndBucketData) {
+  const size_t kBucketCount = 50;
+  Histogram* histogram = static_cast<Histogram*>(Histogram::FactoryGet(
+      "AddCountHistogram", 10, 100, kBucketCount, HistogramBase::kNoFlags));
+  // Add samples in reverse order and make sure the output is in the correct
+  // order.
+  histogram->AddCount(/*value=*/30, /*count=*/14);
+  histogram->AddCount(/*value=*/20, /*count=*/15);
+  histogram->AddCount(/*value=*/20, /*count=*/15);
+  histogram->AddCount(/*value=*/30, /*count=*/14);
+
+  const CountAndBucketData count_and_data_bucket =
+      GetCountAndBucketData(histogram);
+  EXPECT_EQ(58, count_and_data_bucket.count);
+  EXPECT_EQ(1440, count_and_data_bucket.sum);
+
+  const base::Value::List& buckets_list = count_and_data_bucket.buckets;
+  ASSERT_EQ(2u, buckets_list.size());
+
+  // Check the first bucket.
+  const base::Value::Dict* bucket1 = buckets_list[0].GetIfDict();
+  ASSERT_TRUE(bucket1 != nullptr);
+  EXPECT_EQ(bucket1->FindInt("low"), absl::optional<int>(20));
+  EXPECT_EQ(bucket1->FindInt("high"), absl::optional<int>(21));
+  EXPECT_EQ(bucket1->FindInt("count"), absl::optional<int>(30));
+
+  // Check the second bucket.
+  const base::Value::Dict* bucket2 = buckets_list[1].GetIfDict();
+  ASSERT_TRUE(bucket2 != nullptr);
+  EXPECT_EQ(bucket2->FindInt("low"), absl::optional<int>(30));
+  EXPECT_EQ(bucket2->FindInt("high"), absl::optional<int>(31));
+  EXPECT_EQ(bucket2->FindInt("count"), absl::optional<int>(28));
+}
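
For reference, the expectations above follow directly from the four calls:
the total count is 15 + 15 + 14 + 14 = 58; the sum is 20 * 30 + 30 * 28 =
600 + 840 = 1440; and the per-bucket counts are 30 (two counts of 15 at
value 20) and 28 (two counts of 14 at value 30), reported in ascending
bucket order despite the reversed insertion order.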
+
+TEST_P(HistogramTest, WriteAscii) {
+  HistogramBase* histogram =
+      LinearHistogram::FactoryGet("AsciiOut", /*minimum=*/1, /*maximum=*/10,
+                                  /*bucket_count=*/5, HistogramBase::kNoFlags);
+  histogram->AddCount(/*value=*/4, /*count=*/5);
+
+  std::string output;
+  histogram->WriteAscii(&output);
+
+  const char kOutputFormatRe[] =
+      R"(Histogram: AsciiOut recorded 5 samples, mean = 4\.0.*\n)"
+      R"(0  \.\.\. \n)"
+      R"(4  -+O \s* \(5 = 100\.0%\) \{0\.0%\}\n)"
+      R"(7  \.\.\. \n)";
+
+  EXPECT_THAT(output, testing::MatchesRegex(kOutputFormatRe));
+}
+
+TEST_P(HistogramTest, ToGraphDict) {
+  HistogramBase* histogram =
+      LinearHistogram::FactoryGet("HTMLOut", /*minimum=*/1, /*maximum=*/10,
+                                  /*bucket_count=*/5, HistogramBase::kNoFlags);
+  histogram->AddCount(/*value=*/4, /*count=*/5);
+
+  base::Value::Dict output = histogram->ToGraphDict();
+  const std::string* header = output.FindString("header");
+  const std::string* body = output.FindString("body");
+
+  const char kOutputHeaderFormatRe[] =
+      R"(Histogram: HTMLOut recorded 5 samples, mean = 4\.0.*)";
+  const char kOutputBodyFormatRe[] =
+      R"(0  \.\.\. \n)"
+      R"(4  -+O \s*  \(5 = 100\.0%\) \{0\.0%\}\n)"
+      R"(7  \.\.\. \n)";
+
+  EXPECT_THAT(*header, testing::MatchesRegex(kOutputHeaderFormatRe));
+  EXPECT_THAT(*body, testing::MatchesRegex(kOutputBodyFormatRe));
+}
+
+// Tests that ToGraphDict() returns output of deterministic length and
+// normalizes the bar lengths to scale.
+TEST_P(HistogramTest, ToGraphDictNormalize) {
+  int count_bucket_1 = 80;
+  int value_bucket_1 = 4;
+  int count_bucket_2 = 40;
+  int value_bucket_2 = 5;
+  HistogramBase* histogram =
+      LinearHistogram::FactoryGet("AsciiOut", /*minimum=*/1, /*maximum=*/100,
+                                  /*bucket_count=*/80, HistogramBase::kNoFlags);
+  histogram->AddCount(/*value=*/value_bucket_1, /*count=*/count_bucket_1);
+  histogram->AddCount(/*value=*/value_bucket_2, /*count=*/count_bucket_2);
+
+  base::Value::Dict output = histogram->ToGraphDict();
+  std::string* header = output.FindString("header");
+  std::string* body = output.FindString("body");
+
+  const char kOutputHeaderFormatRe[] =
+      R"(Histogram: AsciiOut recorded 120 samples, mean = 4\.3.*)";
+  const char kOutputBodyFormatRe[] =
+      R"(0  \.\.\. \n)"
+      R"(4  ---------------------------------------------------)"
+      R"(---------------------O \(80 = 66\.7%\) \{0\.0%\}\n)"
+      R"(5  ----------------)"
+      R"(--------------------O \s* \(40 = 33\.3%\) \{66\.7%\}\n)"
+      R"(6  \.\.\. \n)";
+
+  EXPECT_THAT(*header, testing::MatchesRegex(kOutputHeaderFormatRe));
+  EXPECT_THAT(*body, testing::MatchesRegex(kOutputBodyFormatRe));
+}
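
The normalization expectations follow from the counts: 80 + 40 = 120 samples,
so the value-4 bucket is 80/120 = 66.7% and the value-5 bucket is 40/120 =
33.3%, with mean (80 * 4 + 40 * 5) / 120 = 4.33. Bars are scaled to the
largest bucket, which is why the second run of dashes is half the length of
the first.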
+
 }  // namespace base
diff --git a/base/metrics/histogram_unittest.nc b/base/metrics/histogram_unittest.nc
index c9c2657..c677106 100644
--- a/base/metrics/histogram_unittest.nc
+++ b/base/metrics/histogram_unittest.nc
@@ -1,4 +1,4 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
+// Copyright 2016 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -34,7 +34,7 @@
   UMA_HISTOGRAM_ENUMERATION("", TypeA::A, B);
 }
 
-#elif defined(NCTEST_NEGATIVE_ENUM_MAX)  // [r'static_assert failed "\|boundary\| is out of range of HistogramBase::Sample"']
+#elif defined(NCTEST_NEGATIVE_ENUM_MAX)  // [r"fatal error: static_assert failed due to requirement 'static_cast<uintmax_t>\(TypeA::A\) < static_cast<uintmax_t>\(std::numeric_limits<int>::max\(\)\)': |boundary| is out of range of HistogramBase::Sample"]
 
 void WontCompile() {
   // Buckets for enumeration start from 0, so a boundary < 0 is illegal.
@@ -42,7 +42,7 @@
   UMA_HISTOGRAM_ENUMERATION("", TypeA::A, TypeA::A);
 }
 
-#elif defined(NCTEST_ENUM_MAX_OUT_OF_RANGE)  // [r'static_assert failed "\|boundary\| is out of range of HistogramBase::Sample"']
+#elif defined(NCTEST_ENUM_MAX_OUT_OF_RANGE)  // [r"fatal error: static_assert failed due to requirement 'static_cast<uintmax_t>\(TypeA::A\) < static_cast<uintmax_t>\(std::numeric_limits<int>::max\(\)\)': |boundary| is out of range of HistogramBase::Sample"]
 
 void WontCompile() {
   // HistogramBase::Sample is an int and can't hold larger values.
@@ -50,14 +50,24 @@
   UMA_HISTOGRAM_ENUMERATION("", TypeA::A, TypeA::A);
 }
 
-#elif defined(NCTEST_SAMPLE_NOT_ENUM)  // [r'static_assert failed "Unexpected: \|boundary\| is enum, but \|sample\| is not."']
+#elif defined(NCTEST_SAMPLE_NOT_ENUM)  // [r"fatal error: static_assert failed due to requirement 'static_cast<uintmax_t>\(TypeA::A\) < static_cast<uintmax_t>\(std::numeric_limits<int>::max\(\)\)': |boundary| is out of range of HistogramBase::Sample"]
 
 void WontCompile() {
   enum TypeA { A };
   UMA_HISTOGRAM_ENUMERATION("", 0, TypeA::A);
 }
 
-#elif defined(NCTEST_FUNCTION_INT)  // [r"Non enum passed to UmaHistogramEnumeration"]
+#elif defined(NCTEST_FUNCTION_ENUM_NO_MAXVALUE)  // [r"no member named 'kMaxValue' in 'base::NoMaxValue'"]
+
+enum class NoMaxValue {
+  kMoo,
+};
+
+void WontCompile() {
+  UmaHistogramEnumeration("", NoMaxValue::kMoo);
+}
+
+#elif defined(NCTEST_FUNCTION_INT_AS_ENUM)  // [r"static assertion failed due to requirement 'std::is_enum<int>::value'"]
 
 void WontCompile() {
   UmaHistogramEnumeration("", 1, 2);
diff --git a/base/metrics/metrics_hashes.cc b/base/metrics/metrics_hashes.cc
index 3f72311..60f3fb7 100644
--- a/base/metrics/metrics_hashes.cc
+++ b/base/metrics/metrics_hashes.cc
@@ -1,16 +1,16 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "base/metrics/metrics_hashes.h"
 
-#include "base/logging.h"
-#include "base/md5.h"
+#include <string.h>
+
+#include "base/check_op.h"
+#include "base/hash/md5.h"
 #include "base/sys_byteorder.h"
-#include "starboard/memory.h"
 
 namespace base {
-
 namespace {
 
 // Converts the 8-byte prefix of an MD5 hash into a uint64_t value.
@@ -21,12 +21,32 @@
   return base::NetToHost64(value);
 }
 
+// Converts the 4-byte prefix of an MD5 hash into a uint32_t value.
+inline uint32_t DigestToUInt32(const base::MD5Digest& digest) {
+  uint32_t value;
+  DCHECK_GE(sizeof(digest.a), sizeof(value));
+  memcpy(&value, digest.a, sizeof(value));
+  return base::NetToHost32(value);
+}
+
 }  // namespace
 
 uint64_t HashMetricName(base::StringPiece name) {
+  // Corresponding Python code for a quick lookup:
+  //
+  //   import struct
+  //   import hashlib
+  //   struct.unpack('>Q', hashlib.md5(name.encode('utf-8')).digest()[:8])[0]
+  //
   base::MD5Digest digest;
   base::MD5Sum(name.data(), name.size(), &digest);
   return DigestToUInt64(digest);
 }
 
-}  // namespace metrics
+uint32_t HashMetricNameAs32Bits(base::StringPiece name) {
+  base::MD5Digest digest;
+  base::MD5Sum(name.data(), name.size(), &digest);
+  return DigestToUInt32(digest);
+}
+
+}  // namespace base
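
Both functions hash the same big-endian MD5 digest, so the 32-bit value is
always the top half of the 64-bit one. A minimal usage sketch (expected
values taken from the unit tests below):

    #include "base/metrics/metrics_hashes.h"

    uint64_t h64 = base::HashMetricName("Back");          // 0x0557fa923dcee4d0
    uint32_t h32 = base::HashMetricNameAs32Bits("Back");  // 0x0557fa92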
diff --git a/base/metrics/metrics_hashes.h b/base/metrics/metrics_hashes.h
index ed15f96..3826eb0 100644
--- a/base/metrics/metrics_hashes.h
+++ b/base/metrics/metrics_hashes.h
@@ -1,13 +1,14 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #ifndef BASE_METRICS_METRICS_HASHES_H_
 #define BASE_METRICS_METRICS_HASHES_H_
 
+#include <stdint.h>
+
 #include "base/base_export.h"
 #include "base/strings/string_piece.h"
-#include "starboard/types.h"
 
 namespace base {
 
@@ -15,6 +16,10 @@
 // for metric names.
 BASE_EXPORT uint64_t HashMetricName(base::StringPiece name);
 
-}  // namespace metrics
+// Computes a uint32_t hash of a given string based on its MD5 hash. This
+// can be more suitable for contexts where memory use is a concern.
+BASE_EXPORT uint32_t HashMetricNameAs32Bits(base::StringPiece name);
+
+}  // namespace base
 
 #endif  // BASE_METRICS_METRICS_HASHES_H_
diff --git a/base/metrics/metrics_hashes_unittest.cc b/base/metrics/metrics_hashes_unittest.cc
index 2949b91..a122f85 100644
--- a/base/metrics/metrics_hashes_unittest.cc
+++ b/base/metrics/metrics_hashes_unittest.cc
@@ -1,33 +1,53 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "base/metrics/metrics_hashes.h"
 
+#include <stddef.h>
+#include <stdint.h>
+
 #include "base/format_macros.h"
-#include "base/macros.h"
 #include "base/strings/stringprintf.h"
-#include "starboard/types.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
 
 // Make sure our ID hashes are the same as what we see on the server side.
-TEST(MetricsUtilTest, HashMetricName) {
+TEST(MetricsHashesTest, HashMetricName) {
+  // The cases must match those in //tools/metrics/ukm/codegen_test.py.
   static const struct {
     std::string input;
     std::string output;
   } cases[] = {
-    {"Back", "0x0557fa923dcee4d0"},
-    {"Forward", "0x67d2f6740a8eaebf"},
-    {"NewTab", "0x290eb683f96572f1"},
+      {"Back", "0x0557fa923dcee4d0"},
+      {"NewTab", "0x290eb683f96572f1"},
+      {"Forward", "0x67d2f6740a8eaebf"},
   };
 
-  for (size_t i = 0; i < arraysize(cases); ++i) {
+  for (size_t i = 0; i < std::size(cases); ++i) {
     uint64_t hash = HashMetricName(cases[i].input);
     std::string hash_hex = base::StringPrintf("0x%016" PRIx64, hash);
     EXPECT_EQ(cases[i].output, hash_hex);
   }
 }
 
+TEST(MetricsHashesTest, HashMetricNameAs32Bits) {
+  // The cases must match those in //tools/metrics/ukm/codegen_test.py.
+  static const struct {
+    std::string input;
+    std::string output;
+  } cases[] = {
+      {"Back", "0x0557fa92"},
+      {"NewTab", "0x290eb683"},
+      {"Forward", "0x67d2f674"},
+  };
+
+  for (size_t i = 0; i < std::size(cases); ++i) {
+    uint32_t hash = HashMetricNameAs32Bits(cases[i].input);
+    std::string hash_hex = base::StringPrintf("0x%08" PRIx32, hash);
+    EXPECT_EQ(cases[i].output, hash_hex);
+  }
+}
+
-}  // namespace metrics
+}  // namespace base
diff --git a/base/metrics/persistent_histogram_allocator.cc b/base/metrics/persistent_histogram_allocator.cc
index 669ca89..c8bbf45 100644
--- a/base/metrics/persistent_histogram_allocator.cc
+++ b/base/metrics/persistent_histogram_allocator.cc
@@ -1,12 +1,13 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
+// Copyright 2016 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "base/metrics/persistent_histogram_allocator.h"
 
-#include <memory>
+#include <atomic>
+#include <limits>
+#include <utility>
 
-#include "base/atomicops.h"
 #include "base/files/file_path.h"
 #include "base/files/file_util.h"
 #include "base/files/important_file_writer.h"
@@ -14,6 +15,8 @@
 #include "base/lazy_instance.h"
 #include "base/logging.h"
 #include "base/memory/ptr_util.h"
+#include "base/memory/shared_memory_mapping.h"
+#include "base/memory/writable_shared_memory_region.h"
 #include "base/metrics/histogram.h"
 #include "base/metrics/histogram_base.h"
 #include "base/metrics/histogram_samples.h"
@@ -21,14 +24,17 @@
 #include "base/metrics/persistent_sample_map.h"
 #include "base/metrics/sparse_histogram.h"
 #include "base/metrics/statistics_recorder.h"
+#include "base/notreached.h"
 #include "base/numerics/safe_conversions.h"
 #include "base/pickle.h"
 #include "base/process/process_handle.h"
+#include "base/strings/strcat.h"
 #include "base/strings/string_number_conversions.h"
+#include "base/strings/string_piece.h"
 #include "base/strings/string_split.h"
 #include "base/strings/stringprintf.h"
 #include "base/synchronization/lock.h"
-#include "starboard/memory.h"
+#include "build/build_config.h"
 
 namespace base {
 
@@ -95,11 +101,6 @@
 
 }  // namespace
 
-const Feature kPersistentHistogramsFeature{
-  "PersistentHistograms", FEATURE_DISABLED_BY_DEFAULT
-};
-
-
 PersistentSparseHistogramDataManager::PersistentSparseHistogramDataManager(
     PersistentMemoryAllocator* allocator)
     : allocator_(allocator), record_iterator_(allocator) {}
@@ -117,8 +118,6 @@
 PersistentSampleMapRecords*
 PersistentSparseHistogramDataManager::GetSampleMapRecordsWhileLocked(
     uint64_t id) {
-  lock_.AssertAcquired();
-
   auto found = sample_records_.find(id);
   if (found != sample_records_.end())
     return found->second.get();
@@ -245,7 +244,7 @@
   uint32_t bucket_count;
   PersistentMemoryAllocator::Reference ranges_ref;
   uint32_t ranges_checksum;
-  subtle::Atomic32 counts_ref;  // PersistentMemoryAllocator::Reference
+  std::atomic<PersistentMemoryAllocator::Reference> counts_ref;
   HistogramSamples::Metadata samples_metadata;
   HistogramSamples::Metadata logged_metadata;
 
@@ -269,7 +268,6 @@
   return nullptr;
 }
 
-
 PersistentHistogramAllocator::PersistentHistogramAllocator(
     std::unique_ptr<PersistentMemoryAllocator> memory)
     : memory_allocator_(std::move(memory)),
@@ -419,7 +417,7 @@
     // next import (which will happen before the next histogram creation)
     // will know to skip it.
     // See also the comment in ImportHistogramsToStatisticsRecorder().
-    subtle::NoBarrier_Store(&last_created_, histogram_ref);
+    last_created_.store(histogram_ref, std::memory_order_relaxed);
     return histogram;
   }
 
@@ -491,8 +489,13 @@
   memory_allocator_->UpdateTrackingHistograms();
 }
 
+void PersistentHistogramAllocator::SetRangesManager(
+    RangesManager* ranges_manager) {
+  ranges_manager_.reset(ranges_manager);
+}
+
 void PersistentHistogramAllocator::ClearLastCreatedReferenceForTesting() {
-  subtle::NoBarrier_Store(&last_created_, 0);
+  last_created_.store(0, std::memory_order_relaxed);
 }
 
 std::unique_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
@@ -545,13 +548,22 @@
       ranges_data, histogram_ranges_checksum, histogram_bucket_count + 1);
   if (!created_ranges)
     return nullptr;
-  const BucketRanges* ranges =
-      StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
-          created_ranges.release());
+  DCHECK_EQ(created_ranges->size(), histogram_bucket_count + 1);
+  DCHECK_EQ(created_ranges->range(1), histogram_minimum);
+  DCHECK_EQ(created_ranges->range(histogram_bucket_count - 1),
+            histogram_maximum);
+  const BucketRanges* ranges;
+  if (ranges_manager_) {
+    ranges = ranges_manager_->RegisterOrDeleteDuplicateRanges(
+        created_ranges.release());
+  } else {
+    ranges = StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
+        created_ranges.release());
+  }
 
   size_t counts_bytes = CalculateRequiredCountsBytes(histogram_bucket_count);
   PersistentMemoryAllocator::Reference counts_ref =
-      subtle::Acquire_Load(&histogram_data_ptr->counts_ref);
+      histogram_data_ptr->counts_ref.load(std::memory_order_acquire);
   if (counts_bytes == 0 ||
       (counts_ref != 0 &&
        memory_allocator_->GetAllocSize(counts_ref) < counts_bytes)) {
@@ -566,7 +578,7 @@
   // notice the valid reference and access the same data.
   DelayedPersistentAllocation counts_data(memory_allocator_.get(),
                                           &histogram_data_ptr->counts_ref,
-                                          kTypeIdCountsArray, counts_bytes, 0);
+                                          kTypeIdCountsArray, counts_bytes);
 
   // A second delayed allocation is defined using the same reference storage
   // location as the first, so the allocation of one will automatically be
   // found by the other. The first half of the block holds the "active" counts
   // and the second half is for "logged counts".
   // and the second half is for "logged counts".
   DelayedPersistentAllocation logged_data(
       memory_allocator_.get(), &histogram_data_ptr->counts_ref,
-      kTypeIdCountsArray, counts_bytes, counts_bytes / 2,
-      /*make_iterable=*/false);
+      kTypeIdCountsArray, counts_bytes, counts_bytes / 2);
 
   // Create the right type of histogram.
   const char* name = histogram_data_ptr->name;
   std::unique_ptr<HistogramBase> histogram;
   switch (histogram_type) {
     case HISTOGRAM:
-      histogram = Histogram::PersistentCreate(
-          name, histogram_minimum, histogram_maximum, ranges, counts_data,
-          logged_data, &histogram_data_ptr->samples_metadata,
-          &histogram_data_ptr->logged_metadata);
+      histogram =
+          Histogram::PersistentCreate(name, ranges, counts_data, logged_data,
+                                      &histogram_data_ptr->samples_metadata,
+                                      &histogram_data_ptr->logged_metadata);
       DCHECK(histogram);
       break;
     case LINEAR_HISTOGRAM:
       histogram = LinearHistogram::PersistentCreate(
-          name, histogram_minimum, histogram_maximum, ranges, counts_data,
-          logged_data, &histogram_data_ptr->samples_metadata,
+          name, ranges, counts_data, logged_data,
+          &histogram_data_ptr->samples_metadata,
           &histogram_data_ptr->logged_metadata);
       DCHECK(histogram);
       break;
@@ -647,7 +658,7 @@
     return nullptr;
 
   // Make sure there is no "serialization" flag set.
-  DCHECK_EQ(0, existing->flags() & HistogramBase::kIPCSerializationSourceFlag);
+  DCHECK(!existing->HasFlags(HistogramBase::kIPCSerializationSourceFlag));
   // Record the newly created histogram in the SR.
   return StatisticsRecorder::RegisterOrDeleteDuplicate(existing);
 }
@@ -675,26 +686,28 @@
       std::make_unique<LocalPersistentMemoryAllocator>(size, id, name))));
 }
 
-#if !defined(OS_NACL) && !defined(STARBOARD)
+#if !BUILDFLAG(IS_NACL) && !defined(STARBOARD)
 // static
-bool GlobalHistogramAllocator::CreateWithFile(
-    const FilePath& file_path,
-    size_t size,
-    uint64_t id,
-    StringPiece name) {
-  bool exists = PathExists(file_path);
-  File file(
-      file_path, File::FLAG_OPEN_ALWAYS | File::FLAG_SHARE_DELETE |
-                 File::FLAG_READ | File::FLAG_WRITE);
+bool GlobalHistogramAllocator::CreateWithFile(const FilePath& file_path,
+                                              size_t size,
+                                              uint64_t id,
+                                              StringPiece name,
+                                              bool exclusive_write) {
+  uint32_t flags = File::FLAG_OPEN_ALWAYS | File::FLAG_WIN_SHARE_DELETE |
+                   File::FLAG_READ | File::FLAG_WRITE;
+  if (exclusive_write)
+    flags |= File::FLAG_WIN_EXCLUSIVE_WRITE;
+  File file(file_path, flags);
+  if (!file.IsValid())
+    return false;
 
   std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
   bool success = false;
-  if (exists) {
-    size = saturated_cast<size_t>(file.GetLength());
-    success = mmfile->Initialize(std::move(file), MemoryMappedFile::READ_WRITE);
-  } else {
+  if (file.created()) {
     success = mmfile->Initialize(std::move(file), {0, size},
                                  MemoryMappedFile::READ_WRITE_EXTEND);
+  } else {
+    success = mmfile->Initialize(std::move(file), MemoryMappedFile::READ_WRITE);
   }
   if (!success ||
       !FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, true)) {
@@ -702,8 +715,8 @@
   }
 
   Set(WrapUnique(new GlobalHistogramAllocator(
-      std::make_unique<FilePersistentMemoryAllocator>(std::move(mmfile), size,
-                                                      id, name, false))));
+      std::make_unique<FilePersistentMemoryAllocator>(std::move(mmfile), 0, id,
+                                                      name, false))));
   Get()->SetPersistentLocation(file_path);
   return true;
 }
@@ -717,14 +730,13 @@
                                                     StringPiece name) {
   // Old "active" becomes "base".
   if (!base::ReplaceFile(active_path, base_path, nullptr))
-    base::DeleteFile(base_path, /*recursive=*/false);
-  DCHECK(!base::PathExists(active_path));
+    base::DeleteFile(base_path);
+  if (base::PathExists(active_path))
+    return false;
 
   // Move any "spare" into "active". Okay to continue if file doesn't exist.
-  if (!spare_path.empty()) {
+  if (!spare_path.empty())
     base::ReplaceFile(spare_path, active_path, nullptr);
-    DCHECK(!base::PathExists(spare_path));
-  }
 
   return base::GlobalHistogramAllocator::CreateWithFile(active_path, size, id,
                                                         name);
@@ -735,8 +747,9 @@
                                                          size_t size,
                                                          uint64_t id,
                                                          StringPiece name) {
-  FilePath base_path, active_path, spare_path;
-  ConstructFilePaths(dir, name, &base_path, &active_path, &spare_path);
+  FilePath base_path = ConstructFilePath(dir, name);
+  FilePath active_path = ConstructFilePathForActiveFile(dir, name);
+  FilePath spare_path = ConstructFilePath(dir, std::string(name) + "-spare");
   return CreateWithActiveFile(base_path, active_path, spare_path, size, id,
                               name);
 }
@@ -749,6 +762,13 @@
 }
 
 // static
+FilePath GlobalHistogramAllocator::ConstructFilePathForActiveFile(
+    const FilePath& dir,
+    StringPiece name) {
+  return ConstructFilePath(dir, std::string(name) + "-active");
+}
+
+// static
 FilePath GlobalHistogramAllocator::ConstructFilePathForUploadDir(
     const FilePath& dir,
     StringPiece name,
@@ -761,6 +781,14 @@
 }
 
 // static
+FilePath GlobalHistogramAllocator::ConstructFilePathForUploadDir(
+    const FilePath& dir,
+    StringPiece name) {
+  return ConstructFilePathForUploadDir(dir, name, Time::Now(),
+                                       GetCurrentProcId());
+}
+
+// static
 bool GlobalHistogramAllocator::ParseFilePath(const FilePath& path,
                                              std::string* out_name,
                                              Time* out_stamp,
@@ -772,7 +800,7 @@
     return false;
 
   if (out_name)
-    *out_name = parts[0].as_string();
+    *out_name = std::string(parts[0]);
 
   if (out_stamp) {
     int64_t stamp;
@@ -791,101 +819,53 @@
   return true;
 }
 
-// static
-void GlobalHistogramAllocator::ConstructFilePaths(const FilePath& dir,
-                                                  StringPiece name,
-                                                  FilePath* out_base_path,
-                                                  FilePath* out_active_path,
-                                                  FilePath* out_spare_path) {
-  if (out_base_path)
-    *out_base_path = ConstructFilePath(dir, name);
-
-  if (out_active_path) {
-    *out_active_path =
-        ConstructFilePath(dir, name.as_string().append("-active"));
-  }
-
-  if (out_spare_path) {
-    *out_spare_path = ConstructFilePath(dir, name.as_string().append("-spare"));
-  }
-}
-
-// static
-void GlobalHistogramAllocator::ConstructFilePathsForUploadDir(
-    const FilePath& active_dir,
-    const FilePath& upload_dir,
-    const std::string& name,
-    FilePath* out_upload_path,
-    FilePath* out_active_path,
-    FilePath* out_spare_path) {
-  if (out_upload_path) {
-    *out_upload_path = ConstructFilePathForUploadDir(
-        upload_dir, name, Time::Now(), GetCurrentProcId());
-  }
-
-  if (out_active_path) {
-    *out_active_path =
-        ConstructFilePath(active_dir, name + std::string("-active"));
-  }
-
-  if (out_spare_path) {
-    *out_spare_path =
-        ConstructFilePath(active_dir, name + std::string("-spare"));
-  }
-}
-
 // static
 bool GlobalHistogramAllocator::CreateSpareFile(const FilePath& spare_path,
                                                size_t size) {
+  // If the spare file already exists, it was created in a previous session and
+  // is still unused, so do nothing.
+  if (base::PathExists(spare_path)) {
+    return false;
+  }
   FilePath temp_spare_path = spare_path.AddExtension(FILE_PATH_LITERAL(".tmp"));
-  bool success = true;
+  bool success;
   {
     File spare_file(temp_spare_path, File::FLAG_CREATE_ALWAYS |
                                          File::FLAG_READ | File::FLAG_WRITE);
-    if (!spare_file.IsValid())
-      return false;
+    success = spare_file.IsValid();
 
-    MemoryMappedFile mmfile;
-    success = mmfile.Initialize(std::move(spare_file), {0, size},
-                                MemoryMappedFile::READ_WRITE_EXTEND);
+    if (success) {
+      MemoryMappedFile mmfile;
+      success = mmfile.Initialize(std::move(spare_file), {0, size},
+                                  MemoryMappedFile::READ_WRITE_EXTEND);
+    }
   }
 
   if (success)
     success = ReplaceFile(temp_spare_path, spare_path, nullptr);
 
   if (!success)
-    DeleteFile(temp_spare_path, /*recursive=*/false);
+    DeleteFile(temp_spare_path);
 
   return success;
 }
-
-// static
-bool GlobalHistogramAllocator::CreateSpareFileInDir(const FilePath& dir,
-                                                    size_t size,
-                                                    StringPiece name) {
-  FilePath spare_path;
-  ConstructFilePaths(dir, name, nullptr, nullptr, &spare_path);
-  return CreateSpareFile(spare_path, size);
-}
-#endif  // !defined(OS_NACL)
+#endif  // !BUILDFLAG(IS_NACL) && !defined(STARBOARD)
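
The spare-file mechanism kept above spans two sessions: one session
pre-creates and pre-sizes the next session's storage on a background thread,
and the next session renames it into place rather than paying the
file-extension cost at startup. A hedged sketch of the flow (directory,
size, and allocator name are illustrative):

    // Session N, on a background thread: pre-create the next session's file.
    base::FilePath spare = base::GlobalHistogramAllocator::ConstructFilePath(
        dir, "SketchMetrics-spare");
    base::GlobalHistogramAllocator::CreateSpareFile(spare, 1 << 20);

    // Session N+1, at startup: the spare is renamed to the "-active" file,
    // and any old active file becomes the base file.
    base::GlobalHistogramAllocator::CreateWithActiveFileInDir(
        dir, 1 << 20, /*id=*/0, "SketchMetrics");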
 
 #if !defined(STARBOARD)
 // static
-void GlobalHistogramAllocator::CreateWithSharedMemoryHandle(
-    const SharedMemoryHandle& handle,
-    size_t size) {
-  std::unique_ptr<SharedMemory> shm(
-      new SharedMemory(handle, /*readonly=*/false));
-  if (!shm->Map(size) ||
-      !SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(*shm)) {
+void GlobalHistogramAllocator::CreateWithSharedMemoryRegion(
+    const WritableSharedMemoryRegion& region) {
+  base::WritableSharedMemoryMapping mapping = region.Map();
+  if (!mapping.IsValid() ||
+      !WritableSharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(
+          mapping)) {
     return;
   }
 
   Set(WrapUnique(new GlobalHistogramAllocator(
-      std::make_unique<SharedPersistentMemoryAllocator>(
-          std::move(shm), 0, StringPiece(), /*readonly=*/false))));
+      std::make_unique<WritableSharedPersistentMemoryAllocator>(
+          std::move(mapping), 0, StringPiece()))));
 }
-#endif
+#endif  // !defined(STARBOARD)
 
 // static
 void GlobalHistogramAllocator::Set(
@@ -895,7 +875,7 @@
   // also released, future accesses to those histograms will seg-fault.
   CHECK(!subtle::NoBarrier_Load(&g_histogram_allocator));
   subtle::Release_Store(&g_histogram_allocator,
-                        reinterpret_cast<uintptr_t>(allocator.release()));
+                        reinterpret_cast<intptr_t>(allocator.release()));
   size_t existing = StatisticsRecorder::GetHistogramCount();
 
   DVLOG_IF(1, existing)
@@ -928,7 +908,7 @@
 
   subtle::Release_Store(&g_histogram_allocator, 0);
   return WrapUnique(histogram_allocator);
-};
+}
 
 void GlobalHistogramAllocator::SetPersistentLocation(const FilePath& location) {
   persistent_location_ = location;
@@ -938,14 +918,33 @@
   return persistent_location_;
 }
 
+bool GlobalHistogramAllocator::HasPersistentLocation() const {
+  return !persistent_location_.empty();
+}
+
+bool GlobalHistogramAllocator::MovePersistentFile(const FilePath& dir) {
+  DCHECK(HasPersistentLocation());
+
+  FilePath new_file_path = dir.Append(persistent_location_.BaseName());
+
+  // Change the location of the persistent file. This is fine to do even
+  // though the file is currently "opened" by this process: the backing file
+  // is opened with File::FLAG_WIN_SHARE_DELETE (see CreateWithFile() above),
+  // which permits renaming an open file on Windows, and POSIX allows renaming
+  // open files unconditionally.
+  if (!base::ReplaceFile(persistent_location_, new_file_path, nullptr)) {
+    return false;
+  }
+
+  SetPersistentLocation(new_file_path);
+  return true;
+}
+
 bool GlobalHistogramAllocator::WriteToPersistentLocation() {
-#if defined(OS_NACL) || defined(STARBOARD)
+#if BUILDFLAG(IS_NACL)
   // NACL doesn't support file operations, including ImportantFileWriter.
   NOTREACHED();
   return false;
 #else
   // Stop if no destination is set.
-  if (persistent_location_.empty()) {
+  if (!HasPersistentLocation()) {
     NOTREACHED() << "Could not write \"" << Name() << "\" persistent histograms"
                  << " to file because no location was set.";
     return false;
@@ -966,11 +965,12 @@
 void GlobalHistogramAllocator::DeletePersistentLocation() {
   memory_allocator()->SetMemoryState(PersistentMemoryAllocator::MEMORY_DELETED);
 
-#if defined(OS_NACL) || defined(STARBOARD)
+#if BUILDFLAG(IS_NACL)
   NOTREACHED();
 #else
-  if (persistent_location_.empty())
+  if (!HasPersistentLocation()) {
     return;
+  }
 
   // Open (with delete) and then immediately close the file by going out of
   // scope. This is the only cross-platform safe way to delete a file that may
diff --git a/base/metrics/persistent_histogram_allocator.h b/base/metrics/persistent_histogram_allocator.h
index 41aff76..eed72ef 100644
--- a/base/metrics/persistent_histogram_allocator.h
+++ b/base/metrics/persistent_histogram_allocator.h
@@ -1,22 +1,25 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
+// Copyright 2016 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef BASE_METRICS_HISTOGRAM_PERSISTENCE_H_
-#define BASE_METRICS_HISTOGRAM_PERSISTENCE_H_
+#ifndef BASE_METRICS_PERSISTENT_HISTOGRAM_ALLOCATOR_H_
+#define BASE_METRICS_PERSISTENT_HISTOGRAM_ALLOCATOR_H_
 
+#include <atomic>
 #include <map>
 #include <memory>
+#include <string>
+#include <vector>
 
-#include "base/atomicops.h"
 #include "base/base_export.h"
-#include "base/feature_list.h"
-#include "base/memory/shared_memory.h"
+#include "base/memory/raw_ptr.h"
 #include "base/metrics/histogram_base.h"
 #include "base/metrics/persistent_memory_allocator.h"
+#include "base/metrics/ranges_manager.h"
 #include "base/process/process_handle.h"
 #include "base/strings/string_piece.h"
 #include "base/synchronization/lock.h"
+#include "build/build_config.h"
 
 namespace base {
 
@@ -24,10 +27,7 @@
 class FilePath;
 class PersistentSampleMapRecords;
 class PersistentSparseHistogramDataManager;
-
-// Feature definition for enabling histogram persistence.
-BASE_EXPORT extern const Feature kPersistentHistogramsFeature;
-
+class WritableSharedMemoryRegion;
 
 // A data manager for sparse histograms so each instance of such doesn't have
 // to separately iterate over the entire memory segment. Though this class
@@ -44,6 +44,11 @@
   explicit PersistentSparseHistogramDataManager(
       PersistentMemoryAllocator* allocator);
 
+  PersistentSparseHistogramDataManager(
+      const PersistentSparseHistogramDataManager&) = delete;
+  PersistentSparseHistogramDataManager& operator=(
+      const PersistentSparseHistogramDataManager&) = delete;
+
   ~PersistentSparseHistogramDataManager();
 
   // Returns the object that manages the persistent-sample-map records for a
@@ -64,9 +69,9 @@
  private:
   friend class PersistentSampleMapRecords;
 
-  // Gets the object holding records for a given sample-map id when |lock_|
-  // has already been acquired.
-  PersistentSampleMapRecords* GetSampleMapRecordsWhileLocked(uint64_t id);
+  // Gets the object holding records for a given sample-map id.
+  PersistentSampleMapRecords* GetSampleMapRecordsWhileLocked(uint64_t id)
+      EXCLUSIVE_LOCKS_REQUIRED(lock_);
 
   // Loads sample-map records looking for those belonging to the specified
   // |load_id|. Records found for other sample-maps are held for later use
@@ -77,19 +82,16 @@
   bool LoadRecords(PersistentSampleMapRecords* sample_map_records);
 
   // Weak-pointer to the allocator used by the sparse histograms.
-  PersistentMemoryAllocator* allocator_;
+  raw_ptr<PersistentMemoryAllocator> allocator_;
 
   // Iterator within the allocator for finding sample records.
-  PersistentMemoryAllocator::Iterator record_iterator_;
+  PersistentMemoryAllocator::Iterator record_iterator_ GUARDED_BY(lock_);
 
   // Mapping of sample-map IDs to their sample records.
   std::map<uint64_t, std::unique_ptr<PersistentSampleMapRecords>>
-      sample_records_;
+      sample_records_ GUARDED_BY(lock_);
 
-  // A lock used for synchronizing changes to sample_records_.
   base::Lock lock_;
-
-  DISALLOW_COPY_AND_ASSIGN(PersistentSparseHistogramDataManager);
 };
 
 
@@ -107,6 +109,10 @@
   PersistentSampleMapRecords(PersistentSparseHistogramDataManager* data_manager,
                              uint64_t sample_map_id);
 
+  PersistentSampleMapRecords(const PersistentSampleMapRecords&) = delete;
+  PersistentSampleMapRecords& operator=(const PersistentSampleMapRecords&) =
+      delete;
+
   ~PersistentSampleMapRecords();
 
   // Resets the internal state for a new object using this data. The return
@@ -140,14 +146,14 @@
   friend PersistentSparseHistogramDataManager;
 
   // Weak-pointer to the parent data-manager object.
-  PersistentSparseHistogramDataManager* data_manager_;
+  raw_ptr<PersistentSparseHistogramDataManager> data_manager_;
 
   // ID of PersistentSampleMap to which these records apply.
   const uint64_t sample_map_id_;
 
   // The current user of this set of records. It is used to ensure that no
   // more than one object is using these records at a given time.
-  const void* user_ = nullptr;
+  raw_ptr<const void> user_ = nullptr;
 
   // This is the count of how many "records" have already been read by the
   // owning sample-map.
@@ -164,8 +170,6 @@
   // is appended in bulk to "records". Access to this vector can be done
   // only while holding the parent manager's lock.
   std::vector<PersistentMemoryAllocator::Reference> found_;
-
-  DISALLOW_COPY_AND_ASSIGN(PersistentSampleMapRecords);
 };
 
 
@@ -186,6 +190,9 @@
     // The allocator must live beyond the lifetime of the iterator.
     explicit Iterator(PersistentHistogramAllocator* allocator);
 
+    Iterator(const Iterator&) = delete;
+    Iterator& operator=(const Iterator&) = delete;
+
     // Gets the next histogram from persistent memory; returns null if there
     // are no more histograms to be found. This may still be called again
     // later to retrieve any new histograms added in the meantime.
@@ -197,19 +204,22 @@
 
    private:
     // Weak-pointer to histogram allocator being iterated over.
-    PersistentHistogramAllocator* allocator_;
+    raw_ptr<PersistentHistogramAllocator> allocator_;
 
     // The iterator used for stepping through objects in persistent memory.
     // It is lock-free and thread-safe which is why this class is also such.
     PersistentMemoryAllocator::Iterator memory_iter_;
-
-    DISALLOW_COPY_AND_ASSIGN(Iterator);
   };
 
   // A PersistentHistogramAllocator is constructed from a PersistentMemory-
   // Allocator object of which it takes ownership.
   explicit PersistentHistogramAllocator(
       std::unique_ptr<PersistentMemoryAllocator> memory);
+
+  PersistentHistogramAllocator(const PersistentHistogramAllocator&) = delete;
+  PersistentHistogramAllocator& operator=(const PersistentHistogramAllocator&) =
+      delete;
+
   virtual ~PersistentHistogramAllocator();
 
   // Direct access to underlying memory allocator. If the segment is shared
@@ -277,13 +287,17 @@
   // is done separately from construction for situations such as when the
   // histograms will be backed by memory provided by this very allocator.
   //
-  // IMPORTANT: Callers must update tools/metrics/histograms/histograms.xml
-  // with the following histograms:
-  //    UMA.PersistentAllocator.name.Allocs
+  // IMPORTANT: tools/metrics/histograms/metadata/uma/histograms.xml must
+  // be updated with the following histograms for each |name| param:
+  //    UMA.PersistentAllocator.name.Errors
   //    UMA.PersistentAllocator.name.UsedPct
   void CreateTrackingHistograms(StringPiece name);
   void UpdateTrackingHistograms();
 
+  // Sets the internal |ranges_manager_|, which will be used by the allocator to
+  // register BucketRanges. Takes ownership of the passed |ranges_manager|.
+  void SetRangesManager(RangesManager* ranges_manager);
+
   // Clears the internal |last_created_| reference so testing can validate
   // operation without that optimization.
   void ClearLastCreatedReferenceForTesting();
@@ -295,8 +309,8 @@
 
   // Gets the reference of the last histogram created, used to avoid
   // trying to import what was just created.
-  PersistentHistogramAllocator::Reference last_created() {
-    return subtle::NoBarrier_Load(&last_created_);
+  Reference last_created() {
+    return last_created_.load(std::memory_order_relaxed);
   }
 
   // Gets the next histogram in persistent data based on iterator while
@@ -318,15 +332,20 @@
   // The memory allocator that provides the actual histogram storage.
   std::unique_ptr<PersistentMemoryAllocator> memory_allocator_;
 
+  // The RangesManager that the allocator will register its BucketRanges with.
+  // If this is null (default), the BucketRanges will be registered with the
+  // global statistics recorder. Used when loading self-contained metrics coming
+  // from a previous session. Registering the BucketRanges with the global
+  // statistics recorder could create unnecessary contention and use a small
+  // amount of extra memory.
+  std::unique_ptr<base::RangesManager> ranges_manager_;
+
   // The data-manager used to improve performance of sparse histograms.
   PersistentSparseHistogramDataManager sparse_histogram_data_manager_;
 
   // A reference to the last-created histogram in the allocator, used to avoid
   // trying to import what was just created.
-  // TODO(bcwhite): Change this to std::atomic<PMA::Reference> when available.
-  subtle::Atomic32 last_created_ = 0;
-
-  DISALLOW_COPY_AND_ASSIGN(PersistentHistogramAllocator);
+  std::atomic<Reference> last_created_ = 0;
 };
 
 
@@ -336,6 +355,9 @@
 class BASE_EXPORT GlobalHistogramAllocator
     : public PersistentHistogramAllocator {
  public:
+  GlobalHistogramAllocator(const GlobalHistogramAllocator&) = delete;
+  GlobalHistogramAllocator& operator=(const GlobalHistogramAllocator&) = delete;
+
   ~GlobalHistogramAllocator() override;
 
   // Create a global allocator using the passed-in memory |base|, |size|, and
@@ -350,16 +372,18 @@
   // specified |size| taken from the heap.
   static void CreateWithLocalMemory(size_t size, uint64_t id, StringPiece name);
 
-#if !defined(OS_NACL) && !defined(STARBOARD)
+#if !BUILDFLAG(IS_NACL) && !defined(STARBOARD)
   // Create a global allocator by memory-mapping a |file|. If the file does
   // not exist, it will be created with the specified |size|. If the file does
   // exist, the allocator will use and add to its contents, ignoring the passed
   // size in favor of the existing size. Returns whether the global allocator
-  // was set.
+  // was set. If |exclusive_write| is true, the file will be opened in a mode
+  // that disallows multiple concurrent writers (no effect on non-Windows).
   static bool CreateWithFile(const FilePath& file_path,
                              size_t size,
                              uint64_t id,
-                             StringPiece name);
+                             StringPiece name,
+                             bool exclusive_write = false);
 
   // Creates a new file at |active_path|. If it already exists, it will first be
   // moved to |base_path|. In all cases, any old file at |base_path| will be
@@ -385,59 +409,39 @@
   // Constructs a filename using a name.
   static FilePath ConstructFilePath(const FilePath& dir, StringPiece name);
 
+  // Constructs a filename using a name for an "active" file.
+  static FilePath ConstructFilePathForActiveFile(const FilePath& dir,
+                                                 StringPiece name);
+
   // Like above but with timestamp and pid for use in upload directories.
   static FilePath ConstructFilePathForUploadDir(const FilePath& dir,
                                                 StringPiece name,
                                                 base::Time stamp,
                                                 ProcessId pid);
 
+  // Override that uses the current time stamp and current process id.
+  static FilePath ConstructFilePathForUploadDir(const FilePath& dir,
+                                                StringPiece name);
+
   // Parses a filename to extract name, timestamp, and pid.
   static bool ParseFilePath(const FilePath& path,
                             std::string* out_name,
                             Time* out_stamp,
                             ProcessId* out_pid);
 
-  // Constructs a set of names in |dir| based on name that can be used for a
-  // base + active persistent memory mapped location for CreateWithActiveFile().
-  // The spare path is a file that can be pre-created and moved to be active
-  // without any startup penalty that comes from constructing the file. |name|
-  // will be used as the basename of the file inside |dir|. |out_base_path|,
-  // |out_active_path|, or |out_spare_path| may be null if not needed.
-  static void ConstructFilePaths(const FilePath& dir,
-                                 StringPiece name,
-                                 FilePath* out_base_path,
-                                 FilePath* out_active_path,
-                                 FilePath* out_spare_path);
-
-  // As above but puts the base files in a different "upload" directory. This
-  // is useful when moving all completed files into a single directory for easy
-  // upload management.
-  static void ConstructFilePathsForUploadDir(const FilePath& active_dir,
-                                             const FilePath& upload_dir,
-                                             const std::string& name,
-                                             FilePath* out_upload_path,
-                                             FilePath* out_active_path,
-                                             FilePath* out_spare_path);
-
   // Create a "spare" file that can later be made the "active" file. This
   // should be done on a background thread if possible.
   static bool CreateSpareFile(const FilePath& spare_path, size_t size);
-
-  // Same as above but uses standard names. |name| is the name of the allocator
-  // and is also used to create the correct filename.
-  static bool CreateSpareFileInDir(const FilePath& dir_path,
-                                   size_t size,
-                                   StringPiece name);
 #endif
 
 #if !defined(STARBOARD)
   // Create a global allocator using a block of shared memory accessed
-  // through the given |handle| and |size|. The allocator takes ownership
-  // of the handle and closes it upon destruction, though the memory will
-  // continue to live if other processes have access to it.
-  static void CreateWithSharedMemoryHandle(const SharedMemoryHandle& handle,
-                                           size_t size);
-#endif
+  // through the given |region|. The allocator maps the shared memory into
+  // current process's virtual address space and frees it upon destruction.
+  // The memory will continue to live if other processes have access to it.
+  static void CreateWithSharedMemoryRegion(
+      const WritableSharedMemoryRegion& region);
+#endif  // !defined(STARBOARD)
 
   // Sets a GlobalHistogramAllocator for globally storing histograms in
   // a space that can be persisted or shared between processes. There is only
@@ -465,6 +469,14 @@
   // are to be saved.
   const FilePath& GetPersistentLocation() const;
 
+  // Returns whether the contents of this allocator are being saved to a
+  // persistent file on disk.
+  bool HasPersistentLocation() const;
+
+  // Moves the file being used to persist this allocator's data to the
+  // directory specified by |dir|. Returns whether the operation was
+  // successful.
+  bool MovePersistentFile(const FilePath& dir);
+
   // Writes the internal data to a previously set location. This is generally
   // called when a process is exiting from a section of code that may not know
   // the filesystem. The data is written in an atomic manner. The return value
@@ -499,10 +511,8 @@
 
   // The location to which the data should be persisted.
   FilePath persistent_location_;
-
-  DISALLOW_COPY_AND_ASSIGN(GlobalHistogramAllocator);
 };
 
 }  // namespace base
 
-#endif  // BASE_METRICS_HISTOGRAM_PERSISTENCE_H_
+#endif  // BASE_METRICS_PERSISTENT_HISTOGRAM_ALLOCATOR_H_
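
Taken together, the new file-path and persistence helpers read as follows in
use. A sketch assuming the existing CreateWithFile() factory and a
WriteToPersistentLocation() method matching the comment above; the directory
parameters and file name are placeholders:

#include "base/files/file_path.h"
#include "base/metrics/persistent_histogram_allocator.h"

void PersistBrowserMetrics(const base::FilePath& dir,
                           const base::FilePath& upload_dir) {
  const base::FilePath file = dir.AppendASCII("BrowserMetrics.pma");
  base::GlobalHistogramAllocator::CreateWithFile(file, 1 << 20, /*id=*/0,
                                                 "BrowserMetrics");
  base::GlobalHistogramAllocator* allocator =
      base::GlobalHistogramAllocator::Get();
  if (!allocator || !allocator->HasPersistentLocation())
    return;
  // Relocate the backing file so an uploader finds everything in one place,
  // then flush current contents to the new location.
  if (allocator->MovePersistentFile(upload_dir))
    allocator->WriteToPersistentLocation();
}
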
diff --git a/base/metrics/persistent_histogram_allocator_unittest.cc b/base/metrics/persistent_histogram_allocator_unittest.cc
index f8de3d5..25ed1da 100644
--- a/base/metrics/persistent_histogram_allocator_unittest.cc
+++ b/base/metrics/persistent_histogram_allocator_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
+// Copyright 2016 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -7,18 +7,26 @@
 #include "base/files/file.h"
 #include "base/files/file_util.h"
 #include "base/files/scoped_temp_dir.h"
-#include "base/logging.h"
 #include "base/memory/ptr_util.h"
+#include "base/memory/raw_ptr.h"
 #include "base/metrics/bucket_ranges.h"
+#include "base/metrics/histogram.h"
+#include "base/metrics/histogram_functions.h"
 #include "base/metrics/histogram_macros.h"
 #include "base/metrics/persistent_memory_allocator.h"
 #include "base/metrics/statistics_recorder.h"
-#include "starboard/memory.h"
+#include "testing/gmock/include/gmock/gmock.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
 
 class PersistentHistogramAllocatorTest : public testing::Test {
+ public:
+  PersistentHistogramAllocatorTest(const PersistentHistogramAllocatorTest&) =
+      delete;
+  PersistentHistogramAllocatorTest& operator=(
+      const PersistentHistogramAllocatorTest&) = delete;
+
  protected:
   const int32_t kAllocatorMemorySize = 64 << 10;  // 64 KiB
 
@@ -48,10 +56,7 @@
 
   std::unique_ptr<StatisticsRecorder> statistics_recorder_;
   std::unique_ptr<char[]> allocator_memory_;
-  PersistentMemoryAllocator* allocator_ = nullptr;
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(PersistentHistogramAllocatorTest);
+  raw_ptr<PersistentMemoryAllocator> allocator_ = nullptr;
 };
 
 TEST_F(PersistentHistogramAllocatorTest, CreateAndIterate) {
@@ -166,25 +171,25 @@
   const size_t temp_size = 64 << 10;  // 64 KiB
 
   // Test creation of a new file.
-  GlobalHistogramAllocator::ReleaseForTesting();
+  DestroyPersistentHistogramAllocator();
   GlobalHistogramAllocator::CreateWithFile(temp_file, temp_size, 0, temp_name);
   EXPECT_EQ(std::string(temp_name),
             GlobalHistogramAllocator::Get()->memory_allocator()->Name());
 
   // Test re-open of a possibly-existing file.
-  GlobalHistogramAllocator::ReleaseForTesting();
+  DestroyPersistentHistogramAllocator();
   GlobalHistogramAllocator::CreateWithFile(temp_file, temp_size, 0, "");
   EXPECT_EQ(std::string(temp_name),
             GlobalHistogramAllocator::Get()->memory_allocator()->Name());
 
   // Test re-open of an known-existing file.
-  GlobalHistogramAllocator::ReleaseForTesting();
+  DestroyPersistentHistogramAllocator();
   GlobalHistogramAllocator::CreateWithFile(temp_file, 0, 0, "");
   EXPECT_EQ(std::string(temp_name),
             GlobalHistogramAllocator::Get()->memory_allocator()->Name());
 
   // Final release so file and temp-dir can be removed.
-  GlobalHistogramAllocator::ReleaseForTesting();
+  DestroyPersistentHistogramAllocator();
 }
 
 TEST_F(PersistentHistogramAllocatorTest, CreateSpareFile) {
@@ -208,12 +213,15 @@
       EXPECT_EQ(0, buffer[i]);
   }
 }
-#endif  // !defined(STARBOARD)
+#endif  // !defined(STARBOARD)
 
 TEST_F(PersistentHistogramAllocatorTest, StatisticsRecorderMerge) {
   const char LinearHistogramName[] = "SRTLinearHistogram";
   const char SparseHistogramName[] = "SRTSparseHistogram";
-  const size_t starting_sr_count = StatisticsRecorder::GetHistogramCount();
+  const size_t global_sr_initial_histogram_count =
+      StatisticsRecorder::GetHistogramCount();
+  const size_t global_sr_initial_bucket_ranges_count =
+      StatisticsRecorder::GetBucketRanges().size();
 
   // Create a local StatisticsRecorder in which the newly created histogram
   // will be recorded. The global allocator must be replaced after because the
@@ -255,7 +263,10 @@
   std::unique_ptr<GlobalHistogramAllocator> new_allocator =
       GlobalHistogramAllocator::ReleaseForTesting();
   local_sr.reset();
-  EXPECT_EQ(starting_sr_count, StatisticsRecorder::GetHistogramCount());
+  EXPECT_EQ(global_sr_initial_histogram_count,
+            StatisticsRecorder::GetHistogramCount());
+  EXPECT_EQ(global_sr_initial_bucket_ranges_count,
+            StatisticsRecorder::GetBucketRanges().size());
   GlobalHistogramAllocator::Set(std::move(old_allocator));
 
   // Create a "recovery" allocator using the same memory as the local one.
@@ -277,8 +288,9 @@
     HistogramBase* found =
         StatisticsRecorder::FindHistogram(recovered->histogram_name());
     EXPECT_NE(recovered.get(), found);
-  };
-  EXPECT_EQ(starting_sr_count + 2, StatisticsRecorder::GetHistogramCount());
+  }
+  EXPECT_EQ(global_sr_initial_histogram_count + 2,
+            StatisticsRecorder::GetHistogramCount());
 
   // Check the merged histograms for accuracy.
   HistogramBase* found = StatisticsRecorder::FindHistogram(LinearHistogramName);
@@ -299,6 +311,12 @@
   EXPECT_EQ(1, snapshot->GetCount(4));
   EXPECT_EQ(1, snapshot->GetCount(6));
 
+  // Verify that the LinearHistogram's BucketRanges was registered with the
+  // global SR since the recovery allocator does not specify a custom
+  // RangesManager.
+  ASSERT_EQ(global_sr_initial_bucket_ranges_count + 1,
+            StatisticsRecorder::GetBucketRanges().size());
+
   // Perform additional histogram increments.
   histogram1->AddCount(1, 3);
   histogram1->Add(6);
@@ -316,8 +334,9 @@
     if (!recovered)
       break;
     recovery2.MergeHistogramDeltaToStatisticsRecorder(recovered.get());
-  };
-  EXPECT_EQ(starting_sr_count + 2, StatisticsRecorder::GetHistogramCount());
+  }
+  EXPECT_EQ(global_sr_initial_histogram_count + 2,
+            StatisticsRecorder::GetHistogramCount());
 
   // And verify.
   found = StatisticsRecorder::FindHistogram(LinearHistogramName);
@@ -338,6 +357,70 @@
   EXPECT_EQ(1, snapshot->GetCount(7));
 }
 
+TEST_F(PersistentHistogramAllocatorTest, CustomRangesManager) {
+  const char LinearHistogramName[] = "TestLinearHistogram";
+  const size_t global_sr_initial_bucket_ranges_count =
+      StatisticsRecorder::GetBucketRanges().size();
+
+  // Create a local StatisticsRecorder in which the newly created histogram
+  // will be recorded. The global allocator must be replaced after because the
+  // act of releasing will cause the active SR to forget about all histograms
+  // in the released memory.
+  std::unique_ptr<StatisticsRecorder> local_sr =
+      StatisticsRecorder::CreateTemporaryForTesting();
+  EXPECT_EQ(0U, StatisticsRecorder::GetHistogramCount());
+  std::unique_ptr<GlobalHistogramAllocator> old_allocator =
+      GlobalHistogramAllocator::ReleaseForTesting();
+  GlobalHistogramAllocator::CreateWithLocalMemory(kAllocatorMemorySize, 0, "");
+  ASSERT_TRUE(GlobalHistogramAllocator::Get());
+
+  // Create a linear histogram and verify it is registered with the local SR.
+  HistogramBase* histogram = LinearHistogram::FactoryGet(
+      LinearHistogramName, /*minimum=*/1, /*maximum=*/10, /*bucket_count=*/10,
+      /*flags=*/0);
+  ASSERT_TRUE(histogram);
+  EXPECT_EQ(1U, StatisticsRecorder::GetHistogramCount());
+  histogram->Add(1);
+
+  // Destroy the local SR and ensure that we're back to the initial state and
+  // restore the global allocator. The histogram created in the local SR will
+  // become unmanaged.
+  std::unique_ptr<GlobalHistogramAllocator> new_allocator =
+      GlobalHistogramAllocator::ReleaseForTesting();
+  local_sr.reset();
+  EXPECT_EQ(global_sr_initial_bucket_ranges_count,
+            StatisticsRecorder::GetBucketRanges().size());
+  GlobalHistogramAllocator::Set(std::move(old_allocator));
+
+  // Create a "recovery" allocator using the same memory as the local one.
+  PersistentHistogramAllocator recovery(
+      std::make_unique<PersistentMemoryAllocator>(
+          const_cast<void*>(new_allocator->memory_allocator()->data()),
+          new_allocator->memory_allocator()->size(), 0, 0, "", false));
+
+  // Set a custom RangesManager for the recovery allocator so that the
+  // BucketRanges are not registered with the global SR.
+  RangesManager* ranges_manager = new RangesManager();
+  recovery.SetRangesManager(ranges_manager);
+  EXPECT_EQ(0U, ranges_manager->GetBucketRanges().size());
+
+  // Get the histogram that was created locally (and forgotten).
+  PersistentHistogramAllocator::Iterator histogram_iter1(&recovery);
+  std::unique_ptr<HistogramBase> recovered = histogram_iter1.GetNext();
+  ASSERT_TRUE(recovered);
+
+  // Verify that there are no more histograms.
+  ASSERT_FALSE(histogram_iter1.GetNext());
+
+  // Expect that the histogram's BucketRanges was not registered with the global
+  // statistics recorder since the recovery allocator specifies a custom
+  // RangesManager.
+  EXPECT_EQ(global_sr_initial_bucket_ranges_count,
+            StatisticsRecorder::GetBucketRanges().size());
+
+  EXPECT_EQ(1U, ranges_manager->GetBucketRanges().size());
+}
+
 TEST_F(PersistentHistogramAllocatorTest, RangesDeDuplication) {
   // This corresponds to the "ranges_ref" field of the PersistentHistogramData
   // structure defined (privately) inside persistent_histogram_allocator.cc.
@@ -375,4 +458,65 @@
   EXPECT_EQ(ranges_ref, data2[kRangesRefIndex]);
 }
 
+#if !defined(STARBOARD)
+TEST_F(PersistentHistogramAllocatorTest, MovePersistentFile) {
+  const char temp_name[] = "MovePersistentFileTest.pma";
+  ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath temp_file = temp_dir.GetPath().AppendASCII(temp_name);
+  const size_t temp_size = 64 << 10;  // 64 KiB
+
+  // Initialize persistent histogram system with a known file path.
+  DestroyPersistentHistogramAllocator();
+  GlobalHistogramAllocator::CreateWithFile(temp_file, temp_size, 0, temp_name);
+  GlobalHistogramAllocator* allocator = GlobalHistogramAllocator::Get();
+  ASSERT_TRUE(allocator->HasPersistentLocation());
+  EXPECT_EQ(allocator->GetPersistentLocation(), temp_file);
+  EXPECT_TRUE(base::PathExists(temp_file));
+
+  // Move the persistent file to a new directory.
+  ScopedTempDir new_temp_dir;
+  ASSERT_TRUE(new_temp_dir.CreateUniqueTempDir());
+  EXPECT_TRUE(allocator->MovePersistentFile(new_temp_dir.GetPath()));
+
+  // Verify that the persistent file was correctly moved to |new_temp_dir|.
+  FilePath new_temp_file = new_temp_dir.GetPath().AppendASCII(temp_name);
+  ASSERT_TRUE(allocator->HasPersistentLocation());
+  EXPECT_EQ(allocator->GetPersistentLocation(), new_temp_file);
+  EXPECT_TRUE(base::PathExists(new_temp_file));
+  EXPECT_FALSE(base::PathExists(temp_file));
+
+  // Emit a histogram after moving the file.
+  const char kHistogramName[] = "MovePersistentFile.Test";
+  base::UmaHistogramBoolean(kHistogramName, true);
+
+  // Release the allocator.
+  DestroyPersistentHistogramAllocator();
+
+  // Open and read the file in order to verify that |kHistogramName| was written
+  // to it even after being moved.
+  base::File file(new_temp_file, base::File::FLAG_OPEN | base::File::FLAG_READ);
+  std::unique_ptr<char[]> data = std::make_unique<char[]>(temp_size);
+  EXPECT_EQ(file.Read(/*offset=*/0, data.get(), temp_size),
+            static_cast<int>(temp_size));
+
+  // Create an allocator and iterator using the file's data.
+  PersistentHistogramAllocator new_file_allocator(
+      std::make_unique<PersistentMemoryAllocator>(data.get(), temp_size, 0, 0,
+                                                  "", false));
+  PersistentHistogramAllocator::Iterator it(&new_file_allocator);
+
+  // Verify that |kHistogramName| is in the file.
+  std::unique_ptr<HistogramBase> histogram;
+  bool found_histogram = false;
+  while ((histogram = it.GetNext()) != nullptr) {
+    if (strcmp(kHistogramName, histogram->histogram_name()) == 0) {
+      found_histogram = true;
+      break;
+    }
+  }
+  EXPECT_TRUE(found_histogram);
+}
+#endif  // !defined(STARBOARD)
+
 }  // namespace base
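
The recovery pattern these tests exercise, distilled into a sketch (|memory|
and |size| are assumed to describe a previously written segment; the
constructor arguments and includes mirror the tests above):

void MergeRecoveredHistograms(void* memory, size_t size) {
  base::PersistentHistogramAllocator recovery(
      std::make_unique<base::PersistentMemoryAllocator>(
          memory, size, /*page_size=*/0, /*id=*/0, /*name=*/"",
          /*readonly=*/false));
  base::PersistentHistogramAllocator::Iterator it(&recovery);
  while (std::unique_ptr<base::HistogramBase> histogram = it.GetNext())
    recovery.MergeHistogramDeltaToStatisticsRecorder(histogram.get());
}
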
diff --git a/base/metrics/persistent_histogram_storage.cc b/base/metrics/persistent_histogram_storage.cc
index 0fb9628..0dbab31 100644
--- a/base/metrics/persistent_histogram_storage.cc
+++ b/base/metrics/persistent_histogram_storage.cc
@@ -1,4 +1,4 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
+// Copyright 2018 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -9,15 +9,55 @@
 #include "base/logging.h"
 #include "base/metrics/persistent_histogram_allocator.h"
 #include "base/metrics/persistent_memory_allocator.h"
+#include "base/process/memory.h"
 #include "base/strings/string_util.h"
 #include "base/strings/stringprintf.h"
 #include "base/time/time.h"
 #include "build/build_config.h"
 
+#if BUILDFLAG(IS_WIN)
+#include <windows.h>
+// Must be after <windows.h>
+#include <memoryapi.h>
+#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
+#include <sys/mman.h>
+#endif
+
 namespace {
 
 constexpr size_t kAllocSize = 1 << 20;  // 1 MiB
 
+void* AllocateLocalMemory(size_t size) {
+  void* address;
+
+#if defined(COBALT_PENDING_CLEAN_UP)
+#elif BUILDFLAG(IS_WIN)
+  address =
+      ::VirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+  if (address)
+    return address;
+#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
+  // MAP_ANON is deprecated on Linux but MAP_ANONYMOUS is not universal on Mac.
+  // MAP_SHARED is not available on Linux <2.4 but required on Mac.
+  address = ::mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_ANON | MAP_SHARED,
+                   -1, 0);
+  if (address != MAP_FAILED)
+    return address;
+#else
+#error This architecture is not (yet) supported.
+#endif
+
+  // As a last resort, just allocate the memory from the heap. This will
+  // achieve the same basic result but the acquired memory has to be
+  // explicitly zeroed and thus realized immediately (i.e. all pages are
+  // added to the process now instead of only when first accessed).
+  if (!base::UncheckedMalloc(size, &address))
+    return nullptr;
+  DCHECK(address);
+  memset(address, 0, size);
+  return address;
+}
+
 }  // namespace
 
 namespace base {
@@ -29,18 +69,27 @@
   DCHECK(!allocator_name.empty());
   DCHECK(IsStringASCII(allocator_name));
 
-  GlobalHistogramAllocator::CreateWithLocalMemory(kAllocSize,
-                                                  0,  // No identifier.
-                                                  allocator_name);
+  // This code may be executed before crash handling and/or OOM handling has
+  // been initialized for the process. Silently ignore a failed allocation
+  // (no metric persistence) rather that generating a crash that won't be
+  // caught/reported.
+  void* memory = AllocateLocalMemory(kAllocSize);
+  if (!memory)
+    return;
+
+  GlobalHistogramAllocator::CreateWithPersistentMemory(memory, kAllocSize, 0,
+                                                       0,  // No identifier.
+                                                       allocator_name);
   GlobalHistogramAllocator::Get()->CreateTrackingHistograms(allocator_name);
 }
 
 PersistentHistogramStorage::~PersistentHistogramStorage() {
   PersistentHistogramAllocator* allocator = GlobalHistogramAllocator::Get();
+  if (!allocator)
+    return;
+
   allocator->UpdateTrackingHistograms();
 
-  // TODO(chengx): Investigate making early return depend on whethere there are
-  // metrics to report at this point or not.
   if (disabled_)
     return;
 
@@ -94,12 +143,7 @@
 
   StringPiece contents(static_cast<const char*>(allocator->data()),
                        allocator->used());
-#if defined(STARBOARD)
-  int bytes_written = base::WriteFile(file_path, contents.data(), contents.size());
-  if (bytes_written == contents.size()) {
-#else
   if (!ImportantFileWriter::WriteFileAtomically(file_path, contents)) {
-#endif
     LOG(ERROR) << "Persistent histograms fail to write to file: "
                << file_path.value();
   }
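
For reference, the atomic-write path now used on all platforms reduces to a
single call; a sketch assuming only the include shown (WriteFileAtomically
writes to a temporary file and renames it over the target so readers never
observe a partially written file):

#include "base/files/important_file_writer.h"

bool PersistBlob(const base::FilePath& path, base::StringPiece contents) {
  return base::ImportantFileWriter::WriteFileAtomically(path, contents);
}
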
diff --git a/base/metrics/persistent_histogram_storage.h b/base/metrics/persistent_histogram_storage.h
index 397236d..6eae2a8 100644
--- a/base/metrics/persistent_histogram_storage.h
+++ b/base/metrics/persistent_histogram_storage.h
@@ -1,4 +1,4 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
+// Copyright 2018 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -7,7 +7,6 @@
 
 #include "base/base_export.h"
 #include "base/files/file_path.h"
-#include "base/macros.h"
 #include "base/strings/string_piece.h"
 
 namespace base {
@@ -35,6 +34,10 @@
   PersistentHistogramStorage(StringPiece allocator_name,
                              StorageDirManagement storage_dir_management);
 
+  PersistentHistogramStorage(const PersistentHistogramStorage&) = delete;
+  PersistentHistogramStorage& operator=(const PersistentHistogramStorage&) =
+      delete;
+
   ~PersistentHistogramStorage();
 
   // The storage directory isn't always known during initial construction so
@@ -59,8 +62,6 @@
   // but can be set to true by the caller who decides to throw away its
   // histogram data.
   bool disabled_ = false;
-
-  DISALLOW_COPY_AND_ASSIGN(PersistentHistogramStorage);
 };
 
 }  // namespace base
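
A usage sketch for the class as declared above: persistence is scoped to the
object's lifetime, and the storage directory is supplied later via the setter
implied by the header comment (set_storage_base_dir() in upstream Chromium;
kCreate is likewise the upstream enum value). GetMetricsDir() is a
placeholder:

int ServiceMain() {
  base::PersistentHistogramStorage storage(
      "ServiceMetrics",
      base::PersistentHistogramStorage::StorageDirManagement::kCreate);
  storage.set_storage_base_dir(GetMetricsDir());  // Placeholder helper.
  // ... run the service; histograms accumulate in the global allocator ...
  return 0;
}  // |storage|'s destructor writes the histogram file here.
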
diff --git a/base/metrics/persistent_histogram_storage_unittest.cc b/base/metrics/persistent_histogram_storage_unittest.cc
index 9db4391..b88440d 100644
--- a/base/metrics/persistent_histogram_storage_unittest.cc
+++ b/base/metrics/persistent_histogram_storage_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
+// Copyright 2018 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -10,7 +10,7 @@
 #include "base/files/file_util.h"
 #include "base/files/scoped_temp_dir.h"
 #include "base/metrics/histogram_macros.h"
-#include "base/metrics/statistics_recorder.h"
+#include "base/metrics/persistent_histogram_allocator.h"
 #include "base/time/time.h"
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
@@ -25,9 +25,14 @@
 }  // namespace
 
 class PersistentHistogramStorageTest : public testing::Test {
+ public:
+  PersistentHistogramStorageTest(const PersistentHistogramStorageTest&) =
+      delete;
+  PersistentHistogramStorageTest& operator=(
+      const PersistentHistogramStorageTest&) = delete;
+
  protected:
-  PersistentHistogramStorageTest() :
-    recorder_for_testing_(StatisticsRecorder::CreateTemporaryForTesting()) {}
+  PersistentHistogramStorageTest() = default;
   ~PersistentHistogramStorageTest() override = default;
 
   // Creates a unique temporary directory, and sets the test storage directory.
@@ -48,16 +53,9 @@
 
   // The directory into which metrics files are written.
   FilePath test_storage_dir_;
-
-  std::unique_ptr<StatisticsRecorder> recorder_for_testing_;
-
-  DISALLOW_COPY_AND_ASSIGN(PersistentHistogramStorageTest);
 };
 
-// TODO(chengx): Re-enable the test on OS_IOS after issue 836789 is fixed.
-// PersistentHistogramStorage is only used on OS_WIN now, so disabling this
-// test on OS_IOS is fine.
-#if !defined(OS_NACL) && !defined(OS_IOS)
+#if !BUILDFLAG(IS_NACL)
 TEST_F(PersistentHistogramStorageTest, HistogramWriteTest) {
   auto persistent_histogram_storage =
       std::make_unique<PersistentHistogramStorage>(
@@ -76,7 +74,10 @@
   // destruction of the PersistentHistogramStorage instance.
   EXPECT_TRUE(DirectoryExists(test_storage_dir()));
   EXPECT_FALSE(IsDirectoryEmpty(test_storage_dir()));
+
+  // Clean up for subsequent tests.
+  GlobalHistogramAllocator::ReleaseForTesting();
 }
-#endif  // !defined(OS_NACL) && !defined(OS_IOS)
+#endif  // !BUILDFLAG(IS_NACL)
 
 }  // namespace base
diff --git a/base/metrics/persistent_memory_allocator.cc b/base/metrics/persistent_memory_allocator.cc
index 831a5e7..c4976fd 100644
--- a/base/metrics/persistent_memory_allocator.cc
+++ b/base/metrics/persistent_memory_allocator.cc
@@ -1,73 +1,83 @@
-// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Copyright 2015 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "base/metrics/persistent_memory_allocator.h"
 
 #include <assert.h>
+
 #include <algorithm>
 
-#if defined(OS_WIN)
-#include <windows.h>
-#include "winbase.h"
-#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
-#include <sys/mman.h>
-#endif
-
+#include "base/bits.h"
 #include "base/debug/alias.h"
+#include "base/debug/crash_logging.h"
 #include "base/files/memory_mapped_file.h"
 #include "base/logging.h"
-#include "base/memory/shared_memory.h"
 #include "base/metrics/histogram_functions.h"
 #include "base/metrics/sparse_histogram.h"
+#include "base/notreached.h"
+#include "base/numerics/checked_math.h"
 #include "base/numerics/safe_conversions.h"
-#include "base/sys_info.h"
-#include "base/threading/thread_restrictions.h"
+#include "base/strings/strcat.h"
+#include "base/strings/string_piece.h"
+#include "base/system/sys_info.h"
+#include "base/threading/scoped_blocking_call.h"
 #include "build/build_config.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
+
+#if BUILDFLAG(IS_WIN)
+#include <windows.h>
+// Must be after <windows.h>
+#include <winbase.h>
+#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA) || defined(STARBOARD)
+#include <sys/mman.h>
+#if BUILDFLAG(IS_ANDROID)
+#include <sys/prctl.h>
+#endif
+#endif
+#if defined(STARBOARD)
 #include "starboard/memory.h"
-#include "starboard/types.h"
+#endif
 
 namespace {
 
 // Limit of memory segment size. It has to fit in an unsigned 32-bit number
-// and should be a power of 2 in order to accomodate almost any page size.
-const uint32_t kSegmentMaxSize = 1 << 30;  // 1 GiB
+// and should be a power of 2 in order to accommodate almost any page size.
+constexpr uint32_t kSegmentMaxSize = 1 << 30;  // 1 GiB
 
 // A constant (random) value placed in the shared metadata to identify
 // an already initialized memory segment.
-const uint32_t kGlobalCookie = 0x408305DC;
+constexpr uint32_t kGlobalCookie = 0x408305DC;
 
 // The current version of the metadata. If updates are made that change
 // the metadata, the version number can be queried to operate in a backward-
 // compatible manner until the memory segment is completely re-initialized.
-const uint32_t kGlobalVersion = 2;
+constexpr uint32_t kGlobalVersion = 2;
 
 // Constant values placed in the block headers to indicate its state.
-const uint32_t kBlockCookieFree = 0;
-const uint32_t kBlockCookieQueue = 1;
-const uint32_t kBlockCookieWasted = (uint32_t)-1;
-const uint32_t kBlockCookieAllocated = 0xC8799269;
+constexpr uint32_t kBlockCookieFree = 0;
+constexpr uint32_t kBlockCookieQueue = 1;
+constexpr uint32_t kBlockCookieWasted = (uint32_t)-1;
+constexpr uint32_t kBlockCookieAllocated = 0xC8799269;
 
 // TODO(bcwhite): When acceptable, consider moving flags to std::atomic<char>
 // types rather than combined bitfield.
 
 // Flags stored in the flags_ field of the SharedMetadata structure below.
-enum : int {
-  kFlagCorrupt = 1 << 0,
-  kFlagFull    = 1 << 1
-};
+constexpr uint32_t kFlagCorrupt = 1 << 0;
+constexpr uint32_t kFlagFull = 1 << 1;
 
 // Errors that are logged in "errors" histogram.
 enum AllocatorError : int {
   kMemoryIsCorrupt = 1,
 };
 
-bool CheckFlag(const volatile std::atomic<uint32_t>* flags, int flag) {
+bool CheckFlag(const volatile std::atomic<uint32_t>* flags, uint32_t flag) {
   uint32_t loaded_flags = flags->load(std::memory_order_relaxed);
   return (loaded_flags & flag) != 0;
 }
 
-void SetFlag(volatile std::atomic<uint32_t>* flags, int flag) {
+void SetFlag(volatile std::atomic<uint32_t>* flags, uint32_t flag) {
   uint32_t loaded_flags = flags->load(std::memory_order_relaxed);
   for (;;) {
     uint32_t new_flags = (loaded_flags & ~flag) | flag;
@@ -86,18 +96,6 @@
 
 namespace base {
 
-// All allocations and data-structures must be aligned to this byte boundary.
-// Alignment as large as the physical bus between CPU and RAM is _required_
-// for some architectures, is simply more efficient on other CPUs, and
-// generally a Good Idea(tm) for all platforms as it reduces/eliminates the
-// chance that a type will span cache lines. Alignment mustn't be less
-// than 8 to ensure proper alignment for all types. The rest is a balance
-// between reducing spans across multiple cache lines and wasted space spent
-// padding out allocations. An alignment of 16 would ensure that the block
-// header structure always sits in a single cache line. An average of about
-// 1/2 this value will be wasted with every allocation.
-const uint32_t PersistentMemoryAllocator::kAllocAlignment = 8;
-
 // The block-header is placed at the top of every allocation within the
 // segment to describe the data that follows it.
 struct PersistentMemoryAllocator::BlockHeader {
@@ -163,6 +161,8 @@
   Reset(starting_after);
 }
 
+PersistentMemoryAllocator::Iterator::~Iterator() = default;
+
 void PersistentMemoryAllocator::Iterator::Reset() {
   last_record_.store(kReferenceQueue, std::memory_order_relaxed);
   record_count_.store(0, std::memory_order_relaxed);
@@ -321,15 +321,15 @@
                                                      bool readonly)
     : mem_base_(static_cast<char*>(memory.base)),
       mem_type_(memory.type),
-      mem_size_(static_cast<uint32_t>(size)),
-      mem_page_(static_cast<uint32_t>((page_size ? page_size : size))),
-#if defined(OS_NACL) || defined(STARBOARD)
+      mem_size_(checked_cast<uint32_t>(size)),
+      mem_page_(checked_cast<uint32_t>((page_size ? page_size : size))),
+#if BUILDFLAG(IS_NACL)
       vm_page_size_(4096U),  // SysInfo is not built for NACL.
 #else
       vm_page_size_(SysInfo::VMAllocationGranularity()),
 #endif
       readonly_(readonly),
-      corrupt_(0),
+      corrupt_(false),
       allocs_histogram_(nullptr),
       used_histogram_(nullptr),
       errors_histogram_(nullptr) {
@@ -356,11 +356,10 @@
   // don't support multi-process applications on Starboard currently, so this
   // code will not be used.
 #if !defined(STARBOARD)
-  // These atomics operate inter-process and so must be lock-free. The local
-  // casts are to make sure it can be evaluated at compile time to a constant.
-  CHECK(((SharedMetadata*)nullptr)->freeptr.is_lock_free());
-  CHECK(((SharedMetadata*)nullptr)->flags.is_lock_free());
-  CHECK(((BlockHeader*)nullptr)->next.is_lock_free());
+  // These atomics operate inter-process and so must be lock-free.
+  DCHECK(SharedMetadata().freeptr.is_lock_free());
+  DCHECK(SharedMetadata().flags.is_lock_free());
+  DCHECK(BlockHeader().next.is_lock_free());
   CHECK(corrupt_.is_lock_free());
 #endif  // !defined(STARBOARD)
 
@@ -480,7 +479,7 @@
     base::StringPiece name) {
   if (name.empty() || readonly_)
     return;
-  std::string name_string = name.as_string();
+  std::string name_string(name);
 
 #if 0
   // This histogram wasn't being used so has been disabled. It is left here
@@ -546,7 +545,10 @@
   uint32_t size = block->size;
   // Header was verified by GetBlock() but a malicious actor could change
   // the value between there and here. Check it again.
-  if (size <= sizeof(BlockHeader) || ref + size > mem_size_) {
+  uint32_t total_size;
+  if (size <= sizeof(BlockHeader) ||
+      !base::CheckAdd(ref, size).AssignIfValid(&total_size) ||
+      total_size > mem_size_) {
     SetCorrupt();
     return 0;
   }
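
The hardened check in isolation: base::CheckAdd() yields a value only when
the addition did not overflow, so a hostile |size| can no longer wrap past
the end of the segment. A standalone sketch:

#include "base/numerics/checked_math.h"

bool FitsInSegment(uint32_t ref, uint32_t size, uint32_t mem_size) {
  uint32_t total;
  // AssignIfValid() returns false on overflow, leaving |total| untouched.
  return base::CheckAdd(ref, size).AssignIfValid(&total) && total <= mem_size;
}
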
@@ -648,8 +650,7 @@
   }
 
   // Round up the requested size, plus header, to the next allocation alignment.
-  uint32_t size = static_cast<uint32_t>(req_size + sizeof(BlockHeader));
-  size = (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1);
+  size_t size = bits::AlignUp(req_size + sizeof(BlockHeader), kAllocAlignment);
   if (size <= sizeof(BlockHeader) || size > mem_page_) {
     NOTREACHED();
     return kReferenceNull;
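
bits::AlignUp() replaces the manual mask arithmetic that was here; for a
power-of-two alignment it rounds up to the next multiple. A small sketch of
its behavior:

#include "base/bits.h"
#include "base/check_op.h"

void AlignUpExamples() {
  DCHECK_EQ(16u, base::bits::AlignUp(13u, 8u));  // Rounded up to a multiple.
  DCHECK_EQ(16u, base::bits::AlignUp(16u, 8u));  // Already aligned; unchanged.
}
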
@@ -708,15 +709,17 @@
     // Don't leave a slice at the end of a page too small for anything. This
     // can result in an allocation up to two alignment-sizes greater than the
     // minimum required by requested-size + header + alignment.
-    if (page_free - size < sizeof(BlockHeader) + kAllocAlignment)
+    if (page_free - size < sizeof(BlockHeader) + kAllocAlignment) {
       size = page_free;
-
-    const uint32_t new_freeptr = freeptr + size;
-    if (new_freeptr > mem_size_) {
-      SetCorrupt();
-      return kReferenceNull;
+      if (freeptr + size > mem_size_) {
+        SetCorrupt();
+        return kReferenceNull;
+      }
     }
 
+    // This cast is safe because (freeptr + size) <= mem_size_.
+    const uint32_t new_freeptr = static_cast<uint32_t>(freeptr + size);
+
     // Save our work. Try again if another thread has completed an allocation
     // while we were processing. A "weak" exchange would be permissible here
     // because the code will just loop and try again but the above processing
@@ -767,7 +770,8 @@
     // data here because this memory can, currently, be seen only by the thread
     // performing the allocation. When it comes time to share this, the thread
     // will call MakeIterable() which does the release operation.
-    block->size = size;
+    // `size` is at most kSegmentMaxSize, so this cast is safe.
+    block->size = static_cast<uint32_t>(size);
     block->cookie = kBlockCookieAllocated;
     block->type_id.store(type_id, std::memory_order_relaxed);
     return freeptr;
@@ -879,8 +883,10 @@
 // having internal dereferences go through this same function, the allocator
 // is hardened against corruption.
 const volatile PersistentMemoryAllocator::BlockHeader*
-PersistentMemoryAllocator::GetBlock(Reference ref, uint32_t type_id,
-                                    uint32_t size, bool queue_ok,
+PersistentMemoryAllocator::GetBlock(Reference ref,
+                                    uint32_t type_id,
+                                    size_t size,
+                                    bool queue_ok,
                                     bool free_ok) const {
   // Handle special cases.
   if (ref == kReferenceQueue && queue_ok)
@@ -892,8 +898,13 @@
   if (ref % kAllocAlignment != 0)
     return nullptr;
   size += sizeof(BlockHeader);
-  if (ref + size > mem_size_)
+  uint32_t total_size;
+  if (!base::CheckAdd(ref, size).AssignIfValid(&total_size)) {
     return nullptr;
+  }
+  if (total_size > mem_size_) {
+    return nullptr;
+  }
 
   // Validation of referenced block-header.
   if (!free_ok) {
@@ -903,8 +914,13 @@
       return nullptr;
     if (block->size < size)
       return nullptr;
-    if (ref + block->size > mem_size_)
+    uint32_t block_size;
+    if (!base::CheckAdd(ref, block->size).AssignIfValid(&block_size)) {
       return nullptr;
+    }
+    if (block_size > mem_size_) {
+      return nullptr;
+    }
     if (type_id != 0 &&
         block->type_id.load(std::memory_order_relaxed) != type_id) {
       return nullptr;
@@ -930,7 +946,7 @@
 const volatile void* PersistentMemoryAllocator::GetBlockData(
     Reference ref,
     uint32_t type_id,
-    uint32_t size) const {
+    size_t size) const {
   DCHECK(size > 0);
   const volatile BlockHeader* block =
       GetBlock(ref, type_id, size, false, false);
@@ -957,8 +973,12 @@
     size_t size,
     uint64_t id,
     base::StringPiece name)
-    : PersistentMemoryAllocator(AllocateLocalMemory(size),
-                                size, 0, id, name, false) {}
+    : PersistentMemoryAllocator(AllocateLocalMemory(size, name),
+                                size,
+                                0,
+                                id,
+                                name,
+                                false) {}
 
 LocalPersistentMemoryAllocator::~LocalPersistentMemoryAllocator() {
   DeallocateLocalMemory(const_cast<char*>(mem_base_), mem_size_, mem_type_);
@@ -966,30 +986,34 @@
 
 // static
 PersistentMemoryAllocator::Memory
-LocalPersistentMemoryAllocator::AllocateLocalMemory(size_t size) {
+LocalPersistentMemoryAllocator::AllocateLocalMemory(size_t size,
+                                                    base::StringPiece name) {
   void* address;
 
-#if !defined(STARBOARD)
-#if defined(OS_WIN)
+#if defined(STARBOARD)
+#elif BUILDFLAG(IS_WIN)
   address =
       ::VirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
   if (address)
     return Memory(address, MEM_VIRTUAL);
-  UmaHistogramSparse("UMA.LocalPersistentMemoryAllocator.Failures.Win",
-                     ::GetLastError());
-#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
   // MAP_ANON is deprecated on Linux but MAP_ANONYMOUS is not universal on Mac.
   // MAP_SHARED is not available on Linux <2.4 but required on Mac.
   address = ::mmap(nullptr, size, PROT_READ | PROT_WRITE,
                    MAP_ANON | MAP_SHARED, -1, 0);
-  if (address != MAP_FAILED)
+  if (address != MAP_FAILED) {
+#if BUILDFLAG(IS_ANDROID)
+    // Allow the anonymous memory region allocated by mmap(MAP_ANON) to be
+    // identified in /proc/$PID/smaps.  This helps improve visibility into
+    // Chrome's memory usage on Android.
+    const std::string arena_name = base::StrCat({"persistent:", name});
+    prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, address, size, arena_name.c_str());
+#endif
     return Memory(address, MEM_VIRTUAL);
-  UmaHistogramSparse("UMA.LocalPersistentMemoryAllocator.Failures.Posix",
-                     errno);
+  }
 #else
 #error This architecture is not (yet) supported.
 #endif
-#endif  // !defined(STARBOARD)
 
   // As a last resort, just allocate the memory from the heap. This will
   // achieve the same basic result but the acquired memory has to be
@@ -1011,48 +1035,72 @@
   }
 
   DCHECK_EQ(MEM_VIRTUAL, type);
-#if !defined(STARBOARD)
-#if defined(OS_WIN)
+#if defined(STARBOARD)
+#elif BUILDFLAG(IS_WIN)
   BOOL success = ::VirtualFree(memory, 0, MEM_DECOMMIT);
   DCHECK(success);
-#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
   int result = ::munmap(memory, size);
   DCHECK_EQ(0, result);
 #else
 #error This architecture is not (yet) supported.
 #endif
-#endif  // !defined(STARBOARD)
 }
 
-
-//----- SharedPersistentMemoryAllocator ----------------------------------------
+//----- WritableSharedPersistentMemoryAllocator --------------------------------
 #if !defined(STARBOARD)
 
-SharedPersistentMemoryAllocator::SharedPersistentMemoryAllocator(
-    std::unique_ptr<SharedMemory> memory,
-    uint64_t id,
-    base::StringPiece name,
-    bool read_only)
+WritableSharedPersistentMemoryAllocator::
+    WritableSharedPersistentMemoryAllocator(
+        base::WritableSharedMemoryMapping memory,
+        uint64_t id,
+        base::StringPiece name)
+    : PersistentMemoryAllocator(Memory(memory.memory(), MEM_SHARED),
+                                memory.size(),
+                                0,
+                                id,
+                                name,
+                                false),
+      shared_memory_(std::move(memory)) {}
+
+WritableSharedPersistentMemoryAllocator::
+    ~WritableSharedPersistentMemoryAllocator() = default;
+
+// static
+bool WritableSharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(
+    const base::WritableSharedMemoryMapping& memory) {
+  return IsMemoryAcceptable(memory.memory(), memory.size(), 0, false);
+}
+#endif  // !defined(STARBOARD)
+
+//----- ReadOnlySharedPersistentMemoryAllocator --------------------------------
+
+#if !defined(STARBOARD)
+ReadOnlySharedPersistentMemoryAllocator::
+    ReadOnlySharedPersistentMemoryAllocator(
+        base::ReadOnlySharedMemoryMapping memory,
+        uint64_t id,
+        base::StringPiece name)
     : PersistentMemoryAllocator(
-          Memory(static_cast<uint8_t*>(memory->memory()), MEM_SHARED),
-          memory->mapped_size(),
+          Memory(const_cast<void*>(memory.memory()), MEM_SHARED),
+          memory.size(),
           0,
           id,
           name,
-          read_only),
+          true),
       shared_memory_(std::move(memory)) {}
 
-SharedPersistentMemoryAllocator::~SharedPersistentMemoryAllocator() = default;
+ReadOnlySharedPersistentMemoryAllocator::
+    ~ReadOnlySharedPersistentMemoryAllocator() = default;
 
 // static
-bool SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(
-    const SharedMemory& memory) {
-  return IsMemoryAcceptable(memory.memory(), memory.mapped_size(), 0, false);
+bool ReadOnlySharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(
+    const base::ReadOnlySharedMemoryMapping& memory) {
+  return IsMemoryAcceptable(memory.memory(), memory.size(), 0, true);
 }
 #endif  // !defined(STARBOARD)
 
-
-#if !defined(OS_NACL) && !defined(STARBOARD)
+#if !BUILDFLAG(IS_NACL)
 //----- FilePersistentMemoryAllocator ------------------------------------------
 
 FilePersistentMemoryAllocator::FilePersistentMemoryAllocator(
@@ -1082,7 +1130,8 @@
 void FilePersistentMemoryAllocator::Cache() {
   // Since this method is expected to load data from permanent storage
   // into memory, blocking I/O may occur.
-  AssertBlockingAllowed();
+  base::ScopedBlockingCall scoped_blocking_call(FROM_HERE,
+                                                base::BlockingType::MAY_BLOCK);
 
   // Calculate begin/end addresses so that the first byte of every page
   // in that range can be read. Keep within the used space. The |volatile|
@@ -1106,97 +1155,50 @@
 }
 
 void FilePersistentMemoryAllocator::FlushPartial(size_t length, bool sync) {
-  if (sync)
-    AssertBlockingAllowed();
   if (IsReadonly())
     return;
 
-#if defined(OS_WIN)
+  absl::optional<base::ScopedBlockingCall> scoped_blocking_call;
+  if (sync)
+    scoped_blocking_call.emplace(FROM_HERE, base::BlockingType::MAY_BLOCK);
+
+#if BUILDFLAG(IS_WIN)
   // Windows doesn't support asynchronous flush.
-  AssertBlockingAllowed();
+  scoped_blocking_call.emplace(FROM_HERE, base::BlockingType::MAY_BLOCK);
   BOOL success = ::FlushViewOfFile(data(), length);
   DPCHECK(success);
-#elif defined(OS_MACOSX)
+#elif BUILDFLAG(IS_APPLE)
   // On OSX, "invalidate" removes all cached pages, forcing a re-read from
   // disk. That's not applicable to "flush" so omit it.
   int result =
       ::msync(const_cast<void*>(data()), length, sync ? MS_SYNC : MS_ASYNC);
   DCHECK_NE(EINVAL, result);
-#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA) || \
+    (defined(STARBOARD) && !defined(COMPILER_MSVC))
   // On POSIX, "invalidate" forces _other_ processes to recognize what has
   // been written to disk and so is applicable to "flush".
   int result = ::msync(const_cast<void*>(data()), length,
                        MS_INVALIDATE | (sync ? MS_SYNC : MS_ASYNC));
   DCHECK_NE(EINVAL, result);
-#elif defined(STARBOARD)
-  // TODO(b/283278127): This wont' work for platforms where
-  // SB_CAN_MAP_EXECUTABLE_MEMORY = 0. That's nxswitch, tvos, and playstation.
-  // Figure out how to make this work for all platforms.
-  SbMemoryFlush(const_cast<void*>(data()), length);
+#elif defined(STARBOARD) && defined(COMPILER_MSVC)
+  msync(const_cast<void*>(data()), length, 0);
 #else
 #error Unsupported OS.
 #endif
 }
-#endif  // !defined(OS_NACL)
+#endif  // !BUILDFLAG(IS_NACL)
 
 //----- DelayedPersistentAllocation --------------------------------------------
 
-// Forwarding constructors.
-DelayedPersistentAllocation::DelayedPersistentAllocation(
-    PersistentMemoryAllocator* allocator,
-    subtle::Atomic32* ref,
-    uint32_t type,
-    size_t size,
-    bool make_iterable)
-    : DelayedPersistentAllocation(
-          allocator,
-          reinterpret_cast<std::atomic<Reference>*>(ref),
-          type,
-          size,
-          0,
-          make_iterable) {}
-
-DelayedPersistentAllocation::DelayedPersistentAllocation(
-    PersistentMemoryAllocator* allocator,
-    subtle::Atomic32* ref,
-    uint32_t type,
-    size_t size,
-    size_t offset,
-    bool make_iterable)
-    : DelayedPersistentAllocation(
-          allocator,
-          reinterpret_cast<std::atomic<Reference>*>(ref),
-          type,
-          size,
-          offset,
-          make_iterable) {}
-
 DelayedPersistentAllocation::DelayedPersistentAllocation(
     PersistentMemoryAllocator* allocator,
     std::atomic<Reference>* ref,
     uint32_t type,
     size_t size,
-    bool make_iterable)
-    : DelayedPersistentAllocation(allocator,
-                                  ref,
-                                  type,
-                                  size,
-                                  0,
-                                  make_iterable) {}
-
-// Real constructor.
-DelayedPersistentAllocation::DelayedPersistentAllocation(
-    PersistentMemoryAllocator* allocator,
-    std::atomic<Reference>* ref,
-    uint32_t type,
-    size_t size,
-    size_t offset,
-    bool make_iterable)
+    size_t offset)
     : allocator_(allocator),
       type_(type),
       size_(checked_cast<uint32_t>(size)),
       offset_(checked_cast<uint32_t>(offset)),
-      make_iterable_(make_iterable),
       reference_(ref) {
   DCHECK(allocator_);
   DCHECK_NE(0U, type_);
@@ -1210,6 +1212,14 @@
   // Relaxed operations are acceptable here because it's not protecting the
   // contents of the allocation in any way.
   Reference ref = reference_->load(std::memory_order_acquire);
+
+#if !BUILDFLAG(IS_NACL)
+  // TODO(crbug/1432981): Remove these. They are used to investigate unexpected
+  // failures.
+  bool ref_found = (ref != 0);
+  bool raced = false;
+#endif  // !BUILDFLAG(IS_NACL)
+
   if (!ref) {
     ref = allocator_->Allocate(size_, type_);
     if (!ref)
@@ -1219,12 +1229,9 @@
     // Use a "strong" exchange to ensure no false-negatives since the operation
     // cannot be retried.
     Reference existing = 0;  // Must be mutable; receives actual value.
-    if (reference_->compare_exchange_strong(existing, ref,
-                                            std::memory_order_release,
-                                            std::memory_order_relaxed)) {
-      if (make_iterable_)
-        allocator_->MakeIterable(ref);
-    } else {
+    if (!reference_->compare_exchange_strong(existing, ref,
+                                             std::memory_order_release,
+                                             std::memory_order_relaxed)) {
       // Failure indicates that something else has raced ahead, performed the
       // allocation, and stored its reference. Purge the allocation that was
       // just done and use the other one instead.
@@ -1232,11 +1239,27 @@
       DCHECK_LE(size_, allocator_->GetAllocSize(existing));
       allocator_->ChangeType(ref, 0, type_, /*clear=*/false);
       ref = existing;
+#if !BUILDFLAG(IS_NACL)
+      raced = true;
+#endif  // !BUILDFLAG(IS_NACL)
     }
   }
 
   char* mem = allocator_->GetAsArray<char>(ref, type_, size_);
   if (!mem) {
+#if !BUILDFLAG(IS_NACL)
+    // TODO(crbug/1432981): Remove these. They are used to investigate
+    // unexpected failures.
+    SCOPED_CRASH_KEY_BOOL("PersistentMemoryAllocator", "full",
+                          allocator_->IsFull());
+    SCOPED_CRASH_KEY_BOOL("PersistentMemoryAllocator", "corrupted",
+                          allocator_->IsCorrupt());
+    SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "ref", ref);
+    SCOPED_CRASH_KEY_BOOL("PersistentMemoryAllocator", "ref_found", ref_found);
+    SCOPED_CRASH_KEY_BOOL("PersistentMemoryAllocator", "raced", raced);
+    SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "type_", type_);
+    SCOPED_CRASH_KEY_NUMBER("PersistentMemoryAllocator", "size_", size_);
+#endif  // !BUILDFLAG(IS_NACL)
     // This should never happen but be tolerant if it does as corruption from
     // the outside is something to guard against.
     NOTREACHED();
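
The race resolution above, reduced to plain std::atomic so the publication
pattern is visible in isolation (names are illustrative, not part of the
patch):

#include <atomic>
#include <cstdint>

using Reference = uint32_t;

Reference GetOrPublish(std::atomic<Reference>* slot, Reference candidate) {
  Reference existing = 0;  // Must be mutable; receives the actual value.
  if (slot->compare_exchange_strong(existing, candidate,
                                    std::memory_order_release,
                                    std::memory_order_relaxed)) {
    return candidate;  // We won the race; |candidate| is now published.
  }
  // Another thread raced ahead: discard |candidate| and adopt the winner's.
  return existing;
}
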
diff --git a/base/metrics/persistent_memory_allocator.h b/base/metrics/persistent_memory_allocator.h
index bcc9736..f3e94cf 100644
--- a/base/metrics/persistent_memory_allocator.h
+++ b/base/metrics/persistent_memory_allocator.h
@@ -1,29 +1,32 @@
-// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Copyright 2015 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #ifndef BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_
 #define BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_
 
+#include <stdint.h>
+
 #include <atomic>
 #include <memory>
 #include <type_traits>
 
 #include "base/atomicops.h"
 #include "base/base_export.h"
+#include "base/check.h"
+#include "base/check_op.h"
 #include "base/files/file_path.h"
 #include "base/gtest_prod_util.h"
-#include "base/macros.h"
+#include "base/memory/raw_ptr.h"
+#include "base/memory/raw_ptr_exclusion.h"
+#include "base/memory/shared_memory_mapping.h"
 #include "base/strings/string_piece.h"
-#include "starboard/types.h"
+#include "build/build_config.h"
 
 namespace base {
 
 class HistogramBase;
 class MemoryMappedFile;
-#if !defined(STARBOARD)
-class SharedMemory;
-#endif
 
 // Simple allocator for pieces of a memory block that may be persistent
 // to some storage or shared across multiple processes. This class resides
@@ -150,6 +153,11 @@
     Iterator(const PersistentMemoryAllocator* allocator,
              Reference starting_after);
 
+    Iterator(const Iterator&) = delete;
+    Iterator& operator=(const Iterator&) = delete;
+
+    ~Iterator();
+
     // Resets the iterator back to the beginning.
     void Reset();
 
@@ -218,15 +226,13 @@
 
    private:
     // Weak-pointer to memory allocator being iterated over.
-    const PersistentMemoryAllocator* allocator_;
+    raw_ptr<const PersistentMemoryAllocator> allocator_;
 
     // The last record that was returned.
     std::atomic<Reference> last_record_;
 
     // The number of records found; used for detecting loops.
     std::atomic<uint32_t> record_count_;
-
-    DISALLOW_COPY_AND_ASSIGN(Iterator);
   };
 
   // Returned information about the internal state of the heap.
@@ -287,6 +293,11 @@
   PersistentMemoryAllocator(void* base, size_t size, size_t page_size,
                             uint64_t id, base::StringPiece name,
                             bool readonly);
+
+  PersistentMemoryAllocator(const PersistentMemoryAllocator&) = delete;
+  PersistentMemoryAllocator& operator=(const PersistentMemoryAllocator&) =
+      delete;
+
   virtual ~PersistentMemoryAllocator();
 
   // Check if memory segment is acceptable for creation of an Allocator. This
@@ -315,8 +326,8 @@
   // is done separately from construction for situations such as when the
   // histograms will be backed by memory provided by this very allocator.
   //
-  // IMPORTANT: Callers must update tools/metrics/histograms/histograms.xml
-  // with the following histograms:
+  // IMPORTANT: tools/metrics/histograms/metadata/uma/histograms.xml must
+  // be updated with the following histograms for each |name| param:
   //    UMA.PersistentAllocator.name.Errors
   //    UMA.PersistentAllocator.name.UsedPct
   void CreateTrackingHistograms(base::StringPiece name);
@@ -610,7 +621,7 @@
   struct Memory {
     Memory(void* b, MemoryType t) : base(b), type(t) {}
 
-    void* base;
+    raw_ptr<void> base;
     MemoryType type;
   };
 
@@ -624,7 +635,11 @@
   // Implementation of Flush that accepts how much to flush.
   virtual void FlushPartial(size_t length, bool sync);
 
-  volatile char* const mem_base_;  // Memory base. (char so sizeof guaranteed 1)
+  // This field is not a raw_ptr<> because a pointer to stale non-PA allocation
+  // could be confused as a pointer to PA memory when that address space is
+  // reused. crbug.com/1173851 crbug.com/1169582
+  RAW_PTR_EXCLUSION volatile char* const
+      mem_base_;                   // Memory base. (char so sizeof guaranteed 1)
   const MemoryType mem_type_;      // Type of memory allocation.
   const uint32_t mem_size_;        // Size of entire memory segment.
   const uint32_t mem_page_;        // Page size allocations shouldn't cross.
@@ -633,7 +648,17 @@
  private:
   struct SharedMetadata;
   struct BlockHeader;
-  static const uint32_t kAllocAlignment;
+  // All allocations and data-structures must be aligned to this byte boundary.
+  // Alignment as large as the physical bus between CPU and RAM is _required_
+  // for some architectures, is simply more efficient on other CPUs, and
+  // generally a Good Idea(tm) for all platforms as it reduces/eliminates the
+  // chance that a type will span cache lines. Alignment mustn't be less
+  // than 8 to ensure proper alignment for all types. The rest is a balance
+  // between reducing spans across multiple cache lines and wasted space spent
+  // padding out allocations. An alignment of 16 would ensure that the block
+  // header structure always sits in a single cache line. An average of about
+  // 1/2 this value will be wasted with every allocation.
+  static constexpr size_t kAllocAlignment = 8;
   static const Reference kReferenceQueue;
 
   // The shared metadata is always located at the top of the memory segment.
@@ -651,24 +676,29 @@
   Reference AllocateImpl(size_t size, uint32_t type_id);
 
   // Get the block header associated with a specific reference.
-  const volatile BlockHeader* GetBlock(Reference ref, uint32_t type_id,
-                                       uint32_t size, bool queue_ok,
+  const volatile BlockHeader* GetBlock(Reference ref,
+                                       uint32_t type_id,
+                                       size_t size,
+                                       bool queue_ok,
                                        bool free_ok) const;
-  volatile BlockHeader* GetBlock(Reference ref, uint32_t type_id, uint32_t size,
-                                 bool queue_ok, bool free_ok) {
-      return const_cast<volatile BlockHeader*>(
-          const_cast<const PersistentMemoryAllocator*>(this)->GetBlock(
-              ref, type_id, size, queue_ok, free_ok));
+  volatile BlockHeader* GetBlock(Reference ref,
+                                 uint32_t type_id,
+                                 size_t size,
+                                 bool queue_ok,
+                                 bool free_ok) {
+    return const_cast<volatile BlockHeader*>(
+        const_cast<const PersistentMemoryAllocator*>(this)->GetBlock(
+            ref, type_id, size, queue_ok, free_ok));
   }
 
   // Get the actual data within a block associated with a specific reference.
-  const volatile void* GetBlockData(Reference ref, uint32_t type_id,
-                                    uint32_t size) const;
-  volatile void* GetBlockData(Reference ref, uint32_t type_id,
-                              uint32_t size) {
-      return const_cast<volatile void*>(
-          const_cast<const PersistentMemoryAllocator*>(this)->GetBlockData(
-              ref, type_id, size));
+  const volatile void* GetBlockData(Reference ref,
+                                    uint32_t type_id,
+                                    size_t size) const;
+  volatile void* GetBlockData(Reference ref, uint32_t type_id, size_t size) {
+    return const_cast<volatile void*>(
+        const_cast<const PersistentMemoryAllocator*>(this)->GetBlockData(
+            ref, type_id, size));
   }
 
   // Record an error in the internal histogram.
@@ -677,13 +707,12 @@
   const bool readonly_;                // Indicates access to read-only memory.
   mutable std::atomic<bool> corrupt_;  // Local version of "corrupted" flag.
 
-  HistogramBase* allocs_histogram_;  // Histogram recording allocs.
-  HistogramBase* used_histogram_;    // Histogram recording used space.
-  HistogramBase* errors_histogram_;  // Histogram recording errors.
+  raw_ptr<HistogramBase> allocs_histogram_;  // Histogram recording allocs.
+  raw_ptr<HistogramBase> used_histogram_;    // Histogram recording used space.
+  raw_ptr<HistogramBase> errors_histogram_;  // Histogram recording errors.
 
   friend class PersistentMemoryAllocatorTest;
   FRIEND_TEST_ALL_PREFIXES(PersistentMemoryAllocatorTest, AllocateAndIterate);
-  DISALLOW_COPY_AND_ASSIGN(PersistentMemoryAllocator);
 };
 
 
@@ -696,50 +725,85 @@
  public:
   LocalPersistentMemoryAllocator(size_t size, uint64_t id,
                                  base::StringPiece name);
+
+  LocalPersistentMemoryAllocator(const LocalPersistentMemoryAllocator&) =
+      delete;
+  LocalPersistentMemoryAllocator& operator=(
+      const LocalPersistentMemoryAllocator&) = delete;
+
   ~LocalPersistentMemoryAllocator() override;
 
  private:
   // Allocates a block of local memory of the specified |size|, ensuring that
   // the memory will not be physically allocated until accessed and will read
   // as zero when that happens.
-  static Memory AllocateLocalMemory(size_t size);
+  static Memory AllocateLocalMemory(size_t size, base::StringPiece name);
 
   // Deallocates a block of local |memory| of the specified |size|.
   static void DeallocateLocalMemory(void* memory, size_t size, MemoryType type);
-
-  DISALLOW_COPY_AND_ASSIGN(LocalPersistentMemoryAllocator);
 };
 
+
 #if !defined(STARBOARD)
-// This allocator takes a shared-memory object and performs allocation from
-// it. The memory must be previously mapped via Map() or MapAt(). The allocator
-// takes ownership of the memory object.
-class BASE_EXPORT SharedPersistentMemoryAllocator
+// This allocator takes a writable shared memory mapping object and performs
+// allocation from it. The allocator takes ownership of the mapping object.
+class BASE_EXPORT WritableSharedPersistentMemoryAllocator
     : public PersistentMemoryAllocator {
  public:
-  SharedPersistentMemoryAllocator(std::unique_ptr<SharedMemory> memory,
-                                  uint64_t id,
-                                  base::StringPiece name,
-                                  bool read_only);
-  ~SharedPersistentMemoryAllocator() override;
+  WritableSharedPersistentMemoryAllocator(
+      base::WritableSharedMemoryMapping memory,
+      uint64_t id,
+      base::StringPiece name);
 
-  SharedMemory* shared_memory() { return shared_memory_.get(); }
+  WritableSharedPersistentMemoryAllocator(
+      const WritableSharedPersistentMemoryAllocator&) = delete;
+  WritableSharedPersistentMemoryAllocator& operator=(
+      const WritableSharedPersistentMemoryAllocator&) = delete;
+
+  ~WritableSharedPersistentMemoryAllocator() override;
 
   // Ensure that the memory isn't so invalid that it would crash when passing it
   // to the allocator. This doesn't guarantee the data is valid, just that it
   // won't cause the program to abort. The existing IsCorrupt() call will handle
   // the rest.
-  static bool IsSharedMemoryAcceptable(const SharedMemory& memory);
+  static bool IsSharedMemoryAcceptable(
+      const base::WritableSharedMemoryMapping& memory);
 
  private:
-  std::unique_ptr<SharedMemory> shared_memory_;
+  base::WritableSharedMemoryMapping shared_memory_;
+};
 
-  DISALLOW_COPY_AND_ASSIGN(SharedPersistentMemoryAllocator);
+// This allocator takes a read-only shared memory mapping object and performs
+// allocation from it. The allocator takes ownership of the mapping object.
+class BASE_EXPORT ReadOnlySharedPersistentMemoryAllocator
+    : public PersistentMemoryAllocator {
+ public:
+  ReadOnlySharedPersistentMemoryAllocator(
+      base::ReadOnlySharedMemoryMapping memory,
+      uint64_t id,
+      base::StringPiece name);
+
+  ReadOnlySharedPersistentMemoryAllocator(
+      const ReadOnlySharedPersistentMemoryAllocator&) = delete;
+  ReadOnlySharedPersistentMemoryAllocator& operator=(
+      const ReadOnlySharedPersistentMemoryAllocator&) = delete;
+
+  ~ReadOnlySharedPersistentMemoryAllocator() override;
+
+  // Ensure that the memory isn't so invalid that it would crash when passing it
+  // to the allocator. This doesn't guarantee the data is valid, just that it
+  // won't cause the program to abort. The existing IsCorrupt() call will handle
+  // the rest.
+  static bool IsSharedMemoryAcceptable(
+      const base::ReadOnlySharedMemoryMapping& memory);
+
+ private:
+  base::ReadOnlySharedMemoryMapping shared_memory_;
 };
 #endif  // !defined(STARBOARD)
 
 // NACL doesn't support any kind of file access in build.
-#if !defined(OS_NACL) || !defined(STARBOARD)
+#if !BUILDFLAG(IS_NACL)
 // This allocator takes a memory-mapped file object and performs allocation
 // from it. The allocator takes ownership of the file object.
 class BASE_EXPORT FilePersistentMemoryAllocator
@@ -753,6 +817,11 @@
                                 uint64_t id,
                                 base::StringPiece name,
                                 bool read_only);
+
+  FilePersistentMemoryAllocator(const FilePersistentMemoryAllocator&) = delete;
+  FilePersistentMemoryAllocator& operator=(
+      const FilePersistentMemoryAllocator&) = delete;
+
   ~FilePersistentMemoryAllocator() override;
 
   // Ensure that the file isn't so invalid that it would crash when passing it
@@ -775,10 +844,8 @@
 
  private:
   std::unique_ptr<MemoryMappedFile> mapped_file_;
-
-  DISALLOW_COPY_AND_ASSIGN(FilePersistentMemoryAllocator);
 };
-#endif  // !defined(OS_NACL)
+#endif  // !BUILDFLAG(IS_NACL)
 
 // An allocation that is defined but not executed until required at a later
 // time. This allows for potential users of an allocation to be decoupled
@@ -799,40 +866,18 @@
   // offset into the segment; this allows combining allocations into a
   // single persistent segment to reduce overhead and means an "all or
   // nothing" request. Note that |size| is always the total memory size
-  // and |offset| is just indicating the start of a block within it.  If
-  // |make_iterable| was true, the allocation will made iterable when it
-  // is created; already existing allocations are not changed.
+  // and |offset| just indicates the start of a block within it.
   //
   // Once allocated, a reference to the segment will be stored at |ref|.
   // This shared location must be initialized to zero (0); it is checked
   // with every Get() request to see if the allocation has already been
   // done. If reading |ref| outside of this object, be sure to do an
   // "acquire" load. Don't write to it -- leave that to this object.
-  //
-  // For convenience, methods taking both Atomic32 and std::atomic<Reference>
-  // are defined.
-  DelayedPersistentAllocation(PersistentMemoryAllocator* allocator,
-                              subtle::Atomic32* ref,
-                              uint32_t type,
-                              size_t size,
-                              bool make_iterable);
-  DelayedPersistentAllocation(PersistentMemoryAllocator* allocator,
-                              subtle::Atomic32* ref,
-                              uint32_t type,
-                              size_t size,
-                              size_t offset,
-                              bool make_iterable);
   DelayedPersistentAllocation(PersistentMemoryAllocator* allocator,
                               std::atomic<Reference>* ref,
                               uint32_t type,
                               size_t size,
-                              bool make_iterable);
-  DelayedPersistentAllocation(PersistentMemoryAllocator* allocator,
-                              std::atomic<Reference>* ref,
-                              uint32_t type,
-                              size_t size,
-                              size_t offset,
-                              bool make_iterable);
+                              size_t offset = 0);
   ~DelayedPersistentAllocation();
 
   // Gets a pointer to the defined allocation. This will realize the request
@@ -857,7 +902,7 @@
   // The underlying object that does the actual allocation of memory. Its
   // lifetime must exceed that of all DelayedPersistentAllocation objects
   // that use it.
-  PersistentMemoryAllocator* const allocator_;
+  const raw_ptr<PersistentMemoryAllocator> allocator_;
 
   // The desired type and size of the allocated segment plus the offset
   // within it for the defined request.
@@ -865,14 +910,11 @@
   const uint32_t size_;
   const uint32_t offset_;
 
-  // Flag indicating if allocation should be made iterable when done.
-  const bool make_iterable_;
-
   // The location at which a reference to the allocated segment is to be
   // stored once the allocation is complete. If multiple delayed allocations
   // share the same pointer then an allocation on one will amount to an
   // allocation for all.
-  volatile std::atomic<Reference>* const reference_;
+  const raw_ptr<volatile std::atomic<Reference>> reference_;
 
   // No DISALLOW_COPY_AND_ASSIGN as it's okay to copy/move these objects.
 };
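
The delayed-allocation protocol described above is easier to see in use. A minimal sketch, assuming an allocator that outlives the call; the function, type value, and names are illustrative, not part of the API. Note that with the |make_iterable| flag removed, making the block iterable is now an explicit follow-up call, as the updated tests below also show:

    // The shared reference slot must start at zero; the first Get() fills it.
    std::atomic<base::PersistentMemoryAllocator::Reference> g_ref{0};

    void RecordBlock(base::PersistentMemoryAllocator* allocator) {
      base::DelayedPersistentAllocation delayed(allocator, &g_ref,
                                                /*type=*/0x1234, /*size=*/64);
      void* block = delayed.Get();  // Realizes the allocation on first use.
      if (!block)
        return;  // e.g., the persistent segment is full.
      // Iterability is a separate, explicit step in the new API.
      allocator->MakeIterable(delayed.reference());
    }
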
diff --git a/base/metrics/persistent_memory_allocator_unittest.cc b/base/metrics/persistent_memory_allocator_unittest.cc
index 2a5642e..4d66a1c 100644
--- a/base/metrics/persistent_memory_allocator_unittest.cc
+++ b/base/metrics/persistent_memory_allocator_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
+// Copyright 2015 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -10,7 +10,10 @@
 #include "base/files/file_util.h"
 #include "base/files/memory_mapped_file.h"
 #include "base/files/scoped_temp_dir.h"
-#include "base/memory/shared_memory.h"
+#include "base/memory/raw_ptr.h"
+#include "base/memory/read_only_shared_memory_region.h"
+#include "base/memory/shared_memory_mapping.h"
+#include "base/memory/writable_shared_memory_region.h"
 #include "base/metrics/histogram.h"
 #include "base/rand_util.h"
 #include "base/strings/safe_sprintf.h"
@@ -18,6 +21,7 @@
 #include "base/synchronization/condition_variable.h"
 #include "base/synchronization/lock.h"
 #include "base/threading/simple_thread.h"
+#include "build/build_config.h"
 #include "testing/gmock/include/gmock/gmock.h"
 
 namespace base {
@@ -77,9 +81,9 @@
   void SetUp() override {
     allocator_.reset();
     ::memset(mem_segment_.get(), 0, TEST_MEMORY_SIZE);
-    allocator_.reset(new PersistentMemoryAllocator(
-        mem_segment_.get(), TEST_MEMORY_SIZE, TEST_MEMORY_PAGE,
-        TEST_ID, TEST_NAME, false));
+    allocator_ = std::make_unique<PersistentMemoryAllocator>(
+        mem_segment_.get(), TEST_MEMORY_SIZE, TEST_MEMORY_PAGE, TEST_ID,
+        TEST_NAME, false);
   }
 
   void TearDown() override {
@@ -380,6 +384,9 @@
         count_(0),
         wake_up_(wake_up) {}
 
+  CounterThread(const CounterThread&) = delete;
+  CounterThread& operator=(const CounterThread&) = delete;
+
   void Run() override {
     // Wait so all threads can start at approximately the same time.
     // Best performance comes from releasing a single worker which then
@@ -408,13 +415,11 @@
   unsigned count() { return count_; }
 
  private:
-  PersistentMemoryAllocator::Iterator* iterator_;
-  Lock* lock_;
-  ConditionVariable* condition_;
+  raw_ptr<PersistentMemoryAllocator::Iterator> iterator_;
+  raw_ptr<Lock> lock_;
+  raw_ptr<ConditionVariable> condition_;
   unsigned count_;
-  bool* wake_up_;
-
-  DISALLOW_COPY_AND_ASSIGN(CounterThread);
+  raw_ptr<bool> wake_up_;
 };
 
 // Ensure that parallel iteration returns the same number of objects as
@@ -489,9 +494,9 @@
   std::atomic<Reference> ref1, ref2;
   ref1.store(0, std::memory_order_relaxed);
   ref2.store(0, std::memory_order_relaxed);
-  DelayedPersistentAllocation da1(allocator_.get(), &ref1, 1001, 100, true);
-  DelayedPersistentAllocation da2a(allocator_.get(), &ref2, 2002, 200, 0, true);
-  DelayedPersistentAllocation da2b(allocator_.get(), &ref2, 2002, 200, 5, true);
+  DelayedPersistentAllocation da1(allocator_.get(), &ref1, 1001, 100);
+  DelayedPersistentAllocation da2a(allocator_.get(), &ref2, 2002, 200, 0);
+  DelayedPersistentAllocation da2b(allocator_.get(), &ref2, 2002, 200, 5);
 
   // Nothing should yet have been allocated.
   uint32_t type;
@@ -505,6 +510,7 @@
   EXPECT_NE(0U, da1.reference());
   EXPECT_EQ(allocator_->GetAsReference(mem1, 1001),
             ref1.load(std::memory_order_relaxed));
+  allocator_->MakeIterable(da1.reference());
   EXPECT_NE(0U, iter.GetNext(&type));
   EXPECT_EQ(1001U, type);
   EXPECT_EQ(0U, iter.GetNext(&type));
@@ -514,6 +520,7 @@
   ASSERT_TRUE(mem2a);
   EXPECT_EQ(allocator_->GetAsReference(mem2a, 2002),
             ref2.load(std::memory_order_relaxed));
+  allocator_->MakeIterable(da2a.reference());
   EXPECT_NE(0U, iter.GetNext(&type));
   EXPECT_EQ(2002U, type);
   EXPECT_EQ(0U, iter.GetNext(&type));
@@ -521,6 +528,7 @@
   // Third allocation should just return offset into second allocation.
   void* mem2b = da2b.Get();
   ASSERT_TRUE(mem2b);
+  allocator_->MakeIterable(da2b.reference());
   EXPECT_EQ(0U, iter.GetNext(&type));
   EXPECT_EQ(reinterpret_cast<uintptr_t>(mem2a) + 5,
             reinterpret_cast<uintptr_t>(mem2b));
@@ -608,22 +616,21 @@
   EXPECT_FALSE(allocator.IsCorrupt());
 }
 
-
-//----- SharedPersistentMemoryAllocator ----------------------------------------
-
+//----- {Writable,ReadOnly}SharedPersistentMemoryAllocator ---------------------
 #if !defined(STARBOARD)
 
 TEST(SharedPersistentMemoryAllocatorTest, CreationTest) {
-  SharedMemoryHandle shared_handle_1;
-  SharedMemoryHandle shared_handle_2;
+  base::WritableSharedMemoryRegion rw_region =
+      base::WritableSharedMemoryRegion::Create(TEST_MEMORY_SIZE);
+  ASSERT_TRUE(rw_region.IsValid());
 
   PersistentMemoryAllocator::MemoryInfo meminfo1;
   Reference r123, r456, r789;
   {
-    std::unique_ptr<SharedMemory> shmem1(new SharedMemory());
-    ASSERT_TRUE(shmem1->CreateAndMapAnonymous(TEST_MEMORY_SIZE));
-    SharedPersistentMemoryAllocator local(std::move(shmem1), TEST_ID, "",
-                                          false);
+    base::WritableSharedMemoryMapping mapping = rw_region.Map();
+    ASSERT_TRUE(mapping.IsValid());
+    WritableSharedPersistentMemoryAllocator local(std::move(mapping), TEST_ID,
+                                                  "");
     EXPECT_FALSE(local.IsReadonly());
     r123 = local.Allocate(123, 123);
     r456 = local.Allocate(456, 456);
@@ -634,19 +641,20 @@
     local.GetMemoryInfo(&meminfo1);
     EXPECT_FALSE(local.IsFull());
     EXPECT_FALSE(local.IsCorrupt());
-
-    shared_handle_1 = local.shared_memory()->handle().Duplicate();
-    ASSERT_TRUE(shared_handle_1.IsValid());
-    shared_handle_2 = local.shared_memory()->handle().Duplicate();
-    ASSERT_TRUE(shared_handle_2.IsValid());
   }
 
-  // Read-only test.
-  std::unique_ptr<SharedMemory> shmem2(new SharedMemory(shared_handle_1,
-                                                        /*readonly=*/true));
-  ASSERT_TRUE(shmem2->Map(TEST_MEMORY_SIZE));
+  // Create writable and read-only mappings of the same region.
+  base::WritableSharedMemoryMapping rw_mapping = rw_region.Map();
+  ASSERT_TRUE(rw_mapping.IsValid());
+  base::ReadOnlySharedMemoryRegion ro_region =
+      base::WritableSharedMemoryRegion::ConvertToReadOnly(std::move(rw_region));
+  ASSERT_TRUE(ro_region.IsValid());
+  base::ReadOnlySharedMemoryMapping ro_mapping = ro_region.Map();
+  ASSERT_TRUE(ro_mapping.IsValid());
 
-  SharedPersistentMemoryAllocator shalloc2(std::move(shmem2), 0, "", true);
+  // Read-only test.
+  ReadOnlySharedPersistentMemoryAllocator shalloc2(std::move(ro_mapping), 0,
+                                                   "");
   EXPECT_TRUE(shalloc2.IsReadonly());
   EXPECT_EQ(TEST_ID, shalloc2.Id());
   EXPECT_FALSE(shalloc2.IsFull());
@@ -668,11 +676,8 @@
   EXPECT_EQ(meminfo1.free, meminfo2.free);
 
   // Read/write test.
-  std::unique_ptr<SharedMemory> shmem3(new SharedMemory(shared_handle_2,
-                                                        /*readonly=*/false));
-  ASSERT_TRUE(shmem3->Map(TEST_MEMORY_SIZE));
-
-  SharedPersistentMemoryAllocator shalloc3(std::move(shmem3), 0, "", false);
+  WritableSharedPersistentMemoryAllocator shalloc3(std::move(rw_mapping), 0,
+                                                   "");
   EXPECT_FALSE(shalloc3.IsReadonly());
   EXPECT_EQ(TEST_ID, shalloc3.Id());
   EXPECT_FALSE(shalloc3.IsFull());
@@ -718,10 +723,10 @@
   EXPECT_EQ(0, data[2]);
   EXPECT_EQ(0, data[3]);
 }
-
 #endif  // !defined(STARBOARD)
 
-#if !defined(OS_NACL) && !defined(STARBOARD)
+// TODO: b/316198056 - Re-enable this test once base/net have been updated.
+#if !BUILDFLAG(IS_NACL) && !defined(STARBOARD)
 //----- FilePersistentMemoryAllocator ------------------------------------------
 
 TEST(FilePersistentMemoryAllocatorTest, CreationTest) {
@@ -871,7 +876,7 @@
     const MemoryMappedFile::Access map_access =
         read_only ? MemoryMappedFile::READ_ONLY : MemoryMappedFile::READ_WRITE;
 
-    mmfile.reset(new MemoryMappedFile());
+    mmfile = std::make_unique<MemoryMappedFile>();
     ASSERT_TRUE(mmfile->Initialize(File(file_path, file_flags), map_access));
     EXPECT_EQ(filesize, mmfile->length());
     if (FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, read_only)) {
@@ -913,7 +918,7 @@
     }
     ASSERT_TRUE(PathExists(file_path));
 
-    mmfile.reset(new MemoryMappedFile());
+    mmfile = std::make_unique<MemoryMappedFile>();
     ASSERT_TRUE(mmfile->Initialize(File(file_path, file_flags), map_access));
     EXPECT_EQ(filesize, mmfile->length());
     if (FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, read_only)) {
@@ -999,6 +1004,6 @@
   }
 }
 
-#endif  // !defined(OS_NACL)
+#endif  // !BUILDFLAG(IS_NACL)
 
 }  // namespace base
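
The test above exercises the full region handshake; outside the test, the same create, map, convert-to-read-only flow looks like this. A sketch only, using just the base shared-memory calls that appear in this diff, with a made-up id and name:

    base::WritableSharedMemoryRegion region =
        base::WritableSharedMemoryRegion::Create(64 * 1024);
    base::WritableSharedMemoryMapping rw_map = region.Map();

    // Guard against mappings that would crash the allocator; corrupt but
    // mappable data is caught later by IsCorrupt().
    if (base::WritableSharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(
            rw_map)) {
      base::WritableSharedPersistentMemoryAllocator writable(
          std::move(rw_map), /*id=*/1, "demo");
    }

    // Converting consumes the writable region; only read-only mappings can be
    // made from the result.
    base::ReadOnlySharedMemoryRegion ro_region =
        base::WritableSharedMemoryRegion::ConvertToReadOnly(std::move(region));
    base::ReadOnlySharedMemoryMapping ro_map = ro_region.Map();
    base::ReadOnlySharedPersistentMemoryAllocator read_only(
        std::move(ro_map), /*id=*/1, "demo");
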
diff --git a/base/metrics/persistent_sample_map.cc b/base/metrics/persistent_sample_map.cc
index e07b716..de75122 100644
--- a/base/metrics/persistent_sample_map.cc
+++ b/base/metrics/persistent_sample_map.cc
@@ -1,14 +1,17 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
+// Copyright 2016 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "base/metrics/persistent_sample_map.h"
 
-#include "base/logging.h"
+#include "base/atomicops.h"
+#include "base/check_op.h"
+#include "base/containers/contains.h"
+#include "base/debug/crash_logging.h"
 #include "base/metrics/histogram_macros.h"
 #include "base/metrics/persistent_histogram_allocator.h"
+#include "base/notreached.h"
 #include "base/numerics/safe_conversions.h"
-#include "base/stl_util.h"
 
 namespace base {
 
@@ -18,65 +21,84 @@
 namespace {
 
 // An iterator for going through a PersistentSampleMap. The logic here is
-// identical to that of SampleMapIterator but with different data structures.
-// Changes here likely need to be duplicated there.
-class PersistentSampleMapIterator : public SampleCountIterator {
+// identical to that of the iterator for SampleMap but with different data
+// structures. Changes here likely need to be duplicated there.
+template <typename T, typename I>
+class IteratorTemplate : public SampleCountIterator {
  public:
-  typedef std::map<HistogramBase::Sample, HistogramBase::Count*>
-      SampleToCountMap;
+  explicit IteratorTemplate(T& sample_counts)
+      : iter_(sample_counts.begin()), end_(sample_counts.end()) {
+    SkipEmptyBuckets();
+  }
 
-  explicit PersistentSampleMapIterator(const SampleToCountMap& sample_counts);
-  ~PersistentSampleMapIterator() override;
+  ~IteratorTemplate() override;
 
   // SampleCountIterator:
-  bool Done() const override;
-  void Next() override;
+  bool Done() const override { return iter_ == end_; }
+  void Next() override {
+    DCHECK(!Done());
+    ++iter_;
+    SkipEmptyBuckets();
+  }
   void Get(HistogramBase::Sample* min,
            int64_t* max,
-           HistogramBase::Count* count) const override;
+           HistogramBase::Count* count) override;
 
  private:
-  void SkipEmptyBuckets();
+  void SkipEmptyBuckets() {
+    while (!Done() && subtle::NoBarrier_Load(iter_->second) == 0) {
+      ++iter_;
+    }
+  }
 
-  SampleToCountMap::const_iterator iter_;
-  const SampleToCountMap::const_iterator end_;
+  I iter_;
+  const I end_;
 };
 
-PersistentSampleMapIterator::PersistentSampleMapIterator(
-    const SampleToCountMap& sample_counts)
-    : iter_(sample_counts.begin()),
-      end_(sample_counts.end()) {
-  SkipEmptyBuckets();
-}
+typedef std::map<HistogramBase::Sample, HistogramBase::Count*> SampleToCountMap;
+typedef IteratorTemplate<const SampleToCountMap,
+                         SampleToCountMap::const_iterator>
+    PersistentSampleMapIterator;
 
-PersistentSampleMapIterator::~PersistentSampleMapIterator() = default;
+template <>
+PersistentSampleMapIterator::~IteratorTemplate() = default;
 
-bool PersistentSampleMapIterator::Done() const {
-  return iter_ == end_;
-}
-
-void PersistentSampleMapIterator::Next() {
+// Get() for an iterator of a PersistentSampleMap.
+template <>
+void PersistentSampleMapIterator::Get(Sample* min, int64_t* max, Count* count) {
   DCHECK(!Done());
-  ++iter_;
-  SkipEmptyBuckets();
+  *min = iter_->first;
+  *max = strict_cast<int64_t>(iter_->first) + 1;
+  // We have to do the following atomically, because even if the caller is using
+  // a lock, a separate process (that is not aware of this lock) may
+  // concurrently modify the value (note that iter_->second is a pointer to a
+  // sample count, which may live in shared memory).
+  *count = subtle::NoBarrier_Load(iter_->second);
 }
 
-void PersistentSampleMapIterator::Get(Sample* min,
-                                      int64_t* max,
-                                      Count* count) const {
+typedef IteratorTemplate<SampleToCountMap, SampleToCountMap::iterator>
+    ExtractingPersistentSampleMapIterator;
+
+template <>
+ExtractingPersistentSampleMapIterator::~IteratorTemplate() {
+  // Ensure that the user has consumed all the samples, so that none are lost.
+  DCHECK(Done());
+}
+
+// Get() for an extracting iterator of a PersistentSampleMap.
+template <>
+void ExtractingPersistentSampleMapIterator::Get(Sample* min,
+                                                int64_t* max,
+                                                Count* count) {
   DCHECK(!Done());
-  if (min)
-    *min = iter_->first;
-  if (max)
-    *max = strict_cast<int64_t>(iter_->first) + 1;
-  if (count)
-    *count = *iter_->second;
-}
-
-void PersistentSampleMapIterator::SkipEmptyBuckets() {
-  while (!Done() && *iter_->second == 0) {
-    ++iter_;
-  }
+  *min = iter_->first;
+  *max = strict_cast<int64_t>(iter_->first) + 1;
+  // We have to do the following atomically, because even if the caller is using
+  // a lock, a separate process (that is not aware of this lock) may
+  // concurrently modify the value (note that iter_->second is a pointer to a
+  // sample count, which may live in shared memory).
+  *count = subtle::NoBarrier_AtomicExchange(iter_->second, 0);
 }
 
 // This structure holds an entry for a PersistentSampleMap within a persistent
@@ -108,24 +130,11 @@
 }
 
 void PersistentSampleMap::Accumulate(Sample value, Count count) {
-#if 0  // TODO(bcwhite) Re-enable efficient version after crbug.com/682680.
-  *GetOrCreateSampleCountStorage(value) += count;
-#else
-  Count* local_count_ptr = GetOrCreateSampleCountStorage(value);
-  if (count < 0) {
-    if (*local_count_ptr < -count)
-      RecordNegativeSample(SAMPLES_ACCUMULATE_WENT_NEGATIVE, -count);
-    else
-      RecordNegativeSample(SAMPLES_ACCUMULATE_NEGATIVE_COUNT, -count);
-    *local_count_ptr += count;
-  } else {
-    Sample old_value = *local_count_ptr;
-    Sample new_value = old_value + count;
-    *local_count_ptr = new_value;
-    if ((new_value >= 0) != (old_value >= 0))
-      RecordNegativeSample(SAMPLES_ACCUMULATE_OVERFLOW, count);
-  }
-#endif
+  // We have to do the following atomically, because even if the caller is using
+  // a lock, a separate process (that is not aware of this lock) may
+  // concurrently modify the value.
+  subtle::NoBarrier_AtomicIncrement(GetOrCreateSampleCountStorage(value),
+                                    count);
   IncreaseSumAndCount(strict_cast<int64_t>(count) * value, count);
 }
 
@@ -134,7 +143,7 @@
   // being able to know what value to return.
   Count* count_pointer =
       const_cast<PersistentSampleMap*>(this)->GetSampleCountStorage(value);
-  return count_pointer ? *count_pointer : 0;
+  return count_pointer ? subtle::NoBarrier_Load(count_pointer) : 0;
 }
 
 Count PersistentSampleMap::TotalCount() const {
@@ -144,7 +153,7 @@
 
   Count count = 0;
   for (const auto& entry : sample_counts_) {
-    count += *entry.second;
+    count += subtle::NoBarrier_Load(entry.second);
   }
   return count;
 }
@@ -156,6 +165,14 @@
   return std::make_unique<PersistentSampleMapIterator>(sample_counts_);
 }
 
+std::unique_ptr<SampleCountIterator> PersistentSampleMap::ExtractingIterator() {
+  // Make sure all samples have been loaded before trying to iterate over the
+  // map.
+  ImportSamples(-1, true);
+  return std::make_unique<ExtractingPersistentSampleMapIterator>(
+      sample_counts_);
+}
+
 // static
 PersistentMemoryAllocator::Reference
 PersistentSampleMap::GetNextPersistentRecord(
@@ -177,6 +194,13 @@
     Sample value) {
   SampleRecord* record = allocator->New<SampleRecord>();
   if (!record) {
+#if !BUILDFLAG(IS_NACL)
+    // TODO(crbug/1432981): Remove these. They are used to investigate
+    // unexpected failures.
+    SCOPED_CRASH_KEY_BOOL("PersistentSampleMap", "full", allocator->IsFull());
+    SCOPED_CRASH_KEY_BOOL("PersistentSampleMap", "corrupted",
+                          allocator->IsCorrupt());
+#endif  // !BUILDFLAG(IS_NACL)
     NOTREACHED() << "full=" << allocator->IsFull()
                  << ", corrupt=" << allocator->IsCorrupt();
     return 0;
@@ -202,8 +226,13 @@
       continue;
     if (strict_cast<int64_t>(min) + 1 != max)
       return false;  // SparseHistogram only supports bucket with size 1.
-    *GetOrCreateSampleCountStorage(min) +=
-        (op == HistogramSamples::ADD) ? count : -count;
+
+    // We have to do the following atomically, because even if the caller is
+    // using a lock, a separate process (that is not aware of this lock) may
+    // concurrently modify the value.
+    subtle::Barrier_AtomicIncrement(
+        GetOrCreateSampleCountStorage(min),
+        (op == HistogramSamples::ADD) ? count : -count);
   }
   return true;
 }
@@ -276,7 +305,7 @@
     DCHECK_EQ(id(), record->id);
 
     // Check if the record's value is already known.
-    if (!ContainsKey(sample_counts_, record->value)) {
+    if (!Contains(sample_counts_, record->value)) {
       // No: Add it to map of known values.
       sample_counts_[record->value] = &record->count;
     } else {
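
The atomicity comments repeated through this file boil down to one invariant: the per-sample counts may live in shared memory, so another process can touch them even while this process holds a lock. A sketch of the resulting access rules, using the same base/atomicops calls as the code above (helper names are illustrative):

    // |count_ptr| may point into shared memory, so even "read" and "add" must
    // be atomic; a lock in this process is invisible to other processes.
    void AddToSharedCount(base::HistogramBase::Count* count_ptr,
                          base::HistogramBase::Count delta) {
      base::subtle::NoBarrier_AtomicIncrement(count_ptr, delta);
    }

    base::HistogramBase::Count ReadSharedCount(
        const base::HistogramBase::Count* count_ptr) {
      return base::subtle::NoBarrier_Load(count_ptr);
    }
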
diff --git a/base/metrics/persistent_sample_map.h b/base/metrics/persistent_sample_map.h
index a3b689f..21afc90 100644
--- a/base/metrics/persistent_sample_map.h
+++ b/base/metrics/persistent_sample_map.h
@@ -1,4 +1,4 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
+// Copyright 2016 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -9,15 +9,17 @@
 #ifndef BASE_METRICS_PERSISTENT_SAMPLE_MAP_H_
 #define BASE_METRICS_PERSISTENT_SAMPLE_MAP_H_
 
+#include <stdint.h>
+
 #include <map>
 #include <memory>
 
+#include "base/base_export.h"
 #include "base/compiler_specific.h"
-#include "base/macros.h"
+#include "base/memory/raw_ptr.h"
 #include "base/metrics/histogram_base.h"
 #include "base/metrics/histogram_samples.h"
 #include "base/metrics/persistent_memory_allocator.h"
-#include "starboard/types.h"
 
 namespace base {
 
@@ -34,6 +36,9 @@
                       PersistentHistogramAllocator* allocator,
                       Metadata* meta);
 
+  PersistentSampleMap(const PersistentSampleMap&) = delete;
+  PersistentSampleMap& operator=(const PersistentSampleMap&) = delete;
+
   ~PersistentSampleMap() override;
 
   // HistogramSamples:
@@ -42,6 +47,7 @@
   HistogramBase::Count GetCount(HistogramBase::Sample value) const override;
   HistogramBase::Count TotalCount() const override;
   std::unique_ptr<SampleCountIterator> Iterator() const override;
+  std::unique_ptr<SampleCountIterator> ExtractingIterator() override;
 
   // Uses a persistent-memory |iterator| to locate and return information about
   // the next record holding information for a PersistentSampleMap. The record
@@ -92,15 +98,13 @@
 
   // The allocator that manages histograms inside persistent memory. This is
   // owned externally and is expected to live beyond the life of this object.
-  PersistentHistogramAllocator* allocator_;
+  raw_ptr<PersistentHistogramAllocator> allocator_;
 
   // The object that manages sample records inside persistent memory. This is
   // owned by the |allocator_| object (above) and so, like it, is expected to
   // live beyond the life of this object. This value is lazily-initialized on
   // first use via the GetRecords() accessor method.
-  PersistentSampleMapRecords* records_ = nullptr;
-
-  DISALLOW_COPY_AND_ASSIGN(PersistentSampleMap);
+  raw_ptr<PersistentSampleMapRecords> records_ = nullptr;
 };
 
 }  // namespace base
diff --git a/base/metrics/persistent_sample_map_unittest.cc b/base/metrics/persistent_sample_map_unittest.cc
index b25f582..80d85cd 100644
--- a/base/metrics/persistent_sample_map_unittest.cc
+++ b/base/metrics/persistent_sample_map_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2016 The Chromium Authors. All rights reserved.
+// Copyright 2016 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
diff --git a/base/metrics/ranges_manager.cc b/base/metrics/ranges_manager.cc
new file mode 100644
index 0000000..caea82b
--- /dev/null
+++ b/base/metrics/ranges_manager.cc
@@ -0,0 +1,62 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/ranges_manager.h"
+
+namespace base {
+
+RangesManager::RangesManager() = default;
+
+RangesManager::~RangesManager() {
+  if (!do_not_release_ranges_on_destroy_for_testing_)
+    ReleaseBucketRanges();
+}
+
+size_t RangesManager::BucketRangesHash::operator()(
+    const BucketRanges* const a) const {
+  return a->checksum();
+}
+
+bool RangesManager::BucketRangesEqual::operator()(
+    const BucketRanges* const a,
+    const BucketRanges* const b) const {
+  return a->Equals(b);
+}
+
+const BucketRanges* RangesManager::RegisterOrDeleteDuplicateRanges(
+    const BucketRanges* ranges) {
+  DCHECK(ranges->HasValidChecksum());
+
+  // Attempt to insert |ranges| into the set of registered BucketRanges. If an
+  // equivalent one already exists (one with the exact same ranges), this
+  // fetches the pre-existing one and does not insert the passed |ranges|.
+  const BucketRanges* const registered = *ranges_.insert(ranges).first;
+
+  // If there is already a registered equivalent BucketRanges, delete the passed
+  // |ranges|.
+  if (registered != ranges)
+    delete ranges;
+
+  return registered;
+}
+
+std::vector<const BucketRanges*> RangesManager::GetBucketRanges() {
+  std::vector<const BucketRanges*> out;
+  out.reserve(ranges_.size());
+  out.assign(ranges_.begin(), ranges_.end());
+  return out;
+}
+
+void RangesManager::ReleaseBucketRanges() {
+  for (auto* range : ranges_) {
+    delete range;
+  }
+  ranges_.clear();
+}
+
+void RangesManager::DoNotReleaseRangesOnDestroyForTesting() {
+  do_not_release_ranges_on_destroy_for_testing_ = true;
+}
+
+}  // namespace base
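
RegisterOrDeleteDuplicateRanges() above is an instance of the standard insert-and-compare deduplication idiom for sets of owned pointers. A freestanding sketch with a toy type; the names here are illustrative:

    #include <unordered_set>

    struct Ranges { int checksum = 0; };
    struct RangesHash {
      size_t operator()(const Ranges* r) const { return r->checksum; }
    };
    struct RangesEqual {
      bool operator()(const Ranges* a, const Ranges* b) const {
        return a->checksum == b->checksum;  // Stand-in for BucketRanges::Equals().
      }
    };

    const Ranges* RegisterOrDelete(
        std::unordered_set<const Ranges*, RangesHash, RangesEqual>& set,
        const Ranges* candidate) {
      // insert() yields the element actually in the set, whether it was
      // pre-existing or newly inserted.
      const Ranges* registered = *set.insert(candidate).first;
      if (registered != candidate)
        delete candidate;  // An equivalent entry already owns this slot.
      return registered;
    }
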
diff --git a/base/metrics/ranges_manager.h b/base/metrics/ranges_manager.h
new file mode 100644
index 0000000..834ef8f
--- /dev/null
+++ b/base/metrics/ranges_manager.h
@@ -0,0 +1,79 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_RANGES_MANAGER_H_
+#define BASE_METRICS_RANGES_MANAGER_H_
+
+#include <unordered_set>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/metrics/bucket_ranges.h"
+
+namespace base {
+
+// Manages BucketRanges and their lifetime. When registering a BucketRanges
+// to a RangesManager instance, if an equivalent one already exists (one with
+// the exact same ranges), the passed BucketRanges is deleted. This is useful to
+// prevent duplicate instances of equivalent BucketRanges. Upon the destruction
+// of a RangesManager instance, all BucketRanges managed by it are destroyed. A
+// BucketRanges instance should not be registered to multiple RangesManagers.
+class BASE_EXPORT RangesManager {
+ public:
+  RangesManager();
+
+  RangesManager(const RangesManager&) = delete;
+  RangesManager& operator=(const RangesManager&) = delete;
+
+  ~RangesManager();
+
+  // Registers a BucketRanges. If an equivalent BucketRanges is already
+  // registered, then the argument |ranges| will be deleted. The returned value
+  // is always the registered BucketRanges (either the argument, or the
+  // pre-existing one). Registering a BucketRanges transfers its ownership to
+  // the RangesManager, which releases it when the manager itself is destroyed.
+  const BucketRanges* RegisterOrDeleteDuplicateRanges(
+      const BucketRanges* ranges);
+
+  // Gets all registered BucketRanges. The order of returned BucketRanges is not
+  // guaranteed.
+  std::vector<const BucketRanges*> GetBucketRanges();
+
+  // Some tests may instantiate temporary StatisticsRecorders, each having their
+  // own RangesManager. During the tests, ranges may get registered with a
+  // recorder that later gets released, which would release the ranges as well.
+  // Calling this method prevents this, as the tests may not expect them to be
+  // deleted.
+  void DoNotReleaseRangesOnDestroyForTesting();
+
+ private:
+  // Removes all registered BucketRanges and destroys them. This is called in
+  // the destructor.
+  void ReleaseBucketRanges();
+
+  // Used to get the hash of a BucketRanges, which is simply its checksum.
+  struct BucketRangesHash {
+    size_t operator()(const BucketRanges* a) const;
+  };
+
+  // Comparator for BucketRanges. See `BucketRanges::Equals()`.
+  struct BucketRangesEqual {
+    bool operator()(const BucketRanges* a, const BucketRanges* b) const;
+  };
+
+  // Type for a set of unique BucketRanges, with their hash and equivalence
+  // defined by `BucketRangesHash` and `BucketRangesEqual`.
+  typedef std::
+      unordered_set<const BucketRanges*, BucketRangesHash, BucketRangesEqual>
+          RangesMap;
+
+  // The set of unique BucketRanges registered to the RangesManager.
+  RangesMap ranges_;
+
+  // Whether or not to release the registered BucketRanges when this
+  // RangesManager is destroyed. See `DoNotReleaseRangesOnDestroyForTesting()`.
+  bool do_not_release_ranges_on_destroy_for_testing_ = false;
+};
+
+}  // namespace base
+
+#endif  // BASE_METRICS_RANGES_MANAGER_H_
diff --git a/base/metrics/ranges_manager_unittest.cc b/base/metrics/ranges_manager_unittest.cc
new file mode 100644
index 0000000..e0906d9
--- /dev/null
+++ b/base/metrics/ranges_manager_unittest.cc
@@ -0,0 +1,81 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/ranges_manager.h"
+
+#include <vector>
+
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+using testing::UnorderedElementsAre;
+
+TEST(RangesManagerTest, RegisterBucketRanges) {
+  RangesManager ranges_manager;
+
+  // Create some BucketRanges. We call |ResetChecksum| to calculate and set
+  // their checksums. Checksums are used to validate integrity (and test for
+  // non-equivalence) and should be computed after a BucketRanges is fully
+  // initialized. Note that BucketRanges are initialized with 0 for all ranges,
+  // i.e., all buckets will be [0, 0).
+  BucketRanges* ranges1 = new BucketRanges(3);
+  ranges1->ResetChecksum();
+  BucketRanges* ranges2 = new BucketRanges(4);
+  ranges2->ResetChecksum();
+
+  // Register new ranges.
+  EXPECT_EQ(ranges1, ranges_manager.RegisterOrDeleteDuplicateRanges(ranges1));
+  EXPECT_EQ(ranges2, ranges_manager.RegisterOrDeleteDuplicateRanges(ranges2));
+  EXPECT_THAT(ranges_manager.GetBucketRanges(),
+              UnorderedElementsAre(ranges1, ranges2));
+
+  // Register |ranges1| again. The registered BucketRanges set should not change
+  // as |ranges1| is already registered.
+  EXPECT_EQ(ranges1, ranges_manager.RegisterOrDeleteDuplicateRanges(ranges1));
+  EXPECT_THAT(ranges_manager.GetBucketRanges(),
+              UnorderedElementsAre(ranges1, ranges2));
+
+  // Make sure |ranges1| still exists, and is the same as what we expect (all
+  // ranges are 0).
+  ASSERT_EQ(3u, ranges1->size());
+  EXPECT_EQ(0, ranges1->range(0));
+  EXPECT_EQ(0, ranges1->range(1));
+  EXPECT_EQ(0, ranges1->range(2));
+
+  // Register a new |ranges3| that is equivalent to |ranges1| (same ranges). We
+  // expect that |ranges3| is deleted (verified by LeakSanitizer bots) and that
+  // |ranges1| is returned by |RegisterOrDeleteDuplicateRanges|.
+  BucketRanges* ranges3 = new BucketRanges(3);
+  ranges3->ResetChecksum();
+  EXPECT_EQ(ranges1, ranges_manager.RegisterOrDeleteDuplicateRanges(ranges3));
+  EXPECT_THAT(ranges_manager.GetBucketRanges(),
+              UnorderedElementsAre(ranges1, ranges2));
+}
+
+TEST(RangesManagerTest, ReleaseBucketRangesOnDestroy) {
+  std::unique_ptr<RangesManager> ranges_manager =
+      std::make_unique<RangesManager>();
+
+  // Create a BucketRanges. We call |ResetChecksum| to calculate and set its
+  // checksum. Checksums are used to validate integrity (and test for
+  // non-equivalence) and should be computed after a BucketRanges is fully
+  // initialized. Note that BucketRanges are initialized with 0 for all ranges,
+  // i.e., all buckets will be [0, 0).
+  BucketRanges* ranges = new BucketRanges(1);
+  ranges->ResetChecksum();
+
+  // Register new range.
+  EXPECT_EQ(ranges, ranges_manager->RegisterOrDeleteDuplicateRanges(ranges));
+  EXPECT_THAT(ranges_manager->GetBucketRanges(), UnorderedElementsAre(ranges));
+
+  // Explicitly destroy |ranges_manager|.
+  ranges_manager.reset();
+
+  // LeakSanitizer (lsan) bots will verify that |ranges| will be properly
+  // released after destroying |ranges_manager|.
+}
+
+}  // namespace base
diff --git a/base/metrics/record_histogram_checker.h b/base/metrics/record_histogram_checker.h
index 3a002bf..0e21ba2 100644
--- a/base/metrics/record_histogram_checker.h
+++ b/base/metrics/record_histogram_checker.h
@@ -1,12 +1,13 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
+// Copyright 2017 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #ifndef BASE_METRICS_RECORD_HISTOGRAM_CHECKER_H_
 #define BASE_METRICS_RECORD_HISTOGRAM_CHECKER_H_
 
+#include <stdint.h>
+
 #include "base/base_export.h"
-#include "starboard/types.h"
 
 namespace base {
 
@@ -18,7 +19,8 @@
 
   // Returns true iff the given histogram should be recorded.
   // This method may be called on any thread, so it should not mutate any state.
-  virtual bool ShouldRecord(uint64_t histogram_hash) const = 0;
+  // |histogram_hash| corresponds to the result of HashMetricNameAs32Bits().
+  virtual bool ShouldRecord(uint32_t histogram_hash) const = 0;
 };
 
 }  // namespace base
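
A concrete (hypothetical) implementation may make the contract clearer: the checker sees only the 32-bit hash, must be safe to call from any thread, and therefore should hold only immutable state. This subclass and its allowlist are made up for illustration:

    #include <unordered_set>
    #include <utility>

    class AllowlistRecordHistogramChecker : public base::RecordHistogramChecker {
     public:
      explicit AllowlistRecordHistogramChecker(
          std::unordered_set<uint32_t> allowed)
          : allowed_(std::move(allowed)) {}

      // Called on arbitrary threads; reads immutable state only.
      bool ShouldRecord(uint32_t histogram_hash) const override {
        return allowed_.count(histogram_hash) != 0;
      }

     private:
      const std::unordered_set<uint32_t> allowed_;
    };
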
diff --git a/base/metrics/sample_map.cc b/base/metrics/sample_map.cc
index f925238..f0eed32 100644
--- a/base/metrics/sample_map.cc
+++ b/base/metrics/sample_map.cc
@@ -1,12 +1,11 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "base/metrics/sample_map.h"
 
-#include "base/logging.h"
+#include "base/check.h"
 #include "base/numerics/safe_conversions.h"
-#include "base/stl_util.h"
 
 namespace base {
 
@@ -16,75 +15,102 @@
 namespace {
 
 // An iterator for going through a SampleMap. The logic here is identical
-// to that of PersistentSampleMapIterator but with different data structures.
-// Changes here likely need to be duplicated there.
-class SampleMapIterator : public SampleCountIterator {
+// to that of the iterator for PersistentSampleMap but with different data
+// structures. Changes here likely need to be duplicated there.
+template <typename T, typename I>
+class IteratorTemplate : public SampleCountIterator {
  public:
-  typedef std::map<HistogramBase::Sample, HistogramBase::Count>
-      SampleToCountMap;
+  explicit IteratorTemplate(T& sample_counts)
+      : iter_(sample_counts.begin()), end_(sample_counts.end()) {
+    SkipEmptyBuckets();
+  }
 
-  explicit SampleMapIterator(const SampleToCountMap& sample_counts);
-  ~SampleMapIterator() override;
+  ~IteratorTemplate() override;
 
   // SampleCountIterator:
-  bool Done() const override;
-  void Next() override;
+  bool Done() const override { return iter_ == end_; }
+  void Next() override {
+    DCHECK(!Done());
+    ++iter_;
+    SkipEmptyBuckets();
+  }
   void Get(HistogramBase::Sample* min,
            int64_t* max,
-           HistogramBase::Count* count) const override;
+           HistogramBase::Count* count) override;
 
  private:
-  void SkipEmptyBuckets();
+  void SkipEmptyBuckets() {
+    while (!Done() && iter_->second == 0) {
+      ++iter_;
+    }
+  }
 
-  SampleToCountMap::const_iterator iter_;
-  const SampleToCountMap::const_iterator end_;
+  I iter_;
+  const I end_;
 };
 
-SampleMapIterator::SampleMapIterator(const SampleToCountMap& sample_counts)
-    : iter_(sample_counts.begin()),
-      end_(sample_counts.end()) {
-  SkipEmptyBuckets();
-}
+typedef std::map<HistogramBase::Sample, HistogramBase::Count> SampleToCountMap;
+typedef IteratorTemplate<const SampleToCountMap,
+                         SampleToCountMap::const_iterator>
+    SampleMapIterator;
 
-SampleMapIterator::~SampleMapIterator() = default;
+template <>
+SampleMapIterator::~IteratorTemplate() = default;
 
-bool SampleMapIterator::Done() const {
-  return iter_ == end_;
-}
-
-void SampleMapIterator::Next() {
+// Get() for an iterator of a SampleMap.
+template <>
+void SampleMapIterator::Get(Sample* min, int64_t* max, Count* count) {
   DCHECK(!Done());
-  ++iter_;
-  SkipEmptyBuckets();
+  *min = iter_->first;
+  *max = strict_cast<int64_t>(iter_->first) + 1;
+  // We do not have to do the following atomically -- if the caller needs thread
+  // safety, they should use a lock. And since this is in local memory, if a
+  // lock is used, we know the value would not be concurrently modified by a
+  // different process (in contrast to PersistentSampleMap, where the value in
+  // shared memory may be modified concurrently by a subprocess).
+  *count = iter_->second;
 }
 
-void SampleMapIterator::Get(Sample* min, int64_t* max, Count* count) const {
+typedef IteratorTemplate<SampleToCountMap, SampleToCountMap::iterator>
+    ExtractingSampleMapIterator;
+
+template <>
+ExtractingSampleMapIterator::~IteratorTemplate() {
+  // Ensure that the user has consumed all the samples, so that none are lost.
+  DCHECK(Done());
+}
+
+// Get() for an extracting iterator of a SampleMap.
+template <>
+void ExtractingSampleMapIterator::Get(Sample* min, int64_t* max, Count* count) {
   DCHECK(!Done());
-  if (min)
-    *min = iter_->first;
-  if (max)
-    *max = strict_cast<int64_t>(iter_->first) + 1;
-  if (count)
-    *count = iter_->second;
-}
-
-void SampleMapIterator::SkipEmptyBuckets() {
-  while (!Done() && iter_->second == 0) {
-    ++iter_;
-  }
+  *min = iter_->first;
+  *max = strict_cast<int64_t>(iter_->first) + 1;
+  // We do not have to do the following atomically -- if the caller needs thread
+  // safety, they should use a lock. And since this is in local memory, if a
+  // lock is used, we know the value would not be concurrently modified by a
+  // different process (in contrast to PersistentSampleMap, where the value in
+  // shared memory may be modified concurrently by a subprocess).
+  *count = iter_->second;
+  iter_->second = 0;
 }
 
 }  // namespace
 
 SampleMap::SampleMap() : SampleMap(0) {}
 
-SampleMap::SampleMap(uint64_t id) : HistogramSamples(id, new LocalMetadata()) {}
+SampleMap::SampleMap(uint64_t id)
+    : HistogramSamples(id, std::make_unique<LocalMetadata>()) {}
 
-SampleMap::~SampleMap() {
-  delete static_cast<LocalMetadata*>(meta());
-}
+SampleMap::~SampleMap() = default;
 
 void SampleMap::Accumulate(Sample value, Count count) {
+  // We do not have to do the following atomically -- if the caller needs
+  // thread safety, they should use a lock. And since this is in local memory,
+  // if a lock is used, we know the value would not be concurrently modified
+  // by a different process (in contrast to PersistentSampleMap, where the
+  // value in shared memory may be modified concurrently by a subprocess).
   sample_counts_[value] += count;
   IncreaseSumAndCount(strict_cast<int64_t>(count) * value, count);
 }
@@ -108,6 +134,10 @@
   return std::make_unique<SampleMapIterator>(sample_counts_);
 }
 
+std::unique_ptr<SampleCountIterator> SampleMap::ExtractingIterator() {
+  return std::make_unique<ExtractingSampleMapIterator>(sample_counts_);
+}
+
 bool SampleMap::AddSubtractImpl(SampleCountIterator* iter, Operator op) {
   Sample min;
   int64_t max;
@@ -117,6 +147,11 @@
     if (strict_cast<int64_t>(min) + 1 != max)
       return false;  // SparseHistogram only supports bucket with size 1.
 
+    // We do not have to do the following atomically -- if the caller needs
+    // thread safety, they should use a lock. And since this is in local memory,
+    // if a lock is used, we know the value would not be concurrently modified
+    // by a different process (in contrast to PersistentSampleMap, where the
+    // value in shared memory may be modified concurrently by a subprocess).
     sample_counts_[min] += (op == HistogramSamples::ADD) ? count : -count;
   }
   return true;
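
The difference between Iterator() and the new ExtractingIterator() is that the latter zeroes each count as it is read, and its destructor DCHECKs Done() so unconsumed samples cannot be dropped silently. A usage sketch, with a hypothetical consumer:

    // Drains |samples|, leaving every bucket at zero.
    void DrainSamples(base::SampleMap& samples) {
      std::unique_ptr<base::SampleCountIterator> it =
          samples.ExtractingIterator();
      for (; !it->Done(); it->Next()) {
        base::HistogramBase::Sample min;
        int64_t max;
        base::HistogramBase::Count count;
        it->Get(&min, &max, &count);  // Reads and zeroes this bucket.
        // ... forward (min, count) to the consumer of your choice ...
      }
    }
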
diff --git a/base/metrics/sample_map.h b/base/metrics/sample_map.h
index 1753223..7caf976 100644
--- a/base/metrics/sample_map.h
+++ b/base/metrics/sample_map.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -8,14 +8,15 @@
 #ifndef BASE_METRICS_SAMPLE_MAP_H_
 #define BASE_METRICS_SAMPLE_MAP_H_
 
+#include <stdint.h>
+
 #include <map>
 #include <memory>
 
+#include "base/base_export.h"
 #include "base/compiler_specific.h"
-#include "base/macros.h"
 #include "base/metrics/histogram_base.h"
 #include "base/metrics/histogram_samples.h"
-#include "starboard/types.h"
 
 namespace base {
 
@@ -25,6 +26,10 @@
  public:
   SampleMap();
   explicit SampleMap(uint64_t id);
+
+  SampleMap(const SampleMap&) = delete;
+  SampleMap& operator=(const SampleMap&) = delete;
+
   ~SampleMap() override;
 
   // HistogramSamples:
@@ -33,6 +38,7 @@
   HistogramBase::Count GetCount(HistogramBase::Sample value) const override;
   HistogramBase::Count TotalCount() const override;
   std::unique_ptr<SampleCountIterator> Iterator() const override;
+  std::unique_ptr<SampleCountIterator> ExtractingIterator() override;
 
  protected:
   // Performs arithmetic. |op| is ADD or SUBTRACT.
@@ -40,8 +46,6 @@
 
  private:
   std::map<HistogramBase::Sample, HistogramBase::Count> sample_counts_;
-
-  DISALLOW_COPY_AND_ASSIGN(SampleMap);
 };
 
 }  // namespace base
diff --git a/base/metrics/sample_map_unittest.cc b/base/metrics/sample_map_unittest.cc
index 83db56f..13946db 100644
--- a/base/metrics/sample_map_unittest.cc
+++ b/base/metrics/sample_map_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
diff --git a/base/metrics/sample_vector.cc b/base/metrics/sample_vector.cc
index cf8634e..0192748 100644
--- a/base/metrics/sample_vector.cc
+++ b/base/metrics/sample_vector.cc
@@ -1,14 +1,19 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "base/metrics/sample_vector.h"
 
+#include <ostream>
+
+#include "base/check_op.h"
+#include "base/debug/crash_logging.h"
 #include "base/lazy_instance.h"
-#include "base/logging.h"
 #include "base/memory/ptr_util.h"
 #include "base/metrics/persistent_memory_allocator.h"
+#include "base/notreached.h"
 #include "base/numerics/safe_conversions.h"
+#include "base/strings/stringprintf.h"
 #include "base/synchronization/lock.h"
 #include "base/threading/platform_thread.h"
 
@@ -23,6 +28,104 @@
 typedef HistogramBase::Count Count;
 typedef HistogramBase::Sample Sample;
 
+namespace {
+
+// An iterator for sample vectors.
+template <typename T>
+class IteratorTemplate : public SampleCountIterator {
+ public:
+  IteratorTemplate(T* counts,
+                   size_t counts_size,
+                   const BucketRanges* bucket_ranges)
+      : counts_(counts),
+        counts_size_(counts_size),
+        bucket_ranges_(bucket_ranges) {
+    DCHECK_GE(bucket_ranges_->bucket_count(), counts_size_);
+    SkipEmptyBuckets();
+  }
+
+  ~IteratorTemplate() override;
+
+  // SampleCountIterator:
+  bool Done() const override { return index_ >= counts_size_; }
+  void Next() override {
+    DCHECK(!Done());
+    index_++;
+    SkipEmptyBuckets();
+  }
+  void Get(HistogramBase::Sample* min,
+           int64_t* max,
+           HistogramBase::Count* count) override;
+
+  // SampleVector uses predefined buckets, so iterator can return bucket index.
+  bool GetBucketIndex(size_t* index) const override {
+    DCHECK(!Done());
+    if (index != nullptr) {
+      *index = index_;
+    }
+    return true;
+  }
+
+ private:
+  void SkipEmptyBuckets() {
+    if (Done()) {
+      return;
+    }
+
+    while (index_ < counts_size_) {
+      if (subtle::NoBarrier_Load(&counts_[index_]) != 0) {
+        return;
+      }
+      index_++;
+    }
+  }
+
+  raw_ptr<T> counts_;
+  size_t counts_size_;
+  raw_ptr<const BucketRanges> bucket_ranges_;
+
+  size_t index_ = 0;
+};
+
+typedef IteratorTemplate<const HistogramBase::AtomicCount> SampleVectorIterator;
+
+template <>
+SampleVectorIterator::~IteratorTemplate() = default;
+
+// Get() for an iterator of a SampleVector.
+template <>
+void SampleVectorIterator::Get(HistogramBase::Sample* min,
+                               int64_t* max,
+                               HistogramBase::Count* count) {
+  DCHECK(!Done());
+  *min = bucket_ranges_->range(index_);
+  *max = strict_cast<int64_t>(bucket_ranges_->range(index_ + 1));
+  *count = subtle::NoBarrier_Load(&counts_[index_]);
+}
+
+typedef IteratorTemplate<HistogramBase::AtomicCount>
+    ExtractingSampleVectorIterator;
+
+template <>
+ExtractingSampleVectorIterator::~IteratorTemplate() {
+  // Ensure that the user has consumed all the samples, so that none are lost.
+  DCHECK(Done());
+}
+
+// Get() for an extracting iterator of a SampleVector.
+template <>
+void ExtractingSampleVectorIterator::Get(HistogramBase::Sample* min,
+                                         int64_t* max,
+                                         HistogramBase::Count* count) {
+  DCHECK(!Done());
+  *min = bucket_ranges_->range(index_);
+  *max = strict_cast<int64_t>(bucket_ranges_->range(index_ + 1));
+  *count = subtle::NoBarrier_AtomicExchange(&counts_[index_], 0);
+}
+
+}  // namespace
+
 SampleVectorBase::SampleVectorBase(uint64_t id,
                                    Metadata* meta,
                                    const BucketRanges* bucket_ranges)
@@ -30,6 +133,13 @@
   CHECK_GE(bucket_ranges_->bucket_count(), 1u);
 }
 
+SampleVectorBase::SampleVectorBase(uint64_t id,
+                                   std::unique_ptr<Metadata> meta,
+                                   const BucketRanges* bucket_ranges)
+    : HistogramSamples(id, std::move(meta)), bucket_ranges_(bucket_ranges) {
+  CHECK_GE(bucket_ranges_->bucket_count(), 1u);
+}
+
 SampleVectorBase::~SampleVectorBase() = default;
 
 void SampleVectorBase::Accumulate(Sample value, Count count) {
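
The IteratorTemplate above (here and in the two sample-map files) relies on explicit specialization of individual members, so the reading and extracting instantiations share all the traversal logic and differ only in Get() and the destructor. A stripped-down sketch of that C++ pattern with toy types:

    template <typename T>
    class Iter {
     public:
      explicit Iter(T* p) : p_(p) {}
      ~Iter();    // Defined per instantiation below.
      int Get();  // Ditto.

     private:
      T* p_;
    };

    using ReadingIter = Iter<const int>;
    using ExtractingIter = Iter<int>;

    template <> ReadingIter::~Iter() = default;
    template <> int ReadingIter::Get() { return *p_; }

    template <> ExtractingIter::~Iter() = default;
    template <> int ExtractingIter::Get() {
      int v = *p_;
      *p_ = 0;  // The extracting variant zeroes the source as it reads.
      return v;
    }
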
@@ -112,7 +222,8 @@
   if (sample.count != 0) {
     return std::make_unique<SingleSampleIterator>(
         bucket_ranges_->range(sample.bucket),
-        bucket_ranges_->range(sample.bucket + 1), sample.count, sample.bucket);
+        bucket_ranges_->range(sample.bucket + 1), sample.count, sample.bucket,
+        /*value_was_extracted=*/false);
   }
 
   // Handle the multi-sample case.
@@ -125,6 +236,36 @@
   return std::make_unique<SampleVectorIterator>(nullptr, 0, bucket_ranges_);
 }
 
+std::unique_ptr<SampleCountIterator> SampleVectorBase::ExtractingIterator() {
+  // Handle the single-sample case.
+  SingleSample sample = single_sample().Extract();
+  if (sample.count != 0) {
+    // Note that we have already extracted the samples (i.e., reset the
+    // underlying data back to 0 samples), even before the iterator has been
+    // used. This means that the caller needs to ensure that this value is
+    // eventually consumed, otherwise the sample is lost. There is no iterator
+    // that simply points to the underlying SingleSample and extracts its value
+    // on-demand because there are tricky edge cases when the SingleSample is
+    // disabled between the creation of the iterator and the actual call to
+    // Get() (for example, due to histogram changing to use a vector to store
+    // its samples).
+    return std::make_unique<SingleSampleIterator>(
+        bucket_ranges_->range(sample.bucket),
+        bucket_ranges_->range(sample.bucket + 1), sample.count, sample.bucket,
+        /*value_was_extracted=*/true);
+  }
+
+  // Handle the multi-sample case.
+  if (counts() || MountExistingCountsStorage()) {
+    return std::make_unique<ExtractingSampleVectorIterator>(
+        counts(), counts_size(), bucket_ranges_);
+  }
+
+  // And the no-value case.
+  return std::make_unique<ExtractingSampleVectorIterator>(nullptr, 0,
+                                                          bucket_ranges_);
+}
+
 bool SampleVectorBase::AddSubtractImpl(SampleCountIterator* iter,
                                        HistogramSamples::Operator op) {
   // Stop now if there's nothing to do.
@@ -184,6 +325,16 @@
     // Ensure that the sample's min/max match the ranges min/max.
     if (min != bucket_ranges_->range(dest_index) ||
         max != bucket_ranges_->range(dest_index + 1)) {
+#if !BUILDFLAG(IS_NACL)
+      // TODO(crbug/1432981): Remove these. They are used to investigate
+      // unexpected failures.
+      SCOPED_CRASH_KEY_NUMBER("SampleVector", "min", min);
+      SCOPED_CRASH_KEY_NUMBER("SampleVector", "max", max);
+      SCOPED_CRASH_KEY_NUMBER("SampleVector", "range_min",
+                              bucket_ranges_->range(dest_index));
+      SCOPED_CRASH_KEY_NUMBER("SampleVector", "range_max",
+                              bucket_ranges_->range(dest_index + 1));
+#endif  // !BUILDFLAG(IS_NACL)
       NOTREACHED() << "sample=" << min << "," << max
                    << "; range=" << bucket_ranges_->range(dest_index) << ","
                    << bucket_ranges_->range(dest_index + 1);
@@ -212,14 +363,29 @@
   }
 }
 
-// Use simple binary search.  This is very general, but there are better
-// approaches if we knew that the buckets were linearly distributed.
+// Uses simple binary search, or calculates the index directly if this is an
+// "exact" linear histogram. Binary search is very general, but there are
+// better approaches if we knew that the buckets were linearly distributed.
 size_t SampleVectorBase::GetBucketIndex(Sample value) const {
   size_t bucket_count = bucket_ranges_->bucket_count();
   CHECK_GE(bucket_count, 1u);
   CHECK_GE(value, bucket_ranges_->range(0));
   CHECK_LT(value, bucket_ranges_->range(bucket_count));
 
+  // For "exact" linear histograms, e.g. bucket_count = maximum + 1, their
+  // minimum is 1 and bucket sizes are 1. Thus, we don't need to binary search
+  // the bucket index. The bucket index for bucket |value| is just the |value|.
+  Sample maximum = bucket_ranges_->range(bucket_count - 1);
+  if (maximum == static_cast<Sample>(bucket_count - 1)) {
+    // |value| is in the underflow bucket.
+    if (value < 1)
+      return 0;
+    // |value| is in the overflow bucket.
+    if (value > maximum)
+      return bucket_count - 1;
+    return static_cast<size_t>(value);
+  }
+
   size_t under = 0;
   size_t over = bucket_count;
   size_t mid;
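
To sanity-check the fast path above: a linear histogram with maximum = 100 and bucket_count = 101 has range(i) == i, so GetBucketIndex(42) is just 42, with values below 1 clamped to the underflow bucket and values above the maximum clamped to the overflow bucket. The same computation in isolation, as a standalone sketch:

    // Index for an "exact" linear histogram, where range(i) == i.
    size_t ExactLinearBucketIndex(int value, size_t bucket_count) {
      const int maximum = static_cast<int>(bucket_count - 1);
      if (value < 1)
        return 0;                 // Underflow bucket.
      if (value > maximum)
        return bucket_count - 1;  // Overflow bucket.
      return static_cast<size_t>(value);
    }
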
@@ -243,13 +409,19 @@
   DCHECK(counts());
 
   // Disable the single-sample since there is now counts storage for the data.
-  SingleSample sample = single_sample().Extract(/*disable=*/true);
+  SingleSample sample = single_sample().ExtractAndDisable();
 
   // Stop here if there is no "count" as trying to find the bucket index of
   // an invalid (including zero) "value" will crash.
   if (sample.count == 0)
     return;
 
+  // Stop here if the sample bucket would be out of range for the AtomicCount
+  // array.
+  if (sample.bucket >= counts_size()) {
+    return;
+  }
+
   // Move the value into storage. Sum and redundant-count already account
   // for this entry so no need to call IncreaseSumAndCount().
   subtle::NoBarrier_AtomicIncrement(&counts()[sample.bucket], sample.count);
@@ -262,9 +434,9 @@
   // concurrent entry into the code below; access and updates to |counts_|
   // still requires atomic operations.
   static LazyInstance<Lock>::Leaky counts_lock = LAZY_INSTANCE_INITIALIZER;
-  if (subtle::NoBarrier_Load(&counts_) == 0) {
+  if (!counts_.load(std::memory_order_relaxed)) {
     AutoLock lock(counts_lock.Get());
-    if (subtle::NoBarrier_Load(&counts_) == 0) {
+    if (!counts_.load(std::memory_order_relaxed)) {
       // Create the actual counts storage while the above lock is acquired.
       HistogramBase::Count* counts = CreateCountsStorageWhileLocked();
       DCHECK(counts);
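
The relaxed load, lock, re-check sequence above is classic double-checked initialization of lazily created storage. A self-contained sketch of the same shape; note the sketch publishes with release/acquire directly, whereas the histogram code performs its publication inside set_counts():

    #include <atomic>
    #include <mutex>

    std::atomic<int*> g_counts{nullptr};
    std::mutex g_counts_lock;

    int* GetOrCreateCounts(size_t size) {
      // Fast path; pairs with the release store below.
      int* existing = g_counts.load(std::memory_order_acquire);
      if (existing)
        return existing;
      std::lock_guard<std::mutex> lock(g_counts_lock);
      existing = g_counts.load(std::memory_order_relaxed);
      if (!existing) {
        existing = new int[size]();  // Zero-initialized, created under the lock.
        g_counts.store(existing, std::memory_order_release);
      }
      return existing;
    }
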
@@ -286,17 +458,108 @@
     : SampleVector(0, bucket_ranges) {}
 
 SampleVector::SampleVector(uint64_t id, const BucketRanges* bucket_ranges)
-    : SampleVectorBase(id, new LocalMetadata(), bucket_ranges) {}
+    : SampleVectorBase(id, std::make_unique<LocalMetadata>(), bucket_ranges) {}
 
-SampleVector::~SampleVector() {
-  delete static_cast<LocalMetadata*>(meta());
-}
+SampleVector::~SampleVector() = default;
 
 bool SampleVector::MountExistingCountsStorage() const {
   // There is never any existing storage other than what is already in use.
   return counts() != nullptr;
 }
 
+std::string SampleVector::GetAsciiHeader(StringPiece histogram_name,
+                                         int32_t flags) const {
+  Count sample_count = TotalCount();
+  std::string output;
+  StringAppendF(&output, "Histogram: %.*s recorded %d samples",
+                static_cast<int>(histogram_name.size()), histogram_name.data(),
+                sample_count);
+  if (sample_count == 0) {
+    DCHECK_EQ(sum(), 0);
+  } else {
+    double mean = static_cast<float>(sum()) / sample_count;
+    StringAppendF(&output, ", mean = %.1f", mean);
+  }
+  if (flags)
+    StringAppendF(&output, " (flags = 0x%x)", flags);
+  return output;
+}
+
+std::string SampleVector::GetAsciiBody() const {
+  Count sample_count = TotalCount();
+
+  // Prepare to normalize graphical rendering of bucket contents.
+  double max_size = 0;
+  double scaling_factor = 1;
+  max_size = GetPeakBucketSize();
+  // Scale histogram bucket counts to take at most 72 characters.
+  // Note: Keep in sync w/ kLineLength histogram_samples.cc
+  const double kLineLength = 72;
+  if (max_size > kLineLength)
+    scaling_factor = kLineLength / max_size;
+
+  // Calculate largest print width needed for any of our bucket range displays.
+  size_t print_width = 1;
+  for (uint32_t i = 0; i < bucket_count(); ++i) {
+    if (GetCountAtIndex(i)) {
+      size_t width =
+          GetSimpleAsciiBucketRange(bucket_ranges()->range(i)).size() + 1;
+      if (width > print_width)
+        print_width = width;
+    }
+  }
+
+  int64_t remaining = sample_count;
+  int64_t past = 0;
+  std::string output;
+  // Output the actual histogram graph.
+  for (uint32_t i = 0; i < bucket_count(); ++i) {
+    Count current = GetCountAtIndex(i);
+    remaining -= current;
+    std::string range = GetSimpleAsciiBucketRange(bucket_ranges()->range(i));
+    output.append(range);
+    for (size_t j = 0; range.size() + j < print_width + 1; ++j)
+      output.push_back(' ');
+    if (0 == current && i < bucket_count() - 1 && 0 == GetCountAtIndex(i + 1)) {
+      while (i < bucket_count() - 1 && 0 == GetCountAtIndex(i + 1)) {
+        ++i;
+      }
+      output.append("... \n");
+      continue;  // No reason to plot emptiness.
+    }
+    Count current_size = round(current * scaling_factor);
+    WriteAsciiBucketGraph(current_size, kLineLength, &output);
+    WriteAsciiBucketContext(past, current, remaining, i, &output);
+    output.append("\n");
+    past += current;
+  }
+  DCHECK_EQ(sample_count, past);
+  return output;
+}
+
+double SampleVector::GetPeakBucketSize() const {
+  Count max = 0;
+  for (uint32_t i = 0; i < bucket_count(); ++i) {
+    Count current = GetCountAtIndex(i);
+    if (current > max)
+      max = current;
+  }
+  return max;
+}
+
+void SampleVector::WriteAsciiBucketContext(int64_t past,
+                                           Count current,
+                                           int64_t remaining,
+                                           uint32_t current_bucket_index,
+                                           std::string* output) const {
+  double scaled_sum = (past + current + remaining) / 100.0;
+  WriteAsciiBucketValue(current, scaled_sum, output);
+  if (0 < current_bucket_index) {
+    double percentage = past / scaled_sum;
+    StringAppendF(output, " {%3.1f%%}", percentage);
+  }
+}
+
 HistogramBase::AtomicCount* SampleVector::CreateCountsStorageWhileLocked() {
   local_counts_.resize(counts_size());
   return &local_counts_[0];
@@ -361,69 +624,4 @@
   return static_cast<HistogramBase::AtomicCount*>(mem);
 }
 
-SampleVectorIterator::SampleVectorIterator(
-    const std::vector<HistogramBase::AtomicCount>* counts,
-    const BucketRanges* bucket_ranges)
-    : counts_(&(*counts)[0]),
-      counts_size_(counts->size()),
-      bucket_ranges_(bucket_ranges),
-      index_(0) {
-  DCHECK_GE(bucket_ranges_->bucket_count(), counts_size_);
-  SkipEmptyBuckets();
-}
-
-SampleVectorIterator::SampleVectorIterator(
-    const HistogramBase::AtomicCount* counts,
-    size_t counts_size,
-    const BucketRanges* bucket_ranges)
-    : counts_(counts),
-      counts_size_(counts_size),
-      bucket_ranges_(bucket_ranges),
-      index_(0) {
-  DCHECK_GE(bucket_ranges_->bucket_count(), counts_size_);
-  SkipEmptyBuckets();
-}
-
-SampleVectorIterator::~SampleVectorIterator() = default;
-
-bool SampleVectorIterator::Done() const {
-  return index_ >= counts_size_;
-}
-
-void SampleVectorIterator::Next() {
-  DCHECK(!Done());
-  index_++;
-  SkipEmptyBuckets();
-}
-
-void SampleVectorIterator::Get(HistogramBase::Sample* min,
-                               int64_t* max,
-                               HistogramBase::Count* count) const {
-  DCHECK(!Done());
-  if (min != nullptr)
-    *min = bucket_ranges_->range(index_);
-  if (max != nullptr)
-    *max = strict_cast<int64_t>(bucket_ranges_->range(index_ + 1));
-  if (count != nullptr)
-    *count = subtle::NoBarrier_Load(&counts_[index_]);
-}
-
-bool SampleVectorIterator::GetBucketIndex(size_t* index) const {
-  DCHECK(!Done());
-  if (index != nullptr)
-    *index = index_;
-  return true;
-}
-
-void SampleVectorIterator::SkipEmptyBuckets() {
-  if (Done())
-    return;
-
-  while (index_ < counts_size_) {
-    if (subtle::NoBarrier_Load(&counts_[index_]) != 0)
-      return;
-    index_++;
-  }
-}
-
 }  // namespace base
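
Editor's note: for context on the ASCII rendering added above, GetAsciiBody() normalizes bar widths so the fullest bucket spans at most kLineLength (72) characters. A minimal standalone sketch of that normalization with illustrative counts — not part of the patch:

```cpp
#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

int main() {
  // Hypothetical bucket counts; the largest (500) sets the scale.
  const std::vector<int> counts = {3, 500, 120};
  const double kLineLength = 72;  // Maximum bar width, as in the patch.
  const int max_count = *std::max_element(counts.begin(), counts.end());
  const double scaling_factor =
      max_count > kLineLength ? kLineLength / max_count : 1.0;
  for (int count : counts) {
    // round-to-nearest mirrors the patch; each '-' is one scaled unit.
    const int width = static_cast<int>(count * scaling_factor + 0.5);
    std::printf("%-6d %s\n", count, std::string(width, '-').c_str());
  }
}
```
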
diff --git a/base/metrics/sample_vector.h b/base/metrics/sample_vector.h
index 68ae7a5..0160f59 100644
--- a/base/metrics/sample_vector.h
+++ b/base/metrics/sample_vector.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -8,18 +8,22 @@
 #ifndef BASE_METRICS_SAMPLE_VECTOR_H_
 #define BASE_METRICS_SAMPLE_VECTOR_H_
 
+#include <stddef.h>
+#include <stdint.h>
+
+#include <atomic>
 #include <memory>
+#include <string>
 #include <vector>
 
-#include "base/atomicops.h"
+#include "base/base_export.h"
 #include "base/compiler_specific.h"
 #include "base/gtest_prod_util.h"
-#include "base/macros.h"
+#include "base/memory/raw_ptr.h"
 #include "base/metrics/bucket_ranges.h"
 #include "base/metrics/histogram_base.h"
 #include "base/metrics/histogram_samples.h"
 #include "base/metrics/persistent_memory_allocator.h"
-#include "starboard/types.h"
 
 namespace base {
 
@@ -27,9 +31,8 @@
 
 class BASE_EXPORT SampleVectorBase : public HistogramSamples {
  public:
-  SampleVectorBase(uint64_t id,
-                   Metadata* meta,
-                   const BucketRanges* bucket_ranges);
+  SampleVectorBase(const SampleVectorBase&) = delete;
+  SampleVectorBase& operator=(const SampleVectorBase&) = delete;
   ~SampleVectorBase() override;
 
   // HistogramSamples:
@@ -38,6 +41,7 @@
   HistogramBase::Count GetCount(HistogramBase::Sample value) const override;
   HistogramBase::Count TotalCount() const override;
   std::unique_ptr<SampleCountIterator> Iterator() const override;
+  std::unique_ptr<SampleCountIterator> ExtractingIterator() override;
 
   // Get count of a specific bucket.
   HistogramBase::Count GetCountAtIndex(size_t bucket_index) const;
@@ -46,6 +50,13 @@
   const BucketRanges* bucket_ranges() const { return bucket_ranges_; }
 
  protected:
+  SampleVectorBase(uint64_t id,
+                   Metadata* meta,
+                   const BucketRanges* bucket_ranges);
+  SampleVectorBase(uint64_t id,
+                   std::unique_ptr<Metadata> meta,
+                   const BucketRanges* bucket_ranges);
+
   bool AddSubtractImpl(
       SampleCountIterator* iter,
       HistogramSamples::Operator op) override;  // |op| is ADD or SUBTRACT.
@@ -70,17 +81,15 @@
   virtual HistogramBase::Count* CreateCountsStorageWhileLocked() = 0;
 
   HistogramBase::AtomicCount* counts() {
-    return reinterpret_cast<HistogramBase::AtomicCount*>(
-        subtle::Acquire_Load(&counts_));
+    return counts_.load(std::memory_order_acquire);
   }
 
   const HistogramBase::AtomicCount* counts() const {
-    return reinterpret_cast<HistogramBase::AtomicCount*>(
-        subtle::Acquire_Load(&counts_));
+    return counts_.load(std::memory_order_acquire);
   }
 
-  void set_counts(const HistogramBase::AtomicCount* counts) const {
-    subtle::Release_Store(&counts_, reinterpret_cast<uintptr_t>(counts));
+  void set_counts(HistogramBase::AtomicCount* counts) const {
+    counts_.store(counts, std::memory_order_release);
   }
 
   size_t counts_size() const { return bucket_ranges_->bucket_count(); }
@@ -91,8 +100,8 @@
   FRIEND_TEST_ALL_PREFIXES(SharedHistogramTest, CorruptSampleCounts);
 
   // |counts_| is actually a pointer to a HistogramBase::AtomicCount array but
-  // is held as an AtomicWord for concurrency reasons. When combined with the
-  // single_sample held in the metadata, there are four possible states:
+  // is held as an atomic pointer for concurrency reasons. When combined with
+  // the single_sample held in the metadata, there are four possible states:
   //   1) single_sample == zero, counts_ == null
   //   2) single_sample != zero, counts_ == null
   //   3) single_sample != zero, counts_ != null BUT IS EMPTY
@@ -101,12 +110,10 @@
   // must be moved to this storage. It is mutable because changing it doesn't
   // change the (const) data but must adapt if a non-const object causes the
   // storage to be allocated and updated.
-  mutable subtle::AtomicWord counts_ = 0;
+  mutable std::atomic<HistogramBase::AtomicCount*> counts_{nullptr};
 
   // Shares the same BucketRanges with Histogram object.
-  const BucketRanges* const bucket_ranges_;
-
-  DISALLOW_COPY_AND_ASSIGN(SampleVectorBase);
+  const raw_ptr<const BucketRanges> bucket_ranges_;
 };
 
 // A sample vector that uses local memory for the counts array.
@@ -114,17 +121,37 @@
  public:
   explicit SampleVector(const BucketRanges* bucket_ranges);
   SampleVector(uint64_t id, const BucketRanges* bucket_ranges);
+  SampleVector(const SampleVector&) = delete;
+  SampleVector& operator=(const SampleVector&) = delete;
   ~SampleVector() override;
 
  private:
+  FRIEND_TEST_ALL_PREFIXES(SampleVectorTest, GetPeakBucketSize);
+
+  // HistogramSamples:
+  std::string GetAsciiBody() const override;
+  std::string GetAsciiHeader(StringPiece histogram_name,
+                             int32_t flags) const override;
+
   // SampleVectorBase:
   bool MountExistingCountsStorage() const override;
   HistogramBase::Count* CreateCountsStorageWhileLocked() override;
 
+  // Writes cumulative percentage information based on the number
+  // of past, current, and remaining bucket samples.
+  void WriteAsciiBucketContext(int64_t past,
+                               HistogramBase::Count current,
+                               int64_t remaining,
+                               uint32_t current_bucket_index,
+                               std::string* output) const;
+
+  // Finds out how large (graphically) the largest bucket will appear to be.
+  double GetPeakBucketSize() const;
+
+  size_t bucket_count() const { return bucket_ranges()->bucket_count(); }
+
   // Simple local storage for counts.
   mutable std::vector<HistogramBase::AtomicCount> local_counts_;
-
-  DISALLOW_COPY_AND_ASSIGN(SampleVector);
 };
 
 // A sample vector that uses persistent memory for the counts array.
@@ -134,6 +161,8 @@
                          const BucketRanges* bucket_ranges,
                          Metadata* meta,
                          const DelayedPersistentAllocation& counts);
+  PersistentSampleVector(const PersistentSampleVector&) = delete;
+  PersistentSampleVector& operator=(const PersistentSampleVector&) = delete;
   ~PersistentSampleVector() override;
 
  private:
@@ -143,39 +172,6 @@
 
   // Persistent storage for counts.
   DelayedPersistentAllocation persistent_counts_;
-
-  DISALLOW_COPY_AND_ASSIGN(PersistentSampleVector);
-};
-
-// An iterator for sample vectors. This could be defined privately in the .cc
-// file but is here for easy testing.
-class BASE_EXPORT SampleVectorIterator : public SampleCountIterator {
- public:
-  SampleVectorIterator(const std::vector<HistogramBase::AtomicCount>* counts,
-                       const BucketRanges* bucket_ranges);
-  SampleVectorIterator(const HistogramBase::AtomicCount* counts,
-                       size_t counts_size,
-                       const BucketRanges* bucket_ranges);
-  ~SampleVectorIterator() override;
-
-  // SampleCountIterator implementation:
-  bool Done() const override;
-  void Next() override;
-  void Get(HistogramBase::Sample* min,
-           int64_t* max,
-           HistogramBase::Count* count) const override;
-
-  // SampleVector uses predefined buckets, so iterator can return bucket index.
-  bool GetBucketIndex(size_t* index) const override;
-
- private:
-  void SkipEmptyBuckets();
-
-  const HistogramBase::AtomicCount* counts_;
-  size_t counts_size_;
-  const BucketRanges* bucket_ranges_;
-
-  size_t index_;
 };
 
 }  // namespace base
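
Editor's note: |counts_| migrates from subtle::AtomicWord to std::atomic<AtomicCount*> but keeps the same lazy-publication protocol — readers acquire-load the pointer, and the writer that wins the lock publishes fully constructed storage with a release-store. A self-contained sketch of that pattern, assuming a plain std::vector as backing storage (illustrative types, not the patch's):

```cpp
#include <atomic>
#include <cstddef>
#include <mutex>
#include <vector>

using Count = int;

class LazyCounts {
 public:
  // Returns the counts array, allocating it at most once. Concurrent
  // callers either observe nullptr (and race for the lock) or a fully
  // constructed array, thanks to the release/acquire pairing.
  Count* Get(size_t size) {
    Count* counts = counts_.load(std::memory_order_acquire);
    if (counts)
      return counts;
    std::lock_guard<std::mutex> lock(lock_);
    // Re-check under the lock: another thread may have won the race.
    counts = counts_.load(std::memory_order_relaxed);
    if (!counts) {
      storage_.resize(size);
      counts = storage_.data();
      counts_.store(counts, std::memory_order_release);
    }
    return counts;
  }

 private:
  std::mutex lock_;
  std::vector<Count> storage_;
  std::atomic<Count*> counts_{nullptr};
};
```
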
diff --git a/base/metrics/sample_vector_unittest.cc b/base/metrics/sample_vector_unittest.cc
index 1b1f51d..fb35786 100644
--- a/base/metrics/sample_vector_unittest.cc
+++ b/base/metrics/sample_vector_unittest.cc
@@ -1,10 +1,11 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "base/metrics/sample_vector.h"
 
 #include <limits.h>
+#include <stddef.h>
 
 #include <atomic>
 #include <memory>
@@ -14,8 +15,6 @@
 #include "base/metrics/histogram.h"
 #include "base/metrics/persistent_memory_allocator.h"
 #include "base/test/gtest_util.h"
-#include "starboard/memory.h"
-#include "starboard/types.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
@@ -219,53 +218,26 @@
   ranges.set_range(3, 3);
   ranges.set_range(4, 4);
 
-  std::vector<HistogramBase::Count> counts(3);
-  counts[0] = 1;
-  counts[1] = 0;  // Iterator will bypass this empty bucket.
-  counts[2] = 2;
-
-  // BucketRanges can have larger size than counts.
-  SampleVectorIterator it(&counts, &ranges);
-  size_t index;
-
-  HistogramBase::Sample min;
-  int64_t max;
-  HistogramBase::Count count;
-  it.Get(&min, &max, &count);
-  EXPECT_EQ(0, min);
-  EXPECT_EQ(1, max);
-  EXPECT_EQ(1, count);
-  EXPECT_TRUE(it.GetBucketIndex(&index));
-  EXPECT_EQ(0u, index);
-
-  it.Next();
-  it.Get(&min, &max, &count);
-  EXPECT_EQ(2, min);
-  EXPECT_EQ(3, max);
-  EXPECT_EQ(2, count);
-  EXPECT_TRUE(it.GetBucketIndex(&index));
-  EXPECT_EQ(2u, index);
-
-  it.Next();
-  EXPECT_TRUE(it.Done());
-
   // Create iterator from SampleVector.
   SampleVector samples(1, &ranges);
-  samples.Accumulate(0, 0);
+  samples.Accumulate(0, 0);  // Iterator will bypass this empty bucket.
   samples.Accumulate(1, 1);
   samples.Accumulate(2, 2);
   samples.Accumulate(3, 3);
-  std::unique_ptr<SampleCountIterator> it2 = samples.Iterator();
+  std::unique_ptr<SampleCountIterator> it = samples.Iterator();
 
   int i;
-  for (i = 1; !it2->Done(); i++, it2->Next()) {
-    it2->Get(&min, &max, &count);
+  size_t index;
+  HistogramBase::Sample min;
+  int64_t max;
+  HistogramBase::Count count;
+  for (i = 1; !it->Done(); i++, it->Next()) {
+    it->Get(&min, &max, &count);
     EXPECT_EQ(i, min);
     EXPECT_EQ(i + 1, max);
     EXPECT_EQ(i, count);
 
-    size_t index;
-    EXPECT_TRUE(it2->GetBucketIndex(&index));
+    EXPECT_TRUE(it->GetBucketIndex(&index));
     EXPECT_EQ(static_cast<size_t>(i), index);
   }
   EXPECT_EQ(4, i);
@@ -543,4 +515,19 @@
   EXPECT_EQ(200, samples2.GetCount(8));
 }
 
+// Tests GetPeakBucketSize() returns accurate max bucket size.
+TEST_F(SampleVectorTest, GetPeakBucketSize) {
+  // Custom buckets: [1, 5) [5, 10) [10, 20)
+  BucketRanges ranges(4);
+  ranges.set_range(0, 1);
+  ranges.set_range(1, 5);
+  ranges.set_range(2, 10);
+  ranges.set_range(3, 20);
+  SampleVector samples(1, &ranges);
+  samples.Accumulate(3, 1);
+  samples.Accumulate(6, 2);
+  samples.Accumulate(12, 3);
+  EXPECT_EQ(3, samples.GetPeakBucketSize());
+}
+
 }  // namespace base
diff --git a/base/metrics/single_sample_metrics.cc b/base/metrics/single_sample_metrics.cc
index 57c1c8f..6e25b64 100644
--- a/base/metrics/single_sample_metrics.cc
+++ b/base/metrics/single_sample_metrics.cc
@@ -1,4 +1,4 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
+// Copyright 2017 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
diff --git a/base/metrics/single_sample_metrics.h b/base/metrics/single_sample_metrics.h
index b966cb1..2ae2957 100644
--- a/base/metrics/single_sample_metrics.h
+++ b/base/metrics/single_sample_metrics.h
@@ -1,14 +1,15 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
+// Copyright 2017 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #ifndef BASE_METRICS_SINGLE_SAMPLE_METRICS_H_
 #define BASE_METRICS_SINGLE_SAMPLE_METRICS_H_
 
+#include <memory>
 #include <string>
 
 #include "base/base_export.h"
-#include "base/macros.h"
+#include "base/memory/raw_ptr.h"
 #include "base/metrics/histogram_base.h"
 
 namespace base {
@@ -64,6 +65,10 @@
     : public SingleSampleMetricsFactory {
  public:
   DefaultSingleSampleMetricsFactory() = default;
+  DefaultSingleSampleMetricsFactory(const DefaultSingleSampleMetricsFactory&) =
+      delete;
+  DefaultSingleSampleMetricsFactory& operator=(
+      const DefaultSingleSampleMetricsFactory&) = delete;
   ~DefaultSingleSampleMetricsFactory() override = default;
 
   // SingleSampleMetricsFactory:
@@ -72,9 +77,6 @@
       HistogramBase::Sample min,
       HistogramBase::Sample max,
       uint32_t bucket_count) override;
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(DefaultSingleSampleMetricsFactory);
 };
 
 class BASE_EXPORT DefaultSingleSampleMetric : public SingleSampleMetric {
@@ -84,19 +86,22 @@
                             HistogramBase::Sample max,
                             uint32_t bucket_count,
                             int32_t flags);
+
+  DefaultSingleSampleMetric(const DefaultSingleSampleMetric&) = delete;
+  DefaultSingleSampleMetric& operator=(const DefaultSingleSampleMetric&) =
+      delete;
+
   ~DefaultSingleSampleMetric() override;
 
   // SingleSampleMetric:
   void SetSample(HistogramBase::Sample sample) override;
 
  private:
-  HistogramBase* const histogram_;
+  const raw_ptr<HistogramBase> histogram_;
 
   // The last sample provided to SetSample(). We use -1 as a sentinel value to
   // indicate no sample has been set.
   HistogramBase::Sample sample_ = -1;
-
-  DISALLOW_COPY_AND_ASSIGN(DefaultSingleSampleMetric);
 };
 
 }  // namespace base
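
Editor's note: a hedged usage sketch of the factory declared above. SingleSampleMetricsFactory::Get() and CreateCustomCountsMetric() follow the Chromium API; the histogram name and values are hypothetical:

```cpp
#include <memory>

#include "base/metrics/single_sample_metrics.h"

void RecordPlaybackHeight(int height) {
  // The metric keeps only the most recent sample; the histogram entry is
  // written once, when |metric| is destroyed.
  std::unique_ptr<base::SingleSampleMetric> metric =
      base::SingleSampleMetricsFactory::Get()->CreateCustomCountsMetric(
          "Hypothetical.Playback.Height", /*min=*/1, /*max=*/4096,
          /*bucket_count=*/50);
  metric->SetSample(height);  // May be called repeatedly...
  metric->SetSample(height);  // ...only the last value is reported.
}
```
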
diff --git a/base/metrics/single_sample_metrics_unittest.cc b/base/metrics/single_sample_metrics_unittest.cc
index 974ba7e..412e31b 100644
--- a/base/metrics/single_sample_metrics_unittest.cc
+++ b/base/metrics/single_sample_metrics_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
+// Copyright 2017 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -22,13 +22,13 @@
  public:
   SingleSampleMetricsTest() = default;
 
+  SingleSampleMetricsTest(const SingleSampleMetricsTest&) = delete;
+  SingleSampleMetricsTest& operator=(const SingleSampleMetricsTest&) = delete;
+
   ~SingleSampleMetricsTest() override {
     // Ensure we cleanup after ourselves.
     SingleSampleMetricsFactory::DeleteFactoryForTesting();
   }
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(SingleSampleMetricsTest);
 };
 
 }  // namespace
diff --git a/base/metrics/sparse_histogram.cc b/base/metrics/sparse_histogram.cc
index 30175a0..dc841ec 100644
--- a/base/metrics/sparse_histogram.cc
+++ b/base/metrics/sparse_histogram.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -6,6 +6,7 @@
 
 #include <utility>
 
+#include "base/logging.h"
 #include "base/memory/ptr_util.h"
 #include "base/metrics/dummy_histogram.h"
 #include "base/metrics/metrics_hashes.h"
@@ -13,9 +14,11 @@
 #include "base/metrics/persistent_sample_map.h"
 #include "base/metrics/sample_map.h"
 #include "base/metrics/statistics_recorder.h"
+#include "base/notreached.h"
 #include "base/pickle.h"
-#include "base/strings/stringprintf.h"
+#include "base/strings/utf_string_conversions.h"
 #include "base/synchronization/lock.h"
+#include "base/values.h"
 
 namespace base {
 
@@ -30,7 +33,7 @@
     // TODO(gayane): |HashMetricName| is called again in Histogram constructor.
     // Refactor code to avoid the additional call.
     bool should_record =
-        StatisticsRecorder::ShouldRecordHistogram(HashMetricName(name));
+        StatisticsRecorder::ShouldRecordHistogram(HashMetricNameAs32Bits(name));
     if (!should_record)
       return DummyHistogram::GetInstance();
     // Try to create the histogram using a "persistent" allocator. As of
@@ -81,8 +84,7 @@
     const char* name,
     HistogramSamples::Metadata* meta,
     HistogramSamples::Metadata* logged_meta) {
-  return WrapUnique(
-      new SparseHistogram(allocator, name, meta, logged_meta));
+  return WrapUnique(new SparseHistogram(allocator, name, meta, logged_meta));
 }
 
 SparseHistogram::~SparseHistogram() = default;
@@ -98,7 +100,7 @@
 bool SparseHistogram::HasConstructionArguments(
     Sample expected_minimum,
     Sample expected_maximum,
-    uint32_t expected_bucket_count) const {
+    size_t expected_bucket_count) const {
   // SparseHistogram never has min/max/bucket_count limit.
   return false;
 }
@@ -117,7 +119,8 @@
     unlogged_samples_->Accumulate(value, count);
   }
 
-  FindAndRunCallback(value);
+  if (UNLIKELY(StatisticsRecorder::have_active_callbacks()))
+    FindAndRunCallbacks(value);
 }
 
 std::unique_ptr<HistogramSamples> SparseHistogram::SnapshotSamples() const {
@@ -129,14 +132,31 @@
   return std::move(snapshot);
 }
 
-std::unique_ptr<HistogramSamples> SparseHistogram::SnapshotDelta() {
-  DCHECK(!final_delta_created_);
-
+std::unique_ptr<HistogramSamples> SparseHistogram::SnapshotUnloggedSamples()
+    const {
   std::unique_ptr<SampleMap> snapshot(new SampleMap(name_hash()));
+
   base::AutoLock auto_lock(lock_);
   snapshot->Add(*unlogged_samples_);
 
-  unlogged_samples_->Subtract(*snapshot);
+  return std::move(snapshot);
+}
+
+void SparseHistogram::MarkSamplesAsLogged(const HistogramSamples& samples) {
+  DCHECK(!final_delta_created_);
+
+  base::AutoLock auto_lock(lock_);
+  unlogged_samples_->Subtract(samples);
+  logged_samples_->Add(samples);
+}
+
+std::unique_ptr<HistogramSamples> SparseHistogram::SnapshotDelta() {
+  DCHECK(!final_delta_created_);
+
+  std::unique_ptr<SampleMap> snapshot =
+      std::make_unique<SampleMap>(name_hash());
+  base::AutoLock auto_lock(lock_);
+  snapshot->Extract(*unlogged_samples_);
   logged_samples_->Add(*snapshot);
   return std::move(snapshot);
 }
@@ -162,14 +182,9 @@
   return unlogged_samples_->AddFromPickle(iter);
 }
 
-void SparseHistogram::WriteHTMLGraph(std::string* output) const {
-  output->append("<PRE>");
-  WriteAsciiImpl(true, "<br>", output);
-  output->append("</PRE>");
-}
-
-void SparseHistogram::WriteAscii(std::string* output) const {
-  WriteAsciiImpl(true, "\n", output);
+base::Value::Dict SparseHistogram::ToGraphDict() const {
+  std::unique_ptr<HistogramSamples> snapshot = SnapshotSamples();
+  return snapshot->ToGraphDict(histogram_name(), flags());
 }
 
 void SparseHistogram::SerializeInfoImpl(Pickle* pickle) const {
@@ -216,75 +231,12 @@
   return SparseHistogram::FactoryGet(histogram_name, flags);
 }
 
-void SparseHistogram::GetParameters(DictionaryValue* params) const {
-  // TODO(kaiwang): Implement. (See HistogramBase::WriteJSON.)
-}
-
-void SparseHistogram::GetCountAndBucketData(Count* count,
-                                            int64_t* sum,
-                                            ListValue* buckets) const {
-  // TODO(kaiwang): Implement. (See HistogramBase::WriteJSON.)
-}
-
-void SparseHistogram::WriteAsciiImpl(bool graph_it,
-                                     const std::string& newline,
-                                     std::string* output) const {
-  // Get a local copy of the data so we are consistent.
-  std::unique_ptr<HistogramSamples> snapshot = SnapshotSamples();
-  Count total_count = snapshot->TotalCount();
-  double scaled_total_count = total_count / 100.0;
-
-  WriteAsciiHeader(total_count, output);
-  output->append(newline);
-
-  // Determine how wide the largest bucket range is (how many digits to print),
-  // so that we'll be able to right-align starts for the graphical bars.
-  // Determine which bucket has the largest sample count so that we can
-  // normalize the graphical bar-width relative to that sample count.
-  Count largest_count = 0;
-  Sample largest_sample = 0;
-  std::unique_ptr<SampleCountIterator> it = snapshot->Iterator();
-  while (!it->Done()) {
-    Sample min;
-    int64_t max;
-    Count count;
-    it->Get(&min, &max, &count);
-    if (min > largest_sample)
-      largest_sample = min;
-    if (count > largest_count)
-      largest_count = count;
-    it->Next();
-  }
-  size_t print_width = GetSimpleAsciiBucketRange(largest_sample).size() + 1;
-
-  // iterate over each item and display them
-  it = snapshot->Iterator();
-  while (!it->Done()) {
-    Sample min;
-    int64_t max;
-    Count count;
-    it->Get(&min, &max, &count);
-
-    // value is min, so display it
-    std::string range = GetSimpleAsciiBucketRange(min);
-    output->append(range);
-    for (size_t j = 0; range.size() + j < print_width + 1; ++j)
-      output->push_back(' ');
-
-    if (graph_it)
-      WriteAsciiBucketGraph(count, largest_count, output);
-    WriteAsciiBucketValue(count, scaled_total_count, output);
-    output->append(newline);
-    it->Next();
-  }
-}
-
-void SparseHistogram::WriteAsciiHeader(const Count total_count,
-                                       std::string* output) const {
-  StringAppendF(output, "Histogram: %s recorded %d samples", histogram_name(),
-                total_count);
-  if (flags())
-    StringAppendF(output, " (flags = 0x%x)", flags());
+Value::Dict SparseHistogram::GetParameters() const {
+  // Unlike Histogram::GetParameters, only set the type here; the other
+  // parameters do not make sense for sparse histograms.
+  Value::Dict params;
+  params.Set("type", HistogramTypeToString(GetHistogramType()));
+  return params;
 }
 
 }  // namespace base
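
Editor's note: the reworked SnapshotDelta() above moves pending samples with Extract() instead of the old copy-then-Subtract() sequence. A toy model of the flow using a plain map — illustrative names only, not patch code:

```cpp
#include <cstdio>
#include <map>
#include <utility>

// value -> count, standing in for HistogramSamples.
using Samples = std::map<int, int>;

Samples SnapshotDelta(Samples& unlogged, Samples& logged) {
  // Extract: move everything out of |unlogged| in one step, mirroring
  // snapshot->Extract(*unlogged_samples_) in the patch.
  Samples snapshot = std::move(unlogged);
  unlogged.clear();  // Guarantee a valid empty state after the move.
  // Fold the delta into the logged totals, as logged_samples_->Add() does.
  for (const auto& [value, count] : snapshot)
    logged[value] += count;
  return snapshot;
}

int main() {
  Samples unlogged = {{1, 1}, {2, 2}}, logged;
  Samples delta = SnapshotDelta(unlogged, logged);
  std::printf("delta=%zu unlogged=%zu logged=%zu\n", delta.size(),
              unlogged.size(), logged.size());  // delta=2 unlogged=0 logged=2
}
```
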
diff --git a/base/metrics/sparse_histogram.h b/base/metrics/sparse_histogram.h
index 8b92204..f547ced 100644
--- a/base/metrics/sparse_histogram.h
+++ b/base/metrics/sparse_histogram.h
@@ -1,20 +1,22 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #ifndef BASE_METRICS_SPARSE_HISTOGRAM_H_
 #define BASE_METRICS_SPARSE_HISTOGRAM_H_
 
+#include <stddef.h>
+#include <stdint.h>
+
 #include <map>
 #include <memory>
 #include <string>
 
 #include "base/base_export.h"
-#include "base/macros.h"
 #include "base/metrics/histogram_base.h"
 #include "base/metrics/histogram_samples.h"
 #include "base/synchronization/lock.h"
-#include "starboard/types.h"
+#include "base/values.h"
 
 namespace base {
 
@@ -37,6 +39,9 @@
       HistogramSamples::Metadata* meta,
       HistogramSamples::Metadata* logged_meta);
 
+  SparseHistogram(const SparseHistogram&) = delete;
+  SparseHistogram& operator=(const SparseHistogram&) = delete;
+
   ~SparseHistogram() override;
 
   // HistogramBase implementation:
@@ -44,16 +49,17 @@
   HistogramType GetHistogramType() const override;
   bool HasConstructionArguments(Sample expected_minimum,
                                 Sample expected_maximum,
-                                uint32_t expected_bucket_count) const override;
+                                size_t expected_bucket_count) const override;
   void Add(Sample value) override;
   void AddCount(Sample value, int count) override;
   void AddSamples(const HistogramSamples& samples) override;
   bool AddSamplesFromPickle(base::PickleIterator* iter) override;
   std::unique_ptr<HistogramSamples> SnapshotSamples() const override;
+  std::unique_ptr<HistogramSamples> SnapshotUnloggedSamples() const override;
+  void MarkSamplesAsLogged(const HistogramSamples& samples) override;
   std::unique_ptr<HistogramSamples> SnapshotDelta() override;
   std::unique_ptr<HistogramSamples> SnapshotFinalDelta() const override;
-  void WriteHTMLGraph(std::string* output) const override;
-  void WriteAscii(std::string* output) const override;
+  base::Value::Dict ToGraphDict() const override;
 
  protected:
   // HistogramBase implementation:
@@ -72,22 +78,12 @@
       base::PickleIterator* iter);
   static HistogramBase* DeserializeInfoImpl(base::PickleIterator* iter);
 
-  void GetParameters(DictionaryValue* params) const override;
-  void GetCountAndBucketData(Count* count,
-                             int64_t* sum,
-                             ListValue* buckets) const override;
+  // Returns a dict containing only the histogram type, since the other
+  // parameters do not apply to sparse histograms.
+  Value::Dict GetParameters() const override;
 
-  // Helpers for emitting Ascii graphic.  Each method appends data to output.
-  void WriteAsciiImpl(bool graph_it,
-                      const std::string& newline,
-                      std::string* output) const;
-
-  // Write a common header message describing this histogram.
-  void WriteAsciiHeader(const Count total_count,
-                        std::string* output) const;
-
-  // For constuctor calling.
+  // For constructor calling.
   friend class SparseHistogramTest;
+  friend class HistogramThreadsafeTest;
 
   // Protects access to |samples_|.
   mutable base::Lock lock_;
@@ -97,8 +93,6 @@
 
   std::unique_ptr<HistogramSamples> unlogged_samples_;
   std::unique_ptr<HistogramSamples> logged_samples_;
-
-  DISALLOW_COPY_AND_ASSIGN(SparseHistogram);
 };
 
 }  // namespace base
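
Editor's note: the new SnapshotUnloggedSamples()/MarkSamplesAsLogged() pair splits SnapshotDelta() into two phases, so a caller can commit samples only after they have been safely persisted. A hedged sketch of that flow; SendToServer() is hypothetical:

```cpp
#include <memory>

#include "base/metrics/histogram_samples.h"
#include "base/metrics/sparse_histogram.h"

// Hypothetical uploader; returns true once the snapshot is persisted.
bool SendToServer(const base::HistogramSamples& samples);

void UploadDelta(base::SparseHistogram* histogram) {
  // Phase 1: read pending samples without marking them as logged.
  std::unique_ptr<base::HistogramSamples> samples =
      histogram->SnapshotUnloggedSamples();
  if (!SendToServer(*samples))
    return;  // Nothing was marked; the samples are retried next time.
  // Phase 2: commit, so a later SnapshotDelta() won't report them again.
  histogram->MarkSamplesAsLogged(*samples);
}
```
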
diff --git a/base/metrics/sparse_histogram_unittest.cc b/base/metrics/sparse_histogram_unittest.cc
index 627e592..7bd3775 100644
--- a/base/metrics/sparse_histogram_unittest.cc
+++ b/base/metrics/sparse_histogram_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -6,7 +6,10 @@
 
 #include <memory>
 #include <string>
+#include <vector>
 
+#include "base/logging.h"
+#include "base/memory/raw_ptr.h"
 #include "base/metrics/histogram_base.h"
 #include "base/metrics/histogram_functions.h"
 #include "base/metrics/histogram_samples.h"
@@ -17,6 +20,7 @@
 #include "base/metrics/statistics_recorder.h"
 #include "base/pickle.h"
 #include "base/strings/stringprintf.h"
+#include "base/values.h"
 #include "testing/gmock/include/gmock/gmock.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
@@ -26,16 +30,23 @@
 // for histogram allocation. False will allocate histograms from the process
 // heap.
 class SparseHistogramTest : public testing::TestWithParam<bool> {
+ public:
+  SparseHistogramTest() : use_persistent_histogram_allocator_(GetParam()) {}
+  SparseHistogramTest(const SparseHistogramTest&) = delete;
+  SparseHistogramTest& operator=(const SparseHistogramTest&) = delete;
+
  protected:
   const int32_t kAllocatorMemorySize = 8 << 20;  // 8 MiB
 
-  SparseHistogramTest()
-      : statistics_recorder_(StatisticsRecorder::CreateTemporaryForTesting()),
-        use_persistent_histogram_allocator_(GetParam()) {}
+  using CountAndBucketData = base::SparseHistogram::CountAndBucketData;
 
   void SetUp() override {
     if (use_persistent_histogram_allocator_)
       CreatePersistentMemoryAllocator();
+
+    // Each test will have a clean state (no Histogram / BucketRanges
+    // registered).
+    InitializeStatisticsRecorder();
   }
 
   void TearDown() override {
@@ -43,9 +54,17 @@
       ASSERT_FALSE(allocator_->IsFull());
       ASSERT_FALSE(allocator_->IsCorrupt());
     }
+    UninitializeStatisticsRecorder();
     DestroyPersistentMemoryAllocator();
   }
 
+  void InitializeStatisticsRecorder() {
+    DCHECK(!statistics_recorder_);
+    statistics_recorder_ = StatisticsRecorder::CreateTemporaryForTesting();
+  }
+
+  void UninitializeStatisticsRecorder() { statistics_recorder_.reset(); }
+
   void CreatePersistentMemoryAllocator() {
     GlobalHistogramAllocator::CreateWithLocalMemory(
         kAllocatorMemorySize, 0, "SparseHistogramAllocatorTest");
@@ -63,20 +82,22 @@
     return std::unique_ptr<SparseHistogram>(new SparseHistogram(name));
   }
 
+  CountAndBucketData GetCountAndBucketData(SparseHistogram* histogram) {
+    // A simple wrapper around |GetCountAndBucketData| to make it visible for
+    // testing.
+    return histogram->GetCountAndBucketData();
+  }
+
   const bool use_persistent_histogram_allocator_;
 
   std::unique_ptr<StatisticsRecorder> statistics_recorder_;
-  PersistentMemoryAllocator* allocator_ = nullptr;
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(SparseHistogramTest);
+  raw_ptr<PersistentMemoryAllocator> allocator_ = nullptr;
 };
 
 // Run all HistogramTest cases with both heap and persistent memory.
-INSTANTIATE_TEST_CASE_P(HeapAndPersistent,
-                        SparseHistogramTest,
-                        testing::Bool());
-
+INSTANTIATE_TEST_SUITE_P(HeapAndPersistent,
+                         SparseHistogramTest,
+                         testing::Bool());
 
 TEST_P(SparseHistogramTest, BasicTest) {
   std::unique_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
@@ -116,6 +137,64 @@
   EXPECT_EQ(25, snapshot2->GetCount(101));
 }
 
+// Check that delta calculations work correctly with SnapshotUnloggedSamples()
+// and MarkSamplesAsLogged().
+TEST_P(SparseHistogramTest, UnloggedSamplesTest) {
+  std::unique_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
+  histogram->AddCount(1, 1);
+  histogram->AddCount(2, 2);
+
+  std::unique_ptr<HistogramSamples> samples =
+      histogram->SnapshotUnloggedSamples();
+  EXPECT_EQ(3, samples->TotalCount());
+  EXPECT_EQ(1, samples->GetCount(1));
+  EXPECT_EQ(2, samples->GetCount(2));
+  EXPECT_EQ(samples->TotalCount(), samples->redundant_count());
+  EXPECT_EQ(5, samples->sum());
+
+  // Snapshot unlogged samples again, which would be the same as above.
+  samples = histogram->SnapshotUnloggedSamples();
+  EXPECT_EQ(3, samples->TotalCount());
+  EXPECT_EQ(1, samples->GetCount(1));
+  EXPECT_EQ(2, samples->GetCount(2));
+  EXPECT_EQ(samples->TotalCount(), samples->redundant_count());
+  EXPECT_EQ(5, samples->sum());
+
+  // Verify that marking the samples as logged works correctly, and that
+  // SnapshotDelta() will not pick up the samples.
+  histogram->MarkSamplesAsLogged(*samples);
+  samples = histogram->SnapshotUnloggedSamples();
+  EXPECT_EQ(0, samples->TotalCount());
+  EXPECT_EQ(samples->TotalCount(), samples->redundant_count());
+  EXPECT_EQ(0, samples->sum());
+  samples = histogram->SnapshotDelta();
+  EXPECT_EQ(0, samples->TotalCount());
+  EXPECT_EQ(samples->TotalCount(), samples->redundant_count());
+  EXPECT_EQ(0, samples->sum());
+
+  // Similarly, verify that SnapshotDelta() marks the samples as logged.
+  histogram->AddCount(1, 1);
+  histogram->AddCount(2, 2);
+  samples = histogram->SnapshotDelta();
+  EXPECT_EQ(3, samples->TotalCount());
+  EXPECT_EQ(1, samples->GetCount(1));
+  EXPECT_EQ(2, samples->GetCount(2));
+  EXPECT_EQ(samples->TotalCount(), samples->redundant_count());
+  EXPECT_EQ(5, samples->sum());
+  samples = histogram->SnapshotUnloggedSamples();
+  EXPECT_EQ(0, samples->TotalCount());
+  EXPECT_EQ(samples->TotalCount(), samples->redundant_count());
+  EXPECT_EQ(0, samples->sum());
+
+  // Verify that the logged samples contain everything emitted.
+  samples = histogram->SnapshotSamples();
+  EXPECT_EQ(6, samples->TotalCount());
+  EXPECT_EQ(samples->TotalCount(), samples->redundant_count());
+  EXPECT_EQ(2, samples->GetCount(1));
+  EXPECT_EQ(4, samples->GetCount(2));
+  EXPECT_EQ(10, samples->sum());
+}
+
 TEST_P(SparseHistogramTest, AddCount_LargeValuesDontOverflow) {
   std::unique_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
   std::unique_ptr<HistogramSamples> snapshot(histogram->SnapshotSamples());
@@ -155,7 +234,6 @@
   }
 }
 
-#if !defined(STARBOARD)
 TEST_P(SparseHistogramTest, MacroBasicTest) {
   UmaHistogramSparse("Sparse", 100);
   UmaHistogramSparse("Sparse", 200);
@@ -196,8 +274,6 @@
   EXPECT_STREQ(histograms[1]->histogram_name(), "Sparse1");
 }
 
-#endif  // !defined(STARBOARD)
-
 TEST_P(SparseHistogramTest, Serialize) {
   std::unique_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
   histogram->SetFlags(HistogramBase::kIPCSerializationSourceFlag);
@@ -292,8 +368,7 @@
   int64_t create_ms = create_ticks.InMilliseconds();
 
   VLOG(1) << kTestCreateCount << " histogram creations took " << create_ms
-          << "ms or about "
-          << (create_ms * 1000000) / kTestCreateCount
+          << "ms or about " << (create_ms * 1000000) / kTestCreateCount
           << "ns each.";
 
   // Calculate cost of looking up existing histograms.
@@ -312,8 +387,7 @@
   int64_t lookup_ms = lookup_ticks.InMilliseconds();
 
   VLOG(1) << kTestLookupCount << " histogram lookups took " << lookup_ms
-          << "ms or about "
-          << (lookup_ms * 1000000) / kTestLookupCount
+          << "ms or about " << (lookup_ms * 1000000) / kTestLookupCount
           << "ns each.";
 
   // Calculate cost of accessing histograms.
@@ -327,9 +401,7 @@
   int64_t add_ms = add_ticks.InMilliseconds();
 
   VLOG(1) << kTestAddCount << " histogram adds took " << add_ms
-          << "ms or about "
-          << (add_ms * 1000000) / kTestAddCount
-          << "ns each.";
+          << "ms or about " << (add_ms * 1000000) / kTestAddCount << "ns each.";
 }
 
 TEST_P(SparseHistogramTest, ExtremeValues) {
@@ -345,7 +417,7 @@
       {2147483647, 2147483648LL},
   };
 
-  for (size_t i = 0; i < arraysize(cases); ++i) {
+  for (size_t i = 0; i < std::size(cases); ++i) {
     HistogramBase* histogram =
         SparseHistogram::FactoryGet(StringPrintf("ExtremeValues_%zu", i),
                                     HistogramBase::kUmaTargetedHistogramFlag);
@@ -376,4 +448,71 @@
   EXPECT_EQ(histogram->name_hash(), HashMetricName(kName));
 }
 
+TEST_P(SparseHistogramTest, CheckGetCountAndBucketData) {
+  std::unique_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
+  // Add samples in reverse order and make sure the output is in correct order.
+  histogram->AddCount(/*sample=*/200, /*count=*/15);
+  histogram->AddCount(/*sample=*/100, /*count=*/5);
+  // Add samples to the same bucket and make sure they'll be aggregated.
+  histogram->AddCount(/*sample=*/100, /*count=*/5);
+
+  const CountAndBucketData count_and_data_bucket =
+      GetCountAndBucketData(histogram.get());
+  EXPECT_EQ(25, count_and_data_bucket.count);
+  EXPECT_EQ(4000, count_and_data_bucket.sum);
+
+  const base::Value::List& buckets_list = count_and_data_bucket.buckets;
+  ASSERT_EQ(2u, buckets_list.size());
+
+  // Check the first bucket.
+  const base::Value::Dict* bucket1 = buckets_list[0].GetIfDict();
+  ASSERT_TRUE(bucket1 != nullptr);
+  EXPECT_EQ(bucket1->FindInt("low"), absl::optional<int>(100));
+  EXPECT_EQ(bucket1->FindInt("high"), absl::optional<int>(101));
+  EXPECT_EQ(bucket1->FindInt("count"), absl::optional<int>(10));
+
+  // Check the second bucket.
+  const base::Value::Dict* bucket2 = buckets_list[1].GetIfDict();
+  ASSERT_TRUE(bucket2 != nullptr);
+  EXPECT_EQ(bucket2->FindInt("low"), absl::optional<int>(200));
+  EXPECT_EQ(bucket2->FindInt("high"), absl::optional<int>(201));
+  EXPECT_EQ(bucket2->FindInt("count"), absl::optional<int>(15));
+}
+
+TEST_P(SparseHistogramTest, WriteAscii) {
+  HistogramBase* histogram =
+      SparseHistogram::FactoryGet("AsciiOut", HistogramBase::kNoFlags);
+  histogram->AddCount(/*sample=*/4, /*count=*/5);
+  histogram->AddCount(/*sample=*/10, /*count=*/15);
+
+  std::string output;
+  histogram->WriteAscii(&output);
+
+  const char kOutputFormatRe[] =
+      R"(Histogram: AsciiOut recorded 20 samples.*\n)"
+      R"(4   -+O +\(5 = 25.0%\)\n)"
+      R"(10  -+O +\(15 = 75.0%\)\n)";
+
+  EXPECT_THAT(output, testing::MatchesRegex(kOutputFormatRe));
+}
+
+TEST_P(SparseHistogramTest, ToGraphDict) {
+  HistogramBase* histogram =
+      SparseHistogram::FactoryGet("HTMLOut", HistogramBase::kNoFlags);
+  histogram->AddCount(/*sample=*/4, /*count=*/5);
+  histogram->AddCount(/*sample=*/10, /*count=*/15);
+
+  base::Value::Dict output = histogram->ToGraphDict();
+  std::string* header = output.FindString("header");
+  std::string* body = output.FindString("body");
+
+  const char kOutputHeaderFormatRe[] =
+      R"(Histogram: HTMLOut recorded 20 samples.*)";
+  const char kOutputBodyFormatRe[] = R"(4   -+O +\(5 = 25.0%\)\n)"
+                                     R"(10  -+O +\(15 = 75.0%\)\n)";
+
+  EXPECT_THAT(*header, testing::MatchesRegex(kOutputHeaderFormatRe));
+  EXPECT_THAT(*body, testing::MatchesRegex(kOutputBodyFormatRe));
+}
+
 }  // namespace base
diff --git a/base/metrics/statistics_recorder.cc b/base/metrics/statistics_recorder.cc
index fdb95b9..002c1dc 100644
--- a/base/metrics/statistics_recorder.cc
+++ b/base/metrics/statistics_recorder.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -7,6 +7,7 @@
 #include <memory>
 
 #include "base/at_exit.h"
+#include "base/containers/contains.h"
 #include "base/debug/leak_annotations.h"
 #include "base/json/string_escape.h"
 #include "base/logging.h"
@@ -16,7 +17,8 @@
 #include "base/metrics/metrics_hashes.h"
 #include "base/metrics/persistent_histogram_allocator.h"
 #include "base/metrics/record_histogram_checker.h"
-#include "base/stl_util.h"
+#include "base/ranges/algorithm.h"
+#include "base/strings/string_util.h"
 #include "base/strings/stringprintf.h"
 #include "base/values.h"
 
@@ -31,7 +33,15 @@
 }  // namespace
 
 // static
-LazyInstance<Lock>::Leaky StatisticsRecorder::lock_;
+LazyInstance<Lock>::Leaky StatisticsRecorder::lock_ = LAZY_INSTANCE_INITIALIZER;
+
+// static
+LazyInstance<base::Lock>::Leaky StatisticsRecorder::snapshot_lock_ =
+    LAZY_INSTANCE_INITIALIZER;
+
+// static
+StatisticsRecorder::SnapshotTransactionId
+    StatisticsRecorder::last_snapshot_transaction_id_ = 0;
 
 // static
 StatisticsRecorder* StatisticsRecorder::top_ = nullptr;
@@ -39,29 +49,36 @@
 // static
 bool StatisticsRecorder::is_vlog_initialized_ = false;
 
-size_t StatisticsRecorder::BucketRangesHash::operator()(
-    const BucketRanges* const a) const {
-  return a->checksum();
+// static
+std::atomic<bool> StatisticsRecorder::have_active_callbacks_{false};
+
+// static
+std::atomic<StatisticsRecorder::GlobalSampleCallback>
+    StatisticsRecorder::global_sample_callback_{nullptr};
+
+StatisticsRecorder::ScopedHistogramSampleObserver::
+    ScopedHistogramSampleObserver(const std::string& name,
+                                  OnSampleCallback callback)
+    : histogram_name_(name), callback_(callback) {
+  StatisticsRecorder::AddHistogramSampleObserver(histogram_name_, this);
 }
 
-bool StatisticsRecorder::BucketRangesEqual::operator()(
-    const BucketRanges* const a,
-    const BucketRanges* const b) const {
-  return a->Equals(b);
+StatisticsRecorder::ScopedHistogramSampleObserver::
+    ~ScopedHistogramSampleObserver() {
+  StatisticsRecorder::RemoveHistogramSampleObserver(histogram_name_, this);
+}
+
+void StatisticsRecorder::ScopedHistogramSampleObserver::RunCallback(
+    const char* histogram_name,
+    uint64_t name_hash,
+    HistogramBase::Sample sample) {
+  callback_.Run(histogram_name, name_hash, sample);
 }
 
 StatisticsRecorder::~StatisticsRecorder() {
   const AutoLock auto_lock(lock_.Get());
   DCHECK_EQ(this, top_);
-  DCHECK_NE(this, previous_);
   top_ = previous_;
-  // previous_ is only used for testing purpose to create temporary clean
-  // environment, sometimes multiple temporary environment can be messy and
-  // we want to make sure at least the last temporary StatisticsRecorder clears
-  // the static StatisticsRecorder's previous_.
-  if (top_ && top_->previous_) {
-    top_->previous_ = nullptr;
-  }
 }
 
 // static
@@ -102,13 +119,9 @@
     ANNOTATE_LEAKING_OBJECT_PTR(histogram);  // see crbug.com/79322
     // If there are callbacks for this histogram, we set the kCallbackExists
     // flag.
-    const auto callback_iterator = top_->callbacks_.find(name);
-    if (callback_iterator != top_->callbacks_.end()) {
-      if (!callback_iterator->second.is_null())
-        histogram->SetFlags(HistogramBase::kCallbackExists);
-      else
-        histogram->ClearFlags(HistogramBase::kCallbackExists);
-    }
+    if (base::Contains(top_->observers_, name))
+      histogram->SetFlags(HistogramBase::kCallbackExists);
+
     return histogram;
   }
 
@@ -125,34 +138,19 @@
 // static
 const BucketRanges* StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
     const BucketRanges* ranges) {
-  DCHECK(ranges->HasValidChecksum());
-
-  // Declared before |auto_lock| to ensure correct destruction order.
-  std::unique_ptr<const BucketRanges> ranges_deleter;
   const AutoLock auto_lock(lock_.Get());
   EnsureGlobalRecorderWhileLocked();
 
-  const BucketRanges* const registered = *top_->ranges_.insert(ranges).first;
-  if (registered == ranges) {
+  const BucketRanges* const registered =
+      top_->ranges_manager_.RegisterOrDeleteDuplicateRanges(ranges);
+
+  if (registered == ranges)
     ANNOTATE_LEAKING_OBJECT_PTR(ranges);
-  } else {
-    ranges_deleter.reset(ranges);
-  }
 
   return registered;
 }
 
 // static
-void StatisticsRecorder::WriteHTMLGraph(const std::string& query,
-                                        std::string* output) {
-  for (const HistogramBase* const histogram :
-       Sort(WithName(GetHistograms(), query))) {
-    histogram->WriteHTMLGraph(output);
-    *output += "<br><hr><br>";
-  }
-}
-
-// static
 void StatisticsRecorder::WriteGraph(const std::string& query,
                                     std::string* output) {
   if (query.length())
@@ -184,12 +182,10 @@
 
 // static
 std::vector<const BucketRanges*> StatisticsRecorder::GetBucketRanges() {
-  std::vector<const BucketRanges*> out;
   const AutoLock auto_lock(lock_.Get());
   EnsureGlobalRecorderWhileLocked();
-  out.reserve(top_->ranges_.size());
-  out.assign(top_->ranges_.begin(), top_->ranges_.end());
-  return out;
+
+  return top_->ranges_manager_.GetBucketRanges();
 }
 
 // static
@@ -226,16 +222,35 @@
 }
 
 // static
-void StatisticsRecorder::PrepareDeltas(
+StatisticsRecorder::SnapshotTransactionId StatisticsRecorder::PrepareDeltas(
     bool include_persistent,
     HistogramBase::Flags flags_to_set,
     HistogramBase::Flags required_flags,
     HistogramSnapshotManager* snapshot_manager) {
-  Histograms histograms = GetHistograms();
-  if (!include_persistent)
-    histograms = NonPersistent(std::move(histograms));
-  snapshot_manager->PrepareDeltas(Sort(std::move(histograms)), flags_to_set,
+  Histograms histograms = Sort(GetHistograms(include_persistent));
+  base::AutoLock lock(snapshot_lock_.Get());
+  snapshot_manager->PrepareDeltas(std::move(histograms), flags_to_set,
                                   required_flags);
+  return ++last_snapshot_transaction_id_;
+}
+
+// static
+StatisticsRecorder::SnapshotTransactionId
+StatisticsRecorder::SnapshotUnloggedSamples(
+    HistogramBase::Flags required_flags,
+    HistogramSnapshotManager* snapshot_manager) {
+  Histograms histograms = Sort(GetHistograms());
+  base::AutoLock lock(snapshot_lock_.Get());
+  snapshot_manager->SnapshotUnloggedSamples(std::move(histograms),
+                                            required_flags);
+  return ++last_snapshot_transaction_id_;
+}
+
+// static
+StatisticsRecorder::SnapshotTransactionId
+StatisticsRecorder::GetLastSnapshotTransactionId() {
+  base::AutoLock lock(snapshot_lock_.Get());
+  return last_snapshot_transaction_id_;
 }
 
 // static
@@ -245,43 +260,88 @@
 }
 
 // static
-bool StatisticsRecorder::SetCallback(
+void StatisticsRecorder::AddHistogramSampleObserver(
     const std::string& name,
-    const StatisticsRecorder::OnSampleCallback& cb) {
-  DCHECK(!cb.is_null());
+    StatisticsRecorder::ScopedHistogramSampleObserver* observer) {
+  DCHECK(observer);
   const AutoLock auto_lock(lock_.Get());
   EnsureGlobalRecorderWhileLocked();
 
-  if (!top_->callbacks_.insert({name, cb}).second)
-    return false;
+  auto iter = top_->observers_.find(name);
+  if (iter == top_->observers_.end()) {
+    top_->observers_.insert(
+        {name, base::MakeRefCounted<HistogramSampleObserverList>()});
+  }
+
+  top_->observers_[name]->AddObserver(observer);
 
   const HistogramMap::const_iterator it = top_->histograms_.find(name);
   if (it != top_->histograms_.end())
     it->second->SetFlags(HistogramBase::kCallbackExists);
 
-  return true;
+  have_active_callbacks_.store(
+      global_sample_callback() || !top_->observers_.empty(),
+      std::memory_order_relaxed);
 }
 
 // static
-void StatisticsRecorder::ClearCallback(const std::string& name) {
+void StatisticsRecorder::RemoveHistogramSampleObserver(
+    const std::string& name,
+    StatisticsRecorder::ScopedHistogramSampleObserver* observer) {
   const AutoLock auto_lock(lock_.Get());
   EnsureGlobalRecorderWhileLocked();
 
-  top_->callbacks_.erase(name);
+  auto iter = top_->observers_.find(name);
+  DCHECK(iter != top_->observers_.end());
 
-  // We also clear the flag from the histogram (if it exists).
-  const HistogramMap::const_iterator it = top_->histograms_.find(name);
-  if (it != top_->histograms_.end())
-    it->second->ClearFlags(HistogramBase::kCallbackExists);
+  auto result = iter->second->RemoveObserver(observer);
+  if (result ==
+      HistogramSampleObserverList::RemoveObserverResult::kWasOrBecameEmpty) {
+    top_->observers_.erase(name);
+
+    // We also clear the flag from the histogram (if it exists).
+    const HistogramMap::const_iterator it = top_->histograms_.find(name);
+    if (it != top_->histograms_.end())
+      it->second->ClearFlags(HistogramBase::kCallbackExists);
+  }
+
+  have_active_callbacks_.store(
+      global_sample_callback() || !top_->observers_.empty(),
+      std::memory_order_relaxed);
 }
 
 // static
-StatisticsRecorder::OnSampleCallback StatisticsRecorder::FindCallback(
-    const std::string& name) {
+void StatisticsRecorder::FindAndRunHistogramCallbacks(
+    base::PassKey<HistogramBase>,
+    const char* histogram_name,
+    uint64_t name_hash,
+    HistogramBase::Sample sample) {
   const AutoLock auto_lock(lock_.Get());
   EnsureGlobalRecorderWhileLocked();
-  const auto it = top_->callbacks_.find(name);
-  return it != top_->callbacks_.end() ? it->second : OnSampleCallback();
+
+  auto it = top_->observers_.find(histogram_name);
+
+  // Ensure that this observer is still registered, as it might have been
+  // unregistered before we acquired the lock.
+  if (it == top_->observers_.end())
+    return;
+
+  it->second->Notify(FROM_HERE, &ScopedHistogramSampleObserver::RunCallback,
+                     histogram_name, name_hash, sample);
+}
+
+// static
+void StatisticsRecorder::SetGlobalSampleCallback(
+    const GlobalSampleCallback& new_global_sample_callback) {
+  const AutoLock auto_lock(lock_.Get());
+  EnsureGlobalRecorderWhileLocked();
+
+  DCHECK(!global_sample_callback() || !new_global_sample_callback);
+  global_sample_callback_.store(new_global_sample_callback);
+
+  have_active_callbacks_.store(
+      new_global_sample_callback || !top_->observers_.empty(),
+      std::memory_order_relaxed);
 }
 
 // static
@@ -316,7 +376,11 @@
 std::unique_ptr<StatisticsRecorder>
 StatisticsRecorder::CreateTemporaryForTesting() {
   const AutoLock auto_lock(lock_.Get());
-  return WrapUnique(new StatisticsRecorder());
+  std::unique_ptr<StatisticsRecorder> temporary_recorder =
+      WrapUnique(new StatisticsRecorder());
+  temporary_recorder->ranges_manager_
+      .DoNotReleaseRangesOnDestroyForTesting();  // IN-TEST
+  return temporary_recorder;
 }
 
 // static
@@ -328,7 +392,7 @@
 }
 
 // static
-bool StatisticsRecorder::ShouldRecordHistogram(uint64_t histogram_hash) {
+bool StatisticsRecorder::ShouldRecordHistogram(uint32_t histogram_hash) {
   const AutoLock auto_lock(lock_.Get());
   EnsureGlobalRecorderWhileLocked();
   return !top_->record_checker_ ||
@@ -336,7 +400,8 @@
 }
 
 // static
-StatisticsRecorder::Histograms StatisticsRecorder::GetHistograms() {
+StatisticsRecorder::Histograms StatisticsRecorder::GetHistograms(
+    bool include_persistent) {
   // This must be called *before* the lock is acquired below because it will
   // call back into this object to register histograms. Those called methods
   // will acquire the lock at that time.
@@ -348,41 +413,47 @@
   EnsureGlobalRecorderWhileLocked();
 
   out.reserve(top_->histograms_.size());
-  for (const auto& entry : top_->histograms_)
+  for (const auto& entry : top_->histograms_) {
+    bool is_persistent = entry.second->HasFlags(HistogramBase::kIsPersistent);
+    if (!include_persistent && is_persistent)
+      continue;
     out.push_back(entry.second);
+  }
 
   return out;
 }
 
 // static
 StatisticsRecorder::Histograms StatisticsRecorder::Sort(Histograms histograms) {
-  std::sort(histograms.begin(), histograms.end(), &HistogramNameLesser);
+  ranges::sort(histograms, &HistogramNameLesser);
   return histograms;
 }
 
 // static
 StatisticsRecorder::Histograms StatisticsRecorder::WithName(
     Histograms histograms,
-    const std::string& query) {
+    const std::string& query,
+    bool case_sensitive) {
   // Need a C-string query for comparisons against C-string histogram name.
-  const char* const query_string = query.c_str();
-  histograms.erase(std::remove_if(histograms.begin(), histograms.end(),
-                                  [query_string](const HistogramBase* const h) {
-                                    return !strstr(h->histogram_name(),
-                                                   query_string);
-                                  }),
-                   histograms.end());
-  return histograms;
-}
+  std::string lowercase_query;
+  const char* query_string;
+  if (case_sensitive) {
+    query_string = query.c_str();
+  } else {
+    lowercase_query = base::ToLowerASCII(query);
+    query_string = lowercase_query.c_str();
+  }
 
-// static
-StatisticsRecorder::Histograms StatisticsRecorder::NonPersistent(
-    Histograms histograms) {
   histograms.erase(
-      std::remove_if(histograms.begin(), histograms.end(),
-                     [](const HistogramBase* const h) {
-                       return (h->flags() & HistogramBase::kIsPersistent) != 0;
-                     }),
+      ranges::remove_if(
+          histograms,
+          [query_string, case_sensitive](const HistogramBase* const h) {
+            return !strstr(
+                case_sensitive
+                    ? h->histogram_name()
+                    : base::ToLowerASCII(h->histogram_name()).c_str(),
+                query_string);
+          }),
       histograms.end());
   return histograms;
 }
@@ -397,9 +468,6 @@
     allocator->ImportHistogramsToStatisticsRecorder();
 }
 
-// This singleton instance should be started during the single threaded portion
-// of main(), and hence it is not thread safe. It initializes globals to provide
-// support for all future calls.
 StatisticsRecorder::StatisticsRecorder() {
   lock_.Get().AssertAcquired();
   previous_ = top_;
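
Editor's note: the observer registration above replaces the old single-callback SetCallback()/ClearCallback() API. A hedged usage sketch of ScopedHistogramSampleObserver as declared in the header below; the histogram name is illustrative:

```cpp
#include <cstdint>

#include "base/functional/bind.h"
#include "base/metrics/histogram_base.h"
#include "base/metrics/statistics_recorder.h"

class MyMetricsWatcher {
 public:
  MyMetricsWatcher()
      : observer_("Hypothetical.Histogram",  // Name to observe.
                  base::BindRepeating(&MyMetricsWatcher::OnSample,
                                      base::Unretained(this))) {}

 private:
  // Runs whenever a sample is recorded for the named histogram.
  void OnSample(const char* histogram_name,
                uint64_t name_hash,
                base::HistogramBase::Sample sample) {}

  // Registers on construction, unregisters on destruction.
  base::StatisticsRecorder::ScopedHistogramSampleObserver observer_;
};
```
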
diff --git a/base/metrics/statistics_recorder.h b/base/metrics/statistics_recorder.h
index 240fcb7..d08ae00 100644
--- a/base/metrics/statistics_recorder.h
+++ b/base/metrics/statistics_recorder.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -10,6 +10,9 @@
 #ifndef BASE_METRICS_STATISTICS_RECORDER_H_
 #define BASE_METRICS_STATISTICS_RECORDER_H_
 
+#include <stdint.h>
+
+#include <atomic>
 #include <memory>
 #include <string>
 #include <unordered_map>
@@ -17,16 +20,19 @@
 #include <vector>
 
 #include "base/base_export.h"
-#include "base/callback.h"
+#include "base/functional/callback.h"
 #include "base/gtest_prod_util.h"
 #include "base/lazy_instance.h"
-#include "base/macros.h"
+#include "base/memory/raw_ptr.h"
 #include "base/memory/weak_ptr.h"
 #include "base/metrics/histogram_base.h"
+#include "base/metrics/ranges_manager.h"
 #include "base/metrics/record_histogram_checker.h"
+#include "base/observer_list_threadsafe.h"
 #include "base/strings/string_piece.h"
 #include "base/synchronization/lock.h"
-#include "starboard/types.h"
+#include "base/thread_annotations.h"
+#include "base/types/pass_key.h"
 
 namespace base {
 
@@ -37,7 +43,7 @@
 //
 // All the public methods are static and act on a global recorder. This global
 // recorder is internally synchronized and all the static methods are thread
-// safe.
+// safe. This is intended to only be run/used in the browser process.
 //
 // StatisticsRecorder doesn't have any public constructor. For testing purpose,
 // you can create a temporary recorder using the factory method
@@ -54,7 +60,49 @@
     virtual void MergeHistogramDeltas() = 0;
   };
 
+  // OnSampleCallback is a convenient callback type that provides information
+  // about a histogram sample. This is used in conjunction with
+  // ScopedHistogramSampleObserver to get notified when a sample is collected.
+  using OnSampleCallback =
+      base::RepeatingCallback<void(const char* /*=histogram_name*/,
+                                   uint64_t /*=name_hash*/,
+                                   HistogramBase::Sample)>;
+
+  // An observer that gets notified whenever a new sample is recorded for a
+  // particular histogram. Clients only need to construct it with the histogram
+  // name and the callback to be invoked. The class starts observing on
+  // construction and removes itself from the observer list on destruction. The
+  // clients are always notified on the same sequence on which they were
+  // registered.
+  class BASE_EXPORT ScopedHistogramSampleObserver {
+   public:
+    // Constructor. Called with the desired histogram name and the callback to
+    // be invoked when a sample is recorded.
+    explicit ScopedHistogramSampleObserver(const std::string& histogram_name,
+                                           OnSampleCallback callback);
+    ~ScopedHistogramSampleObserver();
+
+   private:
+    friend class StatisticsRecorder;
+
+    // Runs the callback.
+    void RunCallback(const char* histogram_name,
+                     uint64_t name_hash,
+                     HistogramBase::Sample sample);
+
+    // The name of the histogram to observe.
+    const std::string histogram_name_;
+
+    // The client-supplied callback that is invoked when a histogram sample is
+    // collected.
+    const OnSampleCallback callback_;
+  };
+
   typedef std::vector<HistogramBase*> Histograms;
+  typedef size_t SnapshotTransactionId;
+
+  StatisticsRecorder(const StatisticsRecorder&) = delete;
+  StatisticsRecorder& operator=(const StatisticsRecorder&) = delete;
 
   // Restores the previous global recorder.
   //
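
A minimal sketch of the intended ScopedHistogramSampleObserver usage (the histogram name and callback body are illustrative):

    auto observer = std::make_unique<
        base::StatisticsRecorder::ScopedHistogramSampleObserver>(
        "MyMetric",
        base::BindRepeating([](const char* histogram_name, uint64_t name_hash,
                               base::HistogramBase::Sample sample) {
          // Invoked on the registering sequence for each new "MyMetric"
          // sample, whether or not the histogram existed at registration.
        }));
    observer.reset();  // Unregisters the observer.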
@@ -92,12 +140,11 @@
   static const BucketRanges* RegisterOrDeleteDuplicateRanges(
       const BucketRanges* ranges);
 
-  // Methods for appending histogram data to a string.  Only histograms which
+  // A method for appending histogram data to a string. Only histograms which
   // have |query| as a substring are written to |output| (an empty string will
   // process all registered histograms).
   //
-  // These methods are thread safe.
-  static void WriteHTMLGraph(const std::string& query, std::string* output);
+  // This method is thread safe.
   static void WriteGraph(const std::string& query, std::string* output);
 
   // Returns the histograms with |verbosity_level| as the serialization
@@ -106,14 +153,16 @@
   // This method is thread safe.
   static std::string ToJSON(JSONVerbosityLevel verbosity_level);
 
-  // Gets existing histograms.
+  // Gets existing histograms. |include_persistent| determines whether
+  // histograms held in persistent storage are included.
   //
   // The order of returned histograms is not guaranteed.
   //
   // Ownership of the individual histograms remains with the StatisticsRecorder.
   //
   // This method is thread safe.
-  static Histograms GetHistograms();
+  static Histograms GetHistograms(bool include_persistent = true)
+      LOCKS_EXCLUDED(lock_.Pointer());
 
   // Gets BucketRanges used by all histograms registered. The order of returned
   // BucketRanges is not guaranteed.
@@ -132,36 +181,47 @@
   // This method must be called on the UI thread.
   static void ImportProvidedHistograms();
 
-  // Snapshots all histograms via |snapshot_manager|. |flags_to_set| is used to
-  // set flags for each histogram. |required_flags| is used to select
-  // histograms to be recorded. Only histograms that have all the flags
-  // specified by the argument will be chosen. If all histograms should be
-  // recorded, set it to |Histogram::kNoFlags|.
-  static void PrepareDeltas(bool include_persistent,
-                            HistogramBase::Flags flags_to_set,
-                            HistogramBase::Flags required_flags,
-                            HistogramSnapshotManager* snapshot_manager);
+  // Snapshots all histogram deltas via |snapshot_manager|. This marks the
+  // deltas as logged. |include_persistent| determines whether histograms held
+  // in persistent storage are snapshotted. |flags_to_set| is used to set flags
+  // for each histogram. |required_flags| is used to select which histograms to
+  // record. Only histograms with all required flags are selected. If all
+  // histograms should be recorded, use |Histogram::kNoFlags| as the required
+  // flag. This is logically equivalent to calling SnapshotUnloggedSamples()
+  // followed by HistogramSnapshotManager::MarkUnloggedSamplesAsLogged() on
+  // |snapshot_manager|. Returns the snapshot transaction ID associated with
+  // this operation. Thread-safe.
+  static SnapshotTransactionId PrepareDeltas(
+      bool include_persistent,
+      HistogramBase::Flags flags_to_set,
+      HistogramBase::Flags required_flags,
+      HistogramSnapshotManager* snapshot_manager)
+      LOCKS_EXCLUDED(snapshot_lock_.Pointer());
 
-  typedef base::Callback<void(HistogramBase::Sample)> OnSampleCallback;
+  // Same as PrepareDeltas() above, but the samples are not marked as logged.
+  // Persistent histograms are included and no flags are set. To mark the
+  // samples as logged, call
+  // HistogramSnapshotManager::MarkUnloggedSamplesAsLogged() on the passed
+  // |snapshot_manager|. Returns the
+  // snapshot transaction ID associated with this operation. Thread-safe.
+  static SnapshotTransactionId SnapshotUnloggedSamples(
+      HistogramBase::Flags required_flags,
+      HistogramSnapshotManager* snapshot_manager)
+      LOCKS_EXCLUDED(snapshot_lock_.Pointer());
 
-  // Sets the callback to notify when a new sample is recorded on the histogram
-  // referred to by |histogram_name|. Can be called before or after the
-  // histogram is created. Returns whether the callback was successfully set.
+  // Returns the transaction ID of the last snapshot performed (either through
+  // PrepareDeltas() or SnapshotUnloggedSamples()). Returns 0 if no snapshot
+  // has been taken yet. Thread-safe.
+  static SnapshotTransactionId GetLastSnapshotTransactionId()
+      LOCKS_EXCLUDED(snapshot_lock_.Pointer());
+
+  // Retrieves and runs the list of callbacks for the histogram referred to by
+  // |histogram_name|, if any.
   //
   // This method is thread safe.
-  static bool SetCallback(const std::string& histogram_name,
-                          const OnSampleCallback& callback);
-
-  // Clears any callback set on the histogram referred to by |histogram_name|.
-  //
-  // This method is thread safe.
-  static void ClearCallback(const std::string& histogram_name);
-
-  // Retrieves the callback for the histogram referred to by |histogram_name|,
-  // or a null callback if no callback exists for this histogram.
-  //
-  // This method is thread safe.
-  static OnSampleCallback FindCallback(const std::string& histogram_name);
+  static void FindAndRunHistogramCallbacks(base::PassKey<HistogramBase>,
+                                           const char* histogram_name,
+                                           uint64_t name_hash,
+                                           HistogramBase::Sample sample);
 
   // Returns the number of known histograms.
   //
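
A sketch of the two snapshot paths described above, assuming a |snapshot_manager| already exists (flag arguments are illustrative):

    // One-step: snapshot all histogram deltas and mark them as logged.
    base::StatisticsRecorder::SnapshotTransactionId id =
        base::StatisticsRecorder::PrepareDeltas(
            /*include_persistent=*/true, base::HistogramBase::kNoFlags,
            base::HistogramBase::kNoFlags, &snapshot_manager);

    // Two-step equivalent: snapshot first, then mark the samples as logged
    // once the consumer has committed them.
    id = base::StatisticsRecorder::SnapshotUnloggedSamples(
        base::HistogramBase::kNoFlags, &snapshot_manager);
    snapshot_manager.MarkUnloggedSamplesAsLogged();
    DCHECK_EQ(id, base::StatisticsRecorder::GetLastSnapshotTransactionId());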
@@ -191,8 +251,8 @@
   // they're created.
   //
   // This method is thread safe.
-  static std::unique_ptr<StatisticsRecorder> CreateTemporaryForTesting()
-      WARN_UNUSED_RESULT;
+  [[nodiscard]] static std::unique_ptr<StatisticsRecorder>
+  CreateTemporaryForTesting();
 
   // Sets the record checker for determining if a histogram should be recorded.
   // Record checker doesn't affect any already recorded histograms, so this
@@ -205,41 +265,74 @@
   // Checks if the given histogram should be recorded based on the
   // ShouldRecord() method of the record checker. If the record checker is not
   // set, returns true.
+  // |histogram_hash| corresponds to the result of HashMetricNameAs32Bits().
   //
   // This method is thread safe.
-  static bool ShouldRecordHistogram(uint64_t histogram_hash);
+  static bool ShouldRecordHistogram(uint32_t histogram_hash);
 
   // Sorts histograms by name.
   static Histograms Sort(Histograms histograms);
 
   // Filters histograms by name. Only histograms which have |query| as a
   // substring in their name are kept. An empty query keeps all histograms.
-  static Histograms WithName(Histograms histograms, const std::string& query);
+  // |case_sensitive| determines whether the matching should be done in a
+  // case-sensitive way.
+  static Histograms WithName(Histograms histograms,
+                             const std::string& query,
+                             bool case_sensitive = true);
 
-  // Filters histograms by persistency. Only non-persistent histograms are kept.
-  static Histograms NonPersistent(Histograms histograms);
+  using GlobalSampleCallback = void (*)(const char* /*=histogram_name*/,
+                                        uint64_t /*=name_hash*/,
+                                        HistogramBase::Sample);
+  // Installs a global callback that is invoked for every added histogram
+  // sample. The callback is a raw function pointer so that it can be accessed
+  // lock-free and called on any thread.
+  static void SetGlobalSampleCallback(
+      const GlobalSampleCallback& global_sample_callback);
+
+  // Returns the global callback, if any, that should be called every time a
+  // histogram sample is added.
+  static GlobalSampleCallback global_sample_callback() {
+    return global_sample_callback_.load(std::memory_order_relaxed);
+  }
+
+  // Returns whether a global histogram callback is set or any individual
+  // histograms have callbacks set. Used for early return when histogram
+  // samples are added.
+  static bool have_active_callbacks() {
+    return have_active_callbacks_.load(std::memory_order_relaxed);
+  }
 
  private:
+  // Adds an observer to be notified when a new sample is recorded on the
+  // histogram referred to by |histogram_name|. Can be called before or after
+  // the histogram is created.
+  //
+  // This method is thread safe.
+  static void AddHistogramSampleObserver(
+      const std::string& histogram_name,
+      ScopedHistogramSampleObserver* observer);
+
+  // Clears the given |observer| set on the histogram referred to by
+  // |histogram_name|.
+  //
+  // This method is thread safe.
+  static void RemoveHistogramSampleObserver(
+      const std::string& histogram_name,
+      ScopedHistogramSampleObserver* observer);
+
   typedef std::vector<WeakPtr<HistogramProvider>> HistogramProviders;
 
   typedef std::unordered_map<StringPiece, HistogramBase*, StringPieceHash>
       HistogramMap;
 
-  // We keep a map of callbacks to histograms, so that as histograms are
-  // created, we can set the callback properly.
-  typedef std::unordered_map<std::string, OnSampleCallback> CallbackMap;
-
-  struct BucketRangesHash {
-    size_t operator()(const BucketRanges* a) const;
-  };
-
-  struct BucketRangesEqual {
-    bool operator()(const BucketRanges* a, const BucketRanges* b) const;
-  };
-
-  typedef std::
-      unordered_set<const BucketRanges*, BucketRangesHash, BucketRangesEqual>
-          RangesMap;
+  // A map of histogram name to registered observers. If the histogram isn't
+  // created yet, the observers will be added after creation.
+  using HistogramSampleObserverList =
+      base::ObserverListThreadSafe<ScopedHistogramSampleObserver>;
+  typedef std::unordered_map<std::string,
+                             scoped_refptr<HistogramSampleObserverList>>
+      ObserverMap;
 
   friend class StatisticsRecorderTest;
   FRIEND_TEST_ALL_PREFIXES(StatisticsRecorderTest, IterationTest);
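
Because GlobalSampleCallback is a raw function pointer, only a free function or capture-less lambda can be installed; a sketch (the function is hypothetical):

    void OnAnySample(const char* histogram_name, uint64_t name_hash,
                     base::HistogramBase::Sample sample) {
      // May be invoked on any thread, synchronously with the sample add.
    }

    base::StatisticsRecorder::SetGlobalSampleCallback(&OnAnySample);
    // ...
    base::StatisticsRecorder::SetGlobalSampleCallback(nullptr);  // Uninstall.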
@@ -263,6 +356,10 @@
   // Constructs a new StatisticsRecorder and sets it as the current global
   // recorder.
   //
+  // This singleton instance should be started during the single-threaded
+  // portion of startup and hence it is not thread safe. It initializes globals
+  // to provide support for all future calls.
+  //
   // Precondition: The global lock is already acquired.
   StatisticsRecorder();
 
@@ -273,17 +370,26 @@
   static void InitLogOnShutdownWhileLocked();
 
   HistogramMap histograms_;
-  CallbackMap callbacks_;
-  RangesMap ranges_;
+  ObserverMap observers_;
   HistogramProviders providers_;
+  RangesManager ranges_manager_;
   std::unique_ptr<RecordHistogramChecker> record_checker_;
 
   // Previous global recorder that existed when this one was created.
-  StatisticsRecorder* previous_ = nullptr;
+  raw_ptr<StatisticsRecorder> previous_ = nullptr;
 
   // Global lock for internal synchronization.
   static LazyInstance<Lock>::Leaky lock_;
 
+  // Global lock for internal synchronization of histogram snapshots.
+  static LazyInstance<base::Lock>::Leaky snapshot_lock_;
+
+  // A strictly increasing number that is incremented every time a snapshot is
+  // taken (by either calling SnapshotUnloggedSamples() or PrepareDeltas()).
+  // This represents the transaction ID of the last snapshot taken.
+  static SnapshotTransactionId last_snapshot_transaction_id_
+      GUARDED_BY(snapshot_lock_.Get());
+
   // Current global recorder. This recorder is used by static methods. When a
   // new global recorder is created by CreateTemporaryForTesting(), then the
   // previous global recorder is referenced by top_->previous_.
@@ -293,7 +399,12 @@
   // function that will be called when the program finishes.
   static bool is_vlog_initialized_;
 
-  DISALLOW_COPY_AND_ASSIGN(StatisticsRecorder);
+  // Tracks whether there are any active histogram callbacks.
+  static std::atomic<bool> have_active_callbacks_;
+
+  // Stores a raw callback which should be called on every histogram sample
+  // that gets added.
+  static std::atomic<GlobalSampleCallback> global_sample_callback_;
 };
 
 }  // namespace base
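
The record-checker hook above can be exercised with a small RecordHistogramChecker, mirroring the OddRecordHistogramChecker in the unit test below (the setter name SetRecordChecker is an assumption; only its comment is shown in this hunk):

    class DropOddHashes : public base::RecordHistogramChecker {
     public:
      // base::RecordHistogramChecker:
      bool ShouldRecord(uint32_t histogram_hash) const override {
        return histogram_hash % 2 == 0;  // Record only even name hashes.
      }
    };

    base::StatisticsRecorder::SetRecordChecker(
        std::make_unique<DropOddHashes>());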
diff --git a/base/metrics/statistics_recorder_unittest.cc b/base/metrics/statistics_recorder_unittest.cc
index 7937b71..d2bbc97 100644
--- a/base/metrics/statistics_recorder_unittest.cc
+++ b/base/metrics/statistics_recorder_unittest.cc
@@ -1,26 +1,30 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "base/metrics/statistics_recorder.h"
 
+#include <stddef.h>
+
 #include <memory>
 #include <utility>
 #include <vector>
 
-#include "base/bind.h"
+#include "base/functional/bind.h"
 #include "base/json/json_reader.h"
 #include "base/logging.h"
 #include "base/memory/weak_ptr.h"
 #include "base/metrics/histogram_base.h"
 #include "base/metrics/histogram_macros.h"
+#include "base/metrics/metrics_hashes.h"
 #include "base/metrics/persistent_histogram_allocator.h"
 #include "base/metrics/record_histogram_checker.h"
 #include "base/metrics/sparse_histogram.h"
+#include "base/test/task_environment.h"
 #include "base/values.h"
-#include "starboard/types.h"
 #include "testing/gmock/include/gmock/gmock.h"
 #include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
 
 namespace {
 
@@ -28,14 +32,13 @@
 // contained (i.e., do not affect other unit tests).
 class LogStateSaver {
  public:
-  LogStateSaver() : old_min_log_level_(logging::GetMinLogLevel()) {}
-
+  LogStateSaver() = default;
+  LogStateSaver(const LogStateSaver&) = delete;
+  LogStateSaver& operator=(const LogStateSaver&) = delete;
   ~LogStateSaver() { logging::SetMinLogLevel(old_min_log_level_); }
 
  private:
-  int old_min_log_level_;
-
-  DISALLOW_COPY_AND_ASSIGN(LogStateSaver);
+  int old_min_log_level_ = logging::GetMinLogLevel();
 };
 
 // Test implementation of RecordHistogramChecker interface.
@@ -44,7 +47,7 @@
   ~OddRecordHistogramChecker() override = default;
 
   // base::RecordHistogramChecker:
-  bool ShouldRecord(uint64_t histogram_hash) const override {
+  bool ShouldRecord(uint32_t histogram_hash) const override {
     return histogram_hash % 2;
   }
 };
@@ -58,6 +61,10 @@
 using testing::UnorderedElementsAre;
 
 class StatisticsRecorderTest : public testing::TestWithParam<bool> {
+ public:
+  StatisticsRecorderTest(const StatisticsRecorderTest&) = delete;
+  StatisticsRecorderTest& operator=(const StatisticsRecorderTest&) = delete;
+
  protected:
   const int32_t kAllocatorMemorySize = 64 << 10;  // 64 KiB
 
@@ -75,6 +82,7 @@
 
   ~StatisticsRecorderTest() override {
     GlobalHistogramAllocator::ReleaseForTesting();
+    UninitializeStatisticsRecorder();
   }
 
   void InitializeStatisticsRecorder() {
@@ -100,7 +108,7 @@
     Histogram::InitializeBucketRanges(min, max, ranges);
     const BucketRanges* registered_ranges =
         StatisticsRecorder::RegisterOrDeleteDuplicateRanges(ranges);
-    return new Histogram(name, min, max, registered_ranges);
+    return new Histogram(name, registered_ranges);
   }
 
   void InitLogOnShutdown() { StatisticsRecorder::InitLogOnShutdown(); }
@@ -119,12 +127,10 @@
 
  private:
   LogStateSaver log_state_saver_;
-
-  DISALLOW_COPY_AND_ASSIGN(StatisticsRecorderTest);
 };
 
 // Run all HistogramTest cases with both heap and persistent memory.
-INSTANTIATE_TEST_CASE_P(Allocator, StatisticsRecorderTest, testing::Bool());
+INSTANTIATE_TEST_SUITE_P(Allocator, StatisticsRecorderTest, testing::Bool());
 
 TEST_P(StatisticsRecorderTest, NotInitialized) {
   UninitializeStatisticsRecorder();
@@ -150,43 +156,6 @@
               UnorderedElementsAre(ranges));
 }
 
-TEST_P(StatisticsRecorderTest, RegisterBucketRanges) {
-  std::vector<const BucketRanges*> registered_ranges;
-
-  BucketRanges* ranges1 = new BucketRanges(3);
-  ranges1->ResetChecksum();
-  BucketRanges* ranges2 = new BucketRanges(4);
-  ranges2->ResetChecksum();
-
-  // Register new ranges.
-  EXPECT_EQ(ranges1,
-            StatisticsRecorder::RegisterOrDeleteDuplicateRanges(ranges1));
-  EXPECT_EQ(ranges2,
-            StatisticsRecorder::RegisterOrDeleteDuplicateRanges(ranges2));
-  EXPECT_THAT(StatisticsRecorder::GetBucketRanges(),
-              UnorderedElementsAre(ranges1, ranges2));
-
-  // Register some ranges again.
-  EXPECT_EQ(ranges1,
-            StatisticsRecorder::RegisterOrDeleteDuplicateRanges(ranges1));
-  EXPECT_THAT(StatisticsRecorder::GetBucketRanges(),
-              UnorderedElementsAre(ranges1, ranges2));
-
-  // Make sure the ranges is still the one we know.
-  ASSERT_EQ(3u, ranges1->size());
-  EXPECT_EQ(0, ranges1->range(0));
-  EXPECT_EQ(0, ranges1->range(1));
-  EXPECT_EQ(0, ranges1->range(2));
-
-  // Register ranges with same values.
-  BucketRanges* ranges3 = new BucketRanges(3);
-  ranges3->ResetChecksum();
-  EXPECT_EQ(ranges1,  // returning ranges1
-            StatisticsRecorder::RegisterOrDeleteDuplicateRanges(ranges3));
-  EXPECT_THAT(StatisticsRecorder::GetBucketRanges(),
-              UnorderedElementsAre(ranges1, ranges2));
-}
-
 TEST_P(StatisticsRecorderTest, RegisterHistogram) {
   // Create a Histogram that was not registered.
   Histogram* const histogram1 = CreateHistogram("TestHistogram1", 1, 1000, 10);
@@ -263,10 +232,27 @@
 
   const auto histograms = StatisticsRecorder::GetHistograms();
   EXPECT_THAT(histograms, SizeIs(3));
-  EXPECT_THAT(StatisticsRecorder::WithName(histograms, ""), SizeIs(3));
-  EXPECT_THAT(StatisticsRecorder::WithName(histograms, "Test"), SizeIs(3));
-  EXPECT_THAT(StatisticsRecorder::WithName(histograms, "1"), SizeIs(1));
-  EXPECT_THAT(StatisticsRecorder::WithName(histograms, "hello"), IsEmpty());
+  EXPECT_THAT(
+      StatisticsRecorder::WithName(histograms, "", /*case_sensitive=*/true),
+      SizeIs(3));
+  EXPECT_THAT(
+      StatisticsRecorder::WithName(histograms, "Test", /*case_sensitive=*/true),
+      SizeIs(3));
+  EXPECT_THAT(
+      StatisticsRecorder::WithName(histograms, "1", /*case_sensitive=*/true),
+      SizeIs(1));
+  EXPECT_THAT(StatisticsRecorder::WithName(histograms, "hello",
+                                           /*case_sensitive=*/true),
+              IsEmpty());
+  EXPECT_THAT(StatisticsRecorder::WithName(histograms, "hello",
+                                           /*case_sensitive=*/false),
+              IsEmpty());
+  EXPECT_THAT(
+      StatisticsRecorder::WithName(histograms, "test", /*case_sensitive=*/true),
+      IsEmpty());
+  EXPECT_THAT(StatisticsRecorder::WithName(histograms, "test",
+                                           /*case_sensitive=*/false),
+              SizeIs(3));
 }
 
 TEST_P(StatisticsRecorderTest, RegisterHistogramWithFactoryGet) {
@@ -327,7 +313,7 @@
   ASSERT_EQ(1u, registered_histograms.size());
   EXPECT_EQ(histogram, registered_histograms[0]);
 
-  LOCAL_HISTOGRAM_TIMES("TestHistogramTimes", TimeDelta::FromDays(1));
+  LOCAL_HISTOGRAM_TIMES("TestHistogramTimes", Days(1));
   LOCAL_HISTOGRAM_ENUMERATION("TestHistogramEnumeration", 20, 200);
 
   EXPECT_THAT(StatisticsRecorder::GetHistograms(), SizeIs(3));
@@ -357,48 +343,48 @@
   std::string json(StatisticsRecorder::ToJSON(JSON_VERBOSITY_LEVEL_FULL));
 
   // Check for valid JSON.
-  std::unique_ptr<Value> root = JSONReader::Read(json);
-  ASSERT_TRUE(root.get());
-
-  DictionaryValue* root_dict = nullptr;
-  ASSERT_TRUE(root->GetAsDictionary(&root_dict));
+  absl::optional<Value> root = JSONReader::Read(json);
+  ASSERT_TRUE(root);
+  Value::Dict* root_dict = root->GetIfDict();
+  ASSERT_TRUE(root_dict);
 
   // No query should be set.
-  ASSERT_FALSE(root_dict->HasKey("query"));
+  ASSERT_FALSE(root_dict->Find("query"));
 
-  ListValue* histogram_list = nullptr;
-  ASSERT_TRUE(root_dict->GetList("histograms", &histogram_list));
-  ASSERT_EQ(2u, histogram_list->GetSize());
+  const Value::List* histogram_list = root_dict->FindList("histograms");
+
+  ASSERT_TRUE(histogram_list);
+  ASSERT_EQ(2u, histogram_list->size());
 
   // Examine the first histogram.
-  DictionaryValue* histogram_dict = nullptr;
-  ASSERT_TRUE(histogram_list->GetDictionary(0, &histogram_dict));
+  const Value::Dict* histogram_dict = (*histogram_list)[0].GetIfDict();
+  ASSERT_TRUE(histogram_dict);
 
-  int sample_count;
-  ASSERT_TRUE(histogram_dict->GetInteger("count", &sample_count));
-  EXPECT_EQ(2, sample_count);
+  auto sample_count = histogram_dict->FindInt("count");
+  ASSERT_TRUE(sample_count);
+  EXPECT_EQ(2, *sample_count);
 
-  ListValue* buckets_list = nullptr;
-  ASSERT_TRUE(histogram_dict->GetList("buckets", &buckets_list));
-  EXPECT_EQ(2u, buckets_list->GetList().size());
+  const Value::List* buckets_list = histogram_dict->FindList("buckets");
+  ASSERT_TRUE(buckets_list);
+  EXPECT_EQ(2u, buckets_list->size());
 
   // Check the serialized JSON with a different verbosity level.
   json = StatisticsRecorder::ToJSON(JSON_VERBOSITY_LEVEL_OMIT_BUCKETS);
   root = JSONReader::Read(json);
-  ASSERT_TRUE(root.get());
-  root_dict = nullptr;
-  ASSERT_TRUE(root->GetAsDictionary(&root_dict));
-  histogram_list = nullptr;
-  ASSERT_TRUE(root_dict->GetList("histograms", &histogram_list));
-  ASSERT_EQ(2u, histogram_list->GetSize());
-  histogram_dict = nullptr;
-  ASSERT_TRUE(histogram_list->GetDictionary(0, &histogram_dict));
-  sample_count = 0;
-  ASSERT_TRUE(histogram_dict->GetInteger("count", &sample_count));
-  EXPECT_EQ(2, sample_count);
-  buckets_list = nullptr;
+  ASSERT_TRUE(root);
+  root_dict = root->GetIfDict();
+  ASSERT_TRUE(root_dict);
+  histogram_list = root_dict->FindList("histograms");
+  ASSERT_TRUE(histogram_list);
+  ASSERT_EQ(2u, histogram_list->size());
+  const Value::Dict* histogram_dict2 = (*histogram_list)[0].GetIfDict();
+  ASSERT_TRUE(histogram_dict2);
+  sample_count = histogram_dict2->FindInt("count");
+  ASSERT_TRUE(sample_count);
+  EXPECT_EQ(2, *sample_count);
+  buckets_list = histogram_dict2->FindList("buckets");
   // Bucket information should be omitted.
-  ASSERT_FALSE(histogram_dict->GetList("buckets", &buckets_list));
+  ASSERT_FALSE(buckets_list);
 }
 
 TEST_P(StatisticsRecorderTest, IterationTest) {
@@ -407,7 +393,7 @@
 
   auto histograms = StatisticsRecorder::GetHistograms();
   EXPECT_THAT(histograms, SizeIs(2));
-  histograms = StatisticsRecorder::NonPersistent(std::move(histograms));
+  histograms = StatisticsRecorder::GetHistograms(/*include_persistent=*/false);
   EXPECT_THAT(histograms, SizeIs(use_persistent_histogram_allocator_ ? 0 : 2));
 
   // Create a new global allocator using the same memory as the old one. Any
@@ -426,7 +412,7 @@
 
   histograms = StatisticsRecorder::GetHistograms();
   EXPECT_THAT(histograms, SizeIs(use_persistent_histogram_allocator_ ? 2 : 0));
-  histograms = StatisticsRecorder::NonPersistent(std::move(histograms));
+  histograms = StatisticsRecorder::GetHistograms(/*include_persistent=*/false);
   EXPECT_THAT(histograms, IsEmpty());
 }
 
@@ -435,86 +421,159 @@
 // CallbackCheckWrapper is simply a convenient way to check and store that
 // a callback was actually run.
 struct CallbackCheckWrapper {
-  CallbackCheckWrapper() : called(false), last_histogram_value(0) {}
+  CallbackCheckWrapper()
+      : called(false),
+        last_histogram_name(""),
+        last_name_hash(HashMetricName("")),
+        last_histogram_value(0) {}
 
-  void OnHistogramChanged(base::HistogramBase::Sample histogram_value) {
+  void OnHistogramChanged(const char* histogram_name,
+                          uint64_t name_hash,
+                          base::HistogramBase::Sample histogram_value) {
     called = true;
+    last_histogram_name = histogram_name;
+    last_name_hash = name_hash;
     last_histogram_value = histogram_value;
   }
 
   bool called;
+  const char* last_histogram_name;
+  uint64_t last_name_hash;
   base::HistogramBase::Sample last_histogram_value;
 };
 
 }  // namespace
 
-// Check that you can't overwrite the callback with another.
-TEST_P(StatisticsRecorderTest, SetCallbackFailsWithoutHistogramTest) {
+TEST_P(StatisticsRecorderTest,
+       AddHistogramCallbackBeforeHistogramRegistration) {
+  test::TaskEnvironment task_environment;
+  const char* histogram_name = "TestHistogram";
   CallbackCheckWrapper callback_wrapper;
 
-  bool result = base::StatisticsRecorder::SetCallback(
-      "TestHistogram", base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
-                                  base::Unretained(&callback_wrapper)));
-  EXPECT_TRUE(result);
+  auto callback =
+      std::make_unique<base::StatisticsRecorder::ScopedHistogramSampleObserver>(
+          histogram_name,
+          base::BindRepeating(&CallbackCheckWrapper::OnHistogramChanged,
+                              base::Unretained(&callback_wrapper)));
+  EXPECT_TRUE(base::StatisticsRecorder::have_active_callbacks());
 
-  result = base::StatisticsRecorder::SetCallback(
-      "TestHistogram", base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
-                                  base::Unretained(&callback_wrapper)));
-  EXPECT_FALSE(result);
+  HistogramBase* const histogram = CreateHistogram(histogram_name, 1, 1000, 10);
+  EXPECT_EQ(StatisticsRecorder::RegisterOrDeleteDuplicate(histogram),
+            histogram);
+
+  EXPECT_TRUE(histogram->HasFlags(base::HistogramBase::kCallbackExists));
+  EXPECT_TRUE(base::StatisticsRecorder::have_active_callbacks());
 }
 
-// Check that you can't overwrite the callback with another.
-TEST_P(StatisticsRecorderTest, SetCallbackFailsWithHistogramTest) {
-  HistogramBase* histogram = Histogram::FactoryGet("TestHistogram", 1, 1000, 10,
+TEST_P(StatisticsRecorderTest,
+       RemoveHistogramCallbackBeforeHistogramRegistrationWithMultipleClients) {
+  test::TaskEnvironment task_environment;
+  const char* histogram_name = "TestHistogram";
+  CallbackCheckWrapper callback_wrapper1;
+  CallbackCheckWrapper callback_wrapper2;
+
+  auto callback1 =
+      std::make_unique<base::StatisticsRecorder::ScopedHistogramSampleObserver>(
+          histogram_name,
+          base::BindRepeating(&CallbackCheckWrapper::OnHistogramChanged,
+                              base::Unretained(&callback_wrapper1)));
+  EXPECT_TRUE(base::StatisticsRecorder::have_active_callbacks());
+
+  auto callback2 =
+      std::make_unique<base::StatisticsRecorder::ScopedHistogramSampleObserver>(
+          histogram_name,
+          base::BindRepeating(&CallbackCheckWrapper::OnHistogramChanged,
+                              base::Unretained(&callback_wrapper2)));
+  EXPECT_TRUE(base::StatisticsRecorder::have_active_callbacks());
+
+  callback1.reset();
+  EXPECT_TRUE(base::StatisticsRecorder::have_active_callbacks());
+
+  callback2.reset();
+  EXPECT_FALSE(base::StatisticsRecorder::have_active_callbacks());
+
+  HistogramBase* const histogram = CreateHistogram(histogram_name, 1, 1000, 10);
+  EXPECT_EQ(StatisticsRecorder::RegisterOrDeleteDuplicate(histogram),
+            histogram);
+
+  EXPECT_FALSE(histogram->HasFlags(base::HistogramBase::kCallbackExists));
+  EXPECT_FALSE(base::StatisticsRecorder::have_active_callbacks());
+}
+
+TEST_P(StatisticsRecorderTest, AddHistogramCallbackWithMultipleClients) {
+  test::TaskEnvironment task_environment;
+  std::string histogram_name = "TestHistogram";
+  HistogramBase* histogram = Histogram::FactoryGet(histogram_name, 1, 1000, 10,
                                                    HistogramBase::kNoFlags);
   EXPECT_TRUE(histogram);
 
-  CallbackCheckWrapper callback_wrapper;
+  CallbackCheckWrapper callback_wrapper1;
+  CallbackCheckWrapper callback_wrapper2;
 
-  bool result = base::StatisticsRecorder::SetCallback(
-      "TestHistogram", base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
-                                  base::Unretained(&callback_wrapper)));
-  EXPECT_TRUE(result);
-  EXPECT_EQ(histogram->flags() & base::HistogramBase::kCallbackExists,
-            base::HistogramBase::kCallbackExists);
+  auto callback1 =
+      std::make_unique<base::StatisticsRecorder::ScopedHistogramSampleObserver>(
+          histogram_name,
+          base::BindRepeating(&CallbackCheckWrapper::OnHistogramChanged,
+                              base::Unretained(&callback_wrapper1)));
 
-  result = base::StatisticsRecorder::SetCallback(
-      "TestHistogram", base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
-                                  base::Unretained(&callback_wrapper)));
-  EXPECT_FALSE(result);
-  EXPECT_EQ(histogram->flags() & base::HistogramBase::kCallbackExists,
-            base::HistogramBase::kCallbackExists);
+  EXPECT_TRUE(histogram->HasFlags(base::HistogramBase::kCallbackExists));
+  EXPECT_TRUE(base::StatisticsRecorder::have_active_callbacks());
+
+  auto callback2 =
+      std::make_unique<base::StatisticsRecorder::ScopedHistogramSampleObserver>(
+          histogram_name,
+          base::BindRepeating(&CallbackCheckWrapper::OnHistogramChanged,
+                              base::Unretained(&callback_wrapper2)));
+
+  EXPECT_TRUE(histogram->HasFlags(base::HistogramBase::kCallbackExists));
+  EXPECT_TRUE(base::StatisticsRecorder::have_active_callbacks());
 
   histogram->Add(1);
-
-  EXPECT_TRUE(callback_wrapper.called);
+  base::RunLoop().RunUntilIdle();
+  EXPECT_TRUE(callback_wrapper1.called);
+  histogram->Add(1);
+  base::RunLoop().RunUntilIdle();
+  EXPECT_TRUE(callback_wrapper2.called);
 }
 
-// Check that you can't overwrite the callback with another.
-TEST_P(StatisticsRecorderTest, ClearCallbackSuceedsWithHistogramTest) {
-  HistogramBase* histogram = Histogram::FactoryGet("TestHistogram", 1, 1000, 10,
+TEST_P(StatisticsRecorderTest, RemoveHistogramCallbackWithMultipleClients) {
+  test::TaskEnvironment task_environment;
+  std::string histogram_name = "TestHistogram";
+  HistogramBase* histogram = Histogram::FactoryGet(histogram_name, 1, 1000, 10,
                                                    HistogramBase::kNoFlags);
   EXPECT_TRUE(histogram);
 
-  CallbackCheckWrapper callback_wrapper;
+  CallbackCheckWrapper callback_wrapper1;
+  CallbackCheckWrapper callback_wrapper2;
 
-  bool result = base::StatisticsRecorder::SetCallback(
-      "TestHistogram", base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
-                                  base::Unretained(&callback_wrapper)));
-  EXPECT_TRUE(result);
-  EXPECT_EQ(histogram->flags() & base::HistogramBase::kCallbackExists,
-            base::HistogramBase::kCallbackExists);
+  auto callback1 =
+      std::make_unique<base::StatisticsRecorder::ScopedHistogramSampleObserver>(
+          histogram_name,
+          base::BindRepeating(&CallbackCheckWrapper::OnHistogramChanged,
+                              base::Unretained(&callback_wrapper1)));
+  auto callback2 =
+      std::make_unique<base::StatisticsRecorder::ScopedHistogramSampleObserver>(
+          histogram_name,
+          base::BindRepeating(&CallbackCheckWrapper::OnHistogramChanged,
+                              base::Unretained(&callback_wrapper2)));
 
-  base::StatisticsRecorder::ClearCallback("TestHistogram");
-  EXPECT_EQ(histogram->flags() & base::HistogramBase::kCallbackExists, 0);
+  callback1.reset();
+  EXPECT_TRUE(histogram->HasFlags(base::HistogramBase::kCallbackExists));
+  EXPECT_TRUE(base::StatisticsRecorder::have_active_callbacks());
+
+  callback2.reset();
+  EXPECT_FALSE(histogram->HasFlags(base::HistogramBase::kCallbackExists));
+  EXPECT_FALSE(base::StatisticsRecorder::have_active_callbacks());
 
   histogram->Add(1);
+  base::RunLoop().RunUntilIdle();
 
-  EXPECT_FALSE(callback_wrapper.called);
+  EXPECT_FALSE(callback_wrapper1.called);
+  EXPECT_FALSE(callback_wrapper2.called);
 }
 
 // Check that callback is used.
 TEST_P(StatisticsRecorderTest, CallbackUsedTest) {
+  test::TaskEnvironment task_environment;
   {
     HistogramBase* histogram = Histogram::FactoryGet(
         "TestHistogram", 1, 1000, 10, HistogramBase::kNoFlags);
@@ -522,13 +581,18 @@
 
     CallbackCheckWrapper callback_wrapper;
 
-    base::StatisticsRecorder::SetCallback(
-        "TestHistogram", base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
-                                    base::Unretained(&callback_wrapper)));
+    auto callback = std::make_unique<
+        base::StatisticsRecorder::ScopedHistogramSampleObserver>(
+        "TestHistogram",
+        base::BindRepeating(&CallbackCheckWrapper::OnHistogramChanged,
+                            base::Unretained(&callback_wrapper)));
 
     histogram->Add(1);
+    base::RunLoop().RunUntilIdle();
 
     EXPECT_TRUE(callback_wrapper.called);
+    EXPECT_STREQ(callback_wrapper.last_histogram_name, "TestHistogram");
+    EXPECT_EQ(callback_wrapper.last_name_hash, HashMetricName("TestHistogram"));
     EXPECT_EQ(callback_wrapper.last_histogram_value, 1);
   }
 
@@ -538,14 +602,19 @@
 
     CallbackCheckWrapper callback_wrapper;
 
-    base::StatisticsRecorder::SetCallback(
+    auto callback = std::make_unique<
+        base::StatisticsRecorder::ScopedHistogramSampleObserver>(
         "TestLinearHistogram",
-        base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
-                   base::Unretained(&callback_wrapper)));
+        base::BindRepeating(&CallbackCheckWrapper::OnHistogramChanged,
+                            base::Unretained(&callback_wrapper)));
 
     linear_histogram->Add(1);
+    base::RunLoop().RunUntilIdle();
 
     EXPECT_TRUE(callback_wrapper.called);
+    EXPECT_STREQ(callback_wrapper.last_histogram_name, "TestLinearHistogram");
+    EXPECT_EQ(callback_wrapper.last_name_hash,
+              HashMetricName("TestLinearHistogram"));
     EXPECT_EQ(callback_wrapper.last_histogram_value, 1);
   }
 
@@ -558,14 +627,19 @@
 
     CallbackCheckWrapper callback_wrapper;
 
-    base::StatisticsRecorder::SetCallback(
+    auto callback = std::make_unique<
+        base::StatisticsRecorder::ScopedHistogramSampleObserver>(
         "TestCustomHistogram",
-        base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
-                   base::Unretained(&callback_wrapper)));
+        base::BindRepeating(&CallbackCheckWrapper::OnHistogramChanged,
+                            base::Unretained(&callback_wrapper)));
 
     custom_histogram->Add(1);
+    base::RunLoop().RunUntilIdle();
 
     EXPECT_TRUE(callback_wrapper.called);
+    EXPECT_STREQ(callback_wrapper.last_histogram_name, "TestCustomHistogram");
+    EXPECT_EQ(callback_wrapper.last_name_hash,
+              HashMetricName("TestCustomHistogram"));
     EXPECT_EQ(callback_wrapper.last_histogram_value, 1);
   }
 
@@ -575,35 +649,83 @@
 
     CallbackCheckWrapper callback_wrapper;
 
-    base::StatisticsRecorder::SetCallback(
+    auto callback = std::make_unique<
+        base::StatisticsRecorder::ScopedHistogramSampleObserver>(
         "TestSparseHistogram",
-        base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
-                   base::Unretained(&callback_wrapper)));
+        base::BindRepeating(&CallbackCheckWrapper::OnHistogramChanged,
+                            base::Unretained(&callback_wrapper)));
 
     custom_histogram->Add(1);
+    base::RunLoop().RunUntilIdle();
 
     EXPECT_TRUE(callback_wrapper.called);
+    EXPECT_STREQ(callback_wrapper.last_histogram_name, "TestSparseHistogram");
+    EXPECT_EQ(callback_wrapper.last_name_hash,
+              HashMetricName("TestSparseHistogram"));
     EXPECT_EQ(callback_wrapper.last_histogram_value, 1);
   }
 }
 
 // Check that setting a callback before the histogram exists works.
 TEST_P(StatisticsRecorderTest, CallbackUsedBeforeHistogramCreatedTest) {
+  test::TaskEnvironment task_environment;
   CallbackCheckWrapper callback_wrapper;
 
-  base::StatisticsRecorder::SetCallback(
-      "TestHistogram", base::Bind(&CallbackCheckWrapper::OnHistogramChanged,
-                                  base::Unretained(&callback_wrapper)));
+  auto callback =
+      std::make_unique<base::StatisticsRecorder::ScopedHistogramSampleObserver>(
+          "TestHistogram",
+          base::BindRepeating(&CallbackCheckWrapper::OnHistogramChanged,
+                              base::Unretained(&callback_wrapper)));
 
   HistogramBase* histogram = Histogram::FactoryGet("TestHistogram", 1, 1000, 10,
                                                    HistogramBase::kNoFlags);
   EXPECT_TRUE(histogram);
   histogram->Add(1);
+  base::RunLoop().RunUntilIdle();
 
   EXPECT_TRUE(callback_wrapper.called);
+  EXPECT_STREQ(callback_wrapper.last_histogram_name, "TestHistogram");
+  EXPECT_EQ(callback_wrapper.last_name_hash, HashMetricName("TestHistogram"));
   EXPECT_EQ(callback_wrapper.last_histogram_value, 1);
 }
 
+TEST_P(StatisticsRecorderTest, GlobalCallbackCalled) {
+  HistogramBase* histogram = Histogram::FactoryGet("TestHistogram", 1, 1000, 10,
+                                                   HistogramBase::kNoFlags);
+  EXPECT_TRUE(histogram);
+
+  // This is a static variable rather than a reference capture in the lambda
+  // because only capture-less lambdas can be converted to a raw function
+  // pointer.
+  static size_t callback_callcount;
+  callback_callcount = 0;
+  auto callback = [](const char* histogram_name, uint64_t name_hash,
+                     HistogramBase::Sample sample) {
+    EXPECT_STREQ(histogram_name, "TestHistogram");
+    EXPECT_EQ(sample, 1);
+    ++callback_callcount;
+  };
+
+  base::StatisticsRecorder::SetGlobalSampleCallback(callback);
+
+  // Test that adding a histogram sample calls our callback.
+  histogram->Add(1);
+  EXPECT_EQ(callback_callcount, 1u);
+
+  // Test that the callback gets correctly unregistered.
+  base::StatisticsRecorder::SetGlobalSampleCallback(nullptr);
+  histogram->Add(2);
+  EXPECT_EQ(callback_callcount, 1u);
+}
+
+#if BUILDFLAG(USE_RUNTIME_VLOG)
+// The following tests check that
+// StatisticsRecorder::InitLogOnShutdownWhileLocked dumps the histogram graph
+// to vlog if VLOG_IS_ON(1) at runtime. When
+// USE_RUNTIME_VLOG is not set, all vlog levels are determined at build time
+// and default to off. Since we do not want StatisticsRecorder to dump all the
+// time, VLOG in its code stays off. As a result, the following tests would
+// fail.
+
 TEST_P(StatisticsRecorderTest, LogOnShutdownNotInitialized) {
   ResetVLogInitialized();
   logging::SetMinLogLevel(logging::LOG_WARNING);
@@ -633,13 +755,17 @@
   EXPECT_TRUE(VLOG_IS_ON(1));
   EXPECT_TRUE(IsVLogInitialized());
 }
+#endif  // BUILDFLAG(USE_RUNTIME_VLOG)
 
 class TestHistogramProvider : public StatisticsRecorder::HistogramProvider {
  public:
-  TestHistogramProvider(std::unique_ptr<PersistentHistogramAllocator> allocator)
-      : allocator_(std::move(allocator)), weak_factory_(this) {
+  explicit TestHistogramProvider(
+      std::unique_ptr<PersistentHistogramAllocator> allocator)
+      : allocator_(std::move(allocator)) {
     StatisticsRecorder::RegisterHistogramProvider(weak_factory_.GetWeakPtr());
   }
+  TestHistogramProvider(const TestHistogramProvider&) = delete;
+  TestHistogramProvider& operator=(const TestHistogramProvider&) = delete;
 
   void MergeHistogramDeltas() override {
     PersistentHistogramAllocator::Iterator hist_iter(allocator_.get());
@@ -653,9 +779,7 @@
 
  private:
   std::unique_ptr<PersistentHistogramAllocator> allocator_;
-  WeakPtrFactory<TestHistogramProvider> weak_factory_;
-
-  DISALLOW_COPY_AND_ASSIGN(TestHistogramProvider);
+  WeakPtrFactory<TestHistogramProvider> weak_factory_{this};
 };
 
 TEST_P(StatisticsRecorderTest, ImportHistogramsTest) {
diff --git a/base/metrics/ukm_source_id.cc b/base/metrics/ukm_source_id.cc
deleted file mode 100644
index 220cffa..0000000
--- a/base/metrics/ukm_source_id.cc
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/metrics/ukm_source_id.h"
-
-#include "base/atomic_sequence_num.h"
-#include "base/logging.h"
-#include "base/rand_util.h"
-
-namespace base {
-
-namespace {
-
-const int64_t kLowBitsMask = (INT64_C(1) << 32) - 1;
-const int64_t kNumTypeBits = 2;
-const int64_t kTypeMask = (INT64_C(1) << kNumTypeBits) - 1;
-
-}  // namespace
-
-// static
-UkmSourceId UkmSourceId::New() {
-  // Generate some bits which are unique to this process, so we can generate
-  // IDs independently in different processes. IDs generated by this method may
-  // collide, but it should be sufficiently rare enough to not impact data
-  // quality.
-  const static int64_t process_id_bits =
-      static_cast<int64_t>(RandUint64()) & ~kLowBitsMask;
-  // Generate some bits which are unique within the process, using a counter.
-  static AtomicSequenceNumber seq;
-  UkmSourceId local_id = FromOtherId(seq.GetNext() + 1, UkmSourceId::Type::UKM);
-  // Combine the local and process bits to generate a unique ID.
-  return UkmSourceId((local_id.value_ & kLowBitsMask) | process_id_bits);
-}
-
-// static
-UkmSourceId UkmSourceId::FromOtherId(int64_t other_id, UkmSourceId::Type type) {
-  const int64_t type_bits = static_cast<int64_t>(type);
-  DCHECK_EQ(type_bits, type_bits & kTypeMask);
-  // Stores the type ID in the low bits of the source id, and shifts the rest
-  // of the ID to make room.  This could cause the original ID to overflow, but
-  // that should be rare enough that it won't matter for UKM's purposes.
-  return UkmSourceId((other_id << kNumTypeBits) | type_bits);
-}
-
-UkmSourceId::Type UkmSourceId::GetType() const {
-  return static_cast<UkmSourceId::Type>(value_ & kTypeMask);
-}
-
-}  // namespace base
diff --git a/base/metrics/ukm_source_id.h b/base/metrics/ukm_source_id.h
deleted file mode 100644
index 92da9be..0000000
--- a/base/metrics/ukm_source_id.h
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_METRICS_UKM_SOURCE_ID_H_
-#define BASE_METRICS_UKM_SOURCE_ID_H_
-
-#include "base/base_export.h"
-#include "starboard/types.h"
-
-namespace base {
-
-// An ID used to identify a Source to UKM, for recording information about it.
-// These objects are copyable, assignable, and occupy 64-bits per instance.
-// Prefer passing them by value.
-class BASE_EXPORT UkmSourceId {
- public:
-  enum class Type : int64_t {
-    UKM = 0,
-    NAVIGATION_ID = 1,
-    APP_ID = 2,
-  };
-
-  // Default constructor has the invalid value.
-  constexpr UkmSourceId() : value_(0) {}
-
-  constexpr UkmSourceId& operator=(UkmSourceId other) {
-    value_ = other.value_;
-    return *this;
-  }
-
-  // Allow identity comparisons.
-  constexpr bool operator==(UkmSourceId other) const {
-    return value_ == other.value_;
-  }
-  constexpr bool operator!=(UkmSourceId other) const {
-    return value_ != other.value_;
-  }
-
-  // Allow coercive comparisons to simplify test migration.
-  // TODO(crbug/873866): Remove these once callers are migrated.
-  constexpr bool operator==(int64_t other) const { return value_ == other; }
-  constexpr bool operator!=(int64_t other) const { return value_ != other; }
-
-  // Extract the Type of the SourceId.
-  Type GetType() const;
-
-  // Return the ID as an int64.
-  constexpr int64_t ToInt64() const { return value_; }
-
-  // Convert an int64 ID value to an ID.
-  static constexpr UkmSourceId FromInt64(int64_t internal_value) {
-    return UkmSourceId(internal_value);
-  }
-
-  // Get a new UKM-Type SourceId, which is unique within the scope of a
-  // browser session.
-  static UkmSourceId New();
-
-  // Utility for converting other unique ids to source ids.
-  static UkmSourceId FromOtherId(int64_t value, Type type);
-
- private:
-  constexpr explicit UkmSourceId(int64_t value) : value_(value) {}
-  int64_t value_;
-};
-
-constexpr UkmSourceId kInvalidUkmSourceId = UkmSourceId();
-
-}  // namespace base
-
-#endif  // BASE_METRICS_UKM_SOURCE_ID_H_
diff --git a/base/metrics/user_metrics.cc b/base/metrics/user_metrics.cc
index 6517b06..cb55bfc 100644
--- a/base/metrics/user_metrics.cc
+++ b/base/metrics/user_metrics.cc
@@ -1,17 +1,20 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "base/metrics/user_metrics.h"
 
+#include <stddef.h>
+
 #include <vector>
 
-#include "base/bind.h"
+#include "base/functional/bind.h"
 #include "base/lazy_instance.h"
 #include "base/location.h"
-#include "base/macros.h"
+#include "base/ranges/algorithm.h"
 #include "base/threading/thread_checker.h"
-#include "starboard/types.h"
+#include "base/time/time.h"
+#include "base/trace_event/base_tracing.h"
 
 namespace base {
 namespace {
@@ -28,19 +31,30 @@
 }
 
 void RecordComputedAction(const std::string& action) {
+  RecordComputedActionAt(action, TimeTicks::Now());
+}
+
+void RecordComputedActionSince(const std::string& action,
+                               TimeDelta time_since) {
+  RecordComputedActionAt(action, TimeTicks::Now() - time_since);
+}
+
+void RecordComputedActionAt(const std::string& action, TimeTicks action_time) {
+  TRACE_EVENT_INSTANT1("ui", "UserEvent", TRACE_EVENT_SCOPE_GLOBAL, "action",
+                       action);
   if (!g_task_runner.Get()) {
     DCHECK(g_callbacks.Get().empty());
     return;
   }
 
   if (!g_task_runner.Get()->BelongsToCurrentThread()) {
-    g_task_runner.Get()->PostTask(FROM_HERE,
-                                  BindOnce(&RecordComputedAction, action));
+    g_task_runner.Get()->PostTask(
+        FROM_HERE, BindOnce(&RecordComputedActionAt, action, action_time));
     return;
   }
 
   for (const ActionCallback& callback : g_callbacks.Get()) {
-    callback.Run(action);
+    callback.Run(action, action_time);
   }
 }
 
@@ -55,12 +69,9 @@
   DCHECK(g_task_runner.Get());
   DCHECK(g_task_runner.Get()->BelongsToCurrentThread());
   std::vector<ActionCallback>* callbacks = g_callbacks.Pointer();
-  for (size_t i = 0; i < callbacks->size(); ++i) {
-    if ((*callbacks)[i].Equals(callback)) {
-      callbacks->erase(callbacks->begin() + i);
-      return;
-    }
-  }
+  const auto i = ranges::find(*callbacks, callback);
+  if (i != callbacks->end())
+    callbacks->erase(i);
 }
 
 void SetRecordActionTaskRunner(
@@ -70,4 +81,10 @@
   g_task_runner.Get() = task_runner;
 }
 
+scoped_refptr<SingleThreadTaskRunner> GetRecordActionTaskRunner() {
+  if (g_task_runner.IsCreated())
+    return g_task_runner.Get();
+  return nullptr;
+}
+
 }  // namespace base
diff --git a/base/metrics/user_metrics.h b/base/metrics/user_metrics.h
index 87fbd9c..8181559 100644
--- a/base/metrics/user_metrics.h
+++ b/base/metrics/user_metrics.h
@@ -1,4 +1,4 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
@@ -8,12 +8,14 @@
 #include <string>
 
 #include "base/base_export.h"
-#include "base/callback.h"
+#include "base/functional/callback.h"
 #include "base/metrics/user_metrics_action.h"
-#include "base/single_thread_task_runner.h"
+#include "base/task/single_thread_task_runner.h"
 
 namespace base {
 
+class TimeTicks;
+
 // This module provides some helper functions for logging actions tracked by
 // the user metrics system.
 
@@ -55,8 +57,18 @@
 // SetRecordActionTaskRunner().
 BASE_EXPORT void RecordComputedAction(const std::string& action);
 
+// Similar to RecordComputedAction, but also takes the time at which the action
+// was observed.
+BASE_EXPORT void RecordComputedActionAt(const std::string& action,
+                                        TimeTicks action_time);
+
+// Similar to RecordComputedActionAt, but takes the amount of time elapsed since
+// the action was observed.
+BASE_EXPORT void RecordComputedActionSince(const std::string& action,
+                                           TimeDelta time_since);
+
 // Called with the action string.
-typedef Callback<void(const std::string&)> ActionCallback;
+using ActionCallback = RepeatingCallback<void(const std::string&, TimeTicks)>;
 
 // Add/remove action callbacks (see above).
 // These functions must be called after the task runner has been set with
@@ -68,6 +80,10 @@
 BASE_EXPORT void SetRecordActionTaskRunner(
     scoped_refptr<SingleThreadTaskRunner> task_runner);
 
+// Returns the task runner used to record actions. Returns null when not set.
+// This function is thread safe.
+BASE_EXPORT scoped_refptr<SingleThreadTaskRunner> GetRecordActionTaskRunner();
+
 }  // namespace base
 
 #endif  // BASE_METRICS_USER_METRICS_H_
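
A usage sketch tying together the additions above (the action name, delay, and callback body are illustrative):

    // Callbacks now also receive the time at which the action was observed.
    base::ActionCallback callback = base::BindRepeating(
        [](const std::string& action, base::TimeTicks action_time) {
          // Forward |action| and |action_time| to the metrics backend.
        });
    base::AddActionCallback(callback);

    // Record an action that was observed five seconds ago.
    base::RecordComputedActionSince("MyAction", base::Seconds(5));

    base::RemoveActionCallback(callback);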
diff --git a/base/metrics/user_metrics_action.h b/base/metrics/user_metrics_action.h
index 454ed83..5adc93b 100644
--- a/base/metrics/user_metrics_action.h
+++ b/base/metrics/user_metrics_action.h
@@ -1,4 +1,4 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.