Import Cobalt 19.master.0.205881
diff --git a/src/base/memory/aligned_memory.cc b/src/base/memory/aligned_memory.cc
index cad5f50..0ae2268 100644
--- a/src/base/memory/aligned_memory.cc
+++ b/src/base/memory/aligned_memory.cc
@@ -5,10 +5,12 @@
 #include "base/memory/aligned_memory.h"
 
 #include "base/logging.h"
-#include "starboard/memory.h"
+#include "build/build_config.h"
 
-#if defined(OS_ANDROID) || defined(OS_NACL) || defined(__LB_SHELL__)
+#if defined(OS_ANDROID)
 #include <malloc.h>
+
+#include "starboard/types.h"
 #endif
 
 namespace base {
@@ -17,22 +19,21 @@
   DCHECK_GT(size, 0U);
   DCHECK_EQ(alignment & (alignment - 1), 0U);
   DCHECK_EQ(alignment % sizeof(void*), 0U);
-  void* ptr = NULL;
+  void* ptr = nullptr;
 #if defined(OS_STARBOARD)
   ptr = SbMemoryAllocateAligned(alignment, size);
 #elif defined(COMPILER_MSVC)
   ptr = _aligned_malloc(size, alignment);
-// Both Android and NaCl technically support posix_memalign(), but do not expose
-// it in the current version of the library headers used by Chrome.  Luckily,
-// memalign() on both platforms returns pointers which can safely be used with
-// free(), so we can use it instead.  Issues filed with each project for docs:
+// Android technically supports posix_memalign(), but does not expose it in
+// the current version of the library headers used by Chrome.  Luckily,
+// memalign() on Android returns pointers which can safely be used with
+// free(), so we can use it instead.  Issue filed to document this:
 // http://code.google.com/p/android/issues/detail?id=35391
-// http://code.google.com/p/chromium/issues/detail?id=138579
-#elif defined(OS_ANDROID) || defined(OS_NACL) || defined(__LB_SHELL__)
+#elif defined(OS_ANDROID)
   ptr = memalign(alignment, size);
 #else
   if (posix_memalign(&ptr, alignment, size))
-    ptr = NULL;
+    ptr = nullptr;
 #endif
   // Since aligned allocations may fail for non-memory related reasons, force a
   // crash if we encounter a failed allocation; maintaining consistent behavior
diff --git a/src/base/memory/aligned_memory.h b/src/base/memory/aligned_memory.h
index fa3b2f2..449eba3 100644
--- a/src/base/memory/aligned_memory.h
+++ b/src/base/memory/aligned_memory.h
@@ -2,6 +2,42 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#ifndef BASE_MEMORY_ALIGNED_MEMORY_H_
+#define BASE_MEMORY_ALIGNED_MEMORY_H_
+
+#include <type_traits>
+
+#include "starboard/types.h"
+
+#include "starboard/memory.h"
+
+#include "base/base_export.h"
+#include "base/basictypes.h"
+#include "base/compiler_specific.h"
+#include "build/build_config.h"
+
+#if defined(COMPILER_MSVC)
+#include <malloc.h>
+#else
+#include <stdlib.h>
+#endif
+
+// A runtime sized aligned allocation can be created:
+//
+//   float* my_array = static_cast<float*>(AlignedAlloc(size, alignment));
+//
+//   // ... later, to release the memory:
+//   AlignedFree(my_array);
+//
+// Or using unique_ptr:
+//
+//   std::unique_ptr<float, AlignedFreeDeleter> my_array(
+//       static_cast<float*>(AlignedAlloc(size, alignment)));
+
+namespace base {
+
+// TODO(johnx): Disable/Replace and remove AlignedMemory if possible.
+#if defined(STARBOARD)
 // AlignedMemory is a POD type that gives you a portable way to specify static
 // or local stack data of a given alignment and size. For example, if you need
 // static storage for a class, but you want manual control over when the object
@@ -31,47 +67,31 @@
 //   scoped_ptr_malloc<float, ScopedPtrAlignedFree> my_array(
 //       static_cast<float*>(AlignedAlloc(size, alignment)));
 
-#ifndef BASE_MEMORY_ALIGNED_MEMORY_H_
-#define BASE_MEMORY_ALIGNED_MEMORY_H_
-
-#include "base/base_export.h"
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-
-#if defined(OS_STARBOARD)
-#include "starboard/memory.h"
-#elif defined(COMPILER_MSVC)
-#include <malloc.h>
-#else
-#include <stdlib.h>
-#endif
-
-namespace base {
-
 // AlignedMemory is specialized for all supported alignments.
 // Make sure we get a compiler error if someone uses an unsupported alignment.
 template <size_t Size, size_t ByteAlignment>
 struct AlignedMemory {};
 
-#define BASE_DECL_ALIGNED_MEMORY(byte_alignment) \
-    template <size_t Size> \
-    class AlignedMemory<Size, byte_alignment> { \
-     public: \
-      ALIGNAS(byte_alignment) uint8 data_[Size]; \
-      void* void_data() { return static_cast<void*>(data_); } \
-      const void* void_data() const { \
-        return static_cast<const void*>(data_); \
-      } \
-      template<typename Type> \
-      Type* data_as() { return static_cast<Type*>(void_data()); } \
-      template<typename Type> \
-      const Type* data_as() const { \
-        return static_cast<const Type*>(void_data()); \
-      } \
-     private: \
-      void* operator new(size_t); \
-      void operator delete(void*); \
-    }
+#define BASE_DECL_ALIGNED_MEMORY(byte_alignment)                              \
+  template <size_t Size>                                                      \
+  class AlignedMemory<Size, byte_alignment> {                                 \
+   public:                                                                    \
+    ALIGNAS(byte_alignment) uint8 data_[Size];                                \
+    void* void_data() { return static_cast<void*>(data_); }                   \
+    const void* void_data() const { return static_cast<const void*>(data_); } \
+    template <typename Type>                                                  \
+    Type* data_as() {                                                         \
+      return static_cast<Type*>(void_data());                                 \
+    }                                                                         \
+    template <typename Type>                                                  \
+    const Type* data_as() const {                                             \
+      return static_cast<const Type*>(void_data());                           \
+    }                                                                         \
+                                                                              \
+   private:                                                                   \
+    void* operator new(size_t);                                               \
+    void operator delete(void*);                                              \
+  }
 
 // Specialization for all alignments is required because MSVC (as of VS 2008)
 // does not understand ALIGNAS(ALIGNOF(Type)) or ALIGNAS(template_param).
@@ -92,22 +112,28 @@
 BASE_DECL_ALIGNED_MEMORY(4096);
 
 #undef BASE_DECL_ALIGNED_MEMORY
+#endif  // defined(STARBOARD)
 
+// This can be replaced with std::aligned_alloc when we have C++17.
+// Caveat: std::aligned_alloc requires the size parameter be an integral
+// multiple of alignment.
 BASE_EXPORT void* AlignedAlloc(size_t size, size_t alignment);
 
 inline void AlignedFree(void* ptr) {
-#if defined(OS_STARBOARD)
+#if defined(STARBOARD)
   SbMemoryDeallocateAligned(ptr);
-#elif defined(COMPILER_MSVC)
+#else
+#if defined(COMPILER_MSVC)
   _aligned_free(ptr);
 #else
   free(ptr);
 #endif
+#endif  // defined(STARBOARD)
 }
 
-// Helper class for use with scoped_ptr_malloc.
-class BASE_EXPORT ScopedPtrAlignedFree {
- public:
+// Deleter for use with unique_ptr. E.g., use as
+//   std::unique_ptr<Foo, base::AlignedFreeDeleter> foo;
+struct AlignedFreeDeleter {
   inline void operator()(void* ptr) const {
     AlignedFree(ptr);
   }
diff --git a/src/base/memory/aligned_memory_unittest.cc b/src/base/memory/aligned_memory_unittest.cc
index 3f378c2..e354f38 100644
--- a/src/base/memory/aligned_memory_unittest.cc
+++ b/src/base/memory/aligned_memory_unittest.cc
@@ -3,84 +3,44 @@
 // found in the LICENSE file.
 
 #include "base/memory/aligned_memory.h"
-#include "base/memory/scoped_ptr.h"
+
+#include <memory>
+
+#include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 #define EXPECT_ALIGNED(ptr, align) \
     EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(ptr) & (align - 1))
 
-namespace {
-
-using base::AlignedMemory;
-
-TEST(AlignedMemoryTest, StaticAlignment) {
-  static AlignedMemory<8, 8> raw8;
-  static AlignedMemory<8, 16> raw16;
-  static AlignedMemory<8, 256> raw256;
-  static AlignedMemory<8, 4096> raw4096;
-
-  EXPECT_EQ(8u, ALIGNOF(raw8));
-  EXPECT_EQ(16u, ALIGNOF(raw16));
-  EXPECT_EQ(256u, ALIGNOF(raw256));
-  EXPECT_EQ(4096u, ALIGNOF(raw4096));
-
-  EXPECT_ALIGNED(raw8.void_data(), 8);
-  EXPECT_ALIGNED(raw16.void_data(), 16);
-  EXPECT_ALIGNED(raw256.void_data(), 256);
-  EXPECT_ALIGNED(raw4096.void_data(), 4096);
-}
-
-TEST(AlignedMemoryTest, StackAlignment) {
-  AlignedMemory<8, 8> raw8;
-  AlignedMemory<8, 16> raw16;
-
-  EXPECT_EQ(8u, ALIGNOF(raw8));
-  EXPECT_EQ(16u, ALIGNOF(raw16));
-
-  EXPECT_ALIGNED(raw8.void_data(), 8);
-  EXPECT_ALIGNED(raw16.void_data(), 16);
-
-#if !SB_HAS_QUIRK(DOES_NOT_STACK_ALIGN_OVER_16_BYTES)
-  AlignedMemory<8, 256> raw256;
-  EXPECT_EQ(256u, ALIGNOF(raw256));
-  EXPECT_ALIGNED(raw256.void_data(), 256);
-
-  // TODO: This test hits an armv7 bug in clang. crbug.com/138066
-#if !defined(ARCH_CPU_ARM_FAMILY)
-  AlignedMemory<8, 4096> raw4096;
-  EXPECT_EQ(4096u, ALIGNOF(raw4096));
-  EXPECT_ALIGNED(raw4096.void_data(), 4096);
-#endif  // !defined(ARCH_CPU_ARM_FAMILY))
-#endif  // !SB_HAS_QUIRK(DOES_NOT_STACK_ALIGN_OVER_16_BYTES)
-}
+namespace base {
 
 TEST(AlignedMemoryTest, DynamicAllocation) {
-  void* p = base::AlignedAlloc(8, 8);
+  void* p = AlignedAlloc(8, 8);
   EXPECT_TRUE(p);
   EXPECT_ALIGNED(p, 8);
-  base::AlignedFree(p);
+  AlignedFree(p);
 
-  p = base::AlignedAlloc(8, 16);
+  p = AlignedAlloc(8, 16);
   EXPECT_TRUE(p);
   EXPECT_ALIGNED(p, 16);
-  base::AlignedFree(p);
+  AlignedFree(p);
 
-  p = base::AlignedAlloc(8, 256);
+  p = AlignedAlloc(8, 256);
   EXPECT_TRUE(p);
   EXPECT_ALIGNED(p, 256);
-  base::AlignedFree(p);
+  AlignedFree(p);
 
-  p = base::AlignedAlloc(8, 4096);
+  p = AlignedAlloc(8, 4096);
   EXPECT_TRUE(p);
   EXPECT_ALIGNED(p, 4096);
-  base::AlignedFree(p);
+  AlignedFree(p);
 }
 
 TEST(AlignedMemoryTest, ScopedDynamicAllocation) {
-  scoped_ptr_malloc<float, base::ScopedPtrAlignedFree> p(
-      static_cast<float*>(base::AlignedAlloc(8, 8)));
+  std::unique_ptr<float, AlignedFreeDeleter> p(
+      static_cast<float*>(AlignedAlloc(8, 8)));
   EXPECT_TRUE(p.get());
   EXPECT_ALIGNED(p.get(), 8);
 }
 
-}  // namespace
+}  // namespace base
diff --git a/src/base/memory/discardable_memory.cc b/src/base/memory/discardable_memory.cc
new file mode 100644
index 0000000..f0730aa
--- /dev/null
+++ b/src/base/memory/discardable_memory.cc
@@ -0,0 +1,13 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/discardable_memory.h"
+
+namespace base {
+
+DiscardableMemory::DiscardableMemory() = default;
+
+DiscardableMemory::~DiscardableMemory() = default;
+
+}  // namespace base
diff --git a/src/base/memory/discardable_memory.h b/src/base/memory/discardable_memory.h
new file mode 100644
index 0000000..5c632d1
--- /dev/null
+++ b/src/base/memory/discardable_memory.h
@@ -0,0 +1,78 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_DISCARDABLE_MEMORY_H_
+#define BASE_MEMORY_DISCARDABLE_MEMORY_H_
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+
+namespace base {
+
+namespace trace_event {
+class MemoryAllocatorDump;
+class ProcessMemoryDump;
+}
+
+// Discardable memory is used to cache large objects without worrying about
+// blowing out memory, both on mobile devices where there is no swap, and
+// desktop devices where unused free memory should be used to help the user
+// experience. This is preferable to releasing memory in response to an OOM
+// signal because it is simpler and provides system-wide management of
+// purgable memory, though it has less flexibility as to which objects get
+// discarded.
+//
+// Discardable memory has two states: locked and unlocked. While the memory is
+// locked, it will not be discarded. Unlocking the memory allows the
+// discardable memory system and the OS to reclaim it if needed. Locks do not
+// nest.
+//
+// Notes:
+//   - The paging behavior of memory while it is locked is not specified. While
+//     mobile platforms will not swap it out, it may qualify for swapping
+//     on desktop platforms. It is not expected that this will matter, as the
+//     preferred pattern of usage for DiscardableMemory is to lock down the
+//     memory, use it as quickly as possible, and then unlock it.
+//   - Because of memory alignment, the amount of memory allocated can be
+//     larger than the requested memory size. It is not very efficient for
+//     small allocations.
+//   - A discardable memory instance is not thread safe. It is the
+//     responsibility of users of discardable memory to ensure there are no
+//     races.
+//
+class BASE_EXPORT DiscardableMemory {
+ public:
+  DiscardableMemory();
+  virtual ~DiscardableMemory();
+
+  // Locks the memory so that it will not be purged by the system. Returns
+  // true on success. If the return value is false then this object should be
+  // discarded and a new one should be created.
+  virtual bool Lock() WARN_UNUSED_RESULT = 0;
+
+  // Unlocks the memory so that it can be purged by the system. Must be called
+  // after every successful lock call.
+  virtual void Unlock() = 0;
+
+  // Returns the memory address held by this object. The object must be locked
+  // before calling this.
+  virtual void* data() const = 0;
+
+  // Handy method to simplify calling data() with a reinterpret_cast.
+  template<typename T> T* data_as() const {
+    return reinterpret_cast<T*>(data());
+  }
+
+  // Used for dumping the statistics of discardable memory allocated in tracing.
+  // Returns a new MemoryAllocatorDump in the |pmd| with the size of the
+  // discardable memory. The MemoryAllocatorDump created is owned by |pmd|. See
+  // ProcessMemoryDump::CreateAllocatorDump.
+  virtual trace_event::MemoryAllocatorDump* CreateMemoryAllocatorDump(
+      const char* name,
+      trace_event::ProcessMemoryDump* pmd) const = 0;
+};
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_DISCARDABLE_MEMORY_H_
diff --git a/src/base/memory/discardable_memory_allocator.cc b/src/base/memory/discardable_memory_allocator.cc
new file mode 100644
index 0000000..3dbb276
--- /dev/null
+++ b/src/base/memory/discardable_memory_allocator.cc
@@ -0,0 +1,29 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/discardable_memory_allocator.h"
+
+#include "base/logging.h"
+
+namespace base {
+namespace {
+
+DiscardableMemoryAllocator* g_discardable_allocator = nullptr;
+
+}  // namespace
+
+// static
+void DiscardableMemoryAllocator::SetInstance(
+    DiscardableMemoryAllocator* allocator) {
+  DCHECK(!allocator || !g_discardable_allocator);
+  g_discardable_allocator = allocator;
+}
+
+// static
+DiscardableMemoryAllocator* DiscardableMemoryAllocator::GetInstance() {
+  DCHECK(g_discardable_allocator);
+  return g_discardable_allocator;
+}
+
+}  // namespace base
diff --git a/src/base/memory/discardable_memory_allocator.h b/src/base/memory/discardable_memory_allocator.h
new file mode 100644
index 0000000..e45a31f
--- /dev/null
+++ b/src/base/memory/discardable_memory_allocator.h
@@ -0,0 +1,37 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_DISCARDABLE_MEMORY_ALLOCATOR_H_
+#define BASE_MEMORY_DISCARDABLE_MEMORY_ALLOCATOR_H_
+
+#include <memory>
+
+#include "base/base_export.h"
+#include "starboard/types.h"
+
+namespace base {
+class DiscardableMemory;
+
+class BASE_EXPORT DiscardableMemoryAllocator {
+ public:
+  // Returns the allocator instance.
+  static DiscardableMemoryAllocator* GetInstance();
+
+  // Sets the allocator instance. Can only be called once, e.g. on startup.
+  // Ownership of |instance| remains with the caller.
+  static void SetInstance(DiscardableMemoryAllocator* allocator);
+
+  // Giant WARNING: Discardable[Shared]Memory is only implemented on Android. On
+  // non-Android platforms, it behaves exactly the same as SharedMemory.
+  // See LockPages() in discardable_shared_memory.cc.
+  virtual std::unique_ptr<DiscardableMemory> AllocateLockedDiscardableMemory(
+      size_t size) = 0;
+
+ protected:
+  virtual ~DiscardableMemoryAllocator() = default;
+};
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_DISCARDABLE_MEMORY_ALLOCATOR_H_
diff --git a/src/base/memory/discardable_shared_memory.cc b/src/base/memory/discardable_shared_memory.cc
new file mode 100644
index 0000000..f8289cf
--- /dev/null
+++ b/src/base/memory/discardable_shared_memory.cc
@@ -0,0 +1,512 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/discardable_shared_memory.h"
+
+#include <algorithm>
+
+#include "base/atomicops.h"
+#include "base/bits.h"
+#include "base/logging.h"
+#include "base/memory/shared_memory_tracker.h"
+#include "base/numerics/safe_math.h"
+#include "base/process/process_metrics.h"
+#include "base/trace_event/memory_allocator_dump.h"
+#include "base/trace_event/process_memory_dump.h"
+#include "build/build_config.h"
+
+#if defined(OS_POSIX) && !defined(OS_NACL)
+// For madvise() which is available on all POSIX compatible systems.
+#include <sys/mman.h>
+#endif
+
+#if defined(OS_ANDROID)
+#include "third_party/ashmem/ashmem.h"
+#endif
+
+#if defined(OS_WIN)
+#include <windows.h>
+#include "base/win/windows_version.h"
+#include "starboard/types.h"
+#endif
+
+namespace base {
+namespace {
+
+// Use a machine-sized pointer as atomic type. It will use the Atomic32 or
+// Atomic64 routines, depending on the architecture.
+typedef intptr_t AtomicType;
+typedef uintptr_t UAtomicType;
+
+// Template specialization for timestamp serialization/deserialization. This
+// is used to serialize timestamps using Unix time on systems where AtomicType
+// does not have enough precision to contain a timestamp in the standard
+// serialized format.
+template <int>
+Time TimeFromWireFormat(int64_t value);
+template <int>
+int64_t TimeToWireFormat(Time time);
+
+// Serialize to Unix time when using 4-byte wire format.
+// Note: On 19 January 2038, this will cease to work.
+template <>
+Time ALLOW_UNUSED_TYPE TimeFromWireFormat<4>(int64_t value) {
+  return value ? Time::UnixEpoch() + TimeDelta::FromSeconds(value) : Time();
+}
+template <>
+int64_t ALLOW_UNUSED_TYPE TimeToWireFormat<4>(Time time) {
+  return time > Time::UnixEpoch() ? (time - Time::UnixEpoch()).InSeconds() : 0;
+}
+
+// Standard serialization format when using 8-byte wire format.
+template <>
+Time ALLOW_UNUSED_TYPE TimeFromWireFormat<8>(int64_t value) {
+  return Time::FromInternalValue(value);
+}
+template <>
+int64_t ALLOW_UNUSED_TYPE TimeToWireFormat<8>(Time time) {
+  return time.ToInternalValue();
+}
+
+struct SharedState {
+  enum LockState { UNLOCKED = 0, LOCKED = 1 };
+
+  explicit SharedState(AtomicType ivalue) { value.i = ivalue; }
+  SharedState(LockState lock_state, Time timestamp) {
+    int64_t wire_timestamp = TimeToWireFormat<sizeof(AtomicType)>(timestamp);
+    DCHECK_GE(wire_timestamp, 0);
+    DCHECK_EQ(lock_state & ~1, 0);
+    value.u = (static_cast<UAtomicType>(wire_timestamp) << 1) | lock_state;
+  }
+
+  LockState GetLockState() const { return static_cast<LockState>(value.u & 1); }
+
+  Time GetTimestamp() const {
+    return TimeFromWireFormat<sizeof(AtomicType)>(value.u >> 1);
+  }
+
+  // Bit 1: Lock state. Bit is set when locked.
+  // Bit 2..sizeof(AtomicType)*8: Usage timestamp. NULL time when locked or
+  // purged.
+  union {
+    AtomicType i;
+    UAtomicType u;
+  } value;
+};
+
+// Shared state is stored at offset 0 in shared memory segments.
+SharedState* SharedStateFromSharedMemory(
+    const WritableSharedMemoryMapping& shared_memory) {
+  DCHECK(shared_memory.IsValid());
+  return static_cast<SharedState*>(shared_memory.memory());
+}
+
+// Round up |size| to a multiple of page size.
+size_t AlignToPageSize(size_t size) {
+  return bits::Align(size, base::GetPageSize());
+}
+
+}  // namespace
+
+DiscardableSharedMemory::DiscardableSharedMemory()
+    : mapped_size_(0), locked_page_count_(0) {
+}
+
+DiscardableSharedMemory::DiscardableSharedMemory(
+    UnsafeSharedMemoryRegion shared_memory_region)
+    : shared_memory_region_(std::move(shared_memory_region)),
+      mapped_size_(0),
+      locked_page_count_(0) {}
+
+DiscardableSharedMemory::~DiscardableSharedMemory() = default;
+
+bool DiscardableSharedMemory::CreateAndMap(size_t size) {
+  CheckedNumeric<size_t> checked_size = size;
+  checked_size += AlignToPageSize(sizeof(SharedState));
+  if (!checked_size.IsValid())
+    return false;
+
+  shared_memory_region_ =
+      UnsafeSharedMemoryRegion::Create(checked_size.ValueOrDie());
+
+  if (!shared_memory_region_.IsValid())
+    return false;
+
+  shared_memory_mapping_ = shared_memory_region_.Map();
+  if (!shared_memory_mapping_.IsValid())
+    return false;
+
+  mapped_size_ = shared_memory_mapping_.mapped_size() -
+                 AlignToPageSize(sizeof(SharedState));
+
+  locked_page_count_ = AlignToPageSize(mapped_size_) / base::GetPageSize();
+#if DCHECK_IS_ON()
+  for (size_t page = 0; page < locked_page_count_; ++page)
+    locked_pages_.insert(page);
+#endif
+
+  DCHECK(last_known_usage_.is_null());
+  SharedState new_state(SharedState::LOCKED, Time());
+  subtle::Release_Store(
+      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i,
+      new_state.value.i);
+  return true;
+}
+
+bool DiscardableSharedMemory::Map(size_t size) {
+  DCHECK(!shared_memory_mapping_.IsValid());
+  if (shared_memory_mapping_.IsValid())
+    return false;
+
+  shared_memory_mapping_ = shared_memory_region_.MapAt(
+      0, AlignToPageSize(sizeof(SharedState)) + size);
+  if (!shared_memory_mapping_.IsValid())
+    return false;
+
+  mapped_size_ = shared_memory_mapping_.mapped_size() -
+                 AlignToPageSize(sizeof(SharedState));
+
+  locked_page_count_ = AlignToPageSize(mapped_size_) / base::GetPageSize();
+#if DCHECK_IS_ON()
+  for (size_t page = 0; page < locked_page_count_; ++page)
+    locked_pages_.insert(page);
+#endif
+
+  return true;
+}
+
+bool DiscardableSharedMemory::Unmap() {
+  if (!shared_memory_mapping_.IsValid())
+    return false;
+
+  shared_memory_mapping_ = WritableSharedMemoryMapping();
+  locked_page_count_ = 0;
+#if DCHECK_IS_ON()
+  locked_pages_.clear();
+#endif
+  mapped_size_ = 0;
+  return true;
+}
+
+DiscardableSharedMemory::LockResult DiscardableSharedMemory::Lock(
+    size_t offset, size_t length) {
+  DCHECK_EQ(AlignToPageSize(offset), offset);
+  DCHECK_EQ(AlignToPageSize(length), length);
+
+  // Calls to this function must be synchronized properly.
+  DFAKE_SCOPED_LOCK(thread_collision_warner_);
+
+  DCHECK(shared_memory_mapping_.IsValid());
+
+  // We need to successfully acquire the platform independent lock before
+  // individual pages can be locked.
+  if (!locked_page_count_) {
+    // Return false when instance has been purged or not initialized properly
+    // by checking if |last_known_usage_| is NULL.
+    if (last_known_usage_.is_null())
+      return FAILED;
+
+    SharedState old_state(SharedState::UNLOCKED, last_known_usage_);
+    SharedState new_state(SharedState::LOCKED, Time());
+    SharedState result(subtle::Acquire_CompareAndSwap(
+        &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i,
+        old_state.value.i, new_state.value.i));
+    if (result.value.u != old_state.value.u) {
+      // Update |last_known_usage_| in case the above CAS failed because of
+      // an incorrect timestamp.
+      last_known_usage_ = result.GetTimestamp();
+      return FAILED;
+    }
+  }
+
+  // Zero for length means "everything onward".
+  if (!length)
+    length = AlignToPageSize(mapped_size_) - offset;
+
+  size_t start = offset / base::GetPageSize();
+  size_t end = start + length / base::GetPageSize();
+  DCHECK_LE(start, end);
+  DCHECK_LE(end, AlignToPageSize(mapped_size_) / base::GetPageSize());
+
+  // Add pages to |locked_page_count_|.
+  // Note: Locking a page that is already locked is an error.
+  locked_page_count_ += end - start;
+#if DCHECK_IS_ON()
+  // Detect incorrect usage by keeping track of exactly what pages are locked.
+  for (auto page = start; page < end; ++page) {
+    auto result = locked_pages_.insert(page);
+    DCHECK(result.second);
+  }
+  DCHECK_EQ(locked_pages_.size(), locked_page_count_);
+#endif
+
+  // Always behave as if memory was purged when trying to lock a 0 byte segment.
+  if (!length)
+      return PURGED;
+
+#if defined(OS_ANDROID)
+  // Ensure that the platform won't discard the required pages.
+  return LockPages(shared_memory_region_,
+                   AlignToPageSize(sizeof(SharedState)) + offset, length);
+#elif defined(OS_MACOSX)
+  // On macOS, there is no mechanism to lock pages. However, we do need to call
+  // madvise(MADV_FREE_REUSE) in order to correctly update accounting for memory
+  // footprint via task_info().
+  //
+  // Note that calling madvise(MADV_FREE_REUSE) on regions that haven't had
+  // madvise(MADV_FREE_REUSABLE) called on them has no effect.
+  //
+  // Note that the corresponding call to MADV_FREE_REUSABLE is in Purge(), since
+  // that's where the memory is actually released, rather than Unlock(), which
+  // is a no-op on macOS.
+  //
+  // For more information, see
+  // https://bugs.chromium.org/p/chromium/issues/detail?id=823915.
+  madvise(static_cast<char*>(shared_memory_mapping_.memory()) +
+              AlignToPageSize(sizeof(SharedState)),
+          AlignToPageSize(mapped_size_), MADV_FREE_REUSE);
+  return DiscardableSharedMemory::SUCCESS;
+#else
+  return DiscardableSharedMemory::SUCCESS;
+#endif
+}
+
+void DiscardableSharedMemory::Unlock(size_t offset, size_t length) {
+  DCHECK_EQ(AlignToPageSize(offset), offset);
+  DCHECK_EQ(AlignToPageSize(length), length);
+
+  // Calls to this function must be synchronized properly.
+  DFAKE_SCOPED_LOCK(thread_collision_warner_);
+
+  // Passing zero for |length| means "everything onward". Note that |length| may
+  // still be zero after this calculation, e.g. if |mapped_size_| is zero.
+  if (!length)
+    length = AlignToPageSize(mapped_size_) - offset;
+
+  DCHECK(shared_memory_mapping_.IsValid());
+
+  // Allow the pages to be discarded by the platform, if supported.
+  UnlockPages(shared_memory_region_,
+              AlignToPageSize(sizeof(SharedState)) + offset, length);
+
+  size_t start = offset / base::GetPageSize();
+  size_t end = start + length / base::GetPageSize();
+  DCHECK_LE(start, end);
+  DCHECK_LE(end, AlignToPageSize(mapped_size_) / base::GetPageSize());
+
+  // Remove pages from |locked_page_count_|.
+  // Note: Unlocking a page that is not locked is an error.
+  DCHECK_GE(locked_page_count_, end - start);
+  locked_page_count_ -= end - start;
+#if DCHECK_IS_ON()
+  // Detect incorrect usage by keeping track of exactly what pages are locked.
+  for (auto page = start; page < end; ++page) {
+    auto erased_count = locked_pages_.erase(page);
+    DCHECK_EQ(1u, erased_count);
+  }
+  DCHECK_EQ(locked_pages_.size(), locked_page_count_);
+#endif
+
+  // Early out and avoid releasing the platform independent lock if some pages
+  // are still locked.
+  if (locked_page_count_)
+    return;
+
+  Time current_time = Now();
+  DCHECK(!current_time.is_null());
+
+  SharedState old_state(SharedState::LOCKED, Time());
+  SharedState new_state(SharedState::UNLOCKED, current_time);
+  // Note: timestamp cannot be NULL as that is a unique value used when
+  // locked or purged.
+  DCHECK(!new_state.GetTimestamp().is_null());
+  // Timestamp precision should at least be accurate to the second.
+  DCHECK_EQ((new_state.GetTimestamp() - Time::UnixEpoch()).InSeconds(),
+            (current_time - Time::UnixEpoch()).InSeconds());
+  SharedState result(subtle::Release_CompareAndSwap(
+      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i,
+      old_state.value.i, new_state.value.i));
+
+  DCHECK_EQ(old_state.value.u, result.value.u);
+
+  last_known_usage_ = current_time;
+}
+
+void* DiscardableSharedMemory::memory() const {
+  return static_cast<uint8_t*>(shared_memory_mapping_.memory()) +
+         AlignToPageSize(sizeof(SharedState));
+}
+
+bool DiscardableSharedMemory::Purge(Time current_time) {
+  // Calls to this function must be synchronized properly.
+  DFAKE_SCOPED_LOCK(thread_collision_warner_);
+  DCHECK(shared_memory_mapping_.IsValid());
+
+  SharedState old_state(SharedState::UNLOCKED, last_known_usage_);
+  SharedState new_state(SharedState::UNLOCKED, Time());
+  SharedState result(subtle::Acquire_CompareAndSwap(
+      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i,
+      old_state.value.i, new_state.value.i));
+
+  // Update |last_known_usage_| to |current_time| if the memory is locked. This
+  // allows the caller to determine if purging failed because last known usage
+  // was incorrect or memory was locked. In the second case, the caller should
+// most likely wait for some amount of time before attempting to purge
+// the memory again.
+  if (result.value.u != old_state.value.u) {
+    last_known_usage_ = result.GetLockState() == SharedState::LOCKED
+                            ? current_time
+                            : result.GetTimestamp();
+    return false;
+  }
+
+// The next section will release as much resource as can be done
+// from the purging process, until the client process notices the
+// purge and releases its own references.
+// Note: this memory will not be accessed again.  The segment will be
+// freed asynchronously at a later time, so just do the best
+// immediately.
+#if defined(OS_POSIX) && !defined(OS_NACL)
+// Linux and Android provide MADV_REMOVE which is preferred as it has a
+// behavior that can be verified in tests. Other POSIX flavors (MacOSX, BSDs),
+// provide MADV_FREE which has the same result but memory is purged lazily.
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+#define MADV_PURGE_ARGUMENT MADV_REMOVE
+#elif defined(OS_MACOSX)
+// MADV_FREE_REUSABLE is similar to MADV_FREE, but also marks the pages with the
+// reusable bit, which allows both Activity Monitor and memory-infra to
+// correctly track the pages.
+#define MADV_PURGE_ARGUMENT MADV_FREE_REUSABLE
+#else
+#define MADV_PURGE_ARGUMENT MADV_FREE
+#endif
+  // Advise the kernel to remove resources associated with purged pages.
+  // Subsequent accesses of memory pages will succeed, but might result in
+  // zero-fill-on-demand pages.
+  if (madvise(static_cast<char*>(shared_memory_mapping_.memory()) +
+                  AlignToPageSize(sizeof(SharedState)),
+              AlignToPageSize(mapped_size_), MADV_PURGE_ARGUMENT)) {
+    DPLOG(ERROR) << "madvise() failed";
+  }
+#elif defined(OS_WIN)
+  if (base::win::GetVersion() >= base::win::VERSION_WIN8_1) {
+    // Discard the purged pages, which releases the physical storage (resident
+    // memory, compressed or swapped), but leaves them reserved & committed.
+    // This does not free commit for use by other applications, but allows the
+    // system to avoid compressing/swapping these pages to free physical memory.
+    static const auto discard_virtual_memory =
+        reinterpret_cast<decltype(&::DiscardVirtualMemory)>(GetProcAddress(
+            GetModuleHandle(L"kernel32.dll"), "DiscardVirtualMemory"));
+    if (discard_virtual_memory) {
+      DWORD discard_result = discard_virtual_memory(
+          static_cast<char*>(shared_memory_mapping_.memory()) +
+              AlignToPageSize(sizeof(SharedState)),
+          AlignToPageSize(mapped_size_));
+      if (discard_result != ERROR_SUCCESS) {
+        DLOG(DCHECK) << "DiscardVirtualMemory() failed in Purge(): "
+                     << logging::SystemErrorCodeToString(discard_result);
+      }
+    }
+  }
+#endif
+
+  last_known_usage_ = Time();
+  return true;
+}
+
+bool DiscardableSharedMemory::IsMemoryResident() const {
+  DCHECK(shared_memory_mapping_.IsValid());
+
+  SharedState result(subtle::NoBarrier_Load(
+      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i));
+
+  return result.GetLockState() == SharedState::LOCKED ||
+         !result.GetTimestamp().is_null();
+}
+
+bool DiscardableSharedMemory::IsMemoryLocked() const {
+  DCHECK(shared_memory_mapping_.IsValid());
+
+  SharedState result(subtle::NoBarrier_Load(
+      &SharedStateFromSharedMemory(shared_memory_mapping_)->value.i));
+
+  return result.GetLockState() == SharedState::LOCKED;
+}
+
+void DiscardableSharedMemory::Close() {
+  shared_memory_region_ = UnsafeSharedMemoryRegion();
+}
+
+void DiscardableSharedMemory::CreateSharedMemoryOwnershipEdge(
+    trace_event::MemoryAllocatorDump* local_segment_dump,
+    trace_event::ProcessMemoryDump* pmd,
+    bool is_owned) const {
+  auto* shared_memory_dump = SharedMemoryTracker::GetOrCreateSharedMemoryDump(
+      shared_memory_mapping_, pmd);
+  // TODO(ssid): Clean this by a new api to inherit size of parent dump once
+  // we send the full PMD and calculate sizes inside chrome, crbug.com/704203.
+  size_t resident_size = shared_memory_dump->GetSizeInternal();
+  local_segment_dump->AddScalar(trace_event::MemoryAllocatorDump::kNameSize,
+                                trace_event::MemoryAllocatorDump::kUnitsBytes,
+                                resident_size);
+
+  // By creating an edge with a higher |importance| (w.r.t non-owned dumps)
+  // the tracing UI will account the effective size of the segment to the
+  // client instead of manager.
+  // TODO(ssid): Define better constants in MemoryAllocatorDump for importance
+  // values, crbug.com/754793.
+  const int kImportance = is_owned ? 2 : 0;
+  auto shared_memory_guid = shared_memory_mapping_.guid();
+  local_segment_dump->AddString("id", "hash", shared_memory_guid.ToString());
+
+  // Owned discardable segments, which are allocated by the client process,
+  // could have been cleared by the discardable manager, so the segment need
+  // not exist in memory. Weak dumps are created to indicate to the UI that the
+  // dump should exist only if the manager also created the global dump edge.
+  if (is_owned) {
+    pmd->CreateWeakSharedMemoryOwnershipEdge(local_segment_dump->guid(),
+                                             shared_memory_guid, kImportance);
+  } else {
+    pmd->CreateSharedMemoryOwnershipEdge(local_segment_dump->guid(),
+                                         shared_memory_guid, kImportance);
+  }
+}
+
+// static
+DiscardableSharedMemory::LockResult DiscardableSharedMemory::LockPages(
+    const UnsafeSharedMemoryRegion& region,
+    size_t offset,
+    size_t length) {
+#if defined(OS_ANDROID)
+  if (region.IsValid()) {
+    int pin_result =
+        ashmem_pin_region(region.GetPlatformHandle(), offset, length);
+    if (pin_result == ASHMEM_WAS_PURGED)
+      return PURGED;
+    if (pin_result < 0)
+      return FAILED;
+  }
+#endif
+  return SUCCESS;
+}
+
+// static
+void DiscardableSharedMemory::UnlockPages(
+    const UnsafeSharedMemoryRegion& region,
+    size_t offset,
+    size_t length) {
+#if defined(OS_ANDROID)
+  if (region.IsValid()) {
+    int unpin_result =
+        ashmem_unpin_region(region.GetPlatformHandle(), offset, length);
+    DCHECK_EQ(0, unpin_result);
+  }
+#endif
+}
+
+Time DiscardableSharedMemory::Now() const {
+  return Time::Now();
+}
+
+}  // namespace base
diff --git a/src/base/memory/discardable_shared_memory.h b/src/base/memory/discardable_shared_memory.h
new file mode 100644
index 0000000..73a2723
--- /dev/null
+++ b/src/base/memory/discardable_shared_memory.h
@@ -0,0 +1,187 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_DISCARDABLE_SHARED_MEMORY_H_
+#define BASE_MEMORY_DISCARDABLE_SHARED_MEMORY_H_
+
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/shared_memory_mapping.h"
+#include "base/memory/unsafe_shared_memory_region.h"
+#include "base/threading/thread_collision_warner.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+
+#if DCHECK_IS_ON()
+#include <set>
+
+#include "starboard/types.h"
+#endif
+
+// Linux (including Android) supports the MADV_REMOVE argument with madvise()
+// which has the behavior of reliably causing zero-fill-on-demand pages to
+// be returned after a call. Here we define
+// DISCARDABLE_SHARED_MEMORY_ZERO_FILL_ON_DEMAND_PAGES_AFTER_PURGE on Linux
+// and Android to indicate that this type of behavior can be expected on
+// those platforms. Note that madvise() will still be used on other POSIX
+// platforms but doesn't provide the zero-fill-on-demand pages guarantee.
+#if defined(OS_LINUX) || defined(OS_ANDROID)
+#define DISCARDABLE_SHARED_MEMORY_ZERO_FILL_ON_DEMAND_PAGES_AFTER_PURGE
+#endif
+
+namespace base {
+
+namespace trace_event {
+class MemoryAllocatorDump;
+class ProcessMemoryDump;
+}  // namespace trace_event
+
+// Platform abstraction for discardable shared memory.
+//
+// This class is not thread-safe. Clients are responsible for synchronizing
+// access to an instance of this class.
+class BASE_EXPORT DiscardableSharedMemory {
+ public:
+  enum LockResult { SUCCESS, PURGED, FAILED };
+
+  DiscardableSharedMemory();
+
+  // Create a new DiscardableSharedMemory object from an existing, open shared
+  // memory file. Memory must be locked.
+  explicit DiscardableSharedMemory(UnsafeSharedMemoryRegion region);
+
+  // Closes any open files.
+  virtual ~DiscardableSharedMemory();
+
+  // Creates and maps a locked DiscardableSharedMemory object with |size|.
+  // Returns true on success and false on failure.
+  bool CreateAndMap(size_t size);
+
+  // Maps the locked discardable memory into the caller's address space.
+  // Returns true on success, false otherwise.
+  bool Map(size_t size);
+
+  // Unmaps the discardable shared memory from the caller's address space.
+  // Unmapping won't unlock previously locked range.
+  // Returns true if successful; returns false on error or if the memory is
+  // not mapped.
+  bool Unmap();
+
+  // The actual size of the mapped memory (may be larger than requested).
+  size_t mapped_size() const { return mapped_size_; }
+
+  // Returns a duplicated shared memory region for this DiscardableSharedMemory
+  // object.
+  UnsafeSharedMemoryRegion DuplicateRegion() const {
+    return shared_memory_region_.Duplicate();
+  }
+
+  // Returns an ID for the shared memory region. This is the ID of the mapped
+  // region, consistent across all processes, and is valid as long as the
+  // region is not unmapped.
+  const UnguessableToken& mapped_id() const {
+    return shared_memory_mapping_.guid();
+  }
+
+  // Locks a range of memory so that it will not be purged by the system.
+  // The range of memory must be unlocked. The result of trying to lock an
+  // already locked range is undefined. |offset| and |length| must both be
+  // a multiple of the page size as returned by GetPageSize().
+  // Passing 0 for |length| means "everything onward".
+  // Returns SUCCESS if range was successfully locked and the memory is still
+  // resident, PURGED if range was successfully locked but has been purged
+  // since last time it was locked and FAILED if range could not be locked.
+  // Locking can fail for two reasons: the object might have been purged, or
+  // our last known usage timestamp might be out of date. Last known usage
+  // time is updated to the actual last usage timestamp if memory is still
+  // resident, or 0 if not.
+  LockResult Lock(size_t offset, size_t length);
+
+  // Unlock a previously successfully locked range of memory. The range of
+  // memory must be locked. The result of trying to unlock a not
+  // previously locked range is undefined.
+  // |offset| and |length| must both be a multiple of the page size as returned
+  // by GetPageSize().
+  // Passing 0 for |length| means "everything onward".
+  void Unlock(size_t offset, size_t length);
+
+  // Gets a pointer to the opened discardable memory space. Discardable memory
+  // must have been mapped via Map().
+  void* memory() const;
+
+  // Returns the last known usage time for DiscardableSharedMemory object. This
+  // may be earlier than the "true" usage time when memory has been used by a
+  // different process. Returns NULL time if purged.
+  Time last_known_usage() const { return last_known_usage_; }
+
+  // This returns true and sets |last_known_usage_| to 0 if
+  // DiscardableSharedMemory object was successfully purged. Purging can fail
+  // for two reasons: the object might be locked, or our last known usage
+  // timestamp might be out of date. Last known usage time is updated to
+  // |current_time| if locked, or to the actual last usage timestamp if
+  // unlocked. It is often necessary to call this function twice for the
+  // object to be successfully purged: the first call updates
+  // |last_known_usage_|; the second purges using the updated value.
+  // Note: there is no guarantee that multiple calls to this function will
+  // successfully purge the object. The DiscardableSharedMemory object might
+  // be locked, or another thread/process might be able to lock and unlock it
+  // in between each call.
+  bool Purge(Time current_time);
+
+  // Returns true if memory is still resident.
+  bool IsMemoryResident() const;
+
+  // Returns true if memory is locked.
+  bool IsMemoryLocked() const;
+
+  // Closes the open discardable memory segment.
+  // It is safe to call Close repeatedly.
+  void Close();
+
+  // For tracing: Creates ownership edge to the underlying shared memory dump
+  // which is cross process in the given |pmd|. |local_segment_dump| is the dump
+  // associated with the local discardable shared memory segment and |is_owned|
+  // is true when the current process owns the segment and the effective memory
+  // is assigned to the current process.
+  void CreateSharedMemoryOwnershipEdge(
+      trace_event::MemoryAllocatorDump* local_segment_dump,
+      trace_event::ProcessMemoryDump* pmd,
+      bool is_owned) const;
+
+ private:
+  // LockPages/UnlockPages are platform-native discardable page management
+  // helper functions. Both expect |offset| to be specified relative to the
+  // base address at which |memory| is mapped, and that |offset| and |length|
+  // are page-aligned by the caller.
+  // Returns SUCCESS on platforms which do not support discardable pages.
+  static LockResult LockPages(const UnsafeSharedMemoryRegion& region,
+                              size_t offset,
+                              size_t length);
+  // UnlockPages() is a no-op on platforms not supporting discardable pages.
+  static void UnlockPages(const UnsafeSharedMemoryRegion& region,
+                          size_t offset,
+                          size_t length);
+
+  // Virtual for tests.
+  virtual Time Now() const;
+
+  UnsafeSharedMemoryRegion shared_memory_region_;
+  WritableSharedMemoryMapping shared_memory_mapping_;
+  size_t mapped_size_;
+  size_t locked_page_count_;
+#if DCHECK_IS_ON()
+  std::set<size_t> locked_pages_;
+#endif
+  // Implementation is not thread-safe but still usable if clients are
+  // synchronized somehow. Use a collision warner to detect incorrect usage.
+  DFAKE_MUTEX(thread_collision_warner_);
+  Time last_known_usage_;
+
+  DISALLOW_COPY_AND_ASSIGN(DiscardableSharedMemory);
+};
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_DISCARDABLE_SHARED_MEMORY_H_
diff --git a/src/base/memory/discardable_shared_memory_unittest.cc b/src/base/memory/discardable_shared_memory_unittest.cc
new file mode 100644
index 0000000..805477e
--- /dev/null
+++ b/src/base/memory/discardable_shared_memory_unittest.cc
@@ -0,0 +1,457 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <fcntl.h>
+
+#include "base/files/scoped_file.h"
+#include "base/memory/discardable_shared_memory.h"
+#include "base/memory/shared_memory_tracker.h"
+#include "base/process/process_metrics.h"
+#include "base/trace_event/memory_allocator_dump.h"
+#include "base/trace_event/process_memory_dump.h"
+#include "build/build_config.h"
+#include "starboard/memory.h"
+#include "starboard/types.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+class TestDiscardableSharedMemory : public DiscardableSharedMemory {
+ public:
+  TestDiscardableSharedMemory() = default;
+
+  explicit TestDiscardableSharedMemory(UnsafeSharedMemoryRegion region)
+      : DiscardableSharedMemory(std::move(region)) {}
+
+  void SetNow(Time now) { now_ = now; }
+
+ private:
+  // Overridden from DiscardableSharedMemory:
+  Time Now() const override { return now_; }
+
+  Time now_;
+};
+
+TEST(DiscardableSharedMemoryTest, CreateAndMap) {
+  const uint32_t kDataSize = 1024;
+
+  TestDiscardableSharedMemory memory;
+  bool rv = memory.CreateAndMap(kDataSize);
+  ASSERT_TRUE(rv);
+  EXPECT_GE(memory.mapped_size(), kDataSize);
+  EXPECT_TRUE(memory.IsMemoryLocked());
+}
+
+TEST(DiscardableSharedMemoryTest, CreateFromHandle) {
+  const uint32_t kDataSize = 1024;
+
+  TestDiscardableSharedMemory memory1;
+  bool rv = memory1.CreateAndMap(kDataSize);
+  ASSERT_TRUE(rv);
+
+  UnsafeSharedMemoryRegion shared_region = memory1.DuplicateRegion();
+  ASSERT_TRUE(shared_region.IsValid());
+
+  TestDiscardableSharedMemory memory2(std::move(shared_region));
+  rv = memory2.Map(kDataSize);
+  ASSERT_TRUE(rv);
+  EXPECT_TRUE(memory2.IsMemoryLocked());
+}
+
+TEST(DiscardableSharedMemoryTest, LockAndUnlock) {
+  const uint32_t kDataSize = 1024;
+
+  TestDiscardableSharedMemory memory1;
+  bool rv = memory1.CreateAndMap(kDataSize);
+  ASSERT_TRUE(rv);
+
+  // Memory is initially locked. Unlock it.
+  memory1.SetNow(Time::FromDoubleT(1));
+  memory1.Unlock(0, 0);
+  EXPECT_FALSE(memory1.IsMemoryLocked());
+
+  // Lock and unlock memory.
+  DiscardableSharedMemory::LockResult lock_rv = memory1.Lock(0, 0);
+  EXPECT_EQ(DiscardableSharedMemory::SUCCESS, lock_rv);
+  memory1.SetNow(Time::FromDoubleT(2));
+  memory1.Unlock(0, 0);
+
+  // Lock again before duplicating and passing ownership to new instance.
+  lock_rv = memory1.Lock(0, 0);
+  EXPECT_EQ(DiscardableSharedMemory::SUCCESS, lock_rv);
+  EXPECT_TRUE(memory1.IsMemoryLocked());
+
+  UnsafeSharedMemoryRegion shared_region = memory1.DuplicateRegion();
+  ASSERT_TRUE(shared_region.IsValid());
+
+  TestDiscardableSharedMemory memory2(std::move(shared_region));
+  rv = memory2.Map(kDataSize);
+  ASSERT_TRUE(rv);
+
+  // Unlock second instance.
+  memory2.SetNow(Time::FromDoubleT(3));
+  memory2.Unlock(0, 0);
+
+  // Both memory instances should be unlocked now.
+  EXPECT_FALSE(memory2.IsMemoryLocked());
+  EXPECT_FALSE(memory1.IsMemoryLocked());
+
+  // Lock second instance before passing ownership back to first instance.
+  lock_rv = memory2.Lock(0, 0);
+  EXPECT_EQ(DiscardableSharedMemory::SUCCESS, lock_rv);
+
+  // Memory should still be resident and locked.
+  rv = memory1.IsMemoryResident();
+  EXPECT_TRUE(rv);
+  EXPECT_TRUE(memory1.IsMemoryLocked());
+
+  // Unlock first instance.
+  memory1.SetNow(Time::FromDoubleT(4));
+  memory1.Unlock(0, 0);
+}
+
+TEST(DiscardableSharedMemoryTest, Purge) {
+  const uint32_t kDataSize = 1024;
+
+  TestDiscardableSharedMemory memory1;
+  bool rv = memory1.CreateAndMap(kDataSize);
+  ASSERT_TRUE(rv);
+
+  UnsafeSharedMemoryRegion shared_region = memory1.DuplicateRegion();
+  ASSERT_TRUE(shared_region.IsValid());
+
+  TestDiscardableSharedMemory memory2(std::move(shared_region));
+  rv = memory2.Map(kDataSize);
+  ASSERT_TRUE(rv);
+
+  // This should fail as memory is locked.
+  rv = memory1.Purge(Time::FromDoubleT(1));
+  EXPECT_FALSE(rv);
+
+  memory2.SetNow(Time::FromDoubleT(2));
+  memory2.Unlock(0, 0);
+
+  ASSERT_TRUE(memory2.IsMemoryResident());
+
+  // Memory is unlocked, but our usage timestamp is incorrect.
+  rv = memory1.Purge(Time::FromDoubleT(3));
+  EXPECT_FALSE(rv);
+
+  ASSERT_TRUE(memory2.IsMemoryResident());
+
+  // Memory is unlocked and our usage timestamp should be correct.
+  rv = memory1.Purge(Time::FromDoubleT(4));
+  EXPECT_TRUE(rv);
+
+  // Lock should fail as memory has been purged.
+  DiscardableSharedMemory::LockResult lock_rv = memory2.Lock(0, 0);
+  EXPECT_EQ(DiscardableSharedMemory::FAILED, lock_rv);
+
+  ASSERT_FALSE(memory2.IsMemoryResident());
+}
+
+TEST(DiscardableSharedMemoryTest, LastUsed) {
+  const uint32_t kDataSize = 1024;
+
+  TestDiscardableSharedMemory memory1;
+  bool rv = memory1.CreateAndMap(kDataSize);
+  ASSERT_TRUE(rv);
+
+  UnsafeSharedMemoryRegion shared_region = memory1.DuplicateRegion();
+  ASSERT_TRUE(shared_region.IsValid());
+
+  TestDiscardableSharedMemory memory2(std::move(shared_region));
+  rv = memory2.Map(kDataSize);
+  ASSERT_TRUE(rv);
+
+  memory2.SetNow(Time::FromDoubleT(1));
+  memory2.Unlock(0, 0);
+
+  EXPECT_EQ(memory2.last_known_usage(), Time::FromDoubleT(1));
+
+  DiscardableSharedMemory::LockResult lock_rv = memory2.Lock(0, 0);
+  EXPECT_EQ(DiscardableSharedMemory::SUCCESS, lock_rv);
+
+  // This should fail as memory is locked.
+  rv = memory1.Purge(Time::FromDoubleT(2));
+  ASSERT_FALSE(rv);
+
+  // Last usage should have been updated to timestamp passed to Purge above.
+  EXPECT_EQ(memory1.last_known_usage(), Time::FromDoubleT(2));
+
+  memory2.SetNow(Time::FromDoubleT(3));
+  memory2.Unlock(0, 0);
+
+  // Usage time should be correct for |memory2| instance.
+  EXPECT_EQ(memory2.last_known_usage(), Time::FromDoubleT(3));
+
+  // However, usage time has not changed as far as |memory1| instance knows.
+  EXPECT_EQ(memory1.last_known_usage(), Time::FromDoubleT(2));
+
+  // Memory is unlocked, but our usage timestamp is incorrect.
+  rv = memory1.Purge(Time::FromDoubleT(4));
+  EXPECT_FALSE(rv);
+
+  // The failed purge attempt should have updated usage time to the correct
+  // value.
+  EXPECT_EQ(memory1.last_known_usage(), Time::FromDoubleT(3));
+
+  // Purge memory through |memory2| instance. The last usage time should be
+  // set to 0 as a result of this.
+  rv = memory2.Purge(Time::FromDoubleT(5));
+  EXPECT_TRUE(rv);
+  EXPECT_TRUE(memory2.last_known_usage().is_null());
+
+  // This should fail as memory has already been purged and |memory1|'s usage
+  // time is incorrect as a result.
+  rv = memory1.Purge(Time::FromDoubleT(6));
+  EXPECT_FALSE(rv);
+
+  // The failed purge attempt should have updated usage time to the correct
+  // value.
+  EXPECT_TRUE(memory1.last_known_usage().is_null());
+
+  // Purge should succeed now that usage time is correct.
+  rv = memory1.Purge(Time::FromDoubleT(7));
+  EXPECT_TRUE(rv);
+}
+
+TEST(DiscardableSharedMemoryTest, LockShouldAlwaysFailAfterSuccessfulPurge) {
+  const uint32_t kDataSize = 1024;
+
+  TestDiscardableSharedMemory memory1;
+  bool rv = memory1.CreateAndMap(kDataSize);
+  ASSERT_TRUE(rv);
+
+  UnsafeSharedMemoryRegion shared_region = memory1.DuplicateRegion();
+  ASSERT_TRUE(shared_region.IsValid());
+
+  TestDiscardableSharedMemory memory2(std::move(shared_region));
+  rv = memory2.Map(kDataSize);
+  ASSERT_TRUE(rv);
+
+  memory2.SetNow(Time::FromDoubleT(1));
+  memory2.Unlock(0, 0);
+
+  rv = memory2.Purge(Time::FromDoubleT(2));
+  EXPECT_TRUE(rv);
+
+  // Lock should fail as memory has been purged.
+  DiscardableSharedMemory::LockResult lock_rv = memory2.Lock(0, 0);
+  EXPECT_EQ(DiscardableSharedMemory::FAILED, lock_rv);
+}
+
+#if defined(OS_ANDROID)
+TEST(DiscardableSharedMemoryTest, LockShouldFailIfPlatformLockPagesFails) {
+  const uint32_t kDataSize = 1024;
+
+  DiscardableSharedMemory memory1;
+  bool rv1 = memory1.CreateAndMap(kDataSize);
+  ASSERT_TRUE(rv1);
+
+  base::UnsafeSharedMemoryRegion region = memory1.DuplicateRegion();
+  int fd = region.GetPlatformHandle();
+  DiscardableSharedMemory memory2(std::move(region));
+  bool rv2 = memory2.Map(kDataSize);
+  ASSERT_TRUE(rv2);
+
+  // Unlock() the first page of memory, so we can test Lock()ing it.
+  memory2.Unlock(0, base::GetPageSize());
+  // To cause ashmem_pin_region() to fail, we arrange for it to be called with
+  // an invalid file-descriptor, which requires a valid-looking fd (i.e. we
+  // can't just Close() |memory|), but one on which the operation is invalid.
+  // We can overwrite the |memory| fd with a handle to a different file using
+  // dup2(), which has the nice properties that |memory| still has a valid fd
+  // that it can close, etc without errors, but on which ashmem_pin_region()
+  // will fail.
+  base::ScopedFD null(open("/dev/null", O_RDONLY));
+  ASSERT_EQ(fd, dup2(null.get(), fd));
+
+  // Now re-Lock()ing the first page should fail.
+  DiscardableSharedMemory::LockResult lock_rv =
+      memory2.Lock(0, base::GetPageSize());
+  EXPECT_EQ(DiscardableSharedMemory::FAILED, lock_rv);
+}
+#endif  // defined(OS_ANDROID)
+
+TEST(DiscardableSharedMemoryTest, LockAndUnlockRange) {
+  const uint32_t kDataSize = 32;
+
+  uint32_t data_size_in_bytes = kDataSize * base::GetPageSize();
+
+  TestDiscardableSharedMemory memory1;
+  bool rv = memory1.CreateAndMap(data_size_in_bytes);
+  ASSERT_TRUE(rv);
+
+  UnsafeSharedMemoryRegion shared_region = memory1.DuplicateRegion();
+  ASSERT_TRUE(shared_region.IsValid());
+
+  TestDiscardableSharedMemory memory2(std::move(shared_region));
+  rv = memory2.Map(data_size_in_bytes);
+  ASSERT_TRUE(rv);
+
+  // Unlock first page.
+  memory2.SetNow(Time::FromDoubleT(1));
+  memory2.Unlock(0, base::GetPageSize());
+
+  rv = memory1.Purge(Time::FromDoubleT(2));
+  EXPECT_FALSE(rv);
+
+  // Lock first page again.
+  memory2.SetNow(Time::FromDoubleT(3));
+  DiscardableSharedMemory::LockResult lock_rv =
+      memory2.Lock(0, base::GetPageSize());
+  EXPECT_NE(DiscardableSharedMemory::FAILED, lock_rv);
+
+  // Unlock first page.
+  memory2.SetNow(Time::FromDoubleT(4));
+  memory2.Unlock(0, base::GetPageSize());
+
+  rv = memory1.Purge(Time::FromDoubleT(5));
+  EXPECT_FALSE(rv);
+
+  // Unlock second page.
+  memory2.SetNow(Time::FromDoubleT(6));
+  memory2.Unlock(base::GetPageSize(), base::GetPageSize());
+
+  rv = memory1.Purge(Time::FromDoubleT(7));
+  EXPECT_FALSE(rv);
+
+  // Unlock anything onwards.
+  memory2.SetNow(Time::FromDoubleT(8));
+  memory2.Unlock(2 * base::GetPageSize(), 0);
+
+  // Memory is unlocked, but our usage timestamp is incorrect.
+  rv = memory1.Purge(Time::FromDoubleT(9));
+  EXPECT_FALSE(rv);
+
+  // The failed purge attempt should have updated usage time to the correct
+  // value.
+  EXPECT_EQ(Time::FromDoubleT(8), memory1.last_known_usage());
+
+  // Purge should now succeed.
+  rv = memory1.Purge(Time::FromDoubleT(10));
+  EXPECT_TRUE(rv);
+}
+
+TEST(DiscardableSharedMemoryTest, MappedSize) {
+  const uint32_t kDataSize = 1024;
+
+  TestDiscardableSharedMemory memory;
+  bool rv = memory.CreateAndMap(kDataSize);
+  ASSERT_TRUE(rv);
+
+  EXPECT_LE(kDataSize, memory.mapped_size());
+
+  // Mapped size should be 0 after memory segment has been unmapped.
+  rv = memory.Unmap();
+  EXPECT_TRUE(rv);
+  EXPECT_EQ(0u, memory.mapped_size());
+}
+
+TEST(DiscardableSharedMemoryTest, Close) {
+  const uint32_t kDataSize = 1024;
+
+  TestDiscardableSharedMemory memory;
+  bool rv = memory.CreateAndMap(kDataSize);
+  ASSERT_TRUE(rv);
+
+  // Mapped size should be unchanged after memory segment has been closed.
+  memory.Close();
+  EXPECT_LE(kDataSize, memory.mapped_size());
+
+  // Memory is initially locked. Unlock it.
+  memory.SetNow(Time::FromDoubleT(1));
+  memory.Unlock(0, 0);
+
+  // Lock and unlock memory.
+  DiscardableSharedMemory::LockResult lock_rv = memory.Lock(0, 0);
+  EXPECT_EQ(DiscardableSharedMemory::SUCCESS, lock_rv);
+  memory.SetNow(Time::FromDoubleT(2));
+  memory.Unlock(0, 0);
+}
+
+TEST(DiscardableSharedMemoryTest, ZeroSize) {
+  TestDiscardableSharedMemory memory;
+  bool rv = memory.CreateAndMap(0);
+  ASSERT_TRUE(rv);
+
+  EXPECT_LE(0u, memory.mapped_size());
+
+  // Memory is initially locked. Unlock it.
+  memory.SetNow(Time::FromDoubleT(1));
+  memory.Unlock(0, 0);
+
+  // Lock and unlock memory.
+  DiscardableSharedMemory::LockResult lock_rv = memory.Lock(0, 0);
+  EXPECT_NE(DiscardableSharedMemory::FAILED, lock_rv);
+  memory.SetNow(Time::FromDoubleT(2));
+  memory.Unlock(0, 0);
+}
+
+// This test checks that zero-filled pages are returned after purging a segment
+// when DISCARDABLE_SHARED_MEMORY_ZERO_FILL_ON_DEMAND_PAGES_AFTER_PURGE is
+// defined and MADV_REMOVE is supported.
+#if defined(DISCARDABLE_SHARED_MEMORY_ZERO_FILL_ON_DEMAND_PAGES_AFTER_PURGE)
+TEST(DiscardableSharedMemoryTest, ZeroFilledPagesAfterPurge) {
+  const uint32_t kDataSize = 1024;
+
+  TestDiscardableSharedMemory memory1;
+  bool rv = memory1.CreateAndMap(kDataSize);
+  ASSERT_TRUE(rv);
+
+  UnsafeSharedMemoryRegion shared_region = memory1.DuplicateRegion();
+  ASSERT_TRUE(shared_region.IsValid());
+
+  TestDiscardableSharedMemory memory2(std::move(shared_region));
+  rv = memory2.Map(kDataSize);
+  ASSERT_TRUE(rv);
+
+  // Initialize all memory to '0xaa'.
+  SbMemorySet(memory2.memory(), 0xaa, kDataSize);
+
+  // Unlock memory.
+  memory2.SetNow(Time::FromDoubleT(1));
+  memory2.Unlock(0, 0);
+  EXPECT_FALSE(memory1.IsMemoryLocked());
+
+  // Memory is unlocked, but our usage timestamp is incorrect.
+  rv = memory1.Purge(Time::FromDoubleT(2));
+  EXPECT_FALSE(rv);
+  rv = memory1.Purge(Time::FromDoubleT(3));
+  EXPECT_TRUE(rv);
+
+  // Check that reading memory after it has been purged is returning
+  // zero-filled pages.
+  uint8_t expected_data[kDataSize] = {};
+  EXPECT_EQ(SbMemoryCompare(memory2.memory(), expected_data, kDataSize), 0);
+}
+#endif
+
+TEST(DiscardableSharedMemoryTest, TracingOwnershipEdges) {
+  const uint32_t kDataSize = 1024;
+  TestDiscardableSharedMemory memory1;
+  bool rv = memory1.CreateAndMap(kDataSize);
+  ASSERT_TRUE(rv);
+
+  base::trace_event::MemoryDumpArgs args = {
+      base::trace_event::MemoryDumpLevelOfDetail::DETAILED};
+  trace_event::ProcessMemoryDump pmd(args);
+  trace_event::MemoryAllocatorDump* client_dump =
+      pmd.CreateAllocatorDump("discardable_manager/map1");
+  const bool is_owned = false;
+  memory1.CreateSharedMemoryOwnershipEdge(client_dump, &pmd, is_owned);
+  const auto* shm_dump = pmd.GetAllocatorDump(
+      SharedMemoryTracker::GetDumpNameForTracing(memory1.mapped_id()));
+  EXPECT_TRUE(shm_dump);
+  EXPECT_EQ(shm_dump->GetSizeInternal(), client_dump->GetSizeInternal());
+  const auto edges = pmd.allocator_dumps_edges();
+  EXPECT_EQ(2u, edges.size());
+  EXPECT_NE(edges.end(), edges.find(shm_dump->guid()));
+  EXPECT_NE(edges.end(), edges.find(client_dump->guid()));
+  // TODO(ssid): test for weak global dump once the
+  // CreateWeakSharedMemoryOwnershipEdge() is fixed, crbug.com/661257.
+}
+
+}  // namespace base
diff --git a/src/base/memory/fake_memory_pressure_monitor.cc b/src/base/memory/fake_memory_pressure_monitor.cc
new file mode 100644
index 0000000..713b161
--- /dev/null
+++ b/src/base/memory/fake_memory_pressure_monitor.cc
@@ -0,0 +1,33 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/fake_memory_pressure_monitor.h"
+
+namespace base {
+namespace test {
+
+FakeMemoryPressureMonitor::FakeMemoryPressureMonitor()
+    : MemoryPressureMonitor(),
+      memory_pressure_level_(MemoryPressureLevel::MEMORY_PRESSURE_LEVEL_NONE) {}
+
+FakeMemoryPressureMonitor::~FakeMemoryPressureMonitor() {}
+
+void FakeMemoryPressureMonitor::SetAndNotifyMemoryPressure(
+    MemoryPressureLevel level) {
+  memory_pressure_level_ = level;
+  base::MemoryPressureListener::SimulatePressureNotification(level);
+}
+
+base::MemoryPressureMonitor::MemoryPressureLevel
+FakeMemoryPressureMonitor::GetCurrentPressureLevel() {
+  return memory_pressure_level_;
+}
+
+void FakeMemoryPressureMonitor::SetDispatchCallback(
+    const DispatchCallback& callback) {
+  LOG(ERROR) << "FakeMemoryPressureMonitor::SetDispatchCallback";
+}
+
+}  // namespace test
+}  // namespace base
diff --git a/src/base/memory/fake_memory_pressure_monitor.h b/src/base/memory/fake_memory_pressure_monitor.h
new file mode 100644
index 0000000..2194b5f
--- /dev/null
+++ b/src/base/memory/fake_memory_pressure_monitor.h
@@ -0,0 +1,34 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_FAKE_MEMORY_PRESSURE_MONITOR_H_
+#define BASE_MEMORY_FAKE_MEMORY_PRESSURE_MONITOR_H_
+
+#include "base/macros.h"
+#include "base/memory/memory_pressure_monitor.h"
+
+namespace base {
+namespace test {
+
+class FakeMemoryPressureMonitor : public base::MemoryPressureMonitor {
+ public:
+  FakeMemoryPressureMonitor();
+  ~FakeMemoryPressureMonitor() override;
+
+  void SetAndNotifyMemoryPressure(MemoryPressureLevel level);
+
+  // base::MemoryPressureMonitor overrides:
+  MemoryPressureLevel GetCurrentPressureLevel() override;
+  void SetDispatchCallback(const DispatchCallback& callback) override;
+
+ private:
+  MemoryPressureLevel memory_pressure_level_;
+
+  DISALLOW_COPY_AND_ASSIGN(FakeMemoryPressureMonitor);
+};
+
+}  // namespace test
+}  // namespace base
+
+#endif  // BASE_MEMORY_FAKE_MEMORY_PRESSURE_MONITOR_H_
diff --git a/src/base/memory/free_deleter.h b/src/base/memory/free_deleter.h
new file mode 100644
index 0000000..0378c0c
--- /dev/null
+++ b/src/base/memory/free_deleter.h
@@ -0,0 +1,26 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_FREE_DELETER_H_
+#define BASE_MEMORY_FREE_DELETER_H_
+
+#include <stdlib.h>
+
+#include "starboard/memory.h"
+#include "starboard/types.h"
+
+namespace base {
+
+// Function object which invokes 'free' on its parameter, which must be
+// a pointer. Can be used to store malloc-allocated pointers in std::unique_ptr:
+//
+// std::unique_ptr<int, base::FreeDeleter> foo_ptr(
+//     static_cast<int*>(malloc(sizeof(int))));
+struct FreeDeleter {
+  inline void operator()(void* ptr) const { SbMemoryDeallocate(ptr); }
+};
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_FREE_DELETER_H_
diff --git a/src/base/memory/linked_ptr.h b/src/base/memory/linked_ptr.h
index 80044ad..6851286 100644
--- a/src/base/memory/linked_ptr.h
+++ b/src/base/memory/linked_ptr.h
@@ -17,10 +17,6 @@
 //   If a linked_ptr<> is converted to a raw pointer and back, BAD THINGS
 //   will happen (double deletion).
 //
-// A good use of this class is storing object references in STL containers.
-// You can safely put linked_ptr<> in a vector<>.
-// Other uses may not be as good.
-//
 // Note: If you use an incomplete type with linked_ptr<>, the class
 // *containing* linked_ptr<> must have a constructor and destructor (even
 // if they do nothing!).
@@ -73,6 +69,8 @@
   mutable linked_ptr_internal const* next_;
 };
 
+// TODO(http://crbug.com/556939): DEPRECATED: Use unique_ptr instead (now that
+// we have support for moveable types inside STL containers).
 template <typename T>
 class linked_ptr {
  public:
diff --git a/src/base/memory/linked_ptr_unittest.cc b/src/base/memory/linked_ptr_unittest.cc
index 516b972..344ffa4 100644
--- a/src/base/memory/linked_ptr_unittest.cc
+++ b/src/base/memory/linked_ptr_unittest.cc
@@ -5,7 +5,7 @@
 #include <string>
 
 #include "base/memory/linked_ptr.h"
-#include "base/stringprintf.h"
+#include "base/strings/stringprintf.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace {
@@ -25,10 +25,8 @@
 // Subclass
 struct B: public A {
   B() { history += base::StringPrintf("B%d ctor\n", mynum); }
-  virtual ~B() { history += base::StringPrintf("B%d dtor\n", mynum); }
-  virtual void Use() override {
-    history += base::StringPrintf("B%d use\n", mynum);
-  }
+  ~B() override { history += base::StringPrintf("B%d dtor\n", mynum); }
+  void Use() override { history += base::StringPrintf("B%d use\n", mynum); }
 };
 
 }  // namespace
@@ -36,20 +34,20 @@
 TEST(LinkedPtrTest, Test) {
   {
     linked_ptr<A> a0, a1, a2;
-    a0 = a0;
+    a0 = *&a0;  // The *& defeats Clang's -Wself-assign warning.
     a1 = a2;
-    ASSERT_EQ(a0.get(), static_cast<A*>(NULL));
-    ASSERT_EQ(a1.get(), static_cast<A*>(NULL));
-    ASSERT_EQ(a2.get(), static_cast<A*>(NULL));
-    ASSERT_TRUE(a0 == NULL);
-    ASSERT_TRUE(a1 == NULL);
-    ASSERT_TRUE(a2 == NULL);
+    ASSERT_EQ(a0.get(), static_cast<A*>(nullptr));
+    ASSERT_EQ(a1.get(), static_cast<A*>(nullptr));
+    ASSERT_EQ(a2.get(), static_cast<A*>(nullptr));
+    ASSERT_TRUE(a0 == nullptr);
+    ASSERT_TRUE(a1 == nullptr);
+    ASSERT_TRUE(a2 == nullptr);
 
     {
       linked_ptr<A> a3(new A);
       a0 = a3;
       ASSERT_TRUE(a0 == a3);
-      ASSERT_TRUE(a0 != NULL);
+      ASSERT_TRUE(a0 != nullptr);
       ASSERT_TRUE(a0.get() == a3);
       ASSERT_TRUE(a0 == a3.get());
       linked_ptr<A> a4(a0);
@@ -62,7 +60,7 @@
       linked_ptr<A> a6(b0);
       ASSERT_TRUE(b0 == a6);
       ASSERT_TRUE(a6 == b0);
-      ASSERT_TRUE(b0 != NULL);
+      ASSERT_TRUE(b0 != nullptr);
       a5 = b0;
       a5 = b0;
       a3->Use();
diff --git a/src/base/memory/manual_constructor.h b/src/base/memory/manual_constructor.h
deleted file mode 100644
index 9275f73..0000000
--- a/src/base/memory/manual_constructor.h
+++ /dev/null
@@ -1,125 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// ManualConstructor statically-allocates space in which to store some
-// object, but does not initialize it.  You can then call the constructor
-// and destructor for the object yourself as you see fit.  This is useful
-// for memory management optimizations, where you want to initialize and
-// destroy an object multiple times but only allocate it once.
-//
-// (When I say ManualConstructor statically allocates space, I mean that
-// the ManualConstructor object itself is forced to be the right size.)
-//
-// For example usage, check out base/containers/small_map.h.
-
-#ifndef BASE_MEMORY_MANUAL_CONSTRUCTOR_H_
-#define BASE_MEMORY_MANUAL_CONSTRUCTOR_H_
-
-#include <stddef.h>
-
-#include "base/memory/aligned_memory.h"
-
-namespace base {
-
-template <typename Type>
-class ManualConstructor {
- public:
-  // No constructor or destructor because one of the most useful uses of
-  // this class is as part of a union, and members of a union cannot have
-  // constructors or destructors.  And, anyway, the whole point of this
-  // class is to bypass these.
-
-  // Support users creating arrays of ManualConstructor<>s.  This ensures that
-  // the array itself has the correct alignment.
-  static void* operator new[](size_t size) {
-#if defined(COMPILER_MSVC)
-    return AlignedAlloc(size, __alignof(Type));
-#else
-    return AlignedAlloc(size, __alignof__(Type));
-#endif
-  }
-  static void operator delete[](void* mem) {
-    AlignedFree(mem);
-  }
-
-  inline Type* get() {
-    return space_.template data_as<Type>();
-  }
-  inline const Type* get() const  {
-    return space_.template data_as<Type>();
-  }
-
-  inline Type* operator->() { return get(); }
-  inline const Type* operator->() const { return get(); }
-
-  inline Type& operator*() { return *get(); }
-  inline const Type& operator*() const { return *get(); }
-
-  // You can pass up to eight constructor arguments as arguments of Init().
-  inline void Init() {
-    new(space_.void_data()) Type;
-  }
-
-  template <typename T1>
-  inline void Init(const T1& p1) {
-    new(space_.void_data()) Type(p1);
-  }
-
-  template <typename T1, typename T2>
-  inline void Init(const T1& p1, const T2& p2) {
-    new(space_.void_data()) Type(p1, p2);
-  }
-
-  template <typename T1, typename T2, typename T3>
-  inline void Init(const T1& p1, const T2& p2, const T3& p3) {
-    new(space_.void_data()) Type(p1, p2, p3);
-  }
-
-  template <typename T1, typename T2, typename T3, typename T4>
-  inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4) {
-    new(space_.void_data()) Type(p1, p2, p3, p4);
-  }
-
-  template <typename T1, typename T2, typename T3, typename T4, typename T5>
-  inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
-                   const T5& p5) {
-    new(space_.void_data()) Type(p1, p2, p3, p4, p5);
-  }
-
-  template <typename T1, typename T2, typename T3, typename T4, typename T5,
-            typename T6>
-  inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
-                   const T5& p5, const T6& p6) {
-    new(space_.void_data()) Type(p1, p2, p3, p4, p5, p6);
-  }
-
-  template <typename T1, typename T2, typename T3, typename T4, typename T5,
-            typename T6, typename T7>
-  inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
-                   const T5& p5, const T6& p6, const T7& p7) {
-    new(space_.void_data()) Type(p1, p2, p3, p4, p5, p6, p7);
-  }
-
-  template <typename T1, typename T2, typename T3, typename T4, typename T5,
-            typename T6, typename T7, typename T8>
-  inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
-                   const T5& p5, const T6& p6, const T7& p7, const T8& p8) {
-    new(space_.void_data()) Type(p1, p2, p3, p4, p5, p6, p7, p8);
-  }
-
-  inline void Destroy() {
-    get()->~Type();
-  }
-
- private:
-#if defined(COMPILER_MSVC)
-  AlignedMemory<sizeof(Type), __alignof(Type)> space_;
-#else
-  AlignedMemory<sizeof(Type), __alignof__(Type)> space_;
-#endif
-};
-
-}  // namespace base
-
-#endif  // BASE_MEMORY_MANUAL_CONSTRUCTOR_H_
diff --git a/src/base/memory/memory_coordinator_client.cc b/src/base/memory/memory_coordinator_client.cc
new file mode 100644
index 0000000..7fa6232
--- /dev/null
+++ b/src/base/memory/memory_coordinator_client.cc
@@ -0,0 +1,27 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/memory_coordinator_client.h"
+
+#include "base/logging.h"
+
+namespace base {
+
+const char* MemoryStateToString(MemoryState state) {
+  switch (state) {
+    case MemoryState::UNKNOWN:
+      return "unknown";
+    case MemoryState::NORMAL:
+      return "normal";
+    case MemoryState::THROTTLED:
+      return "throttled";
+    case MemoryState::SUSPENDED:
+      return "suspended";
+    default:
+      NOTREACHED();
+  }
+  return "";
+}
+
+}  // namespace base
diff --git a/src/base/memory/memory_coordinator_client.h b/src/base/memory/memory_coordinator_client.h
new file mode 100644
index 0000000..804f0a6
--- /dev/null
+++ b/src/base/memory/memory_coordinator_client.h
@@ -0,0 +1,79 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_MEMORY_COORDINATOR_CLIENT_H_
+#define BASE_MEMORY_MEMORY_COORDINATOR_CLIENT_H_
+
+#include "base/base_export.h"
+
+namespace base {
+
+// OVERVIEW:
+//
+// MemoryCoordinatorClient is an interface which a component can implement to
+// adjust "future allocation" and "existing allocation". For "future allocation"
+// it provides a callback to observe memory state changes, and for "existing
+// allocation" it provides a callback to purge memory.
+//
+// Unlike MemoryPressureListener, memory state changes are stateful. State
+// transitions are throttled to avoid thrashing; the exact throttling period is
+// platform dependent, but will be at least 5-10 seconds. When a state change
+// notification is dispatched, clients are expected to update their allocation
+// policies (e.g. setting cache limit) that persist for the duration of the
+// memory state. Note that clients aren't expected to free up memory on memory
+// state changes. Clients should wait for a separate purge request to free up
+// memory. Purging requests will be throttled as well.
+
+// MemoryState is an indicator that processes can use to guide their memory
+// allocation policies. For example, a process that receives the throttled
+// state can use that as a signal to decrease memory cache limits.
+// NOTE: This enum is used to back an UMA histogram, and therefore should be
+// treated as append-only.
+enum class MemoryState : int {
+  // The state is unknown.
+  UNKNOWN = -1,
+  // No memory constraints.
+  NORMAL = 0,
+  // Running and interactive but memory allocation should be throttled.
+  // Clients should set lower budget for any memory that is used as an
+  // optimization but that is not necessary for the process to run.
+  // (e.g. caches)
+  THROTTLED = 1,
+  // Still resident in memory but core processing logic has been suspended.
+  // In most cases, OnPurgeMemory() will be called before entering this state.
+  SUSPENDED = 2,
+};
+
+const int kMemoryStateMax = static_cast<int>(MemoryState::SUSPENDED) + 1;
+
+// Returns a string representation of MemoryState.
+BASE_EXPORT const char* MemoryStateToString(MemoryState state);
+
+// This is an interface for components which can respond to memory status
+// changes. An initial state is NORMAL. See MemoryCoordinatorClientRegistry for
+// threading guarantees and ownership management.
+class BASE_EXPORT MemoryCoordinatorClient {
+ public:
+  // Called when memory state has changed. Any transition can occur except for
+  // UNKNOWN. General guidelines are:
+  //  * NORMAL:    Restore the default settings for memory allocation/usage if
+  //               it has changed.
+  //  * THROTTLED: Use smaller limits for future memory allocations. You don't
+  //               need to take any action on existing allocations.
+  //  * SUSPENDED: Use much smaller limits for future memory allocations. You
+  //               don't need to take any action on existing allocations.
+  virtual void OnMemoryStateChange(MemoryState state) {}
+
+  // Called to purge memory.
+  // This callback should free up any memory that is used as an optimization, or
+  // any memory whose contents can be reproduced.
+  virtual void OnPurgeMemory() {}
+
+ protected:
+  virtual ~MemoryCoordinatorClient() = default;
+};
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_MEMORY_COORDINATOR_CLIENT_H_
diff --git a/src/base/memory/memory_coordinator_client_registry.cc b/src/base/memory/memory_coordinator_client_registry.cc
new file mode 100644
index 0000000..45b4a7f
--- /dev/null
+++ b/src/base/memory/memory_coordinator_client_registry.cc
@@ -0,0 +1,41 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/memory_coordinator_client_registry.h"
+
+namespace base {
+
+// static
+MemoryCoordinatorClientRegistry*
+MemoryCoordinatorClientRegistry::GetInstance() {
+  return Singleton<
+      MemoryCoordinatorClientRegistry,
+      LeakySingletonTraits<MemoryCoordinatorClientRegistry>>::get();
+}
+
+MemoryCoordinatorClientRegistry::MemoryCoordinatorClientRegistry()
+    : clients_(new ClientList) {}
+
+MemoryCoordinatorClientRegistry::~MemoryCoordinatorClientRegistry() = default;
+
+void MemoryCoordinatorClientRegistry::Register(
+    MemoryCoordinatorClient* client) {
+  clients_->AddObserver(client);
+}
+
+void MemoryCoordinatorClientRegistry::Unregister(
+    MemoryCoordinatorClient* client) {
+  clients_->RemoveObserver(client);
+}
+
+void MemoryCoordinatorClientRegistry::Notify(MemoryState state) {
+  clients_->Notify(FROM_HERE,
+                   &base::MemoryCoordinatorClient::OnMemoryStateChange, state);
+}
+
+void MemoryCoordinatorClientRegistry::PurgeMemory() {
+  clients_->Notify(FROM_HERE, &base::MemoryCoordinatorClient::OnPurgeMemory);
+}
+
+}  // namespace base
diff --git a/src/base/memory/memory_coordinator_client_registry.h b/src/base/memory/memory_coordinator_client_registry.h
new file mode 100644
index 0000000..e2c81b7
--- /dev/null
+++ b/src/base/memory/memory_coordinator_client_registry.h
@@ -0,0 +1,56 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_MEMORY_COORDINATOR_CLIENT_REGISTRY_H_
+#define BASE_MEMORY_MEMORY_COORDINATOR_CLIENT_REGISTRY_H_
+
+#include "base/base_export.h"
+#include "base/memory/memory_coordinator_client.h"
+#include "base/memory/singleton.h"
+#include "base/observer_list_threadsafe.h"
+
+namespace base {
+
+// MemoryCoordinatorClientRegistry is the registry of MemoryCoordinatorClients.
+// This class manages clients and provides a way to notify memory state changes
+// to clients, but it is not responsible for determining how or when to change
+// memory states.
+//
+// Threading guarantees:
+// This class uses ObserverListThreadsafe internally, which means that
+//  * Registering/unregistering callbacks are thread-safe.
+//  * Callbacks are invoked on the same thread on which they are registered.
+// See base/observer_list_threadsafe.h for reference.
+//
+// Ownership management:
+// This class doesn't take the ownership of clients. Clients must be
+// unregistered before they are destroyed.
+class BASE_EXPORT MemoryCoordinatorClientRegistry {
+ public:
+  static MemoryCoordinatorClientRegistry* GetInstance();
+
+  ~MemoryCoordinatorClientRegistry();
+
+  // Registers/unregisters a client. Does not take ownership of client.
+  void Register(MemoryCoordinatorClient* client);
+  void Unregister(MemoryCoordinatorClient* client);
+
+  // Notify clients of a memory state change.
+  void Notify(MemoryState state);
+
+  // Requests purging memory.
+  void PurgeMemory();
+
+ private:
+  friend struct DefaultSingletonTraits<MemoryCoordinatorClientRegistry>;
+
+  MemoryCoordinatorClientRegistry();
+
+  using ClientList = ObserverListThreadSafe<MemoryCoordinatorClient>;
+  scoped_refptr<ClientList> clients_;
+};
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_MEMORY_COORDINATOR_CLIENT_REGISTRY_H_
diff --git a/src/base/memory/memory_coordinator_client_registry_unittest.cc b/src/base/memory/memory_coordinator_client_registry_unittest.cc
new file mode 100644
index 0000000..37ed767
--- /dev/null
+++ b/src/base/memory/memory_coordinator_client_registry_unittest.cc
@@ -0,0 +1,58 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/memory_coordinator_client_registry.h"
+
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+class TestMemoryCoordinatorClient : public MemoryCoordinatorClient {
+ public:
+  void OnMemoryStateChange(MemoryState state) override { state_ = state; }
+
+  void OnPurgeMemory() override { ++purge_count_; }
+
+  MemoryState state() const { return state_; }
+  size_t purge_count() const { return purge_count_; }
+
+ private:
+  MemoryState state_ = MemoryState::UNKNOWN;
+  size_t purge_count_ = 0;
+};
+
+void RunUntilIdle() {
+  base::RunLoop loop;
+  loop.RunUntilIdle();
+}
+
+TEST(MemoryCoordinatorClientRegistryTest, NotifyStateChange) {
+  MessageLoop loop;
+  auto* registry = MemoryCoordinatorClientRegistry::GetInstance();
+  TestMemoryCoordinatorClient client;
+  registry->Register(&client);
+  registry->Notify(MemoryState::THROTTLED);
+  RunUntilIdle();
+  ASSERT_EQ(MemoryState::THROTTLED, client.state());
+  registry->Unregister(&client);
+}
+
+TEST(MemoryCoordinatorClientRegistryTest, PurgeMemory) {
+  MessageLoop loop;
+  auto* registry = MemoryCoordinatorClientRegistry::GetInstance();
+  TestMemoryCoordinatorClient client;
+  registry->Register(&client);
+  registry->PurgeMemory();
+  RunUntilIdle();
+  ASSERT_EQ(1u, client.purge_count());
+  registry->Unregister(&client);
+}
+
+}  // namespace
+
+}  // namespace base
diff --git a/src/base/memory/memory_coordinator_proxy.cc b/src/base/memory/memory_coordinator_proxy.cc
new file mode 100644
index 0000000..4e22fe0
--- /dev/null
+++ b/src/base/memory/memory_coordinator_proxy.cc
@@ -0,0 +1,37 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/memory_coordinator_proxy.h"
+
+namespace base {
+
+namespace {
+
+MemoryCoordinator* g_memory_coordinator = nullptr;
+
+}  // namespace
+
+MemoryCoordinatorProxy::MemoryCoordinatorProxy() = default;
+
+MemoryCoordinatorProxy::~MemoryCoordinatorProxy() = default;
+
+// static
+MemoryCoordinatorProxy* MemoryCoordinatorProxy::GetInstance() {
+  return Singleton<base::MemoryCoordinatorProxy>::get();
+}
+
+// static
+void MemoryCoordinatorProxy::SetMemoryCoordinator(
+    MemoryCoordinator* coordinator) {
+  DCHECK(!g_memory_coordinator || !coordinator);
+  g_memory_coordinator = coordinator;
+}
+
+MemoryState MemoryCoordinatorProxy::GetCurrentMemoryState() const {
+  if (!g_memory_coordinator)
+    return MemoryState::NORMAL;
+  return g_memory_coordinator->GetCurrentMemoryState();
+}
+
+}  // namespace base
diff --git a/src/base/memory/memory_coordinator_proxy.h b/src/base/memory/memory_coordinator_proxy.h
new file mode 100644
index 0000000..b6e7b3f
--- /dev/null
+++ b/src/base/memory/memory_coordinator_proxy.h
@@ -0,0 +1,49 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_MEMORY_COORDINATOR_PROXY_H_
+#define BASE_MEMORY_MEMORY_COORDINATOR_PROXY_H_
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/memory/memory_coordinator_client.h"
+#include "base/memory/singleton.h"
+
+namespace base {
+
+// The MemoryCoordinator interface. See comments in MemoryCoordinatorProxy for
+// method descriptions.
+class BASE_EXPORT MemoryCoordinator {
+ public:
+  virtual ~MemoryCoordinator() = default;
+
+  virtual MemoryState GetCurrentMemoryState() const = 0;
+};
+
+// The proxy of MemoryCoordinator to be accessed from components that are not
+// in content/browser e.g. net.
+class BASE_EXPORT MemoryCoordinatorProxy {
+ public:
+  static MemoryCoordinatorProxy* GetInstance();
+
+  // Sets an implementation of MemoryCoordinator. MemoryCoordinatorProxy doesn't
+  // take the ownership of |coordinator|. It must outlive this proxy.
+  // This should be called before any components starts using this proxy.
+  static void SetMemoryCoordinator(MemoryCoordinator* coordinator);
+
+  // Returns the current memory state.
+  MemoryState GetCurrentMemoryState() const;
+
+ private:
+  friend struct base::DefaultSingletonTraits<MemoryCoordinatorProxy>;
+
+  MemoryCoordinatorProxy();
+  virtual ~MemoryCoordinatorProxy();
+
+  DISALLOW_COPY_AND_ASSIGN(MemoryCoordinatorProxy);
+};
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_MEMORY_COORDINATOR_PROXY_H_
diff --git a/src/base/memory/memory_pressure_listener.cc b/src/base/memory/memory_pressure_listener.cc
new file mode 100644
index 0000000..900a7d4
--- /dev/null
+++ b/src/base/memory/memory_pressure_listener.cc
@@ -0,0 +1,129 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/memory_pressure_listener.h"
+
+#include "base/observer_list_threadsafe.h"
+#include "base/trace_event/trace_event.h"
+
+namespace base {
+
+namespace {
+
+// This class is thread safe and internally synchronized.
+class MemoryPressureObserver {
+ public:
+  // There is at most one MemoryPressureObserver and it is never deleted.
+  ~MemoryPressureObserver() = delete;
+
+  void AddObserver(MemoryPressureListener* listener, bool sync) {
+    async_observers_->AddObserver(listener);
+    if (sync) {
+      AutoLock lock(sync_observers_lock_);
+      sync_observers_.AddObserver(listener);
+    }
+  }
+
+  void RemoveObserver(MemoryPressureListener* listener) {
+    async_observers_->RemoveObserver(listener);
+    AutoLock lock(sync_observers_lock_);
+    sync_observers_.RemoveObserver(listener);
+  }
+
+  void Notify(
+      MemoryPressureListener::MemoryPressureLevel memory_pressure_level) {
+    async_observers_->Notify(FROM_HERE, &MemoryPressureListener::Notify,
+                             memory_pressure_level);
+    AutoLock lock(sync_observers_lock_);
+    for (auto& observer : sync_observers_)
+      observer.SyncNotify(memory_pressure_level);
+  }
+
+ private:
+  const scoped_refptr<ObserverListThreadSafe<MemoryPressureListener>>
+      async_observers_ = base::MakeRefCounted<
+          ObserverListThreadSafe<MemoryPressureListener>>();
+  ObserverList<MemoryPressureListener>::Unchecked sync_observers_;
+  Lock sync_observers_lock_;
+};
+
+// Gets the shared MemoryPressureObserver singleton instance.
+MemoryPressureObserver* GetMemoryPressureObserver() {
+  static auto* const observer = new MemoryPressureObserver();
+  return observer;
+}
+
+subtle::Atomic32 g_notifications_suppressed = 0;
+
+}  // namespace
+
+MemoryPressureListener::MemoryPressureListener(
+    const MemoryPressureListener::MemoryPressureCallback& callback)
+    : callback_(callback) {
+  GetMemoryPressureObserver()->AddObserver(this, false);
+}
+
+MemoryPressureListener::MemoryPressureListener(
+    const MemoryPressureListener::MemoryPressureCallback& callback,
+    const MemoryPressureListener::SyncMemoryPressureCallback&
+        sync_memory_pressure_callback)
+    : callback_(callback),
+      sync_memory_pressure_callback_(sync_memory_pressure_callback) {
+  GetMemoryPressureObserver()->AddObserver(this, true);
+}
+
+MemoryPressureListener::~MemoryPressureListener() {
+  GetMemoryPressureObserver()->RemoveObserver(this);
+}
+
+void MemoryPressureListener::Notify(MemoryPressureLevel memory_pressure_level) {
+  callback_.Run(memory_pressure_level);
+}
+
+void MemoryPressureListener::SyncNotify(
+    MemoryPressureLevel memory_pressure_level) {
+  if (!sync_memory_pressure_callback_.is_null()) {
+    sync_memory_pressure_callback_.Run(memory_pressure_level);
+  }
+}
+
+// static
+void MemoryPressureListener::NotifyMemoryPressure(
+    MemoryPressureLevel memory_pressure_level) {
+  DCHECK_NE(memory_pressure_level, MEMORY_PRESSURE_LEVEL_NONE);
+  TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("memory-infra"),
+                       "MemoryPressureListener::NotifyMemoryPressure",
+                       TRACE_EVENT_SCOPE_THREAD, "level",
+                       memory_pressure_level);
+  if (AreNotificationsSuppressed())
+    return;
+  DoNotifyMemoryPressure(memory_pressure_level);
+}
+
+// static
+bool MemoryPressureListener::AreNotificationsSuppressed() {
+  return subtle::Acquire_Load(&g_notifications_suppressed) == 1;
+}
+
+// static
+void MemoryPressureListener::SetNotificationsSuppressed(bool suppress) {
+  subtle::Release_Store(&g_notifications_suppressed, suppress ? 1 : 0);
+}
+
+// static
+void MemoryPressureListener::SimulatePressureNotification(
+    MemoryPressureLevel memory_pressure_level) {
+  // Notify all listeners even if regular pressure notifications are suppressed.
+  DoNotifyMemoryPressure(memory_pressure_level);
+}
+
+// static
+void MemoryPressureListener::DoNotifyMemoryPressure(
+    MemoryPressureLevel memory_pressure_level) {
+  DCHECK_NE(memory_pressure_level, MEMORY_PRESSURE_LEVEL_NONE);
+
+  GetMemoryPressureObserver()->Notify(memory_pressure_level);
+}
+
+}  // namespace base
diff --git a/src/base/memory/memory_pressure_listener.h b/src/base/memory/memory_pressure_listener.h
new file mode 100644
index 0000000..7e97010
--- /dev/null
+++ b/src/base/memory/memory_pressure_listener.h
@@ -0,0 +1,102 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// MemoryPressure provides static APIs for handling memory pressure on
+// platforms that have such signals, such as Android and ChromeOS.
+// The app will try to discard buffers that aren't deemed essential (individual
+// modules will implement their own policy).
+
+#ifndef BASE_MEMORY_MEMORY_PRESSURE_LISTENER_H_
+#define BASE_MEMORY_MEMORY_PRESSURE_LISTENER_H_
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/macros.h"
+
+namespace base {
+
+// To start listening, create a new instance, passing a callback to a
+// function that takes a MemoryPressureLevel parameter. To stop listening,
+// simply delete the listener object. The implementation guarantees
+// that the callback will always be called on the thread that created
+// the listener.
+// Note that even on the same thread, the callback is not guaranteed to be
+// called synchronously within the system memory pressure broadcast.
+// Please see notes in MemoryPressureLevel enum below: some levels are
+// absolutely critical, and if not enough memory is returned to the system,
+// it'll potentially kill the app, and then later the app will have to be
+// cold-started.
+//
+// Example:
+//
+//    void OnMemoryPressure(MemoryPressureLevel memory_pressure_level) {
+//       ...
+//    }
+//
+//    // Start listening.
+//    MemoryPressureListener* my_listener =
+//        new MemoryPressureListener(base::Bind(&OnMemoryPressure));
+//
+//    ...
+//
+//    // Stop listening.
+//    delete my_listener;
+//
+class BASE_EXPORT MemoryPressureListener {
+ public:
+  // A Java counterpart will be generated for this enum.
+  // GENERATED_JAVA_ENUM_PACKAGE: org.chromium.base
+  enum MemoryPressureLevel {
+    // No problems, there is enough memory to use. This event is not sent via
+    // callback, but the enum is used in other places to find out the current
+    // state of the system.
+    MEMORY_PRESSURE_LEVEL_NONE,
+
+    // Modules are advised to free buffers that are cheap to re-allocate and not
+    // immediately needed.
+    MEMORY_PRESSURE_LEVEL_MODERATE,
+
+    // At this level, modules are advised to free all possible memory.  The
+    // alternative is to be killed by the system, which means all memory will
+    // have to be re-created, plus the cost of a cold start.
+    MEMORY_PRESSURE_LEVEL_CRITICAL,
+  };
+
+  typedef Callback<void(MemoryPressureLevel)> MemoryPressureCallback;
+  typedef Callback<void(MemoryPressureLevel)> SyncMemoryPressureCallback;
+
+  explicit MemoryPressureListener(
+      const MemoryPressureCallback& memory_pressure_callback);
+  MemoryPressureListener(
+      const MemoryPressureCallback& memory_pressure_callback,
+      const SyncMemoryPressureCallback& sync_memory_pressure_callback);
+
+  ~MemoryPressureListener();
+
+  // Intended for use by the platform specific implementation.
+  static void NotifyMemoryPressure(MemoryPressureLevel memory_pressure_level);
+
+  // These methods should not be used anywhere else but in memory measurement
+  // code, where they are intended to maintain stable conditions across
+  // measurements.
+  static bool AreNotificationsSuppressed();
+  static void SetNotificationsSuppressed(bool suppressed);
+  static void SimulatePressureNotification(
+      MemoryPressureLevel memory_pressure_level);
+
+  void Notify(MemoryPressureLevel memory_pressure_level);
+  void SyncNotify(MemoryPressureLevel memory_pressure_level);
+
+ private:
+  static void DoNotifyMemoryPressure(MemoryPressureLevel memory_pressure_level);
+
+  MemoryPressureCallback callback_;
+  SyncMemoryPressureCallback sync_memory_pressure_callback_;
+
+  DISALLOW_COPY_AND_ASSIGN(MemoryPressureListener);
+};
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_MEMORY_PRESSURE_LISTENER_H_
diff --git a/src/base/memory/memory_pressure_listener_unittest.cc b/src/base/memory/memory_pressure_listener_unittest.cc
new file mode 100644
index 0000000..87d5f4c
--- /dev/null
+++ b/src/base/memory/memory_pressure_listener_unittest.cc
@@ -0,0 +1,79 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/memory_pressure_listener.h"
+
+#include "base/bind.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace base {
+
+using MemoryPressureLevel = MemoryPressureListener::MemoryPressureLevel;
+
+class MemoryPressureListenerTest : public testing::Test {
+ public:
+  void SetUp() override {
+    message_loop_.reset(new MessageLoopForUI());
+    listener_.reset(new MemoryPressureListener(
+        Bind(&MemoryPressureListenerTest::OnMemoryPressure, Unretained(this))));
+  }
+
+  void TearDown() override {
+    listener_.reset();
+    message_loop_.reset();
+  }
+
+ protected:
+  void ExpectNotification(
+      void (*notification_function)(MemoryPressureLevel),
+      MemoryPressureLevel level) {
+    EXPECT_CALL(*this, OnMemoryPressure(level)).Times(1);
+    notification_function(level);
+    RunLoop().RunUntilIdle();
+  }
+
+  void ExpectNoNotification(
+      void (*notification_function)(MemoryPressureLevel),
+      MemoryPressureLevel level) {
+    EXPECT_CALL(*this, OnMemoryPressure(testing::_)).Times(0);
+    notification_function(level);
+    RunLoop().RunUntilIdle();
+  }
+
+ private:
+  MOCK_METHOD1(OnMemoryPressure,
+               void(MemoryPressureListener::MemoryPressureLevel));
+
+  std::unique_ptr<MessageLoopForUI> message_loop_;
+  std::unique_ptr<MemoryPressureListener> listener_;
+};
+
+TEST_F(MemoryPressureListenerTest, NotifyMemoryPressure) {
+  // Memory pressure notifications are not suppressed by default.
+  EXPECT_FALSE(MemoryPressureListener::AreNotificationsSuppressed());
+  ExpectNotification(&MemoryPressureListener::NotifyMemoryPressure,
+                     MemoryPressureLevel::MEMORY_PRESSURE_LEVEL_MODERATE);
+  ExpectNotification(&MemoryPressureListener::SimulatePressureNotification,
+                     MemoryPressureLevel::MEMORY_PRESSURE_LEVEL_MODERATE);
+
+  // Enable suppressing memory pressure notifications.
+  MemoryPressureListener::SetNotificationsSuppressed(true);
+  EXPECT_TRUE(MemoryPressureListener::AreNotificationsSuppressed());
+  ExpectNoNotification(&MemoryPressureListener::NotifyMemoryPressure,
+                       MemoryPressureLevel::MEMORY_PRESSURE_LEVEL_MODERATE);
+  ExpectNotification(&MemoryPressureListener::SimulatePressureNotification,
+                     MemoryPressureLevel::MEMORY_PRESSURE_LEVEL_MODERATE);
+
+  // Disable suppressing memory pressure notifications.
+  MemoryPressureListener::SetNotificationsSuppressed(false);
+  EXPECT_FALSE(MemoryPressureListener::AreNotificationsSuppressed());
+  ExpectNotification(&MemoryPressureListener::NotifyMemoryPressure,
+                     MemoryPressureLevel::MEMORY_PRESSURE_LEVEL_CRITICAL);
+  ExpectNotification(&MemoryPressureListener::SimulatePressureNotification,
+                     MemoryPressureLevel::MEMORY_PRESSURE_LEVEL_CRITICAL);
+}
+
+}  // namespace base
diff --git a/src/base/memory/memory_pressure_monitor.cc b/src/base/memory/memory_pressure_monitor.cc
new file mode 100644
index 0000000..ed350b8
--- /dev/null
+++ b/src/base/memory/memory_pressure_monitor.cc
@@ -0,0 +1,71 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/memory_pressure_monitor.h"
+
+#include "base/logging.h"
+#include "base/metrics/histogram_macros.h"
+
+namespace base {
+namespace {
+
+MemoryPressureMonitor* g_monitor = nullptr;
+
+// Enumeration of UMA memory pressure levels. This needs to be kept in sync with
+// histograms.xml and the memory pressure levels defined in
+// MemoryPressureListener.
+enum MemoryPressureLevelUMA {
+  UMA_MEMORY_PRESSURE_LEVEL_NONE = 0,
+  UMA_MEMORY_PRESSURE_LEVEL_MODERATE = 1,
+  UMA_MEMORY_PRESSURE_LEVEL_CRITICAL = 2,
+  // This must be the last value in the enum.
+  UMA_MEMORY_PRESSURE_LEVEL_COUNT,
+};
+
+// Converts a memory pressure level to an UMA enumeration value.
+MemoryPressureLevelUMA MemoryPressureLevelToUmaEnumValue(
+    base::MemoryPressureListener::MemoryPressureLevel level) {
+  switch (level) {
+    case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE:
+      return UMA_MEMORY_PRESSURE_LEVEL_NONE;
+    case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE:
+      return UMA_MEMORY_PRESSURE_LEVEL_MODERATE;
+    case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL:
+      return UMA_MEMORY_PRESSURE_LEVEL_CRITICAL;
+  }
+  NOTREACHED();
+  return UMA_MEMORY_PRESSURE_LEVEL_NONE;
+}
+
+}  // namespace
+
+MemoryPressureMonitor::MemoryPressureMonitor() {
+  DCHECK(!g_monitor);
+  g_monitor = this;
+}
+
+MemoryPressureMonitor::~MemoryPressureMonitor() {
+  DCHECK(g_monitor);
+  g_monitor = nullptr;
+}
+
+// static
+MemoryPressureMonitor* MemoryPressureMonitor::Get() {
+  return g_monitor;
+}
+void MemoryPressureMonitor::RecordMemoryPressure(
+    base::MemoryPressureListener::MemoryPressureLevel level,
+    int ticks) {
+  // Use the more primitive STATIC_HISTOGRAM_POINTER_BLOCK macro because the
+  // simple UMA_HISTOGRAM macros don't expose 'AddCount' functionality.
+  STATIC_HISTOGRAM_POINTER_BLOCK(
+      "Memory.PressureLevel",
+      AddCount(MemoryPressureLevelToUmaEnumValue(level), ticks),
+      base::LinearHistogram::FactoryGet(
+          "Memory.PressureLevel", 1, UMA_MEMORY_PRESSURE_LEVEL_COUNT,
+          UMA_MEMORY_PRESSURE_LEVEL_COUNT + 1,
+          base::HistogramBase::kUmaTargetedHistogramFlag));
+}
+
+}  // namespace base
diff --git a/src/base/memory/memory_pressure_monitor.h b/src/base/memory/memory_pressure_monitor.h
new file mode 100644
index 0000000..e48244b
--- /dev/null
+++ b/src/base/memory/memory_pressure_monitor.h
@@ -0,0 +1,53 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_MEMORY_PRESSURE_MONITOR_H_
+#define BASE_MEMORY_MEMORY_PRESSURE_MONITOR_H_
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/memory/memory_pressure_listener.h"
+
+namespace base {
+
+// TODO(chrisha): Make this a concrete class with per-OS implementations rather
+// than an abstract base class.
+
+// Declares the interface for a MemoryPressureMonitor. There are multiple
+// OS specific implementations of this class. An instance of the memory
+// pressure observer is created at the process level, tracks memory usage, and
+// pushes memory state change notifications to the static function
+// base::MemoryPressureListener::NotifyMemoryPressure. This in turn notifies
+// all MemoryPressureListener instances via a callback.
+class BASE_EXPORT MemoryPressureMonitor {
+ public:
+  using MemoryPressureLevel = base::MemoryPressureListener::MemoryPressureLevel;
+  using DispatchCallback = base::Callback<void(MemoryPressureLevel level)>;
+
+  virtual ~MemoryPressureMonitor();
+
+  // Return the singleton MemoryPressureMonitor.
+  static MemoryPressureMonitor* Get();
+
+  // Record memory pressure UMA statistic. A tick is 5 seconds.
+  static void RecordMemoryPressure(MemoryPressureLevel level, int ticks);
+
+  // Returns the currently observed memory pressure.
+  virtual MemoryPressureLevel GetCurrentPressureLevel() = 0;
+
+  // Sets a notification callback. The default callback invokes
+  // base::MemoryPressureListener::NotifyMemoryPressure.
+  virtual void SetDispatchCallback(const DispatchCallback& callback) = 0;
+
+ protected:
+  MemoryPressureMonitor();
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MemoryPressureMonitor);
+};
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_MEMORY_PRESSURE_MONITOR_H_
diff --git a/src/base/memory/memory_pressure_monitor_chromeos.cc b/src/base/memory/memory_pressure_monitor_chromeos.cc
new file mode 100644
index 0000000..4b44cab
--- /dev/null
+++ b/src/base/memory/memory_pressure_monitor_chromeos.cc
@@ -0,0 +1,297 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/memory_pressure_monitor_chromeos.h"
+
+#include <fcntl.h>
+#include <sys/select.h>
+
+#include "base/metrics/histogram_macros.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/process/process_metrics.h"
+#include "base/single_thread_task_runner.h"
+#include "base/sys_info.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/time/time.h"
+#include "starboard/types.h"
+
+namespace base {
+namespace chromeos {
+
+namespace {
+
+// Type-safe version of |g_monitor| from base/memory/memory_pressure_monitor.cc.
+MemoryPressureMonitor* g_monitor = nullptr;
+
+// The time between memory pressure checks. While under critical pressure, this
+// is also the timer to repeat cleanup attempts.
+const int kMemoryPressureIntervalMs = 1000;
+
+// The time which should pass between two moderate memory pressure calls.
+const int kModerateMemoryPressureCooldownMs = 10000;
+
+// Number of event polls before the next moderate pressure event can be sent.
+const int kModerateMemoryPressureCooldown =
+    kModerateMemoryPressureCooldownMs / kMemoryPressureIntervalMs;
+
+// Threshold constants to emit pressure events.
+const int kNormalMemoryPressureModerateThresholdPercent = 60;
+const int kNormalMemoryPressureCriticalThresholdPercent = 95;
+const int kAggressiveMemoryPressureModerateThresholdPercent = 35;
+const int kAggressiveMemoryPressureCriticalThresholdPercent = 70;
+
+// The possible state for memory pressure level. The values should be in line
+// with values in MemoryPressureListener::MemoryPressureLevel and should be
+// updated if more memory pressure levels are introduced.
+enum MemoryPressureLevelUMA {
+  MEMORY_PRESSURE_LEVEL_NONE = 0,
+  MEMORY_PRESSURE_LEVEL_MODERATE,
+  MEMORY_PRESSURE_LEVEL_CRITICAL,
+  NUM_MEMORY_PRESSURE_LEVELS
+};
+
+// This is the file that will exist if low memory notification is available
+// on the device.  Whenever it becomes readable, it signals a low memory
+// condition.
+const char kLowMemFile[] = "/dev/chromeos-low-mem";
+
+// Converts a |MemoryPressureThreshold| value into a used memory percentage for
+// the moderate pressure event.
+int GetModerateMemoryThresholdInPercent(
+    MemoryPressureMonitor::MemoryPressureThresholds thresholds) {
+  return thresholds == MemoryPressureMonitor::
+                           THRESHOLD_AGGRESSIVE_CACHE_DISCARD ||
+         thresholds == MemoryPressureMonitor::THRESHOLD_AGGRESSIVE
+             ? kAggressiveMemoryPressureModerateThresholdPercent
+             : kNormalMemoryPressureModerateThresholdPercent;
+}
+
+// Converts a |MemoryPressureThreshold| value into a used memory percentage for
+// the critical pressure event.
+int GetCriticalMemoryThresholdInPercent(
+    MemoryPressureMonitor::MemoryPressureThresholds thresholds) {
+  return thresholds == MemoryPressureMonitor::
+                           THRESHOLD_AGGRESSIVE_TAB_DISCARD ||
+         thresholds == MemoryPressureMonitor::THRESHOLD_AGGRESSIVE
+             ? kAggressiveMemoryPressureCriticalThresholdPercent
+             : kNormalMemoryPressureCriticalThresholdPercent;
+}
+
+// Converts free percent of memory into a memory pressure value.
+MemoryPressureListener::MemoryPressureLevel GetMemoryPressureLevelFromFillLevel(
+    int actual_fill_level,
+    int moderate_threshold,
+    int critical_threshold) {
+  if (actual_fill_level < moderate_threshold)
+    return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE;
+  return actual_fill_level < critical_threshold
+             ? MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE
+             : MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL;
+}
+
+// This function will be called less than once a second. It will check if
+// the kernel has detected a low memory situation.
+bool IsLowMemoryCondition(int file_descriptor) {
+  fd_set fds;
+  struct timeval tv;
+
+  FD_ZERO(&fds);
+  FD_SET(file_descriptor, &fds);
+
+  tv.tv_sec = 0;
+  tv.tv_usec = 0;
+
+  return HANDLE_EINTR(select(file_descriptor + 1, &fds, NULL, NULL, &tv)) > 0;
+}
+
+}  // namespace
+
+MemoryPressureMonitor::MemoryPressureMonitor(
+    MemoryPressureThresholds thresholds)
+    : current_memory_pressure_level_(
+          MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE),
+      moderate_pressure_repeat_count_(0),
+      seconds_since_reporting_(0),
+      moderate_pressure_threshold_percent_(
+          GetModerateMemoryThresholdInPercent(thresholds)),
+      critical_pressure_threshold_percent_(
+          GetCriticalMemoryThresholdInPercent(thresholds)),
+      low_mem_file_(HANDLE_EINTR(::open(kLowMemFile, O_RDONLY))),
+      dispatch_callback_(
+          base::Bind(&MemoryPressureListener::NotifyMemoryPressure)),
+      weak_ptr_factory_(this) {
+  DCHECK(!g_monitor);
+  g_monitor = this;
+
+  StartObserving();
+  LOG_IF(ERROR,
+         base::SysInfo::IsRunningOnChromeOS() && !low_mem_file_.is_valid())
+      << "Cannot open kernel listener";
+}
+
+MemoryPressureMonitor::~MemoryPressureMonitor() {
+  DCHECK(g_monitor);
+  g_monitor = nullptr;
+
+  StopObserving();
+}
+
+void MemoryPressureMonitor::ScheduleEarlyCheck() {
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, BindOnce(&MemoryPressureMonitor::CheckMemoryPressure,
+                          weak_ptr_factory_.GetWeakPtr()));
+}
+
+MemoryPressureListener::MemoryPressureLevel
+MemoryPressureMonitor::GetCurrentPressureLevel() {
+  return current_memory_pressure_level_;
+}
+
+// static
+MemoryPressureMonitor* MemoryPressureMonitor::Get() {
+  return g_monitor;
+}
+
+void MemoryPressureMonitor::StartObserving() {
+  timer_.Start(FROM_HERE,
+               TimeDelta::FromMilliseconds(kMemoryPressureIntervalMs),
+               Bind(&MemoryPressureMonitor::
+                        CheckMemoryPressureAndRecordStatistics,
+                    weak_ptr_factory_.GetWeakPtr()));
+}
+
+void MemoryPressureMonitor::StopObserving() {
+  // If StartObserving failed, StopObserving will still get called.
+  timer_.Stop();
+}
+
+void MemoryPressureMonitor::CheckMemoryPressureAndRecordStatistics() {
+  CheckMemoryPressure();
+  if (seconds_since_reporting_++ == 5) {
+    seconds_since_reporting_ = 0;
+    RecordMemoryPressure(current_memory_pressure_level_, 1);
+  }
+  // Record UMA histogram statistics for the current memory pressure level.
+  // TODO(lgrey): Remove this once there's a usable history for the
+  // "Memory.PressureLevel" statistic
+  MemoryPressureLevelUMA memory_pressure_level_uma(MEMORY_PRESSURE_LEVEL_NONE);
+  switch (current_memory_pressure_level_) {
+    case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE:
+      memory_pressure_level_uma = MEMORY_PRESSURE_LEVEL_NONE;
+      break;
+    case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE:
+      memory_pressure_level_uma = MEMORY_PRESSURE_LEVEL_MODERATE;
+      break;
+    case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL:
+      memory_pressure_level_uma = MEMORY_PRESSURE_LEVEL_CRITICAL;
+      break;
+  }
+
+  UMA_HISTOGRAM_ENUMERATION("ChromeOS.MemoryPressureLevel",
+                            memory_pressure_level_uma,
+                            NUM_MEMORY_PRESSURE_LEVELS);
+}
+
+void MemoryPressureMonitor::CheckMemoryPressure() {
+  MemoryPressureListener::MemoryPressureLevel old_pressure =
+      current_memory_pressure_level_;
+
+  // If we have the kernel low memory observer, we use its flag instead of our
+  // own computation (for now). Note that in "simulation mode" it can be null.
+  // TODO(skuhne): We need to add code which makes sure that the kernel and this
+  // computation come to similar results and then remove this override again.
+  // TODO(skuhne): Add some testing framework here to see how close the kernel
+  // and the internal functions are.
+  if (low_mem_file_.is_valid() && IsLowMemoryCondition(low_mem_file_.get())) {
+    current_memory_pressure_level_ =
+        MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL;
+  } else {
+    current_memory_pressure_level_ = GetMemoryPressureLevelFromFillLevel(
+        GetUsedMemoryInPercent(),
+        moderate_pressure_threshold_percent_,
+        critical_pressure_threshold_percent_);
+
+    // When listening to the kernel, we ignore the reported memory pressure
+    // level from our own computation and reduce critical to moderate.
+    if (low_mem_file_.is_valid() &&
+        current_memory_pressure_level_ ==
+        MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL) {
+      current_memory_pressure_level_ =
+          MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE;
+    }
+  }
+
+  // In case there is no memory pressure we do not notify.
+  if (current_memory_pressure_level_ ==
+      MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE) {
+    return;
+  }
+  if (old_pressure == current_memory_pressure_level_) {
+    // If the memory pressure is still at the same level, we notify again for a
+    // critical level. In case of a moderate level repeat however, we only send
+    // a notification after a certain time has passed.
+    if (current_memory_pressure_level_ ==
+        MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE &&
+          ++moderate_pressure_repeat_count_ <
+              kModerateMemoryPressureCooldown) {
+      return;
+    }
+  } else if (current_memory_pressure_level_ ==
+               MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE &&
+             old_pressure ==
+               MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL) {
+    // When we reduce the pressure level from critical to moderate, we
+    // restart the timeout and do not send another notification.
+    moderate_pressure_repeat_count_ = 0;
+    return;
+  }
+  moderate_pressure_repeat_count_ = 0;
+  dispatch_callback_.Run(current_memory_pressure_level_);
+}
+
+// Gets the used ChromeOS memory in percent.
+int MemoryPressureMonitor::GetUsedMemoryInPercent() {
+  base::SystemMemoryInfoKB info;
+  if (!base::GetSystemMemoryInfo(&info)) {
+    VLOG(1) << "Cannot determine the free memory of the system.";
+    return 0;
+  }
+  // TODO(skuhne): Instead of adding the kernel memory pressure calculation
+  // logic here, we should have a kernel mechanism similar to the low memory
+  // notifier in ChromeOS which offers multiple pressure states.
+  // To track this, we have crbug.com/381196.
+
+  // The available memory consists of "real" and virtual (z)ram memory.
+  // Since swappable memory uses a non pre-deterministic compression and
+  // the compression creates its own "dynamic" in the system, it gets
+  // de-emphasized by the |kSwapWeight| factor.
+  const int kSwapWeight = 4;
+
+  // The total memory we have is the "real memory" plus the virtual (z)ram.
+  int total_memory = info.total + info.swap_total / kSwapWeight;
+
+  // The kernel internally uses 50MB.
+  const int kMinFileMemory = 50 * 1024;
+
+  // Most file memory can be easily reclaimed.
+  int file_memory = info.active_file + info.inactive_file;
+  // unless it is dirty or it's a minimal portion which is required.
+  file_memory -= info.dirty + kMinFileMemory;
+
+  // Available memory is the sum of free, swap and easy reclaimable memory.
+  int available_memory =
+      info.free + info.swap_free / kSwapWeight + file_memory;
+
+  DCHECK(available_memory < total_memory);
+  int percentage = ((total_memory - available_memory) * 100) / total_memory;
+  return percentage;
+}
+
+void MemoryPressureMonitor::SetDispatchCallback(
+    const DispatchCallback& callback) {
+  dispatch_callback_ = callback;
+}
+
+}  // namespace chromeos
+}  // namespace base
diff --git a/src/base/memory/memory_pressure_monitor_chromeos.h b/src/base/memory/memory_pressure_monitor_chromeos.h
new file mode 100644
index 0000000..563ba85
--- /dev/null
+++ b/src/base/memory/memory_pressure_monitor_chromeos.h
@@ -0,0 +1,128 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_MEMORY_PRESSURE_MONITOR_CHROMEOS_H_
+#define BASE_MEMORY_MEMORY_PRESSURE_MONITOR_CHROMEOS_H_
+
+#include "base/base_export.h"
+#include "base/files/scoped_file.h"
+#include "base/macros.h"
+#include "base/memory/memory_pressure_listener.h"
+#include "base/memory/memory_pressure_monitor.h"
+#include "base/memory/weak_ptr.h"
+#include "base/timer/timer.h"
+
+namespace base {
+namespace chromeos {
+
+class TestMemoryPressureMonitor;
+
+////////////////////////////////////////////////////////////////////////////////
+// MemoryPressureMonitor
+//
+// A class to handle the observation of our free memory. It notifies the
+// MemoryPressureListener of memory fill level changes, so that it can take
+// action to reduce memory resources accordingly.
+//
+class BASE_EXPORT MemoryPressureMonitor : public base::MemoryPressureMonitor {
+ public:
+  using GetUsedMemoryInPercentCallback = int (*)();
+
+  // There are two memory pressure events:
+  // MODERATE - which will mainly release caches.
+  // CRITICAL - which will discard tabs.
+  // The |MemoryPressureThresholds| enum selects the strategy of firing these
+  // events: A conservative strategy will keep as much content in memory as
+  // possible (causing the system to swap to zram) and an aggressive strategy
+  // will release memory earlier to avoid swapping.
+  enum MemoryPressureThresholds {
+    // Use the system default.
+    THRESHOLD_DEFAULT = 0,
+    // Try to keep as much content in memory as possible.
+    THRESHOLD_CONSERVATIVE = 1,
+    // Discard caches earlier, allowing to keep more tabs in memory.
+    THRESHOLD_AGGRESSIVE_CACHE_DISCARD = 2,
+    // Discard tabs earlier, allowing the system to get faster.
+    THRESHOLD_AGGRESSIVE_TAB_DISCARD = 3,
+    // Discard caches and tabs earlier to allow the system to be faster.
+    THRESHOLD_AGGRESSIVE = 4
+  };
+
+  explicit MemoryPressureMonitor(MemoryPressureThresholds thresholds);
+  ~MemoryPressureMonitor() override;
+
+  // Redo the memory pressure calculation soon and call again if a critical
+  // memory pressure prevails. Note that this call will trigger an asynchronous
+  // action which gives the system time to release memory back into the pool.
+  void ScheduleEarlyCheck();
+
+  // Get the current memory pressure level.
+  MemoryPressureListener::MemoryPressureLevel GetCurrentPressureLevel()
+      override;
+  void SetDispatchCallback(const DispatchCallback& callback) override;
+
+  // Returns a type-casted version of the current memory pressure monitor. A
+  // simple wrapper to base::MemoryPressureMonitor::Get.
+  static MemoryPressureMonitor* Get();
+
+ private:
+  friend TestMemoryPressureMonitor;
+  // Starts observing the memory fill level.
+  // Calls to StartObserving should always be matched with calls to
+  // StopObserving.
+  void StartObserving();
+
+  // Stop observing the memory fill level.
+  // May be safely called if StartObserving has not been called.
+  void StopObserving();
+
+  // The function which gets periodically called to check any changes in the
+  // memory pressure. It will report pressure changes as well as continuous
+  // critical pressure levels.
+  void CheckMemoryPressure();
+
+  // The function periodically checks the memory pressure changes and records
+  // the UMA histogram statistics for the current memory pressure level.
+  void CheckMemoryPressureAndRecordStatistics();
+
+  // Get the memory pressure in percent (virtual for testing).
+  virtual int GetUsedMemoryInPercent();
+
+  // The current memory pressure.
+  base::MemoryPressureListener::MemoryPressureLevel
+      current_memory_pressure_level_;
+
+  // A periodic timer to check for resource pressure changes. This will get
+  // replaced by a kernel triggered event system (see crbug.com/381196).
+  base::RepeatingTimer timer_;
+
+  // To slow down the amount of moderate pressure event calls, this counter
+  // gets used to count the number of events since the last event occurred.
+  int moderate_pressure_repeat_count_;
+
+  // The "Memory.PressureLevel" statistic is recorded every
+  // 5 seconds, but the timer to report "ChromeOS.MemoryPressureLevel"
+  // fires every second. This counter is used to allow reporting
+  // "Memory.PressureLevel" correctly without adding another
+  // timer.
+  int seconds_since_reporting_;
+
+  // The thresholds for moderate and critical pressure.
+  const int moderate_pressure_threshold_percent_;
+  const int critical_pressure_threshold_percent_;
+
+  // File descriptor used to detect low memory condition.
+  ScopedFD low_mem_file_;
+
+  DispatchCallback dispatch_callback_;
+
+  base::WeakPtrFactory<MemoryPressureMonitor> weak_ptr_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(MemoryPressureMonitor);
+};
+
+}  // namespace chromeos
+}  // namespace base
+
+#endif  // BASE_MEMORY_MEMORY_PRESSURE_MONITOR_CHROMEOS_H_
diff --git a/src/base/memory/memory_pressure_monitor_chromeos_unittest.cc b/src/base/memory/memory_pressure_monitor_chromeos_unittest.cc
new file mode 100644
index 0000000..ee00091
--- /dev/null
+++ b/src/base/memory/memory_pressure_monitor_chromeos_unittest.cc
@@ -0,0 +1,172 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/memory_pressure_monitor_chromeos.h"
+
+#include "base/macros.h"
+#include "base/memory/memory_pressure_listener.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/sys_info.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace chromeos {
+
+namespace {
+
+// True if the memory notifier got called.
+// Do not read/modify value directly.
+bool on_memory_pressure_called = false;
+
+// If the memory notifier got called, this is the memory pressure reported.
+MemoryPressureListener::MemoryPressureLevel on_memory_pressure_level =
+    MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE;
+
+// Processes OnMemoryPressure calls.
+void OnMemoryPressure(MemoryPressureListener::MemoryPressureLevel level) {
+  on_memory_pressure_called = true;
+  on_memory_pressure_level = level;
+}
+
+// Resets the indicator for memory pressure.
+void ResetOnMemoryPressureCalled() {
+  on_memory_pressure_called = false;
+}
+
+// Returns true when OnMemoryPressure was called (and resets it).
+bool WasOnMemoryPressureCalled() {
+  bool b = on_memory_pressure_called;
+  ResetOnMemoryPressureCalled();
+  return b;
+}
+
+}  // namespace
+
+class TestMemoryPressureMonitor : public MemoryPressureMonitor {
+ public:
+  TestMemoryPressureMonitor()
+      : MemoryPressureMonitor(THRESHOLD_DEFAULT),
+        memory_in_percent_override_(0) {
+    // Disable any timers which are going on and set a special memory reporting
+    // function.
+    StopObserving();
+  }
+  ~TestMemoryPressureMonitor() override = default;
+
+  void SetMemoryInPercentOverride(int percent) {
+    memory_in_percent_override_ = percent;
+  }
+
+  void CheckMemoryPressureForTest() {
+    CheckMemoryPressure();
+  }
+
+ private:
+  int GetUsedMemoryInPercent() override {
+    return memory_in_percent_override_;
+  }
+
+  int memory_in_percent_override_;
+  DISALLOW_COPY_AND_ASSIGN(TestMemoryPressureMonitor);
+};
+
+// This test tests the various transition states from memory pressure, looking
+// for the correct behavior on event reposting as well as state updates.
+TEST(ChromeOSMemoryPressureMonitorTest, CheckMemoryPressure) {
+  // crbug.com/844102:
+  if (base::SysInfo::IsRunningOnChromeOS())
+    return;
+
+  base::MessageLoopForUI message_loop;
+  std::unique_ptr<TestMemoryPressureMonitor> monitor(
+      new TestMemoryPressureMonitor);
+  std::unique_ptr<MemoryPressureListener> listener(
+      new MemoryPressureListener(base::Bind(&OnMemoryPressure)));
+  // Checking the memory pressure while 0% are used should not produce any
+  // events.
+  monitor->SetMemoryInPercentOverride(0);
+  ResetOnMemoryPressureCalled();
+
+  monitor->CheckMemoryPressureForTest();
+  RunLoop().RunUntilIdle();
+  EXPECT_FALSE(WasOnMemoryPressureCalled());
+  EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
+            monitor->GetCurrentPressureLevel());
+
+  // Setting the memory level to 80% should produce a moderate pressure level.
+  monitor->SetMemoryInPercentOverride(80);
+  monitor->CheckMemoryPressureForTest();
+  RunLoop().RunUntilIdle();
+  EXPECT_TRUE(WasOnMemoryPressureCalled());
+  EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+            monitor->GetCurrentPressureLevel());
+  EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+            on_memory_pressure_level);
+
+  // We need to check that the event gets reposted after a while.
+  int i = 0;
+  for (; i < 100; i++) {
+    monitor->CheckMemoryPressureForTest();
+    RunLoop().RunUntilIdle();
+    EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+              monitor->GetCurrentPressureLevel());
+    if (WasOnMemoryPressureCalled()) {
+      EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+                on_memory_pressure_level);
+      break;
+    }
+  }
+  // Should be more than 5 and less than 100.
+  EXPECT_LE(5, i);
+  EXPECT_GE(99, i);
+
+  // Setting the memory usage to 99% should produce critical levels.
+  monitor->SetMemoryInPercentOverride(99);
+  monitor->CheckMemoryPressureForTest();
+  RunLoop().RunUntilIdle();
+  EXPECT_TRUE(WasOnMemoryPressureCalled());
+  EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
+            on_memory_pressure_level);
+  EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
+            monitor->GetCurrentPressureLevel());
+
+  // Calling it again should immediately produce a second call.
+  monitor->CheckMemoryPressureForTest();
+  RunLoop().RunUntilIdle();
+  EXPECT_TRUE(WasOnMemoryPressureCalled());
+  EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
+            on_memory_pressure_level);
+  EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
+            monitor->GetCurrentPressureLevel());
+
+  // When lowering the pressure again we should not get an event, but the
+  // pressure should go back to moderate.
+  monitor->SetMemoryInPercentOverride(80);
+  monitor->CheckMemoryPressureForTest();
+  RunLoop().RunUntilIdle();
+  EXPECT_FALSE(WasOnMemoryPressureCalled());
+  EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+            monitor->GetCurrentPressureLevel());
+
+  // We should need exactly the same amount of calls as before, before the next
+  // call comes in.
+  int j = 0;
+  for (; j < 100; j++) {
+    monitor->CheckMemoryPressureForTest();
+    RunLoop().RunUntilIdle();
+    EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+              monitor->GetCurrentPressureLevel());
+    if (WasOnMemoryPressureCalled()) {
+      EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+                on_memory_pressure_level);
+      break;
+    }
+  }
+  // We should have needed exactly the same amount of checks as before.
+  EXPECT_EQ(j, i);
+}
+
+}  // namespace chromeos
+}  // namespace base
diff --git a/src/base/memory/memory_pressure_monitor_mac.cc b/src/base/memory/memory_pressure_monitor_mac.cc
new file mode 100644
index 0000000..23857d6
--- /dev/null
+++ b/src/base/memory/memory_pressure_monitor_mac.cc
@@ -0,0 +1,190 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/memory_pressure_monitor_mac.h"
+
+#include <CoreFoundation/CoreFoundation.h>
+
+#include <dlfcn.h>
+#include <sys/sysctl.h>
+
+#include <cmath>
+
+#include "base/bind.h"
+#include "base/logging.h"
+#include "base/mac/mac_util.h"
+#include "starboard/types.h"
+
+// Redeclare for partial 10.9 availability.
+DISPATCH_EXPORT const struct dispatch_source_type_s
+    _dispatch_source_type_memorypressure;
+
+namespace {
+static const int kUMATickSize = 5;
+}  // namespace
+
+namespace base {
+namespace mac {
+
+MemoryPressureListener::MemoryPressureLevel
+MemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressureLevel(
+    int mac_memory_pressure_level) {
+  switch (mac_memory_pressure_level) {
+    case DISPATCH_MEMORYPRESSURE_NORMAL:
+      return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE;
+    case DISPATCH_MEMORYPRESSURE_WARN:
+      return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE;
+    case DISPATCH_MEMORYPRESSURE_CRITICAL:
+      return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL;
+  }
+  return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE;
+}
+
+void MemoryPressureMonitor::OnRunLoopExit(CFRunLoopObserverRef observer,
+                                          CFRunLoopActivity activity,
+                                          void* info) {
+  MemoryPressureMonitor* self = static_cast<MemoryPressureMonitor*>(info);
+  self->UpdatePressureLevelOnRunLoopExit();
+}
+
+MemoryPressureMonitor::MemoryPressureMonitor()
+    : memory_level_event_source_(dispatch_source_create(
+          DISPATCH_SOURCE_TYPE_MEMORYPRESSURE,
+          0,
+          DISPATCH_MEMORYPRESSURE_WARN | DISPATCH_MEMORYPRESSURE_CRITICAL |
+              DISPATCH_MEMORYPRESSURE_NORMAL,
+          dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0))),
+      dispatch_callback_(
+          base::Bind(&MemoryPressureListener::NotifyMemoryPressure)),
+      last_statistic_report_time_(CFAbsoluteTimeGetCurrent()),
+      last_pressure_level_(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE),
+      subtick_seconds_(0) {
+  // Attach an event handler to the memory pressure event source.
+  if (memory_level_event_source_.get()) {
+    dispatch_source_set_event_handler(memory_level_event_source_, ^{
+      OnMemoryPressureChanged(memory_level_event_source_.get(),
+                              dispatch_callback_);
+    });
+
+    // Start monitoring the event source.
+    dispatch_resume(memory_level_event_source_);
+  }
+
+  // Create a CFRunLoopObserver to check the memory pressure at the end of
+  // every pass through the event loop (modulo kUMATickSize).
+  CFRunLoopObserverContext observer_context = {0, this, NULL, NULL, NULL};
+
+  exit_observer_.reset(
+      CFRunLoopObserverCreate(kCFAllocatorDefault, kCFRunLoopExit, true, 0,
+                              OnRunLoopExit, &observer_context));
+
+  CFRunLoopRef run_loop = CFRunLoopGetCurrent();
+  CFRunLoopAddObserver(run_loop, exit_observer_, kCFRunLoopCommonModes);
+  CFRunLoopAddObserver(run_loop, exit_observer_,
+                       kMessageLoopExclusiveRunLoopMode);
+}
+
+MemoryPressureMonitor::~MemoryPressureMonitor() {
+  // Detach from the run loop.
+  CFRunLoopRef run_loop = CFRunLoopGetCurrent();
+  CFRunLoopRemoveObserver(run_loop, exit_observer_, kCFRunLoopCommonModes);
+  CFRunLoopRemoveObserver(run_loop, exit_observer_,
+                          kMessageLoopExclusiveRunLoopMode);
+
+  // Remove the memory pressure event source.
+  if (memory_level_event_source_.get()) {
+    dispatch_source_cancel(memory_level_event_source_);
+  }
+}
+
+int MemoryPressureMonitor::GetMacMemoryPressureLevel() {
+  // Get the raw memory pressure level from macOS.
+  int mac_memory_pressure_level;
+  size_t length = sizeof(int);
+  sysctlbyname("kern.memorystatus_vm_pressure_level",
+               &mac_memory_pressure_level, &length, nullptr, 0);
+
+  return mac_memory_pressure_level;
+}
+
+void MemoryPressureMonitor::UpdatePressureLevel() {
+  // Get the current macOS pressure level and convert to the corresponding
+  // Chrome pressure level.
+  int mac_memory_pressure_level = GetMacMemoryPressureLevel();
+  MemoryPressureListener::MemoryPressureLevel new_pressure_level =
+      MemoryPressureLevelForMacMemoryPressureLevel(mac_memory_pressure_level);
+
+  // Compute the number of "ticks" spent at |last_pressure_level_| (since the
+  // last report sent to UMA).
+  CFTimeInterval now = CFAbsoluteTimeGetCurrent();
+  CFTimeInterval time_since_last_report = now - last_statistic_report_time_;
+  last_statistic_report_time_ = now;
+
+  double accumulated_time = time_since_last_report + subtick_seconds_;
+  int ticks_to_report = static_cast<int>(accumulated_time / kUMATickSize);
+  // Save for later the seconds that didn't make it into a full tick.
+  subtick_seconds_ = std::fmod(accumulated_time, kUMATickSize);
+
+  // Round the tick count up on a pressure level change to ensure we capture it.
+  bool pressure_level_changed = (new_pressure_level != last_pressure_level_);
+  if (pressure_level_changed && ticks_to_report < 1) {
+    ticks_to_report = 1;
+    subtick_seconds_ = 0;
+  }
+
+  // Send elapsed ticks to UMA.
+  if (ticks_to_report >= 1) {
+    RecordMemoryPressure(last_pressure_level_, ticks_to_report);
+  }
+
+  // Save the now-current memory pressure level.
+  last_pressure_level_ = new_pressure_level;
+}
+
+void MemoryPressureMonitor::UpdatePressureLevelOnRunLoopExit() {
+  // Wait until it's time to check the pressure level.
+  CFTimeInterval now = CFAbsoluteTimeGetCurrent();
+  if (now >= next_run_loop_update_time_) {
+    UpdatePressureLevel();
+
+    // Update again in kUMATickSize seconds. We can update at any frequency,
+    // but because we're only checking memory pressure levels for UMA there's
+    // no need to update more frequently than we're keeping statistics on.
+    next_run_loop_update_time_ = now + kUMATickSize - subtick_seconds_;
+  }
+}
+
+// Static.
+int MemoryPressureMonitor::GetSecondsPerUMATick() {
+  return kUMATickSize;
+}
+
+MemoryPressureListener::MemoryPressureLevel
+MemoryPressureMonitor::GetCurrentPressureLevel() {
+  return last_pressure_level_;
+}
+
+void MemoryPressureMonitor::OnMemoryPressureChanged(
+    dispatch_source_s* event_source,
+    const MemoryPressureMonitor::DispatchCallback& dispatch_callback) {
+  // The OS has sent a notification that the memory pressure level has changed.
+  // Go through the normal memory pressure level checking mechanism so that
+  // last_pressure_level_ and UMA get updated to the current value.
+  UpdatePressureLevel();
+
+  // Run the callback that's waiting on memory pressure change notifications.
+  // The convention is to not send notifications on memory pressure returning to
+  // normal.
+  if (last_pressure_level_ !=
+      MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE)
+    dispatch_callback.Run(last_pressure_level_);
+}
+
+void MemoryPressureMonitor::SetDispatchCallback(
+    const DispatchCallback& callback) {
+  dispatch_callback_ = callback;
+}
+
+}  // namespace mac
+}  // namespace base
diff --git a/src/base/memory/memory_pressure_monitor_mac.h b/src/base/memory/memory_pressure_monitor_mac.h
new file mode 100644
index 0000000..6f0e02f
--- /dev/null
+++ b/src/base/memory/memory_pressure_monitor_mac.h
@@ -0,0 +1,92 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_MEMORY_PRESSURE_MONITOR_MAC_H_
+#define BASE_MEMORY_MEMORY_PRESSURE_MONITOR_MAC_H_
+
+#include <CoreFoundation/CFDate.h>
+#include <dispatch/dispatch.h>
+
+#include "base/base_export.h"
+#include "base/mac/scoped_cftyperef.h"
+#include "base/mac/scoped_dispatch_object.h"
+#include "base/macros.h"
+#include "base/memory/memory_pressure_listener.h"
+#include "base/memory/memory_pressure_monitor.h"
+#include "base/message_loop/message_pump_mac.h"
+#include "starboard/types.h"
+
+namespace base {
+namespace mac {
+
+class TestMemoryPressureMonitor;
+
+// Declares the interface for the Mac MemoryPressureMonitor, which reports
+// memory pressure events and status.
+class BASE_EXPORT MemoryPressureMonitor : public base::MemoryPressureMonitor {
+ public:
+  MemoryPressureMonitor();
+  ~MemoryPressureMonitor() override;
+
+  // Returns the currently-observed memory pressure.
+  MemoryPressureLevel GetCurrentPressureLevel() override;
+
+  void SetDispatchCallback(const DispatchCallback& callback) override;
+
+ private:
+  friend TestMemoryPressureMonitor;
+
+  static MemoryPressureLevel MemoryPressureLevelForMacMemoryPressureLevel(
+      int mac_memory_pressure_level);
+  static void OnRunLoopExit(CFRunLoopObserverRef observer,
+                            CFRunLoopActivity activity,
+                            void* info);
+  // Returns the raw memory pressure level from the macOS. Exposed for
+  // unit testing.
+  virtual int GetMacMemoryPressureLevel();
+
+  // Updates |last_pressure_level_| with the current memory pressure level.
+  void UpdatePressureLevel();
+
+  // Updates |last_pressure_level_| at the end of every run loop pass (modulo
+  // some number of seconds).
+  void UpdatePressureLevelOnRunLoopExit();
+
+  // Run |dispatch_callback| on memory pressure notifications from the OS.
+  void OnMemoryPressureChanged(dispatch_source_s* event_source,
+                               const DispatchCallback& dispatch_callback);
+
+  // Returns the number of seconds per UMA tick (for statistics recording).
+  // Exposed for testing.
+  static int GetSecondsPerUMATick();
+
+  // The dispatch source that generates memory pressure change notifications.
+  ScopedDispatchObject<dispatch_source_t> memory_level_event_source_;
+
+  // The callback to call upon receiving a memory pressure change notification.
+  DispatchCallback dispatch_callback_;
+
+  // Last UMA report time.
+  CFTimeInterval last_statistic_report_time_;
+
+  // Most-recent memory pressure level.
+  MemoryPressureLevel last_pressure_level_;
+
+  // Observer that tracks exits from the main run loop.
+  ScopedCFTypeRef<CFRunLoopObserverRef> exit_observer_;
+
+  // Next time to update the memory pressure level when exiting the run loop.
+  CFTimeInterval next_run_loop_update_time_;
+
+  // Seconds left over from the last UMA tick calculation (to be added to the
+  // next calculation).
+  CFTimeInterval subtick_seconds_;
+
+  DISALLOW_COPY_AND_ASSIGN(MemoryPressureMonitor);
+};
+
+}  // namespace mac
+}  // namespace base
+
+#endif  // BASE_MEMORY_MEMORY_PRESSURE_MONITOR_MAC_H_
diff --git a/src/base/memory/memory_pressure_monitor_mac_unittest.cc b/src/base/memory/memory_pressure_monitor_mac_unittest.cc
new file mode 100644
index 0000000..3f5f4b7
--- /dev/null
+++ b/src/base/memory/memory_pressure_monitor_mac_unittest.cc
@@ -0,0 +1,228 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/memory_pressure_monitor_mac.h"
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/mac/scoped_cftyperef.h"
+#include "base/macros.h"
+#include "base/test/metrics/histogram_tester.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace mac {
+
+class TestMemoryPressureMonitor : public MemoryPressureMonitor {
+ public:
+  using MemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressureLevel;
+
+  // A HistogramTester for verifying correct UMA stat generation.
+  base::HistogramTester tester;
+
+  TestMemoryPressureMonitor() { }
+
+  // Clears the next run loop update time so that the next pass of the run
+  // loop checks the memory pressure level immediately. Normally there's a
+  // 5 second delay between pressure readings.
+  void ResetRunLoopUpdateTime() { next_run_loop_update_time_ = 0; }
+
+  // Sets the last UMA stat report time. Time spent in memory pressure is
+  // recorded in 5-second "ticks" from the last time statistics were recorded.
+  void SetLastStatisticReportTime(CFTimeInterval time) {
+    last_statistic_report_time_ = time;
+  }
+
+  // Sets the raw macOS memory pressure level read by the memory pressure
+  // monitor.
+  int macos_pressure_level_for_testing_;
+
+  // Exposes the UpdatePressureLevel() method for testing.
+  void UpdatePressureLevel() { MemoryPressureMonitor::UpdatePressureLevel(); }
+
+  // Returns the number of seconds left over from the last UMA tick
+  // calculation.
+  int SubTickSeconds() { return subtick_seconds_; }
+
+  // Returns the number of seconds per UMA tick.
+  static int GetSecondsPerUMATick() {
+    return MemoryPressureMonitor::GetSecondsPerUMATick();
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(TestMemoryPressureMonitor);
+
+  int GetMacMemoryPressureLevel() override {
+    return macos_pressure_level_for_testing_;
+  }
+};
+
+TEST(MacMemoryPressureMonitorTest, MemoryPressureFromMacMemoryPressure) {
+  EXPECT_EQ(
+      MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
+      TestMemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressureLevel(
+          DISPATCH_MEMORYPRESSURE_NORMAL));
+  EXPECT_EQ(
+      MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+      TestMemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressureLevel(
+          DISPATCH_MEMORYPRESSURE_WARN));
+  EXPECT_EQ(
+      MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
+      TestMemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressureLevel(
+          DISPATCH_MEMORYPRESSURE_CRITICAL));
+  EXPECT_EQ(
+      MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
+      TestMemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressureLevel(
+          0));
+  EXPECT_EQ(
+      MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
+      TestMemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressureLevel(
+          3));
+  EXPECT_EQ(
+      MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
+      TestMemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressureLevel(
+          5));
+  EXPECT_EQ(
+      MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
+      TestMemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressureLevel(
+          -1));
+}
+
+TEST(MacMemoryPressureMonitorTest, CurrentMemoryPressure) {
+  TestMemoryPressureMonitor monitor;
+
+  MemoryPressureListener::MemoryPressureLevel memory_pressure =
+      monitor.GetCurrentPressureLevel();
+  EXPECT_TRUE(memory_pressure ==
+                  MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE ||
+              memory_pressure ==
+                  MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE ||
+              memory_pressure ==
+                  MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL);
+}
+
+TEST(MacMemoryPressureMonitorTest, MemoryPressureConversion) {
+  TestMemoryPressureMonitor monitor;
+
+  monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_NORMAL;
+  monitor.UpdatePressureLevel();
+  MemoryPressureListener::MemoryPressureLevel memory_pressure =
+      monitor.GetCurrentPressureLevel();
+  EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
+            memory_pressure);
+
+  monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_WARN;
+  monitor.UpdatePressureLevel();
+  memory_pressure = monitor.GetCurrentPressureLevel();
+  EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+            memory_pressure);
+
+  monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_CRITICAL;
+  monitor.UpdatePressureLevel();
+  memory_pressure = monitor.GetCurrentPressureLevel();
+  EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
+            memory_pressure);
+}
+
+TEST(MacMemoryPressureMonitorTest, MemoryPressureRunLoopChecking) {
+  TestMemoryPressureMonitor monitor;
+
+  // To test grabbing the memory pressure at the end of the run loop, we have to
+  // run the run loop, but to do that the run loop needs a run loop source. Add
+  // a timer as the source. We know that the exit observer is attached to
+  // the kMessageLoopExclusiveRunLoopMode mode, so use that mode.
+  ScopedCFTypeRef<CFRunLoopTimerRef> timer_ref(CFRunLoopTimerCreate(
+      NULL, CFAbsoluteTimeGetCurrent() + 10, 0, 0, 0, nullptr, nullptr));
+  CFRunLoopAddTimer(CFRunLoopGetCurrent(), timer_ref,
+                    kMessageLoopExclusiveRunLoopMode);
+
+  monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_WARN;
+  monitor.ResetRunLoopUpdateTime();
+  CFRunLoopRunInMode(kMessageLoopExclusiveRunLoopMode, 0, true);
+  EXPECT_EQ(monitor.GetCurrentPressureLevel(),
+            MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE);
+
+  monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_CRITICAL;
+  monitor.ResetRunLoopUpdateTime();
+  CFRunLoopRunInMode(kMessageLoopExclusiveRunLoopMode, 0, true);
+  EXPECT_EQ(monitor.GetCurrentPressureLevel(),
+            MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL);
+
+  monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_NORMAL;
+  monitor.ResetRunLoopUpdateTime();
+  CFRunLoopRunInMode(kMessageLoopExclusiveRunLoopMode, 0, true);
+  EXPECT_EQ(monitor.GetCurrentPressureLevel(),
+            MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE);
+
+  CFRunLoopRemoveTimer(CFRunLoopGetCurrent(), timer_ref,
+                       kMessageLoopExclusiveRunLoopMode);
+}
+
+TEST(MacMemoryPressureMonitorTest, RecordMemoryPressureStats) {
+  TestMemoryPressureMonitor monitor;
+  const char* kHistogram = "Memory.PressureLevel";
+  CFTimeInterval now = CFAbsoluteTimeGetCurrent();
+  const int seconds_per_tick =
+      TestMemoryPressureMonitor::GetSecondsPerUMATick();
+
+  // Set the initial pressure level.
+  monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_NORMAL;
+  // Incur one UMA tick of time (and include one extra second of elapsed time).
+  monitor.SetLastStatisticReportTime(now - (seconds_per_tick + 1));
+  monitor.UpdatePressureLevel();
+  monitor.tester.ExpectTotalCount(kHistogram, 1);
+  monitor.tester.ExpectBucketCount(kHistogram, 0, 1);
+  // The report time above included an extra second so there should be 1
+  // sub-tick second left over.
+  EXPECT_EQ(1, monitor.SubTickSeconds());
+
+  // Simulate sitting in normal pressure for 1 second less than 6 UMA tick
+  // seconds and then elevating to warning. With the left over sub-tick second
+  // from above, the total elapsed ticks should be an even 6 UMA ticks.
+  monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_WARN;
+  monitor.SetLastStatisticReportTime(now - (seconds_per_tick * 6 - 1));
+  monitor.UpdatePressureLevel();
+  monitor.tester.ExpectTotalCount(kHistogram, 7);
+  monitor.tester.ExpectBucketCount(kHistogram, 0, 7);
+  monitor.tester.ExpectBucketCount(kHistogram, 1, 0);
+  EXPECT_EQ(0, monitor.SubTickSeconds());
+
+  // Simulate sitting in warning pressure for 20 UMA ticks and 2 seconds, and
+  // then elevating to critical.
+  monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_CRITICAL;
+  monitor.SetLastStatisticReportTime(now - (20 * seconds_per_tick + 2));
+  monitor.UpdatePressureLevel();
+  monitor.tester.ExpectTotalCount(kHistogram, 27);
+  monitor.tester.ExpectBucketCount(kHistogram, 0, 7);
+  monitor.tester.ExpectBucketCount(kHistogram, 1, 20);
+  monitor.tester.ExpectBucketCount(kHistogram, 2, 0);
+  EXPECT_EQ(2, monitor.SubTickSeconds());
+
+  // A quick update while critical - the stats should not budge because less
+  // than 1 tick of time has elapsed.
+  monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_CRITICAL;
+  monitor.SetLastStatisticReportTime(now - 1);
+  monitor.UpdatePressureLevel();
+  monitor.tester.ExpectTotalCount(kHistogram, 27);
+  monitor.tester.ExpectBucketCount(kHistogram, 0, 7);
+  monitor.tester.ExpectBucketCount(kHistogram, 1, 20);
+  monitor.tester.ExpectBucketCount(kHistogram, 2, 0);
+  EXPECT_EQ(3, monitor.SubTickSeconds());
+
+  // A quick change back to normal. Less than 1 tick of time has elapsed, but
+  // in this case the pressure level changed, so the critical bucket should
+  // get another sample (otherwise we could miss quick level changes).
+  monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_NORMAL;
+  monitor.SetLastStatisticReportTime(now - 1);
+  monitor.UpdatePressureLevel();
+  monitor.tester.ExpectTotalCount(kHistogram, 28);
+  monitor.tester.ExpectBucketCount(kHistogram, 0, 7);
+  monitor.tester.ExpectBucketCount(kHistogram, 1, 20);
+  monitor.tester.ExpectBucketCount(kHistogram, 2, 1);
+  // When less than 1 tick of time has elapsed but the pressure level changed,
+  // the subtick remainder gets zeroed out.
+  EXPECT_EQ(0, monitor.SubTickSeconds());
+}
+}  // namespace mac
+}  // namespace base
diff --git a/src/base/memory/memory_pressure_monitor_unittest.cc b/src/base/memory/memory_pressure_monitor_unittest.cc
new file mode 100644
index 0000000..10d9d24
--- /dev/null
+++ b/src/base/memory/memory_pressure_monitor_unittest.cc
@@ -0,0 +1,33 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/memory_pressure_monitor.h"
+
+#include "base/macros.h"
+#include "base/memory/memory_pressure_listener.h"
+#include "base/test/metrics/histogram_tester.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(MemoryPressureMonitorTest, RecordMemoryPressure) {
+  base::HistogramTester tester;
+  const char* kHistogram = "Memory.PressureLevel";
+
+  MemoryPressureMonitor::RecordMemoryPressure(
+      MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE, 3);
+  tester.ExpectTotalCount(kHistogram, 3);
+  tester.ExpectBucketCount(kHistogram, 0, 3);
+
+  MemoryPressureMonitor::RecordMemoryPressure(
+      MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE, 2);
+  tester.ExpectTotalCount(kHistogram, 5);
+  tester.ExpectBucketCount(kHistogram, 1, 2);
+
+  MemoryPressureMonitor::RecordMemoryPressure(
+      MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL, 1);
+  tester.ExpectTotalCount(kHistogram, 6);
+  tester.ExpectBucketCount(kHistogram, 2, 1);
+}
+}  // namespace base
diff --git a/src/base/memory/memory_pressure_monitor_win.cc b/src/base/memory/memory_pressure_monitor_win.cc
new file mode 100644
index 0000000..e6f9815
--- /dev/null
+++ b/src/base/memory/memory_pressure_monitor_win.cc
@@ -0,0 +1,234 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/memory_pressure_monitor_win.h"
+
+#include <windows.h>
+
+#include "base/single_thread_task_runner.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/time/time.h"
+#include "starboard/types.h"
+
+namespace base {
+namespace win {
+
+namespace {
+
+static const DWORDLONG kMBBytes = 1024 * 1024;
+
+}  // namespace
+
+// The following constants have been lifted from similar values in the ChromeOS
+// memory pressure monitor. The values were determined experimentally to ensure
+// sufficient responsiveness of the memory pressure subsystem, and minimal
+// overhead.
+const int MemoryPressureMonitor::kPollingIntervalMs = 5000;
+const int MemoryPressureMonitor::kModeratePressureCooldownMs = 10000;
+const int MemoryPressureMonitor::kModeratePressureCooldownCycles =
+    kModeratePressureCooldownMs / kPollingIntervalMs;
+
+// TODO(chrisha): Explore the following constants further with an experiment.
+
+// A system is considered 'high memory' if it has more than 1.5GB of system
+// memory available for use by the memory manager (not reserved for hardware
+// and drivers). This is a fuzzy version of the ~2GB discussed below.
+const int MemoryPressureMonitor::kLargeMemoryThresholdMb = 1536;
+
+// These are the default thresholds used for systems with < ~2GB of physical
+// memory. Such systems have been observed to always maintain ~100MB of
+// available memory, paging until that is the case. To try to avoid paging a
+// threshold slightly above this is chosen. The moderate threshold is slightly
+// less grounded in reality and chosen as 2.5x critical.
+const int MemoryPressureMonitor::kSmallMemoryDefaultModerateThresholdMb = 500;
+const int MemoryPressureMonitor::kSmallMemoryDefaultCriticalThresholdMb = 200;
+
+// These are the default thresholds used for systems with >= ~2GB of physical
+// memory. Such systems have been observed to always maintain ~300MB of
+// available memory, paging until that is the case.
+const int MemoryPressureMonitor::kLargeMemoryDefaultModerateThresholdMb = 1000;
+const int MemoryPressureMonitor::kLargeMemoryDefaultCriticalThresholdMb = 400;
+
+MemoryPressureMonitor::MemoryPressureMonitor()
+    : moderate_threshold_mb_(0),
+      critical_threshold_mb_(0),
+      current_memory_pressure_level_(
+          MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE),
+      moderate_pressure_repeat_count_(0),
+      dispatch_callback_(
+          base::Bind(&MemoryPressureListener::NotifyMemoryPressure)),
+      weak_ptr_factory_(this) {
+  InferThresholds();
+  StartObserving();
+}
+
+MemoryPressureMonitor::MemoryPressureMonitor(int moderate_threshold_mb,
+                                             int critical_threshold_mb)
+    : moderate_threshold_mb_(moderate_threshold_mb),
+      critical_threshold_mb_(critical_threshold_mb),
+      current_memory_pressure_level_(
+          MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE),
+      moderate_pressure_repeat_count_(0),
+      dispatch_callback_(
+          base::Bind(&MemoryPressureListener::NotifyMemoryPressure)),
+      weak_ptr_factory_(this) {
+  DCHECK_GE(moderate_threshold_mb_, critical_threshold_mb_);
+  DCHECK_LE(0, critical_threshold_mb_);
+  StartObserving();
+}
+
+MemoryPressureMonitor::~MemoryPressureMonitor() {
+  StopObserving();
+}
+
+void MemoryPressureMonitor::CheckMemoryPressureSoon() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  ThreadTaskRunnerHandle::Get()->PostTask(
+      FROM_HERE, Bind(&MemoryPressureMonitor::CheckMemoryPressure,
+                      weak_ptr_factory_.GetWeakPtr()));
+}
+
+MemoryPressureListener::MemoryPressureLevel
+MemoryPressureMonitor::GetCurrentPressureLevel() {
+  return current_memory_pressure_level_;
+}
+
+void MemoryPressureMonitor::InferThresholds() {
+  // Default to a 'high' memory situation, which uses more conservative
+  // thresholds.
+  bool high_memory = true;
+  MEMORYSTATUSEX mem_status = {};
+  if (GetSystemMemoryStatus(&mem_status)) {
+    static const DWORDLONG kLargeMemoryThresholdBytes =
+        static_cast<DWORDLONG>(kLargeMemoryThresholdMb) * kMBBytes;
+    high_memory = mem_status.ullTotalPhys >= kLargeMemoryThresholdBytes;
+  }
+
+  if (high_memory) {
+    moderate_threshold_mb_ = kLargeMemoryDefaultModerateThresholdMb;
+    critical_threshold_mb_ = kLargeMemoryDefaultCriticalThresholdMb;
+  } else {
+    moderate_threshold_mb_ = kSmallMemoryDefaultModerateThresholdMb;
+    critical_threshold_mb_ = kSmallMemoryDefaultCriticalThresholdMb;
+  }
+}
+
+void MemoryPressureMonitor::StartObserving() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  timer_.Start(FROM_HERE,
+               TimeDelta::FromMilliseconds(kPollingIntervalMs),
+               Bind(&MemoryPressureMonitor::
+                        CheckMemoryPressureAndRecordStatistics,
+                    weak_ptr_factory_.GetWeakPtr()));
+}
+
+void MemoryPressureMonitor::StopObserving() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  // If StartObserving failed, StopObserving will still get called.
+  timer_.Stop();
+  weak_ptr_factory_.InvalidateWeakPtrs();
+}
+
+void MemoryPressureMonitor::CheckMemoryPressure() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  // Get the previous pressure level and update the current one.
+  MemoryPressureLevel old_pressure = current_memory_pressure_level_;
+  current_memory_pressure_level_ = CalculateCurrentPressureLevel();
+
+  // |notify| will be set to true if MemoryPressureListeners need to be
+  // notified of a memory pressure level state change.
+  bool notify = false;
+  switch (current_memory_pressure_level_) {
+    case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE:
+      break;
+
+    case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE:
+      if (old_pressure != current_memory_pressure_level_) {
+        // This is a new transition to moderate pressure so notify.
+        moderate_pressure_repeat_count_ = 0;
+        notify = true;
+      } else {
+        // Already in moderate pressure, only notify if sustained over the
+        // cooldown period.
+        if (++moderate_pressure_repeat_count_ ==
+                kModeratePressureCooldownCycles) {
+          moderate_pressure_repeat_count_ = 0;
+          notify = true;
+        }
+      }
+      break;
+
+    case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL:
+      // Always notify of critical pressure levels.
+      notify = true;
+      break;
+  }
+
+  if (!notify)
+    return;
+
+  // Emit a notification of the current memory pressure level. This can only
+  // happen for moderate and critical pressure levels.
+  DCHECK_NE(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
+            current_memory_pressure_level_);
+  dispatch_callback_.Run(current_memory_pressure_level_);
+}
+
+void MemoryPressureMonitor::CheckMemoryPressureAndRecordStatistics() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  CheckMemoryPressure();
+
+  RecordMemoryPressure(current_memory_pressure_level_, 1);
+}
+
+MemoryPressureListener::MemoryPressureLevel
+MemoryPressureMonitor::CalculateCurrentPressureLevel() {
+  MEMORYSTATUSEX mem_status = {};
+  if (!GetSystemMemoryStatus(&mem_status))
+    return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE;
+
+  // How much system memory is actively available for use right now, in MBs.
+  int phys_free = static_cast<int>(mem_status.ullAvailPhys / kMBBytes);
+
+  // TODO(chrisha): This should eventually care about address space pressure,
+  // but the browser process (where this is running) effectively never runs out
+  // of address space. Renderers occasionally do, but it does them no good to
+  // have the browser process monitor address space pressure. Long term,
+  // renderers should run their own address space pressure monitors and act
+  // accordingly, with the browser making cross-process decisions based on
+  // system memory pressure.
+
+  // Determine if the physical memory is under critical memory pressure.
+  if (phys_free <= critical_threshold_mb_)
+    return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL;
+
+  // Determine if the physical memory is under moderate memory pressure.
+  if (phys_free <= moderate_threshold_mb_)
+    return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE;
+
+  // No memory pressure was detected.
+  return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE;
+}
+
+bool MemoryPressureMonitor::GetSystemMemoryStatus(
+    MEMORYSTATUSEX* mem_status) {
+  DCHECK(mem_status != nullptr);
+  mem_status->dwLength = sizeof(*mem_status);
+  if (!::GlobalMemoryStatusEx(mem_status))
+    return false;
+  return true;
+}
+
+void MemoryPressureMonitor::SetDispatchCallback(
+    const DispatchCallback& callback) {
+  dispatch_callback_ = callback;
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/src/base/memory/memory_pressure_monitor_win.h b/src/base/memory/memory_pressure_monitor_win.h
new file mode 100644
index 0000000..a65c191
--- /dev/null
+++ b/src/base/memory/memory_pressure_monitor_win.h
@@ -0,0 +1,148 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_MEMORY_PRESSURE_MONITOR_WIN_H_
+#define BASE_MEMORY_MEMORY_PRESSURE_MONITOR_WIN_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/memory/memory_pressure_listener.h"
+#include "base/memory/memory_pressure_monitor.h"
+#include "base/memory/weak_ptr.h"
+#include "base/threading/thread_checker.h"
+#include "base/timer/timer.h"
+
+// To not pull in windows.h.
+typedef struct _MEMORYSTATUSEX MEMORYSTATUSEX;
+
+namespace base {
+namespace win {
+
+// Windows memory pressure monitor. Because there is no OS provided signal this
+// polls at a low frequency (once per second), and applies internal hysteresis.
+class BASE_EXPORT MemoryPressureMonitor : public base::MemoryPressureMonitor {
+ public:
+  // Constants governing the polling and hysteresis behaviour of the observer.
+
+  // The polling interval, in milliseconds. While under critical pressure, this
+  // is also the timer to repeat cleanup attempts.
+  static const int kPollingIntervalMs;
+  // The time which should pass between 2 successive moderate memory pressure
+  // signals, in milliseconds.
+  static const int kModeratePressureCooldownMs;
+  // The number of cycles that should pass between 2 successive moderate memory
+  // pressure signals.
+  static const int kModeratePressureCooldownCycles;
+
+  // Constants governing the memory pressure level detection.
+
+  // The amount of total system memory beyond which a system is considered to be
+  // a large-memory system.
+  static const int kLargeMemoryThresholdMb;
+  // Default minimum free memory thresholds for small-memory systems, in MB.
+  static const int kSmallMemoryDefaultModerateThresholdMb;
+  static const int kSmallMemoryDefaultCriticalThresholdMb;
+  // Default minimum free memory thresholds for large-memory systems, in MB.
+  static const int kLargeMemoryDefaultModerateThresholdMb;
+  static const int kLargeMemoryDefaultCriticalThresholdMb;
+
+  // Default constructor. Will choose thresholds automatically based on the
+  // actual amount of system memory.
+  MemoryPressureMonitor();
+
+  // Constructor with explicit memory thresholds. These represent the amount of
+  // free memory below which the applicable memory pressure state engages.
+  MemoryPressureMonitor(int moderate_threshold_mb, int critical_threshold_mb);
+
+  ~MemoryPressureMonitor() override;
+
+  // Schedules a memory pressure check to run soon. This must be called on the
+  // same thread where the monitor was instantiated.
+  void CheckMemoryPressureSoon();
+
+  // Get the current memory pressure level. This can be called from any thread.
+  MemoryPressureLevel GetCurrentPressureLevel() override;
+  void SetDispatchCallback(const DispatchCallback& callback) override;
+
+  // Returns the moderate pressure level free memory threshold, in MB.
+  int moderate_threshold_mb() const { return moderate_threshold_mb_; }
+
+  // Returns the critical pressure level free memory threshold, in MB.
+  int critical_threshold_mb() const { return critical_threshold_mb_; }
+
+ protected:
+  // Internals are exposed for unittests.
+
+  // Automatically infers threshold values based on system memory. This invokes
+  // GetMemoryStatus so it can be mocked in unittests.
+  void InferThresholds();
+
+  // Starts observing the memory fill level. Calls to StartObserving should
+  // always be matched with calls to StopObserving.
+  void StartObserving();
+
+  // Stop observing the memory fill level. May be safely called if
+  // StartObserving has not been called. Must be called from the same thread on
+  // which the monitor was instantiated.
+  void StopObserving();
+
+  // Checks memory pressure, storing the current level, applying any hysteresis
+  // and emitting memory pressure level change signals as necessary. This
+  // function is called periodically while the monitor is observing memory
+  // pressure. This is split out from CheckMemoryPressureAndRecordStatistics so
+  // that it may be called by CheckMemoryPressureSoon and not invoke UMA
+  // logging. Must be called from the same thread on which the monitor was
+  // instantiated.
+  void CheckMemoryPressure();
+
+  // Wrapper to CheckMemoryPressure that also records the observed memory
+  // pressure level via a UMA enumeration. This is the function that is called
+  // periodically by the timer. Must be called from the same thread on which the
+  // monitor was instantiated.
+  void CheckMemoryPressureAndRecordStatistics();
+
+  // Calculates the current instantaneous memory pressure level. This does not
+  // use any hysteresis and simply returns the result at the current moment. Can
+  // be called on any thread.
+  MemoryPressureLevel CalculateCurrentPressureLevel();
+
+  // Gets system memory status. This is virtual as a unittesting hook. Returns
+  // true if the system call succeeds, false otherwise. Can be called on any
+  // thread.
+  virtual bool GetSystemMemoryStatus(MEMORYSTATUSEX* mem_status);
+
+ private:
+  // Threshold amounts of available memory that trigger pressure levels. See
+  // memory_pressure_monitor.cc for a discussion of reasonable values for these.
+  int moderate_threshold_mb_;
+  int critical_threshold_mb_;
+
+  // A periodic timer to check for memory pressure changes.
+  base::RepeatingTimer timer_;
+
+  // The current memory pressure.
+  MemoryPressureLevel current_memory_pressure_level_;
+
+  // To slow down the amount of moderate pressure event calls, this gets used to
+  // count the number of events since the last event occurred. This is used by
+  // |CheckMemoryPressure| to apply hysteresis on the raw results of
+  // |CalculateCurrentPressureLevel|.
+  int moderate_pressure_repeat_count_;
+
+  // Ensures that this object is used from a single thread.
+  base::ThreadChecker thread_checker_;
+
+  DispatchCallback dispatch_callback_;
+
+  // Weak pointer factory to ourself used for scheduling calls to
+  // CheckMemoryPressure/CheckMemoryPressureAndRecordStatistics via |timer_|.
+  base::WeakPtrFactory<MemoryPressureMonitor> weak_ptr_factory_;
+
+  DISALLOW_COPY_AND_ASSIGN(MemoryPressureMonitor);
+};
+
+}  // namespace win
+}  // namespace base
+
+#endif  // BASE_MEMORY_MEMORY_PRESSURE_MONITOR_WIN_H_
diff --git a/src/base/memory/memory_pressure_monitor_win_unittest.cc b/src/base/memory/memory_pressure_monitor_win_unittest.cc
new file mode 100644
index 0000000..1002a01
--- /dev/null
+++ b/src/base/memory/memory_pressure_monitor_win_unittest.cc
@@ -0,0 +1,299 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/memory_pressure_monitor_win.h"
+
+#include "base/macros.h"
+#include "base/memory/memory_pressure_listener.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace win {
+
+namespace {
+
+struct PressureSettings {
+  int phys_left_mb;
+  MemoryPressureListener::MemoryPressureLevel level;
+};
+
+}  // namespace
+
+// This is outside of the anonymous namespace so that it can be seen as a friend
+// to the monitor class.
+class TestMemoryPressureMonitor : public MemoryPressureMonitor {
+ public:
+  using MemoryPressureMonitor::CalculateCurrentPressureLevel;
+  using MemoryPressureMonitor::CheckMemoryPressure;
+
+  static const DWORDLONG kMBBytes = 1024 * 1024;
+
+  explicit TestMemoryPressureMonitor(bool large_memory)
+      : mem_status_() {
+    // Generate a plausible amount of memory.
+    mem_status_.ullTotalPhys =
+        static_cast<DWORDLONG>(GenerateTotalMemoryMb(large_memory)) * kMBBytes;
+
+    // Rerun InferThresholds using the test fixture's GetSystemMemoryStatus.
+    InferThresholds();
+    // Stop the timer.
+    StopObserving();
+  }
+
+  TestMemoryPressureMonitor(int system_memory_mb,
+                            int moderate_threshold_mb,
+                            int critical_threshold_mb)
+      : MemoryPressureMonitor(moderate_threshold_mb, critical_threshold_mb),
+        mem_status_() {
+    // Set the amount of system memory.
+    mem_status_.ullTotalPhys = static_cast<DWORDLONG>(
+        system_memory_mb * kMBBytes);
+
+    // Stop the timer.
+    StopObserving();
+  }
+
+  virtual ~TestMemoryPressureMonitor() {}
+
+  MOCK_METHOD1(OnMemoryPressure,
+               void(MemoryPressureListener::MemoryPressureLevel level));
+
+  // Generates an amount of total memory that is consistent with the requested
+  // memory model.
+  int GenerateTotalMemoryMb(bool large_memory) {
+    int total_mb = 64;
+    while (total_mb < MemoryPressureMonitor::kLargeMemoryThresholdMb)
+      total_mb *= 2;
+    if (large_memory)
+      return total_mb * 2;
+    return total_mb / 2;
+  }
+
+  // Sets up the memory status to reflect the provided absolute memory left.
+  void SetMemoryFree(int phys_left_mb) {
+    // ullTotalPhys is set in the constructor and not modified.
+
+    // Set the amount of available memory.
+    mem_status_.ullAvailPhys =
+        static_cast<DWORDLONG>(phys_left_mb) * kMBBytes;
+    DCHECK_LT(mem_status_.ullAvailPhys, mem_status_.ullTotalPhys);
+
+    // These fields are unused.
+    mem_status_.dwMemoryLoad = 0;
+    mem_status_.ullTotalPageFile = 0;
+    mem_status_.ullAvailPageFile = 0;
+    mem_status_.ullTotalVirtual = 0;
+    mem_status_.ullAvailVirtual = 0;
+  }
+
+  void SetNone() {
+    SetMemoryFree(moderate_threshold_mb() + 1);
+  }
+
+  void SetModerate() {
+    SetMemoryFree(moderate_threshold_mb() - 1);
+  }
+
+  void SetCritical() {
+    SetMemoryFree(critical_threshold_mb() - 1);
+  }
+
+ private:
+  bool GetSystemMemoryStatus(MEMORYSTATUSEX* mem_status) override {
+    // Simply copy the memory status set by the test fixture.
+    *mem_status = mem_status_;
+    return true;
+  }
+
+  MEMORYSTATUSEX mem_status_;
+
+  DISALLOW_COPY_AND_ASSIGN(TestMemoryPressureMonitor);
+};
+
+class WinMemoryPressureMonitorTest : public testing::Test {
+ protected:
+  void CalculateCurrentMemoryPressureLevelTest(
+      TestMemoryPressureMonitor* monitor) {
+
+    int mod = monitor->moderate_threshold_mb();
+    monitor->SetMemoryFree(mod + 1);
+    EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
+              monitor->CalculateCurrentPressureLevel());
+
+    monitor->SetMemoryFree(mod);
+    EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+              monitor->CalculateCurrentPressureLevel());
+
+    monitor->SetMemoryFree(mod - 1);
+    EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+              monitor->CalculateCurrentPressureLevel());
+
+    int crit = monitor->critical_threshold_mb();
+    monitor->SetMemoryFree(crit + 1);
+    EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+              monitor->CalculateCurrentPressureLevel());
+
+    monitor->SetMemoryFree(crit);
+    EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
+              monitor->CalculateCurrentPressureLevel());
+
+    monitor->SetMemoryFree(crit - 1);
+    EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
+              monitor->CalculateCurrentPressureLevel());
+  }
+
+  base::MessageLoopForUI message_loop_;
+};
+
+// Tests the fundamental direct calculation of memory pressure with automatic
+// small-memory thresholds.
+TEST_F(WinMemoryPressureMonitorTest, CalculateCurrentMemoryPressureLevelSmall) {
+  static const int kModerateMb =
+      MemoryPressureMonitor::kSmallMemoryDefaultModerateThresholdMb;
+  static const int kCriticalMb =
+      MemoryPressureMonitor::kSmallMemoryDefaultCriticalThresholdMb;
+
+  TestMemoryPressureMonitor monitor(false);  // Small-memory model.
+
+  EXPECT_EQ(kModerateMb, monitor.moderate_threshold_mb());
+  EXPECT_EQ(kCriticalMb, monitor.critical_threshold_mb());
+
+  ASSERT_NO_FATAL_FAILURE(CalculateCurrentMemoryPressureLevelTest(&monitor));
+}
+
+// Tests the fundamental direct calculation of memory pressure with automatic
+// large-memory thresholds.
+TEST_F(WinMemoryPressureMonitorTest, CalculateCurrentMemoryPressureLevelLarge) {
+  static const int kModerateMb =
+      MemoryPressureMonitor::kLargeMemoryDefaultModerateThresholdMb;
+  static const int kCriticalMb =
+      MemoryPressureMonitor::kLargeMemoryDefaultCriticalThresholdMb;
+
+  TestMemoryPressureMonitor monitor(true);  // Large-memory model.
+
+  EXPECT_EQ(kModerateMb, monitor.moderate_threshold_mb());
+  EXPECT_EQ(kCriticalMb, monitor.critical_threshold_mb());
+
+  ASSERT_NO_FATAL_FAILURE(CalculateCurrentMemoryPressureLevelTest(&monitor));
+}
+
+// Tests the fundamental direct calculation of memory pressure with manually
+// specified threshold levels.
+TEST_F(WinMemoryPressureMonitorTest,
+       CalculateCurrentMemoryPressureLevelCustom) {
+  static const int kSystemMb = 512;
+  static const int kModerateMb = 256;
+  static const int kCriticalMb = 128;
+
+  TestMemoryPressureMonitor monitor(kSystemMb, kModerateMb, kCriticalMb);
+
+  EXPECT_EQ(kModerateMb, monitor.moderate_threshold_mb());
+  EXPECT_EQ(kCriticalMb, monitor.critical_threshold_mb());
+
+  ASSERT_NO_FATAL_FAILURE(CalculateCurrentMemoryPressureLevelTest(&monitor));
+}
+
+// This test tests the various transition states from memory pressure, looking
+// for the correct behavior on event reposting as well as state updates.
+TEST_F(WinMemoryPressureMonitorTest, CheckMemoryPressure) {
+  // Large-memory.
+  testing::StrictMock<TestMemoryPressureMonitor> monitor(true);
+  MemoryPressureListener listener(
+      base::Bind(&TestMemoryPressureMonitor::OnMemoryPressure,
+                 base::Unretained(&monitor)));
+
+  // Checking the memory pressure at 0% load should not produce any
+  // events.
+  monitor.SetNone();
+  monitor.CheckMemoryPressure();
+  RunLoop().RunUntilIdle();
+  EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
+            monitor.GetCurrentPressureLevel());
+
+  // Setting the memory level to 80% should produce a moderate pressure level.
+  EXPECT_CALL(monitor,
+              OnMemoryPressure(MemoryPressureListener::
+                                   MEMORY_PRESSURE_LEVEL_MODERATE));
+  monitor.SetModerate();
+  monitor.CheckMemoryPressure();
+  RunLoop().RunUntilIdle();
+  EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+            monitor.GetCurrentPressureLevel());
+  testing::Mock::VerifyAndClearExpectations(&monitor);
+
+  // Check that the event gets reposted after a while.
+  for (int i = 0; i < monitor.kModeratePressureCooldownCycles; ++i) {
+    if (i + 1 == monitor.kModeratePressureCooldownCycles) {
+      EXPECT_CALL(monitor,
+                  OnMemoryPressure(MemoryPressureListener::
+                                       MEMORY_PRESSURE_LEVEL_MODERATE));
+    }
+    monitor.CheckMemoryPressure();
+    RunLoop().RunUntilIdle();
+    EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+              monitor.GetCurrentPressureLevel());
+    testing::Mock::VerifyAndClearExpectations(&monitor);
+  }
+
+  // Setting the memory usage to 99% should produce critical levels.
+  EXPECT_CALL(monitor,
+              OnMemoryPressure(MemoryPressureListener::
+                                   MEMORY_PRESSURE_LEVEL_CRITICAL));
+  monitor.SetCritical();
+  monitor.CheckMemoryPressure();
+  RunLoop().RunUntilIdle();
+  EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
+            monitor.GetCurrentPressureLevel());
+  testing::Mock::VerifyAndClearExpectations(&monitor);
+
+  // Calling it again should immediately produce a second call.
+  EXPECT_CALL(monitor,
+              OnMemoryPressure(MemoryPressureListener::
+                                   MEMORY_PRESSURE_LEVEL_CRITICAL));
+  monitor.CheckMemoryPressure();
+  RunLoop().RunUntilIdle();
+  EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
+            monitor.GetCurrentPressureLevel());
+  testing::Mock::VerifyAndClearExpectations(&monitor);
+
+  // When lowering the pressure again there should be a notification and the
+  // pressure should go back to moderate.
+  EXPECT_CALL(monitor,
+              OnMemoryPressure(MemoryPressureListener::
+                                   MEMORY_PRESSURE_LEVEL_MODERATE));
+  monitor.SetModerate();
+  monitor.CheckMemoryPressure();
+  RunLoop().RunUntilIdle();
+  EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+            monitor.GetCurrentPressureLevel());
+  testing::Mock::VerifyAndClearExpectations(&monitor);
+
+  // Check that the event gets reposted after a while.
+  for (int i = 0; i < monitor.kModeratePressureCooldownCycles; ++i) {
+    if (i + 1 == monitor.kModeratePressureCooldownCycles) {
+      EXPECT_CALL(monitor,
+                  OnMemoryPressure(MemoryPressureListener::
+                                       MEMORY_PRESSURE_LEVEL_MODERATE));
+    }
+    monitor.CheckMemoryPressure();
+    RunLoop().RunUntilIdle();
+    EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+              monitor.GetCurrentPressureLevel());
+    testing::Mock::VerifyAndClearExpectations(&monitor);
+  }
+
+  // Going down to no pressure should not produce a notification.
+  monitor.SetNone();
+  monitor.CheckMemoryPressure();
+  RunLoop().RunUntilIdle();
+  EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
+            monitor.GetCurrentPressureLevel());
+  testing::Mock::VerifyAndClearExpectations(&monitor);
+}
+
+}  // namespace win
+}  // namespace base
diff --git a/src/base/memory/platform_shared_memory_region.cc b/src/base/memory/platform_shared_memory_region.cc
new file mode 100644
index 0000000..4564792
--- /dev/null
+++ b/src/base/memory/platform_shared_memory_region.cc
@@ -0,0 +1,62 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/platform_shared_memory_region.h"
+
+#include "base/memory/shared_memory_mapping.h"
+#include "base/numerics/checked_math.h"
+
+namespace base {
+namespace subtle {
+
+// static
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::CreateWritable(
+    size_t size) {
+  return Create(Mode::kWritable, size);
+}
+
+// static
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::CreateUnsafe(
+    size_t size) {
+  return Create(Mode::kUnsafe, size);
+}
+
+PlatformSharedMemoryRegion::PlatformSharedMemoryRegion() = default;
+PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
+    PlatformSharedMemoryRegion&& other) = default;
+PlatformSharedMemoryRegion& PlatformSharedMemoryRegion::operator=(
+    PlatformSharedMemoryRegion&& other) = default;
+PlatformSharedMemoryRegion::~PlatformSharedMemoryRegion() = default;
+
+PlatformSharedMemoryRegion::ScopedPlatformHandle
+PlatformSharedMemoryRegion::PassPlatformHandle() {
+  return std::move(handle_);
+}
+
+bool PlatformSharedMemoryRegion::MapAt(off_t offset,
+                                       size_t size,
+                                       void** memory,
+                                       size_t* mapped_size) const {
+  if (!IsValid())
+    return false;
+
+  if (size == 0)
+    return false;
+
+  size_t end_byte;
+  if (!CheckAdd(offset, size).AssignIfValid(&end_byte) || end_byte > size_) {
+    return false;
+  }
+
+  bool success = MapAtInternal(offset, size, memory, mapped_size);
+  if (success) {
+    DCHECK_EQ(
+        0U, reinterpret_cast<uintptr_t>(*memory) & (kMapMinimumAlignment - 1));
+  }
+
+  return success;
+}
+
+}  // namespace subtle
+}  // namespace base
diff --git a/src/base/memory/platform_shared_memory_region.h b/src/base/memory/platform_shared_memory_region.h
new file mode 100644
index 0000000..b7281d4
--- /dev/null
+++ b/src/base/memory/platform_shared_memory_region.h
@@ -0,0 +1,264 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_PLATFORM_SHARED_MEMORY_REGION_H_
+#define BASE_MEMORY_PLATFORM_SHARED_MEMORY_REGION_H_
+
+#if !defined(STARBOARD)
+
+#include <utility>
+
+#include "base/compiler_specific.h"
+#include "base/gtest_prod_util.h"
+#include "base/macros.h"
+#include "base/memory/shared_memory_handle.h"
+#include "base/unguessable_token.h"
+#include "build/build_config.h"
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+#include <mach/mach.h>
+#include "base/mac/scoped_mach_port.h"
+#elif defined(OS_FUCHSIA)
+#include <lib/zx/vmo.h>
+#elif defined(OS_WIN)
+#include "base/win/scoped_handle.h"
+#include "base/win/windows_types.h"
+#elif defined(OS_POSIX)
+#include <sys/types.h>
+#include "base/file_descriptor_posix.h"
+#include "base/files/scoped_file.h"
+#include "starboard/types.h"
+#endif
+
+namespace base {
+namespace subtle {
+
+#if defined(OS_POSIX) && (!defined(OS_MACOSX) || defined(OS_IOS)) && \
+    !defined(OS_ANDROID)
+// Helper structs to keep two descriptors on POSIX. It's needed to support
+// ConvertToReadOnly().
+struct BASE_EXPORT FDPair {
+  int fd;
+  int readonly_fd;
+};
+
+struct BASE_EXPORT ScopedFDPair {
+  ScopedFDPair();
+  ScopedFDPair(ScopedFD in_fd, ScopedFD in_readonly_fd);
+  ScopedFDPair(ScopedFDPair&&);
+  ScopedFDPair& operator=(ScopedFDPair&&);
+  ~ScopedFDPair();
+
+  FDPair get() const;
+
+  ScopedFD fd;
+  ScopedFD readonly_fd;
+};
+#endif
+
+// Implementation class for shared memory regions.
+//
+// This class does the following:
+//
+// - Wraps and owns a shared memory region platform handle.
+// - Provides a way to allocate a new region of platform shared memory of given
+//   size.
+// - Provides a way to create mapping of the region in the current process'
+//   address space, under special access-control constraints (see Mode).
+// - Provides methods to help transferring the handle across process boundaries.
+// - Holds a 128-bit unique identifier used to uniquely identify the same
+//   kernel region resource across processes (used for memory tracking).
+// - Has a method to retrieve the region's size in bytes.
+//
+// IMPORTANT NOTE: Users should never use this directly, but
+// ReadOnlySharedMemoryRegion, WritableSharedMemoryRegion or
+// UnsafeSharedMemoryRegion since this is an implementation class.
+class BASE_EXPORT PlatformSharedMemoryRegion {
+ public:
+  // Permission mode of the platform handle. Each mode corresponds to one of the
+  // typed shared memory classes:
+  //
+  // * ReadOnlySharedMemoryRegion: A region that can only create read-only
+  // mappings.
+  //
+  // * WritableSharedMemoryRegion: A region that can only create writable
+  // mappings. The region can be demoted to ReadOnlySharedMemoryRegion without
+  // the possibility of promoting back to writable.
+  //
+  // * UnsafeSharedMemoryRegion: A region that can only create writable
+  // mappings. The region cannot be demoted to ReadOnlySharedMemoryRegion.
+  enum class Mode {
+    kReadOnly,  // ReadOnlySharedMemoryRegion
+    kWritable,  // WritableSharedMemoryRegion
+    kUnsafe,    // UnsafeSharedMemoryRegion
+    kMaxValue = kUnsafe
+  };
+
+  // Errors that can occur during Shared Memory construction.
+  // These match tools/metrics/histograms/enums.xml.
+  // This enum is append-only.
+  enum class CreateError {
+    SUCCESS = 0,
+    SIZE_ZERO = 1,
+    SIZE_TOO_LARGE = 2,
+    INITIALIZE_ACL_FAILURE = 3,
+    INITIALIZE_SECURITY_DESC_FAILURE = 4,
+    SET_SECURITY_DESC_FAILURE = 5,
+    CREATE_FILE_MAPPING_FAILURE = 6,
+    REDUCE_PERMISSIONS_FAILURE = 7,
+    ALREADY_EXISTS = 8,
+    kMaxValue = ALREADY_EXISTS
+  };
+
+// Platform-specific shared memory type used by this class.
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+  using PlatformHandle = mach_port_t;
+  using ScopedPlatformHandle = mac::ScopedMachSendRight;
+#elif defined(OS_FUCHSIA)
+  using PlatformHandle = zx::unowned_vmo;
+  using ScopedPlatformHandle = zx::vmo;
+#elif defined(OS_WIN)
+  using PlatformHandle = HANDLE;
+  using ScopedPlatformHandle = win::ScopedHandle;
+#elif defined(OS_ANDROID)
+  using PlatformHandle = int;
+  using ScopedPlatformHandle = ScopedFD;
+#else
+  using PlatformHandle = FDPair;
+  using ScopedPlatformHandle = ScopedFDPair;
+#endif
+
+  // The minimum alignment in bytes that any mapped address produced by Map()
+  // and MapAt() is guaranteed to have.
+  enum { kMapMinimumAlignment = 32 };
+
+  // Creates a new PlatformSharedMemoryRegion with corresponding mode and size.
+  // Creating in kReadOnly mode isn't supported because then there will be no
+  // way to modify memory content.
+  static PlatformSharedMemoryRegion CreateWritable(size_t size);
+  static PlatformSharedMemoryRegion CreateUnsafe(size_t size);
+
+  // Returns a new PlatformSharedMemoryRegion that takes ownership of the
+  // |handle|. All parameters must be taken from another valid
+  // PlatformSharedMemoryRegion instance, e.g. |size| must be equal to the
+  // actual region size as allocated by the kernel.
+  // Closes the |handle| and returns an invalid instance if passed parameters
+  // are invalid.
+  static PlatformSharedMemoryRegion Take(ScopedPlatformHandle handle,
+                                         Mode mode,
+                                         size_t size,
+                                         const UnguessableToken& guid);
+
+  // As Take, above, but from a SharedMemoryHandle. This takes ownership of the
+  // handle. |mode| must be kUnsafe or kReadOnly; the latter must be used with a
+  // handle created with SharedMemoryHandle::GetReadOnlyHandle().
+  // TODO(crbug.com/795291): this should only be used while transitioning from
+  // the old shared memory API, and should be removed when done.
+  static PlatformSharedMemoryRegion TakeFromSharedMemoryHandle(
+      const SharedMemoryHandle& handle,
+      Mode mode);
+
+  // Default constructor initializes an invalid instance, i.e. an instance that
+  // doesn't wrap any valid platform handle.
+  PlatformSharedMemoryRegion();
+
+  // Move operations are allowed.
+  PlatformSharedMemoryRegion(PlatformSharedMemoryRegion&&);
+  PlatformSharedMemoryRegion& operator=(PlatformSharedMemoryRegion&&);
+
+  // Destructor closes the platform handle. Does nothing if the handle is
+  // invalid.
+  ~PlatformSharedMemoryRegion();
+
+  // Passes ownership of the platform handle to the caller. The current instance
+  // becomes invalid. It's the responsibility of the caller to close the handle.
+  ScopedPlatformHandle PassPlatformHandle() WARN_UNUSED_RESULT;
+
+  // Returns the platform handle. The current instance keeps ownership of this
+  // handle.
+  PlatformHandle GetPlatformHandle() const;
+
+  // Whether the platform handle is valid.
+  bool IsValid() const;
+
+  // Duplicates the platform handle and creates a new PlatformSharedMemoryRegion
+  // with the same |mode_|, |size_| and |guid_| that owns this handle. Returns
+  // invalid region on failure, the current instance remains valid.
+  // Can be called only in kReadOnly and kUnsafe modes, CHECK-fails if is
+  // called in kWritable mode.
+  PlatformSharedMemoryRegion Duplicate() const;
+
+  // Converts the region to read-only. Returns whether the operation succeeded.
+  // Makes the current instance invalid on failure. Can be called only in
+  // kWritable mode, all other modes will CHECK-fail. The object will have
+  // kReadOnly mode after this call on success.
+  bool ConvertToReadOnly();
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+  // Same as above, but |mapped_addr| is used as a hint to avoid additional
+  // mapping of the memory object.
+  // |mapped_addr| must be mapped location of |memory_object_|. If the location
+  // is unknown, |mapped_addr| should be |nullptr|.
+  bool ConvertToReadOnly(void* mapped_addr);
+#endif  // defined(OS_MACOSX) && !defined(OS_IOS)
+
+  // Converts the region to unsafe. Returns whether the operation succeeded.
+  // Makes the current instance invalid on failure. Can be called only in
+  // kWritable mode, all other modes will CHECK-fail. The object will have
+  // kUnsafe mode after this call on success.
+  bool ConvertToUnsafe();
+
+  // Maps |size| bytes of the shared memory region starting with the given
+  // |offset| into the caller's address space. |offset| must be aligned to value
+  // of |SysInfo::VMAllocationGranularity()|. Fails if requested bytes are out
+  // of the region limits.
+  // Returns true and sets |memory| and |mapped_size| on success, returns false
+  // and leaves output parameters in unspecified state otherwise. The mapped
+  // address is guaranteed to have an alignment of at least
+  // |kMapMinimumAlignment|.
+  bool MapAt(off_t offset,
+             size_t size,
+             void** memory,
+             size_t* mapped_size) const;
+
+  const UnguessableToken& GetGUID() const { return guid_; }
+
+  size_t GetSize() const { return size_; }
+
+  Mode GetMode() const { return mode_; }
+
+ private:
+  FRIEND_TEST_ALL_PREFIXES(PlatformSharedMemoryRegionTest,
+                           CreateReadOnlyRegionDeathTest);
+  FRIEND_TEST_ALL_PREFIXES(PlatformSharedMemoryRegionTest,
+                           CheckPlatformHandlePermissionsCorrespondToMode);
+  static PlatformSharedMemoryRegion Create(Mode mode, size_t size);
+
+  static bool CheckPlatformHandlePermissionsCorrespondToMode(
+      PlatformHandle handle,
+      Mode mode,
+      size_t size);
+
+  PlatformSharedMemoryRegion(ScopedPlatformHandle handle,
+                             Mode mode,
+                             size_t size,
+                             const UnguessableToken& guid);
+
+  bool MapAtInternal(off_t offset,
+                     size_t size,
+                     void** memory,
+                     size_t* mapped_size) const;
+
+  ScopedPlatformHandle handle_;
+  Mode mode_ = Mode::kReadOnly;
+  size_t size_ = 0;
+  UnguessableToken guid_;
+
+  DISALLOW_COPY_AND_ASSIGN(PlatformSharedMemoryRegion);
+};
+
+}  // namespace subtle
+}  // namespace base
+
+#endif  // !defined(STARBOARD)
+#endif  // BASE_MEMORY_PLATFORM_SHARED_MEMORY_REGION_H_
diff --git a/src/base/memory/platform_shared_memory_region_android.cc b/src/base/memory/platform_shared_memory_region_android.cc
new file mode 100644
index 0000000..5369deb
--- /dev/null
+++ b/src/base/memory/platform_shared_memory_region_android.cc
@@ -0,0 +1,210 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/platform_shared_memory_region.h"
+
+#include <sys/mman.h>
+
+#include "base/bits.h"
+#include "base/memory/shared_memory_tracker.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/process/process_metrics.h"
+#include "starboard/types.h"
+#include "third_party/ashmem/ashmem.h"
+
+namespace base {
+namespace subtle {
+
+// For Android, we use ashmem to implement SharedMemory. ashmem_create_region
+// will automatically pin the region. We never explicitly call pin/unpin. When
+// all the file descriptors from different processes associated with the region
+// are closed, the memory buffer will go away.
+
+namespace {
+
+static int GetAshmemRegionProtectionMask(int fd) {
+  int prot = ashmem_get_prot_region(fd);
+  if (prot < 0) {
+    DPLOG(ERROR) << "ashmem_get_prot_region failed";
+    return -1;
+  }
+  return prot;
+}
+
+}  // namespace
+
+// static
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Take(
+    ScopedFD fd,
+    Mode mode,
+    size_t size,
+    const UnguessableToken& guid) {
+  if (!fd.is_valid())
+    return {};
+
+  if (size == 0)
+    return {};
+
+  if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return {};
+
+  CHECK(CheckPlatformHandlePermissionsCorrespondToMode(fd.get(), mode, size));
+
+  return PlatformSharedMemoryRegion(std::move(fd), mode, size, guid);
+}
+
+// static
+PlatformSharedMemoryRegion
+PlatformSharedMemoryRegion::TakeFromSharedMemoryHandle(
+    const SharedMemoryHandle& handle,
+    Mode mode) {
+  CHECK((mode == Mode::kReadOnly && handle.IsReadOnly()) ||
+        (mode == Mode::kUnsafe && !handle.IsReadOnly()));
+  if (!handle.IsValid())
+    return {};
+
+  return Take(ScopedFD(handle.GetHandle()), mode, handle.GetSize(),
+              handle.GetGUID());
+}
+
+int PlatformSharedMemoryRegion::GetPlatformHandle() const {
+  return handle_.get();
+}
+
+bool PlatformSharedMemoryRegion::IsValid() const {
+  return handle_.is_valid();
+}
+
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Duplicate() const {
+  if (!IsValid())
+    return {};
+
+  CHECK_NE(mode_, Mode::kWritable)
+      << "Duplicating a writable shared memory region is prohibited";
+
+  ScopedFD duped_fd(HANDLE_EINTR(dup(handle_.get())));
+  if (!duped_fd.is_valid()) {
+    DPLOG(ERROR) << "dup(" << handle_.get() << ") failed";
+    return {};
+  }
+
+  return PlatformSharedMemoryRegion(std::move(duped_fd), mode_, size_, guid_);
+}
+
+bool PlatformSharedMemoryRegion::ConvertToReadOnly() {
+  if (!IsValid())
+    return false;
+
+  CHECK_EQ(mode_, Mode::kWritable)
+      << "Only writable shared memory region can be converted to read-only";
+
+  ScopedFD handle_copy(handle_.release());
+
+  int prot = GetAshmemRegionProtectionMask(handle_copy.get());
+  if (prot < 0)
+    return false;
+
+  prot &= ~PROT_WRITE;
+  int ret = ashmem_set_prot_region(handle_copy.get(), prot);
+  if (ret != 0) {
+    DPLOG(ERROR) << "ashmem_set_prot_region failed";
+    return false;
+  }
+
+  handle_ = std::move(handle_copy);
+  mode_ = Mode::kReadOnly;
+  return true;
+}
+
+bool PlatformSharedMemoryRegion::ConvertToUnsafe() {
+  if (!IsValid())
+    return false;
+
+  CHECK_EQ(mode_, Mode::kWritable)
+      << "Only writable shared memory region can be converted to unsafe";
+
+  mode_ = Mode::kUnsafe;
+  return true;
+}
+
+bool PlatformSharedMemoryRegion::MapAtInternal(off_t offset,
+                                               size_t size,
+                                               void** memory,
+                                               size_t* mapped_size) const {
+  bool write_allowed = mode_ != Mode::kReadOnly;
+  *memory = mmap(nullptr, size, PROT_READ | (write_allowed ? PROT_WRITE : 0),
+                 MAP_SHARED, handle_.get(), offset);
+
+  bool mmap_succeeded = *memory && *memory != reinterpret_cast<void*>(-1);
+  if (!mmap_succeeded) {
+    DPLOG(ERROR) << "mmap " << handle_.get() << " failed";
+    return false;
+  }
+
+  *mapped_size = size;
+  return true;
+}
+
+// static
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
+                                                              size_t size) {
+  if (size == 0)
+    return {};
+
+  // Align size as required by ashmem_create_region() API documentation.
+  size_t rounded_size = bits::Align(size, GetPageSize());
+  if (rounded_size > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return {};
+
+  CHECK_NE(mode, Mode::kReadOnly) << "Creating a region in read-only mode will "
+                                     "lead to this region being non-modifiable";
+
+  UnguessableToken guid = UnguessableToken::Create();
+
+  ScopedFD fd(ashmem_create_region(
+      SharedMemoryTracker::GetDumpNameForTracing(guid).c_str(), rounded_size));
+  if (!fd.is_valid()) {
+    DPLOG(ERROR) << "ashmem_create_region failed";
+    return {};
+  }
+
+  int err = ashmem_set_prot_region(fd.get(), PROT_READ | PROT_WRITE);
+  if (err < 0) {
+    DPLOG(ERROR) << "ashmem_set_prot_region failed";
+    return {};
+  }
+
+  return PlatformSharedMemoryRegion(std::move(fd), mode, size, guid);
+}
+
+bool PlatformSharedMemoryRegion::CheckPlatformHandlePermissionsCorrespondToMode(
+    PlatformHandle handle,
+    Mode mode,
+    size_t size) {
+  int prot = GetAshmemRegionProtectionMask(handle);
+  if (prot < 0)
+    return false;
+
+  bool is_read_only = (prot & PROT_WRITE) == 0;
+  bool expected_read_only = mode == Mode::kReadOnly;
+
+  if (is_read_only != expected_read_only) {
+    DLOG(ERROR) << "Ashmem region has a wrong protection mask: it is"
+                << (is_read_only ? " " : " not ") << "read-only but it should"
+                << (expected_read_only ? " " : " not ") << "be";
+    return false;
+  }
+
+  return true;
+}
+
+PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
+    ScopedFD fd,
+    Mode mode,
+    size_t size,
+    const UnguessableToken& guid)
+    : handle_(std::move(fd)), mode_(mode), size_(size), guid_(guid) {}
+
+}  // namespace subtle
+}  // namespace base
diff --git a/src/base/memory/platform_shared_memory_region_fuchsia.cc b/src/base/memory/platform_shared_memory_region_fuchsia.cc
new file mode 100644
index 0000000..6e72d5e
--- /dev/null
+++ b/src/base/memory/platform_shared_memory_region_fuchsia.cc
@@ -0,0 +1,196 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/platform_shared_memory_region.h"
+
+#include <lib/zx/vmar.h>
+#include <zircon/process.h>
+#include <zircon/rights.h>
+
+#include "base/bits.h"
+#include "base/fuchsia/fuchsia_logging.h"
+#include "base/process/process_metrics.h"
+#include "starboard/types.h"
+
+namespace base {
+namespace subtle {
+
+static constexpr int kNoWriteOrExec =
+    ZX_DEFAULT_VMO_RIGHTS &
+    ~(ZX_RIGHT_WRITE | ZX_RIGHT_EXECUTE | ZX_RIGHT_SET_PROPERTY);
+
+// static
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Take(
+    zx::vmo handle,
+    Mode mode,
+    size_t size,
+    const UnguessableToken& guid) {
+  if (!handle.is_valid())
+    return {};
+
+  if (size == 0)
+    return {};
+
+  if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return {};
+
+  CHECK(CheckPlatformHandlePermissionsCorrespondToMode(zx::unowned_vmo(handle),
+                                                       mode, size));
+
+  return PlatformSharedMemoryRegion(std::move(handle), mode, size, guid);
+}
+
+// static
+PlatformSharedMemoryRegion
+PlatformSharedMemoryRegion::TakeFromSharedMemoryHandle(
+    const SharedMemoryHandle& handle,
+    Mode mode) {
+  CHECK(mode == Mode::kReadOnly || mode == Mode::kUnsafe);
+  if (!handle.IsValid())
+    return {};
+
+  return Take(zx::vmo(handle.GetHandle()), mode, handle.GetSize(),
+              handle.GetGUID());
+}
+
+zx::unowned_vmo PlatformSharedMemoryRegion::GetPlatformHandle() const {
+  return zx::unowned_vmo(handle_);
+}
+
+bool PlatformSharedMemoryRegion::IsValid() const {
+  return handle_.is_valid();
+}
+
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Duplicate() const {
+  if (!IsValid())
+    return {};
+
+  CHECK_NE(mode_, Mode::kWritable)
+      << "Duplicating a writable shared memory region is prohibited";
+
+  zx::vmo duped_handle;
+  zx_status_t status = handle_.duplicate(ZX_RIGHT_SAME_RIGHTS, &duped_handle);
+  if (status != ZX_OK) {
+    ZX_DLOG(ERROR, status) << "zx_handle_duplicate";
+    return {};
+  }
+
+  return PlatformSharedMemoryRegion(std::move(duped_handle), mode_, size_,
+                                    guid_);
+}
+
+bool PlatformSharedMemoryRegion::ConvertToReadOnly() {
+  if (!IsValid())
+    return false;
+
+  CHECK_EQ(mode_, Mode::kWritable)
+      << "Only writable shared memory region can be converted to read-only";
+
+  zx_status_t status = handle_.replace(kNoWriteOrExec, &handle_);
+  if (status != ZX_OK) {
+    ZX_DLOG(ERROR, status) << "zx_handle_replace";
+    return false;
+  }
+
+  mode_ = Mode::kReadOnly;
+  return true;
+}
+
+bool PlatformSharedMemoryRegion::ConvertToUnsafe() {
+  if (!IsValid())
+    return false;
+
+  CHECK_EQ(mode_, Mode::kWritable)
+      << "Only writable shared memory region can be converted to unsafe";
+
+  mode_ = Mode::kUnsafe;
+  return true;
+}
+
+bool PlatformSharedMemoryRegion::MapAtInternal(off_t offset,
+                                               size_t size,
+                                               void** memory,
+                                               size_t* mapped_size) const {
+  bool write_allowed = mode_ != Mode::kReadOnly;
+  uintptr_t addr;
+  zx_status_t status = zx::vmar::root_self()->map(
+      0, handle_, offset, size,
+      ZX_VM_FLAG_PERM_READ | (write_allowed ? ZX_VM_FLAG_PERM_WRITE : 0),
+      &addr);
+  if (status != ZX_OK) {
+    ZX_DLOG(ERROR, status) << "zx_vmar_map";
+    return false;
+  }
+
+  *memory = reinterpret_cast<void*>(addr);
+  *mapped_size = size;
+  return true;
+}
+
+// static
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
+                                                              size_t size) {
+  if (size == 0)
+    return {};
+
+  size_t rounded_size = bits::Align(size, GetPageSize());
+  if (rounded_size > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return {};
+
+  CHECK_NE(mode, Mode::kReadOnly) << "Creating a region in read-only mode will "
+                                     "lead to this region being non-modifiable";
+
+  zx::vmo vmo;
+  zx_status_t status = zx::vmo::create(rounded_size, 0, &vmo);
+  if (status != ZX_OK) {
+    ZX_DLOG(ERROR, status) << "zx_vmo_create";
+    return {};
+  }
+
+  const int kNoExecFlags = ZX_DEFAULT_VMO_RIGHTS & ~ZX_RIGHT_EXECUTE;
+  status = vmo.replace(kNoExecFlags, &vmo);
+  if (status != ZX_OK) {
+    ZX_DLOG(ERROR, status) << "zx_handle_replace";
+    return {};
+  }
+
+  return PlatformSharedMemoryRegion(std::move(vmo), mode, size,
+                                    UnguessableToken::Create());
+}
+
+// static
+bool PlatformSharedMemoryRegion::CheckPlatformHandlePermissionsCorrespondToMode(
+    PlatformHandle handle,
+    Mode mode,
+    size_t size) {
+  zx_info_handle_basic_t basic = {};
+  zx_status_t status = handle->get_info(ZX_INFO_HANDLE_BASIC, &basic,
+                                        sizeof(basic), nullptr, nullptr);
+  if (status != ZX_OK) {
+    ZX_DLOG(ERROR, status) << "zx_object_get_info";
+    return false;
+  }
+
+  bool is_read_only = (basic.rights & kNoWriteOrExec) == basic.rights;
+  bool expected_read_only = mode == Mode::kReadOnly;
+
+  if (is_read_only != expected_read_only) {
+    DLOG(ERROR) << "VMO object has wrong access rights: it is"
+                << (is_read_only ? " " : " not ") << "read-only but it should"
+                << (expected_read_only ? " " : " not ") << "be";
+    return false;
+  }
+
+  return true;
+}
+
+PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
+    zx::vmo handle,
+    Mode mode,
+    size_t size,
+    const UnguessableToken& guid)
+    : handle_(std::move(handle)), mode_(mode), size_(size), guid_(guid) {}
+
+}  // namespace subtle
+}  // namespace base
diff --git a/src/base/memory/platform_shared_memory_region_mac.cc b/src/base/memory/platform_shared_memory_region_mac.cc
new file mode 100644
index 0000000..c53cafb
--- /dev/null
+++ b/src/base/memory/platform_shared_memory_region_mac.cc
@@ -0,0 +1,256 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/platform_shared_memory_region.h"
+
+#include <mach/mach_vm.h>
+
+#include "base/mac/mach_logging.h"
+#include "base/mac/scoped_mach_vm.h"
+#include "base/metrics/histogram_functions.h"
+#include "base/metrics/histogram_macros.h"
+#include "build/build_config.h"
+#include "starboard/types.h"
+
+#if defined(OS_IOS)
+#error "MacOS only - iOS uses platform_shared_memory_region_posix.cc"
+#endif
+
+namespace base {
+namespace subtle {
+
+namespace {
+
+void LogCreateError(PlatformSharedMemoryRegion::CreateError error,
+                    kern_return_t mac_error) {
+  UMA_HISTOGRAM_ENUMERATION("SharedMemory.CreateError", error);
+  if (mac_error != KERN_SUCCESS)
+    UmaHistogramSparse("SharedMemory.CreateMacError", mac_error);
+}
+
+}  // namespace
+
+// static
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Take(
+    mac::ScopedMachSendRight handle,
+    Mode mode,
+    size_t size,
+    const UnguessableToken& guid) {
+  if (!handle.is_valid())
+    return {};
+
+  if (size == 0)
+    return {};
+
+  if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return {};
+
+  CHECK(
+      CheckPlatformHandlePermissionsCorrespondToMode(handle.get(), mode, size));
+
+  return PlatformSharedMemoryRegion(std::move(handle), mode, size, guid);
+}
+
+// static
+PlatformSharedMemoryRegion
+PlatformSharedMemoryRegion::TakeFromSharedMemoryHandle(
+    const SharedMemoryHandle& handle,
+    Mode mode) {
+  CHECK(mode == Mode::kReadOnly || mode == Mode::kUnsafe);
+  CHECK(handle.GetType() == SharedMemoryHandle::MACH);
+  if (!handle.IsValid())
+    return {};
+
+  return Take(base::mac::ScopedMachSendRight(handle.GetMemoryObject()), mode,
+              handle.GetSize(), handle.GetGUID());
+}
+
+mach_port_t PlatformSharedMemoryRegion::GetPlatformHandle() const {
+  return handle_.get();
+}
+
+bool PlatformSharedMemoryRegion::IsValid() const {
+  return handle_.is_valid();
+}
+
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Duplicate() const {
+  if (!IsValid())
+    return {};
+
+  CHECK_NE(mode_, Mode::kWritable)
+      << "Duplicating a writable shared memory region is prohibited";
+
+  // Increment the ref count.
+  kern_return_t kr = mach_port_mod_refs(mach_task_self(), handle_.get(),
+                                        MACH_PORT_RIGHT_SEND, 1);
+  if (kr != KERN_SUCCESS) {
+    MACH_DLOG(ERROR, kr) << "mach_port_mod_refs";
+    return {};
+  }
+
+  return PlatformSharedMemoryRegion(mac::ScopedMachSendRight(handle_.get()),
+                                    mode_, size_, guid_);
+}
+
+bool PlatformSharedMemoryRegion::ConvertToReadOnly() {
+  return ConvertToReadOnly(nullptr);
+}
+
+bool PlatformSharedMemoryRegion::ConvertToReadOnly(void* mapped_addr) {
+  if (!IsValid())
+    return false;
+
+  CHECK_EQ(mode_, Mode::kWritable)
+      << "Only writable shared memory region can be converted to read-only";
+
+  mac::ScopedMachSendRight handle_copy(handle_.release());
+
+  void* temp_addr = mapped_addr;
+  mac::ScopedMachVM scoped_memory;
+  if (!temp_addr) {
+    // Intentionally lower current prot and max prot to |VM_PROT_READ|.
+    kern_return_t kr = mach_vm_map(
+        mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&temp_addr),
+        size_, 0, VM_FLAGS_ANYWHERE, handle_copy.get(), 0, FALSE, VM_PROT_READ,
+        VM_PROT_READ, VM_INHERIT_NONE);
+    if (kr != KERN_SUCCESS) {
+      MACH_DLOG(ERROR, kr) << "mach_vm_map";
+      return false;
+    }
+    scoped_memory.reset(reinterpret_cast<vm_address_t>(temp_addr),
+                        mach_vm_round_page(size_));
+  }
+
+  // Make new memory object.
+  memory_object_size_t allocation_size = size_;
+  mac::ScopedMachSendRight named_right;
+  kern_return_t kr = mach_make_memory_entry_64(
+      mach_task_self(), &allocation_size,
+      reinterpret_cast<memory_object_offset_t>(temp_addr), VM_PROT_READ,
+      named_right.receive(), MACH_PORT_NULL);
+  if (kr != KERN_SUCCESS) {
+    MACH_DLOG(ERROR, kr) << "mach_make_memory_entry_64";
+    return false;
+  }
+  DCHECK_GE(allocation_size, size_);
+
+  handle_ = std::move(named_right);
+  mode_ = Mode::kReadOnly;
+  return true;
+}
+
+bool PlatformSharedMemoryRegion::ConvertToUnsafe() {
+  if (!IsValid())
+    return false;
+
+  CHECK_EQ(mode_, Mode::kWritable)
+      << "Only writable shared memory region can be converted to unsafe";
+
+  mode_ = Mode::kUnsafe;
+  return true;
+}
+
+bool PlatformSharedMemoryRegion::MapAtInternal(off_t offset,
+                                               size_t size,
+                                               void** memory,
+                                               size_t* mapped_size) const {
+  bool write_allowed = mode_ != Mode::kReadOnly;
+  vm_prot_t vm_prot_write = write_allowed ? VM_PROT_WRITE : 0;
+  kern_return_t kr = mach_vm_map(
+      mach_task_self(),
+      reinterpret_cast<mach_vm_address_t*>(memory),  // Output parameter
+      size,
+      0,  // Alignment mask
+      VM_FLAGS_ANYWHERE, handle_.get(), offset,
+      FALSE,                         // Copy
+      VM_PROT_READ | vm_prot_write,  // Current protection
+      VM_PROT_READ | vm_prot_write,  // Maximum protection
+      VM_INHERIT_NONE);
+  if (kr != KERN_SUCCESS) {
+    MACH_DLOG(ERROR, kr) << "mach_vm_map";
+    return false;
+  }
+
+  *mapped_size = size;
+  return true;
+}
+
+// static
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
+                                                              size_t size) {
+  if (size == 0) {
+    LogCreateError(CreateError::SIZE_ZERO, KERN_SUCCESS);
+    return {};
+  }
+
+  if (size > static_cast<size_t>(std::numeric_limits<int>::max())) {
+    LogCreateError(CreateError::SIZE_TOO_LARGE, KERN_SUCCESS);
+    return {};
+  }
+
+  CHECK_NE(mode, Mode::kReadOnly) << "Creating a region in read-only mode will "
+                                     "lead to this region being non-modifiable";
+
+  mach_vm_size_t vm_size = size;
+  mac::ScopedMachSendRight named_right;
+  kern_return_t kr = mach_make_memory_entry_64(
+      mach_task_self(), &vm_size,
+      0,  // Address.
+      MAP_MEM_NAMED_CREATE | VM_PROT_READ | VM_PROT_WRITE,
+      named_right.receive(),
+      MACH_PORT_NULL);  // Parent handle.
+  if (kr != KERN_SUCCESS)
+    LogCreateError(CreateError::CREATE_FILE_MAPPING_FAILURE, kr);
+  // Crash as soon as shm allocation fails to debug the issue
+  // https://crbug.com/872237.
+  MACH_CHECK(kr == KERN_SUCCESS, kr) << "mach_make_memory_entry_64";
+  DCHECK_GE(vm_size, size);
+
+  LogCreateError(CreateError::SUCCESS, KERN_SUCCESS);
+  return PlatformSharedMemoryRegion(std::move(named_right), mode, size,
+                                    UnguessableToken::Create());
+}
+
+// static
+bool PlatformSharedMemoryRegion::CheckPlatformHandlePermissionsCorrespondToMode(
+    PlatformHandle handle,
+    Mode mode,
+    size_t size) {
+  mach_vm_address_t temp_addr = 0;
+  kern_return_t kr =
+      mach_vm_map(mach_task_self(), &temp_addr, size, 0, VM_FLAGS_ANYWHERE,
+                  handle, 0, FALSE, VM_PROT_READ | VM_PROT_WRITE,
+                  VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
+  if (kr == KERN_SUCCESS) {
+    kern_return_t kr_deallocate =
+        mach_vm_deallocate(mach_task_self(), temp_addr, size);
+    MACH_DLOG_IF(ERROR, kr_deallocate != KERN_SUCCESS, kr_deallocate)
+        << "mach_vm_deallocate";
+  } else if (kr != KERN_INVALID_RIGHT) {
+    MACH_DLOG(ERROR, kr) << "mach_vm_map";
+    return false;
+  }
+
+  bool is_read_only = kr == KERN_INVALID_RIGHT;
+  bool expected_read_only = mode == Mode::kReadOnly;
+
+  if (is_read_only != expected_read_only) {
+    DLOG(ERROR) << "VM region has a wrong protection mask: it is"
+                << (is_read_only ? " " : " not ") << "read-only but it should"
+                << (expected_read_only ? " " : " not ") << "be";
+    return false;
+  }
+
+  return true;
+}
+
+PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
+    mac::ScopedMachSendRight handle,
+    Mode mode,
+    size_t size,
+    const UnguessableToken& guid)
+    : handle_(std::move(handle)), mode_(mode), size_(size), guid_(guid) {}
+
+}  // namespace subtle
+}  // namespace base
diff --git a/src/base/memory/platform_shared_memory_region_posix.cc b/src/base/memory/platform_shared_memory_region_posix.cc
new file mode 100644
index 0000000..f2c4ff6
--- /dev/null
+++ b/src/base/memory/platform_shared_memory_region_posix.cc
@@ -0,0 +1,332 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/platform_shared_memory_region.h"
+
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+
+#include "base/files/file_util.h"
+#include "base/threading/thread_restrictions.h"
+#include "build/build_config.h"
+#include "starboard/types.h"
+
+namespace base {
+namespace subtle {
+
+namespace {
+
+struct ScopedPathUnlinkerTraits {
+  static const FilePath* InvalidValue() { return nullptr; }
+
+  static void Free(const FilePath* path) {
+    if (unlink(path->value().c_str()))
+      PLOG(WARNING) << "unlink";
+  }
+};
+
+// Unlinks the FilePath when the object is destroyed.
+using ScopedPathUnlinker =
+    ScopedGeneric<const FilePath*, ScopedPathUnlinkerTraits>;
+
+#if !defined(OS_NACL)
+bool CheckFDAccessMode(int fd, int expected_mode) {
+  int fd_status = fcntl(fd, F_GETFL);
+  if (fd_status == -1) {
+    DPLOG(ERROR) << "fcntl(" << fd << ", F_GETFL) failed";
+    return false;
+  }
+
+  int mode = fd_status & O_ACCMODE;
+  if (mode != expected_mode) {
+    DLOG(ERROR) << "Descriptor access mode (" << mode
+                << ") differs from expected (" << expected_mode << ")";
+    return false;
+  }
+
+  return true;
+}
+#endif  // !defined(OS_NACL)
+
+}  // namespace
+
+ScopedFDPair::ScopedFDPair() = default;
+
+ScopedFDPair::ScopedFDPair(ScopedFDPair&&) = default;
+
+ScopedFDPair& ScopedFDPair::operator=(ScopedFDPair&&) = default;
+
+ScopedFDPair::~ScopedFDPair() = default;
+
+ScopedFDPair::ScopedFDPair(ScopedFD in_fd, ScopedFD in_readonly_fd)
+    : fd(std::move(in_fd)), readonly_fd(std::move(in_readonly_fd)) {}
+
+FDPair ScopedFDPair::get() const {
+  return {fd.get(), readonly_fd.get()};
+}
+
+// static
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Take(
+    ScopedFDPair handle,
+    Mode mode,
+    size_t size,
+    const UnguessableToken& guid) {
+  if (!handle.fd.is_valid())
+    return {};
+
+  if (size == 0)
+    return {};
+
+  if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return {};
+
+  CHECK(
+      CheckPlatformHandlePermissionsCorrespondToMode(handle.get(), mode, size));
+
+  switch (mode) {
+    case Mode::kReadOnly:
+    case Mode::kUnsafe:
+      if (handle.readonly_fd.is_valid()) {
+        handle.readonly_fd.reset();
+        DLOG(WARNING) << "Readonly handle shouldn't be valid for a "
+                         "non-writable memory region; closing";
+      }
+      break;
+    case Mode::kWritable:
+      if (!handle.readonly_fd.is_valid()) {
+        DLOG(ERROR)
+            << "Readonly handle must be valid for writable memory region";
+        return {};
+      }
+      break;
+    default:
+      DLOG(ERROR) << "Invalid permission mode: " << static_cast<int>(mode);
+      return {};
+  }
+
+  return PlatformSharedMemoryRegion(std::move(handle), mode, size, guid);
+}
+
+// static
+PlatformSharedMemoryRegion
+PlatformSharedMemoryRegion::TakeFromSharedMemoryHandle(
+    const SharedMemoryHandle& handle,
+    Mode mode) {
+  CHECK(mode == Mode::kReadOnly || mode == Mode::kUnsafe);
+  if (!handle.IsValid())
+    return {};
+
+  return Take(
+      base::subtle::ScopedFDPair(ScopedFD(handle.GetHandle()), ScopedFD()),
+      mode, handle.GetSize(), handle.GetGUID());
+}
+
+FDPair PlatformSharedMemoryRegion::GetPlatformHandle() const {
+  return handle_.get();
+}
+
+bool PlatformSharedMemoryRegion::IsValid() const {
+  return handle_.fd.is_valid() &&
+         (mode_ == Mode::kWritable ? handle_.readonly_fd.is_valid() : true);
+}
+
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Duplicate() const {
+  if (!IsValid())
+    return {};
+
+  CHECK_NE(mode_, Mode::kWritable)
+      << "Duplicating a writable shared memory region is prohibited";
+
+  ScopedFD duped_fd(HANDLE_EINTR(dup(handle_.fd.get())));
+  if (!duped_fd.is_valid()) {
+    DPLOG(ERROR) << "dup(" << handle_.fd.get() << ") failed";
+    return {};
+  }
+
+  return PlatformSharedMemoryRegion({std::move(duped_fd), ScopedFD()}, mode_,
+                                    size_, guid_);
+}
+
+bool PlatformSharedMemoryRegion::ConvertToReadOnly() {
+  if (!IsValid())
+    return false;
+
+  CHECK_EQ(mode_, Mode::kWritable)
+      << "Only writable shared memory region can be converted to read-only";
+
+  handle_.fd.reset(handle_.readonly_fd.release());
+  mode_ = Mode::kReadOnly;
+  return true;
+}
+
+bool PlatformSharedMemoryRegion::ConvertToUnsafe() {
+  if (!IsValid())
+    return false;
+
+  CHECK_EQ(mode_, Mode::kWritable)
+      << "Only writable shared memory region can be converted to unsafe";
+
+  handle_.readonly_fd.reset();
+  mode_ = Mode::kUnsafe;
+  return true;
+}
+
+bool PlatformSharedMemoryRegion::MapAtInternal(off_t offset,
+                                               size_t size,
+                                               void** memory,
+                                               size_t* mapped_size) const {
+  bool write_allowed = mode_ != Mode::kReadOnly;
+  *memory = mmap(nullptr, size, PROT_READ | (write_allowed ? PROT_WRITE : 0),
+                 MAP_SHARED, handle_.fd.get(), offset);
+
+  bool mmap_succeeded = *memory && *memory != MAP_FAILED;
+  if (!mmap_succeeded) {
+    DPLOG(ERROR) << "mmap " << handle_.fd.get() << " failed";
+    return false;
+  }
+
+  *mapped_size = size;
+  return true;
+}
+
+// static
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
+                                                              size_t size) {
+#if defined(OS_NACL)
+  // Untrusted code can't create descriptors or handles.
+  return {};
+#else
+  if (size == 0)
+    return {};
+
+  if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return {};
+
+  CHECK_NE(mode, Mode::kReadOnly) << "Creating a region in read-only mode will "
+                                     "lead to this region being non-modifiable";
+
+  // This function theoretically can block on the disk, but realistically
+  // the temporary files we create will just go into the buffer cache
+  // and be deleted before they ever make it out to disk.
+  ThreadRestrictions::ScopedAllowIO allow_io;
+
+  // We don't use shm_open() API in order to support the --disable-dev-shm-usage
+  // flag.
+  FilePath directory;
+  if (!GetShmemTempDir(false /* executable */, &directory))
+    return {};
+
+  ScopedFD fd;
+  FilePath path;
+  fd.reset(CreateAndOpenFdForTemporaryFileInDir(directory, &path));
+
+  if (!fd.is_valid()) {
+    PLOG(ERROR) << "Creating shared memory in " << path.value() << " failed";
+    FilePath dir = path.DirName();
+    if (access(dir.value().c_str(), W_OK | X_OK) < 0) {
+      PLOG(ERROR) << "Unable to access(W_OK|X_OK) " << dir.value();
+      if (dir.value() == "/dev/shm") {
+        LOG(FATAL) << "This is frequently caused by incorrect permissions on "
+                   << "/dev/shm.  Try 'sudo chmod 1777 /dev/shm' to fix.";
+      }
+    }
+    return {};
+  }
+
+  // Deleting the file prevents anyone else from mapping it in (making it
+  // private), and prevents the need for cleanup (once the last fd is
+  // closed, it is truly freed).
+  ScopedPathUnlinker path_unlinker(&path);
+
+  ScopedFD readonly_fd;
+  if (mode == Mode::kWritable) {
+    // Also open as readonly so that we can ConvertToReadOnly().
+    readonly_fd.reset(HANDLE_EINTR(open(path.value().c_str(), O_RDONLY)));
+    if (!readonly_fd.is_valid()) {
+      DPLOG(ERROR) << "open(\"" << path.value() << "\", O_RDONLY) failed";
+      return {};
+    }
+  }
+
+  // Get current size.
+  struct stat stat = {};
+  if (fstat(fd.get(), &stat) != 0)
+    return {};
+  const size_t current_size = stat.st_size;
+  if (current_size != size) {
+    if (HANDLE_EINTR(ftruncate(fd.get(), size)) != 0)
+      return {};
+  }
+
+  if (readonly_fd.is_valid()) {
+    struct stat readonly_stat = {};
+    if (fstat(readonly_fd.get(), &readonly_stat))
+      NOTREACHED();
+
+    if (stat.st_dev != readonly_stat.st_dev ||
+        stat.st_ino != readonly_stat.st_ino) {
+      LOG(ERROR) << "Writable and read-only inodes don't match; bailing";
+      return {};
+    }
+  }
+
+  return PlatformSharedMemoryRegion({std::move(fd), std::move(readonly_fd)},
+                                    mode, size, UnguessableToken::Create());
+#endif  // !defined(OS_NACL)
+}
+
+bool PlatformSharedMemoryRegion::CheckPlatformHandlePermissionsCorrespondToMode(
+    PlatformHandle handle,
+    Mode mode,
+    size_t size) {
+#if !defined(OS_NACL)
+  if (!CheckFDAccessMode(handle.fd,
+                         mode == Mode::kReadOnly ? O_RDONLY : O_RDWR)) {
+    return false;
+  }
+
+  if (mode == Mode::kWritable)
+    return CheckFDAccessMode(handle.readonly_fd, O_RDONLY);
+
+  // The second descriptor must be invalid in kReadOnly and kUnsafe modes.
+  if (handle.readonly_fd != -1) {
+    DLOG(ERROR) << "The second descriptor must be invalid";
+    return false;
+  }
+
+  return true;
+#else
+  // fcntl(_, F_GETFL) is not implemented on NaCl.
+  void* temp_memory = nullptr;
+  temp_memory =
+      mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, handle.fd, 0);
+
+  bool mmap_succeeded = temp_memory && temp_memory != MAP_FAILED;
+  if (mmap_succeeded)
+    munmap(temp_memory, size);
+
+  bool is_read_only = !mmap_succeeded;
+  bool expected_read_only = mode == Mode::kReadOnly;
+
+  if (is_read_only != expected_read_only) {
+    DLOG(ERROR) << "Descriptor has a wrong access mode: it is"
+                << (is_read_only ? " " : " not ") << "read-only but it should"
+                << (expected_read_only ? " " : " not ") << "be";
+    return false;
+  }
+
+  return true;
+#endif  // !defined(OS_NACL)
+}
+
+PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
+    ScopedFDPair handle,
+    Mode mode,
+    size_t size,
+    const UnguessableToken& guid)
+    : handle_(std::move(handle)), mode_(mode), size_(size), guid_(guid) {}
+
+}  // namespace subtle
+}  // namespace base
diff --git a/src/base/memory/platform_shared_memory_region_unittest.cc b/src/base/memory/platform_shared_memory_region_unittest.cc
new file mode 100644
index 0000000..7c567cb
--- /dev/null
+++ b/src/base/memory/platform_shared_memory_region_unittest.cc
@@ -0,0 +1,483 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/platform_shared_memory_region.h"
+
+#include "base/logging.h"
+#include "base/memory/shared_memory.h"
+#include "base/memory/shared_memory_mapping.h"
+#include "base/process/process_metrics.h"
+#include "base/sys_info.h"
+#include "base/test/gtest_util.h"
+#include "base/test/test_shared_memory_util.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+#include <mach/mach_vm.h>
+#include <sys/mman.h>
+#elif defined(OS_POSIX) && !defined(OS_IOS)
+#include <sys/mman.h>
+#include "base/debug/proc_maps_linux.h"
+#elif defined(OS_WIN)
+#include <windows.h>
+#elif defined(OS_FUCHSIA)
+#include <lib/zx/object.h>
+#include <lib/zx/process.h>
+#include "base/fuchsia/fuchsia_logging.h"
+#include "starboard/types.h"
+#endif
+
+namespace base {
+namespace subtle {
+
+// Default region size used by most of the tests below.
+const size_t kRegionSize = 1024;
+
+class PlatformSharedMemoryRegionTest : public ::testing::Test {};
+
+// Tests that a default constructed region is invalid and produces invalid
+// mappings.
+TEST_F(PlatformSharedMemoryRegionTest, DefaultConstructedRegionIsInvalid) {
+  PlatformSharedMemoryRegion region;
+  EXPECT_FALSE(region.IsValid());
+  WritableSharedMemoryMapping mapping = MapForTesting(&region);
+  EXPECT_FALSE(mapping.IsValid());
+  PlatformSharedMemoryRegion duplicate = region.Duplicate();
+  EXPECT_FALSE(duplicate.IsValid());
+  EXPECT_FALSE(region.ConvertToReadOnly());
+}
+
+// Tests that creating a region of 0 size returns an invalid region.
+TEST_F(PlatformSharedMemoryRegionTest, CreateRegionOfZeroSizeIsInvalid) {
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(0);
+  EXPECT_FALSE(region.IsValid());
+
+  PlatformSharedMemoryRegion region2 =
+      PlatformSharedMemoryRegion::CreateUnsafe(0);
+  EXPECT_FALSE(region2.IsValid());
+}
+
+// Tests that creating a region of size bigger than the integer max value
+// returns an invalid region.
+TEST_F(PlatformSharedMemoryRegionTest, CreateTooLargeRegionIsInvalid) {
+  size_t too_large_region_size =
+      static_cast<size_t>(std::numeric_limits<int>::max()) + 1;
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(too_large_region_size);
+  EXPECT_FALSE(region.IsValid());
+
+  PlatformSharedMemoryRegion region2 =
+      PlatformSharedMemoryRegion::CreateUnsafe(too_large_region_size);
+  EXPECT_FALSE(region2.IsValid());
+}
+
+// Tests that regions consistently report their size as the size requested at
+// creation time even if their allocation size is larger due to platform
+// constraints.
+TEST_F(PlatformSharedMemoryRegionTest, ReportedSizeIsRequestedSize) {
+  constexpr size_t kTestSizes[] = {1, 2, 3, 64, 4096, 1024 * 1024};
+  for (size_t size : kTestSizes) {
+    PlatformSharedMemoryRegion region =
+        PlatformSharedMemoryRegion::CreateWritable(size);
+    EXPECT_EQ(region.GetSize(), size);
+
+    region.ConvertToReadOnly();
+    EXPECT_EQ(region.GetSize(), size);
+  }
+}
+
+// Tests that a writable region can be converted to read-only.
+TEST_F(PlatformSharedMemoryRegionTest, ConvertWritableToReadOnly) {
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  EXPECT_EQ(region.GetMode(), PlatformSharedMemoryRegion::Mode::kWritable);
+  ASSERT_TRUE(region.ConvertToReadOnly());
+  EXPECT_EQ(region.GetMode(), PlatformSharedMemoryRegion::Mode::kReadOnly);
+}
+
+// Tests that a writable region can be converted to unsafe.
+TEST_F(PlatformSharedMemoryRegionTest, ConvertWritableToUnsafe) {
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  EXPECT_EQ(region.GetMode(), PlatformSharedMemoryRegion::Mode::kWritable);
+  ASSERT_TRUE(region.ConvertToUnsafe());
+  EXPECT_EQ(region.GetMode(), PlatformSharedMemoryRegion::Mode::kUnsafe);
+}
+
+// Tests that the platform-specific handle converted to read-only cannot be used
+// to perform a writable mapping with low-level system APIs like mmap().
+TEST_F(PlatformSharedMemoryRegionTest, ReadOnlyHandleIsNotWritable) {
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  EXPECT_TRUE(region.ConvertToReadOnly());
+  EXPECT_EQ(region.GetMode(), PlatformSharedMemoryRegion::Mode::kReadOnly);
+  EXPECT_TRUE(
+      CheckReadOnlyPlatformSharedMemoryRegionForTesting(std::move(region)));
+}
+
+// Tests that the PassPlatformHandle() call invalidates the region.
+TEST_F(PlatformSharedMemoryRegionTest, InvalidAfterPass) {
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  ignore_result(region.PassPlatformHandle());
+  EXPECT_FALSE(region.IsValid());
+}
+
+// Tests that the region is invalid after move.
+TEST_F(PlatformSharedMemoryRegionTest, InvalidAfterMove) {
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  PlatformSharedMemoryRegion moved_region = std::move(region);
+  // The moved-from region must no longer own the handle; the destination does.
+  EXPECT_FALSE(region.IsValid());
+  EXPECT_TRUE(moved_region.IsValid());
+}
+
+// Tests that calling Take() with the size parameter equal to zero returns an
+// invalid region.
+TEST_F(PlatformSharedMemoryRegionTest, TakeRegionOfZeroSizeIsInvalid) {
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  PlatformSharedMemoryRegion region2 = PlatformSharedMemoryRegion::Take(
+      region.PassPlatformHandle(), region.GetMode(), 0, region.GetGUID());
+  EXPECT_FALSE(region2.IsValid());
+}
+
+// Tests that calling Take() with the size parameter bigger than the integer max
+// value returns an invalid region.
+TEST_F(PlatformSharedMemoryRegionTest, TakeTooLargeRegionIsInvalid) {
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  PlatformSharedMemoryRegion region2 = PlatformSharedMemoryRegion::Take(
+      region.PassPlatformHandle(), region.GetMode(),
+      static_cast<size_t>(std::numeric_limits<int>::max()) + 1,
+      region.GetGUID());
+  EXPECT_FALSE(region2.IsValid());
+}
+
+// Tests that mapping zero bytes fails.
+TEST_F(PlatformSharedMemoryRegionTest, MapAtZeroBytesTest) {
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  WritableSharedMemoryMapping mapping = MapAtForTesting(&region, 0, 0);
+  EXPECT_FALSE(mapping.IsValid());
+}
+
+// Tests that mapping bytes out of the region limits fails.
+TEST_F(PlatformSharedMemoryRegionTest, MapAtOutOfTheRegionLimitsTest) {
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  WritableSharedMemoryMapping mapping =
+      MapAtForTesting(&region, 0, region.GetSize() + 1);
+  EXPECT_FALSE(mapping.IsValid());
+}
+
+// Tests that mapping with a size and offset causing overflow fails.
+TEST_F(PlatformSharedMemoryRegionTest, MapAtWithOverflowTest) {
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(
+          SysInfo::VMAllocationGranularity() * 2);
+  ASSERT_TRUE(region.IsValid());
+  size_t size = std::numeric_limits<size_t>::max();
+  size_t offset = SysInfo::VMAllocationGranularity();
+  // |size| + |offset| should be below the region size due to overflow but
+  // mapping a region with these parameters should be invalid.
+  EXPECT_LT(size + offset, region.GetSize());
+  WritableSharedMemoryMapping mapping = MapAtForTesting(&region, offset, size);
+  EXPECT_FALSE(mapping.IsValid());
+}
+
+#if defined(OS_POSIX) && !defined(OS_ANDROID) && \
+    (!defined(OS_MACOSX) || defined(OS_IOS))
+// Tests that the second handle is closed after a conversion to read-only on
+// POSIX.
+TEST_F(PlatformSharedMemoryRegionTest,
+       ConvertToReadOnlyInvalidatesSecondHandle) {
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  ASSERT_TRUE(region.ConvertToReadOnly());
+  FDPair fds = region.GetPlatformHandle();
+  // A negative fd means no second (read-only) descriptor remains.
+  EXPECT_LT(fds.readonly_fd, 0);
+}
+
+// Tests that the second handle is closed after a conversion to unsafe on
+// POSIX.
+TEST_F(PlatformSharedMemoryRegionTest, ConvertToUnsafeInvalidatesSecondHandle) {
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  ASSERT_TRUE(region.ConvertToUnsafe());
+  FDPair fds = region.GetPlatformHandle();
+  EXPECT_LT(fds.readonly_fd, 0);
+}
+#endif
+
+// Asserts, via the platform's VM-introspection API, that the mapping starting
+// at |addr| carries no write permission.  On Fuchsia this is currently a
+// no-op (see TODO below).
+void CheckReadOnlyMapProtection(void* addr) {
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+  vm_region_basic_info_64 basic_info;
+  mach_vm_size_t dummy_size = 0;
+  void* temp_addr = addr;
+  MachVMRegionResult result = GetBasicInfo(
+      mach_task_self(), &dummy_size,
+      reinterpret_cast<mach_vm_address_t*>(&temp_addr), &basic_info);
+  ASSERT_EQ(result, MachVMRegionResult::Success);
+  EXPECT_EQ(basic_info.protection & VM_PROT_ALL, VM_PROT_READ);
+  EXPECT_EQ(basic_info.max_protection & VM_PROT_ALL, VM_PROT_READ);
+#elif defined(OS_POSIX) && !defined(OS_IOS)
+  // Find the entry for |addr| in this process's /proc/self/maps.
+  std::string proc_maps;
+  ASSERT_TRUE(base::debug::ReadProcMaps(&proc_maps));
+  std::vector<base::debug::MappedMemoryRegion> regions;
+  ASSERT_TRUE(base::debug::ParseProcMaps(proc_maps, &regions));
+  auto it =
+      std::find_if(regions.begin(), regions.end(),
+                   [addr](const base::debug::MappedMemoryRegion& region) {
+                     return region.start == reinterpret_cast<uintptr_t>(addr);
+                   });
+  ASSERT_TRUE(it != regions.end());
+  // PROT_READ may imply PROT_EXEC on some architectures, so just check that
+  // permissions don't contain PROT_WRITE bit.
+  EXPECT_FALSE(it->permissions & base::debug::MappedMemoryRegion::WRITE);
+#elif defined(OS_WIN)
+  MEMORY_BASIC_INFORMATION memory_info;
+  size_t result = VirtualQueryEx(GetCurrentProcess(), addr, &memory_info,
+                                 sizeof(memory_info));
+
+  ASSERT_GT(result, 0ULL) << "Failed executing VirtualQueryEx "
+                          << logging::SystemErrorCodeToString(
+                                 logging::GetLastSystemErrorCode());
+  EXPECT_EQ(memory_info.AllocationProtect, static_cast<DWORD>(PAGE_READONLY));
+  EXPECT_EQ(memory_info.Protect, static_cast<DWORD>(PAGE_READONLY));
+#elif defined(OS_FUCHSIA)
+// TODO(alexilin): We cannot call zx_object_get_info ZX_INFO_PROCESS_MAPS in
+// this process. Consider to create an auxiliary process that will read the
+// test process maps.
+#endif
+}
+
+// Attempts to re-enable write access on [addr, addr + len); returns true if
+// the platform call succeeded.  Used to verify that a read-only mapping
+// cannot be made writable again.
+bool TryToRestoreWritablePermissions(void* addr, size_t len) {
+#if defined(OS_POSIX) && !defined(OS_IOS)
+  int result = mprotect(addr, len, PROT_READ | PROT_WRITE);
+  return result != -1;
+#elif defined(OS_WIN)
+  DWORD old_protection;
+  return VirtualProtect(addr, len, PAGE_READWRITE, &old_protection);
+#elif defined(OS_FUCHSIA)
+  zx_status_t status = zx::vmar::root_self()->protect(
+      reinterpret_cast<uintptr_t>(addr), len,
+      ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE);
+  return status == ZX_OK;
+#else
+  // Unsupported platform: report failure, which the caller expects anyway.
+  return false;
+#endif
+}
+
+// Tests that protection bits are set correctly for read-only region.
+TEST_F(PlatformSharedMemoryRegionTest, MappingProtectionSetCorrectly) {
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  ASSERT_TRUE(region.ConvertToReadOnly());
+  WritableSharedMemoryMapping ro_mapping = MapForTesting(&region);
+  ASSERT_TRUE(ro_mapping.IsValid());
+  CheckReadOnlyMapProtection(ro_mapping.memory());
+
+  // Re-enabling write access must fail, and the mapping must stay read-only.
+  EXPECT_FALSE(TryToRestoreWritablePermissions(ro_mapping.memory(),
+                                               ro_mapping.mapped_size()));
+  CheckReadOnlyMapProtection(ro_mapping.memory());
+}
+
+// Tests that platform handle permissions are checked correctly.
+TEST_F(PlatformSharedMemoryRegionTest,
+       CheckPlatformHandlePermissionsCorrespondToMode) {
+  using Mode = PlatformSharedMemoryRegion::Mode;
+  // Helper: does |region|'s handle pass the permission check for |mode|?
+  auto check = [](const PlatformSharedMemoryRegion& region,
+                  PlatformSharedMemoryRegion::Mode mode) {
+    return PlatformSharedMemoryRegion::
+        CheckPlatformHandlePermissionsCorrespondToMode(
+            region.GetPlatformHandle(), mode, region.GetSize());
+  };
+
+  // Check kWritable region.
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  EXPECT_TRUE(check(region, Mode::kWritable));
+  EXPECT_FALSE(check(region, Mode::kReadOnly));
+
+  // Check kReadOnly region.
+  ASSERT_TRUE(region.ConvertToReadOnly());
+  EXPECT_TRUE(check(region, Mode::kReadOnly));
+  EXPECT_FALSE(check(region, Mode::kWritable));
+  EXPECT_FALSE(check(region, Mode::kUnsafe));
+
+  // Check kUnsafe region.
+  PlatformSharedMemoryRegion region2 =
+      PlatformSharedMemoryRegion::CreateUnsafe(kRegionSize);
+  ASSERT_TRUE(region2.IsValid());
+  EXPECT_TRUE(check(region2, Mode::kUnsafe));
+  EXPECT_FALSE(check(region2, Mode::kReadOnly));
+}
+
+// Death tests: each verifies that a prohibited mode transition hits a CHECK.
+// Tests that it's impossible to create read-only platform shared memory region.
+TEST_F(PlatformSharedMemoryRegionTest, CreateReadOnlyRegionDeathTest) {
+#ifdef OFFICIAL_BUILD
+  // The official build does not print the reason a CHECK failed.
+  const char kErrorRegex[] = "";
+#else
+  const char kErrorRegex[] =
+      "Creating a region in read-only mode will lead to this region being "
+      "non-modifiable";
+#endif
+  EXPECT_DEATH_IF_SUPPORTED(
+      PlatformSharedMemoryRegion::Create(
+          PlatformSharedMemoryRegion::Mode::kReadOnly, kRegionSize),
+      kErrorRegex);
+}
+
+// Tests that it's prohibited to duplicate a writable region.
+TEST_F(PlatformSharedMemoryRegionTest, DuplicateWritableRegionDeathTest) {
+#ifdef OFFICIAL_BUILD
+  const char kErrorRegex[] = "";
+#else
+  const char kErrorRegex[] =
+      "Duplicating a writable shared memory region is prohibited";
+#endif
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  EXPECT_DEATH_IF_SUPPORTED(region.Duplicate(), kErrorRegex);
+}
+
+// Tests that it's prohibited to convert an unsafe region to read-only.
+TEST_F(PlatformSharedMemoryRegionTest, UnsafeRegionConvertToReadOnlyDeathTest) {
+#ifdef OFFICIAL_BUILD
+  const char kErrorRegex[] = "";
+#else
+  const char kErrorRegex[] =
+      "Only writable shared memory region can be converted to read-only";
+#endif
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateUnsafe(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  EXPECT_DEATH_IF_SUPPORTED(region.ConvertToReadOnly(), kErrorRegex);
+}
+
+// Tests that it's prohibited to convert a read-only region to read-only.
+TEST_F(PlatformSharedMemoryRegionTest,
+       ReadOnlyRegionConvertToReadOnlyDeathTest) {
+#ifdef OFFICIAL_BUILD
+  const char kErrorRegex[] = "";
+#else
+  const char kErrorRegex[] =
+      "Only writable shared memory region can be converted to read-only";
+#endif
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  EXPECT_TRUE(region.ConvertToReadOnly());
+  EXPECT_DEATH_IF_SUPPORTED(region.ConvertToReadOnly(), kErrorRegex);
+}
+
+// Tests that it's prohibited to convert a read-only region to unsafe.
+TEST_F(PlatformSharedMemoryRegionTest, ReadOnlyRegionConvertToUnsafeDeathTest) {
+#ifdef OFFICIAL_BUILD
+  const char kErrorRegex[] = "";
+#else
+  const char kErrorRegex[] =
+      "Only writable shared memory region can be converted to unsafe";
+#endif
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  ASSERT_TRUE(region.ConvertToReadOnly());
+  EXPECT_DEATH_IF_SUPPORTED(region.ConvertToUnsafe(), kErrorRegex);
+}
+
+// Tests that it's prohibited to convert an unsafe region to unsafe.
+TEST_F(PlatformSharedMemoryRegionTest, UnsafeRegionConvertToUnsafeDeathTest) {
+#ifdef OFFICIAL_BUILD
+  const char kErrorRegex[] = "";
+#else
+  const char kErrorRegex[] =
+      "Only writable shared memory region can be converted to unsafe";
+#endif
+  PlatformSharedMemoryRegion region =
+      PlatformSharedMemoryRegion::CreateUnsafe(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  EXPECT_DEATH_IF_SUPPORTED(region.ConvertToUnsafe(), kErrorRegex);
+}
+
+// Check that taking from a SharedMemoryHandle works.
+TEST_F(PlatformSharedMemoryRegionTest, TakeFromSharedMemoryHandle) {
+  SharedMemory shm;
+  auto region = PlatformSharedMemoryRegion::TakeFromSharedMemoryHandle(
+      shm.TakeHandle(), PlatformSharedMemoryRegion::Mode::kUnsafe);
+  ASSERT_FALSE(region.IsValid());
+
+  shm.CreateAndMapAnonymous(10);
+  region = PlatformSharedMemoryRegion::TakeFromSharedMemoryHandle(
+      shm.TakeHandle(), PlatformSharedMemoryRegion::Mode::kUnsafe);
+  ASSERT_TRUE(region.IsValid());
+
+#if !(defined(OS_MACOSX) && !defined(OS_IOS))
+  // Note that it's not possible on all platforms for TakeFromSharedMemoryHandle
+  // to conveniently check if the SharedMemoryHandle is readonly or
+  // not. Therefore it is actually possible to get a kUnsafe
+  // PlatformSharedMemoryRegion from a readonly handle on some platforms.
+  SharedMemoryCreateOptions options;
+  options.size = 10;
+  options.share_read_only = true;
+  shm.Create(options);
+  EXPECT_DEATH_IF_SUPPORTED(
+      PlatformSharedMemoryRegion::TakeFromSharedMemoryHandle(
+          shm.GetReadOnlyHandle(), PlatformSharedMemoryRegion::Mode::kUnsafe),
+      "");
+#endif  // !(defined(OS_MACOSX) && !defined(OS_IOS))
+}
+
+// Check that taking from a readonly SharedMemoryHandle works.
+TEST_F(PlatformSharedMemoryRegionTest, TakeFromReadOnlySharedMemoryHandle) {
+  SharedMemory shm;
+  // Note that getting a read-only handle from an unmapped SharedMemory will
+  // fail, so the invalid region case cannot be tested.
+  SharedMemoryCreateOptions options;
+  options.size = 10;
+  options.share_read_only = true;
+  shm.Create(options);
+  auto readonly_handle = shm.GetReadOnlyHandle();
+#if defined(OS_ANDROID)
+  // On Android the handle itself must be marked read-only before the take.
+  readonly_handle.SetRegionReadOnly();
+#endif
+  auto region = PlatformSharedMemoryRegion::TakeFromSharedMemoryHandle(
+      readonly_handle, PlatformSharedMemoryRegion::Mode::kReadOnly);
+  ASSERT_TRUE(region.IsValid());
+}
+
+// Check that taking from a SharedMemoryHandle in writable mode fails.
+TEST_F(PlatformSharedMemoryRegionTest, WritableTakeFromSharedMemoryHandle) {
+  SharedMemory shm;
+  EXPECT_DEATH_IF_SUPPORTED(
+      PlatformSharedMemoryRegion::TakeFromSharedMemoryHandle(
+          shm.TakeHandle(), PlatformSharedMemoryRegion::Mode::kWritable),
+      "");
+
+  shm.CreateAndMapAnonymous(10);
+  EXPECT_DEATH_IF_SUPPORTED(
+      PlatformSharedMemoryRegion::TakeFromSharedMemoryHandle(
+          shm.TakeHandle(), PlatformSharedMemoryRegion::Mode::kWritable),
+      "");
+}
+
+}  // namespace subtle
+}  // namespace base
diff --git a/src/base/memory/platform_shared_memory_region_win.cc b/src/base/memory/platform_shared_memory_region_win.cc
new file mode 100644
index 0000000..691ce87
--- /dev/null
+++ b/src/base/memory/platform_shared_memory_region_win.cc
@@ -0,0 +1,351 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/platform_shared_memory_region.h"
+
+#include <aclapi.h>
+
+#include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/bits.h"
+#include "base/metrics/histogram_functions.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/process/process_handle.h"
+#include "base/rand_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/win/windows_version.h"
+#include "starboard/types.h"
+
+namespace base {
+namespace subtle {
+
+namespace {
+
+// Emits UMA metrics about encountered errors. Pass zero (0) for |winerror|
+// if there is no associated Windows error.
+void LogError(PlatformSharedMemoryRegion::CreateError error, DWORD winerror) {
+  UMA_HISTOGRAM_ENUMERATION("SharedMemory.CreateError", error);
+  static_assert(ERROR_SUCCESS == 0, "Windows error code changed!");
+  // Only record a Windows error code when one was actually set.
+  if (winerror != ERROR_SUCCESS)
+    UmaHistogramSparse("SharedMemory.CreateWinError", winerror);
+}
+
+// Declarations mirroring the (undocumented) ntdll NtQuerySection API, which
+// is resolved dynamically in IsSectionSafeToMap() below.
+typedef enum _SECTION_INFORMATION_CLASS {
+  SectionBasicInformation,
+} SECTION_INFORMATION_CLASS;
+
+typedef struct _SECTION_BASIC_INFORMATION {
+  PVOID BaseAddress;
+  ULONG Attributes;
+  LARGE_INTEGER Size;
+} SECTION_BASIC_INFORMATION, *PSECTION_BASIC_INFORMATION;
+
+typedef ULONG(__stdcall* NtQuerySectionType)(
+    HANDLE SectionHandle,
+    SECTION_INFORMATION_CLASS SectionInformationClass,
+    PVOID SectionInformation,
+    ULONG SectionInformationLength,
+    PULONG ResultLength);
+
+// Returns the length of the memory section starting at the supplied address.
+// Returns 0 if VirtualQuery fails for |address|.
+size_t GetMemorySectionSize(void* address) {
+  MEMORY_BASIC_INFORMATION memory_info;
+  if (!::VirtualQuery(address, &memory_info, sizeof(memory_info)))
+    return 0;
+  // Size of the allocation minus the offset of |address| within it.
+  return memory_info.RegionSize -
+         (static_cast<char*>(address) -
+          static_cast<char*>(memory_info.AllocationBase));
+}
+
+// Checks if the section object is safe to map. At the moment this just means
+// it's not an image section.
+bool IsSectionSafeToMap(HANDLE handle) {
+  static NtQuerySectionType nt_query_section_func =
+      reinterpret_cast<NtQuerySectionType>(
+          ::GetProcAddress(::GetModuleHandle(L"ntdll.dll"), "NtQuerySection"));
+  DCHECK(nt_query_section_func);
+
+  // The handle must have SECTION_QUERY access for this to succeed.
+  SECTION_BASIC_INFORMATION basic_information = {};
+  ULONG status =
+      nt_query_section_func(handle, SectionBasicInformation, &basic_information,
+                            sizeof(basic_information), nullptr);
+  // A non-zero NTSTATUS means the query failed; treat that as unsafe.
+  if (status)
+    return false;
+  return (basic_information.Attributes & SEC_IMAGE) != SEC_IMAGE;
+}
+
+// Returns a HANDLE on success and |nullptr| on failure.
+// This function is similar to CreateFileMapping, but removes the permissions
+// WRITE_DAC, WRITE_OWNER, READ_CONTROL, and DELETE.
+//
+// A newly created file mapping has two sets of permissions. It has access
+// control permissions (WRITE_DAC, WRITE_OWNER, READ_CONTROL, and DELETE) and
+// file permissions (FILE_MAP_READ, FILE_MAP_WRITE, etc.). The Chrome sandbox
+// prevents HANDLEs with the WRITE_DAC permission from being duplicated into
+// unprivileged processes.
+//
+// In order to remove the access control permissions, after being created the
+// handle is duplicated with only the file access permissions.
+HANDLE CreateFileMappingWithReducedPermissions(SECURITY_ATTRIBUTES* sa,
+                                               size_t rounded_size,
+                                               LPCWSTR name) {
+  HANDLE h = CreateFileMapping(INVALID_HANDLE_VALUE, sa, PAGE_READWRITE, 0,
+                               static_cast<DWORD>(rounded_size), name);
+  if (!h) {
+    LogError(
+        PlatformSharedMemoryRegion::CreateError::CREATE_FILE_MAPPING_FAILURE,
+        GetLastError());
+    return nullptr;
+  }
+
+  // Re-duplicate with only file-access rights, then close the original so the
+  // full-permission handle never escapes this function.
+  HANDLE h2;
+  ProcessHandle process = GetCurrentProcess();
+  BOOL success = ::DuplicateHandle(
+      process, h, process, &h2, FILE_MAP_READ | FILE_MAP_WRITE | SECTION_QUERY,
+      FALSE, 0);
+  BOOL rv = ::CloseHandle(h);
+  DCHECK(rv);
+
+  if (!success) {
+    LogError(
+        PlatformSharedMemoryRegion::CreateError::REDUCE_PERMISSIONS_FAILURE,
+        GetLastError());
+    return nullptr;
+  }
+
+  return h2;
+}
+
+}  // namespace
+
+// static
+// Wraps a pre-existing section |handle| in a PlatformSharedMemoryRegion.
+// Rejects invalid handles, empty or oversized regions and unsafe (image)
+// sections, and CHECKs that the handle's access rights match |mode|.
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Take(
+    win::ScopedHandle handle,
+    Mode mode,
+    size_t size,
+    const UnguessableToken& guid) {
+  if (!handle.IsValid())
+    return {};
+
+  if (size == 0)
+    return {};
+
+  if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return {};
+
+  if (!IsSectionSafeToMap(handle.Get()))
+    return {};
+
+  CHECK(
+      CheckPlatformHandlePermissionsCorrespondToMode(handle.Get(), mode, size));
+
+  return PlatformSharedMemoryRegion(std::move(handle), mode, size, guid);
+}
+
+// static
+// Adopts a legacy SharedMemoryHandle; only kReadOnly and kUnsafe are allowed
+// since a handle alone cannot represent a writable (two-handle) region.
+PlatformSharedMemoryRegion
+PlatformSharedMemoryRegion::TakeFromSharedMemoryHandle(
+    const SharedMemoryHandle& handle,
+    Mode mode) {
+  CHECK(mode == Mode::kReadOnly || mode == Mode::kUnsafe);
+  if (!handle.IsValid())
+    return {};
+
+  return Take(base::win::ScopedHandle(handle.GetHandle()), mode,
+              handle.GetSize(), handle.GetGUID());
+}
+
+// Returns the underlying section handle without transferring ownership.
+HANDLE PlatformSharedMemoryRegion::GetPlatformHandle() const {
+  return handle_.Get();
+}
+
+// A region is valid iff it owns a valid section handle.
+bool PlatformSharedMemoryRegion::IsValid() const {
+  return handle_.IsValid();
+}
+
+// Returns a new region sharing the same section, mode, size and GUID.
+// Duplicating kWritable regions is prohibited (CHECK below).
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Duplicate() const {
+  if (!IsValid())
+    return {};
+
+  CHECK_NE(mode_, Mode::kWritable)
+      << "Duplicating a writable shared memory region is prohibited";
+
+  HANDLE duped_handle;
+  ProcessHandle process = GetCurrentProcess();
+  // DUPLICATE_SAME_ACCESS: the copy keeps exactly the original's rights.
+  BOOL success =
+      ::DuplicateHandle(process, handle_.Get(), process, &duped_handle, 0,
+                        FALSE, DUPLICATE_SAME_ACCESS);
+  if (!success)
+    return {};
+
+  return PlatformSharedMemoryRegion(win::ScopedHandle(duped_handle), mode_,
+                                    size_, guid_);
+}
+
+// Downgrades a kWritable region to kReadOnly by re-duplicating the handle
+// with only FILE_MAP_READ | SECTION_QUERY access.
+bool PlatformSharedMemoryRegion::ConvertToReadOnly() {
+  if (!IsValid())
+    return false;
+
+  CHECK_EQ(mode_, Mode::kWritable)
+      << "Only writable shared memory region can be converted to read-only";
+
+  // Take ownership locally so the writable handle is closed when this scope
+  // ends, whether or not the duplication below succeeds.
+  win::ScopedHandle handle_copy(handle_.Take());
+
+  HANDLE duped_handle;
+  ProcessHandle process = GetCurrentProcess();
+  BOOL success =
+      ::DuplicateHandle(process, handle_copy.Get(), process, &duped_handle,
+                        FILE_MAP_READ | SECTION_QUERY, FALSE, 0);
+  if (!success)
+    return false;
+
+  handle_.Set(duped_handle);
+  mode_ = Mode::kReadOnly;
+  return true;
+}
+
+// Downgrades a kWritable region to kUnsafe. On Windows this is a pure mode
+// change; the handle's access rights are untouched.
+bool PlatformSharedMemoryRegion::ConvertToUnsafe() {
+  if (!IsValid())
+    return false;
+
+  CHECK_EQ(mode_, Mode::kWritable)
+      << "Only writable shared memory region can be converted to unsafe";
+
+  mode_ = Mode::kUnsafe;
+  return true;
+}
+
+// Maps |size| bytes of the section at |offset| into this process. On success
+// fills |*memory| and |*mapped_size| (the latter from VirtualQuery, so it may
+// exceed |size|).
+bool PlatformSharedMemoryRegion::MapAtInternal(off_t offset,
+                                               size_t size,
+                                               void** memory,
+                                               size_t* mapped_size) const {
+  bool write_allowed = mode_ != Mode::kReadOnly;
+  // Try to map the shared memory. On the first failure, release any reserved
+  // address space for a single entry.
+  for (int i = 0; i < 2; ++i) {
+    // MapViewOfFile takes the 64-bit offset split into high/low DWORDs.
+    *memory = MapViewOfFile(
+        handle_.Get(), FILE_MAP_READ | (write_allowed ? FILE_MAP_WRITE : 0),
+        static_cast<uint64_t>(offset) >> 32, static_cast<DWORD>(offset), size);
+    if (*memory)
+      break;
+    ReleaseReservation();
+  }
+  if (!*memory) {
+    DPLOG(ERROR) << "Failed executing MapViewOfFile";
+    return false;
+  }
+
+  *mapped_size = GetMemorySectionSize(*memory);
+  return true;
+}
+
+// static
+// Creates a new anonymous (or randomly named, pre-Win8.1) pagefile-backed
+// section of |size| bytes in the given |mode|; kReadOnly is rejected.
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
+                                                              size_t size) {
+  // TODO(crbug.com/210609): NaCl forces us to round up 64k here, wasting 32k
+  // per mapping on average.
+  static const size_t kSectionSize = 65536;
+  if (size == 0) {
+    LogError(CreateError::SIZE_ZERO, 0);
+    return {};
+  }
+
+  size_t rounded_size = bits::Align(size, kSectionSize);
+  if (rounded_size > static_cast<size_t>(std::numeric_limits<int>::max())) {
+    LogError(CreateError::SIZE_TOO_LARGE, 0);
+    return {};
+  }
+
+  CHECK_NE(mode, Mode::kReadOnly) << "Creating a region in read-only mode will "
+                                     "lead to this region being non-modifiable";
+
+  // Add an empty DACL to enforce anonymous read-only sections.
+  ACL dacl;
+  SECURITY_DESCRIPTOR sd;
+  if (!InitializeAcl(&dacl, sizeof(dacl), ACL_REVISION)) {
+    LogError(CreateError::INITIALIZE_ACL_FAILURE, GetLastError());
+    return {};
+  }
+  if (!InitializeSecurityDescriptor(&sd, SECURITY_DESCRIPTOR_REVISION)) {
+    LogError(CreateError::INITIALIZE_SECURITY_DESC_FAILURE, GetLastError());
+    return {};
+  }
+  if (!SetSecurityDescriptorDacl(&sd, TRUE, &dacl, FALSE)) {
+    LogError(CreateError::SET_SECURITY_DESC_FAILURE, GetLastError());
+    return {};
+  }
+
+  string16 name;
+  if (base::win::GetVersion() < base::win::VERSION_WIN8_1) {
+    // Windows < 8.1 ignores DACLs on certain unnamed objects (like shared
+    // sections). So, we generate a random name when we need to enforce
+    // read-only.
+    uint64_t rand_values[4];
+    RandBytes(&rand_values, sizeof(rand_values));
+    name = StringPrintf(L"CrSharedMem_%016llx%016llx%016llx%016llx",
+                        rand_values[0], rand_values[1], rand_values[2],
+                        rand_values[3]);
+    DCHECK(!name.empty());
+  }
+
+  SECURITY_ATTRIBUTES sa = {sizeof(sa), &sd, FALSE};
+  // Ask for the file mapping with reduced permissions to avoid passing the
+  // access control permissions granted by default into an unprivileged
+  // process.
+  HANDLE h = CreateFileMappingWithReducedPermissions(
+      &sa, rounded_size, name.empty() ? nullptr : name.c_str());
+  if (h == nullptr) {
+    // The error is logged within CreateFileMappingWithReducedPermissions().
+    return {};
+  }
+
+  win::ScopedHandle scoped_h(h);
+  // Check if the shared memory pre-exists.
+  if (GetLastError() == ERROR_ALREADY_EXISTS) {
+    LogError(CreateError::ALREADY_EXISTS, ERROR_ALREADY_EXISTS);
+    return {};
+  }
+
+  LogError(CreateError::SUCCESS, ERROR_SUCCESS);
+  // Note: the reported size stays |size|, not |rounded_size|.
+  return PlatformSharedMemoryRegion(std::move(scoped_h), mode, size,
+                                    UnguessableToken::Create());
+}
+
+// static
+// Verifies that |handle|'s access rights match |mode| by probing whether it
+// can be duplicated with write access.  |size| is unused on Windows.
+bool PlatformSharedMemoryRegion::CheckPlatformHandlePermissionsCorrespondToMode(
+    PlatformHandle handle,
+    Mode mode,
+    size_t size) {
+  // Call ::DuplicateHandle() with FILE_MAP_WRITE as a desired access to check
+  // if the |handle| has a write access.
+  ProcessHandle process = GetCurrentProcess();
+  HANDLE duped_handle;
+  BOOL success = ::DuplicateHandle(process, handle, process, &duped_handle,
+                                   FILE_MAP_WRITE, FALSE, 0);
+  if (success) {
+    // The probe handle is only needed for the check; close it immediately.
+    BOOL rv = ::CloseHandle(duped_handle);
+    DCHECK(rv);
+  }
+
+  bool is_read_only = !success;
+  bool expected_read_only = mode == Mode::kReadOnly;
+
+  if (is_read_only != expected_read_only) {
+    DLOG(ERROR) << "File mapping handle has wrong access rights: it is"
+                << (is_read_only ? " " : " not ") << "read-only but it should"
+                << (expected_read_only ? " " : " not ") << "be";
+    return false;
+  }
+
+  return true;
+}
+
+// Constructor used by the factories above; adopts ownership of |handle|.
+PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
+    win::ScopedHandle handle,
+    Mode mode,
+    size_t size,
+    const UnguessableToken& guid)
+    : handle_(std::move(handle)), mode_(mode), size_(size), guid_(guid) {}
+
+}  // namespace subtle
+}  // namespace base
diff --git a/src/base/memory/protected_memory.cc b/src/base/memory/protected_memory.cc
new file mode 100644
index 0000000..157a677
--- /dev/null
+++ b/src/base/memory/protected_memory.cc
@@ -0,0 +1,17 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/protected_memory.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+// In non-component builds the writer count itself lives in the protected
+// memory section, so it cannot be tampered with while the section is
+// read-only (see the discussion in protected_memory.h).
+#if !defined(COMPONENT_BUILD)
+PROTECTED_MEMORY_SECTION int AutoWritableMemory::writers = 0;
+#endif  // !defined(COMPONENT_BUILD)
+
+// Lazily-created lock guarding all reads and writes of |writers|.
+base::LazyInstance<Lock>::Leaky AutoWritableMemory::writers_lock =
+    LAZY_INSTANCE_INITIALIZER;
+
+}  // namespace base
diff --git a/src/base/memory/protected_memory.h b/src/base/memory/protected_memory.h
new file mode 100644
index 0000000..3cb2ec3
--- /dev/null
+++ b/src/base/memory/protected_memory.h
@@ -0,0 +1,276 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Protected memory is memory holding security-sensitive data intended to be
+// left read-only for the majority of its lifetime to avoid being overwritten
+// by attackers. ProtectedMemory is a simple wrapper around platform-specific
+// APIs to set memory read-write and read-only when required. Protected memory
+// should be set read-write for the minimum amount of time required.
+
+// Normally mutable variables are held in read-write memory and constant data
+// is held in read-only memory to ensure it is not accidentally overwritten.
+// In some cases we want to hold mutable variables in read-only memory, except
+// when they are being written to, to ensure that they are not tampered with.
+//
+// ProtectedMemory is a container class intended to hold a single variable in
+// read-only memory, except when explicitly set read-write. The variable can be
+// set read-write by creating a scoped AutoWritableMemory object by calling
+// AutoWritableMemory::Create(), the memory stays writable until the returned
+// object goes out of scope and is destructed. The wrapped variable can be
+// accessed using operator* and operator->.
+//
+// Instances of ProtectedMemory must be declared in the PROTECTED_MEMORY_SECTION
+// and as global variables. Because protected memory variables are globals,
+// the same rules apply disallowing non-trivial constructors and destructors.
+// Global definitions are required to avoid the linker placing statics in
+// inlinable functions into a comdat section and setting the protected memory
+// section read-write when they are merged.
+//
+// EXAMPLE:
+//
+//  struct Items { void* item1; };
+//  static PROTECTED_MEMORY_SECTION base::ProtectedMemory<Items> items;
+//  void InitializeItems() {
+//    // Explicitly set items read-write before writing to it.
+//    auto writer = base::AutoWritableMemory::Create(items);
+//    items->item1 = /* ... */;
+//    assert(items->item1 != nullptr);
+//    // items is set back to read-only on the destruction of writer
+//  }
+//
+//  using FnPtr = void (*)(void);
+//  PROTECTED_MEMORY_SECTION base::ProtectedMemory<FnPtr> fnPtr;
+//  FnPtr ResolveFnPtr(void) {
+//    // The Initializer nested class is a helper class for creating a static
+//    // initializer for a ProtectedMemory variable. It implicitly sets the
+//    // variable read-write during initialization.
+//    static base::ProtectedMemory<FnPtr>::Initializer I(&fnPtr,
+//      reinterpret_cast<FnPtr>(dlsym(/* ... */)));
+//    return *fnPtr;
+//  }
+
+#ifndef BASE_MEMORY_PROTECTED_MEMORY_H_
+#define BASE_MEMORY_PROTECTED_MEMORY_H_
+
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/protected_memory_buildflags.h"
+#include "base/synchronization/lock.h"
+#include "build/build_config.h"
+
+#define PROTECTED_MEMORY_ENABLED 1
+
+// Linking with lld is required to workaround crbug.com/792777.
+// TODO(vtsyrklevich): Remove once support for gold on Android/CrOs is dropped
+#if defined(OS_LINUX) && BUILDFLAG(USE_LLD)
+// Define the section read-only
+__asm__(".section protected_memory, \"a\"\n\t");
+#define PROTECTED_MEMORY_SECTION __attribute__((section("protected_memory")))
+
+// Explicitly mark these variables hidden so the symbols are local to the
+// currently built component. Otherwise they are created with global (external)
+// linkage and component builds would break because a single pair of these
+// symbols would override the rest.
+__attribute__((visibility("hidden"))) extern char __start_protected_memory;
+__attribute__((visibility("hidden"))) extern char __stop_protected_memory;
+
+#elif defined(OS_MACOSX) && !defined(OS_IOS)
+// The segment the section is in is defined read-only with a linker flag in
+// build/config/mac/BUILD.gn
+#define PROTECTED_MEMORY_SECTION \
+  __attribute__((section("PROTECTED_MEMORY, protected_memory")))
+extern char __start_protected_memory __asm(
+    "section$start$PROTECTED_MEMORY$protected_memory");
+extern char __stop_protected_memory __asm(
+    "section$end$PROTECTED_MEMORY$protected_memory");
+
+#elif defined(OS_WIN)
+// Define a read-write prot section. The $a, $mem, and $z 'sub-sections' are
+// merged alphabetically so $a and $z are used to define the start and end of
+// the protected memory section, and $mem holds protected variables.
+// (Note: Sections in Portable Executables are equivalent to segments in other
+// executable formats, so this section is mapped into its own pages.)
+#pragma section("prot$a", read, write)
+#pragma section("prot$mem", read, write)
+#pragma section("prot$z", read, write)
+
+// We want the protected memory section to be read-only, not read-write so we
+// instruct the linker to set the section read-only at link time. We do this
+// at link time instead of compile time, because defining the prot section
+// read-only would cause mis-compiles due to optimizations assuming that the
+// section contents are constant.
+#pragma comment(linker, "/SECTION:prot,R")
+
+__declspec(allocate("prot$a")) __declspec(selectany)
+char __start_protected_memory;
+__declspec(allocate("prot$z")) __declspec(selectany)
+char __stop_protected_memory;
+
+#define PROTECTED_MEMORY_SECTION __declspec(allocate("prot$mem"))
+
+#else
+#undef PROTECTED_MEMORY_ENABLED
+#define PROTECTED_MEMORY_ENABLED 0
+#define PROTECTED_MEMORY_SECTION
+#endif
+
+namespace base {
+
+template <typename T>
+class ProtectedMemory {
+ public:
+  // Trivial default construction only; instances are globals placed in
+  // PROTECTED_MEMORY_SECTION (see the file comment above).
+  ProtectedMemory() = default;
+
+  // Expose direct access to the encapsulated variable
+  // Note: these accessors do not check whether the memory is currently
+  // writable; writing through them without an AutoWritableMemory in scope
+  // will fault when protection is enabled.
+  T& operator*() { return data; }
+  const T& operator*() const { return data; }
+  T* operator->() { return &data; }
+  const T* operator->() const { return &data; }
+
+  // Helper class for creating simple ProtectedMemory static initializers.
+  class Initializer {
+   public:
+    // Defined out-of-line below to break circular definition dependency between
+    // ProtectedMemory and AutoWritableMemory.
+    Initializer(ProtectedMemory<T>* PM, const T& Init);
+
+    DISALLOW_IMPLICIT_CONSTRUCTORS(Initializer);
+  };
+
+ private:
+  // The wrapped value, stored directly so it resides in the same section as
+  // the ProtectedMemory instance itself.
+  T data;
+
+  DISALLOW_COPY_AND_ASSIGN(ProtectedMemory);
+};
+
+// DCHECK that the byte at |ptr| is read-only.
+BASE_EXPORT void AssertMemoryIsReadOnly(const void* ptr);
+
+// Abstract out platform-specific methods to get the beginning and end of the
+// PROTECTED_MEMORY_SECTION. ProtectedMemoryEnd returns a pointer to the byte
+// past the end of the PROTECTED_MEMORY_SECTION.
+#if PROTECTED_MEMORY_ENABLED
+// The __start/__stop symbols are emitted by the linker/section setup above.
+constexpr void* ProtectedMemoryStart = &__start_protected_memory;
+constexpr void* ProtectedMemoryEnd = &__stop_protected_memory;
+#endif
+
+#if defined(COMPONENT_BUILD)
+namespace internal {
+
+// For component builds we want to define a separate global writers variable
+// (explained below) in every DSO that includes this header. To do that we use
+// this template to define a global without duplicate symbol errors.
+template <typename T>
+struct DsoSpecific {
+  static T value;
+};
+// Zero-initialized; each DSO instantiating this gets its own copy.
+template <typename T>
+T DsoSpecific<T>::value = 0;
+
+}  // namespace internal
+#endif  // defined(COMPONENT_BUILD)
+
+// A class that sets a given ProtectedMemory variable writable while the
+// AutoWritableMemory is in scope. This class implements the logic for setting
+// the protected memory region read-only/read-write in a thread-safe manner.
+class AutoWritableMemory {
+ private:
+  // 'writers' is a global holding the number of ProtectedMemory instances set
+  // writable, used to avoid races setting protected memory readable/writable.
+  // When this reaches zero the protected memory region is set read only.
+  // Access is controlled by writers_lock.
+#if defined(COMPONENT_BUILD)
+  // For component builds writers is a reference to an int defined separately in
+  // every DSO.
+  static constexpr int& writers = internal::DsoSpecific<int>::value;
+#else
+  // Otherwise, we declare writers in the protected memory section to avoid the
+  // scenario where an attacker could overwrite it with a large value and invoke
+  // code that constructs and destructs an AutoWritableMemory. After such a call
+  // protected memory would still be set writable because writers > 0.
+  static int writers;
+#endif  // defined(COMPONENT_BUILD)
+
+  // Synchronizes access to the writers variable and the simultaneous actions
+  // that need to happen alongside writers changes, e.g. setting the protected
+  // memory region readable when writers is decremented to 0.
+  static BASE_EXPORT base::LazyInstance<Lock>::Leaky writers_lock;
+
+  // Abstract out platform-specific memory APIs. |end| points to the byte past
+  // the end of the region of memory having its memory protections changed.
+  BASE_EXPORT bool SetMemoryReadWrite(void* start, void* end);
+  BASE_EXPORT bool SetMemoryReadOnly(void* start, void* end);
+
+  // If this is the first writer (e.g. writers == 0) set the writers variable
+  // read-write. Next, increment writers and set the requested memory writable.
+  AutoWritableMemory(void* ptr, void* ptr_end) {
+#if PROTECTED_MEMORY_ENABLED
+    // The requested range must lie entirely inside the protected section.
+    DCHECK(ptr >= ProtectedMemoryStart && ptr_end <= ProtectedMemoryEnd);
+
+    {
+      base::AutoLock auto_lock(writers_lock.Get());
+      if (writers == 0) {
+        // First writer: the section must still be read-only at this point.
+        AssertMemoryIsReadOnly(ptr);
+#if !defined(COMPONENT_BUILD)
+        AssertMemoryIsReadOnly(&writers);
+        CHECK(SetMemoryReadWrite(&writers, &writers + 1));
+#endif  // !defined(COMPONENT_BUILD)
+      }
+
+      writers++;
+    }
+
+    // Finally make the caller's requested range writable; failure is fatal.
+    CHECK(SetMemoryReadWrite(ptr, ptr_end));
+#endif  // PROTECTED_MEMORY_ENABLED
+  }
+
+ public:
+  // Wrap the private constructor to create an easy-to-use interface to
+  // construct AutoWritableMemory objects.
+  template <typename T>
+  static AutoWritableMemory Create(ProtectedMemory<T>& PM) {
+    T* ptr = &*PM;
+    // [ptr, ptr + 1) is the half-open range covering exactly the wrapped T.
+    return AutoWritableMemory(ptr, ptr + 1);
+  }
+
+  // Move constructor just increments writers
+  // (the memory was already made writable by |original|'s construction).
+  AutoWritableMemory(AutoWritableMemory&& original) {
+#if PROTECTED_MEMORY_ENABLED
+    base::AutoLock auto_lock(writers_lock.Get());
+    CHECK_GT(writers, 0);
+    writers++;
+#endif  // PROTECTED_MEMORY_ENABLED
+  }
+
+  // On destruction decrement writers, and if no other writers exist, set the
+  // entire protected memory region read-only.
+  ~AutoWritableMemory() {
+#if PROTECTED_MEMORY_ENABLED
+    base::AutoLock auto_lock(writers_lock.Get());
+    CHECK_GT(writers, 0);
+    writers--;
+
+    if (writers == 0) {
+      // Last writer gone: re-protect the whole section, not just one range.
+      CHECK(SetMemoryReadOnly(ProtectedMemoryStart, ProtectedMemoryEnd));
+#if !defined(COMPONENT_BUILD)
+      AssertMemoryIsReadOnly(&writers);
+#endif  // !defined(COMPONENT_BUILD)
+    }
+#endif  // PROTECTED_MEMORY_ENABLED
+  }
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(AutoWritableMemory);
+};
+
+// Out-of-line Initializer constructor (declared inside ProtectedMemory<T>).
+// |writer| keeps *PM writable for the duration of the assignment; the memory
+// is re-protected when |writer| is destroyed at the end of this constructor.
+template <typename T>
+ProtectedMemory<T>::Initializer::Initializer(ProtectedMemory<T>* PM,
+                                             const T& Init) {
+  AutoWritableMemory writer = AutoWritableMemory::Create(*PM);
+  **PM = Init;
+}
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_PROTECTED_MEMORY_H_
diff --git a/src/base/memory/protected_memory_cfi.h b/src/base/memory/protected_memory_cfi.h
new file mode 100644
index 0000000..a90023b
--- /dev/null
+++ b/src/base/memory/protected_memory_cfi.h
@@ -0,0 +1,86 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Helper routines to call function pointers stored in protected memory with
+// Control Flow Integrity indirect call checking disabled. Some indirect calls,
+// e.g. dynamically resolved symbols in another DSO, can not be accounted for by
+// CFI-icall. These routines allow those symbols to be called without CFI-icall
+// checking safely by ensuring that they are placed in protected memory.
+
+#ifndef BASE_MEMORY_PROTECTED_MEMORY_CFI_H_
+#define BASE_MEMORY_PROTECTED_MEMORY_CFI_H_
+
+#include <utility>
+
+#include "base/cfi_buildflags.h"
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/memory/protected_memory.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(CFI_ICALL_CHECK) && !PROTECTED_MEMORY_ENABLED
+#error "CFI-icall enabled for platform without protected memory support"
+#endif  // BUILDFLAG(CFI_ICALL_CHECK) && !PROTECTED_MEMORY_ENABLED
+
+namespace base {
+namespace internal {
+
+// This class is used to exempt calls to function pointers stored in
+// ProtectedMemory from cfi-icall checking. It's not secure to use directly, it
+// should only be used by the UnsanitizedCfiCall() functions below. Given an
+// UnsanitizedCfiCall object, you can use operator() to call the encapsulated
+// function pointer without cfi-icall checking.
+template <typename FunctionType>
+class UnsanitizedCfiCall {
+ public:
+  explicit UnsanitizedCfiCall(FunctionType function) : function_(function) {}
+  UnsanitizedCfiCall(UnsanitizedCfiCall&&) = default;
+
+  // Invokes the stored pointer with CFI-icall checking disabled via the
+  // NO_SANITIZE attribute; arguments are perfectly forwarded.
+  template <typename... Args>
+  NO_SANITIZE("cfi-icall")
+  auto operator()(Args&&... args) {
+    return function_(std::forward<Args>(args)...);
+  }
+
+ private:
+  // Held by value; safe only because callers verify the source pointer lives
+  // in protected (read-only) memory before constructing this object.
+  FunctionType function_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(UnsanitizedCfiCall);
+};
+
+}  // namespace internal
+
+// These functions can be used to call function pointers in ProtectedMemory
+// without cfi-icall checking. They are intended to be used to create an
+// UnsanitizedCfiCall object and immediately call it. UnsanitizedCfiCall objects
+// should not be initialized directly or stored because they hold a function
+// pointer that will be called without CFI-icall checking in mutable memory. The
+// functions can be used as shown below:
+
+// ProtectedMemory<void (*)(int)> p;
+// UnsanitizedCfiCall(p)(5); /* In place of (*p)(5); */
+
+// Calls the function pointer wrapped by |PM| without CFI-icall checking.
+template <typename T>
+auto UnsanitizedCfiCall(const ProtectedMemory<T>& PM) {
+#if PROTECTED_MEMORY_ENABLED
+  // Skipping CFI is only safe if the pointer lives in read-only memory.
+  DCHECK(&PM >= ProtectedMemoryStart && &PM < ProtectedMemoryEnd);
+#endif  // PROTECTED_MEMORY_ENABLED
+  return internal::UnsanitizedCfiCall<T>(*PM);
+}
+
+// struct S { void (*fp)(int); } s;
+// ProtectedMemory<S> p;
+// UnsanitizedCfiCall(p, &S::fp)(5); /* In place of p->fp(5); */
+
+// Calls the function-pointer member |member| of the struct wrapped by |PM|
+// without CFI-icall checking (e.g. p->fp(5) becomes
+// UnsanitizedCfiCall(p, &S::fp)(5)).
+template <typename T, typename Member>
+auto UnsanitizedCfiCall(const ProtectedMemory<T>& PM, Member member) {
+#if PROTECTED_MEMORY_ENABLED
+  // Skipping CFI is only safe if the pointer lives in read-only memory.
+  DCHECK(&PM >= ProtectedMemoryStart && &PM < ProtectedMemoryEnd);
+#endif  // PROTECTED_MEMORY_ENABLED
+  return internal::UnsanitizedCfiCall<decltype(*PM.*member)>(*PM.*member);
+}
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_PROTECTED_MEMORY_CFI_H_
diff --git a/src/base/memory/protected_memory_posix.cc b/src/base/memory/protected_memory_posix.cc
new file mode 100644
index 0000000..9bcbc6c
--- /dev/null
+++ b/src/base/memory/protected_memory_posix.cc
@@ -0,0 +1,79 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/protected_memory.h"
+
+#include <sys/mman.h>
+#include <unistd.h>
+
+#if defined(OS_LINUX)
+#include <sys/resource.h>
+#endif  // defined(OS_LINUX)
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+#include <mach/mach.h>
+#include <mach/mach_vm.h>
+#endif  // defined(OS_MACOSX) && !defined(OS_IOS)
+
+#include "base/posix/eintr_wrapper.h"
+#include "base/process/process_metrics.h"
+#include "base/synchronization/lock.h"
+#include "build/build_config.h"
+#include "starboard/types.h"
+
+namespace base {
+
+namespace {
+
+// Applies protection |prot| to every page overlapping [start, end).
+// |start| is rounded down to its page boundary because mprotect() requires a
+// page-aligned address; the kernel applies the change in whole pages.
+// Returns true on success.
+bool SetMemory(void* start, void* end, int prot) {
+  DCHECK(end > start);
+  const uintptr_t page_mask = ~(base::GetPageSize() - 1);
+  const uintptr_t page_start = reinterpret_cast<uintptr_t>(start) & page_mask;
+  return mprotect(reinterpret_cast<void*>(page_start),
+                  reinterpret_cast<uintptr_t>(end) - page_start, prot) == 0;
+}
+
+}  // namespace
+
+// Makes [start, end) readable and writable (page granularity).
+bool AutoWritableMemory::SetMemoryReadWrite(void* start, void* end) {
+  return SetMemory(start, end, PROT_READ | PROT_WRITE);
+}
+
+// Makes [start, end) read-only (page granularity).
+bool AutoWritableMemory::SetMemoryReadOnly(void* start, void* end) {
+  return SetMemory(start, end, PROT_READ);
+}
+
+#if defined(OS_LINUX)
+// Uses getrlimit() as a write probe: the syscall writes its result into the
+// buffer it is given, so pointing it at a read-only page must fail with
+// EFAULT. If the call succeeded the page would be writable.
+void AssertMemoryIsReadOnly(const void* ptr) {
+#if DCHECK_IS_ON()
+  const uintptr_t page_mask = ~(base::GetPageSize() - 1);
+  const uintptr_t page_start = reinterpret_cast<uintptr_t>(ptr) & page_mask;
+
+  // Note: const is cast away here, which is harmless: getrlimit() is expected
+  // to fail before writing anything, and if it did write we would detect it.
+  int result =
+      getrlimit(RLIMIT_NPROC, reinterpret_cast<struct rlimit*>(page_start));
+  DCHECK_EQ(result, -1);
+  DCHECK_EQ(errno, EFAULT);
+#endif  // DCHECK_IS_ON()
+}
+#elif defined(OS_MACOSX) && !defined(OS_IOS)
+// Queries the VM region containing |ptr| via mach_vm_region() and DCHECKs
+// that its current protection is exactly VM_PROT_READ.
+void AssertMemoryIsReadOnly(const void* ptr) {
+#if DCHECK_IS_ON()
+  mach_port_t object_name;
+  vm_region_basic_info_64 region_info;
+  mach_vm_size_t size = 1;
+  mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
+
+  // |ptr| doubles as the in/out address parameter; mach_vm_region() may move
+  // it to the start of the containing region.
+  kern_return_t kr = mach_vm_region(
+      mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&ptr), &size,
+      VM_REGION_BASIC_INFO_64, reinterpret_cast<vm_region_info_t>(&region_info),
+      &count, &object_name);
+  DCHECK_EQ(kr, KERN_SUCCESS);
+  DCHECK_EQ(region_info.protection, VM_PROT_READ);
+#endif  // DCHECK_IS_ON()
+}
+#endif  // defined(OS_LINUX) || (defined(OS_MACOSX) && !defined(OS_IOS))
+
+}  // namespace base
diff --git a/src/base/memory/protected_memory_unittest.cc b/src/base/memory/protected_memory_unittest.cc
new file mode 100644
index 0000000..b7daed3
--- /dev/null
+++ b/src/base/memory/protected_memory_unittest.cc
@@ -0,0 +1,126 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/protected_memory.h"
+#include "base/cfi_buildflags.h"
+#include "base/memory/protected_memory_cfi.h"
+#include "base/synchronization/lock.h"
+#include "base/test/gtest_util.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+// Trivially-constructible payload used as the protected variable in tests.
+struct Data {
+  Data() = default;
+  Data(int foo_) : foo(foo_) {}
+  int foo;
+};
+
+}  // namespace
+
+class ProtectedMemoryTest : public ::testing::Test {
+ protected:
+  // Run tests one at a time. Some of the negative tests can not be made thread
+  // safe.
+  void SetUp() final { lock.Acquire(); }
+  void TearDown() final { lock.Release(); }
+
+  // Held for the full duration of each test to serialize execution.
+  Lock lock;
+};
+
+// Globals under test must live in the protected section (see header comment).
+PROTECTED_MEMORY_SECTION ProtectedMemory<int> init;
+
+// The Initializer helper must set the value while implicitly toggling
+// write protection.
+TEST_F(ProtectedMemoryTest, Initializer) {
+  static ProtectedMemory<int>::Initializer I(&init, 4);
+  EXPECT_EQ(*init, 4);
+}
+
+PROTECTED_MEMORY_SECTION ProtectedMemory<Data> data;
+
+// Writing through an explicit AutoWritableMemory scope must succeed.
+TEST_F(ProtectedMemoryTest, Basic) {
+  AutoWritableMemory writer = AutoWritableMemory::Create(data);
+  data->foo = 5;
+  EXPECT_EQ(data->foo, 5);
+}
+
+#if defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+
+#if PROTECTED_MEMORY_ENABLED
+// Writing before any AutoWritableMemory exists must crash.
+TEST_F(ProtectedMemoryTest, ReadOnlyOnStart) {
+  EXPECT_DEATH({ data->foo = 6; AutoWritableMemory::Create(data); }, "");
+}
+
+// Protection must be restored once the writer scope ends.
+TEST_F(ProtectedMemoryTest, ReadOnlyAfterSetWritable) {
+  { AutoWritableMemory writer = AutoWritableMemory::Create(data); }
+  EXPECT_DEATH({ data->foo = 7; }, "");
+}
+
+// AssertMemoryIsReadOnly must pass for protected globals and DCHECK-fail for
+// an ordinary (writable) stack instance.
+TEST_F(ProtectedMemoryTest, AssertMemoryIsReadOnly) {
+  AssertMemoryIsReadOnly(&data->foo);
+  { AutoWritableMemory::Create(data); }
+  AssertMemoryIsReadOnly(&data->foo);
+
+  ProtectedMemory<Data> writable_data;
+  EXPECT_DCHECK_DEATH({ AssertMemoryIsReadOnly(&writable_data->foo); });
+}
+
+// Creating a writer for a ProtectedMemory outside the section must DCHECK.
+TEST_F(ProtectedMemoryTest, FailsIfDefinedOutsideOfProtectMemoryRegion) {
+  ProtectedMemory<Data> data;
+  EXPECT_DCHECK_DEATH({ AutoWritableMemory::Create(data); });
+}
+
+// UnsanitizedCfiCall must DCHECK when the pointer is not in the section.
+TEST_F(ProtectedMemoryTest, UnsanitizedCfiCallOutsideOfProtectedMemoryRegion) {
+  ProtectedMemory<void (*)(void)> data;
+  EXPECT_DCHECK_DEATH({ UnsanitizedCfiCall(data)(); });
+}
+#endif  // PROTECTED_MEMORY_ENABLED
+
+namespace {
+
+struct BadIcall {
+  BadIcall() = default;
+  BadIcall(int (*fp_)(int)) : fp(fp_) {}
+  int (*fp)(int);
+};
+
+// Deliberately returns unsigned int so its type does not match the
+// int (*)(int) slot it is stored in; a CFI-icall check would reject calling
+// it through |fp| (hence the reinterpret_casts in the tests below).
+unsigned int bad_icall(int i) {
+  return 4 + i;
+}
+
+}  // namespace
+
+PROTECTED_MEMORY_SECTION ProtectedMemory<BadIcall> icall_pm1;
+
+// UnsanitizedCfiCall on a member pointer must bypass CFI, while a direct
+// call through the mismatched pointer dies when CFI enforcement is on.
+TEST_F(ProtectedMemoryTest, BadMemberCall) {
+  static ProtectedMemory<BadIcall>::Initializer I(
+      &icall_pm1, BadIcall(reinterpret_cast<int (*)(int)>(&bad_icall)));
+
+  EXPECT_EQ(UnsanitizedCfiCall(icall_pm1, &BadIcall::fp)(1), 5);
+#if !BUILDFLAG(CFI_ICALL_CHECK)
+  EXPECT_EQ(icall_pm1->fp(1), 5);
+#elif BUILDFLAG(CFI_ENFORCEMENT_TRAP) || BUILDFLAG(CFI_ENFORCEMENT_DIAGNOSTIC)
+  EXPECT_DEATH({ icall_pm1->fp(1); }, "");
+#endif
+}
+
+PROTECTED_MEMORY_SECTION ProtectedMemory<int (*)(int)> icall_pm2;
+
+// Same as BadMemberCall but for a bare function pointer rather than a member.
+TEST_F(ProtectedMemoryTest, BadFnPtrCall) {
+  static ProtectedMemory<int (*)(int)>::Initializer I(
+      &icall_pm2, reinterpret_cast<int (*)(int)>(&bad_icall));
+
+  EXPECT_EQ(UnsanitizedCfiCall(icall_pm2)(1), 5);
+#if !BUILDFLAG(CFI_ICALL_CHECK)
+  EXPECT_EQ((*icall_pm2)(1), 5);
+#elif BUILDFLAG(CFI_ENFORCEMENT_TRAP) || BUILDFLAG(CFI_ENFORCEMENT_DIAGNOSTIC)
+  EXPECT_DEATH({ (*icall_pm2)(1); }, "");
+#endif
+}
+
+#endif  // defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+
+}  // namespace base
diff --git a/src/base/memory/protected_memory_win.cc b/src/base/memory/protected_memory_win.cc
new file mode 100644
index 0000000..4927412
--- /dev/null
+++ b/src/base/memory/protected_memory_win.cc
@@ -0,0 +1,51 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/protected_memory.h"
+
+#include <windows.h>
+
+#include "base/process/process_metrics.h"
+#include "base/synchronization/lock.h"
+#include "build/build_config.h"
+#include "starboard/types.h"
+
+namespace base {
+
+namespace {
+
+// Applies protection |prot| to every page overlapping [start, end).
+// |start| is rounded down to its page boundary; VirtualProtect requires the
+// previous protection to be returned through |old_prot| even when unused.
+// Returns true on success.
+bool SetMemory(void* start, void* end, DWORD prot) {
+  DCHECK(end > start);
+  const uintptr_t page_mask = ~(base::GetPageSize() - 1);
+  const uintptr_t page_start = reinterpret_cast<uintptr_t>(start) & page_mask;
+  DWORD old_prot;
+  return VirtualProtect(reinterpret_cast<void*>(page_start),
+                        reinterpret_cast<uintptr_t>(end) - page_start, prot,
+                        &old_prot) != 0;
+}
+
+}  // namespace
+
+// Makes [start, end) readable and writable (page granularity).
+bool AutoWritableMemory::SetMemoryReadWrite(void* start, void* end) {
+  return SetMemory(start, end, PAGE_READWRITE);
+}
+
+// Makes [start, end) read-only (page granularity).
+bool AutoWritableMemory::SetMemoryReadOnly(void* start, void* end) {
+  return SetMemory(start, end, PAGE_READONLY);
+}
+
+// Queries the page containing |ptr| with VirtualQuery and DCHECKs that its
+// current protection is exactly PAGE_READONLY.
+void AssertMemoryIsReadOnly(const void* ptr) {
+#if DCHECK_IS_ON()
+  const uintptr_t page_mask = ~(base::GetPageSize() - 1);
+  const uintptr_t page_start = reinterpret_cast<uintptr_t>(ptr) & page_mask;
+
+  MEMORY_BASIC_INFORMATION info;
+  SIZE_T result =
+      VirtualQuery(reinterpret_cast<LPCVOID>(page_start), &info, sizeof(info));
+  // VirtualQuery returns the number of bytes written; 0 means failure.
+  DCHECK_GT(result, 0U);
+  DCHECK(info.Protect == PAGE_READONLY);
+#endif  // DCHECK_IS_ON()
+}
+
+}  // namespace base
diff --git a/src/base/memory/ptr_util.h b/src/base/memory/ptr_util.h
new file mode 100644
index 0000000..a0809a8
--- /dev/null
+++ b/src/base/memory/ptr_util.h
@@ -0,0 +1,25 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_PTR_UTIL_H_
+#define BASE_MEMORY_PTR_UTIL_H_
+
+#include <memory>
+#include <utility>
+
+#include "base/cpp14oncpp11.h"
+
+namespace base {
+
+// Helper to transfer ownership of a raw pointer to a std::unique_ptr<T>.
+// Note that std::unique_ptr<T> has very different semantics from
+// std::unique_ptr<T[]>: do not use this helper for array allocations.
+// Example: auto owned = base::WrapUnique(SomeLegacyApiReturningRawPtr());
+// When allocating and wrapping in one step, prefer std::make_unique instead.
+template <typename T>
+std::unique_ptr<T> WrapUnique(T* ptr) {
+  return std::unique_ptr<T>(ptr);
+}
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_PTR_UTIL_H_
diff --git a/src/base/memory/ptr_util_unittest.cc b/src/base/memory/ptr_util_unittest.cc
new file mode 100644
index 0000000..a7c852d
--- /dev/null
+++ b/src/base/memory/ptr_util_unittest.cc
@@ -0,0 +1,38 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/ptr_util.h"
+
+#include "starboard/types.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+// Test helper: tracks the number of live instances via a static counter so
+// the test can observe when destruction happens.
+class DeleteCounter {
+ public:
+  DeleteCounter() { ++count_; }
+  ~DeleteCounter() { --count_; }
+
+  static size_t count() { return count_; }
+
+ private:
+  // Number of currently-live DeleteCounter instances.
+  static size_t count_;
+};
+
+size_t DeleteCounter::count_ = 0;
+
+}  // namespace
+
+// WrapUnique must take ownership without constructing a new object, and the
+// resulting unique_ptr must destroy the object on reset().
+TEST(PtrUtilTest, WrapUnique) {
+  EXPECT_EQ(0u, DeleteCounter::count());
+  DeleteCounter* counter = new DeleteCounter;
+  EXPECT_EQ(1u, DeleteCounter::count());
+  std::unique_ptr<DeleteCounter> owned_counter = WrapUnique(counter);
+  EXPECT_EQ(1u, DeleteCounter::count());
+  owned_counter.reset();
+  EXPECT_EQ(0u, DeleteCounter::count());
+}
+}  // namespace base
diff --git a/src/base/memory/raw_scoped_refptr_mismatch_checker.h b/src/base/memory/raw_scoped_refptr_mismatch_checker.h
index 7974f30..ab8b2ab 100644
--- a/src/base/memory/raw_scoped_refptr_mismatch_checker.h
+++ b/src/base/memory/raw_scoped_refptr_mismatch_checker.h
@@ -5,10 +5,9 @@
 #ifndef BASE_MEMORY_RAW_SCOPED_REFPTR_MISMATCH_CHECKER_H_
 #define BASE_MEMORY_RAW_SCOPED_REFPTR_MISMATCH_CHECKER_H_
 
-#include "base/memory/ref_counted.h"
+#include <type_traits>
+
 #include "base/template_util.h"
-#include "base/tuple.h"
-#include "build/build_config.h"
 
 // It is dangerous to post a task with a T* argument where T is a subtype of
 // RefCounted(Base|ThreadSafeBase), since by the time the parameter is used, the
@@ -23,103 +22,27 @@
 // Not for public consumption, so we wrap it in namespace internal.
 namespace internal {
 
+// Detects, via SFINAE on void_t, whether T exposes callable AddRef() and
+// Release() members — the RefCounted(ThreadSafe) interface. Primary template
+// is the false case.
+template <typename T, typename = void>
+struct IsRefCountedType : std::false_type {};
+
+// Specialization chosen only when both T::AddRef() and T::Release() are
+// well-formed expressions.
+template <typename T>
+struct IsRefCountedType<T,
+                        void_t<decltype(std::declval<T*>()->AddRef()),
+                               decltype(std::declval<T*>()->Release())>>
+    : std::true_type {};
+
 template <typename T>
 struct NeedsScopedRefptrButGetsRawPtr {
-#if defined(OS_WIN)
-  enum {
-    value = base::false_type::value
-  };
-#else
+  static_assert(!std::is_reference<T>::value,
+                "NeedsScopedRefptrButGetsRawPtr requires non-reference type.");
+
   enum {
     // Human readable translation: you needed to be a scoped_refptr if you are a
     // raw pointer type and are convertible to a RefCounted(Base|ThreadSafeBase)
     // type.
-    value = (is_pointer<T>::value &&
-             (is_convertible<T, subtle::RefCountedBase*>::value ||
-              is_convertible<T, subtle::RefCountedThreadSafeBase*>::value))
+    value = std::is_pointer<T>::value &&
+            IsRefCountedType<std::remove_pointer_t<T>>::value
   };
-#endif
-};
-
-template <typename Params>
-struct ParamsUseScopedRefptrCorrectly {
-  enum { value = 0 };
-};
-
-template <>
-struct ParamsUseScopedRefptrCorrectly<Tuple0> {
-  enum { value = 1 };
-};
-
-template <typename A>
-struct ParamsUseScopedRefptrCorrectly<Tuple1<A> > {
-  enum { value = !NeedsScopedRefptrButGetsRawPtr<A>::value };
-};
-
-template <typename A, typename B>
-struct ParamsUseScopedRefptrCorrectly<Tuple2<A, B> > {
-  enum { value = !(NeedsScopedRefptrButGetsRawPtr<A>::value ||
-                   NeedsScopedRefptrButGetsRawPtr<B>::value) };
-};
-
-template <typename A, typename B, typename C>
-struct ParamsUseScopedRefptrCorrectly<Tuple3<A, B, C> > {
-  enum { value = !(NeedsScopedRefptrButGetsRawPtr<A>::value ||
-                   NeedsScopedRefptrButGetsRawPtr<B>::value ||
-                   NeedsScopedRefptrButGetsRawPtr<C>::value) };
-};
-
-template <typename A, typename B, typename C, typename D>
-struct ParamsUseScopedRefptrCorrectly<Tuple4<A, B, C, D> > {
-  enum { value = !(NeedsScopedRefptrButGetsRawPtr<A>::value ||
-                   NeedsScopedRefptrButGetsRawPtr<B>::value ||
-                   NeedsScopedRefptrButGetsRawPtr<C>::value ||
-                   NeedsScopedRefptrButGetsRawPtr<D>::value) };
-};
-
-template <typename A, typename B, typename C, typename D, typename E>
-struct ParamsUseScopedRefptrCorrectly<Tuple5<A, B, C, D, E> > {
-  enum { value = !(NeedsScopedRefptrButGetsRawPtr<A>::value ||
-                   NeedsScopedRefptrButGetsRawPtr<B>::value ||
-                   NeedsScopedRefptrButGetsRawPtr<C>::value ||
-                   NeedsScopedRefptrButGetsRawPtr<D>::value ||
-                   NeedsScopedRefptrButGetsRawPtr<E>::value) };
-};
-
-template <typename A, typename B, typename C, typename D, typename E,
-          typename F>
-struct ParamsUseScopedRefptrCorrectly<Tuple6<A, B, C, D, E, F> > {
-  enum { value = !(NeedsScopedRefptrButGetsRawPtr<A>::value ||
-                   NeedsScopedRefptrButGetsRawPtr<B>::value ||
-                   NeedsScopedRefptrButGetsRawPtr<C>::value ||
-                   NeedsScopedRefptrButGetsRawPtr<D>::value ||
-                   NeedsScopedRefptrButGetsRawPtr<E>::value ||
-                   NeedsScopedRefptrButGetsRawPtr<F>::value) };
-};
-
-template <typename A, typename B, typename C, typename D, typename E,
-          typename F, typename G>
-struct ParamsUseScopedRefptrCorrectly<Tuple7<A, B, C, D, E, F, G> > {
-  enum { value = !(NeedsScopedRefptrButGetsRawPtr<A>::value ||
-                   NeedsScopedRefptrButGetsRawPtr<B>::value ||
-                   NeedsScopedRefptrButGetsRawPtr<C>::value ||
-                   NeedsScopedRefptrButGetsRawPtr<D>::value ||
-                   NeedsScopedRefptrButGetsRawPtr<E>::value ||
-                   NeedsScopedRefptrButGetsRawPtr<F>::value ||
-                   NeedsScopedRefptrButGetsRawPtr<G>::value) };
-};
-
-template <typename A, typename B, typename C, typename D, typename E,
-          typename F, typename G, typename H>
-struct ParamsUseScopedRefptrCorrectly<Tuple8<A, B, C, D, E, F, G, H> > {
-  enum { value = !(NeedsScopedRefptrButGetsRawPtr<A>::value ||
-                   NeedsScopedRefptrButGetsRawPtr<B>::value ||
-                   NeedsScopedRefptrButGetsRawPtr<C>::value ||
-                   NeedsScopedRefptrButGetsRawPtr<D>::value ||
-                   NeedsScopedRefptrButGetsRawPtr<E>::value ||
-                   NeedsScopedRefptrButGetsRawPtr<F>::value ||
-                   NeedsScopedRefptrButGetsRawPtr<G>::value ||
-                   NeedsScopedRefptrButGetsRawPtr<H>::value) };
 };
 
 }  // namespace internal
diff --git a/src/base/memory/read_only_shared_memory_region.cc b/src/base/memory/read_only_shared_memory_region.cc
new file mode 100644
index 0000000..a19a7aa
--- /dev/null
+++ b/src/base/memory/read_only_shared_memory_region.cc
@@ -0,0 +1,102 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Cobalt does not support multiple processes or shared memory.
+#if !defined(STARBOARD)
+
+#include "base/memory/read_only_shared_memory_region.h"
+
+#include <utility>
+
+#include "base/memory/shared_memory.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// static
+MappedReadOnlyRegion ReadOnlySharedMemoryRegion::Create(size_t size) {
+  subtle::PlatformSharedMemoryRegion handle =
+      subtle::PlatformSharedMemoryRegion::CreateWritable(size);
+  if (!handle.IsValid())
+    return {};
+
+  void* memory_ptr = nullptr;
+  size_t mapped_size = 0;
+  if (!handle.MapAt(0, handle.GetSize(), &memory_ptr, &mapped_size))
+    return {};
+
+  WritableSharedMemoryMapping mapping(memory_ptr, size, mapped_size,
+                                      handle.GetGUID());
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+  handle.ConvertToReadOnly(memory_ptr);
+#else
+  handle.ConvertToReadOnly();
+#endif  // defined(OS_MACOSX) && !defined(OS_IOS)
+  ReadOnlySharedMemoryRegion region(std::move(handle));
+
+  if (!region.IsValid() || !mapping.IsValid())
+    return {};
+
+  return {std::move(region), std::move(mapping)};
+}
+
+// static
+ReadOnlySharedMemoryRegion ReadOnlySharedMemoryRegion::Deserialize(
+    subtle::PlatformSharedMemoryRegion handle) {
+  return ReadOnlySharedMemoryRegion(std::move(handle));
+}
+
+// static
+subtle::PlatformSharedMemoryRegion
+ReadOnlySharedMemoryRegion::TakeHandleForSerialization(
+    ReadOnlySharedMemoryRegion region) {
+  return std::move(region.handle_);
+}
+
+ReadOnlySharedMemoryRegion::ReadOnlySharedMemoryRegion() = default;
+ReadOnlySharedMemoryRegion::ReadOnlySharedMemoryRegion(
+    ReadOnlySharedMemoryRegion&& region) = default;
+ReadOnlySharedMemoryRegion& ReadOnlySharedMemoryRegion::operator=(
+    ReadOnlySharedMemoryRegion&& region) = default;
+ReadOnlySharedMemoryRegion::~ReadOnlySharedMemoryRegion() = default;
+
+ReadOnlySharedMemoryRegion ReadOnlySharedMemoryRegion::Duplicate() const {
+  return ReadOnlySharedMemoryRegion(handle_.Duplicate());
+}
+
+ReadOnlySharedMemoryMapping ReadOnlySharedMemoryRegion::Map() const {
+  return MapAt(0, handle_.GetSize());
+}
+
+ReadOnlySharedMemoryMapping ReadOnlySharedMemoryRegion::MapAt(
+    off_t offset,
+    size_t size) const {
+  if (!IsValid())
+    return {};
+
+  void* memory = nullptr;
+  size_t mapped_size = 0;
+  if (!handle_.MapAt(offset, size, &memory, &mapped_size))
+    return {};
+
+  return ReadOnlySharedMemoryMapping(memory, size, mapped_size,
+                                     handle_.GetGUID());
+}
+
+bool ReadOnlySharedMemoryRegion::IsValid() const {
+  return handle_.IsValid();
+}
+
+ReadOnlySharedMemoryRegion::ReadOnlySharedMemoryRegion(
+    subtle::PlatformSharedMemoryRegion handle)
+    : handle_(std::move(handle)) {
+  if (handle_.IsValid()) {
+    CHECK_EQ(handle_.GetMode(),
+             subtle::PlatformSharedMemoryRegion::Mode::kReadOnly);
+  }
+}
+
+}  // namespace base
+
+#endif  // !defined(STARBOARD)
\ No newline at end of file
diff --git a/src/base/memory/read_only_shared_memory_region.h b/src/base/memory/read_only_shared_memory_region.h
new file mode 100644
index 0000000..837cdce
--- /dev/null
+++ b/src/base/memory/read_only_shared_memory_region.h
@@ -0,0 +1,135 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_READ_ONLY_SHARED_MEMORY_REGION_H_
+#define BASE_MEMORY_READ_ONLY_SHARED_MEMORY_REGION_H_
+
+// Cobalt does not support multiple processes or shared memory.
+#if !defined(STARBOARD)
+
+#include <utility>
+
+#include "base/macros.h"
+#include "base/memory/platform_shared_memory_region.h"
+#include "base/memory/shared_memory_mapping.h"
+
+namespace base {
+
+struct MappedReadOnlyRegion;
+
+// Starboard doesn't currently support multiple processes or shared memory.
+#if !defined(STARBOARD)
+// Scoped move-only handle to a region of platform shared memory. The instance
+// owns the platform handle it wraps. Mappings created by this region are
+// read-only. These mappings remain valid even after the region handle is moved
+// or destroyed.
+class BASE_EXPORT ReadOnlySharedMemoryRegion {
+ public:
+  using MappingType = ReadOnlySharedMemoryMapping;
+  // Creates a new ReadOnlySharedMemoryRegion instance of a given size along
+  // with the WritableSharedMemoryMapping which provides the only way to modify
+  // the content of the newly created region. The returned region and mapping
+  // are guaranteed to either be both valid or both invalid. Use
+  // |MappedReadOnlyRegion::IsValid()| as a shortcut for checking creation
+  // success.
+  //
+  // This means that the caller's process is the only process that can modify
+  // the region content. If you need to pass write access to another process,
+  // consider using WritableSharedMemoryRegion or UnsafeSharedMemoryRegion.
+  //
+  // This call will fail if the process does not have sufficient permissions to
+  // create a shared memory region itself. See
+  // mojo::CreateReadOnlySharedMemoryRegion in
+  // mojo/public/cpp/base/shared_memory_utils.h for creating a shared memory
+  // region from an unprivileged process where a broker must be used.
+  static MappedReadOnlyRegion Create(size_t size);
+
+  // Returns a ReadOnlySharedMemoryRegion built from a platform-specific handle
+  // that was taken from another ReadOnlySharedMemoryRegion instance. Returns an
+  // invalid region iff the |handle| is invalid. CHECK-fails if the |handle|
+  // isn't read-only.
+  // This should be used only by the code passing handles across process
+  // boundaries.
+  static ReadOnlySharedMemoryRegion Deserialize(
+      subtle::PlatformSharedMemoryRegion handle);
+
+  // Extracts a platform handle from the region. Ownership is transferred to the
+  // returned region object.
+  // This should be used only for sending the handle from the current process to
+  // another.
+  static subtle::PlatformSharedMemoryRegion TakeHandleForSerialization(
+      ReadOnlySharedMemoryRegion region);
+
+  // Default constructor initializes an invalid instance.
+  ReadOnlySharedMemoryRegion();
+
+  // Move operations are allowed.
+  ReadOnlySharedMemoryRegion(ReadOnlySharedMemoryRegion&&);
+  ReadOnlySharedMemoryRegion& operator=(ReadOnlySharedMemoryRegion&&);
+
+  // Destructor closes shared memory region if valid.
+  // All created mappings will remain valid.
+  ~ReadOnlySharedMemoryRegion();
+
+  // Duplicates the underlying platform handle and creates a new
+  // ReadOnlySharedMemoryRegion instance that owns this handle. Returns a valid
+  // ReadOnlySharedMemoryRegion on success, invalid otherwise. The current
+  // region instance remains valid in any case.
+  ReadOnlySharedMemoryRegion Duplicate() const;
+
+  // Maps the shared memory region into the caller's address space with
+  // read-only access. The mapped address is guaranteed to have an alignment of
+  // at least |subtle::PlatformSharedMemoryRegion::kMapMinimumAlignment|.
+  // Returns a valid ReadOnlySharedMemoryMapping instance on success, invalid
+  // otherwise.
+  ReadOnlySharedMemoryMapping Map() const;
+
+  // Same as above, but maps only |size| bytes of the shared memory region
+  // starting with the given |offset|. |offset| must be aligned to value of
+  // |SysInfo::VMAllocationGranularity()|. Returns an invalid mapping if
+  // requested bytes are out of the region limits.
+  ReadOnlySharedMemoryMapping MapAt(off_t offset, size_t size) const;
+
+  // Whether the underlying platform handle is valid.
+  bool IsValid() const;
+
+  // Returns the maximum mapping size that can be created from this region.
+  size_t GetSize() const {
+    DCHECK(IsValid());
+    return handle_.GetSize();
+  }
+
+  // Returns 128-bit GUID of the region.
+  const UnguessableToken& GetGUID() const {
+    DCHECK(IsValid());
+    return handle_.GetGUID();
+  }
+
+ private:
+  explicit ReadOnlySharedMemoryRegion(
+      subtle::PlatformSharedMemoryRegion handle);
+
+  subtle::PlatformSharedMemoryRegion handle_;
+
+  DISALLOW_COPY_AND_ASSIGN(ReadOnlySharedMemoryRegion);
+};
+#endif
+
+// Helper struct for return value of ReadOnlySharedMemoryRegion::Create().
+struct MappedReadOnlyRegion {
+  ReadOnlySharedMemoryRegion region;
+  WritableSharedMemoryMapping mapping;
+  // Helper function to check the return value of
+  // ReadOnlySharedMemoryRegion::Create(). |region| and |mapping| are either
+  // both valid or both invalid.
+  bool IsValid() {
+    DCHECK_EQ(region.IsValid(), mapping.IsValid());
+    return region.IsValid() && mapping.IsValid();
+  }
+};
+
+}  // namespace base
+
+#endif  // !defined(STARBOARD)
+#endif  // BASE_MEMORY_READ_ONLY_SHARED_MEMORY_REGION_H_
diff --git a/src/base/memory/ref_counted.cc b/src/base/memory/ref_counted.cc
index 31ad509..f3d35cf 100644
--- a/src/base/memory/ref_counted.cc
+++ b/src/base/memory/ref_counted.cc
@@ -4,92 +4,75 @@
 
 #include "base/memory/ref_counted.h"
 
-#include "base/logging.h"
 #include "base/threading/thread_collision_warner.h"
 
 namespace base {
+namespace {
+
+#if DCHECK_IS_ON()
+std::atomic_int g_cross_thread_ref_count_access_allow_count(0);
+#endif
+
+}  // namespace
 
 namespace subtle {
 
-RefCountedBase::RefCountedBase()
-    : ref_count_(0)
-#ifndef NDEBUG
-    , in_dtor_(false)
-#endif
-    {
-}
-
-RefCountedBase::~RefCountedBase() {
-#ifndef NDEBUG
-  DCHECK(in_dtor_) << "RefCounted object deleted without calling Release()";
-#endif
-}
-
-void RefCountedBase::AddRef() const {
-  // TODO(maruel): Add back once it doesn't assert 500 times/sec.
-  // Current thread books the critical section "AddRelease" without release it.
-  // DFAKE_SCOPED_LOCK_THREAD_LOCKED(add_release_);
-#ifndef NDEBUG
-  DCHECK(!in_dtor_);
-#endif
-  ++ref_count_;
-}
-
-bool RefCountedBase::Release() const {
-  // TODO(maruel): Add back once it doesn't assert 500 times/sec.
-  // Current thread books the critical section "AddRelease" without release it.
-  // DFAKE_SCOPED_LOCK_THREAD_LOCKED(add_release_);
-#ifndef NDEBUG
-  DCHECK(!in_dtor_);
-#endif
-  if (--ref_count_ == 0) {
-#ifndef NDEBUG
-    in_dtor_ = true;
-#endif
-    return true;
-  }
-  return false;
-}
-
 bool RefCountedThreadSafeBase::HasOneRef() const {
-  return AtomicRefCountIsOne(
-      &const_cast<RefCountedThreadSafeBase*>(this)->ref_count_);
+  return ref_count_.IsOne();
 }
 
-RefCountedThreadSafeBase::RefCountedThreadSafeBase() : ref_count_(0) {
-#ifndef NDEBUG
-  in_dtor_ = false;
-#endif
+bool RefCountedThreadSafeBase::HasAtLeastOneRef() const {
+  return !ref_count_.IsZero();
 }
 
+#if DCHECK_IS_ON()
 RefCountedThreadSafeBase::~RefCountedThreadSafeBase() {
-#ifndef NDEBUG
   DCHECK(in_dtor_) << "RefCountedThreadSafe object deleted without "
                       "calling Release()";
-#endif
 }
-
-void RefCountedThreadSafeBase::AddRef() const {
-#ifndef NDEBUG
-  DCHECK(!in_dtor_);
 #endif
-  AtomicRefCountInc(&ref_count_);
-}
 
+// This is a security check. In 32-bit-archs, an attacker would run out of
+// address space after allocating at most 2^32 scoped_refptrs. This replicates
+// that boundary for 64-bit-archs.
+#if defined(ARCH_CPU_64_BITS)
+void RefCountedBase::AddRefImpl() const {
+  // Check if |ref_count_| overflows only on 64 bit archs since the number of
+  // objects may exceed 2^32.
+  // To avoid the binary size bloat, use non-inline function here.
+  CHECK(++ref_count_ > 0);
+}
+#endif
+
+#if !defined(ARCH_CPU_X86_FAMILY)
 bool RefCountedThreadSafeBase::Release() const {
-#ifndef NDEBUG
-  DCHECK(!in_dtor_);
-  DCHECK(!AtomicRefCountIsZero(&ref_count_));
-#endif
-  if (!AtomicRefCountDec(&ref_count_)) {
-#ifndef NDEBUG
-    in_dtor_ = true;
-#endif
-    return true;
-  }
-  return false;
+  return ReleaseImpl();
 }
+void RefCountedThreadSafeBase::AddRef() const {
+  AddRefImpl();
+}
+void RefCountedThreadSafeBase::AddRefWithCheck() const {
+  AddRefWithCheckImpl();
+}
+#endif
+
+#if DCHECK_IS_ON()
+bool RefCountedBase::CalledOnValidSequence() const {
+  return sequence_checker_.CalledOnValidSequence() ||
+         g_cross_thread_ref_count_access_allow_count.load() != 0;
+}
+#endif
 
 }  // namespace subtle
 
+#if DCHECK_IS_ON()
+ScopedAllowCrossThreadRefCountAccess::ScopedAllowCrossThreadRefCountAccess() {
+  ++g_cross_thread_ref_count_access_allow_count;
+}
+
+ScopedAllowCrossThreadRefCountAccess::~ScopedAllowCrossThreadRefCountAccess() {
+  --g_cross_thread_ref_count_access_allow_count;
+}
+#endif
+
 }  // namespace base
diff --git a/src/base/memory/ref_counted.h b/src/base/memory/ref_counted.h
index f5941e5..c19072a 100644
--- a/src/base/memory/ref_counted.h
+++ b/src/base/memory/ref_counted.h
@@ -5,33 +5,140 @@
 #ifndef BASE_MEMORY_REF_COUNTED_H_
 #define BASE_MEMORY_REF_COUNTED_H_
 
+#include <utility>
+
 #include "base/atomic_ref_count.h"
 #include "base/base_export.h"
 #include "base/compiler_specific.h"
+#include "base/gtest_prod_util.h"
 #include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/scoped_refptr.h"
+#include "base/sequence_checker.h"
 #include "base/threading/thread_collision_warner.h"
+#include "build/build_config.h"
+#include "starboard/types.h"
 
 namespace base {
-
 namespace subtle {
 
 class BASE_EXPORT RefCountedBase {
  public:
   bool HasOneRef() const { return ref_count_ == 1; }
+  bool HasAtLeastOneRef() const { return ref_count_ >= 1; }
 
  protected:
-  RefCountedBase();
-  ~RefCountedBase();
+  explicit RefCountedBase(StartRefCountFromZeroTag) {
+#if DCHECK_IS_ON()
+    sequence_checker_.DetachFromSequence();
+#endif
+  }
 
-  void AddRef() const;
+  explicit RefCountedBase(StartRefCountFromOneTag) : ref_count_(1) {
+#if DCHECK_IS_ON()
+    needs_adopt_ref_ = true;
+    sequence_checker_.DetachFromSequence();
+#endif
+  }
+
+  ~RefCountedBase() {
+#if DCHECK_IS_ON()
+    DCHECK(in_dtor_) << "RefCounted object deleted without calling Release()";
+#endif
+  }
+
+  void AddRef() const {
+    // TODO(maruel): Add back once it doesn't assert 500 times/sec.
+    // Current thread books the critical section "AddRelease"
+    // without release it.
+    // DFAKE_SCOPED_LOCK_THREAD_LOCKED(add_release_);
+#if DCHECK_IS_ON()
+    DCHECK(!in_dtor_);
+    DCHECK(!needs_adopt_ref_)
+        << "This RefCounted object is created with non-zero reference count."
+        << " The first reference to such a object has to be made by AdoptRef or"
+        << " MakeRefCounted.";
+    if (ref_count_ >= 1) {
+      DCHECK(CalledOnValidSequence());
+    }
+#endif
+
+    AddRefImpl();
+  }
 
   // Returns true if the object should self-delete.
-  bool Release() const;
+  bool Release() const {
+    --ref_count_;
+
+    // TODO(maruel): Add back once it doesn't assert 500 times/sec.
+    // Current thread books the critical section "AddRelease"
+    // without release it.
+    // DFAKE_SCOPED_LOCK_THREAD_LOCKED(add_release_);
+
+#if DCHECK_IS_ON()
+    DCHECK(!in_dtor_);
+    if (ref_count_ == 0)
+      in_dtor_ = true;
+
+    if (ref_count_ >= 1)
+      DCHECK(CalledOnValidSequence());
+    if (ref_count_ == 1)
+      sequence_checker_.DetachFromSequence();
+#endif
+
+    return ref_count_ == 0;
+  }
+
+  // Returns true if it is safe to read or write the object, from a thread
+  // safety standpoint. Should be DCHECK'd from the methods of RefCounted
+  // classes if there is a danger of objects being shared across threads.
+  //
+  // This produces fewer false positives than adding a separate SequenceChecker
+  // into the subclass, because it automatically detaches from the sequence when
+  // the reference count is 1 (and never fails if there is only one reference).
+  //
+  // This means unlike a separate SequenceChecker, it will permit a singly
+  // referenced object to be passed between threads (not holding a reference on
+  // the sending thread), but will trap if the sending thread holds onto a
+  // reference, or if the object is accessed from multiple threads
+  // simultaneously.
+  bool IsOnValidSequence() const {
+#if DCHECK_IS_ON()
+    return ref_count_ <= 1 || CalledOnValidSequence();
+#else
+    return true;
+#endif
+  }
 
  private:
-  mutable int ref_count_;
-#ifndef NDEBUG
-  mutable bool in_dtor_;
+  template <typename U>
+  friend scoped_refptr<U> base::AdoptRef(U*);
+
+  FRIEND_TEST_ALL_PREFIXES(RefCountedDeathTest, TestOverflowCheck);
+
+  void Adopted() const {
+#if DCHECK_IS_ON()
+    DCHECK(needs_adopt_ref_);
+    needs_adopt_ref_ = false;
+#endif
+  }
+
+#if defined(ARCH_CPU_64_BITS)
+  void AddRefImpl() const;
+#else
+  void AddRefImpl() const { ++ref_count_; }
+#endif
+
+#if DCHECK_IS_ON()
+  bool CalledOnValidSequence() const;
+#endif
+
+  mutable uint32_t ref_count_ = 0;
+
+#if DCHECK_IS_ON()
+  mutable bool needs_adopt_ref_ = false;
+  mutable bool in_dtor_ = false;
+  mutable SequenceChecker sequence_checker_;
 #endif
 
   DFAKE_MUTEX(add_release_);
@@ -42,20 +149,89 @@
 class BASE_EXPORT RefCountedThreadSafeBase {
  public:
   bool HasOneRef() const;
+  bool HasAtLeastOneRef() const;
 
  protected:
-  RefCountedThreadSafeBase();
+  explicit constexpr RefCountedThreadSafeBase(StartRefCountFromZeroTag) {}
+  explicit CONSTEXPR RefCountedThreadSafeBase(StartRefCountFromOneTag)
+      : ref_count_(1) {
+#if DCHECK_IS_ON()
+    needs_adopt_ref_ = true;
+#endif
+  }
+
+#if DCHECK_IS_ON()
   ~RefCountedThreadSafeBase();
+#else
+  ~RefCountedThreadSafeBase() = default;
+#endif
 
-  void AddRef() const;
-
+// Release and AddRef are suitable for inlining on X86 because they generate
+// very small code sequences. On other platforms (ARM), it causes a size
+// regression and is probably not worth it.
+#if defined(ARCH_CPU_X86_FAMILY)
+  // Returns true if the object should self-delete.
+  bool Release() const { return ReleaseImpl(); }
+  void AddRef() const { AddRefImpl(); }
+  void AddRefWithCheck() const { AddRefWithCheckImpl(); }
+#else
   // Returns true if the object should self-delete.
   bool Release() const;
+  void AddRef() const;
+  void AddRefWithCheck() const;
+#endif
 
  private:
-  mutable AtomicRefCount ref_count_;
-#ifndef NDEBUG
-  mutable bool in_dtor_;
+  template <typename U>
+  friend scoped_refptr<U> base::AdoptRef(U*);
+
+  void Adopted() const {
+#if DCHECK_IS_ON()
+    DCHECK(needs_adopt_ref_);
+    needs_adopt_ref_ = false;
+#endif
+  }
+
+  ALWAYS_INLINE void AddRefImpl() const {
+#if DCHECK_IS_ON()
+    DCHECK(!in_dtor_);
+    DCHECK(!needs_adopt_ref_)
+        << "This RefCounted object is created with non-zero reference count."
+        << " The first reference to such a object has to be made by AdoptRef or"
+        << " MakeRefCounted.";
+#endif
+    ref_count_.Increment();
+  }
+
+  ALWAYS_INLINE void AddRefWithCheckImpl() const {
+#if DCHECK_IS_ON()
+    DCHECK(!in_dtor_);
+    DCHECK(!needs_adopt_ref_)
+        << "This RefCounted object is created with non-zero reference count."
+        << " The first reference to such a object has to be made by AdoptRef or"
+        << " MakeRefCounted.";
+#endif
+    CHECK(ref_count_.Increment() > 0);
+  }
+
+  ALWAYS_INLINE bool ReleaseImpl() const {
+#if DCHECK_IS_ON()
+    DCHECK(!in_dtor_);
+    DCHECK(!ref_count_.IsZero());
+#endif
+    if (!ref_count_.Decrement()) {
+#if DCHECK_IS_ON()
+      in_dtor_ = true;
+#endif
+      return true;
+    }
+    return false;
+  }
+
+  mutable AtomicRefCount ref_count_{0};
+#if DCHECK_IS_ON()
+  mutable bool needs_adopt_ref_ = false;
+  mutable bool in_dtor_ = false;
 #endif
 
   DISALLOW_COPY_AND_ASSIGN(RefCountedThreadSafeBase);
@@ -63,9 +239,30 @@
 
 }  // namespace subtle
 
+// ScopedAllowCrossThreadRefCountAccess disables the check documented on
+// RefCounted below for rare pre-existing use cases where thread-safety was
+// guaranteed through other means (e.g. explicit sequencing of calls across
+// execution sequences when bouncing between threads in order). New callers
+// should refrain from using this (callsites handling thread-safety through
+// locks should use RefCountedThreadSafe per the overhead of its atomics being
+// negligible compared to locks anyways and callsites doing explicit sequencing
+// should properly std::move() the ref to avoid hitting this check).
+// TODO(tzik): Cleanup existing use cases and remove
+// ScopedAllowCrossThreadRefCountAccess.
+class BASE_EXPORT ScopedAllowCrossThreadRefCountAccess final {
+ public:
+#if DCHECK_IS_ON()
+  ScopedAllowCrossThreadRefCountAccess();
+  ~ScopedAllowCrossThreadRefCountAccess();
+#else
+  ScopedAllowCrossThreadRefCountAccess() {}
+  ~ScopedAllowCrossThreadRefCountAccess() {}
+#endif
+};
+
 //
 // A base class for reference counted classes.  Otherwise, known as a cheap
-// knock-off of WebKit's RefCounted<T> class.  To use this guy just extend your
+// knock-off of WebKit's RefCounted<T> class.  To use this, just extend your
 // class from it like so:
 //
 //   class MyFoo : public base::RefCounted<MyFoo> {
@@ -75,12 +272,59 @@
 //     ~MyFoo();
 //   };
 //
-// You should always make your destructor private, to avoid any code deleting
+// You should always make your destructor non-public, to avoid any code deleting
 // the object accidently while there are references to it.
-template <class T>
+//
+//
+// The ref count manipulation of RefCounted is NOT thread safe and has DCHECKs
+// to trap unsafe cross thread usage. A subclass instance of RefCounted can be
+// passed to another execution sequence only when its ref count is 1. If the ref
+// count is more than 1, the RefCounted class verifies the ref updates are made
+// on the same execution sequence as the previous ones. The subclass can also
+// manually call IsOnValidSequence to trap other non-thread-safe accesses; see
+// the documentation for that method.
+//
+//
+// The reference count starts from zero by default, and we intend to migrate
+// to start-from-one ref count. Put REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE() to
+// the ref counted class to opt-in.
+//
+// If an object has a start-from-one ref count, the first scoped_refptr needs
+// to be created by base::AdoptRef() or base::MakeRefCounted(). We can use
+// base::MakeRefCounted() to create both types of ref counted objects.
+//
+// The motivations to use start-from-one ref count are:
+//  - Start-from-one ref count doesn't need the ref count increment for the
+//    first reference.
+//  - It can detect an invalid object acquisition for a being-deleted object
+//    that has zero ref count. That tends to happen on custom deleter that
+//    delays the deletion.
+//    TODO(tzik): Implement invalid acquisition detection.
+//  - Behavior parity to Blink's WTF::RefCounted, whose count starts from one.
+//    And start-from-one ref count is a step to merge WTF::RefCounted into
+//    base::RefCounted.
+//
+#define REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE()             \
+  static constexpr ::base::subtle::StartRefCountFromOneTag \
+      kRefCountPreference = ::base::subtle::kStartRefCountFromOneTag
+
+template <class T, typename Traits>
+class RefCounted;
+
+template <typename T>
+struct DefaultRefCountedTraits {
+  static void Destruct(const T* x) {
+    RefCounted<T, DefaultRefCountedTraits>::DeleteInternal(x);
+  }
+};
+
+template <class T, typename Traits = DefaultRefCountedTraits<T>>
 class RefCounted : public subtle::RefCountedBase {
  public:
-  RefCounted() {}
+  static constexpr subtle::StartRefCountFromZeroTag kRefCountPreference =
+      subtle::kStartRefCountFromZeroTag;
+
+  RefCounted() : subtle::RefCountedBase(T::kRefCountPreference) {}
 
   void AddRef() const {
     subtle::RefCountedBase::AddRef();
@@ -88,15 +332,26 @@
 
   void Release() const {
     if (subtle::RefCountedBase::Release()) {
-      delete static_cast<const T*>(this);
+      // Prune the code paths which the static analyzer may take to simulate
+      // object destruction. Use-after-free errors aren't possible given the
+      // lifetime guarantees of the refcounting system.
+      ANALYZER_SKIP_THIS_PATH();
+
+      Traits::Destruct(static_cast<const T*>(this));
     }
   }
 
  protected:
-  ~RefCounted() {}
+  ~RefCounted() = default;
 
  private:
-  DISALLOW_COPY_AND_ASSIGN(RefCounted<T>);
+  friend struct DefaultRefCountedTraits<T>;
+  template <typename U>
+  static void DeleteInternal(const U* x) {
+    delete x;
+  }
+
+  DISALLOW_COPY_AND_ASSIGN(RefCounted);
 };
 
 // Forward declaration.
@@ -127,27 +382,44 @@
 //    private:
 //     friend class base::RefCountedThreadSafe<MyFoo>;
 //     ~MyFoo();
+//
+// We can use REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE() with RefCountedThreadSafe
+// too. See the comment above the RefCounted definition for details.
 template <class T, typename Traits = DefaultRefCountedThreadSafeTraits<T> >
 class RefCountedThreadSafe : public subtle::RefCountedThreadSafeBase {
  public:
-  RefCountedThreadSafe() {}
+  static constexpr subtle::StartRefCountFromZeroTag kRefCountPreference =
+      subtle::kStartRefCountFromZeroTag;
 
-  void AddRef() const {
-    subtle::RefCountedThreadSafeBase::AddRef();
-  }
+  explicit RefCountedThreadSafe()
+      : subtle::RefCountedThreadSafeBase(T::kRefCountPreference) {}
+
+  void AddRef() const { AddRefImpl(T::kRefCountPreference); }
 
   void Release() const {
     if (subtle::RefCountedThreadSafeBase::Release()) {
+      ANALYZER_SKIP_THIS_PATH();
       Traits::Destruct(static_cast<const T*>(this));
     }
   }
 
  protected:
-  ~RefCountedThreadSafe() {}
+  ~RefCountedThreadSafe() = default;
 
  private:
   friend struct DefaultRefCountedThreadSafeTraits<T>;
-  static void DeleteInternal(const T* x) { delete x; }
+  template <typename U>
+  static void DeleteInternal(const U* x) {
+    delete x;
+  }
+
+  void AddRefImpl(subtle::StartRefCountFromZeroTag) const {
+    subtle::RefCountedThreadSafeBase::AddRef();
+  }
+
+  void AddRefImpl(subtle::StartRefCountFromOneTag) const {
+    subtle::RefCountedThreadSafeBase::AddRefWithCheck();
+  }
 
   DISALLOW_COPY_AND_ASSIGN(RefCountedThreadSafe);
 };
@@ -162,217 +434,15 @@
  public:
   RefCountedData() : data() {}
   RefCountedData(const T& in_value) : data(in_value) {}
+  RefCountedData(T&& in_value) : data(std::move(in_value)) {}
 
   T data;
 
  private:
   friend class base::RefCountedThreadSafe<base::RefCountedData<T> >;
-  ~RefCountedData() {}
+  ~RefCountedData() = default;
 };
 
 }  // namespace base
 
-//
-// A smart pointer class for reference counted objects.  Use this class instead
-// of calling AddRef and Release manually on a reference counted object to
-// avoid common memory leaks caused by forgetting to Release an object
-// reference.  Sample usage:
-//
-//   class MyFoo : public RefCounted<MyFoo> {
-//    ...
-//   };
-//
-//   void some_function() {
-//     scoped_refptr<MyFoo> foo = new MyFoo();
-//     foo->Method(param);
-//     // |foo| is released when this function returns
-//   }
-//
-//   void some_other_function() {
-//     scoped_refptr<MyFoo> foo = new MyFoo();
-//     ...
-//     foo = NULL;  // explicitly releases |foo|
-//     ...
-//     if (foo)
-//       foo->Method(param);
-//   }
-//
-// The above examples show how scoped_refptr<T> acts like a pointer to T.
-// Given two scoped_refptr<T> classes, it is also possible to exchange
-// references between the two objects, like so:
-//
-//   {
-//     scoped_refptr<MyFoo> a = new MyFoo();
-//     scoped_refptr<MyFoo> b;
-//
-//     b.swap(a);
-//     // now, |b| references the MyFoo object, and |a| references NULL.
-//   }
-//
-// To make both |a| and |b| in the above example reference the same MyFoo
-// object, simply use the assignment operator:
-//
-//   {
-//     scoped_refptr<MyFoo> a = new MyFoo();
-//     scoped_refptr<MyFoo> b;
-//
-//     b = a;
-//     // now, |a| and |b| each own a reference to the same MyFoo object.
-//   }
-//
-template <class T>
-class scoped_refptr {
- public:
-  typedef T element_type;
-
-  scoped_refptr() : ptr_(NULL) {
-  }
-
-  scoped_refptr(T* p) : ptr_(p) {
-    if (ptr_)
-      ptr_->AddRef();
-  }
-
-  scoped_refptr(const scoped_refptr<T>& r) : ptr_(r.ptr_) {
-    if (ptr_)
-      ptr_->AddRef();
-  }
-
-  template <typename U>
-  scoped_refptr(const scoped_refptr<U>& r) : ptr_(r.get()) {
-    if (ptr_)
-      ptr_->AddRef();
-  }
-
-  ~scoped_refptr() {
-    if (ptr_)
-      ptr_->Release();
-  }
-
-  T* get() const { return ptr_; }
-  operator T*() const { return ptr_; }
-  T* operator->() const {
-    DCHECK(ptr_);
-    return ptr_;
-  }
-  // The compiler requires an explicit * operator here.
-#if defined(__LB_PS3__)
-  T& operator*() const {
-    DCHECK(ptr_);
-    return *ptr_;
-  }
-#endif
-
-  scoped_refptr<T>& operator=(T* p) {
-    // AddRef first so that self assignment should work
-    if (p)
-      p->AddRef();
-    T* old_ptr = ptr_;
-    ptr_ = p;
-    if (old_ptr)
-      old_ptr->Release();
-    return *this;
-  }
-
-  scoped_refptr<T>& operator=(const scoped_refptr<T>& r) {
-    return *this = r.ptr_;
-  }
-
-  template <typename U>
-  scoped_refptr<T>& operator=(const scoped_refptr<U>& r) {
-    return *this = r.get();
-  }
-
-  void swap(T** pp) {
-    T* p = ptr_;
-    ptr_ = *pp;
-    *pp = p;
-  }
-
-  void swap(scoped_refptr<T>& r) {
-    swap(&r.ptr_);
-  }
-
- protected:
-  T* ptr_;
-};
-
-// Handy utility for creating a scoped_refptr<T> out of a T* explicitly without
-// having to retype all the template arguments
-template <typename T>
-scoped_refptr<T> make_scoped_refptr(T* t) {
-  return scoped_refptr<T>(t);
-}
-
-// Make scoped_reftr usable as key in base::hash_map.
-
-//
-// GCC-flavored hash functor.
-//
-#if defined(COMPILER_GCC) && defined(__LB_LINUX__)
-
-namespace __gnu_cxx {
-
-// Forward declaration in case <hash_fun.h> is not #include'd.
-template <typename Key>
-struct hash;
-
-template <typename T>
-struct hash<scoped_refptr<T> > {
-  size_t operator()(const scoped_refptr<T>& key) const {
-    return base_hash(key.get());
-  }
-
-  hash<T*> base_hash;
-};
-
-}  // namespace __gnu_cxx
-
-//
-// Dinkumware-flavored hash functor.
-//
-#else
-
-#if defined(COMPILER_MSVC)
-namespace stdext {
-#else
-namespace std {
-#endif
-
-// Forward declaration in case <xhash> is not #include'd.
-template <typename Key, typename Predicate>
-class hash_compare;
-
-template <typename T, typename Predicate>
-class hash_compare<scoped_refptr<T>, Predicate> {
- public:
-  typedef hash_compare<T*, Predicate> BaseHashCompare;
-
-  enum {
-    bucket_size = BaseHashCompare::bucket_size,
-#if !defined(COMPILER_MSVC)
-    min_buckets = BaseHashCompare::min_buckets,
-#endif
-  };
-
-  hash_compare() {}
-  hash_compare(Predicate predicate) : base_hash_compare_(predicate) {}
-
-  size_t operator()(const scoped_refptr<T>& key) const {
-    return base_hash_compare_(key.get());
-  }
-
-  bool operator()(const scoped_refptr<T>& lhs,
-                  const scoped_refptr<T>& rhs) const {
-    return base_hash_compare_(lhs.get(), rhs.get());
-  }
-
- private:
-  BaseHashCompare base_hash_compare_;
-};
-
-}  // namespace std[ext]
-
-#endif
-
 #endif  // BASE_MEMORY_REF_COUNTED_H_
diff --git a/src/base/memory/ref_counted_delete_on_sequence.h b/src/base/memory/ref_counted_delete_on_sequence.h
new file mode 100644
index 0000000..4a8ac74
--- /dev/null
+++ b/src/base/memory/ref_counted_delete_on_sequence.h
@@ -0,0 +1,90 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_REF_COUNTED_DELETE_ON_SEQUENCE_H_
+#define BASE_MEMORY_REF_COUNTED_DELETE_ON_SEQUENCE_H_
+
+#include <utility>
+
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/sequenced_task_runner.h"
+
+namespace base {
+
+// RefCountedDeleteOnSequence is similar to RefCountedThreadSafe, and ensures
+// that the object will be deleted on a specified sequence.
+//
+// Sample usage:
+// class Foo : public RefCountedDeleteOnSequence<Foo> {
+//
+//   Foo(scoped_refptr<SequencedTaskRunner> task_runner)
+//       : RefCountedDeleteOnSequence<Foo>(std::move(task_runner)) {}
+//   ...
+//  private:
+//   friend class RefCountedDeleteOnSequence<Foo>;
+//   friend class DeleteHelper<Foo>;
+//
+//   ~Foo();
+// };
+template <class T>
+class RefCountedDeleteOnSequence : public subtle::RefCountedThreadSafeBase {
+ public:
+  static constexpr subtle::StartRefCountFromZeroTag kRefCountPreference =
+      subtle::kStartRefCountFromZeroTag;
+
+  // A SequencedTaskRunner for the current sequence can be acquired by calling
+  // SequencedTaskRunnerHandle::Get().
+  RefCountedDeleteOnSequence(
+      scoped_refptr<SequencedTaskRunner> owning_task_runner)
+      : subtle::RefCountedThreadSafeBase(T::kRefCountPreference),
+        owning_task_runner_(std::move(owning_task_runner)) {
+    DCHECK(owning_task_runner_);
+  }
+
+  void AddRef() const { AddRefImpl(T::kRefCountPreference); }
+
+  void Release() const {
+    if (subtle::RefCountedThreadSafeBase::Release())
+      DestructOnSequence();
+  }
+
+ protected:
+  friend class DeleteHelper<RefCountedDeleteOnSequence>;
+  ~RefCountedDeleteOnSequence() = default;
+
+  SequencedTaskRunner* owning_task_runner() {
+    return owning_task_runner_.get();
+  }
+  const SequencedTaskRunner* owning_task_runner() const {
+    return owning_task_runner_.get();
+  }
+
+ private:
+  void DestructOnSequence() const {
+    const T* t = static_cast<const T*>(this);
+    if (owning_task_runner_->RunsTasksInCurrentSequence())
+      delete t;
+    else
+      owning_task_runner_->DeleteSoon(FROM_HERE, t);
+  }
+
+  void AddRefImpl(subtle::StartRefCountFromZeroTag) const {
+    subtle::RefCountedThreadSafeBase::AddRef();
+  }
+
+  void AddRefImpl(subtle::StartRefCountFromOneTag) const {
+    subtle::RefCountedThreadSafeBase::AddRefWithCheck();
+  }
+
+  const scoped_refptr<SequencedTaskRunner> owning_task_runner_;
+
+  DISALLOW_COPY_AND_ASSIGN(RefCountedDeleteOnSequence);
+};
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_REF_COUNTED_DELETE_ON_SEQUENCE_H_
diff --git a/src/base/memory/ref_counted_memory.cc b/src/base/memory/ref_counted_memory.cc
index b048a6e..9239ffc 100644
--- a/src/base/memory/ref_counted_memory.cc
+++ b/src/base/memory/ref_counted_memory.cc
@@ -4,20 +4,23 @@
 
 #include "base/memory/ref_counted_memory.h"
 
+#include <utility>
+
 #include "base/logging.h"
+#include "base/memory/read_only_shared_memory_region.h"
+#include "starboard/memory.h"
 
 namespace base {
 
 bool RefCountedMemory::Equals(
     const scoped_refptr<RefCountedMemory>& other) const {
-  return other.get() &&
-         size() == other->size() &&
-         (memcmp(front(), other->front(), size()) == 0);
+  return other.get() && size() == other->size() &&
+         (SbMemoryCompare(front(), other->front(), size()) == 0);
 }
 
-RefCountedMemory::RefCountedMemory() {}
+RefCountedMemory::RefCountedMemory() = default;
 
-RefCountedMemory::~RefCountedMemory() {}
+RefCountedMemory::~RefCountedMemory() = default;
 
 const unsigned char* RefCountedStaticMemory::front() const {
   return data_;
@@ -27,17 +30,22 @@
   return length_;
 }
 
-RefCountedStaticMemory::~RefCountedStaticMemory() {}
+RefCountedStaticMemory::~RefCountedStaticMemory() = default;
 
-RefCountedBytes::RefCountedBytes() {}
+RefCountedBytes::RefCountedBytes() = default;
 
 RefCountedBytes::RefCountedBytes(const std::vector<unsigned char>& initializer)
     : data_(initializer) {
 }
 
-RefCountedBytes* RefCountedBytes::TakeVector(
+RefCountedBytes::RefCountedBytes(const unsigned char* p, size_t size)
+    : data_(p, p + size) {}
+
+RefCountedBytes::RefCountedBytes(size_t size) : data_(size, 0) {}
+
+scoped_refptr<RefCountedBytes> RefCountedBytes::TakeVector(
     std::vector<unsigned char>* to_destroy) {
-  RefCountedBytes* bytes = new RefCountedBytes;
+  auto bytes = MakeRefCounted<RefCountedBytes>();
   bytes->data_.swap(*to_destroy);
   return bytes;
 }
@@ -45,33 +53,83 @@
 const unsigned char* RefCountedBytes::front() const {
   // STL will assert if we do front() on an empty vector, but calling code
   // expects a NULL.
-  return size() ? &data_.front() : NULL;
+  return size() ? &data_.front() : nullptr;
 }
 
 size_t RefCountedBytes::size() const {
   return data_.size();
 }
 
-RefCountedBytes::~RefCountedBytes() {}
+RefCountedBytes::~RefCountedBytes() = default;
 
-RefCountedString::RefCountedString() {}
+RefCountedString::RefCountedString() = default;
 
-RefCountedString::~RefCountedString() {}
+RefCountedString::~RefCountedString() = default;
 
 // static
-RefCountedString* RefCountedString::TakeString(std::string* to_destroy) {
-  RefCountedString* self = new RefCountedString;
+scoped_refptr<RefCountedString> RefCountedString::TakeString(
+    std::string* to_destroy) {
+  auto self = MakeRefCounted<RefCountedString>();
   to_destroy->swap(self->data_);
   return self;
 }
 
 const unsigned char* RefCountedString::front() const {
-  return data_.empty() ? NULL :
-         reinterpret_cast<const unsigned char*>(data_.data());
+  return data_.empty() ? nullptr
+                       : reinterpret_cast<const unsigned char*>(data_.data());
 }
 
 size_t RefCountedString::size() const {
   return data_.size();
 }
 
+// Cobalt does not support multiple processes or shared memory.
+#if !defined(STARBOARD)
+RefCountedSharedMemory::RefCountedSharedMemory(
+    std::unique_ptr<SharedMemory> shm,
+    size_t size)
+    : shm_(std::move(shm)), size_(size) {
+  DCHECK(shm_);
+  DCHECK(shm_->memory());
+  DCHECK_GT(size_, 0U);
+  DCHECK_LE(size_, shm_->mapped_size());
+}
+
+RefCountedSharedMemory::~RefCountedSharedMemory() = default;
+
+const unsigned char* RefCountedSharedMemory::front() const {
+  return static_cast<const unsigned char*>(shm_->memory());
+}
+
+size_t RefCountedSharedMemory::size() const {
+  return size_;
+}
+
+RefCountedSharedMemoryMapping::RefCountedSharedMemoryMapping(
+    ReadOnlySharedMemoryMapping mapping)
+    : mapping_(std::move(mapping)), size_(mapping_.size()) {
+  DCHECK_GT(size_, 0U);
+}
+
+RefCountedSharedMemoryMapping::~RefCountedSharedMemoryMapping() = default;
+
+const unsigned char* RefCountedSharedMemoryMapping::front() const {
+  return static_cast<const unsigned char*>(mapping_.memory());
+}
+
+size_t RefCountedSharedMemoryMapping::size() const {
+  return size_;
+}
+
+// static
+scoped_refptr<RefCountedSharedMemoryMapping>
+RefCountedSharedMemoryMapping::CreateFromWholeRegion(
+    const ReadOnlySharedMemoryRegion& region) {
+  ReadOnlySharedMemoryMapping mapping = region.Map();
+  if (!mapping.IsValid())
+    return nullptr;
+  return MakeRefCounted<RefCountedSharedMemoryMapping>(std::move(mapping));
+}
+#endif  // !defined(STARBOARD)
+
 }  //  namespace base
diff --git a/src/base/memory/ref_counted_memory.h b/src/base/memory/ref_counted_memory.h
index b3b79a7..dc09d0c 100644
--- a/src/base/memory/ref_counted_memory.h
+++ b/src/base/memory/ref_counted_memory.h
@@ -5,20 +5,29 @@
 #ifndef BASE_MEMORY_REF_COUNTED_MEMORY_H_
 #define BASE_MEMORY_REF_COUNTED_MEMORY_H_
 
+#include <memory>
 #include <string>
 #include <vector>
 
 #include "base/base_export.h"
-#include "base/compiler_specific.h"
+#include "base/macros.h"
 #include "base/memory/ref_counted.h"
+#include "base/memory/shared_memory.h"
+#include "base/memory/shared_memory_mapping.h"
+#include "starboard/types.h"
 
 namespace base {
 
-// A generic interface to memory. This object is reference counted because one
-// of its two subclasses own the data they carry, and we need to have
-// heterogeneous containers of these two types of memory.
+// Cobalt does not support multiple processes or shared memory.
+#if !defined(STARBOARD)
+class ReadOnlySharedMemoryRegion;
+#endif
+
+// A generic interface to memory. This object is reference counted because most
+// of its subclasses own the data they carry, and this interface needs to
+// support heterogeneous containers of these different types of memory.
 class BASE_EXPORT RefCountedMemory
-    : public base::RefCountedThreadSafe<RefCountedMemory> {
+    : public RefCountedThreadSafe<RefCountedMemory> {
  public:
   // Retrieves a pointer to the beginning of the data we point to. If the data
   // is empty, this will return NULL.
@@ -30,8 +39,13 @@
   // Returns true if |other| is byte for byte equal.
   bool Equals(const scoped_refptr<RefCountedMemory>& other) const;
 
+  // Handy method to simplify calling front() with a reinterpret_cast.
+  template<typename T> const T* front_as() const {
+    return reinterpret_cast<const T*>(front());
+  }
+
  protected:
-  friend class base::RefCountedThreadSafe<RefCountedMemory>;
+  friend class RefCountedThreadSafe<RefCountedMemory>;
   RefCountedMemory();
   virtual ~RefCountedMemory();
 };
@@ -40,17 +54,17 @@
 // matter.
 class BASE_EXPORT RefCountedStaticMemory : public RefCountedMemory {
  public:
-  RefCountedStaticMemory()
-      : data_(NULL), length_(0) {}
-  RefCountedStaticMemory(const unsigned char* data, size_t length)
-      : data_(length ? data : NULL), length_(length) {}
+  RefCountedStaticMemory() : data_(nullptr), length_(0) {}
+  RefCountedStaticMemory(const void* data, size_t length)
+      : data_(static_cast<const unsigned char*>(length ? data : nullptr)),
+        length_(length) {}
 
-  // Overridden from RefCountedMemory:
-  virtual const unsigned char* front() const override;
-  virtual size_t size() const override;
+  // RefCountedMemory:
+  const unsigned char* front() const override;
+  size_t size() const override;
 
  private:
-  virtual ~RefCountedStaticMemory();
+  ~RefCountedStaticMemory() override;
 
   const unsigned char* data_;
   size_t length_;
@@ -58,36 +72,52 @@
   DISALLOW_COPY_AND_ASSIGN(RefCountedStaticMemory);
 };
 
-// An implementation of RefCountedMemory, where we own our the data in a
+// An implementation of RefCountedMemory, where the data is stored in a STL
 // vector.
 class BASE_EXPORT RefCountedBytes : public RefCountedMemory {
  public:
   RefCountedBytes();
 
-  // Constructs a RefCountedBytes object by _copying_ from |initializer|.
-  RefCountedBytes(const std::vector<unsigned char>& initializer);
+  // Constructs a RefCountedBytes object by copying from |initializer|.
+  explicit RefCountedBytes(const std::vector<unsigned char>& initializer);
+
+  // Constructs a RefCountedBytes object by copying |size| bytes from |p|.
+  RefCountedBytes(const unsigned char* p, size_t size);
+
+  // Constructs a RefCountedBytes object by zero-initializing a new vector of
+  // |size| bytes.
+  explicit RefCountedBytes(size_t size);
 
   // Constructs a RefCountedBytes object by performing a swap. (To non
   // destructively build a RefCountedBytes, use the constructor that takes a
   // vector.)
-  static RefCountedBytes* TakeVector(std::vector<unsigned char>* to_destroy);
+  static scoped_refptr<RefCountedBytes> TakeVector(
+      std::vector<unsigned char>* to_destroy);
 
-  // Overridden from RefCountedMemory:
-  virtual const unsigned char* front() const override;
-  virtual size_t size() const override;
+  // RefCountedMemory:
+  const unsigned char* front() const override;
+  size_t size() const override;
 
   const std::vector<unsigned char>& data() const { return data_; }
   std::vector<unsigned char>& data() { return data_; }
 
+  // Non-const versions of front() and front_as() that are simply shorthand for
+  // data().data().
+  unsigned char* front() { return data_.data(); }
+  template <typename T>
+  T* front_as() {
+    return reinterpret_cast<T*>(front());
+  }
+
  private:
-  virtual ~RefCountedBytes();
+  ~RefCountedBytes() override;
 
   std::vector<unsigned char> data_;
 
   DISALLOW_COPY_AND_ASSIGN(RefCountedBytes);
 };
 
-// An implementation of RefCountedMemory, where the bytes are stored in an STL
+// An implementation of RefCountedMemory, where the bytes are stored in a STL
 // string. Use this if your data naturally arrives in that format.
 class BASE_EXPORT RefCountedString : public RefCountedMemory {
  public:
@@ -96,23 +126,76 @@
   // Constructs a RefCountedString object by performing a swap. (To non
   // destructively build a RefCountedString, use the default constructor and
   // copy into object->data()).
-  static RefCountedString* TakeString(std::string* to_destroy);
+  static scoped_refptr<RefCountedString> TakeString(std::string* to_destroy);
 
-  // Overridden from RefCountedMemory:
-  virtual const unsigned char* front() const override;
-  virtual size_t size() const override;
+  // RefCountedMemory:
+  const unsigned char* front() const override;
+  size_t size() const override;
 
   const std::string& data() const { return data_; }
   std::string& data() { return data_; }
 
  private:
-  virtual ~RefCountedString();
+  ~RefCountedString() override;
 
   std::string data_;
 
   DISALLOW_COPY_AND_ASSIGN(RefCountedString);
 };
 
+// Starboard doesn't currently support multiple processes or shared memory.
+#if !defined(STARBOARD)
+// An implementation of RefCountedMemory, where the bytes are stored in
+// SharedMemory.
+class BASE_EXPORT RefCountedSharedMemory : public RefCountedMemory {
+ public:
+  // Constructs a RefCountedMemory object by taking ownership of an already
+  // mapped SharedMemory object.
+  RefCountedSharedMemory(std::unique_ptr<SharedMemory> shm, size_t size);
+
+  // RefCountedMemory:
+  const unsigned char* front() const override;
+  size_t size() const override;
+
+ private:
+  ~RefCountedSharedMemory() override;
+
+  const std::unique_ptr<SharedMemory> shm_;
+  const size_t size_;
+
+  DISALLOW_COPY_AND_ASSIGN(RefCountedSharedMemory);
+};
+
+// Cobalt does not support multiple processes or shared memory.
+#if !defined(STARBOARD)
+// An implementation of RefCountedMemory, where the bytes are stored in
+// ReadOnlySharedMemoryMapping.
+class BASE_EXPORT RefCountedSharedMemoryMapping : public RefCountedMemory {
+ public:
+  // Constructs a RefCountedMemory object by taking ownership of an already
+  // mapped ReadOnlySharedMemoryMapping object.
+  explicit RefCountedSharedMemoryMapping(ReadOnlySharedMemoryMapping mapping);
+
+  // Convenience method to map all of |region| and take ownership of the
+  // mapping. Returns an empty scoped_refptr if the map operation fails.
+  static scoped_refptr<RefCountedSharedMemoryMapping> CreateFromWholeRegion(
+      const ReadOnlySharedMemoryRegion& region);
+
+  // RefCountedMemory:
+  const unsigned char* front() const override;
+  size_t size() const override;
+
+ private:
+  ~RefCountedSharedMemoryMapping() override;
+
+  const ReadOnlySharedMemoryMapping mapping_;
+  const size_t size_;
+
+  DISALLOW_COPY_AND_ASSIGN(RefCountedSharedMemoryMapping);
+};
+#endif  // !defined(STARBOARD)
+#endif
+
 }  // namespace base
 
 #endif  // BASE_MEMORY_REF_COUNTED_MEMORY_H_
diff --git a/src/base/memory/ref_counted_memory_unittest.cc b/src/base/memory/ref_counted_memory_unittest.cc
index c6f2b9c..3f4eb09 100644
--- a/src/base/memory/ref_counted_memory_unittest.cc
+++ b/src/base/memory/ref_counted_memory_unittest.cc
@@ -4,31 +4,61 @@
 
 #include "base/memory/ref_counted_memory.h"
 
+#include <utility>
+
+#include "base/memory/read_only_shared_memory_region.h"
+#include "starboard/memory.h"
+#include "starboard/types.h"
+#include "testing/gmock/include/gmock/gmock.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
+using testing::Each;
+using testing::ElementsAre;
+
 namespace base {
 
 TEST(RefCountedMemoryUnitTest, RefCountedStaticMemory) {
-  scoped_refptr<RefCountedMemory> mem = new RefCountedStaticMemory(
-      reinterpret_cast<const uint8*>("static mem00"), 10);
+  auto mem = MakeRefCounted<RefCountedStaticMemory>("static mem00", 10);
 
   EXPECT_EQ(10U, mem->size());
-  EXPECT_EQ("static mem",
-            std::string(reinterpret_cast<const char*>(mem->front()),
-                        mem->size()));
+  EXPECT_EQ("static mem", std::string(mem->front_as<char>(), mem->size()));
 }
 
 TEST(RefCountedMemoryUnitTest, RefCountedBytes) {
-  std::vector<uint8> data;
+  std::vector<uint8_t> data;
   data.push_back(45);
   data.push_back(99);
   scoped_refptr<RefCountedMemory> mem = RefCountedBytes::TakeVector(&data);
 
   EXPECT_EQ(0U, data.size());
 
-  EXPECT_EQ(2U, mem->size());
+  ASSERT_EQ(2U, mem->size());
   EXPECT_EQ(45U, mem->front()[0]);
   EXPECT_EQ(99U, mem->front()[1]);
+
+  scoped_refptr<RefCountedMemory> mem2;
+  {
+    const unsigned char kData[] = {12, 11, 99};
+    mem2 = MakeRefCounted<RefCountedBytes>(kData, arraysize(kData));
+  }
+  ASSERT_EQ(3U, mem2->size());
+  EXPECT_EQ(12U, mem2->front()[0]);
+  EXPECT_EQ(11U, mem2->front()[1]);
+  EXPECT_EQ(99U, mem2->front()[2]);
+}
+
+TEST(RefCountedMemoryUnitTest, RefCountedBytesMutable) {
+  auto mem = base::MakeRefCounted<RefCountedBytes>(10);
+
+  ASSERT_EQ(10U, mem->size());
+  EXPECT_THAT(mem->data(), Each(0U));
+
+  // Test non-const versions of data(), front() and front_as<>().
+  mem->data()[0] = 1;
+  mem->front()[1] = 2;
+  mem->front_as<char>()[2] = 3;
+
+  EXPECT_THAT(mem->data(), ElementsAre(1, 2, 3, 0, 0, 0, 0, 0, 0, 0));
 }
 
 TEST(RefCountedMemoryUnitTest, RefCountedString) {
@@ -37,20 +67,66 @@
 
   EXPECT_EQ(0U, s.size());
 
-  EXPECT_EQ(10U, mem->size());
+  ASSERT_EQ(10U, mem->size());
   EXPECT_EQ('d', mem->front()[0]);
   EXPECT_EQ('e', mem->front()[1]);
+  EXPECT_EQ('e', mem->front()[9]);
 }
 
+#if !defined(STARBOARD)
+TEST(RefCountedMemoryUnitTest, RefCountedSharedMemory) {
+  static const char kData[] = "shm_dummy_data";
+  auto shm = std::make_unique<SharedMemory>();
+  ASSERT_TRUE(shm->CreateAndMapAnonymous(sizeof(kData)));
+  SbMemoryCopy(shm->memory(), kData, sizeof(kData));
+
+  auto mem =
+      MakeRefCounted<RefCountedSharedMemory>(std::move(shm), sizeof(kData));
+  ASSERT_EQ(sizeof(kData), mem->size());
+  EXPECT_EQ('s', mem->front()[0]);
+  EXPECT_EQ('h', mem->front()[1]);
+  EXPECT_EQ('_', mem->front()[9]);
+}
+
+TEST(RefCountedMemoryUnitTest, RefCountedSharedMemoryMapping) {
+  static const char kData[] = "mem_region_dummy_data";
+  scoped_refptr<RefCountedSharedMemoryMapping> mem;
+  {
+    MappedReadOnlyRegion region =
+        ReadOnlySharedMemoryRegion::Create(sizeof(kData));
+    ReadOnlySharedMemoryMapping ro_mapping = region.region.Map();
+    WritableSharedMemoryMapping rw_mapping = std::move(region.mapping);
+    ASSERT_TRUE(rw_mapping.IsValid());
+    SbMemoryCopy(rw_mapping.memory(), kData, sizeof(kData));
+    mem = MakeRefCounted<RefCountedSharedMemoryMapping>(std::move(ro_mapping));
+  }
+
+  ASSERT_LE(sizeof(kData), mem->size());
+  EXPECT_EQ('e', mem->front()[1]);
+  EXPECT_EQ('m', mem->front()[2]);
+  EXPECT_EQ('o', mem->front()[8]);
+
+  {
+    MappedReadOnlyRegion region =
+        ReadOnlySharedMemoryRegion::Create(sizeof(kData));
+    WritableSharedMemoryMapping rw_mapping = std::move(region.mapping);
+    ASSERT_TRUE(rw_mapping.IsValid());
+    SbMemoryCopy(rw_mapping.memory(), kData, sizeof(kData));
+    mem = RefCountedSharedMemoryMapping::CreateFromWholeRegion(region.region);
+  }
+
+  ASSERT_LE(sizeof(kData), mem->size());
+  EXPECT_EQ('_', mem->front()[3]);
+  EXPECT_EQ('r', mem->front()[4]);
+  EXPECT_EQ('i', mem->front()[7]);
+}
+#endif
+
 TEST(RefCountedMemoryUnitTest, Equals) {
   std::string s1("same");
   scoped_refptr<RefCountedMemory> mem1 = RefCountedString::TakeString(&s1);
 
-  std::vector<unsigned char> d2;
-  d2.push_back('s');
-  d2.push_back('a');
-  d2.push_back('m');
-  d2.push_back('e');
+  std::vector<unsigned char> d2 = {'s', 'a', 'm', 'e'};
   scoped_refptr<RefCountedMemory> mem2 = RefCountedBytes::TakeVector(&d2);
 
   EXPECT_TRUE(mem1->Equals(mem2));
@@ -65,7 +141,7 @@
 TEST(RefCountedMemoryUnitTest, EqualsNull) {
   std::string s("str");
   scoped_refptr<RefCountedMemory> mem = RefCountedString::TakeString(&s);
-  EXPECT_FALSE(mem->Equals(NULL));
+  EXPECT_FALSE(mem->Equals(nullptr));
 }
 
 }  //  namespace base
diff --git a/src/base/memory/ref_counted_unittest.cc b/src/base/memory/ref_counted_unittest.cc
index 8ddd5be..ff52d18 100644
--- a/src/base/memory/ref_counted_unittest.cc
+++ b/src/base/memory/ref_counted_unittest.cc
@@ -3,14 +3,32 @@
 // found in the LICENSE file.
 
 #include "base/memory/ref_counted.h"
+
+#include <type_traits>
+#include <utility>
+
+#include "base/test/gtest_util.h"
+#include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
+namespace base {
+namespace subtle {
 namespace {
 
 class SelfAssign : public base::RefCounted<SelfAssign> {
-  friend class base::RefCounted<SelfAssign>;
+ protected:
+  virtual ~SelfAssign() = default;
 
-  ~SelfAssign() {}
+ private:
+  friend class base::RefCounted<SelfAssign>;
+};
+
+class Derived : public SelfAssign {
+ protected:
+  ~Derived() override = default;
+
+ private:
+  friend class base::RefCounted<Derived>;
 };
 
 class CheckDerivedMemberAccess : public scoped_refptr<SelfAssign> {
@@ -24,31 +42,161 @@
 
 class ScopedRefPtrToSelf : public base::RefCounted<ScopedRefPtrToSelf> {
  public:
-  ScopedRefPtrToSelf()
-      : ALLOW_THIS_IN_INITIALIZER_LIST(self_ptr_(this)) {
-  }
+  ScopedRefPtrToSelf() : self_ptr_(this) {}
 
   static bool was_destroyed() { return was_destroyed_; }
 
-  void SelfDestruct() { self_ptr_ = NULL; }
+  static void reset_was_destroyed() { was_destroyed_ = false; }
+
+  scoped_refptr<ScopedRefPtrToSelf> self_ptr_;
 
  private:
   friend class base::RefCounted<ScopedRefPtrToSelf>;
   ~ScopedRefPtrToSelf() { was_destroyed_ = true; }
 
   static bool was_destroyed_;
-
-  scoped_refptr<ScopedRefPtrToSelf> self_ptr_;
 };
 
 bool ScopedRefPtrToSelf::was_destroyed_ = false;
 
-}  // end namespace
+class ScopedRefPtrCountBase : public base::RefCounted<ScopedRefPtrCountBase> {
+ public:
+  ScopedRefPtrCountBase() { ++constructor_count_; }
+
+  static int constructor_count() { return constructor_count_; }
+
+  static int destructor_count() { return destructor_count_; }
+
+  static void reset_count() {
+    constructor_count_ = 0;
+    destructor_count_ = 0;
+  }
+
+ protected:
+  virtual ~ScopedRefPtrCountBase() { ++destructor_count_; }
+
+ private:
+  friend class base::RefCounted<ScopedRefPtrCountBase>;
+
+  static int constructor_count_;
+  static int destructor_count_;
+};
+
+int ScopedRefPtrCountBase::constructor_count_ = 0;
+int ScopedRefPtrCountBase::destructor_count_ = 0;
+
+class ScopedRefPtrCountDerived : public ScopedRefPtrCountBase {
+ public:
+  ScopedRefPtrCountDerived() { ++constructor_count_; }
+
+  static int constructor_count() { return constructor_count_; }
+
+  static int destructor_count() { return destructor_count_; }
+
+  static void reset_count() {
+    constructor_count_ = 0;
+    destructor_count_ = 0;
+  }
+
+ protected:
+  ~ScopedRefPtrCountDerived() override { ++destructor_count_; }
+
+ private:
+  friend class base::RefCounted<ScopedRefPtrCountDerived>;
+
+  static int constructor_count_;
+  static int destructor_count_;
+};
+
+int ScopedRefPtrCountDerived::constructor_count_ = 0;
+int ScopedRefPtrCountDerived::destructor_count_ = 0;
+
+class Other : public base::RefCounted<Other> {
+ private:
+  friend class base::RefCounted<Other>;
+
+  ~Other() = default;
+};
+
+class HasPrivateDestructorWithDeleter;
+
+struct Deleter {
+  static void Destruct(const HasPrivateDestructorWithDeleter* x);
+};
+
+class HasPrivateDestructorWithDeleter
+    : public base::RefCounted<HasPrivateDestructorWithDeleter, Deleter> {
+ public:
+  HasPrivateDestructorWithDeleter() = default;
+
+ private:
+  friend struct Deleter;
+  ~HasPrivateDestructorWithDeleter() = default;
+};
+
+void Deleter::Destruct(const HasPrivateDestructorWithDeleter* x) {
+  delete x;
+}
+
+scoped_refptr<Other> Overloaded(scoped_refptr<Other> other) {
+  return other;
+}
+
+scoped_refptr<SelfAssign> Overloaded(scoped_refptr<SelfAssign> self_assign) {
+  return self_assign;
+}
+
+class InitialRefCountIsOne : public base::RefCounted<InitialRefCountIsOne> {
+ public:
+  REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE();
+
+  InitialRefCountIsOne() = default;
+
+ private:
+  friend class base::RefCounted<InitialRefCountIsOne>;
+  ~InitialRefCountIsOne() = default;
+};
+
+// Checks that the scoped_refptr is null before the reference counted object is
+// destroyed.
+class CheckRefptrNull : public base::RefCounted<CheckRefptrNull> {
+ public:
+  // Set the last scoped_refptr that will have a reference to this object.
+  void set_scoped_refptr(scoped_refptr<CheckRefptrNull>* ptr) { ptr_ = ptr; }
+
+ protected:
+  virtual ~CheckRefptrNull() {
+    EXPECT_NE(ptr_, nullptr);
+    EXPECT_EQ(ptr_->get(), nullptr);
+  }
+
+ private:
+  friend class base::RefCounted<CheckRefptrNull>;
+
+  scoped_refptr<CheckRefptrNull>* ptr_ = nullptr;
+};
+
+class Overflow : public base::RefCounted<Overflow> {
+ public:
+  Overflow() = default;
+
+ private:
+  friend class base::RefCounted<Overflow>;
+  ~Overflow() = default;
+};
+
+}  // namespace
 
 TEST(RefCountedUnitTest, TestSelfAssignment) {
   SelfAssign* p = new SelfAssign;
   scoped_refptr<SelfAssign> var(p);
-  var = var;
+  var = *&var;  // The *& defeats Clang's -Wself-assign warning.
+  EXPECT_EQ(var.get(), p);
+  var = std::move(var);
+  EXPECT_EQ(var.get(), p);
+  var.swap(var);
+  EXPECT_EQ(var.get(), p);
+  swap(var, var);
   EXPECT_EQ(var.get(), p);
 }
 
@@ -56,9 +204,493 @@
   CheckDerivedMemberAccess check;
 }
 
-TEST(RefCountedUnitTest, ScopedRefPtrToSelf) {
+TEST(RefCountedUnitTest, ScopedRefPtrToSelfPointerAssignment) {
+  ScopedRefPtrToSelf::reset_was_destroyed();
+
   ScopedRefPtrToSelf* check = new ScopedRefPtrToSelf();
   EXPECT_FALSE(ScopedRefPtrToSelf::was_destroyed());
-  check->SelfDestruct();
+  check->self_ptr_ = nullptr;
   EXPECT_TRUE(ScopedRefPtrToSelf::was_destroyed());
 }
+
+TEST(RefCountedUnitTest, ScopedRefPtrToSelfMoveAssignment) {
+  ScopedRefPtrToSelf::reset_was_destroyed();
+
+  ScopedRefPtrToSelf* check = new ScopedRefPtrToSelf();
+  EXPECT_FALSE(ScopedRefPtrToSelf::was_destroyed());
+  // Releasing |check->self_ptr_| will delete |check|.
+  // The move assignment operator must assign |check->self_ptr_| first then
+  // release |check->self_ptr_|.
+  check->self_ptr_ = scoped_refptr<ScopedRefPtrToSelf>();
+  EXPECT_TRUE(ScopedRefPtrToSelf::was_destroyed());
+}
+
+TEST(RefCountedUnitTest, BooleanTesting) {
+  scoped_refptr<SelfAssign> ptr_to_an_instance = new SelfAssign;
+  EXPECT_TRUE(ptr_to_an_instance);
+  EXPECT_FALSE(!ptr_to_an_instance);
+
+  if (ptr_to_an_instance) {
+  } else {
+    ADD_FAILURE() << "Pointer to an instance should result in true.";
+  }
+
+  if (!ptr_to_an_instance) {  // check for operator!().
+    ADD_FAILURE() << "Pointer to an instance should result in !x being false.";
+  }
+
+  scoped_refptr<SelfAssign> null_ptr;
+  EXPECT_FALSE(null_ptr);
+  EXPECT_TRUE(!null_ptr);
+
+  if (null_ptr) {
+    ADD_FAILURE() << "Null pointer should result in false.";
+  }
+
+  if (!null_ptr) {  // check for operator!().
+  } else {
+    ADD_FAILURE() << "Null pointer should result in !x being true.";
+  }
+}
+
+TEST(RefCountedUnitTest, Equality) {
+  scoped_refptr<SelfAssign> p1(new SelfAssign);
+  scoped_refptr<SelfAssign> p2(new SelfAssign);
+
+  EXPECT_EQ(p1, p1);
+  EXPECT_EQ(p2, p2);
+
+  EXPECT_NE(p1, p2);
+  EXPECT_NE(p2, p1);
+}
+
+TEST(RefCountedUnitTest, NullptrEquality) {
+  scoped_refptr<SelfAssign> ptr_to_an_instance(new SelfAssign);
+  scoped_refptr<SelfAssign> ptr_to_nullptr;
+
+  EXPECT_NE(nullptr, ptr_to_an_instance);
+  EXPECT_NE(ptr_to_an_instance, nullptr);
+  EXPECT_EQ(nullptr, ptr_to_nullptr);
+  EXPECT_EQ(ptr_to_nullptr, nullptr);
+}
+
+TEST(RefCountedUnitTest, ConvertibleEquality) {
+  scoped_refptr<Derived> p1(new Derived);
+  scoped_refptr<SelfAssign> p2;
+
+  EXPECT_NE(p1, p2);
+  EXPECT_NE(p2, p1);
+
+  p2 = p1;
+
+  EXPECT_EQ(p1, p2);
+  EXPECT_EQ(p2, p1);
+}
+
+TEST(RefCountedUnitTest, MoveAssignment1) {
+  ScopedRefPtrCountBase::reset_count();
+
+  {
+    ScopedRefPtrCountBase *raw = new ScopedRefPtrCountBase();
+    scoped_refptr<ScopedRefPtrCountBase> p1(raw);
+    EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+    EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+    {
+      scoped_refptr<ScopedRefPtrCountBase> p2;
+
+      p2 = std::move(p1);
+      EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+      EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+      EXPECT_EQ(nullptr, p1.get());
+      EXPECT_EQ(raw, p2.get());
+
+      // p2 goes out of scope.
+    }
+    EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+    EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+
+    // p1 goes out of scope.
+  }
+  EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+  EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+}
+
+TEST(RefCountedUnitTest, MoveAssignment2) {
+  ScopedRefPtrCountBase::reset_count();
+
+  {
+    ScopedRefPtrCountBase *raw = new ScopedRefPtrCountBase();
+    scoped_refptr<ScopedRefPtrCountBase> p1;
+    EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+    EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+    {
+      scoped_refptr<ScopedRefPtrCountBase> p2(raw);
+      EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+      EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+      p1 = std::move(p2);
+      EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+      EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+      EXPECT_EQ(raw, p1.get());
+      EXPECT_EQ(nullptr, p2.get());
+
+      // p2 goes out of scope.
+    }
+    EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+    EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+    // p1 goes out of scope.
+  }
+  EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+  EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+}
+
+TEST(RefCountedUnitTest, MoveAssignmentSameInstance1) {
+  ScopedRefPtrCountBase::reset_count();
+
+  {
+    ScopedRefPtrCountBase *raw = new ScopedRefPtrCountBase();
+    scoped_refptr<ScopedRefPtrCountBase> p1(raw);
+    EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+    EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+    {
+      scoped_refptr<ScopedRefPtrCountBase> p2(p1);
+      EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+      EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+      p1 = std::move(p2);
+      EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+      EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+      EXPECT_EQ(raw, p1.get());
+      EXPECT_EQ(nullptr, p2.get());
+
+      // p2 goes out of scope.
+    }
+    EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+    EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+    // p1 goes out of scope.
+  }
+  EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+  EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+}
+
+TEST(RefCountedUnitTest, MoveAssignmentSameInstance2) {
+  ScopedRefPtrCountBase::reset_count();
+
+  {
+    ScopedRefPtrCountBase *raw = new ScopedRefPtrCountBase();
+    scoped_refptr<ScopedRefPtrCountBase> p1(raw);
+    EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+    EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+    {
+      scoped_refptr<ScopedRefPtrCountBase> p2(p1);
+      EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+      EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+      p2 = std::move(p1);
+      EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+      EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+      EXPECT_EQ(nullptr, p1.get());
+      EXPECT_EQ(raw, p2.get());
+
+      // p2 goes out of scope.
+    }
+    EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+    EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+
+    // p1 goes out of scope.
+  }
+  EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+  EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+}
+
+TEST(RefCountedUnitTest, MoveAssignmentDifferentInstances) {
+  ScopedRefPtrCountBase::reset_count();
+
+  {
+    ScopedRefPtrCountBase *raw1 = new ScopedRefPtrCountBase();
+    scoped_refptr<ScopedRefPtrCountBase> p1(raw1);
+    EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+    EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+    {
+      ScopedRefPtrCountBase *raw2 = new ScopedRefPtrCountBase();
+      scoped_refptr<ScopedRefPtrCountBase> p2(raw2);
+      EXPECT_EQ(2, ScopedRefPtrCountBase::constructor_count());
+      EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+      p1 = std::move(p2);
+      EXPECT_EQ(2, ScopedRefPtrCountBase::constructor_count());
+      EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+      EXPECT_EQ(raw2, p1.get());
+      EXPECT_EQ(nullptr, p2.get());
+
+      // p2 goes out of scope.
+    }
+    EXPECT_EQ(2, ScopedRefPtrCountBase::constructor_count());
+    EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+
+    // p1 goes out of scope.
+  }
+  EXPECT_EQ(2, ScopedRefPtrCountBase::constructor_count());
+  EXPECT_EQ(2, ScopedRefPtrCountBase::destructor_count());
+}
+
+TEST(RefCountedUnitTest, MoveAssignmentSelfMove) {
+  ScopedRefPtrCountBase::reset_count();
+
+  {
+    ScopedRefPtrCountBase* raw = new ScopedRefPtrCountBase;
+    scoped_refptr<ScopedRefPtrCountBase> p1(raw);
+    scoped_refptr<ScopedRefPtrCountBase>& p1_ref = p1;
+
+    EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+    EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+    p1 = std::move(p1_ref);
+
+    // |p1| is "valid but unspecified", so don't bother inspecting its
+    // contents, just ensure that we don't crash.
+  }
+
+  EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+  EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+}
+
+TEST(RefCountedUnitTest, MoveAssignmentDerived) {
+  ScopedRefPtrCountBase::reset_count();
+  ScopedRefPtrCountDerived::reset_count();
+
+  {
+    ScopedRefPtrCountBase *raw1 = new ScopedRefPtrCountBase();
+    scoped_refptr<ScopedRefPtrCountBase> p1(raw1);
+    EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+    EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+    EXPECT_EQ(0, ScopedRefPtrCountDerived::constructor_count());
+    EXPECT_EQ(0, ScopedRefPtrCountDerived::destructor_count());
+
+    {
+      ScopedRefPtrCountDerived *raw2 = new ScopedRefPtrCountDerived();
+      scoped_refptr<ScopedRefPtrCountDerived> p2(raw2);
+      EXPECT_EQ(2, ScopedRefPtrCountBase::constructor_count());
+      EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+      EXPECT_EQ(1, ScopedRefPtrCountDerived::constructor_count());
+      EXPECT_EQ(0, ScopedRefPtrCountDerived::destructor_count());
+
+      p1 = std::move(p2);
+      EXPECT_EQ(2, ScopedRefPtrCountBase::constructor_count());
+      EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+      EXPECT_EQ(1, ScopedRefPtrCountDerived::constructor_count());
+      EXPECT_EQ(0, ScopedRefPtrCountDerived::destructor_count());
+      EXPECT_EQ(raw2, p1.get());
+      EXPECT_EQ(nullptr, p2.get());
+
+      // p2 goes out of scope.
+    }
+    EXPECT_EQ(2, ScopedRefPtrCountBase::constructor_count());
+    EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+    EXPECT_EQ(1, ScopedRefPtrCountDerived::constructor_count());
+    EXPECT_EQ(0, ScopedRefPtrCountDerived::destructor_count());
+
+    // p1 goes out of scope.
+  }
+  EXPECT_EQ(2, ScopedRefPtrCountBase::constructor_count());
+  EXPECT_EQ(2, ScopedRefPtrCountBase::destructor_count());
+  EXPECT_EQ(1, ScopedRefPtrCountDerived::constructor_count());
+  EXPECT_EQ(1, ScopedRefPtrCountDerived::destructor_count());
+}
+
+TEST(RefCountedUnitTest, MoveConstructor) {
+  ScopedRefPtrCountBase::reset_count();
+
+  {
+    ScopedRefPtrCountBase *raw = new ScopedRefPtrCountBase();
+    scoped_refptr<ScopedRefPtrCountBase> p1(raw);
+    EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+    EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+
+    {
+      scoped_refptr<ScopedRefPtrCountBase> p2(std::move(p1));
+      EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+      EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+      EXPECT_EQ(nullptr, p1.get());
+      EXPECT_EQ(raw, p2.get());
+
+      // p2 goes out of scope.
+    }
+    EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+    EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+
+    // p1 goes out of scope.
+  }
+  EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+  EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+}
+
+TEST(RefCountedUnitTest, MoveConstructorDerived) {
+  ScopedRefPtrCountBase::reset_count();
+  ScopedRefPtrCountDerived::reset_count();
+
+  {
+    ScopedRefPtrCountDerived *raw1 = new ScopedRefPtrCountDerived();
+    scoped_refptr<ScopedRefPtrCountDerived> p1(raw1);
+    EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+    EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+    EXPECT_EQ(1, ScopedRefPtrCountDerived::constructor_count());
+    EXPECT_EQ(0, ScopedRefPtrCountDerived::destructor_count());
+
+    {
+      scoped_refptr<ScopedRefPtrCountBase> p2(std::move(p1));
+      EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+      EXPECT_EQ(0, ScopedRefPtrCountBase::destructor_count());
+      EXPECT_EQ(1, ScopedRefPtrCountDerived::constructor_count());
+      EXPECT_EQ(0, ScopedRefPtrCountDerived::destructor_count());
+      EXPECT_EQ(nullptr, p1.get());
+      EXPECT_EQ(raw1, p2.get());
+
+      // p2 goes out of scope.
+    }
+    EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+    EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+    EXPECT_EQ(1, ScopedRefPtrCountDerived::constructor_count());
+    EXPECT_EQ(1, ScopedRefPtrCountDerived::destructor_count());
+
+    // p1 goes out of scope.
+  }
+  EXPECT_EQ(1, ScopedRefPtrCountBase::constructor_count());
+  EXPECT_EQ(1, ScopedRefPtrCountBase::destructor_count());
+  EXPECT_EQ(1, ScopedRefPtrCountDerived::constructor_count());
+  EXPECT_EQ(1, ScopedRefPtrCountDerived::destructor_count());
+}
+
+TEST(RefCountedUnitTest, TestOverloadResolutionCopy) {
+  const scoped_refptr<Derived> derived(new Derived);
+  const scoped_refptr<SelfAssign> expected(derived);
+  EXPECT_EQ(expected, Overloaded(derived));
+
+  const scoped_refptr<Other> other(new Other);
+  EXPECT_EQ(other, Overloaded(other));
+}
+
+TEST(RefCountedUnitTest, TestOverloadResolutionMove) {
+  scoped_refptr<Derived> derived(new Derived);
+  const scoped_refptr<SelfAssign> expected(derived);
+  EXPECT_EQ(expected, Overloaded(std::move(derived)));
+
+  scoped_refptr<Other> other(new Other);
+  const scoped_refptr<Other> other2(other);
+  EXPECT_EQ(other2, Overloaded(std::move(other)));
+}
+
+TEST(RefCountedUnitTest, TestMakeRefCounted) {
+  scoped_refptr<Derived> derived = new Derived;
+  EXPECT_TRUE(derived->HasOneRef());
+  derived.reset();
+
+  scoped_refptr<Derived> derived2 = base::MakeRefCounted<Derived>();
+  EXPECT_TRUE(derived2->HasOneRef());
+  derived2.reset();
+}
+
+TEST(RefCountedUnitTest, TestInitialRefCountIsOne) {
+  scoped_refptr<InitialRefCountIsOne> obj =
+      base::MakeRefCounted<InitialRefCountIsOne>();
+  EXPECT_TRUE(obj->HasOneRef());
+  obj.reset();
+
+  scoped_refptr<InitialRefCountIsOne> obj2 =
+      base::AdoptRef(new InitialRefCountIsOne);
+  EXPECT_TRUE(obj2->HasOneRef());
+  obj2.reset();
+
+  scoped_refptr<Other> obj3 = base::MakeRefCounted<Other>();
+  EXPECT_TRUE(obj3->HasOneRef());
+  obj3.reset();
+}
+
+TEST(RefCountedUnitTest, TestPrivateDestructorWithDeleter) {
+  // Ensure that RefCounted doesn't need the access to the pointee dtor when
+  // a custom deleter is given.
+  scoped_refptr<HasPrivateDestructorWithDeleter> obj =
+      base::MakeRefCounted<HasPrivateDestructorWithDeleter>();
+}
+
+TEST(RefCountedUnitTest, TestReset) {
+  ScopedRefPtrCountBase::reset_count();
+
+  // Create ScopedRefPtrCountBase that is referenced by |obj1| and |obj2|.
+  scoped_refptr<ScopedRefPtrCountBase> obj1 =
+      base::MakeRefCounted<ScopedRefPtrCountBase>();
+  scoped_refptr<ScopedRefPtrCountBase> obj2 = obj1;
+  EXPECT_NE(obj1.get(), nullptr);
+  EXPECT_NE(obj2.get(), nullptr);
+  EXPECT_EQ(ScopedRefPtrCountBase::constructor_count(), 1);
+  EXPECT_EQ(ScopedRefPtrCountBase::destructor_count(), 0);
+
+  // Check that calling reset() on |obj1| resets it. |obj2| still has a
+  // reference to the ScopedRefPtrCountBase so it shouldn't be reset.
+  obj1.reset();
+  EXPECT_EQ(obj1.get(), nullptr);
+  EXPECT_EQ(ScopedRefPtrCountBase::constructor_count(), 1);
+  EXPECT_EQ(ScopedRefPtrCountBase::destructor_count(), 0);
+
+  // Check that calling reset() on |obj2| resets it and causes the deletion of
+  // the ScopedRefPtrCountBase.
+  obj2.reset();
+  EXPECT_EQ(obj2.get(), nullptr);
+  EXPECT_EQ(ScopedRefPtrCountBase::constructor_count(), 1);
+  EXPECT_EQ(ScopedRefPtrCountBase::destructor_count(), 1);
+}
+
+TEST(RefCountedUnitTest, TestResetAlreadyNull) {
+  // Check that calling reset() on a null scoped_refptr does nothing.
+  scoped_refptr<ScopedRefPtrCountBase> obj;
+  obj.reset();
+  // |obj| should still be null after calling reset().
+  EXPECT_EQ(obj.get(), nullptr);
+}
+
+TEST(RefCountedUnitTest, CheckScopedRefptrNullBeforeObjectDestruction) {
+  scoped_refptr<CheckRefptrNull> obj = base::MakeRefCounted<CheckRefptrNull>();
+  obj->set_scoped_refptr(&obj);
+
+  // Check that when reset() is called the scoped_refptr internal pointer is set
+  // to null before the reference counted object is destroyed. This check is
+  // done by the CheckRefptrNull destructor.
+  obj.reset();
+  EXPECT_EQ(obj.get(), nullptr);
+}
+
+TEST(RefCountedDeathTest, TestAdoptRef) {
+  // Check that WrapRefCounted() DCHECKs if passed a type that defines
+  // REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE.
+  EXPECT_DCHECK_DEATH(base::WrapRefCounted(new InitialRefCountIsOne));
+
+  // Check that AdoptRef() DCHECKs if passed a nullptr.
+  InitialRefCountIsOne* ptr = nullptr;
+  EXPECT_DCHECK_DEATH(base::AdoptRef(ptr));
+
+  // Check that AdoptRef() DCHECKs if passed an object that doesn't need to be
+  // adopted.
+  scoped_refptr<InitialRefCountIsOne> obj =
+      base::MakeRefCounted<InitialRefCountIsOne>();
+  EXPECT_DCHECK_DEATH(base::AdoptRef(obj.get()));
+}
+
+#if defined(ARCH_CPU_64_BITS)
+TEST(RefCountedDeathTest, TestOverflowCheck) {
+  EXPECT_DCHECK_DEATH({
+    auto p = base::MakeRefCounted<Overflow>();
+    p->ref_count_ = std::numeric_limits<uint32_t>::max();
+    p->AddRef();
+  });
+}
+#endif
+
+}  // namespace subtle
+}  // namespace base
diff --git a/src/base/memory/ref_counted_unittest.nc b/src/base/memory/ref_counted_unittest.nc
new file mode 100644
index 0000000..b8c371f
--- /dev/null
+++ b/src/base/memory/ref_counted_unittest.nc
@@ -0,0 +1,28 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/ref_counted.h"
+
+namespace base {
+
+class InitialRefCountIsZero : public base::RefCounted<InitialRefCountIsZero> {
+ public:
+  InitialRefCountIsZero() {}
+ private:
+  friend class base::RefCounted<InitialRefCountIsZero>;
+  ~InitialRefCountIsZero() {}
+};
+
+// TODO(hans): Remove .* and update the static_assert expectations once we roll
+// past Clang r313315. https://crbug.com/765692.
+
+#if defined(NCTEST_ADOPT_REF_TO_ZERO_START)  // [r"fatal error: static_assert failed .*\"Use AdoptRef only for the reference count starts from one\.\""]
+
+void WontCompile() {
+  AdoptRef(new InitialRefCountIsZero());
+}
+
+#endif
+
+}  // namespace base
diff --git a/src/base/memory/scoped_generic_obj.h b/src/base/memory/scoped_generic_obj.h
deleted file mode 100644
index 4b3cb86..0000000
--- a/src/base/memory/scoped_generic_obj.h
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MEMORY_SCOPED_GENERIC_OBJ_H_
-#define BASE_MEMORY_SCOPED_GENERIC_OBJ_H_
-
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-
-// ScopedGenericObj<> is patterned after scoped_ptr_malloc<>, except
-// that it assumes the template argument is typedef'ed to a pointer
-// type. It does not support retain/release semantics. It takes as its
-// second template argument a functor which frees the object.
-//
-// Example (Mac-specific):
-//
-// class ScopedDestroyRendererInfo {
-//  public:
-//   void operator()(CGLRendererInfoObj x) const {
-//     CGLDestroyRendererInfo(x);
-//   }
-// };
-//
-// ...
-//
-//   CGLRendererInfoObj renderer_info = NULL;
-//   ...
-//   ScopedGenericObj<CGLRendererInfoObj, ScopedDestroyRendererInfo>
-//       scoper(renderer_info);
-
-template<class C, class FreeProc>
-class ScopedGenericObj {
- public:
-
-  // The element type
-  typedef C element_type;
-
-  // Constructor.  Defaults to initializing with NULL.
-  // There is no way to create an uninitialized ScopedGenericObj.
-  // The input parameter must be allocated with an allocator that matches the
-  // Free functor.
-  explicit ScopedGenericObj(C p = C()): obj_(p) {}
-
-  // Destructor.  If there is a C object, call the Free functor.
-  ~ScopedGenericObj() {
-    reset();
-  }
-
-  // Reset.  Calls the Free functor on the current owned object, if any.
-  // Then takes ownership of a new object, if given.
-  // this->reset(this->get()) works.
-  void reset(C p = C()) {
-    if (obj_ != p) {
-      FreeProc free_proc;
-      free_proc(obj_);
-      obj_ = p;
-    }
-  }
-
-  operator C() const {
-    return obj_;
-  }
-
-  C get() const {
-    return obj_;
-  }
-
-  // Comparison operators.
-  // These return whether a ScopedGenericObj and a plain pointer refer
-  // to the same object, not just to two different but equal objects.
-  // For compatibility with the boost-derived implementation, these
-  // take non-const arguments.
-  bool operator==(C p) const {
-    return obj_ == p;
-  }
-
-  bool operator!=(C p) const {
-    return obj_ != p;
-  }
-
-  // Swap two ScopedGenericObjs.
-  void swap(ScopedGenericObj& b) {
-    C tmp = b.obj_;
-    b.obj_ = obj_;
-    obj_ = tmp;
-  }
-
-  // Release a pointer.
-  // The return value is the current pointer held by this object.
-  // If this object holds a NULL pointer, the return value is NULL.
-  // After this operation, this object will hold a NULL pointer,
-  // and will not own the object any more.
-  C release() WARN_UNUSED_RESULT {
-    C tmp = obj_;
-    obj_ = NULL;
-    return tmp;
-  }
-
- private:
-  C obj_;
-
-  // no reason to use these: each ScopedGenericObj should have its own object.
-  template <class C2, class GP>
-  bool operator==(ScopedGenericObj<C2, GP> const& p) const;
-  template <class C2, class GP>
-  bool operator!=(ScopedGenericObj<C2, GP> const& p) const;
-
-  // Disallow evil constructors.
-  ScopedGenericObj(const ScopedGenericObj&);
-  void operator=(const ScopedGenericObj&);
-};
-
-template<class C, class FP> inline
-void swap(ScopedGenericObj<C, FP>& a, ScopedGenericObj<C, FP>& b) {
-  a.swap(b);
-}
-
-template<class C, class FP> inline
-bool operator==(C* p, const ScopedGenericObj<C, FP>& b) {
-  return p == b.get();
-}
-
-template<class C, class FP> inline
-bool operator!=(C* p, const ScopedGenericObj<C, FP>& b) {
-  return p != b.get();
-}
-
-#endif  // BASE_MEMORY_SCOPED_GENERIC_OBJ_H_
diff --git a/src/base/memory/scoped_handle.h b/src/base/memory/scoped_handle.h
deleted file mode 100644
index b95559d..0000000
--- a/src/base/memory/scoped_handle.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MEMORY_SCOPED_HANDLE_H_
-#define BASE_MEMORY_SCOPED_HANDLE_H_
-
-#include <stdio.h>
-
-#include "base/basictypes.h"
-
-class ScopedStdioHandle {
- public:
-  ScopedStdioHandle()
-      : handle_(NULL) { }
-
-  explicit ScopedStdioHandle(FILE* handle)
-      : handle_(handle) { }
-
-  ~ScopedStdioHandle() {
-    Close();
-  }
-
-  void Close() {
-    if (handle_) {
-      fclose(handle_);
-      handle_ = NULL;
-    }
-  }
-
-  FILE* get() const { return handle_; }
-
-  FILE* Take() {
-    FILE* temp = handle_;
-    handle_ = NULL;
-    return temp;
-  }
-
-  void Set(FILE* newhandle) {
-    Close();
-    handle_ = newhandle;
-  }
-
- private:
-  FILE* handle_;
-
-  DISALLOW_COPY_AND_ASSIGN(ScopedStdioHandle);
-};
-
-#endif  // BASE_MEMORY_SCOPED_HANDLE_H_
diff --git a/src/base/memory/scoped_nsobject.h b/src/base/memory/scoped_nsobject.h
deleted file mode 100644
index 5d98e3f..0000000
--- a/src/base/memory/scoped_nsobject.h
+++ /dev/null
@@ -1,174 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MEMORY_SCOPED_NSOBJECT_H_
-#define BASE_MEMORY_SCOPED_NSOBJECT_H_
-
-#import <Foundation/Foundation.h>
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "base/memory/scoped_policy.h"
-
-// scoped_nsobject<> is patterned after scoped_ptr<>, but maintains ownership
-// of an NSObject subclass object.  Style deviations here are solely for
-// compatibility with scoped_ptr<>'s interface, with which everyone is already
-// familiar.
-//
-// By default, scoped_nsobject<> takes ownership of an object (in the
-// constructor or in reset()) by taking over the caller's existing ownership
-// claim.  The caller must own the object it gives to scoped_nsobject<>, and
-// relinquishes an ownership claim to that object.  scoped_nsobject<> does not
-// call -retain. This behavior is parametrized by the |OwnershipPolicy| enum.
-// If the value |RETAIN| is passed (in the constructor or in reset()), then
-// scoped_nsobject<> will call -retain on the object, and the initial
-// ownership is not changed.
-//
-// scoped_nsprotocol<> has the same behavior as scoped_nsobject, but can be used
-// with protocols.
-//
-// scoped_nsobject<> is not to be used for NSAutoreleasePools. For
-// NSAutoreleasePools use ScopedNSAutoreleasePool from
-// scoped_nsautorelease_pool.h instead.
-// We check for bad uses of scoped_nsobject and NSAutoreleasePool at compile
-// time with a template specialization (see below).
-
-template<typename NST>
-class scoped_nsprotocol {
- public:
-  explicit scoped_nsprotocol(
-      NST object = nil,
-      base::scoped_policy::OwnershipPolicy policy = base::scoped_policy::ASSUME)
-      : object_(object) {
-    if (policy == base::scoped_policy::RETAIN)
-      [object retain];
-  }
-
-  scoped_nsprotocol(const scoped_nsprotocol<NST>& that)
-      : object_([that.object_ retain]) {
-  }
-
-  ~scoped_nsprotocol() {
-    [object_ release];
-  }
-
-  scoped_nsprotocol& operator=(const scoped_nsprotocol<NST>& that) {
-    reset(that.get(), base::scoped_policy::RETAIN);
-    return *this;
-  }
-
-  void reset(NST object = nil,
-             base::scoped_policy::OwnershipPolicy policy =
-                base::scoped_policy::ASSUME) {
-    if (policy == base::scoped_policy::RETAIN)
-      [object retain];
-    // We intentionally do not check that object != object_ as the caller must
-    // either already have an ownership claim over whatever it passes to this
-    // method, or call it with the |RETAIN| policy which will have ensured that
-    // the object is retained once more when reaching this point.
-    [object_ release];
-    object_ = object;
-  }
-
-  bool operator==(NST that) const { return object_ == that; }
-  bool operator!=(NST that) const { return object_ != that; }
-
-  operator NST() const {
-    return object_;
-  }
-
-  NST get() const {
-    return object_;
-  }
-
-  void swap(scoped_nsprotocol& that) {
-    NST temp = that.object_;
-    that.object_ = object_;
-    object_ = temp;
-  }
-
-  // scoped_nsprotocol<>::release() is like scoped_ptr<>::release.  It is NOT a
-  // wrapper for [object_ release].  To force a scoped_nsprotocol<> to call
-  // [object_ release], use scoped_nsprotocol<>::reset().
-  NST release() WARN_UNUSED_RESULT {
-    NST temp = object_;
-    object_ = nil;
-    return temp;
-  }
-
-  // Shift reference to the autorelease pool to be released later.
-  NST autorelease() {
-    return [release() autorelease];
-  }
-
- private:
-  NST object_;
-};
-
-// Free functions
-template <class C>
-void swap(scoped_nsprotocol<C>& p1, scoped_nsprotocol<C>& p2) {
-  p1.swap(p2);
-}
-
-template <class C>
-bool operator==(C p1, const scoped_nsprotocol<C>& p2) {
-  return p1 == p2.get();
-}
-
-template <class C>
-bool operator!=(C p1, const scoped_nsprotocol<C>& p2) {
-  return p1 != p2.get();
-}
-
-template<typename NST>
-class scoped_nsobject : public scoped_nsprotocol<NST*> {
- public:
-  explicit scoped_nsobject(
-      NST* object = nil,
-      base::scoped_policy::OwnershipPolicy policy = base::scoped_policy::ASSUME)
-      : scoped_nsprotocol<NST*>(object, policy) {
-  }
-
-  scoped_nsobject(const scoped_nsobject<NST>& that)
-      : scoped_nsprotocol<NST*>(that) {
-  }
-
-  scoped_nsobject& operator=(const scoped_nsobject<NST>& that) {
-    scoped_nsprotocol<NST*>::operator=(that);
-    return *this;
-  }
-};
-
-// Specialization to make scoped_nsobject<id> work.
-template<>
-class scoped_nsobject<id> : public scoped_nsprotocol<id> {
- public:
-  explicit scoped_nsobject(
-      id object = nil,
-      base::scoped_policy::OwnershipPolicy policy = base::scoped_policy::ASSUME)
-      : scoped_nsprotocol<id>(object, policy) {
-  }
-
-  scoped_nsobject(const scoped_nsobject<id>& that)
-      : scoped_nsprotocol<id>(that) {
-  }
-
-  scoped_nsobject& operator=(const scoped_nsobject<id>& that) {
-    scoped_nsprotocol<id>::operator=(that);
-    return *this;
-  }
-};
-
-// Do not use scoped_nsobject for NSAutoreleasePools, use
-// ScopedNSAutoreleasePool instead. This is a compile time check. See details
-// at top of header.
-template<>
-class scoped_nsobject<NSAutoreleasePool> {
- private:
-  explicit scoped_nsobject(NSAutoreleasePool* object = nil,
-                           base::scoped_policy::OwnershipPolicy policy =
-                               base::scoped_policy::ASSUME);
-  DISALLOW_COPY_AND_ASSIGN(scoped_nsobject);
-};
-#endif  // BASE_MEMORY_SCOPED_NSOBJECT_H_
diff --git a/src/base/memory/scoped_nsobject_unittest.mm b/src/base/memory/scoped_nsobject_unittest.mm
deleted file mode 100644
index 377a3de..0000000
--- a/src/base/memory/scoped_nsobject_unittest.mm
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <vector>
-
-#include "base/basictypes.h"
-#include "base/mac/scoped_nsautorelease_pool.h"
-#include "base/memory/scoped_nsobject.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace {
-
-TEST(ScopedNSObjectTest, ScopedNSObject) {
-  scoped_nsobject<NSObject> p1([[NSObject alloc] init]);
-  ASSERT_TRUE(p1.get());
-  ASSERT_EQ(1u, [p1 retainCount]);
-  scoped_nsobject<NSObject> p2(p1);
-  ASSERT_EQ(p1.get(), p2.get());
-  ASSERT_EQ(2u, [p1 retainCount]);
-  p2.reset();
-  ASSERT_EQ(nil, p2.get());
-  ASSERT_EQ(1u, [p1 retainCount]);
-  {
-    scoped_nsobject<NSObject> p3 = p1;
-    ASSERT_EQ(p1.get(), p3.get());
-    ASSERT_EQ(2u, [p1 retainCount]);
-    p3 = p1;
-    ASSERT_EQ(p1.get(), p3.get());
-    ASSERT_EQ(2u, [p1 retainCount]);
-  }
-  ASSERT_EQ(1u, [p1 retainCount]);
-  scoped_nsobject<NSObject> p4(p1.get(), base::scoped_policy::RETAIN);
-  ASSERT_EQ(2u, [p1 retainCount]);
-  ASSERT_TRUE(p1 == p1.get());
-  ASSERT_TRUE(p1 == p1);
-  ASSERT_FALSE(p1 != p1);
-  ASSERT_FALSE(p1 != p1.get());
-  scoped_nsobject<NSObject> p5([[NSObject alloc] init]);
-  ASSERT_TRUE(p1 != p5);
-  ASSERT_TRUE(p1 != p5.get());
-  ASSERT_FALSE(p1 == p5);
-  ASSERT_FALSE(p1 == p5.get());
-
-  scoped_nsobject<NSObject> p6 = p1;
-  ASSERT_EQ(3u, [p6 retainCount]);
-  {
-    base::mac::ScopedNSAutoreleasePool pool;
-    p6.autorelease();
-    ASSERT_EQ(nil, p6.get());
-    ASSERT_EQ(3u, [p1 retainCount]);
-  }
-  ASSERT_EQ(2u, [p1 retainCount]);
-}
-
-TEST(ScopedNSObjectTest, ScopedNSObjectInContainer) {
-  scoped_nsobject<id> p([[NSObject alloc] init]);
-  ASSERT_TRUE(p.get());
-  ASSERT_EQ(1u, [p retainCount]);
-  {
-    std::vector<scoped_nsobject<id> > objects;
-    objects.push_back(p);
-    ASSERT_EQ(2u, [p retainCount]);
-    ASSERT_EQ(p.get(), objects[0].get());
-    objects.push_back(scoped_nsobject<id>([[NSObject alloc] init]));
-    ASSERT_TRUE(objects[1].get());
-    ASSERT_EQ(1u, [objects[1] retainCount]);
-  }
-  ASSERT_EQ(1u, [p retainCount]);
-}
-
-TEST(ScopedNSObjectTest, ScopedNSObjectFreeFunctions) {
-  scoped_nsobject<id> p1([[NSObject alloc] init]);
-  id o1 = p1.get();
-  ASSERT_TRUE(o1 == p1);
-  ASSERT_FALSE(o1 != p1);
-  scoped_nsobject<id> p2([[NSObject alloc] init]);
-  ASSERT_TRUE(o1 != p2);
-  ASSERT_FALSE(o1 == p2);
-  id o2 = p2.get();
-  swap(p1, p2);
-  ASSERT_EQ(o2, p1.get());
-  ASSERT_EQ(o1, p2.get());
-}
-
-}  // namespace
diff --git a/src/base/memory/scoped_open_process.h b/src/base/memory/scoped_open_process.h
deleted file mode 100644
index 93ba387..0000000
--- a/src/base/memory/scoped_open_process.h
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MEMORY_SCOPED_OPEN_PROCESS_H_
-#define BASE_MEMORY_SCOPED_OPEN_PROCESS_H_
-
-#include "base/process.h"
-#include "base/process_util.h"
-
-namespace base {
-
-// A class that opens a process from its process id and closes it when the
-// instance goes out of scope.
-class ScopedOpenProcess {
- public:
-  ScopedOpenProcess() : handle_(kNullProcessHandle) {
-  }
-
-  // Automatically close the process.
-  ~ScopedOpenProcess() {
-    Close();
-  }
-
-  // Open a new process by pid. Closes any previously opened process (even if
-  // opening the new one fails).
-  bool Open(ProcessId pid) {
-    Close();
-    return OpenProcessHandle(pid, &handle_);
-  }
-
-  // Close the previously opened process.
-  void Close() {
-    if (handle_ == kNullProcessHandle)
-      return;
-
-    CloseProcessHandle(handle_);
-    handle_ = kNullProcessHandle;
-  }
-
-  ProcessHandle handle() const { return handle_; }
-
- private:
-  ProcessHandle handle_;
-  DISALLOW_COPY_AND_ASSIGN(ScopedOpenProcess);
-};
-}  // namespace base
-
-#endif  // BASE_MEMORY_SCOPED_OPEN_PROCESS_H_
diff --git a/src/base/memory/scoped_ptr.h b/src/base/memory/scoped_ptr.h
deleted file mode 100644
index 7f2d7dc..0000000
--- a/src/base/memory/scoped_ptr.h
+++ /dev/null
@@ -1,537 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Scopers help you manage ownership of a pointer, helping you easily manage the
-// a pointer within a scope, and automatically destroying the pointer at the
-// end of a scope.  There are two main classes you will use, which correspond
-// to the operators new/delete and new[]/delete[].
-//
-// Example usage (scoped_ptr):
-//   {
-//     scoped_ptr<Foo> foo(new Foo("wee"));
-//   }  // foo goes out of scope, releasing the pointer with it.
-//
-//   {
-//     scoped_ptr<Foo> foo;          // No pointer managed.
-//     foo.reset(new Foo("wee"));    // Now a pointer is managed.
-//     foo.reset(new Foo("wee2"));   // Foo("wee") was destroyed.
-//     foo.reset(new Foo("wee3"));   // Foo("wee2") was destroyed.
-//     foo->Method();                // Foo::Method() called.
-//     foo.get()->Method();          // Foo::Method() called.
-//     SomeFunc(foo.release());      // SomeFunc takes ownership, foo no longer
-//                                   // manages a pointer.
-//     foo.reset(new Foo("wee4"));   // foo manages a pointer again.
-//     foo.reset();                  // Foo("wee4") destroyed, foo no longer
-//                                   // manages a pointer.
-//   }  // foo wasn't managing a pointer, so nothing was destroyed.
-//
-// Example usage (scoped_array):
-//   {
-//     scoped_array<Foo> foo(new Foo[100]);
-//     foo.get()->Method();  // Foo::Method on the 0th element.
-//     foo[10].Method();     // Foo::Method on the 10th element.
-//   }
-//
-// These scopers also implement part of the functionality of C++11 unique_ptr
-// in that they are "movable but not copyable."  You can use the scopers in
-// the parameter and return types of functions to signify ownership transfer
-// in to and out of a function.  When calling a function that has a scoper
-// as the argument type, it must be called with the result of an analogous
-// scoper's Pass() function or another function that generates a temporary;
-// passing by copy will NOT work.  Here is an example using scoped_ptr:
-//
-//   void TakesOwnership(scoped_ptr<Foo> arg) {
-//     // Do something with arg
-//   }
-//   scoped_ptr<Foo> CreateFoo() {
-//     // No need for calling Pass() because we are constructing a temporary
-//     // for the return value.
-//     return scoped_ptr<Foo>(new Foo("new"));
-//   }
-//   scoped_ptr<Foo> PassThru(scoped_ptr<Foo> arg) {
-//     return arg.Pass();
-//   }
-//
-//   {
-//     scoped_ptr<Foo> ptr(new Foo("yay"));  // ptr manages Foo("yay").
-//     TakesOwnership(ptr.Pass());           // ptr no longer owns Foo("yay").
-//     scoped_ptr<Foo> ptr2 = CreateFoo();   // ptr2 owns the return Foo.
-//     scoped_ptr<Foo> ptr3 =                // ptr3 now owns what was in ptr2.
-//         PassThru(ptr2.Pass());            // ptr2 is correspondingly NULL.
-//   }
-//
-// Notice that if you do not call Pass() when returning from PassThru(), or
-// when invoking TakesOwnership(), the code will not compile because scopers
-// are not copyable; they only implement move semantics which require calling
-// the Pass() function to signify a destructive transfer of state. CreateFoo()
-// is different though because we are constructing a temporary on the return
-// line and thus can avoid needing to call Pass().
-//
-// Pass() properly handles upcast in assignment, i.e. you can assign
-// scoped_ptr<Child> to scoped_ptr<Parent>:
-//
-//   scoped_ptr<Foo> foo(new Foo());
-//   scoped_ptr<FooParent> parent = foo.Pass();
-//
-// PassAs<>() should be used to upcast return value in return statement:
-//
-//   scoped_ptr<Foo> CreateFoo() {
-//     scoped_ptr<FooChild> result(new FooChild());
-//     return result.PassAs<Foo>();
-//   }
-//
-// Note that PassAs<>() is implemented only for scoped_ptr, but not for
-// scoped_array. This is because casting array pointers may not be safe.
-
-#ifndef BASE_MEMORY_SCOPED_PTR_H_
-#define BASE_MEMORY_SCOPED_PTR_H_
-
-// This is an implementation designed to match the anticipated future TR2
-// implementation of the scoped_ptr class, and its closely-related brethren,
-// scoped_array, scoped_ptr_malloc.
-
-#include <assert.h>
-#include <stddef.h>
-#include <stdlib.h>
-
-#include "base/basictypes.h"
-#include "base/compiler_specific.h"
-#include "base/move.h"
-#include "base/template_util.h"
-#include "build/build_config.h"
-
-#if defined(OS_STARBOARD)
-#include "starboard/memory.h"
-#endif
-
-namespace base {
-
-namespace subtle {
-class RefCountedBase;
-class RefCountedThreadSafeBase;
-}  // namespace subtle
-
-namespace internal {
-
-template <typename T> struct IsNotRefCounted {
-  enum {
-    value = !base::is_convertible<T*, base::subtle::RefCountedBase*>::value &&
-        !base::is_convertible<T*, base::subtle::RefCountedThreadSafeBase*>::
-            value
-  };
-};
-
-}  // namespace internal
-}  // namespace base
-
-// A scoped_ptr<T> is like a T*, except that the destructor of scoped_ptr<T>
-// automatically deletes the pointer it holds (if any).
-// That is, scoped_ptr<T> owns the T object that it points to.
-// Like a T*, a scoped_ptr<T> may hold either NULL or a pointer to a T object.
-// Also like T*, scoped_ptr<T> is thread-compatible, and once you
-// dereference it, you get the thread safety guarantees of T.
-//
-// The size of a scoped_ptr is small:
-// sizeof(scoped_ptr<C>) == sizeof(C*)
-template <class C>
-class scoped_ptr {
-  MOVE_ONLY_TYPE_FOR_CPP_03(scoped_ptr, RValue)
-
-  COMPILE_ASSERT(base::internal::IsNotRefCounted<C>::value,
-                 C_is_refcounted_type_and_needs_scoped_refptr);
-
- public:
-
-  // The element type
-  typedef C element_type;
-
-  // Constructor.  Defaults to initializing with NULL.
-  // There is no way to create an uninitialized scoped_ptr.
-  // The input parameter must be allocated with new.
-  explicit scoped_ptr(C* p = NULL) : ptr_(p) { }
-
-  // The GHS compiler always chooses this copy constructor over the next one,
-  // so disable this to promote the more important and frequently used constr.
-#if !defined(COMPILER_GHS)
-  // Constructor.  Allows construction from a scoped_ptr rvalue for a
-  // convertible type.
-  template <typename U>
-  scoped_ptr(scoped_ptr<U> other) : ptr_(other.release()) { }
-#endif
-
-  // Constructor.  Move constructor for C++03 move emulation of this type.
-  scoped_ptr(RValue rvalue)
-      : ptr_(rvalue.object->release()) {
-  }
-
-  // Destructor.  If there is a C object, delete it.
-  // We don't need to test ptr_ == NULL because C++ does that for us.
-  ~scoped_ptr() {
-    enum { type_must_be_complete = sizeof(C) };
-    delete ptr_;
-  }
-
-  // operator=.  Allows assignment from a scoped_ptr rvalue for a convertible
-  // type.
-  template <typename U>
-  scoped_ptr& operator=(scoped_ptr<U> rhs) {
-    reset(rhs.release());
-    return *this;
-  }
-
-  // operator=.  Move operator= for C++03 move emulation of this type.
-  scoped_ptr& operator=(RValue rhs) {
-    swap(*rhs->object);
-    return *this;
-  }
-
-  // Reset.  Deletes the current owned object, if any.
-  // Then takes ownership of a new object, if given.
-  // this->reset(this->get()) works.
-  void reset(C* p = NULL) {
-    if (p != ptr_) {
-      enum { type_must_be_complete = sizeof(C) };
-      delete ptr_;
-      ptr_ = p;
-    }
-  }
-
-  // Accessors to get the owned object.
-  // operator* and operator-> will assert() if there is no current object.
-  C& operator*() const {
-    assert(ptr_ != NULL);
-    return *ptr_;
-  }
-  C* operator->() const  {
-    assert(ptr_ != NULL);
-    return ptr_;
-  }
-  C* get() const { return ptr_; }
-
-  // Allow scoped_ptr<C> to be used in boolean expressions, but not
-  // implicitly convertible to a real bool (which is dangerous).
-  typedef C* scoped_ptr::*Testable;
-  operator Testable() const { return ptr_ ? &scoped_ptr::ptr_ : NULL; }
-
-  // Comparison operators.
-  // These return whether two scoped_ptr refer to the same object, not just to
-  // two different but equal objects.
-  bool operator==(C* p) const { return ptr_ == p; }
-  bool operator!=(C* p) const { return ptr_ != p; }
-
-  // Swap two scoped pointers.
-  void swap(scoped_ptr& p2) {
-    C* tmp = ptr_;
-    ptr_ = p2.ptr_;
-    p2.ptr_ = tmp;
-  }
-
-  // Release a pointer.
-  // The return value is the current pointer held by this object.
-  // If this object holds a NULL pointer, the return value is NULL.
-  // After this operation, this object will hold a NULL pointer,
-  // and will not own the object any more.
-  C* release() WARN_UNUSED_RESULT {
-    C* retVal = ptr_;
-    ptr_ = NULL;
-    return retVal;
-  }
-
-  template <typename PassAsType>
-  scoped_ptr<PassAsType> PassAs() {
-    return scoped_ptr<PassAsType>(release());
-  }
-
- private:
-  C* ptr_;
-
-  // Forbid comparison of scoped_ptr types.  If C2 != C, it totally doesn't
-  // make sense, and if C2 == C, it still doesn't make sense because you should
-  // never have the same object owned by two different scoped_ptrs.
-  template <class C2> bool operator==(scoped_ptr<C2> const& p2) const;
-  template <class C2> bool operator!=(scoped_ptr<C2> const& p2) const;
-
-};
-
-// Free functions
-template <class C>
-void swap(scoped_ptr<C>& p1, scoped_ptr<C>& p2) {
-  p1.swap(p2);
-}
-
-template <class C>
-bool operator==(C* p1, const scoped_ptr<C>& p2) {
-  return p1 == p2.get();
-}
-
-template <class C>
-bool operator!=(C* p1, const scoped_ptr<C>& p2) {
-  return p1 != p2.get();
-}
-
-// scoped_array<C> is like scoped_ptr<C>, except that the caller must allocate
-// with new [] and the destructor deletes objects with delete [].
-//
-// As with scoped_ptr<C>, a scoped_array<C> either points to an object
-// or is NULL.  A scoped_array<C> owns the object that it points to.
-// scoped_array<T> is thread-compatible, and once you index into it,
-// the returned objects have only the thread safety guarantees of T.
-//
-// Size: sizeof(scoped_array<C>) == sizeof(C*)
-template <class C>
-class scoped_array {
-  MOVE_ONLY_TYPE_FOR_CPP_03(scoped_array, RValue)
-
- public:
-
-  // The element type
-  typedef C element_type;
-
-  // Constructor.  Defaults to initializing with NULL.
-  // There is no way to create an uninitialized scoped_array.
-  // The input parameter must be allocated with new [].
-  explicit scoped_array(C* p = NULL) : array_(p) { }
-
-  // Constructor.  Move constructor for C++03 move emulation of this type.
-  scoped_array(RValue rvalue)
-      : array_(rvalue.object->release()) {
-  }
-
-  // Destructor.  If there is a C object, delete it.
-  // We don't need to test ptr_ == NULL because C++ does that for us.
-  ~scoped_array() {
-    enum { type_must_be_complete = sizeof(C) };
-    delete[] array_;
-  }
-
-  // operator=.  Move operator= for C++03 move emulation of this type.
-  scoped_array& operator=(RValue rhs) {
-    swap(*rhs.object);
-    return *this;
-  }
-
-  // Reset.  Deletes the current owned object, if any.
-  // Then takes ownership of a new object, if given.
-  // this->reset(this->get()) works.
-  void reset(C* p = NULL) {
-    if (p != array_) {
-      enum { type_must_be_complete = sizeof(C) };
-      delete[] array_;
-      array_ = p;
-    }
-  }
-
-  // Get one element of the current object.
-  // Will assert() if there is no current object, or index i is negative.
-  C& operator[](ptrdiff_t i) const {
-    assert(i >= 0);
-    assert(array_ != NULL);
-    return array_[i];
-  }
-
-  // Get a pointer to the zeroth element of the current object.
-  // If there is no current object, return NULL.
-  C* get() const {
-    return array_;
-  }
-
-  // Allow scoped_array<C> to be used in boolean expressions, but not
-  // implicitly convertible to a real bool (which is dangerous).
-  typedef C* scoped_array::*Testable;
-  operator Testable() const { return array_ ? &scoped_array::array_ : NULL; }
-
-  // Comparison operators.
-  // These return whether two scoped_array refer to the same object, not just to
-  // two different but equal objects.
-  bool operator==(C* p) const { return array_ == p; }
-  bool operator!=(C* p) const { return array_ != p; }
-
-  // Swap two scoped arrays.
-  void swap(scoped_array& p2) {
-    C* tmp = array_;
-    array_ = p2.array_;
-    p2.array_ = tmp;
-  }
-
-  // Release an array.
-  // The return value is the current pointer held by this object.
-  // If this object holds a NULL pointer, the return value is NULL.
-  // After this operation, this object will hold a NULL pointer,
-  // and will not own the object any more.
-  C* release() WARN_UNUSED_RESULT {
-    C* retVal = array_;
-    array_ = NULL;
-    return retVal;
-  }
-
- private:
-  C* array_;
-
-  // Forbid comparison of different scoped_array types.
-  template <class C2> bool operator==(scoped_array<C2> const& p2) const;
-  template <class C2> bool operator!=(scoped_array<C2> const& p2) const;
-};
-
-// Free functions
-template <class C>
-void swap(scoped_array<C>& p1, scoped_array<C>& p2) {
-  p1.swap(p2);
-}
-
-template <class C>
-bool operator==(C* p1, const scoped_array<C>& p2) {
-  return p1 == p2.get();
-}
-
-template <class C>
-bool operator!=(C* p1, const scoped_array<C>& p2) {
-  return p1 != p2.get();
-}
-
-// This class wraps the c library function free() in a class that can be
-// passed as a template argument to scoped_ptr_malloc below.
-class ScopedPtrMallocFree {
- public:
-  inline void operator()(void* x) const {
-#if defined(OS_STARBOARD)
-    SbMemoryDeallocate(x);
-#else
-    free(x);
-#endif
-  }
-};
-
-// scoped_ptr_malloc<> is similar to scoped_ptr<>, but it accepts a
-// second template argument, the functor used to free the object.
-
-template<class C, class FreeProc = ScopedPtrMallocFree>
-class scoped_ptr_malloc {
-  MOVE_ONLY_TYPE_FOR_CPP_03(scoped_ptr_malloc, RValue)
-
- public:
-
-  // The element type
-  typedef C element_type;
-
-  // Constructor.  Defaults to initializing with NULL.
-  // There is no way to create an uninitialized scoped_ptr.
-  // The input parameter must be allocated with an allocator that matches the
-  // Free functor.  For the default Free functor, this is malloc, calloc, or
-  // realloc.
-  explicit scoped_ptr_malloc(C* p = NULL): ptr_(p) {}
-
-  // Constructor.  Move constructor for C++03 move emulation of this type.
-  scoped_ptr_malloc(RValue rvalue)
-      : ptr_(rvalue.object->release()) {
-  }
-
-  // Destructor.  If there is a C object, call the Free functor.
-  ~scoped_ptr_malloc() {
-    reset();
-  }
-
-  // operator=.  Move operator= for C++03 move emulation of this type.
-  scoped_ptr_malloc& operator=(RValue rhs) {
-    swap(*rhs.object);
-    return *this;
-  }
-
-  // Reset.  Calls the Free functor on the current owned object, if any.
-  // Then takes ownership of a new object, if given.
-  // this->reset(this->get()) works.
-  void reset(C* p = NULL) {
-    if (ptr_ != p) {
-      FreeProc free_proc;
-      free_proc(ptr_);
-      ptr_ = p;
-    }
-  }
-
-  // Get the current object.
-  // operator* and operator-> will cause an assert() failure if there is
-  // no current object.
-  C& operator*() const {
-    assert(ptr_ != NULL);
-    return *ptr_;
-  }
-
-  C* operator->() const {
-    assert(ptr_ != NULL);
-    return ptr_;
-  }
-
-  C* get() const {
-    return ptr_;
-  }
-
-  // Allow scoped_ptr_malloc<C> to be used in boolean expressions, but not
-  // implicitly convertible to a real bool (which is dangerous).
-  typedef C* scoped_ptr_malloc::*Testable;
-  operator Testable() const { return ptr_ ? &scoped_ptr_malloc::ptr_ : NULL; }
-
-  // Comparison operators.
-  // These return whether a scoped_ptr_malloc and a plain pointer refer
-  // to the same object, not just to two different but equal objects.
-  // For compatibility with the boost-derived implementation, these
-  // take non-const arguments.
-  bool operator==(C* p) const {
-    return ptr_ == p;
-  }
-
-  bool operator!=(C* p) const {
-    return ptr_ != p;
-  }
-
-  // Swap two scoped pointers.
-  void swap(scoped_ptr_malloc & b) {
-    C* tmp = b.ptr_;
-    b.ptr_ = ptr_;
-    ptr_ = tmp;
-  }
-
-  // Release a pointer.
-  // The return value is the current pointer held by this object.
-  // If this object holds a NULL pointer, the return value is NULL.
-  // After this operation, this object will hold a NULL pointer,
-  // and will not own the object any more.
-  C* release() WARN_UNUSED_RESULT {
-    C* tmp = ptr_;
-    ptr_ = NULL;
-    return tmp;
-  }
-
- private:
-  C* ptr_;
-
-  // no reason to use these: each scoped_ptr_malloc should have its own object
-  template <class C2, class GP>
-  bool operator==(scoped_ptr_malloc<C2, GP> const& p) const;
-  template <class C2, class GP>
-  bool operator!=(scoped_ptr_malloc<C2, GP> const& p) const;
-};
-
-template<class C, class FP> inline
-void swap(scoped_ptr_malloc<C, FP>& a, scoped_ptr_malloc<C, FP>& b) {
-  a.swap(b);
-}
-
-template<class C, class FP> inline
-bool operator==(C* p, const scoped_ptr_malloc<C, FP>& b) {
-  return p == b.get();
-}
-
-template<class C, class FP> inline
-bool operator!=(C* p, const scoped_ptr_malloc<C, FP>& b) {
-  return p != b.get();
-}
-
-// A function to convert T* into scoped_ptr<T>
-// Doing e.g. make_scoped_ptr(new FooBarBaz<type>(arg)) is a shorter notation
-// for scoped_ptr<FooBarBaz<type> >(new FooBarBaz<type>(arg))
-template <typename T>
-scoped_ptr<T> make_scoped_ptr(T* ptr) {
-  return scoped_ptr<T>(ptr);
-}
-
-#endif  // BASE_MEMORY_SCOPED_PTR_H_
diff --git a/src/base/memory/scoped_ptr_unittest.cc b/src/base/memory/scoped_ptr_unittest.cc
deleted file mode 100644
index 1860a4e..0000000
--- a/src/base/memory/scoped_ptr_unittest.cc
+++ /dev/null
@@ -1,366 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/basictypes.h"
-#include "base/memory/scoped_ptr.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace {
-
-// Used to test depth subtyping.
-class ConDecLoggerParent {
- public:
-  virtual ~ConDecLoggerParent() {}
-
-  virtual void SetPtr(int* ptr) = 0;
-
-  virtual int SomeMeth(int x) const = 0;
-};
-
-class ConDecLogger : public ConDecLoggerParent {
- public:
-  ConDecLogger() : ptr_(NULL) { }
-  explicit ConDecLogger(int* ptr) { SetPtr(ptr); }
-  virtual ~ConDecLogger() { --*ptr_; }
-
-  virtual void SetPtr(int* ptr) override { ptr_ = ptr; ++*ptr_; }
-
-  virtual int SomeMeth(int x) const override { return x; }
-
- private:
-  int* ptr_;
-
-  DISALLOW_COPY_AND_ASSIGN(ConDecLogger);
-};
-
-scoped_ptr<ConDecLogger> PassThru(scoped_ptr<ConDecLogger> logger) {
-  return logger.Pass();
-}
-
-void GrabAndDrop(scoped_ptr<ConDecLogger> logger) {
-}
-
-// Do not delete this function!  It's existence is to test that you can
-// return a temporarily constructed version of the scoper.
-scoped_ptr<ConDecLogger> TestReturnOfType(int* constructed) {
-  return scoped_ptr<ConDecLogger>(new ConDecLogger(constructed));
-}
-
-scoped_ptr<ConDecLoggerParent> UpcastUsingPassAs(
-    scoped_ptr<ConDecLogger> object) {
-  return object.PassAs<ConDecLoggerParent>();
-}
-
-}  // namespace
-
-TEST(ScopedPtrTest, ScopedPtr) {
-  int constructed = 0;
-
-  {
-    scoped_ptr<ConDecLogger> scoper(new ConDecLogger(&constructed));
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper.get());
-
-    EXPECT_EQ(10, scoper->SomeMeth(10));
-    EXPECT_EQ(10, scoper.get()->SomeMeth(10));
-    EXPECT_EQ(10, (*scoper).SomeMeth(10));
-  }
-  EXPECT_EQ(0, constructed);
-
-  // Test reset() and release()
-  {
-    scoped_ptr<ConDecLogger> scoper(new ConDecLogger(&constructed));
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper.get());
-
-    scoper.reset(new ConDecLogger(&constructed));
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper.get());
-
-    scoper.reset();
-    EXPECT_EQ(0, constructed);
-    EXPECT_FALSE(scoper.get());
-
-    scoper.reset(new ConDecLogger(&constructed));
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper.get());
-
-    ConDecLogger* take = scoper.release();
-    EXPECT_EQ(1, constructed);
-    EXPECT_FALSE(scoper.get());
-    delete take;
-    EXPECT_EQ(0, constructed);
-
-    scoper.reset(new ConDecLogger(&constructed));
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper.get());
-  }
-  EXPECT_EQ(0, constructed);
-
-  // Test swap(), == and !=
-  {
-    scoped_ptr<ConDecLogger> scoper1;
-    scoped_ptr<ConDecLogger> scoper2;
-    EXPECT_TRUE(scoper1 == scoper2.get());
-    EXPECT_FALSE(scoper1 != scoper2.get());
-
-    ConDecLogger* logger = new ConDecLogger(&constructed);
-    scoper1.reset(logger);
-    EXPECT_EQ(logger, scoper1.get());
-    EXPECT_FALSE(scoper2.get());
-    EXPECT_FALSE(scoper1 == scoper2.get());
-    EXPECT_TRUE(scoper1 != scoper2.get());
-
-    scoper2.swap(scoper1);
-    EXPECT_EQ(logger, scoper2.get());
-    EXPECT_FALSE(scoper1.get());
-    EXPECT_FALSE(scoper1 == scoper2.get());
-    EXPECT_TRUE(scoper1 != scoper2.get());
-  }
-  EXPECT_EQ(0, constructed);
-}
-
-TEST(ScopedPtrTest, ScopedPtrDepthSubtyping) {
-  int constructed = 0;
-
-#if !defined(COMPILER_GHS)
-  // Test construction from a scoped_ptr to a derived class.
-  {
-    scoped_ptr<ConDecLogger> scoper(new ConDecLogger(&constructed));
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper.get());
-
-    scoped_ptr<ConDecLoggerParent> scoper_parent(scoper.Pass());
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper_parent.get());
-    EXPECT_FALSE(scoper.get());
-
-    EXPECT_EQ(10, scoper_parent->SomeMeth(10));
-    EXPECT_EQ(10, scoper_parent.get()->SomeMeth(10));
-    EXPECT_EQ(10, (*scoper_parent).SomeMeth(10));
-  }
-  EXPECT_EQ(0, constructed);
-#endif
-
-  // Test assignment from a scoped_ptr to a derived class.
-  {
-    scoped_ptr<ConDecLogger> scoper(new ConDecLogger(&constructed));
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper.get());
-
-    scoped_ptr<ConDecLoggerParent> scoper_parent;
-    scoper_parent = scoper.Pass();
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper_parent.get());
-    EXPECT_FALSE(scoper.get());
-  }
-  EXPECT_EQ(0, constructed);
-
-#if !defined(COMPILER_GHS)
-  // For the GHS compiler we have had to disable depth subtyping constructor,
-  // since that interferes with the move constructor. Luckily, we don't use
-  // the subtyping constructor anywhere in chromium, yet.
-  // Test construction of a scoped_ptr with an additional const annotation.
-  {
-    scoped_ptr<ConDecLogger> scoper(new ConDecLogger(&constructed));
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper.get());
-
-    scoped_ptr<const ConDecLogger> scoper_const(scoper.Pass());
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper_const.get());
-    EXPECT_FALSE(scoper.get());
-
-    EXPECT_EQ(10, scoper_const->SomeMeth(10));
-    EXPECT_EQ(10, scoper_const.get()->SomeMeth(10));
-    EXPECT_EQ(10, (*scoper_const).SomeMeth(10));
-  }
-  EXPECT_EQ(0, constructed);
-#endif
-
-  // Test assignment to a scoped_ptr with an additional const annotation.
-  {
-    scoped_ptr<ConDecLogger> scoper(new ConDecLogger(&constructed));
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper.get());
-
-    scoped_ptr<const ConDecLogger> scoper_const;
-    scoper_const = scoper.Pass();
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper_const.get());
-    EXPECT_FALSE(scoper.get());
-  }
-  EXPECT_EQ(0, constructed);
-}
-
-TEST(ScopedPtrTest, ScopedArray) {
-  static const int kNumLoggers = 12;
-
-  int constructed = 0;
-
-  {
-    scoped_array<ConDecLogger> scoper(new ConDecLogger[kNumLoggers]);
-    EXPECT_TRUE(scoper.get());
-    EXPECT_EQ(&scoper[0], scoper.get());
-    for (int i = 0; i < kNumLoggers; ++i) {
-      scoper[i].SetPtr(&constructed);
-    }
-    EXPECT_EQ(12, constructed);
-
-    EXPECT_EQ(10, scoper.get()->SomeMeth(10));
-    EXPECT_EQ(10, scoper[2].SomeMeth(10));
-  }
-  EXPECT_EQ(0, constructed);
-
-  // Test reset() and release()
-  {
-    scoped_array<ConDecLogger> scoper;
-    EXPECT_FALSE(scoper.get());
-    EXPECT_FALSE(scoper.release());
-    EXPECT_FALSE(scoper.get());
-    scoper.reset();
-    EXPECT_FALSE(scoper.get());
-
-    scoper.reset(new ConDecLogger[kNumLoggers]);
-    for (int i = 0; i < kNumLoggers; ++i) {
-      scoper[i].SetPtr(&constructed);
-    }
-    EXPECT_EQ(12, constructed);
-    scoper.reset();
-    EXPECT_EQ(0, constructed);
-
-    scoper.reset(new ConDecLogger[kNumLoggers]);
-    for (int i = 0; i < kNumLoggers; ++i) {
-      scoper[i].SetPtr(&constructed);
-    }
-    EXPECT_EQ(12, constructed);
-    ConDecLogger* ptr = scoper.release();
-    EXPECT_EQ(12, constructed);
-    delete[] ptr;
-    EXPECT_EQ(0, constructed);
-  }
-  EXPECT_EQ(0, constructed);
-
-  // Test swap(), == and !=
-  {
-    scoped_array<ConDecLogger> scoper1;
-    scoped_array<ConDecLogger> scoper2;
-    EXPECT_TRUE(scoper1 == scoper2.get());
-    EXPECT_FALSE(scoper1 != scoper2.get());
-
-    ConDecLogger* loggers = new ConDecLogger[kNumLoggers];
-    for (int i = 0; i < kNumLoggers; ++i) {
-      loggers[i].SetPtr(&constructed);
-    }
-    scoper1.reset(loggers);
-    EXPECT_EQ(loggers, scoper1.get());
-    EXPECT_FALSE(scoper2.get());
-    EXPECT_FALSE(scoper1 == scoper2.get());
-    EXPECT_TRUE(scoper1 != scoper2.get());
-
-    scoper2.swap(scoper1);
-    EXPECT_EQ(loggers, scoper2.get());
-    EXPECT_FALSE(scoper1.get());
-    EXPECT_FALSE(scoper1 == scoper2.get());
-    EXPECT_TRUE(scoper1 != scoper2.get());
-  }
-  EXPECT_EQ(0, constructed);
-}
-
-TEST(ScopedPtrTest, PassBehavior) {
-  int constructed = 0;
-  {
-    ConDecLogger* logger = new ConDecLogger(&constructed);
-    scoped_ptr<ConDecLogger> scoper(logger);
-    EXPECT_EQ(1, constructed);
-
-    // Test Pass() with constructor;
-    scoped_ptr<ConDecLogger> scoper2(scoper.Pass());
-    EXPECT_EQ(1, constructed);
-
-    // Test Pass() with assignment;
-    scoped_ptr<ConDecLogger> scoper3;
-    scoper3 = scoper2.Pass();
-    EXPECT_EQ(1, constructed);
-    EXPECT_FALSE(scoper.get());
-    EXPECT_FALSE(scoper2.get());
-    EXPECT_TRUE(scoper3.get());
-  }
-
-  // Test uncaught Pass() does not leak.
-  {
-    ConDecLogger* logger = new ConDecLogger(&constructed);
-    scoped_ptr<ConDecLogger> scoper(logger);
-    EXPECT_EQ(1, constructed);
-
-    // Should auto-destruct logger by end of scope.
-    scoper.Pass();
-    EXPECT_FALSE(scoper.get());
-  }
-  EXPECT_EQ(0, constructed);
-
-  // Test that passing to function which does nothing does not leak.
-  {
-    ConDecLogger* logger = new ConDecLogger(&constructed);
-    scoped_ptr<ConDecLogger> scoper(logger);
-    EXPECT_EQ(1, constructed);
-
-    // Should auto-destruct logger by end of scope.
-    GrabAndDrop(scoper.Pass());
-    EXPECT_FALSE(scoper.get());
-  }
-  EXPECT_EQ(0, constructed);
-}
-
-TEST(ScopedPtrTest, ReturnTypeBehavior) {
-  int constructed = 0;
-
-  // Test that we can return a scoped_ptr.
-  {
-    ConDecLogger* logger = new ConDecLogger(&constructed);
-    scoped_ptr<ConDecLogger> scoper(logger);
-    EXPECT_EQ(1, constructed);
-
-    PassThru(scoper.Pass());
-    EXPECT_FALSE(scoper.get());
-  }
-  EXPECT_EQ(0, constructed);
-
-  // Test uncaught return type not leak.
-  {
-    ConDecLogger* logger = new ConDecLogger(&constructed);
-    scoped_ptr<ConDecLogger> scoper(logger);
-    EXPECT_EQ(1, constructed);
-
-    // Should auto-destruct logger by end of scope.
-    PassThru(scoper.Pass());
-    EXPECT_FALSE(scoper.get());
-  }
-  EXPECT_EQ(0, constructed);
-
-  // Call TestReturnOfType() so the compiler doesn't warn for an unused
-  // function.
-  {
-    TestReturnOfType(&constructed);
-  }
-  EXPECT_EQ(0, constructed);
-}
-
-TEST(ScopedPtrTest, PassAs) {
-  int constructed = 0;
-  {
-    scoped_ptr<ConDecLogger> scoper(new ConDecLogger(&constructed));
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper.get());
-
-    scoped_ptr<ConDecLoggerParent> scoper_parent;
-    scoper_parent = UpcastUsingPassAs(scoper.Pass());
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper_parent.get());
-    EXPECT_FALSE(scoper.get());
-  }
-  EXPECT_EQ(0, constructed);
-}
-
-// TODO scoped_ptr_malloc
diff --git a/src/base/memory/scoped_ptr_unittest.nc b/src/base/memory/scoped_ptr_unittest.nc
deleted file mode 100644
index 30a332e..0000000
--- a/src/base/memory/scoped_ptr_unittest.nc
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/basictypes.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/memory/ref_counted.h"
-
-namespace {
-
-class Parent {
-};
-
-class Child : public Parent {
-};
-
-class RefCountedClass : public base::RefCountedThreadSafe<RefCountedClass> {
-};
-
-}  // namespace
-
-#if defined(NCTEST_NO_PASSAS_DOWNCAST)  // [r"invalid conversion from"]
-
-scoped_ptr<Child> DowncastUsingPassAs(scoped_ptr<Parent> object) {
-  return object.PassAs<Child>();
-}
-
-#elif defined(NCTEST_NO_REF_COUNTED_SCOPED_PTR)  // [r"creating array with negative size"]
-
-// scoped_ptr<> should not work for ref-counted objects.
-void WontCompile() {
-  scoped_ptr<RefCountedClass> x;
-}
-
-#endif
diff --git a/src/base/memory/scoped_refptr.h b/src/base/memory/scoped_refptr.h
new file mode 100644
index 0000000..7b47e88
--- /dev/null
+++ b/src/base/memory/scoped_refptr.h
@@ -0,0 +1,352 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_SCOPED_REFPTR_H_
+#define BASE_MEMORY_SCOPED_REFPTR_H_
+
+#include <iosfwd>
+#include <type_traits>
+#include <utility>
+
+#include "base/compiler_specific.h"
+#include "base/cpp14oncpp11.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "starboard/types.h"
+
+template <class T>
+class scoped_refptr;
+
+namespace base {
+
+template <class, typename>
+class RefCounted;
+template <class, typename>
+class RefCountedThreadSafe;
+
+template <typename T>
+scoped_refptr<T> AdoptRef(T* t);
+
+namespace subtle {
+
+enum AdoptRefTag { kAdoptRefTag };
+enum StartRefCountFromZeroTag { kStartRefCountFromZeroTag };
+enum StartRefCountFromOneTag { kStartRefCountFromOneTag };
+
+template <typename T, typename U, typename V>
+constexpr bool IsRefCountPreferenceOverridden(const T*,
+                                              const RefCounted<U, V>*) {
+  return !std::is_same<std::decay_t<decltype(T::kRefCountPreference)>,
+                       std::decay_t<decltype(U::kRefCountPreference)>>::value;
+}
+
+template <typename T, typename U, typename V>
+constexpr bool IsRefCountPreferenceOverridden(
+    const T*,
+    const RefCountedThreadSafe<U, V>*) {
+  return !std::is_same<std::decay_t<decltype(T::kRefCountPreference)>,
+                       std::decay_t<decltype(U::kRefCountPreference)>>::value;
+}
+
+constexpr bool IsRefCountPreferenceOverridden(...) {
+  return false;
+}
+
+}  // namespace subtle
+
+// Creates a scoped_refptr from a raw pointer without incrementing the reference
+// count. Use this only for a newly created object whose reference count starts
+// from 1 instead of 0.
+template <typename T>
+scoped_refptr<T> AdoptRef(T* obj) {
+  using Tag = std::decay_t<decltype(T::kRefCountPreference)>;
+  static_assert(std::is_same<subtle::StartRefCountFromOneTag, Tag>::value,
+                "Use AdoptRef only for the reference count starts from one.");
+
+  DCHECK(obj);
+  DCHECK(obj->HasOneRef());
+  obj->Adopted();
+  return scoped_refptr<T>(obj, subtle::kAdoptRefTag);
+}
+
+namespace subtle {
+
+template <typename T>
+scoped_refptr<T> AdoptRefIfNeeded(T* obj, StartRefCountFromZeroTag) {
+  return scoped_refptr<T>(obj);
+}
+
+template <typename T>
+scoped_refptr<T> AdoptRefIfNeeded(T* obj, StartRefCountFromOneTag) {
+  return AdoptRef(obj);
+}
+
+}  // namespace subtle
+
+// Constructs an instance of T, which is a ref counted type, and wraps the
+// object into a scoped_refptr<T>.
+template <typename T, typename... Args>
+scoped_refptr<T> MakeRefCounted(Args&&... args) {
+  T* obj = new T(std::forward<Args>(args)...);
+  return subtle::AdoptRefIfNeeded(obj, T::kRefCountPreference);
+}
+
+// Takes an instance of T, which is a ref counted type, and wraps the object
+// into a scoped_refptr<T>.
+template <typename T>
+scoped_refptr<T> WrapRefCounted(T* t) {
+  return scoped_refptr<T>(t);
+}
+
+}  // namespace base
+
+//
+// A smart pointer class for reference counted objects.  Use this class instead
+// of calling AddRef and Release manually on a reference counted object to
+// avoid common memory leaks caused by forgetting to Release an object
+// reference.  Sample usage:
+//
+//   class MyFoo : public RefCounted<MyFoo> {
+//    ...
+//    private:
+//     friend class RefCounted<MyFoo>;  // Allow destruction by RefCounted<>.
+//     ~MyFoo();                        // Destructor must be private/protected.
+//   };
+//
+//   void some_function() {
+//     scoped_refptr<MyFoo> foo = MakeRefCounted<MyFoo>();
+//     foo->Method(param);
+//     // |foo| is released when this function returns
+//   }
+//
+//   void some_other_function() {
+//     scoped_refptr<MyFoo> foo = MakeRefCounted<MyFoo>();
+//     ...
+//     foo.reset();  // explicitly releases |foo|
+//     ...
+//     if (foo)
+//       foo->Method(param);
+//   }
+//
+// The above examples show how scoped_refptr<T> acts like a pointer to T.
+// Given two scoped_refptr<T> classes, it is also possible to exchange
+// references between the two objects, like so:
+//
+//   {
+//     scoped_refptr<MyFoo> a = MakeRefCounted<MyFoo>();
+//     scoped_refptr<MyFoo> b;
+//
+//     b.swap(a);
+//     // now, |b| references the MyFoo object, and |a| references nullptr.
+//   }
+//
+// To make both |a| and |b| in the above example reference the same MyFoo
+// object, simply use the assignment operator:
+//
+//   {
+//     scoped_refptr<MyFoo> a = MakeRefCounted<MyFoo>();
+//     scoped_refptr<MyFoo> b;
+//
+//     b = a;
+//     // now, |a| and |b| each own a reference to the same MyFoo object.
+//   }
+//
+// Also see Chromium's ownership and calling conventions:
+// https://chromium.googlesource.com/chromium/src/+/lkgr/styleguide/c++/c++.md#object-ownership-and-calling-conventions
+// Specifically:
+//   If the function (at least sometimes) takes a ref on a refcounted object,
+//   declare the param as scoped_refptr<T>. The caller can decide whether it
+//   wishes to transfer ownership (by calling std::move(t) when passing t) or
+//   retain its ref (by simply passing t directly).
+//   In other words, use scoped_refptr like you would a std::unique_ptr except
+//   in the odd case where it's required to hold on to a ref while handing one
+//   to another component (if a component merely needs to use t on the stack
+//   without keeping a ref: pass t as a raw T*).
+template <class T>
+class scoped_refptr {
+ public:
+  typedef T element_type;
+
+  constexpr scoped_refptr() = default;
+
+  // Constructs from raw pointer. constexpr if |p| is null.
+  CONSTEXPR scoped_refptr(T* p) : ptr_(p) {
+    if (ptr_)
+      AddRef(ptr_);
+  }
+
+  // Copy constructor. This is required in addition to the copy conversion
+  // constructor below.
+  scoped_refptr(const scoped_refptr& r) : scoped_refptr(r.ptr_) {}
+
+  // Copy conversion constructor.
+  template <typename U,
+            typename = typename std::enable_if<
+                std::is_convertible<U*, T*>::value>::type>
+  scoped_refptr(const scoped_refptr<U>& r) : scoped_refptr(r.ptr_) {}
+
+  // Move constructor. This is required in addition to the move conversion
+  // constructor below.
+  scoped_refptr(scoped_refptr&& r) noexcept : ptr_(r.ptr_) { r.ptr_ = nullptr; }
+
+  // Move conversion constructor.
+  template <typename U,
+            typename = typename std::enable_if<
+                std::is_convertible<U*, T*>::value>::type>
+  scoped_refptr(scoped_refptr<U>&& r) noexcept : ptr_(r.ptr_) {
+    r.ptr_ = nullptr;
+  }
+
+  ~scoped_refptr() {
+    static_assert(!base::subtle::IsRefCountPreferenceOverridden(
+                      static_cast<T*>(nullptr), static_cast<T*>(nullptr)),
+                  "It's unsafe to override the ref count preference."
+                  " Please remove REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE"
+                  " from subclasses.");
+    if (ptr_)
+      Release(ptr_);
+  }
+
+  T* get() const { return ptr_; }
+#if defined(STARBOARD)
+  // TODO[johnx]: remove this implicit converter and replace all occurrences
+  // of necessary implicit conversion with scoped_refptr.get().
+  operator T*() const { return ptr_; }
+#endif
+  T& operator*() const {
+    DCHECK(ptr_);
+    return *ptr_;
+  }
+
+  T* operator->() const {
+    DCHECK(ptr_);
+    return ptr_;
+  }
+
+#if !defined(STARBOARD)
+  scoped_refptr& operator=(T* p) { return *this = scoped_refptr(p); }
+#endif
+
+  // Unified assignment operator.
+  scoped_refptr& operator=(scoped_refptr r) noexcept {
+    swap(r);
+    return *this;
+  }
+
+  // Sets managed object to null and releases reference to the previous managed
+  // object, if it existed.
+  void reset() { scoped_refptr().swap(*this); }
+
+  void swap(scoped_refptr& r) noexcept { std::swap(ptr_, r.ptr_); }
+
+  explicit operator bool() const { return ptr_ != nullptr; }
+
+  template <typename U>
+  bool operator==(const scoped_refptr<U>& rhs) const {
+    return ptr_ == rhs.get();
+  }
+
+#if defined(STARBOARD)
+  template <typename U>
+  bool operator!=(U* rhs) const {
+    return ptr_ != rhs;
+  }
+#endif
+
+  template <typename U>
+  bool operator!=(const scoped_refptr<U>& rhs) const {
+    return !operator==(rhs);
+  }
+
+  template <typename U>
+  bool operator<(const scoped_refptr<U>& rhs) const {
+    return ptr_ < rhs.get();
+  }
+
+ protected:
+  T* ptr_ = nullptr;
+
+ private:
+  template <typename U>
+  friend scoped_refptr<U> base::AdoptRef(U*);
+
+  scoped_refptr(T* p, base::subtle::AdoptRefTag) : ptr_(p) {}
+
+  // Friend required for move constructors that set r.ptr_ to null.
+  template <typename U>
+  friend class scoped_refptr;
+
+  // Non-inline helpers to allow:
+  //     class Opaque;
+  //     extern template class scoped_refptr<Opaque>;
+  // Otherwise the compiler will complain that Opaque is an incomplete type.
+  static void AddRef(T* ptr);
+  static void Release(T* ptr);
+};
+
+// static
+template <typename T>
+void scoped_refptr<T>::AddRef(T* ptr) {
+  ptr->AddRef();
+}
+
+// static
+template <typename T>
+void scoped_refptr<T>::Release(T* ptr) {
+  ptr->Release();
+}
+
+#if !defined(STARBOARD)
+template <typename T, typename U>
+bool operator==(const scoped_refptr<T>& lhs, const U* rhs) {
+  return lhs.get() == rhs;
+}
+
+template <typename T, typename U>
+bool operator==(const T* lhs, const scoped_refptr<U>& rhs) {
+  return lhs == rhs.get();
+}
+
+template <typename T>
+bool operator==(const scoped_refptr<T>& lhs, std::nullptr_t /*null*/) {
+  return !static_cast<bool>(lhs);
+}
+
+template <typename T>
+bool operator==(std::nullptr_t /*null*/, const scoped_refptr<T>& rhs) {
+  return !static_cast<bool>(rhs);
+}
+
+template <typename T, typename U>
+bool operator!=(const scoped_refptr<T>& lhs, const U* rhs) {
+  return !operator==(lhs, rhs);
+}
+
+template <typename T, typename U>
+bool operator!=(const T* lhs, const scoped_refptr<U>& rhs) {
+  return !operator==(lhs, rhs);
+}
+
+template <typename T>
+bool operator!=(const scoped_refptr<T>& lhs, std::nullptr_t null) {
+  return !operator==(lhs, null);
+}
+
+template <typename T>
+bool operator!=(std::nullptr_t null, const scoped_refptr<T>& rhs) {
+  return !operator==(null, rhs);
+}
+#endif
+
+template <typename T>
+std::ostream& operator<<(std::ostream& out, const scoped_refptr<T>& p) {
+  return out << p.get();
+}
+
+template <typename T>
+void swap(scoped_refptr<T>& lhs, scoped_refptr<T>& rhs) noexcept {
+  lhs.swap(rhs);
+}
+
+#endif  // BASE_MEMORY_SCOPED_REFPTR_H_
diff --git a/src/base/memory/scoped_vector.h b/src/base/memory/scoped_vector.h
deleted file mode 100644
index 90a1f7d..0000000
--- a/src/base/memory/scoped_vector.h
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MEMORY_SCOPED_VECTOR_H_
-#define BASE_MEMORY_SCOPED_VECTOR_H_
-
-#include <vector>
-
-#include "base/basictypes.h"
-#include "base/move.h"
-#include "base/stl_util.h"
-
-// ScopedVector wraps a vector deleting the elements from its
-// destructor.
-template <class T>
-class ScopedVector {
-  MOVE_ONLY_TYPE_FOR_CPP_03(ScopedVector, RValue)
-
- public:
-  typedef typename std::vector<T*>::allocator_type allocator_type;
-  typedef typename std::vector<T*>::size_type size_type;
-  typedef typename std::vector<T*>::difference_type difference_type;
-  typedef typename std::vector<T*>::pointer pointer;
-  typedef typename std::vector<T*>::const_pointer const_pointer;
-  typedef typename std::vector<T*>::reference reference;
-  typedef typename std::vector<T*>::const_reference const_reference;
-  typedef typename std::vector<T*>::value_type value_type;
-  typedef typename std::vector<T*>::iterator iterator;
-  typedef typename std::vector<T*>::const_iterator const_iterator;
-  typedef typename std::vector<T*>::reverse_iterator reverse_iterator;
-  typedef typename std::vector<T*>::const_reverse_iterator
-      const_reverse_iterator;
-
-  ScopedVector() {}
-  ~ScopedVector() { clear(); }
-  ScopedVector(RValue other) { swap(*other.object); }
-
-  ScopedVector& operator=(RValue rhs) {
-    swap(*rhs.object);
-    return *this;
-  }
-
-  T*& operator[](size_t index) { return v_[index]; }
-  const T* operator[](size_t index) const { return v_[index]; }
-
-  bool empty() const { return v_.empty(); }
-  size_t size() const { return v_.size(); }
-
-  reverse_iterator rbegin() { return v_.rbegin(); }
-  const_reverse_iterator rbegin() const { return v_.rbegin(); }
-  reverse_iterator rend() { return v_.rend(); }
-  const_reverse_iterator rend() const { return v_.rend(); }
-
-  iterator begin() { return v_.begin(); }
-  const_iterator begin() const { return v_.begin(); }
-  iterator end() { return v_.end(); }
-  const_iterator end() const { return v_.end(); }
-
-  const_reference front() const { return v_.front(); }
-  reference front() { return v_.front(); }
-  const_reference back() const { return v_.back(); }
-  reference back() { return v_.back(); }
-
-  void push_back(T* elem) { v_.push_back(elem); }
-
-  std::vector<T*>& get() { return v_; }
-  const std::vector<T*>& get() const { return v_; }
-  void swap(std::vector<T*>& other) { v_.swap(other); }
-  void swap(ScopedVector<T>& other) { v_.swap(other.v_); }
-  void release(std::vector<T*>* out) {
-    out->swap(v_);
-    v_.clear();
-  }
-
-  void reserve(size_t capacity) { v_.reserve(capacity); }
-
-  // Resize, deleting elements in the disappearing range if we are shrinking.
-  void resize(size_t new_size) {
-    if (v_.size() > new_size)
-      STLDeleteContainerPointers(v_.begin() + new_size, v_.end());
-    v_.resize(new_size);
-  }
-
-  template<typename InputIterator>
-  void assign(InputIterator begin, InputIterator end) {
-    v_.assign(begin, end);
-  }
-
-  void clear() { STLDeleteElements(&v_); }
-
-  // Like |clear()|, but doesn't delete any elements.
-  void weak_clear() { v_.clear(); }
-
-  // Lets the ScopedVector take ownership of |x|.
-  iterator insert(iterator position, T* x) {
-    return v_.insert(position, x);
-  }
-
-  // Lets the ScopedVector take ownership of elements in [first,last).
-  template<typename InputIterator>
-  void insert(iterator position, InputIterator first, InputIterator last) {
-    v_.insert(position, first, last);
-  }
-
-  iterator erase(iterator position) {
-    delete *position;
-    return v_.erase(position);
-  }
-
-  iterator erase(iterator first, iterator last) {
-    STLDeleteContainerPointers(first, last);
-    return v_.erase(first, last);
-  }
-
-  // Like |erase()|, but doesn't delete the element at |position|.
-  iterator weak_erase(iterator position) {
-    return v_.erase(position);
-  }
-
-  // Like |erase()|, but doesn't delete the elements in [first, last).
-  iterator weak_erase(iterator first, iterator last) {
-    return v_.erase(first, last);
-  }
-
- private:
-  std::vector<T*> v_;
-};
-
-#endif  // BASE_MEMORY_SCOPED_VECTOR_H_
diff --git a/src/base/memory/scoped_vector_unittest.cc b/src/base/memory/scoped_vector_unittest.cc
deleted file mode 100644
index 0757a56..0000000
--- a/src/base/memory/scoped_vector_unittest.cc
+++ /dev/null
@@ -1,301 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/scoped_vector.h"
-
-#include "base/bind.h"
-#include "base/callback.h"
-#include "base/memory/scoped_ptr.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace {
-
-// The LifeCycleObject notifies its Observer upon construction & destruction.
-class LifeCycleObject {
- public:
-  class Observer {
-   public:
-    virtual void OnLifeCycleConstruct(LifeCycleObject* o) = 0;
-    virtual void OnLifeCycleDestroy(LifeCycleObject* o) = 0;
-
-   protected:
-    virtual ~Observer() {}
-  };
-
-  ~LifeCycleObject() {
-    observer_->OnLifeCycleDestroy(this);
-  }
-
- private:
-  friend class LifeCycleWatcher;
-
-  explicit LifeCycleObject(Observer* observer)
-      : observer_(observer) {
-    observer_->OnLifeCycleConstruct(this);
-  }
-
-  Observer* observer_;
-
-  DISALLOW_COPY_AND_ASSIGN(LifeCycleObject);
-};
-
-// The life cycle states we care about for the purposes of testing ScopedVector
-// against objects.
-enum LifeCycleState {
-  LC_INITIAL,
-  LC_CONSTRUCTED,
-  LC_DESTROYED,
-};
-
-// Because we wish to watch the life cycle of an object being constructed and
-// destroyed, and further wish to test expectations against the state of that
-// object, we cannot save state in that object itself. Instead, we use this
-// pairing of the watcher, which observes the object and notifies of
-// construction & destruction. Since we also may be testing assumptions about
-// things not getting freed, this class also acts like a scoping object and
-// deletes the |constructed_life_cycle_object_|, if any when the
-// LifeCycleWatcher is destroyed. To keep this simple, the only expected state
-// changes are:
-//   INITIAL -> CONSTRUCTED -> DESTROYED.
-// Anything more complicated than that should start another test.
-class LifeCycleWatcher : public LifeCycleObject::Observer {
- public:
-  LifeCycleWatcher()
-      : life_cycle_state_(LC_INITIAL),
-        constructed_life_cycle_object_(NULL) {}
-  virtual ~LifeCycleWatcher() {}
-
-  // Assert INITIAL -> CONSTRUCTED and no LifeCycleObject associated with this
-  // LifeCycleWatcher.
-  virtual void OnLifeCycleConstruct(LifeCycleObject* object) override {
-    ASSERT_EQ(LC_INITIAL, life_cycle_state_);
-    ASSERT_EQ(NULL, constructed_life_cycle_object_.get());
-    life_cycle_state_ = LC_CONSTRUCTED;
-    constructed_life_cycle_object_.reset(object);
-  }
-
-  // Assert CONSTRUCTED -> DESTROYED and the |object| being destroyed is the
-  // same one we saw constructed.
-  virtual void OnLifeCycleDestroy(LifeCycleObject* object) override {
-    ASSERT_EQ(LC_CONSTRUCTED, life_cycle_state_);
-    LifeCycleObject* constructed_life_cycle_object =
-        constructed_life_cycle_object_.release();
-    ASSERT_EQ(constructed_life_cycle_object, object);
-    life_cycle_state_ = LC_DESTROYED;
-  }
-
-  LifeCycleState life_cycle_state() const { return life_cycle_state_; }
-
-  // Factory method for creating a new LifeCycleObject tied to this
-  // LifeCycleWatcher.
-  LifeCycleObject* NewLifeCycleObject() {
-    return new LifeCycleObject(this);
-  }
-
-  // Returns true iff |object| is the same object that this watcher is tracking.
-  bool IsWatching(LifeCycleObject* object) const {
-    return object == constructed_life_cycle_object_.get();
-  }
-
- private:
-  LifeCycleState life_cycle_state_;
-  scoped_ptr<LifeCycleObject> constructed_life_cycle_object_;
-
-  DISALLOW_COPY_AND_ASSIGN(LifeCycleWatcher);
-};
-
-TEST(ScopedVectorTest, LifeCycleWatcher) {
-  LifeCycleWatcher watcher;
-  EXPECT_EQ(LC_INITIAL, watcher.life_cycle_state());
-  LifeCycleObject* object = watcher.NewLifeCycleObject();
-  EXPECT_EQ(LC_CONSTRUCTED, watcher.life_cycle_state());
-  delete object;
-  EXPECT_EQ(LC_DESTROYED, watcher.life_cycle_state());
-}
-
-TEST(ScopedVectorTest, Clear) {
-  LifeCycleWatcher watcher;
-  EXPECT_EQ(LC_INITIAL, watcher.life_cycle_state());
-  ScopedVector<LifeCycleObject> scoped_vector;
-  scoped_vector.push_back(watcher.NewLifeCycleObject());
-  EXPECT_EQ(LC_CONSTRUCTED, watcher.life_cycle_state());
-  EXPECT_TRUE(watcher.IsWatching(scoped_vector.back()));
-  scoped_vector.clear();
-  EXPECT_EQ(LC_DESTROYED, watcher.life_cycle_state());
-  EXPECT_TRUE(scoped_vector.empty());
-}
-
-TEST(ScopedVectorTest, WeakClear) {
-  LifeCycleWatcher watcher;
-  EXPECT_EQ(LC_INITIAL, watcher.life_cycle_state());
-  ScopedVector<LifeCycleObject> scoped_vector;
-  scoped_vector.push_back(watcher.NewLifeCycleObject());
-  EXPECT_EQ(LC_CONSTRUCTED, watcher.life_cycle_state());
-  EXPECT_TRUE(watcher.IsWatching(scoped_vector.back()));
-  scoped_vector.weak_clear();
-  EXPECT_EQ(LC_CONSTRUCTED, watcher.life_cycle_state());
-  EXPECT_TRUE(scoped_vector.empty());
-}
-
-TEST(ScopedVectorTest, ResizeShrink) {
-  LifeCycleWatcher first_watcher;
-  EXPECT_EQ(LC_INITIAL, first_watcher.life_cycle_state());
-  LifeCycleWatcher second_watcher;
-  EXPECT_EQ(LC_INITIAL, second_watcher.life_cycle_state());
-  ScopedVector<LifeCycleObject> scoped_vector;
-
-  scoped_vector.push_back(first_watcher.NewLifeCycleObject());
-  EXPECT_EQ(LC_CONSTRUCTED, first_watcher.life_cycle_state());
-  EXPECT_EQ(LC_INITIAL, second_watcher.life_cycle_state());
-  EXPECT_TRUE(first_watcher.IsWatching(scoped_vector[0]));
-  EXPECT_FALSE(second_watcher.IsWatching(scoped_vector[0]));
-
-  scoped_vector.push_back(second_watcher.NewLifeCycleObject());
-  EXPECT_EQ(LC_CONSTRUCTED, first_watcher.life_cycle_state());
-  EXPECT_EQ(LC_CONSTRUCTED, second_watcher.life_cycle_state());
-  EXPECT_FALSE(first_watcher.IsWatching(scoped_vector[1]));
-  EXPECT_TRUE(second_watcher.IsWatching(scoped_vector[1]));
-
-  // Test that shrinking a vector deletes elements in the disappearing range.
-  scoped_vector.resize(1);
-  EXPECT_EQ(LC_CONSTRUCTED, first_watcher.life_cycle_state());
-  EXPECT_EQ(LC_DESTROYED, second_watcher.life_cycle_state());
-  EXPECT_EQ(1u, scoped_vector.size());
-  EXPECT_TRUE(first_watcher.IsWatching(scoped_vector[0]));
-}
-
-TEST(ScopedVectorTest, ResizeGrow) {
-  LifeCycleWatcher watcher;
-  EXPECT_EQ(LC_INITIAL, watcher.life_cycle_state());
-  ScopedVector<LifeCycleObject> scoped_vector;
-  scoped_vector.push_back(watcher.NewLifeCycleObject());
-  EXPECT_EQ(LC_CONSTRUCTED, watcher.life_cycle_state());
-  EXPECT_TRUE(watcher.IsWatching(scoped_vector.back()));
-
-  scoped_vector.resize(5);
-  EXPECT_EQ(LC_CONSTRUCTED, watcher.life_cycle_state());
-  ASSERT_EQ(5u, scoped_vector.size());
-  EXPECT_TRUE(watcher.IsWatching(scoped_vector[0]));
-  EXPECT_FALSE(watcher.IsWatching(scoped_vector[1]));
-  EXPECT_FALSE(watcher.IsWatching(scoped_vector[2]));
-  EXPECT_FALSE(watcher.IsWatching(scoped_vector[3]));
-  EXPECT_FALSE(watcher.IsWatching(scoped_vector[4]));
-}
-
-TEST(ScopedVectorTest, Scope) {
-  LifeCycleWatcher watcher;
-  EXPECT_EQ(LC_INITIAL, watcher.life_cycle_state());
-  {
-    ScopedVector<LifeCycleObject> scoped_vector;
-    scoped_vector.push_back(watcher.NewLifeCycleObject());
-    EXPECT_EQ(LC_CONSTRUCTED, watcher.life_cycle_state());
-    EXPECT_TRUE(watcher.IsWatching(scoped_vector.back()));
-  }
-  EXPECT_EQ(LC_DESTROYED, watcher.life_cycle_state());
-}
-
-TEST(ScopedVectorTest, MoveConstruct) {
-  LifeCycleWatcher watcher;
-  EXPECT_EQ(LC_INITIAL, watcher.life_cycle_state());
-  {
-    ScopedVector<LifeCycleObject> scoped_vector;
-    scoped_vector.push_back(watcher.NewLifeCycleObject());
-    EXPECT_FALSE(scoped_vector.empty());
-    EXPECT_TRUE(watcher.IsWatching(scoped_vector.back()));
-
-    ScopedVector<LifeCycleObject> scoped_vector_copy(scoped_vector.Pass());
-    EXPECT_TRUE(scoped_vector.empty());
-    EXPECT_FALSE(scoped_vector_copy.empty());
-    EXPECT_TRUE(watcher.IsWatching(scoped_vector_copy.back()));
-
-    EXPECT_EQ(LC_CONSTRUCTED, watcher.life_cycle_state());
-  }
-  EXPECT_EQ(LC_DESTROYED, watcher.life_cycle_state());
-}
-
-TEST(ScopedVectorTest, MoveAssign) {
-  LifeCycleWatcher watcher;
-  EXPECT_EQ(LC_INITIAL, watcher.life_cycle_state());
-  {
-    ScopedVector<LifeCycleObject> scoped_vector;
-    scoped_vector.push_back(watcher.NewLifeCycleObject());
-    ScopedVector<LifeCycleObject> scoped_vector_assign;
-    EXPECT_FALSE(scoped_vector.empty());
-    EXPECT_TRUE(watcher.IsWatching(scoped_vector.back()));
-
-    scoped_vector_assign = scoped_vector.Pass();
-    EXPECT_TRUE(scoped_vector.empty());
-    EXPECT_FALSE(scoped_vector_assign.empty());
-    EXPECT_TRUE(watcher.IsWatching(scoped_vector_assign.back()));
-
-    EXPECT_EQ(LC_CONSTRUCTED, watcher.life_cycle_state());
-  }
-  EXPECT_EQ(LC_DESTROYED, watcher.life_cycle_state());
-}
-
-class DeleteCounter {
- public:
-  explicit DeleteCounter(int* deletes)
-      : deletes_(deletes) {
-  }
-
-  ~DeleteCounter() {
-    (*deletes_)++;
-  }
-
-  void VoidMethod0() {}
-
- private:
-  int* const deletes_;
-
-  DISALLOW_COPY_AND_ASSIGN(DeleteCounter);
-};
-
-template <typename T>
-ScopedVector<T> PassThru(ScopedVector<T> scoper) {
-  return scoper.Pass();
-}
-
-TEST(ScopedVectorTest, Passed) {
-  int deletes = 0;
-  ScopedVector<DeleteCounter> deleter_vector;
-  deleter_vector.push_back(new DeleteCounter(&deletes));
-  EXPECT_EQ(0, deletes);
-  base::Callback<ScopedVector<DeleteCounter>(void)> callback =
-      base::Bind(&PassThru<DeleteCounter>, base::Passed(&deleter_vector));
-  EXPECT_EQ(0, deletes);
-  ScopedVector<DeleteCounter> result = callback.Run();
-  EXPECT_EQ(0, deletes);
-  result.clear();
-  EXPECT_EQ(1, deletes);
-};
-
-TEST(ScopedVectorTest, InsertRange) {
-  LifeCycleWatcher watchers[5];
-
-  std::vector<LifeCycleObject*> vec;
-  for(LifeCycleWatcher* it = watchers; it != watchers + arraysize(watchers);
-      ++it) {
-    EXPECT_EQ(LC_INITIAL, it->life_cycle_state());
-    vec.push_back(it->NewLifeCycleObject());
-    EXPECT_EQ(LC_CONSTRUCTED, it->life_cycle_state());
-  }
-  // Start scope for ScopedVector.
-  {
-    ScopedVector<LifeCycleObject> scoped_vector;
-    scoped_vector.insert(scoped_vector.end(), vec.begin() + 1, vec.begin() + 3);
-    for(LifeCycleWatcher* it = watchers; it != watchers + arraysize(watchers);
-        ++it)
-      EXPECT_EQ(LC_CONSTRUCTED, it->life_cycle_state());
-  }
-  for(LifeCycleWatcher* it = watchers; it != watchers + 1; ++it)
-    EXPECT_EQ(LC_CONSTRUCTED, it->life_cycle_state());
-  for(LifeCycleWatcher* it = watchers + 1; it != watchers + 3; ++it)
-    EXPECT_EQ(LC_DESTROYED, it->life_cycle_state());
-  for(LifeCycleWatcher* it = watchers + 3; it != watchers + arraysize(watchers);
-      ++it)
-    EXPECT_EQ(LC_CONSTRUCTED, it->life_cycle_state());
-}
-
-}  // namespace
diff --git a/src/base/memory/shared_memory.h b/src/base/memory/shared_memory.h
new file mode 100644
index 0000000..d6ac8bc
--- /dev/null
+++ b/src/base/memory/shared_memory.h
@@ -0,0 +1,257 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_SHARED_MEMORY_H_
+#define BASE_MEMORY_SHARED_MEMORY_H_
+
+// Starboard doesn't currently support multiple processes or shared memory.
+#if !defined(STARBOARD)
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/hash.h"
+#include "base/macros.h"
+#include "base/memory/shared_memory_handle.h"
+#include "base/process/process_handle.h"
+#include "base/strings/string16.h"
+#include "build/build_config.h"
+
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include <stdio.h>
+#include <sys/types.h>
+#include <semaphore.h>
+#include "base/file_descriptor_posix.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_file.h"
+#endif
+
+#if defined(OS_WIN)
+#include "base/win/scoped_handle.h"
+#include "starboard/types.h"
+#endif
+
+namespace base {
+
+class FilePath;
+
+// Options for creating a shared memory object.
+struct BASE_EXPORT SharedMemoryCreateOptions {
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+  // The type of OS primitive that should back the SharedMemory object.
+  SharedMemoryHandle::Type type = SharedMemoryHandle::MACH;
+#elif !defined(OS_FUCHSIA)
+  // DEPRECATED (crbug.com/345734):
+  // If NULL, the object is anonymous.  This pointer is owned by the caller
+  // and must live through the call to Create().
+  const std::string* name_deprecated = nullptr;
+
+  // DEPRECATED (crbug.com/345734):
+  // If true, and the shared memory already exists, Create() will open the
+  // existing shared memory and ignore the size parameter.  If false,
+  // shared memory must not exist.  This flag is meaningless unless
+  // name_deprecated is non-NULL.
+  bool open_existing_deprecated = false;
+#endif  // defined(OS_MACOSX) && !defined(OS_IOS)
+
+  // Size of the shared memory object to be created.
+  // When opening an existing object, this has no effect.
+  size_t size = 0;
+
+  // If true, mappings might need to be made executable later.
+  bool executable = false;
+
+  // If true, the file can be shared read-only to a process.
+  bool share_read_only = false;
+};
+
+// Platform abstraction for shared memory.
+// SharedMemory consumes a SharedMemoryHandle [potentially one that it created]
+// to map a shared memory OS resource into the virtual address space of the
+// current process.
+class BASE_EXPORT SharedMemory {
+ public:
+  SharedMemory();
+
+#if defined(OS_WIN)
+  // Similar to the default constructor, except that this allows for
+  // calling LockDeprecated() to acquire the named mutex before either Create or
+  // Open are called on Windows.
+  explicit SharedMemory(const string16& name);
+#endif
+
+  // Create a new SharedMemory object from an existing, open
+  // shared memory file.
+  //
+  // WARNING: This does not reduce the OS-level permissions on the handle; it
+  // only affects how the SharedMemory will be mmapped. Use
+  // GetReadOnlyHandle to drop permissions. TODO(jln,jyasskin): DCHECK
+  // that |read_only| matches the permissions of the handle.
+  SharedMemory(const SharedMemoryHandle& handle, bool read_only);
+
+  // Closes any open files.
+  ~SharedMemory();
+
+  // Return true iff the given handle is valid (i.e. not the distinguished
+  // invalid value; NULL for a HANDLE and -1 for a file descriptor)
+  static bool IsHandleValid(const SharedMemoryHandle& handle);
+
+  // Closes a shared memory handle.
+  static void CloseHandle(const SharedMemoryHandle& handle);
+
+  // Returns the maximum number of handles that can be open at once per process.
+  static size_t GetHandleLimit();
+
+  // Duplicates the underlying OS primitive. Returns an invalid handle on
+  // failure. The caller is responsible for destroying the duplicated OS
+  // primitive.
+  static SharedMemoryHandle DuplicateHandle(const SharedMemoryHandle& handle);
+
+#if defined(OS_POSIX)
+  // This method requires that the SharedMemoryHandle is backed by a POSIX fd.
+  static int GetFdFromSharedMemoryHandle(const SharedMemoryHandle& handle);
+#endif
+
+  // Creates a shared memory object as described by the options struct.
+  // Returns true on success and false on failure.
+  bool Create(const SharedMemoryCreateOptions& options);
+
+  // Creates and maps an anonymous shared memory segment of size size.
+  // Returns true on success and false on failure.
+  bool CreateAndMapAnonymous(size_t size);
+
+  // Creates an anonymous shared memory segment of size size.
+  // Returns true on success and false on failure.
+  bool CreateAnonymous(size_t size) {
+    SharedMemoryCreateOptions options;
+    options.size = size;
+    return Create(options);
+  }
+
+#if (!defined(OS_MACOSX) || defined(OS_IOS)) && !defined(OS_FUCHSIA)
+  // DEPRECATED (crbug.com/345734):
+  // Creates or opens a shared memory segment based on a name.
+  // If open_existing is true, and the shared memory already exists,
+  // opens the existing shared memory and ignores the size parameter.
+  // If open_existing is false, shared memory must not exist.
+  // size is the size of the block to be created.
+  // Returns true on success, false on failure.
+  bool CreateNamedDeprecated(
+      const std::string& name, bool open_existing, size_t size) {
+    SharedMemoryCreateOptions options;
+    options.name_deprecated = &name;
+    options.open_existing_deprecated = open_existing;
+    options.size = size;
+    return Create(options);
+  }
+
+  // Deletes resources associated with a shared memory segment based on name.
+  // Not all platforms require this call.
+  bool Delete(const std::string& name);
+
+  // Opens a shared memory segment based on a name.
+  // If read_only is true, opens for read-only access.
+  // Returns true on success, false on failure.
+  bool Open(const std::string& name, bool read_only);
+#endif  // (!defined(OS_MACOSX) || defined(OS_IOS)) && !defined(OS_FUCHSIA)
+
+  // Maps the shared memory into the caller's address space.
+  // Returns true on success, false otherwise.  The memory address
+  // is accessed via the memory() accessor.  The mapped address is guaranteed to
+  // have an alignment of at least MAP_MINIMUM_ALIGNMENT. This method will fail
+  // if this object is currently mapped.
+  bool Map(size_t bytes) {
+    return MapAt(0, bytes);
+  }
+
+  // Same as above, but with |offset| to specify from beginning of the shared
+  // memory block to map.
+  // |offset| must be aligned to the value of |SysInfo::VMAllocationGranularity()|.
+  bool MapAt(off_t offset, size_t bytes);
+  enum { MAP_MINIMUM_ALIGNMENT = 32 };
+
+  // Unmaps the shared memory from the caller's address space.
+  // Returns true if successful; returns false on error or if the
+  // memory is not mapped.
+  bool Unmap();
+
+  // The size requested when the map is first created.
+  size_t requested_size() const { return requested_size_; }
+
+  // The actual size of the mapped memory (may be larger than requested).
+  size_t mapped_size() const { return mapped_size_; }
+
+  // Gets a pointer to the opened memory space if it has been
+  // Mapped via Map().  Returns NULL if it is not mapped.
+  void* memory() const { return memory_; }
+
+  // Returns the underlying OS handle for this segment.
+  // Use of this handle for anything other than an opaque
+  // identifier is not portable.
+  SharedMemoryHandle handle() const;
+
+  // Returns the underlying OS handle for this segment. The caller takes
+  // ownership of the handle and memory is unmapped. This is equivalent to
+  // duplicating the handle and then calling Unmap() and Close() on this object,
+  // without the overhead of duplicating the handle.
+  SharedMemoryHandle TakeHandle();
+
+  // Closes the open shared memory segment. The memory will remain mapped if
+  // it was previously mapped.
+  // It is safe to call Close repeatedly.
+  void Close();
+
+  // Returns a read-only handle to this shared memory region. The caller takes
+  // ownership of the handle. For POSIX handles, CHECK-fails if the region
+  // wasn't Created or Opened with share_read_only=true, which is required to
+  // make the handle read-only. When the handle is passed to the IPC subsystem,
+  // that takes ownership of the handle. As such, it's not valid to pass the
+  // same handle to the IPC subsystem twice. Returns an invalid handle on
+  // failure.
+  SharedMemoryHandle GetReadOnlyHandle() const;
+
+  // Returns an ID for the mapped region. This is ID of the SharedMemoryHandle
+  // that was mapped. The ID is valid even after the SharedMemoryHandle is
+  // Closed, as long as the region is not unmapped.
+  const UnguessableToken& mapped_id() const { return mapped_id_; }
+
+ private:
+#if defined(OS_POSIX) && !defined(OS_NACL) && !defined(OS_ANDROID) && \
+    (!defined(OS_MACOSX) || defined(OS_IOS))
+  bool FilePathForMemoryName(const std::string& mem_name, FilePath* path);
+#endif
+
+#if defined(OS_WIN)
+  // If true indicates this came from an external source so needs extra checks
+  // before being mapped.
+  bool external_section_ = false;
+  string16 name_;
+#elif !defined(OS_ANDROID) && !defined(OS_FUCHSIA)
+  // If valid, points to the same memory region as shm_, but with readonly
+  // permissions.
+  SharedMemoryHandle readonly_shm_;
+#endif
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+  // The mechanism by which the memory is mapped. Only valid if |memory_| is not
+  // |nullptr|.
+  SharedMemoryHandle::Type mapped_memory_mechanism_ = SharedMemoryHandle::MACH;
+#endif
+
+  // The OS primitive that backs the shared memory region.
+  SharedMemoryHandle shm_;
+
+  size_t mapped_size_ = 0;
+  void* memory_ = nullptr;
+  bool read_only_ = false;
+  size_t requested_size_ = 0;
+  base::UnguessableToken mapped_id_;
+
+  DISALLOW_COPY_AND_ASSIGN(SharedMemory);
+};
+
+}  // namespace base
+
+#endif  // !defined(STARBOARD)
+#endif  // BASE_MEMORY_SHARED_MEMORY_H_
diff --git a/src/base/memory/shared_memory_android.cc b/src/base/memory/shared_memory_android.cc
new file mode 100644
index 0000000..8375453
--- /dev/null
+++ b/src/base/memory/shared_memory_android.cc
@@ -0,0 +1,80 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory.h"
+
+#include <sys/mman.h>
+
+#include "base/bits.h"
+#include "base/logging.h"
+#include "base/process/process_metrics.h"
+#include "starboard/types.h"
+#include "third_party/ashmem/ashmem.h"
+
+namespace base {
+
+// For Android, we use ashmem to implement SharedMemory. ashmem_create_region
+// will automatically pin the region. We never explicitly call pin/unpin. When
+// all the file descriptors from different processes associated with the region
+// are closed, the memory buffer will go away.
+
+bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
+  DCHECK(!shm_.IsValid());
+
+  // Align size as required by ashmem_create_region() API documentation.
+  size_t rounded_size = bits::Align(options.size, GetPageSize());
+
+  if (rounded_size > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return false;
+
+  // "name" is just a label in ashmem. It is visible in /proc/pid/maps.
+  int fd = ashmem_create_region(
+      options.name_deprecated ? options.name_deprecated->c_str() : "",
+      rounded_size);
+  shm_ = SharedMemoryHandle::ImportHandle(fd, options.size);
+  if (!shm_.IsValid()) {
+    DLOG(ERROR) << "Shared memory creation failed";
+    return false;
+  }
+
+  int flags = PROT_READ | PROT_WRITE | (options.executable ? PROT_EXEC : 0);
+  int err = ashmem_set_prot_region(shm_.GetHandle(), flags);
+  if (err < 0) {
+    DLOG(ERROR) << "Error " << err << " when setting protection of ashmem";
+    return false;
+  }
+
+  requested_size_ = options.size;
+
+  return true;
+}
+
+bool SharedMemory::Delete(const std::string& name) {
+  // Like on Windows, this is intentionally returning true as ashmem will
+  // automatically release the resource when all FDs on it are closed.
+  return true;
+}
+
+bool SharedMemory::Open(const std::string& name, bool read_only) {
+  // ashmem doesn't support name mapping
+  NOTIMPLEMENTED();
+  return false;
+}
+
+void SharedMemory::Close() {
+  if (shm_.IsValid()) {
+    shm_.Close();
+    shm_ = SharedMemoryHandle();
+  }
+}
+
+SharedMemoryHandle SharedMemory::GetReadOnlyHandle() const {
+  // There are no read-only Ashmem descriptors on Android.
+  // Instead, the protection mask is a property of the region itself.
+  SharedMemoryHandle handle = shm_.Duplicate();
+  handle.SetReadOnly();
+  return handle;
+}
+
+}  // namespace base
diff --git a/src/base/memory/shared_memory_fuchsia.cc b/src/base/memory/shared_memory_fuchsia.cc
new file mode 100644
index 0000000..a79d937
--- /dev/null
+++ b/src/base/memory/shared_memory_fuchsia.cc
@@ -0,0 +1,164 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory.h"
+
+#include <limits>
+
+#include <lib/zx/vmar.h>
+#include <lib/zx/vmo.h>
+#include <zircon/rights.h>
+
+#include "base/bits.h"
+#include "base/fuchsia/fuchsia_logging.h"
+#include "base/memory/shared_memory_tracker.h"
+#include "base/process/process_metrics.h"
+#include "starboard/types.h"
+
+namespace base {
+
+SharedMemory::SharedMemory() {}
+
+SharedMemory::SharedMemory(const SharedMemoryHandle& handle, bool read_only)
+    : shm_(handle), read_only_(read_only) {}
+
+SharedMemory::~SharedMemory() {
+  Unmap();
+  Close();
+}
+
+// static
+bool SharedMemory::IsHandleValid(const SharedMemoryHandle& handle) {
+  return handle.IsValid();
+}
+
+// static
+void SharedMemory::CloseHandle(const SharedMemoryHandle& handle) {
+  DCHECK(handle.IsValid());
+  handle.Close();
+}
+
+// static
+size_t SharedMemory::GetHandleLimit() {
+  // Duplicated from the internal Magenta kernel constant kMaxHandleCount
+  // (kernel/lib/zircon/zircon.cpp).
+  return 256 * 1024u;
+}
+
+bool SharedMemory::CreateAndMapAnonymous(size_t size) {
+  return CreateAnonymous(size) && Map(size);
+}
+
+bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
+  requested_size_ = options.size;
+  mapped_size_ = bits::Align(requested_size_, GetPageSize());
+  zx::vmo vmo;
+  zx_status_t status = zx::vmo::create(mapped_size_, 0, &vmo);
+  if (status != ZX_OK) {
+    ZX_DLOG(ERROR, status) << "zx_vmo_create";
+    return false;
+  }
+
+  if (!options.executable) {
+    // If options.executable isn't set, drop that permission by replacement.
+    const int kNoExecFlags = ZX_DEFAULT_VMO_RIGHTS & ~ZX_RIGHT_EXECUTE;
+    status = vmo.replace(kNoExecFlags, &vmo);
+    if (status != ZX_OK) {
+      ZX_DLOG(ERROR, status) << "zx_handle_replace";
+      return false;
+    }
+  }
+
+  shm_ = SharedMemoryHandle(vmo.release(), mapped_size_,
+                            UnguessableToken::Create());
+  return true;
+}
+
+bool SharedMemory::MapAt(off_t offset, size_t bytes) {
+  if (!shm_.IsValid())
+    return false;
+
+  if (bytes > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return false;
+
+  if (memory_)
+    return false;
+
+  int flags = ZX_VM_FLAG_PERM_READ;
+  if (!read_only_)
+    flags |= ZX_VM_FLAG_PERM_WRITE;
+  uintptr_t addr;
+  zx_status_t status = zx::vmar::root_self()->map(
+      0, *zx::unowned_vmo(shm_.GetHandle()), offset, bytes, flags, &addr);
+  if (status != ZX_OK) {
+    ZX_DLOG(ERROR, status) << "zx_vmar_map";
+    return false;
+  }
+  memory_ = reinterpret_cast<void*>(addr);
+
+  mapped_size_ = bytes;
+  mapped_id_ = shm_.GetGUID();
+  SharedMemoryTracker::GetInstance()->IncrementMemoryUsage(*this);
+  return true;
+}
+
+bool SharedMemory::Unmap() {
+  if (!memory_)
+    return false;
+
+  SharedMemoryTracker::GetInstance()->DecrementMemoryUsage(*this);
+
+  uintptr_t addr = reinterpret_cast<uintptr_t>(memory_);
+  zx_status_t status = zx::vmar::root_self()->unmap(addr, mapped_size_);
+  if (status != ZX_OK) {
+    ZX_DLOG(ERROR, status) << "zx_vmar_unmap";
+    return false;
+  }
+
+  memory_ = nullptr;
+  mapped_id_ = UnguessableToken();
+  return true;
+}
+
+void SharedMemory::Close() {
+  if (shm_.IsValid()) {
+    shm_.Close();
+    shm_ = SharedMemoryHandle();
+  }
+}
+
+SharedMemoryHandle SharedMemory::handle() const {
+  return shm_;
+}
+
+SharedMemoryHandle SharedMemory::TakeHandle() {
+  SharedMemoryHandle handle(shm_);
+  handle.SetOwnershipPassesToIPC(true);
+  Unmap();
+  shm_ = SharedMemoryHandle();
+  return handle;
+}
+
+SharedMemoryHandle SharedMemory::DuplicateHandle(
+    const SharedMemoryHandle& handle) {
+  return handle.Duplicate();
+}
+
+SharedMemoryHandle SharedMemory::GetReadOnlyHandle() const {
+  zx::vmo duped_handle;
+  const int kNoWriteOrExec =
+      ZX_DEFAULT_VMO_RIGHTS &
+      ~(ZX_RIGHT_WRITE | ZX_RIGHT_EXECUTE | ZX_RIGHT_SET_PROPERTY);
+  zx_status_t status = zx::unowned_vmo(shm_.GetHandle())
+                           ->duplicate(kNoWriteOrExec, &duped_handle);
+  if (status != ZX_OK)
+    return SharedMemoryHandle();
+
+  SharedMemoryHandle handle(duped_handle.release(), shm_.GetSize(),
+                            shm_.GetGUID());
+  handle.SetOwnershipPassesToIPC(true);
+  return handle;
+}
+
+}  // namespace base
diff --git a/src/base/memory/shared_memory_handle.cc b/src/base/memory/shared_memory_handle.cc
new file mode 100644
index 0000000..085bde4
--- /dev/null
+++ b/src/base/memory/shared_memory_handle.cc
@@ -0,0 +1,23 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory_handle.h"
+
+namespace base {
+
+SharedMemoryHandle::SharedMemoryHandle(const SharedMemoryHandle& handle) =
+    default;
+
+SharedMemoryHandle& SharedMemoryHandle::operator=(
+    const SharedMemoryHandle& handle) = default;
+
+base::UnguessableToken SharedMemoryHandle::GetGUID() const {
+  return guid_;
+}
+
+size_t SharedMemoryHandle::GetSize() const {
+  return size_;
+}
+
+}  // namespace base
diff --git a/src/base/memory/shared_memory_handle.h b/src/base/memory/shared_memory_handle.h
new file mode 100644
index 0000000..8329829
--- /dev/null
+++ b/src/base/memory/shared_memory_handle.h
@@ -0,0 +1,244 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_SHARED_MEMORY_HANDLE_H_
+#define BASE_MEMORY_SHARED_MEMORY_HANDLE_H_
+
+// Starboard doesn't currently support multiple processes or shared memory.
+#if !defined(STARBOARD)
+
+#include "base/unguessable_token.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include "base/process/process_handle.h"
+#include "base/win/windows_types.h"
+#elif defined(OS_MACOSX) && !defined(OS_IOS)
+#include <mach/mach.h>
+#include "base/base_export.h"
+#include "base/file_descriptor_posix.h"
+#include "base/macros.h"
+#include "base/process/process_handle.h"
+#elif defined(OS_POSIX)
+#include <sys/types.h>
+#include "base/file_descriptor_posix.h"
+#elif defined(OS_FUCHSIA)
+#include <zircon/types.h>
+
+#include "starboard/types.h"
+#endif
+
+namespace base {
+
+// SharedMemoryHandle is the smallest possible IPC-transportable "reference" to
+// a shared memory OS resource. A "reference" can be consumed exactly once [by
+// base::SharedMemory] to map the shared memory OS resource into the virtual
+// address space of the current process.
+// TODO(erikchen): This class should have strong ownership semantics to prevent
+// leaks of the underlying OS resource. https://crbug.com/640840.
+class BASE_EXPORT SharedMemoryHandle {
+ public:
+  // The default constructor returns an invalid SharedMemoryHandle.
+  SharedMemoryHandle();
+
+  // Standard copy constructor. The new instance shares the underlying OS
+  // primitives.
+  SharedMemoryHandle(const SharedMemoryHandle& handle);
+
+  // Standard assignment operator. The updated instance shares the underlying
+  // OS primitives.
+  SharedMemoryHandle& operator=(const SharedMemoryHandle& handle);
+
+  // Closes the underlying OS resource.
+  // The fact that this method needs to be "const" is an artifact of the
+  // original interface for base::SharedMemory::CloseHandle.
+  // TODO(erikchen): This doesn't clear the underlying reference, which seems
+  // like a bug, but is how this class has always worked. Fix this:
+  // https://crbug.com/716072.
+  void Close() const;
+
+  // Whether ownership of the underlying OS resource is implicitly passed to
+  // the IPC subsystem during serialization.
+  void SetOwnershipPassesToIPC(bool ownership_passes);
+  bool OwnershipPassesToIPC() const;
+
+  // Whether the underlying OS resource is valid.
+  bool IsValid() const;
+
+  // Duplicates the underlying OS resource. Using the return value as a
+  // parameter to an IPC message will cause the IPC subsystem to consume the OS
+  // resource.
+  SharedMemoryHandle Duplicate() const;
+
+  // Uniquely identifies the shared memory region that the underlying OS resource
+  // points to. Multiple SharedMemoryHandles that point to the same shared
+  // memory region will have the same GUID. Preserved across IPC.
+  base::UnguessableToken GetGUID() const;
+
+  // Returns the size of the memory region that SharedMemoryHandle points to.
+  size_t GetSize() const;
+
+#if defined(OS_WIN)
+  // Takes implicit ownership of |h|.
+  // |guid| uniquely identifies the shared memory region pointed to by the
+  // underlying OS resource. If the HANDLE is associated with another
+  // SharedMemoryHandle, the caller must pass the |guid| of that
+  // SharedMemoryHandle. Otherwise, the caller should generate a new
+  // UnguessableToken.
+  // Passing the wrong |size| has no immediate consequence, but may cause errors
+  // when trying to map the SharedMemoryHandle at a later point in time.
+  SharedMemoryHandle(HANDLE h, size_t size, const base::UnguessableToken& guid);
+  HANDLE GetHandle() const;
+#elif defined(OS_FUCHSIA)
+  // Takes implicit ownership of |h|.
+  // |guid| uniquely identifies the shared memory region pointed to by the
+  // underlying OS resource. If the zx_handle_t is associated with another
+  // SharedMemoryHandle, the caller must pass the |guid| of that
+  // SharedMemoryHandle. Otherwise, the caller should generate a new
+  // UnguessableToken.
+  // Passing the wrong |size| has no immediate consequence, but may cause errors
+  // when trying to map the SharedMemoryHandle at a later point in time.
+  SharedMemoryHandle(zx_handle_t h,
+                     size_t size,
+                     const base::UnguessableToken& guid);
+  zx_handle_t GetHandle() const;
+#elif defined(OS_MACOSX) && !defined(OS_IOS)
+  enum Type {
+    // The SharedMemoryHandle is backed by a POSIX fd.
+    POSIX,
+    // The SharedMemoryHandle is backed by the Mach primitive "memory object".
+    MACH,
+  };
+
+  // Makes a Mach-based SharedMemoryHandle of the given size. On error,
+  // subsequent calls to IsValid() return false.
+  // Passing the wrong |size| has no immediate consequence, but may cause errors
+  // when trying to map the SharedMemoryHandle at a later point in time.
+  SharedMemoryHandle(mach_vm_size_t size, const base::UnguessableToken& guid);
+
+  // Makes a Mach-based SharedMemoryHandle from |memory_object|, a named entry
+  // in the current task. The memory region has size |size|.
+  // Passing the wrong |size| has no immediate consequence, but may cause errors
+  // when trying to map the SharedMemoryHandle at a later point in time.
+  SharedMemoryHandle(mach_port_t memory_object,
+                     mach_vm_size_t size,
+                     const base::UnguessableToken& guid);
+
+  Type GetType() const { return type_; }
+
+  // Exposed so that the SharedMemoryHandle can be transported between
+  // processes.
+  mach_port_t GetMemoryObject() const;
+
+  // The SharedMemoryHandle must be valid.
+  // Returns whether the SharedMemoryHandle was successfully mapped into memory.
+  // On success, |memory| is an output variable that contains the start of the
+  // mapped memory.
+  bool MapAt(off_t offset, size_t bytes, void** memory, bool read_only);
+#elif defined(OS_POSIX)
+  // Creates a SharedMemoryHandle from an |fd| supplied from an external
+  // service.
+  // Passing the wrong |size| has no immediate consequence, but may cause errors
+  // when trying to map the SharedMemoryHandle at a later point in time.
+  static SharedMemoryHandle ImportHandle(int fd, size_t size);
+
+  // Returns the underlying OS resource.
+  int GetHandle() const;
+
+  // Invalidates [but doesn't close] the underlying OS resource. This will leak
+  // unless the caller is careful.
+  int Release();
+#endif
+
+#if defined(OS_ANDROID)
+  // Marks the current file descriptor as read-only, for the purpose of
+  // mapping. This is independent of the region's read-only status.
+  void SetReadOnly() { read_only_ = true; }
+
+  // Returns true iff the descriptor is to be used for read-only
+  // mappings.
+  bool IsReadOnly() const { return read_only_; }
+
+  // Returns true iff the corresponding region is read-only.
+  bool IsRegionReadOnly() const;
+
+  // Try to set the region read-only. This will fail any future attempt
+  // at read-write mapping.
+  bool SetRegionReadOnly() const;
+#endif
+
+#if defined(OS_POSIX)
+  // Constructs a SharedMemoryHandle backed by a FileDescriptor. The newly
+  // created instance has the same ownership semantics as base::FileDescriptor.
+  // This typically means that the SharedMemoryHandle takes ownership of the
+  // |fd| if |auto_close| is true. Unfortunately, it's common for existing code
+  // to make shallow copies of SharedMemoryHandle, and the one that is finally
+  // passed into a base::SharedMemory is the one that "consumes" the fd.
+  //
+  // |guid| uniquely identifies the shared memory region pointed to by the
+  // underlying OS resource. If |file_descriptor| is associated with another
+  // SharedMemoryHandle, the caller must pass the |guid| of that
+  // SharedMemoryHandle. Otherwise, the caller should generate a new
+  // UnguessableToken.
+  // Passing the wrong |size| has no immediate consequence, but may cause errors
+  // when trying to map the SharedMemoryHandle at a later point in time.
+  SharedMemoryHandle(const base::FileDescriptor& file_descriptor,
+                     size_t size,
+                     const base::UnguessableToken& guid);
+#endif
+
+ private:
+#if defined(OS_WIN)
+  HANDLE handle_ = nullptr;
+
+  // Whether passing this object as a parameter to an IPC message passes
+  // ownership of |handle_| to the IPC stack. This is meant to mimic the
+  // behavior of the |auto_close| parameter of FileDescriptor. This member only
+  // affects attachment-brokered SharedMemoryHandles.
+  // Defaults to |false|.
+  bool ownership_passes_to_ipc_ = false;
+#elif defined(OS_FUCHSIA)
+  zx_handle_t handle_ = ZX_HANDLE_INVALID;
+  bool ownership_passes_to_ipc_ = false;
+#elif defined(OS_MACOSX) && !defined(OS_IOS)
+  friend class SharedMemory;
+  friend bool CheckReadOnlySharedMemoryHandleForTesting(
+      SharedMemoryHandle handle);
+
+  Type type_ = MACH;
+
+  // Each instance of a SharedMemoryHandle is backed either by a POSIX fd or a
+  // mach port. |type_| determines the backing member.
+  union {
+    FileDescriptor file_descriptor_;
+
+    struct {
+      mach_port_t memory_object_ = MACH_PORT_NULL;
+
+      // Whether passing this object as a parameter to an IPC message passes
+      // ownership of |memory_object_| to the IPC stack. This is meant to mimic
+      // the behavior of the |auto_close| parameter of FileDescriptor.
+      // Defaults to |false|.
+      bool ownership_passes_to_ipc_ = false;
+    };
+  };
+#elif defined(OS_ANDROID)
+  friend class SharedMemory;
+
+  FileDescriptor file_descriptor_;
+  bool read_only_ = false;
+#elif defined(OS_POSIX)
+  FileDescriptor file_descriptor_;
+#endif
+
+  base::UnguessableToken guid_;
+
+  // The size of the region referenced by the SharedMemoryHandle.
+  size_t size_ = 0;
+};
+
+}  // namespace base
+
+#endif  // !defined(STARBOARD)
+#endif  // BASE_MEMORY_SHARED_MEMORY_HANDLE_H_
diff --git a/src/base/memory/shared_memory_handle_android.cc b/src/base/memory/shared_memory_handle_android.cc
new file mode 100644
index 0000000..b3dcc3b
--- /dev/null
+++ b/src/base/memory/shared_memory_handle_android.cc
@@ -0,0 +1,116 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory_handle.h"
+
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/posix/unix_domain_socket.h"
+#include "base/unguessable_token.h"
+#include "starboard/types.h"
+#include "third_party/ashmem/ashmem.h"
+
+namespace base {
+
+static int GetAshmemRegionProtectionMask(int fd) {
+  int prot = ashmem_get_prot_region(fd);
+  if (prot < 0) {
+    DPLOG(ERROR) << "ashmem_get_prot_region";
+    return -1;
+  }
+  return prot;
+}
+
+SharedMemoryHandle::SharedMemoryHandle() = default;
+
+SharedMemoryHandle::SharedMemoryHandle(
+    const base::FileDescriptor& file_descriptor,
+    size_t size,
+    const base::UnguessableToken& guid)
+    : guid_(guid), size_(size) {
+  DCHECK_GE(file_descriptor.fd, 0);
+  file_descriptor_ = file_descriptor;
+}
+
+// static
+SharedMemoryHandle SharedMemoryHandle::ImportHandle(int fd, size_t size) {
+  SharedMemoryHandle handle;
+  handle.file_descriptor_.fd = fd;
+  handle.file_descriptor_.auto_close = false;
+  handle.guid_ = UnguessableToken::Create();
+  handle.size_ = size;
+  return handle;
+}
+
+int SharedMemoryHandle::GetHandle() const {
+  DCHECK(IsValid());
+  return file_descriptor_.fd;
+}
+
+bool SharedMemoryHandle::IsValid() const {
+  return file_descriptor_.fd >= 0;
+}
+
+void SharedMemoryHandle::Close() const {
+  DCHECK(IsValid());
+  if (IGNORE_EINTR(close(file_descriptor_.fd)) < 0)
+    PLOG(ERROR) << "close";
+}
+
+int SharedMemoryHandle::Release() {
+  int old_fd = file_descriptor_.fd;
+  file_descriptor_.fd = -1;
+  return old_fd;
+}
+
+SharedMemoryHandle SharedMemoryHandle::Duplicate() const {
+  DCHECK(IsValid());
+  SharedMemoryHandle result;
+  int duped_handle = HANDLE_EINTR(dup(file_descriptor_.fd));
+  if (duped_handle >= 0) {
+    result = SharedMemoryHandle(FileDescriptor(duped_handle, true), GetSize(),
+                                GetGUID());
+    if (IsReadOnly())
+      result.SetReadOnly();
+  }
+  return result;
+}
+
+void SharedMemoryHandle::SetOwnershipPassesToIPC(bool ownership_passes) {
+  file_descriptor_.auto_close = ownership_passes;
+}
+
+bool SharedMemoryHandle::OwnershipPassesToIPC() const {
+  return file_descriptor_.auto_close;
+}
+
+bool SharedMemoryHandle::IsRegionReadOnly() const {
+  int prot = GetAshmemRegionProtectionMask(file_descriptor_.fd);
+  return (prot >= 0 && (prot & PROT_WRITE) == 0);
+}
+
+bool SharedMemoryHandle::SetRegionReadOnly() const {
+  int fd = file_descriptor_.fd;
+  int prot = GetAshmemRegionProtectionMask(fd);
+  if (prot < 0)
+    return false;
+
+  if ((prot & PROT_WRITE) == 0) {
+    // Region is already read-only.
+    return true;
+  }
+
+  prot &= ~PROT_WRITE;
+  int ret = ashmem_set_prot_region(fd, prot);
+  if (ret != 0) {
+    DPLOG(ERROR) << "ashmem_set_prot_region";
+    return false;
+  }
+  return true;
+}
+
+}  // namespace base
diff --git a/src/base/memory/shared_memory_handle_fuchsia.cc b/src/base/memory/shared_memory_handle_fuchsia.cc
new file mode 100644
index 0000000..b559bac
--- /dev/null
+++ b/src/base/memory/shared_memory_handle_fuchsia.cc
@@ -0,0 +1,55 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory_handle.h"
+
+#include <zircon/syscalls.h>
+
+#include "base/logging.h"
+#include "base/unguessable_token.h"
+#include "starboard/types.h"
+
+namespace base {
+
+SharedMemoryHandle::SharedMemoryHandle() {}
+
+SharedMemoryHandle::SharedMemoryHandle(zx_handle_t h,
+                                       size_t size,
+                                       const base::UnguessableToken& guid)
+    : handle_(h), guid_(guid), size_(size) {}
+
+void SharedMemoryHandle::Close() const {
+  DCHECK(handle_ != ZX_HANDLE_INVALID);
+  zx_handle_close(handle_);
+}
+
+bool SharedMemoryHandle::IsValid() const {
+  return handle_ != ZX_HANDLE_INVALID;
+}
+
+SharedMemoryHandle SharedMemoryHandle::Duplicate() const {
+  zx_handle_t duped_handle;
+  zx_status_t status =
+      zx_handle_duplicate(handle_, ZX_RIGHT_SAME_RIGHTS, &duped_handle);
+  if (status != ZX_OK)
+    return SharedMemoryHandle();
+
+  SharedMemoryHandle handle(duped_handle, GetSize(), GetGUID());
+  handle.SetOwnershipPassesToIPC(true);
+  return handle;
+}
+
+zx_handle_t SharedMemoryHandle::GetHandle() const {
+  return handle_;
+}
+
+void SharedMemoryHandle::SetOwnershipPassesToIPC(bool ownership_passes) {
+  ownership_passes_to_ipc_ = ownership_passes;
+}
+
+bool SharedMemoryHandle::OwnershipPassesToIPC() const {
+  return ownership_passes_to_ipc_;
+}
+
+}  // namespace base
diff --git a/src/base/memory/shared_memory_handle_mac.cc b/src/base/memory/shared_memory_handle_mac.cc
new file mode 100644
index 0000000..4db9edb
--- /dev/null
+++ b/src/base/memory/shared_memory_handle_mac.cc
@@ -0,0 +1,154 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory_handle.h"
+
+#include <mach/mach_vm.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include "base/mac/mac_util.h"
+#include "base/mac/mach_logging.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/unguessable_token.h"
+#include "starboard/types.h"
+
+namespace base {
+
+SharedMemoryHandle::SharedMemoryHandle() {}
+
+SharedMemoryHandle::SharedMemoryHandle(
+    const base::FileDescriptor& file_descriptor,
+    size_t size,
+    const base::UnguessableToken& guid)
+    : type_(POSIX),
+      file_descriptor_(file_descriptor),
+      guid_(guid),
+      size_(size) {}
+
+SharedMemoryHandle::SharedMemoryHandle(mach_vm_size_t size,
+                                       const base::UnguessableToken& guid) {
+  type_ = MACH;
+  mach_port_t named_right;
+  kern_return_t kr = mach_make_memory_entry_64(
+      mach_task_self(),
+      &size,
+      0,  // Address.
+      MAP_MEM_NAMED_CREATE | VM_PROT_READ | VM_PROT_WRITE,
+      &named_right,
+      MACH_PORT_NULL);  // Parent handle.
+  if (kr != KERN_SUCCESS) {
+    memory_object_ = MACH_PORT_NULL;
+    return;
+  }
+
+  memory_object_ = named_right;
+  size_ = size;
+  ownership_passes_to_ipc_ = false;
+  guid_ = guid;
+}
+
+SharedMemoryHandle::SharedMemoryHandle(mach_port_t memory_object,
+                                       mach_vm_size_t size,
+                                       const base::UnguessableToken& guid)
+    : type_(MACH),
+      memory_object_(memory_object),
+      ownership_passes_to_ipc_(false),
+      guid_(guid),
+      size_(size) {}
+
+SharedMemoryHandle SharedMemoryHandle::Duplicate() const {
+  switch (type_) {
+    case POSIX: {
+      if (!IsValid())
+        return SharedMemoryHandle();
+      int duped_fd = HANDLE_EINTR(dup(file_descriptor_.fd));
+      if (duped_fd < 0)
+        return SharedMemoryHandle();
+      return SharedMemoryHandle(FileDescriptor(duped_fd, true), size_, guid_);
+    }
+    case MACH: {
+      if (!IsValid())
+        return SharedMemoryHandle();
+
+      // Increment the ref count.
+      kern_return_t kr = mach_port_mod_refs(mach_task_self(), memory_object_,
+                                            MACH_PORT_RIGHT_SEND, 1);
+      DCHECK_EQ(kr, KERN_SUCCESS);
+      SharedMemoryHandle handle(*this);
+      handle.SetOwnershipPassesToIPC(true);
+      return handle;
+    }
+  }
+}
+
+bool SharedMemoryHandle::IsValid() const {
+  switch (type_) {
+    case POSIX:
+      return file_descriptor_.fd >= 0;
+    case MACH:
+      return memory_object_ != MACH_PORT_NULL;
+  }
+}
+
+mach_port_t SharedMemoryHandle::GetMemoryObject() const {
+  DCHECK_EQ(type_, MACH);
+  return memory_object_;
+}
+
+bool SharedMemoryHandle::MapAt(off_t offset,
+                               size_t bytes,
+                               void** memory,
+                               bool read_only) {
+  DCHECK(IsValid());
+  switch (type_) {
+    case SharedMemoryHandle::POSIX:
+      *memory = mmap(nullptr, bytes, PROT_READ | (read_only ? 0 : PROT_WRITE),
+                     MAP_SHARED, file_descriptor_.fd, offset);
+      return *memory != MAP_FAILED;
+    case SharedMemoryHandle::MACH:
+      kern_return_t kr = mach_vm_map(
+          mach_task_self(),
+          reinterpret_cast<mach_vm_address_t*>(memory),    // Output parameter
+          bytes,
+          0,                                               // Alignment mask
+          VM_FLAGS_ANYWHERE,
+          memory_object_,
+          offset,
+          FALSE,                                           // Copy
+          VM_PROT_READ | (read_only ? 0 : VM_PROT_WRITE),  // Current protection
+          VM_PROT_WRITE | VM_PROT_READ | VM_PROT_IS_MASK,  // Maximum protection
+          VM_INHERIT_NONE);
+      return kr == KERN_SUCCESS;
+  }
+}
+
+void SharedMemoryHandle::Close() const {
+  if (!IsValid())
+    return;
+
+  switch (type_) {
+    case POSIX:
+      if (IGNORE_EINTR(close(file_descriptor_.fd)) < 0)
+        DPLOG(ERROR) << "Error closing fd";
+      break;
+    case MACH:
+      kern_return_t kr = mach_port_deallocate(mach_task_self(), memory_object_);
+      if (kr != KERN_SUCCESS)
+        MACH_DLOG(ERROR, kr) << "Error deallocating mach port";
+      break;
+  }
+}
+
+void SharedMemoryHandle::SetOwnershipPassesToIPC(bool ownership_passes) {
+  DCHECK_EQ(type_, MACH);
+  ownership_passes_to_ipc_ = ownership_passes;
+}
+
+bool SharedMemoryHandle::OwnershipPassesToIPC() const {
+  DCHECK_EQ(type_, MACH);
+  return ownership_passes_to_ipc_;
+}
+
+}  // namespace base
diff --git a/src/base/memory/shared_memory_handle_posix.cc b/src/base/memory/shared_memory_handle_posix.cc
new file mode 100644
index 0000000..852e6a2
--- /dev/null
+++ b/src/base/memory/shared_memory_handle_posix.cc
@@ -0,0 +1,72 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory_handle.h"
+
+#include <unistd.h>
+
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/unguessable_token.h"
+#include "starboard/types.h"
+
+namespace base {
+
+SharedMemoryHandle::SharedMemoryHandle() = default;
+
+// Wraps an existing descriptor. |guid| uniquely identifies the region (used
+// for memory tracking); |size| is the region size in bytes. Whether the fd
+// is owned follows |file_descriptor.auto_close|.
+SharedMemoryHandle::SharedMemoryHandle(
+    const base::FileDescriptor& file_descriptor,
+    size_t size,
+    const base::UnguessableToken& guid)
+    : file_descriptor_(file_descriptor), guid_(guid), size_(size) {}
+
+// static
+// Adopts |fd| WITHOUT taking ownership (auto_close stays false) and assigns
+// a freshly generated GUID, since the true identity of an imported region
+// cannot be known here.
+SharedMemoryHandle SharedMemoryHandle::ImportHandle(int fd, size_t size) {
+  SharedMemoryHandle handle;
+  handle.file_descriptor_.fd = fd;
+  handle.file_descriptor_.auto_close = false;
+  handle.guid_ = UnguessableToken::Create();
+  handle.size_ = size;
+  return handle;
+}
+
+// Returns the underlying fd without transferring ownership.
+int SharedMemoryHandle::GetHandle() const {
+  return file_descriptor_.fd;
+}
+
+// A handle is valid iff it carries a non-negative file descriptor.
+bool SharedMemoryHandle::IsValid() const {
+  return file_descriptor_.fd >= 0;
+}
+
+// Closes the descriptor unconditionally. NOTE(review): unlike the mac/win
+// counterparts there is no IsValid() early-return here, so closing an
+// invalid handle will attempt close(-1) and log EBADF — confirm callers
+// only Close() valid handles.
+void SharedMemoryHandle::Close() const {
+  if (IGNORE_EINTR(close(file_descriptor_.fd)) < 0)
+    PLOG(ERROR) << "close";
+}
+
+// Relinquishes ownership of the descriptor to the caller and invalidates
+// this handle (fd becomes -1).
+int SharedMemoryHandle::Release() {
+  int old_fd = file_descriptor_.fd;
+  file_descriptor_.fd = -1;
+  return old_fd;
+}
+
+// Returns a new handle that owns a dup()'d descriptor (auto_close == true)
+// sharing this handle's size and GUID, or an invalid handle if |this| is
+// invalid or dup() fails.
+SharedMemoryHandle SharedMemoryHandle::Duplicate() const {
+  if (!IsValid())
+    return SharedMemoryHandle();
+
+  int duped_handle = HANDLE_EINTR(dup(file_descriptor_.fd));
+  if (duped_handle < 0)
+    return SharedMemoryHandle();
+  return SharedMemoryHandle(FileDescriptor(duped_handle, true), GetSize(),
+                            GetGUID());
+}
+
+// On POSIX, "ownership passes to IPC" maps directly onto the descriptor's
+// auto_close flag: a true value means the receiver/serializer closes the fd.
+void SharedMemoryHandle::SetOwnershipPassesToIPC(bool ownership_passes) {
+  file_descriptor_.auto_close = ownership_passes;
+}
+
+bool SharedMemoryHandle::OwnershipPassesToIPC() const {
+  return file_descriptor_.auto_close;
+}
+
+}  // namespace base
diff --git a/src/base/memory/shared_memory_handle_win.cc b/src/base/memory/shared_memory_handle_win.cc
new file mode 100644
index 0000000..c8339ab
--- /dev/null
+++ b/src/base/memory/shared_memory_handle_win.cc
@@ -0,0 +1,57 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory_handle.h"
+
+#include "base/logging.h"
+#include "base/unguessable_token.h"
+
+#include <windows.h>
+
+#include "starboard/types.h"
+
+namespace base {
+
+SharedMemoryHandle::SharedMemoryHandle() {}
+
+// Wraps an existing Windows section HANDLE. |guid| identifies the region for
+// memory tracking; |size| is its size in bytes.
+SharedMemoryHandle::SharedMemoryHandle(HANDLE h,
+                                       size_t size,
+                                       const base::UnguessableToken& guid)
+    : handle_(h), guid_(guid), size_(size) {}
+
+// Closes the section handle. NOTE(review): the DCHECK is debug-only; in
+// release builds an invalid (null) handle would still be passed to
+// ::CloseHandle — unlike the POSIX/Mach versions there is no IsValid()
+// early-return, so callers must not Close() an invalid handle.
+void SharedMemoryHandle::Close() const {
+  DCHECK(handle_ != nullptr);
+  ::CloseHandle(handle_);
+}
+
+bool SharedMemoryHandle::IsValid() const {
+  return handle_ != nullptr;
+}
+
+// Duplicates the section handle within the current process with identical
+// access rights. The duplicate is marked as ownership-passing so that
+// whoever it is handed to is responsible for closing it. Returns an invalid
+// handle on failure.
+SharedMemoryHandle SharedMemoryHandle::Duplicate() const {
+  HANDLE duped_handle;
+  ProcessHandle process = GetCurrentProcess();
+  BOOL success = ::DuplicateHandle(process, handle_, process, &duped_handle, 0,
+                                   FALSE, DUPLICATE_SAME_ACCESS);
+  if (!success)
+    return SharedMemoryHandle();
+
+  base::SharedMemoryHandle handle(duped_handle, GetSize(), GetGUID());
+  handle.SetOwnershipPassesToIPC(true);
+  return handle;
+}
+
+// Returns the raw section handle without transferring ownership.
+HANDLE SharedMemoryHandle::GetHandle() const {
+  return handle_;
+}
+
+// Records whether the handle is consumed (ownership transferred) when sent
+// over IPC.
+void SharedMemoryHandle::SetOwnershipPassesToIPC(bool ownership_passes) {
+  ownership_passes_to_ipc_ = ownership_passes;
+}
+
+bool SharedMemoryHandle::OwnershipPassesToIPC() const {
+  return ownership_passes_to_ipc_;
+}
+
+}  // namespace base
diff --git a/src/base/memory/shared_memory_helper.cc b/src/base/memory/shared_memory_helper.cc
new file mode 100644
index 0000000..9803ca3
--- /dev/null
+++ b/src/base/memory/shared_memory_helper.cc
@@ -0,0 +1,161 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory_helper.h"
+
+#if defined(OS_CHROMEOS)
+#include <sys/resource.h>
+#include <sys/time.h>
+
+#include "base/debug/alias.h"
+#endif  // defined(OS_CHROMEOS)
+
+#include "base/threading/thread_restrictions.h"
+#include "starboard/memory.h"
+#include "starboard/string.h"
+#include "starboard/types.h"
+
+namespace base {
+
+// ScopedGeneric traits that unlink() the pointed-to path when the scoper is
+// destroyed or reset. The FilePath itself is not owned — only the on-disk
+// name is removed.
+struct ScopedPathUnlinkerTraits {
+  static const FilePath* InvalidValue() { return nullptr; }
+
+  static void Free(const FilePath* path) {
+    if (unlink(path->value().c_str()))
+      PLOG(WARNING) << "unlink";
+  }
+};
+
+// Unlinks the FilePath when the object is destroyed.
+using ScopedPathUnlinker =
+    ScopedGeneric<const FilePath*, ScopedPathUnlinkerTraits>;
+
+#if !defined(OS_ANDROID)
+// Creates an anonymous shared-memory file: opens a fresh temporary file in
+// the shmem directory, immediately unlinks it (so it is private and needs no
+// cleanup), and optionally re-opens it read-only for GetReadOnlyHandle().
+// On success |fd| (and |readonly_fd| if options.share_read_only) hold open
+// descriptors and |path| names the now-unlinked file.
+bool CreateAnonymousSharedMemory(const SharedMemoryCreateOptions& options,
+                                 ScopedFD* fd,
+                                 ScopedFD* readonly_fd,
+                                 FilePath* path) {
+#if defined(OS_LINUX)
+  // It doesn't make sense to have a open-existing private piece of shmem
+  DCHECK(!options.open_existing_deprecated);
+#endif  // defined(OS_LINUX)
+  // Q: Why not use the shm_open() etc. APIs?
+  // A: Because they're limited to 4mb on OS X.  FFFFFFFUUUUUUUUUUU
+  FilePath directory;
+  ScopedPathUnlinker path_unlinker;
+  if (!GetShmemTempDir(options.executable, &directory))
+    return false;
+
+  fd->reset(base::CreateAndOpenFdForTemporaryFileInDir(directory, path));
+
+  if (!fd->is_valid())
+    return false;
+
+  // Deleting the file prevents anyone else from mapping it in (making it
+  // private), and prevents the need for cleanup (once the last fd is
+  // closed, it is truly freed).
+  path_unlinker.reset(path);
+
+  if (options.share_read_only) {
+    // Also open as readonly so that we can GetReadOnlyHandle.
+    // Must happen before the unlinker runs, while the path still exists.
+    readonly_fd->reset(HANDLE_EINTR(open(path->value().c_str(), O_RDONLY)));
+    if (!readonly_fd->is_valid()) {
+      DPLOG(ERROR) << "open(\"" << path->value() << "\", O_RDONLY) failed";
+      fd->reset();
+      return false;
+    }
+  }
+  return true;
+}
+
+// Converts the descriptors produced by CreateAnonymousSharedMemory() into
+// the raw fds the SharedMemory implementation keeps: |*mapped_file| gets a
+// dup() of |fd| and |*readonly_mapped_file| takes over |readonly_fd|.
+// Both out-params must arrive as -1. Returns false if |fd| is invalid or the
+// writable and read-only descriptors do not refer to the same inode.
+// NOTE(review): if dup() fails, release builds fall through NOTREACHED() and
+// still return true with *mapped_file == -1 — confirm callers tolerate this.
+bool PrepareMapFile(ScopedFD fd,
+                    ScopedFD readonly_fd,
+                    int* mapped_file,
+                    int* readonly_mapped_file) {
+  DCHECK_EQ(-1, *mapped_file);
+  DCHECK_EQ(-1, *readonly_mapped_file);
+  if (!fd.is_valid())
+    return false;
+
+  // This function theoretically can block on the disk, but realistically
+  // the temporary files we create will just go into the buffer cache
+  // and be deleted before they ever make it out to disk.
+  base::ThreadRestrictions::ScopedAllowIO allow_io;
+
+  if (readonly_fd.is_valid()) {
+    // Verify both descriptors name the same file (same device + inode);
+    // otherwise the "read-only" fd could be a different file entirely.
+    struct stat st = {};
+    if (fstat(fd.get(), &st))
+      NOTREACHED();
+
+    struct stat readonly_st = {};
+    if (fstat(readonly_fd.get(), &readonly_st))
+      NOTREACHED();
+    if (st.st_dev != readonly_st.st_dev || st.st_ino != readonly_st.st_ino) {
+      LOG(ERROR) << "writable and read-only inodes don't match; bailing";
+      return false;
+    }
+  }
+
+  *mapped_file = HANDLE_EINTR(dup(fd.get()));
+  if (*mapped_file == -1) {
+    NOTREACHED() << "Call to dup failed, errno=" << errno;
+
+#if defined(OS_CHROMEOS)
+    if (errno == EMFILE) {
+      // We're out of file descriptors and are probably about to crash somewhere
+      // else in Chrome anyway. Let's collect what FD information we can and
+      // crash.
+      // Added for debugging crbug.com/733718
+      int original_fd_limit = 16384;
+      struct rlimit rlim;
+      if (getrlimit(RLIMIT_NOFILE, &rlim) == 0) {
+        original_fd_limit = rlim.rlim_cur;
+        if (rlim.rlim_max > rlim.rlim_cur) {
+          // Increase fd limit so breakpad has a chance to write a minidump.
+          rlim.rlim_cur = rlim.rlim_max;
+          if (setrlimit(RLIMIT_NOFILE, &rlim) != 0) {
+            PLOG(ERROR) << "setrlimit() failed";
+          }
+        }
+      } else {
+        PLOG(ERROR) << "getrlimit() failed";
+      }
+
+      const char kFileDataMarker[] = "FDATA";
+      char buf[PATH_MAX];
+      char fd_path[PATH_MAX];
+      char crash_buffer[32 * 1024] = {0};
+      char* crash_ptr = crash_buffer;
+      // Keep the buffer alive in the minidump even though it is "unused".
+      base::debug::Alias(crash_buffer);
+
+      // Put a marker at the start of our data so we can confirm where it
+      // begins.
+      crash_ptr = SbStringCopy(crash_ptr, kFileDataMarker,
+                               SbStringGetLength(kFileDataMarker));
+      // Dump every /proc/self/fd symlink target into the crash buffer and
+      // the log so the minidump shows what exhausted the fd table.
+      for (int i = original_fd_limit; i >= 0; --i) {
+        SbMemorySet(buf, 0, arraysize(buf));
+        SbMemorySet(fd_path, 0, arraysize(fd_path));
+        snprintf(fd_path, arraysize(fd_path) - 1, "/proc/self/fd/%d", i);
+        ssize_t count = readlink(fd_path, buf, arraysize(buf) - 1);
+        if (count < 0) {
+          PLOG(ERROR) << "readlink failed for: " << fd_path;
+          continue;
+        }
+
+        if (crash_ptr + count + 1 < crash_buffer + arraysize(crash_buffer)) {
+          crash_ptr = SbStringCopy(crash_ptr, buf, count + 1);
+        }
+        LOG(ERROR) << i << ": " << buf;
+      }
+      LOG(FATAL) << "Logged for file descriptor exhaustion, crashing now";
+    }
+#endif  // defined(OS_CHROMEOS)
+  }
+  // Transfer ownership of the read-only fd (may be -1 if none was created).
+  *readonly_mapped_file = readonly_fd.release();
+
+  return true;
+}
+#endif  // !defined(OS_ANDROID)
+
+}  // namespace base
diff --git a/src/base/memory/shared_memory_helper.h b/src/base/memory/shared_memory_helper.h
new file mode 100644
index 0000000..53dde90
--- /dev/null
+++ b/src/base/memory/shared_memory_helper.h
@@ -0,0 +1,38 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_SHARED_MEMORY_HELPER_H_
+#define BASE_MEMORY_SHARED_MEMORY_HELPER_H_
+
+#include "base/memory/shared_memory.h"
+#include "build/build_config.h"
+
+#include <fcntl.h>
+
+#include "starboard/types.h"
+
+namespace base {
+
+#if !defined(OS_ANDROID)
+// Makes a temporary file, fdopens it, and then unlinks it. |fd| is populated
+// with the opened fd. |readonly_fd| is populated with the opened fd if
+// options.share_read_only is true. |path| is populated with the location of
+// the file before it was unlinked.
+// Returns false if there's an unhandled failure.
+bool CreateAnonymousSharedMemory(const SharedMemoryCreateOptions& options,
+                                 ScopedFD* fd,
+                                 ScopedFD* readonly_fd,
+                                 FilePath* path);
+
+// Takes the outputs of CreateAnonymousSharedMemory and maps them properly to
+// |mapped_file| or |readonly_mapped_file|, depending on which one is populated.
+bool PrepareMapFile(ScopedFD fd,
+                    ScopedFD readonly_fd,
+                    int* mapped_file,
+                    int* readonly_mapped_file);
+#endif  // !defined(OS_ANDROID)
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_SHARED_MEMORY_HELPER_H_
diff --git a/src/base/memory/shared_memory_mac.cc b/src/base/memory/shared_memory_mac.cc
new file mode 100644
index 0000000..504732f
--- /dev/null
+++ b/src/base/memory/shared_memory_mac.cc
@@ -0,0 +1,264 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory.h"
+
+#include <errno.h>
+#include <mach/mach_vm.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "base/files/file_util.h"
+#include "base/files/scoped_file.h"
+#include "base/logging.h"
+#include "base/mac/foundation_util.h"
+#include "base/mac/mac_util.h"
+#include "base/mac/scoped_mach_vm.h"
+#include "base/memory/shared_memory_helper.h"
+#include "base/memory/shared_memory_tracker.h"
+#include "base/metrics/field_trial.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/posix/safe_strerror.h"
+#include "base/process/process_metrics.h"
+#include "base/scoped_generic.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/unguessable_token.h"
+#include "build/build_config.h"
+#include "starboard/types.h"
+
+#if defined(OS_IOS)
+#error "MacOS only - iOS uses shared_memory_posix.cc"
+#endif
+
+namespace base {
+
+namespace {
+
+// Returns whether the operation succeeded.
+// |new_handle| is an output variable, populated on success. The caller takes
+// ownership of the underlying memory object.
+// |handle| is the handle to copy.
+// If |handle| is already mapped, |mapped_addr| is its mapped location.
+// Otherwise, |mapped_addr| should be |nullptr|.
+// Returns whether the operation succeeded.
+// |new_handle| is an output variable, populated on success. The caller takes
+// ownership of the underlying memory object.
+// |handle| is the handle to copy.
+// If |handle| is already mapped, |mapped_addr| is its mapped location.
+// Otherwise, |mapped_addr| should be |nullptr|.
+bool MakeMachSharedMemoryHandleReadOnly(SharedMemoryHandle* new_handle,
+                                        SharedMemoryHandle handle,
+                                        void* mapped_addr) {
+  if (!handle.IsValid())
+    return false;
+
+  size_t size = handle.GetSize();
+
+  // Map if necessary. The temporary mapping (tracked by |scoper|) is torn
+  // down automatically when this function returns.
+  void* temp_addr = mapped_addr;
+  base::mac::ScopedMachVM scoper;
+  if (!temp_addr) {
+    // Intentionally lower current prot and max prot to |VM_PROT_READ|.
+    kern_return_t kr = mach_vm_map(
+        mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&temp_addr),
+        size, 0, VM_FLAGS_ANYWHERE, handle.GetMemoryObject(), 0, FALSE,
+        VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);
+    if (kr != KERN_SUCCESS)
+      return false;
+    scoper.reset(reinterpret_cast<vm_address_t>(temp_addr),
+                 mach_vm_round_page(size));
+  }
+
+  // Make new memory object. The named right created here only carries
+  // VM_PROT_READ, which is what makes the resulting handle read-only.
+  mach_port_t named_right;
+  kern_return_t kr = mach_make_memory_entry_64(
+      mach_task_self(), reinterpret_cast<memory_object_size_t*>(&size),
+      reinterpret_cast<memory_object_offset_t>(temp_addr), VM_PROT_READ,
+      &named_right, MACH_PORT_NULL);
+  if (kr != KERN_SUCCESS)
+    return false;
+
+  *new_handle = SharedMemoryHandle(named_right, size, handle.GetGUID());
+  return true;
+}
+
+}  // namespace
+
+SharedMemory::SharedMemory() {}
+
+// Wraps an existing handle. |mapped_memory_mechanism_| starts out as POSIX
+// regardless of the handle's actual type; the real mechanism is recorded
+// from shm_.type_ when MapAt() succeeds.
+SharedMemory::SharedMemory(const SharedMemoryHandle& handle, bool read_only)
+    : mapped_memory_mechanism_(SharedMemoryHandle::POSIX),
+      shm_(handle),
+      read_only_(read_only) {}
+
+// Unmaps any mapping and releases the underlying handle(s).
+SharedMemory::~SharedMemory() {
+  Unmap();
+  Close();
+}
+
+// Thin static wrappers forwarding to the SharedMemoryHandle of interest.
+
+// static
+bool SharedMemory::IsHandleValid(const SharedMemoryHandle& handle) {
+  return handle.IsValid();
+}
+
+// static
+void SharedMemory::CloseHandle(const SharedMemoryHandle& handle) {
+  handle.Close();
+}
+
+// static
+// Upper bound on how many handles this process can hold (the fd limit).
+size_t SharedMemory::GetHandleLimit() {
+  return GetMaxFds();
+}
+
+// static
+SharedMemoryHandle SharedMemory::DuplicateHandle(
+    const SharedMemoryHandle& handle) {
+  return handle.Duplicate();
+}
+
+// static
+// Reaches into the handle's POSIX fd directly (no ownership transfer).
+int SharedMemory::GetFdFromSharedMemoryHandle(
+    const SharedMemoryHandle& handle) {
+  return handle.file_descriptor_.fd;
+}
+
+// Convenience: create an anonymous region of |size| bytes and map it.
+bool SharedMemory::CreateAndMapAnonymous(size_t size) {
+  return CreateAnonymous(size) && Map(size);
+}
+
+// Chromium mostly only uses the unique/private shmem as specified by
+// "name == L"". The exception is in the StatsTable.
+//
+// Creates (but does not map) the region described by |options|. MACH-typed
+// regions are backed directly by a Mach memory object; POSIX-typed regions
+// are backed by an unlinked temporary file which is ftruncate()d to the
+// requested size and then dup()ed into |shm_|/|readonly_shm_|.
+bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
+  DCHECK(!shm_.IsValid());
+  if (options.size == 0)
+    return false;
+
+  if (options.size > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return false;
+
+  if (options.type == SharedMemoryHandle::MACH) {
+    shm_ = SharedMemoryHandle(options.size, UnguessableToken::Create());
+    requested_size_ = options.size;
+    return shm_.IsValid();
+  }
+
+  // This function theoretically can block on the disk. Both profiling of real
+  // users and local instrumentation shows that this is a real problem.
+  // https://code.google.com/p/chromium/issues/detail?id=466437
+  ThreadRestrictions::ScopedAllowIO allow_io;
+
+  ScopedFD fd;
+  ScopedFD readonly_fd;
+
+  FilePath path;
+  bool result = CreateAnonymousSharedMemory(options, &fd, &readonly_fd, &path);
+  if (!result)
+    return false;
+  // Should be guaranteed by CreateAnonymousSharedMemory().
+  DCHECK(fd.is_valid());
+
+  // Get current size. (The local named |stat| shadows ::stat; the "struct"
+  // keyword keeps the declaration unambiguous.)
+  struct stat stat;
+  if (fstat(fd.get(), &stat) != 0)
+    return false;
+  const size_t current_size = stat.st_size;
+  if (current_size != options.size) {
+    if (HANDLE_EINTR(ftruncate(fd.get(), options.size)) != 0)
+      return false;
+  }
+  requested_size_ = options.size;
+
+  int mapped_file = -1;
+  int readonly_mapped_file = -1;
+  result = PrepareMapFile(std::move(fd), std::move(readonly_fd), &mapped_file,
+                          &readonly_mapped_file);
+  // The handles do not auto-close; Close() releases them explicitly.
+  shm_ = SharedMemoryHandle(FileDescriptor(mapped_file, false), options.size,
+                            UnguessableToken::Create());
+  readonly_shm_ =
+      SharedMemoryHandle(FileDescriptor(readonly_mapped_file, false),
+                         options.size, shm_.GetGUID());
+  return result;
+}
+
+// Maps |bytes| of the region starting at |offset| into this process.
+// Fails if the handle is invalid, |bytes| exceeds INT_MAX, or a mapping
+// already exists. On success the mapping is registered with the
+// SharedMemoryTracker and the mechanism/GUID of the mapped handle are
+// remembered for Unmap().
+bool SharedMemory::MapAt(off_t offset, size_t bytes) {
+  if (!shm_.IsValid())
+    return false;
+  if (bytes > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return false;
+  if (memory_)
+    return false;
+
+  bool success = shm_.MapAt(offset, bytes, &memory_, read_only_);
+  if (success) {
+    mapped_size_ = bytes;
+    DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(memory_) &
+                      (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
+    mapped_memory_mechanism_ = shm_.type_;
+    mapped_id_ = shm_.GetGUID();
+    SharedMemoryTracker::GetInstance()->IncrementMemoryUsage(*this);
+  } else {
+    memory_ = nullptr;
+  }
+
+  return success;
+}
+
+// Tears down the current mapping, using munmap() or mach_vm_deallocate()
+// depending on which mechanism MapAt() recorded. Returns false if nothing
+// is mapped.
+bool SharedMemory::Unmap() {
+  if (!memory_)
+    return false;
+
+  SharedMemoryTracker::GetInstance()->DecrementMemoryUsage(*this);
+  switch (mapped_memory_mechanism_) {
+    case SharedMemoryHandle::POSIX:
+      munmap(memory_, mapped_size_);
+      break;
+    case SharedMemoryHandle::MACH:
+      mach_vm_deallocate(mach_task_self(),
+                         reinterpret_cast<mach_vm_address_t>(memory_),
+                         mapped_size_);
+      break;
+  }
+  memory_ = nullptr;
+  mapped_size_ = 0;
+  mapped_id_ = UnguessableToken();
+  return true;
+}
+
+SharedMemoryHandle SharedMemory::handle() const {
+  return shm_;
+}
+
+// Hands the caller an owning duplicate of the handle, then unmaps and
+// closes this object's own copies, leaving |this| empty.
+SharedMemoryHandle SharedMemory::TakeHandle() {
+  SharedMemoryHandle dup = DuplicateHandle(handle());
+  Unmap();
+  Close();
+  return dup;
+}
+
+// Releases the primary handle and, for POSIX-backed regions, the secondary
+// read-only handle created for GetReadOnlyHandle().
+void SharedMemory::Close() {
+  // Capture the mechanism BEFORE resetting |shm_|: the previous code tested
+  // shm_.type_ after assigning a default-constructed handle, so the POSIX
+  // branch depended on the default handle's type rather than the type of
+  // the handle just closed, and could leak |readonly_shm_|'s descriptor.
+  const SharedMemoryHandle::Type mechanism = shm_.type_;
+  shm_.Close();
+  shm_ = SharedMemoryHandle();
+  if (mechanism == SharedMemoryHandle::POSIX) {
+    if (readonly_shm_.IsValid()) {
+      readonly_shm_.Close();
+      readonly_shm_ = SharedMemoryHandle();
+    }
+  }
+}
+
+// Returns a read-only handle to this region. POSIX-backed regions duplicate
+// the read-only fd opened at creation time; Mach-backed regions mint a new
+// VM_PROT_READ-only memory entry. On failure an invalid handle is returned.
+SharedMemoryHandle SharedMemory::GetReadOnlyHandle() const {
+  if (shm_.type_ == SharedMemoryHandle::POSIX) {
+    // We could imagine re-opening the file from /dev/fd, but that can't make it
+    // readonly on Mac: https://codereview.chromium.org/27265002/#msg10.
+    CHECK(readonly_shm_.IsValid());
+    return readonly_shm_.Duplicate();
+  }
+
+  DCHECK(shm_.IsValid());
+  SharedMemoryHandle new_handle;
+  bool success = MakeMachSharedMemoryHandleReadOnly(&new_handle, shm_, memory_);
+  if (success)
+    new_handle.SetOwnershipPassesToIPC(true);
+  return new_handle;
+}
+
+}  // namespace base
diff --git a/src/base/memory/shared_memory_mac_unittest.cc b/src/base/memory/shared_memory_mac_unittest.cc
new file mode 100644
index 0000000..0b78455
--- /dev/null
+++ b/src/base/memory/shared_memory_mac_unittest.cc
@@ -0,0 +1,457 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <mach/mach.h>
+#include <mach/mach_vm.h>
+#include <servers/bootstrap.h>
+
+#include "base/command_line.h"
+#include "base/mac/mac_util.h"
+#include "base/mac/mach_logging.h"
+#include "base/mac/scoped_mach_port.h"
+#include "base/macros.h"
+#include "base/memory/shared_memory.h"
+#include "base/process/process_handle.h"
+#include "base/rand_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/sys_info.h"
+#include "base/test/multiprocess_test.h"
+#include "base/test/test_timeouts.h"
+#include "base/unguessable_token.h"
+#include "starboard/memory.h"
+#include "starboard/types.h"
+#include "testing/multiprocess_func_list.h"
+
+namespace base {
+
+namespace {
+
+// Gets the current and maximum protection levels of the memory region.
+// Returns whether the operation was successful.
+// |current| and |max| are output variables only populated on success.
+bool GetProtections(void* address, size_t size, int* current, int* max) {
+  vm_region_info_t region_info;
+  mach_vm_address_t mem_address = reinterpret_cast<mach_vm_address_t>(address);
+  mach_vm_size_t mem_size = size;
+  vm_region_basic_info_64 basic_info;
+
+  region_info = reinterpret_cast<vm_region_recurse_info_t>(&basic_info);
+  vm_region_flavor_t flavor = VM_REGION_BASIC_INFO_64;
+  memory_object_name_t memory_object;
+  mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
+
+  kern_return_t kr =
+      mach_vm_region(mach_task_self(), &mem_address, &mem_size, flavor,
+                     region_info, &count, &memory_object);
+  if (kr != KERN_SUCCESS) {
+    MACH_LOG(ERROR, kr) << "Failed to get region info.";
+    return false;
+  }
+
+  *current = basic_info.protection;
+  *max = basic_info.max_protection;
+  return true;
+}
+
+// Creates a new Mach-backed SharedMemory with the given |size|, maps it,
+// and fills it with 'a'. Returns nullptr if the handle could not be made.
+std::unique_ptr<SharedMemory> CreateSharedMemory(int size) {
+  SharedMemoryHandle shm(size, UnguessableToken::Create());
+  if (!shm.IsValid()) {
+    LOG(ERROR) << "Failed to make SharedMemoryHandle";
+    return nullptr;
+  }
+  std::unique_ptr<SharedMemory> shared_memory(new SharedMemory(shm, false));
+  shared_memory->Map(size);
+  SbMemorySet(shared_memory->memory(), 'a', size);
+  return shared_memory;
+}
+
+// Switch used to pass the bootstrap service name to the child process.
+// NOTE(review): file-scope std::string has a non-trivial static initializer;
+// Chromium style generally avoids non-POD globals — confirm acceptable here.
+static const std::string g_service_switch_name = "service_name";
+
+// Structs used to pass a mach port from client to server.
+struct MachSendPortMessage {
+  mach_msg_header_t header;
+  mach_msg_body_t body;
+  mach_msg_port_descriptor_t data;
+};
+// Same layout as MachSendPortMessage plus the trailer the kernel appends on
+// receive.
+struct MachReceivePortMessage {
+  mach_msg_header_t header;
+  mach_msg_body_t body;
+  mach_msg_port_descriptor_t data;
+  mach_msg_trailer_t trailer;
+};
+
+// Makes the current process into a Mach Server with the given |service_name|.
+// Returns the checked-in receive right; CHECK-fails on error.
+mach_port_t BecomeMachServer(const char* service_name) {
+  mach_port_t port;
+  kern_return_t kr = bootstrap_check_in(bootstrap_port, service_name, &port);
+  MACH_CHECK(kr == KERN_SUCCESS, kr) << "BecomeMachServer";
+  return port;
+}
+
+// Returns the mach port for the Mach Server with the given |service_name|.
+mach_port_t LookupServer(const char* service_name) {
+  mach_port_t server_port;
+  kern_return_t kr =
+      bootstrap_look_up(bootstrap_port, service_name, &server_port);
+  MACH_CHECK(kr == KERN_SUCCESS, kr) << "LookupServer";
+  return server_port;
+}
+
+// Allocates a fresh receive right in the current task.
+mach_port_t MakeReceivingPort() {
+  mach_port_t client_port;
+  kern_return_t kr =
+      mach_port_allocate(mach_task_self(),         // our task is acquiring
+                         MACH_PORT_RIGHT_RECEIVE,  // a new receive right
+                         &client_port);            // with this name
+  MACH_CHECK(kr == KERN_SUCCESS, kr) << "MakeReceivingPort";
+  return client_port;
+}
+
+// Blocks until a mach message is sent to |server_port|. This mach message
+// must contain a mach port. Returns that mach port.
+mach_port_t ReceiveMachPort(mach_port_t port_to_listen_on) {
+  MachReceivePortMessage recv_msg;
+  mach_msg_header_t* recv_hdr = &(recv_msg.header);
+  recv_hdr->msgh_local_port = port_to_listen_on;
+  recv_hdr->msgh_size = sizeof(recv_msg);
+  kern_return_t kr =
+      mach_msg(recv_hdr,               // message buffer
+               MACH_RCV_MSG,           // option indicating service
+               0,                      // send size
+               recv_hdr->msgh_size,    // size of header + body
+               port_to_listen_on,      // receive name
+               MACH_MSG_TIMEOUT_NONE,  // no timeout, wait forever
+               MACH_PORT_NULL);        // no notification port
+  MACH_CHECK(kr == KERN_SUCCESS, kr) << "ReceiveMachPort";
+  mach_port_t other_task_port = recv_msg.data.name;
+  return other_task_port;
+}
+
+// Passes a copy of the send right of |port_to_send| to |receiving_port|,
+// using the given |disposition| (e.g. MACH_MSG_TYPE_COPY_SEND).
+void SendMachPort(mach_port_t receiving_port,
+                  mach_port_t port_to_send,
+                  int disposition) {
+  MachSendPortMessage send_msg;
+  mach_msg_header_t* send_hdr;
+  send_hdr = &(send_msg.header);
+  send_hdr->msgh_bits =
+      MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0) | MACH_MSGH_BITS_COMPLEX;
+  send_hdr->msgh_size = sizeof(send_msg);
+  send_hdr->msgh_remote_port = receiving_port;
+  send_hdr->msgh_local_port = MACH_PORT_NULL;
+  send_hdr->msgh_reserved = 0;
+  send_hdr->msgh_id = 0;
+  send_msg.body.msgh_descriptor_count = 1;
+  send_msg.data.name = port_to_send;
+  send_msg.data.disposition = disposition;
+  send_msg.data.type = MACH_MSG_PORT_DESCRIPTOR;
+  int kr = mach_msg(send_hdr,               // message buffer
+                    MACH_SEND_MSG,          // option indicating send
+                    send_hdr->msgh_size,    // size of header + body
+                    0,                      // receive limit
+                    MACH_PORT_NULL,         // receive name
+                    MACH_MSG_TIMEOUT_NONE,  // no timeout, wait forever
+                    MACH_PORT_NULL);        // no notification port
+  MACH_CHECK(kr == KERN_SUCCESS, kr) << "SendMachPort";
+}
+
+// Builds a per-run unique bootstrap service name so concurrent test runs
+// don't collide.
+std::string CreateRandomServiceName() {
+  return StringPrintf("SharedMemoryMacMultiProcessTest.%llu", RandUint64());
+}
+
+// Sets up the mach communication ports with the server. Returns a port to which
+// the server will send mach objects.
+mach_port_t CommonChildProcessSetUp() {
+  CommandLine cmd_line = *CommandLine::ForCurrentProcess();
+  std::string service_name =
+      cmd_line.GetSwitchValueASCII(g_service_switch_name);
+  mac::ScopedMachSendRight server_port(LookupServer(service_name.c_str()));
+  mach_port_t client_port = MakeReceivingPort();
+
+  // Send the port that this process is listening on to the server.
+  SendMachPort(server_port.get(), client_port, MACH_MSG_TYPE_MAKE_SEND);
+  return client_port;
+}
+
+// The number of active names in the current task's port name space.
+// Used by the tests below to detect leaked Mach rights.
+mach_msg_type_number_t GetActiveNameCount() {
+  mach_port_name_array_t name_array;
+  mach_msg_type_number_t names_count;
+  mach_port_type_array_t type_array;
+  mach_msg_type_number_t types_count;
+  kern_return_t kr = mach_port_names(mach_task_self(), &name_array,
+                                     &names_count, &type_array, &types_count);
+  MACH_CHECK(kr == KERN_SUCCESS, kr) << "GetActiveNameCount";
+  return names_count;
+}
+
+}  // namespace
+
+// Fixture that spawns a child process and establishes a pair of Mach ports
+// with it via a uniquely named bootstrap service, so tests can pass Mach
+// memory objects between the two processes.
+class SharedMemoryMacMultiProcessTest : public MultiProcessTest {
+ public:
+  SharedMemoryMacMultiProcessTest() {}
+
+  CommandLine MakeCmdLine(const std::string& procname) override {
+    CommandLine command_line = MultiProcessTest::MakeCmdLine(procname);
+    // Pass the service name to the child process.
+    command_line.AppendSwitchASCII(g_service_switch_name, service_name_);
+    return command_line;
+  }
+
+  // Registers a bootstrap service, spawns the child named |name|, and waits
+  // for the child to send back the port it listens on.
+  void SetUpChild(const std::string& name) {
+    // Make a random service name so that this test doesn't conflict with other
+    // similar tests.
+    service_name_ = CreateRandomServiceName();
+    server_port_.reset(BecomeMachServer(service_name_.c_str()));
+    child_process_ = SpawnChild(name);
+    client_port_.reset(ReceiveMachPort(server_port_.get()));
+  }
+
+  // Shared-memory region size used by all tests in this file.
+  static const int s_memory_size = 99999;
+
+ protected:
+  std::string service_name_;
+
+  // A port on which the main process listens for mach messages from the child
+  // process.
+  mac::ScopedMachReceiveRight server_port_;
+
+  // A port on which the child process listens for mach messages from the main
+  // process.
+  mac::ScopedMachSendRight client_port_;
+
+  base::Process child_process_;
+  DISALLOW_COPY_AND_ASSIGN(SharedMemoryMacMultiProcessTest);
+};
+
+// Tests that content written to shared memory in the server process can be read
+// by the child process.
+TEST_F(SharedMemoryMacMultiProcessTest, MachBasedSharedMemory) {
+  SetUpChild("MachBasedSharedMemoryClient");
+
+  std::unique_ptr<SharedMemory> shared_memory(
+      CreateSharedMemory(s_memory_size));
+
+  // Send the underlying memory object to the client process.
+  SendMachPort(client_port_.get(), shared_memory->handle().GetMemoryObject(),
+               MACH_MSG_TYPE_COPY_SEND);
+  int rv = -1;
+  ASSERT_TRUE(child_process_.WaitForExitWithTimeout(
+      TestTimeouts::action_timeout(), &rv));
+  EXPECT_EQ(0, rv);
+}
+
+// Child side: receives the memory object, maps it, and verifies the 'a'
+// fill written by the parent. Failure is signaled via DCHECK (exit code).
+MULTIPROCESS_TEST_MAIN(MachBasedSharedMemoryClient) {
+  mac::ScopedMachReceiveRight client_port(CommonChildProcessSetUp());
+  // The next mach port should be for a memory object.
+  mach_port_t memory_object = ReceiveMachPort(client_port.get());
+  SharedMemoryHandle shm(memory_object,
+                         SharedMemoryMacMultiProcessTest::s_memory_size,
+                         UnguessableToken::Create());
+  SharedMemory shared_memory(shm, false);
+  shared_memory.Map(SharedMemoryMacMultiProcessTest::s_memory_size);
+  const char* start = static_cast<const char*>(shared_memory.memory());
+  for (int i = 0; i < SharedMemoryMacMultiProcessTest::s_memory_size; ++i) {
+    DCHECK_EQ(start[i], 'a');
+  }
+  return 0;
+}
+
+// Tests that mapping shared memory with an offset works correctly.
+TEST_F(SharedMemoryMacMultiProcessTest, MachBasedSharedMemoryWithOffset) {
+  SetUpChild("MachBasedSharedMemoryWithOffsetClient");
+
+  SharedMemoryHandle shm(s_memory_size, UnguessableToken::Create());
+  ASSERT_TRUE(shm.IsValid());
+  SharedMemory shared_memory(shm, false);
+  shared_memory.Map(s_memory_size);
+
+  // Fill three consecutive pages with distinct bytes so the child can tell
+  // which page its offset mapping starts at.
+  size_t page_size = SysInfo::VMAllocationGranularity();
+  char* start = static_cast<char*>(shared_memory.memory());
+  SbMemorySet(start, 'a', page_size);
+  SbMemorySet(start + page_size, 'b', page_size);
+  SbMemorySet(start + 2 * page_size, 'c', page_size);
+
+  // Send the underlying memory object to the client process.
+  SendMachPort(
+      client_port_.get(), shm.GetMemoryObject(), MACH_MSG_TYPE_COPY_SEND);
+  int rv = -1;
+  ASSERT_TRUE(child_process_.WaitForExitWithTimeout(
+      TestTimeouts::action_timeout(), &rv));
+  EXPECT_EQ(0, rv);
+}
+
+// Child side: maps two pages starting one page in, so it should observe the
+// 'b' page followed by the 'c' page.
+MULTIPROCESS_TEST_MAIN(MachBasedSharedMemoryWithOffsetClient) {
+  mac::ScopedMachReceiveRight client_port(CommonChildProcessSetUp());
+  // The next mach port should be for a memory object.
+  mach_port_t memory_object = ReceiveMachPort(client_port.get());
+  SharedMemoryHandle shm(memory_object,
+                         SharedMemoryMacMultiProcessTest::s_memory_size,
+                         UnguessableToken::Create());
+  SharedMemory shared_memory(shm, false);
+  size_t page_size = SysInfo::VMAllocationGranularity();
+  shared_memory.MapAt(page_size, 2 * page_size);
+  const char* start = static_cast<const char*>(shared_memory.memory());
+  for (size_t i = 0; i < page_size; ++i) {
+    DCHECK_EQ(start[i], 'b');
+  }
+  for (size_t i = page_size; i < 2 * page_size; ++i) {
+    DCHECK_EQ(start[i], 'c');
+  }
+  return 0;
+}
+
+// Tests that duplication and closing has the right effect on Mach reference
+// counts, using the task's active port-name count as the observable.
+TEST_F(SharedMemoryMacMultiProcessTest, MachDuplicateAndClose) {
+  mach_msg_type_number_t active_name_count = GetActiveNameCount();
+
+  // Making a new SharedMemoryHandle increments the name count.
+  SharedMemoryHandle shm(s_memory_size, UnguessableToken::Create());
+  ASSERT_TRUE(shm.IsValid());
+  EXPECT_EQ(active_name_count + 1, GetActiveNameCount());
+
+  // Duplicating the SharedMemoryHandle increments the ref count, but doesn't
+  // make a new name.
+  shm.Duplicate();
+  EXPECT_EQ(active_name_count + 1, GetActiveNameCount());
+
+  // Closing the SharedMemoryHandle decrements the ref count. The first time has
+  // no effect.
+  shm.Close();
+  EXPECT_EQ(active_name_count + 1, GetActiveNameCount());
+
+  // Closing the SharedMemoryHandle decrements the ref count. The second time
+  // destroys the port.
+  shm.Close();
+  EXPECT_EQ(active_name_count, GetActiveNameCount());
+}
+
+// Tests that Mach shared memory can be mapped and unmapped.
+TEST_F(SharedMemoryMacMultiProcessTest, MachUnmapMap) {
+  mach_msg_type_number_t active_name_count = GetActiveNameCount();
+
+  std::unique_ptr<SharedMemory> shared_memory =
+      CreateSharedMemory(s_memory_size);
+  ASSERT_TRUE(shared_memory->Unmap());
+  ASSERT_TRUE(shared_memory->Map(s_memory_size));
+  // Destroying the object must release every Mach right it acquired.
+  shared_memory.reset();
+  EXPECT_EQ(active_name_count, GetActiveNameCount());
+}
+
+// Tests that passing a SharedMemoryHandle to a SharedMemory object also passes
+// ownership, and that destroying the SharedMemory closes the SharedMemoryHandle
+// as well.
+TEST_F(SharedMemoryMacMultiProcessTest, MachSharedMemoryTakesOwnership) {
+  mach_msg_type_number_t active_name_count = GetActiveNameCount();
+
+  // Making a new SharedMemoryHandle increments the name count.
+  SharedMemoryHandle shm(s_memory_size, UnguessableToken::Create());
+  ASSERT_TRUE(shm.IsValid());
+  EXPECT_EQ(active_name_count + 1, GetActiveNameCount());
+
+  // Name count doesn't change when mapping the memory.
+  std::unique_ptr<SharedMemory> shared_memory(new SharedMemory(shm, false));
+  shared_memory->Map(s_memory_size);
+  EXPECT_EQ(active_name_count + 1, GetActiveNameCount());
+
+  // Destroying the SharedMemory object frees the resource.
+  shared_memory.reset();
+  EXPECT_EQ(active_name_count, GetActiveNameCount());
+}
+
+// Tests that the read-only flag works.
+TEST_F(SharedMemoryMacMultiProcessTest, MachReadOnly) {
+  std::unique_ptr<SharedMemory> shared_memory(
+      CreateSharedMemory(s_memory_size));
+
+  SharedMemoryHandle shm2 = shared_memory->handle().Duplicate();
+  ASSERT_TRUE(shm2.IsValid());
+  SharedMemory shared_memory2(shm2, true);
+  shared_memory2.Map(s_memory_size);
+  ASSERT_DEATH(SbMemorySet(shared_memory2.memory(), 'b', s_memory_size), "");
+}
+
+// Tests that duplication of the underlying handle works.
+TEST_F(SharedMemoryMacMultiProcessTest, MachDuplicate) {
+  mach_msg_type_number_t active_name_count = GetActiveNameCount();
+
+  {
+    std::unique_ptr<SharedMemory> shared_memory(
+        CreateSharedMemory(s_memory_size));
+
+    SharedMemoryHandle shm2 = shared_memory->handle().Duplicate();
+    ASSERT_TRUE(shm2.IsValid());
+    SharedMemory shared_memory2(shm2, true);
+    shared_memory2.Map(s_memory_size);
+
+    ASSERT_EQ(0, SbMemoryCompare(shared_memory->memory(),
+                                 shared_memory2.memory(), s_memory_size));
+  }
+
+  EXPECT_EQ(active_name_count, GetActiveNameCount());
+}
+
+// Tests that the method GetReadOnlyHandle() creates a memory object that
+// is read only.
+TEST_F(SharedMemoryMacMultiProcessTest, MachReadonly) {
+  std::unique_ptr<SharedMemory> shared_memory(
+      CreateSharedMemory(s_memory_size));
+
+  // Check the protection levels.
+  int current_prot, max_prot;
+  ASSERT_TRUE(GetProtections(shared_memory->memory(),
+                             shared_memory->mapped_size(), &current_prot,
+                             &max_prot));
+  ASSERT_EQ(VM_PROT_READ | VM_PROT_WRITE, current_prot);
+  ASSERT_EQ(VM_PROT_READ | VM_PROT_WRITE, max_prot);
+
+  // Make a new memory object.
+  SharedMemoryHandle shm2 = shared_memory->GetReadOnlyHandle();
+  ASSERT_TRUE(shm2.IsValid());
+  EXPECT_EQ(shared_memory->handle().GetGUID(), shm2.GetGUID());
+
+  // Mapping with |readonly| set to |false| should fail.
+  SharedMemory shared_memory2(shm2, false);
+  shared_memory2.Map(s_memory_size);
+  ASSERT_EQ(nullptr, shared_memory2.memory());
+
+  // Now try mapping with |readonly| set to |true|.
+  SharedMemory shared_memory3(shm2.Duplicate(), true);
+  shared_memory3.Map(s_memory_size);
+  ASSERT_NE(nullptr, shared_memory3.memory());
+
+  // Check the protection levels.
+  ASSERT_TRUE(GetProtections(shared_memory3.memory(),
+                             shared_memory3.mapped_size(), &current_prot,
+                             &max_prot));
+  ASSERT_EQ(VM_PROT_READ, current_prot);
+  ASSERT_EQ(VM_PROT_READ, max_prot);
+
+  // The memory should still be readonly, since the underlying memory object
+  // is readonly.
+  ASSERT_DEATH(SbMemorySet(shared_memory2.memory(), 'b', s_memory_size), "");
+}
+
+// Tests that the method GetReadOnlyHandle() doesn't leak.
+TEST_F(SharedMemoryMacMultiProcessTest, MachReadonlyLeak) {
+  mach_msg_type_number_t active_name_count = GetActiveNameCount();
+
+  {
+    std::unique_ptr<SharedMemory> shared_memory(
+        CreateSharedMemory(s_memory_size));
+
+    SharedMemoryHandle shm2 = shared_memory->GetReadOnlyHandle();
+    ASSERT_TRUE(shm2.IsValid());
+
+    // Intentionally map with |readonly| set to |false|.
+    SharedMemory shared_memory2(shm2, false);
+    shared_memory2.Map(s_memory_size);
+  }
+
+  EXPECT_EQ(active_name_count, GetActiveNameCount());
+}
+
+}  // namespace base
diff --git a/src/base/memory/shared_memory_mapping.cc b/src/base/memory/shared_memory_mapping.cc
new file mode 100644
index 0000000..0348ff1
--- /dev/null
+++ b/src/base/memory/shared_memory_mapping.cc
@@ -0,0 +1,116 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory_mapping.h"
+
+#include <utility>
+
+#include "base/logging.h"
+#include "base/memory/shared_memory_tracker.h"
+#include "base/unguessable_token.h"
+#include "build/build_config.h"
+
+#if defined(OS_POSIX)
+#include <sys/mman.h>
+#endif
+
+#if defined(OS_WIN)
+#include <aclapi.h>
+#endif
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+#include <mach/mach_vm.h>
+#include "base/mac/mach_logging.h"
+#endif
+
+#if defined(OS_FUCHSIA)
+#include <lib/zx/vmar.h>
+#include "base/fuchsia/fuchsia_logging.h"
+#include "starboard/types.h"
+#endif
+
+namespace base {
+
+SharedMemoryMapping::SharedMemoryMapping() = default;
+
+SharedMemoryMapping::SharedMemoryMapping(SharedMemoryMapping&& mapping)
+    : memory_(mapping.memory_),
+      size_(mapping.size_),
+      mapped_size_(mapping.mapped_size_),
+      guid_(mapping.guid_) {
+  mapping.memory_ = nullptr;
+}
+
+SharedMemoryMapping& SharedMemoryMapping::operator=(
+    SharedMemoryMapping&& mapping) {
+  Unmap();
+  memory_ = mapping.memory_;
+  size_ = mapping.size_;
+  mapped_size_ = mapping.mapped_size_;
+  guid_ = mapping.guid_;
+  mapping.memory_ = nullptr;
+  return *this;
+}
+
+SharedMemoryMapping::~SharedMemoryMapping() {
+  Unmap();
+}
+
+SharedMemoryMapping::SharedMemoryMapping(void* memory,
+                                         size_t size,
+                                         size_t mapped_size,
+                                         const UnguessableToken& guid)
+    : memory_(memory), size_(size), mapped_size_(mapped_size), guid_(guid) {
+  SharedMemoryTracker::GetInstance()->IncrementMemoryUsage(*this);
+}
+
+void SharedMemoryMapping::Unmap() {
+  if (!IsValid())
+    return;
+
+  SharedMemoryTracker::GetInstance()->DecrementMemoryUsage(*this);
+#if defined(OS_WIN)
+  if (!UnmapViewOfFile(memory_))
+    DPLOG(ERROR) << "UnmapViewOfFile";
+#elif defined(OS_FUCHSIA)
+  uintptr_t addr = reinterpret_cast<uintptr_t>(memory_);
+  zx_status_t status = zx::vmar::root_self()->unmap(addr, mapped_size_);
+  if (status != ZX_OK)
+    ZX_DLOG(ERROR, status) << "zx_vmar_unmap";
+#elif defined(OS_MACOSX) && !defined(OS_IOS)
+  kern_return_t kr = mach_vm_deallocate(
+      mach_task_self(), reinterpret_cast<mach_vm_address_t>(memory_),
+      mapped_size_);
+  MACH_DLOG_IF(ERROR, kr != KERN_SUCCESS, kr) << "mach_vm_deallocate";
+#else
+  if (munmap(memory_, mapped_size_) < 0)
+    DPLOG(ERROR) << "munmap";
+#endif
+}
+
+ReadOnlySharedMemoryMapping::ReadOnlySharedMemoryMapping() = default;
+ReadOnlySharedMemoryMapping::ReadOnlySharedMemoryMapping(
+    ReadOnlySharedMemoryMapping&&) = default;
+ReadOnlySharedMemoryMapping& ReadOnlySharedMemoryMapping::operator=(
+    ReadOnlySharedMemoryMapping&&) = default;
+ReadOnlySharedMemoryMapping::ReadOnlySharedMemoryMapping(
+    void* address,
+    size_t size,
+    size_t mapped_size,
+    const UnguessableToken& guid)
+    : SharedMemoryMapping(address, size, mapped_size, guid) {}
+
+WritableSharedMemoryMapping::WritableSharedMemoryMapping() = default;
+WritableSharedMemoryMapping::WritableSharedMemoryMapping(
+    WritableSharedMemoryMapping&&) = default;
+WritableSharedMemoryMapping& WritableSharedMemoryMapping::operator=(
+    WritableSharedMemoryMapping&&) = default;
+WritableSharedMemoryMapping::WritableSharedMemoryMapping(
+    void* address,
+    size_t size,
+    size_t mapped_size,
+    const UnguessableToken& guid)
+    : SharedMemoryMapping(address, size, mapped_size, guid) {}
+
+}  // namespace base
diff --git a/src/base/memory/shared_memory_mapping.h b/src/base/memory/shared_memory_mapping.h
new file mode 100644
index 0000000..7c77790
--- /dev/null
+++ b/src/base/memory/shared_memory_mapping.h
@@ -0,0 +1,221 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_SHARED_MEMORY_MAPPING_H_
+#define BASE_MEMORY_SHARED_MEMORY_MAPPING_H_
+
+#if !defined(STARBOARD)
+
+#include <cstddef>
+
+#include "base/containers/span.h"
+#include "base/macros.h"
+#include "base/unguessable_token.h"
+
+namespace base {
+
+namespace subtle {
+class PlatformSharedMemoryRegion;
+}  // namespace subtle
+
+// Base class for scoped handles to a shared memory mapping created from a
+// shared memory region. Created shared memory mappings remain valid even if the
+// creator region is transferred or destroyed.
+//
+// Each mapping has an UnguessableToken that identifies the shared memory region
+// it was created from. This is used for memory metrics, to avoid overcounting
+// shared memory.
+class BASE_EXPORT SharedMemoryMapping {
+ public:
+  // Default constructor initializes an invalid instance.
+  SharedMemoryMapping();
+
+  // Move operations are allowed.
+  SharedMemoryMapping(SharedMemoryMapping&& mapping);
+  SharedMemoryMapping& operator=(SharedMemoryMapping&& mapping);
+
+  // Unmaps the region if the mapping is valid.
+  virtual ~SharedMemoryMapping();
+
+  // Returns true iff the mapping is valid. False means there is no
+  // corresponding area of memory.
+  bool IsValid() const { return memory_ != nullptr; }
+
+  // Returns the logical size of the mapping in bytes. This is precisely the
+  // size requested by whoever created the mapping, and it is always less than
+  // or equal to |mapped_size()|. This is undefined for invalid instances.
+  size_t size() const {
+    DCHECK(IsValid());
+    return size_;
+  }
+
+  // Returns the actual size of the mapping in bytes. This is always at least
+  // as large as |size()| but may be larger due to platform mapping alignment
+  // constraints. This is undefined for invalid instances.
+  size_t mapped_size() const {
+    DCHECK(IsValid());
+    return mapped_size_;
+  }
+
+  // Returns 128-bit GUID of the region this mapping belongs to.
+  const UnguessableToken& guid() const {
+    DCHECK(IsValid());
+    return guid_;
+  }
+
+ protected:
+  SharedMemoryMapping(void* address,
+                      size_t size,
+                      size_t mapped_size,
+                      const UnguessableToken& guid);
+  void* raw_memory_ptr() const { return memory_; }
+
+ private:
+  friend class SharedMemoryTracker;
+
+  void Unmap();
+
+  void* memory_ = nullptr;
+  size_t size_ = 0;
+  size_t mapped_size_ = 0;
+  UnguessableToken guid_;
+
+  DISALLOW_COPY_AND_ASSIGN(SharedMemoryMapping);
+};
+
+// Class modeling a read-only mapping of a shared memory region into the
+// current process' address space. This is created by ReadOnlySharedMemoryRegion
+// instances.
+class BASE_EXPORT ReadOnlySharedMemoryMapping : public SharedMemoryMapping {
+ public:
+  // Default constructor initializes an invalid instance.
+  ReadOnlySharedMemoryMapping();
+
+  // Move operations are allowed.
+  ReadOnlySharedMemoryMapping(ReadOnlySharedMemoryMapping&&);
+  ReadOnlySharedMemoryMapping& operator=(ReadOnlySharedMemoryMapping&&);
+
+  // Returns the base address of the mapping. This is read-only memory. This is
+  // page-aligned. This is nullptr for invalid instances.
+  const void* memory() const { return raw_memory_ptr(); }
+
+  // Returns a pointer to a page-aligned const T if the mapping is valid and
+  // large enough to contain a T, or nullptr otherwise.
+  template <typename T>
+  const T* GetMemoryAs() const {
+    if (!IsValid())
+      return nullptr;
+    if (sizeof(T) > size())
+      return nullptr;
+    return static_cast<const T*>(raw_memory_ptr());
+  }
+
+  // Returns a span of const T. The number of elements is autodeduced from the
+  // size of the shared memory mapping. The number of elements may be
+  // autodeduced as zero, i.e. the mapping is invalid or the size of the mapping
+  // isn't large enough to contain even one T: in that case, an empty span
+  // will be returned. The first element, if any, is guaranteed to be
+  // page-aligned.
+  template <typename T>
+  span<const T> GetMemoryAsSpan() const {
+    if (!IsValid())
+      return span<const T>();
+    size_t count = size() / sizeof(T);
+    return GetMemoryAsSpan<T>(count);
+  }
+
+  // Returns a span of const T with |count| elements if the mapping is valid and
+  // large enough to contain |count| elements, or an empty span otherwise. The
+  // first element, if any, is guaranteed to be page-aligned.
+  template <typename T>
+  span<const T> GetMemoryAsSpan(size_t count) const {
+    if (!IsValid())
+      return span<const T>();
+    if (size() / sizeof(T) < count)
+      return span<const T>();
+    return span<const T>(static_cast<const T*>(raw_memory_ptr()), count);
+  }
+
+ private:
+  friend class ReadOnlySharedMemoryRegion;
+  ReadOnlySharedMemoryMapping(void* address,
+                              size_t size,
+                              size_t mapped_size,
+                              const UnguessableToken& guid);
+
+  DISALLOW_COPY_AND_ASSIGN(ReadOnlySharedMemoryMapping);
+};
+
+// Class modeling a writable mapping of a shared memory region into the
+// current process' address space. This is created by *SharedMemoryRegion
+// instances.
+class BASE_EXPORT WritableSharedMemoryMapping : public SharedMemoryMapping {
+ public:
+  // Default constructor initializes an invalid instance.
+  WritableSharedMemoryMapping();
+
+  // Move operations are allowed.
+  WritableSharedMemoryMapping(WritableSharedMemoryMapping&&);
+  WritableSharedMemoryMapping& operator=(WritableSharedMemoryMapping&&);
+
+  // Returns the base address of the mapping. This is writable memory. This is
+  // page-aligned. This is nullptr for invalid instances.
+  void* memory() const { return raw_memory_ptr(); }
+
+  // Returns a pointer to a page-aligned T if the mapping is valid and large
+  // enough to contain a T, or nullptr otherwise.
+  template <typename T>
+  T* GetMemoryAs() const {
+    if (!IsValid())
+      return nullptr;
+    if (sizeof(T) > size())
+      return nullptr;
+    return static_cast<T*>(raw_memory_ptr());
+  }
+
+  // Returns a span of T. The number of elements is autodeduced from the size of
+  // the shared memory mapping. The number of elements may be autodeduced as
+  // zero, i.e. the mapping is invalid or the size of the mapping isn't large
+  // enough to contain even one T: in that case, an empty span will be returned.
+  // The first element, if any, is guaranteed to be page-aligned.
+  template <typename T>
+  span<T> GetMemoryAsSpan() const {
+    if (!IsValid())
+      return span<T>();
+    size_t count = size() / sizeof(T);
+    return GetMemoryAsSpan<T>(count);
+  }
+
+  // Returns a span of T with |count| elements if the mapping is valid and large
+  // enough to contain |count| elements, or an empty span otherwise. The first
+  // element, if any, is guaranteed to be page-aligned.
+  template <typename T>
+  span<T> GetMemoryAsSpan(size_t count) const {
+    if (!IsValid())
+      return span<T>();
+    if (size() / sizeof(T) < count)
+      return span<T>();
+    return span<T>(static_cast<T*>(raw_memory_ptr()), count);
+  }
+
+ private:
+  friend WritableSharedMemoryMapping MapAtForTesting(
+      subtle::PlatformSharedMemoryRegion* region,
+      off_t offset,
+      size_t size);
+  friend class ReadOnlySharedMemoryRegion;
+  friend class WritableSharedMemoryRegion;
+  friend class UnsafeSharedMemoryRegion;
+  WritableSharedMemoryMapping(void* address,
+                              size_t size,
+                              size_t mapped_size,
+                              const UnguessableToken& guid);
+
+  DISALLOW_COPY_AND_ASSIGN(WritableSharedMemoryMapping);
+};
+
+}  // namespace base
+
+#endif  // !defined(STARBOARD)
+#endif  // BASE_MEMORY_SHARED_MEMORY_MAPPING_H_
diff --git a/src/base/memory/shared_memory_mapping_unittest.cc b/src/base/memory/shared_memory_mapping_unittest.cc
new file mode 100644
index 0000000..2a56087
--- /dev/null
+++ b/src/base/memory/shared_memory_mapping_unittest.cc
@@ -0,0 +1,145 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory_mapping.h"
+
+#include <algorithm>
+#include <limits>
+
+#include "base/containers/span.h"
+#include "base/memory/read_only_shared_memory_region.h"
+#include "starboard/types.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+class SharedMemoryMappingTest : public ::testing::Test {
+ protected:
+  void CreateMapping(size_t size) {
+    auto result = ReadOnlySharedMemoryRegion::Create(size);
+    ASSERT_TRUE(result.IsValid());
+    write_mapping_ = std::move(result.mapping);
+    read_mapping_ = result.region.Map();
+    ASSERT_TRUE(read_mapping_.IsValid());
+  }
+
+  WritableSharedMemoryMapping write_mapping_;
+  ReadOnlySharedMemoryMapping read_mapping_;
+};
+
+TEST_F(SharedMemoryMappingTest, Invalid) {
+  EXPECT_EQ(nullptr, write_mapping_.GetMemoryAs<uint8_t>());
+  EXPECT_EQ(nullptr, read_mapping_.GetMemoryAs<uint8_t>());
+  EXPECT_TRUE(write_mapping_.GetMemoryAsSpan<uint8_t>().empty());
+  EXPECT_TRUE(read_mapping_.GetMemoryAsSpan<uint8_t>().empty());
+  EXPECT_TRUE(write_mapping_.GetMemoryAsSpan<uint8_t>(1).empty());
+  EXPECT_TRUE(read_mapping_.GetMemoryAsSpan<uint8_t>(1).empty());
+}
+
+TEST_F(SharedMemoryMappingTest, Scalar) {
+  CreateMapping(sizeof(uint32_t));
+
+  uint32_t* write_ptr = write_mapping_.GetMemoryAs<uint32_t>();
+  EXPECT_NE(nullptr, write_ptr);
+
+  const uint32_t* read_ptr = read_mapping_.GetMemoryAs<uint32_t>();
+  EXPECT_NE(nullptr, read_ptr);
+
+  *write_ptr = 0u;
+  EXPECT_EQ(0u, *read_ptr);
+
+  *write_ptr = 0x12345678u;
+  EXPECT_EQ(0x12345678u, *read_ptr);
+}
+
+TEST_F(SharedMemoryMappingTest, SpanWithAutoDeducedElementCount) {
+  CreateMapping(sizeof(uint8_t) * 8);
+
+  span<uint8_t> write_span = write_mapping_.GetMemoryAsSpan<uint8_t>();
+  ASSERT_EQ(8u, write_span.size());
+
+  span<const uint32_t> read_span = read_mapping_.GetMemoryAsSpan<uint32_t>();
+  ASSERT_EQ(2u, read_span.size());
+
+  std::fill(write_span.begin(), write_span.end(), 0);
+  EXPECT_EQ(0u, read_span[0]);
+  EXPECT_EQ(0u, read_span[1]);
+
+  for (size_t i = 0; i < write_span.size(); ++i)
+    write_span[i] = i + 1;
+  EXPECT_EQ(0x04030201u, read_span[0]);
+  EXPECT_EQ(0x08070605u, read_span[1]);
+}
+
+TEST_F(SharedMemoryMappingTest, SpanWithExplicitElementCount) {
+  CreateMapping(sizeof(uint8_t) * 8);
+
+  span<uint8_t> write_span = write_mapping_.GetMemoryAsSpan<uint8_t>(8);
+  ASSERT_EQ(8u, write_span.size());
+
+  span<uint8_t> write_span_2 = write_mapping_.GetMemoryAsSpan<uint8_t>(4);
+  ASSERT_EQ(4u, write_span_2.size());
+
+  span<const uint32_t> read_span = read_mapping_.GetMemoryAsSpan<uint32_t>(2);
+  ASSERT_EQ(2u, read_span.size());
+
+  span<const uint32_t> read_span_2 = read_mapping_.GetMemoryAsSpan<uint32_t>(1);
+  ASSERT_EQ(1u, read_span_2.size());
+
+  std::fill(write_span.begin(), write_span.end(), 0);
+  EXPECT_EQ(0u, read_span[0]);
+  EXPECT_EQ(0u, read_span[1]);
+  EXPECT_EQ(0u, read_span_2[0]);
+
+  for (size_t i = 0; i < write_span.size(); ++i)
+    write_span[i] = i + 1;
+  EXPECT_EQ(0x04030201u, read_span[0]);
+  EXPECT_EQ(0x08070605u, read_span[1]);
+  EXPECT_EQ(0x04030201u, read_span_2[0]);
+
+  std::fill(write_span_2.begin(), write_span_2.end(), 0);
+  EXPECT_EQ(0u, read_span[0]);
+  EXPECT_EQ(0x08070605u, read_span[1]);
+  EXPECT_EQ(0u, read_span_2[0]);
+}
+
+TEST_F(SharedMemoryMappingTest, SpanWithZeroElementCount) {
+  CreateMapping(sizeof(uint8_t) * 8);
+
+  EXPECT_TRUE(write_mapping_.GetMemoryAsSpan<uint8_t>(0).empty());
+
+  EXPECT_TRUE(read_mapping_.GetMemoryAsSpan<uint8_t>(0).empty());
+}
+
+TEST_F(SharedMemoryMappingTest, TooBigScalar) {
+  CreateMapping(sizeof(uint8_t));
+
+  EXPECT_EQ(nullptr, write_mapping_.GetMemoryAs<uint32_t>());
+
+  EXPECT_EQ(nullptr, read_mapping_.GetMemoryAs<uint32_t>());
+}
+
+TEST_F(SharedMemoryMappingTest, TooBigSpanWithAutoDeducedElementCount) {
+  CreateMapping(sizeof(uint8_t));
+
+  EXPECT_TRUE(write_mapping_.GetMemoryAsSpan<uint32_t>().empty());
+
+  EXPECT_TRUE(read_mapping_.GetMemoryAsSpan<uint32_t>().empty());
+}
+
+TEST_F(SharedMemoryMappingTest, TooBigSpanWithExplicitElementCount) {
+  CreateMapping(sizeof(uint8_t));
+
+  // Deliberately pick element counts such that a naive bounds calculation would
+  // overflow.
+  EXPECT_TRUE(write_mapping_
+                  .GetMemoryAsSpan<uint32_t>(std::numeric_limits<size_t>::max())
+                  .empty());
+
+  EXPECT_TRUE(read_mapping_
+                  .GetMemoryAsSpan<uint32_t>(std::numeric_limits<size_t>::max())
+                  .empty());
+}
+
+}  // namespace base
diff --git a/src/base/memory/shared_memory_nacl.cc b/src/base/memory/shared_memory_nacl.cc
new file mode 100644
index 0000000..981e7a6
--- /dev/null
+++ b/src/base/memory/shared_memory_nacl.cc
@@ -0,0 +1,138 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include <limits>
+
+#include "base/logging.h"
+#include "base/memory/shared_memory_tracker.h"
+#include "starboard/types.h"
+
+namespace base {
+
+SharedMemory::SharedMemory()
+    : mapped_size_(0), memory_(NULL), read_only_(false), requested_size_(0) {}
+
+SharedMemory::SharedMemory(const SharedMemoryHandle& handle, bool read_only)
+    : shm_(handle),
+      mapped_size_(0),
+      memory_(NULL),
+      read_only_(read_only),
+      requested_size_(0) {}
+
+SharedMemory::~SharedMemory() {
+  Unmap();
+  Close();
+}
+
+// static
+bool SharedMemory::IsHandleValid(const SharedMemoryHandle& handle) {
+  return handle.IsValid();
+}
+
+// static
+void SharedMemory::CloseHandle(const SharedMemoryHandle& handle) {
+  DCHECK(handle.IsValid());
+  handle.Close();
+}
+
+// static
+SharedMemoryHandle SharedMemory::DuplicateHandle(
+    const SharedMemoryHandle& handle) {
+  return handle.Duplicate();
+}
+
+bool SharedMemory::CreateAndMapAnonymous(size_t size) {
+  // Untrusted code can't create descriptors or handles.
+  return false;
+}
+
+bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
+  // Untrusted code can't create descriptors or handles.
+  return false;
+}
+
+bool SharedMemory::Delete(const std::string& name) {
+  return false;
+}
+
+bool SharedMemory::Open(const std::string& name, bool read_only) {
+  return false;
+}
+
+bool SharedMemory::MapAt(off_t offset, size_t bytes) {
+  if (!shm_.IsValid())
+    return false;
+
+  if (bytes > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return false;
+
+  if (memory_)
+    return false;
+
+  memory_ = mmap(NULL, bytes, PROT_READ | (read_only_ ? 0 : PROT_WRITE),
+                 MAP_SHARED, shm_.GetHandle(), offset);
+
+  bool mmap_succeeded = memory_ != MAP_FAILED && memory_ != NULL;
+  if (mmap_succeeded) {
+    mapped_size_ = bytes;
+    DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(memory_) &
+        (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
+    mapped_id_ = shm_.GetGUID();
+    SharedMemoryTracker::GetInstance()->IncrementMemoryUsage(*this);
+  } else {
+    memory_ = NULL;
+  }
+
+  return mmap_succeeded;
+}
+
+bool SharedMemory::Unmap() {
+  if (memory_ == NULL)
+    return false;
+
+  SharedMemoryTracker::GetInstance()->DecrementMemoryUsage(*this);
+  if (munmap(memory_, mapped_size_) < 0)
+    DPLOG(ERROR) << "munmap";
+  memory_ = NULL;
+  mapped_size_ = 0;
+  mapped_id_ = UnguessableToken();
+  return true;
+}
+
+SharedMemoryHandle SharedMemory::handle() const {
+  SharedMemoryHandle handle_copy = shm_;
+  handle_copy.SetOwnershipPassesToIPC(false);
+  return handle_copy;
+}
+
+SharedMemoryHandle SharedMemory::TakeHandle() {
+  SharedMemoryHandle handle_copy = shm_;
+  handle_copy.SetOwnershipPassesToIPC(true);
+  Unmap();
+  shm_ = SharedMemoryHandle();
+  return handle_copy;
+}
+
+void SharedMemory::Close() {
+  if (shm_.IsValid()) {
+    shm_.Close();
+    shm_ = SharedMemoryHandle();
+  }
+}
+
+SharedMemoryHandle SharedMemory::GetReadOnlyHandle() const {
+  // Untrusted code can't create descriptors or handles, which is needed to
+  // drop permissions.
+  return SharedMemoryHandle();
+}
+
+}  // namespace base
diff --git a/src/base/memory/shared_memory_posix.cc b/src/base/memory/shared_memory_posix.cc
new file mode 100644
index 0000000..07f9471
--- /dev/null
+++ b/src/base/memory/shared_memory_posix.cc
@@ -0,0 +1,379 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "base/files/file_util.h"
+#include "base/files/scoped_file.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/shared_memory_helper.h"
+#include "base/memory/shared_memory_tracker.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/posix/safe_strerror.h"
+#include "base/process/process_metrics.h"
+#include "base/scoped_generic.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/threading/thread_restrictions.h"
+#include "base/trace_event/trace_event.h"
+#include "base/unguessable_token.h"
+#include "build/build_config.h"
+
+#if defined(OS_ANDROID)
+#include "base/os_compat_android.h"
+#include "starboard/types.h"
+#include "third_party/ashmem/ashmem.h"
+#endif
+
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+#error "MacOS uses shared_memory_mac.cc"
+#endif
+
+namespace base {
+
+SharedMemory::SharedMemory() = default;
+
+SharedMemory::SharedMemory(const SharedMemoryHandle& handle, bool read_only)
+    : shm_(handle), read_only_(read_only) {}
+
+SharedMemory::~SharedMemory() {
+  Unmap();
+  Close();
+}
+
+// static
+bool SharedMemory::IsHandleValid(const SharedMemoryHandle& handle) {
+  return handle.IsValid();
+}
+
+// static
+void SharedMemory::CloseHandle(const SharedMemoryHandle& handle) {
+  DCHECK(handle.IsValid());
+  handle.Close();
+}
+
+// static
+size_t SharedMemory::GetHandleLimit() {
+  return GetMaxFds();
+}
+
+// static
+SharedMemoryHandle SharedMemory::DuplicateHandle(
+    const SharedMemoryHandle& handle) {
+  return handle.Duplicate();
+}
+
+// static
+int SharedMemory::GetFdFromSharedMemoryHandle(
+    const SharedMemoryHandle& handle) {
+  return handle.GetHandle();
+}
+
+bool SharedMemory::CreateAndMapAnonymous(size_t size) {
+  return CreateAnonymous(size) && Map(size);
+}
+
+#if !defined(OS_ANDROID)
+
+// Chromium mostly only uses the unique/private shmem as specified by
+// "name == L"". The exception is in the StatsTable.
+// TODO(jrg): there is no way to "clean up" all unused named shmem if
+// we restart from a crash.  (That isn't a new problem, but it is a problem.)
+// In case we want to delete it later, it may be useful to save the value
+// of mem_filename after FilePathForMemoryName().
+bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
+  DCHECK(!shm_.IsValid());
+  if (options.size == 0) return false;
+
+  if (options.size > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return false;
+
+  // This function theoretically can block on the disk, but realistically
+  // the temporary files we create will just go into the buffer cache
+  // and be deleted before they ever make it out to disk.
+  ThreadRestrictions::ScopedAllowIO allow_io;
+
+  bool fix_size = true;
+  ScopedFD fd;
+  ScopedFD readonly_fd;
+  FilePath path;
+  if (!options.name_deprecated || options.name_deprecated->empty()) {
+    bool result =
+        CreateAnonymousSharedMemory(options, &fd, &readonly_fd, &path);
+    if (!result)
+      return false;
+  } else {
+    if (!FilePathForMemoryName(*options.name_deprecated, &path))
+      return false;
+
+    // Make sure that the file is opened without any permission
+    // to other users on the system.
+    const mode_t kOwnerOnly = S_IRUSR | S_IWUSR;
+
+    // First, try to create the file.
+    fd.reset(HANDLE_EINTR(
+        open(path.value().c_str(), O_RDWR | O_CREAT | O_EXCL, kOwnerOnly)));
+    if (!fd.is_valid() && options.open_existing_deprecated) {
+      // If this doesn't work, try and open an existing file in append mode.
+      // Opening an existing file in a world writable directory has two main
+      // security implications:
+      // - Attackers could plant a file under their control, so ownership of
+      //   the file is checked below.
+      // - Attackers could plant a symbolic link so that an unexpected file
+      //   is opened, so O_NOFOLLOW is passed to open().
+#if !defined(OS_AIX)
+      fd.reset(HANDLE_EINTR(
+          open(path.value().c_str(), O_RDWR | O_APPEND | O_NOFOLLOW)));
+#else
+      // AIX has no 64-bit support for open flags such as -
+      //  O_CLOEXEC, O_NOFOLLOW and O_TTY_INIT.
+      fd.reset(HANDLE_EINTR(open(path.value().c_str(), O_RDWR | O_APPEND)));
+#endif
+      // Check that the current user owns the file.
+      // If uid != euid, then a more complex permission model is used and this
+      // API is not appropriate.
+      const uid_t real_uid = getuid();
+      const uid_t effective_uid = geteuid();
+      struct stat sb;
+      if (fd.is_valid() &&
+          (fstat(fd.get(), &sb) != 0 || sb.st_uid != real_uid ||
+           sb.st_uid != effective_uid)) {
+        LOG(ERROR) <<
+            "Invalid owner when opening existing shared memory file.";
+        close(fd.get());
+        return false;
+      }
+
+      // An existing file was opened, so its size should not be fixed.
+      fix_size = false;
+    }
+
+    if (options.share_read_only) {
+      // Also open as readonly so that we can GetReadOnlyHandle.
+      readonly_fd.reset(HANDLE_EINTR(open(path.value().c_str(), O_RDONLY)));
+      if (!readonly_fd.is_valid()) {
+        DPLOG(ERROR) << "open(\"" << path.value() << "\", O_RDONLY) failed";
+        close(fd.get());
+        return false;
+      }
+    }
+  }
+  if (fd.is_valid() && fix_size) {
+    // Get current size.
+    struct stat stat;
+    if (fstat(fd.get(), &stat) != 0)
+      return false;
+    const size_t current_size = stat.st_size;
+    if (current_size != options.size) {
+      if (HANDLE_EINTR(ftruncate(fd.get(), options.size)) != 0)
+        return false;
+    }
+    requested_size_ = options.size;
+  }
+  if (!fd.is_valid()) {
+    PLOG(ERROR) << "Creating shared memory in " << path.value() << " failed";
+    FilePath dir = path.DirName();
+    if (access(dir.value().c_str(), W_OK | X_OK) < 0) {
+      PLOG(ERROR) << "Unable to access(W_OK|X_OK) " << dir.value();
+      if (dir.value() == "/dev/shm") {
+        LOG(FATAL) << "This is frequently caused by incorrect permissions on "
+                   << "/dev/shm.  Try 'sudo chmod 1777 /dev/shm' to fix.";
+      }
+    }
+    return false;
+  }
+
+  int mapped_file = -1;
+  int readonly_mapped_file = -1;
+
+  bool result = PrepareMapFile(std::move(fd), std::move(readonly_fd),
+                               &mapped_file, &readonly_mapped_file);
+  shm_ = SharedMemoryHandle(FileDescriptor(mapped_file, false), options.size,
+                            UnguessableToken::Create());
+  readonly_shm_ =
+      SharedMemoryHandle(FileDescriptor(readonly_mapped_file, false),
+                         options.size, shm_.GetGUID());
+  return result;
+}
+
+// Our current implementation of shmem is with mmap()ing of files.
+// These files need to be deleted explicitly.
+// In practice this call is only needed for unit tests.
+bool SharedMemory::Delete(const std::string& name) {
+  FilePath path;
+  if (!FilePathForMemoryName(name, &path))
+    return false;
+
+  if (PathExists(path))
+    return DeleteFile(path, false);
+
+  // Doesn't exist, so success.
+  return true;
+}
+
+bool SharedMemory::Open(const std::string& name, bool read_only) {
+  FilePath path;
+  if (!FilePathForMemoryName(name, &path))
+    return false;
+
+  read_only_ = read_only;
+
+  int mode = read_only ? O_RDONLY : O_RDWR;
+  ScopedFD fd(HANDLE_EINTR(open(path.value().c_str(), mode)));
+  ScopedFD readonly_fd(HANDLE_EINTR(open(path.value().c_str(), O_RDONLY)));
+  if (!readonly_fd.is_valid()) {
+    DPLOG(ERROR) << "open(\"" << path.value() << "\", O_RDONLY) failed";
+    return false;
+  }
+  int mapped_file = -1;
+  int readonly_mapped_file = -1;
+  bool result = PrepareMapFile(std::move(fd), std::move(readonly_fd),
+                               &mapped_file, &readonly_mapped_file);
+  // This form of sharing shared memory is deprecated. https://crbug.com/345734.
+  // However, we can't get rid of it without a significant refactor because its
+  // used to communicate between two versions of the same service process, very
+  // early in the life cycle.
+  // Technically, we should also pass the GUID from the original shared memory
+  // region. We don't do that - this means that we will overcount this memory,
+  // which thankfully isn't relevant since Chrome only communicates with a
+  // single version of the service process.
+  // We pass the size |0|, which is a dummy size and wrong, but otherwise
+  // harmless.
+  shm_ = SharedMemoryHandle(FileDescriptor(mapped_file, false), 0u,
+                            UnguessableToken::Create());
+  readonly_shm_ = SharedMemoryHandle(
+      FileDescriptor(readonly_mapped_file, false), 0, shm_.GetGUID());
+  return result;
+}
+#endif  // !defined(OS_ANDROID)
+
+bool SharedMemory::MapAt(off_t offset, size_t bytes) {
+  if (!shm_.IsValid())
+    return false;
+
+  if (bytes > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return false;
+
+  if (memory_)
+    return false;
+
+#if defined(OS_ANDROID)
+  // On Android, Map can be called with a size and offset of zero to use the
+  // ashmem-determined size.
+  if (bytes == 0) {
+    DCHECK_EQ(0, offset);
+    int ashmem_bytes = ashmem_get_size_region(shm_.GetHandle());
+    if (ashmem_bytes < 0)
+      return false;
+    bytes = ashmem_bytes;
+  }
+
+  // Sanity check. This shall catch invalid uses of the SharedMemory APIs
+  // but will not protect against direct mmap() attempts.
+  if (shm_.IsReadOnly()) {
+    // Use a DCHECK() to call writable mappings with read-only descriptors
+    // in debug builds immediately. Return an error for release builds
+    // or during unit-testing (assuming a ScopedLogAssertHandler was installed).
+    DCHECK(read_only_)
+        << "Trying to map a region writable with a read-only descriptor.";
+    if (!read_only_) {
+      return false;
+    }
+    if (!shm_.SetRegionReadOnly()) {  // Ensure the region is read-only.
+      return false;
+    }
+  }
+#endif
+
+  memory_ = mmap(nullptr, bytes, PROT_READ | (read_only_ ? 0 : PROT_WRITE),
+                 MAP_SHARED, shm_.GetHandle(), offset);
+
+  bool mmap_succeeded = memory_ && memory_ != reinterpret_cast<void*>(-1);
+  if (mmap_succeeded) {
+    mapped_size_ = bytes;
+    mapped_id_ = shm_.GetGUID();
+    DCHECK_EQ(0U,
+              reinterpret_cast<uintptr_t>(memory_) &
+                  (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
+    SharedMemoryTracker::GetInstance()->IncrementMemoryUsage(*this);
+  } else {
+    memory_ = nullptr;
+  }
+
+  return mmap_succeeded;
+}
+
+bool SharedMemory::Unmap() {
+  if (!memory_)
+    return false;
+
+  SharedMemoryTracker::GetInstance()->DecrementMemoryUsage(*this);
+  munmap(memory_, mapped_size_);
+  memory_ = nullptr;
+  mapped_size_ = 0;
+  mapped_id_ = UnguessableToken();
+  return true;
+}
+
+SharedMemoryHandle SharedMemory::handle() const {
+  return shm_;
+}
+
+SharedMemoryHandle SharedMemory::TakeHandle() {
+  SharedMemoryHandle handle_copy = shm_;
+  handle_copy.SetOwnershipPassesToIPC(true);
+  Unmap();
+  shm_ = SharedMemoryHandle();
+  return handle_copy;
+}
+
+#if !defined(OS_ANDROID)
+void SharedMemory::Close() {
+  if (shm_.IsValid()) {
+    shm_.Close();
+    shm_ = SharedMemoryHandle();
+  }
+  if (readonly_shm_.IsValid()) {
+    readonly_shm_.Close();
+    readonly_shm_ = SharedMemoryHandle();
+  }
+}
+
+// For the given shmem named |mem_name|, return a filename to mmap()
+// (and possibly create).  Modifies |filename|.  Return false on
+// error, or true if we are happy.
+bool SharedMemory::FilePathForMemoryName(const std::string& mem_name,
+                                         FilePath* path) {
+  // mem_name will be used for a filename; make sure it doesn't
+  // contain anything which will confuse us.
+  DCHECK_EQ(std::string::npos, mem_name.find('/'));
+  DCHECK_EQ(std::string::npos, mem_name.find('\0'));
+
+  FilePath temp_dir;
+  if (!GetShmemTempDir(false, &temp_dir))
+    return false;
+
+#if defined(GOOGLE_CHROME_BUILD)
+  static const char kShmem[] = "com.google.Chrome.shmem.";
+#else
+  static const char kShmem[] = "org.chromium.Chromium.shmem.";
+#endif
+  CR_DEFINE_STATIC_LOCAL(const std::string, name_base, (kShmem));
+  *path = temp_dir.AppendASCII(name_base + mem_name);
+  return true;
+}
+
+SharedMemoryHandle SharedMemory::GetReadOnlyHandle() const {
+  CHECK(readonly_shm_.IsValid());
+  return readonly_shm_.Duplicate();
+}
+#endif  // !defined(OS_ANDROID)
+
+}  // namespace base
diff --git a/src/base/memory/shared_memory_region_unittest.cc b/src/base/memory/shared_memory_region_unittest.cc
new file mode 100644
index 0000000..d80affe
--- /dev/null
+++ b/src/base/memory/shared_memory_region_unittest.cc
@@ -0,0 +1,305 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <utility>
+
+#include "base/memory/platform_shared_memory_region.h"
+#include "base/memory/read_only_shared_memory_region.h"
+#include "base/memory/shared_memory.h"
+#include "base/memory/unsafe_shared_memory_region.h"
+#include "base/memory/writable_shared_memory_region.h"
+#include "base/sys_info.h"
+#include "base/test/test_shared_memory_util.h"
+#include "build/build_config.h"
+#include "starboard/memory.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+const size_t kRegionSize = 1024;
+
+bool IsMemoryFilledWithByte(const void* memory, size_t size, char byte) {
+  const char* start_ptr = static_cast<const char*>(memory);
+  const char* end_ptr = start_ptr + size;
+  for (const char* ptr = start_ptr; ptr < end_ptr; ++ptr) {
+    if (*ptr != byte)
+      return false;
+  }
+
+  return true;
+}
+
+template <typename SharedMemoryRegionType>
+class SharedMemoryRegionTest : public ::testing::Test {
+ public:
+  void SetUp() override {
+    std::tie(region_, rw_mapping_) =
+        CreateMappedRegion<SharedMemoryRegionType>(kRegionSize);
+    ASSERT_TRUE(region_.IsValid());
+    ASSERT_TRUE(rw_mapping_.IsValid());
+    SbMemorySet(rw_mapping_.memory(), 'G', kRegionSize);
+    EXPECT_TRUE(IsMemoryFilledWithByte(rw_mapping_.memory(), kRegionSize, 'G'));
+  }
+
+ protected:
+  SharedMemoryRegionType region_;
+  WritableSharedMemoryMapping rw_mapping_;
+};
+
+typedef ::testing::Types<WritableSharedMemoryRegion,
+                         UnsafeSharedMemoryRegion,
+                         ReadOnlySharedMemoryRegion>
+    AllRegionTypes;
+TYPED_TEST_CASE(SharedMemoryRegionTest, AllRegionTypes);
+
+TYPED_TEST(SharedMemoryRegionTest, NonValidRegion) {
+  TypeParam region;
+  EXPECT_FALSE(region.IsValid());
+  // We shouldn't crash on Map but should return an invalid mapping.
+  typename TypeParam::MappingType mapping = region.Map();
+  EXPECT_FALSE(mapping.IsValid());
+}
+
+TYPED_TEST(SharedMemoryRegionTest, MoveRegion) {
+  TypeParam moved_region = std::move(this->region_);
+  EXPECT_FALSE(this->region_.IsValid());
+  ASSERT_TRUE(moved_region.IsValid());
+
+  // Check that moved region maps correctly.
+  typename TypeParam::MappingType mapping = moved_region.Map();
+  ASSERT_TRUE(mapping.IsValid());
+  EXPECT_NE(this->rw_mapping_.memory(), mapping.memory());
+  EXPECT_EQ(SbMemoryCompare(this->rw_mapping_.memory(), mapping.memory(),
+                            kRegionSize),
+            0);
+
+  // Verify that the second mapping reflects changes in the first.
+  SbMemorySet(this->rw_mapping_.memory(), '#', kRegionSize);
+  EXPECT_EQ(SbMemoryCompare(this->rw_mapping_.memory(), mapping.memory(),
+                            kRegionSize),
+            0);
+}
+
+TYPED_TEST(SharedMemoryRegionTest, MappingValidAfterClose) {
+  // Check the mapping is still valid after the region is closed.
+  this->region_ = TypeParam();
+  EXPECT_FALSE(this->region_.IsValid());
+  ASSERT_TRUE(this->rw_mapping_.IsValid());
+  EXPECT_TRUE(
+      IsMemoryFilledWithByte(this->rw_mapping_.memory(), kRegionSize, 'G'));
+}
+
+TYPED_TEST(SharedMemoryRegionTest, MapTwice) {
+  // The second mapping is either writable or read-only.
+  typename TypeParam::MappingType mapping = this->region_.Map();
+  ASSERT_TRUE(mapping.IsValid());
+  EXPECT_NE(this->rw_mapping_.memory(), mapping.memory());
+  EXPECT_EQ(SbMemoryCompare(this->rw_mapping_.memory(), mapping.memory(),
+                            kRegionSize),
+            0);
+
+  // Verify that the second mapping reflects changes in the first.
+  SbMemorySet(this->rw_mapping_.memory(), '#', kRegionSize);
+  EXPECT_EQ(SbMemoryCompare(this->rw_mapping_.memory(), mapping.memory(),
+                            kRegionSize),
+            0);
+
+  // Close the region and unmap the first memory segment, verify the second
+  // still has the right data.
+  this->region_ = TypeParam();
+  this->rw_mapping_ = WritableSharedMemoryMapping();
+  EXPECT_TRUE(IsMemoryFilledWithByte(mapping.memory(), kRegionSize, '#'));
+}
+
+TYPED_TEST(SharedMemoryRegionTest, MapUnmapMap) {
+  this->rw_mapping_ = WritableSharedMemoryMapping();
+
+  typename TypeParam::MappingType mapping = this->region_.Map();
+  ASSERT_TRUE(mapping.IsValid());
+  EXPECT_TRUE(IsMemoryFilledWithByte(mapping.memory(), kRegionSize, 'G'));
+}
+
+TYPED_TEST(SharedMemoryRegionTest, SerializeAndDeserialize) {
+  subtle::PlatformSharedMemoryRegion platform_region =
+      TypeParam::TakeHandleForSerialization(std::move(this->region_));
+  EXPECT_EQ(platform_region.GetGUID(), this->rw_mapping_.guid());
+  TypeParam region = TypeParam::Deserialize(std::move(platform_region));
+  EXPECT_TRUE(region.IsValid());
+  EXPECT_FALSE(this->region_.IsValid());
+  typename TypeParam::MappingType mapping = region.Map();
+  ASSERT_TRUE(mapping.IsValid());
+  EXPECT_TRUE(IsMemoryFilledWithByte(mapping.memory(), kRegionSize, 'G'));
+
+  // Verify that the second mapping reflects changes in the first.
+  SbMemorySet(this->rw_mapping_.memory(), '#', kRegionSize);
+  EXPECT_EQ(SbMemoryCompare(this->rw_mapping_.memory(), mapping.memory(),
+                            kRegionSize),
+            0);
+}
+
+// Map() will return addresses which are aligned to the platform page size, this
+// varies from platform to platform though.  Since we'd like to advertise a
+// minimum alignment that callers can count on, test for it here.
+TYPED_TEST(SharedMemoryRegionTest, MapMinimumAlignment) {
+  EXPECT_EQ(0U,
+            reinterpret_cast<uintptr_t>(this->rw_mapping_.memory()) &
+                (subtle::PlatformSharedMemoryRegion::kMapMinimumAlignment - 1));
+}
+
+TYPED_TEST(SharedMemoryRegionTest, MapSize) {
+  EXPECT_EQ(this->rw_mapping_.size(), kRegionSize);
+  EXPECT_GE(this->rw_mapping_.mapped_size(), kRegionSize);
+}
+
+TYPED_TEST(SharedMemoryRegionTest, MapGranularity) {
+  EXPECT_LT(this->rw_mapping_.mapped_size(),
+            kRegionSize + SysInfo::VMAllocationGranularity());
+}
+
+TYPED_TEST(SharedMemoryRegionTest, MapAt) {
+  const size_t kPageSize = SysInfo::VMAllocationGranularity();
+  ASSERT_TRUE(kPageSize >= sizeof(uint32_t));
+  ASSERT_EQ(kPageSize % sizeof(uint32_t), 0U);
+  const size_t kDataSize = kPageSize * 2;
+  const size_t kCount = kDataSize / sizeof(uint32_t);
+
+  TypeParam region;
+  WritableSharedMemoryMapping rw_mapping;
+  std::tie(region, rw_mapping) = CreateMappedRegion<TypeParam>(kDataSize);
+  ASSERT_TRUE(region.IsValid());
+  ASSERT_TRUE(rw_mapping.IsValid());
+  uint32_t* ptr = static_cast<uint32_t*>(rw_mapping.memory());
+
+  for (size_t i = 0; i < kCount; ++i)
+    ptr[i] = i;
+
+  rw_mapping = WritableSharedMemoryMapping();
+  off_t bytes_offset = kPageSize;
+  typename TypeParam::MappingType mapping =
+      region.MapAt(bytes_offset, kDataSize - bytes_offset);
+  ASSERT_TRUE(mapping.IsValid());
+
+  off_t int_offset = bytes_offset / sizeof(uint32_t);
+  const uint32_t* ptr2 = static_cast<const uint32_t*>(mapping.memory());
+  for (size_t i = int_offset; i < kCount; ++i) {
+    EXPECT_EQ(ptr2[i - int_offset], i);
+  }
+}
+
+TYPED_TEST(SharedMemoryRegionTest, MapAtNotAlignedOffsetFails) {
+  const size_t kDataSize = SysInfo::VMAllocationGranularity();
+
+  TypeParam region;
+  WritableSharedMemoryMapping rw_mapping;
+  std::tie(region, rw_mapping) = CreateMappedRegion<TypeParam>(kDataSize);
+  ASSERT_TRUE(region.IsValid());
+  ASSERT_TRUE(rw_mapping.IsValid());
+  off_t offset = kDataSize / 2;
+  typename TypeParam::MappingType mapping =
+      region.MapAt(offset, kDataSize - offset);
+  EXPECT_FALSE(mapping.IsValid());
+}
+
+TYPED_TEST(SharedMemoryRegionTest, MapZeroBytesFails) {
+  typename TypeParam::MappingType mapping = this->region_.MapAt(0, 0);
+  EXPECT_FALSE(mapping.IsValid());
+}
+
+TYPED_TEST(SharedMemoryRegionTest, MapMoreBytesThanRegionSizeFails) {
+  size_t region_real_size = this->region_.GetSize();
+  typename TypeParam::MappingType mapping =
+      this->region_.MapAt(0, region_real_size + 1);
+  EXPECT_FALSE(mapping.IsValid());
+}
+
+template <typename DuplicatableSharedMemoryRegion>
+class DuplicatableSharedMemoryRegionTest
+    : public SharedMemoryRegionTest<DuplicatableSharedMemoryRegion> {};
+
+typedef ::testing::Types<UnsafeSharedMemoryRegion, ReadOnlySharedMemoryRegion>
+    DuplicatableRegionTypes;
+TYPED_TEST_CASE(DuplicatableSharedMemoryRegionTest, DuplicatableRegionTypes);
+
+TYPED_TEST(DuplicatableSharedMemoryRegionTest, Duplicate) {
+  TypeParam dup_region = this->region_.Duplicate();
+  EXPECT_EQ(this->region_.GetGUID(), dup_region.GetGUID());
+  typename TypeParam::MappingType mapping = dup_region.Map();
+  ASSERT_TRUE(mapping.IsValid());
+  EXPECT_NE(this->rw_mapping_.memory(), mapping.memory());
+  EXPECT_EQ(this->rw_mapping_.guid(), mapping.guid());
+  EXPECT_TRUE(IsMemoryFilledWithByte(mapping.memory(), kRegionSize, 'G'));
+}
+
+class ReadOnlySharedMemoryRegionTest : public ::testing::Test {
+ public:
+  ReadOnlySharedMemoryRegion GetInitiallyReadOnlyRegion(size_t size) {
+    MappedReadOnlyRegion mapped_region =
+        ReadOnlySharedMemoryRegion::Create(size);
+    ReadOnlySharedMemoryRegion region = std::move(mapped_region.region);
+    return region;
+  }
+
+  ReadOnlySharedMemoryRegion GetConvertedToReadOnlyRegion(size_t size) {
+    WritableSharedMemoryRegion region =
+        WritableSharedMemoryRegion::Create(kRegionSize);
+    ReadOnlySharedMemoryRegion ro_region =
+        WritableSharedMemoryRegion::ConvertToReadOnly(std::move(region));
+    return ro_region;
+  }
+};
+
+TEST_F(ReadOnlySharedMemoryRegionTest,
+       InitiallyReadOnlyRegionCannotBeMappedAsWritable) {
+  ReadOnlySharedMemoryRegion region = GetInitiallyReadOnlyRegion(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+
+  EXPECT_TRUE(CheckReadOnlyPlatformSharedMemoryRegionForTesting(
+      ReadOnlySharedMemoryRegion::TakeHandleForSerialization(
+          std::move(region))));
+}
+
+TEST_F(ReadOnlySharedMemoryRegionTest,
+       ConvertedToReadOnlyRegionCannotBeMappedAsWritable) {
+  ReadOnlySharedMemoryRegion region = GetConvertedToReadOnlyRegion(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+
+  EXPECT_TRUE(CheckReadOnlyPlatformSharedMemoryRegionForTesting(
+      ReadOnlySharedMemoryRegion::TakeHandleForSerialization(
+          std::move(region))));
+}
+
+TEST_F(ReadOnlySharedMemoryRegionTest,
+       InitiallyReadOnlyRegionProducedMappingWriteDeathTest) {
+  ReadOnlySharedMemoryRegion region = GetInitiallyReadOnlyRegion(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  ReadOnlySharedMemoryMapping mapping = region.Map();
+  ASSERT_TRUE(mapping.IsValid());
+  void* memory_ptr = const_cast<void*>(mapping.memory());
+  EXPECT_DEATH_IF_SUPPORTED(SbMemorySet(memory_ptr, 'G', kRegionSize), "");
+}
+
+TEST_F(ReadOnlySharedMemoryRegionTest,
+       ConvertedToReadOnlyRegionProducedMappingWriteDeathTest) {
+  ReadOnlySharedMemoryRegion region = GetConvertedToReadOnlyRegion(kRegionSize);
+  ASSERT_TRUE(region.IsValid());
+  ReadOnlySharedMemoryMapping mapping = region.Map();
+  ASSERT_TRUE(mapping.IsValid());
+  void* memory_ptr = const_cast<void*>(mapping.memory());
+  EXPECT_DEATH_IF_SUPPORTED(SbMemorySet(memory_ptr, 'G', kRegionSize), "");
+}
+
+class UnsafeSharedMemoryRegionTest : public ::testing::Test {};
+
+TEST_F(UnsafeSharedMemoryRegionTest, CreateFromHandleTest) {
+  SharedMemory shm;
+
+  auto region = UnsafeSharedMemoryRegion::CreateFromHandle(shm.TakeHandle());
+  ASSERT_FALSE(region.IsValid());
+
+  shm.CreateAndMapAnonymous(10);
+  region = UnsafeSharedMemoryRegion::CreateFromHandle(shm.TakeHandle());
+  ASSERT_TRUE(region.IsValid());
+}
+
+}  // namespace base
diff --git a/src/base/memory/shared_memory_tracker.cc b/src/base/memory/shared_memory_tracker.cc
new file mode 100644
index 0000000..5ca7c84
--- /dev/null
+++ b/src/base/memory/shared_memory_tracker.cc
@@ -0,0 +1,147 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory_tracker.h"
+
+#include "base/memory/shared_memory.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/trace_event/memory_allocator_dump_guid.h"
+#include "base/trace_event/memory_dump_manager.h"
+#include "base/trace_event/process_memory_dump.h"
+
+namespace base {
+
+const char SharedMemoryTracker::kDumpRootName[] = "shared_memory";
+
+// static
+SharedMemoryTracker* SharedMemoryTracker::GetInstance() {
+  static SharedMemoryTracker* instance = new SharedMemoryTracker;
+  return instance;
+}
+
+// static
+std::string SharedMemoryTracker::GetDumpNameForTracing(
+    const UnguessableToken& id) {
+  DCHECK(!id.is_empty());
+  return std::string(kDumpRootName) + "/" + id.ToString();
+}
+
+// static
+trace_event::MemoryAllocatorDumpGuid
+SharedMemoryTracker::GetGlobalDumpIdForTracing(const UnguessableToken& id) {
+  std::string dump_name = GetDumpNameForTracing(id);
+  return trace_event::MemoryAllocatorDumpGuid(dump_name);
+}
+
+// static
+const trace_event::MemoryAllocatorDump*
+SharedMemoryTracker::GetOrCreateSharedMemoryDump(
+    const SharedMemory* shared_memory,
+    trace_event::ProcessMemoryDump* pmd) {
+  return GetOrCreateSharedMemoryDumpInternal(shared_memory->memory(),
+                                             shared_memory->mapped_size(),
+                                             shared_memory->mapped_id(), pmd);
+}
+
+const trace_event::MemoryAllocatorDump*
+SharedMemoryTracker::GetOrCreateSharedMemoryDump(
+    const SharedMemoryMapping& shared_memory,
+    trace_event::ProcessMemoryDump* pmd) {
+  return GetOrCreateSharedMemoryDumpInternal(shared_memory.raw_memory_ptr(),
+                                             shared_memory.mapped_size(),
+                                             shared_memory.guid(), pmd);
+}
+
+void SharedMemoryTracker::IncrementMemoryUsage(
+    const SharedMemory& shared_memory) {
+  AutoLock hold(usages_lock_);
+  DCHECK(usages_.find(shared_memory.memory()) == usages_.end());
+  usages_.emplace(shared_memory.memory(), UsageInfo(shared_memory.mapped_size(),
+                                                    shared_memory.mapped_id()));
+}
+
+void SharedMemoryTracker::IncrementMemoryUsage(
+    const SharedMemoryMapping& mapping) {
+  AutoLock hold(usages_lock_);
+  DCHECK(usages_.find(mapping.raw_memory_ptr()) == usages_.end());
+  usages_.emplace(mapping.raw_memory_ptr(),
+                  UsageInfo(mapping.mapped_size(), mapping.guid()));
+}
+
+void SharedMemoryTracker::DecrementMemoryUsage(
+    const SharedMemory& shared_memory) {
+  AutoLock hold(usages_lock_);
+  DCHECK(usages_.find(shared_memory.memory()) != usages_.end());
+  usages_.erase(shared_memory.memory());
+}
+
+void SharedMemoryTracker::DecrementMemoryUsage(
+    const SharedMemoryMapping& mapping) {
+  AutoLock hold(usages_lock_);
+  DCHECK(usages_.find(mapping.raw_memory_ptr()) != usages_.end());
+  usages_.erase(mapping.raw_memory_ptr());
+}
+
+SharedMemoryTracker::SharedMemoryTracker() {
+  trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
+      this, "SharedMemoryTracker", nullptr);
+}
+
+SharedMemoryTracker::~SharedMemoryTracker() = default;
+
+bool SharedMemoryTracker::OnMemoryDump(const trace_event::MemoryDumpArgs& args,
+                                       trace_event::ProcessMemoryDump* pmd) {
+  AutoLock hold(usages_lock_);
+  for (const auto& usage : usages_) {
+    const trace_event::MemoryAllocatorDump* dump =
+        GetOrCreateSharedMemoryDumpInternal(
+            usage.first, usage.second.mapped_size, usage.second.mapped_id, pmd);
+    DCHECK(dump);
+  }
+  return true;
+}
+
+// static
+const trace_event::MemoryAllocatorDump*
+SharedMemoryTracker::GetOrCreateSharedMemoryDumpInternal(
+    void* mapped_memory,
+    size_t mapped_size,
+    const UnguessableToken& mapped_id,
+    trace_event::ProcessMemoryDump* pmd) {
+  const std::string dump_name = GetDumpNameForTracing(mapped_id);
+  trace_event::MemoryAllocatorDump* local_dump =
+      pmd->GetAllocatorDump(dump_name);
+  if (local_dump)
+    return local_dump;
+
+  size_t virtual_size = mapped_size;
+  // If resident size is not available, a virtual size is used as fallback.
+  size_t size = virtual_size;
+#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
+  base::Optional<size_t> resident_size =
+      trace_event::ProcessMemoryDump::CountResidentBytesInSharedMemory(
+          mapped_memory, mapped_size);
+  if (resident_size.has_value())
+    size = resident_size.value();
+#endif
+
+  local_dump = pmd->CreateAllocatorDump(dump_name);
+  local_dump->AddScalar(trace_event::MemoryAllocatorDump::kNameSize,
+                        trace_event::MemoryAllocatorDump::kUnitsBytes, size);
+  local_dump->AddScalar("virtual_size",
+                        trace_event::MemoryAllocatorDump::kUnitsBytes,
+                        virtual_size);
+  auto global_dump_guid = GetGlobalDumpIdForTracing(mapped_id);
+  trace_event::MemoryAllocatorDump* global_dump =
+      pmd->CreateSharedGlobalAllocatorDump(global_dump_guid);
+  global_dump->AddScalar(trace_event::MemoryAllocatorDump::kNameSize,
+                         trace_event::MemoryAllocatorDump::kUnitsBytes, size);
+
+  // The edges will be overridden by the clients with correct importance.
+  pmd->AddOverridableOwnershipEdge(local_dump->guid(), global_dump->guid(),
+                                   0 /* importance */);
+  return local_dump;
+}
+
+}  // namespace base
diff --git a/src/base/memory/shared_memory_tracker.h b/src/base/memory/shared_memory_tracker.h
new file mode 100644
index 0000000..66d130a
--- /dev/null
+++ b/src/base/memory/shared_memory_tracker.h
@@ -0,0 +1,92 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_SHARED_MEMORY_TRACKER_H_
+#define BASE_MEMORY_SHARED_MEMORY_TRACKER_H_
+
+#include <map>
+#include <string>
+
+#include "base/memory/shared_memory.h"
+#include "base/memory/shared_memory_mapping.h"
+#include "base/synchronization/lock.h"
+#include "base/trace_event/memory_dump_provider.h"
+
+namespace base {
+
+#if !defined(STARBOARD)
+namespace trace_event {
+class MemoryAllocatorDump;
+class MemoryAllocatorDumpGuid;
+class ProcessMemoryDump;
+}
+
+// SharedMemoryTracker tracks shared memory usage.
+class BASE_EXPORT SharedMemoryTracker : public trace_event::MemoryDumpProvider {
+ public:
+  // Returns a singleton instance.
+  static SharedMemoryTracker* GetInstance();
+
+  static std::string GetDumpNameForTracing(const UnguessableToken& id);
+
+  static trace_event::MemoryAllocatorDumpGuid GetGlobalDumpIdForTracing(
+      const UnguessableToken& id);
+
+  // Gets or creates, if non-existent, a memory dump for the |shared_memory|
+  // inside the given |pmd|. Also adds the necessary edges for the dump when
+  // creating the dump.
+  static const trace_event::MemoryAllocatorDump* GetOrCreateSharedMemoryDump(
+      const SharedMemory* shared_memory,
+      trace_event::ProcessMemoryDump* pmd);
+  // We're in the middle of a refactor https://crbug.com/795291. Eventually, the
+  // first call will go away.
+  static const trace_event::MemoryAllocatorDump* GetOrCreateSharedMemoryDump(
+      const SharedMemoryMapping& shared_memory,
+      trace_event::ProcessMemoryDump* pmd);
+
+  // Records shared memory usage on valid mapping.
+  void IncrementMemoryUsage(const SharedMemory& shared_memory);
+  void IncrementMemoryUsage(const SharedMemoryMapping& mapping);
+
+  // Records shared memory usage on unmapping.
+  void DecrementMemoryUsage(const SharedMemory& shared_memory);
+  void DecrementMemoryUsage(const SharedMemoryMapping& mapping);
+
+  // Root dump name for all shared memory dumps.
+  static const char kDumpRootName[];
+
+ private:
+  SharedMemoryTracker();
+  ~SharedMemoryTracker() override;
+
+  // trace_event::MemoryDumpProvider implementation.
+  bool OnMemoryDump(const trace_event::MemoryDumpArgs& args,
+                    trace_event::ProcessMemoryDump* pmd) override;
+
+  static const trace_event::MemoryAllocatorDump*
+  GetOrCreateSharedMemoryDumpInternal(void* mapped_memory,
+                                      size_t mapped_size,
+                                      const UnguessableToken& mapped_id,
+                                      trace_event::ProcessMemoryDump* pmd);
+
+  // Information associated with each mapped address.
+  struct UsageInfo {
+    UsageInfo(size_t size, const UnguessableToken& id)
+        : mapped_size(size), mapped_id(id) {}
+
+    size_t mapped_size;
+    UnguessableToken mapped_id;
+  };
+
+  // Used to lock when |usages_| is modified or read.
+  Lock usages_lock_;
+  std::map<void*, UsageInfo> usages_;
+
+  DISALLOW_COPY_AND_ASSIGN(SharedMemoryTracker);
+};
+
+#endif  // !defined(STARBOARD)
+}  // namespace base
+
+#endif  // BASE_MEMORY_SHARED_MEMORY_TRACKER_H_
diff --git a/src/base/memory/shared_memory_unittest.cc b/src/base/memory/shared_memory_unittest.cc
new file mode 100644
index 0000000..1e97c93
--- /dev/null
+++ b/src/base/memory/shared_memory_unittest.cc
@@ -0,0 +1,977 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory.h"
+
+#include <memory>
+
+#include "starboard/types.h"
+
+#include "starboard/memory.h"
+
+#include "base/atomicops.h"
+#include "base/base_switches.h"
+#include "base/bind.h"
+#include "base/command_line.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/shared_memory_handle.h"
+#include "base/process/kill.h"
+#include "base/rand_util.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/string_util.h"
+#include "base/sys_info.h"
+#include "base/test/multiprocess_test.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+#include "base/unguessable_token.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/multiprocess_func_list.h"
+
+#if defined(OS_ANDROID)
+#include "base/callback.h"
+#endif
+
+#if defined(OS_POSIX)
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
+#if defined(OS_LINUX)
+#include <sys/syscall.h>
+#endif
+
+#if defined(OS_WIN)
+#include "base/win/scoped_handle.h"
+#endif
+
+#if defined(OS_FUCHSIA)
+#include <lib/zx/vmar.h>
+#include <lib/zx/vmo.h>
+#endif
+
+namespace base {
+
+namespace {
+
+#if !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
+// Each thread will open the shared memory.  Each thread will take a different 4
+// byte int pointer, and keep changing it, with some small pauses in between.
+// Verify that each thread's value in the shared memory is always correct.
+class MultipleThreadMain : public PlatformThread::Delegate {
+ public:
+  explicit MultipleThreadMain(int16_t id) : id_(id) {}
+  ~MultipleThreadMain() override = default;
+
+  static void CleanUp() {
+    SharedMemory memory;
+    memory.Delete(s_test_name_);
+  }
+
+  // PlatformThread::Delegate interface.
+  void ThreadMain() override {
+    const uint32_t kDataSize = 1024;
+    SharedMemory memory;
+    bool rv = memory.CreateNamedDeprecated(s_test_name_, true, kDataSize);
+    EXPECT_TRUE(rv);
+    rv = memory.Map(kDataSize);
+    EXPECT_TRUE(rv);
+    int* ptr = static_cast<int*>(memory.memory()) + id_;
+    EXPECT_EQ(0, *ptr);
+
+    for (int idx = 0; idx < 100; idx++) {
+      *ptr = idx;
+      PlatformThread::Sleep(TimeDelta::FromMilliseconds(1));
+      EXPECT_EQ(*ptr, idx);
+    }
+    // Reset back to 0 for the next test that uses the same name.
+    *ptr = 0;
+
+    memory.Close();
+  }
+
+ private:
+  int16_t id_;
+
+  static const char s_test_name_[];
+
+  DISALLOW_COPY_AND_ASSIGN(MultipleThreadMain);
+};
+
+const char MultipleThreadMain::s_test_name_[] =
+    "SharedMemoryOpenThreadTest";
+#endif  // !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
+
+enum class Mode {
+  Default,
+#if defined(OS_LINUX) && !defined(OS_CHROMEOS)
+  DisableDevShm = 1,
+#endif
+};
+
+class SharedMemoryTest : public ::testing::TestWithParam<Mode> {
+ public:
+  void SetUp() override {
+    switch (GetParam()) {
+      case Mode::Default:
+        break;
+#if defined(OS_LINUX) && !defined(OS_CHROMEOS)
+      case Mode::DisableDevShm:
+        CommandLine* cmdline = CommandLine::ForCurrentProcess();
+        cmdline->AppendSwitch(switches::kDisableDevShmUsage);
+        break;
+#endif  // defined(OS_LINUX) && !defined(OS_CHROMEOS)
+    }
+  }
+};
+
+}  // namespace
+
+// Android/Mac/Fuchsia don't support SharedMemory::Open/Delete/
+// CreateNamedDeprecated(openExisting=true)
+#if !defined(OS_ANDROID) && !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
+
+TEST_P(SharedMemoryTest, OpenClose) {
+  const uint32_t kDataSize = 1024;
+  std::string test_name = "SharedMemoryOpenCloseTest";
+
+  // Open two handles to a memory segment, confirm that they are mapped
+  // separately yet point to the same space.
+  SharedMemory memory1;
+  bool rv = memory1.Delete(test_name);
+  EXPECT_TRUE(rv);
+  rv = memory1.Delete(test_name);
+  EXPECT_TRUE(rv);
+  rv = memory1.Open(test_name, false);
+  EXPECT_FALSE(rv);
+  rv = memory1.CreateNamedDeprecated(test_name, false, kDataSize);
+  EXPECT_TRUE(rv);
+  rv = memory1.Map(kDataSize);
+  EXPECT_TRUE(rv);
+  SharedMemory memory2;
+  rv = memory2.Open(test_name, false);
+  EXPECT_TRUE(rv);
+  rv = memory2.Map(kDataSize);
+  EXPECT_TRUE(rv);
+  EXPECT_NE(memory1.memory(), memory2.memory());  // Compare the pointers.
+
+  // Make sure we don't segfault. (it actually happened!)
+  ASSERT_NE(memory1.memory(), static_cast<void*>(nullptr));
+  ASSERT_NE(memory2.memory(), static_cast<void*>(nullptr));
+
+  // Write data to the first memory segment, verify contents of second.
+  SbMemorySet(memory1.memory(), '1', kDataSize);
+  EXPECT_EQ(SbMemoryCompare(memory1.memory(), memory2.memory(), kDataSize), 0);
+
+  // Close the first memory segment, and verify the second has the right data.
+  memory1.Close();
+  char* start_ptr = static_cast<char*>(memory2.memory());
+  char* end_ptr = start_ptr + kDataSize;
+  for (char* ptr = start_ptr; ptr < end_ptr; ptr++)
+    EXPECT_EQ(*ptr, '1');
+
+  // Close the second memory segment.
+  memory2.Close();
+
+  rv = memory1.Delete(test_name);
+  EXPECT_TRUE(rv);
+  rv = memory2.Delete(test_name);
+  EXPECT_TRUE(rv);
+}
+
+TEST_P(SharedMemoryTest, OpenExclusive) {
+  const uint32_t kDataSize = 1024;
+  const uint32_t kDataSize2 = 2048;
+  std::ostringstream test_name_stream;
+  test_name_stream << "SharedMemoryOpenExclusiveTest."
+                   << Time::Now().ToDoubleT();
+  std::string test_name = test_name_stream.str();
+
+  // Open two handles to a memory segment and check that
+  // open_existing_deprecated works as expected.
+  SharedMemory memory1;
+  bool rv = memory1.CreateNamedDeprecated(test_name, false, kDataSize);
+  EXPECT_TRUE(rv);
+
+  // Memory1 knows its size because it created it.
+  EXPECT_EQ(memory1.requested_size(), kDataSize);
+
+  rv = memory1.Map(kDataSize);
+  EXPECT_TRUE(rv);
+
+  // The mapped memory1 must be at least the size we asked for.
+  EXPECT_GE(memory1.mapped_size(), kDataSize);
+
+  // The mapped memory1 shouldn't exceed rounding for allocation granularity.
+  EXPECT_LT(memory1.mapped_size(),
+            kDataSize + SysInfo::VMAllocationGranularity());
+
+  SbMemorySet(memory1.memory(), 'G', kDataSize);
+
+  SharedMemory memory2;
+  // Should not be able to create if openExisting is false.
+  rv = memory2.CreateNamedDeprecated(test_name, false, kDataSize2);
+  EXPECT_FALSE(rv);
+
+  // Should be able to create with openExisting true.
+  rv = memory2.CreateNamedDeprecated(test_name, true, kDataSize2);
+  EXPECT_TRUE(rv);
+
+  // Memory2 shouldn't know the size because we didn't create it.
+  EXPECT_EQ(memory2.requested_size(), 0U);
+
+  // We should be able to map the original size.
+  rv = memory2.Map(kDataSize);
+  EXPECT_TRUE(rv);
+
+  // The mapped memory2 must be at least the size of the original.
+  EXPECT_GE(memory2.mapped_size(), kDataSize);
+
+  // The mapped memory2 shouldn't exceed rounding for allocation granularity.
+  EXPECT_LT(memory2.mapped_size(),
+            kDataSize2 + SysInfo::VMAllocationGranularity());
+
+  // Verify that opening memory2 didn't truncate or delete memory 1.
+  char* start_ptr = static_cast<char*>(memory2.memory());
+  char* end_ptr = start_ptr + kDataSize;
+  for (char* ptr = start_ptr; ptr < end_ptr; ptr++) {
+    EXPECT_EQ(*ptr, 'G');
+  }
+
+  memory1.Close();
+  memory2.Close();
+
+  rv = memory1.Delete(test_name);
+  EXPECT_TRUE(rv);
+}
+#endif  // !defined(OS_ANDROID) && !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
+
+// Check that memory is still mapped after its closed.
+TEST_P(SharedMemoryTest, CloseNoUnmap) {
+  const size_t kDataSize = 4096;
+
+  SharedMemory memory;
+  ASSERT_TRUE(memory.CreateAndMapAnonymous(kDataSize));
+  char* ptr = static_cast<char*>(memory.memory());
+  ASSERT_NE(ptr, static_cast<void*>(nullptr));
+  SbMemorySet(ptr, 'G', kDataSize);
+
+  memory.Close();
+
+  EXPECT_EQ(ptr, memory.memory());
+  EXPECT_TRUE(!memory.handle().IsValid());
+
+  for (size_t i = 0; i < kDataSize; i++) {
+    EXPECT_EQ('G', ptr[i]);
+  }
+
+  memory.Unmap();
+  EXPECT_EQ(nullptr, memory.memory());
+}
+
+#if !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
+// Create a set of N threads to each open a shared memory segment and write to
+// it. Verify that they are always reading/writing consistent data.
+TEST_P(SharedMemoryTest, MultipleThreads) {
+  const int kNumThreads = 5;
+
+  MultipleThreadMain::CleanUp();
+  // On POSIX we have a problem when 2 threads try to create the shmem
+  // (a file) at exactly the same time, since create both creates the
+  // file and zerofills it.  We solve the problem for this unit test
+  // (make it not flaky) by starting with 1 thread, then
+  // intentionally don't clean up its shmem before running with
+  // kNumThreads.
+
+  int threadcounts[] = { 1, kNumThreads };
+  for (size_t i = 0; i < arraysize(threadcounts); i++) {
+    int numthreads = threadcounts[i];
+    std::unique_ptr<PlatformThreadHandle[]> thread_handles;
+    std::unique_ptr<MultipleThreadMain* []> thread_delegates;
+
+    thread_handles.reset(new PlatformThreadHandle[numthreads]);
+    thread_delegates.reset(new MultipleThreadMain*[numthreads]);
+
+    // Spawn the threads.
+    for (int16_t index = 0; index < numthreads; index++) {
+      PlatformThreadHandle pth;
+      thread_delegates[index] = new MultipleThreadMain(index);
+      EXPECT_TRUE(PlatformThread::Create(0, thread_delegates[index], &pth));
+      thread_handles[index] = pth;
+    }
+
+    // Wait for the threads to finish.
+    for (int index = 0; index < numthreads; index++) {
+      PlatformThread::Join(thread_handles[index]);
+      delete thread_delegates[index];
+    }
+  }
+  MultipleThreadMain::CleanUp();
+}
+#endif
+
+// Allocate private (unique) shared memory with an empty string for a
+// name.  Make sure several of them don't point to the same thing as
+// we might expect if the names are equal.
+TEST_P(SharedMemoryTest, AnonymousPrivate) {
+  int i, j;
+  int count = 4;
+  bool rv;
+  const uint32_t kDataSize = 8192;
+
+  std::unique_ptr<SharedMemory[]> memories(new SharedMemory[count]);
+  std::unique_ptr<int* []> pointers(new int*[count]);
+  ASSERT_TRUE(memories.get());
+  ASSERT_TRUE(pointers.get());
+
+  for (i = 0; i < count; i++) {
+    rv = memories[i].CreateAndMapAnonymous(kDataSize);
+    EXPECT_TRUE(rv);
+    int* ptr = static_cast<int*>(memories[i].memory());
+    EXPECT_TRUE(ptr);
+    pointers[i] = ptr;
+  }
+
+  for (i = 0; i < count; i++) {
+    // zero out the first int in each except for i; for that one, make it 100.
+    for (j = 0; j < count; j++) {
+      if (i == j)
+        pointers[j][0] = 100;
+      else
+        pointers[j][0] = 0;
+    }
+    // make sure there is no bleeding of the 100 into the other pointers
+    for (j = 0; j < count; j++) {
+      if (i == j)
+        EXPECT_EQ(100, pointers[j][0]);
+      else
+        EXPECT_EQ(0, pointers[j][0]);
+    }
+  }
+
+  for (int i = 0; i < count; i++) {
+    memories[i].Close();
+  }
+}
+
+TEST_P(SharedMemoryTest, GetReadOnlyHandle) {
+  StringPiece contents = "Hello World";
+
+  SharedMemory writable_shmem;
+  SharedMemoryCreateOptions options;
+  options.size = contents.size();
+  options.share_read_only = true;
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+  // The Mach functionality is tested in shared_memory_mac_unittest.cc.
+  options.type = SharedMemoryHandle::POSIX;
+#endif
+  ASSERT_TRUE(writable_shmem.Create(options));
+  ASSERT_TRUE(writable_shmem.Map(options.size));
+  SbMemoryCopy(writable_shmem.memory(), contents.data(), contents.size());
+  EXPECT_TRUE(writable_shmem.Unmap());
+
+  SharedMemoryHandle readonly_handle = writable_shmem.GetReadOnlyHandle();
+  EXPECT_EQ(writable_shmem.handle().GetGUID(), readonly_handle.GetGUID());
+  EXPECT_EQ(writable_shmem.handle().GetSize(), readonly_handle.GetSize());
+  ASSERT_TRUE(readonly_handle.IsValid());
+  SharedMemory readonly_shmem(readonly_handle, /*readonly=*/true);
+
+  ASSERT_TRUE(readonly_shmem.Map(contents.size()));
+  EXPECT_EQ(contents,
+            StringPiece(static_cast<const char*>(readonly_shmem.memory()),
+                        contents.size()));
+  EXPECT_TRUE(readonly_shmem.Unmap());
+
+#if defined(OS_ANDROID)
+  // On Android, mapping a region through a read-only descriptor makes the
+  // region read-only. Any writable mapping attempt should fail.
+  ASSERT_FALSE(writable_shmem.Map(contents.size()));
+#else
+  // Make sure the writable instance is still writable.
+  ASSERT_TRUE(writable_shmem.Map(contents.size()));
+  StringPiece new_contents = "Goodbye";
+  SbMemoryCopy(writable_shmem.memory(), new_contents.data(),
+               new_contents.size());
+  EXPECT_EQ(new_contents,
+            StringPiece(static_cast<const char*>(writable_shmem.memory()),
+                        new_contents.size()));
+#endif
+
+  // We'd like to check that if we send the read-only segment to another
+  // process, then that other process can't reopen it read/write.  (Since that
+  // would be a security hole.)  Setting up multiple processes is hard in a
+  // unittest, so this test checks that the *current* process can't reopen the
+  // segment read/write.  I think the test here is stronger than we actually
+  // care about, but there's a remote possibility that sending a file over a
+  // pipe would transform it into read/write.
+  SharedMemoryHandle handle = readonly_shmem.handle();
+
+#if defined(OS_ANDROID)
+  // The "read-only" handle is still writable on Android:
+  // http://crbug.com/320865
+  (void)handle;
+#elif defined(OS_FUCHSIA)
+  uintptr_t addr;
+  EXPECT_NE(ZX_OK, zx::vmar::root_self()->map(
+                       0, *zx::unowned_vmo(handle.GetHandle()), 0,
+                       contents.size(), ZX_VM_FLAG_PERM_WRITE, &addr))
+      << "Shouldn't be able to map as writable.";
+
+  zx::vmo duped_handle;
+  EXPECT_NE(ZX_OK, zx::unowned_vmo(handle.GetHandle())
+                       ->duplicate(ZX_RIGHT_WRITE, &duped_handle))
+      << "Shouldn't be able to duplicate the handle into a writable one.";
+
+  EXPECT_EQ(ZX_OK, zx::unowned_vmo(handle.GetHandle())
+                       ->duplicate(ZX_RIGHT_READ, &duped_handle))
+      << "Should be able to duplicate the handle into a readable one.";
+#elif defined(OS_POSIX)
+  int handle_fd = SharedMemory::GetFdFromSharedMemoryHandle(handle);
+  EXPECT_EQ(O_RDONLY, fcntl(handle_fd, F_GETFL) & O_ACCMODE)
+      << "The descriptor itself should be read-only.";
+
+  errno = 0;
+  void* writable = mmap(nullptr, contents.size(), PROT_READ | PROT_WRITE,
+                        MAP_SHARED, handle_fd, 0);
+  int mmap_errno = errno;
+  EXPECT_EQ(MAP_FAILED, writable)
+      << "It shouldn't be possible to re-mmap the descriptor writable.";
+  EXPECT_EQ(EACCES, mmap_errno) << strerror(mmap_errno);
+  if (writable != MAP_FAILED)
+    EXPECT_EQ(0, munmap(writable, readonly_shmem.mapped_size()));
+
+#elif defined(OS_WIN)
+  EXPECT_EQ(NULL, MapViewOfFile(handle.GetHandle(), FILE_MAP_WRITE, 0, 0, 0))
+      << "Shouldn't be able to map memory writable.";
+
+  HANDLE temp_handle;
+  BOOL rv = ::DuplicateHandle(GetCurrentProcess(), handle.GetHandle(),
+                              GetCurrentProcess(), &temp_handle,
+                              FILE_MAP_ALL_ACCESS, false, 0);
+  EXPECT_EQ(FALSE, rv)
+      << "Shouldn't be able to duplicate the handle into a writable one.";
+  if (rv)
+    win::ScopedHandle writable_handle(temp_handle);
+  rv = ::DuplicateHandle(GetCurrentProcess(), handle.GetHandle(),
+                         GetCurrentProcess(), &temp_handle, FILE_MAP_READ,
+                         false, 0);
+  EXPECT_EQ(TRUE, rv)
+      << "Should be able to duplicate the handle into a readable one.";
+  if (rv)
+    win::ScopedHandle writable_handle(temp_handle);
+#else
+#error Unexpected platform; write a test that tries to make 'handle' writable.
+#endif  // defined(OS_POSIX) || defined(OS_WIN)
+}
+
+TEST_P(SharedMemoryTest, ShareToSelf) {
+  StringPiece contents = "Hello World";
+
+  SharedMemory shmem;
+  ASSERT_TRUE(shmem.CreateAndMapAnonymous(contents.size()));
+  SbMemoryCopy(shmem.memory(), contents.data(), contents.size());
+  EXPECT_TRUE(shmem.Unmap());
+
+  SharedMemoryHandle shared_handle = shmem.handle().Duplicate();
+  ASSERT_TRUE(shared_handle.IsValid());
+  EXPECT_TRUE(shared_handle.OwnershipPassesToIPC());
+  EXPECT_EQ(shared_handle.GetGUID(), shmem.handle().GetGUID());
+  EXPECT_EQ(shared_handle.GetSize(), shmem.handle().GetSize());
+  SharedMemory shared(shared_handle, /*readonly=*/false);
+
+  ASSERT_TRUE(shared.Map(contents.size()));
+  EXPECT_EQ(
+      contents,
+      StringPiece(static_cast<const char*>(shared.memory()), contents.size()));
+
+  shared_handle = shmem.handle().Duplicate();
+  ASSERT_TRUE(shared_handle.IsValid());
+  ASSERT_TRUE(shared_handle.OwnershipPassesToIPC());
+  SharedMemory readonly(shared_handle, /*readonly=*/true);
+
+  ASSERT_TRUE(readonly.Map(contents.size()));
+  EXPECT_EQ(contents,
+            StringPiece(static_cast<const char*>(readonly.memory()),
+                        contents.size()));
+}
+
+TEST_P(SharedMemoryTest, ShareWithMultipleInstances) {
+  static const StringPiece kContents = "Hello World";
+
+  SharedMemory shmem;
+  ASSERT_TRUE(shmem.CreateAndMapAnonymous(kContents.size()));
+  // We do not need to unmap |shmem| to let |shared| map.
+  const StringPiece shmem_contents(static_cast<const char*>(shmem.memory()),
+                                   shmem.requested_size());
+
+  SharedMemoryHandle shared_handle = shmem.handle().Duplicate();
+  ASSERT_TRUE(shared_handle.IsValid());
+  SharedMemory shared(shared_handle, /*readonly=*/false);
+  ASSERT_TRUE(shared.Map(kContents.size()));
+  // The underlying shared memory is created by |shmem|, so both
+  // |shared|.requested_size() and |readonly|.requested_size() are zero.
+  ASSERT_EQ(0U, shared.requested_size());
+  const StringPiece shared_contents(static_cast<const char*>(shared.memory()),
+                                    shmem.requested_size());
+
+  shared_handle = shmem.handle().Duplicate();
+  ASSERT_TRUE(shared_handle.IsValid());
+  ASSERT_TRUE(shared_handle.OwnershipPassesToIPC());
+  SharedMemory readonly(shared_handle, /*readonly=*/true);
+  ASSERT_TRUE(readonly.Map(kContents.size()));
+  ASSERT_EQ(0U, readonly.requested_size());
+  const StringPiece readonly_contents(
+      static_cast<const char*>(readonly.memory()),
+      shmem.requested_size());
+
+  // |shmem| should be able to update the content.
+  SbMemoryCopy(shmem.memory(), kContents.data(), kContents.size());
+
+  ASSERT_EQ(kContents, shmem_contents);
+  ASSERT_EQ(kContents, shared_contents);
+  ASSERT_EQ(kContents, readonly_contents);
+
+  // |shared| should also be able to update the content.
+  SbMemoryCopy(shared.memory(), ToLowerASCII(kContents).c_str(),
+               kContents.size());
+
+  ASSERT_EQ(StringPiece(ToLowerASCII(kContents)), shmem_contents);
+  ASSERT_EQ(StringPiece(ToLowerASCII(kContents)), shared_contents);
+  ASSERT_EQ(StringPiece(ToLowerASCII(kContents)), readonly_contents);
+}
+
+TEST_P(SharedMemoryTest, MapAt) {
+  ASSERT_TRUE(SysInfo::VMAllocationGranularity() >= sizeof(uint32_t));
+  const size_t kCount = SysInfo::VMAllocationGranularity();
+  const size_t kDataSize = kCount * sizeof(uint32_t);
+
+  SharedMemory memory;
+  ASSERT_TRUE(memory.CreateAndMapAnonymous(kDataSize));
+  uint32_t* ptr = static_cast<uint32_t*>(memory.memory());
+  ASSERT_NE(ptr, static_cast<void*>(nullptr));
+
+  for (size_t i = 0; i < kCount; ++i) {
+    ptr[i] = i;
+  }
+
+  memory.Unmap();
+
+  off_t offset = SysInfo::VMAllocationGranularity();
+  ASSERT_TRUE(memory.MapAt(offset, kDataSize - offset));
+  offset /= sizeof(uint32_t);
+  ptr = static_cast<uint32_t*>(memory.memory());
+  ASSERT_NE(ptr, static_cast<void*>(nullptr));
+  for (size_t i = offset; i < kCount; ++i) {
+    EXPECT_EQ(ptr[i - offset], i);
+  }
+}
+
+TEST_P(SharedMemoryTest, MapTwice) {
+  const uint32_t kDataSize = 1024;
+  SharedMemory memory;
+  bool rv = memory.CreateAndMapAnonymous(kDataSize);
+  EXPECT_TRUE(rv);
+
+  void* old_address = memory.memory();
+
+  rv = memory.Map(kDataSize);
+  EXPECT_FALSE(rv);
+  EXPECT_EQ(old_address, memory.memory());
+}
+
+#if defined(OS_POSIX)
+// This test is not applicable for iOS (crbug.com/399384).
+#if !defined(OS_IOS)
+// Create a shared memory object, mmap it, and mprotect it to PROT_EXEC.
+TEST_P(SharedMemoryTest, AnonymousExecutable) {
+#if defined(OS_LINUX)
+  // On Chromecast both /dev/shm and /tmp are mounted with 'noexec' option,
+  // which makes this test fail. But Chromecast doesn't use NaCL so we don't
+  // need this.
+  if (!IsPathExecutable(FilePath("/dev/shm")) &&
+      !IsPathExecutable(FilePath("/tmp"))) {
+    return;
+  }
+#endif  // OS_LINUX
+  const uint32_t kTestSize = 1 << 16;
+
+  SharedMemory shared_memory;
+  SharedMemoryCreateOptions options;
+  options.size = kTestSize;
+  options.executable = true;
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+  // The Mach functionality is tested in shared_memory_mac_unittest.cc.
+  options.type = SharedMemoryHandle::POSIX;
+#endif
+
+  EXPECT_TRUE(shared_memory.Create(options));
+  EXPECT_TRUE(shared_memory.Map(shared_memory.requested_size()));
+
+  EXPECT_EQ(0, mprotect(shared_memory.memory(), shared_memory.requested_size(),
+                        PROT_READ | PROT_EXEC));
+}
+#endif  // !defined(OS_IOS)
+
+#if defined(OS_ANDROID)
+// This test is restricted to Android since there is no way on other platforms
+// to guarantee that a region can never be mapped with PROT_EXEC. E.g. on
+// Linux, anonymous shared regions come from /dev/shm which can be mounted
+// without 'noexec'. In this case, anything can perform an mprotect() to
+// change the protection mask of a given page.
+TEST(SharedMemoryTest, AnonymousIsNotExecutableByDefault) {
+  const uint32_t kTestSize = 1 << 16;
+
+  SharedMemory shared_memory;
+  SharedMemoryCreateOptions options;
+  options.size = kTestSize;
+
+  EXPECT_TRUE(shared_memory.Create(options));
+  EXPECT_TRUE(shared_memory.Map(shared_memory.requested_size()));
+
+  errno = 0;
+  EXPECT_EQ(-1, mprotect(shared_memory.memory(), shared_memory.requested_size(),
+                         PROT_READ | PROT_EXEC));
+  EXPECT_EQ(EACCES, errno);
+}
+#endif  // OS_ANDROID
+
+// Android supports a different permission model than POSIX for its "ashmem"
+// shared memory implementation. So the tests about file permissions are not
+// included on Android. Fuchsia does not use a file-backed shared memory
+// implementation.
+
+#if !defined(OS_ANDROID) && !defined(OS_FUCHSIA)
+
+// Set a umask and restore the old mask on destruction.
+class ScopedUmaskSetter {
+ public:
+  explicit ScopedUmaskSetter(mode_t target_mask) {
+    old_umask_ = umask(target_mask);
+  }
+  ~ScopedUmaskSetter() { umask(old_umask_); }
+ private:
+  mode_t old_umask_;
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ScopedUmaskSetter);
+};
+
+// Create a shared memory object, check its permissions.
+TEST_P(SharedMemoryTest, FilePermissionsAnonymous) {
+  const uint32_t kTestSize = 1 << 8;
+
+  SharedMemory shared_memory;
+  SharedMemoryCreateOptions options;
+  options.size = kTestSize;
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+  // The Mach functionality is tested in shared_memory_mac_unittest.cc.
+  options.type = SharedMemoryHandle::POSIX;
+#endif
+  // Set a file mode creation mask that gives all permissions.
+  ScopedUmaskSetter permissive_mask(S_IWGRP | S_IWOTH);
+
+  EXPECT_TRUE(shared_memory.Create(options));
+
+  int shm_fd =
+      SharedMemory::GetFdFromSharedMemoryHandle(shared_memory.handle());
+  struct stat shm_stat;
+  EXPECT_EQ(0, fstat(shm_fd, &shm_stat));
+  // Neither the group, nor others should be able to read the shared memory
+  // file.
+  EXPECT_FALSE(shm_stat.st_mode & S_IRWXO);
+  EXPECT_FALSE(shm_stat.st_mode & S_IRWXG);
+}
+
+// Create a shared memory object, check its permissions.
+TEST_P(SharedMemoryTest, FilePermissionsNamed) {
+  const uint32_t kTestSize = 1 << 8;
+
+  SharedMemory shared_memory;
+  SharedMemoryCreateOptions options;
+  options.size = kTestSize;
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+  // The Mach functionality is tested in shared_memory_mac_unittest.cc.
+  options.type = SharedMemoryHandle::POSIX;
+#endif
+
+  // Set a file mode creation mask that gives all permissions.
+  ScopedUmaskSetter permissive_mask(S_IWGRP | S_IWOTH);
+
+  EXPECT_TRUE(shared_memory.Create(options));
+
+  int fd = SharedMemory::GetFdFromSharedMemoryHandle(shared_memory.handle());
+  struct stat shm_stat;
+  EXPECT_EQ(0, fstat(fd, &shm_stat));
+  // Neither the group, nor others should have been able to open the shared
+  // memory file while its name existed.
+  EXPECT_FALSE(shm_stat.st_mode & S_IRWXO);
+  EXPECT_FALSE(shm_stat.st_mode & S_IRWXG);
+}
+#endif  // !defined(OS_ANDROID) && !defined(OS_FUCHSIA)
+
+#endif  // defined(OS_POSIX)
+
+// Map() will return addresses which are aligned to the platform page size, this
+// varies from platform to platform though.  Since we'd like to advertise a
+// minimum alignment that callers can count on, test for it here.
+TEST_P(SharedMemoryTest, MapMinimumAlignment) {
+  static const int kDataSize = 8192;
+
+  SharedMemory shared_memory;
+  ASSERT_TRUE(shared_memory.CreateAndMapAnonymous(kDataSize));
+  EXPECT_EQ(0U, reinterpret_cast<uintptr_t>(
+      shared_memory.memory()) & (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
+  shared_memory.Close();
+}
+
+#if defined(OS_WIN)
+TEST_P(SharedMemoryTest, UnsafeImageSection) {
+  const char kTestSectionName[] = "UnsafeImageSection";
+  wchar_t path[MAX_PATH];
+  EXPECT_GT(::GetModuleFileName(nullptr, path, arraysize(path)), 0U);
+
+  // Map the current executable image to save us creating a new PE file on disk.
+  base::win::ScopedHandle file_handle(::CreateFile(
+      path, GENERIC_READ, FILE_SHARE_READ, nullptr, OPEN_EXISTING, 0, nullptr));
+  EXPECT_TRUE(file_handle.IsValid());
+  base::win::ScopedHandle section_handle(
+      ::CreateFileMappingA(file_handle.Get(), nullptr,
+                           PAGE_READONLY | SEC_IMAGE, 0, 0, kTestSectionName));
+  EXPECT_TRUE(section_handle.IsValid());
+
+  // Check direct opening by name, from handle and duplicated from handle.
+  SharedMemory shared_memory_open;
+  EXPECT_TRUE(shared_memory_open.Open(kTestSectionName, true));
+  EXPECT_FALSE(shared_memory_open.Map(1));
+  EXPECT_EQ(nullptr, shared_memory_open.memory());
+
+  SharedMemory shared_memory_handle_local(
+      SharedMemoryHandle(section_handle.Take(), 1, UnguessableToken::Create()),
+      true);
+  EXPECT_FALSE(shared_memory_handle_local.Map(1));
+  EXPECT_EQ(nullptr, shared_memory_handle_local.memory());
+
+  // Check that a handle without SECTION_QUERY also can't be mapped as it can't
+  // be checked.
+  SharedMemory shared_memory_handle_dummy;
+  SharedMemoryCreateOptions options;
+  options.size = 0x1000;
+  EXPECT_TRUE(shared_memory_handle_dummy.Create(options));
+  HANDLE handle_no_query;
+  EXPECT_TRUE(::DuplicateHandle(
+      ::GetCurrentProcess(), shared_memory_handle_dummy.handle().GetHandle(),
+      ::GetCurrentProcess(), &handle_no_query, FILE_MAP_READ, FALSE, 0));
+  SharedMemory shared_memory_handle_no_query(
+      SharedMemoryHandle(handle_no_query, options.size,
+                         UnguessableToken::Create()),
+      true);
+  EXPECT_FALSE(shared_memory_handle_no_query.Map(1));
+  EXPECT_EQ(nullptr, shared_memory_handle_no_query.memory());
+}
+#endif  // defined(OS_WIN)
+
+// iOS does not allow multiple processes.
+// Android ashmem does not support named shared memory.
+// Fuchsia SharedMemory does not support named shared memory.
+// Mac SharedMemory does not support named shared memory. crbug.com/345734
+#if !defined(OS_IOS) && !defined(OS_ANDROID) && !defined(OS_MACOSX) && \
+    !defined(OS_FUCHSIA)
+// On POSIX it is especially important we test shmem across processes,
+// not just across threads.  But the test is enabled on all platforms.
+class SharedMemoryProcessTest : public MultiProcessTest {
+ public:
+  static void CleanUp() {
+    SharedMemory memory;
+    memory.Delete(s_test_name_);
+  }
+
+  static int TaskTestMain() {
+    int errors = 0;
+    SharedMemory memory;
+    bool rv = memory.CreateNamedDeprecated(s_test_name_, true, s_data_size_);
+    EXPECT_TRUE(rv);
+    if (rv != true)
+      errors++;
+    rv = memory.Map(s_data_size_);
+    EXPECT_TRUE(rv);
+    if (rv != true)
+      errors++;
+    int* ptr = static_cast<int*>(memory.memory());
+
+    // This runs concurrently in multiple processes. Writes need to be atomic.
+    subtle::Barrier_AtomicIncrement(ptr, 1);
+    memory.Close();
+    return errors;
+  }
+
+  static const char s_test_name_[];
+  static const uint32_t s_data_size_;
+};
+
+const char SharedMemoryProcessTest::s_test_name_[] = "MPMem";
+const uint32_t SharedMemoryProcessTest::s_data_size_ = 1024;
+
+TEST_F(SharedMemoryProcessTest, SharedMemoryAcrossProcesses) {
+  const int kNumTasks = 5;
+
+  SharedMemoryProcessTest::CleanUp();
+
+  // Create a shared memory region. Set the first word to 0.
+  SharedMemory memory;
+  bool rv = memory.CreateNamedDeprecated(s_test_name_, true, s_data_size_);
+  ASSERT_TRUE(rv);
+  rv = memory.Map(s_data_size_);
+  ASSERT_TRUE(rv);
+  int* ptr = static_cast<int*>(memory.memory());
+  *ptr = 0;
+
+  // Start |kNumTasks| processes, each of which atomically increments the first
+  // word by 1.
+  Process processes[kNumTasks];
+  for (int index = 0; index < kNumTasks; ++index) {
+    processes[index] = SpawnChild("SharedMemoryTestMain");
+    ASSERT_TRUE(processes[index].IsValid());
+  }
+
+  // Check that each process exited correctly.
+  int exit_code = 0;
+  for (int index = 0; index < kNumTasks; ++index) {
+    EXPECT_TRUE(processes[index].WaitForExit(&exit_code));
+    EXPECT_EQ(0, exit_code);
+  }
+
+  // Check that the shared memory region reflects |kNumTasks| increments.
+  ASSERT_EQ(kNumTasks, *ptr);
+
+  memory.Close();
+  SharedMemoryProcessTest::CleanUp();
+}
+
+MULTIPROCESS_TEST_MAIN(SharedMemoryTestMain) {
+  return SharedMemoryProcessTest::TaskTestMain();
+}
+#endif  // !defined(OS_IOS) && !defined(OS_ANDROID) && !defined(OS_MACOSX) &&
+        // !defined(OS_FUCHSIA)
+
+TEST_P(SharedMemoryTest, MappedId) {
+  const uint32_t kDataSize = 1024;
+  SharedMemory memory;
+  SharedMemoryCreateOptions options;
+  options.size = kDataSize;
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+  // The Mach functionality is tested in shared_memory_mac_unittest.cc.
+  options.type = SharedMemoryHandle::POSIX;
+#endif
+
+  EXPECT_TRUE(memory.Create(options));
+  base::UnguessableToken id = memory.handle().GetGUID();
+  EXPECT_FALSE(id.is_empty());
+  EXPECT_TRUE(memory.mapped_id().is_empty());
+
+  EXPECT_TRUE(memory.Map(kDataSize));
+  EXPECT_EQ(id, memory.mapped_id());
+
+  memory.Close();
+  EXPECT_EQ(id, memory.mapped_id());
+
+  memory.Unmap();
+  EXPECT_TRUE(memory.mapped_id().is_empty());
+}
+
+INSTANTIATE_TEST_CASE_P(Default,
+                        SharedMemoryTest,
+                        ::testing::Values(Mode::Default));
+#if defined(OS_LINUX) && !defined(OS_CHROMEOS)
+INSTANTIATE_TEST_CASE_P(SkipDevShm,
+                        SharedMemoryTest,
+                        ::testing::Values(Mode::DisableDevShm));
+#endif  // defined(OS_LINUX) && !defined(OS_CHROMEOS)
+
+#if defined(OS_ANDROID)
+TEST(SharedMemoryTest, ReadOnlyRegions) {
+  const uint32_t kDataSize = 1024;
+  SharedMemory memory;
+  SharedMemoryCreateOptions options;
+  options.size = kDataSize;
+  EXPECT_TRUE(memory.Create(options));
+
+  EXPECT_FALSE(memory.handle().IsRegionReadOnly());
+
+  // Check that it is possible to map the region directly from the fd.
+  int region_fd = memory.handle().GetHandle();
+  EXPECT_GE(region_fd, 0);
+  void* address = mmap(nullptr, kDataSize, PROT_READ | PROT_WRITE, MAP_SHARED,
+                       region_fd, 0);
+  bool success = address && address != MAP_FAILED;
+  ASSERT_TRUE(address);
+  ASSERT_NE(address, MAP_FAILED);
+  if (success) {
+    EXPECT_EQ(0, munmap(address, kDataSize));
+  }
+
+  ASSERT_TRUE(memory.handle().SetRegionReadOnly());
+  EXPECT_TRUE(memory.handle().IsRegionReadOnly());
+
+  // Check that it is no longer possible to map the region read/write.
+  errno = 0;
+  address = mmap(nullptr, kDataSize, PROT_READ | PROT_WRITE, MAP_SHARED,
+                 region_fd, 0);
+  success = address && address != MAP_FAILED;
+  ASSERT_FALSE(success);
+  ASSERT_EQ(EPERM, errno);
+  if (success) {
+    EXPECT_EQ(0, munmap(address, kDataSize));
+  }
+}
+
+TEST(SharedMemoryTest, ReadOnlyDescriptors) {
+  const uint32_t kDataSize = 1024;
+  SharedMemory memory;
+  SharedMemoryCreateOptions options;
+  options.size = kDataSize;
+  EXPECT_TRUE(memory.Create(options));
+
+  EXPECT_FALSE(memory.handle().IsRegionReadOnly());
+
+  // Getting a read-only descriptor should not make the region read-only itself.
+  SharedMemoryHandle ro_handle = memory.GetReadOnlyHandle();
+  EXPECT_FALSE(memory.handle().IsRegionReadOnly());
+
+  // Mapping a writable region from a read-only descriptor should not
+  // be possible, it will DCHECK() in debug builds (see test below),
+  // while returning false on release ones.
+  {
+    bool dcheck_fired = false;
+    logging::ScopedLogAssertHandler log_assert(
+        base::BindRepeating([](bool* flag, const char*, int, base::StringPiece,
+                               base::StringPiece) { *flag = true; },
+                            base::Unretained(&dcheck_fired)));
+
+    SharedMemory rw_region(ro_handle.Duplicate(), /* read_only */ false);
+    EXPECT_FALSE(rw_region.Map(kDataSize));
+    EXPECT_EQ(DCHECK_IS_ON() ? true : false, dcheck_fired);
+  }
+
+  // Nor shall it turn the region read-only itself.
+  EXPECT_FALSE(ro_handle.IsRegionReadOnly());
+
+  // Mapping a read-only region from a read-only descriptor should work.
+  SharedMemory ro_region(ro_handle.Duplicate(), /* read_only */ true);
+  EXPECT_TRUE(ro_region.Map(kDataSize));
+
+  // And it should turn the region read-only too.
+  EXPECT_TRUE(ro_handle.IsRegionReadOnly());
+  EXPECT_TRUE(memory.handle().IsRegionReadOnly());
+  EXPECT_FALSE(memory.Map(kDataSize));
+
+  ro_handle.Close();
+}
+
+#endif  // OS_ANDROID
+
+}  // namespace base
diff --git a/src/base/memory/shared_memory_win.cc b/src/base/memory/shared_memory_win.cc
new file mode 100644
index 0000000..0f4e965
--- /dev/null
+++ b/src/base/memory/shared_memory_win.cc
@@ -0,0 +1,386 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory.h"
+
+#include <aclapi.h>
+
+#include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/logging.h"
+#include "base/memory/shared_memory_tracker.h"
+#include "base/metrics/histogram_functions.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/rand_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/utf_string_conversions.h"
+#include "base/unguessable_token.h"
+#include "base/win/windows_version.h"
+#include "starboard/types.h"
+
+namespace base {
+namespace {
+
+// Errors that can occur during Shared Memory construction.
+// These match tools/metrics/histograms/histograms.xml.
+// This enum is append-only.
+enum CreateError {
+  SUCCESS = 0,
+  SIZE_ZERO = 1,
+  SIZE_TOO_LARGE = 2,
+  INITIALIZE_ACL_FAILURE = 3,
+  INITIALIZE_SECURITY_DESC_FAILURE = 4,
+  SET_SECURITY_DESC_FAILURE = 5,
+  CREATE_FILE_MAPPING_FAILURE = 6,
+  REDUCE_PERMISSIONS_FAILURE = 7,
+  ALREADY_EXISTS = 8,
+  CREATE_ERROR_LAST = ALREADY_EXISTS
+};
+
+// Emits UMA metrics about encountered errors. Pass zero (0) for |winerror|
+// if there is no associated Windows error.
+void LogError(CreateError error, DWORD winerror) {
+  UMA_HISTOGRAM_ENUMERATION("SharedMemory.CreateError", error,
+                            CREATE_ERROR_LAST + 1);
+  static_assert(ERROR_SUCCESS == 0, "Windows error code changed!");
+  if (winerror != ERROR_SUCCESS)
+    UmaHistogramSparse("SharedMemory.CreateWinError", winerror);
+}
+
+typedef enum _SECTION_INFORMATION_CLASS {
+  SectionBasicInformation,
+} SECTION_INFORMATION_CLASS;
+
+typedef struct _SECTION_BASIC_INFORMATION {
+  PVOID BaseAddress;
+  ULONG Attributes;
+  LARGE_INTEGER Size;
+} SECTION_BASIC_INFORMATION, *PSECTION_BASIC_INFORMATION;
+
+typedef ULONG(__stdcall* NtQuerySectionType)(
+    HANDLE SectionHandle,
+    SECTION_INFORMATION_CLASS SectionInformationClass,
+    PVOID SectionInformation,
+    ULONG SectionInformationLength,
+    PULONG ResultLength);
+
+// Returns the length of the memory section starting at the supplied address.
+size_t GetMemorySectionSize(void* address) {
+  MEMORY_BASIC_INFORMATION memory_info;
+  if (!::VirtualQuery(address, &memory_info, sizeof(memory_info)))
+    return 0;
+  return memory_info.RegionSize - (static_cast<char*>(address) -
+         static_cast<char*>(memory_info.AllocationBase));
+}
+
+// Checks if the section object is safe to map. At the moment this just means
+// it's not an image section.
+bool IsSectionSafeToMap(HANDLE handle) {
+  static NtQuerySectionType nt_query_section_func;
+  if (!nt_query_section_func) {
+    nt_query_section_func = reinterpret_cast<NtQuerySectionType>(
+        ::GetProcAddress(::GetModuleHandle(L"ntdll.dll"), "NtQuerySection"));
+    DCHECK(nt_query_section_func);
+  }
+
+  // The handle must have SECTION_QUERY access for this to succeed.
+  SECTION_BASIC_INFORMATION basic_information = {};
+  ULONG status =
+      nt_query_section_func(handle, SectionBasicInformation, &basic_information,
+                            sizeof(basic_information), nullptr);
+  if (status)
+    return false;
+  return (basic_information.Attributes & SEC_IMAGE) != SEC_IMAGE;
+}
+
+// Returns a HANDLE on success and |nullptr| on failure.
+// This function is similar to CreateFileMapping, but removes the permissions
+// WRITE_DAC, WRITE_OWNER, READ_CONTROL, and DELETE.
+//
+// A newly created file mapping has two sets of permissions. It has access
+// control permissions (WRITE_DAC, WRITE_OWNER, READ_CONTROL, and DELETE) and
+// file permissions (FILE_MAP_READ, FILE_MAP_WRITE, etc.). ::DuplicateHandle()
+// with the parameter DUPLICATE_SAME_ACCESS copies both sets of permissions.
+//
+// The Chrome sandbox prevents HANDLEs with the WRITE_DAC permission from being
+// duplicated into unprivileged processes. But the only way to copy file
+// permissions is with the parameter DUPLICATE_SAME_ACCESS. This means that
+// there is no way for a privileged process to duplicate a file mapping into an
+// unprivileged process while maintaining the previous file permissions.
+//
+// By removing all access control permissions of a file mapping immediately
+// after creation, ::DuplicateHandle() effectively only copies the file
+// permissions.
+HANDLE CreateFileMappingWithReducedPermissions(SECURITY_ATTRIBUTES* sa,
+                                               size_t rounded_size,
+                                               LPCWSTR name) {
+  HANDLE h = CreateFileMapping(INVALID_HANDLE_VALUE, sa, PAGE_READWRITE, 0,
+                               static_cast<DWORD>(rounded_size), name);
+  if (!h) {
+    LogError(CREATE_FILE_MAPPING_FAILURE, GetLastError());
+    return nullptr;
+  }
+
+  HANDLE h2;
+  BOOL success = ::DuplicateHandle(
+      GetCurrentProcess(), h, GetCurrentProcess(), &h2,
+      FILE_MAP_READ | FILE_MAP_WRITE | SECTION_QUERY, FALSE, 0);
+  BOOL rv = ::CloseHandle(h);
+  DCHECK(rv);
+
+  if (!success) {
+    LogError(REDUCE_PERMISSIONS_FAILURE, GetLastError());
+    return nullptr;
+  }
+
+  return h2;
+}
+
+}  // namespace.
+
+SharedMemory::SharedMemory() {}
+
+SharedMemory::SharedMemory(const string16& name) : name_(name) {}
+
+SharedMemory::SharedMemory(const SharedMemoryHandle& handle, bool read_only)
+    : external_section_(true), shm_(handle), read_only_(read_only) {}
+
+SharedMemory::~SharedMemory() {
+  Unmap();
+  Close();
+}
+
+// static
+bool SharedMemory::IsHandleValid(const SharedMemoryHandle& handle) {
+  return handle.IsValid();
+}
+
+// static
+void SharedMemory::CloseHandle(const SharedMemoryHandle& handle) {
+  handle.Close();
+}
+
+// static
+size_t SharedMemory::GetHandleLimit() {
+  // Rounded down from value reported here:
+  // http://blogs.technet.com/b/markrussinovich/archive/2009/09/29/3283844.aspx
+  return static_cast<size_t>(1 << 23);
+}
+
+// static
+SharedMemoryHandle SharedMemory::DuplicateHandle(
+    const SharedMemoryHandle& handle) {
+  return handle.Duplicate();
+}
+
+bool SharedMemory::CreateAndMapAnonymous(size_t size) {
+  return CreateAnonymous(size) && Map(size);
+}
+
+bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
+  // TODO(crbug.com/210609): NaCl forces us to round up 64k here, wasting 32k
+  // per mapping on average.
+  static const size_t kSectionMask = 65536 - 1;
+  DCHECK(!options.executable);
+  DCHECK(!shm_.IsValid());
+  if (options.size == 0) {
+    LogError(SIZE_ZERO, 0);
+    return false;
+  }
+
+  // Check maximum accounting for overflow.
+  if (options.size >
+      static_cast<size_t>(std::numeric_limits<int>::max()) - kSectionMask) {
+    LogError(SIZE_TOO_LARGE, 0);
+    return false;
+  }
+
+  size_t rounded_size = (options.size + kSectionMask) & ~kSectionMask;
+  name_ = options.name_deprecated ?
+      ASCIIToUTF16(*options.name_deprecated) : L"";
+  SECURITY_ATTRIBUTES sa = {sizeof(sa), nullptr, FALSE};
+  SECURITY_DESCRIPTOR sd;
+  ACL dacl;
+
+  if (name_.empty()) {
+    // Add an empty DACL to enforce anonymous read-only sections.
+    sa.lpSecurityDescriptor = &sd;
+    if (!InitializeAcl(&dacl, sizeof(dacl), ACL_REVISION)) {
+      LogError(INITIALIZE_ACL_FAILURE, GetLastError());
+      return false;
+    }
+    if (!InitializeSecurityDescriptor(&sd, SECURITY_DESCRIPTOR_REVISION)) {
+      LogError(INITIALIZE_SECURITY_DESC_FAILURE, GetLastError());
+      return false;
+    }
+    if (!SetSecurityDescriptorDacl(&sd, TRUE, &dacl, FALSE)) {
+      LogError(SET_SECURITY_DESC_FAILURE, GetLastError());
+      return false;
+    }
+
+    if (base::win::GetVersion() < base::win::VERSION_WIN8_1) {
+      // Windows < 8.1 ignores DACLs on certain unnamed objects (like shared
+      // sections). So, we generate a random name when we need to enforce
+      // read-only.
+      uint64_t rand_values[4];
+      RandBytes(&rand_values, sizeof(rand_values));
+      name_ = StringPrintf(L"CrSharedMem_%016llx%016llx%016llx%016llx",
+                           rand_values[0], rand_values[1], rand_values[2],
+                           rand_values[3]);
+      DCHECK(!name_.empty());
+    }
+  }
+
+  shm_ = SharedMemoryHandle(
+      CreateFileMappingWithReducedPermissions(
+          &sa, rounded_size, name_.empty() ? nullptr : name_.c_str()),
+      rounded_size, UnguessableToken::Create());
+  if (!shm_.IsValid()) {
+    // The error is logged within CreateFileMappingWithReducedPermissions().
+    return false;
+  }
+
+  requested_size_ = options.size;
+
+  // Check if the shared memory pre-exists.
+  if (GetLastError() == ERROR_ALREADY_EXISTS) {
+    // If the file already existed, set requested_size_ to 0 to show that
+    // we don't know the size.
+    requested_size_ = 0;
+    external_section_ = true;
+    if (!options.open_existing_deprecated) {
+      Close();
+      // From "if" above: GetLastError() == ERROR_ALREADY_EXISTS.
+      LogError(ALREADY_EXISTS, ERROR_ALREADY_EXISTS);
+      return false;
+    }
+  }
+
+  LogError(SUCCESS, ERROR_SUCCESS);
+  return true;
+}
+
+bool SharedMemory::Delete(const std::string& name) {
+  // intentionally empty -- there is nothing for us to do on Windows.
+  return true;
+}
+
+bool SharedMemory::Open(const std::string& name, bool read_only) {
+  DCHECK(!shm_.IsValid());
+  DWORD access = FILE_MAP_READ | SECTION_QUERY;
+  if (!read_only)
+    access |= FILE_MAP_WRITE;
+  name_ = ASCIIToUTF16(name);
+  read_only_ = read_only;
+
+  // This form of sharing shared memory is deprecated. https://crbug.com/345734.
+  // However, we can't get rid of it without a significant refactor because it's
+  // used to communicate between two versions of the same service process, very
+  // early in the life cycle.
+  // Technically, we should also pass the GUID from the original shared memory
+  // region. We don't do that - this means that we will overcount this memory,
+  // which thankfully isn't relevant since Chrome only communicates with a
+  // single version of the service process.
+  // We pass the size |0|, which is a dummy size and wrong, but otherwise
+  // harmless.
+  shm_ = SharedMemoryHandle(
+      OpenFileMapping(access, false, name_.empty() ? nullptr : name_.c_str()),
+      0u, UnguessableToken::Create());
+  if (!shm_.IsValid())
+    return false;
+  // If a name is specified, assume it's an external section.
+  if (!name_.empty())
+    external_section_ = true;
+  // Note: size_ is not set in this case.
+  return true;
+}
+
+bool SharedMemory::MapAt(off_t offset, size_t bytes) {
+  if (!shm_.IsValid()) {
+    DLOG(ERROR) << "Invalid SharedMemoryHandle.";
+    return false;
+  }
+
+  if (bytes > static_cast<size_t>(std::numeric_limits<int>::max())) {
+    DLOG(ERROR) << "Bytes required exceeds the 2G limitation.";
+    return false;
+  }
+
+  if (memory_) {
+    DLOG(ERROR) << "The SharedMemory has been mapped already.";
+    return false;
+  }
+
+  if (external_section_ && !IsSectionSafeToMap(shm_.GetHandle())) {
+    DLOG(ERROR) << "SharedMemoryHandle is not safe to be mapped.";
+    return false;
+  }
+
+  // Try to map the shared memory. On the first failure, release any reserved
+  // address space for a single retry.
+  for (int i = 0; i < 2; ++i) {
+    memory_ = MapViewOfFile(
+        shm_.GetHandle(),
+        read_only_ ? FILE_MAP_READ : FILE_MAP_READ | FILE_MAP_WRITE,
+        static_cast<uint64_t>(offset) >> 32, static_cast<DWORD>(offset), bytes);
+    if (memory_)
+      break;
+    ReleaseReservation();
+  }
+  if (!memory_) {
+    DPLOG(ERROR) << "Failed executing MapViewOfFile";
+    return false;
+  }
+
+  DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(memory_) &
+                    (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
+  mapped_size_ = GetMemorySectionSize(memory_);
+  mapped_id_ = shm_.GetGUID();
+  SharedMemoryTracker::GetInstance()->IncrementMemoryUsage(*this);
+  return true;
+}
+
+bool SharedMemory::Unmap() {
+  if (!memory_)
+    return false;
+
+  SharedMemoryTracker::GetInstance()->DecrementMemoryUsage(*this);
+  UnmapViewOfFile(memory_);
+  memory_ = nullptr;
+  mapped_id_ = UnguessableToken();
+  return true;
+}
+
+SharedMemoryHandle SharedMemory::GetReadOnlyHandle() const {
+  HANDLE result;
+  ProcessHandle process = GetCurrentProcess();
+  if (!::DuplicateHandle(process, shm_.GetHandle(), process, &result,
+                         FILE_MAP_READ | SECTION_QUERY, FALSE, 0)) {
+    return SharedMemoryHandle();
+  }
+  SharedMemoryHandle handle =
+      SharedMemoryHandle(result, shm_.GetSize(), shm_.GetGUID());
+  handle.SetOwnershipPassesToIPC(true);
+  return handle;
+}
+
+void SharedMemory::Close() {
+  if (shm_.IsValid()) {
+    shm_.Close();
+    shm_ = SharedMemoryHandle();
+  }
+}
+
+SharedMemoryHandle SharedMemory::handle() const {
+  return shm_;
+}
+
+SharedMemoryHandle SharedMemory::TakeHandle() {
+  SharedMemoryHandle handle(shm_);
+  handle.SetOwnershipPassesToIPC(true);
+  Unmap();
+  shm_ = SharedMemoryHandle();
+  return handle;
+}
+
+}  // namespace base
diff --git a/src/base/memory/shared_memory_win_unittest.cc b/src/base/memory/shared_memory_win_unittest.cc
new file mode 100644
index 0000000..1763013
--- /dev/null
+++ b/src/base/memory/shared_memory_win_unittest.cc
@@ -0,0 +1,226 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <windows.h>
+#include <sddl.h>
+
+#include <memory>
+
+#include "base/command_line.h"
+#include "base/memory/free_deleter.h"
+#include "base/memory/shared_memory.h"
+#include "base/process/process.h"
+#include "base/rand_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/strings/sys_string_conversions.h"
+#include "base/test/multiprocess_test.h"
+#include "base/test/test_timeouts.h"
+#include "base/win/scoped_handle.h"
+#include "base/win/win_util.h"
+#include "starboard/memory.h"
+#include "starboard/types.h"
+#include "testing/multiprocess_func_list.h"
+
+namespace base {
+namespace {
+const char* kHandleSwitchName = "shared_memory_win_test_switch";
+
+// Creates a process token with a low integrity SID.
+win::ScopedHandle CreateLowIntegritySID() {
+  HANDLE process_token_raw = nullptr;
+  BOOL success = ::OpenProcessToken(GetCurrentProcess(),
+                                    TOKEN_DUPLICATE | TOKEN_ADJUST_DEFAULT |
+                                        TOKEN_QUERY | TOKEN_ASSIGN_PRIMARY,
+                                    &process_token_raw);
+  if (!success)
+    return base::win::ScopedHandle();
+  win::ScopedHandle process_token(process_token_raw);
+
+  HANDLE lowered_process_token_raw = nullptr;
+  success =
+      ::DuplicateTokenEx(process_token.Get(), 0, NULL, SecurityImpersonation,
+                         TokenPrimary, &lowered_process_token_raw);
+  if (!success)
+    return base::win::ScopedHandle();
+  win::ScopedHandle lowered_process_token(lowered_process_token_raw);
+
+  // Low integrity SID
+  WCHAR integrity_sid_string[20] = L"S-1-16-4096";
+  PSID integrity_sid = nullptr;
+  success = ::ConvertStringSidToSid(integrity_sid_string, &integrity_sid);
+  if (!success)
+    return base::win::ScopedHandle();
+
+  TOKEN_MANDATORY_LABEL TIL = {};
+  TIL.Label.Attributes = SE_GROUP_INTEGRITY;
+  TIL.Label.Sid = integrity_sid;
+  success = ::SetTokenInformation(
+      lowered_process_token.Get(), TokenIntegrityLevel, &TIL,
+      sizeof(TOKEN_MANDATORY_LABEL) + GetLengthSid(integrity_sid));
+  if (!success)
+    return base::win::ScopedHandle();
+  return lowered_process_token;
+}
+
+// Reads a HANDLE from the pipe as a raw int, least significant digit first.
+win::ScopedHandle ReadHandleFromPipe(HANDLE pipe) {
+  // Read from parent pipe.
+  const size_t buf_size = 1000;
+  char buffer[buf_size];
+  SbMemorySet(buffer, 0, buf_size);
+  DWORD bytes_read;
+  BOOL success = ReadFile(pipe, buffer, buf_size, &bytes_read, NULL);
+
+  if (!success || bytes_read == 0) {
+    LOG(ERROR) << "Failed to read handle from pipe.";
+    return win::ScopedHandle();
+  }
+
+  int handle_as_int = 0;
+  int power_of_ten = 1;
+  for (unsigned int i = 0; i < bytes_read; ++i) {
+    handle_as_int += buffer[i] * power_of_ten;
+    power_of_ten *= 10;
+  }
+
+  return win::ScopedHandle(reinterpret_cast<HANDLE>(handle_as_int));
+}
+
+// Writes a HANDLE to a pipe as a raw int, least significant digit first.
+void WriteHandleToPipe(HANDLE pipe, HANDLE handle) {
+  uint32_t handle_as_int = base::win::HandleToUint32(handle);
+
+  std::unique_ptr<char, base::FreeDeleter> buffer(
+      static_cast<char*>(SbMemoryAllocate(1000)));
+  size_t index = 0;
+  while (handle_as_int > 0) {
+    buffer.get()[index] = handle_as_int % 10;
+    handle_as_int /= 10;
+    ++index;
+  }
+
+  ::ConnectNamedPipe(pipe, nullptr);
+  DWORD written;
+  ASSERT_TRUE(::WriteFile(pipe, buffer.get(), index, &written, NULL));
+}
+
+// Creates a communication pipe with the given name.
+win::ScopedHandle CreateCommunicationPipe(const std::wstring& name) {
+  return win::ScopedHandle(CreateNamedPipe(name.c_str(),  // pipe name
+                                           PIPE_ACCESS_DUPLEX, PIPE_WAIT, 255,
+                                           1000, 1000, 0, NULL));
+}
+
+// Generates a random name for a communication pipe.
+std::wstring CreateCommunicationPipeName() {
+  uint64_t rand_values[4];
+  RandBytes(&rand_values, sizeof(rand_values));
+  std::wstring child_pipe_name = StringPrintf(
+      L"\\\\.\\pipe\\SharedMemoryWinTest_%016llx%016llx%016llx%016llx",
+      rand_values[0], rand_values[1], rand_values[2], rand_values[3]);
+  return child_pipe_name;
+}
+
+class SharedMemoryWinTest : public base::MultiProcessTest {
+ protected:
+  CommandLine MakeCmdLine(const std::string& procname) override {
+    CommandLine line = base::MultiProcessTest::MakeCmdLine(procname);
+    line.AppendSwitchASCII(kHandleSwitchName, communication_pipe_name_);
+    return line;
+  }
+
+  std::string communication_pipe_name_;
+};
+
+MULTIPROCESS_TEST_MAIN(LowerPermissions) {
+  std::string handle_name =
+      CommandLine::ForCurrentProcess()->GetSwitchValueASCII(kHandleSwitchName);
+  std::wstring handle_name16 = SysUTF8ToWide(handle_name);
+  win::ScopedHandle parent_pipe(
+      ::CreateFile(handle_name16.c_str(),  // pipe name
+                   GENERIC_READ,
+                   0,              // no sharing
+                   NULL,           // default security attributes
+                   OPEN_EXISTING,  // opens existing pipe
+                   0,              // default attributes
+                   NULL));         // no template file
+  if (parent_pipe.Get() == INVALID_HANDLE_VALUE) {
+    LOG(ERROR) << "Failed to open communication pipe.";
+    return 1;
+  }
+
+  win::ScopedHandle received_handle = ReadHandleFromPipe(parent_pipe.Get());
+  if (!received_handle.Get()) {
+    LOG(ERROR) << "Failed to read handle from pipe.";
+    return 1;
+  }
+
+  // Attempting to add the WRITE_DAC permission should fail.
+  HANDLE duped_handle;
+  BOOL success = ::DuplicateHandle(GetCurrentProcess(), received_handle.Get(),
+                                   GetCurrentProcess(), &duped_handle,
+                                   FILE_MAP_READ | WRITE_DAC, FALSE, 0);
+  if (success) {
+    LOG(ERROR) << "Should not have been able to add WRITE_DAC permission.";
+    return 1;
+  }
+
+  // Attempting to add the FILE_MAP_WRITE permission should fail.
+  success = ::DuplicateHandle(GetCurrentProcess(), received_handle.Get(),
+                              GetCurrentProcess(), &duped_handle,
+                              FILE_MAP_READ | FILE_MAP_WRITE, FALSE, 0);
+  if (success) {
+    LOG(ERROR) << "Should not have been able to add FILE_MAP_WRITE permission.";
+    return 1;
+  }
+
+  // Attempting to duplicate the HANDLE with the same permissions should
+  // succeed.
+  success = ::DuplicateHandle(GetCurrentProcess(), received_handle.Get(),
+                              GetCurrentProcess(), &duped_handle, FILE_MAP_READ,
+                              FALSE, 0);
+  if (!success) {
+    LOG(ERROR) << "Failed to duplicate handle.";
+    return 4;
+  }
+  ::CloseHandle(duped_handle);
+  return 0;
+}
+
+TEST_F(SharedMemoryWinTest, LowerPermissions) {
+  std::wstring communication_pipe_name = CreateCommunicationPipeName();
+  communication_pipe_name_ = SysWideToUTF8(communication_pipe_name);
+
+  win::ScopedHandle communication_pipe =
+      CreateCommunicationPipe(communication_pipe_name);
+  ASSERT_TRUE(communication_pipe.Get());
+
+  win::ScopedHandle lowered_process_token = CreateLowIntegritySID();
+  ASSERT_TRUE(lowered_process_token.Get());
+
+  base::LaunchOptions options;
+  options.as_user = lowered_process_token.Get();
+  base::Process process = SpawnChildWithOptions("LowerPermissions", options);
+  ASSERT_TRUE(process.IsValid());
+
+  SharedMemory memory;
+  memory.CreateAndMapAnonymous(1001);
+
+  // Duplicate into child process, giving only FILE_MAP_READ permissions.
+  HANDLE raw_handle = nullptr;
+  ::DuplicateHandle(::GetCurrentProcess(), memory.handle().GetHandle(),
+                    process.Handle(), &raw_handle,
+                    FILE_MAP_READ | SECTION_QUERY, FALSE, 0);
+  ASSERT_TRUE(raw_handle);
+
+  WriteHandleToPipe(communication_pipe.Get(), raw_handle);
+
+  int exit_code;
+  EXPECT_TRUE(process.WaitForExitWithTimeout(TestTimeouts::action_max_timeout(),
+                                             &exit_code));
+  EXPECT_EQ(0, exit_code);
+}
+
+}  // namespace
+}  // namespace base
diff --git a/src/base/memory/singleton.cc b/src/base/memory/singleton.cc
deleted file mode 100644
index ee5e58d..0000000
--- a/src/base/memory/singleton.cc
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/singleton.h"
-#include "base/threading/platform_thread.h"
-
-namespace base {
-namespace internal {
-
-subtle::AtomicWord WaitForInstance(subtle::AtomicWord* instance) {
-  // Handle the race. Another thread beat us and either:
-  // - Has the object in BeingCreated state
-  // - Already has the object created...
-  // We know value != NULL.  It could be kBeingCreatedMarker, or a valid ptr.
-  // Unless your constructor can be very time consuming, it is very unlikely
-  // to hit this race.  When it does, we just spin and yield the thread until
-  // the object has been created.
-  subtle::AtomicWord value;
-  while (true) {
-    value = subtle::NoBarrier_Load(instance);
-    if (value != kBeingCreatedMarker)
-      break;
-    PlatformThread::YieldCurrentThread();
-  }
-  return value;
-}
-
-}  // namespace internal
-}  // namespace base
-
diff --git a/src/base/memory/singleton.h b/src/base/memory/singleton.h
index 0d4fc89..880ef0a 100644
--- a/src/base/memory/singleton.h
+++ b/src/base/memory/singleton.h
@@ -1,8 +1,16 @@
 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
-
-// PLEASE READ: Do you really need a singleton?
+//
+// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+// PLEASE READ: Do you really need a singleton? If possible, use a
+// function-local static of type base::NoDestructor<T> instead:
+//
+// Factory& Factory::GetInstance() {
+//   static base::NoDestructor<Factory> instance;
+//   return *instance;
+// }
+// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
 //
 // Singletons make it hard to determine the lifetime of an object, which can
 // lead to buggy code and spurious crashes.
@@ -22,25 +30,12 @@
 #include "base/at_exit.h"
 #include "base/atomicops.h"
 #include "base/base_export.h"
-#include "base/memory/aligned_memory.h"
-#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
+#include "base/lazy_instance_helpers.h"
+#include "base/logging.h"
+#include "base/macros.h"
 #include "base/threading/thread_restrictions.h"
 
 namespace base {
-namespace internal {
-
-// Our AtomicWord doubles as a spinlock, where a value of
-// kBeingCreatedMarker means the spinlock is being held for creation.
-static const subtle::AtomicWord kBeingCreatedMarker = 1;
-
-// We pull out some of the functionality into a non-templated function, so that
-// we can implement the more complicated pieces out of line in the .cc file.
-BASE_EXPORT subtle::AtomicWord WaitForInstance(subtle::AtomicWord* instance);
-
-}  // namespace internal
-}  // namespace base
-
-// TODO(joth): Move more of this file into namespace base
 
 // Default traits for Singleton<Type>. Calls operator new and operator delete on
 // the object. Registers automatic deletion at process exit.
@@ -63,10 +58,12 @@
   // exit. See below for the required call that makes this happen.
   static const bool kRegisterAtExit = true;
 
+#if DCHECK_IS_ON()
   // Set to false to disallow access on a non-joinable thread.  This is
   // different from kRegisterAtExit because StaticMemorySingletonTraits allows
   // access on non-joinable threads, and gracefully handles this.
   static const bool kAllowedToAccessOnNonjoinableThread = false;
+#endif
 };
 
 
@@ -76,14 +73,15 @@
 template<typename Type>
 struct LeakySingletonTraits : public DefaultSingletonTraits<Type> {
   static const bool kRegisterAtExit = false;
+#if DCHECK_IS_ON()
   static const bool kAllowedToAccessOnNonjoinableThread = true;
+#endif
 };
 
-
 // Alternate traits for use with the Singleton<Type>.  Allocates memory
 // for the singleton instance from a static buffer.  The singleton will
 // be cleaned up at exit, but can't be revived after destruction unless
-// the Resurrect() method is called.
+// the ResurrectForTesting() method is called.
 //
 // This is useful for a certain category of things, notably logging and
 // tracing, where the singleton instance is of a type carefully constructed to
@@ -103,39 +101,38 @@
 // process once you've unloaded.
 template <typename Type>
 struct StaticMemorySingletonTraits {
-  // WARNING: User has to deal with get() in the singleton class
-  // this is traits for returning NULL.
+  // WARNING: User has to support a New() which returns null.
   static Type* New() {
-    // Only constructs once and returns pointer; otherwise returns NULL.
-    if (base::subtle::NoBarrier_AtomicExchange(&dead_, 1))
-      return NULL;
+    // Only constructs once and returns pointer; otherwise returns null.
+    if (subtle::NoBarrier_AtomicExchange(&dead_, 1))
+      return nullptr;
 
-    return new(buffer_.void_data()) Type();
+    return new (buffer_) Type();
   }
 
   static void Delete(Type* p) {
-    if (p != NULL)
+    if (p)
       p->Type::~Type();
   }
 
   static const bool kRegisterAtExit = true;
-  static const bool kAllowedToAccessOnNonjoinableThread = true;
 
-  // Exposed for unittesting.
-  static void Resurrect() {
-    base::subtle::NoBarrier_Store(&dead_, 0);
-  }
+#if DCHECK_IS_ON()
+  static const bool kAllowedToAccessOnNonjoinableThread = true;
+#endif
+
+  static void ResurrectForTesting() { subtle::NoBarrier_Store(&dead_, 0); }
 
  private:
-  static base::AlignedMemory<sizeof(Type), ALIGNOF(Type)> buffer_;
+  alignas(Type) static char buffer_[sizeof(Type)];
   // Signal the object was already deleted, so it is not revived.
-  static base::subtle::Atomic32 dead_;
+  static subtle::Atomic32 dead_;
 };
 
-template <typename Type> base::AlignedMemory<sizeof(Type), ALIGNOF(Type)>
-    StaticMemorySingletonTraits<Type>::buffer_;
-template <typename Type> base::subtle::Atomic32
-    StaticMemorySingletonTraits<Type>::dead_ = 0;
+template <typename Type>
+alignas(Type) char StaticMemorySingletonTraits<Type>::buffer_[sizeof(Type)];
+template <typename Type>
+subtle::Atomic32 StaticMemorySingletonTraits<Type>::dead_ = 0;
 
 // The Singleton<Type, Traits, DifferentiatingType> class manages a single
 // instance of Type which will be created on first use and will be destroyed at
@@ -149,14 +146,17 @@
 // Example usage:
 //
 // In your header:
-//   template <typename T> struct DefaultSingletonTraits;
+//   namespace base {
+//   template <typename T>
+//   struct DefaultSingletonTraits;
+//   }
 //   class FooClass {
 //    public:
 //     static FooClass* GetInstance();  <-- See comment below on this.
 //     void Bar() { ... }
 //    private:
 //     FooClass() { ... }
-//     friend struct DefaultSingletonTraits<FooClass>;
+//     friend struct base::DefaultSingletonTraits<FooClass>;
 //
 //     DISALLOW_COPY_AND_ASSIGN(FooClass);
 //   };
@@ -164,7 +164,14 @@
 // In your source file:
 //  #include "base/memory/singleton.h"
 //  FooClass* FooClass::GetInstance() {
-//    return Singleton<FooClass>::get();
+//    return base::Singleton<FooClass>::get();
+//  }
+//
+// Or for leaky singletons:
+//  #include "base/memory/singleton.h"
+//  FooClass* FooClass::GetInstance() {
+//    return base::Singleton<
+//        FooClass, base::LeakySingletonTraits<FooClass>>::get();
 //  }
 //
 // And to call methods on FooClass:
@@ -187,7 +194,7 @@
 //   RAE = kRegisterAtExit
 //
 // On every platform, if Traits::RAE is true, the singleton will be destroyed at
-// process exit. More precisely it uses base::AtExitManager which requires an
+// process exit. More precisely it uses AtExitManager which requires an
 // object of this type to be instantiated. AtExitManager mimics the semantics
 // of atexit() such as LIFO order but under Windows is safer to call. For more
 // information see at_exit.h.
@@ -206,6 +213,7 @@
 // (b) Your factory function must never throw an exception. This class is not
 //     exception-safe.
 //
+
 template <typename Type,
           typename Traits = DefaultSingletonTraits<Type>,
           typename DifferentiatingType = Type>
@@ -215,71 +223,40 @@
   // method and call Singleton::get() from within that.
   friend Type* Type::GetInstance();
 
-  // Allow TraceLog tests to test tracing after OnExit.
-  friend class DeleteTraceLogForTesting;
-
   // This class is safe to be constructed and copy-constructed since it has no
   // member.
 
   // Return a pointer to the one true instance of the class.
   static Type* get() {
-#ifndef NDEBUG
-    // Avoid making TLS lookup on release builds.
+#if DCHECK_IS_ON()
     if (!Traits::kAllowedToAccessOnNonjoinableThread)
-      base::ThreadRestrictions::AssertSingletonAllowed();
+      ThreadRestrictions::AssertSingletonAllowed();
 #endif
 
-    base::subtle::AtomicWord value = base::subtle::NoBarrier_Load(&instance_);
-    if (value != 0 && value != base::internal::kBeingCreatedMarker) {
-      // See the corresponding HAPPENS_BEFORE below.
-      ANNOTATE_HAPPENS_AFTER(&instance_);
-      return reinterpret_cast<Type*>(value);
-    }
-
-    // Object isn't created yet, maybe we will get to create it, let's try...
-    if (base::subtle::Acquire_CompareAndSwap(
-          &instance_, 0, base::internal::kBeingCreatedMarker) == 0) {
-      // instance_ was NULL and is now kBeingCreatedMarker.  Only one thread
-      // will ever get here.  Threads might be spinning on us, and they will
-      // stop right after we do this store.
-      Type* newval = Traits::New();
-
-      // This annotation helps race detectors recognize correct lock-less
-      // synchronization between different threads calling get().
-      // See the corresponding HAPPENS_AFTER below and above.
-      ANNOTATE_HAPPENS_BEFORE(&instance_);
-      base::subtle::Release_Store(
-          &instance_, reinterpret_cast<base::subtle::AtomicWord>(newval));
-
-      if (newval != NULL && Traits::kRegisterAtExit)
-        base::AtExitManager::RegisterCallback(OnExit, NULL);
-
-      return newval;
-    }
-
-    // We hit a race. Wait for the other thread to complete it.
-    value = base::internal::WaitForInstance(&instance_);
-
-    // See the corresponding HAPPENS_BEFORE above.
-    ANNOTATE_HAPPENS_AFTER(&instance_);
-    return reinterpret_cast<Type*>(value);
+    return subtle::GetOrCreateLazyPointer(
+        &instance_, &CreatorFunc, nullptr,
+        Traits::kRegisterAtExit ? OnExit : nullptr, nullptr);
   }
 
+  // Internal method used as an adaptor for GetOrCreateLazyPointer(). Do not use
+  // outside of that use case.
+  static Type* CreatorFunc(void* /* creator_arg*/) { return Traits::New(); }
+
   // Adapter function for use with AtExit().  This should be called single
   // threaded, so don't use atomic operations.
   // Calling OnExit while singleton is in use by other threads is a mistake.
   static void OnExit(void* /*unused*/) {
     // AtExit should only ever be register after the singleton instance was
     // created.  We should only ever get here with a valid instance_ pointer.
-    Traits::Delete(
-        reinterpret_cast<Type*>(base::subtle::NoBarrier_Load(&instance_)));
+    Traits::Delete(reinterpret_cast<Type*>(subtle::NoBarrier_Load(&instance_)));
     instance_ = 0;
   }
-  static base::subtle::AtomicWord instance_;
+  static subtle::AtomicWord instance_;
 };
 
 template <typename Type, typename Traits, typename DifferentiatingType>
-base::subtle::AtomicWord Singleton<Type, Traits, DifferentiatingType>::
-    instance_ = 0;
+subtle::AtomicWord Singleton<Type, Traits, DifferentiatingType>::instance_ = 0;
+
+}  // namespace base
 
 #endif  // BASE_MEMORY_SINGLETON_H_
diff --git a/src/base/memory/singleton_objc.h b/src/base/memory/singleton_objc.h
deleted file mode 100644
index 6df3f77..0000000
--- a/src/base/memory/singleton_objc.h
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Support for using the Singleton<T> pattern with Objective-C objects.  A
-// SingletonObjC is the same as a Singleton, except the default traits are
-// appropriate for Objective-C objects.  A typical Objective-C object of type
-// NSExampleType can be maintained as a singleton and accessed with:
-//
-//   NSExampleType* exampleSingleton = SingletonObjC<NSExampleType>::get();
-//
-// The first time this is used, it will create exampleSingleton as the result
-// of [[NSExampleType alloc] init].  Subsequent calls will return the same
-// NSExampleType* object.  The object will be released by calling
-// -[NSExampleType release] when Singleton's atexit routines run
-// (see singleton.h).
-//
-// For Objective-C objects initialized through means other than the
-// no-parameter -init selector, DefaultSingletonObjCTraits may be extended
-// as needed:
-//
-//   struct FooSingletonTraits : public DefaultSingletonObjCTraits<Foo> {
-//     static Foo* New() {
-//       return [[Foo alloc] initWithName:@"selecty"];
-//     }
-//   };
-//   ...
-//   Foo* widgetSingleton = SingletonObjC<Foo, FooSingletonTraits>::get();
-
-#ifndef BASE_MEMORY_SINGLETON_OBJC_H_
-#define BASE_MEMORY_SINGLETON_OBJC_H_
-
-#import <Foundation/Foundation.h>
-#include "base/memory/singleton.h"
-
-// Singleton traits usable to manage traditional Objective-C objects, which
-// are instantiated by sending |alloc| and |init| messages, and are deallocated
-// in a memory-managed environment when their retain counts drop to 0 by
-// sending |release| messages.
-template<typename Type>
-struct DefaultSingletonObjCTraits : public DefaultSingletonTraits<Type> {
-  static Type* New() {
-    return [[Type alloc] init];
-  }
-
-  static void Delete(Type* object) {
-    [object release];
-  }
-};
-
-// Exactly like Singleton, but without the DefaultSingletonObjCTraits as the
-// default trait class.  This makes it straightforward for Objective-C++ code
-// to hold Objective-C objects as singletons.
-template<typename Type,
-         typename Traits = DefaultSingletonObjCTraits<Type>,
-         typename DifferentiatingType = Type>
-class SingletonObjC : public Singleton<Type, Traits, DifferentiatingType> {
-};
-
-#endif  // BASE_MEMORY_SINGLETON_OBJC_H_
diff --git a/src/base/memory/singleton_unittest.cc b/src/base/memory/singleton_unittest.cc
index 78e9c19..4d3f407 100644
--- a/src/base/memory/singleton_unittest.cc
+++ b/src/base/memory/singleton_unittest.cc
@@ -2,18 +2,27 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "base/at_exit.h"
-#include "base/file_util.h"
 #include "base/memory/singleton.h"
-#include "base/path_service.h"
+#include "base/at_exit.h"
+#include "starboard/types.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
+namespace base {
 namespace {
 
-COMPILE_ASSERT(DefaultSingletonTraits<int>::kRegisterAtExit == true, a);
+static_assert(DefaultSingletonTraits<int>::kRegisterAtExit == true,
+              "object must be deleted on process exit");
 
 typedef void (*CallbackFunc)();
 
+template <size_t alignment>
+class AlignedData {
+ public:
+  AlignedData() = default;
+  ~AlignedData() = default;
+  alignas(alignment) char data_[alignment];
+};
+
 class IntSingleton {
  public:
   static IntSingleton* GetInstance() {
@@ -61,7 +70,7 @@
 
 class CallbackSingleton {
  public:
-  CallbackSingleton() : callback_(NULL) { }
+  CallbackSingleton() : callback_(nullptr) {}
   CallbackFunc callback_;
 };
 
@@ -113,11 +122,11 @@
 template <class Type>
 class AlignedTestSingleton {
  public:
-  AlignedTestSingleton() {}
-  ~AlignedTestSingleton() {}
+  AlignedTestSingleton() = default;
+  ~AlignedTestSingleton() = default;
   static AlignedTestSingleton* GetInstance() {
     return Singleton<AlignedTestSingleton,
-        StaticMemorySingletonTraits<AlignedTestSingleton> >::get();
+                     StaticMemorySingletonTraits<AlignedTestSingleton>>::get();
   }
 
   Type type_;
@@ -149,13 +158,12 @@
   return &CallbackSingletonWithStaticTrait::GetInstance()->callback_;
 }
 
-}  // namespace
 
 class SingletonTest : public testing::Test {
  public:
-  SingletonTest() {}
+  SingletonTest() = default;
 
-  virtual void SetUp() override {
+  void SetUp() override {
     non_leak_called_ = false;
     leaky_called_ = false;
     static_called_ = false;
@@ -209,7 +217,7 @@
   CallbackFunc* static_singleton;
 
   {
-    base::ShadowingAtExitManager sem;
+    ShadowingAtExitManager sem;
     {
       singleton_int = SingletonInt();
     }
@@ -240,10 +248,10 @@
   DeleteLeakySingleton();
 
   // The static singleton can't be acquired post-atexit.
-  EXPECT_EQ(NULL, GetStaticSingleton());
+  EXPECT_EQ(nullptr, GetStaticSingleton());
 
   {
-    base::ShadowingAtExitManager sem;
+    ShadowingAtExitManager sem;
     // Verifiy that the variables were reset.
     {
       singleton_int = SingletonInt();
@@ -256,7 +264,7 @@
     {
       // Resurrect the static singleton, and assert that it
       // still points to the same (static) memory.
-      CallbackSingletonWithStaticTrait::Trait::Resurrect();
+      CallbackSingletonWithStaticTrait::Trait::ResurrectForTesting();
       EXPECT_EQ(GetStaticSingleton(), static_singleton);
     }
   }
@@ -268,22 +276,30 @@
     EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(ptr) & (align - 1))
 
 TEST_F(SingletonTest, Alignment) {
-  using base::AlignedMemory;
-
   // Create some static singletons with increasing sizes and alignment
   // requirements. By ordering this way, the linker will need to do some work to
   // ensure proper alignment of the static data.
-  AlignedTestSingleton<int32>* align4 =
-      AlignedTestSingleton<int32>::GetInstance();
-  AlignedTestSingleton<AlignedMemory<32, 32> >* align32 =
-      AlignedTestSingleton<AlignedMemory<32, 32> >::GetInstance();
-  AlignedTestSingleton<AlignedMemory<128, 128> >* align128 =
-      AlignedTestSingleton<AlignedMemory<128, 128> >::GetInstance();
-  AlignedTestSingleton<AlignedMemory<4096, 4096> >* align4096 =
-      AlignedTestSingleton<AlignedMemory<4096, 4096> >::GetInstance();
+  AlignedTestSingleton<int32_t>* align4 =
+      AlignedTestSingleton<int32_t>::GetInstance();
+  AlignedTestSingleton<AlignedData<32>>* align32 =
+      AlignedTestSingleton<AlignedData<32>>::GetInstance();
+#if !defined(STARBOARD)
+  AlignedTestSingleton<AlignedData<128>>* align128 =
+      AlignedTestSingleton<AlignedData<128>>::GetInstance();
+  AlignedTestSingleton<AlignedData<4096>>* align4096 =
+      AlignedTestSingleton<AlignedData<4096>>::GetInstance();
+#endif
 
   EXPECT_ALIGNED(align4, 4);
   EXPECT_ALIGNED(align32, 32);
+// At least on Raspi, alignas with big alignment numbers does not work and
+// that is compliant with C++ standard as the alignment is larger than
+// std::max_align_t.
+#if !defined(STARBOARD)
   EXPECT_ALIGNED(align128, 128);
   EXPECT_ALIGNED(align4096, 4096);
+#endif
 }
+
+}  // namespace
+}  // namespace base
diff --git a/src/base/memory/unsafe_shared_memory_region.cc b/src/base/memory/unsafe_shared_memory_region.cc
new file mode 100644
index 0000000..a28ef9c
--- /dev/null
+++ b/src/base/memory/unsafe_shared_memory_region.cc
@@ -0,0 +1,90 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/unsafe_shared_memory_region.h"
+
+#include <utility>
+
+#include "base/memory/shared_memory.h"
+
+namespace base {
+
+// static
+UnsafeSharedMemoryRegion UnsafeSharedMemoryRegion::Create(size_t size) {
+  subtle::PlatformSharedMemoryRegion handle =
+      subtle::PlatformSharedMemoryRegion::CreateUnsafe(size);
+
+  return UnsafeSharedMemoryRegion(std::move(handle));
+}
+
+// static
+UnsafeSharedMemoryRegion UnsafeSharedMemoryRegion::CreateFromHandle(
+    const SharedMemoryHandle& handle) {
+  if (!handle.IsValid())
+    return UnsafeSharedMemoryRegion();
+  auto platform_region =
+      subtle::PlatformSharedMemoryRegion::TakeFromSharedMemoryHandle(
+          handle, subtle::PlatformSharedMemoryRegion::Mode::kUnsafe);
+  if (!platform_region.IsValid()) {
+    return UnsafeSharedMemoryRegion();
+  }
+  return Deserialize(std::move(platform_region));
+}
+
+// static
+UnsafeSharedMemoryRegion UnsafeSharedMemoryRegion::Deserialize(
+    subtle::PlatformSharedMemoryRegion handle) {
+  return UnsafeSharedMemoryRegion(std::move(handle));
+}
+
+// static
+subtle::PlatformSharedMemoryRegion
+UnsafeSharedMemoryRegion::TakeHandleForSerialization(
+    UnsafeSharedMemoryRegion region) {
+  return std::move(region.handle_);
+}
+
+UnsafeSharedMemoryRegion::UnsafeSharedMemoryRegion() = default;
+UnsafeSharedMemoryRegion::UnsafeSharedMemoryRegion(
+    UnsafeSharedMemoryRegion&& region) = default;
+UnsafeSharedMemoryRegion& UnsafeSharedMemoryRegion::operator=(
+    UnsafeSharedMemoryRegion&& region) = default;
+UnsafeSharedMemoryRegion::~UnsafeSharedMemoryRegion() = default;
+
+UnsafeSharedMemoryRegion UnsafeSharedMemoryRegion::Duplicate() const {
+  return UnsafeSharedMemoryRegion(handle_.Duplicate());
+}
+
+WritableSharedMemoryMapping UnsafeSharedMemoryRegion::Map() const {
+  return MapAt(0, handle_.GetSize());
+}
+
+WritableSharedMemoryMapping UnsafeSharedMemoryRegion::MapAt(off_t offset,
+                                                            size_t size) const {
+  if (!IsValid())
+    return {};
+
+  void* memory = nullptr;
+  size_t mapped_size = 0;
+  if (!handle_.MapAt(offset, size, &memory, &mapped_size))
+    return {};
+
+  return WritableSharedMemoryMapping(memory, size, mapped_size,
+                                     handle_.GetGUID());
+}
+
+bool UnsafeSharedMemoryRegion::IsValid() const {
+  return handle_.IsValid();
+}
+
+UnsafeSharedMemoryRegion::UnsafeSharedMemoryRegion(
+    subtle::PlatformSharedMemoryRegion handle)
+    : handle_(std::move(handle)) {
+  if (handle_.IsValid()) {
+    CHECK_EQ(handle_.GetMode(),
+             subtle::PlatformSharedMemoryRegion::Mode::kUnsafe);
+  }
+}
+
+}  // namespace base
diff --git a/src/base/memory/unsafe_shared_memory_region.h b/src/base/memory/unsafe_shared_memory_region.h
new file mode 100644
index 0000000..fae6032
--- /dev/null
+++ b/src/base/memory/unsafe_shared_memory_region.h
@@ -0,0 +1,132 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_UNSAFE_SHARED_MEMORY_REGION_H_
+#define BASE_MEMORY_UNSAFE_SHARED_MEMORY_REGION_H_
+
+#include "base/gtest_prod_util.h"
+#include "base/macros.h"
+#include "base/memory/platform_shared_memory_region.h"
+#include "base/memory/shared_memory_handle.h"
+#include "base/memory/shared_memory_mapping.h"
+
+namespace base {
+
+// Scoped move-only handle to a region of platform shared memory. The instance
+// owns the platform handle it wraps. Mappings created by this region are
+// writable. These mappings remain valid even after the region handle is moved
+// or destroyed.
+//
+// NOTE: UnsafeSharedMemoryRegion cannot be converted to a read-only region. Use
+// with caution as the region will be writable to any process with a handle to
+// the region.
+//
+// Use this if and only if the following is true:
+// - You do not need to share the region as read-only, and,
+// - You need to have several instances of the region simultaneously, possibly
+//   in different processes, that can produce writable mappings.
+
+class BASE_EXPORT UnsafeSharedMemoryRegion {
+ public:
+  using MappingType = WritableSharedMemoryMapping;
+  // Creates a new UnsafeSharedMemoryRegion instance of a given size that can be
+  // used for mapping writable shared memory into the virtual address space.
+  //
+  // This call will fail if the process does not have sufficient permissions to
+  // create a shared memory region itself. See
+  // mojo::CreateUnsafeSharedMemoryRegion in
+  // mojo/public/cpp/base/shared_memory_utils.h for creating a shared memory
+// region from an unprivileged process where a broker must be used.
+  static UnsafeSharedMemoryRegion Create(size_t size);
+
+  // Creates a new UnsafeSharedMemoryRegion from a SharedMemoryHandle. This
+  // consumes the handle, which should not be used again.
+  // TODO(crbug.com/795291): this should only be used while transitioning from
+  // the old shared memory API, and should be removed when done.
+  static UnsafeSharedMemoryRegion CreateFromHandle(
+      const base::SharedMemoryHandle& handle);
+
+  // Returns an UnsafeSharedMemoryRegion built from a platform-specific handle
+  // that was taken from another UnsafeSharedMemoryRegion instance. Returns an
+  // invalid region iff the |handle| is invalid. CHECK-fails if the |handle|
+  // isn't unsafe.
+  // This should be used only by the code passing a handle across
+  // process boundaries.
+  static UnsafeSharedMemoryRegion Deserialize(
+      subtle::PlatformSharedMemoryRegion handle);
+
+  // Extracts a platform handle from the region. Ownership is transferred to the
+  // returned region object.
+  // This should be used only for sending the handle from the current
+  // process to another.
+  static subtle::PlatformSharedMemoryRegion TakeHandleForSerialization(
+      UnsafeSharedMemoryRegion region);
+
+  // Default constructor initializes an invalid instance.
+  UnsafeSharedMemoryRegion();
+
+  // Move operations are allowed.
+  UnsafeSharedMemoryRegion(UnsafeSharedMemoryRegion&&);
+  UnsafeSharedMemoryRegion& operator=(UnsafeSharedMemoryRegion&&);
+
+  // Destructor closes shared memory region if valid.
+  // All created mappings will remain valid.
+  ~UnsafeSharedMemoryRegion();
+
+  // Duplicates the underlying platform handle and creates a new
+  // UnsafeSharedMemoryRegion instance that owns the newly created handle.
+  // Returns a valid UnsafeSharedMemoryRegion on success, invalid otherwise.
+  // The current region instance remains valid in any case.
+  UnsafeSharedMemoryRegion Duplicate() const;
+
+  // Maps the shared memory region into the caller's address space with write
+  // access. The mapped address is guaranteed to have an alignment of
+  // at least |subtle::PlatformSharedMemoryRegion::kMapMinimumAlignment|.
+  // Returns a valid WritableSharedMemoryMapping instance on success, invalid
+  // otherwise.
+  WritableSharedMemoryMapping Map() const;
+
+  // Same as above, but maps only |size| bytes of the shared memory region
+  // starting with the given |offset|. |offset| must be aligned to value of
+  // |SysInfo::VMAllocationGranularity()|. Returns an invalid mapping if
+  // requested bytes are out of the region limits.
+  WritableSharedMemoryMapping MapAt(off_t offset, size_t size) const;
+
+  // Whether the underlying platform handle is valid.
+  bool IsValid() const;
+
+  // Returns the maximum mapping size that can be created from this region.
+  size_t GetSize() const {
+    DCHECK(IsValid());
+    return handle_.GetSize();
+  }
+
+  // Returns 128-bit GUID of the region.
+  const UnguessableToken& GetGUID() const {
+    DCHECK(IsValid());
+    return handle_.GetGUID();
+  }
+
+ private:
+  FRIEND_TEST_ALL_PREFIXES(DiscardableSharedMemoryTest,
+                           LockShouldFailIfPlatformLockPagesFails);
+  friend class DiscardableSharedMemory;
+
+  explicit UnsafeSharedMemoryRegion(subtle::PlatformSharedMemoryRegion handle);
+
+  // Returns a platform shared memory handle. |this| remains the owner of the
+  // handle.
+  subtle::PlatformSharedMemoryRegion::PlatformHandle GetPlatformHandle() const {
+    DCHECK(IsValid());
+    return handle_.GetPlatformHandle();
+  }
+
+  subtle::PlatformSharedMemoryRegion handle_;
+
+  DISALLOW_COPY_AND_ASSIGN(UnsafeSharedMemoryRegion);
+};
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_UNSAFE_SHARED_MEMORY_REGION_H_
diff --git a/src/base/memory/weak_ptr.cc b/src/base/memory/weak_ptr.cc
index 9dec8fd..c993fcb 100644
--- a/src/base/memory/weak_ptr.cc
+++ b/src/base/memory/weak_ptr.cc
@@ -7,66 +7,89 @@
 namespace base {
 namespace internal {
 
-WeakReference::Flag::Flag() : is_valid_(true) {
+WeakReference::Flag::Flag() {
+  // Flags only become bound when checked for validity, or invalidated,
+  // so that we can check that later validity/invalidation operations on
+  // the same Flag take place on the same sequenced thread.
+  DETACH_FROM_SEQUENCE(sequence_checker_);
 }
 
 void WeakReference::Flag::Invalidate() {
   // The flag being invalidated with a single ref implies that there are no
   // weak pointers in existence. Allow deletion on other thread in this case.
-  DCHECK(thread_checker_.CalledOnValidThread() || HasOneRef());
-  is_valid_ = false;
+#if DCHECK_IS_ON()
+  DCHECK(sequence_checker_.CalledOnValidSequence() || HasOneRef())
+      << "WeakPtrs must be invalidated on the same sequenced thread.";
+#endif
+  invalidated_.Set();
 }
 
 bool WeakReference::Flag::IsValid() const {
-  DCHECK(thread_checker_.CalledOnValidThread());
-  return is_valid_;
+  DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_)
+      << "WeakPtrs must be checked on the same sequenced thread.";
+  return !invalidated_.IsSet();
 }
 
-WeakReference::Flag::~Flag() {
+bool WeakReference::Flag::MaybeValid() const {
+  return !invalidated_.IsSet();
 }
 
-WeakReference::WeakReference() {
-}
+WeakReference::Flag::~Flag() = default;
 
-WeakReference::WeakReference(const Flag* flag) : flag_(flag) {
-}
+WeakReference::WeakReference() = default;
 
-WeakReference::~WeakReference() {
-}
+WeakReference::WeakReference(const scoped_refptr<Flag>& flag) : flag_(flag) {}
 
-bool WeakReference::is_valid() const {
+WeakReference::~WeakReference() = default;
+
+WeakReference::WeakReference(WeakReference&& other) = default;
+
+WeakReference::WeakReference(const WeakReference& other) = default;
+
+bool WeakReference::IsValid() const {
   return flag_ && flag_->IsValid();
 }
 
-WeakReferenceOwner::WeakReferenceOwner() {
+bool WeakReference::MaybeValid() const {
+  return flag_ && flag_->MaybeValid();
 }
 
+WeakReferenceOwner::WeakReferenceOwner() = default;
+
 WeakReferenceOwner::~WeakReferenceOwner() {
   Invalidate();
 }
 
 WeakReference WeakReferenceOwner::GetRef() const {
-  // We also want to reattach to the current thread if all previous references
-  // have gone away.
+  // If we hold the last reference to the Flag then create a new one.
   if (!HasRefs())
     flag_ = new WeakReference::Flag();
+
   return WeakReference(flag_);
 }
 
 void WeakReferenceOwner::Invalidate() {
   if (flag_) {
     flag_->Invalidate();
-    flag_ = NULL;
+    flag_ = nullptr;
   }
 }
 
-WeakPtrBase::WeakPtrBase() {
+WeakPtrBase::WeakPtrBase() : ptr_(0) {}
+
+WeakPtrBase::~WeakPtrBase() = default;
+
+WeakPtrBase::WeakPtrBase(const WeakReference& ref, uintptr_t ptr)
+    : ref_(ref), ptr_(ptr) {
+  DCHECK(ptr_);
 }
 
-WeakPtrBase::~WeakPtrBase() {
+WeakPtrFactoryBase::WeakPtrFactoryBase(uintptr_t ptr) : ptr_(ptr) {
+  DCHECK(ptr_);
 }
 
-WeakPtrBase::WeakPtrBase(const WeakReference& ref) : ref_(ref) {
+WeakPtrFactoryBase::~WeakPtrFactoryBase() {
+  ptr_ = 0;
 }
 
 }  // namespace internal
diff --git a/src/base/memory/weak_ptr.h b/src/base/memory/weak_ptr.h
index 6dc93b7..4d9c162 100644
--- a/src/base/memory/weak_ptr.h
+++ b/src/base/memory/weak_ptr.h
@@ -2,24 +2,28 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-// Weak pointers help in cases where you have many objects referring back to a
-// shared object and you wish for the lifetime of the shared object to not be
-// bound to the lifetime of the referrers.  In other words, this is useful when
-// reference counting is not a good fit.
-//
-// A common alternative to weak pointers is to have the shared object hold a
-// list of all referrers, and then when the shared object is destroyed, it
-// calls a method on the referrers to tell them to drop their references.  This
-// approach also requires the referrers to tell the shared object when they get
-// destroyed so that the shared object can remove the referrer from its list of
-// referrers.  Such a solution works, but it is a bit complex.
-//
+// Weak pointers are pointers to an object that do not affect its lifetime,
+// and which may be invalidated (i.e. reset to nullptr) by the object, or its
+// owner, at any time, most commonly when the object is about to be deleted.
+
+// Weak pointers are useful when an object needs to be accessed safely by one
+// or more objects other than its owner, and those callers can cope with the
+// object vanishing and e.g. tasks posted to it being silently dropped.
+// Reference-counting such an object would complicate the ownership graph and
+// make it harder to reason about the object's lifetime.
+
 // EXAMPLE:
 //
-//  class Controller : public SupportsWeakPtr<Controller> {
+//  class Controller {
 //   public:
-//    void SpawnWorker() { Worker::StartNew(AsWeakPtr()); }
+//    Controller() : weak_factory_(this) {}
+//    void SpawnWorker() { Worker::StartNew(weak_factory_.GetWeakPtr()); }
 //    void WorkComplete(const Result& result) { ... }
+//   private:
+//    // Member variables should appear before the WeakPtrFactory, to ensure
+//    // that any WeakPtrs to Controller are invalidated before its member
+//    // variables' destructors are executed, rendering them invalid.
+//    WeakPtrFactory<Controller> weak_factory_;
 //  };
 //
 //  class Worker {
@@ -38,44 +42,43 @@
 //    WeakPtr<Controller> controller_;
 //  };
 //
-// Given the above classes, a consumer may allocate a Controller object, call
-// SpawnWorker several times, and then destroy the Controller object before all
-// of the workers have completed.  Because the Worker class only holds a weak
-// pointer to the Controller, we don't have to worry about the Worker
-// dereferencing the Controller back pointer after the Controller has been
-// destroyed.
+// With this implementation a caller may use SpawnWorker() to dispatch multiple
+// Workers and subsequently delete the Controller, without waiting for all
+// Workers to have completed.
+
+// ------------------------- IMPORTANT: Thread-safety -------------------------
+
+// Weak pointers may be passed safely between sequences, but must always be
+// dereferenced and invalidated on the same SequencedTaskRunner otherwise
+// checking the pointer would be racey.
 //
-// ------------------------ Thread-safety notes ------------------------
-// When you get a WeakPtr (from a WeakPtrFactory or SupportsWeakPtr), if it's
-// the only one pointing to the object, the object become bound to the
-// current thread, as well as this WeakPtr and all later ones get created.
+// To ensure correct use, the first time a WeakPtr issued by a WeakPtrFactory
+// is dereferenced, the factory and its WeakPtrs become bound to the calling
+// sequence or current SequencedWorkerPool token, and cannot be dereferenced or
+// invalidated on any other task runner. Bound WeakPtrs can still be handed
+// off to other task runners, e.g. to use to post tasks back to object on the
+// bound sequence.
 //
-// You may only dereference the WeakPtr on the thread it binds to. However, it
-// is safe to destroy the WeakPtr object on another thread. Because of this,
-// querying WeakPtrFactory's HasWeakPtrs() method can be racy.
+// If all WeakPtr objects are destroyed or invalidated then the factory is
+// unbound from the SequencedTaskRunner/Thread. The WeakPtrFactory may then be
+// destroyed, or new WeakPtr objects may be used, from a different sequence.
 //
-// On the other hand, the object that supports WeakPtr (extends SupportsWeakPtr)
-// can only be deleted from the thread it binds to, until all WeakPtrs are
-// deleted.
-//
-// Calling SupportsWeakPtr::DetachFromThread() can work around the limitations
-// above and cancel the thread binding of the object and all WeakPtrs pointing
-// to it, but it's not recommended and unsafe.
-//
-// WeakPtrs may be copy-constructed or assigned on threads other than the thread
-// they are bound to. This does not change the thread binding. So these WeakPtrs
-// may only be dereferenced on the thread that the original WeakPtr was bound
-// to.
+// Thus, at least one WeakPtr object must exist and have been dereferenced on
+// the correct sequence to enforce that other WeakPtr objects will enforce they
+// are used on the desired sequence.
 
 #ifndef BASE_MEMORY_WEAK_PTR_H_
 #define BASE_MEMORY_WEAK_PTR_H_
 
-#include "base/basictypes.h"
+#include <cstddef>
+#include <type_traits>
+
 #include "base/base_export.h"
 #include "base/logging.h"
+#include "base/macros.h"
 #include "base/memory/ref_counted.h"
-#include "base/template_util.h"
-#include "base/threading/thread_checker.h"
+#include "base/sequence_checker.h"
+#include "base/synchronization/atomic_flag.h"
 
 namespace base {
 
@@ -88,31 +91,37 @@
 
 class BASE_EXPORT WeakReference {
  public:
-  // While Flag is bound to a specific thread, it may be deleted from another
-  // via base::WeakPtr::~WeakPtr().
-  class Flag : public RefCountedThreadSafe<Flag> {
+  // Although Flag is bound to a specific SequencedTaskRunner, it may be
+  // deleted from another via base::WeakPtr::~WeakPtr().
+  class BASE_EXPORT Flag : public RefCountedThreadSafe<Flag> {
    public:
     Flag();
 
     void Invalidate();
     bool IsValid() const;
 
-    void DetachFromThread() { thread_checker_.DetachFromThread(); }
+    bool MaybeValid() const;
 
    private:
     friend class base::RefCountedThreadSafe<Flag>;
 
     ~Flag();
 
-    ThreadChecker thread_checker_;
-    bool is_valid_;
+    SEQUENCE_CHECKER(sequence_checker_);
+    AtomicFlag invalidated_;
   };
 
   WeakReference();
-  explicit WeakReference(const Flag* flag);
+  explicit WeakReference(const scoped_refptr<Flag>& flag);
   ~WeakReference();
 
-  bool is_valid() const;
+  WeakReference(WeakReference&& other);
+  WeakReference(const WeakReference& other);
+  WeakReference& operator=(WeakReference&& other) = default;
+  WeakReference& operator=(const WeakReference& other) = default;
+
+  bool IsValid() const;
+  bool MaybeValid() const;
 
  private:
   scoped_refptr<const Flag> flag_;
@@ -125,17 +134,10 @@
 
   WeakReference GetRef() const;
 
-  bool HasRefs() const {
-    return flag_.get() && !flag_->HasOneRef();
-  }
+  bool HasRefs() const { return flag_ && !flag_->HasOneRef(); }
 
   void Invalidate();
 
-  // Indicates that this object will be used on another thread from now on.
-  void DetachFromThread() {
-    if (flag_) flag_->DetachFromThread();
-  }
-
  private:
   mutable scoped_refptr<WeakReference::Flag> flag_;
 };
@@ -149,10 +151,24 @@
   WeakPtrBase();
   ~WeakPtrBase();
 
+  WeakPtrBase(const WeakPtrBase& other) = default;
+  WeakPtrBase(WeakPtrBase&& other) = default;
+  WeakPtrBase& operator=(const WeakPtrBase& other) = default;
+  WeakPtrBase& operator=(WeakPtrBase&& other) = default;
+
+  void reset() {
+    ref_ = internal::WeakReference();
+    ptr_ = 0;
+  }
+
  protected:
-  explicit WeakPtrBase(const WeakReference& ref);
+  WeakPtrBase(const WeakReference& ref, uintptr_t ptr);
 
   WeakReference ref_;
+
+  // This pointer is only valid when ref_.IsValid() is true.  Otherwise, its
+  // value is undefined (as opposed to nullptr).
+  uintptr_t ptr_;
 };
 
 // This class provides a common implementation of common functions that would
@@ -164,13 +180,14 @@
   // conversion will only compile if there is exists a Base which inherits
   // from SupportsWeakPtr<Base>. See base::AsWeakPtr() below for a helper
   // function that makes calling this easier.
+  //
+  // Precondition: t != nullptr
   template<typename Derived>
   static WeakPtr<Derived> StaticAsWeakPtr(Derived* t) {
-    typedef
-        is_convertible<Derived, internal::SupportsWeakPtrBase&> convertible;
-    COMPILE_ASSERT(convertible::value,
-                   AsWeakPtr_argument_inherits_from_SupportsWeakPtr);
-    return AsWeakPtrImpl<Derived>(t, *t);
+    static_assert(
+        std::is_base_of<internal::SupportsWeakPtrBase, Derived>::value,
+        "AsWeakPtr argument must inherit from SupportsWeakPtr");
+    return AsWeakPtrImpl<Derived>(t);
   }
 
  private:
@@ -178,10 +195,10 @@
   // which is an instance of SupportsWeakPtr<Base>. We can then safely
   // static_cast the Base* to a Derived*.
   template <typename Derived, typename Base>
-  static WeakPtr<Derived> AsWeakPtrImpl(
-      Derived* t, const SupportsWeakPtr<Base>&) {
-    WeakPtr<Base> ptr = t->Base::AsWeakPtr();
-    return WeakPtr<Derived>(ptr.ref_, static_cast<Derived*>(ptr.ptr_));
+  static WeakPtr<Derived> AsWeakPtrImpl(SupportsWeakPtr<Base>* t) {
+    WeakPtr<Base> ptr = t->AsWeakPtr();
+    return WeakPtr<Derived>(
+        ptr.ref_, static_cast<Derived*>(reinterpret_cast<Base*>(ptr.ptr_)));
   }
 };
 
@@ -205,73 +222,159 @@
 template <typename T>
 class WeakPtr : public internal::WeakPtrBase {
  public:
-  WeakPtr() : ptr_(NULL) {
-  }
+  WeakPtr() = default;
 
-  // Allow conversion from U to T provided U "is a" T.
+  WeakPtr(std::nullptr_t) {}
+
+  // Allow conversion from U to T provided U "is a" T. Note that this
+  // is separate from the (implicit) copy and move constructors.
   template <typename U>
-  WeakPtr(const WeakPtr<U>& other) : WeakPtrBase(other), ptr_(other.get()) {
+  WeakPtr(const WeakPtr<U>& other) : WeakPtrBase(other) {
+    // Need to cast from U* to T* to do pointer adjustment in case of multiple
+    // inheritance. This also enforces the "U is a T" rule.
+    T* t = reinterpret_cast<U*>(other.ptr_);
+    ptr_ = reinterpret_cast<uintptr_t>(t);
+  }
+  template <typename U>
+  WeakPtr(WeakPtr<U>&& other) : WeakPtrBase(std::move(other)) {
+    // Need to cast from U* to T* to do pointer adjustment in case of multiple
+    // inheritance. This also enforces the "U is a T" rule.
+    T* t = reinterpret_cast<U*>(other.ptr_);
+    ptr_ = reinterpret_cast<uintptr_t>(t);
   }
 
-  T* get() const { return ref_.is_valid() ? ptr_ : NULL; }
+  T* get() const {
+    return ref_.IsValid() ? reinterpret_cast<T*>(ptr_) : nullptr;
+  }
+
+#if defined(STARBOARD)
+  // TODO(johnx): Remove the implicit converter.
   operator T*() const { return get(); }
+#endif
 
   T& operator*() const {
-    DCHECK(get() != NULL);
+    DCHECK(get() != nullptr);
     return *get();
   }
   T* operator->() const {
-    DCHECK(get() != NULL);
+    DCHECK(get() != nullptr);
     return get();
   }
 
-  void reset() {
-    ref_ = internal::WeakReference();
-    ptr_ = NULL;
-  }
+  // Allow conditionals to test validity, e.g. if (weak_ptr) {...};
+  explicit operator bool() const { return get() != nullptr; }
+
+  // Returns false if the WeakPtr is confirmed to be invalid. This call is safe
+  // to make from any thread, e.g. to optimize away unnecessary work, but
+  // operator bool() must always be called, on the correct sequence, before
+  // actually using the pointer.
+  //
+  // Warning: as with any object, this call is only thread-safe if the WeakPtr
+  // instance isn't being re-assigned or reset() racily with this call.
+  bool MaybeValid() const { return ref_.MaybeValid(); }
+
+  // Returns whether the object |this| points to has been invalidated. This can
+  // be used to distinguish a WeakPtr to a destroyed object from one that has
+  // been explicitly set to null.
+  bool WasInvalidated() const { return ptr_ && !ref_.IsValid(); }
 
  private:
   friend class internal::SupportsWeakPtrBase;
+  template <typename U> friend class WeakPtr;
   friend class SupportsWeakPtr<T>;
   friend class WeakPtrFactory<T>;
 
   WeakPtr(const internal::WeakReference& ref, T* ptr)
-      : WeakPtrBase(ref),
-        ptr_(ptr) {
-  }
-
-  // This pointer is only valid when ref_.is_valid() is true.  Otherwise, its
-  // value is undefined (as opposed to NULL).
-  T* ptr_;
+      : WeakPtrBase(ref, reinterpret_cast<uintptr_t>(ptr)) {}
 };
 
-// A class may extend from SupportsWeakPtr to expose weak pointers to itself.
-// This is useful in cases where you want others to be able to get a weak
-// pointer to your class.  It also has the property that you don't need to
-// initialize it from your constructor.
+#if !defined(STARBOARD)
+// Allow callers to compare WeakPtrs against nullptr to test validity.
+template <class T>
+bool operator!=(const WeakPtr<T>& weak_ptr, std::nullptr_t) {
+  return !(weak_ptr == nullptr);
+}
+template <class T>
+bool operator!=(std::nullptr_t, const WeakPtr<T>& weak_ptr) {
+  return weak_ptr != nullptr;
+}
+template <class T>
+bool operator==(const WeakPtr<T>& weak_ptr, std::nullptr_t) {
+  return weak_ptr.get() == nullptr;
+}
+template <class T>
+bool operator==(std::nullptr_t, const WeakPtr<T>& weak_ptr) {
+  return weak_ptr == nullptr;
+}
+#endif
+
+namespace internal {
+class BASE_EXPORT WeakPtrFactoryBase {
+ protected:
+  WeakPtrFactoryBase(uintptr_t ptr);
+  ~WeakPtrFactoryBase();
+  internal::WeakReferenceOwner weak_reference_owner_;
+  uintptr_t ptr_;
+};
+}  // namespace internal
+
+// A class may be composed of a WeakPtrFactory and thereby
+// control how it exposes weak pointers to itself.  This is helpful if you only
+// need weak pointers within the implementation of a class.  This class is also
+// useful when working with primitive types.  For example, you could have a
+// WeakPtrFactory<bool> that is used to pass around a weak reference to a bool.
+template <class T>
+class WeakPtrFactory : public internal::WeakPtrFactoryBase {
+ public:
+  explicit WeakPtrFactory(T* ptr)
+      : WeakPtrFactoryBase(reinterpret_cast<uintptr_t>(ptr)) {}
+
+  ~WeakPtrFactory() = default;
+
+  WeakPtr<T> GetWeakPtr() {
+    return WeakPtr<T>(weak_reference_owner_.GetRef(),
+                      reinterpret_cast<T*>(ptr_));
+  }
+
+  // Call this method to invalidate all existing weak pointers.
+  void InvalidateWeakPtrs() {
+    DCHECK(ptr_);
+    weak_reference_owner_.Invalidate();
+  }
+
+  // Call this method to determine if any weak pointers exist.
+  bool HasWeakPtrs() const {
+    DCHECK(ptr_);
+    return weak_reference_owner_.HasRefs();
+  }
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(WeakPtrFactory);
+};
+
+// A class may extend from SupportsWeakPtr to let others take weak pointers to
+// it. This avoids the class itself implementing boilerplate to dispense weak
+// pointers.  However, since SupportsWeakPtr's destructor won't invalidate
+// weak pointers to the class until after the derived class' members have been
+// destroyed, its use can lead to subtle use-after-destroy issues.
 template <class T>
 class SupportsWeakPtr : public internal::SupportsWeakPtrBase {
  public:
-  SupportsWeakPtr() {}
+  SupportsWeakPtr() = default;
 
   WeakPtr<T> AsWeakPtr() {
     return WeakPtr<T>(weak_reference_owner_.GetRef(), static_cast<T*>(this));
   }
 
-  // Indicates that this object will be used on another thread from now on.
-  void DetachFromThread() {
-    weak_reference_owner_.DetachFromThread();
-  }
-
- protected:
-  ~SupportsWeakPtr() {}
-
+#if defined(STARBOARD)
   // Call this method to invalidate all existing weak pointers.
   // This may be useful to call explicitly in a destructor of a derived class,
   // as the SupportsWeakPtr destructor won't run until late in destruction.
-  void InvalidateWeakPtrs() {
-    weak_reference_owner_.Invalidate();
-  }
+  void InvalidateWeakPtrs() { weak_reference_owner_.Invalidate(); }
+#endif
+
+ protected:
+  ~SupportsWeakPtr() = default;
 
  private:
   internal::WeakReferenceOwner weak_reference_owner_;
@@ -301,50 +404,6 @@
   return internal::SupportsWeakPtrBase::StaticAsWeakPtr<Derived>(t);
 }
 
-// A class may alternatively be composed of a WeakPtrFactory and thereby
-// control how it exposes weak pointers to itself.  This is helpful if you only
-// need weak pointers within the implementation of a class.  This class is also
-// useful when working with primitive types.  For example, you could have a
-// WeakPtrFactory<bool> that is used to pass around a weak reference to a bool.
-template <class T>
-class WeakPtrFactory {
- public:
-  explicit WeakPtrFactory(T* ptr) : ptr_(ptr) {
-  }
-
-  ~WeakPtrFactory() {
-    ptr_ = NULL;
-  }
-
-  WeakPtr<T> GetWeakPtr() {
-    DCHECK(ptr_);
-    return WeakPtr<T>(weak_reference_owner_.GetRef(), ptr_);
-  }
-
-  // Call this method to invalidate all existing weak pointers.
-  void InvalidateWeakPtrs() {
-    DCHECK(ptr_);
-    weak_reference_owner_.Invalidate();
-  }
-
-  // Call this method to determine if any weak pointers exist.
-  bool HasWeakPtrs() const {
-    DCHECK(ptr_);
-    return weak_reference_owner_.HasRefs();
-  }
-
-  // Indicates that this object will be used on another thread from now on.
-  void DetachFromThread() {
-    DCHECK(ptr_);
-    weak_reference_owner_.DetachFromThread();
-  }
-
- private:
-  internal::WeakReferenceOwner weak_reference_owner_;
-  T* ptr_;
-  DISALLOW_IMPLICIT_CONSTRUCTORS(WeakPtrFactory);
-};
-
 }  // namespace base
 
 #endif  // BASE_MEMORY_WEAK_PTR_H_
diff --git a/src/base/memory/weak_ptr_unittest.cc b/src/base/memory/weak_ptr_unittest.cc
index d5f8057..a4629df 100644
--- a/src/base/memory/weak_ptr_unittest.cc
+++ b/src/base/memory/weak_ptr_unittest.cc
@@ -4,18 +4,26 @@
 
 #include "base/memory/weak_ptr.h"
 
+#include <memory>
 #include <string>
 
 #include "base/bind.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/message_loop.h"
+#include "base/debug/leak_annotations.h"
+#include "base/location.h"
+#include "base/single_thread_task_runner.h"
 #include "base/synchronization/waitable_event.h"
+#include "base/test/gtest_util.h"
+#include "base/test/test_timeouts.h"
 #include "base/threading/thread.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
 namespace {
 
+WeakPtr<int> PassThru(WeakPtr<int> ptr) {
+  return ptr;
+}
+
 template <class T>
 class OffThreadObjectCreator {
  public:
@@ -24,9 +32,9 @@
     {
       Thread creator_thread("creator_thread");
       creator_thread.Start();
-      creator_thread.message_loop()->PostTask(
+      creator_thread.task_runner()->PostTask(
           FROM_HERE,
-          base::Bind(OffThreadObjectCreator::CreateObject, &result));
+          base::BindOnce(OffThreadObjectCreator::CreateObject, &result));
     }
     DCHECK(result);  // We synchronized on thread destruction above.
     return result;
@@ -40,13 +48,40 @@
 struct Base {
   std::string member;
 };
-struct Derived : Base {};
+struct Derived : public Base {};
 
-struct Target : SupportsWeakPtr<Target> {};
-struct DerivedTarget : Target {};
+struct TargetBase {};
+struct Target : public TargetBase, public SupportsWeakPtr<Target> {
+  virtual ~Target() = default;
+};
+
+struct DerivedTarget : public Target {};
+
+// A class inheriting from Target and defining a nested type called 'Base'.
+// To guard against strange compilation errors.
+struct DerivedTargetWithNestedBase : public Target {
+  using Base = void;
+};
+
+// A struct with a virtual destructor.
+struct VirtualDestructor {
+  virtual ~VirtualDestructor() = default;
+};
+
+// A class inheriting from Target where Target is not the first base, and where
+// the first base has a virtual method table. This creates a structure where the
+// Target base is not positioned at the beginning of
+// DerivedTargetMultipleInheritance.
+struct DerivedTargetMultipleInheritance : public VirtualDestructor,
+                                          public Target {};
+
 struct Arrow {
   WeakPtr<Target> target;
 };
+struct TargetWithFactory : public Target {
+  TargetWithFactory() : factory(this) {}
+  WeakPtrFactory<Target> factory;
+};
 
 // Helper class to create and destroy weak pointer copies
 // and delete objects on a background thread.
@@ -54,50 +89,69 @@
  public:
   BackgroundThread() : Thread("owner_thread") {}
 
-  virtual ~BackgroundThread() {
-    Stop();
-  }
+  ~BackgroundThread() override { Stop(); }
 
   void CreateArrowFromTarget(Arrow** arrow, Target* target) {
-    WaitableEvent completion(true, false);
-    message_loop()->PostTask(
-        FROM_HERE,
-        base::Bind(&BackgroundThread::DoCreateArrowFromTarget,
-                   arrow, target, &completion));
+    WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+                             WaitableEvent::InitialState::NOT_SIGNALED);
+    task_runner()->PostTask(
+        FROM_HERE, base::BindOnce(&BackgroundThread::DoCreateArrowFromTarget,
+                                  arrow, target, &completion));
     completion.Wait();
   }
 
   void CreateArrowFromArrow(Arrow** arrow, const Arrow* other) {
-    WaitableEvent completion(true, false);
-    message_loop()->PostTask(
-        FROM_HERE,
-        base::Bind(&BackgroundThread::DoCreateArrowFromArrow,
-                   arrow, other, &completion));
+    WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+                             WaitableEvent::InitialState::NOT_SIGNALED);
+    task_runner()->PostTask(
+        FROM_HERE, base::BindOnce(&BackgroundThread::DoCreateArrowFromArrow,
+                                  arrow, other, &completion));
     completion.Wait();
   }
 
   void DeleteTarget(Target* object) {
-    WaitableEvent completion(true, false);
-    message_loop()->PostTask(
+    WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+                             WaitableEvent::InitialState::NOT_SIGNALED);
+    task_runner()->PostTask(
         FROM_HERE,
-        base::Bind(&BackgroundThread::DoDeleteTarget, object, &completion));
+        base::BindOnce(&BackgroundThread::DoDeleteTarget, object, &completion));
+    completion.Wait();
+  }
+
+  void CopyAndAssignArrow(Arrow* object) {
+    WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+                             WaitableEvent::InitialState::NOT_SIGNALED);
+    task_runner()->PostTask(
+        FROM_HERE, base::BindOnce(&BackgroundThread::DoCopyAndAssignArrow,
+                                  object, &completion));
+    completion.Wait();
+  }
+
+  void CopyAndAssignArrowBase(Arrow* object) {
+    WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+                             WaitableEvent::InitialState::NOT_SIGNALED);
+    task_runner()->PostTask(
+        FROM_HERE, base::BindOnce(&BackgroundThread::DoCopyAndAssignArrowBase,
+                                  object, &completion));
     completion.Wait();
   }
 
   void DeleteArrow(Arrow* object) {
-    WaitableEvent completion(true, false);
-    message_loop()->PostTask(
+    WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+                             WaitableEvent::InitialState::NOT_SIGNALED);
+    task_runner()->PostTask(
         FROM_HERE,
-        base::Bind(&BackgroundThread::DoDeleteArrow, object, &completion));
+        base::BindOnce(&BackgroundThread::DoDeleteArrow, object, &completion));
     completion.Wait();
   }
 
   Target* DeRef(const Arrow* arrow) {
-    WaitableEvent completion(true, false);
-    Target* result = NULL;
-    message_loop()->PostTask(
-        FROM_HERE,
-        base::Bind(&BackgroundThread::DoDeRef, arrow, &result, &completion));
+    WaitableEvent completion(WaitableEvent::ResetPolicy::MANUAL,
+                             WaitableEvent::InitialState::NOT_SIGNALED);
+    Target* result = nullptr;
+    task_runner()->PostTask(
+        FROM_HERE, base::BindOnce(&BackgroundThread::DoDeRef, arrow, &result,
+                                  &completion));
     completion.Wait();
     return result;
   }
@@ -131,6 +185,25 @@
     completion->Signal();
   }
 
+  static void DoCopyAndAssignArrow(Arrow* object, WaitableEvent* completion) {
+    // Copy constructor.
+    Arrow a = *object;
+    // Assignment operator.
+    *object = a;
+    completion->Signal();
+  }
+
+  static void DoCopyAndAssignArrowBase(
+      Arrow* object,
+      WaitableEvent* completion) {
+    // Copy constructor.
+    WeakPtr<TargetBase> b = object->target;
+    // Assignment operator.
+    WeakPtr<TargetBase> c;
+    c = object->target;
+    completion->Signal();
+  }
+
   static void DoDeleteArrow(Arrow* object, WaitableEvent* completion) {
     delete object;
     completion->Signal();
@@ -151,18 +224,28 @@
   WeakPtrFactory<int> factory(&data);
   WeakPtr<int> ptr = factory.GetWeakPtr();
   WeakPtr<int> ptr2 = ptr;
-  EXPECT_EQ(ptr, ptr2);
+  EXPECT_EQ(ptr.get(), ptr2.get());
+}
+
+TEST(WeakPtrFactoryTest, Move) {
+  int data;
+  WeakPtrFactory<int> factory(&data);
+  WeakPtr<int> ptr = factory.GetWeakPtr();
+  WeakPtr<int> ptr2 = factory.GetWeakPtr();
+  WeakPtr<int> ptr3 = std::move(ptr2);
+  EXPECT_NE(ptr.get(), ptr2.get());
+  EXPECT_EQ(ptr.get(), ptr3.get());
 }
 
 TEST(WeakPtrFactoryTest, OutOfScope) {
   WeakPtr<int> ptr;
-  EXPECT_EQ(NULL, ptr.get());
+  EXPECT_EQ(nullptr, ptr.get());
   {
     int data;
     WeakPtrFactory<int> factory(&data);
     ptr = factory.GetWeakPtr();
   }
-  EXPECT_EQ(NULL, ptr.get());
+  EXPECT_EQ(nullptr, ptr.get());
 }
 
 TEST(WeakPtrFactoryTest, Multiple) {
@@ -175,8 +258,8 @@
     EXPECT_EQ(&data, a.get());
     EXPECT_EQ(&data, b.get());
   }
-  EXPECT_EQ(NULL, a.get());
-  EXPECT_EQ(NULL, b.get());
+  EXPECT_EQ(nullptr, a.get());
+  EXPECT_EQ(nullptr, b.get());
 }
 
 TEST(WeakPtrFactoryTest, MultipleStaged) {
@@ -188,9 +271,9 @@
     {
       WeakPtr<int> b = factory.GetWeakPtr();
     }
-    EXPECT_TRUE(NULL != a.get());
+    EXPECT_NE(nullptr, a.get());
   }
-  EXPECT_EQ(NULL, a.get());
+  EXPECT_EQ(nullptr, a.get());
 }
 
 TEST(WeakPtrFactoryTest, Dereference) {
@@ -211,6 +294,11 @@
   EXPECT_EQ(ptr.get(), &data);
 }
 
+TEST(WeakPtrTest, ConstructFromNullptr) {
+  WeakPtr<int> ptr = PassThru(nullptr);
+  EXPECT_EQ(nullptr, ptr.get());
+}
+
 TEST(WeakPtrTest, SupportsWeakPtr) {
   Target target;
   WeakPtr<Target> ptr = target.AsWeakPtr();
@@ -223,6 +311,66 @@
   EXPECT_EQ(&target, ptr.get());
 }
 
+TEST(WeakPtrTest, DerivedTargetWithNestedBase) {
+  DerivedTargetWithNestedBase target;
+  WeakPtr<DerivedTargetWithNestedBase> ptr = AsWeakPtr(&target);
+  EXPECT_EQ(&target, ptr.get());
+}
+
+TEST(WeakPtrTest, DerivedTargetMultipleInheritance) {
+  DerivedTargetMultipleInheritance d;
+  Target& b = d;
+  EXPECT_NE(static_cast<void*>(&d), static_cast<void*>(&b));
+  const WeakPtr<Target> pb = AsWeakPtr(&b);
+  EXPECT_EQ(pb.get(), &b);
+  const WeakPtr<DerivedTargetMultipleInheritance> pd = AsWeakPtr(&d);
+  EXPECT_EQ(pd.get(), &d);
+}
+
+TEST(WeakPtrFactoryTest, BooleanTesting) {
+  int data;
+  WeakPtrFactory<int> factory(&data);
+
+  WeakPtr<int> ptr_to_an_instance = factory.GetWeakPtr();
+  EXPECT_TRUE(ptr_to_an_instance);
+  EXPECT_FALSE(!ptr_to_an_instance);
+
+  if (ptr_to_an_instance) {
+  } else {
+    ADD_FAILURE() << "Pointer to an instance should result in true.";
+  }
+
+  if (!ptr_to_an_instance) {  // check for operator!().
+    ADD_FAILURE() << "Pointer to an instance should result in !x being false.";
+  }
+
+  WeakPtr<int> null_ptr;
+  EXPECT_FALSE(null_ptr);
+  EXPECT_TRUE(!null_ptr);
+
+  if (null_ptr) {
+    ADD_FAILURE() << "Null pointer should result in false.";
+  }
+
+  if (!null_ptr) {  // check for operator!().
+  } else {
+    ADD_FAILURE() << "Null pointer should result in !x being true.";
+  }
+}
+
+TEST(WeakPtrFactoryTest, ComparisonToNull) {
+  int data;
+  WeakPtrFactory<int> factory(&data);
+
+  WeakPtr<int> ptr_to_an_instance = factory.GetWeakPtr();
+  EXPECT_NE(nullptr, ptr_to_an_instance);
+  EXPECT_NE(ptr_to_an_instance, nullptr);
+
+  WeakPtr<int> null_ptr;
+  EXPECT_EQ(null_ptr, nullptr);
+  EXPECT_EQ(nullptr, null_ptr);
+}
+
 TEST(WeakPtrTest, InvalidateWeakPtrs) {
   int data;
   WeakPtrFactory<int> factory(&data);
@@ -230,8 +378,102 @@
   EXPECT_EQ(&data, ptr.get());
   EXPECT_TRUE(factory.HasWeakPtrs());
   factory.InvalidateWeakPtrs();
-  EXPECT_EQ(NULL, ptr.get());
+  EXPECT_EQ(nullptr, ptr.get());
   EXPECT_FALSE(factory.HasWeakPtrs());
+
+  // Test that the factory can create new weak pointers after a
+  // InvalidateWeakPtrs call, and they remain valid until the next
+  // InvalidateWeakPtrs call.
+  WeakPtr<int> ptr2 = factory.GetWeakPtr();
+  EXPECT_EQ(&data, ptr2.get());
+  EXPECT_TRUE(factory.HasWeakPtrs());
+  factory.InvalidateWeakPtrs();
+  EXPECT_EQ(nullptr, ptr2.get());
+  EXPECT_FALSE(factory.HasWeakPtrs());
+}
+
+// Tests that WasInvalidated() is true only for invalidated WeakPtrs (not
+// nullptr) and doesn't DCHECK (e.g. because of a dereference attempt).
+TEST(WeakPtrTest, WasInvalidatedByFactoryDestruction) {
+  WeakPtr<int> ptr;
+  EXPECT_FALSE(ptr.WasInvalidated());
+
+  // Test |data| destroyed. That is, the typical pattern when |data| (and its
+  // associated factory) go out of scope.
+  {
+    int data = 0;
+    WeakPtrFactory<int> factory(&data);
+    ptr = factory.GetWeakPtr();
+
+    // Verify that a live WeakPtr is not reported as Invalidated.
+    EXPECT_FALSE(ptr.WasInvalidated());
+  }
+
+  // Checking validity shouldn't read beyond the stack frame.
+  EXPECT_TRUE(ptr.WasInvalidated());
+  ptr = nullptr;
+  EXPECT_FALSE(ptr.WasInvalidated());
+}
+
+// As above, but testing InvalidateWeakPtrs().
+TEST(WeakPtrTest, WasInvalidatedByInvalidateWeakPtrs) {
+  int data = 0;
+  WeakPtrFactory<int> factory(&data);
+  WeakPtr<int> ptr = factory.GetWeakPtr();
+  EXPECT_FALSE(ptr.WasInvalidated());
+  factory.InvalidateWeakPtrs();
+  EXPECT_TRUE(ptr.WasInvalidated());
+  ptr = nullptr;
+  EXPECT_FALSE(ptr.WasInvalidated());
+}
+
+// A WeakPtr should not be reported as 'invalidated' if nullptr was assigned to
+// it.
+TEST(WeakPtrTest, WasInvalidatedWhilstNull) {
+  int data = 0;
+  WeakPtrFactory<int> factory(&data);
+  WeakPtr<int> ptr = factory.GetWeakPtr();
+  EXPECT_FALSE(ptr.WasInvalidated());
+  ptr = nullptr;
+  EXPECT_FALSE(ptr.WasInvalidated());
+  factory.InvalidateWeakPtrs();
+  EXPECT_FALSE(ptr.WasInvalidated());
+}
+
+TEST(WeakPtrTest, MaybeValidOnSameSequence) {
+  int data;
+  WeakPtrFactory<int> factory(&data);
+  WeakPtr<int> ptr = factory.GetWeakPtr();
+  EXPECT_TRUE(ptr.MaybeValid());
+  factory.InvalidateWeakPtrs();
+  // Since InvalidateWeakPtrs() ran on this sequence, MaybeValid() should be
+  // false.
+  EXPECT_FALSE(ptr.MaybeValid());
+}
+
+TEST(WeakPtrTest, MaybeValidOnOtherSequence) {
+  int data;
+  WeakPtrFactory<int> factory(&data);
+  WeakPtr<int> ptr = factory.GetWeakPtr();
+  EXPECT_TRUE(ptr.MaybeValid());
+
+  base::Thread other_thread("other_thread");
+  other_thread.StartAndWaitForTesting();
+  other_thread.task_runner()->PostTask(
+      FROM_HERE,
+      base::BindOnce(
+          [](WeakPtr<int> ptr) {
+            // Check that MaybeValid() _eventually_ returns false.
+            const TimeDelta timeout = TestTimeouts::tiny_timeout();
+            const TimeTicks begin = TimeTicks::Now();
+            while (ptr.MaybeValid() && (TimeTicks::Now() - begin) < timeout)
+              PlatformThread::YieldCurrentThread();
+            EXPECT_FALSE(ptr.MaybeValid());
+          },
+          ptr));
+  factory.InvalidateWeakPtrs();
+  // |other_thread|'s destructor will join, ensuring we wait for the task to be
+  // run.
 }
 
 TEST(WeakPtrTest, HasWeakPtrs) {
@@ -248,7 +490,7 @@
   // Test that it is OK to create an object that supports WeakPtr on one thread,
   // but use it on another.  This tests that we do not trip runtime checks that
   // ensure that a WeakPtr is not used by multiple threads.
-  scoped_ptr<Target> target(OffThreadObjectCreator<Target>::NewObject());
+  std::unique_ptr<Target> target(OffThreadObjectCreator<Target>::NewObject());
   WeakPtr<Target> weak_ptr = target->AsWeakPtr();
   EXPECT_EQ(target.get(), weak_ptr.get());
 }
@@ -257,7 +499,7 @@
   // Test that it is OK to create an object that has a WeakPtr member on one
   // thread, but use it on another.  This tests that we do not trip runtime
   // checks that ensure that a WeakPtr is not used by multiple threads.
-  scoped_ptr<Arrow> arrow(OffThreadObjectCreator<Arrow>::NewObject());
+  std::unique_ptr<Arrow> arrow(OffThreadObjectCreator<Arrow>::NewObject());
   Target target;
   arrow->target = target.AsWeakPtr();
   EXPECT_EQ(&target, arrow->target.get());
@@ -297,43 +539,54 @@
   background.DeleteArrow(arrow);
 }
 
-TEST(WeakPtrTest, MoveOwnershipExplicitlyObjectNotReferenced) {
-  // Case 1: The target is not bound to any thread yet. So calling
-  // DetachFromThread() is a no-op.
-  Target target;
-  target.DetachFromThread();
-
-  // Case 2: The target is bound to main thread but no WeakPtr is pointing to
-  // it. In this case, it will be re-bound to any thread trying to get a
-  // WeakPtr pointing to it. So detach function call is again no-op.
-  {
-    WeakPtr<Target> weak_ptr = target.AsWeakPtr();
-  }
-  target.DetachFromThread();
-}
-
-TEST(WeakPtrTest, MoveOwnershipExplicitly) {
+TEST(WeakPtrTest, MoveOwnershipOfUnreferencedObject) {
   BackgroundThread background;
   background.Start();
 
   Arrow* arrow;
   {
     Target target;
-    // Background thread creates WeakPtr(and implicitly owns the object).
+    // Background thread creates WeakPtr.
     background.CreateArrowFromTarget(&arrow, &target);
+
+    // Bind to background thread.
     EXPECT_EQ(&target, background.DeRef(arrow));
 
-    // Detach from background thread.
-    target.DetachFromThread();
+    // Release the only WeakPtr.
+    arrow->target.reset();
+
+    // Now we should be able to create a new reference from this thread.
+    arrow->target = target.AsWeakPtr();
 
     // Re-bind to main thread.
     EXPECT_EQ(&target, arrow->target.get());
 
-    // Main thread can now delete the target.
+    // And the main thread can now delete the target.
   }
 
-  // WeakPtr can be deleted on non-owner thread.
-  background.DeleteArrow(arrow);
+  delete arrow;
+}
+
+TEST(WeakPtrTest, MoveOwnershipAfterInvalidate) {
+  BackgroundThread background;
+  background.Start();
+
+  Arrow arrow;
+  std::unique_ptr<TargetWithFactory> target(new TargetWithFactory);
+
+  // Bind to main thread.
+  arrow.target = target->factory.GetWeakPtr();
+  EXPECT_EQ(target.get(), arrow.target.get());
+
+  target->factory.InvalidateWeakPtrs();
+  EXPECT_EQ(nullptr, arrow.target.get());
+
+  arrow.target = target->factory.GetWeakPtr();
+  // Re-bind to background thread.
+  EXPECT_EQ(target.get(), background.DeRef(&arrow));
+
+  // And the background thread can now delete the target.
+  background.DeleteTarget(target.release());
 }
 
 TEST(WeakPtrTest, MainThreadRefOutlivesBackgroundThreadRef) {
@@ -351,7 +604,7 @@
 
   Arrow* arrow_copy;
   background.CreateArrowFromArrow(&arrow_copy, &arrow);
-  EXPECT_EQ(arrow_copy->target, &target);
+  EXPECT_EQ(arrow_copy->target.get(), &target);
   background.DeleteArrow(arrow_copy);
 }
 
@@ -370,7 +623,7 @@
     arrow.target = target.AsWeakPtr();
     background.CreateArrowFromArrow(&arrow_copy, &arrow);
   }
-  EXPECT_EQ(arrow_copy->target, &target);
+  EXPECT_EQ(arrow_copy->target.get(), &target);
   background.DeleteArrow(arrow_copy);
 }
 
@@ -389,10 +642,38 @@
     arrow.target = target.AsWeakPtr();
     background.CreateArrowFromArrow(&arrow_copy, &arrow);
   }
-  EXPECT_EQ(NULL, arrow_copy->target.get());
+  EXPECT_EQ(nullptr, arrow_copy->target.get());
   background.DeleteArrow(arrow_copy);
 }
 
+TEST(WeakPtrTest, NonOwnerThreadCanCopyAndAssignWeakPtr) {
+  // Main thread creates a Target object.
+  Target target;
+  // Main thread creates an arrow referencing the Target.
+  Arrow *arrow = new Arrow();
+  arrow->target = target.AsWeakPtr();
+
+  // Background can copy and assign arrow (as well as the WeakPtr inside).
+  BackgroundThread background;
+  background.Start();
+  background.CopyAndAssignArrow(arrow);
+  background.DeleteArrow(arrow);
+}
+
+TEST(WeakPtrTest, NonOwnerThreadCanCopyAndAssignWeakPtrBase) {
+  // Main thread creates a Target object.
+  Target target;
+  // Main thread creates an arrow referencing the Target.
+  Arrow *arrow = new Arrow();
+  arrow->target = target.AsWeakPtr();
+
+  // Background can copy and assign arrow's WeakPtr to a base class WeakPtr.
+  BackgroundThread background;
+  background.Start();
+  background.CopyAndAssignArrowBase(arrow);
+  background.DeleteArrow(arrow);
+}
+
 TEST(WeakPtrTest, NonOwnerThreadCanDeleteWeakPtr) {
   // Main thread creates a Target object.
   Target target;
@@ -406,8 +687,6 @@
   background.DeleteArrow(arrow);
 }
 
-#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) && GTEST_HAS_DEATH_TEST
-
 TEST(WeakPtrDeathTest, WeakPtrCopyDoesNotChangeThreadBinding) {
   // The default style "fast" does not support multi-threaded tests
   // (introduces deadlock on Linux).
@@ -431,12 +710,12 @@
 
   // Although background thread created the copy, it can not deref the copied
   // WeakPtr.
-  ASSERT_DEATH(background.DeRef(arrow_copy), "");
+  ASSERT_DCHECK_DEATH(background.DeRef(arrow_copy));
 
   background.DeleteArrow(arrow_copy);
 }
 
-TEST(WeakPtrDeathTest, NonOwnerThreadDereferencesWeakPtr) {
+TEST(WeakPtrDeathTest, NonOwnerThreadDereferencesWeakPtrAfterReference) {
   // The default style "fast" does not support multi-threaded tests
   // (introduces deadlock on Linux).
   ::testing::FLAGS_gtest_death_test_style = "threadsafe";
@@ -448,30 +727,75 @@
   // thread ownership can not be implicitly moved).
   Arrow arrow;
   arrow.target = target.AsWeakPtr();
+  arrow.target.get();
 
   // Background thread tries to deref target, which violates thread ownership.
   BackgroundThread background;
   background.Start();
-  ASSERT_DEATH(background.DeRef(&arrow), "");
+  ASSERT_DCHECK_DEATH(background.DeRef(&arrow));
 }
 
-TEST(WeakPtrDeathTest, NonOwnerThreadDeletesObject) {
+TEST(WeakPtrDeathTest, NonOwnerThreadDeletesWeakPtrAfterReference) {
   // The default style "fast" does not support multi-threaded tests
   // (introduces deadlock on Linux).
   ::testing::FLAGS_gtest_death_test_style = "threadsafe";
 
-  scoped_ptr<Target> target(new Target());
-  // Main thread creates an arrow referencing the Target (so target's thread
-  // ownership can not be implicitly moved).
+  std::unique_ptr<Target> target(new Target());
+
+  // Main thread creates an arrow referencing the Target.
   Arrow arrow;
   arrow.target = target->AsWeakPtr();
 
-  // Background thread tries to delete target, which violates thread ownership.
+  // Background thread tries to deref target, binding it to the thread.
   BackgroundThread background;
   background.Start();
-  ASSERT_DEATH(background.DeleteTarget(target.release()), "");
+  background.DeRef(&arrow);
+
+  // Main thread deletes Target, violating thread binding.
+  ASSERT_DCHECK_DEATH(target.reset());
+
+  // |target.reset()| died, so |target| still holds the object; we must
+  // pass it to the background thread to tear it down.
+  background.DeleteTarget(target.release());
 }
 
-#endif
+TEST(WeakPtrDeathTest, NonOwnerThreadDeletesObjectAfterReference) {
+  // The default style "fast" does not support multi-threaded tests
+  // (introduces deadlock on Linux).
+  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
+
+  std::unique_ptr<Target> target(new Target());
+
+  // Main thread creates an arrow referencing the Target, and references it, so
+  // that it becomes bound to the thread.
+  Arrow arrow;
+  arrow.target = target->AsWeakPtr();
+  arrow.target.get();
+
+  // Background thread tries to delete target, violating thread binding.
+  BackgroundThread background;
+  background.Start();
+  ASSERT_DCHECK_DEATH(background.DeleteTarget(target.release()));
+}
+
+TEST(WeakPtrDeathTest, NonOwnerThreadReferencesObjectAfterDeletion) {
+  // The default style "fast" does not support multi-threaded tests
+  // (introduces deadlock on Linux).
+  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
+
+  std::unique_ptr<Target> target(new Target());
+
+  // Main thread creates an arrow referencing the Target.
+  Arrow arrow;
+  arrow.target = target->AsWeakPtr();
+
+  // Background thread tries to delete target, binding the object to the thread.
+  BackgroundThread background;
+  background.Start();
+  background.DeleteTarget(target.release());
+
+  // Main thread attempts to dereference the target, violating thread binding.
+  ASSERT_DCHECK_DEATH(arrow.target.get());
+}
 
 }  // namespace base
diff --git a/src/base/memory/weak_ptr_unittest.nc b/src/base/memory/weak_ptr_unittest.nc
index afc8060..b96b033 100644
--- a/src/base/memory/weak_ptr_unittest.nc
+++ b/src/base/memory/weak_ptr_unittest.nc
@@ -2,6 +2,9 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+// This is a "No Compile Test" suite.
+// http://dev.chromium.org/developers/testing/no-compile-tests
+
 #include "base/memory/weak_ptr.h"
 
 namespace base {
@@ -14,7 +17,7 @@
 struct Unrelated {};
 struct DerivedUnrelated : Unrelated {};
 
-#if defined(NCTEST_AUTO_DOWNCAST)  // [r"invalid conversion from"]
+#if defined(NCTEST_AUTO_DOWNCAST)  // [r"cannot initialize a variable of type 'base::DerivedProducer \*' with an rvalue of type 'base::Producer \*'"]
 
 void WontCompile() {
   Producer f;
@@ -22,7 +25,7 @@
   WeakPtr<DerivedProducer> derived_ptr = ptr;
 }
 
-#elif defined(NCTEST_STATIC_DOWNCAST)  // [r"invalid conversion from"]
+#elif defined(NCTEST_STATIC_DOWNCAST)  // [r"cannot initialize a variable of type 'base::DerivedProducer \*' with an rvalue of type 'base::Producer \*'"]
 
 void WontCompile() {
   Producer f;
@@ -31,7 +34,7 @@
       static_cast<WeakPtr<DerivedProducer> >(ptr);
 }
 
-#elif defined(NCTEST_AUTO_REF_DOWNCAST)  // [r"invalid initialization of reference"]
+#elif defined(NCTEST_AUTO_REF_DOWNCAST)  // [r"fatal error: non-const lvalue reference to type 'WeakPtr<base::DerivedProducer>' cannot bind to a value of unrelated type 'WeakPtr<base::Producer>'"]
 
 void WontCompile() {
   Producer f;
@@ -39,7 +42,7 @@
   WeakPtr<DerivedProducer>& derived_ptr = ptr;
 }
 
-#elif defined(NCTEST_STATIC_REF_DOWNCAST)  // [r"invalid static_cast"]
+#elif defined(NCTEST_STATIC_REF_DOWNCAST)  // [r"fatal error: non-const lvalue reference to type 'WeakPtr<base::DerivedProducer>' cannot bind to a value of unrelated type 'WeakPtr<base::Producer>'"]
 
 void WontCompile() {
   Producer f;
@@ -56,7 +59,7 @@
       SupportsWeakPtr<Producer>::StaticAsWeakPtr<DerivedProducer>(&f);
 }
 
-#elif defined(NCTEST_UNSAFE_HELPER_DOWNCAST)  // [r"invalid conversion from"]
+#elif defined(NCTEST_UNSAFE_HELPER_DOWNCAST)  // [r"cannot initialize a variable of type 'base::DerivedProducer \*' with an rvalue of type 'base::Producer \*'"]
 
 void WontCompile() {
   Producer f;
@@ -70,35 +73,35 @@
   WeakPtr<DerivedProducer> ptr = AsWeakPtr<DerivedProducer>(&f);
 }
 
-#elif defined(NCTEST_UNSAFE_WRONG_INSANTIATED_HELPER_DOWNCAST)  // [r"invalid conversion from"]
+#elif defined(NCTEST_UNSAFE_WRONG_INSANTIATED_HELPER_DOWNCAST)  // [r"cannot initialize a variable of type 'base::DerivedProducer \*' with an rvalue of type 'base::Producer \*'"]
 
 void WontCompile() {
-  Producer f; 
+  Producer f;
   WeakPtr<DerivedProducer> ptr = AsWeakPtr<Producer>(&f);
 }
 
-#elif defined(NCTEST_UNSAFE_HELPER_CAST)  // [r"cannot convert"]
+#elif defined(NCTEST_UNSAFE_HELPER_CAST)  // [r"cannot initialize a variable of type 'base::OtherDerivedProducer \*' with an rvalue of type 'base::DerivedProducer \*'"]
 
 void WontCompile() {
   DerivedProducer f;
   WeakPtr<OtherDerivedProducer> ptr = AsWeakPtr(&f);
 }
 
-#elif defined(NCTEST_UNSAFE_INSTANTIATED_HELPER_SIDECAST)  // [r"no matching function"]
+#elif defined(NCTEST_UNSAFE_INSTANTIATED_HELPER_SIDECAST)  // [r"fatal error: no matching function for call to 'AsWeakPtr'"]
 
 void WontCompile() {
   DerivedProducer f;
   WeakPtr<OtherDerivedProducer> ptr = AsWeakPtr<OtherDerivedProducer>(&f);
 }
 
-#elif defined(NCTEST_UNSAFE_WRONG_INSTANTIATED_HELPER_SIDECAST)  // [r"cannot convert"]
+#elif defined(NCTEST_UNSAFE_WRONG_INSTANTIATED_HELPER_SIDECAST)  // [r"cannot initialize a variable of type 'base::OtherDerivedProducer \*' with an rvalue of type 'base::DerivedProducer \*'"]
 
 void WontCompile() {
   DerivedProducer f;
   WeakPtr<OtherDerivedProducer> ptr = AsWeakPtr<DerivedProducer>(&f);
 }
 
-#elif defined(NCTEST_UNRELATED_HELPER)  // [r"cannot convert"]
+#elif defined(NCTEST_UNRELATED_HELPER)  // [r"cannot initialize a variable of type 'base::Unrelated \*' with an rvalue of type 'base::DerivedProducer \*'"]
 
 void WontCompile() {
   DerivedProducer f;
@@ -112,21 +115,24 @@
   WeakPtr<Unrelated> ptr = AsWeakPtr<Unrelated>(&f);
 }
 
-#elif defined(NCTEST_COMPLETELY_UNRELATED_HELPER)  // [r"array with negative size"]
+// TODO(hans): Remove .* and update the static_assert expectations once we roll
+// past Clang r313315. https://crbug.com/765692.
+
+#elif defined(NCTEST_COMPLETELY_UNRELATED_HELPER)  // [r"fatal error: static_assert failed .*\"AsWeakPtr argument must inherit from SupportsWeakPtr\""]
 
 void WontCompile() {
   Unrelated f;
   WeakPtr<Unrelated> ptr = AsWeakPtr(&f);
 }
 
-#elif defined(NCTEST_DERIVED_COMPLETELY_UNRELATED_HELPER)  // [r"array with negative size"]
+#elif defined(NCTEST_DERIVED_COMPLETELY_UNRELATED_HELPER)  // [r"fatal error: static_assert failed .*\"AsWeakPtr argument must inherit from SupportsWeakPtr\""]
 
 void WontCompile() {
   DerivedUnrelated f;
   WeakPtr<Unrelated> ptr = AsWeakPtr(&f);
 }
 
-#elif defined(NCTEST_AMBIGUOUS_ANCESTORS)  // [r"ambiguous base"]
+#elif defined(NCTEST_AMBIGUOUS_ANCESTORS)  // [r"fatal error: use of undeclared identifier 'AsWeakPtrImpl'"]
 
 void WontCompile() {
   MultiplyDerivedProducer f;
diff --git a/src/base/memory/writable_shared_memory_region.cc b/src/base/memory/writable_shared_memory_region.cc
new file mode 100644
index 0000000..063e672
--- /dev/null
+++ b/src/base/memory/writable_shared_memory_region.cc
@@ -0,0 +1,93 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/writable_shared_memory_region.h"
+
+#include <utility>
+
+#include "base/memory/shared_memory.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// static
+WritableSharedMemoryRegion WritableSharedMemoryRegion::Create(size_t size) {
+  subtle::PlatformSharedMemoryRegion handle =
+      subtle::PlatformSharedMemoryRegion::CreateWritable(size);
+
+  return WritableSharedMemoryRegion(std::move(handle));
+}
+
+// static
+WritableSharedMemoryRegion WritableSharedMemoryRegion::Deserialize(
+    subtle::PlatformSharedMemoryRegion handle) {
+  return WritableSharedMemoryRegion(std::move(handle));
+}
+
+// static
+subtle::PlatformSharedMemoryRegion
+WritableSharedMemoryRegion::TakeHandleForSerialization(
+    WritableSharedMemoryRegion region) {
+  return std::move(region.handle_);
+}
+
+// static
+ReadOnlySharedMemoryRegion WritableSharedMemoryRegion::ConvertToReadOnly(
+    WritableSharedMemoryRegion region) {
+  subtle::PlatformSharedMemoryRegion handle = std::move(region.handle_);
+  if (!handle.ConvertToReadOnly())
+    return {};
+
+  return ReadOnlySharedMemoryRegion::Deserialize(std::move(handle));
+}
+
+UnsafeSharedMemoryRegion WritableSharedMemoryRegion::ConvertToUnsafe(
+    WritableSharedMemoryRegion region) {
+  subtle::PlatformSharedMemoryRegion handle = std::move(region.handle_);
+  if (!handle.ConvertToUnsafe())
+    return {};
+
+  return UnsafeSharedMemoryRegion::Deserialize(std::move(handle));
+}
+
+WritableSharedMemoryRegion::WritableSharedMemoryRegion() = default;
+WritableSharedMemoryRegion::WritableSharedMemoryRegion(
+    WritableSharedMemoryRegion&& region) = default;
+WritableSharedMemoryRegion& WritableSharedMemoryRegion::operator=(
+    WritableSharedMemoryRegion&& region) = default;
+WritableSharedMemoryRegion::~WritableSharedMemoryRegion() = default;
+
+WritableSharedMemoryMapping WritableSharedMemoryRegion::Map() const {
+  return MapAt(0, handle_.GetSize());
+}
+
+WritableSharedMemoryMapping WritableSharedMemoryRegion::MapAt(
+    off_t offset,
+    size_t size) const {
+  if (!IsValid())
+    return {};
+
+  void* memory = nullptr;
+  size_t mapped_size = 0;
+  if (!handle_.MapAt(offset, size, &memory, &mapped_size))
+    return {};
+
+  return WritableSharedMemoryMapping(memory, size, mapped_size,
+                                     handle_.GetGUID());
+}
+
+bool WritableSharedMemoryRegion::IsValid() const {
+  return handle_.IsValid();
+}
+
+WritableSharedMemoryRegion::WritableSharedMemoryRegion(
+    subtle::PlatformSharedMemoryRegion handle)
+    : handle_(std::move(handle)) {
+  if (handle_.IsValid()) {
+    CHECK_EQ(handle_.GetMode(),
+             subtle::PlatformSharedMemoryRegion::Mode::kWritable);
+  }
+}
+
+}  // namespace base
diff --git a/src/base/memory/writable_shared_memory_region.h b/src/base/memory/writable_shared_memory_region.h
new file mode 100644
index 0000000..edd25aa
--- /dev/null
+++ b/src/base/memory/writable_shared_memory_region.h
@@ -0,0 +1,115 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_WRITABLE_SHARED_MEMORY_REGION_H_
+#define BASE_MEMORY_WRITABLE_SHARED_MEMORY_REGION_H_
+
+#include "base/macros.h"
+#include "base/memory/platform_shared_memory_region.h"
+#include "base/memory/read_only_shared_memory_region.h"
+#include "base/memory/shared_memory_mapping.h"
+#include "base/memory/unsafe_shared_memory_region.h"
+
+namespace base {
+
+// Scoped move-only handle to a region of platform shared memory. The instance
+// owns the platform handle it wraps. Mappings created by this region are
+// writable. These mappings remain valid even after the region handle is moved
+// or destroyed.
+//
+// This region can be locked to read-only access by converting it to a
+// ReadOnlySharedMemoryRegion. However, unlike ReadOnlySharedMemoryRegion and
+// UnsafeSharedMemoryRegion, ownership of this region (while writable) is unique
+// and may only be transferred, not duplicated.
+class BASE_EXPORT WritableSharedMemoryRegion {
+ public:
+  using MappingType = WritableSharedMemoryMapping;
+  // Creates a new WritableSharedMemoryRegion instance of a given
+  // size that can be used for mapping writable shared memory into the virtual
+  // address space.
+  //
+  // This call will fail if the process does not have sufficient permissions to
+  // create a shared memory region itself. See
+  // mojo::CreateWritableSharedMemoryRegion in
+  // mojo/public/cpp/base/shared_memory_utils.h for creating a shared memory
+  // region from an unprivileged process where a broker must be used.
+  static WritableSharedMemoryRegion Create(size_t size);
+
+  // Returns a WritableSharedMemoryRegion built from a platform handle that was
+  // taken from another WritableSharedMemoryRegion instance. Returns an invalid
+  // region iff the |handle| is invalid. CHECK-fails if the |handle| isn't
+  // writable.
+  // This should be used only by the code passing handles across process
+  // boundaries.
+  static WritableSharedMemoryRegion Deserialize(
+      subtle::PlatformSharedMemoryRegion handle);
+
+  // Extracts a platform handle from the region. Ownership is transferred to the
+  // returned region object.
+  // This should be used only for sending the handle from the current
+  // process to another.
+  static subtle::PlatformSharedMemoryRegion TakeHandleForSerialization(
+      WritableSharedMemoryRegion region);
+
+  // Makes the region read-only. No new writable mappings of the region can be
+  // created after this call. Returns an invalid region on failure.
+  static ReadOnlySharedMemoryRegion ConvertToReadOnly(
+      WritableSharedMemoryRegion region);
+
+  // Makes the region unsafe. The region cannot be converted to read-only after
+  // this call. Returns an invalid region on failure.
+  static UnsafeSharedMemoryRegion ConvertToUnsafe(
+      WritableSharedMemoryRegion region);
+
+  // Default constructor initializes an invalid instance.
+  WritableSharedMemoryRegion();
+
+  // Move operations are allowed.
+  WritableSharedMemoryRegion(WritableSharedMemoryRegion&&);
+  WritableSharedMemoryRegion& operator=(WritableSharedMemoryRegion&&);
+
+  // Destructor closes shared memory region if valid.
+  // All created mappings will remain valid.
+  ~WritableSharedMemoryRegion();
+
+  // Maps the shared memory region into the caller's address space with write
+  // access. The mapped address is guaranteed to have an alignment of
+  // at least |subtle::PlatformSharedMemoryRegion::kMapMinimumAlignment|.
+  // Returns a valid WritableSharedMemoryMapping instance on success, invalid
+  // otherwise.
+  WritableSharedMemoryMapping Map() const;
+
+  // Same as above, but maps only |size| bytes of the shared memory block
+  // starting with the given |offset|. |offset| must be aligned to value of
+  // |SysInfo::VMAllocationGranularity()|. Returns an invalid mapping if
+  // requested bytes are out of the region limits.
+  WritableSharedMemoryMapping MapAt(off_t offset, size_t size) const;
+
+  // Whether underlying platform handles are valid.
+  bool IsValid() const;
+
+  // Returns the maximum mapping size that can be created from this region.
+  size_t GetSize() const {
+    DCHECK(IsValid());
+    return handle_.GetSize();
+  }
+
+  // Returns 128-bit GUID of the region.
+  const UnguessableToken& GetGUID() const {
+    DCHECK(IsValid());
+    return handle_.GetGUID();
+  }
+
+ private:
+  explicit WritableSharedMemoryRegion(
+      subtle::PlatformSharedMemoryRegion handle);
+
+  subtle::PlatformSharedMemoryRegion handle_;
+
+  DISALLOW_COPY_AND_ASSIGN(WritableSharedMemoryRegion);
+};
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_WRITABLE_SHARED_MEMORY_REGION_H_