Import cobalt 25.master.0.1034729
diff --git a/base/memory/MIRACLE_PTR_OWNERS b/base/memory/MIRACLE_PTR_OWNERS
new file mode 100644
index 0000000..c1390d5
--- /dev/null
+++ b/base/memory/MIRACLE_PTR_OWNERS
@@ -0,0 +1,6 @@
+bartekn@chromium.org
+danakj@chromium.org
+dcheng@chromium.org
+glazunov@google.com
+keishi@chromium.org
+lukasza@chromium.org
diff --git a/base/memory/OWNERS b/base/memory/OWNERS
index 9b7cbb1..f7f90b0 100644
--- a/base/memory/OWNERS
+++ b/base/memory/OWNERS
@@ -1,4 +1,10 @@
-per-file *chromeos*=skuhne@chromium.org
per-file *chromeos*=oshima@chromium.org
per-file *shared_memory*=set noparent
per-file *shared_memory*=file://ipc/SECURITY_OWNERS
+per-file raw_ptr*=file://base/memory/MIRACLE_PTR_OWNERS
+per-file raw_ref*=file://base/memory/MIRACLE_PTR_OWNERS
+per-file madv_free*=lizeb@chromium.org
+per-file madv_free*=pasko@chromium.org
+per-file madv_free*=mthiesse@chromium.org
+per-file nonscannable_memory*=bikineev@chromium.org
+per-file nonscannable_memory*=mlippautz@chromium.org
diff --git a/base/memory/README.md b/base/memory/README.md
new file mode 100644
index 0000000..8b16557
--- /dev/null
+++ b/base/memory/README.md
@@ -0,0 +1,84 @@
+# //base/memory Types
+
+## Overview
+This directory contains a variety of pointer-like objects (aka smart pointers).
+This is a brief overview of what they are and how they should be used. Refer to
+individual header files for details. C++ is not memory safe, so use these types
+to help guard against potential memory bugs.
+There are other pointer-like object types implemented elsewhere that may be
+right for a given use case, such as `absl::optional<T>` and
+`std::unique_ptr<T>`. More on all types in video form
+[here](https://youtu.be/MpwbWSEDfjM?t=582s) and in a doc
+[here](https://docs.google.com/document/d/1VRevv8JhlP4I8fIlvf87IrW2IRjE0PbkSfIcI6-UbJo/edit?usp=sharing).
+
+## `raw_ptr<T>`
+Use for class fields/members that would otherwise be a `T*`.
+
+This is a weakly refcounted wrapper for a `T*` (also called a raw
+pointer). When the object is deleted, the allocator will "poison" the memory
+that object occupied and keep the memory around so it’s not reused. This reduces
+the risk and impact of a use-after-free bug.
+
+Depending on the use case, it's possible a smart pointer with additional
+features would be more appropriate, but if none of those are applicable or
+necessary, `raw_ptr<T>` is preferred over a `T*`.
+
+For more information, see [`raw_ptr.md`](./raw_ptr.md); for guidance on
+usage, see
+[the style guide](../../styleguide/c++/c++.md#non_owning-pointers-in-class-fields).
+
+## `raw_ref<T>`
+Use for class fields/members that would otherwise be a `T&`.
+
+This shares much in common with `raw_ptr<T>`, but asserts that the
+`raw_ref<T>` is not nullable.
+
+For more information, see [`raw_ptr.md`](./raw_ptr.md); for guidance on
+usage, see
+[the style guide](../../styleguide/c++/c++.md#non_owning-pointers-in-class-fields).
+
+## `base::WeakPtr<T>`
+Use when a reference to an object might outlive the object itself.
+
+These are useful for asynchronous work, which is common in Chrome. If an async
+task references other objects or state, and it's possible for that state to be
+destroyed before the task runs, those references should be held in a
+`WeakPtr<T>`. Each `WeakPtr<T>` is associated with a `WeakPtrFactory<T>`. When
+the associated factory (usually owned by T) is destroyed, all `WeakPtr<T>` are
+invalidated (become null) rather than becoming use-after-frees. If such
+references should never outlive the object, consider using SafeRef instead.
+
+## `base::SafeRef<T>`
+Use to express that a reference to an object must not outlive the object.
+
+An example is if you have a class member that you want to guarantee outlives the
+class itself. SafeRef automatically enforces the lifetime assumptions and
+eliminates the need for validity checks.
+
+If the assumption that the object is valid is broken, then the process
+terminates safely and generates a crash report. Though not ideal, it's
+preferable to a potentially undiscovered security bug.
+
+This type is built on top of WeakPtr, so if you want a `SafeRef<T>`, T needs a
+WeakPtrFactory as a member. It works like `WeakPtr`, but doesn't allow for a
+null state. There's also overlap with `raw_ptr`, though this was implemented
+first.
+
+## `base::scoped_refptr<T>`
+Use when you want manually managed strong refcounting. Use carefully!
+
+It’s an owning smart pointer, so it owns a pointer to something allocated in the
+heap and gives shared ownership of the underlying object, since it can be
+copied. When all `scoped_refptr`s pointing to the same object are gone, that
+object gets destroyed.
+
+This is Chrome's answer to `std::shared_ptr<T>`. It additionally requires T to
+inherit from `RefCounted` or `RefCountedThreadSafe`, since the ref counting
+happens in the object itself, unlike `shared_ptr<T>`. It's preferred for an
+object to remain on the same thread, as `RefCounted` is much cheaper. If there
+are `scoped_refptr`s to the same object on different threads, use
+`RefCountedThreadSafe`, since accesses to the reference count can race.
+In this case, without external synchronization, the destructor can run on any
+thread. If the destructor interacts with other systems it is important to
+control and know which thread has the last reference to the object, or you can
+end up with flakiness.
diff --git a/base/memory/aligned_memory.cc b/base/memory/aligned_memory.cc
index 6cf239f..40d0e40 100644
--- a/base/memory/aligned_memory.cc
+++ b/base/memory/aligned_memory.cc
@@ -1,38 +1,41 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/aligned_memory.h"
+#include "base/check_op.h"
#include "base/logging.h"
#include "build/build_config.h"
-#if defined(OS_ANDROID)
+#if BUILDFLAG(IS_ANDROID)
#include <malloc.h>
-
-#include "starboard/types.h"
#endif
namespace base {
void* AlignedAlloc(size_t size, size_t alignment) {
DCHECK_GT(size, 0U);
- DCHECK_EQ(alignment & (alignment - 1), 0U);
+ DCHECK(bits::IsPowerOfTwo(alignment));
DCHECK_EQ(alignment % sizeof(void*), 0U);
void* ptr = nullptr;
#if defined(COMPILER_MSVC)
ptr = _aligned_malloc(size, alignment);
-// Android technically supports posix_memalign(), but does not expose it in
-// the current version of the library headers used by Chrome. Luckily,
-// memalign() on Android returns pointers which can safely be used with
-// free(), so we can use it instead. Issue filed to document this:
-// http://code.google.com/p/android/issues/detail?id=35391
-#elif defined(OS_ANDROID)
+#elif BUILDFLAG(IS_ANDROID)
+ // Android technically supports posix_memalign(), but does not expose it in
+ // the current version of the library headers used by Chromium. Luckily,
+ // memalign() on Android returns pointers which can safely be used with
+ // free(), so we can use it instead. Issue filed to document this:
+ // http://code.google.com/p/android/issues/detail?id=35391
ptr = memalign(alignment, size);
#else
- if (posix_memalign(&ptr, alignment, size))
+ int ret = posix_memalign(&ptr, alignment, size);
+ if (ret != 0) {
+ DLOG(ERROR) << "posix_memalign() returned with error " << ret;
ptr = nullptr;
+ }
#endif
+
// Since aligned allocations may fail for non-memory related reasons, force a
// crash if we encounter a failed allocation; maintaining consistent behavior
// with a normal allocation failure in Chrome.
@@ -42,7 +45,7 @@
CHECK(false);
}
// Sanity check alignment just to be safe.
- DCHECK_EQ(reinterpret_cast<uintptr_t>(ptr) & (alignment - 1), 0U);
+ DCHECK(IsAligned(ptr, alignment));
return ptr;
}
diff --git a/base/memory/aligned_memory.h b/base/memory/aligned_memory.h
index 224f1fb..d71a91f 100644
--- a/base/memory/aligned_memory.h
+++ b/base/memory/aligned_memory.h
@@ -1,19 +1,19 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_ALIGNED_MEMORY_H_
#define BASE_MEMORY_ALIGNED_MEMORY_H_
-#include <type_traits>
+#include <stddef.h>
+#include <stdint.h>
-#include "starboard/types.h"
-
-#include "starboard/memory.h"
+#include <ostream>
#include "base/base_export.h"
#include "base/basictypes.h"
-#include "base/compiler_specific.h"
+#include "base/bits.h"
+#include "base/check.h"
#include "build/build_config.h"
#if defined(COMPILER_MSVC)
@@ -134,6 +134,28 @@
}
};
+#ifdef __has_builtin
+#define SUPPORTS_BUILTIN_IS_ALIGNED (__has_builtin(__builtin_is_aligned))
+#else
+#define SUPPORTS_BUILTIN_IS_ALIGNED 0
+#endif
+
+inline bool IsAligned(uintptr_t val, size_t alignment) {
+ // If the compiler supports builtin alignment checks prefer them.
+#if SUPPORTS_BUILTIN_IS_ALIGNED
+ return __builtin_is_aligned(val, alignment);
+#else
+ DCHECK(bits::IsPowerOfTwo(alignment)) << alignment << " is not a power of 2";
+ return (val & (alignment - 1)) == 0;
+#endif
+}
+
+#undef SUPPORTS_BUILTIN_IS_ALIGNED
+
+inline bool IsAligned(const void* val, size_t alignment) {
+ return IsAligned(reinterpret_cast<uintptr_t>(val), alignment);
+}
+
} // namespace base
#endif // BASE_MEMORY_ALIGNED_MEMORY_H_
diff --git a/base/memory/aligned_memory_unittest.cc b/base/memory/aligned_memory_unittest.cc
index e354f38..912de77 100644
--- a/base/memory/aligned_memory_unittest.cc
+++ b/base/memory/aligned_memory_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -9,30 +9,27 @@
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
-#define EXPECT_ALIGNED(ptr, align) \
- EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(ptr) & (align - 1))
-
namespace base {
TEST(AlignedMemoryTest, DynamicAllocation) {
void* p = AlignedAlloc(8, 8);
EXPECT_TRUE(p);
- EXPECT_ALIGNED(p, 8);
+ EXPECT_TRUE(IsAligned(p, 8));
AlignedFree(p);
p = AlignedAlloc(8, 16);
EXPECT_TRUE(p);
- EXPECT_ALIGNED(p, 16);
+ EXPECT_TRUE(IsAligned(p, 16));
AlignedFree(p);
p = AlignedAlloc(8, 256);
EXPECT_TRUE(p);
- EXPECT_ALIGNED(p, 256);
+ EXPECT_TRUE(IsAligned(p, 256));
AlignedFree(p);
p = AlignedAlloc(8, 4096);
EXPECT_TRUE(p);
- EXPECT_ALIGNED(p, 4096);
+ EXPECT_TRUE(IsAligned(p, 4096));
AlignedFree(p);
}
@@ -40,7 +37,48 @@
std::unique_ptr<float, AlignedFreeDeleter> p(
static_cast<float*>(AlignedAlloc(8, 8)));
EXPECT_TRUE(p.get());
- EXPECT_ALIGNED(p.get(), 8);
+ EXPECT_TRUE(IsAligned(p.get(), 8));
+
+ // Make sure IsAligned() can check const pointers as well.
+ const float* const_p = p.get();
+ EXPECT_TRUE(IsAligned(const_p, 8));
+}
+
+TEST(AlignedMemoryTest, IsAligned) {
+ // Check alignment around powers of two.
+ for (int i = 0; i < 64; ++i) {
+ const uint64_t n = static_cast<uint64_t>(1) << i;
+
+ // Walk back down all lower powers of two checking alignment.
+ for (int j = i - 1; j >= 0; --j) {
+ // n is aligned on all powers of two less than or equal to 2^i.
+ EXPECT_TRUE(IsAligned(n, n >> j))
+ << "Expected " << n << " to be " << (n >> j) << " aligned";
+
+ // Also, n - 1 should not be aligned on ANY lower power of two except 1
+ // (but since we're starting from i - 1 we don't test that case here.)
+ EXPECT_FALSE(IsAligned(n - 1, n >> j))
+ << "Expected " << (n - 1) << " to NOT be " << (n >> j) << " aligned";
+ }
+ }
+
+ // And a few hard coded smoke tests for completeness:
+ EXPECT_TRUE(IsAligned(4, 2));
+ EXPECT_TRUE(IsAligned(8, 4));
+ EXPECT_TRUE(IsAligned(8, 2));
+ EXPECT_TRUE(IsAligned(0x1000, 4 << 10));
+ EXPECT_TRUE(IsAligned(0x2000, 8 << 10));
+ EXPECT_TRUE(IsAligned(1, 1));
+ EXPECT_TRUE(IsAligned(7, 1));
+ EXPECT_TRUE(IsAligned(reinterpret_cast<void*>(0x1000), 4 << 10));
+ EXPECT_TRUE(IsAligned(reinterpret_cast<int*>(0x1000), 4 << 10));
+
+ EXPECT_FALSE(IsAligned(3, 2));
+ EXPECT_FALSE(IsAligned(7, 4));
+ EXPECT_FALSE(IsAligned(7, 2));
+ EXPECT_FALSE(IsAligned(0x1001, 4 << 10));
+ EXPECT_FALSE(IsAligned(0x999, 8 << 10));
+ EXPECT_FALSE(IsAligned(7, 8));
}
} // namespace base
diff --git a/base/memory/discardable_memory.cc b/base/memory/discardable_memory.cc
index f0730aa..55db6f6 100644
--- a/base/memory/discardable_memory.cc
+++ b/base/memory/discardable_memory.cc
@@ -1,13 +1,133 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/discardable_memory.h"
+#include "base/feature_list.h"
+#include "base/memory/discardable_memory_internal.h"
+#include "base/memory/madv_free_discardable_memory_posix.h"
+#include "base/metrics/field_trial_params.h"
+#include "base/notreached.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_ANDROID)
+#include "third_party/ashmem/ashmem.h"
+#endif // BUILDFLAG(IS_ANDROID)
+
namespace base {
+namespace features {
+#if !defined(STARBOARD)
+#if BUILDFLAG(IS_POSIX)
+// Feature flag allowing the use of MADV_FREE discardable memory when there are
+// multiple supported discardable memory backings.
+BASE_FEATURE(kMadvFreeDiscardableMemory,
+ "MadvFreeDiscardableMemory",
+ base::FEATURE_DISABLED_BY_DEFAULT);
+#endif // BUILDFLAG(IS_POSIX)
+
+#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+BASE_FEATURE(kDiscardableMemoryBackingTrial,
+ "DiscardableMemoryBackingTrial",
+ base::FEATURE_DISABLED_BY_DEFAULT);
+
+// Association of trial group names to trial group enum. Array order must match
+// order of DiscardableMemoryTrialGroup enum.
+const base::FeatureParam<DiscardableMemoryTrialGroup>::Option
+ kDiscardableMemoryBackingParamOptions[] = {
+ {DiscardableMemoryTrialGroup::kEmulatedSharedMemory, "shmem"},
+ {DiscardableMemoryTrialGroup::kMadvFree, "madvfree"},
+ {DiscardableMemoryTrialGroup::kAshmem, "ashmem"}};
+
+const base::FeatureParam<DiscardableMemoryTrialGroup>
+ kDiscardableMemoryBackingParam{
+ &kDiscardableMemoryBackingTrial, "DiscardableMemoryBacking",
+ DiscardableMemoryTrialGroup::kEmulatedSharedMemory,
+ &kDiscardableMemoryBackingParamOptions};
+
+#endif // BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX) ||
+ // BUILDFLAG(IS_CHROMEOS)
+#endif
+
+} // namespace features
+
+namespace {
+
+#if defined(STARBOARD)
+#elif BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+
+DiscardableMemoryBacking GetBackingForFieldTrial() {
+ DiscardableMemoryTrialGroup trial_group =
+ GetDiscardableMemoryBackingFieldTrialGroup();
+ switch (trial_group) {
+ case DiscardableMemoryTrialGroup::kEmulatedSharedMemory:
+ case DiscardableMemoryTrialGroup::kAshmem:
+ return DiscardableMemoryBacking::kSharedMemory;
+ case DiscardableMemoryTrialGroup::kMadvFree:
+ return DiscardableMemoryBacking::kMadvFree;
+ }
+ NOTREACHED();
+}
+#endif // BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX) ||
+ // BUILDFLAG(IS_CHROMEOS)
+
+} // namespace
+
+#if defined(STARBOARD)
+#elif BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+
+// Probe capabilities of this device to determine whether we should participate
+// in the discardable memory backing trial.
+bool DiscardableMemoryBackingFieldTrialIsEnabled() {
+#if BUILDFLAG(IS_ANDROID)
+ if (!ashmem_device_is_supported())
+ return false;
+#endif // BUILDFLAG(IS_ANDROID)
+ if (base::GetMadvFreeSupport() != base::MadvFreeSupport::kSupported)
+ return false;
+
+ // IMPORTANT: Only query the feature after we determine the device has the
+ // capabilities required, which will have the side-effect of assigning a
+ // trial-group.
+ return base::FeatureList::IsEnabled(features::kDiscardableMemoryBackingTrial);
+}
+
+DiscardableMemoryTrialGroup GetDiscardableMemoryBackingFieldTrialGroup() {
+ DCHECK(DiscardableMemoryBackingFieldTrialIsEnabled());
+ return features::kDiscardableMemoryBackingParam.Get();
+}
+#endif // BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX) ||
+ // BUILDFLAG(IS_CHROMEOS)
+
DiscardableMemory::DiscardableMemory() = default;
DiscardableMemory::~DiscardableMemory() = default;
+DiscardableMemoryBacking GetDiscardableMemoryBacking() {
+#if !defined(STARBOARD)
+#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+ if (DiscardableMemoryBackingFieldTrialIsEnabled()) {
+ return GetBackingForFieldTrial();
+ }
+#endif // BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX) ||
+ // BUILDFLAG(IS_CHROMEOS)
+
+#if BUILDFLAG(IS_ANDROID)
+ if (ashmem_device_is_supported())
+ return DiscardableMemoryBacking::kSharedMemory;
+#endif // BUILDFLAG(IS_ANDROID)
+
+#if BUILDFLAG(IS_POSIX)
+ if (base::FeatureList::IsEnabled(
+ base::features::kMadvFreeDiscardableMemory) &&
+ base::GetMadvFreeSupport() == base::MadvFreeSupport::kSupported) {
+ return DiscardableMemoryBacking::kMadvFree;
+ }
+#endif // BUILDFLAG(IS_POSIX)
+#endif
+
+ return DiscardableMemoryBacking::kSharedMemory;
+}
+
} // namespace base
diff --git a/base/memory/discardable_memory.h b/base/memory/discardable_memory.h
index 5c632d1..bc1398c 100644
--- a/base/memory/discardable_memory.h
+++ b/base/memory/discardable_memory.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -6,14 +6,14 @@
#define BASE_MEMORY_DISCARDABLE_MEMORY_H_
#include "base/base_export.h"
-#include "base/compiler_specific.h"
+#include "build/build_config.h"
namespace base {
namespace trace_event {
class MemoryAllocatorDump;
class ProcessMemoryDump;
-}
+} // namespace trace_event
// Discardable memory is used to cache large objects without worrying about
// blowing out memory, both on mobile devices where there is no swap, and
@@ -48,8 +48,8 @@
// Locks the memory so that it will not be purged by the system. Returns
// true on success. If the return value is false then this object should be
- // discarded and a new one should be created.
- virtual bool Lock() WARN_UNUSED_RESULT = 0;
+ // destroyed and a new one should be created.
+ [[nodiscard]] virtual bool Lock() = 0;
// Unlocks the memory so that it can be purged by the system. Must be called
// after every successful lock call.
@@ -59,6 +59,10 @@
// before calling this.
virtual void* data() const = 0;
+ // Forces the memory to be purged, such that any following Lock() will fail.
+ // The object must be unlocked before calling this.
+ virtual void DiscardForTesting() = 0;
+
// Handy method to simplify calling data() with a reinterpret_cast.
template<typename T> T* data_as() const {
return reinterpret_cast<T*>(data());
@@ -73,6 +77,9 @@
trace_event::ProcessMemoryDump* pmd) const = 0;
};
+enum class DiscardableMemoryBacking { kSharedMemory, kMadvFree };
+BASE_EXPORT DiscardableMemoryBacking GetDiscardableMemoryBacking();
+
} // namespace base
#endif // BASE_MEMORY_DISCARDABLE_MEMORY_H_
diff --git a/base/memory/discardable_memory_allocator.cc b/base/memory/discardable_memory_allocator.cc
index 3dbb276..f8d4e49 100644
--- a/base/memory/discardable_memory_allocator.cc
+++ b/base/memory/discardable_memory_allocator.cc
@@ -1,10 +1,13 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
+// Copyright 2015 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/discardable_memory_allocator.h"
-#include "base/logging.h"
+#include <utility>
+
+#include "base/check.h"
+#include "base/process/memory.h"
namespace base {
namespace {
@@ -26,4 +29,28 @@
return g_discardable_allocator;
}
+std::unique_ptr<base::DiscardableMemory>
+DiscardableMemoryAllocator::AllocateLockedDiscardableMemoryWithRetryOrDie(
+ size_t size,
+ OnceClosure on_no_memory) {
+ auto* allocator = GetInstance();
+ auto memory = allocator->AllocateLockedDiscardableMemory(size);
+ if (memory)
+ return memory;
+
+ std::move(on_no_memory).Run();
+ // The call above will likely have freed some memory, which will end up in the
+ // freelist. To actually reduce memory footprint, need to empty the freelist
+ // as well.
+ ReleaseFreeMemory();
+
+ memory = allocator->AllocateLockedDiscardableMemory(size);
+#if !defined(COBALT_PENDING_CLEAN_UP)
+ if (!memory)
+ TerminateBecauseOutOfMemory(size);
+#endif
+
+ return memory;
+}
+
} // namespace base
diff --git a/base/memory/discardable_memory_allocator.h b/base/memory/discardable_memory_allocator.h
index e45a31f..f7a7605 100644
--- a/base/memory/discardable_memory_allocator.h
+++ b/base/memory/discardable_memory_allocator.h
@@ -1,20 +1,34 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
+// Copyright 2015 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_DISCARDABLE_MEMORY_ALLOCATOR_H_
#define BASE_MEMORY_DISCARDABLE_MEMORY_ALLOCATOR_H_
+#include <stddef.h>
+
#include <memory>
#include "base/base_export.h"
-#include "starboard/types.h"
+#include "base/functional/callback.h"
+#include "base/memory/discardable_memory.h"
namespace base {
class DiscardableMemory;
+// An allocator which creates and manages DiscardableMemory. The allocator
+// itself should be created via CreateDiscardableMemoryAllocator, which
+// selects an appropriate implementation depending on platform support.
class BASE_EXPORT DiscardableMemoryAllocator {
public:
+ DiscardableMemoryAllocator() = default;
+
+ DiscardableMemoryAllocator(const DiscardableMemoryAllocator&) = delete;
+ DiscardableMemoryAllocator& operator=(const DiscardableMemoryAllocator&) =
+ delete;
+
+ virtual ~DiscardableMemoryAllocator() = default;
+
// Returns the allocator instance.
static DiscardableMemoryAllocator* GetInstance();
@@ -22,14 +36,34 @@
// Ownership of |instance| remains with the caller.
static void SetInstance(DiscardableMemoryAllocator* allocator);
- // Giant WARNING: Discardable[Shared]Memory is only implemented on Android. On
- // non-Android platforms, it behaves exactly the same as SharedMemory.
- // See LockPages() in discardable_shared_memory.cc.
+ // Creates an initially-locked instance of discardable memory.
+ // If the platform supports Android ashmem or madvise(MADV_FREE),
+ // platform-specific techniques will be used to discard memory under pressure.
+ // Otherwise, discardable memory is emulated and manually discarded
+ // heuristically (via memory pressure notifications).
virtual std::unique_ptr<DiscardableMemory> AllocateLockedDiscardableMemory(
size_t size) = 0;
- protected:
- virtual ~DiscardableMemoryAllocator() = default;
+ // Allocates discardable memory the same way |AllocateLockedDiscardableMemory|
+ // does. In case of failure, calls |on_no_memory| and retries once. As a
+ // consequence, |on_no_memory| should free some memory, and importantly,
+ // address space as well.
+ //
+ // In case of allocation failure after retry, terminates the process with
+ // an Out Of Memory status (for triage in crash reports).
+ //
+ // As a consequence, does *not* return nullptr.
+ std::unique_ptr<DiscardableMemory>
+ AllocateLockedDiscardableMemoryWithRetryOrDie(size_t size,
+ OnceClosure on_no_memory);
+
+ // Gets the total number of bytes allocated by this allocator which have not
+ // been discarded.
+ virtual size_t GetBytesAllocated() const = 0;
+
+ // Release any memory used in the implementation of discardable memory that is
+ // not immediately being used.
+ virtual void ReleaseFreeMemory() = 0;
};
} // namespace base
diff --git a/base/memory/discardable_memory_backing_field_trial_unittest.cc b/base/memory/discardable_memory_backing_field_trial_unittest.cc
new file mode 100644
index 0000000..8d765db
--- /dev/null
+++ b/base/memory/discardable_memory_backing_field_trial_unittest.cc
@@ -0,0 +1,91 @@
+// Copyright 2019 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/discardable_memory.h"
+#include "base/memory/discardable_memory_internal.h"
+#include "base/metrics/field_trial.h"
+#include "base/metrics/field_trial_params.h"
+#include "base/test/scoped_feature_list.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if BUILDFLAG(IS_POSIX)
+#include "base/memory/madv_free_discardable_memory_posix.h"
+#endif // BUILDFLAG(IS_POSIX)
+
+#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+namespace base {
+
+class DiscardableMemoryBackingFieldTrialTest : public ::testing::Test {
+ protected:
+ DiscardableMemoryBackingFieldTrialTest() = default;
+ ~DiscardableMemoryBackingFieldTrialTest() override = default;
+
+ std::unique_ptr<test::ScopedFeatureList>
+ GetScopedFeatureListForDiscardableMemoryTrialGroup(
+ DiscardableMemoryTrialGroup group) {
+ auto feature_list = std::make_unique<test::ScopedFeatureList>();
+ feature_list->InitAndEnableFeatureWithParameters(
+ base::features::kDiscardableMemoryBackingTrial,
+ {{features::kDiscardableMemoryBackingParam.name,
+ features::kDiscardableMemoryBackingParamOptions[group].name}});
+ return feature_list;
+ }
+};
+
+TEST_F(DiscardableMemoryBackingFieldTrialTest, TrialActiveOnlyIfCapable) {
+ std::unique_ptr<test::ScopedFeatureList> scoped_feature =
+ GetScopedFeatureListForDiscardableMemoryTrialGroup(
+ DiscardableMemoryTrialGroup::kEmulatedSharedMemory);
+ FieldTrial* trial =
+ FeatureList::GetFieldTrial(features::kDiscardableMemoryBackingTrial);
+ ASSERT_NE(trial, nullptr);
+
+ // Ensure the trial goes from disabled to enabled after querying state, if and
+ // only if we are capable of running the trial. We have force enabled the
+ // trial feature in the feature list, so |trial_enabled| implies that the
+ // device is capable.
+ EXPECT_FALSE(FieldTrialList::IsTrialActive(trial->trial_name()));
+ bool trial_enabled = DiscardableMemoryBackingFieldTrialIsEnabled();
+ EXPECT_EQ(trial_enabled, FieldTrialList::IsTrialActive(trial->trial_name()));
+}
+
+TEST_F(DiscardableMemoryBackingFieldTrialTest,
+ EmulatedSharedMemoryBackingMatchesTrialGroup) {
+ std::unique_ptr<test::ScopedFeatureList> scoped_feature =
+ GetScopedFeatureListForDiscardableMemoryTrialGroup(
+ DiscardableMemoryTrialGroup::kEmulatedSharedMemory);
+ if (!DiscardableMemoryBackingFieldTrialIsEnabled())
+ return;
+ DiscardableMemoryBacking backing = GetDiscardableMemoryBacking();
+ EXPECT_EQ(backing, DiscardableMemoryBacking::kSharedMemory);
+}
+
+TEST_F(DiscardableMemoryBackingFieldTrialTest,
+ MadvFreeBackingMatchesTrialGroup) {
+ std::unique_ptr<test::ScopedFeatureList> scoped_feature =
+ GetScopedFeatureListForDiscardableMemoryTrialGroup(
+ DiscardableMemoryTrialGroup::kMadvFree);
+ if (!DiscardableMemoryBackingFieldTrialIsEnabled())
+ return;
+ DiscardableMemoryBacking backing = GetDiscardableMemoryBacking();
+ EXPECT_EQ(backing, DiscardableMemoryBacking::kMadvFree);
+}
+
+#if BUILDFLAG(IS_ANDROID)
+TEST_F(DiscardableMemoryBackingFieldTrialTest, AshmemBackingMatchesTrialGroup) {
+ std::unique_ptr<test::ScopedFeatureList> scoped_feature =
+ GetScopedFeatureListForDiscardableMemoryTrialGroup(
+ DiscardableMemoryTrialGroup::kAshmem);
+ if (!DiscardableMemoryBackingFieldTrialIsEnabled())
+ return;
+ DiscardableMemoryBacking backing = GetDiscardableMemoryBacking();
+ EXPECT_EQ(backing, DiscardableMemoryBacking::kSharedMemory);
+}
+#endif // BUILDFLAG(IS_ANDROID)
+
+} // namespace base
+
+#endif // BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX) ||
+ // BUILDFLAG(IS_CHROMEOS)
diff --git a/base/memory/discardable_memory_internal.h b/base/memory/discardable_memory_internal.h
new file mode 100644
index 0000000..b737476
--- /dev/null
+++ b/base/memory/discardable_memory_internal.h
@@ -0,0 +1,53 @@
+// Copyright 2019 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_DISCARDABLE_MEMORY_INTERNAL_H_
+#define BASE_MEMORY_DISCARDABLE_MEMORY_INTERNAL_H_
+
+#include "base/base_export.h"
+#include "base/feature_list.h"
+#include "base/metrics/field_trial_params.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+
+namespace base {
+
+// Enumeration of the possible experiment groups in the discardable memory
+// backing trial. Note that |kAshmem| and |kEmulatedSharedMemory| both map to
+// discardable shared memory, except the former allows for the use of ashmem for
+// unpinning memory. Ensure that the order of the enum values matches those in
+// |kDiscardableMemoryBackingParamOptions|.
+enum DiscardableMemoryTrialGroup : int {
+ kEmulatedSharedMemory = 0,
+ kMadvFree,
+ // Only Android devices will be assigned to the ashmem group.
+ kAshmem,
+};
+
+namespace features {
+// Feature flag enabling the discardable memory backing trial.
+BASE_EXPORT BASE_DECLARE_FEATURE(kDiscardableMemoryBackingTrial);
+
+BASE_EXPORT extern const base::FeatureParam<DiscardableMemoryTrialGroup>::Option
+ kDiscardableMemoryBackingParamOptions[];
+
+BASE_EXPORT extern const base::FeatureParam<DiscardableMemoryTrialGroup>
+ kDiscardableMemoryBackingParam;
+} // namespace features
+
+// Whether we should do the discardable memory backing trial for this session.
+BASE_EXPORT bool DiscardableMemoryBackingFieldTrialIsEnabled();
+
+// If we should do the discardable memory backing trial, then get the trial
+// group this session belongs in.
+BASE_EXPORT DiscardableMemoryTrialGroup
+GetDiscardableMemoryBackingFieldTrialGroup();
+
+} // namespace base
+
+#endif // BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX) ||
+ // BUILDFLAG(IS_CHROMEOS)
+
+#endif // BASE_MEMORY_DISCARDABLE_MEMORY_INTERNAL_H_
diff --git a/base/memory/discardable_shared_memory.cc b/base/memory/discardable_shared_memory.cc
index f8289cf..23c95ba 100644
--- a/base/memory/discardable_shared_memory.cc
+++ b/base/memory/discardable_shared_memory.cc
@@ -1,36 +1,51 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/discardable_shared_memory.h"
+#include <stdint.h>
+
#include <algorithm>
+#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/atomicops.h"
#include "base/bits.h"
+#include "base/feature_list.h"
#include "base/logging.h"
+#include "base/memory/discardable_memory.h"
+#include "base/memory/discardable_memory_internal.h"
+#include "base/memory/page_size.h"
#include "base/memory/shared_memory_tracker.h"
#include "base/numerics/safe_math.h"
-#include "base/process/process_metrics.h"
-#include "base/trace_event/memory_allocator_dump.h"
-#include "base/trace_event/process_memory_dump.h"
+#include "base/tracing_buildflags.h"
#include "build/build_config.h"
-#if defined(OS_POSIX) && !defined(OS_NACL)
+#if BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_NACL)
// For madvise() which is available on all POSIX compatible systems.
#include <sys/mman.h>
#endif
-#if defined(OS_ANDROID)
+#if BUILDFLAG(IS_ANDROID)
#include "third_party/ashmem/ashmem.h"
#endif
-#if defined(OS_WIN)
+#if BUILDFLAG(IS_WIN)
#include <windows.h>
#include "base/win/windows_version.h"
-#include "starboard/types.h"
#endif
+#if BUILDFLAG(IS_FUCHSIA)
+#include <lib/zx/vmar.h>
+#include <zircon/types.h>
+#include "base/fuchsia/fuchsia_logging.h"
+#endif
+
+#if BUILDFLAG(ENABLE_BASE_TRACING)
+#include "base/trace_event/memory_allocator_dump.h" // no-presubmit-check
+#include "base/trace_event/process_memory_dump.h" // no-presubmit-check
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
+
namespace base {
namespace {
@@ -51,21 +66,21 @@
// Serialize to Unix time when using 4-byte wire format.
// Note: 19 January 2038, this will cease to work.
template <>
-Time ALLOW_UNUSED_TYPE TimeFromWireFormat<4>(int64_t value) {
- return value ? Time::UnixEpoch() + TimeDelta::FromSeconds(value) : Time();
+[[maybe_unused]] Time TimeFromWireFormat<4>(int64_t value) {
+ return value ? Time::UnixEpoch() + Seconds(value) : Time();
}
template <>
-int64_t ALLOW_UNUSED_TYPE TimeToWireFormat<4>(Time time) {
+[[maybe_unused]] int64_t TimeToWireFormat<4>(Time time) {
return time > Time::UnixEpoch() ? (time - Time::UnixEpoch()).InSeconds() : 0;
}
// Standard serialization format when using 8-byte wire format.
template <>
-Time ALLOW_UNUSED_TYPE TimeFromWireFormat<8>(int64_t value) {
+[[maybe_unused]] Time TimeFromWireFormat<8>(int64_t value) {
return Time::FromInternalValue(value);
}
template <>
-int64_t ALLOW_UNUSED_TYPE TimeToWireFormat<8>(Time time) {
+[[maybe_unused]] int64_t TimeToWireFormat<8>(Time time) {
return time.ToInternalValue();
}
@@ -104,9 +119,24 @@
// Round up |size| to a multiple of page size.
size_t AlignToPageSize(size_t size) {
- return bits::Align(size, base::GetPageSize());
+ return bits::AlignUp(size, base::GetPageSize());
}
+#if BUILDFLAG(IS_ANDROID)
+bool UseAshmemUnpinningForDiscardableMemory() {
+ if (!ashmem_device_is_supported())
+ return false;
+
+ // If we are participating in the discardable memory backing trial, only
+ // enable ashmem unpinning when we are in the corresponding trial group.
+ if (base::DiscardableMemoryBackingFieldTrialIsEnabled()) {
+ return base::GetDiscardableMemoryBackingFieldTrialGroup() ==
+ base::DiscardableMemoryTrialGroup::kAshmem;
+ }
+ return true;
+}
+#endif // BUILDFLAG(IS_ANDROID)
+
} // namespace
DiscardableSharedMemory::DiscardableSharedMemory()
@@ -245,11 +275,11 @@
if (!length)
return PURGED;
-#if defined(OS_ANDROID)
+#if BUILDFLAG(IS_ANDROID)
// Ensure that the platform won't discard the required pages.
return LockPages(shared_memory_region_,
AlignToPageSize(sizeof(SharedState)) + offset, length);
-#elif defined(OS_MACOSX)
+#elif BUILDFLAG(IS_APPLE)
// On macOS, there is no mechanism to lock pages. However, we do need to call
// madvise(MADV_FREE_REUSE) in order to correctly update accounting for memory
// footprint via task_info().
@@ -367,13 +397,13 @@
// Note: this memory will not be accessed again. The segment will be
// freed asynchronously at a later time, so just do the best
// immediately.
-#if defined(OS_POSIX) && !defined(OS_NACL)
+#if BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_NACL)
// Linux and Android provide MADV_REMOVE which is preferred as it has a
// behavior that can be verified in tests. Other POSIX flavors (MacOSX, BSDs),
// provide MADV_FREE which has the same result but memory is purged lazily.
-#if defined(OS_LINUX) || defined(OS_ANDROID)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
#define MADV_PURGE_ARGUMENT MADV_REMOVE
-#elif defined(OS_MACOSX)
+#elif BUILDFLAG(IS_APPLE)
// MADV_FREE_REUSABLE is similar to MADV_FREE, but also marks the pages with the
// reusable bit, which allows both Activity Monitor and memory-infra to
// correctly track the pages.
@@ -389,32 +419,66 @@
AlignToPageSize(mapped_size_), MADV_PURGE_ARGUMENT)) {
DPLOG(ERROR) << "madvise() failed";
}
-#elif defined(OS_WIN)
- if (base::win::GetVersion() >= base::win::VERSION_WIN8_1) {
- // Discard the purged pages, which releases the physical storage (resident
- // memory, compressed or swapped), but leaves them reserved & committed.
- // This does not free commit for use by other applications, but allows the
- // system to avoid compressing/swapping these pages to free physical memory.
- static const auto discard_virtual_memory =
- reinterpret_cast<decltype(&::DiscardVirtualMemory)>(GetProcAddress(
- GetModuleHandle(L"kernel32.dll"), "DiscardVirtualMemory"));
- if (discard_virtual_memory) {
- DWORD discard_result = discard_virtual_memory(
- static_cast<char*>(shared_memory_mapping_.memory()) +
- AlignToPageSize(sizeof(SharedState)),
- AlignToPageSize(mapped_size_));
- if (discard_result != ERROR_SUCCESS) {
- DLOG(DCHECK) << "DiscardVirtualMemory() failed in Purge(): "
- << logging::SystemErrorCodeToString(discard_result);
- }
- }
+#elif BUILDFLAG(IS_WIN)
+ // On Windows, discarded pages are not returned to the system immediately and
+ // not guaranteed to be zeroed when returned to the application.
+ char* address = static_cast<char*>(shared_memory_mapping_.memory()) +
+ AlignToPageSize(sizeof(SharedState));
+ size_t length = AlignToPageSize(mapped_size_);
+
+ DWORD ret = DiscardVirtualMemory(address, length);
+ // DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on
+ // failure.
+ if (ret != ERROR_SUCCESS) {
+ void* ptr = VirtualAlloc(address, length, MEM_RESET, PAGE_READWRITE);
+ CHECK(ptr);
}
-#endif
+#elif BUILDFLAG(IS_FUCHSIA)
+ // De-commit via our VMAR, rather than relying on the VMO handle, since the
+ // handle may have been closed after the memory was mapped into this process.
+ uint64_t address_int = reinterpret_cast<uint64_t>(
+ static_cast<char*>(shared_memory_mapping_.memory()) +
+ AlignToPageSize(sizeof(SharedState)));
+ zx_status_t status = zx::vmar::root_self()->op_range(
+ ZX_VMO_OP_DECOMMIT, address_int, AlignToPageSize(mapped_size_), nullptr,
+ 0);
+ ZX_DCHECK(status == ZX_OK, status) << "zx_vmo_op_range(ZX_VMO_OP_DECOMMIT)";
+#endif // BUILDFLAG(IS_FUCHSIA)
last_known_usage_ = Time();
return true;
}
+void DiscardableSharedMemory::ReleaseMemoryIfPossible(size_t offset,
+ size_t length) {
+#if BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_NACL)
+// Linux and Android provide MADV_REMOVE which is preferred as it has a
+// behavior that can be verified in tests. Other POSIX flavors (MacOSX, BSDs),
+// provide MADV_FREE which has the same result but memory is purged lazily.
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
+#define MADV_PURGE_ARGUMENT MADV_REMOVE
+#elif BUILDFLAG(IS_APPLE)
+// MADV_FREE_REUSABLE is similar to MADV_FREE, but also marks the pages with the
+// reusable bit, which allows both Activity Monitor and memory-infra to
+// correctly track the pages.
+#define MADV_PURGE_ARGUMENT MADV_FREE_REUSABLE
+#else // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
+#define MADV_PURGE_ARGUMENT MADV_FREE
+#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) ||
+ // BUILDFLAG(IS_ANDROID)
+ // Advise the kernel to remove resources associated with purged pages.
+ // Subsequent accesses of memory pages will succeed, but might result in
+ // zero-fill-on-demand pages.
+ if (madvise(static_cast<char*>(shared_memory_mapping_.memory()) + offset,
+ length, MADV_PURGE_ARGUMENT)) {
+ DPLOG(ERROR) << "madvise() failed";
+ }
+#else // BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_NACL)
+ partition_alloc::DiscardSystemPages(
+ static_cast<char*>(shared_memory_mapping_.memory()) + offset, length);
+#endif // BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_NACL)
+}
+
bool DiscardableSharedMemory::IsMemoryResident() const {
DCHECK(shared_memory_mapping_.IsValid());
@@ -442,11 +506,13 @@
trace_event::MemoryAllocatorDump* local_segment_dump,
trace_event::ProcessMemoryDump* pmd,
bool is_owned) const {
+// Memory dumps are only supported when tracing support is enabled.
+#if BUILDFLAG(ENABLE_BASE_TRACING)
auto* shared_memory_dump = SharedMemoryTracker::GetOrCreateSharedMemoryDump(
shared_memory_mapping_, pmd);
// TODO(ssid): Clean this by a new api to inherit size of parent dump once the
// we send the full PMD and calculate sizes inside chrome, crbug.com/704203.
- size_t resident_size = shared_memory_dump->GetSizeInternal();
+ uint64_t resident_size = shared_memory_dump->GetSizeInternal();
local_segment_dump->AddScalar(trace_event::MemoryAllocatorDump::kNameSize,
trace_event::MemoryAllocatorDump::kUnitsBytes,
resident_size);
@@ -471,6 +537,7 @@
pmd->CreateSharedMemoryOwnershipEdge(local_segment_dump->guid(),
shared_memory_guid, kImportance);
}
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
}
// static
@@ -478,14 +545,16 @@
const UnsafeSharedMemoryRegion& region,
size_t offset,
size_t length) {
-#if defined(OS_ANDROID)
+#if BUILDFLAG(IS_ANDROID)
if (region.IsValid()) {
- int pin_result =
- ashmem_pin_region(region.GetPlatformHandle(), offset, length);
- if (pin_result == ASHMEM_WAS_PURGED)
- return PURGED;
- if (pin_result < 0)
- return FAILED;
+ if (UseAshmemUnpinningForDiscardableMemory()) {
+ int pin_result =
+ ashmem_pin_region(region.GetPlatformHandle(), offset, length);
+ if (pin_result == ASHMEM_WAS_PURGED)
+ return PURGED;
+ if (pin_result < 0)
+ return FAILED;
+ }
}
#endif
return SUCCESS;
@@ -496,11 +565,13 @@
const UnsafeSharedMemoryRegion& region,
size_t offset,
size_t length) {
-#if defined(OS_ANDROID)
+#if BUILDFLAG(IS_ANDROID)
if (region.IsValid()) {
- int unpin_result =
- ashmem_unpin_region(region.GetPlatformHandle(), offset, length);
- DCHECK_EQ(0, unpin_result);
+ if (UseAshmemUnpinningForDiscardableMemory()) {
+ int unpin_result =
+ ashmem_unpin_region(region.GetPlatformHandle(), offset, length);
+ DCHECK_EQ(0, unpin_result);
+ }
}
#endif
}
@@ -509,4 +580,11 @@
return Time::Now();
}
+#if BUILDFLAG(IS_ANDROID)
+// static
+bool DiscardableSharedMemory::IsAshmemDeviceSupportedForTesting() {
+ return UseAshmemUnpinningForDiscardableMemory();
+}
+#endif
+
} // namespace base
diff --git a/base/memory/discardable_shared_memory.h b/base/memory/discardable_shared_memory.h
index 73a2723..73ae10b 100644
--- a/base/memory/discardable_shared_memory.h
+++ b/base/memory/discardable_shared_memory.h
@@ -1,13 +1,14 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_DISCARDABLE_SHARED_MEMORY_H_
#define BASE_MEMORY_DISCARDABLE_SHARED_MEMORY_H_
+#include <stddef.h>
+
#include "base/base_export.h"
-#include "base/logging.h"
-#include "base/macros.h"
+#include "base/dcheck_is_on.h"
#include "base/memory/shared_memory_mapping.h"
#include "base/memory/unsafe_shared_memory_region.h"
#include "base/threading/thread_collision_warner.h"
@@ -16,8 +17,6 @@
#if DCHECK_IS_ON()
#include <set>
-
-#include "starboard/types.h"
#endif
// Linux (including Android) support the MADV_REMOVE argument with madvise()
@@ -27,7 +26,7 @@
// and Android to indicate that this type of behavior can be expected on
// those platforms. Note that madvise() will still be used on other POSIX
// platforms but doesn't provide the zero-fill-on-demand pages guarantee.
-#if defined(OS_LINUX) || defined(OS_ANDROID)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
#define DISCARDABLE_SHARED_MEMORY_ZERO_FILL_ON_DEMAND_PAGES_AFTER_PURGE
#endif
@@ -52,6 +51,9 @@
// memory file. Memory must be locked.
explicit DiscardableSharedMemory(UnsafeSharedMemoryRegion region);
+ DiscardableSharedMemory(const DiscardableSharedMemory&) = delete;
+ DiscardableSharedMemory& operator=(const DiscardableSharedMemory&) = delete;
+
// Closes any open files.
virtual ~DiscardableSharedMemory();
@@ -116,6 +118,13 @@
// different process. Returns NULL time if purged.
Time last_known_usage() const { return last_known_usage_; }
+ // Releases any allocated pages in the specified range, if supported by the
+ // platform. Address space in the specified range continues to be reserved.
+ // The memory is not guaranteed to be released immediately.
+ // |offset| and |length| are both in bytes. |offset| and |length| must both be
+ // page aligned.
+ void ReleaseMemoryIfPossible(size_t offset, size_t length);
+
// This returns true and sets |last_known_usage_| to 0 if
// DiscardableSharedMemory object was successfully purged. Purging can fail
// for two reasons; object might be locked or our last known usage timestamp
@@ -150,6 +159,12 @@
trace_event::ProcessMemoryDump* pmd,
bool is_owned) const;
+#if BUILDFLAG(IS_ANDROID)
+ // Returns true if the Ashmem device is supported on this system.
+ // Only use this for unit-testing.
+ static bool IsAshmemDeviceSupportedForTesting();
+#endif
+
private:
// LockPages/UnlockPages are platform-native discardable page management
// helper functions. Both expect |offset| to be specified relative to the
@@ -178,8 +193,6 @@
// synchronized somehow. Use a collision warner to detect incorrect usage.
DFAKE_MUTEX(thread_collision_warner_);
Time last_known_usage_;
-
- DISALLOW_COPY_AND_ASSIGN(DiscardableSharedMemory);
};
} // namespace base
diff --git a/base/memory/discardable_shared_memory_unittest.cc b/base/memory/discardable_shared_memory_unittest.cc
index 6320be7..6ed2e1b 100644
--- a/base/memory/discardable_shared_memory_unittest.cc
+++ b/base/memory/discardable_shared_memory_unittest.cc
@@ -1,20 +1,23 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
+// Copyright 2014 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <fcntl.h>
+#include <stdint.h>
#include "base/files/scoped_file.h"
#include "base/memory/discardable_shared_memory.h"
+#include "base/memory/page_size.h"
#include "base/memory/shared_memory_tracker.h"
-#include "base/process/process_metrics.h"
-#include "base/trace_event/memory_allocator_dump.h"
-#include "base/trace_event/process_memory_dump.h"
+#include "base/tracing_buildflags.h"
#include "build/build_config.h"
-#include "starboard/memory.h"
-#include "starboard/types.h"
#include "testing/gtest/include/gtest/gtest.h"
+#if BUILDFLAG(ENABLE_BASE_TRACING)
+#include "base/trace_event/memory_allocator_dump.h" // no-presubmit-check
+#include "base/trace_event/process_memory_dump.h" // no-presubmit-check
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
+
namespace base {
class TestDiscardableSharedMemory : public DiscardableSharedMemory {
@@ -151,6 +154,23 @@
ASSERT_FALSE(memory2.IsMemoryResident());
}
+TEST(DiscardableSharedMemoryTest, PurgeAfterClose) {
+ const uint32_t kDataSize = 1024;
+
+ TestDiscardableSharedMemory memory;
+ bool rv = memory.CreateAndMap(kDataSize);
+ ASSERT_TRUE(rv);
+
+ // Unlock things so we can Purge().
+ memory.SetNow(Time::FromDoubleT(2));
+ memory.Unlock(0, 0);
+
+ // It should be safe to Purge() |memory| after Close()ing the handle.
+ memory.Close();
+ rv = memory.Purge(Time::FromDoubleT(4));
+ EXPECT_TRUE(rv);
+}
+
TEST(DiscardableSharedMemoryTest, LastUsed) {
const uint32_t kDataSize = 1024;
@@ -242,10 +262,15 @@
EXPECT_EQ(DiscardableSharedMemory::FAILED, lock_rv);
}
-#if defined(OS_ANDROID)
+#if BUILDFLAG(IS_ANDROID)
TEST(DiscardableSharedMemoryTest, LockShouldFailIfPlatformLockPagesFails) {
const uint32_t kDataSize = 1024;
+ // This test cannot succeed on devices without a proper ashmem device
+ // because Lock() will always succeed.
+ if (!DiscardableSharedMemory::IsAshmemDeviceSupportedForTesting())
+ return;
+
DiscardableSharedMemory memory1;
bool rv1 = memory1.CreateAndMap(kDataSize);
ASSERT_TRUE(rv1);
@@ -273,12 +298,12 @@
memory2.Lock(0, base::GetPageSize());
EXPECT_EQ(DiscardableSharedMemory::FAILED, lock_rv);
}
-#endif // defined(OS_ANDROID)
+#endif // BUILDFLAG(IS_ANDROID)
TEST(DiscardableSharedMemoryTest, LockAndUnlockRange) {
- const uint32_t kDataSize = 32;
+ const size_t kDataSize = 32;
- uint32_t data_size_in_bytes = kDataSize * base::GetPageSize();
+ size_t data_size_in_bytes = kDataSize * base::GetPageSize();
TestDiscardableSharedMemory memory1;
bool rv = memory1.CreateAndMap(data_size_in_bytes);
@@ -429,6 +454,7 @@
}
#endif
+#if BUILDFLAG(ENABLE_BASE_TRACING)
TEST(DiscardableSharedMemoryTest, TracingOwnershipEdges) {
const uint32_t kDataSize = 1024;
TestDiscardableSharedMemory memory1;
@@ -453,5 +479,6 @@
// TODO(ssid): test for weak global dump once the
// CreateWeakSharedMemoryOwnershipEdge() is fixed, crbug.com/661257.
}
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
} // namespace base
diff --git a/base/memory/fake_memory_pressure_monitor.cc b/base/memory/fake_memory_pressure_monitor.cc
deleted file mode 100644
index 713b161..0000000
--- a/base/memory/fake_memory_pressure_monitor.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/fake_memory_pressure_monitor.h"
-
-namespace base {
-namespace test {
-
-FakeMemoryPressureMonitor::FakeMemoryPressureMonitor()
- : MemoryPressureMonitor(),
- memory_pressure_level_(MemoryPressureLevel::MEMORY_PRESSURE_LEVEL_NONE) {}
-
-FakeMemoryPressureMonitor::~FakeMemoryPressureMonitor() {}
-
-void FakeMemoryPressureMonitor::SetAndNotifyMemoryPressure(
- MemoryPressureLevel level) {
- memory_pressure_level_ = level;
- base::MemoryPressureListener::SimulatePressureNotification(level);
-}
-
-base::MemoryPressureMonitor::MemoryPressureLevel
-FakeMemoryPressureMonitor::GetCurrentPressureLevel() {
- return memory_pressure_level_;
-}
-
-void FakeMemoryPressureMonitor::SetDispatchCallback(
- const DispatchCallback& callback) {
- LOG(ERROR) << "FakeMemoryPressureMonitor::SetDispatchCallback";
-}
-
-} // namespace test
-} // namespace base
diff --git a/base/memory/fake_memory_pressure_monitor.h b/base/memory/fake_memory_pressure_monitor.h
deleted file mode 100644
index 2194b5f..0000000
--- a/base/memory/fake_memory_pressure_monitor.h
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MEMORY_FAKE_MEMORY_PRESSURE_MONITOR_H_
-#define BASE_MEMORY_FAKE_MEMORY_PRESSURE_MONITOR_H_
-
-#include "base/macros.h"
-#include "base/memory/memory_pressure_monitor.h"
-
-namespace base {
-namespace test {
-
-class FakeMemoryPressureMonitor : public base::MemoryPressureMonitor {
- public:
- FakeMemoryPressureMonitor();
- ~FakeMemoryPressureMonitor() override;
-
- void SetAndNotifyMemoryPressure(MemoryPressureLevel level);
-
- // base::MemoryPressureMonitor overrides:
- MemoryPressureLevel GetCurrentPressureLevel() override;
- void SetDispatchCallback(const DispatchCallback& callback) override;
-
- private:
- MemoryPressureLevel memory_pressure_level_;
-
- DISALLOW_COPY_AND_ASSIGN(FakeMemoryPressureMonitor);
-};
-
-} // namespace test
-} // namespace base
-
-#endif // BASE_MEMORY_FAKE_MEMORY_PRESSURE_MONITOR_H_
diff --git a/base/memory/free_deleter.h b/base/memory/free_deleter.h
index 91649a4..9130486 100644
--- a/base/memory/free_deleter.h
+++ b/base/memory/free_deleter.h
@@ -1,4 +1,4 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
+// Copyright 2016 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -7,9 +7,6 @@
#include <stdlib.h>
-#include "starboard/memory.h"
-#include "starboard/types.h"
-
namespace base {
// Function object which invokes 'free' on its parameter, which must be
@@ -18,7 +15,9 @@
// std::unique_ptr<int, base::FreeDeleter> foo_ptr(
// static_cast<int*>(malloc(sizeof(int))));
struct FreeDeleter {
- inline void operator()(void* ptr) const { free(ptr); }
+ inline void operator()(void* ptr) const {
+ free(ptr);
+ }
};
} // namespace base
diff --git a/base/memory/linked_ptr.h b/base/memory/linked_ptr.h
deleted file mode 100644
index 6851286..0000000
--- a/base/memory/linked_ptr.h
+++ /dev/null
@@ -1,179 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// A "smart" pointer type with reference tracking. Every pointer to a
-// particular object is kept on a circular linked list. When the last pointer
-// to an object is destroyed or reassigned, the object is deleted.
-//
-// Used properly, this deletes the object when the last reference goes away.
-// There are several caveats:
-// - Like all reference counting schemes, cycles lead to leaks.
-// - Each smart pointer is actually two pointers (8 bytes instead of 4).
-// - Every time a pointer is released, the entire list of pointers to that
-// object is traversed. This class is therefore NOT SUITABLE when there
-// will often be more than two or three pointers to a particular object.
-// - References are only tracked as long as linked_ptr<> objects are copied.
-// If a linked_ptr<> is converted to a raw pointer and back, BAD THINGS
-// will happen (double deletion).
-//
-// Note: If you use an incomplete type with linked_ptr<>, the class
-// *containing* linked_ptr<> must have a constructor and destructor (even
-// if they do nothing!).
-//
-// Thread Safety:
-// A linked_ptr is NOT thread safe. Copying a linked_ptr object is
-// effectively a read-write operation.
-//
-// Alternative: to linked_ptr is shared_ptr, which
-// - is also two pointers in size (8 bytes for 32 bit addresses)
-// - is thread safe for copying and deletion
-// - supports weak_ptrs
-
-#ifndef BASE_MEMORY_LINKED_PTR_H_
-#define BASE_MEMORY_LINKED_PTR_H_
-
-#include "base/logging.h" // for CHECK macros
-
-// This is used internally by all instances of linked_ptr<>. It needs to be
-// a non-template class because different types of linked_ptr<> can refer to
-// the same object (linked_ptr<Superclass>(obj) vs linked_ptr<Subclass>(obj)).
-// So, it needs to be possible for different types of linked_ptr to participate
-// in the same circular linked list, so we need a single class type here.
-//
-// DO NOT USE THIS CLASS DIRECTLY YOURSELF. Use linked_ptr<T>.
-class linked_ptr_internal {
- public:
- // Create a new circle that includes only this instance.
- void join_new() {
- next_ = this;
- }
-
- // Join an existing circle.
- void join(linked_ptr_internal const* ptr) {
- next_ = ptr->next_;
- ptr->next_ = this;
- }
-
- // Leave whatever circle we're part of. Returns true iff we were the
- // last member of the circle. Once this is done, you can join() another.
- bool depart() {
- if (next_ == this) return true;
- linked_ptr_internal const* p = next_;
- while (p->next_ != this) p = p->next_;
- p->next_ = next_;
- return false;
- }
-
- private:
- mutable linked_ptr_internal const* next_;
-};
-
-// TODO(http://crbug.com/556939): DEPRECATED: Use unique_ptr instead (now that
-// we have support for moveable types inside STL containers).
-template <typename T>
-class linked_ptr {
- public:
- typedef T element_type;
-
- // Take over ownership of a raw pointer. This should happen as soon as
- // possible after the object is created.
- explicit linked_ptr(T* ptr = NULL) { capture(ptr); }
- ~linked_ptr() { depart(); }
-
- // Copy an existing linked_ptr<>, adding ourselves to the list of references.
- template <typename U> linked_ptr(linked_ptr<U> const& ptr) { copy(&ptr); }
-
- linked_ptr(linked_ptr const& ptr) {
- DCHECK_NE(&ptr, this);
- copy(&ptr);
- }
-
- // Assignment releases the old value and acquires the new.
- template <typename U> linked_ptr& operator=(linked_ptr<U> const& ptr) {
- depart();
- copy(&ptr);
- return *this;
- }
-
- linked_ptr& operator=(linked_ptr const& ptr) {
- if (&ptr != this) {
- depart();
- copy(&ptr);
- }
- return *this;
- }
-
- // Smart pointer members.
- void reset(T* ptr = NULL) {
- depart();
- capture(ptr);
- }
- T* get() const { return value_; }
- T* operator->() const { return value_; }
- T& operator*() const { return *value_; }
- // Release ownership of the pointed object and returns it.
- // Sole ownership by this linked_ptr object is required.
- T* release() {
- bool last = link_.depart();
- CHECK(last);
- T* v = value_;
- value_ = NULL;
- return v;
- }
-
- bool operator==(const T* p) const { return value_ == p; }
- bool operator!=(const T* p) const { return value_ != p; }
- template <typename U>
- bool operator==(linked_ptr<U> const& ptr) const {
- return value_ == ptr.get();
- }
- template <typename U>
- bool operator!=(linked_ptr<U> const& ptr) const {
- return value_ != ptr.get();
- }
-
- private:
- template <typename U>
- friend class linked_ptr;
-
- T* value_;
- linked_ptr_internal link_;
-
- void depart() {
- if (link_.depart()) delete value_;
- }
-
- void capture(T* ptr) {
- value_ = ptr;
- link_.join_new();
- }
-
- template <typename U> void copy(linked_ptr<U> const* ptr) {
- value_ = ptr->get();
- if (value_)
- link_.join(&ptr->link_);
- else
- link_.join_new();
- }
-};
-
-template<typename T> inline
-bool operator==(T* ptr, const linked_ptr<T>& x) {
- return ptr == x.get();
-}
-
-template<typename T> inline
-bool operator!=(T* ptr, const linked_ptr<T>& x) {
- return ptr != x.get();
-}
-
-// A function to convert T* into linked_ptr<T>
-// Doing e.g. make_linked_ptr(new FooBarBaz<type>(arg)) is a shorter notation
-// for linked_ptr<FooBarBaz<type> >(new FooBarBaz<type>(arg))
-template <typename T>
-linked_ptr<T> make_linked_ptr(T* ptr) {
- return linked_ptr<T>(ptr);
-}
-
-#endif // BASE_MEMORY_LINKED_PTR_H_
diff --git a/base/memory/linked_ptr_unittest.cc b/base/memory/linked_ptr_unittest.cc
deleted file mode 100644
index 344ffa4..0000000
--- a/base/memory/linked_ptr_unittest.cc
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <string>
-
-#include "base/memory/linked_ptr.h"
-#include "base/strings/stringprintf.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace {
-
-int num = 0;
-
-std::string history;
-
-// Class which tracks allocation/deallocation
-struct A {
- A(): mynum(num++) { history += base::StringPrintf("A%d ctor\n", mynum); }
- virtual ~A() { history += base::StringPrintf("A%d dtor\n", mynum); }
- virtual void Use() { history += base::StringPrintf("A%d use\n", mynum); }
- int mynum;
-};
-
-// Subclass
-struct B: public A {
- B() { history += base::StringPrintf("B%d ctor\n", mynum); }
- ~B() override { history += base::StringPrintf("B%d dtor\n", mynum); }
- void Use() override { history += base::StringPrintf("B%d use\n", mynum); }
-};
-
-} // namespace
-
-TEST(LinkedPtrTest, Test) {
- {
- linked_ptr<A> a0, a1, a2;
- a0 = *&a0; // The *& defeats Clang's -Wself-assign warning.
- a1 = a2;
- ASSERT_EQ(a0.get(), static_cast<A*>(nullptr));
- ASSERT_EQ(a1.get(), static_cast<A*>(nullptr));
- ASSERT_EQ(a2.get(), static_cast<A*>(nullptr));
- ASSERT_TRUE(a0 == nullptr);
- ASSERT_TRUE(a1 == nullptr);
- ASSERT_TRUE(a2 == nullptr);
-
- {
- linked_ptr<A> a3(new A);
- a0 = a3;
- ASSERT_TRUE(a0 == a3);
- ASSERT_TRUE(a0 != nullptr);
- ASSERT_TRUE(a0.get() == a3);
- ASSERT_TRUE(a0 == a3.get());
- linked_ptr<A> a4(a0);
- a1 = a4;
- linked_ptr<A> a5(new A);
- ASSERT_TRUE(a5.get() != a3);
- ASSERT_TRUE(a5 != a3.get());
- a2 = a5;
- linked_ptr<B> b0(new B);
- linked_ptr<A> a6(b0);
- ASSERT_TRUE(b0 == a6);
- ASSERT_TRUE(a6 == b0);
- ASSERT_TRUE(b0 != nullptr);
- a5 = b0;
- a5 = b0;
- a3->Use();
- a4->Use();
- a5->Use();
- a6->Use();
- b0->Use();
- (*b0).Use();
- b0.get()->Use();
- }
-
- a0->Use();
- a1->Use();
- a2->Use();
-
- a1 = a2;
- a2.reset(new A);
- a0.reset();
-
- linked_ptr<A> a7;
- }
-
- ASSERT_EQ(history,
- "A0 ctor\n"
- "A1 ctor\n"
- "A2 ctor\n"
- "B2 ctor\n"
- "A0 use\n"
- "A0 use\n"
- "B2 use\n"
- "B2 use\n"
- "B2 use\n"
- "B2 use\n"
- "B2 use\n"
- "B2 dtor\n"
- "A2 dtor\n"
- "A0 use\n"
- "A0 use\n"
- "A1 use\n"
- "A3 ctor\n"
- "A0 dtor\n"
- "A3 dtor\n"
- "A1 dtor\n"
- );
-}
diff --git a/base/memory/madv_free_discardable_memory_allocator_posix.cc b/base/memory/madv_free_discardable_memory_allocator_posix.cc
new file mode 100644
index 0000000..d40576f
--- /dev/null
+++ b/base/memory/madv_free_discardable_memory_allocator_posix.cc
@@ -0,0 +1,72 @@
+// Copyright 2019 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <inttypes.h>
+#include <sys/mman.h>
+
+#include "base/memory/madv_free_discardable_memory_allocator_posix.h"
+#include "base/process/process_metrics.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/task/single_thread_task_runner.h"
+#include "base/tracing_buildflags.h"
+
+#if BUILDFLAG(ENABLE_BASE_TRACING)
+#include "base/trace_event/memory_dump_manager.h" // no-presubmit-check
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
+
+namespace base {
+
+MadvFreeDiscardableMemoryAllocatorPosix::
+ MadvFreeDiscardableMemoryAllocatorPosix() {
+#if BUILDFLAG(ENABLE_BASE_TRACING)
+ // Don't register dump provider if
+  // SingleThreadTaskRunner::CurrentDefaultHandle is not set, such as in tests
+ // and Android Webview.
+ if (base::SingleThreadTaskRunner::HasCurrentDefault()) {
+ trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
+ this, "MadvFreeDiscardableMemoryAllocator",
+ SingleThreadTaskRunner::GetCurrentDefault());
+ }
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
+}
+
+MadvFreeDiscardableMemoryAllocatorPosix::
+ ~MadvFreeDiscardableMemoryAllocatorPosix() {
+#if BUILDFLAG(ENABLE_BASE_TRACING)
+ trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(this);
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
+}
+
+std::unique_ptr<DiscardableMemory>
+MadvFreeDiscardableMemoryAllocatorPosix::AllocateLockedDiscardableMemory(
+ size_t size) {
+ return std::make_unique<MadvFreeDiscardableMemoryPosix>(size,
+ &bytes_allocated_);
+}
+
+size_t MadvFreeDiscardableMemoryAllocatorPosix::GetBytesAllocated() const {
+ return bytes_allocated_;
+}
+
+bool MadvFreeDiscardableMemoryAllocatorPosix::OnMemoryDump(
+ const trace_event::MemoryDumpArgs& args,
+ trace_event::ProcessMemoryDump* pmd) {
+#if BUILDFLAG(ENABLE_BASE_TRACING)
+ if (args.level_of_detail !=
+ base::trace_event::MemoryDumpLevelOfDetail::BACKGROUND) {
+ return true;
+ }
+
+ base::trace_event::MemoryAllocatorDump* total_dump =
+ pmd->CreateAllocatorDump("discardable/madv_free_allocated");
+ total_dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
+ base::trace_event::MemoryAllocatorDump::kUnitsBytes,
+ GetBytesAllocated());
+ return true;
+#else // BUILDFLAG(ENABLE_BASE_TRACING)
+ return false;
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
+}
+
+} // namespace base
diff --git a/base/memory/madv_free_discardable_memory_allocator_posix.h b/base/memory/madv_free_discardable_memory_allocator_posix.h
new file mode 100644
index 0000000..3e12720
--- /dev/null
+++ b/base/memory/madv_free_discardable_memory_allocator_posix.h
@@ -0,0 +1,54 @@
+// Copyright 2019 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_MADV_FREE_DISCARDABLE_MEMORY_ALLOCATOR_POSIX_H_
+#define BASE_MEMORY_MADV_FREE_DISCARDABLE_MEMORY_ALLOCATOR_POSIX_H_
+
+#include <stddef.h>
+
+#include <atomic>
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/functional/bind.h"
+#include "base/functional/callback.h"
+#include "base/memory/discardable_memory.h"
+#include "base/memory/discardable_memory_allocator.h"
+#include "base/memory/madv_free_discardable_memory_posix.h"
+#include "base/trace_event/base_tracing.h"
+#include "build/build_config.h"
+
+namespace base {
+class BASE_EXPORT MadvFreeDiscardableMemoryAllocatorPosix
+ : public DiscardableMemoryAllocator,
+ public base::trace_event::MemoryDumpProvider {
+ public:
+ MadvFreeDiscardableMemoryAllocatorPosix();
+
+ MadvFreeDiscardableMemoryAllocatorPosix(
+ const MadvFreeDiscardableMemoryAllocatorPosix&) = delete;
+ MadvFreeDiscardableMemoryAllocatorPosix& operator=(
+ const MadvFreeDiscardableMemoryAllocatorPosix&) = delete;
+
+ ~MadvFreeDiscardableMemoryAllocatorPosix() override;
+
+ std::unique_ptr<DiscardableMemory> AllocateLockedDiscardableMemory(
+ size_t size) override;
+
+ size_t GetBytesAllocated() const override;
+
+ void ReleaseFreeMemory() override {
+ // Do nothing, since MADV_FREE discardable memory does not keep any memory
+ // overhead that can be released.
+ }
+
+ bool OnMemoryDump(const trace_event::MemoryDumpArgs& args,
+ trace_event::ProcessMemoryDump* pmd) override;
+
+ private:
+ std::atomic<size_t> bytes_allocated_{0};
+};
+} // namespace base
+
+#endif // BASE_MEMORY_MADV_FREE_DISCARDABLE_MEMORY_ALLOCATOR_POSIX_H_
diff --git a/base/memory/madv_free_discardable_memory_allocator_posix_unittest.cc b/base/memory/madv_free_discardable_memory_allocator_posix_unittest.cc
new file mode 100644
index 0000000..6985988
--- /dev/null
+++ b/base/memory/madv_free_discardable_memory_allocator_posix_unittest.cc
@@ -0,0 +1,107 @@
+// Copyright 2019 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <fcntl.h>
+#include <stdint.h>
+
+#include <memory>
+
+#include "base/files/scoped_file.h"
+#include "base/memory/madv_free_discardable_memory_allocator_posix.h"
+#include "base/memory/madv_free_discardable_memory_posix.h"
+#include "base/memory/page_size.h"
+#include "base/tracing_buildflags.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if BUILDFLAG(ENABLE_BASE_TRACING)
+#include "base/trace_event/memory_allocator_dump.h" // no-presubmit-check
+#include "base/trace_event/process_memory_dump.h" // no-presubmit-check
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
+
+#define SUCCEED_IF_MADV_FREE_UNSUPPORTED() \
+ do { \
+ if (GetMadvFreeSupport() != base::MadvFreeSupport::kSupported) { \
+ SUCCEED() \
+ << "MADV_FREE is not supported (Linux 4.5+ required), vacuously " \
+ "passing test"; \
+ return; \
+ } \
+ } while (0)
+
+namespace base {
+
+class MadvFreeDiscardableMemoryAllocatorPosixTest : public ::testing::Test {
+ protected:
+ MadvFreeDiscardableMemoryAllocatorPosixTest() {
+#if BUILDFLAG(ENABLE_BASE_TRACING)
+ base::trace_event::MemoryDumpArgs dump_args = {
+ base::trace_event::MemoryDumpLevelOfDetail::DETAILED};
+ pmd_ = std::make_unique<base::trace_event::ProcessMemoryDump>(dump_args);
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
+ }
+
+ std::unique_ptr<MadvFreeDiscardableMemoryPosix>
+ AllocateLockedMadvFreeDiscardableMemory(size_t size) {
+ return std::unique_ptr<MadvFreeDiscardableMemoryPosix>(
+ static_cast<MadvFreeDiscardableMemoryPosix*>(
+ allocator_.AllocateLockedDiscardableMemory(size).release()));
+ }
+
+#if BUILDFLAG(ENABLE_BASE_TRACING)
+ size_t GetDiscardableMemorySizeFromDump(const DiscardableMemory& mem,
+ const std::string& dump_id) {
+ return mem.CreateMemoryAllocatorDump(dump_id.c_str(), pmd_.get())
+ ->GetSizeInternal();
+ }
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
+
+ MadvFreeDiscardableMemoryAllocatorPosix allocator_;
+ const size_t kPageSize = base::GetPageSize();
+#if BUILDFLAG(ENABLE_BASE_TRACING)
+ std::unique_ptr<base::trace_event::ProcessMemoryDump> pmd_;
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
+};
+
+TEST_F(MadvFreeDiscardableMemoryAllocatorPosixTest, AllocateAndUseMemory) {
+ SUCCEED_IF_MADV_FREE_UNSUPPORTED();
+
+ // Allocate 4 pages of discardable memory.
+ auto mem1 = AllocateLockedMadvFreeDiscardableMemory(kPageSize * 3 + 1);
+
+ EXPECT_TRUE(mem1->IsLockedForTesting());
+#if BUILDFLAG(ENABLE_BASE_TRACING)
+ EXPECT_EQ(GetDiscardableMemorySizeFromDump(*mem1, "dummy_dump_1"),
+ kPageSize * 3 + 1);
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
+ EXPECT_EQ(allocator_.GetBytesAllocated(), kPageSize * 3 + 1);
+
+ // Allocate 3 pages of discardable memory, and free the previously allocated
+ // pages.
+ auto mem2 = AllocateLockedMadvFreeDiscardableMemory(kPageSize * 3);
+
+ EXPECT_TRUE(mem2->IsLockedForTesting());
+#if BUILDFLAG(ENABLE_BASE_TRACING)
+ EXPECT_EQ(GetDiscardableMemorySizeFromDump(*mem2, "dummy_dump_2"),
+ kPageSize * 3);
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
+ EXPECT_EQ(allocator_.GetBytesAllocated(), kPageSize * 6 + 1);
+
+ mem1.reset();
+
+ EXPECT_EQ(allocator_.GetBytesAllocated(), kPageSize * 3);
+
+ // Write to and read from an allocated discardable memory buffer.
+ const char test_pattern[] = "ABCDEFGHIJKLMNOP";
+ char buffer[sizeof(test_pattern)];
+
+ void* data = mem2->data();
+ memcpy(data, test_pattern, sizeof(test_pattern));
+
+ data = mem2->data_as<uint8_t>();
+ memcpy(buffer, data, sizeof(test_pattern));
+
+ EXPECT_EQ(memcmp(test_pattern, buffer, sizeof(test_pattern)), 0);
+}
+} // namespace base
diff --git a/base/memory/madv_free_discardable_memory_posix.cc b/base/memory/madv_free_discardable_memory_posix.cc
new file mode 100644
index 0000000..a72aee6
--- /dev/null
+++ b/base/memory/madv_free_discardable_memory_posix.cc
@@ -0,0 +1,351 @@
+// Copyright 2019 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/madv_free_discardable_memory_posix.h"
+
+#include <errno.h>
+#include <inttypes.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/utsname.h>
+
+#include <atomic>
+
+#include "base/atomicops.h"
+#include "base/bits.h"
+#include "base/functional/callback.h"
+#include "base/logging.h"
+#include "base/memory/madv_free_discardable_memory_allocator_posix.h"
+#include "base/memory/page_size.h"
+#include "base/notreached.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/stringprintf.h"
+#include "base/tracing_buildflags.h"
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_ANDROID)
+#include <sys/prctl.h>
+#endif
+
+#if BUILDFLAG(ENABLE_BASE_TRACING)
+#include "base/trace_event/memory_allocator_dump.h" // no-presubmit-check
+#include "base/trace_event/memory_dump_manager.h" // no-presubmit-check
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
+
+#if defined(ADDRESS_SANITIZER)
+#include <sanitizer/asan_interface.h>
+#endif // defined(ADDRESS_SANITIZER)
+
+namespace {
+
+constexpr intptr_t kPageMagicCookie = 1;
+
+void* AllocatePages(size_t size_in_pages) {
+ const size_t length = size_in_pages * base::GetPageSize();
+ void* data = mmap(nullptr, length, PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ PCHECK(data != MAP_FAILED);
+
+#if BUILDFLAG(IS_ANDROID)
+ prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, data, length,
+ "madv-free-discardable");
+#endif
+
+ return data;
+}
+
+// Checks if the system supports usage of MADV_FREE as a backing for discardable
+// memory.
+base::MadvFreeSupport ProbePlatformMadvFreeSupport() {
+ // Note: If the compiling system does not have headers for Linux 4.5+, then
+ // the MADV_FREE define will not exist and the probe will default to
+ // unsupported, regardless of whether the target system actually supports
+ // MADV_FREE.
+#if !BUILDFLAG(IS_APPLE) && defined(MADV_FREE)
+ uint8_t* dummy_page = static_cast<uint8_t*>(AllocatePages(1));
+ dummy_page[0] = 1;
+
+ base::MadvFreeSupport support = base::MadvFreeSupport::kUnsupported;
+
+ // Check if the MADV_FREE advice value exists.
+ int retval = madvise(dummy_page, base::GetPageSize(), MADV_FREE);
+ if (!retval) {
+ // For Linux 4.5 to 4.12, MADV_FREE on a swapless system will lead to memory
+ // being immediately discarded. Verify that the memory was not discarded.
+ if (dummy_page[0]) {
+ support = base::MadvFreeSupport::kSupported;
+ }
+ }
+ PCHECK(!munmap(dummy_page, base::GetPageSize()));
+ return support;
+#else
+ return base::MadvFreeSupport::kUnsupported;
+#endif
+}
+
+} // namespace
+
+namespace base {
+
+MadvFreeDiscardableMemoryPosix::MadvFreeDiscardableMemoryPosix(
+ size_t size_in_bytes,
+ std::atomic<size_t>* allocator_byte_count)
+ : size_in_bytes_(size_in_bytes),
+ allocated_pages_((size_in_bytes_ + base::GetPageSize() - 1) /
+ base::GetPageSize()),
+ allocator_byte_count_(allocator_byte_count),
+ page_first_word_((size_in_bytes_ + base::GetPageSize() - 1) /
+ base::GetPageSize()) {
+ data_ = AllocatePages(allocated_pages_);
+ (*allocator_byte_count_) += size_in_bytes_;
+}
+
+MadvFreeDiscardableMemoryPosix::~MadvFreeDiscardableMemoryPosix() {
+ if (Deallocate()) {
+ DVLOG(1) << "Region evicted during destructor with " << allocated_pages_
+ << " pages";
+ }
+}
+
+bool MadvFreeDiscardableMemoryPosix::Lock() {
+ DFAKE_SCOPED_LOCK(thread_collision_warner_);
+ DCHECK(!is_locked_);
+ // Locking fails if the memory has been deallocated.
+ if (!data_)
+ return false;
+
+#if defined(ADDRESS_SANITIZER)
+ // We need to unpoison here since locking pages writes to them.
+ // Note that even if locking fails, we want to unpoison anyways after
+ // deallocation.
+ ASAN_UNPOISON_MEMORY_REGION(data_, allocated_pages_ * base::GetPageSize());
+#endif // defined(ADDRESS_SANITIZER)
+
+ size_t page_index;
+ for (page_index = 0; page_index < allocated_pages_; ++page_index) {
+ if (!LockPage(page_index))
+ break;
+ }
+
+ if (page_index < allocated_pages_) {
+ DVLOG(1) << "Region eviction discovered during lock with "
+ << allocated_pages_ << " pages";
+ Deallocate();
+ return false;
+ }
+ DCHECK(IsResident());
+
+ is_locked_ = true;
+ return true;
+}
+
+void MadvFreeDiscardableMemoryPosix::Unlock() {
+ DFAKE_SCOPED_LOCK(thread_collision_warner_);
+ DCHECK(is_locked_);
+ DCHECK(data_ != nullptr);
+
+ for (size_t page_index = 0; page_index < allocated_pages_; ++page_index) {
+ UnlockPage(page_index);
+ }
+
+#ifdef MADV_FREE
+ if (!keep_memory_for_testing_) {
+ int retval =
+ madvise(data_, allocated_pages_ * base::GetPageSize(), MADV_FREE);
+ DPCHECK(!retval);
+ }
+#endif
+
+#if defined(ADDRESS_SANITIZER)
+ ASAN_POISON_MEMORY_REGION(data_, allocated_pages_ * base::GetPageSize());
+#endif // defined(ADDRESS_SANITIZER)
+
+ is_locked_ = false;
+}
+
+void* MadvFreeDiscardableMemoryPosix::data() const {
+ DFAKE_SCOPED_LOCK(thread_collision_warner_);
+ DCHECK(is_locked_);
+ DCHECK(data_ != nullptr);
+
+ return data_;
+}
+
+bool MadvFreeDiscardableMemoryPosix::LockPage(size_t page_index) {
+ // We require the byte-level representation of std::atomic<intptr_t> to be
+ // equivalent to that of an intptr_t. Since std::atomic<intptr_t> has standard
+ // layout, having equal size is sufficient but not necessary for them to have
+ // the same byte-level representation.
+ static_assert(sizeof(intptr_t) == sizeof(std::atomic<intptr_t>),
+ "Incompatible layout of std::atomic.");
+ DCHECK(std::atomic<intptr_t>{}.is_lock_free());
+ std::atomic<intptr_t>* page_as_atomic =
+ reinterpret_cast<std::atomic<intptr_t>*>(
+ static_cast<uint8_t*>(data_) + page_index * base::GetPageSize());
+
+ intptr_t expected = kPageMagicCookie;
+
+ // Recall that we set the first word of the page to |kPageMagicCookie|
+ // (non-zero) during unlocking. Thus, if the value has changed, the page has
+ // been discarded. Restore the page's original first word from before
+ // unlocking only if the page has not been discarded.
+ if (!std::atomic_compare_exchange_strong_explicit(
+ page_as_atomic, &expected,
+ static_cast<intptr_t>(page_first_word_[page_index]),
+ std::memory_order_relaxed, std::memory_order_relaxed)) {
+ return false;
+ }
+
+ return true;
+}
+
+void MadvFreeDiscardableMemoryPosix::UnlockPage(size_t page_index) {
+ DCHECK(std::atomic<intptr_t>{}.is_lock_free());
+
+ std::atomic<intptr_t>* page_as_atomic =
+ reinterpret_cast<std::atomic<intptr_t>*>(
+ static_cast<uint8_t*>(data_) + page_index * base::GetPageSize());
+
+  // Store the first word of the page for use during locking.
+ page_first_word_[page_index].store(*page_as_atomic,
+ std::memory_order_relaxed);
+ // Store a non-zero value into the first word of the page, so we can tell when
+ // the page is discarded during locking.
+ page_as_atomic->store(kPageMagicCookie, std::memory_order_relaxed);
+}
+
+void MadvFreeDiscardableMemoryPosix::DiscardPage(size_t page_index) {
+ DFAKE_SCOPED_LOCK(thread_collision_warner_);
+ DCHECK(!is_locked_);
+ DCHECK(page_index < allocated_pages_);
+ int retval =
+ madvise(static_cast<uint8_t*>(data_) + base::GetPageSize() * page_index,
+ base::GetPageSize(), MADV_DONTNEED);
+ DPCHECK(!retval);
+}
+
+bool MadvFreeDiscardableMemoryPosix::IsLockedForTesting() const {
+ DFAKE_SCOPED_LOCK(thread_collision_warner_);
+ return is_locked_;
+}
+
+void MadvFreeDiscardableMemoryPosix::DiscardForTesting() {
+ DFAKE_SCOPED_LOCK(thread_collision_warner_);
+ DCHECK(!is_locked_);
+ int retval =
+ madvise(data_, base::GetPageSize() * allocated_pages_, MADV_DONTNEED);
+ DPCHECK(!retval);
+}
+
+trace_event::MemoryAllocatorDump*
+MadvFreeDiscardableMemoryPosix::CreateMemoryAllocatorDump(
+ const char* name,
+ trace_event::ProcessMemoryDump* pmd) const {
+#if BUILDFLAG(ENABLE_BASE_TRACING)
+ DFAKE_SCOPED_LOCK(thread_collision_warner_);
+
+ using base::trace_event::MemoryAllocatorDump;
+ std::string allocator_dump_name = base::StringPrintf(
+ "discardable/segment_0x%" PRIXPTR, reinterpret_cast<uintptr_t>(this));
+
+ MemoryAllocatorDump* allocator_dump =
+ pmd->CreateAllocatorDump(allocator_dump_name);
+
+ bool is_discarded = IsDiscarded();
+
+ MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(name);
+  // Report the client-requested size in bytes, or zero if the backing
+  // pages have been discarded.
+ dump->AddScalar(MemoryAllocatorDump::kNameSize,
+ MemoryAllocatorDump::kUnitsBytes,
+ is_discarded ? 0U : static_cast<uint64_t>(size_in_bytes_));
+
+ allocator_dump->AddScalar(
+ MemoryAllocatorDump::kNameSize, MemoryAllocatorDump::kUnitsBytes,
+ is_discarded
+ ? 0U
+ : static_cast<uint64_t>(allocated_pages_ * base::GetPageSize()));
+ allocator_dump->AddScalar(MemoryAllocatorDump::kNameObjectCount,
+ MemoryAllocatorDump::kUnitsObjects, 1U);
+ allocator_dump->AddScalar(
+ "wasted_size", MemoryAllocatorDump::kUnitsBytes,
+ static_cast<uint64_t>(allocated_pages_ * base::GetPageSize() -
+ size_in_bytes_));
+ allocator_dump->AddScalar("locked_size", MemoryAllocatorDump::kUnitsBytes,
+ is_locked_ ? size_in_bytes_ : 0U);
+ allocator_dump->AddScalar("page_count", MemoryAllocatorDump::kUnitsObjects,
+ static_cast<uint64_t>(allocated_pages_));
+
+ // The amount of space that is discarded, but not unmapped (i.e. the memory
+ // was discarded while unlocked, but the pages are still mapped in memory
+ // since Deallocate() has not been called yet). This instance is discarded if
+ // it is unlocked and not all pages are resident in memory.
+ allocator_dump->AddScalar(
+ "discarded_size", MemoryAllocatorDump::kUnitsBytes,
+ is_discarded ? allocated_pages_ * base::GetPageSize() : 0U);
+
+ pmd->AddSuballocation(dump->guid(), allocator_dump_name);
+ return dump;
+#else // BUILDFLAG(ENABLE_BASE_TRACING)
+ NOTREACHED();
+ return nullptr;
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
+}
+
+bool MadvFreeDiscardableMemoryPosix::IsValid() const {
+ DFAKE_SCOPED_RECURSIVE_LOCK(thread_collision_warner_);
+ return data_ != nullptr;
+}
+
+void MadvFreeDiscardableMemoryPosix::SetKeepMemoryForTesting(bool keep_memory) {
+ DFAKE_SCOPED_LOCK(thread_collision_warner_);
+ DCHECK(is_locked_);
+ keep_memory_for_testing_ = keep_memory;
+}
+
+bool MadvFreeDiscardableMemoryPosix::IsResident() const {
+ DFAKE_SCOPED_RECURSIVE_LOCK(thread_collision_warner_);
+#if BUILDFLAG(IS_APPLE)
+ std::vector<char> vec(allocated_pages_);
+#else
+ std::vector<unsigned char> vec(allocated_pages_);
+#endif
+
+ int retval =
+ mincore(data_, allocated_pages_ * base::GetPageSize(), vec.data());
+ DPCHECK(retval == 0 || errno == EAGAIN);
+
+ for (size_t i = 0; i < allocated_pages_; ++i) {
+ if (!(vec[i] & 1))
+ return false;
+ }
+ return true;
+}
+
+bool MadvFreeDiscardableMemoryPosix::IsDiscarded() const {
+ return !is_locked_ && !IsResident();
+}
+
+bool MadvFreeDiscardableMemoryPosix::Deallocate() {
+ DFAKE_SCOPED_RECURSIVE_LOCK(thread_collision_warner_);
+ if (data_) {
+#if defined(ADDRESS_SANITIZER)
+ ASAN_UNPOISON_MEMORY_REGION(data_, allocated_pages_ * base::GetPageSize());
+#endif // defined(ADDRESS_SANITIZER)
+
+ int retval = munmap(data_, allocated_pages_ * base::GetPageSize());
+ PCHECK(!retval);
+ data_ = nullptr;
+ (*allocator_byte_count_) -= size_in_bytes_;
+ return true;
+ }
+ return false;
+}
+
+MadvFreeSupport GetMadvFreeSupport() {
+ static MadvFreeSupport kMadvFreeSupport = ProbePlatformMadvFreeSupport();
+ return kMadvFreeSupport;
+}
+
+} // namespace base
diff --git a/base/memory/madv_free_discardable_memory_posix.h b/base/memory/madv_free_discardable_memory_posix.h
new file mode 100644
index 0000000..70ef71c
--- /dev/null
+++ b/base/memory/madv_free_discardable_memory_posix.h
@@ -0,0 +1,133 @@
+// Copyright 2019 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_MADV_FREE_DISCARDABLE_MEMORY_POSIX_H_
+#define BASE_MEMORY_MADV_FREE_DISCARDABLE_MEMORY_POSIX_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <atomic>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/functional/callback.h"
+#include "base/memory/discardable_memory.h"
+#include "base/memory/raw_ptr.h"
+#include "base/sequence_checker.h"
+#include "base/threading/thread_collision_warner.h"
+#include "build/build_config.h"
+
+namespace base {
+// Discardable memory backed by the MADV_FREE advice value, available since
+// Linux 4.5.
+//
+// When unlocked, this implementation of discardable memory will
+// apply the MADV_FREE advice value to all pages within the allocated range,
+// causing pages to be discarded instead of swapped upon memory pressure.
+// When pages are discarded, they become zero-fill-on-demand pages.
+// Attempting to unlock an already-unlocked instance is undefined behaviour.
+//
+// When locked, all pages will be checked for eviction. If any page has
+// been discarded, the entire allocated range is unmapped and the lock fails.
+// After a failed lock, the instance remains unlocked but any further attempts
+// to lock will fail. Additionally, the discardable memory instance is
+// invalidated and access to memory obtained via data() is undefined behaviour.
+// Attempting to lock an already-locked instance is undefined behaviour. If no
+// page in the allocated range has been discarded, then lock succeeds and the
+// allocated range of memory is available for use without any page fault,
+// additional allocations, or memory zeroing.
+//
+// If DCHECK_IS_ON(), additional checks are added to ensure that the discardable
+// memory instance is being used correctly. These checks are not present by
+// default, as some incur a significant performance penalty or do not warrant
+// crashing the process. These checks are:
+// - Do not allow lock while already locked or unlock while already unlocked
+// - Do not allow memory access via data() if instance is deallocated after
+// Lock() (although invalid memory can still be accessed through existing
+// pointers)
+// - After Unlock(), disallow read or write of memory pointed to by data()
+// with PROT_NONE until next Lock()
+//
+// Caveats:
+// [1]: The smallest allocation unit is the size of a page, so it is
+// unsuitable for small allocations.
+//
+// [2]: The size of a discardable memory instance must be greater than 0 bytes.
+//
+class BASE_EXPORT MadvFreeDiscardableMemoryPosix : public DiscardableMemory {
+ public:
+  MadvFreeDiscardableMemoryPosix(size_t size_in_bytes,
+                                 std::atomic<size_t>* allocator_byte_count);
+
+ MadvFreeDiscardableMemoryPosix(const MadvFreeDiscardableMemoryPosix&) =
+ delete;
+ MadvFreeDiscardableMemoryPosix& operator=(
+ const MadvFreeDiscardableMemoryPosix&) = delete;
+
+ ~MadvFreeDiscardableMemoryPosix() override;
+
+ bool Lock() override;
+ void Unlock() override;
+ void* data() const override;
+
+ bool IsLockedForTesting() const;
+ void DiscardForTesting() override;
+
+ trace_event::MemoryAllocatorDump* CreateMemoryAllocatorDump(
+ const char* name,
+ trace_event::ProcessMemoryDump* pmd) const override;
+
+ protected:
+ size_t GetPageCount() const { return allocated_pages_; }
+
+ bool IsValid() const;
+
+ void SetKeepMemoryForTesting(bool keep_memory);
+
+ // Force page discard by applying MADV_DONTNEED hint on a page.
+ // Has the same effect as if the page was naturally discarded during
+ // memory pressure due to MADV_FREE (i.e. zero-fill-on-demand pages for
+ // anonymous private mappings).
+ // Note that MADV_DONTNEED takes effect immediately for non-shared mappings.
+ void DiscardPage(size_t page_index);
+
+ private:
+ bool LockPage(size_t page_index);
+ void UnlockPage(size_t page_index);
+
+ bool Deallocate();
+
+ // Gets whether this instance has been discarded (but not yet unmapped).
+ bool IsDiscarded() const;
+
+ // Get whether all pages in this discardable memory instance are resident.
+ bool IsResident() const;
+
+ const size_t size_in_bytes_;
+ const size_t allocated_pages_;
+
+ // Pointer to allocator memory usage metric for updating upon allocation and
+ // destruction.
+ raw_ptr<std::atomic<size_t>> allocator_byte_count_;
+
+  // Data comes from mmap() and we manage its poisoning.
+ RAW_PTR_EXCLUSION void* data_;
+ bool is_locked_ = true;
+
+ // If true, MADV_FREE will not be set on Unlock().
+ bool keep_memory_for_testing_ = false;
+
+ // Stores the first word of a page for use during locking.
+ std::vector<std::atomic<intptr_t>> page_first_word_;
+
+ DFAKE_MUTEX(thread_collision_warner_);
+};
+
+enum class MadvFreeSupport { kUnsupported, kSupported };
+BASE_EXPORT MadvFreeSupport GetMadvFreeSupport();
+
+} // namespace base
+
+#endif // BASE_MEMORY_MADV_FREE_DISCARDABLE_MEMORY_POSIX_H_
diff --git a/base/memory/madv_free_discardable_memory_posix_unittest.cc b/base/memory/madv_free_discardable_memory_posix_unittest.cc
new file mode 100644
index 0000000..11d5404
--- /dev/null
+++ b/base/memory/madv_free_discardable_memory_posix_unittest.cc
@@ -0,0 +1,141 @@
+// Copyright 2019 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <fcntl.h>
+#include <stdint.h>
+
+#include <sys/mman.h>
+#include <memory>
+
+#include "base/files/scoped_file.h"
+#include "base/functional/bind.h"
+#include "base/functional/callback.h"
+#include "base/functional/callback_helpers.h"
+#include "base/memory/madv_free_discardable_memory_allocator_posix.h"
+#include "base/memory/madv_free_discardable_memory_posix.h"
+#include "base/memory/page_size.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#define SUCCEED_IF_MADV_FREE_UNSUPPORTED() \
+ do { \
+ if (GetMadvFreeSupport() != base::MadvFreeSupport::kSupported) { \
+ SUCCEED() \
+ << "MADV_FREE is not supported (Linux 4.5+ required), vacuously " \
+ "passing test"; \
+ return; \
+ } \
+ } while (0)
+
+namespace base {
+std::atomic<size_t> allocator_byte_count;
+class MadvFreeDiscardableMemoryPosixTester
+ : public MadvFreeDiscardableMemoryPosix {
+ public:
+ MadvFreeDiscardableMemoryPosixTester(size_t size_in_bytes)
+ : MadvFreeDiscardableMemoryPosix(size_in_bytes, &allocator_byte_count) {}
+
+ using MadvFreeDiscardableMemoryPosix::DiscardPage;
+ using MadvFreeDiscardableMemoryPosix::GetPageCount;
+ using MadvFreeDiscardableMemoryPosix::IsLockedForTesting;
+ using MadvFreeDiscardableMemoryPosix::IsValid;
+ using MadvFreeDiscardableMemoryPosix::SetKeepMemoryForTesting;
+};
+
+class MadvFreeDiscardableMemoryTest : public ::testing::Test {
+ protected:
+ MadvFreeDiscardableMemoryTest() {}
+ ~MadvFreeDiscardableMemoryTest() override {}
+
+ const size_t kPageSize = base::GetPageSize();
+
+ std::unique_ptr<MadvFreeDiscardableMemoryPosixTester>
+ AllocateLockedDiscardableMemoryPagesForTest(size_t size_in_pages) {
+ return std::make_unique<MadvFreeDiscardableMemoryPosixTester>(
+ size_in_pages * kPageSize);
+ }
+};
+
+using MadvFreeDiscardableMemoryDeathTest = MadvFreeDiscardableMemoryTest;
+
+constexpr char kTestPattern[] =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789";
+
+TEST_F(MadvFreeDiscardableMemoryTest, AllocateAndUse) {
+ SUCCEED_IF_MADV_FREE_UNSUPPORTED();
+
+ std::unique_ptr<MadvFreeDiscardableMemoryPosixTester> mem =
+ AllocateLockedDiscardableMemoryPagesForTest(1);
+
+ mem->SetKeepMemoryForTesting(true);
+
+ ASSERT_TRUE(mem->IsValid());
+ ASSERT_TRUE(mem->IsLockedForTesting());
+
+ char buffer[sizeof(kTestPattern)];
+
+ // Write test pattern to block
+ uint8_t* data = mem->data_as<uint8_t>();
+ memcpy(data, kTestPattern, sizeof(kTestPattern));
+
+ // Read test pattern from block
+ data = mem->data_as<uint8_t>();
+ memcpy(buffer, data, sizeof(kTestPattern));
+
+ EXPECT_EQ(memcmp(kTestPattern, buffer, sizeof(kTestPattern)), 0);
+
+ // Memory contents should not change after successful unlock and lock.
+ mem->Unlock();
+ ASSERT_TRUE(mem->Lock());
+
+ EXPECT_EQ(memcmp(kTestPattern, buffer, sizeof(kTestPattern)), 0);
+}
+
+TEST_F(MadvFreeDiscardableMemoryTest, LockAndUnlock) {
+ SUCCEED_IF_MADV_FREE_UNSUPPORTED();
+
+ const size_t kPageCount = 10;
+ std::unique_ptr<MadvFreeDiscardableMemoryPosixTester> mem =
+ AllocateLockedDiscardableMemoryPagesForTest(kPageCount);
+
+ ASSERT_TRUE(mem->IsValid());
+ ASSERT_TRUE(mem->IsLockedForTesting());
+ memset(mem->data(), 0xE7, kPageSize * kPageCount);
+ mem->Unlock();
+ ASSERT_FALSE(mem->IsLockedForTesting());
+ bool result = mem->Lock();
+ // If Lock() succeeded, the memory region should be valid. If Lock() failed,
+ // the memory region should be invalid.
+ ASSERT_EQ(result, mem->IsValid());
+}
+
+TEST_F(MadvFreeDiscardableMemoryTest, LockShouldFailAfterDiscard) {
+ SUCCEED_IF_MADV_FREE_UNSUPPORTED();
+
+ constexpr size_t kPageCount = 10;
+
+ std::unique_ptr<MadvFreeDiscardableMemoryPosixTester> mem =
+ AllocateLockedDiscardableMemoryPagesForTest(kPageCount);
+ uint8_t* data = mem->data_as<uint8_t>();
+
+ ASSERT_TRUE(mem->IsValid());
+ ASSERT_TRUE(mem->IsLockedForTesting());
+ // Modify block data such that at least one page is non-zero.
+ memset(data, 0xff, kPageSize * kPageCount);
+
+ mem->Unlock();
+ ASSERT_FALSE(mem->IsLockedForTesting());
+ // Forcefully discard at least one non-zero page.
+ mem->DiscardPage(5);
+
+ // Locking when a page has been discarded should fail.
+ ASSERT_FALSE(mem->Lock());
+ // Locking after memory is deallocated should fail.
+ ASSERT_FALSE(mem->Lock());
+
+ // Check that memory has been deallocated.
+ ASSERT_FALSE(mem->IsValid());
+}
+
+} // namespace base
diff --git a/base/memory/memory_coordinator_client.cc b/base/memory/memory_coordinator_client.cc
deleted file mode 100644
index 7fa6232..0000000
--- a/base/memory/memory_coordinator_client.cc
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/memory_coordinator_client.h"
-
-#include "base/logging.h"
-
-namespace base {
-
-const char* MemoryStateToString(MemoryState state) {
- switch (state) {
- case MemoryState::UNKNOWN:
- return "unknown";
- case MemoryState::NORMAL:
- return "normal";
- case MemoryState::THROTTLED:
- return "throttled";
- case MemoryState::SUSPENDED:
- return "suspended";
- default:
- NOTREACHED();
- }
- return "";
-}
-
-} // namespace base
diff --git a/base/memory/memory_coordinator_client.h b/base/memory/memory_coordinator_client.h
deleted file mode 100644
index 804f0a6..0000000
--- a/base/memory/memory_coordinator_client.h
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MEMORY_MEMORY_COORDINATOR_CLIENT_H_
-#define BASE_MEMORY_MEMORY_COORDINATOR_CLIENT_H_
-
-#include "base/base_export.h"
-
-namespace base {
-
-// OVERVIEW:
-//
-// MemoryCoordinatorClient is an interface which a component can implement to
-// adjust "future allocation" and "existing allocation". For "future allocation"
-// it provides a callback to observe memory state changes, and for "existing
-// allocation" it provides a callback to purge memory.
-//
-// Unlike MemoryPressureListener, memory state changes are stateful. State
-// transitions are throttled to avoid thrashing; the exact throttling period is
-// platform dependent, but will be at least 5-10 seconds. When a state change
-// notification is dispatched, clients are expected to update their allocation
-// policies (e.g. setting cache limit) that persist for the duration of the
-// memory state. Note that clients aren't expected to free up memory on memory
-// state changes. Clients should wait for a separate purge request to free up
-// memory. Purging requests will be throttled as well.
-
-// MemoryState is an indicator that processes can use to guide their memory
-// allocation policies. For example, a process that receives the throttled
-// state can use that as as signal to decrease memory cache limits.
-// NOTE: This enum is used to back an UMA histogram, and therefore should be
-// treated as append-only.
-enum class MemoryState : int {
- // The state is unknown.
- UNKNOWN = -1,
- // No memory constraints.
- NORMAL = 0,
- // Running and interactive but memory allocation should be throttled.
- // Clients should set lower budget for any memory that is used as an
- // optimization but that is not necessary for the process to run.
- // (e.g. caches)
- THROTTLED = 1,
- // Still resident in memory but core processing logic has been suspended.
- // In most cases, OnPurgeMemory() will be called before entering this state.
- SUSPENDED = 2,
-};
-
-const int kMemoryStateMax = static_cast<int>(MemoryState::SUSPENDED) + 1;
-
-// Returns a string representation of MemoryState.
-BASE_EXPORT const char* MemoryStateToString(MemoryState state);
-
-// This is an interface for components which can respond to memory status
-// changes. An initial state is NORMAL. See MemoryCoordinatorClientRegistry for
-// threading guarantees and ownership management.
-class BASE_EXPORT MemoryCoordinatorClient {
- public:
- // Called when memory state has changed. Any transition can occur except for
- // UNKNOWN. General guidelines are:
- // * NORMAL: Restore the default settings for memory allocation/usage if
- // it has changed.
- // * THROTTLED: Use smaller limits for future memory allocations. You don't
- // need to take any action on existing allocations.
- // * SUSPENDED: Use much smaller limits for future memory allocations. You
- // don't need to take any action on existing allocations.
- virtual void OnMemoryStateChange(MemoryState state) {}
-
- // Called to purge memory.
- // This callback should free up any memory that is used as an optimization, or
- // any memory whose contents can be reproduced.
- virtual void OnPurgeMemory() {}
-
- protected:
- virtual ~MemoryCoordinatorClient() = default;
-};
-
-} // namespace base
-
-#endif // BASE_MEMORY_MEMORY_COORDINATOR_CLIENT_H_
diff --git a/base/memory/memory_coordinator_client_registry.cc b/base/memory/memory_coordinator_client_registry.cc
deleted file mode 100644
index 45b4a7f..0000000
--- a/base/memory/memory_coordinator_client_registry.cc
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/memory_coordinator_client_registry.h"
-
-namespace base {
-
-// static
-MemoryCoordinatorClientRegistry*
-MemoryCoordinatorClientRegistry::GetInstance() {
- return Singleton<
- MemoryCoordinatorClientRegistry,
- LeakySingletonTraits<MemoryCoordinatorClientRegistry>>::get();
-}
-
-MemoryCoordinatorClientRegistry::MemoryCoordinatorClientRegistry()
- : clients_(new ClientList) {}
-
-MemoryCoordinatorClientRegistry::~MemoryCoordinatorClientRegistry() = default;
-
-void MemoryCoordinatorClientRegistry::Register(
- MemoryCoordinatorClient* client) {
- clients_->AddObserver(client);
-}
-
-void MemoryCoordinatorClientRegistry::Unregister(
- MemoryCoordinatorClient* client) {
- clients_->RemoveObserver(client);
-}
-
-void MemoryCoordinatorClientRegistry::Notify(MemoryState state) {
- clients_->Notify(FROM_HERE,
- &base::MemoryCoordinatorClient::OnMemoryStateChange, state);
-}
-
-void MemoryCoordinatorClientRegistry::PurgeMemory() {
- clients_->Notify(FROM_HERE, &base::MemoryCoordinatorClient::OnPurgeMemory);
-}
-
-} // namespace base
diff --git a/base/memory/memory_coordinator_client_registry.h b/base/memory/memory_coordinator_client_registry.h
deleted file mode 100644
index e2c81b7..0000000
--- a/base/memory/memory_coordinator_client_registry.h
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MEMORY_MEMORY_CLIENT_REGISTRY_H_
-#define BASE_MEMORY_MEMORY_CLIENT_REGISTRY_H_
-
-#include "base/base_export.h"
-#include "base/memory/memory_coordinator_client.h"
-#include "base/memory/singleton.h"
-#include "base/observer_list_threadsafe.h"
-
-namespace base {
-
-// MemoryCoordinatorClientRegistry is the registry of MemoryCoordinatorClients.
-// This class manages clients and provides a way to notify memory state changes
-// to clients, but this isn't responsible to determine how/when to change
-// memory states.
-//
-// Threading guarantees:
-// This class uses ObserverListThreadsafe internally, which means that
-// * Registering/unregistering callbacks are thread-safe.
-// * Callbacks are invoked on the same thread on which they are registered.
-// See base/observer_list_threadsafe.h for reference.
-//
-// Ownership management:
-// This class doesn't take the ownership of clients. Clients must be
-// unregistered before they are destroyed.
-class BASE_EXPORT MemoryCoordinatorClientRegistry {
- public:
- static MemoryCoordinatorClientRegistry* GetInstance();
-
- ~MemoryCoordinatorClientRegistry();
-
- // Registers/unregisters a client. Does not take ownership of client.
- void Register(MemoryCoordinatorClient* client);
- void Unregister(MemoryCoordinatorClient* client);
-
- // Notify clients of a memory state change.
- void Notify(MemoryState state);
-
- // Requests purging memory.
- void PurgeMemory();
-
- private:
- friend struct DefaultSingletonTraits<MemoryCoordinatorClientRegistry>;
-
- MemoryCoordinatorClientRegistry();
-
- using ClientList = ObserverListThreadSafe<MemoryCoordinatorClient>;
- scoped_refptr<ClientList> clients_;
-};
-
-} // namespace base
-
-#endif // BASE_MEMORY_MEMORY_CLIENT_REGISTRY_H_
diff --git a/base/memory/memory_coordinator_client_registry_unittest.cc b/base/memory/memory_coordinator_client_registry_unittest.cc
deleted file mode 100644
index 37ed767..0000000
--- a/base/memory/memory_coordinator_client_registry_unittest.cc
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/memory_coordinator_client_registry.h"
-
-#include "base/message_loop/message_loop.h"
-#include "base/run_loop.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base {
-
-namespace {
-
-class TestMemoryCoordinatorClient : public MemoryCoordinatorClient {
- public:
- void OnMemoryStateChange(MemoryState state) override { state_ = state; }
-
- void OnPurgeMemory() override { ++purge_count_; }
-
- MemoryState state() const { return state_; }
- size_t purge_count() const { return purge_count_; }
-
- private:
- MemoryState state_ = MemoryState::UNKNOWN;
- size_t purge_count_ = 0;
-};
-
-void RunUntilIdle() {
- base::RunLoop loop;
- loop.RunUntilIdle();
-}
-
-TEST(MemoryCoordinatorClientRegistryTest, NotifyStateChange) {
- MessageLoop loop;
- auto* registry = MemoryCoordinatorClientRegistry::GetInstance();
- TestMemoryCoordinatorClient client;
- registry->Register(&client);
- registry->Notify(MemoryState::THROTTLED);
- RunUntilIdle();
- ASSERT_EQ(MemoryState::THROTTLED, client.state());
- registry->Unregister(&client);
-}
-
-TEST(MemoryCoordinatorClientRegistryTest, PurgeMemory) {
- MessageLoop loop;
- auto* registry = MemoryCoordinatorClientRegistry::GetInstance();
- TestMemoryCoordinatorClient client;
- registry->Register(&client);
- registry->PurgeMemory();
- RunUntilIdle();
- ASSERT_EQ(1u, client.purge_count());
- registry->Unregister(&client);
-}
-
-} // namespace
-
-} // namespace base
diff --git a/base/memory/memory_coordinator_proxy.cc b/base/memory/memory_coordinator_proxy.cc
deleted file mode 100644
index 4e22fe0..0000000
--- a/base/memory/memory_coordinator_proxy.cc
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/memory_coordinator_proxy.h"
-
-namespace base {
-
-namespace {
-
-MemoryCoordinator* g_memory_coordinator = nullptr;
-
-} // namespace
-
-MemoryCoordinatorProxy::MemoryCoordinatorProxy() = default;
-
-MemoryCoordinatorProxy::~MemoryCoordinatorProxy() = default;
-
-// static
-MemoryCoordinatorProxy* MemoryCoordinatorProxy::GetInstance() {
- return Singleton<base::MemoryCoordinatorProxy>::get();
-}
-
-// static
-void MemoryCoordinatorProxy::SetMemoryCoordinator(
- MemoryCoordinator* coordinator) {
- DCHECK(!g_memory_coordinator || !coordinator);
- g_memory_coordinator = coordinator;
-}
-
-MemoryState MemoryCoordinatorProxy::GetCurrentMemoryState() const {
- if (!g_memory_coordinator)
- return MemoryState::NORMAL;
- return g_memory_coordinator->GetCurrentMemoryState();
-}
-
-} // namespace base
diff --git a/base/memory/memory_coordinator_proxy.h b/base/memory/memory_coordinator_proxy.h
deleted file mode 100644
index b6e7b3f..0000000
--- a/base/memory/memory_coordinator_proxy.h
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MEMORY_MEMORY_COORDINATOR_PROXY_H_
-#define BASE_MEMORY_MEMORY_COORDINATOR_PROXY_H_
-
-#include "base/base_export.h"
-#include "base/callback.h"
-#include "base/memory/memory_coordinator_client.h"
-#include "base/memory/singleton.h"
-
-namespace base {
-
-// The MemoryCoordinator interface. See comments in MemoryCoordinatorProxy for
-// method descriptions.
-class BASE_EXPORT MemoryCoordinator {
- public:
- virtual ~MemoryCoordinator() = default;
-
- virtual MemoryState GetCurrentMemoryState() const = 0;
-};
-
-// The proxy of MemoryCoordinator to be accessed from components that are not
-// in content/browser e.g. net.
-class BASE_EXPORT MemoryCoordinatorProxy {
- public:
- static MemoryCoordinatorProxy* GetInstance();
-
- // Sets an implementation of MemoryCoordinator. MemoryCoordinatorProxy doesn't
- // take the ownership of |coordinator|. It must outlive this proxy.
- // This should be called before any components starts using this proxy.
- static void SetMemoryCoordinator(MemoryCoordinator* coordinator);
-
- // Returns the current memory state.
- MemoryState GetCurrentMemoryState() const;
-
- private:
- friend struct base::DefaultSingletonTraits<MemoryCoordinatorProxy>;
-
- MemoryCoordinatorProxy();
- virtual ~MemoryCoordinatorProxy();
-
- DISALLOW_COPY_AND_ASSIGN(MemoryCoordinatorProxy);
-};
-
-} // namespace base
-
-#endif // BASE_MEMORY_MEMORY_COORDINATOR_PROXY_H_
diff --git a/base/memory/memory_pressure_listener.cc b/base/memory/memory_pressure_listener.cc
index 900a7d4..0df91b0 100644
--- a/base/memory/memory_pressure_listener.cc
+++ b/base/memory/memory_pressure_listener.cc
@@ -1,11 +1,20 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/memory_pressure_listener.h"
+#include <atomic>
+
+#include "base/observer_list.h"
#include "base/observer_list_threadsafe.h"
-#include "base/trace_event/trace_event.h"
+#include "base/task/sequenced_task_runner.h"
+#include "base/trace_event/base_tracing.h"
+#include "base/tracing_buildflags.h"
+
+#if BUILDFLAG(ENABLE_BASE_TRACING)
+#include "base/trace_event/memory_pressure_level_proto.h" // no-presubmit-check
+#endif
namespace base {
@@ -18,7 +27,13 @@
~MemoryPressureObserver() = delete;
void AddObserver(MemoryPressureListener* listener, bool sync) {
- async_observers_->AddObserver(listener);
+ // TODO(crbug.com/1063868): DCHECK instead of silently failing when a
+ // MemoryPressureListener is created in a non-sequenced context. Tests will
+ // need to be adjusted for that to work.
+ if (SequencedTaskRunner::HasCurrentDefault()) {
+ async_observers_->AddObserver(listener);
+ }
+
if (sync) {
AutoLock lock(sync_observers_lock_);
sync_observers_.AddObserver(listener);
@@ -42,8 +57,9 @@
private:
const scoped_refptr<ObserverListThreadSafe<MemoryPressureListener>>
- async_observers_ = base::MakeRefCounted<
- ObserverListThreadSafe<MemoryPressureListener>>();
+ async_observers_ =
+ base::MakeRefCounted<ObserverListThreadSafe<MemoryPressureListener>>(
+ ObserverListPolicy::EXISTING_ONLY);
ObserverList<MemoryPressureListener>::Unchecked sync_observers_;
Lock sync_observers_lock_;
};
@@ -54,22 +70,25 @@
return observer;
}
-subtle::Atomic32 g_notifications_suppressed = 0;
+std::atomic<bool> g_notifications_suppressed;
} // namespace
MemoryPressureListener::MemoryPressureListener(
+ const base::Location& creation_location,
const MemoryPressureListener::MemoryPressureCallback& callback)
- : callback_(callback) {
+ : callback_(callback), creation_location_(creation_location) {
GetMemoryPressureObserver()->AddObserver(this, false);
}
MemoryPressureListener::MemoryPressureListener(
+ const base::Location& creation_location,
const MemoryPressureListener::MemoryPressureCallback& callback,
const MemoryPressureListener::SyncMemoryPressureCallback&
sync_memory_pressure_callback)
: callback_(callback),
- sync_memory_pressure_callback_(sync_memory_pressure_callback) {
+ sync_memory_pressure_callback_(sync_memory_pressure_callback),
+ creation_location_(creation_location) {
GetMemoryPressureObserver()->AddObserver(this, true);
}
@@ -78,6 +97,17 @@
}
void MemoryPressureListener::Notify(MemoryPressureLevel memory_pressure_level) {
+ TRACE_EVENT(
+ "base", "MemoryPressureListener::Notify",
+ [&](perfetto::EventContext ctx) {
+ auto* event = ctx.event<perfetto::protos::pbzero::ChromeTrackEvent>();
+ auto* data = event->set_chrome_memory_pressure_notification();
+ data->set_level(
+ trace_event::MemoryPressureLevelToTraceEnum(memory_pressure_level));
+ data->set_creation_location_iid(
+ base::trace_event::InternedSourceLocation::Get(&ctx,
+ creation_location_));
+ });
callback_.Run(memory_pressure_level);
}
@@ -92,10 +122,15 @@
void MemoryPressureListener::NotifyMemoryPressure(
MemoryPressureLevel memory_pressure_level) {
DCHECK_NE(memory_pressure_level, MEMORY_PRESSURE_LEVEL_NONE);
- TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("memory-infra"),
- "MemoryPressureListener::NotifyMemoryPressure",
- TRACE_EVENT_SCOPE_THREAD, "level",
- memory_pressure_level);
+ TRACE_EVENT_INSTANT(
+ trace_event::MemoryDumpManager::kTraceCategory,
+ "MemoryPressureListener::NotifyMemoryPressure",
+ [&](perfetto::EventContext ctx) {
+ auto* event = ctx.event<perfetto::protos::pbzero::ChromeTrackEvent>();
+ auto* data = event->set_chrome_memory_pressure_notification();
+ data->set_level(
+ trace_event::MemoryPressureLevelToTraceEnum(memory_pressure_level));
+ });
if (AreNotificationsSuppressed())
return;
DoNotifyMemoryPressure(memory_pressure_level);
@@ -103,12 +138,12 @@
// static
bool MemoryPressureListener::AreNotificationsSuppressed() {
- return subtle::Acquire_Load(&g_notifications_suppressed) == 1;
+ return g_notifications_suppressed.load(std::memory_order_acquire);
}
// static
void MemoryPressureListener::SetNotificationsSuppressed(bool suppress) {
- subtle::Release_Store(&g_notifications_suppressed, suppress ? 1 : 0);
+ g_notifications_suppressed.store(suppress, std::memory_order_release);
}
// static
diff --git a/base/memory/memory_pressure_listener.h b/base/memory/memory_pressure_listener.h
index 7e97010..29e9364 100644
--- a/base/memory/memory_pressure_listener.h
+++ b/base/memory/memory_pressure_listener.h
@@ -1,4 +1,4 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -11,8 +11,9 @@
#define BASE_MEMORY_MEMORY_PRESSURE_LISTENER_H_
#include "base/base_export.h"
-#include "base/callback.h"
-#include "base/macros.h"
+#include "base/functional/callback.h"
+#include "base/location.h"
+#include "base/tracing_buildflags.h"
namespace base {
@@ -35,43 +36,57 @@
// }
//
// // Start listening.
-// MemoryPressureListener* my_listener =
-// new MemoryPressureListener(base::Bind(&OnMemoryPressure));
+// auto listener = std::make_unique<MemoryPressureListener>(
+// base::BindRepeating(&OnMemoryPressure));
//
// ...
//
// // Stop listening.
-// delete my_listener;
+// listener.reset();
//
class BASE_EXPORT MemoryPressureListener {
public:
// A Java counterpart will be generated for this enum.
+  // The values need to be kept in sync with the MemoryPressureLevel entry in
+  // enums.xml.
// GENERATED_JAVA_ENUM_PACKAGE: org.chromium.base
+ // GENERATED_JAVA_PREFIX_TO_STRIP: MEMORY_PRESSURE_LEVEL_
enum MemoryPressureLevel {
// No problems, there is enough memory to use. This event is not sent via
// callback, but the enum is used in other places to find out the current
// state of the system.
- MEMORY_PRESSURE_LEVEL_NONE,
+ MEMORY_PRESSURE_LEVEL_NONE = 0,
// Modules are advised to free buffers that are cheap to re-allocate and not
// immediately needed.
- MEMORY_PRESSURE_LEVEL_MODERATE,
+ MEMORY_PRESSURE_LEVEL_MODERATE = 1,
// At this level, modules are advised to free all possible memory. The
// alternative is to be killed by the system, which means all memory will
// have to be re-created, plus the cost of a cold start.
- MEMORY_PRESSURE_LEVEL_CRITICAL,
+ MEMORY_PRESSURE_LEVEL_CRITICAL = 2,
+
+ // This must be the last value in the enum. The casing is different from the
+ // other values to make this enum work well with the
+ // UMA_HISTOGRAM_ENUMERATION macro.
+ kMaxValue = MEMORY_PRESSURE_LEVEL_CRITICAL,
};
- typedef Callback<void(MemoryPressureLevel)> MemoryPressureCallback;
- typedef Callback<void(MemoryPressureLevel)> SyncMemoryPressureCallback;
+ using MemoryPressureCallback = RepeatingCallback<void(MemoryPressureLevel)>;
+ using SyncMemoryPressureCallback =
+ RepeatingCallback<void(MemoryPressureLevel)>;
- explicit MemoryPressureListener(
+ MemoryPressureListener(
+ const base::Location& creation_location,
const MemoryPressureCallback& memory_pressure_callback);
MemoryPressureListener(
+ const base::Location& creation_location,
const MemoryPressureCallback& memory_pressure_callback,
const SyncMemoryPressureCallback& sync_memory_pressure_callback);
+ MemoryPressureListener(const MemoryPressureListener&) = delete;
+ MemoryPressureListener& operator=(const MemoryPressureListener&) = delete;
+
~MemoryPressureListener();
// Intended for use by the platform specific implementation.
@@ -94,7 +109,7 @@
MemoryPressureCallback callback_;
SyncMemoryPressureCallback sync_memory_pressure_callback_;
- DISALLOW_COPY_AND_ASSIGN(MemoryPressureListener);
+ const base::Location creation_location_;
};
} // namespace base
diff --git a/base/memory/memory_pressure_listener_unittest.cc b/base/memory/memory_pressure_listener_unittest.cc
index 87d5f4c..13232c9 100644
--- a/base/memory/memory_pressure_listener_unittest.cc
+++ b/base/memory/memory_pressure_listener_unittest.cc
@@ -1,12 +1,12 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
+// Copyright 2015 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/memory_pressure_listener.h"
-#include "base/bind.h"
-#include "base/message_loop/message_loop.h"
+#include "base/functional/bind.h"
#include "base/run_loop.h"
+#include "base/test/task_environment.h"
#include "testing/gmock/include/gmock/gmock.h"
namespace base {
@@ -15,15 +15,17 @@
class MemoryPressureListenerTest : public testing::Test {
public:
+ MemoryPressureListenerTest()
+ : task_environment_(test::TaskEnvironment::MainThreadType::UI) {}
+
void SetUp() override {
- message_loop_.reset(new MessageLoopForUI());
- listener_.reset(new MemoryPressureListener(
- Bind(&MemoryPressureListenerTest::OnMemoryPressure, Unretained(this))));
+ listener_ = std::make_unique<MemoryPressureListener>(
+ FROM_HERE, BindRepeating(&MemoryPressureListenerTest::OnMemoryPressure,
+ Unretained(this)));
}
void TearDown() override {
listener_.reset();
- message_loop_.reset();
}
protected:
@@ -47,7 +49,7 @@
MOCK_METHOD1(OnMemoryPressure,
void(MemoryPressureListener::MemoryPressureLevel));
- std::unique_ptr<MessageLoopForUI> message_loop_;
+ test::TaskEnvironment task_environment_;
std::unique_ptr<MemoryPressureListener> listener_;
};
diff --git a/base/memory/memory_pressure_monitor.cc b/base/memory/memory_pressure_monitor.cc
index ed350b8..9bbd0f0 100644
--- a/base/memory/memory_pressure_monitor.cc
+++ b/base/memory/memory_pressure_monitor.cc
@@ -1,43 +1,18 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
+// Copyright 2015 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/memory_pressure_monitor.h"
-#include "base/logging.h"
-#include "base/metrics/histogram_macros.h"
+#include "base/check.h"
+#include "base/metrics/histogram.h"
+#include "base/notreached.h"
namespace base {
namespace {
MemoryPressureMonitor* g_monitor = nullptr;
-// Enumeration of UMA memory pressure levels. This needs to be kept in sync with
-// histograms.xml and the memory pressure levels defined in
-// MemoryPressureListener.
-enum MemoryPressureLevelUMA {
- UMA_MEMORY_PRESSURE_LEVEL_NONE = 0,
- UMA_MEMORY_PRESSURE_LEVEL_MODERATE = 1,
- UMA_MEMORY_PRESSURE_LEVEL_CRITICAL = 2,
- // This must be the last value in the enum.
- UMA_MEMORY_PRESSURE_LEVEL_COUNT,
-};
-
-// Converts a memory pressure level to an UMA enumeration value.
-MemoryPressureLevelUMA MemoryPressureLevelToUmaEnumValue(
- base::MemoryPressureListener::MemoryPressureLevel level) {
- switch (level) {
- case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE:
- return UMA_MEMORY_PRESSURE_LEVEL_NONE;
- case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE:
- return UMA_MEMORY_PRESSURE_LEVEL_MODERATE;
- case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL:
- return UMA_MEMORY_PRESSURE_LEVEL_CRITICAL;
- }
- NOTREACHED();
- return UMA_MEMORY_PRESSURE_LEVEL_NONE;
-}
-
} // namespace
MemoryPressureMonitor::MemoryPressureMonitor() {
@@ -54,18 +29,5 @@
MemoryPressureMonitor* MemoryPressureMonitor::Get() {
return g_monitor;
}
-void MemoryPressureMonitor::RecordMemoryPressure(
- base::MemoryPressureListener::MemoryPressureLevel level,
- int ticks) {
- // Use the more primitive STATIC_HISTOGRAM_POINTER_BLOCK macro because the
- // simple UMA_HISTOGRAM macros don't expose 'AddCount' functionality.
- STATIC_HISTOGRAM_POINTER_BLOCK(
- "Memory.PressureLevel",
- AddCount(MemoryPressureLevelToUmaEnumValue(level), ticks),
- base::LinearHistogram::FactoryGet(
- "Memory.PressureLevel", 1, UMA_MEMORY_PRESSURE_LEVEL_COUNT,
- UMA_MEMORY_PRESSURE_LEVEL_COUNT + 1,
- base::HistogramBase::kUmaTargetedHistogramFlag));
-}
} // namespace base
diff --git a/base/memory/memory_pressure_monitor.h b/base/memory/memory_pressure_monitor.h
index e48244b..d91a8f8 100644
--- a/base/memory/memory_pressure_monitor.h
+++ b/base/memory/memory_pressure_monitor.h
@@ -1,4 +1,4 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
+// Copyright 2015 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -6,9 +6,9 @@
#define BASE_MEMORY_MEMORY_PRESSURE_MONITOR_H_
#include "base/base_export.h"
-#include "base/callback.h"
-#include "base/macros.h"
+#include "base/functional/callback.h"
#include "base/memory/memory_pressure_listener.h"
+#include "base/time/time.h"
namespace base {
@@ -24,28 +24,22 @@
class BASE_EXPORT MemoryPressureMonitor {
public:
using MemoryPressureLevel = base::MemoryPressureListener::MemoryPressureLevel;
- using DispatchCallback = base::Callback<void(MemoryPressureLevel level)>;
+ using DispatchCallback =
+ base::RepeatingCallback<void(MemoryPressureLevel level)>;
+
+ MemoryPressureMonitor(const MemoryPressureMonitor&) = delete;
+ MemoryPressureMonitor& operator=(const MemoryPressureMonitor&) = delete;
virtual ~MemoryPressureMonitor();
// Return the singleton MemoryPressureMonitor.
static MemoryPressureMonitor* Get();
- // Record memory pressure UMA statistic. A tick is 5 seconds.
- static void RecordMemoryPressure(MemoryPressureLevel level, int ticks);
-
// Returns the currently observed memory pressure.
- virtual MemoryPressureLevel GetCurrentPressureLevel() = 0;
-
- // Sets a notification callback. The default callback invokes
- // base::MemoryPressureListener::NotifyMemoryPressure.
- virtual void SetDispatchCallback(const DispatchCallback& callback) = 0;
+ virtual MemoryPressureLevel GetCurrentPressureLevel() const = 0;
protected:
MemoryPressureMonitor();
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MemoryPressureMonitor);
};
} // namespace base
diff --git a/base/memory/memory_pressure_monitor_chromeos.cc b/base/memory/memory_pressure_monitor_chromeos.cc
deleted file mode 100644
index 4b44cab..0000000
--- a/base/memory/memory_pressure_monitor_chromeos.cc
+++ /dev/null
@@ -1,297 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/memory_pressure_monitor_chromeos.h"
-
-#include <fcntl.h>
-#include <sys/select.h>
-
-#include "base/metrics/histogram_macros.h"
-#include "base/posix/eintr_wrapper.h"
-#include "base/process/process_metrics.h"
-#include "base/single_thread_task_runner.h"
-#include "base/sys_info.h"
-#include "base/threading/thread_task_runner_handle.h"
-#include "base/time/time.h"
-#include "starboard/types.h"
-
-namespace base {
-namespace chromeos {
-
-namespace {
-
-// Type-safe version of |g_monitor| from base/memory/memory_pressure_monitor.cc.
-MemoryPressureMonitor* g_monitor = nullptr;
-
-// The time between memory pressure checks. While under critical pressure, this
-// is also the timer to repeat cleanup attempts.
-const int kMemoryPressureIntervalMs = 1000;
-
-// The time which should pass between two moderate memory pressure calls.
-const int kModerateMemoryPressureCooldownMs = 10000;
-
-// Number of event polls before the next moderate pressure event can be sent.
-const int kModerateMemoryPressureCooldown =
- kModerateMemoryPressureCooldownMs / kMemoryPressureIntervalMs;
-
-// Threshold constants to emit pressure events.
-const int kNormalMemoryPressureModerateThresholdPercent = 60;
-const int kNormalMemoryPressureCriticalThresholdPercent = 95;
-const int kAggressiveMemoryPressureModerateThresholdPercent = 35;
-const int kAggressiveMemoryPressureCriticalThresholdPercent = 70;
-
-// The possible state for memory pressure level. The values should be in line
-// with values in MemoryPressureListener::MemoryPressureLevel and should be
-// updated if more memory pressure levels are introduced.
-enum MemoryPressureLevelUMA {
- MEMORY_PRESSURE_LEVEL_NONE = 0,
- MEMORY_PRESSURE_LEVEL_MODERATE,
- MEMORY_PRESSURE_LEVEL_CRITICAL,
- NUM_MEMORY_PRESSURE_LEVELS
-};
-
-// This is the file that will exist if low memory notification is available
-// on the device. Whenever it becomes readable, it signals a low memory
-// condition.
-const char kLowMemFile[] = "/dev/chromeos-low-mem";
-
-// Converts a |MemoryPressureThreshold| value into a used memory percentage for
-// the moderate pressure event.
-int GetModerateMemoryThresholdInPercent(
- MemoryPressureMonitor::MemoryPressureThresholds thresholds) {
- return thresholds == MemoryPressureMonitor::
- THRESHOLD_AGGRESSIVE_CACHE_DISCARD ||
- thresholds == MemoryPressureMonitor::THRESHOLD_AGGRESSIVE
- ? kAggressiveMemoryPressureModerateThresholdPercent
- : kNormalMemoryPressureModerateThresholdPercent;
-}
-
-// Converts a |MemoryPressureThreshold| value into a used memory percentage for
-// the critical pressure event.
-int GetCriticalMemoryThresholdInPercent(
- MemoryPressureMonitor::MemoryPressureThresholds thresholds) {
- return thresholds == MemoryPressureMonitor::
- THRESHOLD_AGGRESSIVE_TAB_DISCARD ||
- thresholds == MemoryPressureMonitor::THRESHOLD_AGGRESSIVE
- ? kAggressiveMemoryPressureCriticalThresholdPercent
- : kNormalMemoryPressureCriticalThresholdPercent;
-}
-
-// Converts free percent of memory into a memory pressure value.
-MemoryPressureListener::MemoryPressureLevel GetMemoryPressureLevelFromFillLevel(
- int actual_fill_level,
- int moderate_threshold,
- int critical_threshold) {
- if (actual_fill_level < moderate_threshold)
- return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE;
- return actual_fill_level < critical_threshold
- ? MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE
- : MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL;
-}
-
-// This function will be called less than once a second. It will check if
-// the kernel has detected a low memory situation.
-bool IsLowMemoryCondition(int file_descriptor) {
- fd_set fds;
- struct timeval tv;
-
- FD_ZERO(&fds);
- FD_SET(file_descriptor, &fds);
-
- tv.tv_sec = 0;
- tv.tv_usec = 0;
-
- return HANDLE_EINTR(select(file_descriptor + 1, &fds, NULL, NULL, &tv)) > 0;
-}
-
-} // namespace
-
-MemoryPressureMonitor::MemoryPressureMonitor(
- MemoryPressureThresholds thresholds)
- : current_memory_pressure_level_(
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE),
- moderate_pressure_repeat_count_(0),
- seconds_since_reporting_(0),
- moderate_pressure_threshold_percent_(
- GetModerateMemoryThresholdInPercent(thresholds)),
- critical_pressure_threshold_percent_(
- GetCriticalMemoryThresholdInPercent(thresholds)),
- low_mem_file_(HANDLE_EINTR(::open(kLowMemFile, O_RDONLY))),
- dispatch_callback_(
- base::Bind(&MemoryPressureListener::NotifyMemoryPressure)),
- weak_ptr_factory_(this) {
- DCHECK(!g_monitor);
- g_monitor = this;
-
- StartObserving();
- LOG_IF(ERROR,
- base::SysInfo::IsRunningOnChromeOS() && !low_mem_file_.is_valid())
- << "Cannot open kernel listener";
-}
-
-MemoryPressureMonitor::~MemoryPressureMonitor() {
- DCHECK(g_monitor);
- g_monitor = nullptr;
-
- StopObserving();
-}
-
-void MemoryPressureMonitor::ScheduleEarlyCheck() {
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, BindOnce(&MemoryPressureMonitor::CheckMemoryPressure,
- weak_ptr_factory_.GetWeakPtr()));
-}
-
-MemoryPressureListener::MemoryPressureLevel
-MemoryPressureMonitor::GetCurrentPressureLevel() {
- return current_memory_pressure_level_;
-}
-
-// static
-MemoryPressureMonitor* MemoryPressureMonitor::Get() {
- return g_monitor;
-}
-
-void MemoryPressureMonitor::StartObserving() {
- timer_.Start(FROM_HERE,
- TimeDelta::FromMilliseconds(kMemoryPressureIntervalMs),
- Bind(&MemoryPressureMonitor::
- CheckMemoryPressureAndRecordStatistics,
- weak_ptr_factory_.GetWeakPtr()));
-}
-
-void MemoryPressureMonitor::StopObserving() {
- // If StartObserving failed, StopObserving will still get called.
- timer_.Stop();
-}
-
-void MemoryPressureMonitor::CheckMemoryPressureAndRecordStatistics() {
- CheckMemoryPressure();
- if (seconds_since_reporting_++ == 5) {
- seconds_since_reporting_ = 0;
- RecordMemoryPressure(current_memory_pressure_level_, 1);
- }
- // Record UMA histogram statistics for the current memory pressure level.
- // TODO(lgrey): Remove this once there's a usable history for the
- // "Memory.PressureLevel" statistic
- MemoryPressureLevelUMA memory_pressure_level_uma(MEMORY_PRESSURE_LEVEL_NONE);
- switch (current_memory_pressure_level_) {
- case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE:
- memory_pressure_level_uma = MEMORY_PRESSURE_LEVEL_NONE;
- break;
- case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE:
- memory_pressure_level_uma = MEMORY_PRESSURE_LEVEL_MODERATE;
- break;
- case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL:
- memory_pressure_level_uma = MEMORY_PRESSURE_LEVEL_CRITICAL;
- break;
- }
-
- UMA_HISTOGRAM_ENUMERATION("ChromeOS.MemoryPressureLevel",
- memory_pressure_level_uma,
- NUM_MEMORY_PRESSURE_LEVELS);
-}
-
-void MemoryPressureMonitor::CheckMemoryPressure() {
- MemoryPressureListener::MemoryPressureLevel old_pressure =
- current_memory_pressure_level_;
-
- // If we have the kernel low memory observer, we use it's flag instead of our
- // own computation (for now). Note that in "simulation mode" it can be null.
- // TODO(skuhne): We need to add code which makes sure that the kernel and this
- // computation come to similar results and then remove this override again.
- // TODO(skuhne): Add some testing framework here to see how close the kernel
- // and the internal functions are.
- if (low_mem_file_.is_valid() && IsLowMemoryCondition(low_mem_file_.get())) {
- current_memory_pressure_level_ =
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL;
- } else {
- current_memory_pressure_level_ = GetMemoryPressureLevelFromFillLevel(
- GetUsedMemoryInPercent(),
- moderate_pressure_threshold_percent_,
- critical_pressure_threshold_percent_);
-
- // When listening to the kernel, we ignore the reported memory pressure
- // level from our own computation and reduce critical to moderate.
- if (low_mem_file_.is_valid() &&
- current_memory_pressure_level_ ==
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL) {
- current_memory_pressure_level_ =
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE;
- }
- }
-
- // In case there is no memory pressure we do not notify.
- if (current_memory_pressure_level_ ==
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE) {
- return;
- }
- if (old_pressure == current_memory_pressure_level_) {
- // If the memory pressure is still at the same level, we notify again for a
- // critical level. In case of a moderate level repeat however, we only send
- // a notification after a certain time has passed.
- if (current_memory_pressure_level_ ==
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE &&
- ++moderate_pressure_repeat_count_ <
- kModerateMemoryPressureCooldown) {
- return;
- }
- } else if (current_memory_pressure_level_ ==
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE &&
- old_pressure ==
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL) {
- // When we reducing the pressure level from critical to moderate, we
- // restart the timeout and do not send another notification.
- moderate_pressure_repeat_count_ = 0;
- return;
- }
- moderate_pressure_repeat_count_ = 0;
- dispatch_callback_.Run(current_memory_pressure_level_);
-}
-
-// Gets the used ChromeOS memory in percent.
-int MemoryPressureMonitor::GetUsedMemoryInPercent() {
- base::SystemMemoryInfoKB info;
- if (!base::GetSystemMemoryInfo(&info)) {
- VLOG(1) << "Cannot determine the free memory of the system.";
- return 0;
- }
- // TODO(skuhne): Instead of adding the kernel memory pressure calculation
- // logic here, we should have a kernel mechanism similar to the low memory
- // notifier in ChromeOS which offers multiple pressure states.
- // To track this, we have crbug.com/381196.
-
- // The available memory consists of "real" and virtual (z)ram memory.
- // Since swappable memory uses a non pre-deterministic compression and
- // the compression creates its own "dynamic" in the system, it gets
- // de-emphasized by the |kSwapWeight| factor.
- const int kSwapWeight = 4;
-
- // The total memory we have is the "real memory" plus the virtual (z)ram.
- int total_memory = info.total + info.swap_total / kSwapWeight;
-
- // The kernel internally uses 50MB.
- const int kMinFileMemory = 50 * 1024;
-
- // Most file memory can be easily reclaimed.
- int file_memory = info.active_file + info.inactive_file;
- // unless it is dirty or it's a minimal portion which is required.
- file_memory -= info.dirty + kMinFileMemory;
-
- // Available memory is the sum of free, swap and easy reclaimable memory.
- int available_memory =
- info.free + info.swap_free / kSwapWeight + file_memory;
-
- DCHECK(available_memory < total_memory);
- int percentage = ((total_memory - available_memory) * 100) / total_memory;
- return percentage;
-}
-
-void MemoryPressureMonitor::SetDispatchCallback(
- const DispatchCallback& callback) {
- dispatch_callback_ = callback;
-}
-
-} // namespace chromeos
-} // namespace base
diff --git a/base/memory/memory_pressure_monitor_chromeos.h b/base/memory/memory_pressure_monitor_chromeos.h
deleted file mode 100644
index 563ba85..0000000
--- a/base/memory/memory_pressure_monitor_chromeos.h
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MEMORY_MEMORY_PRESSURE_MONITOR_CHROMEOS_H_
-#define BASE_MEMORY_MEMORY_PRESSURE_MONITOR_CHROMEOS_H_
-
-#include "base/base_export.h"
-#include "base/files/scoped_file.h"
-#include "base/macros.h"
-#include "base/memory/memory_pressure_listener.h"
-#include "base/memory/memory_pressure_monitor.h"
-#include "base/memory/weak_ptr.h"
-#include "base/timer/timer.h"
-
-namespace base {
-namespace chromeos {
-
-class TestMemoryPressureMonitor;
-
-////////////////////////////////////////////////////////////////////////////////
-// MemoryPressureMonitor
-//
-// A class to handle the observation of our free memory. It notifies the
-// MemoryPressureListener of memory fill level changes, so that it can take
-// action to reduce memory resources accordingly.
-//
-class BASE_EXPORT MemoryPressureMonitor : public base::MemoryPressureMonitor {
- public:
- using GetUsedMemoryInPercentCallback = int (*)();
-
- // There are two memory pressure events:
- // MODERATE - which will mainly release caches.
- // CRITICAL - which will discard tabs.
- // The |MemoryPressureThresholds| enum selects the strategy of firing these
- // events: A conservative strategy will keep as much content in memory as
- // possible (causing the system to swap to zram) and an aggressive strategy
- // will release memory earlier to avoid swapping.
- enum MemoryPressureThresholds {
- // Use the system default.
- THRESHOLD_DEFAULT = 0,
- // Try to keep as much content in memory as possible.
- THRESHOLD_CONSERVATIVE = 1,
- // Discard caches earlier, allowing to keep more tabs in memory.
- THRESHOLD_AGGRESSIVE_CACHE_DISCARD = 2,
- // Discard tabs earlier, allowing the system to get faster.
- THRESHOLD_AGGRESSIVE_TAB_DISCARD = 3,
- // Discard caches and tabs earlier to allow the system to be faster.
- THRESHOLD_AGGRESSIVE = 4
- };
-
- explicit MemoryPressureMonitor(MemoryPressureThresholds thresholds);
- ~MemoryPressureMonitor() override;
-
- // Redo the memory pressure calculation soon and call again if a critical
- // memory pressure prevails. Note that this call will trigger an asynchronous
- // action which gives the system time to release memory back into the pool.
- void ScheduleEarlyCheck();
-
- // Get the current memory pressure level.
- MemoryPressureListener::MemoryPressureLevel GetCurrentPressureLevel()
- override;
- void SetDispatchCallback(const DispatchCallback& callback) override;
-
- // Returns a type-casted version of the current memory pressure monitor. A
- // simple wrapper to base::MemoryPressureMonitor::Get.
- static MemoryPressureMonitor* Get();
-
- private:
- friend TestMemoryPressureMonitor;
- // Starts observing the memory fill level.
- // Calls to StartObserving should always be matched with calls to
- // StopObserving.
- void StartObserving();
-
- // Stop observing the memory fill level.
- // May be safely called if StartObserving has not been called.
- void StopObserving();
-
- // The function which gets periodically called to check any changes in the
- // memory pressure. It will report pressure changes as well as continuous
- // critical pressure levels.
- void CheckMemoryPressure();
-
- // The function periodically checks the memory pressure changes and records
- // the UMA histogram statistics for the current memory pressure level.
- void CheckMemoryPressureAndRecordStatistics();
-
- // Get the memory pressure in percent (virtual for testing).
- virtual int GetUsedMemoryInPercent();
-
- // The current memory pressure.
- base::MemoryPressureListener::MemoryPressureLevel
- current_memory_pressure_level_;
-
- // A periodic timer to check for resource pressure changes. This will get
- // replaced by a kernel triggered event system (see crbug.com/381196).
- base::RepeatingTimer timer_;
-
- // To slow down the amount of moderate pressure event calls, this counter
- // gets used to count the number of events since the last event occured.
- int moderate_pressure_repeat_count_;
-
- // The "Memory.PressureLevel" statistic is recorded every
- // 5 seconds, but the timer to report "ChromeOS.MemoryPressureLevel"
- // fires every second. This counter is used to allow reporting
- // "Memory.PressureLevel" correctly without adding another
- // timer.
- int seconds_since_reporting_;
-
- // The thresholds for moderate and critical pressure.
- const int moderate_pressure_threshold_percent_;
- const int critical_pressure_threshold_percent_;
-
- // File descriptor used to detect low memory condition.
- ScopedFD low_mem_file_;
-
- DispatchCallback dispatch_callback_;
-
- base::WeakPtrFactory<MemoryPressureMonitor> weak_ptr_factory_;
-
- DISALLOW_COPY_AND_ASSIGN(MemoryPressureMonitor);
-};
-
-} // namespace chromeos
-} // namespace base
-
-#endif // BASE_MEMORY_MEMORY_PRESSURE_MONITOR_CHROMEOS_H_
diff --git a/base/memory/memory_pressure_monitor_chromeos_unittest.cc b/base/memory/memory_pressure_monitor_chromeos_unittest.cc
deleted file mode 100644
index ee00091..0000000
--- a/base/memory/memory_pressure_monitor_chromeos_unittest.cc
+++ /dev/null
@@ -1,172 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/memory_pressure_monitor_chromeos.h"
-
-#include "base/macros.h"
-#include "base/memory/memory_pressure_listener.h"
-#include "base/message_loop/message_loop.h"
-#include "base/run_loop.h"
-#include "base/sys_info.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base {
-namespace chromeos {
-
-namespace {
-
-// True if the memory notifier got called.
-// Do not read/modify value directly.
-bool on_memory_pressure_called = false;
-
-// If the memory notifier got called, this is the memory pressure reported.
-MemoryPressureListener::MemoryPressureLevel on_memory_pressure_level =
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE;
-
-// Processes OnMemoryPressure calls.
-void OnMemoryPressure(MemoryPressureListener::MemoryPressureLevel level) {
- on_memory_pressure_called = true;
- on_memory_pressure_level = level;
-}
-
-// Resets the indicator for memory pressure.
-void ResetOnMemoryPressureCalled() {
- on_memory_pressure_called = false;
-}
-
-// Returns true when OnMemoryPressure was called (and resets it).
-bool WasOnMemoryPressureCalled() {
- bool b = on_memory_pressure_called;
- ResetOnMemoryPressureCalled();
- return b;
-}
-
-} // namespace
-
-class TestMemoryPressureMonitor : public MemoryPressureMonitor {
- public:
- TestMemoryPressureMonitor()
- : MemoryPressureMonitor(THRESHOLD_DEFAULT),
- memory_in_percent_override_(0) {
- // Disable any timers which are going on and set a special memory reporting
- // function.
- StopObserving();
- }
- ~TestMemoryPressureMonitor() override = default;
-
- void SetMemoryInPercentOverride(int percent) {
- memory_in_percent_override_ = percent;
- }
-
- void CheckMemoryPressureForTest() {
- CheckMemoryPressure();
- }
-
- private:
- int GetUsedMemoryInPercent() override {
- return memory_in_percent_override_;
- }
-
- int memory_in_percent_override_;
- DISALLOW_COPY_AND_ASSIGN(TestMemoryPressureMonitor);
-};
-
-// This test tests the various transition states from memory pressure, looking
-// for the correct behavior on event reposting as well as state updates.
-TEST(ChromeOSMemoryPressureMonitorTest, CheckMemoryPressure) {
- // crbug.com/844102:
- if (base::SysInfo::IsRunningOnChromeOS())
- return;
-
- base::MessageLoopForUI message_loop;
- std::unique_ptr<TestMemoryPressureMonitor> monitor(
- new TestMemoryPressureMonitor);
- std::unique_ptr<MemoryPressureListener> listener(
- new MemoryPressureListener(base::Bind(&OnMemoryPressure)));
- // Checking the memory pressure while 0% are used should not produce any
- // events.
- monitor->SetMemoryInPercentOverride(0);
- ResetOnMemoryPressureCalled();
-
- monitor->CheckMemoryPressureForTest();
- RunLoop().RunUntilIdle();
- EXPECT_FALSE(WasOnMemoryPressureCalled());
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
- monitor->GetCurrentPressureLevel());
-
- // Setting the memory level to 80% should produce a moderate pressure level.
- monitor->SetMemoryInPercentOverride(80);
- monitor->CheckMemoryPressureForTest();
- RunLoop().RunUntilIdle();
- EXPECT_TRUE(WasOnMemoryPressureCalled());
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
- monitor->GetCurrentPressureLevel());
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
- on_memory_pressure_level);
-
- // We need to check that the event gets reposted after a while.
- int i = 0;
- for (; i < 100; i++) {
- monitor->CheckMemoryPressureForTest();
- RunLoop().RunUntilIdle();
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
- monitor->GetCurrentPressureLevel());
- if (WasOnMemoryPressureCalled()) {
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
- on_memory_pressure_level);
- break;
- }
- }
- // Should be more than 5 and less than 100.
- EXPECT_LE(5, i);
- EXPECT_GE(99, i);
-
- // Setting the memory usage to 99% should produce critical levels.
- monitor->SetMemoryInPercentOverride(99);
- monitor->CheckMemoryPressureForTest();
- RunLoop().RunUntilIdle();
- EXPECT_TRUE(WasOnMemoryPressureCalled());
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
- on_memory_pressure_level);
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
- monitor->GetCurrentPressureLevel());
-
- // Calling it again should immediately produce a second call.
- monitor->CheckMemoryPressureForTest();
- RunLoop().RunUntilIdle();
- EXPECT_TRUE(WasOnMemoryPressureCalled());
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
- on_memory_pressure_level);
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
- monitor->GetCurrentPressureLevel());
-
- // When lowering the pressure again we should not get an event, but the
- // pressure should go back to moderate.
- monitor->SetMemoryInPercentOverride(80);
- monitor->CheckMemoryPressureForTest();
- RunLoop().RunUntilIdle();
- EXPECT_FALSE(WasOnMemoryPressureCalled());
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
- monitor->GetCurrentPressureLevel());
-
- // We should need exactly the same amount of calls as before, before the next
- // call comes in.
- int j = 0;
- for (; j < 100; j++) {
- monitor->CheckMemoryPressureForTest();
- RunLoop().RunUntilIdle();
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
- monitor->GetCurrentPressureLevel());
- if (WasOnMemoryPressureCalled()) {
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
- on_memory_pressure_level);
- break;
- }
- }
- // We should have needed exactly the same amount of checks as before.
- EXPECT_EQ(j, i);
-}
-
-} // namespace chromeos
-} // namespace base
diff --git a/base/memory/memory_pressure_monitor_mac.cc b/base/memory/memory_pressure_monitor_mac.cc
deleted file mode 100644
index 23857d6..0000000
--- a/base/memory/memory_pressure_monitor_mac.cc
+++ /dev/null
@@ -1,190 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/memory_pressure_monitor_mac.h"
-
-#include <CoreFoundation/CoreFoundation.h>
-
-#include <dlfcn.h>
-#include <sys/sysctl.h>
-
-#include <cmath>
-
-#include "base/bind.h"
-#include "base/logging.h"
-#include "base/mac/mac_util.h"
-#include "starboard/types.h"
-
-// Redeclare for partial 10.9 availability.
-DISPATCH_EXPORT const struct dispatch_source_type_s
- _dispatch_source_type_memorypressure;
-
-namespace {
-static const int kUMATickSize = 5;
-} // namespace
-
-namespace base {
-namespace mac {
-
-MemoryPressureListener::MemoryPressureLevel
-MemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressureLevel(
- int mac_memory_pressure_level) {
- switch (mac_memory_pressure_level) {
- case DISPATCH_MEMORYPRESSURE_NORMAL:
- return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE;
- case DISPATCH_MEMORYPRESSURE_WARN:
- return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE;
- case DISPATCH_MEMORYPRESSURE_CRITICAL:
- return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL;
- }
- return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE;
-}
-
-void MemoryPressureMonitor::OnRunLoopExit(CFRunLoopObserverRef observer,
- CFRunLoopActivity activity,
- void* info) {
- MemoryPressureMonitor* self = static_cast<MemoryPressureMonitor*>(info);
- self->UpdatePressureLevelOnRunLoopExit();
-}
-
-MemoryPressureMonitor::MemoryPressureMonitor()
- : memory_level_event_source_(dispatch_source_create(
- DISPATCH_SOURCE_TYPE_MEMORYPRESSURE,
- 0,
- DISPATCH_MEMORYPRESSURE_WARN | DISPATCH_MEMORYPRESSURE_CRITICAL |
- DISPATCH_MEMORYPRESSURE_NORMAL,
- dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0))),
- dispatch_callback_(
- base::Bind(&MemoryPressureListener::NotifyMemoryPressure)),
- last_statistic_report_time_(CFAbsoluteTimeGetCurrent()),
- last_pressure_level_(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE),
- subtick_seconds_(0) {
- // Attach an event handler to the memory pressure event source.
- if (memory_level_event_source_.get()) {
- dispatch_source_set_event_handler(memory_level_event_source_, ^{
- OnMemoryPressureChanged(memory_level_event_source_.get(),
- dispatch_callback_);
- });
-
- // Start monitoring the event source.
- dispatch_resume(memory_level_event_source_);
- }
-
- // Create a CFRunLoopObserver to check the memory pressure at the end of
- // every pass through the event loop (modulo kUMATickSize).
- CFRunLoopObserverContext observer_context = {0, this, NULL, NULL, NULL};
-
- exit_observer_.reset(
- CFRunLoopObserverCreate(kCFAllocatorDefault, kCFRunLoopExit, true, 0,
- OnRunLoopExit, &observer_context));
-
- CFRunLoopRef run_loop = CFRunLoopGetCurrent();
- CFRunLoopAddObserver(run_loop, exit_observer_, kCFRunLoopCommonModes);
- CFRunLoopAddObserver(run_loop, exit_observer_,
- kMessageLoopExclusiveRunLoopMode);
-}
-
-MemoryPressureMonitor::~MemoryPressureMonitor() {
- // Detach from the run loop.
- CFRunLoopRef run_loop = CFRunLoopGetCurrent();
- CFRunLoopRemoveObserver(run_loop, exit_observer_, kCFRunLoopCommonModes);
- CFRunLoopRemoveObserver(run_loop, exit_observer_,
- kMessageLoopExclusiveRunLoopMode);
-
- // Remove the memory pressure event source.
- if (memory_level_event_source_.get()) {
- dispatch_source_cancel(memory_level_event_source_);
- }
-}
-
-int MemoryPressureMonitor::GetMacMemoryPressureLevel() {
- // Get the raw memory pressure level from macOS.
- int mac_memory_pressure_level;
- size_t length = sizeof(int);
- sysctlbyname("kern.memorystatus_vm_pressure_level",
- &mac_memory_pressure_level, &length, nullptr, 0);
-
- return mac_memory_pressure_level;
-}
-
-void MemoryPressureMonitor::UpdatePressureLevel() {
- // Get the current macOS pressure level and convert to the corresponding
- // Chrome pressure level.
- int mac_memory_pressure_level = GetMacMemoryPressureLevel();
- MemoryPressureListener::MemoryPressureLevel new_pressure_level =
- MemoryPressureLevelForMacMemoryPressureLevel(mac_memory_pressure_level);
-
- // Compute the number of "ticks" spent at |last_pressure_level_| (since the
- // last report sent to UMA).
- CFTimeInterval now = CFAbsoluteTimeGetCurrent();
- CFTimeInterval time_since_last_report = now - last_statistic_report_time_;
- last_statistic_report_time_ = now;
-
- double accumulated_time = time_since_last_report + subtick_seconds_;
- int ticks_to_report = static_cast<int>(accumulated_time / kUMATickSize);
- // Save for later the seconds that didn't make it into a full tick.
- subtick_seconds_ = std::fmod(accumulated_time, kUMATickSize);
-
- // Round the tick count up on a pressure level change to ensure we capture it.
- bool pressure_level_changed = (new_pressure_level != last_pressure_level_);
- if (pressure_level_changed && ticks_to_report < 1) {
- ticks_to_report = 1;
- subtick_seconds_ = 0;
- }
-
- // Send elapsed ticks to UMA.
- if (ticks_to_report >= 1) {
- RecordMemoryPressure(last_pressure_level_, ticks_to_report);
- }
-
- // Save the now-current memory pressure level.
- last_pressure_level_ = new_pressure_level;
-}
-
-void MemoryPressureMonitor::UpdatePressureLevelOnRunLoopExit() {
- // Wait until it's time to check the pressure level.
- CFTimeInterval now = CFAbsoluteTimeGetCurrent();
- if (now >= next_run_loop_update_time_) {
- UpdatePressureLevel();
-
- // Update again in kUMATickSize seconds. We can update at any frequency,
- // but because we're only checking memory pressure levels for UMA there's
- // no need to update more frequently than we're keeping statistics on.
- next_run_loop_update_time_ = now + kUMATickSize - subtick_seconds_;
- }
-}
-
-// Static.
-int MemoryPressureMonitor::GetSecondsPerUMATick() {
- return kUMATickSize;
-}
-
-MemoryPressureListener::MemoryPressureLevel
-MemoryPressureMonitor::GetCurrentPressureLevel() {
- return last_pressure_level_;
-}
-
-void MemoryPressureMonitor::OnMemoryPressureChanged(
- dispatch_source_s* event_source,
- const MemoryPressureMonitor::DispatchCallback& dispatch_callback) {
- // The OS has sent a notification that the memory pressure level has changed.
- // Go through the normal memory pressure level checking mechanism so that
- // last_pressure_level_ and UMA get updated to the current value.
- UpdatePressureLevel();
-
- // Run the callback that's waiting on memory pressure change notifications.
- // The convention is to not send notifiations on memory pressure returning to
- // normal.
- if (last_pressure_level_ !=
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE)
- dispatch_callback.Run(last_pressure_level_);
-}
-
-void MemoryPressureMonitor::SetDispatchCallback(
- const DispatchCallback& callback) {
- dispatch_callback_ = callback;
-}
-
-} // namespace mac
-} // namespace base
diff --git a/base/memory/memory_pressure_monitor_mac.h b/base/memory/memory_pressure_monitor_mac.h
deleted file mode 100644
index 6f0e02f..0000000
--- a/base/memory/memory_pressure_monitor_mac.h
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MEMORY_MEMORY_PRESSURE_MONITOR_MAC_H_
-#define BASE_MEMORY_MEMORY_PRESSURE_MONITOR_MAC_H_
-
-#include <CoreFoundation/CFDate.h>
-#include <dispatch/dispatch.h>
-
-#include "base/base_export.h"
-#include "base/mac/scoped_cftyperef.h"
-#include "base/mac/scoped_dispatch_object.h"
-#include "base/macros.h"
-#include "base/memory/memory_pressure_listener.h"
-#include "base/memory/memory_pressure_monitor.h"
-#include "base/message_loop/message_pump_mac.h"
-#include "starboard/types.h"
-
-namespace base {
-namespace mac {
-
-class TestMemoryPressureMonitor;
-
-// Declares the interface for the Mac MemoryPressureMonitor, which reports
-// memory pressure events and status.
-class BASE_EXPORT MemoryPressureMonitor : public base::MemoryPressureMonitor {
- public:
- MemoryPressureMonitor();
- ~MemoryPressureMonitor() override;
-
- // Returns the currently-observed memory pressure.
- MemoryPressureLevel GetCurrentPressureLevel() override;
-
- void SetDispatchCallback(const DispatchCallback& callback) override;
-
- private:
- friend TestMemoryPressureMonitor;
-
- static MemoryPressureLevel MemoryPressureLevelForMacMemoryPressureLevel(
- int mac_memory_pressure_level);
- static void OnRunLoopExit(CFRunLoopObserverRef observer,
- CFRunLoopActivity activity,
- void* info);
- // Returns the raw memory pressure level from the macOS. Exposed for
- // unit testing.
- virtual int GetMacMemoryPressureLevel();
-
- // Updates |last_pressure_level_| with the current memory pressure level.
- void UpdatePressureLevel();
-
- // Updates |last_pressure_level_| at the end of every run loop pass (modulo
- // some number of seconds).
- void UpdatePressureLevelOnRunLoopExit();
-
- // Run |dispatch_callback| on memory pressure notifications from the OS.
- void OnMemoryPressureChanged(dispatch_source_s* event_source,
- const DispatchCallback& dispatch_callback);
-
- // Returns the number of seconds per UMA tick (for statistics recording).
- // Exposed for testing.
- static int GetSecondsPerUMATick();
-
- // The dispatch source that generates memory pressure change notifications.
- ScopedDispatchObject<dispatch_source_t> memory_level_event_source_;
-
- // The callback to call upon receiving a memory pressure change notification.
- DispatchCallback dispatch_callback_;
-
- // Last UMA report time.
- CFTimeInterval last_statistic_report_time_;
-
- // Most-recent memory pressure level.
- MemoryPressureLevel last_pressure_level_;
-
- // Observer that tracks exits from the main run loop.
- ScopedCFTypeRef<CFRunLoopObserverRef> exit_observer_;
-
- // Next time to update the memory pressure level when exiting the run loop.
- CFTimeInterval next_run_loop_update_time_;
-
- // Seconds left over from the last UMA tick calculation (to be added to the
- // next calculation).
- CFTimeInterval subtick_seconds_;
-
- DISALLOW_COPY_AND_ASSIGN(MemoryPressureMonitor);
-};
-
-} // namespace mac
-} // namespace base
-
-#endif // BASE_MEMORY_MEMORY_PRESSURE_MONITOR_MAC_H_
diff --git a/base/memory/memory_pressure_monitor_mac_unittest.cc b/base/memory/memory_pressure_monitor_mac_unittest.cc
deleted file mode 100644
index 3f5f4b7..0000000
--- a/base/memory/memory_pressure_monitor_mac_unittest.cc
+++ /dev/null
@@ -1,228 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/memory_pressure_monitor_mac.h"
-
-#include "base/bind.h"
-#include "base/bind_helpers.h"
-#include "base/mac/scoped_cftyperef.h"
-#include "base/macros.h"
-#include "base/test/metrics/histogram_tester.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base {
-namespace mac {
-
-class TestMemoryPressureMonitor : public MemoryPressureMonitor {
- public:
- using MemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressureLevel;
-
- // A HistogramTester for verifying correct UMA stat generation.
- base::HistogramTester tester;
-
- TestMemoryPressureMonitor() { }
-
- // Clears the next run loop update time so that the next pass of the run
- // loop checks the memory pressure level immediately. Normally there's a
- // 5 second delay between pressure readings.
- void ResetRunLoopUpdateTime() { next_run_loop_update_time_ = 0; }
-
- // Sets the last UMA stat report time. Time spent in memory pressure is
- // recorded in 5-second "ticks" from the last time statistics were recorded.
- void SetLastStatisticReportTime(CFTimeInterval time) {
- last_statistic_report_time_ = time;
- }
-
- // Sets the raw macOS memory pressure level read by the memory pressure
- // monitor.
- int macos_pressure_level_for_testing_;
-
- // Exposes the UpdatePressureLevel() method for testing.
- void UpdatePressureLevel() { MemoryPressureMonitor::UpdatePressureLevel(); }
-
- // Returns the number of seconds left over from the last UMA tick
- // calculation.
- int SubTickSeconds() { return subtick_seconds_; }
-
- // Returns the number of seconds per UMA tick.
- static int GetSecondsPerUMATick() {
- return MemoryPressureMonitor::GetSecondsPerUMATick();
- }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(TestMemoryPressureMonitor);
-
- int GetMacMemoryPressureLevel() override {
- return macos_pressure_level_for_testing_;
- }
-};
-
-TEST(MacMemoryPressureMonitorTest, MemoryPressureFromMacMemoryPressure) {
- EXPECT_EQ(
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
- TestMemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressureLevel(
- DISPATCH_MEMORYPRESSURE_NORMAL));
- EXPECT_EQ(
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
- TestMemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressureLevel(
- DISPATCH_MEMORYPRESSURE_WARN));
- EXPECT_EQ(
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
- TestMemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressureLevel(
- DISPATCH_MEMORYPRESSURE_CRITICAL));
- EXPECT_EQ(
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
- TestMemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressureLevel(
- 0));
- EXPECT_EQ(
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
- TestMemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressureLevel(
- 3));
- EXPECT_EQ(
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
- TestMemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressureLevel(
- 5));
- EXPECT_EQ(
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
- TestMemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressureLevel(
- -1));
-}
-
-TEST(MacMemoryPressureMonitorTest, CurrentMemoryPressure) {
- TestMemoryPressureMonitor monitor;
-
- MemoryPressureListener::MemoryPressureLevel memory_pressure =
- monitor.GetCurrentPressureLevel();
- EXPECT_TRUE(memory_pressure ==
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE ||
- memory_pressure ==
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE ||
- memory_pressure ==
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL);
-}
-
-TEST(MacMemoryPressureMonitorTest, MemoryPressureConversion) {
- TestMemoryPressureMonitor monitor;
-
- monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_NORMAL;
- monitor.UpdatePressureLevel();
- MemoryPressureListener::MemoryPressureLevel memory_pressure =
- monitor.GetCurrentPressureLevel();
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
- memory_pressure);
-
- monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_WARN;
- monitor.UpdatePressureLevel();
- memory_pressure = monitor.GetCurrentPressureLevel();
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
- memory_pressure);
-
- monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_CRITICAL;
- monitor.UpdatePressureLevel();
- memory_pressure = monitor.GetCurrentPressureLevel();
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
- memory_pressure);
-}
-
-TEST(MacMemoryPressureMonitorTest, MemoryPressureRunLoopChecking) {
- TestMemoryPressureMonitor monitor;
-
- // To test grabbing the memory presure at the end of the run loop, we have to
- // run the run loop, but to do that the run loop needs a run loop source. Add
- // a timer as the source. We know that the exit observer is attached to
- // the kMessageLoopExclusiveRunLoopMode mode, so use that mode.
- ScopedCFTypeRef<CFRunLoopTimerRef> timer_ref(CFRunLoopTimerCreate(
- NULL, CFAbsoluteTimeGetCurrent() + 10, 0, 0, 0, nullptr, nullptr));
- CFRunLoopAddTimer(CFRunLoopGetCurrent(), timer_ref,
- kMessageLoopExclusiveRunLoopMode);
-
- monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_WARN;
- monitor.ResetRunLoopUpdateTime();
- CFRunLoopRunInMode(kMessageLoopExclusiveRunLoopMode, 0, true);
- EXPECT_EQ(monitor.GetCurrentPressureLevel(),
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE);
-
- monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_CRITICAL;
- monitor.ResetRunLoopUpdateTime();
- CFRunLoopRunInMode(kMessageLoopExclusiveRunLoopMode, 0, true);
- EXPECT_EQ(monitor.GetCurrentPressureLevel(),
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL);
-
- monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_NORMAL;
- monitor.ResetRunLoopUpdateTime();
- CFRunLoopRunInMode(kMessageLoopExclusiveRunLoopMode, 0, true);
- EXPECT_EQ(monitor.GetCurrentPressureLevel(),
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE);
-
- CFRunLoopRemoveTimer(CFRunLoopGetCurrent(), timer_ref,
- kMessageLoopExclusiveRunLoopMode);
-}
-
-TEST(MacMemoryPressureMonitorTest, RecordMemoryPressureStats) {
- TestMemoryPressureMonitor monitor;
- const char* kHistogram = "Memory.PressureLevel";
- CFTimeInterval now = CFAbsoluteTimeGetCurrent();
- const int seconds_per_tick =
- TestMemoryPressureMonitor::GetSecondsPerUMATick();
-
- // Set the initial pressure level.
- monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_NORMAL;
- // Incur one UMA tick of time (and include one extra second of elapsed time).
- monitor.SetLastStatisticReportTime(now - (seconds_per_tick + 1));
- monitor.UpdatePressureLevel();
- monitor.tester.ExpectTotalCount(kHistogram, 1);
- monitor.tester.ExpectBucketCount(kHistogram, 0, 1);
- // The report time above included an extra second so there should be 1
- // sub-tick second left over.
- EXPECT_EQ(1, monitor.SubTickSeconds());
-
- // Simulate sitting in normal pressure for 1 second less than 6 UMA tick
- // seconds and then elevating to warning. With the left over sub-tick second
- // from above, the total elapsed ticks should be an even 6 UMA ticks.
- monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_WARN;
- monitor.SetLastStatisticReportTime(now - (seconds_per_tick * 6 - 1));
- monitor.UpdatePressureLevel();
- monitor.tester.ExpectTotalCount(kHistogram, 7);
- monitor.tester.ExpectBucketCount(kHistogram, 0, 7);
- monitor.tester.ExpectBucketCount(kHistogram, 1, 0);
- EXPECT_EQ(0, monitor.SubTickSeconds());
-
- // Simulate sitting in warning pressure for 20 UMA ticks and 2 seconds, and
- // then elevating to critical.
- monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_CRITICAL;
- monitor.SetLastStatisticReportTime(now - (20 * seconds_per_tick + 2));
- monitor.UpdatePressureLevel();
- monitor.tester.ExpectTotalCount(kHistogram, 27);
- monitor.tester.ExpectBucketCount(kHistogram, 0, 7);
- monitor.tester.ExpectBucketCount(kHistogram, 1, 20);
- monitor.tester.ExpectBucketCount(kHistogram, 2, 0);
- EXPECT_EQ(2, monitor.SubTickSeconds());
-
- // A quick update while critical - the stats should not budge because less
- // than 1 tick of time has elapsed.
- monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_CRITICAL;
- monitor.SetLastStatisticReportTime(now - 1);
- monitor.UpdatePressureLevel();
- monitor.tester.ExpectTotalCount(kHistogram, 27);
- monitor.tester.ExpectBucketCount(kHistogram, 0, 7);
- monitor.tester.ExpectBucketCount(kHistogram, 1, 20);
- monitor.tester.ExpectBucketCount(kHistogram, 2, 0);
- EXPECT_EQ(3, monitor.SubTickSeconds());
-
- // A quick change back to normal. Less than 1 tick of time has elapsed, but
- // in this case the pressure level changed, so the critical bucket should
- // get another sample (otherwise we could miss quick level changes).
- monitor.macos_pressure_level_for_testing_ = DISPATCH_MEMORYPRESSURE_NORMAL;
- monitor.SetLastStatisticReportTime(now - 1);
- monitor.UpdatePressureLevel();
- monitor.tester.ExpectTotalCount(kHistogram, 28);
- monitor.tester.ExpectBucketCount(kHistogram, 0, 7);
- monitor.tester.ExpectBucketCount(kHistogram, 1, 20);
- monitor.tester.ExpectBucketCount(kHistogram, 2, 1);
- // When less than 1 tick of time has elapsed but the pressure level changed,
- // the subtick remainder gets zeroed out.
- EXPECT_EQ(0, monitor.SubTickSeconds());
-}
-} // namespace mac
-} // namespace base
diff --git a/base/memory/memory_pressure_monitor_unittest.cc b/base/memory/memory_pressure_monitor_unittest.cc
deleted file mode 100644
index 10d9d24..0000000
--- a/base/memory/memory_pressure_monitor_unittest.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/memory_pressure_monitor.h"
-
-#include "base/macros.h"
-#include "base/memory/memory_pressure_listener.h"
-#include "base/test/metrics/histogram_tester.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base {
-
-TEST(MemoryPressureMonitorTest, RecordMemoryPressure) {
- base::HistogramTester tester;
- const char* kHistogram = "Memory.PressureLevel";
-
- MemoryPressureMonitor::RecordMemoryPressure(
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE, 3);
- tester.ExpectTotalCount(kHistogram, 3);
- tester.ExpectBucketCount(kHistogram, 0, 3);
-
- MemoryPressureMonitor::RecordMemoryPressure(
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE, 2);
- tester.ExpectTotalCount(kHistogram, 5);
- tester.ExpectBucketCount(kHistogram, 1, 2);
-
- MemoryPressureMonitor::RecordMemoryPressure(
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL, 1);
- tester.ExpectTotalCount(kHistogram, 6);
- tester.ExpectBucketCount(kHistogram, 2, 1);
-}
-} // namespace base
diff --git a/base/memory/memory_pressure_monitor_win.cc b/base/memory/memory_pressure_monitor_win.cc
deleted file mode 100644
index e6f9815..0000000
--- a/base/memory/memory_pressure_monitor_win.cc
+++ /dev/null
@@ -1,234 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/memory_pressure_monitor_win.h"
-
-#include <windows.h>
-
-#include "base/single_thread_task_runner.h"
-#include "base/threading/thread_task_runner_handle.h"
-#include "base/time/time.h"
-#include "starboard/types.h"
-
-namespace base {
-namespace win {
-
-namespace {
-
-static const DWORDLONG kMBBytes = 1024 * 1024;
-
-} // namespace
-
-// The following constants have been lifted from similar values in the ChromeOS
-// memory pressure monitor. The values were determined experimentally to ensure
-// sufficient responsiveness of the memory pressure subsystem, and minimal
-// overhead.
-const int MemoryPressureMonitor::kPollingIntervalMs = 5000;
-const int MemoryPressureMonitor::kModeratePressureCooldownMs = 10000;
-const int MemoryPressureMonitor::kModeratePressureCooldownCycles =
- kModeratePressureCooldownMs / kPollingIntervalMs;
-
-// TODO(chrisha): Explore the following constants further with an experiment.
-
-// A system is considered 'high memory' if it has more than 1.5GB of system
-// memory available for use by the memory manager (not reserved for hardware
-// and drivers). This is a fuzzy version of the ~2GB discussed below.
-const int MemoryPressureMonitor::kLargeMemoryThresholdMb = 1536;
-
-// These are the default thresholds used for systems with < ~2GB of physical
-// memory. Such systems have been observed to always maintain ~100MB of
-// available memory, paging until that is the case. To try to avoid paging a
-// threshold slightly above this is chosen. The moderate threshold is slightly
-// less grounded in reality and chosen as 2.5x critical.
-const int MemoryPressureMonitor::kSmallMemoryDefaultModerateThresholdMb = 500;
-const int MemoryPressureMonitor::kSmallMemoryDefaultCriticalThresholdMb = 200;
-
-// These are the default thresholds used for systems with >= ~2GB of physical
-// memory. Such systems have been observed to always maintain ~300MB of
-// available memory, paging until that is the case.
-const int MemoryPressureMonitor::kLargeMemoryDefaultModerateThresholdMb = 1000;
-const int MemoryPressureMonitor::kLargeMemoryDefaultCriticalThresholdMb = 400;
-
-MemoryPressureMonitor::MemoryPressureMonitor()
- : moderate_threshold_mb_(0),
- critical_threshold_mb_(0),
- current_memory_pressure_level_(
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE),
- moderate_pressure_repeat_count_(0),
- dispatch_callback_(
- base::Bind(&MemoryPressureListener::NotifyMemoryPressure)),
- weak_ptr_factory_(this) {
- InferThresholds();
- StartObserving();
-}
-
-MemoryPressureMonitor::MemoryPressureMonitor(int moderate_threshold_mb,
- int critical_threshold_mb)
- : moderate_threshold_mb_(moderate_threshold_mb),
- critical_threshold_mb_(critical_threshold_mb),
- current_memory_pressure_level_(
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE),
- moderate_pressure_repeat_count_(0),
- dispatch_callback_(
- base::Bind(&MemoryPressureListener::NotifyMemoryPressure)),
- weak_ptr_factory_(this) {
- DCHECK_GE(moderate_threshold_mb_, critical_threshold_mb_);
- DCHECK_LE(0, critical_threshold_mb_);
- StartObserving();
-}
-
-MemoryPressureMonitor::~MemoryPressureMonitor() {
- StopObserving();
-}
-
-void MemoryPressureMonitor::CheckMemoryPressureSoon() {
- DCHECK(thread_checker_.CalledOnValidThread());
-
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, Bind(&MemoryPressureMonitor::CheckMemoryPressure,
- weak_ptr_factory_.GetWeakPtr()));
-}
-
-MemoryPressureListener::MemoryPressureLevel
-MemoryPressureMonitor::GetCurrentPressureLevel() {
- return current_memory_pressure_level_;
-}
-
-void MemoryPressureMonitor::InferThresholds() {
- // Default to a 'high' memory situation, which uses more conservative
- // thresholds.
- bool high_memory = true;
- MEMORYSTATUSEX mem_status = {};
- if (GetSystemMemoryStatus(&mem_status)) {
- static const DWORDLONG kLargeMemoryThresholdBytes =
- static_cast<DWORDLONG>(kLargeMemoryThresholdMb) * kMBBytes;
- high_memory = mem_status.ullTotalPhys >= kLargeMemoryThresholdBytes;
- }
-
- if (high_memory) {
- moderate_threshold_mb_ = kLargeMemoryDefaultModerateThresholdMb;
- critical_threshold_mb_ = kLargeMemoryDefaultCriticalThresholdMb;
- } else {
- moderate_threshold_mb_ = kSmallMemoryDefaultModerateThresholdMb;
- critical_threshold_mb_ = kSmallMemoryDefaultCriticalThresholdMb;
- }
-}
-
-void MemoryPressureMonitor::StartObserving() {
- DCHECK(thread_checker_.CalledOnValidThread());
-
- timer_.Start(FROM_HERE,
- TimeDelta::FromMilliseconds(kPollingIntervalMs),
- Bind(&MemoryPressureMonitor::
- CheckMemoryPressureAndRecordStatistics,
- weak_ptr_factory_.GetWeakPtr()));
-}
-
-void MemoryPressureMonitor::StopObserving() {
- DCHECK(thread_checker_.CalledOnValidThread());
-
- // If StartObserving failed, StopObserving will still get called.
- timer_.Stop();
- weak_ptr_factory_.InvalidateWeakPtrs();
-}
-
-void MemoryPressureMonitor::CheckMemoryPressure() {
- DCHECK(thread_checker_.CalledOnValidThread());
-
- // Get the previous pressure level and update the current one.
- MemoryPressureLevel old_pressure = current_memory_pressure_level_;
- current_memory_pressure_level_ = CalculateCurrentPressureLevel();
-
- // |notify| will be set to true if MemoryPressureListeners need to be
- // notified of a memory pressure level state change.
- bool notify = false;
- switch (current_memory_pressure_level_) {
- case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE:
- break;
-
- case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE:
- if (old_pressure != current_memory_pressure_level_) {
- // This is a new transition to moderate pressure so notify.
- moderate_pressure_repeat_count_ = 0;
- notify = true;
- } else {
- // Already in moderate pressure, only notify if sustained over the
- // cooldown period.
- if (++moderate_pressure_repeat_count_ ==
- kModeratePressureCooldownCycles) {
- moderate_pressure_repeat_count_ = 0;
- notify = true;
- }
- }
- break;
-
- case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL:
- // Always notify of critical pressure levels.
- notify = true;
- break;
- }
-
- if (!notify)
- return;
-
- // Emit a notification of the current memory pressure level. This can only
- // happen for moderate and critical pressure levels.
- DCHECK_NE(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
- current_memory_pressure_level_);
- dispatch_callback_.Run(current_memory_pressure_level_);
-}
-
-void MemoryPressureMonitor::CheckMemoryPressureAndRecordStatistics() {
- DCHECK(thread_checker_.CalledOnValidThread());
-
- CheckMemoryPressure();
-
- RecordMemoryPressure(current_memory_pressure_level_, 1);
-}
-
-MemoryPressureListener::MemoryPressureLevel
-MemoryPressureMonitor::CalculateCurrentPressureLevel() {
- MEMORYSTATUSEX mem_status = {};
- if (!GetSystemMemoryStatus(&mem_status))
- return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE;
-
- // How much system memory is actively available for use right now, in MBs.
- int phys_free = static_cast<int>(mem_status.ullAvailPhys / kMBBytes);
-
- // TODO(chrisha): This should eventually care about address space pressure,
- // but the browser process (where this is running) effectively never runs out
- // of address space. Renderers occasionally do, but it does them no good to
- // have the browser process monitor address space pressure. Long term,
- // renderers should run their own address space pressure monitors and act
- // accordingly, with the browser making cross-process decisions based on
- // system memory pressure.
-
- // Determine if the physical memory is under critical memory pressure.
- if (phys_free <= critical_threshold_mb_)
- return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL;
-
- // Determine if the physical memory is under moderate memory pressure.
- if (phys_free <= moderate_threshold_mb_)
- return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE;
-
- // No memory pressure was detected.
- return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE;
-}
-
-bool MemoryPressureMonitor::GetSystemMemoryStatus(
- MEMORYSTATUSEX* mem_status) {
- DCHECK(mem_status != nullptr);
- mem_status->dwLength = sizeof(*mem_status);
- if (!::GlobalMemoryStatusEx(mem_status))
- return false;
- return true;
-}
-
-void MemoryPressureMonitor::SetDispatchCallback(
- const DispatchCallback& callback) {
- dispatch_callback_ = callback;
-}
-
-} // namespace win
-} // namespace base
diff --git a/base/memory/memory_pressure_monitor_win.h b/base/memory/memory_pressure_monitor_win.h
deleted file mode 100644
index a65c191..0000000
--- a/base/memory/memory_pressure_monitor_win.h
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MEMORY_MEMORY_PRESSURE_MONITOR_WIN_H_
-#define BASE_MEMORY_MEMORY_PRESSURE_MONITOR_WIN_H_
-
-#include "base/base_export.h"
-#include "base/macros.h"
-#include "base/memory/memory_pressure_listener.h"
-#include "base/memory/memory_pressure_monitor.h"
-#include "base/memory/weak_ptr.h"
-#include "base/threading/thread_checker.h"
-#include "base/timer/timer.h"
-
-// To not pull in windows.h.
-typedef struct _MEMORYSTATUSEX MEMORYSTATUSEX;
-
-namespace base {
-namespace win {
-
-// Windows memory pressure monitor. Because there is no OS provided signal this
-// polls at a low frequency (once per second), and applies internal hysteresis.
-class BASE_EXPORT MemoryPressureMonitor : public base::MemoryPressureMonitor {
- public:
- // Constants governing the polling and hysteresis behaviour of the observer.
-
- // The polling interval, in milliseconds. While under critical pressure, this
- // is also the timer to repeat cleanup attempts.
- static const int kPollingIntervalMs;
- // The time which should pass between 2 successive moderate memory pressure
- // signals, in milliseconds.
- static const int kModeratePressureCooldownMs;
- // The number of cycles that should pass between 2 successive moderate memory
- // pressure signals.
- static const int kModeratePressureCooldownCycles;
-
- // Constants governing the memory pressure level detection.
-
- // The amount of total system memory beyond which a system is considered to be
- // a large-memory system.
- static const int kLargeMemoryThresholdMb;
- // Default minimum free memory thresholds for small-memory systems, in MB.
- static const int kSmallMemoryDefaultModerateThresholdMb;
- static const int kSmallMemoryDefaultCriticalThresholdMb;
- // Default minimum free memory thresholds for large-memory systems, in MB.
- static const int kLargeMemoryDefaultModerateThresholdMb;
- static const int kLargeMemoryDefaultCriticalThresholdMb;
-
- // Default constructor. Will choose thresholds automatically basd on the
- // actual amount of system memory.
- MemoryPressureMonitor();
-
- // Constructor with explicit memory thresholds. These represent the amount of
- // free memory below which the applicable memory pressure state engages.
- MemoryPressureMonitor(int moderate_threshold_mb, int critical_threshold_mb);
-
- ~MemoryPressureMonitor() override;
-
- // Schedules a memory pressure check to run soon. This must be called on the
- // same thread where the monitor was instantiated.
- void CheckMemoryPressureSoon();
-
- // Get the current memory pressure level. This can be called from any thread.
- MemoryPressureLevel GetCurrentPressureLevel() override;
- void SetDispatchCallback(const DispatchCallback& callback) override;
-
- // Returns the moderate pressure level free memory threshold, in MB.
- int moderate_threshold_mb() const { return moderate_threshold_mb_; }
-
- // Returns the critical pressure level free memory threshold, in MB.
- int critical_threshold_mb() const { return critical_threshold_mb_; }
-
- protected:
- // Internals are exposed for unittests.
-
- // Automatically infers threshold values based on system memory. This invokes
- // GetMemoryStatus so it can be mocked in unittests.
- void InferThresholds();
-
- // Starts observing the memory fill level. Calls to StartObserving should
- // always be matched with calls to StopObserving.
- void StartObserving();
-
- // Stop observing the memory fill level. May be safely called if
- // StartObserving has not been called. Must be called from the same thread on
- // which the monitor was instantiated.
- void StopObserving();
-
- // Checks memory pressure, storing the current level, applying any hysteresis
- // and emitting memory pressure level change signals as necessary. This
- // function is called periodically while the monitor is observing memory
- // pressure. This is split out from CheckMemoryPressureAndRecordStatistics so
- // that it may be called by CheckMemoryPressureSoon and not invoke UMA
- // logging. Must be called from the same thread on which the monitor was
- // instantiated.
- void CheckMemoryPressure();
-
- // Wrapper to CheckMemoryPressure that also records the observed memory
- // pressure level via an UMA enumeration. This is the function that is called
- // periodically by the timer. Must be called from the same thread on which the
- // monitor was instantiated.
- void CheckMemoryPressureAndRecordStatistics();
-
- // Calculates the current instantaneous memory pressure level. This does not
- // use any hysteresis and simply returns the result at the current moment. Can
- // be called on any thread.
- MemoryPressureLevel CalculateCurrentPressureLevel();
-
- // Gets system memory status. This is virtual as a unittesting hook. Returns
- // true if the system call succeeds, false otherwise. Can be called on any
- // thread.
- virtual bool GetSystemMemoryStatus(MEMORYSTATUSEX* mem_status);
-
- private:
- // Threshold amounts of available memory that trigger pressure levels. See
- // memory_pressure_monitor.cc for a discussion of reasonable values for these.
- int moderate_threshold_mb_;
- int critical_threshold_mb_;
-
- // A periodic timer to check for memory pressure changes.
- base::RepeatingTimer timer_;
-
- // The current memory pressure.
- MemoryPressureLevel current_memory_pressure_level_;
-
- // To slow down the amount of moderate pressure event calls, this gets used to
- // count the number of events since the last event occured. This is used by
- // |CheckMemoryPressure| to apply hysteresis on the raw results of
- // |CalculateCurrentPressureLevel|.
- int moderate_pressure_repeat_count_;
-
- // Ensures that this object is used from a single thread.
- base::ThreadChecker thread_checker_;
-
- DispatchCallback dispatch_callback_;
-
- // Weak pointer factory to ourself used for scheduling calls to
- // CheckMemoryPressure/CheckMemoryPressureAndRecordStatistics via |timer_|.
- base::WeakPtrFactory<MemoryPressureMonitor> weak_ptr_factory_;
-
- DISALLOW_COPY_AND_ASSIGN(MemoryPressureMonitor);
-};
-
-} // namespace win
-} // namespace base
-
-#endif // BASE_MEMORY_MEMORY_PRESSURE_MONITOR_WIN_H_
diff --git a/base/memory/memory_pressure_monitor_win_unittest.cc b/base/memory/memory_pressure_monitor_win_unittest.cc
deleted file mode 100644
index 1002a01..0000000
--- a/base/memory/memory_pressure_monitor_win_unittest.cc
+++ /dev/null
@@ -1,299 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/memory_pressure_monitor_win.h"
-
-#include "base/macros.h"
-#include "base/memory/memory_pressure_listener.h"
-#include "base/message_loop/message_loop.h"
-#include "base/run_loop.h"
-#include "testing/gmock/include/gmock/gmock.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base {
-namespace win {
-
-namespace {
-
-struct PressureSettings {
- int phys_left_mb;
- MemoryPressureListener::MemoryPressureLevel level;
-};
-
-} // namespace
-
-// This is outside of the anonymous namespace so that it can be seen as a friend
-// to the monitor class.
-class TestMemoryPressureMonitor : public MemoryPressureMonitor {
- public:
- using MemoryPressureMonitor::CalculateCurrentPressureLevel;
- using MemoryPressureMonitor::CheckMemoryPressure;
-
- static const DWORDLONG kMBBytes = 1024 * 1024;
-
- explicit TestMemoryPressureMonitor(bool large_memory)
- : mem_status_() {
- // Generate a plausible amount of memory.
- mem_status_.ullTotalPhys =
- static_cast<DWORDLONG>(GenerateTotalMemoryMb(large_memory)) * kMBBytes;
-
- // Rerun InferThresholds using the test fixture's GetSystemMemoryStatus.
- InferThresholds();
- // Stop the timer.
- StopObserving();
- }
-
- TestMemoryPressureMonitor(int system_memory_mb,
- int moderate_threshold_mb,
- int critical_threshold_mb)
- : MemoryPressureMonitor(moderate_threshold_mb, critical_threshold_mb),
- mem_status_() {
- // Set the amount of system memory.
- mem_status_.ullTotalPhys = static_cast<DWORDLONG>(
- system_memory_mb * kMBBytes);
-
- // Stop the timer.
- StopObserving();
- }
-
- virtual ~TestMemoryPressureMonitor() {}
-
- MOCK_METHOD1(OnMemoryPressure,
- void(MemoryPressureListener::MemoryPressureLevel level));
-
- // Generates an amount of total memory that is consistent with the requested
- // memory model.
- int GenerateTotalMemoryMb(bool large_memory) {
- int total_mb = 64;
- while (total_mb < MemoryPressureMonitor::kLargeMemoryThresholdMb)
- total_mb *= 2;
- if (large_memory)
- return total_mb * 2;
- return total_mb / 2;
- }
-
- // Sets up the memory status to reflect the provided absolute memory left.
- void SetMemoryFree(int phys_left_mb) {
- // ullTotalPhys is set in the constructor and not modified.
-
- // Set the amount of available memory.
- mem_status_.ullAvailPhys =
- static_cast<DWORDLONG>(phys_left_mb) * kMBBytes;
- DCHECK_LT(mem_status_.ullAvailPhys, mem_status_.ullTotalPhys);
-
- // These fields are unused.
- mem_status_.dwMemoryLoad = 0;
- mem_status_.ullTotalPageFile = 0;
- mem_status_.ullAvailPageFile = 0;
- mem_status_.ullTotalVirtual = 0;
- mem_status_.ullAvailVirtual = 0;
- }
-
- void SetNone() {
- SetMemoryFree(moderate_threshold_mb() + 1);
- }
-
- void SetModerate() {
- SetMemoryFree(moderate_threshold_mb() - 1);
- }
-
- void SetCritical() {
- SetMemoryFree(critical_threshold_mb() - 1);
- }
-
- private:
- bool GetSystemMemoryStatus(MEMORYSTATUSEX* mem_status) override {
- // Simply copy the memory status set by the test fixture.
- *mem_status = mem_status_;
- return true;
- }
-
- MEMORYSTATUSEX mem_status_;
-
- DISALLOW_COPY_AND_ASSIGN(TestMemoryPressureMonitor);
-};
-
-class WinMemoryPressureMonitorTest : public testing::Test {
- protected:
- void CalculateCurrentMemoryPressureLevelTest(
- TestMemoryPressureMonitor* monitor) {
-
- int mod = monitor->moderate_threshold_mb();
- monitor->SetMemoryFree(mod + 1);
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
- monitor->CalculateCurrentPressureLevel());
-
- monitor->SetMemoryFree(mod);
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
- monitor->CalculateCurrentPressureLevel());
-
- monitor->SetMemoryFree(mod - 1);
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
- monitor->CalculateCurrentPressureLevel());
-
- int crit = monitor->critical_threshold_mb();
- monitor->SetMemoryFree(crit + 1);
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
- monitor->CalculateCurrentPressureLevel());
-
- monitor->SetMemoryFree(crit);
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
- monitor->CalculateCurrentPressureLevel());
-
- monitor->SetMemoryFree(crit - 1);
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
- monitor->CalculateCurrentPressureLevel());
- }
-
- base::MessageLoopForUI message_loop_;
-};
-
-// Tests the fundamental direct calculation of memory pressure with automatic
-// small-memory thresholds.
-TEST_F(WinMemoryPressureMonitorTest, CalculateCurrentMemoryPressureLevelSmall) {
- static const int kModerateMb =
- MemoryPressureMonitor::kSmallMemoryDefaultModerateThresholdMb;
- static const int kCriticalMb =
- MemoryPressureMonitor::kSmallMemoryDefaultCriticalThresholdMb;
-
- TestMemoryPressureMonitor monitor(false); // Small-memory model.
-
- EXPECT_EQ(kModerateMb, monitor.moderate_threshold_mb());
- EXPECT_EQ(kCriticalMb, monitor.critical_threshold_mb());
-
- ASSERT_NO_FATAL_FAILURE(CalculateCurrentMemoryPressureLevelTest(&monitor));
-}
-
-// Tests the fundamental direct calculation of memory pressure with automatic
-// large-memory thresholds.
-TEST_F(WinMemoryPressureMonitorTest, CalculateCurrentMemoryPressureLevelLarge) {
- static const int kModerateMb =
- MemoryPressureMonitor::kLargeMemoryDefaultModerateThresholdMb;
- static const int kCriticalMb =
- MemoryPressureMonitor::kLargeMemoryDefaultCriticalThresholdMb;
-
- TestMemoryPressureMonitor monitor(true); // Large-memory model.
-
- EXPECT_EQ(kModerateMb, monitor.moderate_threshold_mb());
- EXPECT_EQ(kCriticalMb, monitor.critical_threshold_mb());
-
- ASSERT_NO_FATAL_FAILURE(CalculateCurrentMemoryPressureLevelTest(&monitor));
-}
-
-// Tests the fundamental direct calculation of memory pressure with manually
-// specified threshold levels.
-TEST_F(WinMemoryPressureMonitorTest,
- CalculateCurrentMemoryPressureLevelCustom) {
- static const int kSystemMb = 512;
- static const int kModerateMb = 256;
- static const int kCriticalMb = 128;
-
- TestMemoryPressureMonitor monitor(kSystemMb, kModerateMb, kCriticalMb);
-
- EXPECT_EQ(kModerateMb, monitor.moderate_threshold_mb());
- EXPECT_EQ(kCriticalMb, monitor.critical_threshold_mb());
-
- ASSERT_NO_FATAL_FAILURE(CalculateCurrentMemoryPressureLevelTest(&monitor));
-}
-
-// This test tests the various transition states from memory pressure, looking
-// for the correct behavior on event reposting as well as state updates.
-TEST_F(WinMemoryPressureMonitorTest, CheckMemoryPressure) {
- // Large-memory.
- testing::StrictMock<TestMemoryPressureMonitor> monitor(true);
- MemoryPressureListener listener(
- base::Bind(&TestMemoryPressureMonitor::OnMemoryPressure,
- base::Unretained(&monitor)));
-
- // Checking the memory pressure at 0% load should not produce any
- // events.
- monitor.SetNone();
- monitor.CheckMemoryPressure();
- RunLoop().RunUntilIdle();
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
- monitor.GetCurrentPressureLevel());
-
- // Setting the memory level to 80% should produce a moderate pressure level.
- EXPECT_CALL(monitor,
- OnMemoryPressure(MemoryPressureListener::
- MEMORY_PRESSURE_LEVEL_MODERATE));
- monitor.SetModerate();
- monitor.CheckMemoryPressure();
- RunLoop().RunUntilIdle();
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
- monitor.GetCurrentPressureLevel());
- testing::Mock::VerifyAndClearExpectations(&monitor);
-
- // Check that the event gets reposted after a while.
- for (int i = 0; i < monitor.kModeratePressureCooldownCycles; ++i) {
- if (i + 1 == monitor.kModeratePressureCooldownCycles) {
- EXPECT_CALL(monitor,
- OnMemoryPressure(MemoryPressureListener::
- MEMORY_PRESSURE_LEVEL_MODERATE));
- }
- monitor.CheckMemoryPressure();
- RunLoop().RunUntilIdle();
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
- monitor.GetCurrentPressureLevel());
- testing::Mock::VerifyAndClearExpectations(&monitor);
- }
-
- // Setting the memory usage to 99% should produce critical levels.
- EXPECT_CALL(monitor,
- OnMemoryPressure(MemoryPressureListener::
- MEMORY_PRESSURE_LEVEL_CRITICAL));
- monitor.SetCritical();
- monitor.CheckMemoryPressure();
- RunLoop().RunUntilIdle();
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
- monitor.GetCurrentPressureLevel());
- testing::Mock::VerifyAndClearExpectations(&monitor);
-
- // Calling it again should immediately produce a second call.
- EXPECT_CALL(monitor,
- OnMemoryPressure(MemoryPressureListener::
- MEMORY_PRESSURE_LEVEL_CRITICAL));
- monitor.CheckMemoryPressure();
- RunLoop().RunUntilIdle();
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
- monitor.GetCurrentPressureLevel());
- testing::Mock::VerifyAndClearExpectations(&monitor);
-
- // When lowering the pressure again there should be a notification and the
- // pressure should go back to moderate.
- EXPECT_CALL(monitor,
- OnMemoryPressure(MemoryPressureListener::
- MEMORY_PRESSURE_LEVEL_MODERATE));
- monitor.SetModerate();
- monitor.CheckMemoryPressure();
- RunLoop().RunUntilIdle();
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
- monitor.GetCurrentPressureLevel());
- testing::Mock::VerifyAndClearExpectations(&monitor);
-
- // Check that the event gets reposted after a while.
- for (int i = 0; i < monitor.kModeratePressureCooldownCycles; ++i) {
- if (i + 1 == monitor.kModeratePressureCooldownCycles) {
- EXPECT_CALL(monitor,
- OnMemoryPressure(MemoryPressureListener::
- MEMORY_PRESSURE_LEVEL_MODERATE));
- }
- monitor.CheckMemoryPressure();
- RunLoop().RunUntilIdle();
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
- monitor.GetCurrentPressureLevel());
- testing::Mock::VerifyAndClearExpectations(&monitor);
- }
-
- // Going down to no pressure should not produce an notification.
- monitor.SetNone();
- monitor.CheckMemoryPressure();
- RunLoop().RunUntilIdle();
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
- monitor.GetCurrentPressureLevel());
- testing::Mock::VerifyAndClearExpectations(&monitor);
-}
-
-} // namespace win
-} // namespace base
diff --git a/base/memory/nonscannable_memory.cc b/base/memory/nonscannable_memory.cc
new file mode 100644
index 0000000..13960b7
--- /dev/null
+++ b/base/memory/nonscannable_memory.cc
@@ -0,0 +1,126 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/nonscannable_memory.h"
+
+#include <stdlib.h>
+
+#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
+#include "base/feature_list.h"
+#include "base/no_destructor.h"
+
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+#include "base/allocator/partition_alloc_features.h"
+#include "base/allocator/partition_allocator/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
+
+#if BUILDFLAG(USE_STARSCAN)
+#include "base/allocator/partition_allocator/starscan/metadata_allocator.h"
+#include "base/allocator/partition_allocator/starscan/pcscan.h"
+#endif
+#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
+namespace base {
+namespace internal {
+
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+template <bool Quarantinable>
+NonScannableAllocatorImpl<Quarantinable>::NonScannableAllocatorImpl() = default;
+template <bool Quarantinable>
+NonScannableAllocatorImpl<Quarantinable>::~NonScannableAllocatorImpl() =
+ default;
+
+template <bool Quarantinable>
+NonScannableAllocatorImpl<Quarantinable>&
+NonScannableAllocatorImpl<Quarantinable>::Instance() {
+ static base::NoDestructor<NonScannableAllocatorImpl> instance;
+ return *instance;
+}
+
+template <bool Quarantinable>
+void* NonScannableAllocatorImpl<Quarantinable>::Alloc(size_t size) {
+#if BUILDFLAG(USE_STARSCAN)
+ // TODO(bikineev): Change to LIKELY once PCScan is enabled by default.
+ if (UNLIKELY(pcscan_enabled_.load(std::memory_order_acquire))) {
+ PA_DCHECK(allocator_.get());
+ return allocator_->root()->AllocWithFlagsNoHooks(
+ 0, size, partition_alloc::PartitionPageSize());
+ }
+#endif // BUILDFLAG(USE_STARSCAN)
+ // Otherwise, dispatch to default partition.
+ return allocator_shim::internal::PartitionAllocMalloc::Allocator()
+ ->AllocWithFlagsNoHooks(0, size, partition_alloc::PartitionPageSize());
+}
+
+template <bool Quarantinable>
+void NonScannableAllocatorImpl<Quarantinable>::Free(void* ptr) {
+ partition_alloc::ThreadSafePartitionRoot::FreeNoHooks(ptr);
+}
+
+template <bool Quarantinable>
+void NonScannableAllocatorImpl<Quarantinable>::NotifyPCScanEnabled() {
+#if BUILDFLAG(USE_STARSCAN)
+ allocator_.reset(partition_alloc::internal::MakePCScanMetadata<
+ partition_alloc::PartitionAllocator>());
+ allocator_->init({
+ partition_alloc::PartitionOptions::AlignedAlloc::kDisallowed,
+ partition_alloc::PartitionOptions::ThreadCache::kDisabled,
+ Quarantinable
+ ? partition_alloc::PartitionOptions::Quarantine::kAllowed
+ : partition_alloc::PartitionOptions::Quarantine::kDisallowed,
+ partition_alloc::PartitionOptions::Cookie::kAllowed,
+ partition_alloc::PartitionOptions::BackupRefPtr::kDisabled,
+ partition_alloc::PartitionOptions::BackupRefPtrZapping::kDisabled,
+ partition_alloc::PartitionOptions::UseConfigurablePool::kNo,
+ });
+ if (Quarantinable) {
+ partition_alloc::internal::PCScan::RegisterNonScannableRoot(
+ allocator_->root());
+ }
+ pcscan_enabled_.store(true, std::memory_order_release);
+#endif // BUILDFLAG(USE_STARSCAN)
+}
+
+template class NonScannableAllocatorImpl<true>;
+template class NonScannableAllocatorImpl<false>;
+
+#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
+} // namespace internal
+
+void* AllocNonScannable(size_t size) {
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+ return internal::NonScannableAllocatorImpl</*Quarantinable=*/true>::Instance()
+ .Alloc(size);
+#else
+ return ::malloc(size);
+#endif
+}
+
+void FreeNonScannable(void* ptr) {
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+ internal::NonScannableAllocatorImpl</*Quarantinable=*/true>::Free(ptr);
+#else
+ return ::free(ptr);
+#endif
+}
+
+void* AllocNonQuarantinable(size_t size) {
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+ return internal::NonScannableAllocatorImpl<
+ /*Quarantinable=*/false>::Instance()
+ .Alloc(size);
+#else
+ return ::malloc(size);
+#endif
+}
+
+void FreeNonQuarantinable(void* ptr) {
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+ internal::NonScannableAllocatorImpl</*Quarantinable=*/false>::Free(ptr);
+#else
+ return ::free(ptr);
+#endif
+}
+
+} // namespace base
diff --git a/base/memory/nonscannable_memory.h b/base/memory/nonscannable_memory.h
new file mode 100644
index 0000000..4f3c02a
--- /dev/null
+++ b/base/memory/nonscannable_memory.h
@@ -0,0 +1,109 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_NONSCANNABLE_MEMORY_H_
+#define BASE_MEMORY_NONSCANNABLE_MEMORY_H_
+
+#include <cstdint>
+
+#include <atomic>
+#include <memory>
+
+#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
+#include "base/base_export.h"
+#include "base/no_destructor.h"
+
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+#include "base/allocator/partition_allocator/partition_alloc.h"
+
+#if BUILDFLAG(USE_STARSCAN)
+#include "base/allocator/partition_allocator/starscan/metadata_allocator.h"
+#endif
+#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
+// This file contains allocation/deallocation functions for memory that doesn't
+// need to be scanned by PCScan. Such memory should only contain "data" objects,
+// i.e. objects that don't have pointers/references to other objects. An example
+// would be strings or socket/IPC/file buffers. Use with caution.
+namespace base {
+
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+namespace internal {
+
+// Represents allocator that contains memory for data-like objects (that don't
+// contain pointers) and therefore doesn't require scanning.
+template <bool Quarantinable>
+class BASE_EXPORT NonScannableAllocatorImpl final {
+ public:
+ static NonScannableAllocatorImpl& Instance();
+
+ NonScannableAllocatorImpl(const NonScannableAllocatorImpl&) = delete;
+ NonScannableAllocatorImpl& operator=(const NonScannableAllocatorImpl&) =
+ delete;
+
+ void* Alloc(size_t size);
+ static void Free(void*);
+
+ // Returns PartitionRoot corresponding to the allocator, or nullptr if the
+ // allocator is not enabled.
+ partition_alloc::ThreadSafePartitionRoot* root() {
+#if BUILDFLAG(USE_STARSCAN)
+ if (!allocator_.get()) {
+ return nullptr;
+ }
+ return allocator_->root();
+#else
+ return nullptr;
+#endif // BUILDFLAG(USE_STARSCAN)
+ }
+
+ void NotifyPCScanEnabled();
+
+ private:
+ template <typename>
+ friend class base::NoDestructor;
+
+ NonScannableAllocatorImpl();
+ ~NonScannableAllocatorImpl();
+
+#if BUILDFLAG(USE_STARSCAN)
+ std::unique_ptr<partition_alloc::PartitionAllocator,
+ partition_alloc::internal::PCScanMetadataDeleter>
+ allocator_;
+ std::atomic_bool pcscan_enabled_{false};
+#endif // BUILDFLAG(USE_STARSCAN)
+};
+
+extern template class NonScannableAllocatorImpl<true>;
+extern template class NonScannableAllocatorImpl<false>;
+
+using NonScannableAllocator = NonScannableAllocatorImpl<true>;
+using NonQuarantinableAllocator = NonScannableAllocatorImpl<false>;
+
+} // namespace internal
+#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
+// Allocate/free non-scannable, but still quarantinable memory.
+BASE_EXPORT void* AllocNonScannable(size_t size);
+BASE_EXPORT void FreeNonScannable(void* ptr);
+
+// Allocate/free non-scannable and non-quarantinable memory. These functions
+// behave as normal, *Scan-unaware allocation functions. This can be useful for
+// allocations that are guaranteed to be safe by the user, i.e. allocations that
+// cannot be referenced from outside and cannot contain dangling references
+// themselves.
+BASE_EXPORT void* AllocNonQuarantinable(size_t size);
+BASE_EXPORT void FreeNonQuarantinable(void* ptr);
+
+// Deleters to be used with std::unique_ptr.
+struct NonScannableDeleter {
+ void operator()(void* ptr) const { FreeNonScannable(ptr); }
+};
+struct NonQuarantinableDeleter {
+ void operator()(void* ptr) const { FreeNonQuarantinable(ptr); }
+};
+
+} // namespace base
+
+#endif // BASE_MEMORY_NONSCANNABLE_MEMORY_H_
diff --git a/base/memory/page_size.h b/base/memory/page_size.h
new file mode 100644
index 0000000..dc8f47c
--- /dev/null
+++ b/base/memory/page_size.h
@@ -0,0 +1,22 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_PAGE_SIZE_H_
+#define BASE_MEMORY_PAGE_SIZE_H_
+
+#include <stddef.h>
+
+#include "base/base_export.h"
+
+namespace base {
+
+// Returns the number of bytes in a memory page. Do not use this to compute
+// the number of pages in a block of memory for calling mincore(). On some
+// platforms, e.g. iOS, mincore() uses a different page size from what is
+// returned by GetPageSize().
+BASE_EXPORT size_t GetPageSize();
+
+} // namespace base
+
+#endif // BASE_MEMORY_PAGE_SIZE_H_
diff --git a/base/memory/page_size_nacl.cc b/base/memory/page_size_nacl.cc
new file mode 100644
index 0000000..99d75be
--- /dev/null
+++ b/base/memory/page_size_nacl.cc
@@ -0,0 +1,16 @@
+// Copyright 2015 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process_metrics.h"
+
+#include <stddef.h>
+#include <unistd.h>
+
+namespace base {
+
+size_t GetPageSize() {
+ return static_cast<size_t>(getpagesize());
+}
+
+} // namespace base
diff --git a/base/memory/page_size_posix.cc b/base/memory/page_size_posix.cc
new file mode 100644
index 0000000..6118967
--- /dev/null
+++ b/base/memory/page_size_posix.cc
@@ -0,0 +1,24 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/page_size.h"
+
+#include <unistd.h>
+
+namespace base {
+
+size_t GetPageSize() {
+ static const size_t pagesize = []() -> size_t {
+ // For more information see getpagesize(2). Portable applications should use
+ // sysconf(_SC_PAGESIZE) rather than getpagesize() if it's available.
+#if defined(_SC_PAGESIZE)
+ return static_cast<size_t>(sysconf(_SC_PAGESIZE));
+#else
+ return getpagesize();
+#endif
+ }();
+ return pagesize;
+}
+
+} // namespace base
diff --git a/base/memory/page_size_starboard.cc b/base/memory/page_size_starboard.cc
new file mode 100644
index 0000000..65d6ed8
--- /dev/null
+++ b/base/memory/page_size_starboard.cc
@@ -0,0 +1,26 @@
+// Copyright 2023 The Cobalt Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "base/memory/page_size.h"
+
+#include "base/notreached.h"
+
+namespace base {
+
+size_t GetPageSize() {
+ NOTREACHED();
+ return 0;
+}
+
+}  // namespace base
\ No newline at end of file
diff --git a/base/memory/page_size_win.cc b/base/memory/page_size_win.cc
new file mode 100644
index 0000000..5a6fb06
--- /dev/null
+++ b/base/memory/page_size_win.cc
@@ -0,0 +1,15 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/page_size.h"
+
+namespace base {
+
+size_t GetPageSize() {
+ // System pagesize. This value remains constant on x86/64 architectures.
+ constexpr int PAGESIZE_KB = 4;
+ return PAGESIZE_KB * 1024;
+}
+
+} // namespace base
diff --git a/base/memory/platform_shared_memory_handle.cc b/base/memory/platform_shared_memory_handle.cc
new file mode 100644
index 0000000..eacedd5
--- /dev/null
+++ b/base/memory/platform_shared_memory_handle.cc
@@ -0,0 +1,27 @@
+// Copyright 2018 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/platform_shared_memory_handle.h"
+
+namespace base::subtle {
+
+#if defined(STARBOARD)
+#elif BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_APPLE) && !BUILDFLAG(IS_ANDROID)
+ScopedFDPair::ScopedFDPair() = default;
+
+ScopedFDPair::ScopedFDPair(ScopedFDPair&&) = default;
+
+ScopedFDPair& ScopedFDPair::operator=(ScopedFDPair&&) = default;
+
+ScopedFDPair::~ScopedFDPair() = default;
+
+ScopedFDPair::ScopedFDPair(ScopedFD in_fd, ScopedFD in_readonly_fd)
+ : fd(std::move(in_fd)), readonly_fd(std::move(in_readonly_fd)) {}
+
+FDPair ScopedFDPair::get() const {
+ return {fd.get(), readonly_fd.get()};
+}
+#endif
+
+} // namespace base::subtle
diff --git a/base/memory/platform_shared_memory_handle.h b/base/memory/platform_shared_memory_handle.h
new file mode 100644
index 0000000..e7484ca
--- /dev/null
+++ b/base/memory/platform_shared_memory_handle.h
@@ -0,0 +1,78 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_PLATFORM_SHARED_MEMORY_HANDLE_H_
+#define BASE_MEMORY_PLATFORM_SHARED_MEMORY_HANDLE_H_
+
+#include "build/build_config.h"
+
+#if BUILDFLAG(IS_APPLE)
+#include <mach/mach.h>
+#include "base/mac/scoped_mach_port.h"
+#elif BUILDFLAG(IS_FUCHSIA)
+#include <lib/zx/vmo.h>
+#elif BUILDFLAG(IS_WIN)
+#include "base/win/scoped_handle.h"
+#include "base/win/windows_types.h"
+#elif BUILDFLAG(IS_POSIX)
+#include <sys/types.h>
+#include "base/files/scoped_file.h"
+#elif defined(STARBOARD)
+#include "base/files/scoped_file.h"
+#include "starboard/file.h"
+#endif
+
+namespace base::subtle {
+
+#if defined(STARBOARD)
+#elif BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_APPLE) && !BUILDFLAG(IS_ANDROID)
+// Helper structs to keep two descriptors on POSIX. It's needed to support
+// ConvertToReadOnly().
+struct BASE_EXPORT FDPair {
+ // The main shared memory descriptor that is used for mapping. May be either
+ // writable or read-only, depending on region's mode.
+ int fd;
+ // The read-only descriptor, valid only in kWritable mode. Replaces |fd| when
+ // a region is converted to read-only.
+ int readonly_fd;
+};
+
+struct BASE_EXPORT ScopedFDPair {
+ ScopedFDPair();
+ ScopedFDPair(ScopedFD in_fd, ScopedFD in_readonly_fd);
+ ScopedFDPair(ScopedFDPair&&);
+ ScopedFDPair& operator=(ScopedFDPair&&);
+ ~ScopedFDPair();
+
+ FDPair get() const;
+
+ ScopedFD fd;
+ ScopedFD readonly_fd;
+};
+#endif
+
+// Platform-specific shared memory type used by the shared memory system.
+#if defined(STARBOARD)
+using PlatformSharedMemoryHandle = SbFile;
+using ScopedPlatformSharedMemoryHandle = ScopedFD;
+#elif BUILDFLAG(IS_APPLE)
+using PlatformSharedMemoryHandle = mach_port_t;
+using ScopedPlatformSharedMemoryHandle = mac::ScopedMachSendRight;
+#elif BUILDFLAG(IS_FUCHSIA)
+using PlatformSharedMemoryHandle = zx::unowned_vmo;
+using ScopedPlatformSharedMemoryHandle = zx::vmo;
+#elif BUILDFLAG(IS_WIN)
+using PlatformSharedMemoryHandle = HANDLE;
+using ScopedPlatformSharedMemoryHandle = win::ScopedHandle;
+#elif BUILDFLAG(IS_ANDROID)
+using PlatformSharedMemoryHandle = int;
+using ScopedPlatformSharedMemoryHandle = ScopedFD;
+#else
+using PlatformSharedMemoryHandle = FDPair;
+using ScopedPlatformSharedMemoryHandle = ScopedFDPair;
+#endif
+
+} // namespace base::subtle
+
+#endif // BASE_MEMORY_PLATFORM_SHARED_MEMORY_HANDLE_H_
diff --git a/base/memory/platform_shared_memory_mapper.h b/base/memory/platform_shared_memory_mapper.h
new file mode 100644
index 0000000..3e461d4
--- /dev/null
+++ b/base/memory/platform_shared_memory_mapper.h
@@ -0,0 +1,28 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_PLATFORM_SHARED_MEMORY_MAPPER_H_
+#define BASE_MEMORY_PLATFORM_SHARED_MEMORY_MAPPER_H_
+
+#include "base/base_export.h"
+#include "base/memory/shared_memory_mapper.h"
+
+namespace base {
+
+// Default implementation of the SharedMemoryMapper interface. Implements the
+// platform-specific logic for mapping shared memory regions into the virtual
+// address space of the process.
+class PlatformSharedMemoryMapper : public SharedMemoryMapper {
+ public:
+ absl::optional<span<uint8_t>> Map(subtle::PlatformSharedMemoryHandle handle,
+ bool write_allowed,
+ uint64_t offset,
+ size_t size) override;
+
+ void Unmap(span<uint8_t> mapping) override;
+};
+
+} // namespace base
+
+#endif // BASE_MEMORY_PLATFORM_SHARED_MEMORY_MAPPER_H_
diff --git a/base/memory/platform_shared_memory_mapper_android.cc b/base/memory/platform_shared_memory_mapper_android.cc
new file mode 100644
index 0000000..88bc538
--- /dev/null
+++ b/base/memory/platform_shared_memory_mapper_android.cc
@@ -0,0 +1,39 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/platform_shared_memory_mapper.h"
+
+#include "base/logging.h"
+#include "base/numerics/safe_conversions.h"
+
+#include <sys/mman.h>
+
+namespace base {
+
+absl::optional<span<uint8_t>> PlatformSharedMemoryMapper::Map(
+ subtle::PlatformSharedMemoryHandle handle,
+ bool write_allowed,
+ uint64_t offset,
+ size_t size) {
+ // IMPORTANT: Even if the mapping is readonly and the mapped data is not
+ // changing, the region must ALWAYS be mapped with MAP_SHARED, otherwise with
+ // ashmem the mapping is equivalent to a private anonymous mapping.
+ void* address =
+ mmap(nullptr, size, PROT_READ | (write_allowed ? PROT_WRITE : 0),
+ MAP_SHARED, handle, checked_cast<off_t>(offset));
+
+ if (address == MAP_FAILED) {
+ DPLOG(ERROR) << "mmap " << handle << " failed";
+ return absl::nullopt;
+ }
+
+ return make_span(reinterpret_cast<uint8_t*>(address), size);
+}
+
+void PlatformSharedMemoryMapper::Unmap(span<uint8_t> mapping) {
+ if (munmap(mapping.data(), mapping.size()) < 0)
+ DPLOG(ERROR) << "munmap";
+}
+
+} // namespace base
diff --git a/base/memory/platform_shared_memory_mapper_fuchsia.cc b/base/memory/platform_shared_memory_mapper_fuchsia.cc
new file mode 100644
index 0000000..357b3b3
--- /dev/null
+++ b/base/memory/platform_shared_memory_mapper_fuchsia.cc
@@ -0,0 +1,40 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/platform_shared_memory_mapper.h"
+
+#include "base/logging.h"
+
+#include <lib/zx/vmar.h>
+#include "base/fuchsia/fuchsia_logging.h"
+
+namespace base {
+
+absl::optional<span<uint8_t>> PlatformSharedMemoryMapper::Map(
+ subtle::PlatformSharedMemoryHandle handle,
+ bool write_allowed,
+ uint64_t offset,
+ size_t size) {
+ uintptr_t addr;
+ zx_vm_option_t options = ZX_VM_REQUIRE_NON_RESIZABLE | ZX_VM_PERM_READ;
+ if (write_allowed)
+ options |= ZX_VM_PERM_WRITE;
+ zx_status_t status = zx::vmar::root_self()->map(options, /*vmar_offset=*/0,
+ *handle, offset, size, &addr);
+ if (status != ZX_OK) {
+ ZX_DLOG(ERROR, status) << "zx_vmar_map";
+ return absl::nullopt;
+ }
+
+ return make_span(reinterpret_cast<uint8_t*>(addr), size);
+}
+
+void PlatformSharedMemoryMapper::Unmap(span<uint8_t> mapping) {
+ uintptr_t addr = reinterpret_cast<uintptr_t>(mapping.data());
+ zx_status_t status = zx::vmar::root_self()->unmap(addr, mapping.size());
+ if (status != ZX_OK)
+ ZX_DLOG(ERROR, status) << "zx_vmar_unmap";
+}
+
+} // namespace base
diff --git a/base/memory/platform_shared_memory_mapper_mac.cc b/base/memory/platform_shared_memory_mapper_mac.cc
new file mode 100644
index 0000000..b3dd55c
--- /dev/null
+++ b/base/memory/platform_shared_memory_mapper_mac.cc
@@ -0,0 +1,45 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/platform_shared_memory_mapper.h"
+
+#include "base/logging.h"
+
+#include <mach/vm_map.h>
+#include "base/mac/mach_logging.h"
+
+namespace base {
+
+absl::optional<span<uint8_t>> PlatformSharedMemoryMapper::Map(
+ subtle::PlatformSharedMemoryHandle handle,
+ bool write_allowed,
+ uint64_t offset,
+ size_t size) {
+ vm_prot_t vm_prot_write = write_allowed ? VM_PROT_WRITE : 0;
+ vm_address_t address = 0;
+ kern_return_t kr = vm_map(mach_task_self(),
+ &address, // Output parameter
+ size,
+ 0, // Alignment mask
+ VM_FLAGS_ANYWHERE, handle, offset,
+ FALSE, // Copy
+ VM_PROT_READ | vm_prot_write, // Current protection
+ VM_PROT_READ | vm_prot_write, // Maximum protection
+ VM_INHERIT_NONE);
+ if (kr != KERN_SUCCESS) {
+ MACH_DLOG(ERROR, kr) << "vm_map";
+ return absl::nullopt;
+ }
+
+ return make_span(reinterpret_cast<uint8_t*>(address), size);
+}
+
+void PlatformSharedMemoryMapper::Unmap(span<uint8_t> mapping) {
+ kern_return_t kr = vm_deallocate(
+ mach_task_self(), reinterpret_cast<vm_address_t>(mapping.data()),
+ mapping.size());
+ MACH_DLOG_IF(ERROR, kr != KERN_SUCCESS, kr) << "vm_deallocate";
+}
+
+} // namespace base
diff --git a/base/memory/platform_shared_memory_mapper_posix.cc b/base/memory/platform_shared_memory_mapper_posix.cc
new file mode 100644
index 0000000..19725e0
--- /dev/null
+++ b/base/memory/platform_shared_memory_mapper_posix.cc
@@ -0,0 +1,36 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/platform_shared_memory_mapper.h"
+
+#include "base/logging.h"
+#include "base/numerics/safe_conversions.h"
+
+#include <sys/mman.h>
+
+namespace base {
+
+absl::optional<span<uint8_t>> PlatformSharedMemoryMapper::Map(
+ subtle::PlatformSharedMemoryHandle handle,
+ bool write_allowed,
+ uint64_t offset,
+ size_t size) {
+ void* address =
+ mmap(nullptr, size, PROT_READ | (write_allowed ? PROT_WRITE : 0),
+ MAP_SHARED, handle.fd, checked_cast<off_t>(offset));
+
+ if (address == MAP_FAILED) {
+ DPLOG(ERROR) << "mmap " << handle.fd << " failed";
+ return absl::nullopt;
+ }
+
+ return make_span(reinterpret_cast<uint8_t*>(address), size);
+}
+
+void PlatformSharedMemoryMapper::Unmap(span<uint8_t> mapping) {
+ if (munmap(mapping.data(), mapping.size()) < 0)
+ DPLOG(ERROR) << "munmap";
+}
+
+} // namespace base
diff --git a/base/memory/platform_shared_memory_mapper_starboard.cc b/base/memory/platform_shared_memory_mapper_starboard.cc
new file mode 100644
index 0000000..d150ead
--- /dev/null
+++ b/base/memory/platform_shared_memory_mapper_starboard.cc
@@ -0,0 +1,34 @@
+// Copyright 2023 The Cobalt Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "base/memory/platform_shared_memory_mapper.h"
+
+#include "base/notreached.h"
+
+namespace base {
+
+absl::optional<span<uint8_t>> PlatformSharedMemoryMapper::Map(
+ subtle::PlatformSharedMemoryHandle handle,
+ bool write_allowed,
+ uint64_t offset,
+ size_t size) {
+ NOTREACHED();
+ return absl::optional<span<uint8_t>>();
+}
+
+void PlatformSharedMemoryMapper::Unmap(span<uint8_t> mapping) {
+ NOTREACHED();
+}
+
+}  // namespace base
\ No newline at end of file
diff --git a/base/memory/platform_shared_memory_mapper_win.cc b/base/memory/platform_shared_memory_mapper_win.cc
new file mode 100644
index 0000000..8ba40d8
--- /dev/null
+++ b/base/memory/platform_shared_memory_mapper_win.cc
@@ -0,0 +1,56 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/platform_shared_memory_mapper.h"
+
+#include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/logging.h"
+
+#include <aclapi.h>
+
+namespace base {
+
+namespace {
+// Returns the length of the memory section starting at the supplied address.
+size_t GetMemorySectionSize(void* address) {
+ MEMORY_BASIC_INFORMATION memory_info;
+ if (!::VirtualQuery(address, &memory_info, sizeof(memory_info)))
+ return 0;
+ return memory_info.RegionSize -
+ static_cast<size_t>(static_cast<char*>(address) -
+ static_cast<char*>(memory_info.AllocationBase));
+}
+} // namespace
+
+absl::optional<span<uint8_t>> PlatformSharedMemoryMapper::Map(
+ subtle::PlatformSharedMemoryHandle handle,
+ bool write_allowed,
+ uint64_t offset,
+ size_t size) {
+ // Try to map the shared memory. On the first failure, release any reserved
+ // address space for a single retry.
+ void* address;
+ for (int i = 0; i < 2; ++i) {
+ address = MapViewOfFile(
+ handle, FILE_MAP_READ | (write_allowed ? FILE_MAP_WRITE : 0),
+ static_cast<DWORD>(offset >> 32), static_cast<DWORD>(offset), size);
+ if (address)
+ break;
+ partition_alloc::ReleaseReservation();
+ }
+ if (!address) {
+ DPLOG(ERROR) << "Failed executing MapViewOfFile";
+ return absl::nullopt;
+ }
+
+ return make_span(reinterpret_cast<uint8_t*>(address),
+ GetMemorySectionSize(address));
+}
+
+void PlatformSharedMemoryMapper::Unmap(span<uint8_t> mapping) {
+ if (!UnmapViewOfFile(mapping.data()))
+ DPLOG(ERROR) << "UnmapViewOfFile";
+}
+
+} // namespace base
diff --git a/base/memory/platform_shared_memory_region.cc b/base/memory/platform_shared_memory_region.cc
index 4564792..24dc53e 100644
--- a/base/memory/platform_shared_memory_region.cc
+++ b/base/memory/platform_shared_memory_region.cc
@@ -1,11 +1,16 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
+// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/platform_shared_memory_region.h"
+#include "base/bits.h"
+#include "base/memory/aligned_memory.h"
#include "base/memory/shared_memory_mapping.h"
+#include "base/memory/shared_memory_security_policy.h"
+#include "base/metrics/histogram_functions.h"
#include "base/numerics/checked_math.h"
+#include "base/system/sys_info.h"
namespace base {
namespace subtle {
@@ -29,33 +34,59 @@
PlatformSharedMemoryRegion&& other) = default;
PlatformSharedMemoryRegion::~PlatformSharedMemoryRegion() = default;
-PlatformSharedMemoryRegion::ScopedPlatformHandle
+ScopedPlatformSharedMemoryHandle
PlatformSharedMemoryRegion::PassPlatformHandle() {
return std::move(handle_);
}
-bool PlatformSharedMemoryRegion::MapAt(off_t offset,
- size_t size,
- void** memory,
- size_t* mapped_size) const {
+absl::optional<span<uint8_t>> PlatformSharedMemoryRegion::MapAt(
+ uint64_t offset,
+ size_t size,
+ SharedMemoryMapper* mapper) const {
if (!IsValid())
- return false;
+ return absl::nullopt;
if (size == 0)
- return false;
+ return absl::nullopt;
size_t end_byte;
if (!CheckAdd(offset, size).AssignIfValid(&end_byte) || end_byte > size_) {
- return false;
+ return absl::nullopt;
}
- bool success = MapAtInternal(offset, size, memory, mapped_size);
- if (success) {
- DCHECK_EQ(
- 0U, reinterpret_cast<uintptr_t>(*memory) & (kMapMinimumAlignment - 1));
+ // TODO(dcheng): Presumably the actual size of the mapping is rounded to
+ // `SysInfo::VMAllocationGranularity()`. Should this accounting be done with
+ // that in mind?
+ if (!SharedMemorySecurityPolicy::AcquireReservationForMapping(size)) {
+ return absl::nullopt;
}
- return success;
+ if (!mapper)
+ mapper = SharedMemoryMapper::GetDefaultInstance();
+
+ // The backing mapper expects offset to be aligned to
+ // `SysInfo::VMAllocationGranularity()`.
+ uint64_t aligned_offset =
+ bits::AlignDown(offset, uint64_t{SysInfo::VMAllocationGranularity()});
+ size_t adjustment_for_alignment =
+ static_cast<size_t>(offset - aligned_offset);
+
+ bool write_allowed = mode_ != Mode::kReadOnly;
+ auto result = mapper->Map(GetPlatformHandle(), write_allowed, aligned_offset,
+ size + adjustment_for_alignment);
+
+ if (result.has_value()) {
+ DCHECK(IsAligned(result.value().data(), kMapMinimumAlignment));
+ if (offset != 0) {
+ // Undo the previous adjustment so the returned mapping respects the exact
+ // requested `offset` and `size`.
+ result = result->subspan(adjustment_for_alignment);
+ }
+ } else {
+ SharedMemorySecurityPolicy::ReleaseReservationForMapping(size);
+ }
+
+ return result;
}
} // namespace subtle
diff --git a/base/memory/platform_shared_memory_region.h b/base/memory/platform_shared_memory_region.h
index b7281d4..6847795 100644
--- a/base/memory/platform_shared_memory_region.h
+++ b/base/memory/platform_shared_memory_region.h
@@ -1,62 +1,30 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
+// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_PLATFORM_SHARED_MEMORY_REGION_H_
#define BASE_MEMORY_PLATFORM_SHARED_MEMORY_REGION_H_
-#if !defined(STARBOARD)
-
-#include <utility>
-
-#include "base/compiler_specific.h"
+#include "base/base_export.h"
+#include "base/containers/span.h"
#include "base/gtest_prod_util.h"
-#include "base/macros.h"
-#include "base/memory/shared_memory_handle.h"
+#include "base/memory/platform_shared_memory_handle.h"
+#include "base/memory/shared_memory_mapper.h"
#include "base/unguessable_token.h"
#include "build/build_config.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
-#if defined(OS_MACOSX) && !defined(OS_IOS)
-#include <mach/mach.h>
-#include "base/mac/scoped_mach_port.h"
-#elif defined(OS_FUCHSIA)
-#include <lib/zx/vmo.h>
-#elif defined(OS_WIN)
-#include "base/win/scoped_handle.h"
-#include "base/win/windows_types.h"
-#elif defined(OS_POSIX)
-#include <sys/types.h>
-#include "base/file_descriptor_posix.h"
-#include "base/files/scoped_file.h"
-#include "starboard/types.h"
+#include <stdint.h>
+
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+namespace content {
+class SandboxIPCHandler;
+}
#endif
namespace base {
namespace subtle {
-#if defined(OS_POSIX) && (!defined(OS_MACOSX) || defined(OS_IOS)) && \
- !defined(OS_ANDROID)
-// Helper structs to keep two descriptors on POSIX. It's needed to support
-// ConvertToReadOnly().
-struct BASE_EXPORT FDPair {
- int fd;
- int readonly_fd;
-};
-
-struct BASE_EXPORT ScopedFDPair {
- ScopedFDPair();
- ScopedFDPair(ScopedFD in_fd, ScopedFD in_readonly_fd);
- ScopedFDPair(ScopedFDPair&&);
- ScopedFDPair& operator=(ScopedFDPair&&);
- ~ScopedFDPair();
-
- FDPair get() const;
-
- ScopedFD fd;
- ScopedFD readonly_fd;
-};
-#endif
-
// Implementation class for shared memory regions.
//
// This class does the following:
@@ -108,25 +76,31 @@
CREATE_FILE_MAPPING_FAILURE = 6,
REDUCE_PERMISSIONS_FAILURE = 7,
ALREADY_EXISTS = 8,
- kMaxValue = ALREADY_EXISTS
+ ALLOCATE_FILE_REGION_FAILURE = 9,
+ FSTAT_FAILURE = 10,
+ INODES_MISMATCH = 11,
+ GET_SHMEM_TEMP_DIR_FAILURE = 12,
+ kMaxValue = GET_SHMEM_TEMP_DIR_FAILURE
};
-// Platform-specific shared memory type used by this class.
-#if defined(OS_MACOSX) && !defined(OS_IOS)
- using PlatformHandle = mach_port_t;
- using ScopedPlatformHandle = mac::ScopedMachSendRight;
-#elif defined(OS_FUCHSIA)
- using PlatformHandle = zx::unowned_vmo;
- using ScopedPlatformHandle = zx::vmo;
-#elif defined(OS_WIN)
- using PlatformHandle = HANDLE;
- using ScopedPlatformHandle = win::ScopedHandle;
-#elif defined(OS_ANDROID)
- using PlatformHandle = int;
- using ScopedPlatformHandle = ScopedFD;
-#else
- using PlatformHandle = FDPair;
- using ScopedPlatformHandle = ScopedFDPair;
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+ // Structure to limit access to executable region creation.
+ struct ExecutableRegion {
+ private:
+ // Creates a new shared memory region in the unsafe mode (writable and not
+ // convertible to read-only), and in addition marked executable. A ScopedFD
+ // to this region is returned. Any mapping will have to be done manually,
+ // including setting executable permissions if necessary.
+ //
+ // This is only used to support sandbox_ipc_linux.cc, and should not be used
+ // anywhere else in chrome. This is restricted via AllowCreateExecutable.
+ // TODO(crbug.com/982879): remove this when NaCl is unshipped.
+ //
+ // Returns an invalid ScopedFD if the call fails.
+ static ScopedFD CreateFD(size_t size);
+
+ friend class content::SandboxIPCHandler;
+ };
#endif
// The minimum alignment in bytes that any mapped address produced by Map()
@@ -145,19 +119,20 @@
// actual region size as allocated by the kernel.
// Closes the |handle| and returns an invalid instance if passed parameters
// are invalid.
- static PlatformSharedMemoryRegion Take(ScopedPlatformHandle handle,
+ static PlatformSharedMemoryRegion Take(
+ ScopedPlatformSharedMemoryHandle handle,
+ Mode mode,
+ size_t size,
+ const UnguessableToken& guid);
+#if defined(STARBOARD)
+#elif BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_ANDROID) && !BUILDFLAG(IS_APPLE)
+ // Specialized version of Take() for POSIX that takes only one file descriptor
+ // instead of pair. Cannot be used with kWritable |mode|.
+ static PlatformSharedMemoryRegion Take(ScopedFD handle,
Mode mode,
size_t size,
const UnguessableToken& guid);
-
- // As Take, above, but from a SharedMemoryHandle. This takes ownership of the
- // handle. |mode| must be kUnsafe or kReadOnly; the latter must be used with a
- // handle created with SharedMemoryHandle::GetReadOnlyHandle().
- // TODO(crbug.com/795291): this should only be used while transitioning from
- // the old shared memory API, and should be removed when done.
- static PlatformSharedMemoryRegion TakeFromSharedMemoryHandle(
- const SharedMemoryHandle& handle,
- Mode mode);
+#endif
// Default constructor initializes an invalid instance, i.e. an instance that
// doesn't wrap any valid platform handle.
@@ -166,18 +141,23 @@
// Move operations are allowed.
PlatformSharedMemoryRegion(PlatformSharedMemoryRegion&&);
PlatformSharedMemoryRegion& operator=(PlatformSharedMemoryRegion&&);
+ PlatformSharedMemoryRegion(const PlatformSharedMemoryRegion&) = delete;
+ PlatformSharedMemoryRegion& operator=(const PlatformSharedMemoryRegion&) =
+ delete;
// Destructor closes the platform handle. Does nothing if the handle is
// invalid.
~PlatformSharedMemoryRegion();
// Passes ownership of the platform handle to the caller. The current instance
- // becomes invalid. It's the responsibility of the caller to close the handle.
- ScopedPlatformHandle PassPlatformHandle() WARN_UNUSED_RESULT;
+ // becomes invalid. It's the responsibility of the caller to close the
+ // handle. If the current instance is invalid, the returned
+ // ScopedPlatformSharedMemoryHandle will also be invalid.
+ [[nodiscard]] ScopedPlatformSharedMemoryHandle PassPlatformHandle();
// Returns the platform handle. The current instance keeps ownership of this
// handle.
- PlatformHandle GetPlatformHandle() const;
+ PlatformSharedMemoryHandle GetPlatformHandle() const;
// Whether the platform handle is valid.
bool IsValid() const;
@@ -194,13 +174,13 @@
// kWritable mode, all other modes will CHECK-fail. The object will have
// kReadOnly mode after this call on success.
bool ConvertToReadOnly();
-#if defined(OS_MACOSX) && !defined(OS_IOS)
+#if BUILDFLAG(IS_APPLE)
// Same as above, but |mapped_addr| is used as a hint to avoid additional
// mapping of the memory object.
// |mapped_addr| must be mapped location of |memory_object_|. If the location
// is unknown, |mapped_addr| should be |nullptr|.
bool ConvertToReadOnly(void* mapped_addr);
-#endif // defined(OS_MACOSX) && !defined(OS_IOS)
+#endif // BUILDFLAG(IS_APPLE)
// Converts the region to unsafe. Returns whether the operation succeeded.
// Makes the current instance invalid on failure. Can be called only in
@@ -209,17 +189,15 @@
bool ConvertToUnsafe();
// Maps |size| bytes of the shared memory region starting with the given
- // |offset| into the caller's address space. |offset| must be aligned to value
- // of |SysInfo::VMAllocationGranularity()|. Fails if requested bytes are out
- // of the region limits.
- // Returns true and sets |memory| and |mapped_size| on success, returns false
- // and leaves output parameters in unspecified state otherwise. The mapped
- // address is guaranteed to have an alignment of at least
- // |kMapMinimumAlignment|.
- bool MapAt(off_t offset,
- size_t size,
- void** memory,
- size_t* mapped_size) const;
+ // |offset| into the caller's address space using the provided
+ // |SharedMemoryMapper|. |offset| must be aligned to value of
+ // |SysInfo::VMAllocationGranularity()|. Fails if requested bytes are out of
+ // the region limits. Returns the mapping as span on success, or absl::nullopt
+ // on failure. The mapped address is guaranteed to have an alignment of at
+ // least |kMapMinimumAlignment|.
+ absl::optional<span<uint8_t>> MapAt(uint64_t offset,
+ size_t size,
+ SharedMemoryMapper* mapper) const;
const UnguessableToken& GetGUID() const { return guid_; }
@@ -232,33 +210,32 @@
CreateReadOnlyRegionDeathTest);
FRIEND_TEST_ALL_PREFIXES(PlatformSharedMemoryRegionTest,
CheckPlatformHandlePermissionsCorrespondToMode);
- static PlatformSharedMemoryRegion Create(Mode mode, size_t size);
+ static PlatformSharedMemoryRegion Create(Mode mode,
+ size_t size
+#if defined(STARBOARD)
+#elif BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+ ,
+ bool executable = false
+#endif
+ );
static bool CheckPlatformHandlePermissionsCorrespondToMode(
- PlatformHandle handle,
+ PlatformSharedMemoryHandle handle,
Mode mode,
size_t size);
- PlatformSharedMemoryRegion(ScopedPlatformHandle handle,
+ PlatformSharedMemoryRegion(ScopedPlatformSharedMemoryHandle handle,
Mode mode,
size_t size,
const UnguessableToken& guid);
- bool MapAtInternal(off_t offset,
- size_t size,
- void** memory,
- size_t* mapped_size) const;
-
- ScopedPlatformHandle handle_;
+ ScopedPlatformSharedMemoryHandle handle_;
Mode mode_ = Mode::kReadOnly;
size_t size_ = 0;
UnguessableToken guid_;
-
- DISALLOW_COPY_AND_ASSIGN(PlatformSharedMemoryRegion);
};
} // namespace subtle
} // namespace base
-#endif // !defined(STARBOARD)
#endif // BASE_MEMORY_PLATFORM_SHARED_MEMORY_REGION_H_
diff --git a/base/memory/platform_shared_memory_region_android.cc b/base/memory/platform_shared_memory_region_android.cc
index 5369deb..1b68a24 100644
--- a/base/memory/platform_shared_memory_region_android.cc
+++ b/base/memory/platform_shared_memory_region_android.cc
@@ -1,4 +1,4 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
+// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -7,10 +7,11 @@
#include <sys/mman.h>
#include "base/bits.h"
+#include "base/logging.h"
+#include "base/memory/page_size.h"
#include "base/memory/shared_memory_tracker.h"
+#include "base/metrics/histogram_macros.h"
#include "base/posix/eintr_wrapper.h"
-#include "base/process/process_metrics.h"
-#include "starboard/types.h"
#include "third_party/ashmem/ashmem.h"
namespace base {
@@ -23,10 +24,11 @@
namespace {
-static int GetAshmemRegionProtectionMask(int fd) {
+int GetAshmemRegionProtectionMask(int fd) {
int prot = ashmem_get_prot_region(fd);
if (prot < 0) {
- DPLOG(ERROR) << "ashmem_get_prot_region failed";
+ // TODO(crbug.com/838365): convert to DLOG when bug fixed.
+ PLOG(ERROR) << "ashmem_get_prot_region failed";
return -1;
}
return prot;
@@ -54,20 +56,6 @@
return PlatformSharedMemoryRegion(std::move(fd), mode, size, guid);
}
-// static
-PlatformSharedMemoryRegion
-PlatformSharedMemoryRegion::TakeFromSharedMemoryHandle(
- const SharedMemoryHandle& handle,
- Mode mode) {
- CHECK((mode == Mode::kReadOnly && handle.IsReadOnly()) ||
- (mode == Mode::kUnsafe && !handle.IsReadOnly()));
- if (!handle.IsValid())
- return {};
-
- return Take(ScopedFD(handle.GetHandle()), mode, handle.GetSize(),
- handle.GetGUID());
-}
-
int PlatformSharedMemoryRegion::GetPlatformHandle() const {
return handle_.get();
}
@@ -128,58 +116,45 @@
return true;
}
-bool PlatformSharedMemoryRegion::MapAtInternal(off_t offset,
- size_t size,
- void** memory,
- size_t* mapped_size) const {
- bool write_allowed = mode_ != Mode::kReadOnly;
- *memory = mmap(nullptr, size, PROT_READ | (write_allowed ? PROT_WRITE : 0),
- MAP_SHARED, handle_.get(), offset);
-
- bool mmap_succeeded = *memory && *memory != reinterpret_cast<void*>(-1);
- if (!mmap_succeeded) {
- DPLOG(ERROR) << "mmap " << handle_.get() << " failed";
- return false;
- }
-
- *mapped_size = size;
- return true;
-}
-
// static
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
size_t size) {
- if (size == 0)
+ if (size == 0) {
return {};
+ }
- // Align size as required by ashmem_create_region() API documentation.
- size_t rounded_size = bits::Align(size, GetPageSize());
- if (rounded_size > static_cast<size_t>(std::numeric_limits<int>::max()))
+ // Align size as required by ashmem_create_region() API documentation. This
+ // operation may overflow so check that the result doesn't decrease.
+ size_t rounded_size = bits::AlignUp(size, GetPageSize());
+ if (rounded_size < size ||
+ rounded_size > static_cast<size_t>(std::numeric_limits<int>::max())) {
return {};
+ }
CHECK_NE(mode, Mode::kReadOnly) << "Creating a region in read-only mode will "
"lead to this region being non-modifiable";
UnguessableToken guid = UnguessableToken::Create();
- ScopedFD fd(ashmem_create_region(
- SharedMemoryTracker::GetDumpNameForTracing(guid).c_str(), rounded_size));
- if (!fd.is_valid()) {
+ int fd = ashmem_create_region(
+ SharedMemoryTracker::GetDumpNameForTracing(guid).c_str(), rounded_size);
+ if (fd < 0) {
DPLOG(ERROR) << "ashmem_create_region failed";
return {};
}
- int err = ashmem_set_prot_region(fd.get(), PROT_READ | PROT_WRITE);
+ ScopedFD scoped_fd(fd);
+ int err = ashmem_set_prot_region(scoped_fd.get(), PROT_READ | PROT_WRITE);
if (err < 0) {
DPLOG(ERROR) << "ashmem_set_prot_region failed";
return {};
}
- return PlatformSharedMemoryRegion(std::move(fd), mode, size, guid);
+ return PlatformSharedMemoryRegion(std::move(scoped_fd), mode, size, guid);
}
bool PlatformSharedMemoryRegion::CheckPlatformHandlePermissionsCorrespondToMode(
- PlatformHandle handle,
+ PlatformSharedMemoryHandle handle,
Mode mode,
size_t size) {
int prot = GetAshmemRegionProtectionMask(handle);
@@ -190,9 +165,10 @@
bool expected_read_only = mode == Mode::kReadOnly;
if (is_read_only != expected_read_only) {
- DLOG(ERROR) << "Ashmem region has a wrong protection mask: it is"
- << (is_read_only ? " " : " not ") << "read-only but it should"
- << (expected_read_only ? " " : " not ") << "be";
+ // TODO(crbug.com/838365): convert to DLOG when bug fixed.
+ LOG(ERROR) << "Ashmem region has a wrong protection mask: it is"
+ << (is_read_only ? " " : " not ") << "read-only but it should"
+ << (expected_read_only ? " " : " not ") << "be";
return false;
}
diff --git a/base/memory/platform_shared_memory_region_fuchsia.cc b/base/memory/platform_shared_memory_region_fuchsia.cc
index 378877f..17a25c1 100644
--- a/base/memory/platform_shared_memory_region_fuchsia.cc
+++ b/base/memory/platform_shared_memory_region_fuchsia.cc
@@ -1,4 +1,4 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
+// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -9,9 +9,9 @@
#include <zircon/rights.h>
#include "base/bits.h"
+#include "base/check_op.h"
#include "base/fuchsia/fuchsia_logging.h"
-#include "base/process/process_metrics.h"
-#include "starboard/types.h"
+#include "base/memory/page_size.h"
namespace base {
namespace subtle {
@@ -41,19 +41,6 @@
return PlatformSharedMemoryRegion(std::move(handle), mode, size, guid);
}
-// static
-PlatformSharedMemoryRegion
-PlatformSharedMemoryRegion::TakeFromSharedMemoryHandle(
- const SharedMemoryHandle& handle,
- Mode mode) {
- CHECK(mode == Mode::kReadOnly || mode == Mode::kUnsafe);
- if (!handle.IsValid())
- return {};
-
- return Take(zx::vmo(handle.GetHandle()), mode, handle.GetSize(),
- handle.GetGUID());
-}
-
zx::unowned_vmo PlatformSharedMemoryRegion::GetPlatformHandle() const {
return zx::unowned_vmo(handle_);
}
@@ -108,47 +95,35 @@
return true;
}
-bool PlatformSharedMemoryRegion::MapAtInternal(off_t offset,
- size_t size,
- void** memory,
- size_t* mapped_size) const {
- bool write_allowed = mode_ != Mode::kReadOnly;
- uintptr_t addr;
- zx_status_t status = zx::vmar::root_self()->map(
- 0, handle_, offset, size,
- ZX_VM_FLAG_PERM_READ | (write_allowed ? ZX_VM_FLAG_PERM_WRITE : 0),
- &addr);
- if (status != ZX_OK) {
- ZX_DLOG(ERROR, status) << "zx_vmar_map";
- return false;
- }
-
- *memory = reinterpret_cast<void*>(addr);
- *mapped_size = size;
- return true;
-}
-
// static
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
size_t size) {
if (size == 0)
return {};
- size_t rounded_size = bits::Align(size, GetPageSize());
- if (rounded_size > static_cast<size_t>(std::numeric_limits<int>::max()))
+ // Aligning may overflow so check that the result doesn't decrease.
+ size_t rounded_size = bits::AlignUp(size, GetPageSize());
+ if (rounded_size < size ||
+ rounded_size > static_cast<size_t>(std::numeric_limits<int>::max())) {
return {};
+ }
CHECK_NE(mode, Mode::kReadOnly) << "Creating a region in read-only mode will "
"lead to this region being non-modifiable";
zx::vmo vmo;
- zx_status_t status =
- zx::vmo::create(rounded_size, ZX_VMO_NON_RESIZABLE, &vmo);
+ zx_status_t status = zx::vmo::create(rounded_size, 0, &vmo);
if (status != ZX_OK) {
ZX_DLOG(ERROR, status) << "zx_vmo_create";
return {};
}
+ // TODO(crbug.com/991805): Take base::Location from the caller and use it to
+ // generate the name here.
+ constexpr char kVmoName[] = "cr-shared-memory-region";
+ status = vmo.set_property(ZX_PROP_NAME, kVmoName, strlen(kVmoName));
+ ZX_DCHECK(status == ZX_OK, status);
+
const int kNoExecFlags = ZX_DEFAULT_VMO_RIGHTS & ~ZX_RIGHT_EXECUTE;
status = vmo.replace(kNoExecFlags, &vmo);
if (status != ZX_OK) {
@@ -162,24 +137,28 @@
// static
bool PlatformSharedMemoryRegion::CheckPlatformHandlePermissionsCorrespondToMode(
- PlatformHandle handle,
+ PlatformSharedMemoryHandle handle,
Mode mode,
size_t size) {
zx_info_handle_basic_t basic = {};
zx_status_t status = handle->get_info(ZX_INFO_HANDLE_BASIC, &basic,
sizeof(basic), nullptr, nullptr);
- if (status != ZX_OK) {
- ZX_DLOG(ERROR, status) << "zx_object_get_info";
+ ZX_CHECK(status == ZX_OK, status) << "zx_object_get_info";
+
+ if (basic.type != ZX_OBJ_TYPE_VMO) {
+ // TODO(crbug.com/838365): convert to DLOG when bug fixed.
+ LOG(ERROR) << "Received zircon handle is not a VMO";
return false;
}
- bool is_read_only = (basic.rights & kNoWriteOrExec) == basic.rights;
+ bool is_read_only = (basic.rights & (ZX_RIGHT_WRITE | ZX_RIGHT_EXECUTE)) == 0;
bool expected_read_only = mode == Mode::kReadOnly;
if (is_read_only != expected_read_only) {
- DLOG(ERROR) << "VMO object has wrong access rights: it is"
- << (is_read_only ? " " : " not ") << "read-only but it should"
- << (expected_read_only ? " " : " not ") << "be";
+ // TODO(crbug.com/838365): convert to DLOG when bug fixed.
+ LOG(ERROR) << "VMO object has wrong access rights: it is"
+ << (is_read_only ? " " : " not ") << "read-only but it should"
+ << (expected_read_only ? " " : " not ") << "be";
return false;
}
diff --git a/base/memory/platform_shared_memory_region_mac.cc b/base/memory/platform_shared_memory_region_mac.cc
index c53cafb..798f69a 100644
--- a/base/memory/platform_shared_memory_region_mac.cc
+++ b/base/memory/platform_shared_memory_region_mac.cc
@@ -1,34 +1,22 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
+// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/platform_shared_memory_region.h"
-#include <mach/mach_vm.h>
+#include <mach/vm_map.h>
#include "base/mac/mach_logging.h"
#include "base/mac/scoped_mach_vm.h"
#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
#include "build/build_config.h"
-#include "starboard/types.h"
-
-#if defined(OS_IOS)
-#error "MacOS only - iOS uses platform_shared_memory_region_posix.cc"
-#endif
namespace base {
namespace subtle {
namespace {
-void LogCreateError(PlatformSharedMemoryRegion::CreateError error,
- kern_return_t mac_error) {
- UMA_HISTOGRAM_ENUMERATION("SharedMemory.CreateError", error);
- if (mac_error != KERN_SUCCESS)
- UmaHistogramSparse("SharedMemory.CreateMacError", mac_error);
-}
-
} // namespace
// static
@@ -52,20 +40,6 @@
return PlatformSharedMemoryRegion(std::move(handle), mode, size, guid);
}
-// static
-PlatformSharedMemoryRegion
-PlatformSharedMemoryRegion::TakeFromSharedMemoryHandle(
- const SharedMemoryHandle& handle,
- Mode mode) {
- CHECK(mode == Mode::kReadOnly || mode == Mode::kUnsafe);
- CHECK(handle.GetType() == SharedMemoryHandle::MACH);
- if (!handle.IsValid())
- return {};
-
- return Take(base::mac::ScopedMachSendRight(handle.GetMemoryObject()), mode,
- handle.GetSize(), handle.GetGUID());
-}
-
mach_port_t PlatformSharedMemoryRegion::GetPlatformHandle() const {
return handle_.get();
}
@@ -110,12 +84,12 @@
mac::ScopedMachVM scoped_memory;
if (!temp_addr) {
// Intentionally lower current prot and max prot to |VM_PROT_READ|.
- kern_return_t kr = mach_vm_map(
- mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&temp_addr),
- size_, 0, VM_FLAGS_ANYWHERE, handle_copy.get(), 0, FALSE, VM_PROT_READ,
- VM_PROT_READ, VM_INHERIT_NONE);
+ kern_return_t kr =
+ vm_map(mach_task_self(), reinterpret_cast<vm_address_t*>(&temp_addr),
+ size_, 0, VM_FLAGS_ANYWHERE, handle_copy.get(), 0, FALSE,
+ VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);
if (kr != KERN_SUCCESS) {
- MACH_DLOG(ERROR, kr) << "mach_vm_map";
+ MACH_DLOG(ERROR, kr) << "vm_map";
return false;
}
scoped_memory.reset(reinterpret_cast<vm_address_t>(temp_addr),
@@ -128,7 +102,7 @@
kern_return_t kr = mach_make_memory_entry_64(
mach_task_self(), &allocation_size,
reinterpret_cast<memory_object_offset_t>(temp_addr), VM_PROT_READ,
- named_right.receive(), MACH_PORT_NULL);
+ mac::ScopedMachSendRight::Receiver(named_right).get(), MACH_PORT_NULL);
if (kr != KERN_SUCCESS) {
MACH_DLOG(ERROR, kr) << "mach_make_memory_entry_64";
return false;
@@ -151,84 +125,55 @@
return true;
}
-bool PlatformSharedMemoryRegion::MapAtInternal(off_t offset,
- size_t size,
- void** memory,
- size_t* mapped_size) const {
- bool write_allowed = mode_ != Mode::kReadOnly;
- vm_prot_t vm_prot_write = write_allowed ? VM_PROT_WRITE : 0;
- kern_return_t kr = mach_vm_map(
- mach_task_self(),
- reinterpret_cast<mach_vm_address_t*>(memory), // Output parameter
- size,
- 0, // Alignment mask
- VM_FLAGS_ANYWHERE, handle_.get(), offset,
- FALSE, // Copy
- VM_PROT_READ | vm_prot_write, // Current protection
- VM_PROT_READ | vm_prot_write, // Maximum protection
- VM_INHERIT_NONE);
- if (kr != KERN_SUCCESS) {
- MACH_DLOG(ERROR, kr) << "mach_vm_map";
- return false;
- }
-
- *mapped_size = size;
- return true;
-}
-
// static
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
size_t size) {
if (size == 0) {
- LogCreateError(CreateError::SIZE_ZERO, KERN_SUCCESS);
return {};
}
if (size > static_cast<size_t>(std::numeric_limits<int>::max())) {
- LogCreateError(CreateError::SIZE_TOO_LARGE, KERN_SUCCESS);
return {};
}
CHECK_NE(mode, Mode::kReadOnly) << "Creating a region in read-only mode will "
"lead to this region being non-modifiable";
- mach_vm_size_t vm_size = size;
+ memory_object_size_t vm_size = size;
mac::ScopedMachSendRight named_right;
kern_return_t kr = mach_make_memory_entry_64(
mach_task_self(), &vm_size,
0, // Address.
MAP_MEM_NAMED_CREATE | VM_PROT_READ | VM_PROT_WRITE,
- named_right.receive(),
+ mac::ScopedMachSendRight::Receiver(named_right).get(),
MACH_PORT_NULL); // Parent handle.
- if (kr != KERN_SUCCESS)
- LogCreateError(CreateError::CREATE_FILE_MAPPING_FAILURE, kr);
// Crash as soon as shm allocation fails to debug the issue
// https://crbug.com/872237.
MACH_CHECK(kr == KERN_SUCCESS, kr) << "mach_make_memory_entry_64";
DCHECK_GE(vm_size, size);
- LogCreateError(CreateError::SUCCESS, KERN_SUCCESS);
return PlatformSharedMemoryRegion(std::move(named_right), mode, size,
UnguessableToken::Create());
}
// static
bool PlatformSharedMemoryRegion::CheckPlatformHandlePermissionsCorrespondToMode(
- PlatformHandle handle,
+ PlatformSharedMemoryHandle handle,
Mode mode,
size_t size) {
- mach_vm_address_t temp_addr = 0;
+ vm_address_t temp_addr = 0;
kern_return_t kr =
- mach_vm_map(mach_task_self(), &temp_addr, size, 0, VM_FLAGS_ANYWHERE,
- handle, 0, FALSE, VM_PROT_READ | VM_PROT_WRITE,
- VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
+ vm_map(mach_task_self(), &temp_addr, size, 0, VM_FLAGS_ANYWHERE, handle,
+ 0, FALSE, VM_PROT_READ | VM_PROT_WRITE,
+ VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
if (kr == KERN_SUCCESS) {
kern_return_t kr_deallocate =
- mach_vm_deallocate(mach_task_self(), temp_addr, size);
- MACH_DLOG_IF(ERROR, kr_deallocate != KERN_SUCCESS, kr_deallocate)
- << "mach_vm_deallocate";
+ vm_deallocate(mach_task_self(), temp_addr, size);
+ // TODO(crbug.com/838365): convert to DLOG when bug fixed.
+ MACH_LOG_IF(ERROR, kr_deallocate != KERN_SUCCESS, kr_deallocate)
+ << "vm_deallocate";
} else if (kr != KERN_INVALID_RIGHT) {
- MACH_DLOG(ERROR, kr) << "mach_vm_map";
+ MACH_LOG(ERROR, kr) << "vm_map";
return false;
}
@@ -236,9 +181,10 @@
bool expected_read_only = mode == Mode::kReadOnly;
if (is_read_only != expected_read_only) {
- DLOG(ERROR) << "VM region has a wrong protection mask: it is"
- << (is_read_only ? " " : " not ") << "read-only but it should"
- << (expected_read_only ? " " : " not ") << "be";
+ // TODO(crbug.com/838365): convert to DLOG when bug fixed.
+ LOG(ERROR) << "VM region has a wrong protection mask: it is"
+ << (is_read_only ? " " : " not ") << "read-only but it should"
+ << (expected_read_only ? " " : " not ") << "be";
return false;
}
diff --git a/base/memory/platform_shared_memory_region_posix.cc b/base/memory/platform_shared_memory_region_posix.cc
index f2c4ff6..f1d82cd 100644
--- a/base/memory/platform_shared_memory_region_posix.cc
+++ b/base/memory/platform_shared_memory_region_posix.cc
@@ -1,4 +1,4 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
+// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -6,12 +6,13 @@
#include <fcntl.h>
#include <sys/mman.h>
-#include <sys/stat.h>
+#include "base/files/file.h"
#include "base/files/file_util.h"
+#include "base/logging.h"
+#include "base/metrics/histogram_macros.h"
#include "base/threading/thread_restrictions.h"
#include "build/build_config.h"
-#include "starboard/types.h"
namespace base {
namespace subtle {
@@ -31,41 +32,39 @@
using ScopedPathUnlinker =
ScopedGeneric<const FilePath*, ScopedPathUnlinkerTraits>;
-#if !defined(OS_NACL)
+#if !BUILDFLAG(IS_NACL)
bool CheckFDAccessMode(int fd, int expected_mode) {
int fd_status = fcntl(fd, F_GETFL);
if (fd_status == -1) {
- DPLOG(ERROR) << "fcntl(" << fd << ", F_GETFL) failed";
+ // TODO(crbug.com/838365): convert to DLOG when bug fixed.
+ PLOG(ERROR) << "fcntl(" << fd << ", F_GETFL) failed";
return false;
}
int mode = fd_status & O_ACCMODE;
if (mode != expected_mode) {
- DLOG(ERROR) << "Descriptor access mode (" << mode
- << ") differs from expected (" << expected_mode << ")";
+ // TODO(crbug.com/838365): convert to DLOG when bug fixed.
+ LOG(ERROR) << "Descriptor access mode (" << mode
+ << ") differs from expected (" << expected_mode << ")";
return false;
}
return true;
}
-#endif // !defined(OS_NACL)
+#endif // !BUILDFLAG(IS_NACL)
} // namespace
-ScopedFDPair::ScopedFDPair() = default;
-
-ScopedFDPair::ScopedFDPair(ScopedFDPair&&) = default;
-
-ScopedFDPair& ScopedFDPair::operator=(ScopedFDPair&&) = default;
-
-ScopedFDPair::~ScopedFDPair() = default;
-
-ScopedFDPair::ScopedFDPair(ScopedFD in_fd, ScopedFD in_readonly_fd)
- : fd(std::move(in_fd)), readonly_fd(std::move(in_readonly_fd)) {}
-
-FDPair ScopedFDPair::get() const {
- return {fd.get(), readonly_fd.get()};
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+// static
+ScopedFD PlatformSharedMemoryRegion::ExecutableRegion::CreateFD(size_t size) {
+ PlatformSharedMemoryRegion region =
+ Create(Mode::kUnsafe, size, true /* executable */);
+ if (region.IsValid())
+ return region.PassPlatformHandle().fd;
+ return ScopedFD();
}
+#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
// static
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Take(
@@ -101,26 +100,19 @@
return {};
}
break;
- default:
- DLOG(ERROR) << "Invalid permission mode: " << static_cast<int>(mode);
- return {};
}
return PlatformSharedMemoryRegion(std::move(handle), mode, size, guid);
}
// static
-PlatformSharedMemoryRegion
-PlatformSharedMemoryRegion::TakeFromSharedMemoryHandle(
- const SharedMemoryHandle& handle,
- Mode mode) {
- CHECK(mode == Mode::kReadOnly || mode == Mode::kUnsafe);
- if (!handle.IsValid())
- return {};
-
- return Take(
- base::subtle::ScopedFDPair(ScopedFD(handle.GetHandle()), ScopedFD()),
- mode, handle.GetSize(), handle.GetGUID());
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Take(
+ ScopedFD handle,
+ Mode mode,
+ size_t size,
+ const UnguessableToken& guid) {
+ CHECK_NE(mode, Mode::kWritable);
+ return Take(ScopedFDPair(std::move(handle), ScopedFD()), mode, size, guid);
}
FDPair PlatformSharedMemoryRegion::GetPlatformHandle() const {
@@ -173,36 +165,25 @@
return true;
}
-bool PlatformSharedMemoryRegion::MapAtInternal(off_t offset,
- size_t size,
- void** memory,
- size_t* mapped_size) const {
- bool write_allowed = mode_ != Mode::kReadOnly;
- *memory = mmap(nullptr, size, PROT_READ | (write_allowed ? PROT_WRITE : 0),
- MAP_SHARED, handle_.fd.get(), offset);
-
- bool mmap_succeeded = *memory && *memory != MAP_FAILED;
- if (!mmap_succeeded) {
- DPLOG(ERROR) << "mmap " << handle_.fd.get() << " failed";
- return false;
- }
-
- *mapped_size = size;
- return true;
-}
-
// static
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
- size_t size) {
-#if defined(OS_NACL)
+ size_t size
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+ ,
+ bool executable
+#endif
+) {
+#if BUILDFLAG(IS_NACL)
// Untrusted code can't create descriptors or handles.
return {};
#else
- if (size == 0)
+ if (size == 0) {
return {};
+ }
- if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
+ if (size > static_cast<size_t>(std::numeric_limits<int>::max())) {
return {};
+ }
CHECK_NE(mode, Mode::kReadOnly) << "Creating a region in read-only mode will "
"lead to this region being non-modifiable";
@@ -210,19 +191,26 @@
// This function theoretically can block on the disk, but realistically
// the temporary files we create will just go into the buffer cache
// and be deleted before they ever make it out to disk.
- ThreadRestrictions::ScopedAllowIO allow_io;
+ ScopedAllowBlocking scoped_allow_blocking;
// We don't use shm_open() API in order to support the --disable-dev-shm-usage
// flag.
FilePath directory;
- if (!GetShmemTempDir(false /* executable */, &directory))
+ if (!GetShmemTempDir(
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+ executable,
+#else
+ false /* executable */,
+#endif
+ &directory)) {
return {};
+ }
- ScopedFD fd;
FilePath path;
- fd.reset(CreateAndOpenFdForTemporaryFileInDir(directory, &path));
+ ScopedFD fd = CreateAndOpenFdForTemporaryFileInDir(directory, &path);
+ File shm_file(fd.release());
- if (!fd.is_valid()) {
+ if (!shm_file.IsValid()) {
PLOG(ERROR) << "Creating shared memory in " << path.value() << " failed";
FilePath dir = path.DirName();
if (access(dir.value().c_str(), W_OK | X_OK) < 0) {
@@ -250,38 +238,41 @@
}
}
- // Get current size.
- struct stat stat = {};
- if (fstat(fd.get(), &stat) != 0)
+ if (!AllocateFileRegion(&shm_file, 0, size)) {
return {};
- const size_t current_size = stat.st_size;
- if (current_size != size) {
- if (HANDLE_EINTR(ftruncate(fd.get(), size)) != 0)
- return {};
}
if (readonly_fd.is_valid()) {
- struct stat readonly_stat = {};
- if (fstat(readonly_fd.get(), &readonly_stat))
- NOTREACHED();
+ stat_wrapper_t shm_stat;
+ if (File::Fstat(shm_file.GetPlatformFile(), &shm_stat) != 0) {
+ DPLOG(ERROR) << "fstat(fd) failed";
+ return {};
+ }
- if (stat.st_dev != readonly_stat.st_dev ||
- stat.st_ino != readonly_stat.st_ino) {
+ stat_wrapper_t readonly_stat;
+ if (File::Fstat(readonly_fd.get(), &readonly_stat) != 0) {
+ DPLOG(ERROR) << "fstat(readonly_fd) failed";
+ return {};
+ }
+
+ if (shm_stat.st_dev != readonly_stat.st_dev ||
+ shm_stat.st_ino != readonly_stat.st_ino) {
LOG(ERROR) << "Writable and read-only inodes don't match; bailing";
return {};
}
}
- return PlatformSharedMemoryRegion({std::move(fd), std::move(readonly_fd)},
- mode, size, UnguessableToken::Create());
-#endif // !defined(OS_NACL)
+ return PlatformSharedMemoryRegion(
+ {ScopedFD(shm_file.TakePlatformFile()), std::move(readonly_fd)}, mode,
+ size, UnguessableToken::Create());
+#endif // !BUILDFLAG(IS_NACL)
}
bool PlatformSharedMemoryRegion::CheckPlatformHandlePermissionsCorrespondToMode(
- PlatformHandle handle,
+ PlatformSharedMemoryHandle handle,
Mode mode,
size_t size) {
-#if !defined(OS_NACL)
+#if !BUILDFLAG(IS_NACL)
if (!CheckFDAccessMode(handle.fd,
mode == Mode::kReadOnly ? O_RDONLY : O_RDWR)) {
return false;
@@ -292,33 +283,18 @@
// The second descriptor must be invalid in kReadOnly and kUnsafe modes.
if (handle.readonly_fd != -1) {
- DLOG(ERROR) << "The second descriptor must be invalid";
+ // TODO(crbug.com/838365): convert to DLOG when bug fixed.
+ LOG(ERROR) << "The second descriptor must be invalid";
return false;
}
return true;
#else
// fcntl(_, F_GETFL) is not implemented on NaCl.
- void* temp_memory = nullptr;
- temp_memory =
- mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, handle.fd, 0);
-
- bool mmap_succeeded = temp_memory && temp_memory != MAP_FAILED;
- if (mmap_succeeded)
- munmap(temp_memory, size);
-
- bool is_read_only = !mmap_succeeded;
- bool expected_read_only = mode == Mode::kReadOnly;
-
- if (is_read_only != expected_read_only) {
- DLOG(ERROR) << "Descriptor has a wrong access mode: it is"
- << (is_read_only ? " " : " not ") << "read-only but it should"
- << (expected_read_only ? " " : " not ") << "be";
- return false;
- }
-
+ // We also cannot try to mmap() a region as writable and look at the return
+ // status because the plugin process crashes if system mmap() fails.
return true;
-#endif // !defined(OS_NACL)
+#endif // !BUILDFLAG(IS_NACL)
}
PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
diff --git a/base/memory/platform_shared_memory_region_starboard.cc b/base/memory/platform_shared_memory_region_starboard.cc
new file mode 100644
index 0000000..ab81901
--- /dev/null
+++ b/base/memory/platform_shared_memory_region_starboard.cc
@@ -0,0 +1,72 @@
+// Copyright 2023 The Cobalt Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "base/memory/platform_shared_memory_region.h"
+
+#include "base/notreached.h"
+
+namespace base {
+namespace subtle {
+
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Take(
+ ScopedPlatformSharedMemoryHandle handle,
+ Mode mode,
+ size_t size,
+ const UnguessableToken& guid) {
+ NOTREACHED();
+ return {};
+}
+
+bool PlatformSharedMemoryRegion::IsValid() const {
+ NOTREACHED();
+ return false;
+}
+
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Duplicate() const {
+ NOTREACHED();
+ return {};
+}
+
+bool PlatformSharedMemoryRegion::ConvertToReadOnly() {
+ NOTREACHED();
+ return false;
+}
+
+bool PlatformSharedMemoryRegion::ConvertToUnsafe() {
+ NOTREACHED();
+ return false;
+}
+
+PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
+ size_t size) {
+ NOTREACHED();
+ return {};
+}
+
+bool PlatformSharedMemoryRegion::CheckPlatformHandlePermissionsCorrespondToMode(
+ PlatformSharedMemoryHandle handle,
+ Mode mode,
+ size_t size) {
+ NOTREACHED();
+ return false;
+}
+
+PlatformSharedMemoryHandle PlatformSharedMemoryRegion::GetPlatformHandle()
+ const {
+ NOTREACHED();
+ return 0;
+}
+
+} // namespace subtle
+} // namespace base
\ No newline at end of file
diff --git a/base/memory/platform_shared_memory_region_unittest.cc b/base/memory/platform_shared_memory_region_unittest.cc
index 7c567cb..c18b43f 100644
--- a/base/memory/platform_shared_memory_region_unittest.cc
+++ b/base/memory/platform_shared_memory_region_unittest.cc
@@ -1,32 +1,34 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
+// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/platform_shared_memory_region.h"
-#include "base/logging.h"
-#include "base/memory/shared_memory.h"
+#include <tuple>
+
+#include "base/check.h"
#include "base/memory/shared_memory_mapping.h"
#include "base/process/process_metrics.h"
-#include "base/sys_info.h"
+#include "base/ranges/algorithm.h"
+#include "base/system/sys_info.h"
#include "base/test/gtest_util.h"
#include "base/test/test_shared_memory_util.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
-#if defined(OS_MACOSX) && !defined(OS_IOS)
-#include <mach/mach_vm.h>
+#if BUILDFLAG(IS_APPLE)
+#include <mach/vm_map.h>
#include <sys/mman.h>
-#elif defined(OS_POSIX) && !defined(OS_IOS)
+#elif BUILDFLAG(IS_POSIX)
#include <sys/mman.h>
#include "base/debug/proc_maps_linux.h"
-#elif defined(OS_WIN)
+#elif BUILDFLAG(IS_WIN)
#include <windows.h>
-#elif defined(OS_FUCHSIA)
+#include "base/logging.h"
+#elif BUILDFLAG(IS_FUCHSIA)
#include <lib/zx/object.h>
#include <lib/zx/process.h>
#include "base/fuchsia/fuchsia_logging.h"
-#include "starboard/types.h"
#endif
namespace base {
@@ -73,6 +75,19 @@
EXPECT_FALSE(region2.IsValid());
}
+// Tests that creating a region of maximum possible value returns an invalid
+// region.
+TEST_F(PlatformSharedMemoryRegionTest, CreateMaxSizeRegionIsInvalid) {
+ size_t max_region_size = std::numeric_limits<size_t>::max();
+ PlatformSharedMemoryRegion region =
+ PlatformSharedMemoryRegion::CreateWritable(max_region_size);
+ EXPECT_FALSE(region.IsValid());
+
+ PlatformSharedMemoryRegion region2 =
+ PlatformSharedMemoryRegion::CreateUnsafe(max_region_size);
+ EXPECT_FALSE(region2.IsValid());
+}
+
// Tests that regions consistently report their size as the size requested at
// creation time even if their allocation size is larger due to platform
// constraints.
@@ -125,7 +140,7 @@
PlatformSharedMemoryRegion region =
PlatformSharedMemoryRegion::CreateWritable(kRegionSize);
ASSERT_TRUE(region.IsValid());
- ignore_result(region.PassPlatformHandle());
+ std::ignore = region.PassPlatformHandle();
EXPECT_FALSE(region.IsValid());
}
@@ -197,8 +212,7 @@
EXPECT_FALSE(mapping.IsValid());
}
-#if defined(OS_POSIX) && !defined(OS_ANDROID) && \
- (!defined(OS_MACOSX) || defined(OS_IOS))
+#if BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_ANDROID) && !BUILDFLAG(IS_APPLE)
// Tests that the second handle is closed after a conversion to read-only on
// POSIX.
TEST_F(PlatformSharedMemoryRegionTest,
@@ -224,31 +238,34 @@
#endif
void CheckReadOnlyMapProtection(void* addr) {
-#if defined(OS_MACOSX) && !defined(OS_IOS)
+#if BUILDFLAG(IS_APPLE)
vm_region_basic_info_64 basic_info;
- mach_vm_size_t dummy_size = 0;
- void* temp_addr = addr;
- MachVMRegionResult result = GetBasicInfo(
- mach_task_self(), &dummy_size,
- reinterpret_cast<mach_vm_address_t*>(&temp_addr), &basic_info);
- ASSERT_EQ(result, MachVMRegionResult::Success);
+ vm_size_t dummy_size = 0;
+ mach_msg_type_number_t info_count = VM_REGION_BASIC_INFO_COUNT_64;
+ mach_port_t object_name;
+ kern_return_t kr = vm_region_64(
+ mach_task_self(), reinterpret_cast<vm_address_t*>(&addr), &dummy_size,
+ VM_REGION_BASIC_INFO_64, reinterpret_cast<vm_region_info_t>(&basic_info),
+ &info_count, &object_name);
+ mach_port_deallocate(mach_task_self(), object_name);
+
+ ASSERT_EQ(kr, KERN_SUCCESS);
EXPECT_EQ(basic_info.protection & VM_PROT_ALL, VM_PROT_READ);
EXPECT_EQ(basic_info.max_protection & VM_PROT_ALL, VM_PROT_READ);
-#elif defined(OS_POSIX) && !defined(OS_IOS)
+#elif BUILDFLAG(IS_POSIX)
std::string proc_maps;
ASSERT_TRUE(base::debug::ReadProcMaps(&proc_maps));
std::vector<base::debug::MappedMemoryRegion> regions;
  ASSERT_TRUE(base::debug::ParseProcMaps(proc_maps, &regions));
- auto it =
- std::find_if(regions.begin(), regions.end(),
- [addr](const base::debug::MappedMemoryRegion& region) {
- return region.start == reinterpret_cast<uintptr_t>(addr);
- });
+ auto it = ranges::find_if(
+ regions, [addr](const base::debug::MappedMemoryRegion& region) {
+ return region.start == reinterpret_cast<uintptr_t>(addr);
+ });
ASSERT_TRUE(it != regions.end());
// PROT_READ may imply PROT_EXEC on some architectures, so just check that
// permissions don't contain PROT_WRITE bit.
EXPECT_FALSE(it->permissions & base::debug::MappedMemoryRegion::WRITE);
-#elif defined(OS_WIN)
+#elif BUILDFLAG(IS_WIN)
MEMORY_BASIC_INFORMATION memory_info;
size_t result = VirtualQueryEx(GetCurrentProcess(), addr, &memory_info,
sizeof(memory_info));
@@ -258,7 +275,7 @@
logging::GetLastSystemErrorCode());
EXPECT_EQ(memory_info.AllocationProtect, static_cast<DWORD>(PAGE_READONLY));
EXPECT_EQ(memory_info.Protect, static_cast<DWORD>(PAGE_READONLY));
-#elif defined(OS_FUCHSIA)
+#elif BUILDFLAG(IS_FUCHSIA)
// TODO(alexilin): We cannot call zx_object_get_info ZX_INFO_PROCESS_MAPS in
// this process. Consider to create an auxiliary process that will read the
// test process maps.
@@ -266,16 +283,16 @@
}
bool TryToRestoreWritablePermissions(void* addr, size_t len) {
-#if defined(OS_POSIX) && !defined(OS_IOS)
+#if BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_IOS)
int result = mprotect(addr, len, PROT_READ | PROT_WRITE);
return result != -1;
-#elif defined(OS_WIN)
+#elif BUILDFLAG(IS_WIN)
DWORD old_protection;
return VirtualProtect(addr, len, PAGE_READWRITE, &old_protection);
-#elif defined(OS_FUCHSIA)
- zx_status_t status = zx::vmar::root_self()->protect(
- reinterpret_cast<uintptr_t>(addr), len,
- ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE);
+#elif BUILDFLAG(IS_FUCHSIA)
+ zx_status_t status =
+ zx::vmar::root_self()->protect(ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
+ reinterpret_cast<uintptr_t>(addr), len);
return status == ZX_OK;
#else
return false;
@@ -418,66 +435,5 @@
EXPECT_DEATH_IF_SUPPORTED(region.ConvertToUnsafe(), kErrorRegex);
}
-// Check that taking from a SharedMemoryHandle works.
-TEST_F(PlatformSharedMemoryRegionTest, TakeFromSharedMemoryHandle) {
- SharedMemory shm;
- auto region = PlatformSharedMemoryRegion::TakeFromSharedMemoryHandle(
- shm.TakeHandle(), PlatformSharedMemoryRegion::Mode::kUnsafe);
- ASSERT_FALSE(region.IsValid());
-
- shm.CreateAndMapAnonymous(10);
- region = PlatformSharedMemoryRegion::TakeFromSharedMemoryHandle(
- shm.TakeHandle(), PlatformSharedMemoryRegion::Mode::kUnsafe);
- ASSERT_TRUE(region.IsValid());
-
-#if !(defined(OS_MACOSX) && !defined(OS_IOS))
- // Note that it's not possible on all platforms for TakeFromSharedMemoryHandle
- // to conveniently check if the SharedMemoryHandle is readonly or
- // not. Therefore it is actually possible to get an kUnsafe
- // PlatformSharedMemoryRegion from a readonly handle on some platforms.
- SharedMemoryCreateOptions options;
- options.size = 10;
- options.share_read_only = true;
- shm.Create(options);
- EXPECT_DEATH_IF_SUPPORTED(
- PlatformSharedMemoryRegion::TakeFromSharedMemoryHandle(
- shm.GetReadOnlyHandle(), PlatformSharedMemoryRegion::Mode::kUnsafe),
- "");
-#endif // !(defined(OS_MACOSX) && !defined(OS_IOS))
-}
-
-// Check that taking from a readonly SharedMemoryHandle works.
-TEST_F(PlatformSharedMemoryRegionTest, TakeFromReadOnlySharedMemoryHandle) {
- SharedMemory shm;
- // Note that getting a read-only handle from an unmapped SharedMemory will
- // fail, so the invalid region case cannot be tested.
- SharedMemoryCreateOptions options;
- options.size = 10;
- options.share_read_only = true;
- shm.Create(options);
- auto readonly_handle = shm.GetReadOnlyHandle();
-#if defined(OS_ANDROID)
- readonly_handle.SetRegionReadOnly();
-#endif
- auto region = PlatformSharedMemoryRegion::TakeFromSharedMemoryHandle(
- readonly_handle, PlatformSharedMemoryRegion::Mode::kReadOnly);
- ASSERT_TRUE(region.IsValid());
-}
-
-// Check that taking from a SharedMemoryHandle in writable mode fails.
-TEST_F(PlatformSharedMemoryRegionTest, WritableTakeFromSharedMemoryHandle) {
- SharedMemory shm;
- EXPECT_DEATH_IF_SUPPORTED(
- PlatformSharedMemoryRegion::TakeFromSharedMemoryHandle(
- shm.TakeHandle(), PlatformSharedMemoryRegion::Mode::kWritable),
- "");
-
- shm.CreateAndMapAnonymous(10);
- EXPECT_DEATH_IF_SUPPORTED(
- PlatformSharedMemoryRegion::TakeFromSharedMemoryHandle(
- shm.TakeHandle(), PlatformSharedMemoryRegion::Mode::kWritable),
- "");
-}
-
} // namespace subtle
} // namespace base
diff --git a/base/memory/platform_shared_memory_region_win.cc b/base/memory/platform_shared_memory_region_win.cc
index 691ce87..e387015 100644
--- a/base/memory/platform_shared_memory_region_win.cc
+++ b/base/memory/platform_shared_memory_region_win.cc
@@ -1,35 +1,25 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
+// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/platform_shared_memory_region.h"
#include <aclapi.h>
+#include <stddef.h>
+#include <stdint.h>
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/bits.h"
+#include "base/logging.h"
#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
#include "base/process/process_handle.h"
-#include "base/rand_util.h"
-#include "base/strings/stringprintf.h"
-#include "base/win/windows_version.h"
-#include "starboard/types.h"
+#include "base/strings/string_util.h"
-namespace base {
-namespace subtle {
+namespace base::subtle {
namespace {
-// Emits UMA metrics about encountered errors. Pass zero (0) for |winerror|
-// if there is no associated Windows error.
-void LogError(PlatformSharedMemoryRegion::CreateError error, DWORD winerror) {
- UMA_HISTOGRAM_ENUMERATION("SharedMemory.CreateError", error);
- static_assert(ERROR_SUCCESS == 0, "Windows error code changed!");
- if (winerror != ERROR_SUCCESS)
- UmaHistogramSparse("SharedMemory.CreateWinError", winerror);
-}
-
typedef enum _SECTION_INFORMATION_CLASS {
SectionBasicInformation,
} SECTION_INFORMATION_CLASS;
@@ -47,16 +37,6 @@
ULONG SectionInformationLength,
PULONG ResultLength);
-// Returns the length of the memory section starting at the supplied address.
-size_t GetMemorySectionSize(void* address) {
- MEMORY_BASIC_INFORMATION memory_info;
- if (!::VirtualQuery(address, &memory_info, sizeof(memory_info)))
- return 0;
- return memory_info.RegionSize -
- (static_cast<char*>(address) -
- static_cast<char*>(memory_info.AllocationBase));
-}
-
// Checks if the section object is safe to map. At the moment this just means
// it's not an image section.
bool IsSectionSafeToMap(HANDLE handle) {
@@ -93,9 +73,6 @@
HANDLE h = CreateFileMapping(INVALID_HANDLE_VALUE, sa, PAGE_READWRITE, 0,
static_cast<DWORD>(rounded_size), name);
if (!h) {
- LogError(
- PlatformSharedMemoryRegion::CreateError::CREATE_FILE_MAPPING_FAILURE,
- GetLastError());
return nullptr;
}
@@ -108,9 +85,6 @@
DCHECK(rv);
if (!success) {
- LogError(
- PlatformSharedMemoryRegion::CreateError::REDUCE_PERMISSIONS_FAILURE,
- GetLastError());
return nullptr;
}
@@ -125,7 +99,7 @@
Mode mode,
size_t size,
const UnguessableToken& guid) {
- if (!handle.IsValid())
+ if (!handle.is_valid())
return {};
if (size == 0)
@@ -134,34 +108,21 @@
if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
return {};
- if (!IsSectionSafeToMap(handle.Get()))
+ if (!IsSectionSafeToMap(handle.get()))
return {};
CHECK(
- CheckPlatformHandlePermissionsCorrespondToMode(handle.Get(), mode, size));
+ CheckPlatformHandlePermissionsCorrespondToMode(handle.get(), mode, size));
return PlatformSharedMemoryRegion(std::move(handle), mode, size, guid);
}
-// static
-PlatformSharedMemoryRegion
-PlatformSharedMemoryRegion::TakeFromSharedMemoryHandle(
- const SharedMemoryHandle& handle,
- Mode mode) {
- CHECK(mode == Mode::kReadOnly || mode == Mode::kUnsafe);
- if (!handle.IsValid())
- return {};
-
- return Take(base::win::ScopedHandle(handle.GetHandle()), mode,
- handle.GetSize(), handle.GetGUID());
-}
-
HANDLE PlatformSharedMemoryRegion::GetPlatformHandle() const {
- return handle_.Get();
+ return handle_.get();
}
bool PlatformSharedMemoryRegion::IsValid() const {
- return handle_.IsValid();
+ return handle_.is_valid();
}
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Duplicate() const {
@@ -174,7 +135,7 @@
HANDLE duped_handle;
ProcessHandle process = GetCurrentProcess();
BOOL success =
- ::DuplicateHandle(process, handle_.Get(), process, &duped_handle, 0,
+ ::DuplicateHandle(process, handle_.get(), process, &duped_handle, 0,
FALSE, DUPLICATE_SAME_ACCESS);
if (!success)
return {};
@@ -190,12 +151,12 @@
CHECK_EQ(mode_, Mode::kWritable)
<< "Only writable shared memory region can be converted to read-only";
- win::ScopedHandle handle_copy(handle_.Take());
+ win::ScopedHandle handle_copy(handle_.release());
HANDLE duped_handle;
ProcessHandle process = GetCurrentProcess();
BOOL success =
- ::DuplicateHandle(process, handle_copy.Get(), process, &duped_handle,
+ ::DuplicateHandle(process, handle_copy.get(), process, &duped_handle,
FILE_MAP_READ | SECTION_QUERY, FALSE, 0);
if (!success)
return false;
@@ -216,30 +177,6 @@
return true;
}
-bool PlatformSharedMemoryRegion::MapAtInternal(off_t offset,
- size_t size,
- void** memory,
- size_t* mapped_size) const {
- bool write_allowed = mode_ != Mode::kReadOnly;
- // Try to map the shared memory. On the first failure, release any reserved
- // address space for a single entry.
- for (int i = 0; i < 2; ++i) {
- *memory = MapViewOfFile(
- handle_.Get(), FILE_MAP_READ | (write_allowed ? FILE_MAP_WRITE : 0),
- static_cast<uint64_t>(offset) >> 32, static_cast<DWORD>(offset), size);
- if (*memory)
- break;
- ReleaseReservation();
- }
- if (!*memory) {
- DPLOG(ERROR) << "Failed executing MapViewOfFile";
- return false;
- }
-
- *mapped_size = GetMemorySectionSize(*memory);
- return true;
-}
-
// static
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
size_t size) {
@@ -247,13 +184,13 @@
// per mapping on average.
static const size_t kSectionSize = 65536;
if (size == 0) {
- LogError(CreateError::SIZE_ZERO, 0);
return {};
}
- size_t rounded_size = bits::Align(size, kSectionSize);
- if (rounded_size > static_cast<size_t>(std::numeric_limits<int>::max())) {
- LogError(CreateError::SIZE_TOO_LARGE, 0);
+ // Aligning may overflow so check that the result doesn't decrease.
+ size_t rounded_size = bits::AlignUp(size, kSectionSize);
+ if (rounded_size < size ||
+ rounded_size > static_cast<size_t>(std::numeric_limits<int>::max())) {
return {};
}
@@ -264,36 +201,21 @@
ACL dacl;
SECURITY_DESCRIPTOR sd;
if (!InitializeAcl(&dacl, sizeof(dacl), ACL_REVISION)) {
- LogError(CreateError::INITIALIZE_ACL_FAILURE, GetLastError());
return {};
}
if (!InitializeSecurityDescriptor(&sd, SECURITY_DESCRIPTOR_REVISION)) {
- LogError(CreateError::INITIALIZE_SECURITY_DESC_FAILURE, GetLastError());
return {};
}
if (!SetSecurityDescriptorDacl(&sd, TRUE, &dacl, FALSE)) {
- LogError(CreateError::SET_SECURITY_DESC_FAILURE, GetLastError());
return {};
}
- string16 name;
- if (base::win::GetVersion() < base::win::VERSION_WIN8_1) {
- // Windows < 8.1 ignores DACLs on certain unnamed objects (like shared
- // sections). So, we generate a random name when we need to enforce
- // read-only.
- uint64_t rand_values[4];
- RandBytes(&rand_values, sizeof(rand_values));
- name = StringPrintf(L"CrSharedMem_%016llx%016llx%016llx%016llx",
- rand_values[0], rand_values[1], rand_values[2],
- rand_values[3]);
- DCHECK(!name.empty());
- }
-
+ std::u16string name;
SECURITY_ATTRIBUTES sa = {sizeof(sa), &sd, FALSE};
// Ask for the file mapping with reduced permisions to avoid passing the
// access control permissions granted by default into unpriviledged process.
HANDLE h = CreateFileMappingWithReducedPermissions(
- &sa, rounded_size, name.empty() ? nullptr : name.c_str());
+ &sa, rounded_size, name.empty() ? nullptr : as_wcstr(name));
if (h == nullptr) {
// The error is logged within CreateFileMappingWithReducedPermissions().
return {};
@@ -302,18 +224,16 @@
win::ScopedHandle scoped_h(h);
// Check if the shared memory pre-exists.
if (GetLastError() == ERROR_ALREADY_EXISTS) {
- LogError(CreateError::ALREADY_EXISTS, ERROR_ALREADY_EXISTS);
return {};
}
- LogError(CreateError::SUCCESS, ERROR_SUCCESS);
return PlatformSharedMemoryRegion(std::move(scoped_h), mode, size,
UnguessableToken::Create());
}
// static
bool PlatformSharedMemoryRegion::CheckPlatformHandlePermissionsCorrespondToMode(
- PlatformHandle handle,
+ PlatformSharedMemoryHandle handle,
Mode mode,
size_t size) {
// Call ::DuplicateHandle() with FILE_MAP_WRITE as a desired access to check
@@ -347,5 +267,4 @@
const UnguessableToken& guid)
: handle_(std::move(handle)), mode_(mode), size_(size), guid_(guid) {}
-} // namespace subtle
-} // namespace base
+} // namespace base::subtle
diff --git a/base/memory/protected_memory.cc b/base/memory/protected_memory.cc
deleted file mode 100644
index 157a677..0000000
--- a/base/memory/protected_memory.cc
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/protected_memory.h"
-#include "base/synchronization/lock.h"
-
-namespace base {
-
-#if !defined(COMPONENT_BUILD)
-PROTECTED_MEMORY_SECTION int AutoWritableMemory::writers = 0;
-#endif // !defined(COMPONENT_BUILD)
-
-base::LazyInstance<Lock>::Leaky AutoWritableMemory::writers_lock =
- LAZY_INSTANCE_INITIALIZER;
-
-} // namespace base
diff --git a/base/memory/protected_memory.h b/base/memory/protected_memory.h
deleted file mode 100644
index 3cb2ec3..0000000
--- a/base/memory/protected_memory.h
+++ /dev/null
@@ -1,276 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Protected memory is memory holding security-sensitive data intended to be
-// left read-only for the majority of its lifetime to avoid being overwritten
-// by attackers. ProtectedMemory is a simple wrapper around platform-specific
-// APIs to set memory read-write and read-only when required. Protected memory
-// should be set read-write for the minimum amount of time required.
-
-// Normally mutable variables are held in read-write memory and constant data
-// is held in read-only memory to ensure it is not accidentally overwritten.
-// In some cases we want to hold mutable variables in read-only memory, except
-// when they are being written to, to ensure that they are not tampered with.
-//
-// ProtectedMemory is a container class intended to hold a single variable in
-// read-only memory, except when explicitly set read-write. The variable can be
-// set read-write by creating a scoped AutoWritableMemory object by calling
-// AutoWritableMemory::Create(), the memory stays writable until the returned
-// object goes out of scope and is destructed. The wrapped variable can be
-// accessed using operator* and operator->.
-//
-// Instances of ProtectedMemory must be declared in the PROTECTED_MEMORY_SECTION
-// and as global variables. Because protected memory variables are globals, the
-// the same rules apply disallowing non-trivial constructors and destructors.
-// Global definitions are required to avoid the linker placing statics in
-// inlinable functions into a comdat section and setting the protected memory
-// section read-write when they are merged.
-//
-// EXAMPLE:
-//
-// struct Items { void* item1; };
-// static PROTECTED_MEMORY_SECTION base::ProtectedMemory<Items> items;
-// void InitializeItems() {
-// // Explicitly set items read-write before writing to it.
-// auto writer = base::AutoWritableMemory::Create(items);
-// items->item1 = /* ... */;
-// assert(items->item1 != nullptr);
-// // items is set back to read-only on the destruction of writer
-// }
-//
-// using FnPtr = void (*)(void);
-// PROTECTED_MEMORY_SECTION base::ProtectedMemory<FnPtr> fnPtr;
-// FnPtr ResolveFnPtr(void) {
-// // The Initializer nested class is a helper class for creating a static
-// // initializer for a ProtectedMemory variable. It implicitly sets the
-// // variable read-write during initialization.
-// static base::ProtectedMemory<FnPtr>::Initializer I(&fnPtr,
-// reinterpret_cast<FnPtr>(dlsym(/* ... */)));
-// return *fnPtr;
-// }
-
-#ifndef BASE_MEMORY_PROTECTED_MEMORY_H_
-#define BASE_MEMORY_PROTECTED_MEMORY_H_
-
-#include "base/lazy_instance.h"
-#include "base/logging.h"
-#include "base/macros.h"
-#include "base/memory/protected_memory_buildflags.h"
-#include "base/synchronization/lock.h"
-#include "build/build_config.h"
-
-#define PROTECTED_MEMORY_ENABLED 1
-
-// Linking with lld is required to workaround crbug.com/792777.
-// TODO(vtsyrklevich): Remove once support for gold on Android/CrOs is dropped
-#if defined(OS_LINUX) && BUILDFLAG(USE_LLD)
-// Define the section read-only
-__asm__(".section protected_memory, \"a\"\n\t");
-#define PROTECTED_MEMORY_SECTION __attribute__((section("protected_memory")))
-
-// Explicitly mark these variables hidden so the symbols are local to the
-// currently built component. Otherwise they are created with global (external)
-// linkage and component builds would break because a single pair of these
-// symbols would override the rest.
-__attribute__((visibility("hidden"))) extern char __start_protected_memory;
-__attribute__((visibility("hidden"))) extern char __stop_protected_memory;
-
-#elif defined(OS_MACOSX) && !defined(OS_IOS)
-// The segment the section is in is defined read-only with a linker flag in
-// build/config/mac/BUILD.gn
-#define PROTECTED_MEMORY_SECTION \
- __attribute__((section("PROTECTED_MEMORY, protected_memory")))
-extern char __start_protected_memory __asm(
- "section$start$PROTECTED_MEMORY$protected_memory");
-extern char __stop_protected_memory __asm(
- "section$end$PROTECTED_MEMORY$protected_memory");
-
-#elif defined(OS_WIN)
-// Define a read-write prot section. The $a, $mem, and $z 'sub-sections' are
-// merged alphabetically so $a and $z are used to define the start and end of
-// the protected memory section, and $mem holds protected variables.
-// (Note: Sections in Portable Executables are equivalent to segments in other
-// executable formats, so this section is mapped into its own pages.)
-#pragma section("prot$a", read, write)
-#pragma section("prot$mem", read, write)
-#pragma section("prot$z", read, write)
-
-// We want the protected memory section to be read-only, not read-write so we
-// instruct the linker to set the section read-only at link time. We do this
-// at link time instead of compile time, because defining the prot section
-// read-only would cause mis-compiles due to optimizations assuming that the
-// section contents are constant.
-#pragma comment(linker, "/SECTION:prot,R")
-
-__declspec(allocate("prot$a")) __declspec(selectany)
-char __start_protected_memory;
-__declspec(allocate("prot$z")) __declspec(selectany)
-char __stop_protected_memory;
-
-#define PROTECTED_MEMORY_SECTION __declspec(allocate("prot$mem"))
-
-#else
-#undef PROTECTED_MEMORY_ENABLED
-#define PROTECTED_MEMORY_ENABLED 0
-#define PROTECTED_MEMORY_SECTION
-#endif
-
-namespace base {
-
-template <typename T>
-class ProtectedMemory {
- public:
- ProtectedMemory() = default;
-
- // Expose direct access to the encapsulated variable
- T& operator*() { return data; }
- const T& operator*() const { return data; }
- T* operator->() { return &data; }
- const T* operator->() const { return &data; }
-
- // Helper class for creating simple ProtectedMemory static initializers.
- class Initializer {
- public:
- // Defined out-of-line below to break circular definition dependency between
- // ProtectedMemory and AutoWritableMemory.
- Initializer(ProtectedMemory<T>* PM, const T& Init);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(Initializer);
- };
-
- private:
- T data;
-
- DISALLOW_COPY_AND_ASSIGN(ProtectedMemory);
-};
-
-// DCHECK that the byte at |ptr| is read-only.
-BASE_EXPORT void AssertMemoryIsReadOnly(const void* ptr);
-
-// Abstract out platform-specific methods to get the beginning and end of the
-// PROTECTED_MEMORY_SECTION. ProtectedMemoryEnd returns a pointer to the byte
-// past the end of the PROTECTED_MEMORY_SECTION.
-#if PROTECTED_MEMORY_ENABLED
-constexpr void* ProtectedMemoryStart = &__start_protected_memory;
-constexpr void* ProtectedMemoryEnd = &__stop_protected_memory;
-#endif
-
-#if defined(COMPONENT_BUILD)
-namespace internal {
-
-// For component builds we want to define a separate global writers variable
-// (explained below) in every DSO that includes this header. To do that we use
-// this template to define a global without duplicate symbol errors.
-template <typename T>
-struct DsoSpecific {
- static T value;
-};
-template <typename T>
-T DsoSpecific<T>::value = 0;
-
-} // namespace internal
-#endif // defined(COMPONENT_BUILD)
-
-// A class that sets a given ProtectedMemory variable writable while the
-// AutoWritableMemory is in scope. This class implements the logic for setting
-// the protected memory region read-only/read-write in a thread-safe manner.
-class AutoWritableMemory {
- private:
- // 'writers' is a global holding the number of ProtectedMemory instances set
- // writable, used to avoid races setting protected memory readable/writable.
- // When this reaches zero the protected memory region is set read only.
- // Access is controlled by writers_lock.
-#if defined(COMPONENT_BUILD)
- // For component builds writers is a reference to an int defined separately in
- // every DSO.
- static constexpr int& writers = internal::DsoSpecific<int>::value;
-#else
- // Otherwise, we declare writers in the protected memory section to avoid the
- // scenario where an attacker could overwrite it with a large value and invoke
- // code that constructs and destructs an AutoWritableMemory. After such a call
- // protected memory would still be set writable because writers > 0.
- static int writers;
-#endif // defined(COMPONENT_BUILD)
-
- // Synchronizes access to the writers variable and the simultaneous actions
- // that need to happen alongside writers changes, e.g. setting the protected
- // memory region readable when writers is decremented to 0.
- static BASE_EXPORT base::LazyInstance<Lock>::Leaky writers_lock;
-
- // Abstract out platform-specific memory APIs. |end| points to the byte past
- // the end of the region of memory having its memory protections changed.
- BASE_EXPORT bool SetMemoryReadWrite(void* start, void* end);
- BASE_EXPORT bool SetMemoryReadOnly(void* start, void* end);
-
- // If this is the first writer (e.g. writers == 0) set the writers variable
- // read-write. Next, increment writers and set the requested memory writable.
- AutoWritableMemory(void* ptr, void* ptr_end) {
-#if PROTECTED_MEMORY_ENABLED
- DCHECK(ptr >= ProtectedMemoryStart && ptr_end <= ProtectedMemoryEnd);
-
- {
- base::AutoLock auto_lock(writers_lock.Get());
- if (writers == 0) {
- AssertMemoryIsReadOnly(ptr);
-#if !defined(COMPONENT_BUILD)
- AssertMemoryIsReadOnly(&writers);
- CHECK(SetMemoryReadWrite(&writers, &writers + 1));
-#endif // !defined(COMPONENT_BUILD)
- }
-
- writers++;
- }
-
- CHECK(SetMemoryReadWrite(ptr, ptr_end));
-#endif // PROTECTED_MEMORY_ENABLED
- }
-
- public:
- // Wrap the private constructor to create an easy-to-use interface to
- // construct AutoWritableMemory objects.
- template <typename T>
- static AutoWritableMemory Create(ProtectedMemory<T>& PM) {
- T* ptr = &*PM;
- return AutoWritableMemory(ptr, ptr + 1);
- }
-
- // Move constructor just increments writers
- AutoWritableMemory(AutoWritableMemory&& original) {
-#if PROTECTED_MEMORY_ENABLED
- base::AutoLock auto_lock(writers_lock.Get());
- CHECK_GT(writers, 0);
- writers++;
-#endif // PROTECTED_MEMORY_ENABLED
- }
-
- // On destruction decrement writers, and if no other writers exist, set the
- // entire protected memory region read-only.
- ~AutoWritableMemory() {
-#if PROTECTED_MEMORY_ENABLED
- base::AutoLock auto_lock(writers_lock.Get());
- CHECK_GT(writers, 0);
- writers--;
-
- if (writers == 0) {
- CHECK(SetMemoryReadOnly(ProtectedMemoryStart, ProtectedMemoryEnd));
-#if !defined(COMPONENT_BUILD)
- AssertMemoryIsReadOnly(&writers);
-#endif // !defined(COMPONENT_BUILD)
- }
-#endif // PROTECTED_MEMORY_ENABLED
- }
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(AutoWritableMemory);
-};
-
-template <typename T>
-ProtectedMemory<T>::Initializer::Initializer(ProtectedMemory<T>* PM,
- const T& Init) {
- AutoWritableMemory writer = AutoWritableMemory::Create(*PM);
- **PM = Init;
-}
-
-} // namespace base
-
-#endif // BASE_MEMORY_PROTECTED_MEMORY_H_
diff --git a/base/memory/protected_memory_cfi.h b/base/memory/protected_memory_cfi.h
deleted file mode 100644
index a90023b..0000000
--- a/base/memory/protected_memory_cfi.h
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Helper routines to call function pointers stored in protected memory with
-// Control Flow Integrity indirect call checking disabled. Some indirect calls,
-// e.g. dynamically resolved symbols in another DSO, can not be accounted for by
-// CFI-icall. These routines allow those symbols to be called without CFI-icall
-// checking safely by ensuring that they are placed in protected memory.
-
-#ifndef BASE_MEMORY_PROTECTED_MEMORY_CFI_H_
-#define BASE_MEMORY_PROTECTED_MEMORY_CFI_H_
-
-#include <utility>
-
-#include "base/cfi_buildflags.h"
-#include "base/compiler_specific.h"
-#include "base/macros.h"
-#include "base/memory/protected_memory.h"
-#include "build/build_config.h"
-
-#if BUILDFLAG(CFI_ICALL_CHECK) && !PROTECTED_MEMORY_ENABLED
-#error "CFI-icall enabled for platform without protected memory support"
-#endif // BUILDFLAG(CFI_ICALL_CHECK) && !PROTECTED_MEMORY_ENABLED
-
-namespace base {
-namespace internal {
-
-// This class is used to exempt calls to function pointers stored in
-// ProtectedMemory from cfi-icall checking. It's not secure to use directly, it
-// should only be used by the UnsanitizedCfiCall() functions below. Given an
-// UnsanitizedCfiCall object, you can use operator() to call the encapsulated
-// function pointer without cfi-icall checking.
-template <typename FunctionType>
-class UnsanitizedCfiCall {
- public:
- explicit UnsanitizedCfiCall(FunctionType function) : function_(function) {}
- UnsanitizedCfiCall(UnsanitizedCfiCall&&) = default;
-
- template <typename... Args>
- NO_SANITIZE("cfi-icall")
- auto operator()(Args&&... args) {
- return function_(std::forward<Args>(args)...);
- }
-
- private:
- FunctionType function_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(UnsanitizedCfiCall);
-};
-
-} // namespace internal
-
-// These functions can be used to call function pointers in ProtectedMemory
-// without cfi-icall checking. They are intended to be used to create an
-// UnsanitizedCfiCall object and immediately call it. UnsanitizedCfiCall objects
-// should not initialized directly or stored because they hold a function
-// pointer that will be called without CFI-icall checking in mutable memory. The
-// functions can be used as shown below:
-
-// ProtectedMemory<void (*)(int)> p;
-// UnsanitizedCfiCall(p)(5); /* In place of (*p)(5); */
-
-template <typename T>
-auto UnsanitizedCfiCall(const ProtectedMemory<T>& PM) {
-#if PROTECTED_MEMORY_ENABLED
- DCHECK(&PM >= ProtectedMemoryStart && &PM < ProtectedMemoryEnd);
-#endif // PROTECTED_MEMORY_ENABLED
- return internal::UnsanitizedCfiCall<T>(*PM);
-}
-
-// struct S { void (*fp)(int); } s;
-// ProtectedMemory<S> p;
-// UnsanitizedCfiCall(p, &S::fp)(5); /* In place of p->fp(5); */
-
-template <typename T, typename Member>
-auto UnsanitizedCfiCall(const ProtectedMemory<T>& PM, Member member) {
-#if PROTECTED_MEMORY_ENABLED
- DCHECK(&PM >= ProtectedMemoryStart && &PM < ProtectedMemoryEnd);
-#endif // PROTECTED_MEMORY_ENABLED
- return internal::UnsanitizedCfiCall<decltype(*PM.*member)>(*PM.*member);
-}
-
-} // namespace base
-
-#endif // BASE_MEMORY_PROTECTED_MEMORY_CFI_H_
diff --git a/base/memory/protected_memory_posix.cc b/base/memory/protected_memory_posix.cc
deleted file mode 100644
index 9bcbc6c..0000000
--- a/base/memory/protected_memory_posix.cc
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/protected_memory.h"
-
-#include <sys/mman.h>
-#include <unistd.h>
-
-#if defined(OS_LINUX)
-#include <sys/resource.h>
-#endif // defined(OS_LINUX)
-
-#if defined(OS_MACOSX) && !defined(OS_IOS)
-#include <mach/mach.h>
-#include <mach/mach_vm.h>
-#endif // defined(OS_MACOSX) && !defined(OS_IOS)
-
-#include "base/posix/eintr_wrapper.h"
-#include "base/process/process_metrics.h"
-#include "base/synchronization/lock.h"
-#include "build/build_config.h"
-#include "starboard/types.h"
-
-namespace base {
-
-namespace {
-
-bool SetMemory(void* start, void* end, int prot) {
- DCHECK(end > start);
- const uintptr_t page_mask = ~(base::GetPageSize() - 1);
- const uintptr_t page_start = reinterpret_cast<uintptr_t>(start) & page_mask;
- return mprotect(reinterpret_cast<void*>(page_start),
- reinterpret_cast<uintptr_t>(end) - page_start, prot) == 0;
-}
-
-} // namespace
-
-bool AutoWritableMemory::SetMemoryReadWrite(void* start, void* end) {
- return SetMemory(start, end, PROT_READ | PROT_WRITE);
-}
-
-bool AutoWritableMemory::SetMemoryReadOnly(void* start, void* end) {
- return SetMemory(start, end, PROT_READ);
-}
-
-#if defined(OS_LINUX)
-void AssertMemoryIsReadOnly(const void* ptr) {
-#if DCHECK_IS_ON()
- const uintptr_t page_mask = ~(base::GetPageSize() - 1);
- const uintptr_t page_start = reinterpret_cast<uintptr_t>(ptr) & page_mask;
-
- // Note: We've casted away const here, which should not be meaningful since
- // if the memory is written to we will abort immediately.
- int result =
- getrlimit(RLIMIT_NPROC, reinterpret_cast<struct rlimit*>(page_start));
- DCHECK_EQ(result, -1);
- DCHECK_EQ(errno, EFAULT);
-#endif // DCHECK_IS_ON()
-}
-#elif defined(OS_MACOSX) && !defined(OS_IOS)
-void AssertMemoryIsReadOnly(const void* ptr) {
-#if DCHECK_IS_ON()
- mach_port_t object_name;
- vm_region_basic_info_64 region_info;
- mach_vm_size_t size = 1;
- mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
-
- kern_return_t kr = mach_vm_region(
- mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&ptr), &size,
-      VM_REGION_BASIC_INFO_64, reinterpret_cast<vm_region_info_t>(&region_info),
- &count, &object_name);
- DCHECK_EQ(kr, KERN_SUCCESS);
- DCHECK_EQ(region_info.protection, VM_PROT_READ);
-#endif // DCHECK_IS_ON()
-}
-#endif // defined(OS_LINUX) || (defined(OS_MACOSX) && !defined(OS_IOS))
-
-} // namespace base
diff --git a/base/memory/protected_memory_unittest.cc b/base/memory/protected_memory_unittest.cc
deleted file mode 100644
index b7daed3..0000000
--- a/base/memory/protected_memory_unittest.cc
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/protected_memory.h"
-#include "base/cfi_buildflags.h"
-#include "base/memory/protected_memory_cfi.h"
-#include "base/synchronization/lock.h"
-#include "base/test/gtest_util.h"
-#include "build/build_config.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base {
-
-namespace {
-
-struct Data {
- Data() = default;
- Data(int foo_) : foo(foo_) {}
- int foo;
-};
-
-} // namespace
-
-class ProtectedMemoryTest : public ::testing::Test {
- protected:
- // Run tests one at a time. Some of the negative tests can not be made thread
- // safe.
- void SetUp() final { lock.Acquire(); }
- void TearDown() final { lock.Release(); }
-
- Lock lock;
-};
-
-PROTECTED_MEMORY_SECTION ProtectedMemory<int> init;
-
-TEST_F(ProtectedMemoryTest, Initializer) {
- static ProtectedMemory<int>::Initializer I(&init, 4);
- EXPECT_EQ(*init, 4);
-}
-
-PROTECTED_MEMORY_SECTION ProtectedMemory<Data> data;
-
-TEST_F(ProtectedMemoryTest, Basic) {
- AutoWritableMemory writer = AutoWritableMemory::Create(data);
- data->foo = 5;
- EXPECT_EQ(data->foo, 5);
-}
-
-#if defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
-
-#if PROTECTED_MEMORY_ENABLED
-TEST_F(ProtectedMemoryTest, ReadOnlyOnStart) {
- EXPECT_DEATH({ data->foo = 6; AutoWritableMemory::Create(data); }, "");
-}
-
-TEST_F(ProtectedMemoryTest, ReadOnlyAfterSetWritable) {
- { AutoWritableMemory writer = AutoWritableMemory::Create(data); }
- EXPECT_DEATH({ data->foo = 7; }, "");
-}
-
-TEST_F(ProtectedMemoryTest, AssertMemoryIsReadOnly) {
- AssertMemoryIsReadOnly(&data->foo);
- { AutoWritableMemory::Create(data); }
- AssertMemoryIsReadOnly(&data->foo);
-
- ProtectedMemory<Data> writable_data;
- EXPECT_DCHECK_DEATH({ AssertMemoryIsReadOnly(&writable_data->foo); });
-}
-
-TEST_F(ProtectedMemoryTest, FailsIfDefinedOutsideOfProtectMemoryRegion) {
- ProtectedMemory<Data> data;
- EXPECT_DCHECK_DEATH({ AutoWritableMemory::Create(data); });
-}
-
-TEST_F(ProtectedMemoryTest, UnsanitizedCfiCallOutsideOfProtectedMemoryRegion) {
- ProtectedMemory<void (*)(void)> data;
- EXPECT_DCHECK_DEATH({ UnsanitizedCfiCall(data)(); });
-}
-#endif // PROTECTED_MEMORY_ENABLED
-
-namespace {
-
-struct BadIcall {
- BadIcall() = default;
- BadIcall(int (*fp_)(int)) : fp(fp_) {}
- int (*fp)(int);
-};
-
-unsigned int bad_icall(int i) {
- return 4 + i;
-}
-
-} // namespace
-
-PROTECTED_MEMORY_SECTION ProtectedMemory<BadIcall> icall_pm1;
-
-TEST_F(ProtectedMemoryTest, BadMemberCall) {
- static ProtectedMemory<BadIcall>::Initializer I(
- &icall_pm1, BadIcall(reinterpret_cast<int (*)(int)>(&bad_icall)));
-
- EXPECT_EQ(UnsanitizedCfiCall(icall_pm1, &BadIcall::fp)(1), 5);
-#if !BUILDFLAG(CFI_ICALL_CHECK)
- EXPECT_EQ(icall_pm1->fp(1), 5);
-#elif BUILDFLAG(CFI_ENFORCEMENT_TRAP) || BUILDFLAG(CFI_ENFORCEMENT_DIAGNOSTIC)
- EXPECT_DEATH({ icall_pm1->fp(1); }, "");
-#endif
-}
-
-PROTECTED_MEMORY_SECTION ProtectedMemory<int (*)(int)> icall_pm2;
-
-TEST_F(ProtectedMemoryTest, BadFnPtrCall) {
- static ProtectedMemory<int (*)(int)>::Initializer I(
- &icall_pm2, reinterpret_cast<int (*)(int)>(&bad_icall));
-
- EXPECT_EQ(UnsanitizedCfiCall(icall_pm2)(1), 5);
-#if !BUILDFLAG(CFI_ICALL_CHECK)
- EXPECT_EQ((*icall_pm2)(1), 5);
-#elif BUILDFLAG(CFI_ENFORCEMENT_TRAP) || BUILDFLAG(CFI_ENFORCEMENT_DIAGNOSTIC)
- EXPECT_DEATH({ (*icall_pm2)(1); }, "");
-#endif
-}
-
-#endif // defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
-
-} // namespace base
diff --git a/base/memory/protected_memory_win.cc b/base/memory/protected_memory_win.cc
deleted file mode 100644
index 4927412..0000000
--- a/base/memory/protected_memory_win.cc
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/protected_memory.h"
-
-#include <windows.h>
-
-#include "base/process/process_metrics.h"
-#include "base/synchronization/lock.h"
-#include "build/build_config.h"
-#include "starboard/types.h"
-
-namespace base {
-
-namespace {
-
-bool SetMemory(void* start, void* end, DWORD prot) {
- DCHECK(end > start);
- const uintptr_t page_mask = ~(base::GetPageSize() - 1);
- const uintptr_t page_start = reinterpret_cast<uintptr_t>(start) & page_mask;
- DWORD old_prot;
- return VirtualProtect(reinterpret_cast<void*>(page_start),
- reinterpret_cast<uintptr_t>(end) - page_start, prot,
- &old_prot) != 0;
-}
-
-} // namespace
-
-bool AutoWritableMemory::SetMemoryReadWrite(void* start, void* end) {
- return SetMemory(start, end, PAGE_READWRITE);
-}
-
-bool AutoWritableMemory::SetMemoryReadOnly(void* start, void* end) {
- return SetMemory(start, end, PAGE_READONLY);
-}
-
-void AssertMemoryIsReadOnly(const void* ptr) {
-#if DCHECK_IS_ON()
- const uintptr_t page_mask = ~(base::GetPageSize() - 1);
- const uintptr_t page_start = reinterpret_cast<uintptr_t>(ptr) & page_mask;
-
- MEMORY_BASIC_INFORMATION info;
- SIZE_T result =
- VirtualQuery(reinterpret_cast<LPCVOID>(page_start), &info, sizeof(info));
- DCHECK_GT(result, 0U);
- DCHECK(info.Protect == PAGE_READONLY);
-#endif // DCHECK_IS_ON()
-}
-
-} // namespace base
diff --git a/base/memory/ptr_util.h b/base/memory/ptr_util.h
index 42f4f49..5a54886 100644
--- a/base/memory/ptr_util.h
+++ b/base/memory/ptr_util.h
@@ -1,4 +1,4 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
+// Copyright 2015 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -6,7 +6,6 @@
#define BASE_MEMORY_PTR_UTIL_H_
#include <memory>
-#include <utility>
namespace base {
diff --git a/base/memory/ptr_util_unittest.cc b/base/memory/ptr_util_unittest.cc
index a7c852d..e3c63a4 100644
--- a/base/memory/ptr_util_unittest.cc
+++ b/base/memory/ptr_util_unittest.cc
@@ -1,10 +1,11 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
+// Copyright 2015 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/ptr_util.h"
-#include "starboard/types.h"
+#include <stddef.h>
+
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
@@ -35,4 +36,5 @@
owned_counter.reset();
EXPECT_EQ(0u, DeleteCounter::count());
}
+
} // namespace base
diff --git a/base/memory/raw_ptr.h b/base/memory/raw_ptr.h
new file mode 100644
index 0000000..03acb73
--- /dev/null
+++ b/base/memory/raw_ptr.h
@@ -0,0 +1,13 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_RAW_PTR_H_
+#define BASE_MEMORY_RAW_PTR_H_
+
+// Although `raw_ptr` is part of the standalone PA distribution, it is
+// easier to use the shorter path in `//base/memory`. We retain this
+// facade header for ease of typing.
+#include "base/allocator/partition_allocator/pointers/raw_ptr.h" // IWYU pragma: export
+
+#endif // BASE_MEMORY_RAW_PTR_H_
diff --git a/base/memory/raw_ptr.md b/base/memory/raw_ptr.md
new file mode 100644
index 0000000..85adc0d
--- /dev/null
+++ b/base/memory/raw_ptr.md
@@ -0,0 +1,617 @@
+# raw_ptr<T> (aka MiraclePtr, aka BackupRefPtr)
+
+`raw_ptr<T>` is a non-owning smart pointer that has improved memory-safety over
+raw pointers. It behaves just like a raw pointer on platforms where
+USE_BACKUP_REF_PTR is off, and almost like one when it's on. The main
+difference is that when USE_BACKUP_REF_PTR is enabled, it's zero-initialized and
+cleared on destruction and move. (You should continue to explicitly initialize
+raw_ptr members to ensure consistent behavior on platforms where USE_BACKUP_REF_PTR
+is disabled.) Unlike `std::unique_ptr<T>`, `base::scoped_refptr<T>`, etc., it
+doesn’t manage ownership or lifetime of an allocated object - you are still
+responsible for freeing the object when no longer used, just as you would
+with a raw C++ pointer.
+
+`raw_ptr<T>` is beneficial for security, because it can prevent a significant
+percentage of Use-after-Free
+(UaF) bugs from being exploitable (by poisoning the freed memory and
+quarantining it as long as a dangling `raw_ptr<T>` exists).
+`raw_ptr<T>` has limited impact on stability - dereferencing
+a dangling pointer remains Undefined Behavior (although poisoning may
+lead to earlier, easier to debug crashes).
+Note that the security protection is not yet enabled by default.
+
+`raw_ptr<T>` is a part of
+[the MiraclePtr project](https://docs.google.com/document/d/1pnnOAIz_DMWDI4oIOFoMAqLnf_MZ2GsrJNb_dbQ3ZBg/edit?usp=sharing)
+and currently implements
+[the BackupRefPtr algorithm](https://docs.google.com/document/d/1m0c63vXXLyGtIGBi9v6YFANum7-IRC3-dmiYBCWqkMk/edit?usp=sharing).
+If needed, please reach out to
+[memory-safety-dev@chromium.org](https://groups.google.com/u/1/a/chromium.org/g/memory-safety-dev)
+or (Google-internal)
+[chrome-memory-safety@google.com](https://groups.google.com/a/google.com/g/chrome-memory-safety)
+with questions or concerns.
+
+[TOC]
+
+## When to use |raw_ptr<T>|
+
+[The Chromium C++ Style Guide](../../styleguide/c++/c++.md#non_owning-pointers-in-class-fields)
+asks to use `raw_ptr<T>` for class and struct fields in place of
+a raw C++ pointer `T*` whenever possible, except in Renderer-only code.
+This guide offers more details.
+
+The usage guidelines are *not* enforced currently (the MiraclePtr team will turn
+on enforcement via Chromium Clang Plugin after confirming performance results
+via Stable channel experiments). Afterwards we plan to allow
+exclusions via:
+- [manual-paths-to-ignore.txt](../../tools/clang/rewrite_raw_ptr_fields/manual-paths-to-ignore.txt)
+ to exclude at a directory level. Examples:
+ - Renderer-only code (i.e. code in paths that contain `/renderer/` or
+ `third_party/blink/public/web/`)
+ - Code that cannot depend on `//base`
+ - Code in `//ppapi`
+- `RAW_PTR_EXCLUSION` C++ attribute to exclude individual fields. Examples:
+ - Cases where `raw_ptr<T>` won't compile (e.g. cases covered in
+ [the "Unsupported cases leading to compile errors" section](#Unsupported-cases-leading-to-compile-errors)).
+ Make sure to also look at
+ [the "Recoverable compile-time problems" section](#Recoverable-compile_time-problems).
+ - Cases where the pointer always points outside of PartitionAlloc
+ (e.g. literals, stack allocated memory, shared memory, mmap'ed memory,
+ V8/Oilpan/Java heaps, TLS, etc.).
+ - (Very rare) cases that cause regression on perf bots.
+ - (Very rare) cases where `raw_ptr<T>` can lead to runtime errors.
+ Make sure to look at
+ [the "Extra pointer rules" section](#Extra-pointer-rules)
+ before resorting to this exclusion.
+- No explicit exclusions will be needed for:
+ - `const char*`, `const wchar_t*`, etc.
+ - Function pointers
+ - ObjC pointers
+
+## Examples of using |raw_ptr<T>| instead of raw C++ pointers
+
+Consider an example struct that uses raw C++ pointer fields:
+
+```cpp
+struct Example {
+ int* int_ptr;
+ void* void_ptr;
+ SomeClass* object_ptr;
+ const SomeClass* ptr_to_const;
+ SomeClass* const const_ptr;
+};
+```
+
+When using `raw_ptr<T>` the struct above would look as follows:
+
+```cpp
+#include "base/memory/raw_ptr.h"
+
+struct Example {
+ raw_ptr<int> int_ptr;
+ raw_ptr<void> void_ptr;
+ raw_ptr<SomeClass> object_ptr;
+ raw_ptr<const SomeClass> ptr_to_const;
+ const raw_ptr<SomeClass> const_ptr;
+};
+```
+
+In most cases, only the type in the field declaration needs to change.
+In particular, `raw_ptr<T>` implements
+`operator->`, `operator*` and other operators
+that one expects from a raw pointer.
+Cases where other code needs to be modified are described in
+[the "Recoverable compile-time problems" section](#Recoverable-compile_time-problems)
+below.
+
+## Performance
+
+### Performance impact of using |raw_ptr<T>| instead of |T\*|
+
+Compared to a raw C++ pointer, on platforms where USE_BACKUP_REF_PTR is on,
+`raw_ptr<T>` incurs additional runtime
+overhead for initialization, destruction, and assignment (including
+`ptr++` and `ptr += ...`).
+There is no overhead when dereferencing or extracting a pointer (including
+`*ptr`, `ptr->foobar`, `ptr.get()`, or implicit conversions to a raw C++
+pointer).
+Finally, `raw_ptr<T>` has exactly the same memory footprint as `T*`
+(i.e. `sizeof(raw_ptr<T>) == sizeof(T*)`).
+
+One source of the performance overhead is
+a check whether a pointer `T*` points to a protected memory pool.
+This happens in `raw_ptr<T>`'s
+constructor, destructor, and assignment operators.
+If the pointed memory is unprotected,
+then `raw_ptr<T>` behaves just like a `T*`
+and the runtime overhead is limited to the extra check.
+(The security protection incurs additional overhead
+described in
+[the "Performance impact of enabling Use-after-Free protection" section](#Performance-impact-of-enabling-Use_after_Free-protection)
+below.)
+
+Some additional overhead comes from setting `raw_ptr<T>` to `nullptr`
+when default-constructed, destructed, or moved.
+
+During
+[the "Big Rewrite"](https://groups.google.com/a/chromium.org/g/chromium-dev/c/vAEeVifyf78/m/SkBUc6PhBAAJ)
+most Chromium `T*` fields have been rewritten to `raw_ptr<T>`
+(excluding fields in Renderer-only code).
+The cumulative performance impact of such rewrite
+has been measured by earlier A/B binary experiments.
+There was no measurable impact, except that 32-bit platforms
+have seen a slight increase in jankiness metrics
+(for more detailed results see
+[the document here](https://docs.google.com/document/d/1MfDT-JQh_UIpSQw3KQttjbQ_drA7zw1gQDwU3cbB6_c/edit?usp=sharing)).
+
+### Performance impact of enabling Use-after-Free protection
+
+When the Use-after-Free protection is enabled, then `raw_ptr<T>` has some
+additional performance overhead. This protection is currently disabled
+by default. We will enable the protection incrementally, starting with
+more non-Renderer processes first.
+
+The protection can increase memory usage:
+- For each memory allocation Chromium's allocator (PartitionAlloc)
+ allocates extra 16 bytes (4 bytes to store the BackupRefPtr's
+ ref-count associated with the allocation, the rest to maintain
+ alignment requirements).
+- Freed memory is quarantined and not available for reuse as long
+ as dangling `raw_ptr<T>` pointers exist.
+- Enabling protection requires additional partitions in PartitionAlloc,
+ which increases memory fragmentation.
+
+The protection can increase runtime costs - `raw_ptr<T>`'s constructor,
+destructor, and assignment operators (including `ptr++` and `ptr += ...`) need
+to maintain BackupRefPtr's ref-count.
+
+## When it is okay to continue using raw C++ pointers
+
+### Unsupported cases leading to compile errors
+
+Using raw_ptr<T> in the following scenarios will lead to build errors.
+Continue to use raw C++ pointers in those cases:
+- Function pointers
+- Pointers to Objective-C objects
+- Pointer fields in classes/structs that are used as global or static variables
+ (see more details in the
+ [Rewrite exclusion statistics](https://docs.google.com/document/d/1uAsWnwy8HfIJhDPSh1efohnqfGsv2LJmYTRBj0JzZh8/edit#heading=h.dg4eebu87wg9)
+ )
+- Pointer fields that require non-null, constexpr initialization
+ (see more details in the
+ [Rewrite exclusion statistics](https://docs.google.com/document/d/1uAsWnwy8HfIJhDPSh1efohnqfGsv2LJmYTRBj0JzZh8/edit#heading=h.dg4eebu87wg9)
+ )
+- Pointer fields in classes/structs that have to be trivially constructible or
+ destructible
+- Code that doesn’t depend on `//base` (including non-Chromium repositories and
+ third party libraries)
+- Code in `//ppapi`
+
+### Pointers to unprotected memory (performance optimization)
+
+Using `raw_ptr<T>` offers no security benefits (no UaF protection) for pointers
+that don’t point to protected memory (only PartitionAlloc-managed heap allocations
+in non-Renderer processes are protected).
+Therefore in the following cases raw C++ pointers may be used instead of
+`raw_ptr<T>`:
+- Pointer fields that can only point outside PartitionAlloc, including literals,
+ stack allocated memory, shared memory, mmap'ed memory, V8/Oilpan/Java heaps,
+ TLS, etc.
+- `const char*` (and `const wchar_t*`) pointer fields, unless you’re convinced
+ they can point to a heap-allocated object, not just a string literal
+- Pointer fields that can only point to aligned allocations (requested via
+ PartitionAlloc’s `AlignedAlloc` or `memalign` family of functions, with
+ alignment higher than `base::kAlignment`)
+- Pointer fields in Renderer-only code. (This might change in the future
+ as we explore expanding `raw_ptr<T>` usage in https://crbug.com/1273204.)
+
+### Other perf optimizations
+
+As a performance optimization, raw C++ pointers may be used instead of
+`raw_ptr<T>` if it would have a significant
+[performance impact](#Performance).
+
+### Pointers in locations other than fields
+
+Use raw C++ pointers instead of `raw_ptr<T>` in the following scenarios:
+- Pointers in local variables and function/method parameters.
+ This includes pointer fields in classes/structs that are used only on the stack.
+ (We plan to enforce this in the Chromium Clang Plugin. Using `raw_ptr<T>`
+ here would cumulatively lead to performance regression and the security
+ benefit of UaF protection is lower for such short-lived pointers.)
+- Pointer fields in unions. (Naive usage of this will lead to
+ [a C++ compile
+ error](https://docs.google.com/document/d/1uAsWnwy8HfIJhDPSh1efohnqfGsv2LJmYTRBj0JzZh8/edit#heading=h.fvvnv6htvlg3).
+ Avoiding the error requires the `raw_ptr<T>` destructor to be explicitly
+ called before destroying the union, if the field is holding a value. Doing
+ this manual destruction wrong might lead to leaks or double-dereferences.)
+- Pointers whose addresses are used only as identifiers and which are
+ never dereferenced (e.g. keys in a map). There is a performance gain
+ by not using `raw_ptr` in this case; prefer to use `uintptr_t` to
+ emphasize that the entity can dangle and must not be dereferenced.
+
+You don’t have to, but may use `raw_ptr<T>`, in the following scenarios:
+- Pointers that are used as an element type of collections/wrappers. E.g.
+ `std::vector<T*>` and `std::vector<raw_ptr<T>>` are both okay, but prefer the
+ latter if the collection is a class field (note that some of the perf
+ optimizations above might still apply and argue for using a raw C++ pointer).
+
+
+## Extra pointer rules
+
+`raw_ptr<T>` requires following some extra rules compared to a raw C++ pointer:
+- Don’t assign invalid, non-null addresses (this includes previously valid and
+ now freed memory,
+ [Win32 handles](https://crbug.com/1262017), and more). You can only assign an
+ address of memory that is allocated at the time of assignment. Exceptions:
+ - a pointer to the end of a valid allocation (but not even 1 byte further)
+ - a pointer to the last page of the address space, e.g. for sentinels like
+ `reinterpret_cast<void*>(-1)`
+- Don’t initialize or assign `raw_ptr<T>` memory directly
+ (e.g. `reinterpret_cast<ClassWithRawPtr*>(buffer)` or
+ `memcpy(reinterpret_cast<void*>(&obj_with_raw_ptr), buffer)`.
+- Don’t assign to a `raw_ptr<T>` concurrently, even if the same value.
+- Don’t rely on moved-from pointers to keep their old value. Unlike raw
+ pointers, `raw_ptr<T>` is cleared upon moving.
+- Don't use the pointer after it is destructed. Unlike raw pointers,
+ `raw_ptr<T>` is cleared upon destruction. This may happen e.g. when fields are
+ ordered such that the pointer field is destructed before the class field whose
+ destructor uses that pointer field (e.g. see
+ [Esoteric Issues](https://docs.google.com/document/d/14Ol_adOdNpy4Ge-XReI7CXNKMzs_LL5vucDQIERDQyg/edit#heading=h.yoba1l8bnfmv)).
+- Don’t assign to a `raw_ptr<T>` until its constructor has run. This may happen
+ when a base class’s constructor uses a not-yet-initialized field of a derived
+ class (e.g. see
+ [Applying MiraclePtr](https://docs.google.com/document/d/1cnpd5Rwesq7DCZiD8FIJfPGHvQN3-Gul6xib_4hwfBg/edit?ts=5ed2d317#heading=h.4ry5d9a6fuxs)).
+
+Some of these would result in undefined behavior (UB) even in the world without
+`raw_ptr<T>` (e.g. see
+[Field destruction order](https://groups.google.com/a/chromium.org/g/memory-safety-dev/c/3sEmSnFc61I/m/ZtaeWGslAQAJ)),
+but you’d likely get away without any consequences. In the `raw_ptr<T>` world,
+an obscure crash may occur. Those crashes often manifest themselves as SEGV or
+`CHECK` inside `RawPtrBackupRefImpl::AcquireInternal()` or
+`RawPtrBackupRefImpl::ReleaseInternal()`, but you may also experience memory
+corruption or a silent drop of UaF protection.
+
+## Recoverable compile-time problems
+
+### Explicit |raw_ptr.get()| might be needed
+
+If a raw pointer is needed, but an implicit cast from `raw_ptr<SomeClass>` to
+`SomeClass*` doesn't work, then the raw pointer needs to be obtained by explicitly
+calling `.get()`. Examples:
+- `auto* raw_ptr_var = wrapped_ptr_.get()` (`auto*` requires the initializer to
+ be a raw pointer)
+ - Alternatively you can change `auto*` to `auto&`. Avoid using `auto` as it’ll
+ copy the pointer, which incurs a performance overhead.
+- `return condition ? raw_ptr : wrapped_ptr_.get();` (ternary operator needs
+ identical types in both branches)
+- `base::WrapUniquePtr(wrapped_ptr_.get());` (implicit cast doesn't kick in for
+ arguments in templates)
+- `printf("%p", wrapped_ptr_.get());` (can't pass class type arguments to
+ variadic functions)
+- `reinterpret_cast<SomeClass*>(wrapped_ptr_.get())` (`const_cast` and
+ `reinterpret_cast` sometimes require their argument to be a raw pointer;
+ `static_cast` should "Just Work")
+- `T2 t2 = t1_wrapped_ptr_.get();` (where there is an implicit conversion
+ constructor `T2(T1*)` the compiler can handle one implicit conversion, but not
+ two)
+- In general, when type is inferred by a compiler and then used in a context
+ where a pointer is expected.
+
+### Out-of-line constructor/destructor might be needed
+
+Out-of-line constructor/destructor may be newly required by the chromium style
+clang plugin. Error examples:
+- `error: [chromium-style] Complex class/struct needs an explicit out-of-line
+ destructor.`
+- `error: [chromium-style] Complex class/struct needs an explicit out-of-line
+ constructor.`
+
+`raw_ptr<T>` uses a non-trivial constructor/destructor, so classes that used to
+be POD or have a trivial destructor may require an out-of-line
+constructor/destructor to satisfy the chromium style clang plugin.
+
+
+### In-out arguments need to be refactored
+
+Due to implementation difficulties,
+`raw_ptr<T>` doesn't support an address-of operator.
+This means that the following code will not compile:
+
+```cpp
+void GetSomeClassPtr(SomeClass** out_arg) {
+ *out_arg = ...;
+}
+
+struct MyStruct {
+ void Example() {
+ GetSomeClassPtr(&wrapped_ptr_); // <- won't compile
+ }
+
+ raw_ptr<SomeClass> wrapped_ptr_;
+};
+```
+
+The typical fix is to change the type of the out argument:
+
+```cpp
+void GetSomeClassPtr(raw_ptr<SomeClass>* out_arg) {
+ *out_arg = ...;
+}
+```
+
+Similarly this code:
+
+```cpp
+void FillPtr(SomeClass*& out_arg) {
+ out_arg = ...;
+}
+```
+
+would have to be changed to this:
+
+```cpp
+void FillPtr(raw_ptr<SomeClass>& out_arg) {
+ out_arg = ...;
+}
+```
+
+Similarly this code:
+
+```cpp
+SomeClass*& GetPtr() {
+ return wrapper_ptr_;
+}
+```
+
+would have to be changed to this:
+
+```cpp
+raw_ptr<SomeClass>& GetPtr() {
+ return wrapper_ptr_;
+}
+```
+
+### Modern |nullptr| is required
+
+As recommended by the Google C++ Style Guide,
+[use nullptr instead of NULL](https://google.github.io/styleguide/cppguide.html#0_and_nullptr/NULL) -
+the latter might result in compile-time errors when used with `raw_ptr<T>`.
+
+Example:
+
+```cpp
+struct SomeStruct {
+ raw_ptr<int> ptr_field;
+};
+
+void bar() {
+ SomeStruct some_struct;
+ some_struct.ptr_field = NULL;
+}
+```
+
+Error:
+```err
+../../base/memory/checked_ptr_unittest.cc:139:25: error: use of overloaded
+operator '=' is ambiguous (with operand types raw_ptr<int>' and 'long')
+ some_struct.ptr_field = NULL;
+ ~~~~~~~~~~~~~~~~~~~~~ ^ ~~~~
+../../base/memory/raw_ptr.h:369:29: note: candidate function
+ ALWAYS_INLINE raw_ptr& operator=(std::nullptr_t) noexcept {
+ ^
+../../base/memory/raw_ptr.h:374:29: note: candidate function
+ ALWAYS_INLINE raw_ptr& operator=(T* p)
+ noexcept {
+```
+
+### [rare] Explicit overload or template specialization for |raw_ptr<T>|
+
+In rare cases, the default template code won’t compile when `raw_ptr<...>` is
+substituted for a template argument. In such cases, it might be necessary to
+provide an explicit overload or template specialization for `raw_ptr<T>`.
+
+Example (more details in
+[Applying MiraclePtr](https://docs.google.com/document/d/1cnpd5Rwesq7DCZiD8FIJfPGHvQN3-Gul6xib_4hwfBg/edit?ts=5ed2d317#heading=h.o2pf3fg0zzf) and the
+[Add CheckedPtr support for cbor_extract::Element](https://chromium-review.googlesource.com/c/chromium/src/+/2224954)
+CL):
+
+```cpp
+// An explicit overload (taking raw_ptr<T> as an argument)
+// was needed below:
+template <typename S>
+constexpr StepOrByte<S> Element(
+ const Is required,
+ raw_ptr<const std::string> S::*member, // <- HERE
+ uintptr_t offset) {
+ return ElementImpl<S>(required, offset, internal::Type::kString);
+}
+```
+
+## AddressSanitizer support
+
+For years, AddressSanitizer has been the main tool for diagnosing memory
+corruption issues in Chromium. MiraclePtr alters the security properties of
+some such issues, so ideally it should be integrated with ASan. That way an
+engineer would be able to check whether a given use-after-free vulnerability is
+covered by the protection without having to switch between ASan and non-ASan
+builds.
+
+Unfortunately, MiraclePtr relies heavily on PartitionAlloc, and ASan needs its
+own allocator to work. As a result, the default implementation of `raw_ptr<T>`
+can't be used with ASan builds. Instead, a special version of `raw_ptr<T>` has
+been implemented, which is based on the ASan quarantine and acts as a
+sufficiently close approximation for diagnostic purposes. At crash time, the
+tool will tell the user if the dangling pointer access would have been protected
+by MiraclePtr *in a regular build*.
+
+You can configure the diagnostic tool by modifying the parameters of the feature
+flag `PartitionAllocBackupRefPtr`. For example, launching Chromium as follows:
+
+```
+path/to/chrome --enable-features=PartitionAllocBackupRefPtr:enabled-processes/browser-only/asan-enable-dereference-check/true/asan-enable-extraction-check/true/asan-enable-instantiation-check/true
+```
+
+activates all available checks in the browser process.
+
+### Available checks
+
+MiraclePtr provides ASan users with three kinds of security checks, which differ
+in when a particular check occurs:
+
+#### Dereference
+
+This is the basic check type that helps diagnose regular heap-use-after-free
+bugs. It's enabled by default.
+
+#### Extraction
+
+The user will be warned if a dangling pointer is extracted from a `raw_ptr<T>`
+variable. If the pointer is then dereferenced, an ASan error report will follow.
+In some cases, extra work on the reproduction case is required to reach the
+faulty memory access. However, even without memory corruption, relying on the
+value of a dangling pointer may lead to problems. For example, it's a common
+(anti-)pattern in Chromium to use a raw pointer as a key in a container.
+Consider the following example:
+
+```
+std::map<T*, std::unique_ptr<Ext>> g_map;
+
+struct A {
+ A() {
+ g_map[this] = std::make_unique<Ext>(this);
+ }
+
+ ~A() {
+ g_map.erase(this);
+ }
+};
+
+raw_ptr<A> dangling = new A;
+// ...
+delete dangling.get();
+A* replacement = new A;
+// ...
+auto it = g_map.find(dangling);
+if (it == g_map.end())
+ return 0;
+it->second->DoStuff();
+```
+
+Depending on whether the allocator reuses the same memory region for the second
+`A` object, the program may inadvertently call `DoStuff()` on the wrong `Ext`
+instance. This, in turn, may corrupt the state of the program or bypass security
+controls if the two `A` objects belong to different security contexts.
+
+Given the proportion of false positives reported in this mode, it is disabled by
+default. It's mainly intended to be used by security researchers who are willing
+to spend a significant amount of time investigating these early warnings.
+
+#### Instantiation
+
+This check detects violations of the rule that when instantiating a `raw_ptr<T>`
+from a `T*` , it is only allowed if the `T*` is a valid (i.e. not dangling)
+pointer. This rule exists to help avoid an issue called "pointer laundering"
+which can result in unsafe `raw_ptr<T>` instances that point to memory that is
+no longer in quarantine. This is important, since subsequent use of these
+`raw_ptr<T>` might appear to be safe.
+
+In order for "pointer laundering" to occur, we need (1) a dangling `T*`
+(pointing to memory that has been freed) to be assigned to a `raw_ptr<T>`, while
+(2) there is no other `raw_ptr<T>` pointing to the same object/allocation at the
+time of assignment.
+
+The check only detects (1), a dangling `T*` being assigned to a `raw_ptr<T>`, so
+in order to determine whether "pointer laundering" has occurred, we need to
+determine whether (2) could plausibly occur, not just in the specific
+reproduction testcase, but in the more general case.
+
+In the absence of thorough reasoning about (2), the assumption here should be
+that any failure of this check is a security issue of the same severity as an
+unprotected use-after-free.
+
+### Protection status
+
+When ASan generates a heap-use-after-free report, it will include a new section
+near the bottom, which starts with the line `MiraclePtr Status: <status>`. At
+the moment, it has three possible options:
+
+#### Protected
+
+The system is sufficiently confident that MiraclePtr makes the discovered issue
+unexploitable. In the future, the security severity of such bugs will be
+reduced.
+
+#### Manual analysis required
+
+Dangling pointer extraction was detected before the crash, but there might be
+extra code between the extraction and dereference. Most of the time, the code in
+question will look similar to the following:
+
+```
+struct A {
+ raw_ptr<T> dangling_;
+};
+
+void trigger(A* a) {
+ // ...
+ T* local = a->dangling_;
+ DoStuff();
+ local->DoOtherStuff();
+ // ...
+}
+```
+
+In this scenario, even though `dangling_` points to freed memory, that memory
+is protected and will stay in quarantine until `dangling_` (and all other
+`raw_ptr<T>` variables pointing to the same region) changes its value or gets
+destroyed. Therefore, the expression `a->dangling_->DoOtherStuff()` wouldn't
+trigger an exploitable use-after-free.
+
+You will need to make sure that `DoStuff()` is sufficiently trivial and can't
+(not only for the particular reproduction case, but *even in principle*) make
+`dangling_` change its value or get destroyed. If that's the case, the
+`DoOtherStuff()` call may be considered protected. The tool will provide you
+with the stack trace for both the extraction and dereference events.
+
+#### Not protected
+
+The dangling `T*` doesn't appear to originate from a `raw_ptr<T>` variable,
+which means MiraclePtr can't prevent this issue from being exploited. In
+practice, there may still be a `raw_ptr<T>` in a different part of the code that
+protects the same allocation indirectly, but such protection won't be considered
+robust enough to impact security-related decisions.
+
+### Limitations
+
+The main limitation of MiraclePtr in ASan builds is the main limitation of ASan
+itself: the capacity of the quarantine is limited. Eventually, every allocation
+in quarantine will get reused regardless of whether there are still references
+to it.
+
+In the context of MiraclePtr combined with ASan, it's a problem when:
+
+1. A heap allocation that isn't supported by MiraclePtr is made. At the moment,
+ the only such class is allocations made early during the process startup
+ before MiraclePtr can be activated.
+2. Its address is assigned to a `raw_ptr<T>` variable.
+3. The allocation gets freed.
+4. A new allocation is made in the same memory region as the first one, but this
+ time it is supported.
+5. The second allocation gets freed.
+6. The `raw_ptr<T>` variable is accessed.
+
+In this case, MiraclePtr will incorrectly assume the memory access is protected.
+Luckily, considering the small number of unprotected allocations in Chromium,
+the size of the quarantine, and the fact that most reproduction cases take
+relatively short time to run, the odds of this happening are very low.
+
+The problem is relatively easy to spot if you look at the ASan report: the
+allocation and deallocation stack traces won't be consistent across runs and
+the allocation type won't match the use stack trace.
+
+If you encounter a suspicious ASan report, it may be helpful to re-run Chromium
+with an increased quarantine capacity as follows:
+
+```
+ASAN_OPTIONS=quarantine_size_mb=1024 path/to/chrome
+```
diff --git a/base/memory/raw_ptr_asan_bound_arg_tracker.cc b/base/memory/raw_ptr_asan_bound_arg_tracker.cc
new file mode 100644
index 0000000..670ac8c
--- /dev/null
+++ b/base/memory/raw_ptr_asan_bound_arg_tracker.cc
@@ -0,0 +1,70 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/raw_ptr_asan_bound_arg_tracker.h"
+
+#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
+
+#if BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
+
+#include <sanitizer/allocator_interface.h>
+#include <sanitizer/asan_interface.h>
+
+#include "base/memory/raw_ptr_asan_service.h"
+#include "third_party/abseil-cpp/absl/base/attributes.h"
+
+namespace base {
+
+namespace {
+
+// We use thread-local storage instead of sequence-local storage for consistency
+// with PendingReport in RawPtrAsanService.
+ABSL_CONST_INIT thread_local RawPtrAsanBoundArgTracker::ProtectedArgsVector*
+ protected_args = nullptr;
+
+} // namespace
+
+// static
+uintptr_t RawPtrAsanBoundArgTracker::GetProtectedArgPtr(uintptr_t ptr) {
+ if (!protected_args) {
+ return 0;
+ }
+
+ for (auto protected_arg_ptr : *protected_args) {
+ uintptr_t allocation_base = 0;
+ size_t allocation_size = 0;
+ __asan_locate_address(reinterpret_cast<void*>(protected_arg_ptr), nullptr,
+ 0, reinterpret_cast<void**>(&allocation_base),
+ &allocation_size);
+ if (allocation_base <= ptr && ptr < allocation_base + allocation_size) {
+ return allocation_base;
+ }
+ }
+
+ return 0;
+}
+
+RawPtrAsanBoundArgTracker::RawPtrAsanBoundArgTracker()
+ : enabled_(RawPtrAsanService::GetInstance().IsEnabled()) {
+ if (enabled_) {
+ prev_protected_args_ = protected_args;
+ protected_args = &protected_args_;
+ }
+}
+
+RawPtrAsanBoundArgTracker::~RawPtrAsanBoundArgTracker() {
+ if (enabled_) {
+ protected_args = prev_protected_args_;
+ }
+}
+
+void RawPtrAsanBoundArgTracker::Add(uintptr_t ptr) {
+ if (ptr) {
+ protected_args_->push_back(ptr);
+ }
+}
+
+} // namespace base
+
+#endif // BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
diff --git a/base/memory/raw_ptr_asan_bound_arg_tracker.h b/base/memory/raw_ptr_asan_bound_arg_tracker.h
new file mode 100644
index 0000000..6841a6d
--- /dev/null
+++ b/base/memory/raw_ptr_asan_bound_arg_tracker.h
@@ -0,0 +1,123 @@
+// Copyright 2022 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_RAW_PTR_ASAN_BOUND_ARG_TRACKER_H_
+#define BASE_MEMORY_RAW_PTR_ASAN_BOUND_ARG_TRACKER_H_
+
+#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
+
+#if BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/containers/stack_container.h"
+#include "base/memory/raw_ptr.h"
+
+namespace base {
+namespace internal {
+template <typename, typename>
+struct Invoker;
+
+template <typename T, typename UnretainedTrait, RawPtrTraits PtrTraits>
+class UnretainedWrapper;
+
+template <typename T, typename UnretainedTrait, RawPtrTraits PtrTraits>
+class UnretainedRefWrapper;
+} // namespace internal
+
+// Tracks the lifetimes of bound pointer arguments during callback invocation.
+//
+// Example:
+// T* unsafe_ptr = new T();
+// PostTask(base::BindOnce(&T::DoSomething, base::Unretained(unsafe_ptr)));
+// delete unsafe_ptr;
+//
+// When the callback executes, the callee has no access to the raw_ptr<T> inside
+// base::Unretained, so it is not possible for it to be invalidated until the
+// callback finishes execution; so there is always at least one live raw_ptr<T>
+// pointing to `this` for the duration of the call to T::DoSomething.
+//
+// This class is responsible for tracking and checking which allocations are
+// currently protected in this way, and it is only intended to be used inside
+// the Bind implementation. This should not be used directly.
+class BASE_EXPORT RawPtrAsanBoundArgTracker {
+ public:
+ static constexpr size_t kInlineArgsCount = 3;
+ using ProtectedArgsVector = base::StackVector<uintptr_t, kInlineArgsCount>;
+
+ // Check whether ptr is an address inside an allocation pointed to by one of
+ // the currently protected callback arguments. If it is, then this function
+ // returns the base address of that allocation, otherwise it returns 0.
+ static uintptr_t GetProtectedArgPtr(uintptr_t ptr);
+
+ private:
+ template <typename, typename>
+ friend struct internal::Invoker;
+
+ void Add(uintptr_t pointer);
+
+ RawPtrAsanBoundArgTracker();
+ ~RawPtrAsanBoundArgTracker();
+
+ // Base case for any type that isn't base::Unretained, we do nothing.
+ template <typename T>
+ void AddArg(const T& arg) {}
+
+ // No specialization for raw_ptr<T> directly, since bound raw_ptr<T>
+ // arguments are stored in UnretainedWrapper.
+
+ // When argument is base::Unretained, add the argument to the set of
+ // arguments protected in this scope.
+ template <typename T, typename UnretainedTrait, RawPtrTraits PtrTraits>
+ void AddArg(
+ const internal::UnretainedWrapper<T, UnretainedTrait, PtrTraits>& arg) {
+ if constexpr (raw_ptr_traits::IsSupportedType<T>::value) {
+ auto inner = arg.get();
+ // The argument may unwrap into a raw_ptr or a T* depending if it is
+ // allowed to dangle.
+ if constexpr (IsRawPtrV<decltype(inner)>) {
+ Add(reinterpret_cast<uintptr_t>(inner.get()));
+ } else {
+ Add(reinterpret_cast<uintptr_t>(inner));
+ }
+ }
+ }
+
+ // When argument is a reference type that's supported by raw_ptr, add the
+ // argument to the set of arguments protected in this scope.
+ template <typename T, typename UnretainedTrait, RawPtrTraits PtrTraits>
+ void AddArg(
+ const internal::UnretainedRefWrapper<T, UnretainedTrait, PtrTraits>&
+ arg) {
+ if constexpr (raw_ptr_traits::IsSupportedType<T>::value) {
+ Add(reinterpret_cast<uintptr_t>(&arg.get()));
+ }
+ }
+
+ template <typename... Args>
+ void AddArgs(Args&&... args) {
+ if (enabled_) {
+ (AddArg(std::forward<Args>(args)), ...);
+ }
+ }
+
+ // Cache whether or not BRP-ASan is running when we enter the argument
+ // tracking scope so that we ensure that our actions on leaving the scope are
+ // consistent even if the runtime flags are changed.
+ bool enabled_;
+
+ // We save the previously bound arguments, so that we can restore them when
+ // this callback returns. This helps with coverage while avoiding false
+ // positives due to nested run loops/callback re-entrancy.
+ raw_ptr<ProtectedArgsVector> prev_protected_args_;
+ ProtectedArgsVector protected_args_;
+};
+
+} // namespace base
+
+#endif // BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
+#endif // BASE_MEMORY_RAW_PTR_ASAN_BOUND_ARG_TRACKER_H_
diff --git a/base/memory/raw_ptr_asan_hooks.cc b/base/memory/raw_ptr_asan_hooks.cc
new file mode 100644
index 0000000..2033afc
--- /dev/null
+++ b/base/memory/raw_ptr_asan_hooks.cc
@@ -0,0 +1,122 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/raw_ptr_asan_hooks.h"
+
+#if BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
+
+#include <cstring>
+
+#include <sanitizer/asan_interface.h>
+
+#include "base/compiler_specific.h"
+#include "base/debug/alias.h"
+#include "base/memory/raw_ptr_asan_service.h"
+
+namespace base::internal {
+
+namespace {
+bool IsFreedHeapPointer(uintptr_t address) {
+ // Use `__asan_region_is_poisoned` instead of `__asan_address_is_poisoned`
+ // because the latter may crash on an invalid pointer.
+ if (!__asan_region_is_poisoned(reinterpret_cast<void*>(address), 1)) {
+ return false;
+ }
+
+ // Make sure the address is on the heap and is not in a redzone.
+ void* region_ptr;
+ size_t region_size;
+ const char* allocation_type = __asan_locate_address(
+      reinterpret_cast<void*>(address), nullptr, 0, &region_ptr, &region_size);
+
+ auto region_base = reinterpret_cast<uintptr_t>(region_ptr);
+ if (strcmp(allocation_type, "heap") != 0 || address < region_base ||
+ address >=
+ region_base + region_size) { // We exclude pointers one past the end
+                                      // of an allocation from the analysis
+                                      // for now because they're too fragile.
+ return false;
+ }
+
+ // Make sure the allocation has been actually freed rather than
+ // user-poisoned.
+ int free_thread_id = -1;
+ __asan_get_free_stack(region_ptr, nullptr, 0, &free_thread_id);
+ return free_thread_id != -1;
+}
+
+// Force a non-optimizable memory load operation to trigger an ASan crash.
+NOINLINE NOT_TAIL_CALLED void CrashImmediatelyOnUseAfterFree(
+ uintptr_t address) {
+ NO_CODE_FOLDING();
+ auto unused = *reinterpret_cast<char const volatile*>(address);
+ asm volatile("" : "+r"(unused));
+}
+
+void WrapPtr(uintptr_t address) {
+ auto& service = RawPtrAsanService::GetInstance();
+
+ if (service.is_instantiation_check_enabled() && IsFreedHeapPointer(address)) {
+ RawPtrAsanService::SetPendingReport(
+ RawPtrAsanService::ReportType::kInstantiation,
+ reinterpret_cast<void*>(address));
+ service.CrashOnDanglingInstantiation(reinterpret_cast<void*>(address));
+ }
+}
+
+void ReleaseWrappedPtr(uintptr_t) {}
+
+void SafelyUnwrapForDereference(uintptr_t address) {
+ if (RawPtrAsanService::GetInstance().is_dereference_check_enabled() &&
+ IsFreedHeapPointer(address)) {
+ RawPtrAsanService::SetPendingReport(
+ RawPtrAsanService::ReportType::kDereference,
+ reinterpret_cast<void*>(address));
+ CrashImmediatelyOnUseAfterFree(address);
+ }
+}
+
+void SafelyUnwrapForExtraction(uintptr_t address) {
+ auto& service = RawPtrAsanService::GetInstance();
+
+ if ((service.is_extraction_check_enabled() ||
+ service.is_dereference_check_enabled()) &&
+ IsFreedHeapPointer(address)) {
+ RawPtrAsanService::SetPendingReport(
+ RawPtrAsanService::ReportType::kExtraction,
+ reinterpret_cast<void*>(address));
+ // If the dereference check is enabled, we still record the extraction event
+ // to catch the potential subsequent dangling dereference, but don't report
+ // the extraction itself.
+ if (service.is_extraction_check_enabled()) {
+ service.WarnOnDanglingExtraction(reinterpret_cast<void*>(address));
+ }
+ }
+}
+
+void UnsafelyUnwrapForComparison(uintptr_t) {}
+
+void Advance(uintptr_t, uintptr_t) {}
+
+void Duplicate(uintptr_t) {}
+
+} // namespace
+
+const RawPtrHooks* GetRawPtrAsanHooks() {
+ static constexpr RawPtrHooks hooks = {
+ WrapPtr,
+ ReleaseWrappedPtr,
+ SafelyUnwrapForDereference,
+ SafelyUnwrapForExtraction,
+ UnsafelyUnwrapForComparison,
+ Advance,
+ Duplicate,
+ };
+
+ return &hooks;
+}
+
+} // namespace base::internal
+
+#endif // BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
diff --git a/base/memory/raw_ptr_asan_hooks.h b/base/memory/raw_ptr_asan_hooks.h
new file mode 100644
index 0000000..42185c5
--- /dev/null
+++ b/base/memory/raw_ptr_asan_hooks.h
@@ -0,0 +1,22 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_RAW_PTR_ASAN_HOOKS_H_
+#define BASE_MEMORY_RAW_PTR_ASAN_HOOKS_H_
+
+#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
+
+#if BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
+
+#include "base/memory/raw_ptr.h"
+
+namespace base::internal {
+
+const RawPtrHooks* GetRawPtrAsanHooks();
+
+}
+
+#endif // BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
+
+#endif // BASE_MEMORY_RAW_PTR_ASAN_HOOKS_H_
diff --git a/base/memory/raw_ptr_asan_service.cc b/base/memory/raw_ptr_asan_service.cc
new file mode 100644
index 0000000..194c9c6
--- /dev/null
+++ b/base/memory/raw_ptr_asan_service.cc
@@ -0,0 +1,366 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/raw_ptr_asan_service.h"
+
+#if BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
+
+#include <sanitizer/allocator_interface.h>
+#include <sanitizer/asan_interface.h>
+#include <stdarg.h>
+#include <string.h>
+
+#include "base/check_op.h"
+#include "base/compiler_specific.h"
+#include "base/debug/asan_service.h"
+#include "base/immediate_crash.h"
+#include "base/logging.h"
+#include "base/memory/raw_ptr.h"
+#include "base/memory/raw_ptr_asan_bound_arg_tracker.h"
+#include "base/memory/raw_ptr_asan_hooks.h"
+#include "base/process/process.h"
+#include "base/strings/stringprintf.h"
+#include "base/task/thread_pool/thread_group.h"
+#include "third_party/abseil-cpp/absl/base/attributes.h"
+
+namespace base {
+
+RawPtrAsanService RawPtrAsanService::instance_;
+
+namespace {
+
+// https://github.com/llvm/llvm-project/blob/b84673b3f424882c4c1961fb2c49b6302b68f344/compiler-rt/lib/asan/asan_mapping.h#L154
+constexpr size_t kShadowScale = 3;
+// https://github.com/llvm/llvm-project/blob/b84673b3f424882c4c1961fb2c49b6302b68f344/compiler-rt/lib/asan/asan_allocator.cpp#L143
+constexpr size_t kChunkHeaderSize = 16;
+// https://github.com/llvm/llvm-project/blob/b84673b3f424882c4c1961fb2c49b6302b68f344/compiler-rt/lib/asan/asan_internal.h#L138
+constexpr uint8_t kAsanHeapLeftRedzoneMagic = 0xfa;
+// https://github.com/llvm/llvm-project/blob/b84673b3f424882c4c1961fb2c49b6302b68f344/compiler-rt/lib/asan/asan_internal.h#L145
+constexpr uint8_t kAsanUserPoisonedMemoryMagic = 0xf7;
+
+// Intentionally use thread-local-storage here. Making this sequence-local
+// doesn't prevent sharing of PendingReport contents between unrelated tasks, so
+// we keep this at a lower-level and avoid introducing additional assumptions
+// about Chrome's sequence model.
+ABSL_CONST_INIT thread_local RawPtrAsanService::PendingReport pending_report;
+
+} // namespace
+
+// Mark the first eight bytes of every allocation's header as "user poisoned".
+// This allows us to filter out allocations made before BRP-ASan is activated.
+// The change shouldn't reduce the regular ASan coverage.
+
+// static
+NO_SANITIZE("address")
+void RawPtrAsanService::MallocHook(const volatile void* ptr, size_t size) {
+ uint8_t* header =
+ static_cast<uint8_t*>(const_cast<void*>(ptr)) - kChunkHeaderSize;
+ *RawPtrAsanService::GetInstance().GetShadow(header) =
+ kAsanUserPoisonedMemoryMagic;
+}
+
+NO_SANITIZE("address")
+bool RawPtrAsanService::IsSupportedAllocation(void* allocation_start) const {
+ uint8_t* header = static_cast<uint8_t*>(allocation_start) - kChunkHeaderSize;
+ return *GetShadow(header) == kAsanUserPoisonedMemoryMagic;
+}
+
+NO_SANITIZE("address")
+void RawPtrAsanService::Configure(
+ EnableDereferenceCheck enable_dereference_check,
+ EnableExtractionCheck enable_extraction_check,
+ EnableInstantiationCheck enable_instantiation_check) {
+ CHECK_EQ(mode_, Mode::kUninitialized);
+
+ Mode new_mode = enable_dereference_check || enable_extraction_check ||
+ enable_instantiation_check
+ ? Mode::kEnabled
+ : Mode::kDisabled;
+ if (new_mode == Mode::kEnabled) {
+ // The constants we use aren't directly exposed by the API, so
+ // validate them at runtime as carefully as possible.
+ size_t shadow_scale;
+ __asan_get_shadow_mapping(&shadow_scale, &shadow_offset_);
+ CHECK_EQ(shadow_scale, kShadowScale);
+
+ uint8_t* dummy_alloc = new uint8_t;
+ CHECK_EQ(*GetShadow(dummy_alloc - kChunkHeaderSize),
+ kAsanHeapLeftRedzoneMagic);
+
+ __asan_poison_memory_region(dummy_alloc, 1);
+ CHECK_EQ(*GetShadow(dummy_alloc), kAsanUserPoisonedMemoryMagic);
+ delete dummy_alloc;
+
+ __sanitizer_install_malloc_and_free_hooks(MallocHook, FreeHook);
+ debug::AsanService::GetInstance()->AddErrorCallback(ErrorReportCallback);
+ internal::InstallRawPtrHooks(base::internal::GetRawPtrAsanHooks());
+
+ is_dereference_check_enabled_ = !!enable_dereference_check;
+ is_extraction_check_enabled_ = !!enable_extraction_check;
+ is_instantiation_check_enabled_ = !!enable_instantiation_check;
+ }
+
+ mode_ = new_mode;
+}
+
+uint8_t* RawPtrAsanService::GetShadow(void* ptr) const {
+ return reinterpret_cast<uint8_t*>(
+ (reinterpret_cast<uintptr_t>(ptr) >> kShadowScale) + shadow_offset_);
+}
+
+// static
+void RawPtrAsanService::SetPendingReport(ReportType type,
+ const volatile void* ptr) {
+ // The actual ASan crash may occur at an offset from the pointer passed
+ // here, so track the whole region.
+ void* region_base;
+ size_t region_size;
+  __asan_locate_address(const_cast<void*>(ptr), nullptr, 0, &region_base,
+                        &region_size);
+
+ pending_report = {type, reinterpret_cast<uintptr_t>(region_base),
+ region_size};
+}
+
+namespace {
+enum class ProtectionStatus {
+ kNotProtected,
+ kManualAnalysisRequired,
+ kProtected,
+};
+
+const char* ProtectionStatusToString(ProtectionStatus status) {
+ switch (status) {
+ case ProtectionStatus::kNotProtected:
+ return "NOT PROTECTED";
+ case ProtectionStatus::kManualAnalysisRequired:
+ return "MANUAL ANALYSIS REQUIRED";
+ case ProtectionStatus::kProtected:
+ return "PROTECTED";
+ }
+}
+
+// ASan doesn't have an API to get the current thread's identifier.
+// We have to create a dummy allocation to determine it.
+int GetCurrentThreadId() {
+ int* dummy = new int;
+ int id = -1;
+ __asan_get_alloc_stack(dummy, nullptr, 0, &id);
+ delete dummy;
+ return id;
+}
+} // namespace
+
+// static
+void RawPtrAsanService::ErrorReportCallback(const char* report, bool*) {
+ if (strcmp(__asan_get_report_description(), "heap-use-after-free") != 0) {
+ return;
+ }
+
+ struct {
+ ProtectionStatus protection_status;
+ const char* crash_details;
+ const char* protection_details;
+ } crash_info;
+
+ uintptr_t ptr = reinterpret_cast<uintptr_t>(__asan_get_report_address());
+ uintptr_t bound_arg_ptr = RawPtrAsanBoundArgTracker::GetProtectedArgPtr(ptr);
+ if (pending_report.allocation_base <= ptr &&
+ ptr < pending_report.allocation_base + pending_report.allocation_size) {
+ bool is_supported_allocation =
+ RawPtrAsanService::GetInstance().IsSupportedAllocation(
+ reinterpret_cast<void*>(pending_report.allocation_base));
+ switch (pending_report.type) {
+ case ReportType::kDereference: {
+ if (is_supported_allocation) {
+ crash_info = {ProtectionStatus::kProtected,
+ "This crash occurred while a raw_ptr<T> object "
+ "containing a dangling pointer was being dereferenced.",
+ "MiraclePtr is expected to make this crash "
+ "non-exploitable once fully enabled."};
+ } else {
+ crash_info = {ProtectionStatus::kNotProtected,
+ "This crash occurred while accessing a region that was "
+ "allocated before MiraclePtr was activated.",
+ "This crash is still exploitable with MiraclePtr."};
+ }
+ break;
+ }
+ case ReportType::kExtraction: {
+ if (is_supported_allocation && bound_arg_ptr) {
+ crash_info = {
+ ProtectionStatus::kProtected,
+ "This crash occurred inside a callback where a raw_ptr<T> "
+ "pointing to the same region was bound to one of the arguments.",
+ "MiraclePtr is expected to make this crash non-exploitable once "
+ "fully enabled."};
+ } else if (is_supported_allocation) {
+ crash_info = {
+ ProtectionStatus::kManualAnalysisRequired,
+ "A pointer to the same region was extracted from a raw_ptr<T> "
+ "object prior to this crash.",
+ "To determine the protection status, enable extraction warnings "
+ "and check whether the raw_ptr<T> object can be destroyed or "
+ "overwritten between the extraction and use."};
+ } else {
+ crash_info = {ProtectionStatus::kNotProtected,
+ "This crash occurred while accessing a region that was "
+ "allocated before MiraclePtr was activated.",
+ "This crash is still exploitable with MiraclePtr."};
+ }
+ break;
+ }
+ case ReportType::kInstantiation: {
+ crash_info = {ProtectionStatus::kNotProtected,
+ "A pointer to an already freed region was assigned to a "
+ "raw_ptr<T> object, which may lead to memory corruption.",
+ "This crash is still exploitable with MiraclePtr."};
+ }
+ }
+ } else if (bound_arg_ptr) {
+ // Note - this branch comes second to avoid hiding invalid instantiations,
+ // as we still consider it to be an error to instantiate a raw_ptr<T> from
+ // an invalid T* even if that T* is guaranteed to be quarantined.
+ bool is_supported_allocation =
+ RawPtrAsanService::GetInstance().IsSupportedAllocation(
+ reinterpret_cast<void*>(bound_arg_ptr));
+ if (is_supported_allocation) {
+ crash_info = {
+ ProtectionStatus::kProtected,
+ "This crash occurred inside a callback where a raw_ptr<T> pointing "
+ "to the same region was bound to one of the arguments.",
+ "MiraclePtr is expected to make this crash non-exploitable once "
+ "fully enabled."};
+ } else {
+ crash_info = {ProtectionStatus::kNotProtected,
+ "This crash occurred while accessing a region that was "
+ "allocated before MiraclePtr was activated.",
+ "This crash is still exploitable with MiraclePtr."};
+ }
+ } else {
+ crash_info = {
+ ProtectionStatus::kNotProtected,
+ "No raw_ptr<T> access to this region was detected prior to this crash.",
+ "This crash is still exploitable with MiraclePtr."};
+ }
+
+ // The race condition check below may override the protection status.
+ if (crash_info.protection_status != ProtectionStatus::kNotProtected) {
+ int free_thread_id = -1;
+ __asan_get_free_stack(reinterpret_cast<void*>(ptr), nullptr, 0,
+ &free_thread_id);
+ if (free_thread_id != GetCurrentThreadId()) {
+ crash_info.protection_status = ProtectionStatus::kManualAnalysisRequired;
+ crash_info.protection_details =
+ "The \"use\" and \"free\" threads don't match. This crash is likely "
+ "to have been caused by a race condition that is mislabeled as a "
+ "use-after-free. Make sure that the \"free\" is sequenced after the "
+ "\"use\" (e.g. both are on the same sequence, or the \"free\" is in "
+ "a task posted after the \"use\"). Otherwise, the crash is still "
+ "exploitable with MiraclePtr.";
+ } else if (internal::ThreadGroup::CurrentThreadHasGroup()) {
+ // We need to be especially careful with ThreadPool threads. Otherwise,
+ // we might miss false-positives where the "use" and "free" happen on
+ // different sequences but the same thread by chance.
+ crash_info.protection_status = ProtectionStatus::kManualAnalysisRequired;
+ crash_info.protection_details =
+ "This crash occurred in the thread pool. The sequence which invoked "
+ "the \"free\" is unknown, so the crash may have been caused by a "
+ "race condition that is mislabeled as a use-after-free. Make sure "
+ "that the \"free\" is sequenced after the \"use\" (e.g. both are on "
+ "the same sequence, or the \"free\" is in a task posted after the "
+ "\"use\"). Otherwise, the crash is still exploitable with "
+ "MiraclePtr.";
+ }
+ }
+
+ debug::AsanService::GetInstance()->Log(
+ "\nMiraclePtr Status: %s\n%s\n%s\n"
+ "Refer to "
+ "https://chromium.googlesource.com/chromium/src/+/main/base/memory/"
+ "raw_ptr.md for details.",
+ ProtectionStatusToString(crash_info.protection_status),
+ crash_info.crash_details, crash_info.protection_details);
+}
+
+namespace {
+enum class MessageLevel {
+ kWarning,
+ kError,
+};
+
+const char* LevelToString(MessageLevel level) {
+ switch (level) {
+ case MessageLevel::kWarning:
+ return "WARNING";
+ case MessageLevel::kError:
+ return "ERROR";
+ }
+}
+
+// Prints AddressSanitizer-like custom error messages.
+void Log(MessageLevel level,
+ uintptr_t address,
+ const char* type,
+ const char* description) {
+#if __has_builtin(__builtin_extract_return_addr) && \
+ __has_builtin(__builtin_return_address)
+ void* pc = __builtin_extract_return_addr(__builtin_return_address(0));
+#else
+ void* pc = nullptr;
+#endif
+
+#if __has_builtin(__builtin_frame_address)
+ void* bp = __builtin_frame_address(0);
+#else
+ void* bp = nullptr;
+#endif
+
+ void* local_stack;
+ void* sp = &local_stack;
+
+ debug::AsanService::GetInstance()->Log(
+ "=================================================================\n"
+ "==%d==%s: MiraclePtr: %s on address %p at pc %p bp %p sp %p",
+ Process::Current().Pid(), LevelToString(level), type, address, pc, bp,
+ sp);
+ __sanitizer_print_stack_trace();
+ __asan_describe_address(reinterpret_cast<void*>(address));
+ debug::AsanService::GetInstance()->Log(
+ "%s\n"
+ "=================================================================",
+ description);
+}
+} // namespace
+
+void RawPtrAsanService::WarnOnDanglingExtraction(
+ const volatile void* ptr) const {
+ Log(MessageLevel::kWarning, reinterpret_cast<uintptr_t>(ptr),
+ "dangling-pointer-extraction",
+ "A regular ASan report will follow if the extracted pointer is "
+ "dereferenced later.\n"
+ "Otherwise, it is still likely a bug to rely on the address of an "
+ "already freed allocation.\n"
+ "Refer to "
+ "https://chromium.googlesource.com/chromium/src/+/main/base/memory/"
+ "raw_ptr.md for details.");
+}
+
+void RawPtrAsanService::CrashOnDanglingInstantiation(
+ const volatile void* ptr) const {
+ Log(MessageLevel::kError, reinterpret_cast<uintptr_t>(ptr),
+ "dangling-pointer-instantiation",
+ "This crash occurred due to an attempt to assign a dangling pointer to a "
+ "raw_ptr<T> variable, which might lead to use-after-free.\n"
+ "Note that this report might be a false positive if at the moment of the "
+ "crash another raw_ptr<T> is guaranteed to keep the allocation alive.\n"
+ "Refer to "
+ "https://chromium.googlesource.com/chromium/src/+/main/base/memory/"
+ "raw_ptr.md for details.");
+ base::ImmediateCrash();
+}
+
+} // namespace base
+
+#endif // BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
diff --git a/base/memory/raw_ptr_asan_service.h b/base/memory/raw_ptr_asan_service.h
new file mode 100644
index 0000000..bff9336
--- /dev/null
+++ b/base/memory/raw_ptr_asan_service.h
@@ -0,0 +1,101 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_RAW_PTR_ASAN_SERVICE_H_
+#define BASE_MEMORY_RAW_PTR_ASAN_SERVICE_H_
+
+#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
+
+#if BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
+#include <cstddef>
+#include <cstdint>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/types/strong_alias.h"
+
+namespace base {
+
+using EnableDereferenceCheck =
+ base::StrongAlias<class EnableDereferenceCheckTag, bool>;
+using EnableExtractionCheck =
+ base::StrongAlias<class EnableExtractionCheckTag, bool>;
+using EnableInstantiationCheck =
+ base::StrongAlias<class EnableInstantiationCheckTag, bool>;
+
+class BASE_EXPORT RawPtrAsanService {
+ public:
+ enum class ReportType {
+ kDereference,
+ kExtraction,
+ kInstantiation,
+ };
+
+ struct PendingReport {
+ ReportType type = ReportType::kDereference;
+ uintptr_t allocation_base = 0;
+ size_t allocation_size = 0;
+ };
+
+ void Configure(EnableDereferenceCheck,
+ EnableExtractionCheck,
+ EnableInstantiationCheck);
+
+ bool IsSupportedAllocation(void*) const;
+
+ bool IsEnabled() const { return mode_ == Mode::kEnabled; }
+
+ ALWAYS_INLINE NO_SANITIZE(
+ "address") bool is_dereference_check_enabled() const {
+ return is_dereference_check_enabled_;
+ }
+
+ ALWAYS_INLINE NO_SANITIZE(
+ "address") bool is_extraction_check_enabled() const {
+ return is_extraction_check_enabled_;
+ }
+
+ ALWAYS_INLINE NO_SANITIZE(
+ "address") bool is_instantiation_check_enabled() const {
+ return is_instantiation_check_enabled_;
+ }
+
+ ALWAYS_INLINE NO_SANITIZE("address") static RawPtrAsanService& GetInstance() {
+ return instance_;
+ }
+
+ void WarnOnDanglingExtraction(const volatile void* ptr) const;
+ void CrashOnDanglingInstantiation(const volatile void* ptr) const;
+
+ static void SetPendingReport(ReportType type, const volatile void* ptr);
+
+ private:
+ enum class Mode {
+ kUninitialized,
+ kDisabled,
+ kEnabled,
+ };
+
+ uint8_t* GetShadow(void* ptr) const;
+
+ static void MallocHook(const volatile void*, size_t);
+ static void FreeHook(const volatile void*) {}
+ static void ErrorReportCallback(const char* report,
+ bool* should_exit_cleanly);
+
+ Mode mode_ = Mode::kUninitialized;
+ bool is_dereference_check_enabled_ = false;
+ bool is_extraction_check_enabled_ = false;
+ bool is_instantiation_check_enabled_ = false;
+
+ size_t shadow_offset_ = 0;
+
+ static RawPtrAsanService instance_; // Not a static local variable because
+ // `GetInstance()` is used in hot paths.
+};
+
+} // namespace base
+
+#endif // BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
+#endif // BASE_MEMORY_RAW_PTR_ASAN_SERVICE_H_
diff --git a/base/memory/raw_ptr_asan_unittest.cc b/base/memory/raw_ptr_asan_unittest.cc
new file mode 100644
index 0000000..3a42db7
--- /dev/null
+++ b/base/memory/raw_ptr_asan_unittest.cc
@@ -0,0 +1,453 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/partition_alloc_buildflags.h"
+
+#if BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
+
+#include <sanitizer/asan_interface.h>
+#include <thread>
+
+#include "base/debug/asan_service.h"
+#include "base/functional/bind.h"
+#include "base/functional/callback.h"
+#include "base/memory/raw_ptr.h"
+#include "base/memory/raw_ptr_asan_service.h"
+#include "base/task/thread_pool.h"
+#include "base/test/bind.h"
+#include "base/test/task_environment.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base::internal {
+
+struct AsanStruct {
+ int x;
+
+ void func() { ++x; }
+};
+
+#define ASAN_BRP_PROTECTED(x) "MiraclePtr Status: PROTECTED\\n.*" x
+#define ASAN_BRP_MANUAL_ANALYSIS(x) \
+ "MiraclePtr Status: MANUAL ANALYSIS REQUIRED\\n.*" x
+#define ASAN_BRP_NOT_PROTECTED(x) "MiraclePtr Status: NOT PROTECTED\\n.*" x
+
+const char kAsanBrpProtected_Dereference[] =
+ ASAN_BRP_PROTECTED("dangling pointer was being dereferenced");
+const char kAsanBrpProtected_Callback[] = ASAN_BRP_PROTECTED(
+ "crash occurred inside a callback where a raw_ptr<T> pointing to the same "
+ "region");
+const char kAsanBrpMaybeProtected_Extraction[] = ASAN_BRP_MANUAL_ANALYSIS(
+ "pointer to the same region was extracted from a raw_ptr<T>");
+const char kAsanBrpNotProtected_EarlyAllocation[] = ASAN_BRP_NOT_PROTECTED(
+ "crash occurred while accessing a region that was allocated before "
+ "MiraclePtr was activated");
+const char kAsanBrpNotProtected_NoRawPtrAccess[] =
+ ASAN_BRP_NOT_PROTECTED("No raw_ptr<T> access to this region was detected");
+const char kAsanBrpMaybeProtected_Race[] =
+ ASAN_BRP_MANUAL_ANALYSIS("\\nThe \"use\" and \"free\" threads don't match");
+const char kAsanBrpMaybeProtected_ThreadPool[] =
+ ASAN_BRP_MANUAL_ANALYSIS("\\nThis crash occurred in the thread pool");
+
+// Instantiation failure message format is special.
+const char kAsanBrp_Instantiation[] =
+ "crash occurred due to an attempt to assign a dangling pointer";
+
+#undef ASAN_BRP_PROTECTED
+#undef ASAN_BRP_MANUAL_ANALYSIS
+#undef ASAN_BRP_NOT_PROTECTED
+
+class AsanBackupRefPtrTest : public testing::Test {
+ protected:
+ void SetUp() override {
+ base::debug::AsanService::GetInstance()->Initialize();
+
+ if (!RawPtrAsanService::GetInstance().IsEnabled()) {
+ base::RawPtrAsanService::GetInstance().Configure(
+ base::EnableDereferenceCheck(true), base::EnableExtractionCheck(true),
+ base::EnableInstantiationCheck(true));
+ } else {
+ ASSERT_TRUE(base::RawPtrAsanService::GetInstance()
+ .is_dereference_check_enabled());
+ ASSERT_TRUE(
+ base::RawPtrAsanService::GetInstance().is_extraction_check_enabled());
+ ASSERT_TRUE(base::RawPtrAsanService::GetInstance()
+ .is_instantiation_check_enabled());
+ }
+ }
+
+ static void SetUpTestSuite() { early_allocation_ptr_ = new AsanStruct; }
+ static void TearDownTestSuite() { delete early_allocation_ptr_; }
+ static raw_ptr<AsanStruct> early_allocation_ptr_;
+};
+
+raw_ptr<AsanStruct> AsanBackupRefPtrTest::early_allocation_ptr_ = nullptr;
+
+TEST_F(AsanBackupRefPtrTest, Dereference) {
+ raw_ptr<AsanStruct> protected_ptr = new AsanStruct;
+
+ // The four statements below should succeed.
+ (*protected_ptr).x = 1;
+ (*protected_ptr).func();
+ ++(protected_ptr->x);
+ protected_ptr->func();
+
+ delete protected_ptr.get();
+
+ EXPECT_DEATH_IF_SUPPORTED((*protected_ptr).x = 1,
+ kAsanBrpProtected_Dereference);
+ EXPECT_DEATH_IF_SUPPORTED((*protected_ptr).func(),
+ kAsanBrpProtected_Dereference);
+ EXPECT_DEATH_IF_SUPPORTED(++(protected_ptr->x),
+ kAsanBrpProtected_Dereference);
+ EXPECT_DEATH_IF_SUPPORTED(protected_ptr->func(),
+ kAsanBrpProtected_Dereference);
+
+ // The following statement should not trigger a dereference, so it should
+ // succeed without crashing even though *protected_ptr is no longer valid.
+ [[maybe_unused]] AsanStruct* ptr = protected_ptr;
+}
+
+TEST_F(AsanBackupRefPtrTest, Extraction) {
+ raw_ptr<AsanStruct> protected_ptr = new AsanStruct;
+
+ AsanStruct* ptr1 = protected_ptr; // Shouldn't crash.
+ ptr1->x = 0;
+
+ delete protected_ptr.get();
+
+ EXPECT_DEATH_IF_SUPPORTED(
+ {
+ AsanStruct* ptr2 = protected_ptr;
+ ptr2->x = 1;
+ },
+ kAsanBrpMaybeProtected_Extraction);
+}
+
+TEST_F(AsanBackupRefPtrTest, Instantiation) {
+ AsanStruct* ptr = new AsanStruct;
+
+ raw_ptr<AsanStruct> protected_ptr1 = ptr; // Shouldn't crash.
+ protected_ptr1 = nullptr;
+
+ delete ptr;
+
+ EXPECT_DEATH_IF_SUPPORTED(
+ { [[maybe_unused]] raw_ptr<AsanStruct> protected_ptr2 = ptr; },
+ kAsanBrp_Instantiation);
+}
+
+TEST_F(AsanBackupRefPtrTest, InstantiationInvalidPointer) {
+ void* ptr1 = reinterpret_cast<void*>(0xfefefefefefefefe);
+
+ [[maybe_unused]] raw_ptr<void> protected_ptr1 = ptr1; // Shouldn't crash.
+
+ size_t shadow_scale, shadow_offset;
+ __asan_get_shadow_mapping(&shadow_scale, &shadow_offset);
+ [[maybe_unused]] raw_ptr<void> protected_ptr2 =
+ reinterpret_cast<void*>(shadow_offset); // Shouldn't crash.
+}
+
+TEST_F(AsanBackupRefPtrTest, UserPoisoned) {
+ AsanStruct* ptr = new AsanStruct;
+ __asan_poison_memory_region(ptr, sizeof(AsanStruct));
+
+ [[maybe_unused]] raw_ptr<AsanStruct> protected_ptr1 =
+ ptr; // Shouldn't crash.
+
+ delete ptr; // Should crash now.
+ EXPECT_DEATH_IF_SUPPORTED(
+ { [[maybe_unused]] raw_ptr<AsanStruct> protected_ptr2 = ptr; },
+ kAsanBrp_Instantiation);
+}
+
+TEST_F(AsanBackupRefPtrTest, EarlyAllocationDetection) {
+ raw_ptr<AsanStruct> late_allocation_ptr = new AsanStruct;
+ EXPECT_FALSE(RawPtrAsanService::GetInstance().IsSupportedAllocation(
+ early_allocation_ptr_.get()));
+ EXPECT_TRUE(RawPtrAsanService::GetInstance().IsSupportedAllocation(
+ late_allocation_ptr.get()));
+
+ delete late_allocation_ptr.get();
+ delete early_allocation_ptr_.get();
+
+ EXPECT_FALSE(RawPtrAsanService::GetInstance().IsSupportedAllocation(
+ early_allocation_ptr_.get()));
+ EXPECT_TRUE(RawPtrAsanService::GetInstance().IsSupportedAllocation(
+ late_allocation_ptr.get()));
+
+ EXPECT_DEATH_IF_SUPPORTED({ early_allocation_ptr_->func(); },
+ kAsanBrpNotProtected_EarlyAllocation);
+ EXPECT_DEATH_IF_SUPPORTED({ late_allocation_ptr->func(); },
+ kAsanBrpProtected_Dereference);
+
+ early_allocation_ptr_ = nullptr;
+}
+
+TEST_F(AsanBackupRefPtrTest, BoundRawPtr) {
+ // This test is for the handling of raw_ptr<T> type objects being passed
+ // directly to Bind.
+
+ raw_ptr<AsanStruct> protected_ptr = new AsanStruct;
+
+ // First create our test callbacks while `*protected_ptr` is still valid, and
+ // we will then invoke them after deleting `*protected_ptr`.
+
+ // `ptr` is protected in this callback, as raw_ptr<T> will be mapped to an
+ // UnretainedWrapper containing a raw_ptr<T> which is guaranteed to outlive
+ // the function call.
+ auto ptr_callback = base::BindOnce(
+ [](AsanStruct* ptr) {
+ // This will crash and should be detected as a protected access.
+ ptr->func();
+ },
+ protected_ptr);
+
+ // Now delete `*protected_ptr` and check that the callbacks we created are
+ // handled correctly.
+ delete protected_ptr.get();
+ protected_ptr = nullptr;
+
+ EXPECT_DEATH_IF_SUPPORTED(std::move(ptr_callback).Run(),
+ kAsanBrpProtected_Callback);
+}
+
+TEST_F(AsanBackupRefPtrTest, BoundArgumentsProtected) {
+ raw_ptr<AsanStruct> protected_ptr = new AsanStruct;
+ raw_ptr<AsanStruct> protected_ptr2 = new AsanStruct;
+
+ // First create our test callbacks while `*protected_ptr` is still valid, and
+ // we will then invoke them after deleting `*protected_ptr`.
+
+ // `ptr` is protected in this callback even after `*ptr` has been deleted,
+ // since the allocation will be kept alive by the internal `raw_ptr<T>` inside
+ // base::Unretained().
+ auto safe_callback = base::BindOnce(
+ [](AsanStruct* ptr) {
+ // This will crash and should be detected as a protected access.
+ ptr->func();
+ },
+ base::Unretained(protected_ptr));
+
+ // Both `inner_ptr` and `outer_ptr` are protected in these callbacks, since
+ // both are bound before `*ptr` is deleted. This test is making sure that
+ // `inner_ptr` is treated as protected.
+ auto safe_nested_inner_callback = base::BindOnce(
+ [](AsanStruct* outer_ptr, base::OnceClosure inner_callback) {
+ std::move(inner_callback).Run();
+ // This will never be executed, as we will crash in inner_callback
+ ASSERT_TRUE(false);
+ },
+ base::Unretained(protected_ptr),
+ base::BindOnce(
+ [](AsanStruct* inner_ptr) {
+ // This will crash and should be detected as a protected access.
+ inner_ptr->func();
+ },
+ base::Unretained(protected_ptr2)));
+
+ // Both `inner_ptr` and `outer_ptr` are protected in these callbacks, since
+ // both are bound before `*ptr` is deleted. This test is making sure that
+ // `outer_ptr` is still treated as protected after `inner_callback` has run.
+ auto safe_nested_outer_callback = base::BindOnce(
+ [](AsanStruct* outer_ptr, base::OnceClosure inner_callback) {
+ std::move(inner_callback).Run();
+ // This will crash and should be detected as a protected access.
+ outer_ptr->func();
+ },
+ base::Unretained(protected_ptr),
+ base::BindOnce(
+ [](AsanStruct* inner_ptr) {
+ // Do nothing - we don't want to trip the protection inside the
+ // inner callback.
+ },
+ base::Unretained(protected_ptr2)));
+
+ // Now delete `*protected_ptr` and check that the callbacks we created are
+ // handled correctly.
+ delete protected_ptr.get();
+ delete protected_ptr2.get();
+ protected_ptr = nullptr;
+ protected_ptr2 = nullptr;
+
+ EXPECT_DEATH_IF_SUPPORTED(std::move(safe_callback).Run(),
+ kAsanBrpProtected_Callback);
+ EXPECT_DEATH_IF_SUPPORTED(std::move(safe_nested_inner_callback).Run(),
+ kAsanBrpProtected_Callback);
+ EXPECT_DEATH_IF_SUPPORTED(std::move(safe_nested_outer_callback).Run(),
+ kAsanBrpProtected_Callback);
+}
+
+TEST_F(AsanBackupRefPtrTest, BoundArgumentsNotProtected) {
+ raw_ptr<AsanStruct> protected_ptr = new AsanStruct;
+
+ // First create our test callbacks while `*protected_ptr` is still valid, and
+ // we will then invoke them after deleting `*protected_ptr`.
+
+ // `ptr` is not protected in this callback after `*ptr` has been deleted, as
+ // integer-type bind arguments do not use an internal `raw_ptr<T>`.
+ auto unsafe_callback = base::BindOnce(
+ [](uintptr_t address) {
+ AsanStruct* ptr = reinterpret_cast<AsanStruct*>(address);
+ // This will crash and should not be detected as a protected access.
+ ptr->func();
+ },
+ reinterpret_cast<uintptr_t>(protected_ptr.get()));
+
+ // In this case, `outer_ptr` is protected in these callbacks, since it is
+ // bound before `*ptr` is deleted. We want to make sure that the access to
+ // `inner_ptr` is not automatically treated as protected (although it actually
+ // is) because we're trying to limit the protection scope to be very
+ // conservative here.
+ auto unsafe_nested_inner_callback = base::BindOnce(
+ [](AsanStruct* outer_ptr, base::OnceClosure inner_callback) {
+ std::move(inner_callback).Run();
+ // This will never be executed, as we will crash in inner_callback
+ NOTREACHED();
+ },
+ base::Unretained(protected_ptr),
+ base::BindOnce(
+ [](uintptr_t inner_address) {
+ AsanStruct* inner_ptr =
+ reinterpret_cast<AsanStruct*>(inner_address);
+ // This will crash and should be detected as maybe protected, since
+ // it follows an extraction operation when the outer callback is
+ // invoked
+ inner_ptr->func();
+ },
+ reinterpret_cast<uintptr_t>(protected_ptr.get())));
+
+ // In this case, `inner_ptr` is protected in these callbacks, since it is
+ // bound before `*ptr` is deleted. We want to make sure that the access to
+ // `outer_ptr` is not automatically treated as protected, since it isn't.
+ auto unsafe_nested_outer_callback = base::BindOnce(
+ [](uintptr_t outer_address, base::OnceClosure inner_callback) {
+ { std::move(inner_callback).Run(); }
+ AsanStruct* outer_ptr = reinterpret_cast<AsanStruct*>(outer_address);
+ // This will crash and should be detected as maybe protected, since it
+ // follows an extraction operation when the inner callback is invoked.
+ outer_ptr->func();
+ },
+ reinterpret_cast<uintptr_t>(protected_ptr.get()),
+ base::BindOnce(
+ [](AsanStruct* inner_ptr) {
+ // Do nothing - we don't want to trip the protection inside the
+ // inner callback.
+ },
+ base::Unretained(protected_ptr)));
+
+ // Now delete `*protected_ptr` and check that the callbacks we created are
+ // handled correctly.
+ delete protected_ptr.get();
+ protected_ptr = nullptr;
+
+ EXPECT_DEATH_IF_SUPPORTED(std::move(unsafe_callback).Run(),
+ kAsanBrpNotProtected_NoRawPtrAccess);
+ EXPECT_DEATH_IF_SUPPORTED(std::move(unsafe_nested_inner_callback).Run(),
+ kAsanBrpMaybeProtected_Extraction);
+ EXPECT_DEATH_IF_SUPPORTED(std::move(unsafe_nested_outer_callback).Run(),
+ kAsanBrpMaybeProtected_Extraction);
+}
+
+TEST_F(AsanBackupRefPtrTest, BoundArgumentsInstantiation) {
+ // This test is ensuring that instantiations of `raw_ptr` inside callbacks are
+ // handled correctly.
+
+ raw_ptr<AsanStruct> protected_ptr = new AsanStruct;
+
+ // First create our test callback while `*protected_ptr` is still valid.
+ auto callback = base::BindRepeating(
+ [](AsanStruct* ptr) {
+ // This will crash if `*protected_ptr` is not valid.
+ [[maybe_unused]] raw_ptr<AsanStruct> copy_ptr = ptr;
+ },
+ base::Unretained(protected_ptr));
+
+ // It is allowed to create a new `raw_ptr<T>` inside a callback while
+ // `*protected_ptr` is still valid.
+ callback.Run();
+
+ delete protected_ptr.get();
+ protected_ptr = nullptr;
+
+ // It is not allowed to create a new `raw_ptr<T>` inside a callback once
+ // `*protected_ptr` is no longer valid.
+ EXPECT_DEATH_IF_SUPPORTED(std::move(callback).Run(), kAsanBrp_Instantiation);
+}
+
+TEST_F(AsanBackupRefPtrTest, BoundReferences) {
+ auto ptr = ::std::make_unique<AsanStruct>();
+
+ // This test is ensuring that reference parameters inside callbacks are
+ // handled correctly.
+
+ // We should not crash during unwrapping a reference parameter if the
+ // parameter is not accessed inside the callback.
+ auto no_crash_callback = base::BindOnce(
+ [](AsanStruct& ref) {
+ // There should be no crash here as we don't access ref.
+ },
+ std::reference_wrapper(*ptr));
+
+ // `ref` is protected in this callback even after `*ptr` has been deleted,
+ // since the allocation will be kept alive by the internal `raw_ref<T>` inside
+ // base::UnretainedRefWrapper().
+ auto callback = base::BindOnce(
+ [](AsanStruct& ref) {
+ // This will crash and should be detected as protected
+ ref.func();
+ },
+ std::reference_wrapper(*ptr));
+
+ ptr.reset();
+
+ std::move(no_crash_callback).Run();
+
+ EXPECT_DEATH_IF_SUPPORTED(std::move(callback).Run(),
+ kAsanBrpProtected_Callback);
+}
+
+TEST_F(AsanBackupRefPtrTest, FreeOnAnotherThread) {
+ auto ptr = ::std::make_unique<AsanStruct>();
+ raw_ptr<AsanStruct> protected_ptr = ptr.get();
+
+ std::thread thread([&ptr] { ptr.reset(); });
+ thread.join();
+
+ EXPECT_DEATH_IF_SUPPORTED(protected_ptr->func(), kAsanBrpMaybeProtected_Race);
+}
+
+TEST_F(AsanBackupRefPtrTest, AccessOnThreadPoolThread) {
+ auto ptr = ::std::make_unique<AsanStruct>();
+ raw_ptr<AsanStruct> protected_ptr = ptr.get();
+
+ test::TaskEnvironment env;
+ RunLoop run_loop;
+
+ ThreadPool::PostTaskAndReply(
+ FROM_HERE, {}, base::BindLambdaForTesting([&ptr, &protected_ptr] {
+ ptr.reset();
+ EXPECT_DEATH_IF_SUPPORTED(protected_ptr->func(),
+ kAsanBrpMaybeProtected_ThreadPool);
+ }),
+ base::BindLambdaForTesting([&run_loop]() { run_loop.Quit(); }));
+ run_loop.Run();
+}
+
+TEST_F(AsanBackupRefPtrTest, DanglingUnretained) {
+ // The test should finish without crashing.
+
+ raw_ptr<AsanStruct> protected_ptr = new AsanStruct;
+ delete protected_ptr.get();
+
+ auto ptr_callback = base::BindOnce(
+ [](AsanStruct* ptr) {
+ // Do nothing - we only check the behavior of `BindOnce` in this test.
+ },
+ protected_ptr);
+}
+
+} // namespace base::internal
+
+#endif // BUILDFLAG(USE_ASAN_BACKUP_REF_PTR)
diff --git a/base/memory/raw_ptr_exclusion.h b/base/memory/raw_ptr_exclusion.h
new file mode 100644
index 0000000..3ce1d60
--- /dev/null
+++ b/base/memory/raw_ptr_exclusion.h
@@ -0,0 +1,13 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_RAW_PTR_EXCLUSION_H_
+#define BASE_MEMORY_RAW_PTR_EXCLUSION_H_
+
+// Although `raw_ptr` is part of the standalone PA distribution, it is
+// easier to use the shorter path in `//base/memory`. We retain this
+// facade header for ease of typing.
+#include "base/allocator/partition_allocator/pointers/raw_ptr_exclusion.h" // IWYU pragma: export
+
+#endif // BASE_MEMORY_RAW_PTR_EXCLUSION_H_
diff --git a/base/memory/raw_ref.h b/base/memory/raw_ref.h
new file mode 100644
index 0000000..6a599d9
--- /dev/null
+++ b/base/memory/raw_ref.h
@@ -0,0 +1,13 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_RAW_REF_H_
+#define BASE_MEMORY_RAW_REF_H_
+
+// Although `raw_ref` is part of the standalone PA distribution, it is
+// easier to use the shorter path in `//base/memory`. We retain this
+// facade header for ease of typing.
+#include "base/allocator/partition_allocator/pointers/raw_ref.h" // IWYU pragma: export
+
+#endif // BASE_MEMORY_RAW_REF_H_
diff --git a/base/memory/raw_scoped_refptr_mismatch_checker.h b/base/memory/raw_scoped_refptr_mismatch_checker.h
index ab8b2ab..0e08a84 100644
--- a/base/memory/raw_scoped_refptr_mismatch_checker.h
+++ b/base/memory/raw_scoped_refptr_mismatch_checker.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Copyright 2011 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -7,6 +7,8 @@
#include <type_traits>
+#include "base/memory/raw_ptr.h"
+#include "base/memory/raw_ref.h"
#include "base/template_util.h"
// It is dangerous to post a task with a T* argument where T is a subtype of
@@ -27,22 +29,23 @@
template <typename T>
struct IsRefCountedType<T,
- void_t<decltype(std::declval<T*>()->AddRef()),
- decltype(std::declval<T*>()->Release())>>
+ std::void_t<decltype(std::declval<T*>()->AddRef()),
+ decltype(std::declval<T*>()->Release())>>
: std::true_type {};
+// Human readable translation: you need to use a scoped_refptr if you have a
+// raw pointer type convertible to a RefCounted(Base|ThreadSafeBase) type.
template <typename T>
-struct NeedsScopedRefptrButGetsRawPtr {
+struct NeedsScopedRefptrButGetsRawPtr
+ : std::disjunction<
+ // TODO(danakj): Should ban native references and
+ // std::reference_wrapper here too.
+ std::conjunction<base::IsRawRef<T>,
+ IsRefCountedType<base::RemoveRawRefT<T>>>,
+ std::conjunction<base::IsPointer<T>,
+ IsRefCountedType<base::RemovePointerT<T>>>> {
static_assert(!std::is_reference<T>::value,
"NeedsScopedRefptrButGetsRawPtr requires non-reference type.");
-
- enum {
- // Human readable translation: you needed to be a scoped_refptr if you are a
- // raw pointer type and are convertible to a RefCounted(Base|ThreadSafeBase)
- // type.
- value = std::is_pointer<T>::value &&
- IsRefCountedType<std::remove_pointer_t<T>>::value
- };
};
} // namespace internal
diff --git a/base/memory/read_only_shared_memory_region.cc b/base/memory/read_only_shared_memory_region.cc
index a19a7aa..12ea8b6 100644
--- a/base/memory/read_only_shared_memory_region.cc
+++ b/base/memory/read_only_shared_memory_region.cc
@@ -1,38 +1,41 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
+// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Cobalt does not support multiple process and shared memory.
-#if !defined(STARBOARD)
-
#include "base/memory/read_only_shared_memory_region.h"
#include <utility>
-#include "base/memory/shared_memory.h"
#include "build/build_config.h"
namespace base {
+ReadOnlySharedMemoryRegion::CreateFunction*
+ ReadOnlySharedMemoryRegion::create_hook_ = nullptr;
+
// static
-MappedReadOnlyRegion ReadOnlySharedMemoryRegion::Create(size_t size) {
+MappedReadOnlyRegion ReadOnlySharedMemoryRegion::Create(
+ size_t size,
+ SharedMemoryMapper* mapper) {
+ if (create_hook_)
+ return create_hook_(size, mapper);
+
subtle::PlatformSharedMemoryRegion handle =
subtle::PlatformSharedMemoryRegion::CreateWritable(size);
if (!handle.IsValid())
return {};
- void* memory_ptr = nullptr;
- size_t mapped_size = 0;
- if (!handle.MapAt(0, handle.GetSize(), &memory_ptr, &mapped_size))
+ auto result = handle.MapAt(0, handle.GetSize(), mapper);
+ if (!result.has_value())
return {};
- WritableSharedMemoryMapping mapping(memory_ptr, size, mapped_size,
- handle.GetGUID());
-#if defined(OS_MACOSX) && !defined(OS_IOS)
- handle.ConvertToReadOnly(memory_ptr);
+ WritableSharedMemoryMapping mapping(result.value(), size, handle.GetGUID(),
+ mapper);
+#if BUILDFLAG(IS_MAC)
+ handle.ConvertToReadOnly(mapping.memory());
#else
handle.ConvertToReadOnly();
-#endif // defined(OS_MACOSX) && !defined(OS_IOS)
+#endif // BUILDFLAG(IS_MAC)
ReadOnlySharedMemoryRegion region(std::move(handle));
if (!region.IsValid() || !mapping.IsValid())
@@ -65,23 +68,24 @@
return ReadOnlySharedMemoryRegion(handle_.Duplicate());
}
-ReadOnlySharedMemoryMapping ReadOnlySharedMemoryRegion::Map() const {
- return MapAt(0, handle_.GetSize());
+ReadOnlySharedMemoryMapping ReadOnlySharedMemoryRegion::Map(
+ SharedMemoryMapper* mapper) const {
+ return MapAt(0, handle_.GetSize(), mapper);
}
ReadOnlySharedMemoryMapping ReadOnlySharedMemoryRegion::MapAt(
- off_t offset,
- size_t size) const {
+ uint64_t offset,
+ size_t size,
+ SharedMemoryMapper* mapper) const {
if (!IsValid())
return {};
- void* memory = nullptr;
- size_t mapped_size = 0;
- if (!handle_.MapAt(offset, size, &memory, &mapped_size))
+ auto result = handle_.MapAt(offset, size, mapper);
+ if (!result.has_value())
return {};
- return ReadOnlySharedMemoryMapping(memory, size, mapped_size,
- handle_.GetGUID());
+ return ReadOnlySharedMemoryMapping(result.value(), size, handle_.GetGUID(),
+ mapper);
}
bool ReadOnlySharedMemoryRegion::IsValid() const {
@@ -98,5 +102,3 @@
}
} // namespace base
-
-#endif // !defined(STARBOARD)
\ No newline at end of file
diff --git a/base/memory/read_only_shared_memory_region.h b/base/memory/read_only_shared_memory_region.h
index 837cdce..1a83029 100644
--- a/base/memory/read_only_shared_memory_region.h
+++ b/base/memory/read_only_shared_memory_region.h
@@ -1,25 +1,22 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
+// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_READ_ONLY_SHARED_MEMORY_REGION_H_
#define BASE_MEMORY_READ_ONLY_SHARED_MEMORY_REGION_H_
-// Cobalt does not support multiple process and shared memory.
-#if !defined(STARBOARD)
-
-#include <utility>
-
-#include "base/macros.h"
+#include "base/base_export.h"
+#include "base/check.h"
+#include "base/check_op.h"
#include "base/memory/platform_shared_memory_region.h"
#include "base/memory/shared_memory_mapping.h"
+#include <stdint.h>
+
namespace base {
struct MappedReadOnlyRegion;
-// Starboard doesn't curretly support multiple processes or shared memory.
-#if !defined(STARBOARD)
// Scoped move-only handle to a region of platform shared memory. The instance
// owns the platform handle it wraps. Mappings created by this region are
// read-only. These mappings remain valid even after the region handle is moved
@@ -37,13 +34,9 @@
// This means that the caller's process is the only process that can modify
// the region content. If you need to pass write access to another process,
// consider using WritableSharedMemoryRegion or UnsafeSharedMemoryRegion.
- //
- // This call will fail if the process does not have sufficient permissions to
- // create a shared memory region itself. See
- // mojo::CreateReadOnlySharedMemoryRegion in
- // mojo/public/cpp/base/shared_memory_utils.h for creating a shared memory
- // region from a an unprivileged process where a broker must be used.
- static MappedReadOnlyRegion Create(size_t size);
+ static MappedReadOnlyRegion Create(size_t size,
+ SharedMemoryMapper* mapper = nullptr);
+ using CreateFunction = decltype(Create);
// Returns a ReadOnlySharedMemoryRegion built from a platform-specific handle
// that was taken from another ReadOnlySharedMemoryRegion instance. Returns an
@@ -68,6 +61,10 @@
ReadOnlySharedMemoryRegion(ReadOnlySharedMemoryRegion&&);
ReadOnlySharedMemoryRegion& operator=(ReadOnlySharedMemoryRegion&&);
+ ReadOnlySharedMemoryRegion(const ReadOnlySharedMemoryRegion&) = delete;
+ ReadOnlySharedMemoryRegion& operator=(const ReadOnlySharedMemoryRegion&) =
+ delete;
+
// Destructor closes shared memory region if valid.
// All created mappings will remain valid.
~ReadOnlySharedMemoryRegion();
@@ -82,14 +79,23 @@
// read-only access. The mapped address is guaranteed to have an alignment of
// at least |subtle::PlatformSharedMemoryRegion::kMapMinimumAlignment|.
// Returns a valid ReadOnlySharedMemoryMapping instance on success, invalid
- // otherwise.
- ReadOnlySharedMemoryMapping Map() const;
+ // otherwise. A custom |SharedMemoryMapper| for mapping (and later unmapping)
+ // the region can be provided using the optional |mapper| parameter.
+ ReadOnlySharedMemoryMapping Map(SharedMemoryMapper* mapper = nullptr) const;
- // Same as above, but maps only |size| bytes of the shared memory region
- // starting with the given |offset|. |offset| must be aligned to value of
- // |SysInfo::VMAllocationGranularity()|. Returns an invalid mapping if
- // requested bytes are out of the region limits.
- ReadOnlySharedMemoryMapping MapAt(off_t offset, size_t size) const;
+ // Similar to `Map()`, but maps only `size` bytes of the shared memory block
+ // at byte `offset`. Returns an invalid mapping if requested bytes are out of
+ // the region limits.
+ //
+ // `offset` does not need to be aligned; if `offset` is not a multiple of
+ // `subtle::PlatformSharedMemoryRegion::kMapMinimumAlignment`, then the
+ // returned mapping will not respect alignment either. Internally, `offset`
+ // and `size` are still first adjusted to respect alignment when mapping in
+ // the shared memory region, but the returned mapping will be "unadjusted" to
+ // match the exact `offset` and `size` requested.
+ ReadOnlySharedMemoryMapping MapAt(uint64_t offset,
+ size_t size,
+ SharedMemoryMapper* mapper = nullptr) const;
// Whether the underlying platform handle is valid.
bool IsValid() const;
@@ -106,15 +112,25 @@
return handle_.GetGUID();
}
+ // Returns a platform shared memory handle. |this| remains the owner of the
+ // handle.
+ subtle::PlatformSharedMemoryHandle GetPlatformHandle() const {
+ DCHECK(IsValid());
+ return handle_.GetPlatformHandle();
+ }
+
private:
+ friend class SharedMemoryHooks;
+
explicit ReadOnlySharedMemoryRegion(
subtle::PlatformSharedMemoryRegion handle);
- subtle::PlatformSharedMemoryRegion handle_;
+ static void set_create_hook(CreateFunction* hook) { create_hook_ = hook; }
- DISALLOW_COPY_AND_ASSIGN(ReadOnlySharedMemoryRegion);
+ static CreateFunction* create_hook_;
+
+ subtle::PlatformSharedMemoryRegion handle_;
};
-#endif
// Helper struct for return value of ReadOnlySharedMemoryRegion::Create().
struct MappedReadOnlyRegion {
@@ -123,7 +139,7 @@
// Helper function to check return value of
// ReadOnlySharedMemoryRegion::Create(). |region| and |mapping| either both
// valid or invalid.
- bool IsValid() {
+ bool IsValid() const {
DCHECK_EQ(region.IsValid(), mapping.IsValid());
return region.IsValid() && mapping.IsValid();
}
@@ -131,5 +147,4 @@
} // namespace base
-#endif // !defined(STARBOARD)
#endif // BASE_MEMORY_READ_ONLY_SHARED_MEMORY_REGION_H_
diff --git a/base/memory/ref_counted.cc b/base/memory/ref_counted.cc
index f3d35cf..62fafbc 100644
--- a/base/memory/ref_counted.cc
+++ b/base/memory/ref_counted.cc
@@ -1,9 +1,13 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Copyright 2011 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/ref_counted.h"
+#include <limits>
+#include <ostream>
+#include <type_traits>
+
#include "base/threading/thread_collision_warner.h"
namespace base {
@@ -32,19 +36,39 @@
}
#endif
-// This is a security check. In 32-bit-archs, an attacker would run out of
-// address space after allocating at most 2^32 scoped_refptrs. This replicates
-// that boundary for 64-bit-archs.
+// For security and correctness, we check the arithmetic on ref counts.
+//
+// In an attempt to avoid binary bloat (from inlining the `CHECK`), we define
+// these functions out-of-line. However, compilers are wily. Further testing may
+// show that `NOINLINE` helps or hurts.
+//
#if defined(ARCH_CPU_64_BITS)
void RefCountedBase::AddRefImpl() const {
- // Check if |ref_count_| overflow only on 64 bit archs since the number of
- // objects may exceed 2^32.
- // To avoid the binary size bloat, use non-inline function here.
- CHECK(++ref_count_ > 0);
+ // An attacker could induce use-after-free bugs, and potentially exploit them,
+ // by creating so many references to a ref-counted object that the reference
+ // count overflows. On 32-bit architectures, there is not enough address space
+ // to succeed. But on 64-bit architectures, it might indeed be possible.
+ // Therefore, we can elide the check for arithmetic overflow on 32-bit, but we
+ // must check on 64-bit.
+ //
+ // Make sure the addition didn't wrap back around to 0. This form of check
+ // works because we assert that `ref_count_` is an unsigned integer type.
+ CHECK(++ref_count_ != 0);
+}
+
+void RefCountedBase::ReleaseImpl() const {
+ // Make sure the subtraction didn't wrap back around from 0 to the max value.
+ // That could cause memory leaks, and may induce application-semantic
+ // correctness or safety bugs. (E.g. what if we really needed that object to
+ // be destroyed at the right time?)
+ //
+ // Note that unlike with overflow, underflow could also happen on 32-bit
+ // architectures. Arguably, we should do this check on 32-bit machines too.
+ CHECK(--ref_count_ != std::numeric_limits<decltype(ref_count_)>::max());
}
#endif
-#if !defined(ARCH_CPU_X86_FAMILY)
+#if !(defined(ARCH_CPU_X86_FAMILY) || defined(__ARM_FEATURE_ATOMICS))
bool RefCountedThreadSafeBase::Release() const {
return ReleaseImpl();
}
diff --git a/base/memory/ref_counted.h b/base/memory/ref_counted.h
index d4d71f8..f70e307 100644
--- a/base/memory/ref_counted.h
+++ b/base/memory/ref_counted.h
@@ -1,29 +1,38 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_REF_COUNTED_H_
#define BASE_MEMORY_REF_COUNTED_H_
+#include <stddef.h>
+
+#include <limits>
#include <utility>
#include "base/atomic_ref_count.h"
#include "base/base_export.h"
+#include "base/check.h"
+#include "base/check_op.h"
#include "base/compiler_specific.h"
+#include "base/dcheck_is_on.h"
+// TODO(dcheng): Remove this separately.
#include "base/gtest_prod_util.h"
-#include "base/logging.h"
-#include "base/macros.h"
#include "base/memory/scoped_refptr.h"
#include "base/sequence_checker.h"
+#include "base/template_util.h"
#include "base/threading/thread_collision_warner.h"
#include "build/build_config.h"
-#include "starboard/types.h"
+#include "third_party/abseil-cpp/absl/utility/utility.h"
namespace base {
namespace subtle {
class BASE_EXPORT RefCountedBase {
public:
+ RefCountedBase(const RefCountedBase&) = delete;
+ RefCountedBase& operator=(const RefCountedBase&) = delete;
+
bool HasOneRef() const { return ref_count_ == 1; }
bool HasAtLeastOneRef() const { return ref_count_ >= 1; }
@@ -43,21 +52,18 @@
~RefCountedBase() {
#if DCHECK_IS_ON()
- DCHECK(in_dtor_) << "RefCounted object deleted without calling Release()";
+ // RefCounted object deleted without calling Release()
+ DCHECK(in_dtor_);
#endif
}
void AddRef() const {
- // TODO(maruel): Add back once it doesn't assert 500 times/sec.
- // Current thread books the critical section "AddRelease"
- // without release it.
- // DFAKE_SCOPED_LOCK_THREAD_LOCKED(add_release_);
#if DCHECK_IS_ON()
DCHECK(!in_dtor_);
- DCHECK(!needs_adopt_ref_)
- << "This RefCounted object is created with non-zero reference count."
- << " The first reference to such a object has to be made by AdoptRef or"
- << " MakeRefCounted.";
+ // This RefCounted object is created with non-zero reference count.
 + // The first reference to such an object has to be made by AdoptRef or
+ // MakeRefCounted.
+ DCHECK(!needs_adopt_ref_);
if (ref_count_ >= 1) {
DCHECK(CalledOnValidSequence());
}
@@ -68,12 +74,7 @@
// Returns true if the object should self-delete.
bool Release() const {
- --ref_count_;
-
- // TODO(maruel): Add back once it doesn't assert 500 times/sec.
- // Current thread books the critical section "AddRelease"
- // without release it.
- // DFAKE_SCOPED_LOCK_THREAD_LOCKED(add_release_);
+ ReleaseImpl();
#if DCHECK_IS_ON()
DCHECK(!in_dtor_);
@@ -114,7 +115,7 @@
template <typename U>
friend scoped_refptr<U> base::AdoptRef(U*);
- FRIEND_TEST_ALL_PREFIXES(RefCountedDeathTest, TestOverflowCheck);
+ friend class RefCountedOverflowTest;
void Adopted() const {
#if DCHECK_IS_ON()
@@ -125,8 +126,10 @@
#if defined(ARCH_CPU_64_BITS)
void AddRefImpl() const;
+ void ReleaseImpl() const;
#else
void AddRefImpl() const { ++ref_count_; }
+ void ReleaseImpl() const { --ref_count_; }
#endif
#if DCHECK_IS_ON()
@@ -134,6 +137,8 @@
#endif
mutable uint32_t ref_count_ = 0;
+ static_assert(std::is_unsigned<decltype(ref_count_)>::value,
+ "ref_count_ must be an unsigned type.");
#if DCHECK_IS_ON()
mutable bool needs_adopt_ref_ = false;
@@ -142,12 +147,13 @@
#endif
DFAKE_MUTEX(add_release_);
-
- DISALLOW_COPY_AND_ASSIGN(RefCountedBase);
};
class BASE_EXPORT RefCountedThreadSafeBase {
public:
+ RefCountedThreadSafeBase(const RefCountedThreadSafeBase&) = delete;
+ RefCountedThreadSafeBase& operator=(const RefCountedThreadSafeBase&) = delete;
+
bool HasOneRef() const;
bool HasAtLeastOneRef() const;
@@ -167,9 +173,15 @@
#endif
// Release and AddRef are suitable for inlining on X86 because they generate
-// very small code sequences. On other platforms (ARM), it causes a size
-// regression and is probably not worth it.
-#if defined(ARCH_CPU_X86_FAMILY)
+// very small code sequences.
+//
+// ARM64 devices supporting ARMv8.1-A atomic instructions generate very little
+// code, e.g. fetch_add() with acquire ordering is a single instruction (ldadd),
+// vs LL/SC in previous ARM architectures. Inline it there as well.
+//
+// On other platforms (e.g. ARM), it causes a size regression and is probably
+// not worth it.
+#if defined(ARCH_CPU_X86_FAMILY) || defined(__ARM_FEATURE_ATOMICS)
// Returns true if the object should self-delete.
bool Release() const { return ReleaseImpl(); }
void AddRef() const { AddRefImpl(); }
@@ -185,6 +197,8 @@
template <typename U>
friend scoped_refptr<U> base::AdoptRef(U*);
+ friend class RefCountedOverflowTest;
+
void Adopted() const {
#if DCHECK_IS_ON()
DCHECK(needs_adopt_ref_);
@@ -195,23 +209,25 @@
ALWAYS_INLINE void AddRefImpl() const {
#if DCHECK_IS_ON()
DCHECK(!in_dtor_);
- DCHECK(!needs_adopt_ref_)
- << "This RefCounted object is created with non-zero reference count."
- << " The first reference to such a object has to be made by AdoptRef or"
- << " MakeRefCounted.";
+ // This RefCounted object is created with non-zero reference count.
 + // The first reference to such an object has to be made by AdoptRef or
+ // MakeRefCounted.
+ DCHECK(!needs_adopt_ref_);
#endif
- ref_count_.Increment();
+ CHECK_NE(ref_count_.Increment(), std::numeric_limits<int>::max());
}
ALWAYS_INLINE void AddRefWithCheckImpl() const {
#if DCHECK_IS_ON()
DCHECK(!in_dtor_);
- DCHECK(!needs_adopt_ref_)
- << "This RefCounted object is created with non-zero reference count."
- << " The first reference to such a object has to be made by AdoptRef or"
- << " MakeRefCounted.";
+ // This RefCounted object is created with non-zero reference count.
 + // The first reference to such an object has to be made by AdoptRef or
+ // MakeRefCounted.
+ DCHECK(!needs_adopt_ref_);
#endif
- CHECK(ref_count_.Increment() > 0);
+ int pre_increment_count = ref_count_.Increment();
+ CHECK_GT(pre_increment_count, 0);
+ CHECK_NE(pre_increment_count, std::numeric_limits<int>::max());
}
ALWAYS_INLINE bool ReleaseImpl() const {
@@ -233,8 +249,6 @@
mutable bool needs_adopt_ref_ = false;
mutable bool in_dtor_ = false;
#endif
-
- DISALLOW_COPY_AND_ASSIGN(RefCountedThreadSafeBase);
};
} // namespace subtle
@@ -272,9 +286,11 @@
// ~MyFoo();
// };
//
-// You should always make your destructor non-public, to avoid any code deleting
-// the object accidently while there are references to it.
-//
+// Usage Notes:
+// 1. You should always make your destructor non-public, to avoid any code
+// deleting the object accidentally while there are references to it.
+// 2. You should always make the ref-counted base class a friend of your class,
+// so that it can access the destructor.
//
// The ref count manipulation to RefCounted is NOT thread safe and has DCHECKs
// to trap unsafe cross thread usage. A subclass instance of RefCounted can be
@@ -304,9 +320,8 @@
// And start-from-one ref count is a step to merge WTF::RefCounted into
// base::RefCounted.
//
-#define REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE() \
- static constexpr ::base::subtle::StartRefCountFromOneTag \
- kRefCountPreference = ::base::subtle::kStartRefCountFromOneTag
+#define REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE() \
+ using RefCountPreferenceTag = ::base::subtle::StartRefCountFromOneTag
template <class T, typename Traits>
class RefCounted;
@@ -321,10 +336,12 @@
template <class T, typename Traits = DefaultRefCountedTraits<T>>
class RefCounted : public subtle::RefCountedBase {
public:
- static constexpr subtle::StartRefCountFromZeroTag kRefCountPreference =
- subtle::kStartRefCountFromZeroTag;
+ using RefCountPreferenceTag = subtle::StartRefCountFromZeroTag;
- RefCounted() : subtle::RefCountedBase(T::kRefCountPreference) {}
+ RefCounted() : subtle::RefCountedBase(subtle::GetRefCountPreference<T>()) {}
+
+ RefCounted(const RefCounted&) = delete;
+ RefCounted& operator=(const RefCounted&) = delete;
void AddRef() const {
subtle::RefCountedBase::AddRef();
@@ -350,8 +367,6 @@
static void DeleteInternal(const U* x) {
delete x;
}
-
- DISALLOW_COPY_AND_ASSIGN(RefCounted);
};
// Forward declaration.
@@ -388,13 +403,15 @@
template <class T, typename Traits = DefaultRefCountedThreadSafeTraits<T> >
class RefCountedThreadSafe : public subtle::RefCountedThreadSafeBase {
public:
- static constexpr subtle::StartRefCountFromZeroTag kRefCountPreference =
- subtle::kStartRefCountFromZeroTag;
+ using RefCountPreferenceTag = subtle::StartRefCountFromZeroTag;
explicit RefCountedThreadSafe()
- : subtle::RefCountedThreadSafeBase(T::kRefCountPreference) {}
+ : subtle::RefCountedThreadSafeBase(subtle::GetRefCountPreference<T>()) {}
- void AddRef() const { AddRefImpl(T::kRefCountPreference); }
+ RefCountedThreadSafe(const RefCountedThreadSafe&) = delete;
+ RefCountedThreadSafe& operator=(const RefCountedThreadSafe&) = delete;
+
+ void AddRef() const { AddRefImpl(subtle::GetRefCountPreference<T>()); }
void Release() const {
if (subtle::RefCountedThreadSafeBase::Release()) {
@@ -420,8 +437,6 @@
void AddRefImpl(subtle::StartRefCountFromOneTag) const {
subtle::RefCountedThreadSafeBase::AddRefWithCheck();
}
-
- DISALLOW_COPY_AND_ASSIGN(RefCountedThreadSafe);
};
//
@@ -435,6 +450,9 @@
RefCountedData() : data() {}
RefCountedData(const T& in_value) : data(in_value) {}
RefCountedData(T&& in_value) : data(std::move(in_value)) {}
+ template <typename... Args>
+ explicit RefCountedData(absl::in_place_t, Args&&... args)
+ : data(std::forward<Args>(args)...) {}
T data;
@@ -443,6 +461,16 @@
~RefCountedData() = default;
};
+template <typename T>
+bool operator==(const RefCountedData<T>& lhs, const RefCountedData<T>& rhs) {
+ return lhs.data == rhs.data;
+}
+
+template <typename T>
+bool operator!=(const RefCountedData<T>& lhs, const RefCountedData<T>& rhs) {
+ return !(lhs == rhs);
+}
+
} // namespace base
#endif // BASE_MEMORY_REF_COUNTED_H_
diff --git a/base/memory/ref_counted_delete_on_sequence.h b/base/memory/ref_counted_delete_on_sequence.h
index 4a8ac74..94c4266 100644
--- a/base/memory/ref_counted_delete_on_sequence.h
+++ b/base/memory/ref_counted_delete_on_sequence.h
@@ -1,4 +1,4 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -7,11 +7,10 @@
#include <utility>
+#include "base/check.h"
#include "base/location.h"
-#include "base/logging.h"
-#include "base/macros.h"
#include "base/memory/ref_counted.h"
-#include "base/sequenced_task_runner.h"
+#include "base/task/sequenced_task_runner.h"
namespace base {
@@ -33,19 +32,22 @@
template <class T>
class RefCountedDeleteOnSequence : public subtle::RefCountedThreadSafeBase {
public:
- static constexpr subtle::StartRefCountFromZeroTag kRefCountPreference =
- subtle::kStartRefCountFromZeroTag;
+ using RefCountPreferenceTag = subtle::StartRefCountFromZeroTag;
// A SequencedTaskRunner for the current sequence can be acquired by calling
- // SequencedTaskRunnerHandle::Get().
- RefCountedDeleteOnSequence(
+ // SequencedTaskRunner::GetCurrentDefault().
+ explicit RefCountedDeleteOnSequence(
scoped_refptr<SequencedTaskRunner> owning_task_runner)
- : subtle::RefCountedThreadSafeBase(T::kRefCountPreference),
+ : subtle::RefCountedThreadSafeBase(subtle::GetRefCountPreference<T>()),
owning_task_runner_(std::move(owning_task_runner)) {
DCHECK(owning_task_runner_);
}
- void AddRef() const { AddRefImpl(T::kRefCountPreference); }
+ RefCountedDeleteOnSequence(const RefCountedDeleteOnSequence&) = delete;
+ RefCountedDeleteOnSequence& operator=(const RefCountedDeleteOnSequence&) =
+ delete;
+
+ void AddRef() const { AddRefImpl(subtle::GetRefCountPreference<T>()); }
void Release() const {
if (subtle::RefCountedThreadSafeBase::Release())
@@ -81,8 +83,6 @@
}
const scoped_refptr<SequencedTaskRunner> owning_task_runner_;
-
- DISALLOW_COPY_AND_ASSIGN(RefCountedDeleteOnSequence);
};
} // namespace base
diff --git a/base/memory/ref_counted_memory.cc b/base/memory/ref_counted_memory.cc
index c3058a1..b43e509 100644
--- a/base/memory/ref_counted_memory.cc
+++ b/base/memory/ref_counted_memory.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -6,16 +6,15 @@
#include <utility>
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/memory/read_only_shared_memory_region.h"
-#include "starboard/memory.h"
namespace base {
bool RefCountedMemory::Equals(
const scoped_refptr<RefCountedMemory>& other) const {
return other.get() && size() == other->size() &&
- (memcmp(front(), other->front(), size()) == 0);
+ (size() == 0 || (memcmp(front(), other->front(), size()) == 0));
}
RefCountedMemory::RefCountedMemory() = default;
@@ -35,8 +34,10 @@
RefCountedBytes::RefCountedBytes() = default;
RefCountedBytes::RefCountedBytes(const std::vector<unsigned char>& initializer)
- : data_(initializer) {
-}
+ : data_(initializer) {}
+
+RefCountedBytes::RefCountedBytes(base::span<const unsigned char> initializer)
+ : data_(initializer.begin(), initializer.end()) {}
RefCountedBytes::RefCountedBytes(const unsigned char* p, size_t size)
: data_(p, p + size) {}
@@ -66,13 +67,7 @@
RefCountedString::~RefCountedString() = default;
-// static
-scoped_refptr<RefCountedString> RefCountedString::TakeString(
- std::string* to_destroy) {
- auto self = MakeRefCounted<RefCountedString>();
- to_destroy->swap(self->data_);
- return self;
-}
+RefCountedString::RefCountedString(std::string str) : data_(std::move(str)) {}
const unsigned char* RefCountedString::front() const {
return data_.empty() ? nullptr
@@ -83,26 +78,19 @@
return data_.size();
}
-// Cobalt does not support multiple process and shared memory.
-#if !defined(STARBOARD)
-RefCountedSharedMemory::RefCountedSharedMemory(
- std::unique_ptr<SharedMemory> shm,
- size_t size)
- : shm_(std::move(shm)), size_(size) {
- DCHECK(shm_);
- DCHECK(shm_->memory());
- DCHECK_GT(size_, 0U);
- DCHECK_LE(size_, shm_->mapped_size());
+RefCountedString16::RefCountedString16() = default;
+
+RefCountedString16::~RefCountedString16() = default;
+
+RefCountedString16::RefCountedString16(std::u16string str)
+ : data_(std::move(str)) {}
+
+const unsigned char* RefCountedString16::front() const {
+ return reinterpret_cast<const unsigned char*>(data_.data());
}
-RefCountedSharedMemory::~RefCountedSharedMemory() = default;
-
-const unsigned char* RefCountedSharedMemory::front() const {
- return static_cast<const unsigned char*>(shm_->memory());
-}
-
-size_t RefCountedSharedMemory::size() const {
- return size_;
+size_t RefCountedString16::size() const {
+ return data_.size() * sizeof(char16_t);
}
RefCountedSharedMemoryMapping::RefCountedSharedMemoryMapping(
@@ -130,6 +118,5 @@
return nullptr;
return MakeRefCounted<RefCountedSharedMemoryMapping>(std::move(mapping));
}
-#endif // !defined(STARBOARD)
} // namespace base
diff --git a/base/memory/ref_counted_memory.h b/base/memory/ref_counted_memory.h
index dc09d0c..0621967 100644
--- a/base/memory/ref_counted_memory.h
+++ b/base/memory/ref_counted_memory.h
@@ -1,27 +1,24 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_REF_COUNTED_MEMORY_H_
#define BASE_MEMORY_REF_COUNTED_MEMORY_H_
+#include <stddef.h>
+
#include <memory>
#include <string>
#include <vector>
#include "base/base_export.h"
-#include "base/macros.h"
+#include "base/containers/span.h"
#include "base/memory/ref_counted.h"
-#include "base/memory/shared_memory.h"
#include "base/memory/shared_memory_mapping.h"
-#include "starboard/types.h"
namespace base {
-// Cobalt does not support multiple process and shared memory.
-#if !defined(STARBOARD)
class ReadOnlySharedMemoryRegion;
-#endif
// A generic interface to memory. This object is reference counted because most
// of its subclasses own the data they carry, and this interface needs to
@@ -44,6 +41,10 @@
return reinterpret_cast<const T*>(front());
}
+ // Alias for front() to make it possible for RefCountedMemory to implicitly
+ // convert to span.
+ const unsigned char* data() const { return front(); }
+
protected:
friend class RefCountedThreadSafe<RefCountedMemory>;
RefCountedMemory();
@@ -59,6 +60,9 @@
: data_(static_cast<const unsigned char*>(length ? data : nullptr)),
length_(length) {}
+ RefCountedStaticMemory(const RefCountedStaticMemory&) = delete;
+ RefCountedStaticMemory& operator=(const RefCountedStaticMemory&) = delete;
+
// RefCountedMemory:
const unsigned char* front() const override;
size_t size() const override;
@@ -68,8 +72,6 @@
const unsigned char* data_;
size_t length_;
-
- DISALLOW_COPY_AND_ASSIGN(RefCountedStaticMemory);
};
// An implementation of RefCountedMemory, where the data is stored in a STL
@@ -80,6 +82,7 @@
// Constructs a RefCountedBytes object by copying from |initializer|.
explicit RefCountedBytes(const std::vector<unsigned char>& initializer);
+ explicit RefCountedBytes(base::span<const unsigned char> initializer);
// Constructs a RefCountedBytes object by copying |size| bytes from |p|.
RefCountedBytes(const unsigned char* p, size_t size);
@@ -88,6 +91,9 @@
// |size| bytes.
explicit RefCountedBytes(size_t size);
+ RefCountedBytes(const RefCountedBytes&) = delete;
+ RefCountedBytes& operator=(const RefCountedBytes&) = delete;
+
// Constructs a RefCountedBytes object by performing a swap. (To non
// destructively build a RefCountedBytes, use the constructor that takes a
// vector.)
@@ -113,8 +119,6 @@
~RefCountedBytes() override;
std::vector<unsigned char> data_;
-
- DISALLOW_COPY_AND_ASSIGN(RefCountedBytes);
};
// An implementation of RefCountedMemory, where the bytes are stored in a STL
@@ -122,11 +126,10 @@
class BASE_EXPORT RefCountedString : public RefCountedMemory {
public:
RefCountedString();
+ explicit RefCountedString(std::string value);
- // Constructs a RefCountedString object by performing a swap. (To non
- // destructively build a RefCountedString, use the default constructor and
- // copy into object->data()).
- static scoped_refptr<RefCountedString> TakeString(std::string* to_destroy);
+ RefCountedString(const RefCountedString&) = delete;
+ RefCountedString& operator=(const RefCountedString&) = delete;
// RefCountedMemory:
const unsigned char* front() const override;
@@ -139,35 +142,29 @@
~RefCountedString() override;
std::string data_;
-
- DISALLOW_COPY_AND_ASSIGN(RefCountedString);
};
-// Starboard doesn't curretly support multiple processes or shared memory.
-#if !defined(STARBOARD)
-// An implementation of RefCountedMemory, where the bytes are stored in
-// SharedMemory.
-class BASE_EXPORT RefCountedSharedMemory : public RefCountedMemory {
+// An implementation of RefCountedMemory, where the bytes are stored in a
+// std::u16string.
+class BASE_EXPORT RefCountedString16 : public base::RefCountedMemory {
public:
- // Constructs a RefCountedMemory object by taking ownership of an already
- // mapped SharedMemory object.
- RefCountedSharedMemory(std::unique_ptr<SharedMemory> shm, size_t size);
+ RefCountedString16();
+ explicit RefCountedString16(std::u16string value);
+
+ RefCountedString16(const RefCountedString16&) = delete;
+ RefCountedString16& operator=(const RefCountedString16&) = delete;
// RefCountedMemory:
const unsigned char* front() const override;
size_t size() const override;
+ protected:
+ ~RefCountedString16() override;
+
private:
- ~RefCountedSharedMemory() override;
-
- const std::unique_ptr<SharedMemory> shm_;
- const size_t size_;
-
- DISALLOW_COPY_AND_ASSIGN(RefCountedSharedMemory);
+ std::u16string data_;
};
-// Cobalt does not support multiple process and shared memory.
-#if !defined(STARBOARD)
// An implementation of RefCountedMemory, where the bytes are stored in
// ReadOnlySharedMemoryMapping.
class BASE_EXPORT RefCountedSharedMemoryMapping : public RefCountedMemory {
@@ -176,6 +173,10 @@
// mapped ReadOnlySharedMemoryMapping object.
explicit RefCountedSharedMemoryMapping(ReadOnlySharedMemoryMapping mapping);
+ RefCountedSharedMemoryMapping(const RefCountedSharedMemoryMapping&) = delete;
+ RefCountedSharedMemoryMapping& operator=(
+ const RefCountedSharedMemoryMapping&) = delete;
+
// Convenience method to map all of |region| and take ownership of the
// mapping. Returns an empty scoped_refptr if the map operation fails.
static scoped_refptr<RefCountedSharedMemoryMapping> CreateFromWholeRegion(
@@ -190,11 +191,7 @@
const ReadOnlySharedMemoryMapping mapping_;
const size_t size_;
-
- DISALLOW_COPY_AND_ASSIGN(RefCountedSharedMemoryMapping);
};
-#endif // !defined(STARBOARD)
-#endif
} // namespace base
diff --git a/base/memory/ref_counted_memory_unittest.cc b/base/memory/ref_counted_memory_unittest.cc
index 62dc44a..f347249 100644
--- a/base/memory/ref_counted_memory_unittest.cc
+++ b/base/memory/ref_counted_memory_unittest.cc
@@ -1,14 +1,15 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Copyright 2011 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/ref_counted_memory.h"
+#include <stdint.h>
+
#include <utility>
+#include "base/containers/span.h"
#include "base/memory/read_only_shared_memory_region.h"
-#include "starboard/memory.h"
-#include "starboard/types.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -17,11 +18,22 @@
namespace base {
+namespace {
+
+void ConvertToByteSpanAndCheckSize(span<const uint8_t> data,
+ size_t expected_size) {
+ EXPECT_EQ(expected_size, data.size());
+}
+
+} // namespace
+
TEST(RefCountedMemoryUnitTest, RefCountedStaticMemory) {
auto mem = MakeRefCounted<RefCountedStaticMemory>("static mem00", 10);
EXPECT_EQ(10U, mem->size());
EXPECT_EQ("static mem", std::string(mem->front_as<char>(), mem->size()));
+
+ ConvertToByteSpanAndCheckSize(*mem, 10);
}
TEST(RefCountedMemoryUnitTest, RefCountedBytes) {
@@ -39,16 +51,18 @@
scoped_refptr<RefCountedMemory> mem2;
{
const unsigned char kData[] = {12, 11, 99};
- mem2 = MakeRefCounted<RefCountedBytes>(kData, arraysize(kData));
+ mem2 = MakeRefCounted<RefCountedBytes>(kData, std::size(kData));
}
ASSERT_EQ(3U, mem2->size());
EXPECT_EQ(12U, mem2->front()[0]);
EXPECT_EQ(11U, mem2->front()[1]);
EXPECT_EQ(99U, mem2->front()[2]);
+
+ ConvertToByteSpanAndCheckSize(*mem2, 3);
}
TEST(RefCountedMemoryUnitTest, RefCountedBytesMutable) {
- auto mem = base::MakeRefCounted<RefCountedBytes>(10);
+ auto mem = MakeRefCounted<RefCountedBytes>(10);
ASSERT_EQ(10U, mem->size());
EXPECT_THAT(mem->data(), Each(0U));
@@ -62,69 +76,20 @@
}
TEST(RefCountedMemoryUnitTest, RefCountedString) {
- std::string s("destroy me");
- scoped_refptr<RefCountedMemory> mem = RefCountedString::TakeString(&s);
-
- EXPECT_EQ(0U, s.size());
+ scoped_refptr<RefCountedMemory> mem =
+ base::MakeRefCounted<base::RefCountedString>(std::string("destroy me"));
ASSERT_EQ(10U, mem->size());
EXPECT_EQ('d', mem->front()[0]);
EXPECT_EQ('e', mem->front()[1]);
EXPECT_EQ('e', mem->front()[9]);
+
+ ConvertToByteSpanAndCheckSize(*mem, 10);
}
-#if !defined(STARBOARD)
-TEST(RefCountedMemoryUnitTest, RefCountedSharedMemory) {
- static const char kData[] = "shm_dummy_data";
- auto shm = std::make_unique<SharedMemory>();
- ASSERT_TRUE(shm->CreateAndMapAnonymous(sizeof(kData)));
- memcpy(shm->memory(), kData, sizeof(kData));
-
- auto mem =
- MakeRefCounted<RefCountedSharedMemory>(std::move(shm), sizeof(kData));
- ASSERT_EQ(sizeof(kData), mem->size());
- EXPECT_EQ('s', mem->front()[0]);
- EXPECT_EQ('h', mem->front()[1]);
- EXPECT_EQ('_', mem->front()[9]);
-}
-
-TEST(RefCountedMemoryUnitTest, RefCountedSharedMemoryMapping) {
- static const char kData[] = "mem_region_dummy_data";
- scoped_refptr<RefCountedSharedMemoryMapping> mem;
- {
- MappedReadOnlyRegion region =
- ReadOnlySharedMemoryRegion::Create(sizeof(kData));
- ReadOnlySharedMemoryMapping ro_mapping = region.region.Map();
- WritableSharedMemoryMapping rw_mapping = std::move(region.mapping);
- ASSERT_TRUE(rw_mapping.IsValid());
- memcpy(rw_mapping.memory(), kData, sizeof(kData));
- mem = MakeRefCounted<RefCountedSharedMemoryMapping>(std::move(ro_mapping));
- }
-
- ASSERT_LE(sizeof(kData), mem->size());
- EXPECT_EQ('e', mem->front()[1]);
- EXPECT_EQ('m', mem->front()[2]);
- EXPECT_EQ('o', mem->front()[8]);
-
- {
- MappedReadOnlyRegion region =
- ReadOnlySharedMemoryRegion::Create(sizeof(kData));
- WritableSharedMemoryMapping rw_mapping = std::move(region.mapping);
- ASSERT_TRUE(rw_mapping.IsValid());
- memcpy(rw_mapping.memory(), kData, sizeof(kData));
- mem = RefCountedSharedMemoryMapping::CreateFromWholeRegion(region.region);
- }
-
- ASSERT_LE(sizeof(kData), mem->size());
- EXPECT_EQ('_', mem->front()[3]);
- EXPECT_EQ('r', mem->front()[4]);
- EXPECT_EQ('i', mem->front()[7]);
-}
-#endif
-
TEST(RefCountedMemoryUnitTest, Equals) {
- std::string s1("same");
- scoped_refptr<RefCountedMemory> mem1 = RefCountedString::TakeString(&s1);
+ scoped_refptr<RefCountedMemory> mem1 =
+ base::MakeRefCounted<base::RefCountedString>(std::string("same"));
std::vector<unsigned char> d2 = {'s', 'a', 'm', 'e'};
scoped_refptr<RefCountedMemory> mem2 = RefCountedBytes::TakeVector(&d2);
@@ -132,7 +97,8 @@
EXPECT_TRUE(mem1->Equals(mem2));
std::string s3("diff");
- scoped_refptr<RefCountedMemory> mem3 = RefCountedString::TakeString(&s3);
+ scoped_refptr<RefCountedMemory> mem3 =
+ base::MakeRefCounted<base::RefCountedString>(std::move(s3));
EXPECT_FALSE(mem1->Equals(mem3));
EXPECT_FALSE(mem2->Equals(mem3));
@@ -140,7 +106,8 @@
TEST(RefCountedMemoryUnitTest, EqualsNull) {
std::string s("str");
- scoped_refptr<RefCountedMemory> mem = RefCountedString::TakeString(&s);
+ scoped_refptr<RefCountedMemory> mem =
+ base::MakeRefCounted<base::RefCountedString>(std::move(s));
EXPECT_FALSE(mem->Equals(nullptr));
}
diff --git a/base/memory/ref_counted_unittest.cc b/base/memory/ref_counted_unittest.cc
index ff52d18..bde7610 100644
--- a/base/memory/ref_counted_unittest.cc
+++ b/base/memory/ref_counted_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -7,6 +7,7 @@
#include <type_traits>
#include <utility>
+#include "base/memory/raw_ptr.h"
#include "base/test/gtest_util.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -173,16 +174,7 @@
private:
friend class base::RefCounted<CheckRefptrNull>;
- scoped_refptr<CheckRefptrNull>* ptr_ = nullptr;
-};
-
-class Overflow : public base::RefCounted<Overflow> {
- public:
- Overflow() = default;
-
- private:
- friend class base::RefCounted<Overflow>;
- ~Overflow() = default;
+ raw_ptr<scoped_refptr<CheckRefptrNull>> ptr_ = nullptr;
};
} // namespace
@@ -655,6 +647,15 @@
EXPECT_EQ(obj.get(), nullptr);
}
+TEST(RefCountedUnitTest, TestResetByNullptrAssignment) {
+ // Check that assigning nullptr resets the object.
+ auto obj = base::MakeRefCounted<ScopedRefPtrCountBase>();
+ EXPECT_NE(obj.get(), nullptr);
+
+ obj = nullptr;
+ EXPECT_EQ(obj.get(), nullptr);
+}
+
TEST(RefCountedUnitTest, CheckScopedRefptrNullBeforeObjectDestruction) {
scoped_refptr<CheckRefptrNull> obj = base::MakeRefCounted<CheckRefptrNull>();
obj->set_scoped_refptr(&obj);
@@ -683,12 +684,88 @@
}
#if defined(ARCH_CPU_64_BITS)
-TEST(RefCountedDeathTest, TestOverflowCheck) {
- EXPECT_DCHECK_DEATH({
- auto p = base::MakeRefCounted<Overflow>();
- p->ref_count_ = std::numeric_limits<uint32_t>::max();
- p->AddRef();
- });
+class RefCountedOverflowTest : public ::testing::Test {
+ public:
+ static uint32_t& GetMutableRefCount(RefCountedBase* ref_counted) {
+ return ref_counted->ref_count_;
+ }
+
+ static std::atomic_int& GetMutableRefCount(
+ RefCountedThreadSafeBase* ref_counted) {
+ return ref_counted->ref_count_.ref_count_;
+ }
+};
+
+TEST_F(RefCountedOverflowTest, NonThreadSafeStartFromZero) {
+ class Overflow : public base::RefCounted<Overflow> {
+ public:
+ Overflow() { EXPECT_FALSE(HasOneRef()); }
+
+ private:
+ friend class base::RefCounted<Overflow>;
+ ~Overflow() = default;
+ };
+
+ auto p = base::MakeRefCounted<Overflow>();
+ GetMutableRefCount(p.get()) = std::numeric_limits<uint32_t>::max();
+ EXPECT_CHECK_DEATH(p->AddRef());
+ // Ensure `p` doesn't leak and fail lsan builds.
+ GetMutableRefCount(p.get()) = 1;
+}
+
+TEST_F(RefCountedOverflowTest, NonThreadSafeStartFromOne) {
+ class Overflow : public base::RefCounted<Overflow> {
+ public:
+ REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE();
+
+ Overflow() { EXPECT_TRUE(HasOneRef()); }
+
+ private:
+ friend class base::RefCounted<Overflow>;
+ ~Overflow() = default;
+ };
+
+ auto p = base::MakeRefCounted<Overflow>();
+ GetMutableRefCount(p.get()) = std::numeric_limits<uint32_t>::max();
+ EXPECT_CHECK_DEATH(p->AddRef());
+ // Ensure `p` doesn't leak and fail lsan builds.
+ GetMutableRefCount(p.get()) = 1;
+}
+
+TEST_F(RefCountedOverflowTest, ThreadSafeStartFromZero) {
+ class Overflow : public base::RefCountedThreadSafe<Overflow> {
+ public:
+ Overflow() { EXPECT_FALSE(HasOneRef()); }
+
+ private:
+ friend class base::RefCountedThreadSafe<Overflow>;
+ ~Overflow() = default;
+ };
+
+ auto p = base::MakeRefCounted<Overflow>();
+ GetMutableRefCount(p.get()) = std::numeric_limits<int>::max();
+ EXPECT_CHECK_DEATH(p->AddRef());
+ // Ensure `p` doesn't leak and fail lsan builds.
+ GetMutableRefCount(p.get()) = 1;
+}
+
+TEST_F(RefCountedOverflowTest, ThreadSafeStartFromOne) {
+ class Overflow : public base::RefCountedThreadSafe<Overflow> {
+ public:
+ REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE();
+
+ Overflow() { EXPECT_TRUE(HasOneRef()); }
+
+ private:
+ friend class base::RefCountedThreadSafe<Overflow>;
+ ~Overflow() = default;
+ };
+
+ auto p = base::MakeRefCounted<Overflow>();
+ GetMutableRefCount(p.get()) = std::numeric_limits<int>::max();
+ EXPECT_CHECK_DEATH(p->AddRef());
+ // Ensure `p` doesn't leak and fail lsan builds.
+ GetMutableRefCount(p.get()) = 1;
}
#endif
diff --git a/base/memory/ref_counted_unittest.nc b/base/memory/ref_counted_unittest.nc
index b8c371f..d5aa4fa 100644
--- a/base/memory/ref_counted_unittest.nc
+++ b/base/memory/ref_counted_unittest.nc
@@ -1,8 +1,9 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
+// Copyright 2017 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/ref_counted.h"
+#include "base/memory/ref_counted_delete_on_sequence.h"
namespace base {
@@ -14,10 +15,7 @@
~InitialRefCountIsZero() {}
};
-// TODO(hans): Remove .* and update the static_assert expectations once we roll
-// past Clang r313315. https://crbug.com/765692.
-
-#if defined(NCTEST_ADOPT_REF_TO_ZERO_START) // [r"fatal error: static_assert failed .*\"Use AdoptRef only for the reference count starts from one\.\""]
+#if defined(NCTEST_ADOPT_REF_TO_ZERO_START) // [r"fatal error: static assertion failed due to requirement 'std::is_same<base::subtle::StartRefCountFromOneTag, base::subtle::StartRefCountFromZeroTag>::value': Use AdoptRef only if the reference count starts from one\."]
void WontCompile() {
AdoptRef(new InitialRefCountIsZero());
@@ -25,4 +23,125 @@
#endif
+#if defined(NCTEST_WRONG_REFCOUNT_BASE_CLASS) // [r"fatal error: static assertion failed due to requirement 'std::is_base_of_v<base::Foo, base::Bar>': T implements RefCounted<U>, but U is not a base of T\."]
+
+class Foo : public base::RefCounted<Foo> {
+ private:
+ friend class base::RefCounted<Foo>;
+ ~Foo() {}
+};
+
+class Bar : public base::RefCounted<Foo> {
+ private:
+ friend class base::RefCounted<Bar>;
+ ~Bar() {}
+};
+
+void WontCompile() {
+ scoped_refptr<Bar> ptr;
+}
+
+#endif
+
+#if defined(NCTEST_WRONG_REFCOUNT_THREADSAFE_BASE_CLASS) // [r"fatal error: static assertion failed due to requirement 'std::is_base_of_v<base::Foo, base::Bar>': T implements RefCountedThreadSafe<U>, but U is not a base of T\."]
+
+class Foo : public base::RefCountedThreadSafe<Foo> {
+ private:
+ friend class base::RefCountedThreadSafe<Foo>;
+ ~Foo() {}
+};
+
+class Bar : public base::RefCountedThreadSafe<Foo> {
+ private:
+ friend class base::RefCountedThreadSafe<Bar>;
+ ~Bar() {}
+};
+
+void WontCompile() {
+ scoped_refptr<Bar> ptr;
+}
+
+#endif
+
+#if defined(NCTEST_WRONG_REFCOUNT_ON_SEQUENCE_BASE_CLASS) // [r"fatal error: static assertion failed due to requirement 'std::is_base_of_v<base::Foo, base::Bar>': T implements RefCountedDeleteOnSequence<U>, but U is not a base of T\."]
+
+class Foo : public base::RefCountedDeleteOnSequence<Foo> {
+ private:
+ friend class base::RefCountedDeleteOnSequence<Foo>;
+ friend class base::DeleteHelper<Foo>;
+ ~Foo() {}
+};
+
+class Bar : public base::RefCountedDeleteOnSequence<Foo> {
+ private:
+ friend class base::RefCountedDeleteOnSequence<Bar>;
+ friend class base::DeleteHelper<Bar>;
+ ~Bar() {}
+};
+
+void WontCompile() {
+ scoped_refptr<Bar> ptr;
+}
+
+#endif
+
+#if defined(NCTEST_SUBCLASS_OVERRIDES_REFCOUNT_PREFERENCE) // [r"fatal error: static assertion failed due to requirement .*: It's unsafe to override the ref count preference\. Please remove REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE from subclasses\."]
+
+class Base : public base::RefCounted<Base> {
+ protected:
+ friend class base::RefCounted<Base>;
+ ~Base() {}
+};
+
+class Derived : public Base {
+ public:
+ REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE();
+};
+
+void WontCompile() {
+ scoped_refptr<Derived> ptr;
+}
+
+#endif
+
+#if defined(NCTEST_SUBCLASS_OVERRIDES_REFCOUNT_PREFERENCE_THREADSAFE) // [r"fatal error: static assertion failed due to requirement .*: It's unsafe to override the ref count preference\. Please remove REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE from subclasses\."]
+
+class Base : public base::RefCountedThreadSafe<Base> {
+ protected:
+ friend class base::RefCountedThreadSafe<Base>;
+ ~Base() {}
+};
+
+class Derived : public Base {
+ public:
+ REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE();
+};
+
+void WontCompile() {
+ scoped_refptr<Derived> ptr;
+}
+
+#endif
+
+#if defined(NCTEST_SUBCLASS_OVERRIDES_REFCOUNT_PREFERENCE_SEQUENCE) // [r"fatal error: static assertion failed due to requirement .*: It's unsafe to override the ref count preference\. Please remove REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE from subclasses\."]
+
+class Base : public base::RefCountedDeleteOnSequence<Base> {
+ protected:
+ friend class base::RefCountedDeleteOnSequence<Base>;
+ friend class base::DeleteHelper<Base>;
+ ~Base() {}
+};
+
+class Derived : public Base {
+ public:
+ REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE();
+};
+
+void WontCompile() {
+ scoped_refptr<Derived> ptr;
+}
+
+#endif
+
+
} // namespace base
diff --git a/base/memory/rust_cfg_win_unittest.cc b/base/memory/rust_cfg_win_unittest.cc
new file mode 100644
index 0000000..020fde1
--- /dev/null
+++ b/base/memory/rust_cfg_win_unittest.cc
@@ -0,0 +1,31 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <windows.h>
+
+#include "base/command_line.h"
+#include "base/files/file_path.h"
+#include "base/process/launch.h"
+#include "base/process/process.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+// Run a Rust executable that attempts to call a function through an invalid
+// pointer. This triggers a control flow guard exception and exits the process
+// with STATUS_STACK_BUFFER_OVERRUN.
+TEST(RustCfgWin, CfgCatchesInvalidIndirectCall) {
+ base::LaunchOptions o;
+ o.start_hidden = true;
+ // From //build/rust/tests/test_control_flow_guard.
+ base::CommandLine cmd(base::FilePath(
+ FILE_PATH_LITERAL("test_control_flow_guard.exe")));
+ base::Process proc = base::LaunchProcess(cmd, o);
+ int exit_code;
+ EXPECT_TRUE(proc.WaitForExit(&exit_code));
+ const auto u_exit_code = static_cast<unsigned long>(exit_code);
+ EXPECT_EQ(u_exit_code, STATUS_STACK_BUFFER_OVERRUN);
+}
+
+} // namespace
diff --git a/base/memory/safe_ref.h b/base/memory/safe_ref.h
new file mode 100644
index 0000000..258b5b0
--- /dev/null
+++ b/base/memory/safe_ref.h
@@ -0,0 +1,165 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_SAFE_REF_H_
+#define BASE_MEMORY_SAFE_REF_H_
+
+#include "base/check.h"
+#include "base/memory/weak_ptr.h"
+
+#include <utility>
+
+namespace base {
+
+// SafeRef smart pointers are used to represent a non-owning pointer to an
+// object, where the pointer is always intended to be valid. These are useful in
+// the same cases that a raw pointer `T*` (or a `T&`) would traditionally be
+// used, as the owner of the SafeRef knows the lifetime of the pointed-to object
+// from other means and will not use the pointer after the pointed-to object is
+// destroyed. However, unlike a `T*` or `T&`, a logic bug will manifest as a
+// benign crash instead of as a Use-after-Free.
+//
+// SafeRef pointers can not be null (as expressed by the "Ref" suffix instead of
+// "Ptr"). A SafeRef can be wrapped in an absl::optional if it should not always
+// point to something valid. (A SafePtr sibling type can be introduced if this
+// is problematic, or if consuming moves are needed!)
+//
+// If code wants to track the lifetime of the object directly through its
+// pointer, and dynamically handle the case of the pointer outliving the object
+// it points to, then base::WeakPtr should be used instead.
+//
+// The SafeRef pointer is constructed from a base::WeakPtrFactory's GetSafeRef()
+// method. Since it is tied to the base::WeakPtrFactory, it will consider its
+// pointee invalid when the base::WeakPtrFactory is invalidated, in the same way
+// as base::WeakPtr does, including after a call to InvalidateWeakPtrs().
+//
+// THREAD SAFETY: SafeRef pointers (like base::WeakPtr) may only be used on the
+// sequence (or thread) where the associated base::WeakPtrFactory will be
+// invalidated and/or destroyed. They are safe to passively hold or to destroy
+// on any thread though.
+//
+// This class is expected to one day be replaced by a more flexible and safe
+// smart pointer abstraction which is not tied to base::WeakPtrFactory, such as
+// raw_ptr<T> from the MiraclePtr project (though perhaps a non-nullable raw_ref
+// equivalent).
+template <typename T>
+class SafeRef {
+ public:
+ // No default constructor, since there's no null state. Use an optional
+ // SafeRef if the pointer may not be present.
+
+ // Copy construction and assignment.
+ SafeRef(const SafeRef& other) : ref_(other.ref_), ptr_(other.ptr_) {
+ // Avoid use-after-move.
+ CHECK(ref_.IsValid());
+ }
+ SafeRef& operator=(const SafeRef& other) {
+ ref_ = other.ref_;
+ ptr_ = other.ptr_;
+ // Avoid use-after-move.
+ CHECK(ref_.IsValid());
+ return *this;
+ }
+
+ // Move construction and assignment.
+ SafeRef(SafeRef&& other)
+ : ref_(std::move(other.ref_)), ptr_(std::move(other.ptr_)) {
+ // Avoid use-after-move.
+ CHECK(ref_.IsValid());
+ }
+ SafeRef& operator=(SafeRef&& other) {
+ ref_ = std::move(other.ref_);
+ ptr_ = std::move(other.ptr_);
+ // Avoid use-after-move.
+ CHECK(ref_.IsValid());
+ return *this;
+ }
+
+ // Copy conversion from SafeRef<U>.
+ template <typename U,
+ typename = std::enable_if_t<std::is_convertible_v<U*, T*>>>
+ // NOLINTNEXTLINE(google-explicit-constructor)
+ SafeRef(const SafeRef<U>& other)
+ : ref_(other.ref_),
+ ptr_(other.ptr_) // raw_ptr<U> converts to raw_ptr<T>.
+ {
+ // Avoid use-after-move.
+ CHECK(ref_.IsValid());
+ }
+ template <typename U>
+ SafeRef& operator=(const SafeRef<U>& other) {
+ ref_ = other.ref_;
+ ptr_ = other.ptr_; // raw_ptr<U> converts to raw_ptr<T>.
+ // Avoid use-after-move.
+ CHECK(ref_.IsValid());
+ return *this;
+ }
+
+ // Move conversion from SafeRef<U>.
+ template <typename U>
+ // NOLINTNEXTLINE(google-explicit-constructor)
+ SafeRef(SafeRef<U>&& other)
+ : ref_(std::move(other.ref_)),
+ ptr_(std::move(other.ptr_)) // raw_ptr<U> converts to raw_ptr<T>.
+ {
+ // Avoid use-after-move.
+ CHECK(ref_.IsValid());
+ }
+ template <typename U>
+ SafeRef& operator=(SafeRef<U>&& other) {
+ ref_ = std::move(other.ref_);
+ ptr_ = std::move(other.ptr_); // raw_ptr<U> converts to raw_ptr<T>.
+ // Avoid use-after-move.
+ CHECK(ref_.IsValid());
+ return *this;
+ }
+
+ // Provide access to the underlying T as a reference. Will CHECK() if the T
+ // pointee is no longer alive.
+ T& operator*() const {
+ CHECK(ref_.IsValid());
+ return *ptr_;
+ }
+
+ // Used to call methods on the underlying T. Will CHECK() if the T pointee is
+ // no longer alive.
+ T* operator->() const {
+ CHECK(ref_.IsValid());
+ return &*ptr_;
+ }
+
+ private:
+ template <typename U>
+ friend class SafeRef;
+ template <typename U>
+ friend SafeRef<U> internal::MakeSafeRefFromWeakPtrInternals(
+ internal::WeakReference&& ref,
+ U* ptr);
+
+  // Construction from a WeakPtr's internals. Will CHECK() if the WeakPtr
+ // is already invalid.
+ explicit SafeRef(internal::WeakReference&& ref, T* ptr)
+ : ref_(std::move(ref)), ptr_(ptr) {
+ CHECK(ref_.IsValid());
+ }
+
+ internal::WeakReference ref_;
+
+  // This pointer is only valid when ref_.IsValid() is true. Otherwise, its
+ // value is undefined (as opposed to nullptr). Unlike WeakPtr, this raw_ptr is
+ // not allowed to dangle.
+ raw_ptr<T> ptr_;
+};
+
+namespace internal {
+template <typename T>
+SafeRef<T> MakeSafeRefFromWeakPtrInternals(internal::WeakReference&& ref,
+ T* ptr) {
+ return SafeRef<T>(std::move(ref), ptr);
+}
+} // namespace internal
+
+} // namespace base
+
+#endif // BASE_MEMORY_SAFE_REF_H_
diff --git a/base/memory/safe_ref_unittest.cc b/base/memory/safe_ref_unittest.cc
new file mode 100644
index 0000000..e90301d
--- /dev/null
+++ b/base/memory/safe_ref_unittest.cc
@@ -0,0 +1,283 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/safe_ref.h"
+
+#include <utility>
+
+#include "base/functional/bind.h"
+#include "base/functional/callback.h"
+#include "base/memory/raw_ptr_exclusion.h"
+#include "base/memory/weak_ptr.h"
+#include "base/test/gtest_util.h"
+#include "base/test/memory/dangling_ptr_instrumentation.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
+
+namespace base {
+namespace {
+
+struct ReallyBaseClass {};
+struct BaseClass : ReallyBaseClass {
+ virtual ~BaseClass() = default;
+ void VirtualMethod() {}
+};
+struct OtherBaseClass {
+ virtual ~OtherBaseClass() = default;
+ virtual void VirtualMethod() {}
+};
+
+struct WithWeak final : BaseClass, OtherBaseClass {
+ ~WithWeak() final { self = nullptr; }
+
+ void Method() {}
+
+ int i = 1;
+ raw_ptr<WithWeak> self{this};
+ base::WeakPtrFactory<WithWeak> factory{this};
+};
+
+TEST(SafeRefTest, FromWeakPtrFactory) {
+ WithWeak with;
+ SafeRef<WithWeak> safe(with.factory.GetSafeRef());
+}
+
+TEST(SafeRefTest, Operators) {
+ WithWeak with;
+ SafeRef<WithWeak> safe(with.factory.GetSafeRef());
+ // operator->.
+ EXPECT_EQ(safe->self->i, 1); // Will crash if not live.
+ // operator*.
+ EXPECT_EQ((*safe).self->i, 1); // Will crash if not live.
+}
+
+TEST(SafeRefTest, CanCopyAndMove) {
+ WithWeak with;
+ SafeRef<WithWeak> safe(with.factory.GetSafeRef());
+ EXPECT_EQ(safe->self->i, 1); // Will crash if not live.
+ SafeRef<WithWeak> safe2 = safe; // Copy.
+ EXPECT_EQ(safe2->self->i, 1); // Will crash if not live.
+ EXPECT_EQ(safe->self->i, 1); // Will crash if not live.
+ SafeRef<WithWeak> safe3 = std::move(safe); // Move.
+ EXPECT_EQ(safe3->self->i, 1); // Will crash if not live.
+}
+
+TEST(SafeRefTest, AssignCopyAndMove) {
+ WithWeak with;
+ WithWeak with2;
+ WithWeak with3;
+
+  // Ensure the `with` objects outlive the `safe` refs.
+ SafeRef<WithWeak> safe(with.factory.GetSafeRef());
+ SafeRef<WithWeak> safe2(with2.factory.GetSafeRef());
+ EXPECT_NE(safe->self, &with2);
+ safe = safe2;
+ EXPECT_EQ(safe->self, &with2);
+
+ SafeRef<WithWeak> safe3(with3.factory.GetSafeRef());
+ EXPECT_NE(safe->self, &with3);
+ safe = std::move(safe3);
+ EXPECT_EQ(safe->self, &with3);
+}
+
+TEST(SafeRefDeathTest, ArrowOperatorCrashIfBadPointer) {
+ absl::optional<WithWeak> with(absl::in_place);
+ SafeRef<WithWeak> safe(with->factory.GetSafeRef());
+ with.reset();
+ EXPECT_CHECK_DEATH(safe.operator->()); // Will crash since not live.
+}
+
+TEST(SafeRefDeathTest, StarOperatorCrashIfBadPointer) {
+ absl::optional<WithWeak> with(absl::in_place);
+ SafeRef<WithWeak> safe(with->factory.GetSafeRef());
+ with.reset();
+ EXPECT_CHECK_DEATH(safe.operator*()); // Will crash since not live.
+}
+
+TEST(SafeRefTest, ConversionToBaseClassFromCopyConstruct) {
+ WithWeak with;
+ SafeRef<WithWeak> safe(with.factory.GetSafeRef());
+ SafeRef<OtherBaseClass> base_safe = safe;
+ EXPECT_EQ(static_cast<WithWeak*>(&*base_safe), &with);
+}
+
+TEST(SafeRefTest, ConversionToBaseClassFromMoveConstruct) {
+ WithWeak with;
+ SafeRef<WithWeak> safe(with.factory.GetSafeRef());
+ SafeRef<OtherBaseClass> base_safe = std::move(safe);
+ EXPECT_EQ(static_cast<WithWeak*>(&*base_safe), &with);
+}
+
+TEST(SafeRefTest, ConversionToBaseClassFromCopyAssign) {
+ WithWeak with;
+ SafeRef<WithWeak> safe(with.factory.GetSafeRef());
+ SafeRef<OtherBaseClass> base_safe(with.factory.GetSafeRef());
+ base_safe = safe;
+ EXPECT_EQ(static_cast<WithWeak*>(&*base_safe), &with);
+}
+
+TEST(SafeRefTest, ConversionToBaseClassFromMoveAssign) {
+ WithWeak with;
+ SafeRef<WithWeak> safe(with.factory.GetSafeRef());
+ SafeRef<OtherBaseClass> base_safe(with.factory.GetSafeRef());
+ base_safe = std::move(safe);
+ EXPECT_EQ(static_cast<WithWeak*>(&*base_safe), &with);
+}
+
+TEST(SafeRefTest, CanDerefConst) {
+ WithWeak with;
+ const SafeRef<WithWeak> safe(with.factory.GetSafeRef());
+ EXPECT_EQ(safe->self->i, 1);
+ EXPECT_EQ((*safe).self->i, 1);
+}
+
+TEST(SafeRefTest, InvalidAfterMoveConstruction) {
+ WithWeak with;
+ SafeRef<WithWeak> safe(with.factory.GetSafeRef());
+ SafeRef<WithWeak> safe2 = std::move(safe);
+ // Will crash if not live.
+ EXPECT_EQ(safe2->self->i, 1);
+ // `safe` was previously moved-from, so using it in any way should crash now.
+ { EXPECT_CHECK_DEATH(SafeRef<WithWeak> safe3(safe)); }
+ {
+ SafeRef<WithWeak> safe3(with.factory.GetSafeRef());
+ EXPECT_CHECK_DEATH(safe3 = safe);
+ }
+ { EXPECT_CHECK_DEATH(SafeRef<WithWeak> safe3(std::move(safe))); }
+ {
+ SafeRef<WithWeak> safe3(with.factory.GetSafeRef());
+ EXPECT_CHECK_DEATH(safe3 = std::move(safe));
+ }
+ { EXPECT_CHECK_DEATH(SafeRef<BaseClass> safe3(safe)); }
+ {
+ SafeRef<BaseClass> safe3(with.factory.GetSafeRef());
+ EXPECT_CHECK_DEATH(safe3 = safe);
+ }
+ { EXPECT_CHECK_DEATH(SafeRef<BaseClass> safe3(std::move(safe))); }
+ {
+ SafeRef<BaseClass> safe3(with.factory.GetSafeRef());
+ EXPECT_CHECK_DEATH(safe3 = std::move(safe));
+ }
+ EXPECT_CHECK_DEATH((void)safe->self->i);
+}
+
+TEST(SafeRefTest, InvalidAfterMoveAssignment) {
+ WithWeak with;
+ SafeRef<WithWeak> safe(with.factory.GetSafeRef());
+ SafeRef<WithWeak> safe2(with.factory.GetSafeRef());
+ safe2 = std::move(safe);
+ // Will crash if not live.
+ EXPECT_EQ(safe2->self->i, 1);
+ // `safe` was previously moved-from, so using it in any way should crash now.
+ { EXPECT_CHECK_DEATH(SafeRef<WithWeak> safe3(safe)); }
+ {
+ SafeRef<WithWeak> safe3(with.factory.GetSafeRef());
+ EXPECT_CHECK_DEATH(safe3 = safe);
+ }
+ { EXPECT_CHECK_DEATH(SafeRef<WithWeak> safe3(std::move(safe))); }
+ {
+ SafeRef<WithWeak> safe3(with.factory.GetSafeRef());
+ EXPECT_CHECK_DEATH(safe3 = std::move(safe));
+ }
+ { EXPECT_CHECK_DEATH(SafeRef<BaseClass> safe3(safe)); }
+ {
+ SafeRef<BaseClass> safe3(with.factory.GetSafeRef());
+ EXPECT_CHECK_DEATH(safe3 = safe);
+ }
+ { EXPECT_CHECK_DEATH(SafeRef<BaseClass> safe3(std::move(safe))); }
+ {
+ SafeRef<BaseClass> safe3(with.factory.GetSafeRef());
+ EXPECT_CHECK_DEATH(safe3 = std::move(safe));
+ }
+ EXPECT_CHECK_DEATH((void)safe->self->i);
+}
+
+TEST(SafeRefTest, InvalidAfterMoveConversionConstruction) {
+ WithWeak with;
+ SafeRef<BaseClass> safe(with.factory.GetSafeRef());
+ SafeRef<BaseClass> safe2 = std::move(safe);
+ // Will crash if not live.
+ EXPECT_EQ(static_cast<WithWeak*>(&*safe2)->self->i, 1);
+ // `safe` was previously moved-from, so using it in any way should crash now.
+ { EXPECT_CHECK_DEATH(SafeRef<BaseClass> safe3(safe)); }
+ {
+ SafeRef<BaseClass> safe3(with.factory.GetSafeRef());
+ EXPECT_CHECK_DEATH(safe3 = safe);
+ }
+ { EXPECT_CHECK_DEATH(SafeRef<BaseClass> safe3(std::move(safe))); }
+ {
+ SafeRef<BaseClass> safe3(with.factory.GetSafeRef());
+ EXPECT_CHECK_DEATH(safe3 = std::move(safe));
+ }
+ { EXPECT_CHECK_DEATH(SafeRef<ReallyBaseClass> safe3(safe)); }
+ {
+ SafeRef<ReallyBaseClass> safe3(with.factory.GetSafeRef());
+ EXPECT_CHECK_DEATH(safe3 = safe);
+ }
+ { EXPECT_CHECK_DEATH(SafeRef<ReallyBaseClass> safe3(std::move(safe))); }
+ {
+ SafeRef<ReallyBaseClass> safe3(with.factory.GetSafeRef());
+ EXPECT_CHECK_DEATH(safe3 = std::move(safe));
+ }
+ EXPECT_CHECK_DEATH((void)static_cast<WithWeak*>(&*safe)->self->i);
+}
+
+TEST(SafeRefTest, InvalidAfterMoveConversionAssignment) {
+ WithWeak with;
+ SafeRef<BaseClass> safe(with.factory.GetSafeRef());
+ SafeRef<BaseClass> safe2(with.factory.GetSafeRef());
+ safe2 = std::move(safe);
+  // Will crash if not live.
+ EXPECT_EQ(static_cast<WithWeak*>(&*safe2)->self->i, 1);
+ // `safe` was previously moved-from, so using it in any way should crash now.
+ { EXPECT_CHECK_DEATH(SafeRef<BaseClass> safe3(safe)); }
+ {
+ SafeRef<BaseClass> safe3(with.factory.GetSafeRef());
+ EXPECT_CHECK_DEATH(safe3 = safe);
+ }
+ { EXPECT_CHECK_DEATH(SafeRef<BaseClass> safe3(std::move(safe))); }
+ {
+ SafeRef<BaseClass> safe3(with.factory.GetSafeRef());
+ EXPECT_CHECK_DEATH(safe3 = std::move(safe));
+ }
+ { EXPECT_CHECK_DEATH(SafeRef<ReallyBaseClass> safe3(safe)); }
+ {
+ SafeRef<ReallyBaseClass> safe3(with.factory.GetSafeRef());
+ EXPECT_CHECK_DEATH(safe3 = safe);
+ }
+ { EXPECT_CHECK_DEATH(SafeRef<ReallyBaseClass> safe3(std::move(safe))); }
+ {
+ SafeRef<ReallyBaseClass> safe3(with.factory.GetSafeRef());
+ EXPECT_CHECK_DEATH(safe3 = std::move(safe));
+ }
+ EXPECT_CHECK_DEATH((void)static_cast<WithWeak*>(&*safe)->self->i);
+}
+
+TEST(SafeRefTest, Bind) {
+ WithWeak with;
+ BindOnce(&WithWeak::Method, with.factory.GetSafeRef()).Run();
+}
+
+TEST(SafeRefTest, DanglingPointerDetector) {
+ auto instrumentation = test::DanglingPtrInstrumentation::Create();
+ if (!instrumentation.has_value()) {
+ GTEST_SKIP() << instrumentation.error();
+ }
+ {
+ auto with = std::make_unique<WithWeak>();
+ SafeRef<WithWeak> safe(with->factory.GetSafeRef());
+ EXPECT_EQ(instrumentation->dangling_ptr_detected(), 0u);
+ EXPECT_EQ(instrumentation->dangling_ptr_released(), 0u);
+
+ with.reset();
+ EXPECT_EQ(instrumentation->dangling_ptr_detected(), 1u);
+ EXPECT_EQ(instrumentation->dangling_ptr_released(), 0u);
+ }
+ EXPECT_EQ(instrumentation->dangling_ptr_detected(), 1u);
+ EXPECT_EQ(instrumentation->dangling_ptr_released(), 1u);
+}
+
+} // namespace
+} // namespace base
diff --git a/base/memory/scoped_policy.h b/base/memory/scoped_policy.h
index 5dbf204..4ba3f4a 100644
--- a/base/memory/scoped_policy.h
+++ b/base/memory/scoped_policy.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -14,7 +14,7 @@
// ownership claim.
ASSUME,
- // The scoped object will retain the the object and any initial ownership is
+ // The scoped object will retain the object and any initial ownership is
// not changed.
RETAIN
};
diff --git a/base/memory/scoped_refptr.h b/base/memory/scoped_refptr.h
index 27b48a1..adedf31 100644
--- a/base/memory/scoped_refptr.h
+++ b/base/memory/scoped_refptr.h
@@ -1,18 +1,19 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
+// Copyright 2017 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_SCOPED_REFPTR_H_
#define BASE_MEMORY_SCOPED_REFPTR_H_
+#include <stddef.h>
+
#include <iosfwd>
#include <type_traits>
#include <utility>
+#include "base/check.h"
#include "base/compiler_specific.h"
-#include "base/logging.h"
-#include "base/macros.h"
-#include "starboard/types.h"
+#include "base/memory/raw_ptr_exclusion.h"
template <class T>
class scoped_refptr;
@@ -23,35 +24,99 @@
class RefCounted;
template <class, typename>
class RefCountedThreadSafe;
+template <class>
+class RefCountedDeleteOnSequence;
+class SequencedTaskRunner;
+class WrappedPromise;
template <typename T>
scoped_refptr<T> AdoptRef(T* t);
+namespace internal {
+
+class BasePromise;
+
+} // namespace internal
+
namespace subtle {
enum AdoptRefTag { kAdoptRefTag };
enum StartRefCountFromZeroTag { kStartRefCountFromZeroTag };
enum StartRefCountFromOneTag { kStartRefCountFromOneTag };
+template <typename TagType>
+struct RefCountPreferenceTagTraits;
+
+template <>
+struct RefCountPreferenceTagTraits<StartRefCountFromZeroTag> {
+ static constexpr StartRefCountFromZeroTag kTag = kStartRefCountFromZeroTag;
+};
+
+template <>
+struct RefCountPreferenceTagTraits<StartRefCountFromOneTag> {
+ static constexpr StartRefCountFromOneTag kTag = kStartRefCountFromOneTag;
+};
+
+template <typename T, typename Tag = typename T::RefCountPreferenceTag>
+constexpr Tag GetRefCountPreference() {
+ return RefCountPreferenceTagTraits<Tag>::kTag;
+}
+
+// scoped_refptr<T> is typically used with one of several RefCounted<T> base
+// classes or with custom AddRef and Release methods. These overloads dispatch
+// on which was used.
+
template <typename T, typename U, typename V>
constexpr bool IsRefCountPreferenceOverridden(const T*,
const RefCounted<U, V>*) {
- return !std::is_same<std::decay_t<decltype(T::kRefCountPreference)>,
- std::decay_t<decltype(U::kRefCountPreference)>>::value;
+ return !std::is_same_v<std::decay_t<decltype(GetRefCountPreference<T>())>,
+ std::decay_t<decltype(GetRefCountPreference<U>())>>;
}
template <typename T, typename U, typename V>
constexpr bool IsRefCountPreferenceOverridden(
const T*,
const RefCountedThreadSafe<U, V>*) {
- return !std::is_same<std::decay_t<decltype(T::kRefCountPreference)>,
- std::decay_t<decltype(U::kRefCountPreference)>>::value;
+ return !std::is_same_v<std::decay_t<decltype(GetRefCountPreference<T>())>,
+ std::decay_t<decltype(GetRefCountPreference<U>())>>;
+}
+
+template <typename T, typename U>
+constexpr bool IsRefCountPreferenceOverridden(
+ const T*,
+ const RefCountedDeleteOnSequence<U>*) {
+ return !std::is_same_v<std::decay_t<decltype(GetRefCountPreference<T>())>,
+ std::decay_t<decltype(GetRefCountPreference<U>())>>;
}
constexpr bool IsRefCountPreferenceOverridden(...) {
return false;
}
+template <typename T, typename U, typename V>
+constexpr void AssertRefCountBaseMatches(const T*, const RefCounted<U, V>*) {
+ static_assert(std::is_base_of_v<U, T>,
+ "T implements RefCounted<U>, but U is not a base of T.");
+}
+
+template <typename T, typename U, typename V>
+constexpr void AssertRefCountBaseMatches(const T*,
+ const RefCountedThreadSafe<U, V>*) {
+ static_assert(
+ std::is_base_of_v<U, T>,
+ "T implements RefCountedThreadSafe<U>, but U is not a base of T.");
+}
+
+template <typename T, typename U>
+constexpr void AssertRefCountBaseMatches(const T*,
+ const RefCountedDeleteOnSequence<U>*) {
+ static_assert(
+ std::is_base_of_v<U, T>,
+ "T implements RefCountedDeleteOnSequence<U>, but U is not a base of T.");
+}
+
+constexpr void AssertRefCountBaseMatches(...) {}
+
} // namespace subtle
// Creates a scoped_refptr from a raw pointer without incrementing the reference
@@ -59,9 +124,9 @@
// from 1 instead of 0.
template <typename T>
scoped_refptr<T> AdoptRef(T* obj) {
- using Tag = std::decay_t<decltype(T::kRefCountPreference)>;
+ using Tag = std::decay_t<decltype(subtle::GetRefCountPreference<T>())>;
static_assert(std::is_same<subtle::StartRefCountFromOneTag, Tag>::value,
- "Use AdoptRef only for the reference count starts from one.");
+ "Use AdoptRef only if the reference count starts from one.");
DCHECK(obj);
DCHECK(obj->HasOneRef());
@@ -88,7 +153,7 @@
template <typename T, typename... Args>
scoped_refptr<T> MakeRefCounted(Args&&... args) {
T* obj = new T(std::forward<Args>(args)...);
- return subtle::AdoptRefIfNeeded(obj, T::kRefCountPreference);
+ return subtle::AdoptRefIfNeeded(obj, subtle::GetRefCountPreference<T>());
}
// Takes an instance of T, which is a ref counted type, and wraps the object
@@ -163,17 +228,32 @@
// to another component (if a component merely needs to use t on the stack
// without keeping a ref: pass t as a raw T*).
template <class T>
-class scoped_refptr {
+class TRIVIAL_ABI scoped_refptr {
public:
typedef T element_type;
constexpr scoped_refptr() = default;
- // Constructs from raw pointer. constexpr if |p| is null.
+#ifdef COBALT_PENDING_CLEAN_UP
constexpr scoped_refptr(T* p) : ptr_(p) {
if (ptr_)
AddRef(ptr_);
}
+#else
+ // Allow implicit construction from nullptr.
+ constexpr scoped_refptr(std::nullptr_t) {}
+
+ // Constructs from a raw pointer. Note that this constructor allows implicit
+ // conversion from T* to scoped_refptr<T> which is strongly discouraged. If
+ // you are creating a new ref-counted object please use
+ // base::MakeRefCounted<T>() or base::WrapRefCounted<T>(). Otherwise you
+ // should move or copy construct from an existing scoped_refptr<T> to the
+ // ref-counted object.
+ scoped_refptr(T* p) : ptr_(p) {
+ if (ptr_)
+ AddRef(ptr_);
+ }
+#endif
// Copy constructor. This is required in addition to the copy conversion
// constructor below.
@@ -213,6 +293,7 @@
// necessary implicit conversion with scoped_refptr.get().
operator T*() const { return ptr_; }
#endif
+
T& operator*() const {
DCHECK(ptr_);
return *ptr_;
@@ -223,6 +304,11 @@
return ptr_;
}
+ scoped_refptr& operator=(std::nullptr_t) {
+ reset();
+ return *this;
+ }
+
#if !defined(STARBOARD)
scoped_refptr& operator=(T* p) { return *this = scoped_refptr(p); }
#endif
@@ -237,6 +323,10 @@
// object, if it existed.
void reset() { scoped_refptr().swap(*this); }
+ // Returns the owned pointer (if any), releasing ownership to the caller. The
+ // caller is responsible for managing the lifetime of the reference.
+ [[nodiscard]] T* release();
+
void swap(scoped_refptr& r) noexcept { std::swap(ptr_, r.ptr_); }
explicit operator bool() const { return ptr_ != nullptr; }
@@ -264,11 +354,19 @@
}
protected:
- T* ptr_ = nullptr;
+ // This field is not a raw_ptr<> because it was filtered by the rewriter for:
+ // #union, #addr-of, #global-scope
+ RAW_PTR_EXCLUSION T* ptr_ = nullptr;
private:
template <typename U>
friend scoped_refptr<U> base::AdoptRef(U*);
+ friend class ::base::SequencedTaskRunner;
+
+ // Friend access so these classes can use the constructor below as part of a
+ // binary size optimization.
+ friend class ::base::internal::BasePromise;
+ friend class ::base::WrappedPromise;
scoped_refptr(T* p, base::subtle::AdoptRefTag) : ptr_(p) {}
@@ -284,15 +382,24 @@
static void Release(T* ptr);
};
+template <typename T>
+T* scoped_refptr<T>::release() {
+ T* ptr = ptr_;
+ ptr_ = nullptr;
+ return ptr;
+}
+
// static
template <typename T>
void scoped_refptr<T>::AddRef(T* ptr) {
+ base::subtle::AssertRefCountBaseMatches(ptr, ptr);
ptr->AddRef();
}
// static
template <typename T>
void scoped_refptr<T>::Release(T* ptr) {
+ base::subtle::AssertRefCountBaseMatches(ptr, ptr);
ptr->Release();
}
@@ -308,12 +415,12 @@
}
template <typename T>
-bool operator==(const scoped_refptr<T>& lhs, std::nullptr_t /*null*/) {
+bool operator==(const scoped_refptr<T>& lhs, std::nullptr_t null) {
return !static_cast<bool>(lhs);
}
template <typename T>
-bool operator==(std::nullptr_t /*null*/, const scoped_refptr<T>& rhs) {
+bool operator==(std::nullptr_t null, const scoped_refptr<T>& rhs) {
return !static_cast<bool>(rhs);
}
diff --git a/base/memory/shared_memory.h b/base/memory/shared_memory.h
deleted file mode 100644
index d6ac8bc..0000000
--- a/base/memory/shared_memory.h
+++ /dev/null
@@ -1,257 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MEMORY_SHARED_MEMORY_H_
-#define BASE_MEMORY_SHARED_MEMORY_H_
-
-// Starboard doesn't curretly support multiple processes or shared memory.
-#if !defined(STARBOARD)
-
-#include <string>
-
-#include "base/base_export.h"
-#include "base/hash.h"
-#include "base/macros.h"
-#include "base/memory/shared_memory_handle.h"
-#include "base/process/process_handle.h"
-#include "base/strings/string16.h"
-#include "build/build_config.h"
-
-#if defined(OS_POSIX) || defined(OS_FUCHSIA)
-#include <stdio.h>
-#include <sys/types.h>
-#include <semaphore.h>
-#include "base/file_descriptor_posix.h"
-#include "base/files/file_util.h"
-#include "base/files/scoped_file.h"
-#endif
-
-#if defined(OS_WIN)
-#include "base/win/scoped_handle.h"
-#include "starboard/types.h"
-#endif
-
-namespace base {
-
-class FilePath;
-
-// Options for creating a shared memory object.
-struct BASE_EXPORT SharedMemoryCreateOptions {
-#if defined(OS_MACOSX) && !defined(OS_IOS)
- // The type of OS primitive that should back the SharedMemory object.
- SharedMemoryHandle::Type type = SharedMemoryHandle::MACH;
-#elif !defined(OS_FUCHSIA)
- // DEPRECATED (crbug.com/345734):
- // If NULL, the object is anonymous. This pointer is owned by the caller
- // and must live through the call to Create().
- const std::string* name_deprecated = nullptr;
-
- // DEPRECATED (crbug.com/345734):
- // If true, and the shared memory already exists, Create() will open the
- // existing shared memory and ignore the size parameter. If false,
- // shared memory must not exist. This flag is meaningless unless
- // name_deprecated is non-NULL.
- bool open_existing_deprecated = false;
-#endif // defined(OS_MACOSX) && !defined(OS_IOS)
-
- // Size of the shared memory object to be created.
- // When opening an existing object, this has no effect.
- size_t size = 0;
-
- // If true, mappings might need to be made executable later.
- bool executable = false;
-
- // If true, the file can be shared read-only to a process.
- bool share_read_only = false;
-};
-
-// Platform abstraction for shared memory.
-// SharedMemory consumes a SharedMemoryHandle [potentially one that it created]
-// to map a shared memory OS resource into the virtual address space of the
-// current process.
-class BASE_EXPORT SharedMemory {
- public:
- SharedMemory();
-
-#if defined(OS_WIN)
- // Similar to the default constructor, except that this allows for
- // calling LockDeprecated() to acquire the named mutex before either Create or
- // Open are called on Windows.
- explicit SharedMemory(const string16& name);
-#endif
-
- // Create a new SharedMemory object from an existing, open
- // shared memory file.
- //
- // WARNING: This does not reduce the OS-level permissions on the handle; it
- // only affects how the SharedMemory will be mmapped. Use
- // GetReadOnlyHandle to drop permissions. TODO(jln,jyasskin): DCHECK
- // that |read_only| matches the permissions of the handle.
- SharedMemory(const SharedMemoryHandle& handle, bool read_only);
-
- // Closes any open files.
- ~SharedMemory();
-
- // Return true iff the given handle is valid (i.e. not the distingished
- // invalid value; NULL for a HANDLE and -1 for a file descriptor)
- static bool IsHandleValid(const SharedMemoryHandle& handle);
-
- // Closes a shared memory handle.
- static void CloseHandle(const SharedMemoryHandle& handle);
-
- // Returns the maximum number of handles that can be open at once per process.
- static size_t GetHandleLimit();
-
- // Duplicates The underlying OS primitive. Returns an invalid handle on
- // failure. The caller is responsible for destroying the duplicated OS
- // primitive.
- static SharedMemoryHandle DuplicateHandle(const SharedMemoryHandle& handle);
-
-#if defined(OS_POSIX)
- // This method requires that the SharedMemoryHandle is backed by a POSIX fd.
- static int GetFdFromSharedMemoryHandle(const SharedMemoryHandle& handle);
-#endif
-
- // Creates a shared memory object as described by the options struct.
- // Returns true on success and false on failure.
- bool Create(const SharedMemoryCreateOptions& options);
-
- // Creates and maps an anonymous shared memory segment of size size.
- // Returns true on success and false on failure.
- bool CreateAndMapAnonymous(size_t size);
-
- // Creates an anonymous shared memory segment of size size.
- // Returns true on success and false on failure.
- bool CreateAnonymous(size_t size) {
- SharedMemoryCreateOptions options;
- options.size = size;
- return Create(options);
- }
-
-#if (!defined(OS_MACOSX) || defined(OS_IOS)) && !defined(OS_FUCHSIA)
- // DEPRECATED (crbug.com/345734):
- // Creates or opens a shared memory segment based on a name.
- // If open_existing is true, and the shared memory already exists,
- // opens the existing shared memory and ignores the size parameter.
- // If open_existing is false, shared memory must not exist.
- // size is the size of the block to be created.
- // Returns true on success, false on failure.
- bool CreateNamedDeprecated(
- const std::string& name, bool open_existing, size_t size) {
- SharedMemoryCreateOptions options;
- options.name_deprecated = &name;
- options.open_existing_deprecated = open_existing;
- options.size = size;
- return Create(options);
- }
-
- // Deletes resources associated with a shared memory segment based on name.
- // Not all platforms require this call.
- bool Delete(const std::string& name);
-
- // Opens a shared memory segment based on a name.
- // If read_only is true, opens for read-only access.
- // Returns true on success, false on failure.
- bool Open(const std::string& name, bool read_only);
-#endif // !defined(OS_MACOSX) || defined(OS_IOS)
-
- // Maps the shared memory into the caller's address space.
- // Returns true on success, false otherwise. The memory address
- // is accessed via the memory() accessor. The mapped address is guaranteed to
- // have an alignment of at least MAP_MINIMUM_ALIGNMENT. This method will fail
- // if this object is currently mapped.
- bool Map(size_t bytes) {
- return MapAt(0, bytes);
- }
-
- // Same as above, but with |offset| to specify from begining of the shared
- // memory block to map.
- // |offset| must be alignent to value of |SysInfo::VMAllocationGranularity()|.
- bool MapAt(off_t offset, size_t bytes);
- enum { MAP_MINIMUM_ALIGNMENT = 32 };
-
- // Unmaps the shared memory from the caller's address space.
- // Returns true if successful; returns false on error or if the
- // memory is not mapped.
- bool Unmap();
-
- // The size requested when the map is first created.
- size_t requested_size() const { return requested_size_; }
-
- // The actual size of the mapped memory (may be larger than requested).
- size_t mapped_size() const { return mapped_size_; }
-
- // Gets a pointer to the opened memory space if it has been
- // Mapped via Map(). Returns NULL if it is not mapped.
- void* memory() const { return memory_; }
-
- // Returns the underlying OS handle for this segment.
- // Use of this handle for anything other than an opaque
- // identifier is not portable.
- SharedMemoryHandle handle() const;
-
- // Returns the underlying OS handle for this segment. The caller takes
- // ownership of the handle and memory is unmapped. This is equivalent to
- // duplicating the handle and then calling Unmap() and Close() on this object,
- // without the overhead of duplicating the handle.
- SharedMemoryHandle TakeHandle();
-
- // Closes the open shared memory segment. The memory will remain mapped if
- // it was previously mapped.
- // It is safe to call Close repeatedly.
- void Close();
-
- // Returns a read-only handle to this shared memory region. The caller takes
- // ownership of the handle. For POSIX handles, CHECK-fails if the region
- // wasn't Created or Opened with share_read_only=true, which is required to
- // make the handle read-only. When the handle is passed to the IPC subsystem,
- // that takes ownership of the handle. As such, it's not valid to pass the
- // sample handle to the IPC subsystem twice. Returns an invalid handle on
- // failure.
- SharedMemoryHandle GetReadOnlyHandle() const;
-
- // Returns an ID for the mapped region. This is ID of the SharedMemoryHandle
- // that was mapped. The ID is valid even after the SharedMemoryHandle is
- // Closed, as long as the region is not unmapped.
- const UnguessableToken& mapped_id() const { return mapped_id_; }
-
- private:
-#if defined(OS_POSIX) && !defined(OS_NACL) && !defined(OS_ANDROID) && \
- (!defined(OS_MACOSX) || defined(OS_IOS))
- bool FilePathForMemoryName(const std::string& mem_name, FilePath* path);
-#endif
-
-#if defined(OS_WIN)
- // If true indicates this came from an external source so needs extra checks
- // before being mapped.
- bool external_section_ = false;
- string16 name_;
-#elif !defined(OS_ANDROID) && !defined(OS_FUCHSIA)
- // If valid, points to the same memory region as shm_, but with readonly
- // permissions.
- SharedMemoryHandle readonly_shm_;
-#endif
-
-#if defined(OS_MACOSX) && !defined(OS_IOS)
- // The mechanism by which the memory is mapped. Only valid if |memory_| is not
- // |nullptr|.
- SharedMemoryHandle::Type mapped_memory_mechanism_ = SharedMemoryHandle::MACH;
-#endif
-
- // The OS primitive that backs the shared memory region.
- SharedMemoryHandle shm_;
-
- size_t mapped_size_ = 0;
- void* memory_ = nullptr;
- bool read_only_ = false;
- size_t requested_size_ = 0;
- base::UnguessableToken mapped_id_;
-
- DISALLOW_COPY_AND_ASSIGN(SharedMemory);
-};
-
-} // namespace base
-
-#endif // !defined(STARBOARD)
-#endif // BASE_MEMORY_SHARED_MEMORY_H_
diff --git a/base/memory/shared_memory_android.cc b/base/memory/shared_memory_android.cc
deleted file mode 100644
index 8375453..0000000
--- a/base/memory/shared_memory_android.cc
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/shared_memory.h"
-
-#include <sys/mman.h>
-
-#include "base/bits.h"
-#include "base/logging.h"
-#include "base/process/process_metrics.h"
-#include "starboard/types.h"
-#include "third_party/ashmem/ashmem.h"
-
-namespace base {
-
-// For Android, we use ashmem to implement SharedMemory. ashmem_create_region
-// will automatically pin the region. We never explicitly call pin/unpin. When
-// all the file descriptors from different processes associated with the region
-// are closed, the memory buffer will go away.
-
-bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
- DCHECK(!shm_.IsValid());
-
- // Align size as required by ashmem_create_region() API documentation.
- size_t rounded_size = bits::Align(options.size, GetPageSize());
-
- if (rounded_size > static_cast<size_t>(std::numeric_limits<int>::max()))
- return false;
-
- // "name" is just a label in ashmem. It is visible in /proc/pid/maps.
- int fd = ashmem_create_region(
- options.name_deprecated ? options.name_deprecated->c_str() : "",
- rounded_size);
- shm_ = SharedMemoryHandle::ImportHandle(fd, options.size);
- if (!shm_.IsValid()) {
- DLOG(ERROR) << "Shared memory creation failed";
- return false;
- }
-
- int flags = PROT_READ | PROT_WRITE | (options.executable ? PROT_EXEC : 0);
- int err = ashmem_set_prot_region(shm_.GetHandle(), flags);
- if (err < 0) {
- DLOG(ERROR) << "Error " << err << " when setting protection of ashmem";
- return false;
- }
-
- requested_size_ = options.size;
-
- return true;
-}
-
-bool SharedMemory::Delete(const std::string& name) {
- // Like on Windows, this is intentionally returning true as ashmem will
- // automatically releases the resource when all FDs on it are closed.
- return true;
-}
-
-bool SharedMemory::Open(const std::string& name, bool read_only) {
- // ashmem doesn't support name mapping
- NOTIMPLEMENTED();
- return false;
-}
-
-void SharedMemory::Close() {
- if (shm_.IsValid()) {
- shm_.Close();
- shm_ = SharedMemoryHandle();
- }
-}
-
-SharedMemoryHandle SharedMemory::GetReadOnlyHandle() const {
- // There are no read-only Ashmem descriptors on Android.
- // Instead, the protection mask is a property of the region itself.
- SharedMemoryHandle handle = shm_.Duplicate();
- handle.SetReadOnly();
- return handle;
-}
-
-} // namespace base
diff --git a/base/memory/shared_memory_fuchsia.cc b/base/memory/shared_memory_fuchsia.cc
deleted file mode 100644
index ffad5b2..0000000
--- a/base/memory/shared_memory_fuchsia.cc
+++ /dev/null
@@ -1,165 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/shared_memory.h"
-
-#include <limits>
-
-#include <lib/zx/vmar.h>
-#include <lib/zx/vmo.h>
-#include <zircon/rights.h>
-
-#include "base/bits.h"
-#include "base/fuchsia/fuchsia_logging.h"
-#include "base/memory/shared_memory_tracker.h"
-#include "base/process/process_metrics.h"
-#include "starboard/types.h"
-
-namespace base {
-
-SharedMemory::SharedMemory() {}
-
-SharedMemory::SharedMemory(const SharedMemoryHandle& handle, bool read_only)
- : shm_(handle), read_only_(read_only) {}
-
-SharedMemory::~SharedMemory() {
- Unmap();
- Close();
-}
-
-// static
-bool SharedMemory::IsHandleValid(const SharedMemoryHandle& handle) {
- return handle.IsValid();
-}
-
-// static
-void SharedMemory::CloseHandle(const SharedMemoryHandle& handle) {
- DCHECK(handle.IsValid());
- handle.Close();
-}
-
-// static
-size_t SharedMemory::GetHandleLimit() {
- // Duplicated from the internal Magenta kernel constant kMaxHandleCount
- // (kernel/lib/zircon/zircon.cpp).
- return 256 * 1024u;
-}
-
-bool SharedMemory::CreateAndMapAnonymous(size_t size) {
- return CreateAnonymous(size) && Map(size);
-}
-
-bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
- requested_size_ = options.size;
- mapped_size_ = bits::Align(requested_size_, GetPageSize());
- zx::vmo vmo;
- zx_status_t status =
- zx::vmo::create(mapped_size_, ZX_VMO_NON_RESIZABLE, &vmo);
- if (status != ZX_OK) {
- ZX_DLOG(ERROR, status) << "zx_vmo_create";
- return false;
- }
-
- if (!options.executable) {
- // If options.executable isn't set, drop that permission by replacement.
- const int kNoExecFlags = ZX_DEFAULT_VMO_RIGHTS & ~ZX_RIGHT_EXECUTE;
- status = vmo.replace(kNoExecFlags, &vmo);
- if (status != ZX_OK) {
- ZX_DLOG(ERROR, status) << "zx_handle_replace";
- return false;
- }
- }
-
- shm_ = SharedMemoryHandle(vmo.release(), mapped_size_,
- UnguessableToken::Create());
- return true;
-}
-
-bool SharedMemory::MapAt(off_t offset, size_t bytes) {
- if (!shm_.IsValid())
- return false;
-
- if (bytes > static_cast<size_t>(std::numeric_limits<int>::max()))
- return false;
-
- if (memory_)
- return false;
-
- int flags = ZX_VM_FLAG_PERM_READ;
- if (!read_only_)
- flags |= ZX_VM_FLAG_PERM_WRITE;
- uintptr_t addr;
- zx_status_t status = zx::vmar::root_self()->map(
- 0, *zx::unowned_vmo(shm_.GetHandle()), offset, bytes, flags, &addr);
- if (status != ZX_OK) {
- ZX_DLOG(ERROR, status) << "zx_vmar_map";
- return false;
- }
- memory_ = reinterpret_cast<void*>(addr);
-
- mapped_size_ = bytes;
- mapped_id_ = shm_.GetGUID();
- SharedMemoryTracker::GetInstance()->IncrementMemoryUsage(*this);
- return true;
-}
-
-bool SharedMemory::Unmap() {
- if (!memory_)
- return false;
-
- SharedMemoryTracker::GetInstance()->DecrementMemoryUsage(*this);
-
- uintptr_t addr = reinterpret_cast<uintptr_t>(memory_);
- zx_status_t status = zx::vmar::root_self()->unmap(addr, mapped_size_);
- if (status != ZX_OK) {
- ZX_DLOG(ERROR, status) << "zx_vmar_unmap";
- return false;
- }
-
- memory_ = nullptr;
- mapped_id_ = UnguessableToken();
- return true;
-}
-
-void SharedMemory::Close() {
- if (shm_.IsValid()) {
- shm_.Close();
- shm_ = SharedMemoryHandle();
- }
-}
-
-SharedMemoryHandle SharedMemory::handle() const {
- return shm_;
-}
-
-SharedMemoryHandle SharedMemory::TakeHandle() {
- SharedMemoryHandle handle(shm_);
- handle.SetOwnershipPassesToIPC(true);
- Unmap();
- shm_ = SharedMemoryHandle();
- return handle;
-}
-
-SharedMemoryHandle SharedMemory::DuplicateHandle(
- const SharedMemoryHandle& handle) {
- return handle.Duplicate();
-}
-
-SharedMemoryHandle SharedMemory::GetReadOnlyHandle() const {
- zx::vmo duped_handle;
- const int kNoWriteOrExec =
- ZX_DEFAULT_VMO_RIGHTS &
- ~(ZX_RIGHT_WRITE | ZX_RIGHT_EXECUTE | ZX_RIGHT_SET_PROPERTY);
- zx_status_t status = zx::unowned_vmo(shm_.GetHandle())
- ->duplicate(kNoWriteOrExec, &duped_handle);
- if (status != ZX_OK)
- return SharedMemoryHandle();
-
- SharedMemoryHandle handle(duped_handle.release(), shm_.GetSize(),
- shm_.GetGUID());
- handle.SetOwnershipPassesToIPC(true);
- return handle;
-}
-
-} // namespace base
diff --git a/base/memory/shared_memory_handle.cc b/base/memory/shared_memory_handle.cc
deleted file mode 100644
index 00775d4..0000000
--- a/base/memory/shared_memory_handle.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/shared_memory_handle.h"
-
-namespace base {
-
-// As we don't support shared memory in Cobalt, we use the stub impl of
-// SharedMemoryHandle and needs a default ctor to make the compiler happy.
-#if defined(STARBOARD)
-SharedMemoryHandle::SharedMemoryHandle() {}
-
-bool SharedMemoryHandle::IsValid() const {
- return false;
-}
-#endif
-
-SharedMemoryHandle::SharedMemoryHandle(const SharedMemoryHandle& handle) =
- default;
-
-SharedMemoryHandle& SharedMemoryHandle::operator=(
- const SharedMemoryHandle& handle) = default;
-
-base::UnguessableToken SharedMemoryHandle::GetGUID() const {
- return guid_;
-}
-
-size_t SharedMemoryHandle::GetSize() const {
- return size_;
-}
-
-} // namespace base
diff --git a/base/memory/shared_memory_handle.h b/base/memory/shared_memory_handle.h
deleted file mode 100644
index 4354617..0000000
--- a/base/memory/shared_memory_handle.h
+++ /dev/null
@@ -1,241 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MEMORY_SHARED_MEMORY_HANDLE_H_
-#define BASE_MEMORY_SHARED_MEMORY_HANDLE_H_
-
-
-#include "base/unguessable_token.h"
-#include "build/build_config.h"
-
-#if defined(OS_WIN)
-#include "base/process/process_handle.h"
-#include "base/win/windows_types.h"
-#elif defined(OS_MACOSX) && !defined(OS_IOS)
-#include <mach/mach.h>
-#include "base/base_export.h"
-#include "base/file_descriptor_posix.h"
-#include "base/macros.h"
-#include "base/process/process_handle.h"
-#elif defined(OS_POSIX)
-#include <sys/types.h>
-#include "base/file_descriptor_posix.h"
-#elif defined(OS_FUCHSIA)
-#include <zircon/types.h>
-
-#include "starboard/types.h"
-#endif
-
-namespace base {
-
-// SharedMemoryHandle is the smallest possible IPC-transportable "reference" to
-// a shared memory OS resource. A "reference" can be consumed exactly once [by
-// base::SharedMemory] to map the shared memory OS resource into the virtual
-// address space of the current process.
-// TODO(erikchen): This class should have strong ownership semantics to prevent
-// leaks of the underlying OS resource. https://crbug.com/640840.
-class BASE_EXPORT SharedMemoryHandle {
- public:
- // The default constructor returns an invalid SharedMemoryHandle.
- SharedMemoryHandle();
-
- // Standard copy constructor. The new instance shares the underlying OS
- // primitives.
- SharedMemoryHandle(const SharedMemoryHandle& handle);
-
- // Standard assignment operator. The updated instance shares the underlying
- // OS primitives.
- SharedMemoryHandle& operator=(const SharedMemoryHandle& handle);
-
- // Closes the underlying OS resource.
- // The fact that this method needs to be "const" is an artifact of the
- // original interface for base::SharedMemory::CloseHandle.
- // TODO(erikchen): This doesn't clear the underlying reference, which seems
- // like a bug, but is how this class has always worked. Fix this:
- // https://crbug.com/716072.
- void Close() const;
-
- // Whether ownership of the underlying OS resource is implicitly passed to
- // the IPC subsystem during serialization.
- void SetOwnershipPassesToIPC(bool ownership_passes);
- bool OwnershipPassesToIPC() const;
-
- // Whether the underlying OS resource is valid.
- bool IsValid() const;
-
- // Duplicates the underlying OS resource. Using the return value as a
- // parameter to an IPC message will cause the IPC subsystem to consume the OS
- // resource.
- SharedMemoryHandle Duplicate() const;
-
- // Uniques identifies the shared memory region that the underlying OS resource
- // points to. Multiple SharedMemoryHandles that point to the same shared
- // memory region will have the same GUID. Preserved across IPC.
- base::UnguessableToken GetGUID() const;
-
- // Returns the size of the memory region that SharedMemoryHandle points to.
- size_t GetSize() const;
-
-#if defined(OS_WIN)
- // Takes implicit ownership of |h|.
- // |guid| uniquely identifies the shared memory region pointed to by the
- // underlying OS resource. If the HANDLE is associated with another
- // SharedMemoryHandle, the caller must pass the |guid| of that
- // SharedMemoryHandle. Otherwise, the caller should generate a new
- // UnguessableToken.
- // Passing the wrong |size| has no immediate consequence, but may cause errors
- // when trying to map the SharedMemoryHandle at a later point in time.
- SharedMemoryHandle(HANDLE h, size_t size, const base::UnguessableToken& guid);
- HANDLE GetHandle() const;
-#elif defined(OS_FUCHSIA)
- // Takes implicit ownership of |h|.
- // |guid| uniquely identifies the shared memory region pointed to by the
- // underlying OS resource. If the zx_handle_t is associated with another
- // SharedMemoryHandle, the caller must pass the |guid| of that
- // SharedMemoryHandle. Otherwise, the caller should generate a new
- // UnguessableToken.
- // Passing the wrong |size| has no immediate consequence, but may cause errors
- // when trying to map the SharedMemoryHandle at a later point in time.
- SharedMemoryHandle(zx_handle_t h,
- size_t size,
- const base::UnguessableToken& guid);
- zx_handle_t GetHandle() const;
-#elif defined(OS_MACOSX) && !defined(OS_IOS)
- enum Type {
- // The SharedMemoryHandle is backed by a POSIX fd.
- POSIX,
- // The SharedMemoryHandle is backed by the Mach primitive "memory object".
- MACH,
- };
-
- // Makes a Mach-based SharedMemoryHandle of the given size. On error,
- // subsequent calls to IsValid() return false.
- // Passing the wrong |size| has no immediate consequence, but may cause errors
- // when trying to map the SharedMemoryHandle at a later point in time.
- SharedMemoryHandle(mach_vm_size_t size, const base::UnguessableToken& guid);
-
- // Makes a Mach-based SharedMemoryHandle from |memory_object|, a named entry
- // in the current task. The memory region has size |size|.
- // Passing the wrong |size| has no immediate consequence, but may cause errors
- // when trying to map the SharedMemoryHandle at a later point in time.
- SharedMemoryHandle(mach_port_t memory_object,
- mach_vm_size_t size,
- const base::UnguessableToken& guid);
-
- Type GetType() const { return type_; }
-
- // Exposed so that the SharedMemoryHandle can be transported between
- // processes.
- mach_port_t GetMemoryObject() const;
-
- // The SharedMemoryHandle must be valid.
- // Returns whether the SharedMemoryHandle was successfully mapped into memory.
- // On success, |memory| is an output variable that contains the start of the
- // mapped memory.
- bool MapAt(off_t offset, size_t bytes, void** memory, bool read_only);
-#elif defined(OS_POSIX)
- // Creates a SharedMemoryHandle from an |fd| supplied from an external
- // service.
- // Passing the wrong |size| has no immediate consequence, but may cause errors
- // when trying to map the SharedMemoryHandle at a later point in time.
- static SharedMemoryHandle ImportHandle(int fd, size_t size);
-
- // Returns the underlying OS resource.
- int GetHandle() const;
-
- // Invalidates [but doesn't close] the underlying OS resource. This will leak
- // unless the caller is careful.
- int Release();
-#endif
-
-#if defined(OS_ANDROID)
- // Marks the current file descriptor as read-only, for the purpose of
- // mapping. This is independent of the region's read-only status.
- void SetReadOnly() { read_only_ = true; }
-
- // Returns true iff the descriptor is to be used for read-only
- // mappings.
- bool IsReadOnly() const { return read_only_; }
-
- // Returns true iff the corresponding region is read-only.
- bool IsRegionReadOnly() const;
-
- // Try to set the region read-only. This will fail any future attempt
- // at read-write mapping.
- bool SetRegionReadOnly() const;
-#endif
-
-#if defined(OS_POSIX)
- // Constructs a SharedMemoryHandle backed by a FileDescriptor. The newly
- // created instance has the same ownership semantics as base::FileDescriptor.
- // This typically means that the SharedMemoryHandle takes ownership of the
- // |fd| if |auto_close| is true. Unfortunately, it's common for existing code
- // to make shallow copies of SharedMemoryHandle, and the one that is finally
- // passed into a base::SharedMemory is the one that "consumes" the fd.
- //
- // |guid| uniquely identifies the shared memory region pointed to by the
- // underlying OS resource. If |file_descriptor| is associated with another
- // SharedMemoryHandle, the caller must pass the |guid| of that
- // SharedMemoryHandle. Otherwise, the caller should generate a new
- // UnguessableToken.
- // Passing the wrong |size| has no immediate consequence, but may cause errors
- // when trying to map the SharedMemoryHandle at a later point in time.
- SharedMemoryHandle(const base::FileDescriptor& file_descriptor,
- size_t size,
- const base::UnguessableToken& guid);
-#endif
-
- private:
-#if defined(OS_WIN)
- HANDLE handle_ = nullptr;
-
- // Whether passing this object as a parameter to an IPC message passes
- // ownership of |handle_| to the IPC stack. This is meant to mimic the
- // behavior of the |auto_close| parameter of FileDescriptor. This member only
- // affects attachment-brokered SharedMemoryHandles.
- // Defaults to |false|.
- bool ownership_passes_to_ipc_ = false;
-#elif defined(OS_FUCHSIA)
- zx_handle_t handle_ = ZX_HANDLE_INVALID;
- bool ownership_passes_to_ipc_ = false;
-#elif defined(OS_MACOSX) && !defined(OS_IOS)
- friend class SharedMemory;
- friend bool CheckReadOnlySharedMemoryHandleForTesting(
- SharedMemoryHandle handle);
-
- Type type_ = MACH;
-
- // Each instance of a SharedMemoryHandle is backed either by a POSIX fd or a
- // mach port. |type_| determines the backing member.
- union {
- FileDescriptor file_descriptor_;
-
- struct {
- mach_port_t memory_object_ = MACH_PORT_NULL;
-
- // Whether passing this object as a parameter to an IPC message passes
- // ownership of |memory_object_| to the IPC stack. This is meant to mimic
- // the behavior of the |auto_close| parameter of FileDescriptor.
- // Defaults to |false|.
- bool ownership_passes_to_ipc_ = false;
- };
- };
-#elif defined(OS_ANDROID)
- friend class SharedMemory;
-
- FileDescriptor file_descriptor_;
- bool read_only_ = false;
-#elif defined(OS_POSIX)
- FileDescriptor file_descriptor_;
-#endif
-
- base::UnguessableToken guid_;
-
- // The size of the region referenced by the SharedMemoryHandle.
- size_t size_ = 0;
-};
-
-} // namespace base
-
-#endif // BASE_MEMORY_SHARED_MEMORY_HANDLE_H_
diff --git a/base/memory/shared_memory_handle_android.cc b/base/memory/shared_memory_handle_android.cc
deleted file mode 100644
index b3dcc3b..0000000
--- a/base/memory/shared_memory_handle_android.cc
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/shared_memory_handle.h"
-
-#include <sys/mman.h>
-#include <unistd.h>
-
-#include "base/logging.h"
-#include "base/posix/eintr_wrapper.h"
-#include "base/posix/unix_domain_socket.h"
-#include "base/unguessable_token.h"
-#include "starboard/types.h"
-#include "third_party/ashmem/ashmem.h"
-
-namespace base {
-
-static int GetAshmemRegionProtectionMask(int fd) {
- int prot = ashmem_get_prot_region(fd);
- if (prot < 0) {
- DPLOG(ERROR) << "ashmem_get_prot_region";
- return -1;
- }
- return prot;
-}
-
-SharedMemoryHandle::SharedMemoryHandle() = default;
-
-SharedMemoryHandle::SharedMemoryHandle(
- const base::FileDescriptor& file_descriptor,
- size_t size,
- const base::UnguessableToken& guid)
- : guid_(guid), size_(size) {
- DCHECK_GE(file_descriptor.fd, 0);
- file_descriptor_ = file_descriptor;
-}
-
-// static
-SharedMemoryHandle SharedMemoryHandle::ImportHandle(int fd, size_t size) {
- SharedMemoryHandle handle;
- handle.file_descriptor_.fd = fd;
- handle.file_descriptor_.auto_close = false;
- handle.guid_ = UnguessableToken::Create();
- handle.size_ = size;
- return handle;
-}
-
-int SharedMemoryHandle::GetHandle() const {
- DCHECK(IsValid());
- return file_descriptor_.fd;
-}
-
-bool SharedMemoryHandle::IsValid() const {
- return file_descriptor_.fd >= 0;
-}
-
-void SharedMemoryHandle::Close() const {
- DCHECK(IsValid());
- if (IGNORE_EINTR(close(file_descriptor_.fd)) < 0)
- PLOG(ERROR) << "close";
-}
-
-int SharedMemoryHandle::Release() {
- int old_fd = file_descriptor_.fd;
- file_descriptor_.fd = -1;
- return old_fd;
-}
-
-SharedMemoryHandle SharedMemoryHandle::Duplicate() const {
- DCHECK(IsValid());
- SharedMemoryHandle result;
- int duped_handle = HANDLE_EINTR(dup(file_descriptor_.fd));
- if (duped_handle >= 0) {
- result = SharedMemoryHandle(FileDescriptor(duped_handle, true), GetSize(),
- GetGUID());
- if (IsReadOnly())
- result.SetReadOnly();
- }
- return result;
-}
-
-void SharedMemoryHandle::SetOwnershipPassesToIPC(bool ownership_passes) {
- file_descriptor_.auto_close = ownership_passes;
-}
-
-bool SharedMemoryHandle::OwnershipPassesToIPC() const {
- return file_descriptor_.auto_close;
-}
-
-bool SharedMemoryHandle::IsRegionReadOnly() const {
- int prot = GetAshmemRegionProtectionMask(file_descriptor_.fd);
- return (prot >= 0 && (prot & PROT_WRITE) == 0);
-}
-
-bool SharedMemoryHandle::SetRegionReadOnly() const {
- int fd = file_descriptor_.fd;
- int prot = GetAshmemRegionProtectionMask(fd);
- if (prot < 0)
- return false;
-
- if ((prot & PROT_WRITE) == 0) {
- // Region is already read-only.
- return true;
- }
-
- prot &= ~PROT_WRITE;
- int ret = ashmem_set_prot_region(fd, prot);
- if (ret != 0) {
- DPLOG(ERROR) << "ashmem_set_prot_region";
- return false;
- }
- return true;
-}
-
-} // namespace base
diff --git a/base/memory/shared_memory_handle_fuchsia.cc b/base/memory/shared_memory_handle_fuchsia.cc
deleted file mode 100644
index b559bac..0000000
--- a/base/memory/shared_memory_handle_fuchsia.cc
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/shared_memory_handle.h"
-
-#include <zircon/syscalls.h>
-
-#include "base/logging.h"
-#include "base/unguessable_token.h"
-#include "starboard/types.h"
-
-namespace base {
-
-SharedMemoryHandle::SharedMemoryHandle() {}
-
-SharedMemoryHandle::SharedMemoryHandle(zx_handle_t h,
- size_t size,
- const base::UnguessableToken& guid)
- : handle_(h), guid_(guid), size_(size) {}
-
-void SharedMemoryHandle::Close() const {
- DCHECK(handle_ != ZX_HANDLE_INVALID);
- zx_handle_close(handle_);
-}
-
-bool SharedMemoryHandle::IsValid() const {
- return handle_ != ZX_HANDLE_INVALID;
-}
-
-SharedMemoryHandle SharedMemoryHandle::Duplicate() const {
- zx_handle_t duped_handle;
- zx_status_t status =
- zx_handle_duplicate(handle_, ZX_RIGHT_SAME_RIGHTS, &duped_handle);
- if (status != ZX_OK)
- return SharedMemoryHandle();
-
- SharedMemoryHandle handle(duped_handle, GetSize(), GetGUID());
- handle.SetOwnershipPassesToIPC(true);
- return handle;
-}
-
-zx_handle_t SharedMemoryHandle::GetHandle() const {
- return handle_;
-}
-
-void SharedMemoryHandle::SetOwnershipPassesToIPC(bool ownership_passes) {
- ownership_passes_to_ipc_ = ownership_passes;
-}
-
-bool SharedMemoryHandle::OwnershipPassesToIPC() const {
- return ownership_passes_to_ipc_;
-}
-
-} // namespace base
diff --git a/base/memory/shared_memory_handle_mac.cc b/base/memory/shared_memory_handle_mac.cc
deleted file mode 100644
index 4db9edb..0000000
--- a/base/memory/shared_memory_handle_mac.cc
+++ /dev/null
@@ -1,154 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/shared_memory_handle.h"
-
-#include <mach/mach_vm.h>
-#include <sys/mman.h>
-#include <unistd.h>
-
-#include "base/mac/mac_util.h"
-#include "base/mac/mach_logging.h"
-#include "base/posix/eintr_wrapper.h"
-#include "base/unguessable_token.h"
-#include "starboard/types.h"
-
-namespace base {
-
-SharedMemoryHandle::SharedMemoryHandle() {}
-
-SharedMemoryHandle::SharedMemoryHandle(
- const base::FileDescriptor& file_descriptor,
- size_t size,
- const base::UnguessableToken& guid)
- : type_(POSIX),
- file_descriptor_(file_descriptor),
- guid_(guid),
- size_(size) {}
-
-SharedMemoryHandle::SharedMemoryHandle(mach_vm_size_t size,
- const base::UnguessableToken& guid) {
- type_ = MACH;
- mach_port_t named_right;
- kern_return_t kr = mach_make_memory_entry_64(
- mach_task_self(),
- &size,
- 0, // Address.
- MAP_MEM_NAMED_CREATE | VM_PROT_READ | VM_PROT_WRITE,
- &named_right,
- MACH_PORT_NULL); // Parent handle.
- if (kr != KERN_SUCCESS) {
- memory_object_ = MACH_PORT_NULL;
- return;
- }
-
- memory_object_ = named_right;
- size_ = size;
- ownership_passes_to_ipc_ = false;
- guid_ = guid;
-}
-
-SharedMemoryHandle::SharedMemoryHandle(mach_port_t memory_object,
- mach_vm_size_t size,
- const base::UnguessableToken& guid)
- : type_(MACH),
- memory_object_(memory_object),
- ownership_passes_to_ipc_(false),
- guid_(guid),
- size_(size) {}
-
-SharedMemoryHandle SharedMemoryHandle::Duplicate() const {
- switch (type_) {
- case POSIX: {
- if (!IsValid())
- return SharedMemoryHandle();
- int duped_fd = HANDLE_EINTR(dup(file_descriptor_.fd));
- if (duped_fd < 0)
- return SharedMemoryHandle();
- return SharedMemoryHandle(FileDescriptor(duped_fd, true), size_, guid_);
- }
- case MACH: {
- if (!IsValid())
- return SharedMemoryHandle();
-
- // Increment the ref count.
- kern_return_t kr = mach_port_mod_refs(mach_task_self(), memory_object_,
- MACH_PORT_RIGHT_SEND, 1);
- DCHECK_EQ(kr, KERN_SUCCESS);
- SharedMemoryHandle handle(*this);
- handle.SetOwnershipPassesToIPC(true);
- return handle;
- }
- }
-}
-
-bool SharedMemoryHandle::IsValid() const {
- switch (type_) {
- case POSIX:
- return file_descriptor_.fd >= 0;
- case MACH:
- return memory_object_ != MACH_PORT_NULL;
- }
-}
-
-mach_port_t SharedMemoryHandle::GetMemoryObject() const {
- DCHECK_EQ(type_, MACH);
- return memory_object_;
-}
-
-bool SharedMemoryHandle::MapAt(off_t offset,
- size_t bytes,
- void** memory,
- bool read_only) {
- DCHECK(IsValid());
- switch (type_) {
- case SharedMemoryHandle::POSIX:
- *memory = mmap(nullptr, bytes, PROT_READ | (read_only ? 0 : PROT_WRITE),
- MAP_SHARED, file_descriptor_.fd, offset);
- return *memory != MAP_FAILED;
- case SharedMemoryHandle::MACH:
- kern_return_t kr = mach_vm_map(
- mach_task_self(),
- reinterpret_cast<mach_vm_address_t*>(memory), // Output parameter
- bytes,
- 0, // Alignment mask
- VM_FLAGS_ANYWHERE,
- memory_object_,
- offset,
- FALSE, // Copy
- VM_PROT_READ | (read_only ? 0 : VM_PROT_WRITE), // Current protection
- VM_PROT_WRITE | VM_PROT_READ | VM_PROT_IS_MASK, // Maximum protection
- VM_INHERIT_NONE);
- return kr == KERN_SUCCESS;
- }
-}
-
-void SharedMemoryHandle::Close() const {
- if (!IsValid())
- return;
-
- switch (type_) {
- case POSIX:
- if (IGNORE_EINTR(close(file_descriptor_.fd)) < 0)
- DPLOG(ERROR) << "Error closing fd";
- break;
- case MACH:
- kern_return_t kr = mach_port_deallocate(mach_task_self(), memory_object_);
- if (kr != KERN_SUCCESS)
- MACH_DLOG(ERROR, kr) << "Error deallocating mach port";
- break;
- }
-}
-
-void SharedMemoryHandle::SetOwnershipPassesToIPC(bool ownership_passes) {
- DCHECK_EQ(type_, MACH);
- ownership_passes_to_ipc_ = ownership_passes;
-}
-
-bool SharedMemoryHandle::OwnershipPassesToIPC() const {
- DCHECK_EQ(type_, MACH);
- return ownership_passes_to_ipc_;
-}
-
-} // namespace base
diff --git a/base/memory/shared_memory_handle_posix.cc b/base/memory/shared_memory_handle_posix.cc
deleted file mode 100644
index 852e6a2..0000000
--- a/base/memory/shared_memory_handle_posix.cc
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/shared_memory_handle.h"
-
-#include <unistd.h>
-
-#include "base/logging.h"
-#include "base/posix/eintr_wrapper.h"
-#include "base/unguessable_token.h"
-#include "starboard/types.h"
-
-namespace base {
-
-SharedMemoryHandle::SharedMemoryHandle() = default;
-
-SharedMemoryHandle::SharedMemoryHandle(
- const base::FileDescriptor& file_descriptor,
- size_t size,
- const base::UnguessableToken& guid)
- : file_descriptor_(file_descriptor), guid_(guid), size_(size) {}
-
-// static
-SharedMemoryHandle SharedMemoryHandle::ImportHandle(int fd, size_t size) {
- SharedMemoryHandle handle;
- handle.file_descriptor_.fd = fd;
- handle.file_descriptor_.auto_close = false;
- handle.guid_ = UnguessableToken::Create();
- handle.size_ = size;
- return handle;
-}
-
-int SharedMemoryHandle::GetHandle() const {
- return file_descriptor_.fd;
-}
-
-bool SharedMemoryHandle::IsValid() const {
- return file_descriptor_.fd >= 0;
-}
-
-void SharedMemoryHandle::Close() const {
- if (IGNORE_EINTR(close(file_descriptor_.fd)) < 0)
- PLOG(ERROR) << "close";
-}
-
-int SharedMemoryHandle::Release() {
- int old_fd = file_descriptor_.fd;
- file_descriptor_.fd = -1;
- return old_fd;
-}
-
-SharedMemoryHandle SharedMemoryHandle::Duplicate() const {
- if (!IsValid())
- return SharedMemoryHandle();
-
- int duped_handle = HANDLE_EINTR(dup(file_descriptor_.fd));
- if (duped_handle < 0)
- return SharedMemoryHandle();
- return SharedMemoryHandle(FileDescriptor(duped_handle, true), GetSize(),
- GetGUID());
-}
-
-void SharedMemoryHandle::SetOwnershipPassesToIPC(bool ownership_passes) {
- file_descriptor_.auto_close = ownership_passes;
-}
-
-bool SharedMemoryHandle::OwnershipPassesToIPC() const {
- return file_descriptor_.auto_close;
-}
-
-} // namespace base
diff --git a/base/memory/shared_memory_handle_win.cc b/base/memory/shared_memory_handle_win.cc
deleted file mode 100644
index c8339ab..0000000
--- a/base/memory/shared_memory_handle_win.cc
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/shared_memory_handle.h"
-
-#include "base/logging.h"
-#include "base/unguessable_token.h"
-
-#include <windows.h>
-
-#include "starboard/types.h"
-
-namespace base {
-
-SharedMemoryHandle::SharedMemoryHandle() {}
-
-SharedMemoryHandle::SharedMemoryHandle(HANDLE h,
- size_t size,
- const base::UnguessableToken& guid)
- : handle_(h), guid_(guid), size_(size) {}
-
-void SharedMemoryHandle::Close() const {
- DCHECK(handle_ != nullptr);
- ::CloseHandle(handle_);
-}
-
-bool SharedMemoryHandle::IsValid() const {
- return handle_ != nullptr;
-}
-
-SharedMemoryHandle SharedMemoryHandle::Duplicate() const {
- HANDLE duped_handle;
- ProcessHandle process = GetCurrentProcess();
- BOOL success = ::DuplicateHandle(process, handle_, process, &duped_handle, 0,
- FALSE, DUPLICATE_SAME_ACCESS);
- if (!success)
- return SharedMemoryHandle();
-
- base::SharedMemoryHandle handle(duped_handle, GetSize(), GetGUID());
- handle.SetOwnershipPassesToIPC(true);
- return handle;
-}
-
-HANDLE SharedMemoryHandle::GetHandle() const {
- return handle_;
-}
-
-void SharedMemoryHandle::SetOwnershipPassesToIPC(bool ownership_passes) {
- ownership_passes_to_ipc_ = ownership_passes;
-}
-
-bool SharedMemoryHandle::OwnershipPassesToIPC() const {
- return ownership_passes_to_ipc_;
-}
-
-} // namespace base
diff --git a/base/memory/shared_memory_helper.cc b/base/memory/shared_memory_helper.cc
deleted file mode 100644
index 25a3059..0000000
--- a/base/memory/shared_memory_helper.cc
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/shared_memory_helper.h"
-
-#if defined(OS_CHROMEOS)
-#include <sys/resource.h>
-#include <sys/time.h>
-
-#include "base/debug/alias.h"
-#endif // defined(OS_CHROMEOS)
-
-#include "base/threading/thread_restrictions.h"
-#include "starboard/common/string.h"
-#include "starboard/memory.h"
-#include "starboard/types.h"
-
-namespace base {
-
-struct ScopedPathUnlinkerTraits {
- static const FilePath* InvalidValue() { return nullptr; }
-
- static void Free(const FilePath* path) {
- if (unlink(path->value().c_str()))
- PLOG(WARNING) << "unlink";
- }
-};
-
-// Unlinks the FilePath when the object is destroyed.
-using ScopedPathUnlinker =
- ScopedGeneric<const FilePath*, ScopedPathUnlinkerTraits>;
-
-#if !defined(OS_ANDROID)
-bool CreateAnonymousSharedMemory(const SharedMemoryCreateOptions& options,
- ScopedFD* fd,
- ScopedFD* readonly_fd,
- FilePath* path) {
-#if defined(OS_LINUX)
- // It doesn't make sense to have a open-existing private piece of shmem
- DCHECK(!options.open_existing_deprecated);
-#endif // defined(OS_LINUX)
- // Q: Why not use the shm_open() etc. APIs?
- // A: Because they're limited to 4mb on OS X. FFFFFFFUUUUUUUUUUU
- FilePath directory;
- ScopedPathUnlinker path_unlinker;
- if (!GetShmemTempDir(options.executable, &directory))
- return false;
-
- fd->reset(base::CreateAndOpenFdForTemporaryFileInDir(directory, path));
-
- if (!fd->is_valid())
- return false;
-
- // Deleting the file prevents anyone else from mapping it in (making it
- // private), and prevents the need for cleanup (once the last fd is
- // closed, it is truly freed).
- path_unlinker.reset(path);
-
- if (options.share_read_only) {
- // Also open as readonly so that we can GetReadOnlyHandle.
- readonly_fd->reset(HANDLE_EINTR(open(path->value().c_str(), O_RDONLY)));
- if (!readonly_fd->is_valid()) {
- DPLOG(ERROR) << "open(\"" << path->value() << "\", O_RDONLY) failed";
- fd->reset();
- return false;
- }
- }
- return true;
-}
-
-bool PrepareMapFile(ScopedFD fd,
- ScopedFD readonly_fd,
- int* mapped_file,
- int* readonly_mapped_file) {
- DCHECK_EQ(-1, *mapped_file);
- DCHECK_EQ(-1, *readonly_mapped_file);
- if (!fd.is_valid())
- return false;
-
- // This function theoretically can block on the disk, but realistically
- // the temporary files we create will just go into the buffer cache
- // and be deleted before they ever make it out to disk.
- base::ThreadRestrictions::ScopedAllowIO allow_io;
-
- if (readonly_fd.is_valid()) {
- struct stat st = {};
- if (fstat(fd.get(), &st))
- NOTREACHED();
-
- struct stat readonly_st = {};
- if (fstat(readonly_fd.get(), &readonly_st))
- NOTREACHED();
- if (st.st_dev != readonly_st.st_dev || st.st_ino != readonly_st.st_ino) {
- LOG(ERROR) << "writable and read-only inodes don't match; bailing";
- return false;
- }
- }
-
- *mapped_file = HANDLE_EINTR(dup(fd.get()));
- if (*mapped_file == -1) {
- NOTREACHED() << "Call to dup failed, errno=" << errno;
-
-#if defined(OS_CHROMEOS)
- if (errno == EMFILE) {
- // We're out of file descriptors and are probably about to crash somewhere
- // else in Chrome anyway. Let's collect what FD information we can and
- // crash.
- // Added for debugging crbug.com/733718
- int original_fd_limit = 16384;
- struct rlimit rlim;
- if (getrlimit(RLIMIT_NOFILE, &rlim) == 0) {
- original_fd_limit = rlim.rlim_cur;
- if (rlim.rlim_max > rlim.rlim_cur) {
- // Increase fd limit so breakpad has a chance to write a minidump.
- rlim.rlim_cur = rlim.rlim_max;
- if (setrlimit(RLIMIT_NOFILE, &rlim) != 0) {
- PLOG(ERROR) << "setrlimit() failed";
- }
- }
- } else {
- PLOG(ERROR) << "getrlimit() failed";
- }
-
- const char kFileDataMarker[] = "FDATA";
- char buf[PATH_MAX];
- char fd_path[PATH_MAX];
- char crash_buffer[32 * 1024] = {0};
- char* crash_ptr = crash_buffer;
- base::debug::Alias(crash_buffer);
-
- // Put a marker at the start of our data so we can confirm where it
- // begins.
- crash_ptr = strncpy(crash_ptr, kFileDataMarker,
- SbStringGetLength(kFileDataMarker));
- for (int i = original_fd_limit; i >= 0; --i) {
- memset(buf, 0, arraysize(buf));
- memset(fd_path, 0, arraysize(fd_path));
- snprintf(fd_path, arraysize(fd_path) - 1, "/proc/self/fd/%d", i);
- ssize_t count = readlink(fd_path, buf, arraysize(buf) - 1);
- if (count < 0) {
- PLOG(ERROR) << "readlink failed for: " << fd_path;
- continue;
- }
-
- if (crash_ptr + count + 1 < crash_buffer + arraysize(crash_buffer)) {
- crash_ptr = strncpy(crash_ptr, buf, count + 1);
- }
- LOG(ERROR) << i << ": " << buf;
- }
- LOG(FATAL) << "Logged for file descriptor exhaustion, crashing now";
- }
-#endif // defined(OS_CHROMEOS)
- }
- *readonly_mapped_file = readonly_fd.release();
-
- return true;
-}
-#endif // !defined(OS_ANDROID)
-
-} // namespace base
diff --git a/base/memory/shared_memory_helper.h b/base/memory/shared_memory_helper.h
deleted file mode 100644
index 53dde90..0000000
--- a/base/memory/shared_memory_helper.h
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MEMORY_SHARED_MEMORY_HELPER_H_
-#define BASE_MEMORY_SHARED_MEMORY_HELPER_H_
-
-#include "base/memory/shared_memory.h"
-#include "build/build_config.h"
-
-#include <fcntl.h>
-
-#include "starboard/types.h"
-
-namespace base {
-
-#if !defined(OS_ANDROID)
-// Makes a temporary file, fdopens it, and then unlinks it. |fd| is populated
-// with the opened fd. |readonly_fd| is populated with the opened fd if
-// options.share_read_only is true. |path| is populated with the location of
-// the file before it was unlinked.
-// Returns false if there's an unhandled failure.
-bool CreateAnonymousSharedMemory(const SharedMemoryCreateOptions& options,
- ScopedFD* fd,
- ScopedFD* readonly_fd,
- FilePath* path);
-
-// Takes the outputs of CreateAnonymousSharedMemory and maps them properly to
-// |mapped_file| or |readonly_mapped_file|, depending on which one is populated.
-bool PrepareMapFile(ScopedFD fd,
- ScopedFD readonly_fd,
- int* mapped_file,
- int* readonly_mapped_file);
-#endif // !defined(OS_ANDROID)
-
-} // namespace base
-
-#endif // BASE_MEMORY_SHARED_MEMORY_HELPER_H_
diff --git a/base/memory/shared_memory_hooks.h b/base/memory/shared_memory_hooks.h
new file mode 100644
index 0000000..0e72076
--- /dev/null
+++ b/base/memory/shared_memory_hooks.h
@@ -0,0 +1,45 @@
+// Copyright 2019 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_SHARED_MEMORY_HOOKS_H_
+#define BASE_MEMORY_SHARED_MEMORY_HOOKS_H_
+
+#include "base/memory/read_only_shared_memory_region.h"
+#include "base/memory/unsafe_shared_memory_region.h"
+#include "base/memory/writable_shared_memory_region.h"
+
+namespace mojo {
+class SharedMemoryUtils;
+namespace core::ipcz_driver {
+class BaseSharedMemoryService;
+}
+} // namespace mojo
+
+namespace base {
+
+class SharedMemoryHooks {
+ public:
+ SharedMemoryHooks() = delete;
+
+ private:
+ friend class SharedMemoryHooksTest;
+ friend mojo::SharedMemoryUtils;
+ friend class mojo::core::ipcz_driver::BaseSharedMemoryService;
+
+ // Allows shared memory region creation to be hooked. Useful for sandboxed
+ // processes that are restricted from invoking the platform APIs directly.
+ // Intentionally private so callers need to be explicitly friended.
+ static void SetCreateHooks(
+ ReadOnlySharedMemoryRegion::CreateFunction* read_only_hook,
+ UnsafeSharedMemoryRegion::CreateFunction* unsafe_hook,
+ WritableSharedMemoryRegion::CreateFunction* writable_hook) {
+ ReadOnlySharedMemoryRegion::set_create_hook(read_only_hook);
+ UnsafeSharedMemoryRegion::set_create_hook(unsafe_hook);
+ WritableSharedMemoryRegion::set_create_hook(writable_hook);
+ }
+};
+
+} // namespace base
+
+#endif // BASE_MEMORY_SHARED_MEMORY_HOOKS_H_
diff --git a/base/memory/shared_memory_hooks_unittest.cc b/base/memory/shared_memory_hooks_unittest.cc
new file mode 100644
index 0000000..8ea4864
--- /dev/null
+++ b/base/memory/shared_memory_hooks_unittest.cc
@@ -0,0 +1,85 @@
+// Copyright 2019 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory_hooks.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
+
+namespace base {
+
+class SharedMemoryHooksTest : public ::testing::Test {
+ protected:
+ void TearDown() override { SetCreateHooks(nullptr, nullptr, nullptr); }
+
+ void SetCreateHooks(
+ ReadOnlySharedMemoryRegion::CreateFunction* read_only_hook,
+ UnsafeSharedMemoryRegion::CreateFunction* unsafe_hook,
+ WritableSharedMemoryRegion::CreateFunction* writable_hook) {
+ SharedMemoryHooks::SetCreateHooks(read_only_hook, unsafe_hook,
+ writable_hook);
+ }
+};
+
+absl::optional<size_t> requested_read_only_shmem_size;
+absl::optional<size_t> requested_unsafe_shmem_size;
+absl::optional<size_t> requested_writable_shmem_size;
+
+MappedReadOnlyRegion ReadOnlyShmemCreateHook(size_t size, SharedMemoryMapper* mapper) {
+ requested_read_only_shmem_size = size;
+ return {};
+}
+
+UnsafeSharedMemoryRegion UnsafeShmemCreateHook(size_t size) {
+ requested_unsafe_shmem_size = size;
+ return {};
+}
+
+WritableSharedMemoryRegion WritableShmemCreateHook(size_t size) {
+ requested_writable_shmem_size = size;
+ return {};
+}
+
+TEST_F(SharedMemoryHooksTest, Basic) {
+ {
+ auto region = ReadOnlySharedMemoryRegion::Create(3);
+ EXPECT_TRUE(region.IsValid());
+ EXPECT_FALSE(requested_read_only_shmem_size.has_value());
+ }
+
+ {
+ auto region = UnsafeSharedMemoryRegion::Create(25);
+ EXPECT_TRUE(region.IsValid());
+ EXPECT_FALSE(requested_unsafe_shmem_size.has_value());
+ }
+
+ {
+ auto region = WritableSharedMemoryRegion::Create(777);
+ EXPECT_TRUE(region.IsValid());
+ EXPECT_FALSE(requested_writable_shmem_size.has_value());
+ }
+
+ SetCreateHooks(&ReadOnlyShmemCreateHook, &UnsafeShmemCreateHook,
+ &WritableShmemCreateHook);
+
+ {
+ auto region = ReadOnlySharedMemoryRegion::Create(3);
+ EXPECT_FALSE(region.IsValid());
+ EXPECT_EQ(3u, *requested_read_only_shmem_size);
+ }
+
+ {
+ auto region = UnsafeSharedMemoryRegion::Create(25);
+ EXPECT_FALSE(region.IsValid());
+ EXPECT_EQ(25u, *requested_unsafe_shmem_size);
+ }
+
+ {
+ auto region = WritableSharedMemoryRegion::Create(777);
+ EXPECT_FALSE(region.IsValid());
+ EXPECT_EQ(777u, *requested_writable_shmem_size);
+ }
+}
+
+} // namespace base
diff --git a/base/memory/shared_memory_mac.cc b/base/memory/shared_memory_mac.cc
deleted file mode 100644
index 504732f..0000000
--- a/base/memory/shared_memory_mac.cc
+++ /dev/null
@@ -1,264 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/shared_memory.h"
-
-#include <errno.h>
-#include <mach/mach_vm.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <unistd.h>
-
-#include "base/files/file_util.h"
-#include "base/files/scoped_file.h"
-#include "base/logging.h"
-#include "base/mac/foundation_util.h"
-#include "base/mac/mac_util.h"
-#include "base/mac/scoped_mach_vm.h"
-#include "base/memory/shared_memory_helper.h"
-#include "base/memory/shared_memory_tracker.h"
-#include "base/metrics/field_trial.h"
-#include "base/metrics/histogram_macros.h"
-#include "base/posix/eintr_wrapper.h"
-#include "base/posix/safe_strerror.h"
-#include "base/process/process_metrics.h"
-#include "base/scoped_generic.h"
-#include "base/strings/utf_string_conversions.h"
-#include "base/threading/thread_restrictions.h"
-#include "base/unguessable_token.h"
-#include "build/build_config.h"
-#include "starboard/types.h"
-
-#if defined(OS_IOS)
-#error "MacOS only - iOS uses shared_memory_posix.cc"
-#endif
-
-namespace base {
-
-namespace {
-
-// Returns whether the operation succeeded.
-// |new_handle| is an output variable, populated on success. The caller takes
-// ownership of the underlying memory object.
-// |handle| is the handle to copy.
-// If |handle| is already mapped, |mapped_addr| is its mapped location.
-// Otherwise, |mapped_addr| should be |nullptr|.
-bool MakeMachSharedMemoryHandleReadOnly(SharedMemoryHandle* new_handle,
- SharedMemoryHandle handle,
- void* mapped_addr) {
- if (!handle.IsValid())
- return false;
-
- size_t size = handle.GetSize();
-
- // Map if necessary.
- void* temp_addr = mapped_addr;
- base::mac::ScopedMachVM scoper;
- if (!temp_addr) {
- // Intentionally lower current prot and max prot to |VM_PROT_READ|.
- kern_return_t kr = mach_vm_map(
- mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&temp_addr),
- size, 0, VM_FLAGS_ANYWHERE, handle.GetMemoryObject(), 0, FALSE,
- VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);
- if (kr != KERN_SUCCESS)
- return false;
- scoper.reset(reinterpret_cast<vm_address_t>(temp_addr),
- mach_vm_round_page(size));
- }
-
- // Make new memory object.
- mach_port_t named_right;
- kern_return_t kr = mach_make_memory_entry_64(
- mach_task_self(), reinterpret_cast<memory_object_size_t*>(&size),
- reinterpret_cast<memory_object_offset_t>(temp_addr), VM_PROT_READ,
- &named_right, MACH_PORT_NULL);
- if (kr != KERN_SUCCESS)
- return false;
-
- *new_handle = SharedMemoryHandle(named_right, size, handle.GetGUID());
- return true;
-}
-
-} // namespace
-
-SharedMemory::SharedMemory() {}
-
-SharedMemory::SharedMemory(const SharedMemoryHandle& handle, bool read_only)
- : mapped_memory_mechanism_(SharedMemoryHandle::POSIX),
- shm_(handle),
- read_only_(read_only) {}
-
-SharedMemory::~SharedMemory() {
- Unmap();
- Close();
-}
-
-// static
-bool SharedMemory::IsHandleValid(const SharedMemoryHandle& handle) {
- return handle.IsValid();
-}
-
-// static
-void SharedMemory::CloseHandle(const SharedMemoryHandle& handle) {
- handle.Close();
-}
-
-// static
-size_t SharedMemory::GetHandleLimit() {
- return GetMaxFds();
-}
-
-// static
-SharedMemoryHandle SharedMemory::DuplicateHandle(
- const SharedMemoryHandle& handle) {
- return handle.Duplicate();
-}
-
-// static
-int SharedMemory::GetFdFromSharedMemoryHandle(
- const SharedMemoryHandle& handle) {
- return handle.file_descriptor_.fd;
-}
-
-bool SharedMemory::CreateAndMapAnonymous(size_t size) {
- return CreateAnonymous(size) && Map(size);
-}
-
-// Chromium mostly only uses the unique/private shmem as specified by
-// "name == L"". The exception is in the StatsTable.
-bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
- DCHECK(!shm_.IsValid());
- if (options.size == 0)
- return false;
-
- if (options.size > static_cast<size_t>(std::numeric_limits<int>::max()))
- return false;
-
- if (options.type == SharedMemoryHandle::MACH) {
- shm_ = SharedMemoryHandle(options.size, UnguessableToken::Create());
- requested_size_ = options.size;
- return shm_.IsValid();
- }
-
- // This function theoretically can block on the disk. Both profiling of real
- // users and local instrumentation shows that this is a real problem.
- // https://code.google.com/p/chromium/issues/detail?id=466437
- ThreadRestrictions::ScopedAllowIO allow_io;
-
- ScopedFD fd;
- ScopedFD readonly_fd;
-
- FilePath path;
- bool result = CreateAnonymousSharedMemory(options, &fd, &readonly_fd, &path);
- if (!result)
- return false;
- // Should be guaranteed by CreateAnonymousSharedMemory().
- DCHECK(fd.is_valid());
-
- // Get current size.
- struct stat stat;
- if (fstat(fd.get(), &stat) != 0)
- return false;
- const size_t current_size = stat.st_size;
- if (current_size != options.size) {
- if (HANDLE_EINTR(ftruncate(fd.get(), options.size)) != 0)
- return false;
- }
- requested_size_ = options.size;
-
- int mapped_file = -1;
- int readonly_mapped_file = -1;
- result = PrepareMapFile(std::move(fd), std::move(readonly_fd), &mapped_file,
- &readonly_mapped_file);
- shm_ = SharedMemoryHandle(FileDescriptor(mapped_file, false), options.size,
- UnguessableToken::Create());
- readonly_shm_ =
- SharedMemoryHandle(FileDescriptor(readonly_mapped_file, false),
- options.size, shm_.GetGUID());
- return result;
-}
-
-bool SharedMemory::MapAt(off_t offset, size_t bytes) {
- if (!shm_.IsValid())
- return false;
- if (bytes > static_cast<size_t>(std::numeric_limits<int>::max()))
- return false;
- if (memory_)
- return false;
-
- bool success = shm_.MapAt(offset, bytes, &memory_, read_only_);
- if (success) {
- mapped_size_ = bytes;
- DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(memory_) &
- (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
- mapped_memory_mechanism_ = shm_.type_;
- mapped_id_ = shm_.GetGUID();
- SharedMemoryTracker::GetInstance()->IncrementMemoryUsage(*this);
- } else {
- memory_ = nullptr;
- }
-
- return success;
-}
-
-bool SharedMemory::Unmap() {
- if (!memory_)
- return false;
-
- SharedMemoryTracker::GetInstance()->DecrementMemoryUsage(*this);
- switch (mapped_memory_mechanism_) {
- case SharedMemoryHandle::POSIX:
- munmap(memory_, mapped_size_);
- break;
- case SharedMemoryHandle::MACH:
- mach_vm_deallocate(mach_task_self(),
- reinterpret_cast<mach_vm_address_t>(memory_),
- mapped_size_);
- break;
- }
- memory_ = nullptr;
- mapped_size_ = 0;
- mapped_id_ = UnguessableToken();
- return true;
-}
-
-SharedMemoryHandle SharedMemory::handle() const {
- return shm_;
-}
-
-SharedMemoryHandle SharedMemory::TakeHandle() {
- SharedMemoryHandle dup = DuplicateHandle(handle());
- Unmap();
- Close();
- return dup;
-}
-
-void SharedMemory::Close() {
- shm_.Close();
- shm_ = SharedMemoryHandle();
- if (shm_.type_ == SharedMemoryHandle::POSIX) {
- if (readonly_shm_.IsValid()) {
- readonly_shm_.Close();
- readonly_shm_ = SharedMemoryHandle();
- }
- }
-}
-
-SharedMemoryHandle SharedMemory::GetReadOnlyHandle() const {
- if (shm_.type_ == SharedMemoryHandle::POSIX) {
- // We could imagine re-opening the file from /dev/fd, but that can't make it
- // readonly on Mac: https://codereview.chromium.org/27265002/#msg10.
- CHECK(readonly_shm_.IsValid());
- return readonly_shm_.Duplicate();
- }
-
- DCHECK(shm_.IsValid());
- SharedMemoryHandle new_handle;
- bool success = MakeMachSharedMemoryHandleReadOnly(&new_handle, shm_, memory_);
- if (success)
- new_handle.SetOwnershipPassesToIPC(true);
- return new_handle;
-}
-
-} // namespace base
diff --git a/base/memory/shared_memory_mac_unittest.cc b/base/memory/shared_memory_mac_unittest.cc
deleted file mode 100644
index c476368..0000000
--- a/base/memory/shared_memory_mac_unittest.cc
+++ /dev/null
@@ -1,457 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <mach/mach.h>
-#include <mach/mach_vm.h>
-#include <servers/bootstrap.h>
-
-#include "base/command_line.h"
-#include "base/mac/mac_util.h"
-#include "base/mac/mach_logging.h"
-#include "base/mac/scoped_mach_port.h"
-#include "base/macros.h"
-#include "base/memory/shared_memory.h"
-#include "base/process/process_handle.h"
-#include "base/rand_util.h"
-#include "base/strings/stringprintf.h"
-#include "base/sys_info.h"
-#include "base/test/multiprocess_test.h"
-#include "base/test/test_timeouts.h"
-#include "base/unguessable_token.h"
-#include "starboard/memory.h"
-#include "starboard/types.h"
-#include "testing/multiprocess_func_list.h"
-
-namespace base {
-
-namespace {
-
-// Gets the current and maximum protection levels of the memory region.
-// Returns whether the operation was successful.
-// |current| and |max| are output variables only populated on success.
-bool GetProtections(void* address, size_t size, int* current, int* max) {
- vm_region_info_t region_info;
- mach_vm_address_t mem_address = reinterpret_cast<mach_vm_address_t>(address);
- mach_vm_size_t mem_size = size;
- vm_region_basic_info_64 basic_info;
-
- region_info = reinterpret_cast<vm_region_recurse_info_t>(&basic_info);
- vm_region_flavor_t flavor = VM_REGION_BASIC_INFO_64;
- memory_object_name_t memory_object;
- mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
-
- kern_return_t kr =
- mach_vm_region(mach_task_self(), &mem_address, &mem_size, flavor,
- region_info, &count, &memory_object);
- if (kr != KERN_SUCCESS) {
- MACH_LOG(ERROR, kr) << "Failed to get region info.";
- return false;
- }
-
- *current = basic_info.protection;
- *max = basic_info.max_protection;
- return true;
-}
-
-// Creates a new SharedMemory with the given |size|, filled with 'a'.
-std::unique_ptr<SharedMemory> CreateSharedMemory(int size) {
- SharedMemoryHandle shm(size, UnguessableToken::Create());
- if (!shm.IsValid()) {
- LOG(ERROR) << "Failed to make SharedMemoryHandle";
- return nullptr;
- }
- std::unique_ptr<SharedMemory> shared_memory(new SharedMemory(shm, false));
- shared_memory->Map(size);
- memset(shared_memory->memory(), 'a', size);
- return shared_memory;
-}
-
-static const std::string g_service_switch_name = "service_name";
-
-// Structs used to pass a mach port from client to server.
-struct MachSendPortMessage {
- mach_msg_header_t header;
- mach_msg_body_t body;
- mach_msg_port_descriptor_t data;
-};
-struct MachReceivePortMessage {
- mach_msg_header_t header;
- mach_msg_body_t body;
- mach_msg_port_descriptor_t data;
- mach_msg_trailer_t trailer;
-};
-
-// Makes the current process into a Mach Server with the given |service_name|.
-mach_port_t BecomeMachServer(const char* service_name) {
- mach_port_t port;
- kern_return_t kr = bootstrap_check_in(bootstrap_port, service_name, &port);
- MACH_CHECK(kr == KERN_SUCCESS, kr) << "BecomeMachServer";
- return port;
-}
-
-// Returns the mach port for the Mach Server with the given |service_name|.
-mach_port_t LookupServer(const char* service_name) {
- mach_port_t server_port;
- kern_return_t kr =
- bootstrap_look_up(bootstrap_port, service_name, &server_port);
- MACH_CHECK(kr == KERN_SUCCESS, kr) << "LookupServer";
- return server_port;
-}
-
-mach_port_t MakeReceivingPort() {
- mach_port_t client_port;
- kern_return_t kr =
- mach_port_allocate(mach_task_self(), // our task is acquiring
- MACH_PORT_RIGHT_RECEIVE, // a new receive right
- &client_port); // with this name
- MACH_CHECK(kr == KERN_SUCCESS, kr) << "MakeReceivingPort";
- return client_port;
-}
-
-// Blocks until a mach message is sent to |server_port|. This mach message
-// must contain a mach port. Returns that mach port.
-mach_port_t ReceiveMachPort(mach_port_t port_to_listen_on) {
- MachReceivePortMessage recv_msg;
- mach_msg_header_t* recv_hdr = &(recv_msg.header);
- recv_hdr->msgh_local_port = port_to_listen_on;
- recv_hdr->msgh_size = sizeof(recv_msg);
- kern_return_t kr =
- mach_msg(recv_hdr, // message buffer
- MACH_RCV_MSG, // option indicating service
- 0, // send size
- recv_hdr->msgh_size, // size of header + body
- port_to_listen_on, // receive name
- MACH_MSG_TIMEOUT_NONE, // no timeout, wait forever
- MACH_PORT_NULL); // no notification port
- MACH_CHECK(kr == KERN_SUCCESS, kr) << "ReceiveMachPort";
- mach_port_t other_task_port = recv_msg.data.name;
- return other_task_port;
-}
-
-// Passes a copy of the send right of |port_to_send| to |receiving_port|.
-void SendMachPort(mach_port_t receiving_port,
- mach_port_t port_to_send,
- int disposition) {
- MachSendPortMessage send_msg;
- mach_msg_header_t* send_hdr;
- send_hdr = &(send_msg.header);
- send_hdr->msgh_bits =
- MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0) | MACH_MSGH_BITS_COMPLEX;
- send_hdr->msgh_size = sizeof(send_msg);
- send_hdr->msgh_remote_port = receiving_port;
- send_hdr->msgh_local_port = MACH_PORT_NULL;
- send_hdr->msgh_reserved = 0;
- send_hdr->msgh_id = 0;
- send_msg.body.msgh_descriptor_count = 1;
- send_msg.data.name = port_to_send;
- send_msg.data.disposition = disposition;
- send_msg.data.type = MACH_MSG_PORT_DESCRIPTOR;
- int kr = mach_msg(send_hdr, // message buffer
- MACH_SEND_MSG, // option indicating send
- send_hdr->msgh_size, // size of header + body
- 0, // receive limit
- MACH_PORT_NULL, // receive name
- MACH_MSG_TIMEOUT_NONE, // no timeout, wait forever
- MACH_PORT_NULL); // no notification port
- MACH_CHECK(kr == KERN_SUCCESS, kr) << "SendMachPort";
-}
-
-std::string CreateRandomServiceName() {
- return StringPrintf("SharedMemoryMacMultiProcessTest.%llu", RandUint64());
-}
-
-// Sets up the mach communication ports with the server. Returns a port to which
-// the server will send mach objects.
-mach_port_t CommonChildProcessSetUp() {
- CommandLine cmd_line = *CommandLine::ForCurrentProcess();
- std::string service_name =
- cmd_line.GetSwitchValueASCII(g_service_switch_name);
- mac::ScopedMachSendRight server_port(LookupServer(service_name.c_str()));
- mach_port_t client_port = MakeReceivingPort();
-
- // Send the port that this process is listening on to the server.
- SendMachPort(server_port.get(), client_port, MACH_MSG_TYPE_MAKE_SEND);
- return client_port;
-}
-
-// The number of active names in the current task's port name space.
-mach_msg_type_number_t GetActiveNameCount() {
- mach_port_name_array_t name_array;
- mach_msg_type_number_t names_count;
- mach_port_type_array_t type_array;
- mach_msg_type_number_t types_count;
- kern_return_t kr = mach_port_names(mach_task_self(), &name_array,
- &names_count, &type_array, &types_count);
- MACH_CHECK(kr == KERN_SUCCESS, kr) << "GetActiveNameCount";
- return names_count;
-}
-
-} // namespace
-
-class SharedMemoryMacMultiProcessTest : public MultiProcessTest {
- public:
- SharedMemoryMacMultiProcessTest() {}
-
- CommandLine MakeCmdLine(const std::string& procname) override {
- CommandLine command_line = MultiProcessTest::MakeCmdLine(procname);
- // Pass the service name to the child process.
- command_line.AppendSwitchASCII(g_service_switch_name, service_name_);
- return command_line;
- }
-
- void SetUpChild(const std::string& name) {
- // Make a random service name so that this test doesn't conflict with other
- // similar tests.
- service_name_ = CreateRandomServiceName();
- server_port_.reset(BecomeMachServer(service_name_.c_str()));
- child_process_ = SpawnChild(name);
- client_port_.reset(ReceiveMachPort(server_port_.get()));
- }
-
- static const int s_memory_size = 99999;
-
- protected:
- std::string service_name_;
-
- // A port on which the main process listens for mach messages from the child
- // process.
- mac::ScopedMachReceiveRight server_port_;
-
- // A port on which the child process listens for mach messages from the main
- // process.
- mac::ScopedMachSendRight client_port_;
-
- base::Process child_process_;
- DISALLOW_COPY_AND_ASSIGN(SharedMemoryMacMultiProcessTest);
-};
-
-// Tests that content written to shared memory in the server process can be read
-// by the child process.
-TEST_F(SharedMemoryMacMultiProcessTest, MachBasedSharedMemory) {
- SetUpChild("MachBasedSharedMemoryClient");
-
- std::unique_ptr<SharedMemory> shared_memory(
- CreateSharedMemory(s_memory_size));
-
- // Send the underlying memory object to the client process.
- SendMachPort(client_port_.get(), shared_memory->handle().GetMemoryObject(),
- MACH_MSG_TYPE_COPY_SEND);
- int rv = -1;
- ASSERT_TRUE(child_process_.WaitForExitWithTimeout(
- TestTimeouts::action_timeout(), &rv));
- EXPECT_EQ(0, rv);
-}
-
-MULTIPROCESS_TEST_MAIN(MachBasedSharedMemoryClient) {
- mac::ScopedMachReceiveRight client_port(CommonChildProcessSetUp());
- // The next mach port should be for a memory object.
- mach_port_t memory_object = ReceiveMachPort(client_port.get());
- SharedMemoryHandle shm(memory_object,
- SharedMemoryMacMultiProcessTest::s_memory_size,
- UnguessableToken::Create());
- SharedMemory shared_memory(shm, false);
- shared_memory.Map(SharedMemoryMacMultiProcessTest::s_memory_size);
- const char* start = static_cast<const char*>(shared_memory.memory());
- for (int i = 0; i < SharedMemoryMacMultiProcessTest::s_memory_size; ++i) {
- DCHECK_EQ(start[i], 'a');
- }
- return 0;
-}
-
-// Tests that mapping shared memory with an offset works correctly.
-TEST_F(SharedMemoryMacMultiProcessTest, MachBasedSharedMemoryWithOffset) {
- SetUpChild("MachBasedSharedMemoryWithOffsetClient");
-
- SharedMemoryHandle shm(s_memory_size, UnguessableToken::Create());
- ASSERT_TRUE(shm.IsValid());
- SharedMemory shared_memory(shm, false);
- shared_memory.Map(s_memory_size);
-
- size_t page_size = SysInfo::VMAllocationGranularity();
- char* start = static_cast<char*>(shared_memory.memory());
- memset(start, 'a', page_size);
- memset(start + page_size, 'b', page_size);
- memset(start + 2 * page_size, 'c', page_size);
-
- // Send the underlying memory object to the client process.
- SendMachPort(
- client_port_.get(), shm.GetMemoryObject(), MACH_MSG_TYPE_COPY_SEND);
- int rv = -1;
- ASSERT_TRUE(child_process_.WaitForExitWithTimeout(
- TestTimeouts::action_timeout(), &rv));
- EXPECT_EQ(0, rv);
-}
-
-MULTIPROCESS_TEST_MAIN(MachBasedSharedMemoryWithOffsetClient) {
- mac::ScopedMachReceiveRight client_port(CommonChildProcessSetUp());
- // The next mach port should be for a memory object.
- mach_port_t memory_object = ReceiveMachPort(client_port.get());
- SharedMemoryHandle shm(memory_object,
- SharedMemoryMacMultiProcessTest::s_memory_size,
- UnguessableToken::Create());
- SharedMemory shared_memory(shm, false);
- size_t page_size = SysInfo::VMAllocationGranularity();
- shared_memory.MapAt(page_size, 2 * page_size);
- const char* start = static_cast<const char*>(shared_memory.memory());
- for (size_t i = 0; i < page_size; ++i) {
- DCHECK_EQ(start[i], 'b');
- }
- for (size_t i = page_size; i < 2 * page_size; ++i) {
- DCHECK_EQ(start[i], 'c');
- }
- return 0;
-}
-
-// Tests that duplication and closing has the right effect on Mach reference
-// counts.
-TEST_F(SharedMemoryMacMultiProcessTest, MachDuplicateAndClose) {
- mach_msg_type_number_t active_name_count = GetActiveNameCount();
-
- // Making a new SharedMemoryHandle increments the name count.
- SharedMemoryHandle shm(s_memory_size, UnguessableToken::Create());
- ASSERT_TRUE(shm.IsValid());
- EXPECT_EQ(active_name_count + 1, GetActiveNameCount());
-
- // Duplicating the SharedMemoryHandle increments the ref count, but doesn't
- // make a new name.
- shm.Duplicate();
- EXPECT_EQ(active_name_count + 1, GetActiveNameCount());
-
- // Closing the SharedMemoryHandle decrements the ref count. The first time has
- // no effect.
- shm.Close();
- EXPECT_EQ(active_name_count + 1, GetActiveNameCount());
-
- // Closing the SharedMemoryHandle decrements the ref count. The second time
- // destroys the port.
- shm.Close();
- EXPECT_EQ(active_name_count, GetActiveNameCount());
-}
-
-// Tests that Mach shared memory can be mapped and unmapped.
-TEST_F(SharedMemoryMacMultiProcessTest, MachUnmapMap) {
- mach_msg_type_number_t active_name_count = GetActiveNameCount();
-
- std::unique_ptr<SharedMemory> shared_memory =
- CreateSharedMemory(s_memory_size);
- ASSERT_TRUE(shared_memory->Unmap());
- ASSERT_TRUE(shared_memory->Map(s_memory_size));
- shared_memory.reset();
- EXPECT_EQ(active_name_count, GetActiveNameCount());
-}
-
-// Tests that passing a SharedMemoryHandle to a SharedMemory object also passes
-// ownership, and that destroying the SharedMemory closes the SharedMemoryHandle
-// as well.
-TEST_F(SharedMemoryMacMultiProcessTest, MachSharedMemoryTakesOwnership) {
- mach_msg_type_number_t active_name_count = GetActiveNameCount();
-
- // Making a new SharedMemoryHandle increments the name count.
- SharedMemoryHandle shm(s_memory_size, UnguessableToken::Create());
- ASSERT_TRUE(shm.IsValid());
- EXPECT_EQ(active_name_count + 1, GetActiveNameCount());
-
- // Name count doesn't change when mapping the memory.
- std::unique_ptr<SharedMemory> shared_memory(new SharedMemory(shm, false));
- shared_memory->Map(s_memory_size);
- EXPECT_EQ(active_name_count + 1, GetActiveNameCount());
-
- // Destroying the SharedMemory object frees the resource.
- shared_memory.reset();
- EXPECT_EQ(active_name_count, GetActiveNameCount());
-}
-
-// Tests that the read-only flag works.
-TEST_F(SharedMemoryMacMultiProcessTest, MachReadOnly) {
- std::unique_ptr<SharedMemory> shared_memory(
- CreateSharedMemory(s_memory_size));
-
- SharedMemoryHandle shm2 = shared_memory->handle().Duplicate();
- ASSERT_TRUE(shm2.IsValid());
- SharedMemory shared_memory2(shm2, true);
- shared_memory2.Map(s_memory_size);
- ASSERT_DEATH(memset(shared_memory2.memory(), 'b', s_memory_size), "");
-}
-
-// Tests that duplication of the underlying handle works.
-TEST_F(SharedMemoryMacMultiProcessTest, MachDuplicate) {
- mach_msg_type_number_t active_name_count = GetActiveNameCount();
-
- {
- std::unique_ptr<SharedMemory> shared_memory(
- CreateSharedMemory(s_memory_size));
-
- SharedMemoryHandle shm2 = shared_memory->handle().Duplicate();
- ASSERT_TRUE(shm2.IsValid());
- SharedMemory shared_memory2(shm2, true);
- shared_memory2.Map(s_memory_size);
-
- ASSERT_EQ(0, memcmp(shared_memory->memory(),
- shared_memory2.memory(), s_memory_size));
- }
-
- EXPECT_EQ(active_name_count, GetActiveNameCount());
-}
-
-// Tests that the method GetReadOnlyHandle() creates a memory object that
-// is read only.
-TEST_F(SharedMemoryMacMultiProcessTest, MachReadonly) {
- std::unique_ptr<SharedMemory> shared_memory(
- CreateSharedMemory(s_memory_size));
-
- // Check the protection levels.
- int current_prot, max_prot;
- ASSERT_TRUE(GetProtections(shared_memory->memory(),
- shared_memory->mapped_size(), &current_prot,
- &max_prot));
- ASSERT_EQ(VM_PROT_READ | VM_PROT_WRITE, current_prot);
- ASSERT_EQ(VM_PROT_READ | VM_PROT_WRITE, max_prot);
-
- // Make a new memory object.
- SharedMemoryHandle shm2 = shared_memory->GetReadOnlyHandle();
- ASSERT_TRUE(shm2.IsValid());
- EXPECT_EQ(shared_memory->handle().GetGUID(), shm2.GetGUID());
-
- // Mapping with |readonly| set to |false| should fail.
- SharedMemory shared_memory2(shm2, false);
- shared_memory2.Map(s_memory_size);
- ASSERT_EQ(nullptr, shared_memory2.memory());
-
- // Now trying mapping with |readonly| set to |true|.
- SharedMemory shared_memory3(shm2.Duplicate(), true);
- shared_memory3.Map(s_memory_size);
- ASSERT_NE(nullptr, shared_memory3.memory());
-
- // Check the protection levels.
- ASSERT_TRUE(GetProtections(shared_memory3.memory(),
- shared_memory3.mapped_size(), &current_prot,
- &max_prot));
- ASSERT_EQ(VM_PROT_READ, current_prot);
- ASSERT_EQ(VM_PROT_READ, max_prot);
-
- // The memory should still be readonly, since the underlying memory object
- // is readonly.
- ASSERT_DEATH(memset(shared_memory2.memory(), 'b', s_memory_size), "");
-}
-
-// Tests that the method GetReadOnlyHandle() doesn't leak.
-TEST_F(SharedMemoryMacMultiProcessTest, MachReadonlyLeak) {
- mach_msg_type_number_t active_name_count = GetActiveNameCount();
-
- {
- std::unique_ptr<SharedMemory> shared_memory(
- CreateSharedMemory(s_memory_size));
-
- SharedMemoryHandle shm2 = shared_memory->GetReadOnlyHandle();
- ASSERT_TRUE(shm2.IsValid());
-
- // Intentionally map with |readonly| set to |false|.
- SharedMemory shared_memory2(shm2, false);
- shared_memory2.Map(s_memory_size);
- }
-
- EXPECT_EQ(active_name_count, GetActiveNameCount());
-}
-
-} // namespace base
diff --git a/base/memory/shared_memory_mapper.cc b/base/memory/shared_memory_mapper.cc
new file mode 100644
index 0000000..05f7f0c
--- /dev/null
+++ b/base/memory/shared_memory_mapper.cc
@@ -0,0 +1,17 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory_mapper.h"
+
+#include "base/memory/platform_shared_memory_mapper.h"
+
+namespace base {
+
+// static
+SharedMemoryMapper* SharedMemoryMapper::GetDefaultInstance() {
+ static PlatformSharedMemoryMapper instance;
+ return &instance;
+}
+
+} // namespace base
diff --git a/base/memory/shared_memory_mapper.h b/base/memory/shared_memory_mapper.h
new file mode 100644
index 0000000..ab45f37
--- /dev/null
+++ b/base/memory/shared_memory_mapper.h
@@ -0,0 +1,44 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_SHARED_MEMORY_MAPPER_H_
+#define BASE_MEMORY_SHARED_MEMORY_MAPPER_H_
+
+#include "base/base_export.h"
+#include "base/containers/span.h"
+#include "base/memory/platform_shared_memory_handle.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
+
+#include <stdint.h>
+
+namespace base {
+
+// Interface to implement mapping and unmapping of shared memory regions into
+// the virtual address space. The default implementation,
+// |PlatformSharedMemoryMapper| uses the platform-specific APIs to map the
+// region anywhere in the address space. Other implementations can be used for
+// example to always map the regions into an existing address space reservation.
+// Implementations of this interface should generally be statically allocated
+// as SharedMemoryMappings keep a reference to their mapper.
+class BASE_EXPORT SharedMemoryMapper {
+ public:
+ // Returns the default shared memory mapper.
+ static SharedMemoryMapper* GetDefaultInstance();
+
+ // Maps the shared memory region identified through the provided platform
+ // handle into the caller's address space.
+ virtual absl::optional<span<uint8_t>> Map(
+ subtle::PlatformSharedMemoryHandle handle,
+ bool write_allowed,
+ uint64_t offset,
+ size_t size) = 0;
+
+ // Unmaps the specified region of shared memory from the caller's address
+ // space.
+ virtual void Unmap(span<uint8_t> mapping) = 0;
+};
+
+} // namespace base
+
+#endif // BASE_MEMORY_SHARED_MEMORY_MAPPER_H_
diff --git a/base/memory/shared_memory_mapping.cc b/base/memory/shared_memory_mapping.cc
index 0348ff1..0fc4ba5 100644
--- a/base/memory/shared_memory_mapping.cc
+++ b/base/memory/shared_memory_mapping.cc
@@ -1,55 +1,37 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
+// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/shared_memory_mapping.h"
+#include <cstdint>
#include <utility>
+#include "base/bits.h"
#include "base/logging.h"
+#include "base/memory/shared_memory_security_policy.h"
#include "base/memory/shared_memory_tracker.h"
+#include "base/system/sys_info.h"
#include "base/unguessable_token.h"
#include "build/build_config.h"
-#if defined(OS_POSIX)
-#include <sys/mman.h>
-#endif
-
-#if defined(OS_WIN)
-#include <aclapi.h>
-#endif
-
-#if defined(OS_MACOSX) && !defined(OS_IOS)
-#include <mach/mach_vm.h>
-#include "base/mac/mach_logging.h"
-#endif
-
-#if defined(OS_FUCHSIA)
-#include <lib/zx/vmar.h>
-#include "base/fuchsia/fuchsia_logging.h"
-#include "starboard/types.h"
-#endif
-
namespace base {
SharedMemoryMapping::SharedMemoryMapping() = default;
-SharedMemoryMapping::SharedMemoryMapping(SharedMemoryMapping&& mapping)
- : memory_(mapping.memory_),
+SharedMemoryMapping::SharedMemoryMapping(SharedMemoryMapping&& mapping) noexcept
+ : mapped_span_(std::exchange(mapping.mapped_span_, span<uint8_t>())),
size_(mapping.size_),
- mapped_size_(mapping.mapped_size_),
- guid_(mapping.guid_) {
- mapping.memory_ = nullptr;
-}
+ guid_(mapping.guid_),
+ mapper_(mapping.mapper_) {}
SharedMemoryMapping& SharedMemoryMapping::operator=(
- SharedMemoryMapping&& mapping) {
+ SharedMemoryMapping&& mapping) noexcept {
Unmap();
- memory_ = mapping.memory_;
+ mapped_span_ = std::exchange(mapping.mapped_span_, span<uint8_t>());
size_ = mapping.size_;
- mapped_size_ = mapping.mapped_size_;
guid_ = mapping.guid_;
- mapping.memory_ = nullptr;
+ mapper_ = mapping.mapper_;
return *this;
}
@@ -57,11 +39,12 @@
Unmap();
}
-SharedMemoryMapping::SharedMemoryMapping(void* memory,
+SharedMemoryMapping::SharedMemoryMapping(span<uint8_t> mapped_span,
size_t size,
- size_t mapped_size,
- const UnguessableToken& guid)
- : memory_(memory), size_(size), mapped_size_(mapped_size), guid_(guid) {
+ const UnguessableToken& guid,
+ SharedMemoryMapper* mapper)
+ : mapped_span_(mapped_span), size_(size), guid_(guid), mapper_(mapper) {
+ // Note: except on Windows, `mapped_span_.size() == size_`.
SharedMemoryTracker::GetInstance()->IncrementMemoryUsage(*this);
}
@@ -69,48 +52,47 @@
if (!IsValid())
return;
+ SharedMemorySecurityPolicy::ReleaseReservationForMapping(size_);
SharedMemoryTracker::GetInstance()->DecrementMemoryUsage(*this);
-#if defined(OS_WIN)
- if (!UnmapViewOfFile(memory_))
- DPLOG(ERROR) << "UnmapViewOfFile";
-#elif defined(OS_FUCHSIA)
- uintptr_t addr = reinterpret_cast<uintptr_t>(memory_);
- zx_status_t status = zx::vmar::root_self()->unmap(addr, mapped_size_);
- if (status != ZX_OK)
- ZX_DLOG(ERROR, status) << "zx_vmar_unmap";
-#elif defined(OS_MACOSX) && !defined(OS_IOS)
- kern_return_t kr = mach_vm_deallocate(
- mach_task_self(), reinterpret_cast<mach_vm_address_t>(memory_),
- mapped_size_);
- MACH_DLOG_IF(ERROR, kr != KERN_SUCCESS, kr) << "mach_vm_deallocate";
-#else
- if (munmap(memory_, mapped_size_) < 0)
- DPLOG(ERROR) << "munmap";
-#endif
+
+ SharedMemoryMapper* mapper = mapper_;
+ if (!mapper)
+ mapper = SharedMemoryMapper::GetDefaultInstance();
+
+ // The backing mapper expects offset to be aligned to
+ // `SysInfo::VMAllocationGranularity()`, so replicate the alignment that was
+ // done when originally mapping in the region.
+ uint8_t* aligned_data =
+ bits::AlignDown(mapped_span_.data(), SysInfo::VMAllocationGranularity());
+ size_t adjusted_size =
+ mapped_span_.size() +
+ static_cast<size_t>(mapped_span_.data() - aligned_data);
+ span<uint8_t> span_to_unmap = make_span(aligned_data, adjusted_size);
+ mapper->Unmap(span_to_unmap);
}
ReadOnlySharedMemoryMapping::ReadOnlySharedMemoryMapping() = default;
ReadOnlySharedMemoryMapping::ReadOnlySharedMemoryMapping(
- ReadOnlySharedMemoryMapping&&) = default;
+ ReadOnlySharedMemoryMapping&&) noexcept = default;
ReadOnlySharedMemoryMapping& ReadOnlySharedMemoryMapping::operator=(
- ReadOnlySharedMemoryMapping&&) = default;
+ ReadOnlySharedMemoryMapping&&) noexcept = default;
ReadOnlySharedMemoryMapping::ReadOnlySharedMemoryMapping(
- void* address,
+ span<uint8_t> mapped_span,
size_t size,
- size_t mapped_size,
- const UnguessableToken& guid)
- : SharedMemoryMapping(address, size, mapped_size, guid) {}
+ const UnguessableToken& guid,
+ SharedMemoryMapper* mapper)
+ : SharedMemoryMapping(mapped_span, size, guid, mapper) {}
WritableSharedMemoryMapping::WritableSharedMemoryMapping() = default;
WritableSharedMemoryMapping::WritableSharedMemoryMapping(
- WritableSharedMemoryMapping&&) = default;
+ WritableSharedMemoryMapping&&) noexcept = default;
WritableSharedMemoryMapping& WritableSharedMemoryMapping::operator=(
- WritableSharedMemoryMapping&&) = default;
+ WritableSharedMemoryMapping&&) noexcept = default;
WritableSharedMemoryMapping::WritableSharedMemoryMapping(
- void* address,
+ span<uint8_t> mapped_span,
size_t size,
- size_t mapped_size,
- const UnguessableToken& guid)
- : SharedMemoryMapping(address, size, mapped_size, guid) {}
+ const UnguessableToken& guid,
+ SharedMemoryMapper* mapper)
+ : SharedMemoryMapping(mapped_span, size, guid, mapper) {}
} // namespace base
diff --git a/base/memory/shared_memory_mapping.h b/base/memory/shared_memory_mapping.h
index 7c77790..011e1d8 100644
--- a/base/memory/shared_memory_mapping.h
+++ b/base/memory/shared_memory_mapping.h
@@ -1,16 +1,18 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
+// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_SHARED_MEMORY_MAPPING_H_
#define BASE_MEMORY_SHARED_MEMORY_MAPPING_H_
-#if !defined(STARBOARD)
-
#include <cstddef>
+#include <type_traits>
+#include "base/base_export.h"
+#include "base/check.h"
#include "base/containers/span.h"
-#include "base/macros.h"
+#include "base/memory/raw_ptr.h"
+#include "base/memory/shared_memory_mapper.h"
#include "base/unguessable_token.h"
namespace base {
@@ -32,15 +34,18 @@
SharedMemoryMapping();
// Move operations are allowed.
- SharedMemoryMapping(SharedMemoryMapping&& mapping);
- SharedMemoryMapping& operator=(SharedMemoryMapping&& mapping);
+ SharedMemoryMapping(SharedMemoryMapping&& mapping) noexcept;
+ SharedMemoryMapping& operator=(SharedMemoryMapping&& mapping) noexcept;
+
+ SharedMemoryMapping(const SharedMemoryMapping&) = delete;
+ SharedMemoryMapping& operator=(const SharedMemoryMapping&) = delete;
// Unmaps the region if the mapping is valid.
virtual ~SharedMemoryMapping();
// Returns true iff the mapping is valid. False means there is no
// corresponding area of memory.
- bool IsValid() const { return memory_ != nullptr; }
+ bool IsValid() const { return !mapped_span_.empty(); }
// Returns the logical size of the mapping in bytes. This is precisely the
// size requested by whoever created the mapping, and it is always less than
@@ -55,7 +60,7 @@
// constraints. This is undefined for invalid instances.
size_t mapped_size() const {
DCHECK(IsValid());
- return mapped_size_;
+ return mapped_span_.size();
}
// Returns 128-bit GUID of the region this mapping belongs to.
@@ -65,23 +70,23 @@
}
protected:
- SharedMemoryMapping(void* address,
+ SharedMemoryMapping(span<uint8_t> mapped_span,
size_t size,
- size_t mapped_size,
- const UnguessableToken& guid);
- void* raw_memory_ptr() const { return memory_; }
+ const UnguessableToken& guid,
+ SharedMemoryMapper* mapper);
+ void* raw_memory_ptr() const {
+ return reinterpret_cast<void*>(mapped_span_.data());
+ }
private:
friend class SharedMemoryTracker;
void Unmap();
- void* memory_ = nullptr;
+ span<uint8_t> mapped_span_;
size_t size_ = 0;
- size_t mapped_size_ = 0;
UnguessableToken guid_;
-
- DISALLOW_COPY_AND_ASSIGN(SharedMemoryMapping);
+ raw_ptr<SharedMemoryMapper> mapper_ = nullptr;
};
// Class modeling a read-only mapping of a shared memory region into the
@@ -92,18 +97,26 @@
// Default constructor initializes an invalid instance.
ReadOnlySharedMemoryMapping();
- // Move operations are allowed.
- ReadOnlySharedMemoryMapping(ReadOnlySharedMemoryMapping&&);
- ReadOnlySharedMemoryMapping& operator=(ReadOnlySharedMemoryMapping&&);
+ ReadOnlySharedMemoryMapping(const ReadOnlySharedMemoryMapping&) = delete;
+ ReadOnlySharedMemoryMapping& operator=(const ReadOnlySharedMemoryMapping&) =
+ delete;
- // Returns the base address of the mapping. This is read-only memory. This is
- // page-aligned. This is nullptr for invalid instances.
+ // Move operations are allowed.
+ ReadOnlySharedMemoryMapping(ReadOnlySharedMemoryMapping&&) noexcept;
+ ReadOnlySharedMemoryMapping& operator=(
+ ReadOnlySharedMemoryMapping&&) noexcept;
+
+ // Returns the base address of the read-only mapping. Returns nullptr for
+ // invalid instances.
const void* memory() const { return raw_memory_ptr(); }
// Returns a pointer to a page-aligned const T if the mapping is valid and
// large enough to contain a T, or nullptr otherwise.
template <typename T>
const T* GetMemoryAs() const {
+ static_assert(std::is_trivially_copyable<T>::value,
+ "Copying non-trivially-copyable object across memory spaces "
+ "is dangerous");
if (!IsValid())
return nullptr;
if (sizeof(T) > size())
@@ -119,6 +132,9 @@
// page-aligned.
template <typename T>
span<const T> GetMemoryAsSpan() const {
+ static_assert(std::is_trivially_copyable<T>::value,
+ "Copying non-trivially-copyable object across memory spaces "
+ "is dangerous");
if (!IsValid())
return span<const T>();
size_t count = size() / sizeof(T);
@@ -130,6 +146,9 @@
// first element, if any, is guaranteed to be page-aligned.
template <typename T>
span<const T> GetMemoryAsSpan(size_t count) const {
+ static_assert(std::is_trivially_copyable<T>::value,
+ "Copying non-trivially-copyable object across memory spaces "
+ "is dangerous");
if (!IsValid())
return span<const T>();
if (size() / sizeof(T) < count)
@@ -139,12 +158,10 @@
private:
friend class ReadOnlySharedMemoryRegion;
- ReadOnlySharedMemoryMapping(void* address,
+ ReadOnlySharedMemoryMapping(span<uint8_t> mapped_span,
size_t size,
- size_t mapped_size,
- const UnguessableToken& guid);
-
- DISALLOW_COPY_AND_ASSIGN(ReadOnlySharedMemoryMapping);
+ const UnguessableToken& guid,
+ SharedMemoryMapper* mapper);
};
// Class modeling a writable mapping of a shared memory region into the
@@ -155,18 +172,26 @@
// Default constructor initializes an invalid instance.
WritableSharedMemoryMapping();
- // Move operations are allowed.
- WritableSharedMemoryMapping(WritableSharedMemoryMapping&&);
- WritableSharedMemoryMapping& operator=(WritableSharedMemoryMapping&&);
+ WritableSharedMemoryMapping(const WritableSharedMemoryMapping&) = delete;
+ WritableSharedMemoryMapping& operator=(const WritableSharedMemoryMapping&) =
+ delete;
- // Returns the base address of the mapping. This is writable memory. This is
- // page-aligned. This is nullptr for invalid instances.
+ // Move operations are allowed.
+ WritableSharedMemoryMapping(WritableSharedMemoryMapping&&) noexcept;
+ WritableSharedMemoryMapping& operator=(
+ WritableSharedMemoryMapping&&) noexcept;
+
+ // Returns the base address of the writable mapping. Returns nullptr for
+ // invalid instances.
void* memory() const { return raw_memory_ptr(); }
// Returns a pointer to a page-aligned T if the mapping is valid and large
// enough to contain a T, or nullptr otherwise.
template <typename T>
T* GetMemoryAs() const {
+ static_assert(std::is_trivially_copyable<T>::value,
+ "Copying non-trivially-copyable object across memory spaces "
+ "is dangerous");
if (!IsValid())
return nullptr;
if (sizeof(T) > size())
@@ -181,6 +206,9 @@
// The first element, if any, is guaranteed to be page-aligned.
template <typename T>
span<T> GetMemoryAsSpan() const {
+ static_assert(std::is_trivially_copyable<T>::value,
+ "Copying non-trivially-copyable object across memory spaces "
+ "is dangerous");
if (!IsValid())
return span<T>();
size_t count = size() / sizeof(T);
@@ -192,6 +220,9 @@
// element, if any, is guaranteed to be page-aligned.
template <typename T>
span<T> GetMemoryAsSpan(size_t count) const {
+ static_assert(std::is_trivially_copyable<T>::value,
+ "Copying non-trivially-copyable object across memory spaces "
+ "is dangerous");
if (!IsValid())
return span<T>();
if (size() / sizeof(T) < count)
@@ -202,20 +233,17 @@
private:
friend WritableSharedMemoryMapping MapAtForTesting(
subtle::PlatformSharedMemoryRegion* region,
- off_t offset,
+ uint64_t offset,
size_t size);
friend class ReadOnlySharedMemoryRegion;
friend class WritableSharedMemoryRegion;
friend class UnsafeSharedMemoryRegion;
- WritableSharedMemoryMapping(void* address,
+ WritableSharedMemoryMapping(span<uint8_t> mapped_span,
size_t size,
- size_t mapped_size,
- const UnguessableToken& guid);
-
- DISALLOW_COPY_AND_ASSIGN(WritableSharedMemoryMapping);
+ const UnguessableToken& guid,
+ SharedMemoryMapper* mapper);
};
} // namespace base
-#endif // !defined(STARBOARD)
#endif // BASE_MEMORY_SHARED_MEMORY_MAPPING_H_
diff --git a/base/memory/shared_memory_mapping_unittest.cc b/base/memory/shared_memory_mapping_unittest.cc
index 2a56087..83cc3d6 100644
--- a/base/memory/shared_memory_mapping_unittest.cc
+++ b/base/memory/shared_memory_mapping_unittest.cc
@@ -1,15 +1,19 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
+// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/shared_memory_mapping.h"
-#include <algorithm>
+#include <stdint.h>
+
#include <limits>
#include "base/containers/span.h"
#include "base/memory/read_only_shared_memory_region.h"
-#include "starboard/types.h"
+#include "base/memory/writable_shared_memory_region.h"
+#include "base/ranges/algorithm.h"
+#include "build/build_config.h"
+#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
@@ -62,7 +66,7 @@
span<const uint32_t> read_span = read_mapping_.GetMemoryAsSpan<uint32_t>();
ASSERT_EQ(2u, read_span.size());
- std::fill(write_span.begin(), write_span.end(), 0);
+ ranges::fill(write_span, 0);
EXPECT_EQ(0u, read_span[0]);
EXPECT_EQ(0u, read_span[1]);
@@ -87,7 +91,7 @@
span<const uint32_t> read_span_2 = read_mapping_.GetMemoryAsSpan<uint32_t>(1);
ASSERT_EQ(1u, read_span_2.size());
- std::fill(write_span.begin(), write_span.end(), 0);
+ ranges::fill(write_span, 0);
EXPECT_EQ(0u, read_span[0]);
EXPECT_EQ(0u, read_span[1]);
EXPECT_EQ(0u, read_span_2[0]);
@@ -98,7 +102,7 @@
EXPECT_EQ(0x08070605u, read_span[1]);
EXPECT_EQ(0x04030201u, read_span_2[0]);
- std::fill(write_span_2.begin(), write_span_2.end(), 0);
+ ranges::fill(write_span_2, 0);
EXPECT_EQ(0u, read_span[0]);
EXPECT_EQ(0x08070605u, read_span[1]);
EXPECT_EQ(0u, read_span_2[0]);
@@ -142,4 +146,36 @@
.empty());
}
+// TODO(dcheng): This test is temporarily disabled on iOS. iOS devices allow
+// the creation of a 1GB shared memory region, but don't allow the region to be
+// mapped.
+#if !BUILDFLAG(IS_IOS)
+// TODO(crbug.com/1334079) Fix flakiness and re-enable on Linux and ChromeOS.
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+#define MAYBE_TotalMappedSizeLimit DISABLED_TotalMappedSizeLimit
+#else
+#define MAYBE_TotalMappedSizeLimit TotalMappedSizeLimit
+#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+TEST_F(SharedMemoryMappingTest, MAYBE_TotalMappedSizeLimit) {
+ // Nothing interesting to test if the address space isn't 64 bits, since
+ // there's no real limit enforced on 32 bits other than complete address
+ // space exhaustion.
+ // Also exclude NaCl since pointers are 32 bits on all architectures:
+ // https://bugs.chromium.org/p/nativeclient/issues/detail?id=1162
+#if defined(ARCH_CPU_64_BITS) && !BUILDFLAG(IS_NACL)
+ auto region = WritableSharedMemoryRegion::Create(1024 * 1024 * 1024);
+ ASSERT_TRUE(region.IsValid());
+ // The limit is 32GB of mappings on 64-bit platforms, so the final mapping
+ // should fail.
+ std::vector<WritableSharedMemoryMapping> mappings(32);
+ for (size_t i = 0; i < mappings.size(); ++i) {
+ SCOPED_TRACE(i);
+ auto& mapping = mappings[i];
+ mapping = region.Map();
+ EXPECT_EQ(&mapping != &mappings.back(), mapping.IsValid());
+ }
+#endif  // defined(ARCH_CPU_64_BITS) && !BUILDFLAG(IS_NACL)
+}
+#endif // !BUILDFLAG(IS_IOS)
+
} // namespace base
diff --git a/base/memory/shared_memory_nacl.cc b/base/memory/shared_memory_nacl.cc
deleted file mode 100644
index 981e7a6..0000000
--- a/base/memory/shared_memory_nacl.cc
+++ /dev/null
@@ -1,138 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/shared_memory.h"
-
-#include <errno.h>
-#include <fcntl.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <unistd.h>
-
-#include <limits>
-
-#include "base/logging.h"
-#include "base/memory/shared_memory_tracker.h"
-#include "starboard/types.h"
-
-namespace base {
-
-SharedMemory::SharedMemory()
- : mapped_size_(0), memory_(NULL), read_only_(false), requested_size_(0) {}
-
-SharedMemory::SharedMemory(const SharedMemoryHandle& handle, bool read_only)
- : shm_(handle),
- mapped_size_(0),
- memory_(NULL),
- read_only_(read_only),
- requested_size_(0) {}
-
-SharedMemory::~SharedMemory() {
- Unmap();
- Close();
-}
-
-// static
-bool SharedMemory::IsHandleValid(const SharedMemoryHandle& handle) {
- return handle.IsValid();
-}
-
-// static
-void SharedMemory::CloseHandle(const SharedMemoryHandle& handle) {
- DCHECK(handle.IsValid());
- handle.Close();
-}
-
-// static
-SharedMemoryHandle SharedMemory::DuplicateHandle(
- const SharedMemoryHandle& handle) {
- return handle.Duplicate();
-}
-
-bool SharedMemory::CreateAndMapAnonymous(size_t size) {
- // Untrusted code can't create descriptors or handles.
- return false;
-}
-
-bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
- // Untrusted code can't create descriptors or handles.
- return false;
-}
-
-bool SharedMemory::Delete(const std::string& name) {
- return false;
-}
-
-bool SharedMemory::Open(const std::string& name, bool read_only) {
- return false;
-}
-
-bool SharedMemory::MapAt(off_t offset, size_t bytes) {
- if (!shm_.IsValid())
- return false;
-
- if (bytes > static_cast<size_t>(std::numeric_limits<int>::max()))
- return false;
-
- if (memory_)
- return false;
-
- memory_ = mmap(NULL, bytes, PROT_READ | (read_only_ ? 0 : PROT_WRITE),
- MAP_SHARED, shm_.GetHandle(), offset);
-
- bool mmap_succeeded = memory_ != MAP_FAILED && memory_ != NULL;
- if (mmap_succeeded) {
- mapped_size_ = bytes;
- DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(memory_) &
- (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
- mapped_id_ = shm_.GetGUID();
- SharedMemoryTracker::GetInstance()->IncrementMemoryUsage(*this);
- } else {
- memory_ = NULL;
- }
-
- return mmap_succeeded;
-}
-
-bool SharedMemory::Unmap() {
- if (memory_ == NULL)
- return false;
-
- SharedMemoryTracker::GetInstance()->DecrementMemoryUsage(*this);
- if (munmap(memory_, mapped_size_) < 0)
- DPLOG(ERROR) << "munmap";
- memory_ = NULL;
- mapped_size_ = 0;
- mapped_id_ = UnguessableToken();
- return true;
-}
-
-SharedMemoryHandle SharedMemory::handle() const {
- SharedMemoryHandle handle_copy = shm_;
- handle_copy.SetOwnershipPassesToIPC(false);
- return handle_copy;
-}
-
-SharedMemoryHandle SharedMemory::TakeHandle() {
- SharedMemoryHandle handle_copy = shm_;
- handle_copy.SetOwnershipPassesToIPC(true);
- Unmap();
- shm_ = SharedMemoryHandle();
- return handle_copy;
-}
-
-void SharedMemory::Close() {
- if (shm_.IsValid()) {
- shm_.Close();
- shm_ = SharedMemoryHandle();
- }
-}
-
-SharedMemoryHandle SharedMemory::GetReadOnlyHandle() const {
- // Untrusted code can't create descriptors or handles, which is needed to
- // drop permissions.
- return SharedMemoryHandle();
-}
-
-} // namespace base
diff --git a/base/memory/shared_memory_posix.cc b/base/memory/shared_memory_posix.cc
deleted file mode 100644
index 07f9471..0000000
--- a/base/memory/shared_memory_posix.cc
+++ /dev/null
@@ -1,379 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/shared_memory.h"
-
-#include <errno.h>
-#include <fcntl.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <unistd.h>
-
-#include "base/files/file_util.h"
-#include "base/files/scoped_file.h"
-#include "base/logging.h"
-#include "base/macros.h"
-#include "base/memory/shared_memory_helper.h"
-#include "base/memory/shared_memory_tracker.h"
-#include "base/posix/eintr_wrapper.h"
-#include "base/posix/safe_strerror.h"
-#include "base/process/process_metrics.h"
-#include "base/scoped_generic.h"
-#include "base/strings/utf_string_conversions.h"
-#include "base/threading/thread_restrictions.h"
-#include "base/trace_event/trace_event.h"
-#include "base/unguessable_token.h"
-#include "build/build_config.h"
-
-#if defined(OS_ANDROID)
-#include "base/os_compat_android.h"
-#include "starboard/types.h"
-#include "third_party/ashmem/ashmem.h"
-#endif
-
-#if defined(OS_MACOSX) && !defined(OS_IOS)
-#error "MacOS uses shared_memory_mac.cc"
-#endif
-
-namespace base {
-
-SharedMemory::SharedMemory() = default;
-
-SharedMemory::SharedMemory(const SharedMemoryHandle& handle, bool read_only)
- : shm_(handle), read_only_(read_only) {}
-
-SharedMemory::~SharedMemory() {
- Unmap();
- Close();
-}
-
-// static
-bool SharedMemory::IsHandleValid(const SharedMemoryHandle& handle) {
- return handle.IsValid();
-}
-
-// static
-void SharedMemory::CloseHandle(const SharedMemoryHandle& handle) {
- DCHECK(handle.IsValid());
- handle.Close();
-}
-
-// static
-size_t SharedMemory::GetHandleLimit() {
- return GetMaxFds();
-}
-
-// static
-SharedMemoryHandle SharedMemory::DuplicateHandle(
- const SharedMemoryHandle& handle) {
- return handle.Duplicate();
-}
-
-// static
-int SharedMemory::GetFdFromSharedMemoryHandle(
- const SharedMemoryHandle& handle) {
- return handle.GetHandle();
-}
-
-bool SharedMemory::CreateAndMapAnonymous(size_t size) {
- return CreateAnonymous(size) && Map(size);
-}
-
-#if !defined(OS_ANDROID)
-
-// Chromium mostly only uses the unique/private shmem as specified by
-// "name == L"". The exception is in the StatsTable.
-// TODO(jrg): there is no way to "clean up" all unused named shmem if
-// we restart from a crash. (That isn't a new problem, but it is a problem.)
-// In case we want to delete it later, it may be useful to save the value
-// of mem_filename after FilePathForMemoryName().
-bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
- DCHECK(!shm_.IsValid());
- if (options.size == 0) return false;
-
- if (options.size > static_cast<size_t>(std::numeric_limits<int>::max()))
- return false;
-
- // This function theoretically can block on the disk, but realistically
- // the temporary files we create will just go into the buffer cache
- // and be deleted before they ever make it out to disk.
- ThreadRestrictions::ScopedAllowIO allow_io;
-
- bool fix_size = true;
- ScopedFD fd;
- ScopedFD readonly_fd;
- FilePath path;
- if (!options.name_deprecated || options.name_deprecated->empty()) {
- bool result =
- CreateAnonymousSharedMemory(options, &fd, &readonly_fd, &path);
- if (!result)
- return false;
- } else {
- if (!FilePathForMemoryName(*options.name_deprecated, &path))
- return false;
-
- // Make sure that the file is opened without any permission
- // to other users on the system.
- const mode_t kOwnerOnly = S_IRUSR | S_IWUSR;
-
- // First, try to create the file.
- fd.reset(HANDLE_EINTR(
- open(path.value().c_str(), O_RDWR | O_CREAT | O_EXCL, kOwnerOnly)));
- if (!fd.is_valid() && options.open_existing_deprecated) {
- // If this doesn't work, try and open an existing file in append mode.
- // Opening an existing file in a world writable directory has two main
- // security implications:
- // - Attackers could plant a file under their control, so ownership of
- // the file is checked below.
- // - Attackers could plant a symbolic link so that an unexpected file
- // is opened, so O_NOFOLLOW is passed to open().
-#if !defined(OS_AIX)
- fd.reset(HANDLE_EINTR(
- open(path.value().c_str(), O_RDWR | O_APPEND | O_NOFOLLOW)));
-#else
- // AIX has no 64-bit support for open flags such as -
- // O_CLOEXEC, O_NOFOLLOW and O_TTY_INIT.
- fd.reset(HANDLE_EINTR(open(path.value().c_str(), O_RDWR | O_APPEND)));
-#endif
- // Check that the current user owns the file.
- // If uid != euid, then a more complex permission model is used and this
- // API is not appropriate.
- const uid_t real_uid = getuid();
- const uid_t effective_uid = geteuid();
- struct stat sb;
- if (fd.is_valid() &&
- (fstat(fd.get(), &sb) != 0 || sb.st_uid != real_uid ||
- sb.st_uid != effective_uid)) {
- LOG(ERROR) <<
- "Invalid owner when opening existing shared memory file.";
- close(fd.get());
- return false;
- }
-
- // An existing file was opened, so its size should not be fixed.
- fix_size = false;
- }
-
- if (options.share_read_only) {
- // Also open as readonly so that we can GetReadOnlyHandle.
- readonly_fd.reset(HANDLE_EINTR(open(path.value().c_str(), O_RDONLY)));
- if (!readonly_fd.is_valid()) {
- DPLOG(ERROR) << "open(\"" << path.value() << "\", O_RDONLY) failed";
- close(fd.get());
- return false;
- }
- }
- }
- if (fd.is_valid() && fix_size) {
- // Get current size.
- struct stat stat;
- if (fstat(fd.get(), &stat) != 0)
- return false;
- const size_t current_size = stat.st_size;
- if (current_size != options.size) {
- if (HANDLE_EINTR(ftruncate(fd.get(), options.size)) != 0)
- return false;
- }
- requested_size_ = options.size;
- }
- if (!fd.is_valid()) {
- PLOG(ERROR) << "Creating shared memory in " << path.value() << " failed";
- FilePath dir = path.DirName();
- if (access(dir.value().c_str(), W_OK | X_OK) < 0) {
- PLOG(ERROR) << "Unable to access(W_OK|X_OK) " << dir.value();
- if (dir.value() == "/dev/shm") {
- LOG(FATAL) << "This is frequently caused by incorrect permissions on "
- << "/dev/shm. Try 'sudo chmod 1777 /dev/shm' to fix.";
- }
- }
- return false;
- }
-
- int mapped_file = -1;
- int readonly_mapped_file = -1;
-
- bool result = PrepareMapFile(std::move(fd), std::move(readonly_fd),
- &mapped_file, &readonly_mapped_file);
- shm_ = SharedMemoryHandle(FileDescriptor(mapped_file, false), options.size,
- UnguessableToken::Create());
- readonly_shm_ =
- SharedMemoryHandle(FileDescriptor(readonly_mapped_file, false),
- options.size, shm_.GetGUID());
- return result;
-}
-
-// Our current implementation of shmem is with mmap()ing of files.
-// These files need to be deleted explicitly.
-// In practice this call is only needed for unit tests.
-bool SharedMemory::Delete(const std::string& name) {
- FilePath path;
- if (!FilePathForMemoryName(name, &path))
- return false;
-
- if (PathExists(path))
- return DeleteFile(path, false);
-
- // Doesn't exist, so success.
- return true;
-}
-
-bool SharedMemory::Open(const std::string& name, bool read_only) {
- FilePath path;
- if (!FilePathForMemoryName(name, &path))
- return false;
-
- read_only_ = read_only;
-
- int mode = read_only ? O_RDONLY : O_RDWR;
- ScopedFD fd(HANDLE_EINTR(open(path.value().c_str(), mode)));
- ScopedFD readonly_fd(HANDLE_EINTR(open(path.value().c_str(), O_RDONLY)));
- if (!readonly_fd.is_valid()) {
- DPLOG(ERROR) << "open(\"" << path.value() << "\", O_RDONLY) failed";
- return false;
- }
- int mapped_file = -1;
- int readonly_mapped_file = -1;
- bool result = PrepareMapFile(std::move(fd), std::move(readonly_fd),
- &mapped_file, &readonly_mapped_file);
- // This form of sharing shared memory is deprecated. https://crbug.com/345734.
- // However, we can't get rid of it without a significant refactor because its
- // used to communicate between two versions of the same service process, very
- // early in the life cycle.
- // Technically, we should also pass the GUID from the original shared memory
- // region. We don't do that - this means that we will overcount this memory,
- // which thankfully isn't relevant since Chrome only communicates with a
- // single version of the service process.
- // We pass the size |0|, which is a dummy size and wrong, but otherwise
- // harmless.
- shm_ = SharedMemoryHandle(FileDescriptor(mapped_file, false), 0u,
- UnguessableToken::Create());
- readonly_shm_ = SharedMemoryHandle(
- FileDescriptor(readonly_mapped_file, false), 0, shm_.GetGUID());
- return result;
-}
-#endif // !defined(OS_ANDROID)
-
-bool SharedMemory::MapAt(off_t offset, size_t bytes) {
- if (!shm_.IsValid())
- return false;
-
- if (bytes > static_cast<size_t>(std::numeric_limits<int>::max()))
- return false;
-
- if (memory_)
- return false;
-
-#if defined(OS_ANDROID)
- // On Android, Map can be called with a size and offset of zero to use the
- // ashmem-determined size.
- if (bytes == 0) {
- DCHECK_EQ(0, offset);
- int ashmem_bytes = ashmem_get_size_region(shm_.GetHandle());
- if (ashmem_bytes < 0)
- return false;
- bytes = ashmem_bytes;
- }
-
- // Sanity check. This shall catch invalid uses of the SharedMemory APIs
- // but will not protect against direct mmap() attempts.
- if (shm_.IsReadOnly()) {
- // Use a DCHECK() to call writable mappings with read-only descriptors
- // in debug builds immediately. Return an error for release builds
- // or during unit-testing (assuming a ScopedLogAssertHandler was installed).
- DCHECK(read_only_)
- << "Trying to map a region writable with a read-only descriptor.";
- if (!read_only_) {
- return false;
- }
- if (!shm_.SetRegionReadOnly()) { // Ensure the region is read-only.
- return false;
- }
- }
-#endif
-
- memory_ = mmap(nullptr, bytes, PROT_READ | (read_only_ ? 0 : PROT_WRITE),
- MAP_SHARED, shm_.GetHandle(), offset);
-
- bool mmap_succeeded = memory_ && memory_ != reinterpret_cast<void*>(-1);
- if (mmap_succeeded) {
- mapped_size_ = bytes;
- mapped_id_ = shm_.GetGUID();
- DCHECK_EQ(0U,
- reinterpret_cast<uintptr_t>(memory_) &
- (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
- SharedMemoryTracker::GetInstance()->IncrementMemoryUsage(*this);
- } else {
- memory_ = nullptr;
- }
-
- return mmap_succeeded;
-}
-
-bool SharedMemory::Unmap() {
- if (!memory_)
- return false;
-
- SharedMemoryTracker::GetInstance()->DecrementMemoryUsage(*this);
- munmap(memory_, mapped_size_);
- memory_ = nullptr;
- mapped_size_ = 0;
- mapped_id_ = UnguessableToken();
- return true;
-}
-
-SharedMemoryHandle SharedMemory::handle() const {
- return shm_;
-}
-
-SharedMemoryHandle SharedMemory::TakeHandle() {
- SharedMemoryHandle handle_copy = shm_;
- handle_copy.SetOwnershipPassesToIPC(true);
- Unmap();
- shm_ = SharedMemoryHandle();
- return handle_copy;
-}
-
-#if !defined(OS_ANDROID)
-void SharedMemory::Close() {
- if (shm_.IsValid()) {
- shm_.Close();
- shm_ = SharedMemoryHandle();
- }
- if (readonly_shm_.IsValid()) {
- readonly_shm_.Close();
- readonly_shm_ = SharedMemoryHandle();
- }
-}
-
-// For the given shmem named |mem_name|, return a filename to mmap()
-// (and possibly create). Modifies |filename|. Return false on
-// error, or true of we are happy.
-bool SharedMemory::FilePathForMemoryName(const std::string& mem_name,
- FilePath* path) {
- // mem_name will be used for a filename; make sure it doesn't
- // contain anything which will confuse us.
- DCHECK_EQ(std::string::npos, mem_name.find('/'));
- DCHECK_EQ(std::string::npos, mem_name.find('\0'));
-
- FilePath temp_dir;
- if (!GetShmemTempDir(false, &temp_dir))
- return false;
-
-#if defined(GOOGLE_CHROME_BUILD)
- static const char kShmem[] = "com.google.Chrome.shmem.";
-#else
- static const char kShmem[] = "org.chromium.Chromium.shmem.";
-#endif
- CR_DEFINE_STATIC_LOCAL(const std::string, name_base, (kShmem));
- *path = temp_dir.AppendASCII(name_base + mem_name);
- return true;
-}
-
-SharedMemoryHandle SharedMemory::GetReadOnlyHandle() const {
- CHECK(readonly_shm_.IsValid());
- return readonly_shm_.Duplicate();
-}
-#endif // !defined(OS_ANDROID)
-
-} // namespace base
diff --git a/base/memory/shared_memory_region_unittest.cc b/base/memory/shared_memory_region_unittest.cc
index d68177e..48c67cc 100644
--- a/base/memory/shared_memory_region_unittest.cc
+++ b/base/memory/shared_memory_region_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
+// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -6,13 +6,11 @@
#include "base/memory/platform_shared_memory_region.h"
#include "base/memory/read_only_shared_memory_region.h"
-#include "base/memory/shared_memory.h"
#include "base/memory/unsafe_shared_memory_region.h"
#include "base/memory/writable_shared_memory_region.h"
-#include "base/sys_info.h"
+#include "base/system/sys_info.h"
#include "base/test/test_shared_memory_util.h"
#include "build/build_config.h"
-#include "starboard/memory.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
@@ -51,7 +49,7 @@
UnsafeSharedMemoryRegion,
ReadOnlySharedMemoryRegion>
AllRegionTypes;
-TYPED_TEST_CASE(SharedMemoryRegionTest, AllRegionTypes);
+TYPED_TEST_SUITE(SharedMemoryRegionTest, AllRegionTypes);
TYPED_TEST(SharedMemoryRegionTest, NonValidRegion) {
TypeParam region;
@@ -70,14 +68,12 @@
typename TypeParam::MappingType mapping = moved_region.Map();
ASSERT_TRUE(mapping.IsValid());
EXPECT_NE(this->rw_mapping_.memory(), mapping.memory());
- EXPECT_EQ(memcmp(this->rw_mapping_.memory(), mapping.memory(),
- kRegionSize),
+ EXPECT_EQ(memcmp(this->rw_mapping_.memory(), mapping.memory(), kRegionSize),
0);
// Verify that the second mapping reflects changes in the first.
memset(this->rw_mapping_.memory(), '#', kRegionSize);
- EXPECT_EQ(memcmp(this->rw_mapping_.memory(), mapping.memory(),
- kRegionSize),
+ EXPECT_EQ(memcmp(this->rw_mapping_.memory(), mapping.memory(), kRegionSize),
0);
}
@@ -95,14 +91,12 @@
typename TypeParam::MappingType mapping = this->region_.Map();
ASSERT_TRUE(mapping.IsValid());
EXPECT_NE(this->rw_mapping_.memory(), mapping.memory());
- EXPECT_EQ(memcmp(this->rw_mapping_.memory(), mapping.memory(),
- kRegionSize),
+ EXPECT_EQ(memcmp(this->rw_mapping_.memory(), mapping.memory(), kRegionSize),
0);
// Verify that the second mapping reflects changes in the first.
memset(this->rw_mapping_.memory(), '#', kRegionSize);
- EXPECT_EQ(memcmp(this->rw_mapping_.memory(), mapping.memory(),
- kRegionSize),
+ EXPECT_EQ(memcmp(this->rw_mapping_.memory(), mapping.memory(), kRegionSize),
0);
// Close the region and unmap the first memory segment, verify the second
@@ -133,8 +127,7 @@
// Verify that the second mapping reflects changes in the first.
memset(this->rw_mapping_.memory(), '#', kRegionSize);
- EXPECT_EQ(memcmp(this->rw_mapping_.memory(), mapping.memory(),
- kRegionSize),
+ EXPECT_EQ(memcmp(this->rw_mapping_.memory(), mapping.memory(), kRegionSize),
0);
}
@@ -164,9 +157,7 @@
const size_t kDataSize = kPageSize * 2;
const size_t kCount = kDataSize / sizeof(uint32_t);
- TypeParam region;
- WritableSharedMemoryMapping rw_mapping;
- std::tie(region, rw_mapping) = CreateMappedRegion<TypeParam>(kDataSize);
+ auto [region, rw_mapping] = CreateMappedRegion<TypeParam>(kDataSize);
ASSERT_TRUE(region.IsValid());
ASSERT_TRUE(rw_mapping.IsValid());
uint32_t* ptr = static_cast<uint32_t*>(rw_mapping.memory());
@@ -175,32 +166,21 @@
ptr[i] = i;
rw_mapping = WritableSharedMemoryMapping();
- off_t bytes_offset = kPageSize;
- typename TypeParam::MappingType mapping =
- region.MapAt(bytes_offset, kDataSize - bytes_offset);
- ASSERT_TRUE(mapping.IsValid());
- off_t int_offset = bytes_offset / sizeof(uint32_t);
- const uint32_t* ptr2 = static_cast<const uint32_t*>(mapping.memory());
- for (size_t i = int_offset; i < kCount; ++i) {
- EXPECT_EQ(ptr2[i - int_offset], i);
+ for (size_t bytes_offset = sizeof(uint32_t); bytes_offset <= kPageSize;
+ bytes_offset += sizeof(uint32_t)) {
+ typename TypeParam::MappingType mapping =
+ region.MapAt(bytes_offset, kDataSize - bytes_offset);
+ ASSERT_TRUE(mapping.IsValid());
+
+ size_t int_offset = bytes_offset / sizeof(uint32_t);
+ const uint32_t* ptr2 = static_cast<const uint32_t*>(mapping.memory());
+ for (size_t i = int_offset; i < kCount; ++i) {
+ EXPECT_EQ(ptr2[i - int_offset], i);
+ }
}
}
-TYPED_TEST(SharedMemoryRegionTest, MapAtNotAlignedOffsetFails) {
- const size_t kDataSize = SysInfo::VMAllocationGranularity();
-
- TypeParam region;
- WritableSharedMemoryMapping rw_mapping;
- std::tie(region, rw_mapping) = CreateMappedRegion<TypeParam>(kDataSize);
- ASSERT_TRUE(region.IsValid());
- ASSERT_TRUE(rw_mapping.IsValid());
- off_t offset = kDataSize / 2;
- typename TypeParam::MappingType mapping =
- region.MapAt(offset, kDataSize - offset);
- EXPECT_FALSE(mapping.IsValid());
-}
-
TYPED_TEST(SharedMemoryRegionTest, MapZeroBytesFails) {
typename TypeParam::MappingType mapping = this->region_.MapAt(0, 0);
EXPECT_FALSE(mapping.IsValid());
@@ -219,7 +199,7 @@
typedef ::testing::Types<UnsafeSharedMemoryRegion, ReadOnlySharedMemoryRegion>
DuplicatableRegionTypes;
-TYPED_TEST_CASE(DuplicatableSharedMemoryRegionTest, DuplicatableRegionTypes);
+TYPED_TEST_SUITE(DuplicatableSharedMemoryRegionTest, DuplicatableRegionTypes);
TYPED_TEST(DuplicatableSharedMemoryRegionTest, Duplicate) {
TypeParam dup_region = this->region_.Duplicate();
@@ -289,17 +269,4 @@
EXPECT_DEATH_IF_SUPPORTED(memset(memory_ptr, 'G', kRegionSize), "");
}
-class UnsafeSharedMemoryRegionTest : public ::testing::Test {};
-
-TEST_F(UnsafeSharedMemoryRegionTest, CreateFromHandleTest) {
- SharedMemory shm;
-
- auto region = UnsafeSharedMemoryRegion::CreateFromHandle(shm.TakeHandle());
- ASSERT_FALSE(region.IsValid());
-
- shm.CreateAndMapAnonymous(10);
- region = UnsafeSharedMemoryRegion::CreateFromHandle(shm.TakeHandle());
- ASSERT_TRUE(region.IsValid());
-}
-
} // namespace base
diff --git a/base/memory/shared_memory_security_policy.cc b/base/memory/shared_memory_security_policy.cc
new file mode 100644
index 0000000..57d193b
--- /dev/null
+++ b/base/memory/shared_memory_security_policy.cc
@@ -0,0 +1,93 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory_security_policy.h"
+
+#include <algorithm>
+#include <atomic>
+#include <limits>
+
+#include "base/bits.h"
+#include "base/memory/page_size.h"
+#include "base/numerics/checked_math.h"
+#include "build/build_config.h"
+#include "third_party/abseil-cpp/absl/types/optional.h"
+
+namespace base {
+
+namespace {
+
+// Note: pointers are 32 bits on all architectures in NaCl. See
+// https://bugs.chromium.org/p/nativeclient/issues/detail?id=1162
+#if defined(ARCH_CPU_32_BITS) || BUILDFLAG(IS_NACL)
+// No effective limit on 32-bit, since there simply isn't enough address space
+// for ASLR to be particularly effective.
+constexpr size_t kTotalMappedSizeLimit = std::numeric_limits<size_t>::max();
+#elif defined(ARCH_CPU_64_BITS)
+// 32 GB of mappings ought to be enough for anybody.
+constexpr size_t kTotalMappedSizeLimit = 32ULL * 1024 * 1024 * 1024;
+#endif
+
+static std::atomic_size_t total_mapped_size_;
+
+absl::optional<size_t> AlignWithPageSize(size_t size) {
+#if BUILDFLAG(IS_WIN)
+ // TODO(crbug.com/210609): Matches alignment requirements defined in
+ // platform_shared_memory_region_win.cc:PlatformSharedMemoryRegion::Create.
+ // Remove this when NaCl is gone.
+ static const size_t kSectionSize = 65536;
+ const size_t page_size = std::max(kSectionSize, GetPageSize());
+#else
+ const size_t page_size = GetPageSize();
+#endif // BUILDFLAG(IS_WIN)
+ size_t rounded_size = bits::AlignUp(size, page_size);
+
+ // Fail on overflow.
+ if (rounded_size < size)
+ return absl::nullopt;
+
+ return rounded_size;
+}
+
+} // namespace
+
+// static
+bool SharedMemorySecurityPolicy::AcquireReservationForMapping(size_t size) {
+ size_t previous_mapped_size =
+ total_mapped_size_.load(std::memory_order_relaxed);
+ size_t total_mapped_size;
+
+ absl::optional<size_t> page_aligned_size = AlignWithPageSize(size);
+
+ if (!page_aligned_size)
+ return false;
+
+  // Relaxed memory ordering is all that's needed since atomicity is all
+ // that's required. If the value is stale, compare_exchange_weak() will fail
+ // and the loop will retry the operation with an updated total mapped size.
+ do {
+ if (!CheckAdd(previous_mapped_size, *page_aligned_size)
+ .AssignIfValid(&total_mapped_size)) {
+ return false;
+ }
+ if (total_mapped_size >= kTotalMappedSizeLimit)
+ return false;
+ } while (!total_mapped_size_.compare_exchange_weak(
+ previous_mapped_size, total_mapped_size, std::memory_order_relaxed,
+ std::memory_order_relaxed));
+
+ return true;
+}
+
+// static
+void SharedMemorySecurityPolicy::ReleaseReservationForMapping(size_t size) {
+ // Note #1: relaxed memory ordering is sufficient since atomicity is all
+ // that's required.
+ // Note #2: |size| should never overflow when aligned to page size, since
+ // this should only be called if AcquireReservationForMapping() returned true.
+ absl::optional<size_t> page_aligned_size = AlignWithPageSize(size);
+ total_mapped_size_.fetch_sub(*page_aligned_size, std::memory_order_relaxed);
+}
+
+} // namespace base
diff --git a/base/memory/shared_memory_security_policy.h b/base/memory/shared_memory_security_policy.h
new file mode 100644
index 0000000..d810399
--- /dev/null
+++ b/base/memory/shared_memory_security_policy.h
@@ -0,0 +1,44 @@
+// Copyright 2020 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_SHARED_MEMORY_SECURITY_POLICY_H_
+#define BASE_MEMORY_SHARED_MEMORY_SECURITY_POLICY_H_
+
+#include <stddef.h>
+
+#include "base/base_export.h"
+
+namespace mojo {
+namespace core {
+class ChannelLinux;
+} // namespace core
+} // namespace mojo
+
+namespace base {
+
+namespace subtle {
+class PlatformSharedMemoryRegion;
+} // namespace subtle
+
+// Helper to enforce a limit for the total amount of shared memory that can be
+// mapped. This can help prevent an attacker from spraying the address space of
+// a process with shared memory mappings to bypass ASLR. For more details, see
+// https://googleprojectzero.blogspot.com/2019/04/virtually-unlimited-memory-escaping.html
+class BASE_EXPORT SharedMemorySecurityPolicy {
+ private:
+ friend class subtle::PlatformSharedMemoryRegion;
+ friend class SharedMemoryMapping;
+ friend class mojo::core::ChannelLinux;
+
+ // Checks that a mapping with |size| can be created. Returns false if there is
+ // an overflow in internal calculations, or the max limit has been reached.
+ [[nodiscard]] static bool AcquireReservationForMapping(size_t size);
+
+ // Releases a reservation that was previously acquired.
+ static void ReleaseReservationForMapping(size_t size);
+};
+
+} // namespace base
+
+#endif // BASE_MEMORY_SHARED_MEMORY_SECURITY_POLICY_H_
diff --git a/base/memory/shared_memory_tracker.cc b/base/memory/shared_memory_tracker.cc
index 5ca7c84..e1f00d5 100644
--- a/base/memory/shared_memory_tracker.cc
+++ b/base/memory/shared_memory_tracker.cc
@@ -1,14 +1,20 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
+// Copyright 2017 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/shared_memory_tracker.h"
-#include "base/memory/shared_memory.h"
+#include "base/check.h"
+#include "base/notreached.h"
#include "base/strings/string_number_conversions.h"
-#include "base/trace_event/memory_allocator_dump_guid.h"
-#include "base/trace_event/memory_dump_manager.h"
-#include "base/trace_event/process_memory_dump.h"
+#include "base/trace_event/base_tracing.h"
+#include "base/tracing_buildflags.h"
+
+#if BUILDFLAG(ENABLE_BASE_TRACING)
+#include "base/trace_event/memory_dump_manager.h" // no-presubmit-check
+#include "base/trace_event/process_memory_dump.h" // no-presubmit-check
+#include "third_party/abseil-cpp/absl/types/optional.h"
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
namespace base {
@@ -34,16 +40,6 @@
return trace_event::MemoryAllocatorDumpGuid(dump_name);
}
-// static
-const trace_event::MemoryAllocatorDump*
-SharedMemoryTracker::GetOrCreateSharedMemoryDump(
- const SharedMemory* shared_memory,
- trace_event::ProcessMemoryDump* pmd) {
- return GetOrCreateSharedMemoryDumpInternal(shared_memory->memory(),
- shared_memory->mapped_size(),
- shared_memory->mapped_id(), pmd);
-}
-
const trace_event::MemoryAllocatorDump*
SharedMemoryTracker::GetOrCreateSharedMemoryDump(
const SharedMemoryMapping& shared_memory,
@@ -54,14 +50,6 @@
}
void SharedMemoryTracker::IncrementMemoryUsage(
- const SharedMemory& shared_memory) {
- AutoLock hold(usages_lock_);
- DCHECK(usages_.find(shared_memory.memory()) == usages_.end());
- usages_.emplace(shared_memory.memory(), UsageInfo(shared_memory.mapped_size(),
- shared_memory.mapped_id()));
-}
-
-void SharedMemoryTracker::IncrementMemoryUsage(
const SharedMemoryMapping& mapping) {
AutoLock hold(usages_lock_);
DCHECK(usages_.find(mapping.raw_memory_ptr()) == usages_.end());
@@ -70,13 +58,6 @@
}
void SharedMemoryTracker::DecrementMemoryUsage(
- const SharedMemory& shared_memory) {
- AutoLock hold(usages_lock_);
- DCHECK(usages_.find(shared_memory.memory()) != usages_.end());
- usages_.erase(shared_memory.memory());
-}
-
-void SharedMemoryTracker::DecrementMemoryUsage(
const SharedMemoryMapping& mapping) {
AutoLock hold(usages_lock_);
DCHECK(usages_.find(mapping.raw_memory_ptr()) != usages_.end());
@@ -84,8 +65,10 @@
}
SharedMemoryTracker::SharedMemoryTracker() {
+#if BUILDFLAG(ENABLE_BASE_TRACING)
trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
this, "SharedMemoryTracker", nullptr);
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
}
SharedMemoryTracker::~SharedMemoryTracker() = default;
@@ -109,6 +92,7 @@
size_t mapped_size,
const UnguessableToken& mapped_id,
trace_event::ProcessMemoryDump* pmd) {
+#if BUILDFLAG(ENABLE_BASE_TRACING)
const std::string dump_name = GetDumpNameForTracing(mapped_id);
trace_event::MemoryAllocatorDump* local_dump =
pmd->GetAllocatorDump(dump_name);
@@ -119,7 +103,7 @@
// If resident size is not available, a virtual size is used as fallback.
size_t size = virtual_size;
#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
- base::Optional<size_t> resident_size =
+ absl::optional<size_t> resident_size =
trace_event::ProcessMemoryDump::CountResidentBytesInSharedMemory(
mapped_memory, mapped_size);
if (resident_size.has_value())
@@ -142,6 +126,10 @@
pmd->AddOverridableOwnershipEdge(local_dump->guid(), global_dump->guid(),
0 /* importance */);
return local_dump;
+#else // BUILDFLAG(ENABLE_BASE_TRACING)
+ NOTREACHED();
+ return nullptr;
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
}
} // namespace
diff --git a/base/memory/shared_memory_tracker.h b/base/memory/shared_memory_tracker.h
index 66d130a..f1f944a 100644
--- a/base/memory/shared_memory_tracker.h
+++ b/base/memory/shared_memory_tracker.h
@@ -1,4 +1,4 @@
-// Copyright 2017 The Chromium Authors. All rights reserved.
+// Copyright 2017 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -8,14 +8,13 @@
#include <map>
#include <string>
-#include "base/memory/shared_memory.h"
+#include "base/base_export.h"
#include "base/memory/shared_memory_mapping.h"
#include "base/synchronization/lock.h"
-#include "base/trace_event/memory_dump_provider.h"
+#include "base/trace_event/base_tracing.h"
namespace base {
-#if !defined(STARBOARD)
namespace trace_event {
class MemoryAllocatorDump;
class MemoryAllocatorDumpGuid;
@@ -28,6 +27,9 @@
// Returns a singleton instance.
static SharedMemoryTracker* GetInstance();
+ SharedMemoryTracker(const SharedMemoryTracker&) = delete;
+ SharedMemoryTracker& operator=(const SharedMemoryTracker&) = delete;
+
static std::string GetDumpNameForTracing(const UnguessableToken& id);
static trace_event::MemoryAllocatorDumpGuid GetGlobalDumpIdForTracing(
@@ -37,20 +39,13 @@
// inside the given |pmd|. Also adds the necessary edges for the dump when
// creating the dump.
static const trace_event::MemoryAllocatorDump* GetOrCreateSharedMemoryDump(
- const SharedMemory* shared_memory,
- trace_event::ProcessMemoryDump* pmd);
- // We're in the middle of a refactor https://crbug.com/795291. Eventually, the
- // first call will go away.
- static const trace_event::MemoryAllocatorDump* GetOrCreateSharedMemoryDump(
const SharedMemoryMapping& shared_memory,
trace_event::ProcessMemoryDump* pmd);
// Records shared memory usage on valid mapping.
- void IncrementMemoryUsage(const SharedMemory& shared_memory);
void IncrementMemoryUsage(const SharedMemoryMapping& mapping);
// Records shared memory usage on unmapping.
- void DecrementMemoryUsage(const SharedMemory& shared_memory);
void DecrementMemoryUsage(const SharedMemoryMapping& mapping);
// Root dump name for all shared memory dumps.
@@ -79,14 +74,10 @@
UnguessableToken mapped_id;
};
- // Used to lock when |usages_| is modified or read.
Lock usages_lock_;
- std::map<void*, UsageInfo> usages_;
-
- DISALLOW_COPY_AND_ASSIGN(SharedMemoryTracker);
+ std::map<void*, UsageInfo> usages_ GUARDED_BY(usages_lock_);
};
-#endif
} // namespace base
#endif // BASE_MEMORY_SHARED_MEMORY_TRACKER_H_
diff --git a/base/memory/shared_memory_unittest.cc b/base/memory/shared_memory_unittest.cc
deleted file mode 100644
index 676feb6..0000000
--- a/base/memory/shared_memory_unittest.cc
+++ /dev/null
@@ -1,977 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/shared_memory.h"
-
-#include <memory>
-
-#include "starboard/types.h"
-
-#include "starboard/memory.h"
-
-#include "base/atomicops.h"
-#include "base/base_switches.h"
-#include "base/bind.h"
-#include "base/command_line.h"
-#include "base/logging.h"
-#include "base/macros.h"
-#include "base/memory/shared_memory_handle.h"
-#include "base/process/kill.h"
-#include "base/rand_util.h"
-#include "base/strings/string_number_conversions.h"
-#include "base/strings/string_piece.h"
-#include "base/strings/string_util.h"
-#include "base/sys_info.h"
-#include "base/test/multiprocess_test.h"
-#include "base/threading/platform_thread.h"
-#include "base/time/time.h"
-#include "base/unguessable_token.h"
-#include "build/build_config.h"
-#include "testing/gtest/include/gtest/gtest.h"
-#include "testing/multiprocess_func_list.h"
-
-#if defined(OS_ANDROID)
-#include "base/callback.h"
-#endif
-
-#if defined(OS_POSIX)
-#include <errno.h>
-#include <fcntl.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-#endif
-
-#if defined(OS_LINUX)
-#include <sys/syscall.h>
-#endif
-
-#if defined(OS_WIN)
-#include "base/win/scoped_handle.h"
-#endif
-
-#if defined(OS_FUCHSIA)
-#include <lib/zx/vmar.h>
-#include <lib/zx/vmo.h>
-#endif
-
-namespace base {
-
-namespace {
-
-#if !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
-// Each thread will open the shared memory. Each thread will take a different 4
-// byte int pointer, and keep changing it, with some small pauses in between.
-// Verify that each thread's value in the shared memory is always correct.
-class MultipleThreadMain : public PlatformThread::Delegate {
- public:
- explicit MultipleThreadMain(int16_t id) : id_(id) {}
- ~MultipleThreadMain() override = default;
-
- static void CleanUp() {
- SharedMemory memory;
- memory.Delete(s_test_name_);
- }
-
- // PlatformThread::Delegate interface.
- void ThreadMain() override {
- const uint32_t kDataSize = 1024;
- SharedMemory memory;
- bool rv = memory.CreateNamedDeprecated(s_test_name_, true, kDataSize);
- EXPECT_TRUE(rv);
- rv = memory.Map(kDataSize);
- EXPECT_TRUE(rv);
- int* ptr = static_cast<int*>(memory.memory()) + id_;
- EXPECT_EQ(0, *ptr);
-
- for (int idx = 0; idx < 100; idx++) {
- *ptr = idx;
- PlatformThread::Sleep(TimeDelta::FromMilliseconds(1));
- EXPECT_EQ(*ptr, idx);
- }
- // Reset back to 0 for the next test that uses the same name.
- *ptr = 0;
-
- memory.Close();
- }
-
- private:
- int16_t id_;
-
- static const char s_test_name_[];
-
- DISALLOW_COPY_AND_ASSIGN(MultipleThreadMain);
-};
-
-const char MultipleThreadMain::s_test_name_[] =
- "SharedMemoryOpenThreadTest";
-#endif // !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
-
-enum class Mode {
- Default,
-#if defined(OS_LINUX) && !defined(OS_CHROMEOS)
- DisableDevShm = 1,
-#endif
-};
-
-class SharedMemoryTest : public ::testing::TestWithParam<Mode> {
- public:
- void SetUp() override {
- switch (GetParam()) {
- case Mode::Default:
- break;
-#if defined(OS_LINUX) && !defined(OS_CHROMEOS)
- case Mode::DisableDevShm:
- CommandLine* cmdline = CommandLine::ForCurrentProcess();
- cmdline->AppendSwitch(switches::kDisableDevShmUsage);
- break;
-#endif // defined(OS_LINUX) && !defined(OS_CHROMEOS)
- }
- }
-};
-
-} // namespace
-
-// Android/Mac/Fuchsia doesn't support SharedMemory::Open/Delete/
-// CreateNamedDeprecated(openExisting=true)
-#if !defined(OS_ANDROID) && !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
-
-TEST_P(SharedMemoryTest, OpenClose) {
- const uint32_t kDataSize = 1024;
- std::string test_name = "SharedMemoryOpenCloseTest";
-
- // Open two handles to a memory segment, confirm that they are mapped
- // separately yet point to the same space.
- SharedMemory memory1;
- bool rv = memory1.Delete(test_name);
- EXPECT_TRUE(rv);
- rv = memory1.Delete(test_name);
- EXPECT_TRUE(rv);
- rv = memory1.Open(test_name, false);
- EXPECT_FALSE(rv);
- rv = memory1.CreateNamedDeprecated(test_name, false, kDataSize);
- EXPECT_TRUE(rv);
- rv = memory1.Map(kDataSize);
- EXPECT_TRUE(rv);
- SharedMemory memory2;
- rv = memory2.Open(test_name, false);
- EXPECT_TRUE(rv);
- rv = memory2.Map(kDataSize);
- EXPECT_TRUE(rv);
- EXPECT_NE(memory1.memory(), memory2.memory()); // Compare the pointers.
-
- // Make sure we don't segfault. (it actually happened!)
- ASSERT_NE(memory1.memory(), static_cast<void*>(nullptr));
- ASSERT_NE(memory2.memory(), static_cast<void*>(nullptr));
-
- // Write data to the first memory segment, verify contents of second.
- memset(memory1.memory(), '1', kDataSize);
- EXPECT_EQ(memcmp(memory1.memory(), memory2.memory(), kDataSize), 0);
-
- // Close the first memory segment, and verify the second has the right data.
- memory1.Close();
- char* start_ptr = static_cast<char*>(memory2.memory());
- char* end_ptr = start_ptr + kDataSize;
- for (char* ptr = start_ptr; ptr < end_ptr; ptr++)
- EXPECT_EQ(*ptr, '1');
-
- // Close the second memory segment.
- memory2.Close();
-
- rv = memory1.Delete(test_name);
- EXPECT_TRUE(rv);
- rv = memory2.Delete(test_name);
- EXPECT_TRUE(rv);
-}
-
-TEST_P(SharedMemoryTest, OpenExclusive) {
- const uint32_t kDataSize = 1024;
- const uint32_t kDataSize2 = 2048;
- std::ostringstream test_name_stream;
- test_name_stream << "SharedMemoryOpenExclusiveTest."
- << Time::Now().ToDoubleT();
- std::string test_name = test_name_stream.str();
-
- // Open two handles to a memory segment and check that
- // open_existing_deprecated works as expected.
- SharedMemory memory1;
- bool rv = memory1.CreateNamedDeprecated(test_name, false, kDataSize);
- EXPECT_TRUE(rv);
-
- // Memory1 knows it's size because it created it.
- EXPECT_EQ(memory1.requested_size(), kDataSize);
-
- rv = memory1.Map(kDataSize);
- EXPECT_TRUE(rv);
-
- // The mapped memory1 must be at least the size we asked for.
- EXPECT_GE(memory1.mapped_size(), kDataSize);
-
- // The mapped memory1 shouldn't exceed rounding for allocation granularity.
- EXPECT_LT(memory1.mapped_size(),
- kDataSize + SysInfo::VMAllocationGranularity());
-
- memset(memory1.memory(), 'G', kDataSize);
-
- SharedMemory memory2;
- // Should not be able to create if openExisting is false.
- rv = memory2.CreateNamedDeprecated(test_name, false, kDataSize2);
- EXPECT_FALSE(rv);
-
- // Should be able to create with openExisting true.
- rv = memory2.CreateNamedDeprecated(test_name, true, kDataSize2);
- EXPECT_TRUE(rv);
-
- // Memory2 shouldn't know the size because we didn't create it.
- EXPECT_EQ(memory2.requested_size(), 0U);
-
- // We should be able to map the original size.
- rv = memory2.Map(kDataSize);
- EXPECT_TRUE(rv);
-
- // The mapped memory2 must be at least the size of the original.
- EXPECT_GE(memory2.mapped_size(), kDataSize);
-
- // The mapped memory2 shouldn't exceed rounding for allocation granularity.
- EXPECT_LT(memory2.mapped_size(),
- kDataSize2 + SysInfo::VMAllocationGranularity());
-
- // Verify that opening memory2 didn't truncate or delete memory 1.
- char* start_ptr = static_cast<char*>(memory2.memory());
- char* end_ptr = start_ptr + kDataSize;
- for (char* ptr = start_ptr; ptr < end_ptr; ptr++) {
- EXPECT_EQ(*ptr, 'G');
- }
-
- memory1.Close();
- memory2.Close();
-
- rv = memory1.Delete(test_name);
- EXPECT_TRUE(rv);
-}
-#endif // !defined(OS_ANDROID) && !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
-
-// Check that memory is still mapped after its closed.
-TEST_P(SharedMemoryTest, CloseNoUnmap) {
- const size_t kDataSize = 4096;
-
- SharedMemory memory;
- ASSERT_TRUE(memory.CreateAndMapAnonymous(kDataSize));
- char* ptr = static_cast<char*>(memory.memory());
- ASSERT_NE(ptr, static_cast<void*>(nullptr));
- memset(ptr, 'G', kDataSize);
-
- memory.Close();
-
- EXPECT_EQ(ptr, memory.memory());
- EXPECT_TRUE(!memory.handle().IsValid());
-
- for (size_t i = 0; i < kDataSize; i++) {
- EXPECT_EQ('G', ptr[i]);
- }
-
- memory.Unmap();
- EXPECT_EQ(nullptr, memory.memory());
-}
-
-#if !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
-// Create a set of N threads to each open a shared memory segment and write to
-// it. Verify that they are always reading/writing consistent data.
-TEST_P(SharedMemoryTest, MultipleThreads) {
- const int kNumThreads = 5;
-
- MultipleThreadMain::CleanUp();
- // On POSIX we have a problem when 2 threads try to create the shmem
- // (a file) at exactly the same time, since create both creates the
- // file and zerofills it. We solve the problem for this unit test
- // (make it not flaky) by starting with 1 thread, then
- // intentionally don't clean up its shmem before running with
- // kNumThreads.
-
- int threadcounts[] = { 1, kNumThreads };
- for (size_t i = 0; i < arraysize(threadcounts); i++) {
- int numthreads = threadcounts[i];
- std::unique_ptr<PlatformThreadHandle[]> thread_handles;
- std::unique_ptr<MultipleThreadMain* []> thread_delegates;
-
- thread_handles.reset(new PlatformThreadHandle[numthreads]);
- thread_delegates.reset(new MultipleThreadMain*[numthreads]);
-
- // Spawn the threads.
- for (int16_t index = 0; index < numthreads; index++) {
- PlatformThreadHandle pth;
- thread_delegates[index] = new MultipleThreadMain(index);
- EXPECT_TRUE(PlatformThread::Create(0, thread_delegates[index], &pth));
- thread_handles[index] = pth;
- }
-
- // Wait for the threads to finish.
- for (int index = 0; index < numthreads; index++) {
- PlatformThread::Join(thread_handles[index]);
- delete thread_delegates[index];
- }
- }
- MultipleThreadMain::CleanUp();
-}
-#endif
-
-// Allocate private (unique) shared memory with an empty string for a
-// name. Make sure several of them don't point to the same thing as
-// we might expect if the names are equal.
-TEST_P(SharedMemoryTest, AnonymousPrivate) {
- int i, j;
- int count = 4;
- bool rv;
- const uint32_t kDataSize = 8192;
-
- std::unique_ptr<SharedMemory[]> memories(new SharedMemory[count]);
- std::unique_ptr<int* []> pointers(new int*[count]);
- ASSERT_TRUE(memories.get());
- ASSERT_TRUE(pointers.get());
-
- for (i = 0; i < count; i++) {
- rv = memories[i].CreateAndMapAnonymous(kDataSize);
- EXPECT_TRUE(rv);
- int* ptr = static_cast<int*>(memories[i].memory());
- EXPECT_TRUE(ptr);
- pointers[i] = ptr;
- }
-
- for (i = 0; i < count; i++) {
- // zero out the first int in each except for i; for that one, make it 100.
- for (j = 0; j < count; j++) {
- if (i == j)
- pointers[j][0] = 100;
- else
- pointers[j][0] = 0;
- }
- // make sure there is no bleeding of the 100 into the other pointers
- for (j = 0; j < count; j++) {
- if (i == j)
- EXPECT_EQ(100, pointers[j][0]);
- else
- EXPECT_EQ(0, pointers[j][0]);
- }
- }
-
- for (int i = 0; i < count; i++) {
- memories[i].Close();
- }
-}
-
-TEST_P(SharedMemoryTest, GetReadOnlyHandle) {
- StringPiece contents = "Hello World";
-
- SharedMemory writable_shmem;
- SharedMemoryCreateOptions options;
- options.size = contents.size();
- options.share_read_only = true;
-#if defined(OS_MACOSX) && !defined(OS_IOS)
- // The Mach functionality is tested in shared_memory_mac_unittest.cc.
- options.type = SharedMemoryHandle::POSIX;
-#endif
- ASSERT_TRUE(writable_shmem.Create(options));
- ASSERT_TRUE(writable_shmem.Map(options.size));
- memcpy(writable_shmem.memory(), contents.data(), contents.size());
- EXPECT_TRUE(writable_shmem.Unmap());
-
- SharedMemoryHandle readonly_handle = writable_shmem.GetReadOnlyHandle();
- EXPECT_EQ(writable_shmem.handle().GetGUID(), readonly_handle.GetGUID());
- EXPECT_EQ(writable_shmem.handle().GetSize(), readonly_handle.GetSize());
- ASSERT_TRUE(readonly_handle.IsValid());
- SharedMemory readonly_shmem(readonly_handle, /*readonly=*/true);
-
- ASSERT_TRUE(readonly_shmem.Map(contents.size()));
- EXPECT_EQ(contents,
- StringPiece(static_cast<const char*>(readonly_shmem.memory()),
- contents.size()));
- EXPECT_TRUE(readonly_shmem.Unmap());
-
-#if defined(OS_ANDROID)
- // On Android, mapping a region through a read-only descriptor makes the
- // region read-only. Any writable mapping attempt should fail.
- ASSERT_FALSE(writable_shmem.Map(contents.size()));
-#else
- // Make sure the writable instance is still writable.
- ASSERT_TRUE(writable_shmem.Map(contents.size()));
- StringPiece new_contents = "Goodbye";
- memcpy(writable_shmem.memory(), new_contents.data(),
- new_contents.size());
- EXPECT_EQ(new_contents,
- StringPiece(static_cast<const char*>(writable_shmem.memory()),
- new_contents.size()));
-#endif
-
- // We'd like to check that if we send the read-only segment to another
- // process, then that other process can't reopen it read/write. (Since that
- // would be a security hole.) Setting up multiple processes is hard in a
- // unittest, so this test checks that the *current* process can't reopen the
- // segment read/write. I think the test here is stronger than we actually
- // care about, but there's a remote possibility that sending a file over a
- // pipe would transform it into read/write.
- SharedMemoryHandle handle = readonly_shmem.handle();
-
-#if defined(OS_ANDROID)
- // The "read-only" handle is still writable on Android:
- // http://crbug.com/320865
- (void)handle;
-#elif defined(OS_FUCHSIA)
- uintptr_t addr;
- EXPECT_NE(ZX_OK, zx::vmar::root_self()->map(
- 0, *zx::unowned_vmo(handle.GetHandle()), 0,
- contents.size(), ZX_VM_FLAG_PERM_WRITE, &addr))
- << "Shouldn't be able to map as writable.";
-
- zx::vmo duped_handle;
- EXPECT_NE(ZX_OK, zx::unowned_vmo(handle.GetHandle())
- ->duplicate(ZX_RIGHT_WRITE, &duped_handle))
- << "Shouldn't be able to duplicate the handle into a writable one.";
-
- EXPECT_EQ(ZX_OK, zx::unowned_vmo(handle.GetHandle())
- ->duplicate(ZX_RIGHT_READ, &duped_handle))
- << "Should be able to duplicate the handle into a readable one.";
-#elif defined(OS_POSIX)
- int handle_fd = SharedMemory::GetFdFromSharedMemoryHandle(handle);
- EXPECT_EQ(O_RDONLY, fcntl(handle_fd, F_GETFL) & O_ACCMODE)
- << "The descriptor itself should be read-only.";
-
- errno = 0;
- void* writable = mmap(nullptr, contents.size(), PROT_READ | PROT_WRITE,
- MAP_SHARED, handle_fd, 0);
- int mmap_errno = errno;
- EXPECT_EQ(MAP_FAILED, writable)
- << "It shouldn't be possible to re-mmap the descriptor writable.";
- EXPECT_EQ(EACCES, mmap_errno) << strerror(mmap_errno);
- if (writable != MAP_FAILED)
- EXPECT_EQ(0, munmap(writable, readonly_shmem.mapped_size()));
-
-#elif defined(OS_WIN)
- EXPECT_EQ(NULL, MapViewOfFile(handle.GetHandle(), FILE_MAP_WRITE, 0, 0, 0))
- << "Shouldn't be able to map memory writable.";
-
- HANDLE temp_handle;
- BOOL rv = ::DuplicateHandle(GetCurrentProcess(), handle.GetHandle(),
- GetCurrentProcess(), &temp_handle,
- FILE_MAP_ALL_ACCESS, false, 0);
- EXPECT_EQ(FALSE, rv)
- << "Shouldn't be able to duplicate the handle into a writable one.";
- if (rv)
- win::ScopedHandle writable_handle(temp_handle);
- rv = ::DuplicateHandle(GetCurrentProcess(), handle.GetHandle(),
- GetCurrentProcess(), &temp_handle, FILE_MAP_READ,
- false, 0);
- EXPECT_EQ(TRUE, rv)
- << "Should be able to duplicate the handle into a readable one.";
- if (rv)
- win::ScopedHandle writable_handle(temp_handle);
-#else
-#error Unexpected platform; write a test that tries to make 'handle' writable.
-#endif // defined(OS_POSIX) || defined(OS_WIN)
-}
-
-TEST_P(SharedMemoryTest, ShareToSelf) {
- StringPiece contents = "Hello World";
-
- SharedMemory shmem;
- ASSERT_TRUE(shmem.CreateAndMapAnonymous(contents.size()));
- memcpy(shmem.memory(), contents.data(), contents.size());
- EXPECT_TRUE(shmem.Unmap());
-
- SharedMemoryHandle shared_handle = shmem.handle().Duplicate();
- ASSERT_TRUE(shared_handle.IsValid());
- EXPECT_TRUE(shared_handle.OwnershipPassesToIPC());
- EXPECT_EQ(shared_handle.GetGUID(), shmem.handle().GetGUID());
- EXPECT_EQ(shared_handle.GetSize(), shmem.handle().GetSize());
- SharedMemory shared(shared_handle, /*readonly=*/false);
-
- ASSERT_TRUE(shared.Map(contents.size()));
- EXPECT_EQ(
- contents,
- StringPiece(static_cast<const char*>(shared.memory()), contents.size()));
-
- shared_handle = shmem.handle().Duplicate();
- ASSERT_TRUE(shared_handle.IsValid());
- ASSERT_TRUE(shared_handle.OwnershipPassesToIPC());
- SharedMemory readonly(shared_handle, /*readonly=*/true);
-
- ASSERT_TRUE(readonly.Map(contents.size()));
- EXPECT_EQ(contents,
- StringPiece(static_cast<const char*>(readonly.memory()),
- contents.size()));
-}
-
-TEST_P(SharedMemoryTest, ShareWithMultipleInstances) {
- static const StringPiece kContents = "Hello World";
-
- SharedMemory shmem;
- ASSERT_TRUE(shmem.CreateAndMapAnonymous(kContents.size()));
- // We do not need to unmap |shmem| to let |shared| map.
- const StringPiece shmem_contents(static_cast<const char*>(shmem.memory()),
- shmem.requested_size());
-
- SharedMemoryHandle shared_handle = shmem.handle().Duplicate();
- ASSERT_TRUE(shared_handle.IsValid());
- SharedMemory shared(shared_handle, /*readonly=*/false);
- ASSERT_TRUE(shared.Map(kContents.size()));
- // The underlying shared memory is created by |shmem|, so both
- // |shared|.requested_size() and |readonly|.requested_size() are zero.
- ASSERT_EQ(0U, shared.requested_size());
- const StringPiece shared_contents(static_cast<const char*>(shared.memory()),
- shmem.requested_size());
-
- shared_handle = shmem.handle().Duplicate();
- ASSERT_TRUE(shared_handle.IsValid());
- ASSERT_TRUE(shared_handle.OwnershipPassesToIPC());
- SharedMemory readonly(shared_handle, /*readonly=*/true);
- ASSERT_TRUE(readonly.Map(kContents.size()));
- ASSERT_EQ(0U, readonly.requested_size());
- const StringPiece readonly_contents(
- static_cast<const char*>(readonly.memory()),
- shmem.requested_size());
-
- // |shmem| should be able to update the content.
- memcpy(shmem.memory(), kContents.data(), kContents.size());
-
- ASSERT_EQ(kContents, shmem_contents);
- ASSERT_EQ(kContents, shared_contents);
- ASSERT_EQ(kContents, readonly_contents);
-
- // |shared| should also be able to update the content.
- memcpy(shared.memory(), ToLowerASCII(kContents).c_str(),
- kContents.size());
-
- ASSERT_EQ(StringPiece(ToLowerASCII(kContents)), shmem_contents);
- ASSERT_EQ(StringPiece(ToLowerASCII(kContents)), shared_contents);
- ASSERT_EQ(StringPiece(ToLowerASCII(kContents)), readonly_contents);
-}
-
-TEST_P(SharedMemoryTest, MapAt) {
- ASSERT_TRUE(SysInfo::VMAllocationGranularity() >= sizeof(uint32_t));
- const size_t kCount = SysInfo::VMAllocationGranularity();
- const size_t kDataSize = kCount * sizeof(uint32_t);
-
- SharedMemory memory;
- ASSERT_TRUE(memory.CreateAndMapAnonymous(kDataSize));
- uint32_t* ptr = static_cast<uint32_t*>(memory.memory());
- ASSERT_NE(ptr, static_cast<void*>(nullptr));
-
- for (size_t i = 0; i < kCount; ++i) {
- ptr[i] = i;
- }
-
- memory.Unmap();
-
- off_t offset = SysInfo::VMAllocationGranularity();
- ASSERT_TRUE(memory.MapAt(offset, kDataSize - offset));
- offset /= sizeof(uint32_t);
- ptr = static_cast<uint32_t*>(memory.memory());
- ASSERT_NE(ptr, static_cast<void*>(nullptr));
- for (size_t i = offset; i < kCount; ++i) {
- EXPECT_EQ(ptr[i - offset], i);
- }
-}
-
-TEST_P(SharedMemoryTest, MapTwice) {
- const uint32_t kDataSize = 1024;
- SharedMemory memory;
- bool rv = memory.CreateAndMapAnonymous(kDataSize);
- EXPECT_TRUE(rv);
-
- void* old_address = memory.memory();
-
- rv = memory.Map(kDataSize);
- EXPECT_FALSE(rv);
- EXPECT_EQ(old_address, memory.memory());
-}
-
-#if defined(OS_POSIX)
-// This test is not applicable for iOS (crbug.com/399384).
-#if !defined(OS_IOS)
-// Create a shared memory object, mmap it, and mprotect it to PROT_EXEC.
-TEST_P(SharedMemoryTest, AnonymousExecutable) {
-#if defined(OS_LINUX)
- // On Chromecast both /dev/shm and /tmp are mounted with 'noexec' option,
- // which makes this test fail. But Chromecast doesn't use NaCL so we don't
- // need this.
- if (!IsPathExecutable(FilePath("/dev/shm")) &&
- !IsPathExecutable(FilePath("/tmp"))) {
- return;
- }
-#endif // OS_LINUX
- const uint32_t kTestSize = 1 << 16;
-
- SharedMemory shared_memory;
- SharedMemoryCreateOptions options;
- options.size = kTestSize;
- options.executable = true;
-#if defined(OS_MACOSX) && !defined(OS_IOS)
- // The Mach functionality is tested in shared_memory_mac_unittest.cc.
- options.type = SharedMemoryHandle::POSIX;
-#endif
-
- EXPECT_TRUE(shared_memory.Create(options));
- EXPECT_TRUE(shared_memory.Map(shared_memory.requested_size()));
-
- EXPECT_EQ(0, mprotect(shared_memory.memory(), shared_memory.requested_size(),
- PROT_READ | PROT_EXEC));
-}
-#endif // !defined(OS_IOS)
-
-#if defined(OS_ANDROID)
-// This test is restricted to Android since there is no way on other platforms
-// to guarantee that a region can never be mapped with PROT_EXEC. E.g. on
-// Linux, anonymous shared regions come from /dev/shm which can be mounted
-// without 'noexec'. In this case, anything can perform an mprotect() to
-// change the protection mask of a given page.
-TEST(SharedMemoryTest, AnonymousIsNotExecutableByDefault) {
- const uint32_t kTestSize = 1 << 16;
-
- SharedMemory shared_memory;
- SharedMemoryCreateOptions options;
- options.size = kTestSize;
-
- EXPECT_TRUE(shared_memory.Create(options));
- EXPECT_TRUE(shared_memory.Map(shared_memory.requested_size()));
-
- errno = 0;
- EXPECT_EQ(-1, mprotect(shared_memory.memory(), shared_memory.requested_size(),
- PROT_READ | PROT_EXEC));
- EXPECT_EQ(EACCES, errno);
-}
-#endif // OS_ANDROID
-
-// Android supports a different permission model than POSIX for its "ashmem"
-// shared memory implementation. So the tests about file permissions are not
-// included on Android. Fuchsia does not use a file-backed shared memory
-// implementation.
-
-#if !defined(OS_ANDROID) && !defined(OS_FUCHSIA)
-
-// Set a umask and restore the old mask on destruction.
-class ScopedUmaskSetter {
- public:
- explicit ScopedUmaskSetter(mode_t target_mask) {
- old_umask_ = umask(target_mask);
- }
- ~ScopedUmaskSetter() { umask(old_umask_); }
- private:
- mode_t old_umask_;
- DISALLOW_IMPLICIT_CONSTRUCTORS(ScopedUmaskSetter);
-};
-
-// Create a shared memory object, check its permissions.
-TEST_P(SharedMemoryTest, FilePermissionsAnonymous) {
- const uint32_t kTestSize = 1 << 8;
-
- SharedMemory shared_memory;
- SharedMemoryCreateOptions options;
- options.size = kTestSize;
-#if defined(OS_MACOSX) && !defined(OS_IOS)
- // The Mach functionality is tested in shared_memory_mac_unittest.cc.
- options.type = SharedMemoryHandle::POSIX;
-#endif
- // Set a file mode creation mask that gives all permissions.
- ScopedUmaskSetter permissive_mask(S_IWGRP | S_IWOTH);
-
- EXPECT_TRUE(shared_memory.Create(options));
-
- int shm_fd =
- SharedMemory::GetFdFromSharedMemoryHandle(shared_memory.handle());
- struct stat shm_stat;
- EXPECT_EQ(0, fstat(shm_fd, &shm_stat));
- // Neither the group, nor others should be able to read the shared memory
- // file.
- EXPECT_FALSE(shm_stat.st_mode & S_IRWXO);
- EXPECT_FALSE(shm_stat.st_mode & S_IRWXG);
-}
-
-// Create a shared memory object, check its permissions.
-TEST_P(SharedMemoryTest, FilePermissionsNamed) {
- const uint32_t kTestSize = 1 << 8;
-
- SharedMemory shared_memory;
- SharedMemoryCreateOptions options;
- options.size = kTestSize;
-#if defined(OS_MACOSX) && !defined(OS_IOS)
- // The Mach functionality is tested in shared_memory_mac_unittest.cc.
- options.type = SharedMemoryHandle::POSIX;
-#endif
-
- // Set a file mode creation mask that gives all permissions.
- ScopedUmaskSetter permissive_mask(S_IWGRP | S_IWOTH);
-
- EXPECT_TRUE(shared_memory.Create(options));
-
- int fd = SharedMemory::GetFdFromSharedMemoryHandle(shared_memory.handle());
- struct stat shm_stat;
- EXPECT_EQ(0, fstat(fd, &shm_stat));
- // Neither the group, nor others should have been able to open the shared
- // memory file while its name existed.
- EXPECT_FALSE(shm_stat.st_mode & S_IRWXO);
- EXPECT_FALSE(shm_stat.st_mode & S_IRWXG);
-}
-#endif // !defined(OS_ANDROID) && !defined(OS_FUCHSIA)
-
-#endif // defined(OS_POSIX)
-
-// Map() will return addresses which are aligned to the platform page size, this
-// varies from platform to platform though. Since we'd like to advertise a
-// minimum alignment that callers can count on, test for it here.
-TEST_P(SharedMemoryTest, MapMinimumAlignment) {
- static const int kDataSize = 8192;
-
- SharedMemory shared_memory;
- ASSERT_TRUE(shared_memory.CreateAndMapAnonymous(kDataSize));
- EXPECT_EQ(0U, reinterpret_cast<uintptr_t>(
- shared_memory.memory()) & (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
- shared_memory.Close();
-}
-
-#if defined(OS_WIN)
-TEST_P(SharedMemoryTest, UnsafeImageSection) {
- const char kTestSectionName[] = "UnsafeImageSection";
- wchar_t path[MAX_PATH];
- EXPECT_GT(::GetModuleFileName(nullptr, path, arraysize(path)), 0U);
-
- // Map the current executable image to save us creating a new PE file on disk.
- base::win::ScopedHandle file_handle(::CreateFile(
- path, GENERIC_READ, FILE_SHARE_READ, nullptr, OPEN_EXISTING, 0, nullptr));
- EXPECT_TRUE(file_handle.IsValid());
- base::win::ScopedHandle section_handle(
- ::CreateFileMappingA(file_handle.Get(), nullptr,
- PAGE_READONLY | SEC_IMAGE, 0, 0, kTestSectionName));
- EXPECT_TRUE(section_handle.IsValid());
-
- // Check direct opening by name, from handle and duplicated from handle.
- SharedMemory shared_memory_open;
- EXPECT_TRUE(shared_memory_open.Open(kTestSectionName, true));
- EXPECT_FALSE(shared_memory_open.Map(1));
- EXPECT_EQ(nullptr, shared_memory_open.memory());
-
- SharedMemory shared_memory_handle_local(
- SharedMemoryHandle(section_handle.Take(), 1, UnguessableToken::Create()),
- true);
- EXPECT_FALSE(shared_memory_handle_local.Map(1));
- EXPECT_EQ(nullptr, shared_memory_handle_local.memory());
-
- // Check that a handle without SECTION_QUERY also can't be mapped as it can't
- // be checked.
- SharedMemory shared_memory_handle_dummy;
- SharedMemoryCreateOptions options;
- options.size = 0x1000;
- EXPECT_TRUE(shared_memory_handle_dummy.Create(options));
- HANDLE handle_no_query;
- EXPECT_TRUE(::DuplicateHandle(
- ::GetCurrentProcess(), shared_memory_handle_dummy.handle().GetHandle(),
- ::GetCurrentProcess(), &handle_no_query, FILE_MAP_READ, FALSE, 0));
- SharedMemory shared_memory_handle_no_query(
- SharedMemoryHandle(handle_no_query, options.size,
- UnguessableToken::Create()),
- true);
- EXPECT_FALSE(shared_memory_handle_no_query.Map(1));
- EXPECT_EQ(nullptr, shared_memory_handle_no_query.memory());
-}
-#endif // defined(OS_WIN)
-
-// iOS does not allow multiple processes.
-// Android ashmem does not support named shared memory.
-// Fuchsia SharedMemory does not support named shared memory.
-// Mac SharedMemory does not support named shared memory. crbug.com/345734
-#if !defined(OS_IOS) && !defined(OS_ANDROID) && !defined(OS_MACOSX) && \
- !defined(OS_FUCHSIA)
-// On POSIX it is especially important we test shmem across processes,
-// not just across threads. But the test is enabled on all platforms.
-class SharedMemoryProcessTest : public MultiProcessTest {
- public:
- static void CleanUp() {
- SharedMemory memory;
- memory.Delete(s_test_name_);
- }
-
- static int TaskTestMain() {
- int errors = 0;
- SharedMemory memory;
- bool rv = memory.CreateNamedDeprecated(s_test_name_, true, s_data_size_);
- EXPECT_TRUE(rv);
- if (rv != true)
- errors++;
- rv = memory.Map(s_data_size_);
- EXPECT_TRUE(rv);
- if (rv != true)
- errors++;
- int* ptr = static_cast<int*>(memory.memory());
-
- // This runs concurrently in multiple processes. Writes need to be atomic.
- subtle::Barrier_AtomicIncrement(ptr, 1);
- memory.Close();
- return errors;
- }
-
- static const char s_test_name_[];
- static const uint32_t s_data_size_;
-};
-
-const char SharedMemoryProcessTest::s_test_name_[] = "MPMem";
-const uint32_t SharedMemoryProcessTest::s_data_size_ = 1024;
-
-TEST_F(SharedMemoryProcessTest, SharedMemoryAcrossProcesses) {
- const int kNumTasks = 5;
-
- SharedMemoryProcessTest::CleanUp();
-
- // Create a shared memory region. Set the first word to 0.
- SharedMemory memory;
- bool rv = memory.CreateNamedDeprecated(s_test_name_, true, s_data_size_);
- ASSERT_TRUE(rv);
- rv = memory.Map(s_data_size_);
- ASSERT_TRUE(rv);
- int* ptr = static_cast<int*>(memory.memory());
- *ptr = 0;
-
- // Start |kNumTasks| processes, each of which atomically increments the first
- // word by 1.
- Process processes[kNumTasks];
- for (int index = 0; index < kNumTasks; ++index) {
- processes[index] = SpawnChild("SharedMemoryTestMain");
- ASSERT_TRUE(processes[index].IsValid());
- }
-
- // Check that each process exited correctly.
- int exit_code = 0;
- for (int index = 0; index < kNumTasks; ++index) {
- EXPECT_TRUE(processes[index].WaitForExit(&exit_code));
- EXPECT_EQ(0, exit_code);
- }
-
- // Check that the shared memory region reflects |kNumTasks| increments.
- ASSERT_EQ(kNumTasks, *ptr);
-
- memory.Close();
- SharedMemoryProcessTest::CleanUp();
-}
-
-MULTIPROCESS_TEST_MAIN(SharedMemoryTestMain) {
- return SharedMemoryProcessTest::TaskTestMain();
-}
-#endif // !defined(OS_IOS) && !defined(OS_ANDROID) && !defined(OS_MACOSX) &&
- // !defined(OS_FUCHSIA)
-
-TEST_P(SharedMemoryTest, MappedId) {
- const uint32_t kDataSize = 1024;
- SharedMemory memory;
- SharedMemoryCreateOptions options;
- options.size = kDataSize;
-#if defined(OS_MACOSX) && !defined(OS_IOS)
- // The Mach functionality is tested in shared_memory_mac_unittest.cc.
- options.type = SharedMemoryHandle::POSIX;
-#endif
-
- EXPECT_TRUE(memory.Create(options));
- base::UnguessableToken id = memory.handle().GetGUID();
- EXPECT_FALSE(id.is_empty());
- EXPECT_TRUE(memory.mapped_id().is_empty());
-
- EXPECT_TRUE(memory.Map(kDataSize));
- EXPECT_EQ(id, memory.mapped_id());
-
- memory.Close();
- EXPECT_EQ(id, memory.mapped_id());
-
- memory.Unmap();
- EXPECT_TRUE(memory.mapped_id().is_empty());
-}
-
-INSTANTIATE_TEST_CASE_P(Default,
- SharedMemoryTest,
- ::testing::Values(Mode::Default));
-#if defined(OS_LINUX) && !defined(OS_CHROMEOS)
-INSTANTIATE_TEST_CASE_P(SkipDevShm,
- SharedMemoryTest,
- ::testing::Values(Mode::DisableDevShm));
-#endif // defined(OS_LINUX) && !defined(OS_CHROMEOS)
-
-#if defined(OS_ANDROID)
-TEST(SharedMemoryTest, ReadOnlyRegions) {
- const uint32_t kDataSize = 1024;
- SharedMemory memory;
- SharedMemoryCreateOptions options;
- options.size = kDataSize;
- EXPECT_TRUE(memory.Create(options));
-
- EXPECT_FALSE(memory.handle().IsRegionReadOnly());
-
- // Check that it is possible to map the region directly from the fd.
- int region_fd = memory.handle().GetHandle();
- EXPECT_GE(region_fd, 0);
- void* address = mmap(nullptr, kDataSize, PROT_READ | PROT_WRITE, MAP_SHARED,
- region_fd, 0);
- bool success = address && address != MAP_FAILED;
- ASSERT_TRUE(address);
- ASSERT_NE(address, MAP_FAILED);
- if (success) {
- EXPECT_EQ(0, munmap(address, kDataSize));
- }
-
- ASSERT_TRUE(memory.handle().SetRegionReadOnly());
- EXPECT_TRUE(memory.handle().IsRegionReadOnly());
-
- // Check that it is no longer possible to map the region read/write.
- errno = 0;
- address = mmap(nullptr, kDataSize, PROT_READ | PROT_WRITE, MAP_SHARED,
- region_fd, 0);
- success = address && address != MAP_FAILED;
- ASSERT_FALSE(success);
- ASSERT_EQ(EPERM, errno);
- if (success) {
- EXPECT_EQ(0, munmap(address, kDataSize));
- }
-}
-
-TEST(SharedMemoryTest, ReadOnlyDescriptors) {
- const uint32_t kDataSize = 1024;
- SharedMemory memory;
- SharedMemoryCreateOptions options;
- options.size = kDataSize;
- EXPECT_TRUE(memory.Create(options));
-
- EXPECT_FALSE(memory.handle().IsRegionReadOnly());
-
- // Getting a read-only descriptor should not make the region read-only itself.
- SharedMemoryHandle ro_handle = memory.GetReadOnlyHandle();
- EXPECT_FALSE(memory.handle().IsRegionReadOnly());
-
- // Mapping a writable region from a read-only descriptor should not
- // be possible, it will DCHECK() in debug builds (see test below),
- // while returning false on release ones.
- {
- bool dcheck_fired = false;
- logging::ScopedLogAssertHandler log_assert(
- base::BindRepeating([](bool* flag, const char*, int, base::StringPiece,
- base::StringPiece) { *flag = true; },
- base::Unretained(&dcheck_fired)));
-
- SharedMemory rw_region(ro_handle.Duplicate(), /* read_only */ false);
- EXPECT_FALSE(rw_region.Map(kDataSize));
- EXPECT_EQ(DCHECK_IS_ON() ? true : false, dcheck_fired);
- }
-
- // Nor shall it turn the region read-only itself.
- EXPECT_FALSE(ro_handle.IsRegionReadOnly());
-
- // Mapping a read-only region from a read-only descriptor should work.
- SharedMemory ro_region(ro_handle.Duplicate(), /* read_only */ true);
- EXPECT_TRUE(ro_region.Map(kDataSize));
-
- // And it should turn the region read-only too.
- EXPECT_TRUE(ro_handle.IsRegionReadOnly());
- EXPECT_TRUE(memory.handle().IsRegionReadOnly());
- EXPECT_FALSE(memory.Map(kDataSize));
-
- ro_handle.Close();
-}
-
-#endif // OS_ANDROID
-
-} // namespace base
diff --git a/base/memory/shared_memory_win.cc b/base/memory/shared_memory_win.cc
deleted file mode 100644
index 0f4e965..0000000
--- a/base/memory/shared_memory_win.cc
+++ /dev/null
@@ -1,386 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/shared_memory.h"
-
-#include <aclapi.h>
-
-#include "base/allocator/partition_allocator/page_allocator.h"
-#include "base/logging.h"
-#include "base/memory/shared_memory_tracker.h"
-#include "base/metrics/histogram_functions.h"
-#include "base/metrics/histogram_macros.h"
-#include "base/rand_util.h"
-#include "base/strings/stringprintf.h"
-#include "base/strings/utf_string_conversions.h"
-#include "base/unguessable_token.h"
-#include "base/win/windows_version.h"
-#include "starboard/types.h"
-
-namespace base {
-namespace {
-
-// Errors that can occur during Shared Memory construction.
-// These match tools/metrics/histograms/histograms.xml.
-// This enum is append-only.
-enum CreateError {
- SUCCESS = 0,
- SIZE_ZERO = 1,
- SIZE_TOO_LARGE = 2,
- INITIALIZE_ACL_FAILURE = 3,
- INITIALIZE_SECURITY_DESC_FAILURE = 4,
- SET_SECURITY_DESC_FAILURE = 5,
- CREATE_FILE_MAPPING_FAILURE = 6,
- REDUCE_PERMISSIONS_FAILURE = 7,
- ALREADY_EXISTS = 8,
- CREATE_ERROR_LAST = ALREADY_EXISTS
-};
-
-// Emits UMA metrics about encountered errors. Pass zero (0) for |winerror|
-// if there is no associated Windows error.
-void LogError(CreateError error, DWORD winerror) {
- UMA_HISTOGRAM_ENUMERATION("SharedMemory.CreateError", error,
- CREATE_ERROR_LAST + 1);
- static_assert(ERROR_SUCCESS == 0, "Windows error code changed!");
- if (winerror != ERROR_SUCCESS)
- UmaHistogramSparse("SharedMemory.CreateWinError", winerror);
-}
-
-typedef enum _SECTION_INFORMATION_CLASS {
- SectionBasicInformation,
-} SECTION_INFORMATION_CLASS;
-
-typedef struct _SECTION_BASIC_INFORMATION {
- PVOID BaseAddress;
- ULONG Attributes;
- LARGE_INTEGER Size;
-} SECTION_BASIC_INFORMATION, *PSECTION_BASIC_INFORMATION;
-
-typedef ULONG(__stdcall* NtQuerySectionType)(
- HANDLE SectionHandle,
- SECTION_INFORMATION_CLASS SectionInformationClass,
- PVOID SectionInformation,
- ULONG SectionInformationLength,
- PULONG ResultLength);
-
-// Returns the length of the memory section starting at the supplied address.
-size_t GetMemorySectionSize(void* address) {
- MEMORY_BASIC_INFORMATION memory_info;
- if (!::VirtualQuery(address, &memory_info, sizeof(memory_info)))
- return 0;
- return memory_info.RegionSize - (static_cast<char*>(address) -
- static_cast<char*>(memory_info.AllocationBase));
-}
-
-// Checks if the section object is safe to map. At the moment this just means
-// it's not an image section.
-bool IsSectionSafeToMap(HANDLE handle) {
- static NtQuerySectionType nt_query_section_func;
- if (!nt_query_section_func) {
- nt_query_section_func = reinterpret_cast<NtQuerySectionType>(
- ::GetProcAddress(::GetModuleHandle(L"ntdll.dll"), "NtQuerySection"));
- DCHECK(nt_query_section_func);
- }
-
- // The handle must have SECTION_QUERY access for this to succeed.
- SECTION_BASIC_INFORMATION basic_information = {};
- ULONG status =
- nt_query_section_func(handle, SectionBasicInformation, &basic_information,
- sizeof(basic_information), nullptr);
- if (status)
- return false;
- return (basic_information.Attributes & SEC_IMAGE) != SEC_IMAGE;
-}
-
-// Returns a HANDLE on success and |nullptr| on failure.
-// This function is similar to CreateFileMapping, but removes the permissions
-// WRITE_DAC, WRITE_OWNER, READ_CONTROL, and DELETE.
-//
-// A newly created file mapping has two sets of permissions. It has access
-// control permissions (WRITE_DAC, WRITE_OWNER, READ_CONTROL, and DELETE) and
-// file permissions (FILE_MAP_READ, FILE_MAP_WRITE, etc.). ::DuplicateHandle()
-// with the parameter DUPLICATE_SAME_ACCESS copies both sets of permissions.
-//
-// The Chrome sandbox prevents HANDLEs with the WRITE_DAC permission from being
-// duplicated into unprivileged processes. But the only way to copy file
-// permissions is with the parameter DUPLICATE_SAME_ACCESS. This means that
-// there is no way for a privileged process to duplicate a file mapping into an
-// unprivileged process while maintaining the previous file permissions.
-//
-// By removing all access control permissions of a file mapping immediately
-// after creation, ::DuplicateHandle() effectively only copies the file
-// permissions.
-HANDLE CreateFileMappingWithReducedPermissions(SECURITY_ATTRIBUTES* sa,
- size_t rounded_size,
- LPCWSTR name) {
- HANDLE h = CreateFileMapping(INVALID_HANDLE_VALUE, sa, PAGE_READWRITE, 0,
- static_cast<DWORD>(rounded_size), name);
- if (!h) {
- LogError(CREATE_FILE_MAPPING_FAILURE, GetLastError());
- return nullptr;
- }
-
- HANDLE h2;
- BOOL success = ::DuplicateHandle(
- GetCurrentProcess(), h, GetCurrentProcess(), &h2,
- FILE_MAP_READ | FILE_MAP_WRITE | SECTION_QUERY, FALSE, 0);
- BOOL rv = ::CloseHandle(h);
- DCHECK(rv);
-
- if (!success) {
- LogError(REDUCE_PERMISSIONS_FAILURE, GetLastError());
- return nullptr;
- }
-
- return h2;
-}
-
-} // namespace.
-
-SharedMemory::SharedMemory() {}
-
-SharedMemory::SharedMemory(const string16& name) : name_(name) {}
-
-SharedMemory::SharedMemory(const SharedMemoryHandle& handle, bool read_only)
- : external_section_(true), shm_(handle), read_only_(read_only) {}
-
-SharedMemory::~SharedMemory() {
- Unmap();
- Close();
-}
-
-// static
-bool SharedMemory::IsHandleValid(const SharedMemoryHandle& handle) {
- return handle.IsValid();
-}
-
-// static
-void SharedMemory::CloseHandle(const SharedMemoryHandle& handle) {
- handle.Close();
-}
-
-// static
-size_t SharedMemory::GetHandleLimit() {
- // Rounded down from value reported here:
- // http://blogs.technet.com/b/markrussinovich/archive/2009/09/29/3283844.aspx
- return static_cast<size_t>(1 << 23);
-}
-
-// static
-SharedMemoryHandle SharedMemory::DuplicateHandle(
- const SharedMemoryHandle& handle) {
- return handle.Duplicate();
-}
-
-bool SharedMemory::CreateAndMapAnonymous(size_t size) {
- return CreateAnonymous(size) && Map(size);
-}
-
-bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
- // TODO(crbug.com/210609): NaCl forces us to round up 64k here, wasting 32k
- // per mapping on average.
- static const size_t kSectionMask = 65536 - 1;
- DCHECK(!options.executable);
- DCHECK(!shm_.IsValid());
- if (options.size == 0) {
- LogError(SIZE_ZERO, 0);
- return false;
- }
-
- // Check maximum accounting for overflow.
- if (options.size >
- static_cast<size_t>(std::numeric_limits<int>::max()) - kSectionMask) {
- LogError(SIZE_TOO_LARGE, 0);
- return false;
- }
-
- size_t rounded_size = (options.size + kSectionMask) & ~kSectionMask;
- name_ = options.name_deprecated ?
- ASCIIToUTF16(*options.name_deprecated) : L"";
- SECURITY_ATTRIBUTES sa = {sizeof(sa), nullptr, FALSE};
- SECURITY_DESCRIPTOR sd;
- ACL dacl;
-
- if (name_.empty()) {
- // Add an empty DACL to enforce anonymous read-only sections.
- sa.lpSecurityDescriptor = &sd;
- if (!InitializeAcl(&dacl, sizeof(dacl), ACL_REVISION)) {
- LogError(INITIALIZE_ACL_FAILURE, GetLastError());
- return false;
- }
- if (!InitializeSecurityDescriptor(&sd, SECURITY_DESCRIPTOR_REVISION)) {
- LogError(INITIALIZE_SECURITY_DESC_FAILURE, GetLastError());
- return false;
- }
- if (!SetSecurityDescriptorDacl(&sd, TRUE, &dacl, FALSE)) {
- LogError(SET_SECURITY_DESC_FAILURE, GetLastError());
- return false;
- }
-
- if (base::win::GetVersion() < base::win::VERSION_WIN8_1) {
- // Windows < 8.1 ignores DACLs on certain unnamed objects (like shared
- // sections). So, we generate a random name when we need to enforce
- // read-only.
- uint64_t rand_values[4];
- RandBytes(&rand_values, sizeof(rand_values));
- name_ = StringPrintf(L"CrSharedMem_%016llx%016llx%016llx%016llx",
- rand_values[0], rand_values[1], rand_values[2],
- rand_values[3]);
- DCHECK(!name_.empty());
- }
- }
-
- shm_ = SharedMemoryHandle(
- CreateFileMappingWithReducedPermissions(
- &sa, rounded_size, name_.empty() ? nullptr : name_.c_str()),
- rounded_size, UnguessableToken::Create());
- if (!shm_.IsValid()) {
- // The error is logged within CreateFileMappingWithReducedPermissions().
- return false;
- }
-
- requested_size_ = options.size;
-
- // Check if the shared memory pre-exists.
- if (GetLastError() == ERROR_ALREADY_EXISTS) {
- // If the file already existed, set requested_size_ to 0 to show that
- // we don't know the size.
- requested_size_ = 0;
- external_section_ = true;
- if (!options.open_existing_deprecated) {
- Close();
- // From "if" above: GetLastError() == ERROR_ALREADY_EXISTS.
- LogError(ALREADY_EXISTS, ERROR_ALREADY_EXISTS);
- return false;
- }
- }
-
- LogError(SUCCESS, ERROR_SUCCESS);
- return true;
-}
-
-bool SharedMemory::Delete(const std::string& name) {
- // intentionally empty -- there is nothing for us to do on Windows.
- return true;
-}
-
-bool SharedMemory::Open(const std::string& name, bool read_only) {
- DCHECK(!shm_.IsValid());
- DWORD access = FILE_MAP_READ | SECTION_QUERY;
- if (!read_only)
- access |= FILE_MAP_WRITE;
- name_ = ASCIIToUTF16(name);
- read_only_ = read_only;
-
- // This form of sharing shared memory is deprecated. https://crbug.com/345734.
- // However, we can't get rid of it without a significant refactor because its
- // used to communicate between two versions of the same service process, very
- // early in the life cycle.
- // Technically, we should also pass the GUID from the original shared memory
- // region. We don't do that - this means that we will overcount this memory,
- // which thankfully isn't relevant since Chrome only communicates with a
- // single version of the service process.
- // We pass the size |0|, which is a dummy size and wrong, but otherwise
- // harmless.
- shm_ = SharedMemoryHandle(
- OpenFileMapping(access, false, name_.empty() ? nullptr : name_.c_str()),
- 0u, UnguessableToken::Create());
- if (!shm_.IsValid())
- return false;
- // If a name specified assume it's an external section.
- if (!name_.empty())
- external_section_ = true;
- // Note: size_ is not set in this case.
- return true;
-}
-
-bool SharedMemory::MapAt(off_t offset, size_t bytes) {
- if (!shm_.IsValid()) {
- DLOG(ERROR) << "Invalid SharedMemoryHandle.";
- return false;
- }
-
- if (bytes > static_cast<size_t>(std::numeric_limits<int>::max())) {
- DLOG(ERROR) << "Bytes required exceeds the 2G limitation.";
- return false;
- }
-
- if (memory_) {
- DLOG(ERROR) << "The SharedMemory has been mapped already.";
- return false;
- }
-
- if (external_section_ && !IsSectionSafeToMap(shm_.GetHandle())) {
- DLOG(ERROR) << "SharedMemoryHandle is not safe to be mapped.";
- return false;
- }
-
- // Try to map the shared memory. On the first failure, release any reserved
- // address space for a single retry.
- for (int i = 0; i < 2; ++i) {
- memory_ = MapViewOfFile(
- shm_.GetHandle(),
- read_only_ ? FILE_MAP_READ : FILE_MAP_READ | FILE_MAP_WRITE,
- static_cast<uint64_t>(offset) >> 32, static_cast<DWORD>(offset), bytes);
- if (memory_)
- break;
- ReleaseReservation();
- }
- if (!memory_) {
- DPLOG(ERROR) << "Failed executing MapViewOfFile";
- return false;
- }
-
- DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(memory_) &
- (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
- mapped_size_ = GetMemorySectionSize(memory_);
- mapped_id_ = shm_.GetGUID();
- SharedMemoryTracker::GetInstance()->IncrementMemoryUsage(*this);
- return true;
-}
-
-bool SharedMemory::Unmap() {
- if (!memory_)
- return false;
-
- SharedMemoryTracker::GetInstance()->DecrementMemoryUsage(*this);
- UnmapViewOfFile(memory_);
- memory_ = nullptr;
- mapped_id_ = UnguessableToken();
- return true;
-}
-
-SharedMemoryHandle SharedMemory::GetReadOnlyHandle() const {
- HANDLE result;
- ProcessHandle process = GetCurrentProcess();
- if (!::DuplicateHandle(process, shm_.GetHandle(), process, &result,
- FILE_MAP_READ | SECTION_QUERY, FALSE, 0)) {
- return SharedMemoryHandle();
- }
- SharedMemoryHandle handle =
- SharedMemoryHandle(result, shm_.GetSize(), shm_.GetGUID());
- handle.SetOwnershipPassesToIPC(true);
- return handle;
-}
-
-void SharedMemory::Close() {
- if (shm_.IsValid()) {
- shm_.Close();
- shm_ = SharedMemoryHandle();
- }
-}
-
-SharedMemoryHandle SharedMemory::handle() const {
- return shm_;
-}
-
-SharedMemoryHandle SharedMemory::TakeHandle() {
- SharedMemoryHandle handle(shm_);
- handle.SetOwnershipPassesToIPC(true);
- Unmap();
- shm_ = SharedMemoryHandle();
- return handle;
-}
-
-} // namespace base
diff --git a/base/memory/shared_memory_win_unittest.cc b/base/memory/shared_memory_win_unittest.cc
deleted file mode 100644
index 0857284..0000000
--- a/base/memory/shared_memory_win_unittest.cc
+++ /dev/null
@@ -1,226 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <windows.h>
-#include <sddl.h>
-
-#include <memory>
-
-#include "base/command_line.h"
-#include "base/memory/free_deleter.h"
-#include "base/memory/shared_memory.h"
-#include "base/process/process.h"
-#include "base/rand_util.h"
-#include "base/strings/stringprintf.h"
-#include "base/strings/sys_string_conversions.h"
-#include "base/test/multiprocess_test.h"
-#include "base/test/test_timeouts.h"
-#include "base/win/scoped_handle.h"
-#include "base/win/win_util.h"
-#include "starboard/memory.h"
-#include "starboard/types.h"
-#include "testing/multiprocess_func_list.h"
-
-namespace base {
-namespace {
-const char* kHandleSwitchName = "shared_memory_win_test_switch";
-
-// Creates a process token with a low integrity SID.
-win::ScopedHandle CreateLowIntegritySID() {
- HANDLE process_token_raw = nullptr;
- BOOL success = ::OpenProcessToken(GetCurrentProcess(),
- TOKEN_DUPLICATE | TOKEN_ADJUST_DEFAULT |
- TOKEN_QUERY | TOKEN_ASSIGN_PRIMARY,
- &process_token_raw);
- if (!success)
- return base::win::ScopedHandle();
- win::ScopedHandle process_token(process_token_raw);
-
- HANDLE lowered_process_token_raw = nullptr;
- success =
- ::DuplicateTokenEx(process_token.Get(), 0, NULL, SecurityImpersonation,
- TokenPrimary, &lowered_process_token_raw);
- if (!success)
- return base::win::ScopedHandle();
- win::ScopedHandle lowered_process_token(lowered_process_token_raw);
-
- // Low integrity SID
- WCHAR integrity_sid_string[20] = L"S-1-16-4096";
- PSID integrity_sid = nullptr;
- success = ::ConvertStringSidToSid(integrity_sid_string, &integrity_sid);
- if (!success)
- return base::win::ScopedHandle();
-
- TOKEN_MANDATORY_LABEL TIL = {};
- TIL.Label.Attributes = SE_GROUP_INTEGRITY;
- TIL.Label.Sid = integrity_sid;
- success = ::SetTokenInformation(
- lowered_process_token.Get(), TokenIntegrityLevel, &TIL,
- sizeof(TOKEN_MANDATORY_LABEL) + GetLengthSid(integrity_sid));
- if (!success)
- return base::win::ScopedHandle();
- return lowered_process_token;
-}
-
-// Reads a HANDLE from the pipe as a raw int, least significant digit first.
-win::ScopedHandle ReadHandleFromPipe(HANDLE pipe) {
- // Read from parent pipe.
- const size_t buf_size = 1000;
- char buffer[buf_size];
- memset(buffer, 0, buf_size);
- DWORD bytes_read;
- BOOL success = ReadFile(pipe, buffer, buf_size, &bytes_read, NULL);
-
- if (!success || bytes_read == 0) {
- LOG(ERROR) << "Failed to read handle from pipe.";
- return win::ScopedHandle();
- }
-
- int handle_as_int = 0;
- int power_of_ten = 1;
- for (unsigned int i = 0; i < bytes_read; ++i) {
- handle_as_int += buffer[i] * power_of_ten;
- power_of_ten *= 10;
- }
-
- return win::ScopedHandle(reinterpret_cast<HANDLE>(handle_as_int));
-}
-
-// Writes a HANDLE to a pipe as a raw int, least significant digit first.
-void WriteHandleToPipe(HANDLE pipe, HANDLE handle) {
- uint32_t handle_as_int = base::win::HandleToUint32(handle);
-
- std::unique_ptr<char, base::FreeDeleter> buffer(
- static_cast<char*>(malloc(1000)));
- size_t index = 0;
- while (handle_as_int > 0) {
- buffer.get()[index] = handle_as_int % 10;
- handle_as_int /= 10;
- ++index;
- }
-
- ::ConnectNamedPipe(pipe, nullptr);
- DWORD written;
- ASSERT_TRUE(::WriteFile(pipe, buffer.get(), index, &written, NULL));
-}
-
-// Creates a communication pipe with the given name.
-win::ScopedHandle CreateCommunicationPipe(const std::wstring& name) {
- return win::ScopedHandle(CreateNamedPipe(name.c_str(), // pipe name
- PIPE_ACCESS_DUPLEX, PIPE_WAIT, 255,
- 1000, 1000, 0, NULL));
-}
-
-// Generates a random name for a communication pipe.
-std::wstring CreateCommunicationPipeName() {
- uint64_t rand_values[4];
- RandBytes(&rand_values, sizeof(rand_values));
- std::wstring child_pipe_name = StringPrintf(
- L"\\\\.\\pipe\\SharedMemoryWinTest_%016llx%016llx%016llx%016llx",
- rand_values[0], rand_values[1], rand_values[2], rand_values[3]);
- return child_pipe_name;
-}
-
-class SharedMemoryWinTest : public base::MultiProcessTest {
- protected:
- CommandLine MakeCmdLine(const std::string& procname) override {
- CommandLine line = base::MultiProcessTest::MakeCmdLine(procname);
- line.AppendSwitchASCII(kHandleSwitchName, communication_pipe_name_);
- return line;
- }
-
- std::string communication_pipe_name_;
-};
-
-MULTIPROCESS_TEST_MAIN(LowerPermissions) {
- std::string handle_name =
- CommandLine::ForCurrentProcess()->GetSwitchValueASCII(kHandleSwitchName);
- std::wstring handle_name16 = SysUTF8ToWide(handle_name);
- win::ScopedHandle parent_pipe(
- ::CreateFile(handle_name16.c_str(), // pipe name
- GENERIC_READ,
- 0, // no sharing
- NULL, // default security attributes
- OPEN_EXISTING, // opens existing pipe
- 0, // default attributes
- NULL)); // no template file
- if (parent_pipe.Get() == INVALID_HANDLE_VALUE) {
- LOG(ERROR) << "Failed to open communication pipe.";
- return 1;
- }
-
- win::ScopedHandle received_handle = ReadHandleFromPipe(parent_pipe.Get());
- if (!received_handle.Get()) {
- LOG(ERROR) << "Failed to read handle from pipe.";
- return 1;
- }
-
- // Attempting to add the WRITE_DAC permission should fail.
- HANDLE duped_handle;
- BOOL success = ::DuplicateHandle(GetCurrentProcess(), received_handle.Get(),
- GetCurrentProcess(), &duped_handle,
- FILE_MAP_READ | WRITE_DAC, FALSE, 0);
- if (success) {
- LOG(ERROR) << "Should not have been able to add WRITE_DAC permission.";
- return 1;
- }
-
- // Attempting to add the FILE_MAP_WRITE permission should fail.
- success = ::DuplicateHandle(GetCurrentProcess(), received_handle.Get(),
- GetCurrentProcess(), &duped_handle,
- FILE_MAP_READ | FILE_MAP_WRITE, FALSE, 0);
- if (success) {
- LOG(ERROR) << "Should not have been able to add FILE_MAP_WRITE permission.";
- return 1;
- }
-
- // Attempting to duplicate the HANDLE with the same permissions should
- // succeed.
- success = ::DuplicateHandle(GetCurrentProcess(), received_handle.Get(),
- GetCurrentProcess(), &duped_handle, FILE_MAP_READ,
- FALSE, 0);
- if (!success) {
- LOG(ERROR) << "Failed to duplicate handle.";
- return 4;
- }
- ::CloseHandle(duped_handle);
- return 0;
-}
-
-TEST_F(SharedMemoryWinTest, LowerPermissions) {
- std::wstring communication_pipe_name = CreateCommunicationPipeName();
- communication_pipe_name_ = SysWideToUTF8(communication_pipe_name);
-
- win::ScopedHandle communication_pipe =
- CreateCommunicationPipe(communication_pipe_name);
- ASSERT_TRUE(communication_pipe.Get());
-
- win::ScopedHandle lowered_process_token = CreateLowIntegritySID();
- ASSERT_TRUE(lowered_process_token.Get());
-
- base::LaunchOptions options;
- options.as_user = lowered_process_token.Get();
- base::Process process = SpawnChildWithOptions("LowerPermissions", options);
- ASSERT_TRUE(process.IsValid());
-
- SharedMemory memory;
- memory.CreateAndMapAnonymous(1001);
-
- // Duplicate into child process, giving only FILE_MAP_READ permissions.
- HANDLE raw_handle = nullptr;
- ::DuplicateHandle(::GetCurrentProcess(), memory.handle().GetHandle(),
- process.Handle(), &raw_handle,
- FILE_MAP_READ | SECTION_QUERY, FALSE, 0);
- ASSERT_TRUE(raw_handle);
-
- WriteHandleToPipe(communication_pipe.Get(), raw_handle);
-
- int exit_code;
- EXPECT_TRUE(process.WaitForExitWithTimeout(TestTimeouts::action_max_timeout(),
- &exit_code));
- EXPECT_EQ(0, exit_code);
-}
-
-} // namespace
-} // namespace base
diff --git a/base/memory/singleton.h b/base/memory/singleton.h
index 2586aa9..6875243 100644
--- a/base/memory/singleton.h
+++ b/base/memory/singleton.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Copyright 2011 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
@@ -27,12 +27,10 @@
#ifndef BASE_MEMORY_SINGLETON_H_
#define BASE_MEMORY_SINGLETON_H_
-#include "base/at_exit.h"
-#include "base/atomicops.h"
-#include "base/base_export.h"
+#include <atomic>
+
+#include "base/dcheck_is_on.h"
#include "base/lazy_instance_helpers.h"
-#include "base/logging.h"
-#include "base/macros.h"
#include "base/threading/thread_restrictions.h"
namespace base {
@@ -104,7 +102,7 @@
// WARNING: User has to support a New() which returns null.
static Type* New() {
// Only constructs once and returns pointer; otherwise returns null.
- if (subtle::NoBarrier_AtomicExchange(&dead_, 1))
+ if (dead_.exchange(true, std::memory_order_relaxed))
return nullptr;
return new (buffer_) Type();
@@ -121,18 +119,20 @@
static const bool kAllowedToAccessOnNonjoinableThread = true;
#endif
- static void ResurrectForTesting() { subtle::NoBarrier_Store(&dead_, 0); }
+ static void ResurrectForTesting() {
+ dead_.store(false, std::memory_order_relaxed);
+ }
private:
alignas(Type) static char buffer_[sizeof(Type)];
// Signal the object was already deleted, so it is not revived.
- static subtle::Atomic32 dead_;
+ static std::atomic<bool> dead_;
};
template <typename Type>
alignas(Type) char StaticMemorySingletonTraits<Type>::buffer_[sizeof(Type)];
template <typename Type>
-subtle::Atomic32 StaticMemorySingletonTraits<Type>::dead_ = 0;
+std::atomic<bool> StaticMemorySingletonTraits<Type>::dead_ = false;
// The Singleton<Type, Traits, DifferentiatingType> class manages a single
// instance of Type which will be created on first use and will be destroyed at
@@ -153,12 +153,15 @@
// class FooClass {
// public:
// static FooClass* GetInstance(); <-- See comment below on this.
+//
+// FooClass(const FooClass&) = delete;
+// FooClass& operator=(const FooClass&) = delete;
+//
// void Bar() { ... }
+//
// private:
// FooClass() { ... }
// friend struct base::DefaultSingletonTraits<FooClass>;
-//
-// DISALLOW_COPY_AND_ASSIGN(FooClass);
// };
//
// In your source file:
@@ -219,43 +222,62 @@
typename DifferentiatingType = Type>
class Singleton {
private:
- // Classes using the Singleton<T> pattern should declare a GetInstance()
- // method and call Singleton::get() from within that.
- friend Type* Type::GetInstance();
+ // A class T using the Singleton<T> pattern should declare a GetInstance()
+ // method and call Singleton::get() from within that. T may also declare a
+ // GetInstanceIfExists() method to invoke Singleton::GetIfExists().
+ friend Type;
// This class is safe to be constructed and copy-constructed since it has no
// member.
- // Return a pointer to the one true instance of the class.
+ // Returns a pointer to the one true instance of the class.
static Type* get() {
#if DCHECK_IS_ON()
if (!Traits::kAllowedToAccessOnNonjoinableThread)
- ThreadRestrictions::AssertSingletonAllowed();
+ internal::AssertSingletonAllowed();
#endif
return subtle::GetOrCreateLazyPointer(
- &instance_, &CreatorFunc, nullptr,
+ instance_, &CreatorFunc, nullptr,
Traits::kRegisterAtExit ? OnExit : nullptr, nullptr);
}
+ // Returns the same result as get() if the instance exists but doesn't
+ // construct it (and returns null) if it doesn't.
+ static Type* GetIfExists() {
+#if DCHECK_IS_ON()
+ if (!Traits::kAllowedToAccessOnNonjoinableThread)
+ internal::AssertSingletonAllowed();
+#endif
+
+ if (!instance_.load(std::memory_order_relaxed))
+ return nullptr;
+
+ // Need to invoke get() nonetheless as some Traits return null after
+ // destruction (even though |instance_| still holds garbage).
+ return get();
+ }
+
// Internal method used as an adaptor for GetOrCreateLazyPointer(). Do not use
// outside of that use case.
- static Type* CreatorFunc(void* creator_arg) { return Traits::New(); }
+ static Type* CreatorFunc(void* /* creator_arg*/) { return Traits::New(); }
// Adapter function for use with AtExit(). This should be called single
// threaded, so don't use atomic operations.
// Calling OnExit while singleton is in use by other threads is a mistake.
- static void OnExit(void* unused) {
+ static void OnExit(void* /*unused*/) {
// AtExit should only ever be register after the singleton instance was
// created. We should only ever get here with a valid instance_ pointer.
- Traits::Delete(reinterpret_cast<Type*>(subtle::NoBarrier_Load(&instance_)));
- instance_ = 0;
+ Traits::Delete(
+ reinterpret_cast<Type*>(instance_.load(std::memory_order_relaxed)));
+ instance_.store(0, std::memory_order_relaxed);
}
- static subtle::AtomicWord instance_;
+ static std::atomic<uintptr_t> instance_;
};
template <typename Type, typename Traits, typename DifferentiatingType>
-subtle::AtomicWord Singleton<Type, Traits, DifferentiatingType>::instance_ = 0;
+std::atomic<uintptr_t> Singleton<Type, Traits, DifferentiatingType>::instance_ =
+ 0;
} // namespace base
diff --git a/base/memory/singleton_unittest.cc b/base/memory/singleton_unittest.cc
index 4d3f407..f825724 100644
--- a/base/memory/singleton_unittest.cc
+++ b/base/memory/singleton_unittest.cc
@@ -1,10 +1,12 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "base/memory/singleton.h"
+#include <stdint.h>
+
#include "base/at_exit.h"
-#include "starboard/types.h"
+#include "base/memory/aligned_memory.h"
+#include "base/memory/singleton.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
@@ -155,10 +157,15 @@
}
CallbackFunc* GetStaticSingleton() {
- return &CallbackSingletonWithStaticTrait::GetInstance()->callback_;
+ CallbackSingletonWithStaticTrait* instance =
+ CallbackSingletonWithStaticTrait::GetInstance();
+ if (instance == nullptr) {
+ return nullptr;
+ } else {
+ return &instance->callback_;
+ }
}
-
class SingletonTest : public testing::Test {
public:
SingletonTest() = default;
@@ -272,9 +279,6 @@
VerifiesCallbacksNotCalled();
}
-#define EXPECT_ALIGNED(ptr, align) \
- EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(ptr) & (align - 1))
-
TEST_F(SingletonTest, Alignment) {
// Create some static singletons with increasing sizes and alignment
// requirements. By ordering this way, the linker will need to do some work to
@@ -283,22 +287,15 @@
AlignedTestSingleton<int32_t>::GetInstance();
AlignedTestSingleton<AlignedData<32>>* align32 =
AlignedTestSingleton<AlignedData<32>>::GetInstance();
-#if !defined(STARBOARD)
AlignedTestSingleton<AlignedData<128>>* align128 =
AlignedTestSingleton<AlignedData<128>>::GetInstance();
AlignedTestSingleton<AlignedData<4096>>* align4096 =
AlignedTestSingleton<AlignedData<4096>>::GetInstance();
-#endif
- EXPECT_ALIGNED(align4, 4);
- EXPECT_ALIGNED(align32, 32);
-// At least on Raspi, alignas with big alignment numbers does not work and
-// that is compliant with C++ standard as the alignment is larger than
-// std::max_align_t.
-#if !defined(STARBOARD)
- EXPECT_ALIGNED(align128, 128);
- EXPECT_ALIGNED(align4096, 4096);
-#endif
+ EXPECT_TRUE(IsAligned(align4, 4));
+ EXPECT_TRUE(IsAligned(align32, 32));
+ EXPECT_TRUE(IsAligned(align128, 128));
+ EXPECT_TRUE(IsAligned(align4096, 4096));
}
} // namespace
diff --git a/base/memory/stack_allocated.h b/base/memory/stack_allocated.h
new file mode 100644
index 0000000..4eee701
--- /dev/null
+++ b/base/memory/stack_allocated.h
@@ -0,0 +1,57 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_STACK_ALLOCATED_H_
+#define BASE_MEMORY_STACK_ALLOCATED_H_
+
+#if defined(__clang__)
+#define STACK_ALLOCATED_IGNORE(reason) \
+ __attribute__((annotate("stack_allocated_ignore")))
+#else // !defined(__clang__)
+#define STACK_ALLOCATED_IGNORE(reason)
+#endif // !defined(__clang__)
+
+// If a class or one of its ancestor classes is annotated with STACK_ALLOCATED()
+// in its class definition, then instances of the class may not be allocated on
+// the heap or as a member variable of a non-stack-allocated class.
+#define STACK_ALLOCATED() \
+ public: \
+ using IsStackAllocatedTypeMarker [[maybe_unused]] = int; \
+ \
+ private: \
+ void* operator new(size_t) = delete; \
+ void* operator new(size_t, ::base::NotNullTag, void*) = delete; \
+ void* operator new(size_t, void*) = delete
+
+namespace base {
+
+// NotNullTag was originally added to WebKit here:
+// https://trac.webkit.org/changeset/103243/webkit
+// ...with the stated goal of improving the performance of the placement new
+// operator and potentially enabling the -fomit-frame-pointer compiler flag.
+//
+// TODO(szager): The placement new operator which uses this tag is currently
+// defined in third_party/blink/renderer/platform/wtf/allocator/allocator.h,
+// in the global namespace. It should probably move to /base.
+//
+// It's unknown at the time of writing whether it still provides any benefit
+// (or if it ever did). It is used by placing the kNotNull tag before the
+// address of the object when calling placement new.
+//
+// If the kNotNull tag is specified to placement new for a null pointer,
+// Undefined Behaviour can result.
+//
+// Example:
+//
+// union { int i; } u;
+//
+// // Typically placement new looks like this.
+// new (&u.i) int(3);
+// // But we can promise `&u.i` is not null like this.
+// new (base::NotNullTag::kNotNull, &u.i) int(3);
+enum class NotNullTag { kNotNull };
+
+} // namespace base
+
+#endif // BASE_MEMORY_STACK_ALLOCATED_H_
diff --git a/base/memory/unsafe_shared_memory_pool.cc b/base/memory/unsafe_shared_memory_pool.cc
new file mode 100644
index 0000000..2ba8fb1
--- /dev/null
+++ b/base/memory/unsafe_shared_memory_pool.cc
@@ -0,0 +1,107 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/unsafe_shared_memory_pool.h"
+
+#include "base/logging.h"
+
+namespace {
+constexpr size_t kMaxStoredBuffers = 32;
+} // namespace
+
+namespace base {
+
+UnsafeSharedMemoryPool::UnsafeSharedMemoryPool() = default;
+
+UnsafeSharedMemoryPool::~UnsafeSharedMemoryPool() = default;
+
+UnsafeSharedMemoryPool::Handle::Handle(
+ PassKey<UnsafeSharedMemoryPool>,
+ UnsafeSharedMemoryRegion region,
+ WritableSharedMemoryMapping mapping,
+ scoped_refptr<UnsafeSharedMemoryPool> pool)
+ : region_(std::move(region)),
+ mapping_(std::move(mapping)),
+ pool_(std::move(pool)) {
+ CHECK(pool_);
+ DCHECK(region_.IsValid());
+ DCHECK(mapping_.IsValid());
+}
+
+UnsafeSharedMemoryPool::Handle::~Handle() {
+ pool_->ReleaseBuffer(std::move(region_), std::move(mapping_));
+}
+
+const UnsafeSharedMemoryRegion& UnsafeSharedMemoryPool::Handle::GetRegion()
+ const {
+ return region_;
+}
+
+const WritableSharedMemoryMapping& UnsafeSharedMemoryPool::Handle::GetMapping()
+ const {
+ return mapping_;
+}
+
+std::unique_ptr<UnsafeSharedMemoryPool::Handle>
+UnsafeSharedMemoryPool::MaybeAllocateBuffer(size_t region_size) {
+ AutoLock lock(lock_);
+
+ DCHECK_GE(region_size, 0u);
+ if (is_shutdown_)
+ return nullptr;
+
+ // Only change the configured size if bigger region is requested to avoid
+  // unnecessary reallocations.
+ if (region_size > region_size_) {
+ regions_.clear();
+ region_size_ = region_size;
+ }
+ if (!regions_.empty()) {
+ auto region = std::move(regions_.back());
+ regions_.pop_back();
+ DCHECK_GE(region.first.GetSize(), region_size_);
+ auto handle = std::make_unique<Handle>(PassKey<UnsafeSharedMemoryPool>(),
+ std::move(region.first),
+ std::move(region.second), this);
+ return handle;
+ }
+
+ auto region = UnsafeSharedMemoryRegion::Create(region_size_);
+ if (!region.IsValid())
+ return nullptr;
+
+ WritableSharedMemoryMapping mapping = region.Map();
+ if (!mapping.IsValid())
+ return nullptr;
+
+ return std::make_unique<Handle>(PassKey<UnsafeSharedMemoryPool>(),
+ std::move(region), std::move(mapping), this);
+}
+
+void UnsafeSharedMemoryPool::Shutdown() {
+ AutoLock lock(lock_);
+ DCHECK(!is_shutdown_);
+ is_shutdown_ = true;
+ regions_.clear();
+}
+
+void UnsafeSharedMemoryPool::ReleaseBuffer(
+ UnsafeSharedMemoryRegion region,
+ WritableSharedMemoryMapping mapping) {
+ AutoLock lock(lock_);
+ // Only return regions which are at least as big as the current configuration.
+ if (is_shutdown_ || regions_.size() >= kMaxStoredBuffers ||
+ !region.IsValid() || region.GetSize() < region_size_) {
+ DLOG(WARNING) << "Not returning SharedMemoryRegion to the pool:"
+ << " is_shutdown: " << (is_shutdown_ ? "true" : "false")
+ << " stored regions: " << regions_.size()
+ << " configured size: " << region_size_
+ << " this region size: " << region.GetSize()
+ << " valid: " << (region.IsValid() ? "true" : "false");
+ return;
+ }
+ regions_.emplace_back(std::move(region), std::move(mapping));
+}
+
+} // namespace base
diff --git a/base/memory/unsafe_shared_memory_pool.h b/base/memory/unsafe_shared_memory_pool.h
new file mode 100644
index 0000000..3592f26
--- /dev/null
+++ b/base/memory/unsafe_shared_memory_pool.h
@@ -0,0 +1,85 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_UNSAFE_SHARED_MEMORY_POOL_H_
+#define BASE_MEMORY_UNSAFE_SHARED_MEMORY_POOL_H_
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/unsafe_shared_memory_region.h"
+#include "base/synchronization/lock.h"
+#include "base/types/pass_key.h"
+
+namespace base {
+
+// UnsafeSharedMemoryPool manages allocation and pooling of
+// UnsafeSharedMemoryRegions. Using a pool saves the cost of repeated shared
+// memory allocations. Up to 32 regions are pooled. It is thread-safe. It may
+// return bigger regions than requested. If a requested size is increased, all
+// stored regions are purged. Regions are returned to the pool on destruction
+// of a |Handle| if they are of sufficient size.
+class BASE_EXPORT UnsafeSharedMemoryPool
+ : public RefCountedThreadSafe<UnsafeSharedMemoryPool> {
+ public:
+ // Used to store the allocation result.
+ // This class returns memory to the pool upon destruction.
+ class BASE_EXPORT Handle {
+ public:
+ Handle(PassKey<UnsafeSharedMemoryPool>,
+ UnsafeSharedMemoryRegion region,
+ WritableSharedMemoryMapping mapping,
+ scoped_refptr<UnsafeSharedMemoryPool> pool);
+
+ ~Handle();
+ // Disallow copy and assign.
+ Handle(const Handle&) = delete;
+ Handle& operator=(const Handle&) = delete;
+
+ const UnsafeSharedMemoryRegion& GetRegion() const;
+
+ const WritableSharedMemoryMapping& GetMapping() const;
+
+ private:
+ UnsafeSharedMemoryRegion region_;
+ WritableSharedMemoryMapping mapping_;
+ scoped_refptr<UnsafeSharedMemoryPool> pool_;
+ };
+
+ UnsafeSharedMemoryPool();
+ // Disallow copy and assign.
+ UnsafeSharedMemoryPool(const UnsafeSharedMemoryPool&) = delete;
+ UnsafeSharedMemoryPool& operator=(const UnsafeSharedMemoryPool&) = delete;
+
+ // Allocates a region of the given |size| or reuses a previous allocation if
+ // possible.
+ std::unique_ptr<Handle> MaybeAllocateBuffer(size_t size);
+
+ // Shuts down the pool, freeing all currently unused allocations and freeing
+ // outstanding ones as they are returned.
+ void Shutdown();
+
+ private:
+ friend class RefCountedThreadSafe<UnsafeSharedMemoryPool>;
+ ~UnsafeSharedMemoryPool();
+
+ void ReleaseBuffer(UnsafeSharedMemoryRegion region,
+ WritableSharedMemoryMapping mapping);
+
+ Lock lock_;
+ // All shared memory regions cached internally are guaranteed to be
+ // at least `region_size_` bytes in size.
+ size_t region_size_ GUARDED_BY(lock_) = 0u;
+ // Cached unused regions and their mappings.
+ std::vector<std::pair<UnsafeSharedMemoryRegion, WritableSharedMemoryMapping>>
+ regions_ GUARDED_BY(lock_);
+ bool is_shutdown_ GUARDED_BY(lock_) = false;
+};
+
+} // namespace base
+
+#endif // BASE_MEMORY_UNSAFE_SHARED_MEMORY_POOL_H_
diff --git a/base/memory/unsafe_shared_memory_pool_unittest.cc b/base/memory/unsafe_shared_memory_pool_unittest.cc
new file mode 100644
index 0000000..67c7e8a
--- /dev/null
+++ b/base/memory/unsafe_shared_memory_pool_unittest.cc
@@ -0,0 +1,50 @@
+// Copyright 2021 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/unsafe_shared_memory_pool.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(UnsafeSharedMemoryPoolTest, CreatesRegion) {
+ scoped_refptr<UnsafeSharedMemoryPool> pool(
+ base::MakeRefCounted<UnsafeSharedMemoryPool>());
+ auto handle = pool->MaybeAllocateBuffer(1000);
+ ASSERT_TRUE(handle);
+ EXPECT_TRUE(handle->GetRegion().IsValid());
+ EXPECT_TRUE(handle->GetMapping().IsValid());
+}
+
+TEST(UnsafeSharedMemoryPoolTest, ReusesRegions) {
+ scoped_refptr<UnsafeSharedMemoryPool> pool(
+ base::MakeRefCounted<UnsafeSharedMemoryPool>());
+ auto handle = pool->MaybeAllocateBuffer(1000u);
+ ASSERT_TRUE(handle);
+ auto id1 = handle->GetRegion().GetGUID();
+
+ // Return memory to the pool.
+ handle.reset();
+
+ handle = pool->MaybeAllocateBuffer(1000u);
+ // Should reuse the freed region.
+ EXPECT_EQ(id1, handle->GetRegion().GetGUID());
+}
+
+TEST(UnsafeSharedMemoryPoolTest, RespectsSize) {
+ scoped_refptr<UnsafeSharedMemoryPool> pool(
+ base::MakeRefCounted<UnsafeSharedMemoryPool>());
+ auto handle = pool->MaybeAllocateBuffer(1000u);
+ ASSERT_TRUE(handle);
+ EXPECT_GE(handle->GetRegion().GetSize(), 1000u);
+
+ handle = pool->MaybeAllocateBuffer(100u);
+ ASSERT_TRUE(handle);
+ EXPECT_GE(handle->GetRegion().GetSize(), 100u);
+
+ handle = pool->MaybeAllocateBuffer(1100u);
+ ASSERT_TRUE(handle);
+ EXPECT_GE(handle->GetRegion().GetSize(), 1100u);
+}
+} // namespace base
diff --git a/base/memory/unsafe_shared_memory_region.cc b/base/memory/unsafe_shared_memory_region.cc
index a28ef9c..ee398d8 100644
--- a/base/memory/unsafe_shared_memory_region.cc
+++ b/base/memory/unsafe_shared_memory_region.cc
@@ -1,4 +1,4 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
+// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -6,12 +6,18 @@
#include <utility>
-#include "base/memory/shared_memory.h"
+#include "base/check_op.h"
namespace base {
+UnsafeSharedMemoryRegion::CreateFunction*
+ UnsafeSharedMemoryRegion::create_hook_ = nullptr;
+
// static
UnsafeSharedMemoryRegion UnsafeSharedMemoryRegion::Create(size_t size) {
+ if (create_hook_)
+ return create_hook_(size);
+
subtle::PlatformSharedMemoryRegion handle =
subtle::PlatformSharedMemoryRegion::CreateUnsafe(size);
@@ -19,20 +25,6 @@
}
// static
-UnsafeSharedMemoryRegion UnsafeSharedMemoryRegion::CreateFromHandle(
- const SharedMemoryHandle& handle) {
- if (!handle.IsValid())
- return UnsafeSharedMemoryRegion();
- auto platform_region =
- subtle::PlatformSharedMemoryRegion::TakeFromSharedMemoryHandle(
- handle, subtle::PlatformSharedMemoryRegion::Mode::kUnsafe);
- if (!platform_region.IsValid()) {
- return UnsafeSharedMemoryRegion();
- }
- return Deserialize(std::move(platform_region));
-}
-
-// static
UnsafeSharedMemoryRegion UnsafeSharedMemoryRegion::Deserialize(
subtle::PlatformSharedMemoryRegion handle) {
return UnsafeSharedMemoryRegion(std::move(handle));
@@ -56,22 +48,24 @@
return UnsafeSharedMemoryRegion(handle_.Duplicate());
}
-WritableSharedMemoryMapping UnsafeSharedMemoryRegion::Map() const {
- return MapAt(0, handle_.GetSize());
+WritableSharedMemoryMapping UnsafeSharedMemoryRegion::Map(
+ SharedMemoryMapper* mapper) const {
+ return MapAt(0, handle_.GetSize(), mapper);
}
-WritableSharedMemoryMapping UnsafeSharedMemoryRegion::MapAt(off_t offset,
- size_t size) const {
+WritableSharedMemoryMapping UnsafeSharedMemoryRegion::MapAt(
+ uint64_t offset,
+ size_t size,
+ SharedMemoryMapper* mapper) const {
if (!IsValid())
return {};
- void* memory = nullptr;
- size_t mapped_size = 0;
- if (!handle_.MapAt(offset, size, &memory, &mapped_size))
+ auto result = handle_.MapAt(offset, size, mapper);
+ if (!result.has_value())
return {};
- return WritableSharedMemoryMapping(memory, size, mapped_size,
- handle_.GetGUID());
+ return WritableSharedMemoryMapping(result.value(), size, handle_.GetGUID(),
+ mapper);
}
bool UnsafeSharedMemoryRegion::IsValid() const {
diff --git a/base/memory/unsafe_shared_memory_region.h b/base/memory/unsafe_shared_memory_region.h
index fae6032..24d93ef 100644
--- a/base/memory/unsafe_shared_memory_region.h
+++ b/base/memory/unsafe_shared_memory_region.h
@@ -1,16 +1,17 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
+// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_UNSAFE_SHARED_MEMORY_REGION_H_
#define BASE_MEMORY_UNSAFE_SHARED_MEMORY_REGION_H_
-#include "base/gtest_prod_util.h"
-#include "base/macros.h"
+#include "base/base_export.h"
+#include "base/check.h"
#include "base/memory/platform_shared_memory_region.h"
-#include "base/memory/shared_memory_handle.h"
#include "base/memory/shared_memory_mapping.h"
+#include <stdint.h>
+
namespace base {
// Scoped move-only handle to a region of platform shared memory. The instance
@@ -32,20 +33,8 @@
using MappingType = WritableSharedMemoryMapping;
// Creates a new UnsafeSharedMemoryRegion instance of a given size that can be
// used for mapping writable shared memory into the virtual address space.
- //
- // This call will fail if the process does not have sufficient permissions to
- // create a shared memory region itself. See
- // mojo::CreateUnsafeSharedMemoryRegion in
- // mojo/public/cpp/base/shared_memory_utils.h for creating a shared memory
- // region from a an unprivileged process where a broker must be used.
static UnsafeSharedMemoryRegion Create(size_t size);
-
- // Creates a new UnsafeSharedMemoryRegion from a SharedMemoryHandle. This
- // consumes the handle, which should not be used again.
- // TODO(crbug.com/795291): this should only be used while transitioning from
- // the old shared memory API, and should be removed when done.
- static UnsafeSharedMemoryRegion CreateFromHandle(
- const base::SharedMemoryHandle& handle);
+ using CreateFunction = decltype(Create);
// Returns an UnsafeSharedMemoryRegion built from a platform-specific handle
// that was taken from another UnsafeSharedMemoryRegion instance. Returns an
@@ -70,6 +59,9 @@
UnsafeSharedMemoryRegion(UnsafeSharedMemoryRegion&&);
UnsafeSharedMemoryRegion& operator=(UnsafeSharedMemoryRegion&&);
+ UnsafeSharedMemoryRegion(const UnsafeSharedMemoryRegion&) = delete;
+ UnsafeSharedMemoryRegion& operator=(const UnsafeSharedMemoryRegion&) = delete;
+
// Destructor closes shared memory region if valid.
// All created mappings will remain valid.
~UnsafeSharedMemoryRegion();
@@ -84,14 +76,23 @@
// access. The mapped address is guaranteed to have an alignment of
// at least |subtle::PlatformSharedMemoryRegion::kMapMinimumAlignment|.
// Returns a valid WritableSharedMemoryMapping instance on success, invalid
- // otherwise.
- WritableSharedMemoryMapping Map() const;
+ // otherwise. A custom |SharedMemoryMapper| for mapping (and later unmapping)
+ // the region can be provided using the optional |mapper| parameter.
+ WritableSharedMemoryMapping Map(SharedMemoryMapper* mapper = nullptr) const;
- // Same as above, but maps only |size| bytes of the shared memory region
- // starting with the given |offset|. |offset| must be aligned to value of
- // |SysInfo::VMAllocationGranularity()|. Returns an invalid mapping if
- // requested bytes are out of the region limits.
- WritableSharedMemoryMapping MapAt(off_t offset, size_t size) const;
+ // Similar to `Map()`, but maps only `size` bytes of the shared memory block
+ // at byte `offset`. Returns an invalid mapping if requested bytes are out of
+ // the region limits.
+ //
+ // `offset` does not need to be aligned; if `offset` is not a multiple of
+ // `subtle::PlatformSharedMemoryRegion::kMapMinimumAlignment`, then the
+ // returned mapping will not respect alignment either. Internally, `offset`
+ // and `size` are still first adjusted to respect alignment when mapping in
+ // the shared memory region, but the returned mapping will be "unadjusted" to
+ // match the exact `offset` and `size` requested.
+ WritableSharedMemoryMapping MapAt(uint64_t offset,
+ size_t size,
+ SharedMemoryMapper* mapper = nullptr) const;
// Whether the underlying platform handle is valid.
bool IsValid() const;
@@ -108,23 +109,23 @@
return handle_.GetGUID();
}
- private:
- FRIEND_TEST_ALL_PREFIXES(DiscardableSharedMemoryTest,
- LockShouldFailIfPlatformLockPagesFails);
- friend class DiscardableSharedMemory;
-
- explicit UnsafeSharedMemoryRegion(subtle::PlatformSharedMemoryRegion handle);
-
// Returns a platform shared memory handle. |this| remains the owner of the
// handle.
- subtle::PlatformSharedMemoryRegion::PlatformHandle GetPlatformHandle() const {
+ subtle::PlatformSharedMemoryHandle GetPlatformHandle() const {
DCHECK(IsValid());
return handle_.GetPlatformHandle();
}
- subtle::PlatformSharedMemoryRegion handle_;
+ private:
+ friend class SharedMemoryHooks;
- DISALLOW_COPY_AND_ASSIGN(UnsafeSharedMemoryRegion);
+ explicit UnsafeSharedMemoryRegion(subtle::PlatformSharedMemoryRegion handle);
+
+ static void set_create_hook(CreateFunction* hook) { create_hook_ = hook; }
+
+ static CreateFunction* create_hook_;
+
+ subtle::PlatformSharedMemoryRegion handle_;
};
} // namespace base
diff --git a/base/memory/values_equivalent.h b/base/memory/values_equivalent.h
new file mode 100644
index 0000000..f0104b6
--- /dev/null
+++ b/base/memory/values_equivalent.h
@@ -0,0 +1,69 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_VALUES_EQUIVALENT_H_
+#define BASE_MEMORY_VALUES_EQUIVALENT_H_
+
+#include <functional>
+#include <memory>
+
+#include "base/memory/scoped_refptr.h"
+
+namespace base {
+
+// Compares two pointers for equality, returns the dereferenced value comparison
+// if both are non-null.
+// Behaves like std::optional<T>::operator==(const std::optional<T>&) but for
+// pointers, with an optional predicate.
+// If `p` is specified, `p(const T& x, const T& y)` should return whether `x`
+// and `y` are equal. It's called with `(*a, *b)` when `a != b && a && b`.
+template <typename T, typename Predicate = std::equal_to<>>
+bool ValuesEquivalent(const T* a, const T* b, Predicate p = {}) {
+ if (a == b)
+ return true;
+ if (!a || !b)
+ return false;
+ return p(*a, *b);
+}
+
+// Specialize for smart pointers like std::unique_ptr and base::scoped_refptr
+// that provide a T* get() method.
+// Example usage:
+// struct Example {
+// std::unique_ptr<Child> child;
+// bool operator==(const Example& other) const {
+// return base::ValuesEquivalent(child, other.child);
+// }
+// };
+template <typename T,
+ typename Predicate = std::equal_to<>,
+ std::enable_if_t<
+ std::is_pointer_v<decltype(std::declval<T>().get())>>* = nullptr>
+bool ValuesEquivalent(const T& x, const T& y, Predicate p = {}) {
+ return ValuesEquivalent(x.get(), y.get(), std::move(p));
+}
+
+// Specialize for smart pointers like blink::Persistent and blink::Member that
+// provide a T* Get() method.
+// Example usage:
+// namespace blink {
+// struct Example : public GarbageCollected<Example> {
+// Member<Child> child;
+// bool operator==(const Example& other) const {
+// return base::ValuesEquivalent(child, other.child);
+// }
+// void Trace(Visitor*) const;
+// };
+// } // namespace blink
+template <typename T,
+ typename Predicate = std::equal_to<>,
+ std::enable_if_t<
+ std::is_pointer_v<decltype(std::declval<T>().Get())>>* = nullptr>
+bool ValuesEquivalent(const T& x, const T& y, Predicate p = {}) {
+ return ValuesEquivalent(x.Get(), y.Get(), std::move(p));
+}
+
+} // namespace base
+
+#endif // BASE_MEMORY_VALUES_EQUIVALENT_H_
diff --git a/base/memory/values_equivalent_unittest.cc b/base/memory/values_equivalent_unittest.cc
new file mode 100644
index 0000000..a0d7f2f
--- /dev/null
+++ b/base/memory/values_equivalent_unittest.cc
@@ -0,0 +1,116 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/values_equivalent.h"
+
+#include "base/memory/raw_ptr.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_refptr.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(ValuesEquivalentTest, Comparisons) {
+ int a = 1234;
+ int b1 = 5678;
+ int b2 = 5678;
+
+ EXPECT_TRUE(ValuesEquivalent<int>(nullptr, nullptr));
+ EXPECT_FALSE(ValuesEquivalent<int>(&a, nullptr));
+ EXPECT_FALSE(ValuesEquivalent<int>(nullptr, &a));
+ EXPECT_FALSE(ValuesEquivalent(&a, &b1));
+ EXPECT_TRUE(ValuesEquivalent(&a, &a));
+ EXPECT_TRUE(ValuesEquivalent(&b1, &b2));
+}
+
+TEST(ValuesEquivalentTest, UniquePtr) {
+ auto a = std::make_unique<int>(1234);
+ auto b1 = std::make_unique<int>(5678);
+ auto b2 = std::make_unique<int>(5678);
+
+ EXPECT_TRUE(ValuesEquivalent(std::unique_ptr<int>(), std::unique_ptr<int>()));
+ EXPECT_FALSE(ValuesEquivalent(a, std::unique_ptr<int>()));
+ EXPECT_FALSE(ValuesEquivalent(std::unique_ptr<int>(), a));
+ EXPECT_FALSE(ValuesEquivalent(a, b1));
+ EXPECT_TRUE(ValuesEquivalent(a, a));
+ EXPECT_TRUE(ValuesEquivalent(b1, b2));
+}
+
+TEST(ValuesEquivalentTest, ScopedRefPtr) {
+ struct Wrapper : public RefCounted<Wrapper> {
+ explicit Wrapper(int value) : value(value) {}
+ int value;
+ bool operator==(const Wrapper& other) const { return value == other.value; }
+
+ protected:
+ friend class RefCounted<Wrapper>;
+ virtual ~Wrapper() = default;
+ };
+
+ auto a = MakeRefCounted<Wrapper>(1234);
+ auto b1 = MakeRefCounted<Wrapper>(5678);
+ auto b2 = MakeRefCounted<Wrapper>(5678);
+
+ EXPECT_TRUE(
+ ValuesEquivalent(scoped_refptr<Wrapper>(), scoped_refptr<Wrapper>()));
+ EXPECT_FALSE(ValuesEquivalent(a, scoped_refptr<Wrapper>()));
+ EXPECT_FALSE(ValuesEquivalent(scoped_refptr<Wrapper>(), a));
+ EXPECT_FALSE(ValuesEquivalent(a, b1));
+ EXPECT_TRUE(ValuesEquivalent(a, a));
+ EXPECT_TRUE(ValuesEquivalent(b1, b2));
+}
+
+TEST(ValuesEquivalentTest, CapitalGetPtr) {
+ class IntPointer {
+ public:
+ explicit IntPointer(int* pointer) : pointer_(pointer) {}
+ const int* Get() const { return pointer_; }
+
+ private:
+ raw_ptr<int> pointer_ = nullptr;
+ };
+
+ auto a = 1234;
+ auto b1 = 5678;
+ auto b2 = 5678;
+
+ EXPECT_TRUE(ValuesEquivalent(IntPointer(nullptr), IntPointer(nullptr)));
+ EXPECT_FALSE(ValuesEquivalent(IntPointer(&a), IntPointer(nullptr)));
+ EXPECT_FALSE(ValuesEquivalent(IntPointer(nullptr), IntPointer(&a)));
+ EXPECT_FALSE(ValuesEquivalent(IntPointer(&a), IntPointer(&b1)));
+ EXPECT_TRUE(ValuesEquivalent(IntPointer(&a), IntPointer(&a)));
+ EXPECT_TRUE(ValuesEquivalent(IntPointer(&b1), IntPointer(&b2)));
+}
+
+TEST(ValuesEquivalentTest, BypassEqualsOperator) {
+ struct NeverEqual {
+ bool operator==(const NeverEqual& other) const { return false; }
+ } a, b;
+
+ ASSERT_FALSE(a == a);
+ ASSERT_FALSE(a == b);
+
+ EXPECT_TRUE(ValuesEquivalent(&a, &a));
+ EXPECT_FALSE(ValuesEquivalent(&a, &b));
+}
+
+TEST(ValuesEquavalentTest, Predicate) {
+ auto is_same_or_next = [](int a, int b) { return a == b || a == b + 1; };
+ int x = 1;
+ int y = 2;
+ int z = 3;
+
+ EXPECT_TRUE(ValuesEquivalent(&x, &x, is_same_or_next));
+ EXPECT_FALSE(ValuesEquivalent(&x, &y, is_same_or_next));
+ EXPECT_FALSE(ValuesEquivalent(&x, &z, is_same_or_next));
+ EXPECT_TRUE(ValuesEquivalent(&y, &x, is_same_or_next));
+ EXPECT_FALSE(ValuesEquivalent(&y, &z, is_same_or_next));
+ EXPECT_FALSE(ValuesEquivalent(&z, &x, is_same_or_next));
+ EXPECT_TRUE(ValuesEquivalent(&z, &y, is_same_or_next));
+ EXPECT_TRUE(ValuesEquivalent<int>(nullptr, nullptr, is_same_or_next));
+ EXPECT_FALSE(ValuesEquivalent<int>(&x, nullptr, is_same_or_next));
+ EXPECT_FALSE(ValuesEquivalent<int>(nullptr, &x, is_same_or_next));
+}
+
+} // namespace base
diff --git a/base/memory/weak_auto_reset.h b/base/memory/weak_auto_reset.h
new file mode 100644
index 0000000..a6f9f1c
--- /dev/null
+++ b/base/memory/weak_auto_reset.h
@@ -0,0 +1,71 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_WEAK_AUTO_RESET_H_
+#define BASE_MEMORY_WEAK_AUTO_RESET_H_
+
+#include "base/memory/weak_ptr.h"
+
+namespace base {
+
+// Sets a field of an object to a specified value, then returns it to its
+// original value when the WeakAutoReset instance goes out of scope. Because a
+// weak pointer is used, if the target object is destroyed, no attempt is made
+// to restore the original value and no UAF occurs.
+//
+// Note that as of C++17 we can use CTAD to infer template parameters from
+// constructor args; it is valid to write:
+// WeakAutoReset war(myobj->GetWeakPtr(), &MyClass::member_, new_value);
+// without specifying explicit types in the classname.
+template <class T, class U>
+class WeakAutoReset {
+ public:
+ // Create an empty object that does nothing, you may move a value into this
+ // object via assignment.
+ WeakAutoReset() = default;
+
+ // Sets member `field` of object pointed to by `ptr` to `new_value`. `ptr`
+ // must be valid at time of construction. If `ptr` is still valid when this
+ // object goes out of scope, the member will be returned to its original
+ // value.
+ WeakAutoReset(base::WeakPtr<T> ptr, U T::*field, U new_value)
+ : ptr_(ptr),
+ field_(field),
+ old_value_(std::exchange(ptr.get()->*field, std::move(new_value))) {}
+
+ // Move constructor.
+ WeakAutoReset(WeakAutoReset&& other)
+ : ptr_(std::move(other.ptr_)),
+ field_(std::exchange(other.field_, nullptr)),
+ old_value_(std::move(other.old_value_)) {}
+
+ // Move assignment operator.
+ WeakAutoReset& operator=(WeakAutoReset&& other) {
+ if (this != &other) {
+ // If we're already tracking a value, make sure to restore it before
+ // overwriting our target.
+ Reset();
+ ptr_ = std::move(other.ptr_);
+ field_ = std::exchange(other.field_, nullptr);
+ old_value_ = std::move(other.old_value_);
+ }
+ return *this;
+ }
+
+ ~WeakAutoReset() { Reset(); }
+
+ private:
+ void Reset() {
+ if (ptr_)
+ ptr_.get()->*field_ = std::move(old_value_);
+ }
+
+ base::WeakPtr<T> ptr_;
+ U T::*field_ = nullptr;
+ U old_value_ = U();
+};
+
+} // namespace base
+
+#endif // BASE_MEMORY_WEAK_AUTO_RESET_H_
diff --git a/base/memory/weak_auto_reset_unittest.cc b/base/memory/weak_auto_reset_unittest.cc
new file mode 100644
index 0000000..7bb5b81
--- /dev/null
+++ b/base/memory/weak_auto_reset_unittest.cc
@@ -0,0 +1,295 @@
+// Copyright 2022 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/weak_auto_reset.h"
+
+#include <memory>
+
+#include "base/memory/weak_ptr.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+template <class T>
+class HasWeakFactory {
+ public:
+ HasWeakFactory() = default;
+ ~HasWeakFactory() = default;
+
+ // Returns a WeakAutoReset that temporarily sets value_ to `value`.
+ auto SetValueScoped(T value) {
+ return WeakAutoReset(factory_.GetWeakPtr(), &HasWeakFactory::value_,
+ std::move(value));
+ }
+
+ void set_value(T value) { value_ = std::move(value); }
+ const T& value() const { return value_; }
+
+ WeakPtr<HasWeakFactory> GetWeakPtr() { return factory_.GetWeakPtr(); }
+
+ private:
+ T value_ = T();
+ WeakPtrFactory<HasWeakFactory> factory_{this};
+};
+
+} // namespace
+
+TEST(WeakAutoResetTest, DefaultConstructor) {
+ WeakAutoReset<HasWeakFactory<int>, int> empty;
+}
+
+TEST(WeakAutoResetTest, SingleAutoReset) {
+ HasWeakFactory<int> hwf;
+ {
+ WeakAutoReset reset = hwf.SetValueScoped(1);
+ EXPECT_EQ(1, hwf.value());
+ }
+ EXPECT_EQ(0, hwf.value());
+}
+
+TEST(WeakAutoResetTest, SingleAutoResetObjectDestroyed) {
+ auto hwf = std::make_unique<HasWeakFactory<int>>();
+ WeakAutoReset reset = hwf->SetValueScoped(1);
+ EXPECT_EQ(1, hwf->value());
+ hwf.reset();
+ // ASAN will crash here if we don't correctly detect that hwf has gone away.
+}
+
+TEST(WeakAutoResetTest, MultipleNested) {
+ HasWeakFactory<int> hwf;
+ {
+ WeakAutoReset reset = hwf.SetValueScoped(1);
+ EXPECT_EQ(1, hwf.value());
+ {
+ WeakAutoReset reset2 = hwf.SetValueScoped(2);
+ EXPECT_EQ(2, hwf.value());
+ }
+ EXPECT_EQ(1, hwf.value());
+ }
+ EXPECT_EQ(0, hwf.value());
+}
+
+TEST(WeakAutoResetTest, MultipleNestedObjectDestroyed) {
+ auto hwf = std::make_unique<HasWeakFactory<int>>();
+ WeakAutoReset reset = hwf->SetValueScoped(1);
+ EXPECT_EQ(1, hwf->value());
+ WeakAutoReset reset2 = hwf->SetValueScoped(2);
+ EXPECT_EQ(2, hwf->value());
+ hwf.reset();
+ // ASAN will crash here if we don't correctly detect that hwf has gone away.
+}
+
+TEST(WeakAutoResetTest, MoveAssignmentTransfersOwnership) {
+ HasWeakFactory<int> hwf;
+ // Create an auto-reset outside of a scope.
+ WeakAutoReset reset = hwf.SetValueScoped(1);
+ {
+ WeakAutoReset<HasWeakFactory<int>, int> reset2;
+ EXPECT_EQ(1, hwf.value());
+ // Move the auto-reset to an instance inside the scope. This should not
+ // cause the value to reset.
+ reset2 = std::move(reset);
+ EXPECT_EQ(1, hwf.value());
+ }
+ // Because the active auto-reset went away with the scope, the original value
+ // should be restored.
+ EXPECT_EQ(0, hwf.value());
+}
+
+TEST(WeakAutoResetTest, MoveAssignmentResetsOldValue) {
+ HasWeakFactory<int> hwf1;
+ HasWeakFactory<int> hwf2;
+ WeakAutoReset reset = hwf1.SetValueScoped(1);
+ WeakAutoReset reset2 = hwf2.SetValueScoped(2);
+ EXPECT_EQ(1, hwf1.value());
+ EXPECT_EQ(2, hwf2.value());
+
+ // Overwriting the first with the second should reset the first value, but not
+ // the second.
+ reset = std::move(reset2);
+ EXPECT_EQ(0, hwf1.value());
+ EXPECT_EQ(2, hwf2.value());
+
+ // Overwriting the moved value with a default value should have no effect.
+ reset2 = WeakAutoReset<HasWeakFactory<int>, int>();
+
+ // Overwriting the live auto-reset with a default value should reset the other
+ // value.
+ reset = WeakAutoReset<HasWeakFactory<int>, int>();
+ EXPECT_EQ(0, hwf1.value());
+ EXPECT_EQ(0, hwf2.value());
+}
+
+TEST(WeakAutoResetTest, MoveAssignmentToSelfIsNoOp) {
+ HasWeakFactory<int> hwf;
+ {
+ WeakAutoReset reset = hwf.SetValueScoped(1);
+ EXPECT_EQ(1, hwf.value());
+
+ // Move the auto-reset to itself. This should have no effect. We'll need to
+ // create an intermediate so that we don't get a compile error.
+ auto* const reset_ref = &reset;
+ reset = std::move(*reset_ref);
+ EXPECT_EQ(1, hwf.value());
+ }
+ // The auto-reset goes out of scope, resetting the value.
+ EXPECT_EQ(0, hwf.value());
+}
+
+TEST(WeakAutoResetTest, DeleteTargetObjectAfterMoveIsSafe) {
+ auto hwf = std::make_unique<HasWeakFactory<int>>();
+ WeakAutoReset reset = hwf->SetValueScoped(1);
+ WeakAutoReset reset2 = std::move(reset);
+ hwf.reset();
+ // ASAN will crash here if we don't correctly detect that hwf has gone away.
+}
+
+using HasWeakFactoryPointer = std::unique_ptr<HasWeakFactory<int>>;
+
+TEST(WeakAutoResetTest, TestSafelyMovesValue) {
+ // We'll use an object that owns another object while keeping a weak reference
+ // to the inner object to determine its lifetime.
+ auto inner = std::make_unique<HasWeakFactory<int>>();
+ auto weak_ptr = inner->GetWeakPtr();
+ auto outer = std::make_unique<HasWeakFactory<HasWeakFactoryPointer>>();
+ outer->set_value(std::move(inner));
+ ASSERT_TRUE(weak_ptr);
+
+ {
+ // Transfer ownership of the inner object to the auto-reset.
+ WeakAutoReset reset = outer->SetValueScoped(HasWeakFactoryPointer());
+ EXPECT_TRUE(weak_ptr);
+ EXPECT_FALSE(outer->value());
+ }
+
+ // Transfer ownership back to the outer object.
+ EXPECT_TRUE(weak_ptr);
+ EXPECT_TRUE(outer->value());
+
+ // Destroying the outer object destroys the inner object.
+ outer.reset();
+ EXPECT_FALSE(weak_ptr);
+}
+
+TEST(WeakAutoResetTest, TestSafelyMovesValueAndThenDestroysIt) {
+ // We'll use an object that owns another object while keeping a weak reference
+ // to the inner object to determine its lifetime.
+ auto inner = std::make_unique<HasWeakFactory<int>>();
+ auto weak_ptr = inner->GetWeakPtr();
+ auto outer = std::make_unique<HasWeakFactory<HasWeakFactoryPointer>>();
+ outer->set_value(std::move(inner));
+ ASSERT_TRUE(weak_ptr);
+
+ {
+ // Transfer ownership of the inner object to the auto-reset.
+ WeakAutoReset reset = outer->SetValueScoped(HasWeakFactoryPointer());
+ EXPECT_TRUE(weak_ptr);
+ EXPECT_FALSE(outer->value());
+
+ // Destroy the outer object. The auto-reset still owns the old inner object.
+ outer.reset();
+ EXPECT_TRUE(weak_ptr);
+ }
+
+  // Ownership can't be transferred back so the inner object is destroyed.
+ EXPECT_FALSE(weak_ptr);
+}
+
+TEST(WeakAutoResetTest, TestMoveConstructorMovesOldValue) {
+ // We'll use an object that owns another object while keeping a weak reference
+ // to the inner object to determine its lifetime.
+ auto inner = std::make_unique<HasWeakFactory<int>>();
+ auto weak_ptr = inner->GetWeakPtr();
+ auto outer = std::make_unique<HasWeakFactory<HasWeakFactoryPointer>>();
+ outer->set_value(std::move(inner));
+ ASSERT_TRUE(weak_ptr);
+
+ {
+ // Transfer ownership of the inner object to the auto-reset.
+ WeakAutoReset reset = outer->SetValueScoped(HasWeakFactoryPointer());
+ EXPECT_TRUE(weak_ptr);
+ EXPECT_FALSE(outer->value());
+
+ {
+ // Move ownership of the old object to a new auto-reset.
+ WeakAutoReset reset2(std::move(reset));
+ EXPECT_TRUE(weak_ptr);
+ EXPECT_FALSE(outer->value());
+ }
+
+ // Destroying the second auto-reset transfers ownership back to the outer
+ // object.
+ EXPECT_TRUE(weak_ptr);
+ EXPECT_TRUE(outer->value());
+ }
+}
+
+TEST(WeakAutoResetTest, TestMoveAssignmentMovesOldValue) {
+ // We'll use an object that owns another object while keeping a weak reference
+ // to the inner object to determine its lifetime.
+ auto inner = std::make_unique<HasWeakFactory<int>>();
+ auto weak_ptr = inner->GetWeakPtr();
+ auto outer = std::make_unique<HasWeakFactory<HasWeakFactoryPointer>>();
+ outer->set_value(std::move(inner));
+ ASSERT_TRUE(weak_ptr);
+
+ {
+ // Create an auto-reset that will receive ownership later.
+ WeakAutoReset<HasWeakFactory<HasWeakFactoryPointer>, HasWeakFactoryPointer>
+ reset;
+
+ {
+ // Move ownership of the inner object to an auto-reset.
+ WeakAutoReset reset2 = outer->SetValueScoped(HasWeakFactoryPointer());
+ EXPECT_TRUE(weak_ptr);
+ EXPECT_FALSE(outer->value());
+
+ // Transfer ownership to the other auto-reset.
+ reset = std::move(reset2);
+ EXPECT_TRUE(weak_ptr);
+ EXPECT_FALSE(outer->value());
+ }
+
+ // The auto-reset that initially received the value is gone, but the one
+ // actually holding the value is still in scope.
+ EXPECT_TRUE(weak_ptr);
+ EXPECT_FALSE(outer->value());
+ }
+
+ // Now both have gone out of scope, so the inner object should be returned to
+ // the outer one.
+ EXPECT_TRUE(weak_ptr);
+ EXPECT_TRUE(outer->value());
+}
+
+TEST(WeakAutoResetTest, TestOldAndNewValuesAreSwapped) {
+ // We'll use an object that owns another object while keeping a weak reference
+ // to the inner object to determine its lifetime.
+ auto inner = std::make_unique<HasWeakFactory<int>>();
+ auto weak_ptr = inner->GetWeakPtr();
+ auto outer = std::make_unique<HasWeakFactory<HasWeakFactoryPointer>>();
+ outer->set_value(std::move(inner));
+ ASSERT_TRUE(weak_ptr);
+
+ // Create a second inner object that we'll swap with the first.
+ auto replacement = std::make_unique<HasWeakFactory<int>>();
+ auto weak_ptr2 = replacement->GetWeakPtr();
+
+ {
+ // Swap the values.
+ WeakAutoReset reset = outer->SetValueScoped(std::move(replacement));
+ EXPECT_TRUE(weak_ptr);
+ EXPECT_TRUE(weak_ptr2);
+ EXPECT_EQ(weak_ptr2.get(), outer->value().get());
+ }
+
+ // Unswap the values. The replacement is discarded.
+ EXPECT_TRUE(weak_ptr);
+ EXPECT_FALSE(weak_ptr2);
+ EXPECT_EQ(weak_ptr.get(), outer->value().get());
+}
+
+} // namespace base
diff --git a/base/memory/weak_ptr.cc b/base/memory/weak_ptr.cc
index c993fcb..70a0213 100644
--- a/base/memory/weak_ptr.cc
+++ b/base/memory/weak_ptr.cc
@@ -1,11 +1,16 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Copyright 2011 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/weak_ptr.h"
-namespace base {
-namespace internal {
+#if DCHECK_IS_ON()
+#include <ostream>
+
+#include "base/debug/stack_trace.h"
+#endif
+
+namespace base::internal {
WeakReference::Flag::Flag() {
// Flags only become bound when checked for validity, or invalidated,
@@ -18,15 +23,19 @@
// The flag being invalidated with a single ref implies that there are no
// weak pointers in existence. Allow deletion on other thread in this case.
#if DCHECK_IS_ON()
- DCHECK(sequence_checker_.CalledOnValidSequence() || HasOneRef())
- << "WeakPtrs must be invalidated on the same sequenced thread.";
+ std::unique_ptr<debug::StackTrace> bound_at;
+ DCHECK(sequence_checker_.CalledOnValidSequence(&bound_at) || HasOneRef())
+ << "WeakPtrs must be invalidated on the same sequenced thread as where "
+ << "they are bound.\n"
+ << (bound_at ? "This was bound at:\n" + bound_at->ToString() : "")
+ << "Check failed at:";
#endif
invalidated_.Set();
}
bool WeakReference::Flag::IsValid() const {
- DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_)
- << "WeakPtrs must be checked on the same sequenced thread.";
+ // WeakPtrs must be checked on the same sequenced thread.
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
return !invalidated_.IsSet();
}
@@ -34,17 +43,28 @@
return !invalidated_.IsSet();
}
+#if DCHECK_IS_ON()
+void WeakReference::Flag::DetachFromSequence() {
+ DETACH_FROM_SEQUENCE(sequence_checker_);
+}
+#endif
+
WeakReference::Flag::~Flag() = default;
WeakReference::WeakReference() = default;
-
WeakReference::WeakReference(const scoped_refptr<Flag>& flag) : flag_(flag) {}
-
WeakReference::~WeakReference() = default;
-WeakReference::WeakReference(WeakReference&& other) = default;
-
WeakReference::WeakReference(const WeakReference& other) = default;
+WeakReference& WeakReference::operator=(const WeakReference& other) = default;
+
+WeakReference::WeakReference(WeakReference&& other) noexcept = default;
+WeakReference& WeakReference::operator=(WeakReference&& other) noexcept =
+ default;
+
+void WeakReference::Reset() {
+ flag_ = nullptr;
+}
bool WeakReference::IsValid() const {
return flag_ && flag_->IsValid();
@@ -54,34 +74,26 @@
return flag_ && flag_->MaybeValid();
}
-WeakReferenceOwner::WeakReferenceOwner() = default;
+WeakReferenceOwner::WeakReferenceOwner()
+ : flag_(MakeRefCounted<WeakReference::Flag>()) {}
WeakReferenceOwner::~WeakReferenceOwner() {
- Invalidate();
+ flag_->Invalidate();
}
WeakReference WeakReferenceOwner::GetRef() const {
- // If we hold the last reference to the Flag then create a new one.
+#if DCHECK_IS_ON()
+ // If we hold the last reference to the Flag then detach the SequenceChecker.
if (!HasRefs())
- flag_ = new WeakReference::Flag();
+ flag_->DetachFromSequence();
+#endif
return WeakReference(flag_);
}
void WeakReferenceOwner::Invalidate() {
- if (flag_) {
- flag_->Invalidate();
- flag_ = nullptr;
- }
-}
-
-WeakPtrBase::WeakPtrBase() : ptr_(0) {}
-
-WeakPtrBase::~WeakPtrBase() = default;
-
-WeakPtrBase::WeakPtrBase(const WeakReference& ref, uintptr_t ptr)
- : ref_(ref), ptr_(ptr) {
- DCHECK(ptr_);
+ flag_->Invalidate();
+ flag_ = MakeRefCounted<WeakReference::Flag>();
}
WeakPtrFactoryBase::WeakPtrFactoryBase(uintptr_t ptr) : ptr_(ptr) {
@@ -92,5 +104,4 @@
ptr_ = 0;
}
-} // namespace internal
-} // namespace base
+} // namespace base::internal
diff --git a/base/memory/weak_ptr.h b/base/memory/weak_ptr.h
index 329f4b7..de38325 100644
--- a/base/memory/weak_ptr.h
+++ b/base/memory/weak_ptr.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -16,25 +16,26 @@
//
// class Controller {
// public:
-// Controller() : weak_factory_(this) {}
// void SpawnWorker() { Worker::StartNew(weak_factory_.GetWeakPtr()); }
// void WorkComplete(const Result& result) { ... }
// private:
// // Member variables should appear before the WeakPtrFactory, to ensure
// // that any WeakPtrs to Controller are invalidated before its members
// // variable's destructors are executed, rendering them invalid.
-// WeakPtrFactory<Controller> weak_factory_;
+// WeakPtrFactory<Controller> weak_factory_{this};
// };
//
// class Worker {
// public:
-// static void StartNew(const WeakPtr<Controller>& controller) {
-// Worker* worker = new Worker(controller);
+// static void StartNew(WeakPtr<Controller> controller) {
+// // Move WeakPtr when possible to avoid atomic refcounting churn on its
+// // internal state.
+// Worker* worker = new Worker(std::move(controller));
// // Kick off asynchronous processing...
// }
// private:
-// Worker(const WeakPtr<Controller>& controller)
-// : controller_(controller) {}
+// Worker(WeakPtr<Controller> controller)
+// : controller_(std::move(controller)) {}
// void DidCompleteAsynchronousProcessing(const Result& result) {
// if (controller_)
// controller_->WorkComplete(result);
@@ -72,16 +73,21 @@
#include <cstddef>
#include <type_traits>
+#include <utility>
#include "base/base_export.h"
-#include "base/logging.h"
-#include "base/macros.h"
+#include "base/check.h"
+#include "base/compiler_specific.h"
+#include "base/dcheck_is_on.h"
+#include "base/memory/raw_ptr.h"
#include "base/memory/ref_counted.h"
#include "base/sequence_checker.h"
#include "base/synchronization/atomic_flag.h"
namespace base {
+template <typename T>
+class SafeRef;
template <typename T> class SupportsWeakPtr;
template <typename T> class WeakPtr;
@@ -89,7 +95,7 @@
// These classes are part of the WeakPtr implementation.
// DO NOT USE THESE CLASSES DIRECTLY YOURSELF.
-class BASE_EXPORT WeakReference {
+class BASE_EXPORT TRIVIAL_ABI WeakReference {
public:
// Although Flag is bound to a specific SequencedTaskRunner, it may be
// deleted from another via base::WeakPtr::~WeakPtr().
@@ -102,6 +108,10 @@
bool MaybeValid() const;
+#if DCHECK_IS_ON()
+ void DetachFromSequence();
+#endif
+
private:
friend class base::RefCountedThreadSafe<Flag>;
@@ -115,12 +125,24 @@
explicit WeakReference(const scoped_refptr<Flag>& flag);
~WeakReference();
- WeakReference(WeakReference&& other);
WeakReference(const WeakReference& other);
- WeakReference& operator=(WeakReference&& other) = default;
- WeakReference& operator=(const WeakReference& other) = default;
+ WeakReference& operator=(const WeakReference& other);
+ WeakReference(WeakReference&& other) noexcept;
+ WeakReference& operator=(WeakReference&& other) noexcept;
+
+ void Reset();
+ // Returns whether the WeakReference is valid, meaning the WeakPtrFactory has
+  // not invalidated the pointer. Unlike RefIsMaybeValid(), this may only be
+ // called from the same sequence as where the WeakPtr was created.
bool IsValid() const;
+ // Returns false if the WeakReference is confirmed to be invalid. This call is
+ // safe to make from any thread, e.g. to optimize away unnecessary work, but
+ // RefIsValid() must always be called, on the correct sequence, before
+ // actually using the pointer.
+ //
+ // Warning: as with any object, this call is only thread-safe if the WeakPtr
+ // instance isn't being re-assigned or reset() racily with this call.
bool MaybeValid() const;
private:
@@ -134,41 +156,12 @@
WeakReference GetRef() const;
- bool HasRefs() const { return flag_ && !flag_->HasOneRef(); }
+ bool HasRefs() const { return !flag_->HasOneRef(); }
void Invalidate();
private:
- mutable scoped_refptr<WeakReference::Flag> flag_;
-};
-
-// This class simplifies the implementation of WeakPtr's type conversion
-// constructor by avoiding the need for a public accessor for ref_. A
-// WeakPtr<T> cannot access the private members of WeakPtr<U>, so this
-// base class gives us a way to access ref_ in a protected fashion.
-class BASE_EXPORT WeakPtrBase {
- public:
- WeakPtrBase();
- ~WeakPtrBase();
-
- WeakPtrBase(const WeakPtrBase& other) = default;
- WeakPtrBase(WeakPtrBase&& other) = default;
- WeakPtrBase& operator=(const WeakPtrBase& other) = default;
- WeakPtrBase& operator=(WeakPtrBase&& other) = default;
-
- void reset() {
- ref_ = internal::WeakReference();
- ptr_ = 0;
- }
-
- protected:
- WeakPtrBase(const WeakReference& ref, uintptr_t ptr);
-
- WeakReference ref_;
-
- // This pointer is only valid when ref_.is_valid() is true. Otherwise, its
- // value is undefined (as opposed to nullptr).
- uintptr_t ptr_;
+ scoped_refptr<WeakReference::Flag> flag_;
};
// This class provides a common implementation of common functions that would
@@ -196,12 +189,17 @@
// static_cast the Base* to a Derived*.
template <typename Derived, typename Base>
static WeakPtr<Derived> AsWeakPtrImpl(SupportsWeakPtr<Base>* t) {
- WeakPtr<Base> ptr = t->AsWeakPtr();
- return WeakPtr<Derived>(
- ptr.ref_, static_cast<Derived*>(reinterpret_cast<Base*>(ptr.ptr_)));
+ WeakPtr<Base> weak = t->AsWeakPtr();
+ return WeakPtr<Derived>(weak.CloneWeakReference(),
+ static_cast<Derived*>(weak.ptr_));
}
};
+// Forward declaration from safe_ptr.h.
+template <typename T>
+SafeRef<T> MakeSafeRefFromWeakPtrInternals(internal::WeakReference&& ref,
+ T* ptr);
+
} // namespace internal
template <typename T> class WeakPtrFactory;
@@ -220,53 +218,77 @@
// foo->method();
//
template <typename T>
-class WeakPtr : public internal::WeakPtrBase {
+class TRIVIAL_ABI WeakPtr {
public:
WeakPtr() = default;
-
+ // NOLINTNEXTLINE(google-explicit-constructor)
WeakPtr(std::nullptr_t) {}
// Allow conversion from U to T provided U "is a" T. Note that this
// is separate from the (implicit) copy and move constructors.
- template <typename U>
- WeakPtr(const WeakPtr<U>& other) : WeakPtrBase(other) {
- // Need to cast from U* to T* to do pointer adjustment in case of multiple
- // inheritance. This also enforces the "U is a T" rule.
- T* t = reinterpret_cast<U*>(other.ptr_);
- ptr_ = reinterpret_cast<uintptr_t>(t);
- }
- template <typename U>
- WeakPtr(WeakPtr<U>&& other) : WeakPtrBase(std::move(other)) {
- // Need to cast from U* to T* to do pointer adjustment in case of multiple
- // inheritance. This also enforces the "U is a T" rule.
- T* t = reinterpret_cast<U*>(other.ptr_);
- ptr_ = reinterpret_cast<uintptr_t>(t);
+ template <typename U,
+ typename = std::enable_if_t<std::is_convertible_v<U*, T*>>>
+ // NOLINTNEXTLINE(google-explicit-constructor)
+ WeakPtr(const WeakPtr<U>& other) : ref_(other.ref_), ptr_(other.ptr_) {}
+ template <typename U,
+ typename = std::enable_if_t<std::is_convertible_v<U*, T*>>>
+ // NOLINTNEXTLINE(google-explicit-constructor)
+ WeakPtr& operator=(const WeakPtr<U>& other) {
+ ref_ = other.ref_;
+ ptr_ = other.ptr_;
+ return *this;
}
- T* get() const {
- return ref_.IsValid() ? reinterpret_cast<T*>(ptr_) : nullptr;
+ template <typename U,
+ typename = std::enable_if_t<std::is_convertible_v<U*, T*>>>
+ // NOLINTNEXTLINE(google-explicit-constructor)
+ WeakPtr(WeakPtr<U>&& other)
+ : ref_(std::move(other.ref_)), ptr_(std::move(other.ptr_)) {}
+ template <typename U,
+ typename = std::enable_if_t<std::is_convertible_v<U*, T*>>>
+ // NOLINTNEXTLINE(google-explicit-constructor)
+ WeakPtr& operator=(WeakPtr<U>&& other) {
+ ref_ = std::move(other.ref_);
+ ptr_ = std::move(other.ptr_);
+ return *this;
}
+ T* get() const { return ref_.IsValid() ? ptr_ : nullptr; }
+
#if defined(STARBOARD)
// TODO[Cobalt]: Remove the implicit convertor.
operator T*() const { return get(); }
#endif
+ // Provide access to the underlying T as a reference. Will CHECK() if the T
+ // pointee is no longer alive.
T& operator*() const {
- DCHECK(get() != nullptr);
- return *get();
+ CHECK(ref_.IsValid());
+ return *ptr_;
}
+
+ // Used to call methods on the underlying T. Will CHECK() if the T pointee is
+ // no longer alive.
T* operator->() const {
- DCHECK(get() != nullptr);
- return get();
+ CHECK(ref_.IsValid());
+ return ptr_;
}
// Allow conditionals to test validity, e.g. if (weak_ptr) {...};
explicit operator bool() const { return get() != nullptr; }
+ // Resets the WeakPtr to hold nothing.
+ //
+ // The `get()` method will return `nullptr` thereafter, and `MaybeValid()`
+ // will be `false`.
+ void reset() {
+ ref_.Reset();
+ ptr_ = nullptr;
+ }
+
// Returns false if the WeakPtr is confirmed to be invalid. This call is safe
// to make from any thread, e.g. to optimize away unnecessary work, but
- // operator bool() must always be called, on the correct sequence, before
+ // RefIsValid() must always be called, on the correct sequence, before
// actually using the pointer.
//
// Warning: as with any object, this call is only thread-safe if the WeakPtr
@@ -283,9 +305,24 @@
template <typename U> friend class WeakPtr;
friend class SupportsWeakPtr<T>;
friend class WeakPtrFactory<T>;
+ friend class WeakPtrFactory<std::remove_const_t<T>>;
- WeakPtr(const internal::WeakReference& ref, T* ptr)
- : WeakPtrBase(ref, reinterpret_cast<uintptr_t>(ptr)) {}
+ WeakPtr(internal::WeakReference&& ref, T* ptr)
+ : ref_(std::move(ref)), ptr_(ptr) {
+ DCHECK(ptr);
+ }
+
+ internal::WeakReference CloneWeakReference() const { return ref_; }
+
+ internal::WeakReference ref_;
+
+ // This pointer is only valid when ref_.is_valid() is true. Otherwise, its
+ // value is undefined (as opposed to nullptr). The pointer is allowed to
+ // dangle as we verify its liveness through `ref_` before allowing access to
+ // the pointee. We don't use raw_ptr<T> here to prevent WeakPtr from keeping
+ // the memory allocation in quarantine, as it can't be accessed through the
+ // WeakPtr.
+ RAW_PTR_EXCLUSION T* ptr_ = nullptr;
};
#if !defined(STARBOARD)
@@ -326,16 +363,50 @@
template <class T>
class WeakPtrFactory : public internal::WeakPtrFactoryBase {
public:
+ WeakPtrFactory() = delete;
+
explicit WeakPtrFactory(T* ptr)
: WeakPtrFactoryBase(reinterpret_cast<uintptr_t>(ptr)) {}
+ WeakPtrFactory(const WeakPtrFactory&) = delete;
+ WeakPtrFactory& operator=(const WeakPtrFactory&) = delete;
+
~WeakPtrFactory() = default;
+ WeakPtr<const T> GetWeakPtr() const {
+ return WeakPtr<const T>(weak_reference_owner_.GetRef(),
+ reinterpret_cast<const T*>(ptr_));
+ }
+
+ template <int&... ExplicitArgumentBarrier,
+ typename U = T,
+ typename = std::enable_if_t<!std::is_const_v<U>>>
WeakPtr<T> GetWeakPtr() {
return WeakPtr<T>(weak_reference_owner_.GetRef(),
reinterpret_cast<T*>(ptr_));
}
+ template <int&... ExplicitArgumentBarrier,
+ typename U = T,
+ typename = std::enable_if_t<!std::is_const_v<U>>>
+ WeakPtr<T> GetMutableWeakPtr() const {
+ return WeakPtr<T>(weak_reference_owner_.GetRef(),
+ reinterpret_cast<T*>(ptr_));
+ }
+
+ // Returns a smart pointer that is valid until the WeakPtrFactory is
+ // invalidated. Unlike WeakPtr, this smart pointer cannot be null, and cannot
+ // be checked to see if the WeakPtrFactory is invalidated. It's intended to
+ // express that the pointer will not (intentionally) outlive the `T` object it
+ // points to, and to crash safely in the case of a bug instead of causing a
+ // use-after-free. This type provides an alternative to WeakPtr to prevent
+ // use-after-free bugs without also introducing "fuzzy lifetimes" that can be
+ // checked for at runtime.
+ SafeRef<T> GetSafeRef() const {
+ return internal::MakeSafeRefFromWeakPtrInternals(
+ weak_reference_owner_.GetRef(), reinterpret_cast<T*>(ptr_));
+ }
+
// Call this method to invalidate all existing weak pointers.
void InvalidateWeakPtrs() {
DCHECK(ptr_);
@@ -347,9 +418,6 @@
DCHECK(ptr_);
return weak_reference_owner_.HasRefs();
}
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(WeakPtrFactory);
};
// A class may extend from SupportsWeakPtr to let others take weak pointers to
@@ -362,6 +430,9 @@
public:
SupportsWeakPtr() = default;
+ SupportsWeakPtr(const SupportsWeakPtr&) = delete;
+ SupportsWeakPtr& operator=(const SupportsWeakPtr&) = delete;
+
WeakPtr<T> AsWeakPtr() {
return WeakPtr<T>(weak_reference_owner_.GetRef(), static_cast<T*>(this));
}
@@ -378,7 +449,6 @@
private:
internal::WeakReferenceOwner weak_reference_owner_;
- DISALLOW_COPY_AND_ASSIGN(SupportsWeakPtr);
};
// Helper function that uses type deduction to safely return a WeakPtr<Derived>
diff --git a/base/memory/weak_ptr_unittest.cc b/base/memory/weak_ptr_unittest.cc
index a4629df..6704c8d 100644
--- a/base/memory/weak_ptr_unittest.cc
+++ b/base/memory/weak_ptr_unittest.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -7,11 +7,11 @@
#include <memory>
#include <string>
-#include "base/bind.h"
#include "base/debug/leak_annotations.h"
+#include "base/functional/bind.h"
#include "base/location.h"
-#include "base/single_thread_task_runner.h"
#include "base/synchronization/waitable_event.h"
+#include "base/task/single_thread_task_runner.h"
#include "base/test/gtest_util.h"
#include "base/test/test_timeouts.h"
#include "base/threading/thread.h"
@@ -79,8 +79,8 @@
WeakPtr<Target> target;
};
struct TargetWithFactory : public Target {
- TargetWithFactory() : factory(this) {}
- WeakPtrFactory<Target> factory;
+ TargetWithFactory() {}
+ WeakPtrFactory<Target> factory{this};
};
// Helper class to create and destroy weak pointer copies
@@ -318,13 +318,22 @@
}
TEST(WeakPtrTest, DerivedTargetMultipleInheritance) {
- DerivedTargetMultipleInheritance d;
- Target& b = d;
- EXPECT_NE(static_cast<void*>(&d), static_cast<void*>(&b));
- const WeakPtr<Target> pb = AsWeakPtr(&b);
- EXPECT_EQ(pb.get(), &b);
- const WeakPtr<DerivedTargetMultipleInheritance> pd = AsWeakPtr(&d);
- EXPECT_EQ(pd.get(), &d);
+ DerivedTargetMultipleInheritance derived_target;
+ Target& target = derived_target;
+ EXPECT_NE(static_cast<void*>(&derived_target), static_cast<void*>(&target));
+
+ WeakPtr<Target> target_weak_ptr = AsWeakPtr(&target);
+ EXPECT_EQ(target_weak_ptr.get(), &target);
+
+ WeakPtr<DerivedTargetMultipleInheritance> derived_target_weak_ptr =
+ AsWeakPtr(&derived_target);
+ EXPECT_EQ(derived_target_weak_ptr.get(), &derived_target);
+
+ target_weak_ptr = derived_target_weak_ptr;
+ EXPECT_EQ(target_weak_ptr.get(), &target);
+
+ target_weak_ptr = std::move(derived_target_weak_ptr);
+ EXPECT_EQ(target_weak_ptr.get(), &target);
}
TEST(WeakPtrFactoryTest, BooleanTesting) {
@@ -371,6 +380,36 @@
EXPECT_EQ(nullptr, null_ptr);
}
+struct ReallyBaseClass {};
+struct BaseClass : ReallyBaseClass {
+ virtual ~BaseClass() = default;
+ void VirtualMethod() {}
+};
+struct OtherBaseClass {
+ virtual ~OtherBaseClass() = default;
+ virtual void VirtualMethod() {}
+};
+struct WithWeak final : BaseClass, OtherBaseClass {
+ WeakPtrFactory<WithWeak> factory{this};
+};
+
+TEST(WeakPtrTest, ConversionOffsetsPointer) {
+ WithWeak with;
+ WeakPtr<WithWeak> ptr(with.factory.GetWeakPtr());
+ {
+ // Copy construction.
+ WeakPtr<OtherBaseClass> base_ptr(ptr);
+ EXPECT_EQ(static_cast<WithWeak*>(&*base_ptr), &with);
+ }
+ {
+ // Move construction.
+ WeakPtr<OtherBaseClass> base_ptr(std::move(ptr));
+ EXPECT_EQ(static_cast<WithWeak*>(&*base_ptr), &with);
+ }
+
+ // WeakPtr doesn't have conversion operators for assignment.
+}
+
TEST(WeakPtrTest, InvalidateWeakPtrs) {
int data;
WeakPtrFactory<int> factory(&data);
@@ -527,11 +566,11 @@
{
// Main thread creates another WeakPtr, but this does not trigger implicitly
// thread ownership move.
- Arrow arrow;
- arrow.target = target->AsWeakPtr();
+ Arrow scoped_arrow;
+ scoped_arrow.target = target->AsWeakPtr();
// The new WeakPtr is owned by background thread.
- EXPECT_EQ(target, background.DeRef(&arrow));
+ EXPECT_EQ(target, background.DeRef(&scoped_arrow));
}
// Target can only be deleted on background thread.
@@ -687,6 +726,51 @@
background.DeleteArrow(arrow);
}
+TEST(WeakPtrTest, ConstUpCast) {
+ Target target;
+
+ // WeakPtrs can upcast from non-const T to const T.
+ WeakPtr<const Target> const_weak_ptr = target.AsWeakPtr();
+
+ // WeakPtrs don't enable conversion from const T to nonconst T.
+ static_assert(
+ !std::is_constructible_v<WeakPtr<Target>, WeakPtr<const Target>>);
+}
+
+TEST(WeakPtrTest, ConstGetWeakPtr) {
+ struct TestTarget {
+ const char* Method() const { return "const method"; }
+ const char* Method() { return "non-const method"; }
+
+ WeakPtrFactory<TestTarget> weak_ptr_factory{this};
+ } non_const_test_target;
+
+ const TestTarget& const_test_target = non_const_test_target;
+
+ EXPECT_EQ(const_test_target.weak_ptr_factory.GetWeakPtr()->Method(),
+ "const method");
+ EXPECT_EQ(non_const_test_target.weak_ptr_factory.GetWeakPtr()->Method(),
+ "non-const method");
+ EXPECT_EQ(const_test_target.weak_ptr_factory.GetMutableWeakPtr()->Method(),
+ "non-const method");
+}
+
+TEST(WeakPtrTest, GetMutableWeakPtr) {
+ struct TestStruct {
+ int member = 0;
+ WeakPtrFactory<TestStruct> weak_ptr_factory{this};
+ };
+ TestStruct test_struct;
+ EXPECT_EQ(test_struct.member, 0);
+
+ // GetMutableWeakPtr() grants non-const access to T.
+ const TestStruct& const_test_struct = test_struct;
+ WeakPtr<TestStruct> weak_ptr =
+ const_test_struct.weak_ptr_factory.GetMutableWeakPtr();
+ weak_ptr->member = 1;
+ EXPECT_EQ(test_struct.member, 1);
+}
+
TEST(WeakPtrDeathTest, WeakPtrCopyDoesNotChangeThreadBinding) {
// The default style "fast" does not support multi-threaded tests
// (introduces deadlock on Linux).
@@ -798,4 +882,26 @@
ASSERT_DCHECK_DEATH(arrow.target.get());
}
+TEST(WeakPtrDeathTest, ArrowOperatorChecksOnBadDereference) {
+ // The default style "fast" does not support multi-threaded tests
+ // (introduces deadlock on Linux).
+ ::testing::FLAGS_gtest_death_test_style = "threadsafe";
+
+ auto target = std::make_unique<Target>();
+ WeakPtr<Target> weak = target->AsWeakPtr();
+ target.reset();
+ EXPECT_CHECK_DEATH(weak->AsWeakPtr());
+}
+
+TEST(WeakPtrDeathTest, StarOperatorChecksOnBadDereference) {
+ // The default style "fast" does not support multi-threaded tests
+ // (introduces deadlock on Linux).
+ ::testing::FLAGS_gtest_death_test_style = "threadsafe";
+
+ auto target = std::make_unique<Target>();
+ WeakPtr<Target> weak = target->AsWeakPtr();
+ target.reset();
+ EXPECT_CHECK_DEATH((*weak).AsWeakPtr());
+}
+
} // namespace base
diff --git a/base/memory/weak_ptr_unittest.nc b/base/memory/weak_ptr_unittest.nc
index b96b033..6bd21da 100644
--- a/base/memory/weak_ptr_unittest.nc
+++ b/base/memory/weak_ptr_unittest.nc
@@ -1,4 +1,4 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Copyright 2012 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -17,7 +17,7 @@
struct Unrelated {};
struct DerivedUnrelated : Unrelated {};
-#if defined(NCTEST_AUTO_DOWNCAST) // [r"cannot initialize a variable of type 'base::DerivedProducer \*' with an rvalue of type 'base::Producer \*'"]
+#if defined(NCTEST_AUTO_DOWNCAST) // [r"no viable conversion from 'WeakPtr<Producer>' to 'WeakPtr<DerivedProducer>'"]
void WontCompile() {
Producer f;
@@ -25,7 +25,7 @@
WeakPtr<DerivedProducer> derived_ptr = ptr;
}
-#elif defined(NCTEST_STATIC_DOWNCAST) // [r"cannot initialize a variable of type 'base::DerivedProducer \*' with an rvalue of type 'base::Producer \*'"]
+#elif defined(NCTEST_STATIC_DOWNCAST) // [r"no matching conversion for static_cast from 'WeakPtr<Producer>' to 'WeakPtr<DerivedProducer>'"]
void WontCompile() {
Producer f;
@@ -34,7 +34,7 @@
static_cast<WeakPtr<DerivedProducer> >(ptr);
}
-#elif defined(NCTEST_AUTO_REF_DOWNCAST) // [r"fatal error: non-const lvalue reference to type 'WeakPtr<base::DerivedProducer>' cannot bind to a value of unrelated type 'WeakPtr<base::Producer>'"]
+#elif defined(NCTEST_AUTO_REF_DOWNCAST) // [r"fatal error: non-const lvalue reference to type 'WeakPtr<DerivedProducer>' cannot bind to a value of unrelated type 'WeakPtr<Producer>'"]
void WontCompile() {
Producer f;
@@ -42,7 +42,7 @@
WeakPtr<DerivedProducer>& derived_ptr = ptr;
}
-#elif defined(NCTEST_STATIC_REF_DOWNCAST) // [r"fatal error: non-const lvalue reference to type 'WeakPtr<base::DerivedProducer>' cannot bind to a value of unrelated type 'WeakPtr<base::Producer>'"]
+#elif defined(NCTEST_STATIC_REF_DOWNCAST) // [r"fatal error: non-const lvalue reference to type 'WeakPtr<DerivedProducer>' cannot bind to a value of unrelated type 'WeakPtr<Producer>'"]
void WontCompile() {
Producer f;
@@ -59,7 +59,7 @@
SupportsWeakPtr<Producer>::StaticAsWeakPtr<DerivedProducer>(&f);
}
-#elif defined(NCTEST_UNSAFE_HELPER_DOWNCAST) // [r"cannot initialize a variable of type 'base::DerivedProducer \*' with an rvalue of type 'base::Producer \*'"]
+#elif defined(NCTEST_UNSAFE_HELPER_DOWNCAST) // [r"no viable conversion from 'WeakPtr<base::Producer>' to 'WeakPtr<DerivedProducer>'"]
void WontCompile() {
Producer f;
@@ -73,14 +73,14 @@
WeakPtr<DerivedProducer> ptr = AsWeakPtr<DerivedProducer>(&f);
}
-#elif defined(NCTEST_UNSAFE_WRONG_INSANTIATED_HELPER_DOWNCAST) // [r"cannot initialize a variable of type 'base::DerivedProducer \*' with an rvalue of type 'base::Producer \*'"]
+#elif defined(NCTEST_UNSAFE_WRONG_INSANTIATED_HELPER_DOWNCAST) // [r"no viable conversion from 'WeakPtr<base::Producer>' to 'WeakPtr<DerivedProducer>'"]
void WontCompile() {
Producer f;
WeakPtr<DerivedProducer> ptr = AsWeakPtr<Producer>(&f);
}
-#elif defined(NCTEST_UNSAFE_HELPER_CAST) // [r"cannot initialize a variable of type 'base::OtherDerivedProducer \*' with an rvalue of type 'base::DerivedProducer \*'"]
+#elif defined(NCTEST_UNSAFE_HELPER_CAST) // [r"no viable conversion from 'WeakPtr<base::DerivedProducer>' to 'WeakPtr<OtherDerivedProducer>'"]
void WontCompile() {
DerivedProducer f;
@@ -94,14 +94,14 @@
WeakPtr<OtherDerivedProducer> ptr = AsWeakPtr<OtherDerivedProducer>(&f);
}
-#elif defined(NCTEST_UNSAFE_WRONG_INSTANTIATED_HELPER_SIDECAST) // [r"cannot initialize a variable of type 'base::OtherDerivedProducer \*' with an rvalue of type 'base::DerivedProducer \*'"]
+#elif defined(NCTEST_UNSAFE_WRONG_INSTANTIATED_HELPER_SIDECAST) // [r"no viable conversion from 'WeakPtr<base::DerivedProducer>' to 'WeakPtr<OtherDerivedProducer>'"]
void WontCompile() {
DerivedProducer f;
WeakPtr<OtherDerivedProducer> ptr = AsWeakPtr<DerivedProducer>(&f);
}
-#elif defined(NCTEST_UNRELATED_HELPER) // [r"cannot initialize a variable of type 'base::Unrelated \*' with an rvalue of type 'base::DerivedProducer \*'"]
+#elif defined(NCTEST_UNRELATED_HELPER) // [r"no viable conversion from 'WeakPtr<base::DerivedProducer>' to 'WeakPtr<Unrelated>'"]
void WontCompile() {
DerivedProducer f;
@@ -115,30 +115,43 @@
WeakPtr<Unrelated> ptr = AsWeakPtr<Unrelated>(&f);
}
-// TODO(hans): Remove .* and update the static_assert expectations once we roll
-// past Clang r313315. https://crbug.com/765692.
-
-#elif defined(NCTEST_COMPLETELY_UNRELATED_HELPER) // [r"fatal error: static_assert failed .*\"AsWeakPtr argument must inherit from SupportsWeakPtr\""]
+#elif defined(NCTEST_COMPLETELY_UNRELATED_HELPER) // [r"fatal error: static assertion failed due to requirement 'std::is_base_of<base::internal::SupportsWeakPtrBase, base::Unrelated>::value': AsWeakPtr argument must inherit from SupportsWeakPtr"]
void WontCompile() {
Unrelated f;
WeakPtr<Unrelated> ptr = AsWeakPtr(&f);
}
-#elif defined(NCTEST_DERIVED_COMPLETELY_UNRELATED_HELPER) // [r"fatal error: static_assert failed .*\"AsWeakPtr argument must inherit from SupportsWeakPtr\""]
+#elif defined(NCTEST_DERIVED_COMPLETELY_UNRELATED_HELPER) // [r"fatal error: static assertion failed due to requirement 'std::is_base_of<base::internal::SupportsWeakPtrBase, base::DerivedUnrelated>::value': AsWeakPtr argument must inherit from SupportsWeakPtr"]
void WontCompile() {
DerivedUnrelated f;
WeakPtr<Unrelated> ptr = AsWeakPtr(&f);
}
-#elif defined(NCTEST_AMBIGUOUS_ANCESTORS) // [r"fatal error: use of undeclared identifier 'AsWeakPtrImpl'"]
+#elif defined(NCTEST_AMBIGUOUS_ANCESTORS) // [r"fatal error: no matching function for call to 'AsWeakPtrImpl'"]
void WontCompile() {
MultiplyDerivedProducer f;
WeakPtr<MultiplyDerivedProducer> ptr = AsWeakPtr(&f);
}
+#elif defined(NCTEST_GETMUTABLEWEAKPTR_CONST_T) // [r"fatal error: no matching member function for call to 'GetMutableWeakPtr'"]
+
+void WontCompile() {
+ Unrelated unrelated;
+ const WeakPtrFactory<const Unrelated> factory(&unrelated);
+ factory.GetMutableWeakPtr();
+}
+
+#elif defined(NCTEST_GETMUTABLEWEAKPTR_NOT_T) // [r"fatal error: no matching member function for call to 'GetMutableWeakPtr'"]
+
+void WontCompile() {
+ DerivedUnrelated derived_unrelated;
+ const WeakPtrFactory<DerivedUnrelated> factory(&derived_unrelated);
+ factory.GetMutableWeakPtr<Unrelated>();
+}
+
#endif
}
diff --git a/base/memory/writable_shared_memory_region.cc b/base/memory/writable_shared_memory_region.cc
index 063e672..be96323 100644
--- a/base/memory/writable_shared_memory_region.cc
+++ b/base/memory/writable_shared_memory_region.cc
@@ -1,4 +1,4 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
+// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -6,13 +6,18 @@
#include <utility>
-#include "base/memory/shared_memory.h"
#include "build/build_config.h"
namespace base {
+WritableSharedMemoryRegion::CreateFunction*
+ WritableSharedMemoryRegion::create_hook_ = nullptr;
+
// static
WritableSharedMemoryRegion WritableSharedMemoryRegion::Create(size_t size) {
+ if (create_hook_)
+ return create_hook_(size);
+
subtle::PlatformSharedMemoryRegion handle =
subtle::PlatformSharedMemoryRegion::CreateWritable(size);
@@ -58,23 +63,24 @@
WritableSharedMemoryRegion&& region) = default;
WritableSharedMemoryRegion::~WritableSharedMemoryRegion() = default;
-WritableSharedMemoryMapping WritableSharedMemoryRegion::Map() const {
- return MapAt(0, handle_.GetSize());
+WritableSharedMemoryMapping WritableSharedMemoryRegion::Map(
+ SharedMemoryMapper* mapper) const {
+ return MapAt(0, handle_.GetSize(), mapper);
}
WritableSharedMemoryMapping WritableSharedMemoryRegion::MapAt(
- off_t offset,
- size_t size) const {
+ uint64_t offset,
+ size_t size,
+ SharedMemoryMapper* mapper) const {
if (!IsValid())
return {};
- void* memory = nullptr;
- size_t mapped_size = 0;
- if (!handle_.MapAt(offset, size, &memory, &mapped_size))
+ auto result = handle_.MapAt(offset, size, mapper);
+ if (!result.has_value())
return {};
- return WritableSharedMemoryMapping(memory, size, mapped_size,
- handle_.GetGUID());
+ return WritableSharedMemoryMapping(result.value(), size, handle_.GetGUID(),
+ mapper);
}
bool WritableSharedMemoryRegion::IsValid() const {
diff --git a/base/memory/writable_shared_memory_region.h b/base/memory/writable_shared_memory_region.h
index edd25aa..b3e8fda 100644
--- a/base/memory/writable_shared_memory_region.h
+++ b/base/memory/writable_shared_memory_region.h
@@ -1,15 +1,19 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
+// Copyright 2018 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_WRITABLE_SHARED_MEMORY_REGION_H_
#define BASE_MEMORY_WRITABLE_SHARED_MEMORY_REGION_H_
-#include "base/macros.h"
+#include "base/base_export.h"
+#include "base/check.h"
#include "base/memory/platform_shared_memory_region.h"
#include "base/memory/read_only_shared_memory_region.h"
#include "base/memory/shared_memory_mapping.h"
#include "base/memory/unsafe_shared_memory_region.h"
+#include "build/build_config.h"
+
+#include <stdint.h>
namespace base {
@@ -22,19 +26,18 @@
// ReadOnlySharedMemoryRegion. However, unlike ReadOnlySharedMemoryRegion and
// UnsafeSharedMemoryRegion, ownership of this region (while writable) is unique
// and may only be transferred, not duplicated.
+//
+// Unlike ReadOnlySharedMemoryRegion and UnsafeSharedMemoryRegion,
+// WritableSharedMemoryRegion doesn't provide GetPlatformHandle() method to
+// ensure that the region is never duplicated while writable.
class BASE_EXPORT WritableSharedMemoryRegion {
public:
using MappingType = WritableSharedMemoryMapping;
// Creates a new WritableSharedMemoryRegion instance of a given
// size that can be used for mapping writable shared memory into the virtual
// address space.
- //
- // This call will fail if the process does not have sufficient permissions to
- // create a shared memory region itself. See
- // mojo::CreateWritableSharedMemoryRegion in
- // mojo/public/cpp/base/shared_memory_utils.h for creating a shared memory
- // region from a an unprivileged process where a broker must be used.
static WritableSharedMemoryRegion Create(size_t size);
+ using CreateFunction = decltype(Create);
// Returns a WritableSharedMemoryRegion built from a platform handle that was
// taken from another WritableSharedMemoryRegion instance. Returns an invalid
@@ -69,6 +72,10 @@
WritableSharedMemoryRegion(WritableSharedMemoryRegion&&);
WritableSharedMemoryRegion& operator=(WritableSharedMemoryRegion&&);
+ WritableSharedMemoryRegion(const WritableSharedMemoryRegion&) = delete;
+ WritableSharedMemoryRegion& operator=(const WritableSharedMemoryRegion&) =
+ delete;
+
// Destructor closes shared memory region if valid.
// All created mappings will remain valid.
~WritableSharedMemoryRegion();
@@ -77,14 +84,23 @@
// access. The mapped address is guaranteed to have an alignment of
// at least |subtle::PlatformSharedMemoryRegion::kMapMinimumAlignment|.
// Returns a valid WritableSharedMemoryMapping instance on success, invalid
- // otherwise.
- WritableSharedMemoryMapping Map() const;
+ // otherwise. A custom |SharedMemoryMapper| for mapping (and later unmapping)
+ // the region can be provided using the optional |mapper| parameter.
+ WritableSharedMemoryMapping Map(SharedMemoryMapper* mapper = nullptr) const;
- // Same as above, but maps only |size| bytes of the shared memory block
- // starting with the given |offset|. |offset| must be aligned to value of
- // |SysInfo::VMAllocationGranularity()|. Returns an invalid mapping if
- // requested bytes are out of the region limits.
- WritableSharedMemoryMapping MapAt(off_t offset, size_t size) const;
+ // Similar to `Map()`, but maps only `size` bytes of the shared memory block
+ // at byte `offset`. Returns an invalid mapping if requested bytes are out of
+ // the region limits.
+ //
+ // `offset` does not need to be aligned; if `offset` is not a multiple of
+ // `subtle::PlatformSharedMemoryRegion::kMapMinimumAlignment`, then the
+ // returned mapping will not respect alignment either. Internally, `offset`
+ // and `size` are still first adjusted to respect alignment when mapping in
+ // the shared memory region, but the returned mapping will be "unadjusted" to
+ // match the exact `offset` and `size` requested.
+ WritableSharedMemoryMapping MapAt(uint64_t offset,
+ size_t size,
+ SharedMemoryMapper* mapper = nullptr) const;
// Whether underlying platform handles are valid.
bool IsValid() const;
@@ -101,13 +117,26 @@
return handle_.GetGUID();
}
+#if BUILDFLAG(IS_WIN)
+ // On Windows it is necessary in rare cases to take a writable handle from a
+ // region that will be converted to read-only. On this platform it is a safe
+ // operation, as the handle returned from this method will remain writable
+ // after the region is converted to read-only. However, it breaks Chromium's
+ // WritableSharedMemoryRegion semantics and so should be used with care.
+ HANDLE UnsafeGetPlatformHandle() const { return handle_.GetPlatformHandle(); }
+#endif
+
private:
+ friend class SharedMemoryHooks;
+
explicit WritableSharedMemoryRegion(
subtle::PlatformSharedMemoryRegion handle);
- subtle::PlatformSharedMemoryRegion handle_;
+ static void set_create_hook(CreateFunction* hook) { create_hook_ = hook; }
- DISALLOW_COPY_AND_ASSIGN(WritableSharedMemoryRegion);
+ static CreateFunction* create_hook_;
+
+ subtle::PlatformSharedMemoryRegion handle_;
};
} // namespace base