// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_CPPGC_HEAP_OBJECT_HEADER_H_
#define V8_HEAP_CPPGC_HEAP_OBJECT_HEADER_H_

#include <stdint.h>

#include <atomic>

#include "include/cppgc/allocation.h"
#include "include/cppgc/internal/gc-info.h"
#include "include/cppgc/internal/name-trait.h"
#include "src/base/atomic-utils.h"
#include "src/base/bit-field.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/heap/cppgc/gc-info-table.h"
#include "src/heap/cppgc/globals.h"

namespace cppgc {

class Visitor;

namespace internal {

// HeapObjectHeader contains metadata per object and is prepended to each
// object.
//
// +-----------------+------+------------------------------------------+
// | name            | bits |                                          |
// +-----------------+------+------------------------------------------+
// | padding         |   32 | Only present on 64-bit platforms.        |
// +-----------------+------+------------------------------------------+
// | GCInfoIndex     |   14 |                                          |
// | unused          |    1 |                                          |
// | in construction |    1 | In construction encoded as |false|.      |
// +-----------------+------+------------------------------------------+
// | size            |   14 | 17 bits because allocations are aligned. |
// | unused          |    1 |                                          |
// | mark bit        |    1 |                                          |
// +-----------------+------+------------------------------------------+
//
// Notes:
// - See |GCInfoTable| for constraints on GCInfoIndex.
// - |size| for regular objects is encoded with 14 bits but can actually
// represent sizes up to |kBlinkPageSize| (2^17) because allocations are
// always 8 byte aligned (see kAllocationGranularity).
// - |size| for large objects is encoded as 0. The size of a large object is
// stored in |LargeObjectPage::PayloadSize()|.
// - |mark bit| and |in construction| bits are located in separate 16-bit
//   halves so that either can potentially be accessed non-atomically.
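//
// A short worked example of the size encoding (an illustrative sketch, not
// part of the contract; |gc_info_index| is an assumed valid index): a regular
// 48-byte object stores 48 / kAllocationGranularity == 6 in the 14-bit size
// field, so aligned sizes up to (2^14 - 1) * 8 (just under 2^17) fit:
//
//   HeapObjectHeader header(48, gc_info_index);
//   DCHECK_EQ(48u, header.GetSize());   // DecodeSize(EncodeSize(48)) == 48.
//   DCHECK(header.IsInConstruction());  // Fully-constructed bit starts at 0.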
class HeapObjectHeader {
public:
static constexpr size_t kSizeLog2 = 17;
static constexpr size_t kMaxSize = (size_t{1} << kSizeLog2) - 1;
static constexpr uint16_t kLargeObjectSizeInHeader = 0;
  inline static HeapObjectHeader& FromPayload(void* payload);
  inline static const HeapObjectHeader& FromPayload(const void* payload);
inline HeapObjectHeader(size_t size, GCInfoIndex gc_info_index);
// The payload starts directly after the HeapObjectHeader.
inline Address Payload() const;
template <AccessMode mode = AccessMode::kNonAtomic>
inline GCInfoIndex GetGCInfoIndex() const;
template <AccessMode mode = AccessMode::kNonAtomic>
inline size_t GetSize() const;
inline void SetSize(size_t size);
template <AccessMode mode = AccessMode::kNonAtomic>
inline bool IsLargeObject() const;
template <AccessMode = AccessMode::kNonAtomic>
bool IsInConstruction() const;
  // Use MarkObjectAsFullyConstructed() to mark an object as being
  // constructed.
  inline void MarkAsFullyConstructed();
template <AccessMode = AccessMode::kNonAtomic>
bool IsMarked() const;
template <AccessMode = AccessMode::kNonAtomic>
void Unmark();
inline bool TryMarkAtomic();
template <AccessMode = AccessMode::kNonAtomic>
bool IsYoung() const;
template <AccessMode = AccessMode::kNonAtomic>
bool IsFree() const;
inline bool IsFinalizable() const;
void Finalize();
V8_EXPORT_PRIVATE HeapObjectName GetName() const;
V8_EXPORT_PRIVATE void Trace(Visitor*) const;
private:
enum class EncodedHalf : uint8_t { kLow, kHigh };
// Used in |encoded_high_|.
using FullyConstructedField = v8::base::BitField16<bool, 0, 1>;
using UnusedField1 = FullyConstructedField::Next<bool, 1>;
using GCInfoIndexField = UnusedField1::Next<GCInfoIndex, 14>;
// Used in |encoded_low_|.
using MarkBitField = v8::base::BitField16<bool, 0, 1>;
using UnusedField2 = MarkBitField::Next<bool, 1>;
using SizeField = void; // Use EncodeSize/DecodeSize instead.
static constexpr size_t DecodeSize(uint16_t encoded) {
// Essentially, gets optimized to << 1.
using SizeField = UnusedField2::Next<size_t, 14>;
return SizeField::decode(encoded) * kAllocationGranularity;
}
static constexpr uint16_t EncodeSize(size_t size) {
// Essentially, gets optimized to >> 1.
using SizeField = UnusedField2::Next<size_t, 14>;
return SizeField::encode(size / kAllocationGranularity);
}
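  // Illustrative arithmetic behind the "<< 1" / ">> 1" comments above
  // (assuming the 8-byte kAllocationGranularity): the size field starts at
  // bit 2, so for a granularity-aligned |size| that fits in the field,
  //   EncodeSize(size) == (size / 8) << 2 == size >> 1, and
  //   DecodeSize(encoded) == (encoded >> 2) * 8 == (encoded & ~0x3) << 1.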
V8_EXPORT_PRIVATE void CheckApiConstants();
template <AccessMode, EncodedHalf part,
std::memory_order memory_order = std::memory_order_seq_cst>
inline uint16_t LoadEncoded() const;
template <AccessMode mode, EncodedHalf part,
std::memory_order memory_order = std::memory_order_seq_cst>
inline void StoreEncoded(uint16_t bits, uint16_t mask);
#if defined(V8_TARGET_ARCH_64_BIT)
uint32_t padding_ = 0;
#endif // defined(V8_TARGET_ARCH_64_BIT)
uint16_t encoded_high_;
uint16_t encoded_low_;
};
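
// Example usage (a sketch; |object| is assumed to point at the payload of a
// cppgc-managed object and is not part of this header):
//
//   HeapObjectHeader& header = HeapObjectHeader::FromPayload(object);
//   DCHECK_EQ(object, static_cast<void*>(header.Payload()));
//   if (!header.IsFree() && !header.IsLargeObject()) {
//     size_t size = header.GetSize();  // Non-zero for regular objects.
//   }
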
// static
HeapObjectHeader& HeapObjectHeader::FromPayload(void* payload) {
return *reinterpret_cast<HeapObjectHeader*>(static_cast<Address>(payload) -
sizeof(HeapObjectHeader));
}
// static
const HeapObjectHeader& HeapObjectHeader::FromPayload(const void* payload) {
return *reinterpret_cast<const HeapObjectHeader*>(
static_cast<ConstAddress>(payload) - sizeof(HeapObjectHeader));
}
HeapObjectHeader::HeapObjectHeader(size_t size, GCInfoIndex gc_info_index) {
#if defined(V8_TARGET_ARCH_64_BIT)
USE(padding_);
#endif // defined(V8_TARGET_ARCH_64_BIT)
DCHECK_LT(gc_info_index, GCInfoTable::kMaxIndex);
DCHECK_EQ(0u, size & (sizeof(HeapObjectHeader) - 1));
DCHECK_GE(kMaxSize, size);
encoded_low_ = EncodeSize(size);
  // Objects may get published to the marker without any other synchronization
  // (e.g., a write barrier), in which case the in-construction bit is read
  // concurrently, which requires reading |encoded_high_| atomically. It is ok
  // if this write is not observed by the marker, since the sweeper sets the
  // in-construction bit to 0 and we can rely on that to guarantee a correct
  // answer when checking if objects are in construction.
v8::base::AsAtomicPtr(&encoded_high_)
->store(GCInfoIndexField::encode(gc_info_index),
std::memory_order_relaxed);
DCHECK(IsInConstruction());
#ifdef DEBUG
CheckApiConstants();
#endif // DEBUG
}
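// For instance (illustrative only): constructing with gc_info_index == 1
// stores GCInfoIndexField::encode(1) == (1 << 2) in |encoded_high_|; the
// fully-constructed bit (bit 0) stays 0, so IsInConstruction() returns true
// until MarkObjectAsFullyConstructed() sets that bit.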
Address HeapObjectHeader::Payload() const {
return reinterpret_cast<Address>(const_cast<HeapObjectHeader*>(this)) +
sizeof(HeapObjectHeader);
}
template <AccessMode mode>
GCInfoIndex HeapObjectHeader::GetGCInfoIndex() const {
const uint16_t encoded =
LoadEncoded<mode, EncodedHalf::kHigh, std::memory_order_acquire>();
return GCInfoIndexField::decode(encoded);
}
template <AccessMode mode>
size_t HeapObjectHeader::GetSize() const {
  // Size is immutable after construction while either marking or sweeping
  // is running, so a relaxed load (if mode == kAtomic) is enough.
uint16_t encoded_low_value =
LoadEncoded<mode, EncodedHalf::kLow, std::memory_order_relaxed>();
const size_t size = DecodeSize(encoded_low_value);
return size;
}
void HeapObjectHeader::SetSize(size_t size) {
DCHECK(!IsMarked());
encoded_low_ |= EncodeSize(size);
}
template <AccessMode mode>
bool HeapObjectHeader::IsLargeObject() const {
return GetSize<mode>() == kLargeObjectSizeInHeader;
}
template <AccessMode mode>
bool HeapObjectHeader::IsInConstruction() const {
const uint16_t encoded =
LoadEncoded<mode, EncodedHalf::kHigh, std::memory_order_acquire>();
return !FullyConstructedField::decode(encoded);
}
void HeapObjectHeader::MarkAsFullyConstructed() {
MakeGarbageCollectedTraitInternal::MarkObjectAsFullyConstructed(Payload());
}
template <AccessMode mode>
bool HeapObjectHeader::IsMarked() const {
const uint16_t encoded =
LoadEncoded<mode, EncodedHalf::kLow, std::memory_order_relaxed>();
return MarkBitField::decode(encoded);
}
template <AccessMode mode>
void HeapObjectHeader::Unmark() {
DCHECK(IsMarked<mode>());
StoreEncoded<mode, EncodedHalf::kLow, std::memory_order_relaxed>(
MarkBitField::encode(false), MarkBitField::kMask);
}
bool HeapObjectHeader::TryMarkAtomic() {
auto* atomic_encoded = v8::base::AsAtomicPtr(&encoded_low_);
uint16_t old_value = atomic_encoded->load(std::memory_order_relaxed);
const uint16_t new_value = old_value | MarkBitField::encode(true);
if (new_value == old_value) {
return false;
}
return atomic_encoded->compare_exchange_strong(old_value, new_value,
std::memory_order_relaxed);
}
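// A marking loop can rely on the CAS above to claim each object exactly once.
// Sketch (|worklist| and its Push() are assumed placeholders, not APIs of
// this header):
//
//   if (header.TryMarkAtomic()) {
//     worklist.Push(&header);  // Only the winning thread traces the object.
//   }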
template <AccessMode mode>
bool HeapObjectHeader::IsYoung() const {
return !IsMarked<mode>();
}
template <AccessMode mode>
bool HeapObjectHeader::IsFree() const {
return GetGCInfoIndex<mode>() == kFreeListGCInfoIndex;
}
bool HeapObjectHeader::IsFinalizable() const {
const GCInfo& gc_info = GlobalGCInfoTable::GCInfoFromIndex(GetGCInfoIndex());
return gc_info.finalize;
}
template <AccessMode mode, HeapObjectHeader::EncodedHalf part,
std::memory_order memory_order>
uint16_t HeapObjectHeader::LoadEncoded() const {
const uint16_t& half =
part == EncodedHalf::kLow ? encoded_low_ : encoded_high_;
if (mode == AccessMode::kNonAtomic) return half;
return v8::base::AsAtomicPtr(&half)->load(memory_order);
}
template <AccessMode mode, HeapObjectHeader::EncodedHalf part,
std::memory_order memory_order>
void HeapObjectHeader::StoreEncoded(uint16_t bits, uint16_t mask) {
// Caveat: Not all changes to HeapObjectHeader's bitfields go through
// StoreEncoded. The following have their own implementations and need to be
// kept in sync:
// - HeapObjectHeader::TryMarkAtomic
// - MarkObjectAsFullyConstructed (API)
DCHECK_EQ(0u, bits & ~mask);
uint16_t& half = part == EncodedHalf::kLow ? encoded_low_ : encoded_high_;
if (mode == AccessMode::kNonAtomic) {
half = (half & ~mask) | bits;
return;
}
  // We don't perform a CAS loop here, assuming that none of the fields
  // sharing the same encoded half are modified concurrently.
auto* atomic_encoded = v8::base::AsAtomicPtr(&half);
uint16_t value = atomic_encoded->load(std::memory_order_relaxed);
value = (value & ~mask) | bits;
atomic_encoded->store(value, memory_order);
}
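// As a concrete instance of the contract above: Unmark() passes
// bits == MarkBitField::encode(false) and mask == MarkBitField::kMask, so
// only the mark bit of |encoded_low_| is cleared and the size bits are
// preserved.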
} // namespace internal
} // namespace cppgc
#endif // V8_HEAP_CPPGC_HEAP_OBJECT_HEADER_H_