| // Copyright 2012 the V8 project authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style license that can be |
| // found in the LICENSE file. |
| |
| #include "src/heap/heap.h" |
| |
| #include <cinttypes> |
| #include <iomanip> |
| #include <unordered_map> |
| #include <unordered_set> |
| |
| #include "src/api/api-inl.h" |
| #include "src/base/bits.h" |
| #include "src/base/flags.h" |
| #include "src/base/once.h" |
| #include "src/base/utils/random-number-generator.h" |
| #include "src/builtins/accessors.h" |
| #include "src/codegen/assembler-inl.h" |
| #include "src/codegen/compilation-cache.h" |
| #include "src/debug/debug.h" |
| #include "src/deoptimizer/deoptimizer.h" |
| #include "src/execution/microtask-queue.h" |
| #include "src/execution/runtime-profiler.h" |
| #include "src/execution/v8threads.h" |
| #include "src/execution/vm-state-inl.h" |
| #include "src/handles/global-handles.h" |
| #include "src/heap/array-buffer-collector.h" |
| #include "src/heap/array-buffer-tracker-inl.h" |
| #include "src/heap/barrier.h" |
| #include "src/heap/code-stats.h" |
| #include "src/heap/combined-heap.h" |
| #include "src/heap/concurrent-marking.h" |
| #include "src/heap/embedder-tracing.h" |
| #include "src/heap/gc-idle-time-handler.h" |
| #include "src/heap/gc-tracer.h" |
| #include "src/heap/heap-controller.h" |
| #include "src/heap/heap-write-barrier-inl.h" |
| #include "src/heap/incremental-marking-inl.h" |
| #include "src/heap/incremental-marking.h" |
| #include "src/heap/mark-compact-inl.h" |
| #include "src/heap/mark-compact.h" |
| #include "src/heap/memory-reducer.h" |
| #include "src/heap/object-stats.h" |
| #include "src/heap/objects-visiting-inl.h" |
| #include "src/heap/objects-visiting.h" |
| #include "src/heap/read-only-heap.h" |
| #include "src/heap/remembered-set.h" |
| #include "src/heap/scavenge-job.h" |
| #include "src/heap/scavenger-inl.h" |
| #include "src/heap/store-buffer.h" |
| #include "src/heap/stress-marking-observer.h" |
| #include "src/heap/stress-scavenge-observer.h" |
| #include "src/heap/sweeper.h" |
| #include "src/init/bootstrapper.h" |
| #include "src/init/v8.h" |
| #include "src/interpreter/interpreter.h" |
| #include "src/logging/log.h" |
| #include "src/numbers/conversions.h" |
| #include "src/objects/data-handler.h" |
| #include "src/objects/feedback-vector.h" |
| #include "src/objects/free-space-inl.h" |
| #include "src/objects/hash-table-inl.h" |
| #include "src/objects/maybe-object.h" |
| #include "src/objects/shared-function-info.h" |
| #include "src/objects/slots-atomic-inl.h" |
| #include "src/objects/slots-inl.h" |
| #include "src/regexp/regexp.h" |
| #include "src/snapshot/embedded/embedded-data.h" |
| #include "src/snapshot/natives.h" |
| #include "src/snapshot/serializer-common.h" |
| #include "src/snapshot/snapshot.h" |
| #include "src/strings/string-stream.h" |
| #include "src/strings/unicode-decoder.h" |
| #include "src/strings/unicode-inl.h" |
| #include "src/tracing/trace-event.h" |
| #include "src/utils/utils-inl.h" |
| #include "src/utils/utils.h" |
| |
| // Has to be the last include (doesn't have include guards): |
| #include "src/objects/object-macros.h" |
| |
| #if defined(V8_OS_STARBOARD) |
| #include "src/poems.h" |
| #endif |
| |
| namespace v8 { |
| namespace internal { |
| |
| // These are outside the Heap class so they can be forward-declared |
| // in heap-write-barrier-inl.h. |
| bool Heap_PageFlagsAreConsistent(HeapObject object) { |
| return Heap::PageFlagsAreConsistent(object); |
| } |
| |
| void Heap_GenerationalBarrierSlow(HeapObject object, Address slot, |
| HeapObject value) { |
| Heap::GenerationalBarrierSlow(object, slot, value); |
| } |
| |
| void Heap_MarkingBarrierSlow(HeapObject object, Address slot, |
| HeapObject value) { |
| Heap::MarkingBarrierSlow(object, slot, value); |
| } |
| |
| void Heap_WriteBarrierForCodeSlow(Code host) { |
| Heap::WriteBarrierForCodeSlow(host); |
| } |
| |
| void Heap_GenerationalBarrierForCodeSlow(Code host, RelocInfo* rinfo, |
| HeapObject object) { |
| Heap::GenerationalBarrierForCodeSlow(host, rinfo, object); |
| } |
| |
| void Heap_MarkingBarrierForCodeSlow(Code host, RelocInfo* rinfo, |
| HeapObject object) { |
| Heap::MarkingBarrierForCodeSlow(host, rinfo, object); |
| } |
| |
| void Heap_MarkingBarrierForDescriptorArraySlow(Heap* heap, HeapObject host, |
| HeapObject descriptor_array, |
| int number_of_own_descriptors) { |
| Heap::MarkingBarrierForDescriptorArraySlow(heap, host, descriptor_array, |
| number_of_own_descriptors); |
| } |
| |
| void Heap_GenerationalEphemeronKeyBarrierSlow(Heap* heap, |
| EphemeronHashTable table, |
| Address slot) { |
| heap->RecordEphemeronKeyWrite(table, slot); |
| } |
| |
| void Heap::SetArgumentsAdaptorDeoptPCOffset(int pc_offset) { |
| DCHECK_EQ(Smi::kZero, arguments_adaptor_deopt_pc_offset()); |
| set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset)); |
| } |
| |
| void Heap::SetConstructStubCreateDeoptPCOffset(int pc_offset) { |
| DCHECK(construct_stub_create_deopt_pc_offset() == Smi::kZero); |
| set_construct_stub_create_deopt_pc_offset(Smi::FromInt(pc_offset)); |
| } |
| |
| void Heap::SetConstructStubInvokeDeoptPCOffset(int pc_offset) { |
| DCHECK(construct_stub_invoke_deopt_pc_offset() == Smi::kZero); |
| set_construct_stub_invoke_deopt_pc_offset(Smi::FromInt(pc_offset)); |
| } |
| |
| void Heap::SetInterpreterEntryReturnPCOffset(int pc_offset) { |
| DCHECK_EQ(Smi::kZero, interpreter_entry_return_pc_offset()); |
| set_interpreter_entry_return_pc_offset(Smi::FromInt(pc_offset)); |
| } |
| |
| void Heap::SetSerializedObjects(FixedArray objects) { |
| DCHECK(isolate()->serializer_enabled()); |
| set_serialized_objects(objects); |
| } |
| |
| void Heap::SetSerializedGlobalProxySizes(FixedArray sizes) { |
| DCHECK(isolate()->serializer_enabled()); |
| set_serialized_global_proxy_sizes(sizes); |
| } |
| |
| bool Heap::GCCallbackTuple::operator==( |
| const Heap::GCCallbackTuple& other) const { |
| return other.callback == callback && other.data == data; |
| } |
| |
| Heap::GCCallbackTuple& Heap::GCCallbackTuple::operator=( |
| const Heap::GCCallbackTuple& other) V8_NOEXCEPT = default; |
| |
| struct Heap::StrongRootsList { |
| FullObjectSlot start; |
| FullObjectSlot end; |
| StrongRootsList* next; |
| }; |
| |
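// Allocation observer that forwards step notifications to
// Heap::ScheduleIdleScavengeIfNeeded(), which may schedule an idle-time
// scavenge task.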
| class IdleScavengeObserver : public AllocationObserver { |
| public: |
| IdleScavengeObserver(Heap* heap, intptr_t step_size) |
| : AllocationObserver(step_size), heap_(heap) {} |
| |
| void Step(int bytes_allocated, Address, size_t) override { |
| heap_->ScheduleIdleScavengeIfNeeded(bytes_allocated); |
| } |
| |
| private: |
| Heap* heap_; |
| }; |
| |
| Heap::Heap() |
| : isolate_(isolate()), |
| memory_pressure_level_(MemoryPressureLevel::kNone), |
| global_pretenuring_feedback_(kInitialFeedbackCapacity), |
| external_string_table_(this) { |
// Ensure max_old_generation_size_ is a multiple of Page::kPageSize.
| DCHECK_EQ(0, max_old_generation_size_ & (Page::kPageSize - 1)); |
| |
| set_native_contexts_list(Smi::kZero); |
| set_allocation_sites_list(Smi::kZero); |
// Put a dummy entry in the remembered pages so we can find the list in the
// minidump even if there are no real unmapped pages.
| RememberUnmappedPage(kNullAddress, false); |
| } |
| |
| Heap::~Heap() = default; |
| |
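// The maximum amount of memory the heap may reserve: both semi-spaces, a new
// large object space capped at the semi-space size, and the old generation.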
| size_t Heap::MaxReserved() { |
| const size_t kMaxNewLargeObjectSpaceSize = max_semi_space_size_; |
| return static_cast<size_t>(2 * max_semi_space_size_ + |
| kMaxNewLargeObjectSpaceSize + |
| max_old_generation_size_); |
| } |
| |
| size_t Heap::YoungGenerationSizeFromOldGenerationSize(size_t old_generation) { |
| // Compute the semi space size and cap it. |
| size_t ratio = old_generation <= kOldGenerationLowMemory |
| ? kOldGenerationToSemiSpaceRatioLowMemory |
| : kOldGenerationToSemiSpaceRatio; |
| size_t semi_space = old_generation / ratio; |
| semi_space = Min<size_t>(semi_space, kMaxSemiSpaceSize); |
| semi_space = Max<size_t>(semi_space, kMinSemiSpaceSize); |
| semi_space = RoundUp(semi_space, Page::kPageSize); |
| return YoungGenerationSizeFromSemiSpaceSize(semi_space); |
| } |
| |
| size_t Heap::HeapSizeFromPhysicalMemory(uint64_t physical_memory) { |
| // Compute the old generation size and cap it. |
| uint64_t old_generation = physical_memory / |
| kPhysicalMemoryToOldGenerationRatio * |
| kPointerMultiplier; |
| old_generation = |
| Min<uint64_t>(old_generation, MaxOldGenerationSize(physical_memory)); |
| old_generation = Max<uint64_t>(old_generation, V8HeapTrait::kMinSize); |
| old_generation = RoundUp(old_generation, Page::kPageSize); |
| |
| size_t young_generation = YoungGenerationSizeFromOldGenerationSize( |
| static_cast<size_t>(old_generation)); |
| return static_cast<size_t>(old_generation) + young_generation; |
| } |
| |
| void Heap::GenerationSizesFromHeapSize(size_t heap_size, |
| size_t* young_generation_size, |
| size_t* old_generation_size) { |
| // Initialize values for the case when the given heap size is too small. |
| *young_generation_size = 0; |
| *old_generation_size = 0; |
// Binary search for the largest old generation size that fits into the given
// heap limit, considering the correspondingly sized young generation.
| size_t lower = 0, upper = heap_size; |
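// |lower| tracks the largest old generation size found to fit into
// |heap_size| so far (if any); |upper| bounds the search from above.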
| while (lower + 1 < upper) { |
| size_t old_generation = lower + (upper - lower) / 2; |
| size_t young_generation = |
| YoungGenerationSizeFromOldGenerationSize(old_generation); |
| if (old_generation + young_generation <= heap_size) { |
| // This size configuration fits into the given heap limit. |
| *young_generation_size = young_generation; |
| *old_generation_size = old_generation; |
| lower = old_generation; |
| } else { |
| upper = old_generation; |
| } |
| } |
| } |
| |
| size_t Heap::MinYoungGenerationSize() { |
| return YoungGenerationSizeFromSemiSpaceSize(kMinSemiSpaceSize); |
| } |
| |
| size_t Heap::MinOldGenerationSize() { |
| size_t paged_space_count = |
| LAST_GROWABLE_PAGED_SPACE - FIRST_GROWABLE_PAGED_SPACE + 1; |
| return paged_space_count * Page::kPageSize; |
| } |
| |
| size_t Heap::MaxOldGenerationSize(uint64_t physical_memory) { |
| size_t max_size = V8HeapTrait::kMaxSize; |
| // Finch experiment: Increase the heap size from 2GB to 4GB for 64-bit |
| // systems with physical memory bigger than 16GB. |
| constexpr bool x64_bit = Heap::kPointerMultiplier >= 2; |
| if (FLAG_huge_max_old_generation_size && x64_bit && |
| physical_memory / GB > 16) { |
| DCHECK_EQ(max_size / GB, 2); |
| max_size *= 2; |
| } |
| return max_size; |
| } |
| |
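// The young generation is two semi-spaces plus a new large object space whose
// capacity is kNewLargeObjectSpaceToSemiSpaceRatio semi-space sizes.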
| size_t Heap::YoungGenerationSizeFromSemiSpaceSize(size_t semi_space_size) { |
| return semi_space_size * (2 + kNewLargeObjectSpaceToSemiSpaceRatio); |
| } |
| |
| size_t Heap::SemiSpaceSizeFromYoungGenerationSize( |
| size_t young_generation_size) { |
| return young_generation_size / (2 + kNewLargeObjectSpaceToSemiSpaceRatio); |
| } |
| |
| size_t Heap::Capacity() { |
| if (!HasBeenSetUp()) return 0; |
| |
| return new_space_->Capacity() + OldGenerationCapacity(); |
| } |
| |
| size_t Heap::OldGenerationCapacity() { |
| if (!HasBeenSetUp()) return 0; |
| PagedSpaceIterator spaces(this); |
| size_t total = 0; |
| for (PagedSpace* space = spaces.Next(); space != nullptr; |
| space = spaces.Next()) { |
| total += space->Capacity(); |
| } |
| return total + lo_space_->SizeOfObjects() + code_lo_space_->SizeOfObjects(); |
| } |
| |
| size_t Heap::CommittedOldGenerationMemory() { |
| if (!HasBeenSetUp()) return 0; |
| |
| PagedSpaceIterator spaces(this); |
| size_t total = 0; |
| for (PagedSpace* space = spaces.Next(); space != nullptr; |
| space = spaces.Next()) { |
| total += space->CommittedMemory(); |
| } |
| return total + lo_space_->Size() + code_lo_space_->Size(); |
| } |
| |
| size_t Heap::CommittedMemoryOfUnmapper() { |
| if (!HasBeenSetUp()) return 0; |
| |
| return memory_allocator()->unmapper()->CommittedBufferedMemory(); |
| } |
| |
| size_t Heap::CommittedMemory() { |
| if (!HasBeenSetUp()) return 0; |
| |
| return new_space_->CommittedMemory() + new_lo_space_->Size() + |
| CommittedOldGenerationMemory(); |
| } |
| |
| |
| size_t Heap::CommittedPhysicalMemory() { |
| if (!HasBeenSetUp()) return 0; |
| |
| size_t total = 0; |
| for (SpaceIterator it(this); it.HasNext();) { |
| total += it.Next()->CommittedPhysicalMemory(); |
| } |
| |
| return total; |
| } |
| |
| size_t Heap::CommittedMemoryExecutable() { |
| if (!HasBeenSetUp()) return 0; |
| |
| return static_cast<size_t>(memory_allocator()->SizeExecutable()); |
| } |
| |
| |
| void Heap::UpdateMaximumCommitted() { |
| if (!HasBeenSetUp()) return; |
| |
| const size_t current_committed_memory = CommittedMemory(); |
| if (current_committed_memory > maximum_committed_) { |
| maximum_committed_ = current_committed_memory; |
| } |
| } |
| |
| size_t Heap::Available() { |
| if (!HasBeenSetUp()) return 0; |
| |
| size_t total = 0; |
| |
| for (SpaceIterator it(this); it.HasNext();) { |
| total += it.Next()->Available(); |
| } |
| |
| total += memory_allocator()->Available(); |
| return total; |
| } |
| |
| bool Heap::CanExpandOldGeneration(size_t size) { |
| if (force_oom_) return false; |
| if (OldGenerationCapacity() + size > max_old_generation_size_) return false; |
// OldGenerationCapacity does not account for compaction spaces used during
// evacuation. Ensure that expanding the old generation does not push the
// total allocated memory size over the maximum heap size.
| return memory_allocator()->Size() + size <= MaxReserved(); |
| } |
| |
| bool Heap::HasBeenSetUp() { |
| // We will always have a new space when the heap is set up. |
| return new_space_ != nullptr; |
| } |
| |
| |
| GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space, |
| const char** reason) { |
| // Is global GC requested? |
| if (space != NEW_SPACE && space != NEW_LO_SPACE) { |
| isolate_->counters()->gc_compactor_caused_by_request()->Increment(); |
| *reason = "GC in old space requested"; |
| return MARK_COMPACTOR; |
| } |
| |
| if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) { |
| *reason = "GC in old space forced by flags"; |
| return MARK_COMPACTOR; |
| } |
| |
| if (incremental_marking()->NeedsFinalization() && |
| AllocationLimitOvershotByLargeMargin()) { |
| *reason = "Incremental marking needs finalization"; |
| return MARK_COMPACTOR; |
| } |
| |
| // Over-estimate the new space size using capacity to allow some slack. |
| if (!CanExpandOldGeneration(new_space_->TotalCapacity() + |
| new_lo_space()->Size())) { |
| isolate_->counters() |
| ->gc_compactor_caused_by_oldspace_exhaustion() |
| ->Increment(); |
| *reason = "scavenge might not succeed"; |
| return MARK_COMPACTOR; |
| } |
| |
| // Default |
| *reason = nullptr; |
| return YoungGenerationCollector(); |
| } |
| |
| void Heap::SetGCState(HeapState state) { |
| gc_state_ = state; |
| } |
| |
| void Heap::PrintShortHeapStatistics() { |
| if (!FLAG_trace_gc_verbose) return; |
| PrintIsolate(isolate_, |
| "Memory allocator, used: %6zu KB," |
| " available: %6zu KB\n", |
| memory_allocator()->Size() / KB, |
| memory_allocator()->Available() / KB); |
| PrintIsolate(isolate_, |
| "Read-only space, used: %6zu KB" |
| ", available: %6zu KB" |
| ", committed: %6zu KB\n", |
| read_only_space_->Size() / KB, |
| read_only_space_->Available() / KB, |
| read_only_space_->CommittedMemory() / KB); |
| PrintIsolate(isolate_, |
| "New space, used: %6zu KB" |
| ", available: %6zu KB" |
| ", committed: %6zu KB\n", |
| new_space_->Size() / KB, new_space_->Available() / KB, |
| new_space_->CommittedMemory() / KB); |
| PrintIsolate(isolate_, |
| "New large object space, used: %6zu KB" |
| ", available: %6zu KB" |
| ", committed: %6zu KB\n", |
| new_lo_space_->SizeOfObjects() / KB, |
| new_lo_space_->Available() / KB, |
| new_lo_space_->CommittedMemory() / KB); |
| PrintIsolate(isolate_, |
| "Old space, used: %6zu KB" |
| ", available: %6zu KB" |
| ", committed: %6zu KB\n", |
| old_space_->SizeOfObjects() / KB, old_space_->Available() / KB, |
| old_space_->CommittedMemory() / KB); |
| PrintIsolate(isolate_, |
| "Code space, used: %6zu KB" |
| ", available: %6zu KB" |
| ", committed: %6zu KB\n", |
| code_space_->SizeOfObjects() / KB, code_space_->Available() / KB, |
| code_space_->CommittedMemory() / KB); |
| PrintIsolate(isolate_, |
| "Map space, used: %6zu KB" |
| ", available: %6zu KB" |
| ", committed: %6zu KB\n", |
| map_space_->SizeOfObjects() / KB, map_space_->Available() / KB, |
| map_space_->CommittedMemory() / KB); |
| PrintIsolate(isolate_, |
| "Large object space, used: %6zu KB" |
| ", available: %6zu KB" |
| ", committed: %6zu KB\n", |
| lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB, |
| lo_space_->CommittedMemory() / KB); |
| PrintIsolate(isolate_, |
| "Code large object space, used: %6zu KB" |
| ", available: %6zu KB" |
| ", committed: %6zu KB\n", |
| code_lo_space_->SizeOfObjects() / KB, |
| code_lo_space_->Available() / KB, |
| code_lo_space_->CommittedMemory() / KB); |
| ReadOnlySpace* const ro_space = read_only_space_; |
| PrintIsolate(isolate_, |
| "All spaces, used: %6zu KB" |
| ", available: %6zu KB" |
| ", committed: %6zu KB\n", |
| (this->SizeOfObjects() + ro_space->SizeOfObjects()) / KB, |
| (this->Available() + ro_space->Available()) / KB, |
| (this->CommittedMemory() + ro_space->CommittedMemory()) / KB); |
| PrintIsolate(isolate_, |
| "Unmapper buffering %zu chunks of committed: %6zu KB\n", |
| memory_allocator()->unmapper()->NumberOfCommittedChunks(), |
| CommittedMemoryOfUnmapper() / KB); |
| PrintIsolate(isolate_, "External memory reported: %6" PRId64 " KB\n", |
| isolate()->isolate_data()->external_memory_ / KB); |
| PrintIsolate(isolate_, "Backing store memory: %6zu KB\n", |
| backing_store_bytes_ / KB); |
| PrintIsolate(isolate_, "External memory global %zu KB\n", |
| external_memory_callback_() / KB); |
| PrintIsolate(isolate_, "Total time spent in GC : %.1f ms\n", |
| total_gc_time_ms_); |
| } |
| |
| void Heap::PrintFreeListsStats() { |
| DCHECK(FLAG_trace_gc_freelists); |
| |
| if (FLAG_trace_gc_freelists_verbose) { |
| PrintIsolate(isolate_, |
| "Freelists statistics per Page: " |
| "[category: length || total free bytes]\n"); |
| } |
| |
| std::vector<int> categories_lengths( |
| old_space()->free_list()->number_of_categories(), 0); |
| std::vector<size_t> categories_sums( |
| old_space()->free_list()->number_of_categories(), 0); |
| unsigned int pageCnt = 0; |
| |
// This loop computes freelist lengths and sums.
| // If FLAG_trace_gc_freelists_verbose is enabled, it also prints |
| // the stats of each FreeListCategory of each Page. |
| for (Page* page : *old_space()) { |
| std::ostringstream out_str; |
| |
| if (FLAG_trace_gc_freelists_verbose) { |
| out_str << "Page " << std::setw(4) << pageCnt; |
| } |
| |
| for (int cat = kFirstCategory; |
| cat <= old_space()->free_list()->last_category(); cat++) { |
| FreeListCategory* free_list = |
| page->free_list_category(static_cast<FreeListCategoryType>(cat)); |
| int length = free_list->FreeListLength(); |
| size_t sum = free_list->SumFreeList(); |
| |
| if (FLAG_trace_gc_freelists_verbose) { |
| out_str << "[" << cat << ": " << std::setw(4) << length << " || " |
| << std::setw(6) << sum << " ]" |
| << (cat == old_space()->free_list()->last_category() ? "\n" |
| : ", "); |
| } |
| categories_lengths[cat] += length; |
| categories_sums[cat] += sum; |
| } |
| |
| if (FLAG_trace_gc_freelists_verbose) { |
| PrintIsolate(isolate_, "%s", out_str.str().c_str()); |
| } |
| |
| pageCnt++; |
| } |
| |
| // Print statistics about old_space (pages, free/wasted/used memory...). |
| PrintIsolate( |
| isolate_, |
| "%d pages. Free space: %.1f MB (waste: %.2f). " |
| "Usage: %.1f/%.1f (MB) -> %.2f%%.\n", |
| pageCnt, static_cast<double>(old_space_->Available()) / MB, |
| static_cast<double>(old_space_->Waste()) / MB, |
| static_cast<double>(old_space_->Size()) / MB, |
| static_cast<double>(old_space_->Capacity()) / MB, |
| static_cast<double>(old_space_->Size()) / old_space_->Capacity() * 100); |
| |
| // Print global statistics of each FreeListCategory (length & sum). |
| PrintIsolate(isolate_, |
| "FreeLists global statistics: " |
| "[category: length || total free KB]\n"); |
| std::ostringstream out_str; |
| for (int cat = kFirstCategory; |
| cat <= old_space()->free_list()->last_category(); cat++) { |
| out_str << "[" << cat << ": " << categories_lengths[cat] << " || " |
| << std::fixed << std::setprecision(2) |
| << static_cast<double>(categories_sums[cat]) / KB << " KB]" |
| << (cat == old_space()->free_list()->last_category() ? "\n" : ", "); |
| } |
| PrintIsolate(isolate_, "%s", out_str.str().c_str()); |
| } |
| |
| void Heap::DumpJSONHeapStatistics(std::stringstream& stream) { |
| HeapStatistics stats; |
| reinterpret_cast<v8::Isolate*>(isolate())->GetHeapStatistics(&stats); |
| |
| // clang-format off |
| #define DICT(s) "{" << s << "}" |
| #define LIST(s) "[" << s << "]" |
| #define ESCAPE(s) "\"" << s << "\"" |
| #define MEMBER(s) ESCAPE(s) << ":" |
| |
| auto SpaceStatistics = [this](int space_index) { |
| HeapSpaceStatistics space_stats; |
| reinterpret_cast<v8::Isolate*>(isolate())->GetHeapSpaceStatistics( |
| &space_stats, space_index); |
| std::stringstream stream; |
| stream << DICT( |
| MEMBER("name") |
| << ESCAPE(GetSpaceName(static_cast<AllocationSpace>(space_index))) |
| << "," |
| MEMBER("size") << space_stats.space_size() << "," |
| MEMBER("used_size") << space_stats.space_used_size() << "," |
| MEMBER("available_size") << space_stats.space_available_size() << "," |
| MEMBER("physical_size") << space_stats.physical_space_size()); |
| return stream.str(); |
| }; |
| |
| stream << DICT( |
| MEMBER("isolate") << ESCAPE(reinterpret_cast<void*>(isolate())) << "," |
| MEMBER("id") << gc_count() << "," |
| MEMBER("time_ms") << isolate()->time_millis_since_init() << "," |
| MEMBER("total_heap_size") << stats.total_heap_size() << "," |
| MEMBER("total_heap_size_executable") |
| << stats.total_heap_size_executable() << "," |
| MEMBER("total_physical_size") << stats.total_physical_size() << "," |
| MEMBER("total_available_size") << stats.total_available_size() << "," |
| MEMBER("used_heap_size") << stats.used_heap_size() << "," |
| MEMBER("heap_size_limit") << stats.heap_size_limit() << "," |
| MEMBER("malloced_memory") << stats.malloced_memory() << "," |
| MEMBER("external_memory") << stats.external_memory() << "," |
| MEMBER("peak_malloced_memory") << stats.peak_malloced_memory() << "," |
| MEMBER("spaces") << LIST( |
| SpaceStatistics(RO_SPACE) << "," << |
| SpaceStatistics(NEW_SPACE) << "," << |
| SpaceStatistics(OLD_SPACE) << "," << |
| SpaceStatistics(CODE_SPACE) << "," << |
| SpaceStatistics(MAP_SPACE) << "," << |
| SpaceStatistics(LO_SPACE) << "," << |
| SpaceStatistics(CODE_LO_SPACE) << "," << |
| SpaceStatistics(NEW_LO_SPACE))); |
| |
| #undef DICT |
| #undef LIST |
| #undef ESCAPE |
| #undef MEMBER |
| // clang-format on |
| } |
| |
| void Heap::ReportStatisticsAfterGC() { |
| for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount); |
| ++i) { |
| int count = deferred_counters_[i]; |
| deferred_counters_[i] = 0; |
| while (count > 0) { |
| count--; |
| isolate()->CountUsage(static_cast<v8::Isolate::UseCounterFeature>(i)); |
| } |
| } |
| } |
| |
| void Heap::AddHeapObjectAllocationTracker( |
| HeapObjectAllocationTracker* tracker) { |
| if (allocation_trackers_.empty()) DisableInlineAllocation(); |
| allocation_trackers_.push_back(tracker); |
| } |
| |
| void Heap::RemoveHeapObjectAllocationTracker( |
| HeapObjectAllocationTracker* tracker) { |
| allocation_trackers_.erase(std::remove(allocation_trackers_.begin(), |
| allocation_trackers_.end(), tracker), |
| allocation_trackers_.end()); |
| if (allocation_trackers_.empty()) EnableInlineAllocation(); |
| } |
| |
| void Heap::AddRetainingPathTarget(Handle<HeapObject> object, |
| RetainingPathOption option) { |
| if (!FLAG_track_retaining_path) { |
| PrintF("Retaining path tracking requires --track-retaining-path\n"); |
| } else { |
| Handle<WeakArrayList> array(retaining_path_targets(), isolate()); |
| int index = array->length(); |
| array = WeakArrayList::AddToEnd(isolate(), array, |
| MaybeObjectHandle::Weak(object)); |
| set_retaining_path_targets(*array); |
| DCHECK_EQ(array->length(), index + 1); |
| retaining_path_target_option_[index] = option; |
| } |
| } |
| |
| bool Heap::IsRetainingPathTarget(HeapObject object, |
| RetainingPathOption* option) { |
| WeakArrayList targets = retaining_path_targets(); |
| int length = targets.length(); |
| MaybeObject object_to_check = HeapObjectReference::Weak(object); |
| for (int i = 0; i < length; i++) { |
| MaybeObject target = targets.Get(i); |
| DCHECK(target->IsWeakOrCleared()); |
| if (target == object_to_check) { |
| DCHECK(retaining_path_target_option_.count(i)); |
| *option = retaining_path_target_option_[i]; |
| return true; |
| } |
| } |
| return false; |
| } |
| |
| void Heap::PrintRetainingPath(HeapObject target, RetainingPathOption option) { |
| PrintF("\n\n\n"); |
| PrintF("#################################################\n"); |
| PrintF("Retaining path for %p:\n", reinterpret_cast<void*>(target.ptr())); |
| HeapObject object = target; |
| std::vector<std::pair<HeapObject, bool>> retaining_path; |
| Root root = Root::kUnknown; |
| bool ephemeron = false; |
| while (true) { |
| retaining_path.push_back(std::make_pair(object, ephemeron)); |
| if (option == RetainingPathOption::kTrackEphemeronPath && |
| ephemeron_retainer_.count(object)) { |
| object = ephemeron_retainer_[object]; |
| ephemeron = true; |
| } else if (retainer_.count(object)) { |
| object = retainer_[object]; |
| ephemeron = false; |
| } else { |
| if (retaining_root_.count(object)) { |
| root = retaining_root_[object]; |
| } |
| break; |
| } |
| } |
| int distance = static_cast<int>(retaining_path.size()); |
| for (auto node : retaining_path) { |
| HeapObject object = node.first; |
| bool ephemeron = node.second; |
| PrintF("\n"); |
| PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n"); |
| PrintF("Distance from root %d%s: ", distance, |
| ephemeron ? " (ephemeron)" : ""); |
| object.ShortPrint(); |
| PrintF("\n"); |
| #ifdef OBJECT_PRINT |
| object.Print(); |
| PrintF("\n"); |
| #endif |
| --distance; |
| } |
| PrintF("\n"); |
| PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n"); |
| PrintF("Root: %s\n", RootVisitor::RootName(root)); |
| PrintF("-------------------------------------------------\n"); |
| } |
| |
| void Heap::AddRetainer(HeapObject retainer, HeapObject object) { |
| if (retainer_.count(object)) return; |
| retainer_[object] = retainer; |
| RetainingPathOption option = RetainingPathOption::kDefault; |
| if (IsRetainingPathTarget(object, &option)) { |
| // Check if the retaining path was already printed in |
| // AddEphemeronRetainer(). |
| if (ephemeron_retainer_.count(object) == 0 || |
| option == RetainingPathOption::kDefault) { |
| PrintRetainingPath(object, option); |
| } |
| } |
| } |
| |
| void Heap::AddEphemeronRetainer(HeapObject retainer, HeapObject object) { |
| if (ephemeron_retainer_.count(object)) return; |
| ephemeron_retainer_[object] = retainer; |
| RetainingPathOption option = RetainingPathOption::kDefault; |
| if (IsRetainingPathTarget(object, &option) && |
| option == RetainingPathOption::kTrackEphemeronPath) { |
| // Check if the retaining path was already printed in AddRetainer(). |
| if (retainer_.count(object) == 0) { |
| PrintRetainingPath(object, option); |
| } |
| } |
| } |
| |
| void Heap::AddRetainingRoot(Root root, HeapObject object) { |
| if (retaining_root_.count(object)) return; |
| retaining_root_[object] = root; |
| RetainingPathOption option = RetainingPathOption::kDefault; |
| if (IsRetainingPathTarget(object, &option)) { |
| PrintRetainingPath(object, option); |
| } |
| } |
| |
| void Heap::IncrementDeferredCount(v8::Isolate::UseCounterFeature feature) { |
| deferred_counters_[feature]++; |
| } |
| |
| bool Heap::UncommitFromSpace() { return new_space_->UncommitFromSpace(); } |
| |
| void Heap::GarbageCollectionPrologue() { |
| TRACE_GC(tracer(), GCTracer::Scope::HEAP_PROLOGUE); |
| { |
| AllowHeapAllocation for_the_first_part_of_prologue; |
| gc_count_++; |
| |
| #ifdef VERIFY_HEAP |
| if (FLAG_verify_heap) { |
| Verify(); |
| } |
| #endif |
| } |
| |
| // Reset GC statistics. |
| promoted_objects_size_ = 0; |
| previous_semi_space_copied_object_size_ = semi_space_copied_object_size_; |
| semi_space_copied_object_size_ = 0; |
| nodes_died_in_new_space_ = 0; |
| nodes_copied_in_new_space_ = 0; |
| nodes_promoted_ = 0; |
| |
| UpdateMaximumCommitted(); |
| |
| #ifdef DEBUG |
| DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC); |
| |
| if (FLAG_gc_verbose) Print(); |
| #endif // DEBUG |
| |
| if (new_space_->IsAtMaximumCapacity()) { |
| maximum_size_scavenges_++; |
| } else { |
| maximum_size_scavenges_ = 0; |
| } |
| CheckNewSpaceExpansionCriteria(); |
| UpdateNewSpaceAllocationCounter(); |
| if (FLAG_track_retaining_path) { |
| retainer_.clear(); |
| ephemeron_retainer_.clear(); |
| retaining_root_.clear(); |
| } |
| memory_allocator()->unmapper()->PrepareForGC(); |
| } |
| |
| size_t Heap::SizeOfObjects() { |
| size_t total = 0; |
| |
| for (SpaceIterator it(this); it.HasNext();) { |
| total += it.Next()->SizeOfObjects(); |
| } |
| return total; |
| } |
| |
| // static |
| const char* Heap::GetSpaceName(AllocationSpace space) { |
| switch (space) { |
| case NEW_SPACE: |
| return "new_space"; |
| case OLD_SPACE: |
| return "old_space"; |
| case MAP_SPACE: |
| return "map_space"; |
| case CODE_SPACE: |
| return "code_space"; |
| case LO_SPACE: |
| return "large_object_space"; |
| case NEW_LO_SPACE: |
| return "new_large_object_space"; |
| case CODE_LO_SPACE: |
| return "code_large_object_space"; |
| case RO_SPACE: |
| return "read_only_space"; |
| } |
| UNREACHABLE(); |
| } |
| |
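// Merges thread-local pretenuring feedback into the global feedback map,
// following forwarding addresses of moved allocation sites and skipping
// entries that are no longer valid (or zombie) allocation sites.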
| void Heap::MergeAllocationSitePretenuringFeedback( |
| const PretenuringFeedbackMap& local_pretenuring_feedback) { |
| AllocationSite site; |
| for (auto& site_and_count : local_pretenuring_feedback) { |
| site = site_and_count.first; |
| MapWord map_word = site_and_count.first.map_word(); |
| if (map_word.IsForwardingAddress()) { |
| site = AllocationSite::cast(map_word.ToForwardingAddress()); |
| } |
| |
// We have not validated the allocation site yet, since we did not
// dereference the site while collecting the feedback.
| // This is an inlined check of AllocationMemento::IsValid. |
| if (!site.IsAllocationSite() || site.IsZombie()) continue; |
| |
| const int value = static_cast<int>(site_and_count.second); |
| DCHECK_LT(0, value); |
| if (site.IncrementMementoFoundCount(value)) { |
| // For sites in the global map the count is accessed through the site. |
| global_pretenuring_feedback_.insert(std::make_pair(site, 0)); |
| } |
| } |
| } |
| |
| void Heap::AddAllocationObserversToAllSpaces( |
| AllocationObserver* observer, AllocationObserver* new_space_observer) { |
| DCHECK(observer && new_space_observer); |
| |
| for (SpaceIterator it(this); it.HasNext();) { |
| Space* space = it.Next(); |
| if (space == new_space()) { |
| space->AddAllocationObserver(new_space_observer); |
| } else { |
| space->AddAllocationObserver(observer); |
| } |
| } |
| } |
| |
| void Heap::RemoveAllocationObserversFromAllSpaces( |
| AllocationObserver* observer, AllocationObserver* new_space_observer) { |
| DCHECK(observer && new_space_observer); |
| |
| for (SpaceIterator it(this); it.HasNext();) { |
| Space* space = it.Next(); |
| if (space == new_space()) { |
| space->RemoveAllocationObserver(new_space_observer); |
| } else { |
| space->RemoveAllocationObserver(observer); |
| } |
| } |
| } |
| |
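// Moves all store buffer entries into the remembered set and keeps the store
// buffer in IN_GC mode for the duration of the scope; the buffer must be
// empty again when the scope is left.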
| class Heap::SkipStoreBufferScope { |
| public: |
| explicit SkipStoreBufferScope(StoreBuffer* store_buffer) |
| : store_buffer_(store_buffer) { |
| store_buffer_->MoveAllEntriesToRememberedSet(); |
| store_buffer_->SetMode(StoreBuffer::IN_GC); |
| } |
| |
| ~SkipStoreBufferScope() { |
| DCHECK(store_buffer_->Empty()); |
| store_buffer_->SetMode(StoreBuffer::NOT_IN_GC); |
| } |
| |
| private: |
| StoreBuffer* store_buffer_; |
| }; |
| |
| namespace { |
| inline bool MakePretenureDecision( |
| AllocationSite site, AllocationSite::PretenureDecision current_decision, |
| double ratio, bool maximum_size_scavenge) { |
| // Here we just allow state transitions from undecided or maybe tenure |
| // to don't tenure, maybe tenure, or tenure. |
| if ((current_decision == AllocationSite::kUndecided || |
| current_decision == AllocationSite::kMaybeTenure)) { |
| if (ratio >= AllocationSite::kPretenureRatio) { |
// We only transition into the tenure state when the semi-space was at
// maximum capacity.
| if (maximum_size_scavenge) { |
| site.set_deopt_dependent_code(true); |
| site.set_pretenure_decision(AllocationSite::kTenure); |
| // Currently we just need to deopt when we make a state transition to |
| // tenure. |
| return true; |
| } |
| site.set_pretenure_decision(AllocationSite::kMaybeTenure); |
| } else { |
| site.set_pretenure_decision(AllocationSite::kDontTenure); |
| } |
| } |
| return false; |
| } |
| |
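// Updates the pretenuring decision of |site| based on the ratio of found to
// created mementos. Returns true if code depending on the site has to be
// deoptimized.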
| inline bool DigestPretenuringFeedback(Isolate* isolate, AllocationSite site, |
| bool maximum_size_scavenge) { |
| bool deopt = false; |
| int create_count = site.memento_create_count(); |
| int found_count = site.memento_found_count(); |
| bool minimum_mementos_created = |
| create_count >= AllocationSite::kPretenureMinimumCreated; |
| double ratio = minimum_mementos_created || FLAG_trace_pretenuring_statistics |
| ? static_cast<double>(found_count) / create_count |
| : 0.0; |
| AllocationSite::PretenureDecision current_decision = |
| site.pretenure_decision(); |
| |
| if (minimum_mementos_created) { |
| deopt = MakePretenureDecision(site, current_decision, ratio, |
| maximum_size_scavenge); |
| } |
| |
| if (FLAG_trace_pretenuring_statistics) { |
| PrintIsolate(isolate, |
| "pretenuring: AllocationSite(%p): (created, found, ratio) " |
| "(%d, %d, %f) %s => %s\n", |
| reinterpret_cast<void*>(site.ptr()), create_count, found_count, |
| ratio, site.PretenureDecisionName(current_decision), |
| site.PretenureDecisionName(site.pretenure_decision())); |
| } |
| |
| // Clear feedback calculation fields until the next gc. |
| site.set_memento_found_count(0); |
| site.set_memento_create_count(0); |
| return deopt; |
| } |
| } // namespace |
| |
| void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite site) { |
| global_pretenuring_feedback_.erase(site); |
| } |
| |
| bool Heap::DeoptMaybeTenuredAllocationSites() { |
| return new_space_->IsAtMaximumCapacity() && maximum_size_scavenges_ == 0; |
| } |
| |
| void Heap::ProcessPretenuringFeedback() { |
| bool trigger_deoptimization = false; |
| if (FLAG_allocation_site_pretenuring) { |
| int tenure_decisions = 0; |
| int dont_tenure_decisions = 0; |
| int allocation_mementos_found = 0; |
| int allocation_sites = 0; |
| int active_allocation_sites = 0; |
| |
| AllocationSite site; |
| |
| // Step 1: Digest feedback for recorded allocation sites. |
| bool maximum_size_scavenge = MaximumSizeScavenge(); |
| for (auto& site_and_count : global_pretenuring_feedback_) { |
| allocation_sites++; |
| site = site_and_count.first; |
// The count is always accessed through the site.
| DCHECK_EQ(0, site_and_count.second); |
| int found_count = site.memento_found_count(); |
| // An entry in the storage does not imply that the count is > 0 because |
| // allocation sites might have been reset due to too many objects dying |
| // in old space. |
| if (found_count > 0) { |
| DCHECK(site.IsAllocationSite()); |
| active_allocation_sites++; |
| allocation_mementos_found += found_count; |
| if (DigestPretenuringFeedback(isolate_, site, maximum_size_scavenge)) { |
| trigger_deoptimization = true; |
| } |
| if (site.GetAllocationType() == AllocationType::kOld) { |
| tenure_decisions++; |
| } else { |
| dont_tenure_decisions++; |
| } |
| } |
| } |
| |
| // Step 2: Deopt maybe tenured allocation sites if necessary. |
| bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites(); |
| if (deopt_maybe_tenured) { |
| ForeachAllocationSite( |
| allocation_sites_list(), |
| [&allocation_sites, &trigger_deoptimization](AllocationSite site) { |
| DCHECK(site.IsAllocationSite()); |
| allocation_sites++; |
| if (site.IsMaybeTenure()) { |
| site.set_deopt_dependent_code(true); |
| trigger_deoptimization = true; |
| } |
| }); |
| } |
| |
| if (trigger_deoptimization) { |
| isolate_->stack_guard()->RequestDeoptMarkedAllocationSites(); |
| } |
| |
| if (FLAG_trace_pretenuring_statistics && |
| (allocation_mementos_found > 0 || tenure_decisions > 0 || |
| dont_tenure_decisions > 0)) { |
| PrintIsolate(isolate(), |
| "pretenuring: deopt_maybe_tenured=%d visited_sites=%d " |
| "active_sites=%d " |
| "mementos=%d tenured=%d not_tenured=%d\n", |
| deopt_maybe_tenured ? 1 : 0, allocation_sites, |
| active_allocation_sites, allocation_mementos_found, |
| tenure_decisions, dont_tenure_decisions); |
| } |
| |
| global_pretenuring_feedback_.clear(); |
| global_pretenuring_feedback_.reserve(kInitialFeedbackCapacity); |
| } |
| } |
| |
| void Heap::InvalidateCodeDeoptimizationData(Code code) { |
| MemoryChunk* chunk = MemoryChunk::FromHeapObject(code); |
| CodePageMemoryModificationScope modification_scope(chunk); |
| code.set_deoptimization_data(ReadOnlyRoots(this).empty_fixed_array()); |
| } |
| |
| void Heap::DeoptMarkedAllocationSites() { |
| // TODO(hpayer): If iterating over the allocation sites list becomes a |
| // performance issue, use a cache data structure in heap instead. |
| |
| ForeachAllocationSite(allocation_sites_list(), [this](AllocationSite site) { |
| if (site.deopt_dependent_code()) { |
| site.dependent_code().MarkCodeForDeoptimization( |
| isolate_, DependentCode::kAllocationSiteTenuringChangedGroup); |
| site.set_deopt_dependent_code(false); |
| } |
| }); |
| |
| Deoptimizer::DeoptimizeMarkedCode(isolate_); |
| } |
| |
| |
| void Heap::GarbageCollectionEpilogue() { |
| TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE); |
| if (Heap::ShouldZapGarbage() || FLAG_clear_free_memory) { |
| ZapFromSpace(); |
| } |
| |
| #ifdef VERIFY_HEAP |
| if (FLAG_verify_heap) { |
| Verify(); |
| } |
| #endif |
| |
| AllowHeapAllocation for_the_rest_of_the_epilogue; |
| |
| #ifdef DEBUG |
| if (FLAG_print_global_handles) isolate_->global_handles()->Print(); |
| if (FLAG_print_handles) PrintHandles(); |
| if (FLAG_gc_verbose) Print(); |
| if (FLAG_code_stats) ReportCodeStatistics("After GC"); |
| if (FLAG_check_handle_count) CheckHandleCount(); |
| #endif |
| |
| UpdateMaximumCommitted(); |
| |
| isolate_->counters()->alive_after_last_gc()->Set( |
| static_cast<int>(SizeOfObjects())); |
| |
| isolate_->counters()->string_table_capacity()->Set(string_table().Capacity()); |
| isolate_->counters()->number_of_symbols()->Set( |
| string_table().NumberOfElements()); |
| |
| if (CommittedMemory() > 0) { |
| isolate_->counters()->external_fragmentation_total()->AddSample( |
| static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory())); |
| |
| isolate_->counters()->heap_sample_total_committed()->AddSample( |
| static_cast<int>(CommittedMemory() / KB)); |
| isolate_->counters()->heap_sample_total_used()->AddSample( |
| static_cast<int>(SizeOfObjects() / KB)); |
| isolate_->counters()->heap_sample_map_space_committed()->AddSample( |
| static_cast<int>(map_space()->CommittedMemory() / KB)); |
| isolate_->counters()->heap_sample_code_space_committed()->AddSample( |
| static_cast<int>(code_space()->CommittedMemory() / KB)); |
| |
| isolate_->counters()->heap_sample_maximum_committed()->AddSample( |
| static_cast<int>(MaximumCommittedMemory() / KB)); |
| } |
| |
| #define UPDATE_COUNTERS_FOR_SPACE(space) \ |
| isolate_->counters()->space##_bytes_available()->Set( \ |
| static_cast<int>(space()->Available())); \ |
| isolate_->counters()->space##_bytes_committed()->Set( \ |
| static_cast<int>(space()->CommittedMemory())); \ |
| isolate_->counters()->space##_bytes_used()->Set( \ |
| static_cast<int>(space()->SizeOfObjects())); |
| #define UPDATE_FRAGMENTATION_FOR_SPACE(space) \ |
| if (space()->CommittedMemory() > 0) { \ |
| isolate_->counters()->external_fragmentation_##space()->AddSample( \ |
| static_cast<int>(100 - \ |
| (space()->SizeOfObjects() * 100.0) / \ |
| space()->CommittedMemory())); \ |
| } |
| #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \ |
| UPDATE_COUNTERS_FOR_SPACE(space) \ |
| UPDATE_FRAGMENTATION_FOR_SPACE(space) |
| |
| UPDATE_COUNTERS_FOR_SPACE(new_space) |
| UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_space) |
| UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space) |
| UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space) |
| UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space) |
| #undef UPDATE_COUNTERS_FOR_SPACE |
| #undef UPDATE_FRAGMENTATION_FOR_SPACE |
| #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE |
| |
| #ifdef DEBUG |
| ReportStatisticsAfterGC(); |
| #endif // DEBUG |
| |
| last_gc_time_ = MonotonicallyIncreasingTimeInMs(); |
| |
| { |
| TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_REDUCE_NEW_SPACE); |
| ReduceNewSpaceSize(); |
| } |
| |
| if (FLAG_harmony_weak_refs) { |
| // TODO(marja): (spec): The exact condition on when to schedule the cleanup |
| // task is unclear. This version schedules the cleanup task for a |
| // JSFinalizationGroup whenever the GC has discovered new dirty WeakCells |
| // for it (at that point it might have leftover dirty WeakCells since an |
| // earlier invocation of the cleanup function didn't iterate through |
| // them). See https://github.com/tc39/proposal-weakrefs/issues/34 |
| HandleScope handle_scope(isolate()); |
| while (!isolate()->heap()->dirty_js_finalization_groups().IsUndefined( |
| isolate())) { |
| // Enqueue one microtask per JSFinalizationGroup. |
| Handle<JSFinalizationGroup> finalization_group( |
| JSFinalizationGroup::cast( |
| isolate()->heap()->dirty_js_finalization_groups()), |
| isolate()); |
| isolate()->heap()->set_dirty_js_finalization_groups( |
| finalization_group->next()); |
| finalization_group->set_next(ReadOnlyRoots(isolate()).undefined_value()); |
| Handle<NativeContext> context(finalization_group->native_context(), |
| isolate()); |
| // GC has no native context, but we use the creation context of the |
// JSFinalizationGroup for the EnqueueTask operation. This is consistent
| // with the Promise implementation, assuming the JSFinalizationGroup's |
| // creation context is the "caller's context" in promise functions. An |
| // alternative would be to use the native context of the cleanup |
| // function. This difference shouldn't be observable from JavaScript, |
| // since we enter the native context of the cleanup function before |
| // calling it. TODO(marja): Revisit when the spec clarifies this. See also |
| // https://github.com/tc39/proposal-weakrefs/issues/38 . |
| Handle<FinalizationGroupCleanupJobTask> task = |
| isolate()->factory()->NewFinalizationGroupCleanupJobTask( |
| finalization_group); |
| MicrotaskQueue* microtask_queue = context->microtask_queue(); |
| if (microtask_queue) microtask_queue->EnqueueMicrotask(*task); |
| } |
| } |
| } |
| |
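// Tracks the nesting depth of GC callback invocations; CheckReenter() is true
// only for the outermost scope.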
| class GCCallbacksScope { |
| public: |
| explicit GCCallbacksScope(Heap* heap) : heap_(heap) { |
| heap_->gc_callbacks_depth_++; |
| } |
| ~GCCallbacksScope() { heap_->gc_callbacks_depth_--; } |
| |
| bool CheckReenter() { return heap_->gc_callbacks_depth_ == 1; } |
| |
| private: |
| Heap* heap_; |
| }; |
| |
| |
| void Heap::HandleGCRequest() { |
| if (FLAG_stress_scavenge > 0 && stress_scavenge_observer_->HasRequestedGC()) { |
| CollectAllGarbage(NEW_SPACE, GarbageCollectionReason::kTesting); |
| stress_scavenge_observer_->RequestedGCDone(); |
| } else if (HighMemoryPressure()) { |
| incremental_marking()->reset_request_type(); |
| CheckMemoryPressure(); |
| } else if (incremental_marking()->request_type() == |
| IncrementalMarking::COMPLETE_MARKING) { |
| incremental_marking()->reset_request_type(); |
| CollectAllGarbage(current_gc_flags_, |
| GarbageCollectionReason::kFinalizeMarkingViaStackGuard, |
| current_gc_callback_flags_); |
| } else if (incremental_marking()->request_type() == |
| IncrementalMarking::FINALIZATION && |
| incremental_marking()->IsMarking() && |
| !incremental_marking()->finalize_marking_completed()) { |
| incremental_marking()->reset_request_type(); |
| FinalizeIncrementalMarkingIncrementally( |
| GarbageCollectionReason::kFinalizeMarkingViaStackGuard); |
| } |
| } |
| |
| |
| void Heap::ScheduleIdleScavengeIfNeeded(int bytes_allocated) { |
| DCHECK(FLAG_idle_time_scavenge); |
| DCHECK_NOT_NULL(scavenge_job_); |
| scavenge_job_->ScheduleIdleTaskIfNeeded(this, bytes_allocated); |
| } |
| |
| TimedHistogram* Heap::GCTypePriorityTimer(GarbageCollector collector) { |
| if (IsYoungGenerationCollector(collector)) { |
| if (isolate_->IsIsolateInBackground()) { |
| return isolate_->counters()->gc_scavenger_background(); |
| } |
| return isolate_->counters()->gc_scavenger_foreground(); |
| } else { |
| if (!incremental_marking()->IsStopped()) { |
| if (ShouldReduceMemory()) { |
| if (isolate_->IsIsolateInBackground()) { |
| return isolate_->counters()->gc_finalize_reduce_memory_background(); |
| } |
| return isolate_->counters()->gc_finalize_reduce_memory_foreground(); |
| } else { |
| if (isolate_->IsIsolateInBackground()) { |
| return isolate_->counters()->gc_finalize_background(); |
| } |
| return isolate_->counters()->gc_finalize_foreground(); |
| } |
| } else { |
| if (isolate_->IsIsolateInBackground()) { |
| return isolate_->counters()->gc_compactor_background(); |
| } |
| return isolate_->counters()->gc_compactor_foreground(); |
| } |
| } |
| } |
| |
| TimedHistogram* Heap::GCTypeTimer(GarbageCollector collector) { |
| if (IsYoungGenerationCollector(collector)) { |
| return isolate_->counters()->gc_scavenger(); |
| } else { |
| if (!incremental_marking()->IsStopped()) { |
| if (ShouldReduceMemory()) { |
| return isolate_->counters()->gc_finalize_reduce_memory(); |
| } else { |
| return isolate_->counters()->gc_finalize(); |
| } |
| } else { |
| return isolate_->counters()->gc_compactor(); |
| } |
| } |
| } |
| |
| void Heap::CollectAllGarbage(int flags, GarbageCollectionReason gc_reason, |
| const v8::GCCallbackFlags gc_callback_flags) { |
| // Since we are ignoring the return value, the exact choice of space does |
| // not matter, so long as we do not specify NEW_SPACE, which would not |
| // cause a full GC. |
| set_current_gc_flags(flags); |
| CollectGarbage(OLD_SPACE, gc_reason, gc_callback_flags); |
| set_current_gc_flags(kNoGCFlags); |
| } |
| |
| namespace { |
| |
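// Compares two heap objects of the same |size| slot by slot; returns zero iff
// all tagged slots are equal, otherwise the difference of the first
// mismatching slots.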
| intptr_t CompareWords(int size, HeapObject a, HeapObject b) { |
| int slots = size / kTaggedSize; |
| DCHECK_EQ(a.Size(), size); |
| DCHECK_EQ(b.Size(), size); |
| Tagged_t* slot_a = reinterpret_cast<Tagged_t*>(a.address()); |
| Tagged_t* slot_b = reinterpret_cast<Tagged_t*>(b.address()); |
| for (int i = 0; i < slots; i++) { |
| if (*slot_a != *slot_b) { |
| return *slot_a - *slot_b; |
| } |
| slot_a++; |
| slot_b++; |
| } |
| return 0; |
| } |
| |
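// Sorts |objects| of the given |size| by content, groups identical objects,
// and prints every group whose duplicated bytes reach
// FLAG_trace_duplicate_threshold_kb.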
| void ReportDuplicates(int size, std::vector<HeapObject>* objects) { |
| if (objects->size() == 0) return; |
| |
| sort(objects->begin(), objects->end(), [size](HeapObject a, HeapObject b) { |
| intptr_t c = CompareWords(size, a, b); |
| if (c != 0) return c < 0; |
| return a < b; |
| }); |
| |
| std::vector<std::pair<int, HeapObject>> duplicates; |
| HeapObject current = (*objects)[0]; |
| int count = 1; |
| for (size_t i = 1; i < objects->size(); i++) { |
| if (CompareWords(size, current, (*objects)[i]) == 0) { |
| count++; |
| } else { |
| if (count > 1) { |
| duplicates.push_back(std::make_pair(count - 1, current)); |
| } |
| count = 1; |
| current = (*objects)[i]; |
| } |
| } |
| if (count > 1) { |
| duplicates.push_back(std::make_pair(count - 1, current)); |
| } |
| |
| int threshold = FLAG_trace_duplicate_threshold_kb * KB; |
| |
| sort(duplicates.begin(), duplicates.end()); |
| for (auto it = duplicates.rbegin(); it != duplicates.rend(); ++it) { |
| int duplicate_bytes = it->first * size; |
| if (duplicate_bytes < threshold) break; |
| PrintF("%d duplicates of size %d each (%dKB)\n", it->first, size, |
| duplicate_bytes / KB); |
| PrintF("Sample object: "); |
| it->second.Print(); |
| PrintF("============================\n"); |
| } |
| } |
| } // anonymous namespace |
| |
| void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) { |
| // Since we are ignoring the return value, the exact choice of space does |
| // not matter, so long as we do not specify NEW_SPACE, which would not |
| // cause a full GC. |
| // Major GC would invoke weak handle callbacks on weakly reachable |
| // handles, but won't collect weakly reachable objects until next |
| // major GC. Therefore if we collect aggressively and weak handle callback |
| // has been invoked, we rerun major GC to release objects which become |
| // garbage. |
| // Note: as weak callbacks can execute arbitrary code, we cannot |
// hope that eventually there will be no weak callback invocations.
| // Therefore stop recollecting after several attempts. |
| if (gc_reason == GarbageCollectionReason::kLastResort) { |
| InvokeNearHeapLimitCallback(); |
| } |
| RuntimeCallTimerScope runtime_timer( |
| isolate(), RuntimeCallCounterId::kGC_Custom_AllAvailableGarbage); |
| |
| // The optimizing compiler may be unnecessarily holding on to memory. |
| isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock); |
| isolate()->ClearSerializerData(); |
| set_current_gc_flags(kReduceMemoryFootprintMask); |
| isolate_->compilation_cache()->Clear(); |
| const int kMaxNumberOfAttempts = 7; |
| const int kMinNumberOfAttempts = 2; |
| const v8::GCCallbackFlags callback_flags = |
| gc_reason == GarbageCollectionReason::kLowMemoryNotification |
| ? v8::kGCCallbackFlagForced |
| : v8::kGCCallbackFlagCollectAllAvailableGarbage; |
| for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) { |
| if (!CollectGarbage(OLD_SPACE, gc_reason, callback_flags) && |
| attempt + 1 >= kMinNumberOfAttempts) { |
| break; |
| } |
| } |
| |
| set_current_gc_flags(kNoGCFlags); |
| new_space_->Shrink(); |
| new_lo_space_->SetCapacity(new_space_->Capacity() * |
| kNewLargeObjectSpaceToSemiSpaceRatio); |
| UncommitFromSpace(); |
| EagerlyFreeExternalMemory(); |
| |
| if (FLAG_trace_duplicate_threshold_kb) { |
| std::map<int, std::vector<HeapObject>> objects_by_size; |
| PagedSpaceIterator spaces(this); |
| for (PagedSpace* space = spaces.Next(); space != nullptr; |
| space = spaces.Next()) { |
| PagedSpaceObjectIterator it(space); |
| for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) { |
| objects_by_size[obj.Size()].push_back(obj); |
| } |
| } |
| { |
| LargeObjectSpaceObjectIterator it(lo_space()); |
| for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) { |
| objects_by_size[obj.Size()].push_back(obj); |
| } |
| } |
| for (auto it = objects_by_size.rbegin(); it != objects_by_size.rend(); |
| ++it) { |
| ReportDuplicates(it->first, &it->second); |
| } |
| } |
| } |
| |
| void Heap::PreciseCollectAllGarbage(int flags, |
| GarbageCollectionReason gc_reason, |
| const GCCallbackFlags gc_callback_flags) { |
| if (!incremental_marking()->IsStopped()) { |
| FinalizeIncrementalMarkingAtomically(gc_reason); |
| } |
| CollectAllGarbage(flags, gc_reason, gc_callback_flags); |
| } |
| |
| void Heap::ReportExternalMemoryPressure() { |
| const GCCallbackFlags kGCCallbackFlagsForExternalMemory = |
| static_cast<GCCallbackFlags>( |
| kGCCallbackFlagSynchronousPhantomCallbackProcessing | |
| kGCCallbackFlagCollectAllExternalMemory); |
| if (isolate()->isolate_data()->external_memory_ > |
| (isolate()->isolate_data()->external_memory_at_last_mark_compact_ + |
| external_memory_hard_limit())) { |
| CollectAllGarbage( |
| kReduceMemoryFootprintMask, |
| GarbageCollectionReason::kExternalMemoryPressure, |
| static_cast<GCCallbackFlags>(kGCCallbackFlagCollectAllAvailableGarbage | |
| kGCCallbackFlagsForExternalMemory)); |
| return; |
| } |
| if (incremental_marking()->IsStopped()) { |
| if (incremental_marking()->CanBeActivated()) { |
| StartIncrementalMarking(GCFlagsForIncrementalMarking(), |
| GarbageCollectionReason::kExternalMemoryPressure, |
| kGCCallbackFlagsForExternalMemory); |
| } else { |
| CollectAllGarbage(i::Heap::kNoGCFlags, |
| GarbageCollectionReason::kExternalMemoryPressure, |
| kGCCallbackFlagsForExternalMemory); |
| } |
| } else { |
// Incremental marking is turned on and has already been started.
| const double kMinStepSize = 5; |
| const double kMaxStepSize = 10; |
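// Scale the marking step duration with the ratio of external memory to its
// limit, clamped to [kMinStepSize, kMaxStepSize] milliseconds.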
| const double ms_step = Min( |
| kMaxStepSize, |
| Max(kMinStepSize, |
| static_cast<double>(isolate()->isolate_data()->external_memory_) / |
| isolate()->isolate_data()->external_memory_limit_ * |
| kMinStepSize)); |
| const double deadline = MonotonicallyIncreasingTimeInMs() + ms_step; |
| // Extend the gc callback flags with external memory flags. |
| current_gc_callback_flags_ = static_cast<GCCallbackFlags>( |
| current_gc_callback_flags_ | kGCCallbackFlagsForExternalMemory); |
| incremental_marking()->AdvanceWithDeadline( |
| deadline, IncrementalMarking::GC_VIA_STACK_GUARD, StepOrigin::kV8); |
| } |
| } |
| |
| void Heap::EnsureFillerObjectAtTop() { |
| // There may be an allocation memento behind objects in new space. Upon |
| // evacuation of a non-full new space (or if we are on the last page) there |
| // may be uninitialized memory behind top. We fill the remainder of the page |
| // with a filler. |
| Address to_top = new_space_->top(); |
| Page* page = Page::FromAddress(to_top - kTaggedSize); |
| if (page->Contains(to_top)) { |
| int remaining_in_page = static_cast<int>(page->area_end() - to_top); |
| CreateFillerObjectAt(to_top, remaining_in_page, ClearRecordedSlots::kNo); |
| } |
| } |
| |
| bool Heap::CollectGarbage(AllocationSpace space, |
| GarbageCollectionReason gc_reason, |
| const v8::GCCallbackFlags gc_callback_flags) { |
| const char* collector_reason = nullptr; |
| GarbageCollector collector = SelectGarbageCollector(space, &collector_reason); |
| is_current_gc_forced_ = gc_callback_flags & v8::kGCCallbackFlagForced; |
| |
| if (!CanExpandOldGeneration(new_space()->Capacity() + |
| new_lo_space()->Size())) { |
| InvokeNearHeapLimitCallback(); |
| } |
| |
| // Ensure that all pending phantom callbacks are invoked. |
| isolate()->global_handles()->InvokeSecondPassPhantomCallbacks(); |
| |
| // The VM is in the GC state until exiting this function. |
| VMState<GC> state(isolate()); |
| |
| #ifdef V8_ENABLE_ALLOCATION_TIMEOUT |
| // Reset the allocation timeout, but make sure to allow at least a few |
| // allocations after a collection. The reason for this is that we have a lot |
| // of allocation sequences and we assume that a garbage collection will allow |
| // the subsequent allocation attempts to go through. |
| if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) { |
| allocation_timeout_ = Max(6, NextAllocationTimeout(allocation_timeout_)); |
| } |
| #endif |
| |
| EnsureFillerObjectAtTop(); |
| |
| if (IsYoungGenerationCollector(collector) && |
| !incremental_marking()->IsStopped()) { |
| if (FLAG_trace_incremental_marking) { |
| isolate()->PrintWithTimestamp( |
| "[IncrementalMarking] Scavenge during marking.\n"); |
| } |
| } |
| |
| bool next_gc_likely_to_collect_more = false; |
| size_t committed_memory_before = 0; |
| |
| if (collector == MARK_COMPACTOR) { |
| committed_memory_before = CommittedOldGenerationMemory(); |
| } |
| |
| { |
| tracer()->Start(collector, gc_reason, collector_reason); |
| DCHECK(AllowHeapAllocation::IsAllowed()); |
| DisallowHeapAllocation no_allocation_during_gc; |
| GarbageCollectionPrologue(); |
| |
| { |
| TimedHistogram* gc_type_timer = GCTypeTimer(collector); |
| TimedHistogramScope histogram_timer_scope(gc_type_timer, isolate_); |
| TRACE_EVENT0("v8", gc_type_timer->name()); |
| |
| TimedHistogram* gc_type_priority_timer = GCTypePriorityTimer(collector); |
| OptionalTimedHistogramScopeMode mode = |
| isolate_->IsMemorySavingsModeActive() |
| ? OptionalTimedHistogramScopeMode::DONT_TAKE_TIME |
| : OptionalTimedHistogramScopeMode::TAKE_TIME; |
| OptionalTimedHistogramScope histogram_timer_priority_scope( |
| gc_type_priority_timer, isolate_, mode); |
| |
| next_gc_likely_to_collect_more = |
| PerformGarbageCollection(collector, gc_callback_flags); |
| if (collector == MARK_COMPACTOR || collector == SCAVENGER) { |
| tracer()->RecordGCPhasesHistograms(gc_type_timer); |
| } |
| } |
| |
| // Clear is_current_gc_forced now that the current GC is complete. Do this |
| // before GarbageCollectionEpilogue() since that could trigger another |
| // unforced GC. |
| is_current_gc_forced_ = false; |
| |
| GarbageCollectionEpilogue(); |
| if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) { |
| isolate()->CheckDetachedContextsAfterGC(); |
| } |
| |
| if (collector == MARK_COMPACTOR) { |
| size_t committed_memory_after = CommittedOldGenerationMemory(); |
| size_t used_memory_after = OldGenerationSizeOfObjects(); |
| MemoryReducer::Event event; |
| event.type = MemoryReducer::kMarkCompact; |
| event.time_ms = MonotonicallyIncreasingTimeInMs(); |
| // Trigger one more GC if |
| // - this GC decreased committed memory, |
| // - there is high fragmentation, |
| // - there are live detached contexts. |
| event.next_gc_likely_to_collect_more = |
| (committed_memory_before > committed_memory_after + MB) || |
| HasHighFragmentation(used_memory_after, committed_memory_after) || |
| (detached_contexts().length() > 0); |
| event.committed_memory = committed_memory_after; |
| if (deserialization_complete_) { |
| memory_reducer_->NotifyMarkCompact(event); |
| } |
| if (initial_max_old_generation_size_ < max_old_generation_size_ && |
| used_memory_after < initial_max_old_generation_size_threshold_) { |
| max_old_generation_size_ = initial_max_old_generation_size_; |
| } |
| } |
| |
| tracer()->Stop(collector); |
| } |
| |
| if (collector == MARK_COMPACTOR && |
| (gc_callback_flags & (kGCCallbackFlagForced | |
| kGCCallbackFlagCollectAllAvailableGarbage)) != 0) { |
| isolate()->CountUsage(v8::Isolate::kForcedGC); |
| } |
| |
| // Start incremental marking for the next cycle. We do this only for the |
| // scavenger to avoid a loop where mark-compact causes another mark-compact. |
| if (IsYoungGenerationCollector(collector)) { |
| StartIncrementalMarkingIfAllocationLimitIsReached( |
| GCFlagsForIncrementalMarking(), |
| kGCCallbackScheduleIdleGarbageCollection); |
| } |
| |
| return next_gc_likely_to_collect_more; |
| } |
| |
| |
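| // Called when a context is disposed. If the context was not dependent on an |
| // outer context, survival statistics and the old generation allocation limit |
| // are reset and the memory reducer is notified of possible garbage. Returns |
| // the running count of disposed contexts. |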
| int Heap::NotifyContextDisposed(bool dependant_context) { |
| if (!dependant_context) { |
| tracer()->ResetSurvivalEvents(); |
| old_generation_size_configured_ = false; |
| old_generation_allocation_limit_ = initial_old_generation_size_; |
| MemoryReducer::Event event; |
| event.type = MemoryReducer::kPossibleGarbage; |
| event.time_ms = MonotonicallyIncreasingTimeInMs(); |
| memory_reducer_->NotifyPossibleGarbage(event); |
| } |
| isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock); |
| |
| number_of_disposed_maps_ = retained_maps().length(); |
| tracer()->AddContextDisposalTime(MonotonicallyIncreasingTimeInMs()); |
| return ++contexts_disposed_; |
| } |
| |
| void Heap::StartIncrementalMarking(int gc_flags, |
| GarbageCollectionReason gc_reason, |
| GCCallbackFlags gc_callback_flags) { |
| DCHECK(incremental_marking()->IsStopped()); |
| set_current_gc_flags(gc_flags); |
| current_gc_callback_flags_ = gc_callback_flags; |
| incremental_marking()->Start(gc_reason); |
| } |
| |
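| // Starts incremental marking if it is currently stopped and an incremental |
| // marking limit has been reached: the soft limit schedules a marking task, |
| // the hard limit starts marking right away. |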
| void Heap::StartIncrementalMarkingIfAllocationLimitIsReached( |
| int gc_flags, const GCCallbackFlags gc_callback_flags) { |
| if (incremental_marking()->IsStopped()) { |
| IncrementalMarkingLimit reached_limit = IncrementalMarkingLimitReached(); |
| if (reached_limit == IncrementalMarkingLimit::kSoftLimit) { |
| incremental_marking()->incremental_marking_job()->ScheduleTask(this); |
| } else if (reached_limit == IncrementalMarkingLimit::kHardLimit) { |
| StartIncrementalMarking( |
| gc_flags, |
| OldGenerationSpaceAvailable() <= new_space_->Capacity() |
| ? GarbageCollectionReason::kAllocationLimit |
| : GarbageCollectionReason::kGlobalAllocationLimit, |
| gc_callback_flags); |
| } |
| } |
| } |
| |
| void Heap::StartIdleIncrementalMarking( |
| GarbageCollectionReason gc_reason, |
| const GCCallbackFlags gc_callback_flags) { |
| StartIncrementalMarking(kReduceMemoryFootprintMask, gc_reason, |
| gc_callback_flags); |
| } |
| |
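| // Moves |len| tagged values from |src_slot| to |dst_slot| within |
| // |dst_object|; the ranges may overlap. While concurrent marking is active |
| // the copy uses relaxed atomic slot accesses, otherwise MemMove. Emits the |
| // write barrier for the destination range unless |mode| skips it. |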
| void Heap::MoveRange(HeapObject dst_object, const ObjectSlot dst_slot, |
| const ObjectSlot src_slot, int len, |
| WriteBarrierMode mode) { |
| DCHECK_NE(len, 0); |
| DCHECK_NE(dst_object.map(), ReadOnlyRoots(this).fixed_cow_array_map()); |
| const ObjectSlot dst_end(dst_slot + len); |
| // Ensure no range overflow. |
| DCHECK(dst_slot < dst_end); |
| DCHECK(src_slot < src_slot + len); |
| |
| if (FLAG_concurrent_marking && incremental_marking()->IsMarking()) { |
| if (dst_slot < src_slot) { |
| // Copy tagged values forward using relaxed load/stores that do not |
| // involve value decompression. |
| const AtomicSlot atomic_dst_end(dst_end); |
| AtomicSlot dst(dst_slot); |
| AtomicSlot src(src_slot); |
| while (dst < atomic_dst_end) { |
| *dst = *src; |
| ++dst; |
| ++src; |
| } |
| } else { |
| // Copy tagged values backwards using relaxed load/stores that do not |
| // involve value decompression. |
| const AtomicSlot atomic_dst_begin(dst_slot); |
| AtomicSlot dst(dst_slot + len - 1); |
| AtomicSlot src(src_slot + len - 1); |
| while (dst >= atomic_dst_begin) { |
| *dst = *src; |
| --dst; |
| --src; |
| } |
| } |
| } else { |
| MemMove(dst_slot.ToVoidPtr(), src_slot.ToVoidPtr(), len * kTaggedSize); |
| } |
| if (mode == SKIP_WRITE_BARRIER) return; |
| WriteBarrierForRange(dst_object, dst_slot, dst_end); |
| } |
| |
| // Instantiate Heap::CopyRange() for ObjectSlot and MaybeObjectSlot. |
| template void Heap::CopyRange<ObjectSlot>(HeapObject dst_object, |
| ObjectSlot dst_slot, |
| ObjectSlot src_slot, int len, |
| WriteBarrierMode mode); |
| template void Heap::CopyRange<MaybeObjectSlot>(HeapObject dst_object, |
| MaybeObjectSlot dst_slot, |
| MaybeObjectSlot src_slot, |
| int len, WriteBarrierMode mode); |
| |
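| // Copies |len| tagged values from |src_slot| to |dst_slot| within |
| // |dst_object|; unlike MoveRange, the ranges must not overlap. |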
| template <typename TSlot> |
| void Heap::CopyRange(HeapObject dst_object, const TSlot dst_slot, |
| const TSlot src_slot, int len, WriteBarrierMode mode) { |
| DCHECK_NE(len, 0); |
| |
| DCHECK_NE(dst_object.map(), ReadOnlyRoots(this).fixed_cow_array_map()); |
| const TSlot dst_end(dst_slot + len); |
| // Ensure ranges do not overlap. |
| DCHECK(dst_end <= src_slot || (src_slot + len) <= dst_slot); |
| |
| if (FLAG_concurrent_marking && incremental_marking()->IsMarking()) { |
| // Copy tagged values using relaxed load/stores that do not involve value |
| // decompression. |
| const AtomicSlot atomic_dst_end(dst_end); |
| AtomicSlot dst(dst_slot); |
| AtomicSlot src(src_slot); |
| while (dst < atomic_dst_end) { |
| *dst = *src; |
| ++dst; |
| ++src; |
| } |
| } else { |
| MemCopy(dst_slot.ToVoidPtr(), src_slot.ToVoidPtr(), len * kTaggedSize); |
| } |
| if (mode == SKIP_WRITE_BARRIER) return; |
| WriteBarrierForRange(dst_object, dst_slot, dst_end); |
| } |
| |
| #ifdef VERIFY_HEAP |
| // Helper class for verifying the string table. |
| class StringTableVerifier : public ObjectVisitor { |
| public: |
| explicit StringTableVerifier(Isolate* isolate) : isolate_(isolate) {} |
| |
| void VisitPointers(HeapObject host, ObjectSlot start, |
| ObjectSlot end) override { |
| // Visit all HeapObject pointers in [start, end). |
| for (ObjectSlot p = start; p < end; ++p) { |
| DCHECK(!HasWeakHeapObjectTag(*p)); |
| if ((*p).IsHeapObject()) { |
| HeapObject object = HeapObject::cast(*p); |
| // Check that the string is actually internalized. |
| CHECK(object.IsTheHole(isolate_) || object.IsUndefined(isolate_) || |
| object.IsInternalizedString()); |
| } |
| } |
| } |
| void VisitPointers(HeapObject host, MaybeObjectSlot start, |
| MaybeObjectSlot end) override { |
| UNREACHABLE(); |
| } |
| |
| void VisitCodeTarget(Code host, RelocInfo* rinfo) override { UNREACHABLE(); } |
| |
| void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override { |
| UNREACHABLE(); |
| } |
| |
| private: |
| Isolate* isolate_; |
| }; |
| |
| static void VerifyStringTable(Isolate* isolate) { |
| StringTableVerifier verifier(isolate); |
| isolate->heap()->string_table().IterateElements(&verifier); |
| } |
| #endif // VERIFY_HEAP |
| |
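| // Used by the deserializer to reserve the requested chunks in each space, |
| // performing GCs and retrying (up to kThreshold times) when an allocation |
| // fails. Map space chunks are carved up into individual map-sized fillers |
| // whose addresses are returned in |maps|. Returns false if the reservations |
| // could not be satisfied. |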
| bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) { |
| bool gc_performed = true; |
| int counter = 0; |
| static const int kThreshold = 20; |
| while (gc_performed && counter++ < kThreshold) { |
| gc_performed = false; |
| for (int space = FIRST_SPACE; |
| space < static_cast<int>(SnapshotSpace::kNumberOfHeapSpaces); |
| space++) { |
| Reservation* reservation = &reservations[space]; |
| DCHECK_LE(1, reservation->size()); |
| if (reservation->at(0).size == 0) { |
| DCHECK_EQ(1, reservation->size()); |
| continue; |
| } |
| bool perform_gc = false; |
| if (space == MAP_SPACE) { |
| // We allocate each map individually to avoid fragmentation. |
| maps->clear(); |
| DCHECK_LE(reservation->size(), 2); |
| int reserved_size = 0; |
| for (const Chunk& c : *reservation) reserved_size += c.size; |
| DCHECK_EQ(0, reserved_size % Map::kSize); |
| int num_maps = reserved_size / Map::kSize; |
| for (int i = 0; i < num_maps; i++) { |
| AllocationResult allocation = |
| map_space()->AllocateRawUnaligned(Map::kSize); |
| HeapObject free_space; |
| if (allocation.To(&free_space)) { |
| // Mark with a free list node, in case we have a GC before |
| // deserializing. |
| Address free_space_address = free_space.address(); |
| CreateFillerObjectAt(free_space_address, Map::kSize, |
| ClearRecordedSlots::kNo); |
| maps->push_back(free_space_address); |
| } else { |
| perform_gc = true; |
| break; |
| } |
| } |
| } else if (space == LO_SPACE) { |
| // Just check that we can allocate during deserialization. |
| DCHECK_LE(reservation->size(), 2); |
| int reserved_size = 0; |
| for (const Chunk& c : *reservation) reserved_size += c.size; |
| perform_gc = !CanExpandOldGeneration(reserved_size); |
| } else { |
| for (auto& chunk : *reservation) { |
| AllocationResult allocation; |
| int size = chunk.size; |
| DCHECK_LE(static_cast<size_t>(size), |
| MemoryChunkLayout::AllocatableMemoryInMemoryChunk( |
| static_cast<AllocationSpace>(space))); |
| if (space == NEW_SPACE) { |
| allocation = new_space()->AllocateRawUnaligned(size); |
| } else { |
| // The deserializer will update the skip list. |
| allocation = paged_space(space)->AllocateRawUnaligned(size); |
| } |
| HeapObject free_space; |
| if (allocation.To(&free_space)) { |
| // Mark with a free list node, in case we have a GC before |
| // deserializing. |
| Address free_space_address = free_space.address(); |
| CreateFillerObjectAt(free_space_address, size, |
| ClearRecordedSlots::kNo); |
| DCHECK(IsPreAllocatedSpace(static_cast<SnapshotSpace>(space))); |
| chunk.start = free_space_address; |
| chunk.end = free_space_address + size; |
| } else { |
| perform_gc = true; |
| break; |
| } |
| } |
| } |
| if (perform_gc) { |
| // We cannot perform a GC with an uninitialized isolate. This check |
| // fails for example if the max old space size is chosen unwisely, |
| // so that we cannot allocate space to deserialize the initial heap. |
| if (!deserialization_complete_) { |
| V8::FatalProcessOutOfMemory( |
| isolate(), "insufficient memory to create an Isolate"); |
| } |
| if (space == NEW_SPACE) { |
| CollectGarbage(NEW_SPACE, GarbageCollectionReason::kDeserializer); |
| } else { |
| if (counter > 1) { |
| CollectAllGarbage(kReduceMemoryFootprintMask, |
| GarbageCollectionReason::kDeserializer); |
| } else { |
| CollectAllGarbage(kNoGCFlags, |
| GarbageCollectionReason::kDeserializer); |
| } |
| } |
| gc_performed = true; |
| break; // Abort for-loop over spaces and retry. |
| } |
| } |
| } |
| |
| return !gc_performed; |
| } |
| |
| |
| void Heap::EnsureFromSpaceIsCommitted() { |
| if (new_space_->CommitFromSpaceIfNeeded()) return; |
| |
| // Committing memory to from space failed. |
| // Memory is exhausted and we will die. |
| FatalProcessOutOfMemory("Committing semi space failed."); |
| } |
| |
| |
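| // Recomputes promotion and semispace-copy ratios as percentages of the new |
| // space size at the start of the GC and reports the combined survival ratio |
| // to the GC tracer. |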
| void Heap::UpdateSurvivalStatistics(int start_new_space_size) { |
| if (start_new_space_size == 0) return; |
| |
| promotion_ratio_ = (static_cast<double>(promoted_objects_size_) / |
| static_cast<double>(start_new_space_size) * 100); |
| |
| if (previous_semi_space_copied_object_size_ > 0) { |
| promotion_rate_ = |
| (static_cast<double>(promoted_objects_size_) / |
| static_cast<double>(previous_semi_space_copied_object_size_) * 100); |
| } else { |
| promotion_rate_ = 0; |
| } |
| |
| semi_space_copied_rate_ = |
| (static_cast<double>(semi_space_copied_object_size_) / |
| static_cast<double>(start_new_space_size) * 100); |
| |
| double survival_rate = promotion_ratio_ + semi_space_copied_rate_; |
| tracer()->AddSurvivalRatio(survival_rate); |
| } |
| |
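| // Runs the actual collection: GC prologue callbacks, the selected collector |
| // (mark-compact, minor mark-compact, scavenge or young generation |
| // evacuation), pretenuring feedback, weak handle processing, embedder |
| // tracing epilogue, limit recomputation and GC epilogue callbacks. Returns |
| // whether any global handles were freed. |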
| bool Heap::PerformGarbageCollection( |
| GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) { |
| DisallowJavascriptExecution no_js(isolate()); |
| |
| size_t freed_global_handles = 0; |
| |
| if (!IsYoungGenerationCollector(collector)) { |
| PROFILE(isolate_, CodeMovingGCEvent()); |
| } |
| |
| #ifdef VERIFY_HEAP |
| if (FLAG_verify_heap) { |
| VerifyStringTable(this->isolate()); |
| } |
| #endif |
| |
| GCType gc_type = |
| collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge; |
| |
| { |
| GCCallbacksScope scope(this); |
| // Temporarily override any embedder stack state, as callbacks may create |
| // their own state on the stack and recursively trigger GC. |
| EmbedderStackStateScope embedder_scope( |
| local_embedder_heap_tracer(), |
| EmbedderHeapTracer::EmbedderStackState::kUnknown); |
| if (scope.CheckReenter()) { |
| AllowHeapAllocation allow_allocation; |
| AllowJavascriptExecution allow_js(isolate()); |
| TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_PROLOGUE); |
| VMState<EXTERNAL> state(isolate_); |
| HandleScope handle_scope(isolate_); |
| CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags); |
| } |
| } |
| |
| EnsureFromSpaceIsCommitted(); |
| |
| size_t start_young_generation_size = |
| Heap::new_space()->Size() + new_lo_space()->SizeOfObjects(); |
| |
| { |
| Heap::SkipStoreBufferScope skip_store_buffer_scope(store_buffer_.get()); |
| |
| switch (collector) { |
| case MARK_COMPACTOR: |
| UpdateOldGenerationAllocationCounter(); |
| // Perform mark-sweep with optional compaction. |
| MarkCompact(); |
| old_generation_size_configured_ = true; |
| // This should be updated before PostGarbageCollectionProcessing, which |
| // can cause another GC. Take into account the objects promoted during |
| // GC. |
| old_generation_allocation_counter_at_last_gc_ += |
| static_cast<size_t>(promoted_objects_size_); |
| old_generation_size_at_last_gc_ = OldGenerationSizeOfObjects(); |
| break; |
| case MINOR_MARK_COMPACTOR: |
| MinorMarkCompact(); |
| break; |
| case SCAVENGER: |
| if ((fast_promotion_mode_ && |
| CanExpandOldGeneration(new_space()->Size() + |
| new_lo_space()->Size()))) { |
| tracer()->NotifyYoungGenerationHandling( |
| YoungGenerationHandling::kFastPromotionDuringScavenge); |
| EvacuateYoungGeneration(); |
| } else { |
| tracer()->NotifyYoungGenerationHandling( |
| YoungGenerationHandling::kRegularScavenge); |
| |
| Scavenge(); |
| } |
| break; |
| } |
| |
| ProcessPretenuringFeedback(); |
| } |
| |
| UpdateSurvivalStatistics(static_cast<int>(start_young_generation_size)); |
| ConfigureInitialOldGenerationSize(); |
| |
| if (collector != MARK_COMPACTOR) { |
| // Objects that died in the new space might have been accounted |
| // as bytes marked ahead of schedule by the incremental marker. |
| incremental_marking()->UpdateMarkedBytesAfterScavenge( |
| start_young_generation_size - SurvivedYoungObjectSize()); |
| } |
| |
| if (!fast_promotion_mode_ || collector == MARK_COMPACTOR) { |
| ComputeFastPromotionMode(); |
| } |
| |
| isolate_->counters()->objs_since_last_young()->Set(0); |
| |
| { |
| TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES); |
| // First-round weak callbacks are not supposed to allocate or trigger |
| // nested GCs. |
| freed_global_handles = |
| isolate_->global_handles()->InvokeFirstPassWeakCallbacks(); |
| } |
| |
| if (collector == MARK_COMPACTOR) { |
| TRACE_GC(tracer(), GCTracer::Scope::HEAP_EMBEDDER_TRACING_EPILOGUE); |
| // TraceEpilogue may trigger operations that invalidate global handles. It |
| // has to be called *after* all other operations that potentially touch and |
| // reset global handles. It is also still part of the main garbage |
| // collection pause and thus needs to be called *before* any operation that |
| // can potentially trigger recursive garbage collection. |
| local_embedder_heap_tracer()->TraceEpilogue(); |
| } |
| |
| { |
| TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES); |
| gc_post_processing_depth_++; |
| { |
| AllowHeapAllocation allow_allocation; |
| AllowJavascriptExecution allow_js(isolate()); |
| freed_global_handles += |
| isolate_->global_handles()->PostGarbageCollectionProcessing( |
| collector, gc_callback_flags); |
| } |
| gc_post_processing_depth_--; |
| } |
| |
| isolate_->eternal_handles()->PostGarbageCollectionProcessing(); |
| |
| // Update relocatables. |
| Relocatable::PostGarbageCollectionProcessing(isolate_); |
| |
| RecomputeLimits(collector); |
| |
| { |
| GCCallbacksScope scope(this); |
| if (scope.CheckReenter()) { |
| AllowHeapAllocation allow_allocation; |
| AllowJavascriptExecution allow_js(isolate()); |
| TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_EPILOGUE); |
| VMState<EXTERNAL> state(isolate_); |
| HandleScope handle_scope(isolate_); |
| CallGCEpilogueCallbacks(gc_type, gc_callback_flags); |
| } |
| } |
| |
| #ifdef VERIFY_HEAP |
| if (FLAG_verify_heap) { |
| VerifyStringTable(this->isolate()); |
| } |
| #endif |
| |
| return freed_global_handles > 0; |
| } |
| |
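| // Recomputes the old generation and, if global memory scheduling is enabled, |
| // the global allocation limits. After a mark-compact the limits are derived |
| // from the current heap size and growing factor; after a young generation GC |
| // with a low allocation rate the limits may only shrink. |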
| void Heap::RecomputeLimits(GarbageCollector collector) { |
| if (!((collector == MARK_COMPACTOR) || |
| (HasLowYoungGenerationAllocationRate() && |
| old_generation_size_configured_))) { |
| return; |
| } |
| |
| double v8_gc_speed = |
| tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond(); |
| double v8_mutator_speed = |
| tracer()->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond(); |
| double v8_growing_factor = MemoryController<V8HeapTrait>::GrowingFactor( |
| this, max_old_generation_size_, v8_gc_speed, v8_mutator_speed); |
| double global_growing_factor = 0; |
| if (UseGlobalMemoryScheduling()) { |
| DCHECK_NOT_NULL(local_embedder_heap_tracer()); |
| double embedder_gc_speed = tracer()->EmbedderSpeedInBytesPerMillisecond(); |
| double embedder_speed = |
| tracer()->CurrentEmbedderAllocationThroughputInBytesPerMillisecond(); |
| double embedder_growing_factor = |
| (embedder_gc_speed > 0 && embedder_speed > 0) |
| ? MemoryController<GlobalMemoryTrait>::GrowingFactor( |
| this, max_global_memory_size_, embedder_gc_speed, |
| embedder_speed) |
| : 0; |
| global_growing_factor = Max(v8_growing_factor, embedder_growing_factor); |
| } |
| |
| size_t old_gen_size = OldGenerationSizeOfObjects(); |
| size_t new_space_capacity = new_space()->Capacity(); |
| HeapGrowingMode mode = CurrentHeapGrowingMode(); |
| |
| if (collector == MARK_COMPACTOR) { |
| // Register the amount of externally allocated memory. |
| isolate()->isolate_data()->external_memory_at_last_mark_compact_ = |
| isolate()->isolate_data()->external_memory_; |
| isolate()->isolate_data()->external_memory_limit_ = |
| isolate()->isolate_data()->external_memory_ + |
| kExternalAllocationSoftLimit; |
| |
| old_generation_allocation_limit_ = |
| MemoryController<V8HeapTrait>::CalculateAllocationLimit( |
| this, old_gen_size, min_old_generation_size_, |
| max_old_generation_size_, new_space_capacity, v8_growing_factor, |
| mode); |
| if (UseGlobalMemoryScheduling()) { |
| DCHECK_GT(global_growing_factor, 0); |
| global_allocation_limit_ = |
| MemoryController<GlobalMemoryTrait>::CalculateAllocationLimit( |
| this, GlobalSizeOfObjects(), min_global_memory_size_, |
| max_global_memory_size_, new_space_capacity, |
| global_growing_factor, mode); |
| } |
| CheckIneffectiveMarkCompact( |
| old_gen_size, tracer()->AverageMarkCompactMutatorUtilization()); |
| } else if (HasLowYoungGenerationAllocationRate() && |
| old_generation_size_configured_) { |
| size_t new_old_generation_limit = |
| MemoryController<V8HeapTrait>::CalculateAllocationLimit( |
| this, old_gen_size, min_old_generation_size_, |
| max_old_generation_size_, new_space_capacity, v8_growing_factor, |
| mode); |
| if (new_old_generation_limit < old_generation_allocation_limit_) { |
| old_generation_allocation_limit_ = new_old_generation_limit; |
| } |
| if (UseGlobalMemoryScheduling()) { |
| DCHECK_GT(global_growing_factor, 0); |
| size_t new_global_limit = |
| MemoryController<GlobalMemoryTrait>::CalculateAllocationLimit( |
| this, GlobalSizeOfObjects(), min_global_memory_size_, |
| max_global_memory_size_, new_space_capacity, |
| global_growing_factor, mode); |
| if (new_global_limit < global_allocation_limit_) { |
| global_allocation_limit_ = new_global_limit; |
| } |
| } |
| } |
| } |
| |
| void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) { |
| RuntimeCallTimerScope runtime_timer( |
| isolate(), RuntimeCallCounterId::kGCPrologueCallback); |
| for (const GCCallbackTuple& info : gc_prologue_callbacks_) { |
| if (gc_type & info.gc_type) { |
| v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate()); |
| info.callback(isolate, gc_type, flags, info.data); |
| } |
| } |
| } |
| |
| void Heap::CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags) { |
| RuntimeCallTimerScope runtime_timer( |
| isolate(), RuntimeCallCounterId::kGCEpilogueCallback); |
| for (const GCCallbackTuple& info : gc_epilogue_callbacks_) { |
| if (gc_type & info.gc_type) { |
| v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate()); |
| info.callback(isolate, gc_type, flags, info.data); |
| } |
| } |
| } |
| |
| |
| void Heap::MarkCompact() { |
| PauseAllocationObserversScope pause_observers(this); |
| |
| SetGCState(MARK_COMPACT); |
| |
| LOG(isolate_, ResourceEvent("markcompact", "begin")); |
| |
| uint64_t size_of_objects_before_gc = SizeOfObjects(); |
| |
| CodeSpaceMemoryModificationScope code_modification(this); |
| |
| mark_compact_collector()->Prepare(); |
| |
| ms_count_++; |
| |
| MarkCompactPrologue(); |
| |
| mark_compact_collector()->CollectGarbage(); |
| |
| LOG(isolate_, ResourceEvent("markcompact", "end")); |
| |
| MarkCompactEpilogue(); |
| |
| if (FLAG_allocation_site_pretenuring) { |
| EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc); |
| } |
| } |
| |
| void Heap::MinorMarkCompact() { |
| #ifdef ENABLE_MINOR_MC |
| DCHECK(FLAG_minor_mc); |
| |
| PauseAllocationObserversScope pause_observers(this); |
| SetGCState(MINOR_MARK_COMPACT); |
| LOG(isolate_, ResourceEvent("MinorMarkCompact", "begin")); |
| |
| TRACE_GC(tracer(), GCTracer::Scope::MINOR_MC); |
| AlwaysAllocateScope always_allocate(isolate()); |
| IncrementalMarking::PauseBlackAllocationScope pause_black_allocation( |
| incremental_marking()); |
| ConcurrentMarking::PauseScope pause_scope(concurrent_marking()); |
| |
| minor_mark_compact_collector()->CollectGarbage(); |
| |
| LOG(isolate_, ResourceEvent("MinorMarkCompact", "end")); |
| SetGCState(NOT_IN_GC); |
| #else |
| UNREACHABLE(); |
| #endif // ENABLE_MINOR_MC |
| } |
| |
| void Heap::MarkCompactEpilogue() { |
| TRACE_GC(tracer(), GCTracer::Scope::MC_EPILOGUE); |
| SetGCState(NOT_IN_GC); |
| |
| isolate_->counters()->objs_since_last_full()->Set(0); |
| |
| incremental_marking()->Epilogue(); |
| |
| DCHECK(incremental_marking()->IsStopped()); |
| } |
| |
| |
| void Heap::MarkCompactPrologue() { |
| TRACE_GC(tracer(), GCTracer::Scope::MC_PROLOGUE); |
| isolate_->descriptor_lookup_cache()->Clear(); |
| RegExpResultsCache::Clear(string_split_cache()); |
| RegExpResultsCache::Clear(regexp_multiple_cache()); |
| |
| isolate_->compilation_cache()->MarkCompactPrologue(); |
| |
| FlushNumberStringCache(); |
| } |
| |
| |
| void Heap::CheckNewSpaceExpansionCriteria() { |
| if (FLAG_experimental_new_space_growth_heuristic) { |
| if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() && |
| survived_last_scavenge_ * 100 / new_space_->TotalCapacity() >= 10) { |
| // Grow the size of new space if there is room to grow, and more than 10% |
| // have survived the last scavenge. |
| new_space_->Grow(); |
| survived_since_last_expansion_ = 0; |
| } |
| } else if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() && |
| survived_since_last_expansion_ > new_space_->TotalCapacity()) { |
| // Grow the size of new space if there is room to grow, and enough data |
| // has survived scavenge since the last expansion. |
| new_space_->Grow(); |
| survived_since_last_expansion_ = 0; |
| } |
| new_lo_space()->SetCapacity(new_space()->Capacity()); |
| } |
| |
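| // Fast-promotion path of the scavenger: promotes all new space pages and |
| // young large objects to the old generation without copying, then resets |
| // new space. |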
| void Heap::EvacuateYoungGeneration() { |
| TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_FAST_PROMOTE); |
| base::MutexGuard guard(relocation_mutex()); |
| ConcurrentMarking::PauseScope pause_scope(concurrent_marking()); |
| if (!FLAG_concurrent_marking) { |
| DCHECK(fast_promotion_mode_); |
| DCHECK( |
| CanExpandOldGeneration(new_space()->Size() + new_lo_space()->Size())); |
| } |
| |
| mark_compact_collector()->sweeper()->EnsureIterabilityCompleted(); |
| |
| SetGCState(SCAVENGE); |
| LOG(isolate_, ResourceEvent("scavenge", "begin")); |
| |
| // Move pages from new->old generation. |
| PageRange range(new_space()->first_allocatable_address(), new_space()->top()); |
| for (auto it = range.begin(); it != range.end();) { |
| Page* p = (*++it)->prev_page(); |
| new_space()->from_space().RemovePage(p); |
| Page::ConvertNewToOld(p); |
| if (incremental_marking()->IsMarking()) |
| mark_compact_collector()->RecordLiveSlotsOnPage(p); |
| } |
| |
| // Reset new space. |
| if (!new_space()->Rebalance()) { |
| FatalProcessOutOfMemory("NewSpace::Rebalance"); |
| } |
| new_space()->ResetLinearAllocationArea(); |
| new_space()->set_age_mark(new_space()->top()); |
| |
| for (auto it = new_lo_space()->begin(); it != new_lo_space()->end();) { |
| LargePage* page = *it; |
| // The increment has to happen after we save the page, because the page is |
| // going to be removed from new_lo_space below. |
| it++; |
| lo_space()->PromoteNewLargeObject(page); |
| } |
| |
| // Fix up special trackers. |
| external_string_table_.PromoteYoung(); |
| // GlobalHandles are updated in PostGarbageCollectionProcessing. |
| |
| size_t promoted = new_space()->Size() + new_lo_space()->Size(); |
| IncrementYoungSurvivorsCounter(promoted); |
| IncrementPromotedObjectsSize(promoted); |
| IncrementSemiSpaceCopiedObjectSize(0); |
| |
| LOG(isolate_, ResourceEvent("scavenge", "end")); |
| SetGCState(NOT_IN_GC); |
| } |
| |
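| // Young generation collection: flips the semispaces and the young large |
| // object space and runs the scavenger, with allocation observers and black |
| // allocation paused for the duration. |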
| void Heap::Scavenge() { |
| TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE); |
| base::MutexGuard guard(relocation_mutex()); |
| ConcurrentMarking::PauseScope pause_scope(concurrent_marking()); |
| // There are soft limits in the allocation code, designed to trigger a mark |
| // sweep collection by failing allocations. There is no sense in trying to |
| // trigger one during scavenge: scavenge allocations should always succeed. |
| AlwaysAllocateScope scope(isolate()); |
| |
| // Bump-pointer allocations done during scavenge are not real allocations. |
| // Pause the inline allocation steps. |
| PauseAllocationObserversScope pause_observers(this); |
| IncrementalMarking::PauseBlackAllocationScope pause_black_allocation( |
| incremental_marking()); |
| |
| mark_compact_collector()->sweeper()->EnsureIterabilityCompleted(); |
| |
| SetGCState(SCAVENGE); |
| |
| // Flip the semispaces. After flipping, to space is empty, from space has |
| // live objects. |
| new_space()->Flip(); |
| new_space()->ResetLinearAllocationArea(); |
| |
| // We also flip the young generation large object space. All large objects |
| // will be in the from space. |
| new_lo_space()->Flip(); |
| new_lo_space()->ResetPendingObject(); |
| |
| // Implements Cheney's copying algorithm. |
| LOG(isolate_, ResourceEvent("scavenge", "begin")); |
| |
| scavenger_collector_->CollectGarbage(); |
| |
| LOG(isolate_, ResourceEvent("scavenge", "end")); |
| |
| SetGCState(NOT_IN_GC); |
| } |
| |
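| // Enables fast promotion mode (subject to the relevant flags) when new space |
| // is at maximum capacity, enough of it survived the last scavenge, and |
| // memory usage should not be reduced. |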
| void Heap::ComputeFastPromotionMode() { |
| const size_t survived_in_new_space = |
| survived_last_scavenge_ * 100 / new_space_->Capacity(); |
| fast_promotion_mode_ = |
| !FLAG_optimize_for_size && FLAG_fast_promotion_new_space && |
| !ShouldReduceMemory() && new_space_->IsAtMaximumCapacity() && |
| survived_in_new_space >= kMinPromotedPercentForFastPromotionMode; |
| if (FLAG_trace_gc_verbose && !FLAG_trace_gc_ignore_scavenger) { |
| PrintIsolate(isolate(), "Fast promotion mode: %s survival rate: %zu%%\n", |
| fast_promotion_mode_ ? "true" : "false", |
| survived_in_new_space); |
| } |
| } |
| |
| void Heap::UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk) { |
| if (unprotected_memory_chunks_registry_enabled_) { |
| base::MutexGuard guard(&unprotected_memory_chunks_mutex_); |
| if (unprotected_memory_chunks_.insert(chunk).second) { |
| chunk->SetReadAndWritable(); |
| } |
| } |
| } |
| |
| void Heap::UnprotectAndRegisterMemoryChunk(HeapObject object) { |
| UnprotectAndRegisterMemoryChunk(MemoryChunk::FromHeapObject(object)); |
| } |
| |
| void Heap::UnregisterUnprotectedMemoryChunk(MemoryChunk* chunk) { |
| unprotected_memory_chunks_.erase(chunk); |
| } |
| |
| void Heap::ProtectUnprotectedMemoryChunks() { |
| DCHECK(unprotected_memory_chunks_registry_enabled_); |
| for (auto chunk = unprotected_memory_chunks_.begin(); |
| chunk != unprotected_memory_chunks_.end(); chunk++) { |
| CHECK(memory_allocator()->IsMemoryChunkExecutable(*chunk)); |
| (*chunk)->SetDefaultCodePermissions(); |
| } |
| unprotected_memory_chunks_.clear(); |
| } |
| |
| bool Heap::ExternalStringTable::Contains(String string) { |
| for (size_t i = 0; i < young_strings_.size(); ++i) { |
| if (young_strings_[i] == string) return true; |
| } |
| for (size_t i = 0; i < old_strings_.size(); ++i) { |
| if (old_strings_[i] == string) return true; |
| } |
| return false; |
| } |
| |
| void Heap::UpdateExternalString(String string, size_t old_payload, |
| size_t new_payload) { |
| DCHECK(string.IsExternalString()); |
| Page* page = Page::FromHeapObject(string); |
| |
| if (old_payload > new_payload) { |
| page->DecrementExternalBackingStoreBytes( |
| ExternalBackingStoreType::kExternalString, old_payload - new_payload); |
| } else { |
| page->IncrementExternalBackingStoreBytes( |
| ExternalBackingStoreType::kExternalString, new_payload - old_payload); |
| } |
| } |
| |
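| // Updater for young external string table entries: finalizes unreachable |
| // external strings, follows forwarding addresses, drops strings that were |
| // internalized into thin strings, and moves the external backing store byte |
| // counts to the string's new page. Returns the updated string, or a null |
| // String if the entry should be removed from the table. |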
| String Heap::UpdateYoungReferenceInExternalStringTableEntry(Heap* heap, |
| FullObjectSlot p) { |
| HeapObject obj = HeapObject::cast(*p); |
| MapWord first_word = obj.map_word(); |
| |
| String new_string; |
| |
| if (InFromPage(obj)) { |
| if (!first_word.IsForwardingAddress()) { |
| // Unreachable external string can be finalized. |
| String string = String::cast(obj); |
| if (!string.IsExternalString()) { |
| // Original external string has been internalized. |
| DCHECK(string.IsThinString()); |
| return String(); |
| } |
| heap->FinalizeExternalString(string); |
| return String(); |
| } |
| new_string = String::cast(first_word.ToForwardingAddress()); |
| } else { |
| new_string = String::cast(obj); |
| } |
| |
| // String is still reachable. |
| if (new_string.IsThinString()) { |
| // Filter ThinStrings out of the external string table. |
| return String(); |
| } else if (new_string.IsExternalString()) { |
| MemoryChunk::MoveExternalBackingStoreBytes( |
| ExternalBackingStoreType::kExternalString, |
| Page::FromAddress((*p).ptr()), Page::FromHeapObject(new_string), |
| ExternalString::cast(new_string).ExternalPayloadSize()); |
| return new_string; |
| } |
| |
| // Internalization can replace external strings with non-external strings. |
| return new_string.IsExternalString() ? new_string : String(); |
| } |
| |
| void Heap::ExternalStringTable::VerifyYoung() { |
| #ifdef DEBUG |
| std::set<String> visited_map; |
| std::map<MemoryChunk*, size_t> size_map; |
| ExternalBackingStoreType type = ExternalBackingStoreType::kExternalString; |
| for (size_t i = 0; i < young_strings_.size(); ++i) { |
| String obj = String::cast(young_strings_[i]); |
| MemoryChunk* mc = MemoryChunk::FromHeapObject(obj); |
| DCHECK(mc->InYoungGeneration()); |
| DCHECK(heap_->InYoungGeneration(obj)); |
| DCHECK(!obj.IsTheHole(heap_->isolate())); |
| DCHECK(obj.IsExternalString()); |
| // Note: we can have repeated elements in the table. |
| DCHECK_EQ(0, visited_map.count(obj)); |
| visited_map.insert(obj); |
| size_map[mc] += ExternalString::cast(obj).ExternalPayloadSize(); |
| } |
| for (std::map<MemoryChunk*, size_t>::iterator it = size_map.begin(); |
| it != size_map.end(); it++) |
| DCHECK_EQ(it->first->ExternalBackingStoreBytes(type), it->second); |
| #endif |
| } |
| |
| void Heap::ExternalStringTable::Verify() { |
| #ifdef DEBUG |
| std::set<String> visited_map; |
| std::map<MemoryChunk*, size_t> size_map; |
| ExternalBackingStoreType type = ExternalBackingStoreType::kExternalString; |
| VerifyYoung(); |
| for (size_t i = 0; i < old_strings_.size(); ++i) { |
| String obj = String::cast(old_strings_[i]); |
| MemoryChunk* mc = MemoryChunk::FromHeapObject(obj); |
| DCHECK(!mc->InYoungGeneration()); |
| DCHECK(!heap_->InYoungGeneration(obj)); |
| DCHECK(!obj.IsTheHole(heap_->isolate())); |
| DCHECK(obj.IsExternalString()); |
| // Note: we can have repeated elements in the table. |
| DCHECK_EQ(0, visited_map.count(obj)); |
| visited_map.insert(obj); |
| size_map[mc] += ExternalString::cast(obj).ExternalPayloadSize(); |
| } |
| for (std::map<MemoryChunk*, size_t>::iterator it = size_map.begin(); |
| it != size_map.end(); it++) |
| DCHECK_EQ(it->first->ExternalBackingStoreBytes(type), it->second); |
| #endif |
| } |
| |
| void Heap::ExternalStringTable::UpdateYoungReferences( |
| Heap::ExternalStringTableUpdaterCallback updater_func) { |
| if (young_strings_.empty()) return; |
| |
| FullObjectSlot start(&young_strings_[0]); |
| FullObjectSlot end(&young_strings_[young_strings_.size()]); |
| FullObjectSlot last = start; |
| |
| for (FullObjectSlot p = start; p < end; ++p) { |
| String target = updater_func(heap_, p); |
| |
| if (target.is_null()) continue; |
| |
| DCHECK(target.IsExternalString()); |
| |
| if (InYoungGeneration(target)) { |
| // String is still in new space. Update the table entry. |
| last.store(target); |
| ++last; |
| } else { |
| // String got promoted. Move it to the old string list. |
| old_strings_.push_back(target); |
| } |
| } |
| |
| DCHECK(last <= end); |
| young_strings_.resize(last - start); |
| #ifdef VERIFY_HEAP |
| if (FLAG_verify_heap) { |
| VerifyYoung(); |
| } |
| #endif |
| } |
| |
| void Heap::ExternalStringTable::PromoteYoung() { |
| old_strings_.reserve(old_strings_.size() + young_strings_.size()); |
| std::move(std::begin(young_strings_), std::end(young_strings_), |
| std::back_inserter(old_strings_)); |
| young_strings_.clear(); |
| } |
| |
| void Heap::ExternalStringTable::IterateYoung(RootVisitor* v) { |
| if (!young_strings_.empty()) { |
| v->VisitRootPointers( |
| Root::kExternalStringsTable, nullptr, |
| FullObjectSlot(young_strings_.data()), |
| FullObjectSlot(young_strings_.data() + young_strings_.size())); |
| } |
| } |
| |
| void Heap::ExternalStringTable::IterateAll(RootVisitor* v) { |
| IterateYoung(v); |
| if (!old_strings_.empty()) { |
| v->VisitRootPointers( |
| Root::kExternalStringsTable, nullptr, |
| FullObjectSlot(old_strings_.data()), |
| FullObjectSlot(old_strings_.data() + old_strings_.size())); |
| } |
| } |
| |
| void Heap::UpdateYoungReferencesInExternalStringTable( |
| ExternalStringTableUpdaterCallback updater_func) { |
| external_string_table_.UpdateYoungReferences(updater_func); |
| } |
| |
| void Heap::ExternalStringTable::UpdateReferences( |
| Heap::ExternalStringTableUpdaterCallback updater_func) { |
| if (old_strings_.size() > 0) { |
| FullObjectSlot start(old_strings_.data()); |
| FullObjectSlot end(old_strings_.data() + old_strings_.size()); |
| for (FullObjectSlot p = start; p < end; ++p) |
| p.store(updater_func(heap_, p)); |
| } |
| |
| UpdateYoungReferences(updater_func); |
| } |
| |
| void Heap::UpdateReferencesInExternalStringTable( |
| ExternalStringTableUpdaterCallback updater_func) { |
| external_string_table_.UpdateReferences(updater_func); |
| } |
| |
| |
| void Heap::ProcessAllWeakReferences(WeakObjectRetainer* retainer) { |
| ProcessNativeContexts(retainer); |
| ProcessAllocationSites(retainer); |
| } |
| |
| |
| void Heap::ProcessYoungWeakReferences(WeakObjectRetainer* retainer) { |
| ProcessNativeContexts(retainer); |
| } |
| |
| |
| void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) { |
| Object head = VisitWeakList<Context>(this, native_contexts_list(), retainer); |
| // Update the head of the list of contexts. |
| set_native_contexts_list(head); |
| } |
| |
| |
| void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) { |
| Object allocation_site_obj = |
| VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer); |
| set_allocation_sites_list(allocation_site_obj); |
| } |
| |
| void Heap::ProcessWeakListRoots(WeakObjectRetainer* retainer) { |
| set_native_contexts_list(retainer->RetainAs(native_contexts_list())); |
| set_allocation_sites_list(retainer->RetainAs(allocation_sites_list())); |
| } |
| |
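| // Walks the given allocation site list, including nested sites, and invokes |
| // |visitor| on each site. Allocation is not allowed during the walk. |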
| void Heap::ForeachAllocationSite( |
| Object list, const std::function<void(AllocationSite)>& visitor) { |
| DisallowHeapAllocation disallow_heap_allocation; |
| Object current = list; |
| while (current.IsAllocationSite()) { |
| AllocationSite site = AllocationSite::cast(current); |
| visitor(site); |
| Object current_nested = site.nested_site(); |
| while (current_nested.IsAllocationSite()) { |
| AllocationSite nested_site = AllocationSite::cast(current_nested); |
| visitor(nested_site); |
| current_nested = nested_site.nested_site(); |
| } |
| current = site.weak_next(); |
| } |
| } |
| |
| void Heap::ResetAllAllocationSitesDependentCode(AllocationType allocation) { |
| DisallowHeapAllocation no_allocation_scope; |
| bool marked = false; |
| |
| ForeachAllocationSite(allocation_sites_list(), |
| [&marked, allocation, this](AllocationSite site) { |
| if (site.GetAllocationType() == allocation) { |
| site.ResetPretenureDecision(); |
| site.set_deopt_dependent_code(true); |
| marked = true; |
| RemoveAllocationSitePretenuringFeedback(site); |
| return; |
| } |
| }); |
| if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites(); |
| } |
| |
| void Heap::EvaluateOldSpaceLocalPretenuring( |
| uint64_t size_of_objects_before_gc) { |
| uint64_t size_of_objects_after_gc = SizeOfObjects(); |
| double old_generation_survival_rate = |
| (static_cast<double>(size_of_objects_after_gc) * 100) / |
| static_cast<double>(size_of_objects_before_gc); |
| |
| if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) { |
| // Too many objects died in the old generation; pretenuring of the wrong |
| // allocation sites may be the cause. We have to deoptimize all dependent |
| // code registered in the allocation sites to re-evaluate our pretenuring |
| // decisions. |
| ResetAllAllocationSitesDependentCode(AllocationType::kOld); |
| if (FLAG_trace_pretenuring) { |
| PrintF( |
| "Deopt all allocation sites dependent code due to low survival " |
| "rate in the old generation %f\n", |
| old_generation_survival_rate); |
| } |
| } |
| } |
| |
| |
| void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) { |
| DisallowHeapAllocation no_allocation; |
| // All external strings are listed in the external string table. |
| |
| class ExternalStringTableVisitorAdapter : public RootVisitor { |
| public: |
| explicit ExternalStringTableVisitorAdapter( |
| Isolate* isolate, v8::ExternalResourceVisitor* visitor) |
| : isolate_(isolate), visitor_(visitor) {} |
| void VisitRootPointers(Root root, const char* description, |
| FullObjectSlot start, FullObjectSlot end) override { |
| for (FullObjectSlot p = start; p < end; ++p) { |
| DCHECK((*p).IsExternalString()); |
| visitor_->VisitExternalString( |
| Utils::ToLocal(Handle<String>(String::cast(*p), isolate_))); |
| } |
| } |
| |
| private: |
| Isolate* isolate_; |
| v8::ExternalResourceVisitor* visitor_; |
| } external_string_table_visitor(isolate(), visitor); |
| |
| external_string_table_.IterateAll(&external_string_table_visitor); |
| } |
| |
| STATIC_ASSERT(IsAligned(FixedDoubleArray::kHeaderSize, kDoubleAlignment)); |
| |
| #ifdef V8_COMPRESS_POINTERS |
| // TODO(ishell, v8:8875): When pointer compression is enabled the kHeaderSize |
| // is only kTaggedSize aligned but we can keep using unaligned access since |
| // both x64 and arm64 architectures (where pointer compression is supported) |
| // allow unaligned access to doubles. |
| STATIC_ASSERT(IsAligned(ByteArray::kHeaderSize, kTaggedSize)); |
| #else |
| STATIC_ASSERT(IsAligned(ByteArray::kHeaderSize, kDoubleAlignment)); |
| #endif |
| |
| #ifdef V8_HOST_ARCH_32_BIT |
| // NOLINTNEXTLINE(runtime/references) (false positive) |
| STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) == kTaggedSize); |
| #endif |
| |
| |
| int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) { |
| switch (alignment) { |
| case kWordAligned: |
| return 0; |
| case kDoubleAligned: |
| case kDoubleUnaligned: |
| return kDoubleSize - kTaggedSize; |
| default: |
| UNREACHABLE(); |
| } |
| return 0; |
| } |
| |
| |
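| // Returns the number of filler bytes needed in front of |address| to satisfy |
| // |alignment|, or 0 if the address already has the required (un)alignment. |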
| int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) { |
| if (alignment == kDoubleAligned && (address & kDoubleAlignmentMask) != 0) |
| return kTaggedSize; |
| if (alignment == kDoubleUnaligned && (address & kDoubleAlignmentMask) == 0) |
| return kDoubleSize - kTaggedSize; // No fill if double is always aligned. |
| return 0; |
| } |
| |
| size_t Heap::GetCodeRangeReservedAreaSize() { |
| return kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize(); |
| } |
| |
| HeapObject Heap::PrecedeWithFiller(HeapObject object, int filler_size) { |
| CreateFillerObjectAt(object.address(), filler_size, ClearRecordedSlots::kNo); |
| return HeapObject::FromAddress(object.address() + filler_size); |
| } |
| |
| HeapObject Heap::AlignWithFiller(HeapObject object, int object_size, |
| int allocation_size, |
| AllocationAlignment alignment) { |
| int filler_size = allocation_size - object_size; |
| DCHECK_LT(0, filler_size); |
| int pre_filler = GetFillToAlign(object.address(), alignment); |
| if (pre_filler) { |
| object = PrecedeWithFiller(object, pre_filler); |
| filler_size -= pre_filler; |
| } |
| if (filler_size) { |
| CreateFillerObjectAt(object.address() + object_size, filler_size, |
| ClearRecordedSlots::kNo); |
| } |
| return object; |
| } |
| |
| void Heap::RegisterNewArrayBuffer(JSArrayBuffer buffer) { |
| ArrayBufferTracker::RegisterNew(this, buffer); |
| } |
| |
| void Heap::UnregisterArrayBuffer(JSArrayBuffer buffer) { |
| ArrayBufferTracker::Unregister(this, buffer); |
| } |
| |
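| // While the old generation size has not been configured yet, shrinks the old |
| // generation (and global) allocation limit after each GC based on the |
| // average survival ratio, until the computed limit stops decreasing. |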
| void Heap::ConfigureInitialOldGenerationSize() { |
| if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) { |
| const size_t minimum_growing_step = |
| MemoryController<V8HeapTrait>::MinimumAllocationLimitGrowingStep( |
| CurrentHeapGrowingMode()); |
| const size_t new_old_generation_allocation_limit = |
| Max(OldGenerationSizeOfObjects() + minimum_growing_step, |
| static_cast<size_t>( |
| static_cast<double>(old_generation_allocation_limit_) * |
| (tracer()->AverageSurvivalRatio() / 100))); |
| if (new_old_generation_allocation_limit < |
| old_generation_allocation_limit_) { |
| old_generation_allocation_limit_ = new_old_generation_allocation_limit; |
| } else { |
| old_generation_size_configured_ = true; |
| } |
| if (UseGlobalMemoryScheduling()) { |
| const size_t new_global_memory_limit = Max( |
| GlobalSizeOfObjects() + minimum_growing_step, |
| static_cast<size_t>(static_cast<double>(global_allocation_limit_) * |
| (tracer()->AverageSurvivalRatio() / 100))); |
| if (new_global_memory_limit < global_allocation_limit_) { |
| global_allocation_limit_ = new_global_memory_limit; |
| } |
| } |
| } |
| } |
| |
| void Heap::FlushNumberStringCache() { |
| // Flush the number to string cache. |
| int len = number_string_cache().length(); |
| for (int i = 0; i < len; i++) { |
| number_string_cache().set_undefined(i); |
| } |
| } |
| |
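| // Initializes the memory at [addr, addr + size) as a filler: a one-pointer |
| // or two-pointer filler for small sizes, or a FreeSpace object otherwise. |
| // Optionally clears the freed memory and the recorded slots in the range, |
| // and returns the filler object. |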
| HeapObject Heap::CreateFillerObjectAt(Address addr, int size, |
| ClearRecordedSlots clear_slots_mode, |
| ClearFreedMemoryMode clear_memory_mode) { |
| if (size == 0) return HeapObject(); |
| HeapObject filler = HeapObject::FromAddress(addr); |
| if (size == kTaggedSize) { |
| filler.set_map_after_allocation( |
| Map::unchecked_cast(isolate()->root(RootIndex::kOnePointerFillerMap)), |
| SKIP_WRITE_BARRIER); |
| } else if (size == 2 * kTaggedSize) { |
| filler.set_map_after_allocation( |
| Map::unchecked_cast(isolate()->root(RootIndex::kTwoPointerFillerMap)), |
| SKIP_WRITE_BARRIER); |
| if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) { |
| Memory<Tagged_t>(addr + kTaggedSize) = |
| static_cast<Tagged_t>(kClearedFreeMemoryValue); |
| } |
| } else { |
| DCHECK_GT(size, 2 * kTaggedSize); |
| filler.set_map_after_allocation( |
| Map::unchecked_cast(isolate()->root(RootIndex::kFreeSpaceMap)), |
| SKIP_WRITE_BARRIER); |
| FreeSpace::cast(filler).relaxed_write_size(size); |
| if (clear_memory_mode == ClearFreedMemoryMode::kClearFreedMemory) { |
| MemsetTagged(ObjectSlot(addr) + 2, Object(kClearedFreeMemoryValue), |
| (size / kTaggedSize) - 2); |
| } |
| } |
| if (clear_slots_mode == ClearRecordedSlots::kYes) { |
| ClearRecordedSlotRange(addr, addr + size); |
| } |
| |
| // At this point, we may be deserializing the heap from a snapshot, in which |
| // case none of the maps have been created yet and the filler's map slot |
| // still contains a null address. |
| DCHECK((filler.map_slot().contains_value(kNullAddress) && |
| !deserialization_complete_) || |
| filler.map().IsMap()); |
| return filler; |
| } |
| |
| bool Heap::CanMoveObjectStart(HeapObject object) { |
| if (!FLAG_move_object_start) return false; |
|