| // Copyright 2012 the V8 project authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style license that can be |
| // found in the LICENSE file. |
| |
| #include "src/heap/heap.h" |
| |
| #include <unordered_map> |
| #include <unordered_set> |
| |
| #include "src/accessors.h" |
| #include "src/api.h" |
| #include "src/assembler-inl.h" |
| #include "src/ast/context-slot-cache.h" |
| #include "src/base/bits.h" |
| #include "src/base/once.h" |
| #include "src/base/utils/random-number-generator.h" |
| #include "src/bootstrapper.h" |
| #include "src/code-stubs.h" |
| #include "src/compilation-cache.h" |
| #include "src/compiler-dispatcher/optimizing-compile-dispatcher.h" |
| #include "src/conversions.h" |
| #include "src/debug/debug.h" |
| #include "src/deoptimizer.h" |
| #include "src/feedback-vector.h" |
| #include "src/global-handles.h" |
| #include "src/heap/array-buffer-collector.h" |
| #include "src/heap/array-buffer-tracker-inl.h" |
| #include "src/heap/barrier.h" |
| #include "src/heap/code-stats.h" |
| #include "src/heap/concurrent-marking.h" |
| #include "src/heap/embedder-tracing.h" |
| #include "src/heap/gc-idle-time-handler.h" |
| #include "src/heap/gc-tracer.h" |
| #include "src/heap/incremental-marking.h" |
| #include "src/heap/item-parallel-job.h" |
| #include "src/heap/mark-compact-inl.h" |
| #include "src/heap/mark-compact.h" |
| #include "src/heap/memory-reducer.h" |
| #include "src/heap/object-stats.h" |
| #include "src/heap/objects-visiting-inl.h" |
| #include "src/heap/objects-visiting.h" |
| #include "src/heap/remembered-set.h" |
| #include "src/heap/scavenge-job.h" |
| #include "src/heap/scavenger-inl.h" |
| #include "src/heap/store-buffer.h" |
| #include "src/heap/stress-marking-observer.h" |
| #include "src/heap/stress-scavenge-observer.h" |
| #include "src/heap/sweeper.h" |
| #include "src/interpreter/interpreter.h" |
| #include "src/objects/data-handler.h" |
| #include "src/objects/shared-function-info.h" |
| #include "src/regexp/jsregexp.h" |
| #include "src/runtime-profiler.h" |
| #include "src/snapshot/natives.h" |
| #include "src/snapshot/serializer-common.h" |
| #include "src/snapshot/snapshot.h" |
| #include "src/tracing/trace-event.h" |
| #include "src/trap-handler/trap-handler.h" |
| #include "src/unicode-inl.h" |
| #include "src/utils-inl.h" |
| #include "src/utils.h" |
| #include "src/v8.h" |
| #include "src/vm-state-inl.h" |
| |
| // Has to be the last include (doesn't have include guards): |
| #include "src/objects/object-macros.h" |
| |
| namespace v8 { |
| namespace internal { |
| |
| void Heap::SetArgumentsAdaptorDeoptPCOffset(int pc_offset) { |
| DCHECK_EQ(Smi::kZero, arguments_adaptor_deopt_pc_offset()); |
| set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset)); |
| } |
| |
| void Heap::SetConstructStubCreateDeoptPCOffset(int pc_offset) { |
| // TODO(tebbi): Remove second half of DCHECK once |
| // FLAG_harmony_restrict_constructor_return is gone. |
| DCHECK(construct_stub_create_deopt_pc_offset() == Smi::kZero || |
| construct_stub_create_deopt_pc_offset() == Smi::FromInt(pc_offset)); |
| set_construct_stub_create_deopt_pc_offset(Smi::FromInt(pc_offset)); |
| } |
| |
| void Heap::SetConstructStubInvokeDeoptPCOffset(int pc_offset) { |
| // TODO(tebbi): Remove second half of DCHECK once |
| // FLAG_harmony_restrict_constructor_return is gone. |
| DCHECK(construct_stub_invoke_deopt_pc_offset() == Smi::kZero || |
| construct_stub_invoke_deopt_pc_offset() == Smi::FromInt(pc_offset)); |
| set_construct_stub_invoke_deopt_pc_offset(Smi::FromInt(pc_offset)); |
| } |
| |
| void Heap::SetInterpreterEntryReturnPCOffset(int pc_offset) { |
| DCHECK_EQ(Smi::kZero, interpreter_entry_return_pc_offset()); |
| set_interpreter_entry_return_pc_offset(Smi::FromInt(pc_offset)); |
| } |
| |
| void Heap::SetSerializedObjects(FixedArray* objects) { |
| DCHECK(isolate()->serializer_enabled()); |
| set_serialized_objects(objects); |
| } |
| |
| void Heap::SetSerializedGlobalProxySizes(FixedArray* sizes) { |
| DCHECK(isolate()->serializer_enabled()); |
| set_serialized_global_proxy_sizes(sizes); |
| } |
| |
| bool Heap::GCCallbackTuple::operator==( |
| const Heap::GCCallbackTuple& other) const { |
| return other.callback == callback && other.data == data; |
| } |
| |
| Heap::GCCallbackTuple& Heap::GCCallbackTuple::operator=( |
| const Heap::GCCallbackTuple& other) { |
| callback = other.callback; |
| gc_type = other.gc_type; |
| data = other.data; |
| return *this; |
| } |
| |
| struct Heap::StrongRootsList { |
| Object** start; |
| Object** end; |
| StrongRootsList* next; |
| }; |
| |
| class IdleScavengeObserver : public AllocationObserver { |
| public: |
| IdleScavengeObserver(Heap& heap, intptr_t step_size) |
| : AllocationObserver(step_size), heap_(heap) {} |
| |
| void Step(int bytes_allocated, Address, size_t) override { |
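    // Ask the heap to schedule an idle-time scavenge task once enough bytes
    // have been allocated in the new space.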
| heap_.ScheduleIdleScavengeIfNeeded(bytes_allocated); |
| } |
| |
| private: |
| Heap& heap_; |
| }; |
| |
| Heap::Heap() |
| : external_memory_(0), |
| external_memory_limit_(kExternalAllocationSoftLimit), |
| external_memory_at_last_mark_compact_(0), |
| isolate_(nullptr), |
| code_range_size_(0), |
| // semispace_size_ should be a power of 2 and old_generation_size_ should |
| // be a multiple of Page::kPageSize. |
| max_semi_space_size_(8 * (kPointerSize / 4) * MB), |
| initial_semispace_size_(kMinSemiSpaceSizeInKB * KB), |
| max_old_generation_size_(700ul * (kPointerSize / 4) * MB), |
| initial_max_old_generation_size_(max_old_generation_size_), |
| initial_old_generation_size_(max_old_generation_size_ / |
| kInitalOldGenerationLimitFactor), |
| old_generation_size_configured_(false), |
| // Variables set based on semispace_size_ and old_generation_size_ in |
| // ConfigureHeap. |
      // Will be 4 * reserved_semispace_size_ to ensure that the young
      // generation can be aligned to its size.
| maximum_committed_(0), |
| survived_since_last_expansion_(0), |
| survived_last_scavenge_(0), |
| always_allocate_scope_count_(0), |
| memory_pressure_level_(MemoryPressureLevel::kNone), |
| out_of_memory_callback_(nullptr), |
| out_of_memory_callback_data_(nullptr), |
| contexts_disposed_(0), |
| number_of_disposed_maps_(0), |
| new_space_(nullptr), |
| old_space_(nullptr), |
| code_space_(nullptr), |
| map_space_(nullptr), |
| lo_space_(nullptr), |
| write_protect_code_memory_(false), |
| code_space_memory_modification_scope_depth_(0), |
| gc_state_(NOT_IN_GC), |
| gc_post_processing_depth_(0), |
| allocations_count_(0), |
| raw_allocations_hash_(0), |
| stress_marking_observer_(nullptr), |
| stress_scavenge_observer_(nullptr), |
| max_marking_limit_reached_(0.0), |
| ms_count_(0), |
| gc_count_(0), |
| mmap_region_base_(0), |
| remembered_unmapped_pages_index_(0), |
| old_generation_allocation_limit_(initial_old_generation_size_), |
| inline_allocation_disabled_(false), |
| tracer_(nullptr), |
| promoted_objects_size_(0), |
| promotion_ratio_(0), |
| semi_space_copied_object_size_(0), |
| previous_semi_space_copied_object_size_(0), |
| semi_space_copied_rate_(0), |
| nodes_died_in_new_space_(0), |
| nodes_copied_in_new_space_(0), |
| nodes_promoted_(0), |
| maximum_size_scavenges_(0), |
| last_idle_notification_time_(0.0), |
| last_gc_time_(0.0), |
| mark_compact_collector_(nullptr), |
| minor_mark_compact_collector_(nullptr), |
| array_buffer_collector_(nullptr), |
| memory_allocator_(nullptr), |
| store_buffer_(nullptr), |
| incremental_marking_(nullptr), |
| concurrent_marking_(nullptr), |
| gc_idle_time_handler_(nullptr), |
| memory_reducer_(nullptr), |
| live_object_stats_(nullptr), |
| dead_object_stats_(nullptr), |
| scavenge_job_(nullptr), |
| parallel_scavenge_semaphore_(0), |
| idle_scavenge_observer_(nullptr), |
| new_space_allocation_counter_(0), |
| old_generation_allocation_counter_at_last_gc_(0), |
| old_generation_size_at_last_gc_(0), |
| global_pretenuring_feedback_(kInitialFeedbackCapacity), |
| is_marking_flag_(false), |
| ring_buffer_full_(false), |
| ring_buffer_end_(0), |
| configured_(false), |
| current_gc_flags_(Heap::kNoGCFlags), |
| current_gc_callback_flags_(GCCallbackFlags::kNoGCCallbackFlags), |
| external_string_table_(this), |
| gc_callbacks_depth_(0), |
| deserialization_complete_(false), |
| strong_roots_list_(nullptr), |
| heap_iterator_depth_(0), |
| local_embedder_heap_tracer_(nullptr), |
| fast_promotion_mode_(false), |
| use_tasks_(true), |
| force_oom_(false), |
| delay_sweeper_tasks_for_testing_(false), |
| pending_layout_change_object_(nullptr) |
| #ifdef V8_ENABLE_ALLOCATION_TIMEOUT |
| , |
| allocation_timeout_(0) |
| #endif // V8_ENABLE_ALLOCATION_TIMEOUT |
| { |
  // Ensure max_old_generation_size_ is a multiple of Page::kPageSize.
| DCHECK_EQ(0, max_old_generation_size_ & (Page::kPageSize - 1)); |
| |
| memset(roots_, 0, sizeof(roots_[0]) * kRootListLength); |
| set_native_contexts_list(nullptr); |
| set_allocation_sites_list(Smi::kZero); |
| set_encountered_weak_collections(Smi::kZero); |
  // Put a dummy entry in the remembered pages so we can find the list in the
  // minidump even if there are no real unmapped pages.
| RememberUnmappedPage(nullptr, false); |
| } |
| |
| size_t Heap::Capacity() { |
| if (!HasBeenSetUp()) return 0; |
| |
| return new_space_->Capacity() + OldGenerationCapacity(); |
| } |
| |
| size_t Heap::OldGenerationCapacity() { |
| if (!HasBeenSetUp()) return 0; |
| return old_space_->Capacity() + code_space_->Capacity() + |
| map_space_->Capacity() + lo_space_->SizeOfObjects(); |
| } |
| |
| size_t Heap::CommittedOldGenerationMemory() { |
| if (!HasBeenSetUp()) return 0; |
| |
| return old_space_->CommittedMemory() + code_space_->CommittedMemory() + |
| map_space_->CommittedMemory() + lo_space_->Size(); |
| } |
| |
| size_t Heap::CommittedMemory() { |
| if (!HasBeenSetUp()) return 0; |
| |
| return new_space_->CommittedMemory() + CommittedOldGenerationMemory(); |
| } |
| |
| |
| size_t Heap::CommittedPhysicalMemory() { |
| if (!HasBeenSetUp()) return 0; |
| |
| return new_space_->CommittedPhysicalMemory() + |
| old_space_->CommittedPhysicalMemory() + |
| code_space_->CommittedPhysicalMemory() + |
| map_space_->CommittedPhysicalMemory() + |
| lo_space_->CommittedPhysicalMemory(); |
| } |
| |
| size_t Heap::CommittedMemoryExecutable() { |
| if (!HasBeenSetUp()) return 0; |
| |
| return static_cast<size_t>(memory_allocator()->SizeExecutable()); |
| } |
| |
| |
| void Heap::UpdateMaximumCommitted() { |
| if (!HasBeenSetUp()) return; |
| |
| const size_t current_committed_memory = CommittedMemory(); |
| if (current_committed_memory > maximum_committed_) { |
| maximum_committed_ = current_committed_memory; |
| } |
| } |
| |
| size_t Heap::Available() { |
| if (!HasBeenSetUp()) return 0; |
| |
| size_t total = 0; |
| |
| for (SpaceIterator it(this); it.has_next();) { |
| total += it.next()->Available(); |
| } |
| return total; |
| } |
| |
| bool Heap::CanExpandOldGeneration(size_t size) { |
| if (force_oom_) return false; |
| if (OldGenerationCapacity() + size > MaxOldGenerationSize()) return false; |
  // The OldGenerationCapacity does not account for compaction spaces used
  // during evacuation. Ensure that expanding the old generation does not push
  // the total allocated memory size over the maximum heap size.
| return memory_allocator()->Size() + size <= MaxReserved(); |
| } |
| |
| bool Heap::HasBeenSetUp() { |
| return old_space_ != nullptr && code_space_ != nullptr && |
| map_space_ != nullptr && lo_space_ != nullptr; |
| } |
| |
| |
| GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space, |
| const char** reason) { |
| // Is global GC requested? |
| if (space != NEW_SPACE) { |
| isolate_->counters()->gc_compactor_caused_by_request()->Increment(); |
| *reason = "GC in old space requested"; |
| return MARK_COMPACTOR; |
| } |
| |
| if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) { |
| *reason = "GC in old space forced by flags"; |
| return MARK_COMPACTOR; |
| } |
| |
| if (incremental_marking()->NeedsFinalization() && |
| AllocationLimitOvershotByLargeMargin()) { |
| *reason = "Incremental marking needs finalization"; |
| return MARK_COMPACTOR; |
| } |
| |
| // Over-estimate the new space size using capacity to allow some slack. |
| if (!CanExpandOldGeneration(new_space_->TotalCapacity())) { |
| isolate_->counters() |
| ->gc_compactor_caused_by_oldspace_exhaustion() |
| ->Increment(); |
| *reason = "scavenge might not succeed"; |
| return MARK_COMPACTOR; |
| } |
| |
| // Default |
| *reason = nullptr; |
| return YoungGenerationCollector(); |
| } |
| |
| void Heap::SetGCState(HeapState state) { |
| gc_state_ = state; |
| } |
| |
| void Heap::PrintShortHeapStatistics() { |
| if (!FLAG_trace_gc_verbose) return; |
| PrintIsolate(isolate_, "Memory allocator, used: %6" PRIuS |
| " KB," |
| " available: %6" PRIuS " KB\n", |
| memory_allocator()->Size() / KB, |
| memory_allocator()->Available() / KB); |
| PrintIsolate(isolate_, "New space, used: %6" PRIuS |
| " KB" |
| ", available: %6" PRIuS |
| " KB" |
| ", committed: %6" PRIuS " KB\n", |
| new_space_->Size() / KB, new_space_->Available() / KB, |
| new_space_->CommittedMemory() / KB); |
| PrintIsolate(isolate_, "Old space, used: %6" PRIuS |
| " KB" |
| ", available: %6" PRIuS |
| " KB" |
| ", committed: %6" PRIuS " KB\n", |
| old_space_->SizeOfObjects() / KB, old_space_->Available() / KB, |
| old_space_->CommittedMemory() / KB); |
| PrintIsolate(isolate_, "Code space, used: %6" PRIuS |
| " KB" |
| ", available: %6" PRIuS |
| " KB" |
| ", committed: %6" PRIuS "KB\n", |
| code_space_->SizeOfObjects() / KB, code_space_->Available() / KB, |
| code_space_->CommittedMemory() / KB); |
| PrintIsolate(isolate_, "Map space, used: %6" PRIuS |
| " KB" |
| ", available: %6" PRIuS |
| " KB" |
| ", committed: %6" PRIuS " KB\n", |
| map_space_->SizeOfObjects() / KB, map_space_->Available() / KB, |
| map_space_->CommittedMemory() / KB); |
| PrintIsolate(isolate_, "Large object space, used: %6" PRIuS |
| " KB" |
| ", available: %6" PRIuS |
| " KB" |
| ", committed: %6" PRIuS " KB\n", |
| lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB, |
| lo_space_->CommittedMemory() / KB); |
| PrintIsolate(isolate_, "All spaces, used: %6" PRIuS |
| " KB" |
| ", available: %6" PRIuS |
| " KB" |
| ", committed: %6" PRIuS "KB\n", |
| this->SizeOfObjects() / KB, this->Available() / KB, |
| this->CommittedMemory() / KB); |
| PrintIsolate(isolate_, "External memory reported: %6" PRId64 " KB\n", |
| external_memory_ / KB); |
| PrintIsolate(isolate_, "External memory global %zu KB\n", |
| external_memory_callback_() / KB); |
| PrintIsolate(isolate_, "Total time spent in GC : %.1f ms\n", |
| total_gc_time_ms_); |
| } |
| |
| void Heap::ReportStatisticsAfterGC() { |
| for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount); |
| ++i) { |
| int count = deferred_counters_[i]; |
| deferred_counters_[i] = 0; |
| while (count > 0) { |
| count--; |
| isolate()->CountUsage(static_cast<v8::Isolate::UseCounterFeature>(i)); |
| } |
| } |
| } |
| |
| void Heap::AddRetainingPathTarget(Handle<HeapObject> object, |
| RetainingPathOption option) { |
| if (!FLAG_track_retaining_path) { |
    PrintF("Retaining path tracking requires --track-retaining-path\n");
| } else { |
| int index = 0; |
| Handle<WeakFixedArray> array = WeakFixedArray::Add( |
| handle(retaining_path_targets(), isolate()), object, &index); |
| set_retaining_path_targets(*array); |
| retaining_path_target_option_[index] = option; |
| } |
| } |
| |
| bool Heap::IsRetainingPathTarget(HeapObject* object, |
| RetainingPathOption* option) { |
| if (!retaining_path_targets()->IsWeakFixedArray()) return false; |
| WeakFixedArray* targets = WeakFixedArray::cast(retaining_path_targets()); |
| int length = targets->Length(); |
| for (int i = 0; i < length; i++) { |
| if (targets->Get(i) == object) { |
| DCHECK(retaining_path_target_option_.count(i)); |
| *option = retaining_path_target_option_[i]; |
| return true; |
| } |
| } |
| return false; |
| } |
| |
| namespace { |
| const char* RootToString(Root root) { |
| switch (root) { |
| #define ROOT_CASE(root_id, ignore, description) \ |
| case Root::root_id: \ |
| return description; |
| ROOT_ID_LIST(ROOT_CASE) |
| #undef ROOT_CASE |
| case Root::kCodeFlusher: |
| return "(Code flusher)"; |
| case Root::kPartialSnapshotCache: |
| return "(Partial snapshot cache)"; |
| case Root::kWeakCollections: |
| return "(Weak collections)"; |
| case Root::kWrapperTracing: |
| return "(Wrapper tracing)"; |
| case Root::kUnknown: |
| return "(Unknown)"; |
| } |
| UNREACHABLE(); |
| return nullptr; |
| } |
| } // namespace |
| |
| void Heap::PrintRetainingPath(HeapObject* target, RetainingPathOption option) { |
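  // Walk the recorded retainer chain from |target| up to a GC root, preferring
  // the ephemeral retainer chain when the target is tracked with
  // kTrackEphemeralPath, and print each object along the way.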
| PrintF("\n\n\n"); |
| PrintF("#################################################\n"); |
| PrintF("Retaining path for %p:\n", static_cast<void*>(target)); |
| HeapObject* object = target; |
| std::vector<std::pair<HeapObject*, bool>> retaining_path; |
| Root root = Root::kUnknown; |
| bool ephemeral = false; |
| while (true) { |
| retaining_path.push_back(std::make_pair(object, ephemeral)); |
| if (option == RetainingPathOption::kTrackEphemeralPath && |
| ephemeral_retainer_.count(object)) { |
| object = ephemeral_retainer_[object]; |
| ephemeral = true; |
| } else if (retainer_.count(object)) { |
| object = retainer_[object]; |
| ephemeral = false; |
| } else { |
| if (retaining_root_.count(object)) { |
| root = retaining_root_[object]; |
| } |
| break; |
| } |
| } |
| int distance = static_cast<int>(retaining_path.size()); |
| for (auto node : retaining_path) { |
| HeapObject* object = node.first; |
| bool ephemeral = node.second; |
| PrintF("\n"); |
| PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n"); |
| PrintF("Distance from root %d%s: ", distance, |
| ephemeral ? " (ephemeral)" : ""); |
| object->ShortPrint(); |
| PrintF("\n"); |
| #ifdef OBJECT_PRINT |
| object->Print(); |
| PrintF("\n"); |
| #endif |
| --distance; |
| } |
| PrintF("\n"); |
| PrintF("^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n"); |
| PrintF("Root: %s\n", RootToString(root)); |
| PrintF("-------------------------------------------------\n"); |
| } |
| |
| void Heap::AddRetainer(HeapObject* retainer, HeapObject* object) { |
| if (retainer_.count(object)) return; |
| retainer_[object] = retainer; |
| RetainingPathOption option = RetainingPathOption::kDefault; |
| if (IsRetainingPathTarget(object, &option)) { |
| // Check if the retaining path was already printed in |
| // AddEphemeralRetainer(). |
| if (ephemeral_retainer_.count(object) == 0 || |
| option == RetainingPathOption::kDefault) { |
| PrintRetainingPath(object, option); |
| } |
| } |
| } |
| |
| void Heap::AddEphemeralRetainer(HeapObject* retainer, HeapObject* object) { |
| if (ephemeral_retainer_.count(object)) return; |
| ephemeral_retainer_[object] = retainer; |
| RetainingPathOption option = RetainingPathOption::kDefault; |
| if (IsRetainingPathTarget(object, &option) && |
| option == RetainingPathOption::kTrackEphemeralPath) { |
| // Check if the retaining path was already printed in AddRetainer(). |
| if (retainer_.count(object) == 0) { |
| PrintRetainingPath(object, option); |
| } |
| } |
| } |
| |
| void Heap::AddRetainingRoot(Root root, HeapObject* object) { |
| if (retaining_root_.count(object)) return; |
| retaining_root_[object] = root; |
| RetainingPathOption option = RetainingPathOption::kDefault; |
| if (IsRetainingPathTarget(object, &option)) { |
| PrintRetainingPath(object, option); |
| } |
| } |
| |
| void Heap::IncrementDeferredCount(v8::Isolate::UseCounterFeature feature) { |
| deferred_counters_[feature]++; |
| } |
| |
| bool Heap::UncommitFromSpace() { return new_space_->UncommitFromSpace(); } |
| |
| void Heap::GarbageCollectionPrologue() { |
| TRACE_GC(tracer(), GCTracer::Scope::HEAP_PROLOGUE); |
| { |
| AllowHeapAllocation for_the_first_part_of_prologue; |
| gc_count_++; |
| |
| #ifdef VERIFY_HEAP |
| if (FLAG_verify_heap) { |
| Verify(); |
| } |
| #endif |
| } |
| |
| // Reset GC statistics. |
| promoted_objects_size_ = 0; |
| previous_semi_space_copied_object_size_ = semi_space_copied_object_size_; |
| semi_space_copied_object_size_ = 0; |
| nodes_died_in_new_space_ = 0; |
| nodes_copied_in_new_space_ = 0; |
| nodes_promoted_ = 0; |
| |
| UpdateMaximumCommitted(); |
| |
| #ifdef DEBUG |
| DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC); |
| |
| if (FLAG_gc_verbose) Print(); |
| #endif // DEBUG |
| |
| if (new_space_->IsAtMaximumCapacity()) { |
| maximum_size_scavenges_++; |
| } else { |
| maximum_size_scavenges_ = 0; |
| } |
| CheckNewSpaceExpansionCriteria(); |
| UpdateNewSpaceAllocationCounter(); |
| if (FLAG_track_retaining_path) { |
| retainer_.clear(); |
| ephemeral_retainer_.clear(); |
| retaining_root_.clear(); |
| } |
| } |
| |
| size_t Heap::SizeOfObjects() { |
| size_t total = 0; |
| |
| for (SpaceIterator it(this); it.has_next();) { |
| total += it.next()->SizeOfObjects(); |
| } |
| return total; |
| } |
| |
| |
| const char* Heap::GetSpaceName(int idx) { |
| switch (idx) { |
| case NEW_SPACE: |
| return "new_space"; |
| case OLD_SPACE: |
| return "old_space"; |
| case MAP_SPACE: |
| return "map_space"; |
| case CODE_SPACE: |
| return "code_space"; |
| case LO_SPACE: |
| return "large_object_space"; |
| default: |
| UNREACHABLE(); |
| } |
| return nullptr; |
| } |
| |
| void Heap::SetRootCodeStubs(NumberDictionary* value) { |
| roots_[kCodeStubsRootIndex] = value; |
| } |
| |
| void Heap::RepairFreeListsAfterDeserialization() { |
| PagedSpaces spaces(this); |
| for (PagedSpace* space = spaces.next(); space != nullptr; |
| space = spaces.next()) { |
| space->RepairFreeListsAfterDeserialization(); |
| } |
| } |
| |
| void Heap::MergeAllocationSitePretenuringFeedback( |
| const PretenuringFeedbackMap& local_pretenuring_feedback) { |
| AllocationSite* site = nullptr; |
| for (auto& site_and_count : local_pretenuring_feedback) { |
| site = site_and_count.first; |
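    // The scavenger may have moved the site; if so, the feedback map still
    // refers to the old location, so follow the forwarding address.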
| MapWord map_word = site_and_count.first->map_word(); |
| if (map_word.IsForwardingAddress()) { |
| site = AllocationSite::cast(map_word.ToForwardingAddress()); |
| } |
| |
    // We have not validated the allocation site yet, since we have not
    // dereferenced the site while collecting the feedback.
| // This is an inlined check of AllocationMemento::IsValid. |
| if (!site->IsAllocationSite() || site->IsZombie()) continue; |
| |
| const int value = static_cast<int>(site_and_count.second); |
| DCHECK_LT(0, value); |
| if (site->IncrementMementoFoundCount(value)) { |
| // For sites in the global map the count is accessed through the site. |
| global_pretenuring_feedback_.insert(std::make_pair(site, 0)); |
| } |
| } |
| } |
| |
| void Heap::AddAllocationObserversToAllSpaces( |
| AllocationObserver* observer, AllocationObserver* new_space_observer) { |
| DCHECK(observer && new_space_observer); |
| |
| for (SpaceIterator it(this); it.has_next();) { |
| Space* space = it.next(); |
| if (space == new_space()) { |
| space->AddAllocationObserver(new_space_observer); |
| } else { |
| space->AddAllocationObserver(observer); |
| } |
| } |
| } |
| |
| void Heap::RemoveAllocationObserversFromAllSpaces( |
| AllocationObserver* observer, AllocationObserver* new_space_observer) { |
| DCHECK(observer && new_space_observer); |
| |
| for (SpaceIterator it(this); it.has_next();) { |
| Space* space = it.next(); |
| if (space == new_space()) { |
| space->RemoveAllocationObserver(new_space_observer); |
| } else { |
| space->RemoveAllocationObserver(observer); |
| } |
| } |
| } |
| |
| class Heap::SkipStoreBufferScope { |
| public: |
| explicit SkipStoreBufferScope(StoreBuffer* store_buffer) |
| : store_buffer_(store_buffer) { |
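    // Flush all pending entries into the remembered set and bypass the store
    // buffer for the duration of the GC.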
| store_buffer_->MoveAllEntriesToRememberedSet(); |
| store_buffer_->SetMode(StoreBuffer::IN_GC); |
| } |
| |
| ~SkipStoreBufferScope() { |
| DCHECK(store_buffer_->Empty()); |
| store_buffer_->SetMode(StoreBuffer::NOT_IN_GC); |
| } |
| |
| private: |
| StoreBuffer* store_buffer_; |
| }; |
| |
| namespace { |
| inline bool MakePretenureDecision( |
| AllocationSite* site, AllocationSite::PretenureDecision current_decision, |
| double ratio, bool maximum_size_scavenge) { |
| // Here we just allow state transitions from undecided or maybe tenure |
| // to don't tenure, maybe tenure, or tenure. |
| if ((current_decision == AllocationSite::kUndecided || |
| current_decision == AllocationSite::kMaybeTenure)) { |
| if (ratio >= AllocationSite::kPretenureRatio) { |
| // We just transition into tenure state when the semi-space was at |
| // maximum capacity. |
| if (maximum_size_scavenge) { |
| site->set_deopt_dependent_code(true); |
| site->set_pretenure_decision(AllocationSite::kTenure); |
| // Currently we just need to deopt when we make a state transition to |
| // tenure. |
| return true; |
| } |
| site->set_pretenure_decision(AllocationSite::kMaybeTenure); |
| } else { |
| site->set_pretenure_decision(AllocationSite::kDontTenure); |
| } |
| } |
| return false; |
| } |
| |
| inline bool DigestPretenuringFeedback(Isolate* isolate, AllocationSite* site, |
| bool maximum_size_scavenge) { |
| bool deopt = false; |
| int create_count = site->memento_create_count(); |
| int found_count = site->memento_found_count(); |
| bool minimum_mementos_created = |
| create_count >= AllocationSite::kPretenureMinimumCreated; |
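  // The found/created ratio approximates how many objects allocated at this
  // site are still referenced by a memento at GC time; it is only meaningful
  // once enough mementos have been created (or when tracing statistics).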
| double ratio = minimum_mementos_created || FLAG_trace_pretenuring_statistics |
| ? static_cast<double>(found_count) / create_count |
| : 0.0; |
| AllocationSite::PretenureDecision current_decision = |
| site->pretenure_decision(); |
| |
| if (minimum_mementos_created) { |
| deopt = MakePretenureDecision(site, current_decision, ratio, |
| maximum_size_scavenge); |
| } |
| |
| if (FLAG_trace_pretenuring_statistics) { |
| PrintIsolate(isolate, |
| "pretenuring: AllocationSite(%p): (created, found, ratio) " |
| "(%d, %d, %f) %s => %s\n", |
| static_cast<void*>(site), create_count, found_count, ratio, |
| site->PretenureDecisionName(current_decision), |
| site->PretenureDecisionName(site->pretenure_decision())); |
| } |
| |
| // Clear feedback calculation fields until the next gc. |
| site->set_memento_found_count(0); |
| site->set_memento_create_count(0); |
| return deopt; |
| } |
| } // namespace |
| |
| void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite* site) { |
| global_pretenuring_feedback_.erase(site); |
| } |
| |
| bool Heap::DeoptMaybeTenuredAllocationSites() { |
| return new_space_->IsAtMaximumCapacity() && maximum_size_scavenges_ == 0; |
| } |
| |
| void Heap::ProcessPretenuringFeedback() { |
| bool trigger_deoptimization = false; |
| if (FLAG_allocation_site_pretenuring) { |
| int tenure_decisions = 0; |
| int dont_tenure_decisions = 0; |
| int allocation_mementos_found = 0; |
| int allocation_sites = 0; |
| int active_allocation_sites = 0; |
| |
| AllocationSite* site = nullptr; |
| |
| // Step 1: Digest feedback for recorded allocation sites. |
| bool maximum_size_scavenge = MaximumSizeScavenge(); |
| for (auto& site_and_count : global_pretenuring_feedback_) { |
| allocation_sites++; |
| site = site_and_count.first; |
      // The count is always accessed through the site.
| DCHECK_EQ(0, site_and_count.second); |
| int found_count = site->memento_found_count(); |
| // An entry in the storage does not imply that the count is > 0 because |
| // allocation sites might have been reset due to too many objects dying |
| // in old space. |
| if (found_count > 0) { |
| DCHECK(site->IsAllocationSite()); |
| active_allocation_sites++; |
| allocation_mementos_found += found_count; |
| if (DigestPretenuringFeedback(isolate_, site, maximum_size_scavenge)) { |
| trigger_deoptimization = true; |
| } |
| if (site->GetPretenureMode() == TENURED) { |
| tenure_decisions++; |
| } else { |
| dont_tenure_decisions++; |
| } |
| } |
| } |
| |
| // Step 2: Deopt maybe tenured allocation sites if necessary. |
| bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites(); |
| if (deopt_maybe_tenured) { |
| Object* list_element = allocation_sites_list(); |
| while (list_element->IsAllocationSite()) { |
| site = AllocationSite::cast(list_element); |
| DCHECK(site->IsAllocationSite()); |
| allocation_sites++; |
| if (site->IsMaybeTenure()) { |
| site->set_deopt_dependent_code(true); |
| trigger_deoptimization = true; |
| } |
| list_element = site->weak_next(); |
| } |
| } |
| |
| if (trigger_deoptimization) { |
| isolate_->stack_guard()->RequestDeoptMarkedAllocationSites(); |
| } |
| |
| if (FLAG_trace_pretenuring_statistics && |
| (allocation_mementos_found > 0 || tenure_decisions > 0 || |
| dont_tenure_decisions > 0)) { |
| PrintIsolate(isolate(), |
| "pretenuring: deopt_maybe_tenured=%d visited_sites=%d " |
| "active_sites=%d " |
| "mementos=%d tenured=%d not_tenured=%d\n", |
| deopt_maybe_tenured ? 1 : 0, allocation_sites, |
| active_allocation_sites, allocation_mementos_found, |
| tenure_decisions, dont_tenure_decisions); |
| } |
| |
| global_pretenuring_feedback_.clear(); |
| global_pretenuring_feedback_.reserve(kInitialFeedbackCapacity); |
| } |
| } |
| |
| void Heap::InvalidateCodeEmbeddedObjects(Code* code) { |
| MemoryChunk* chunk = MemoryChunk::FromAddress(code->address()); |
| CodePageMemoryModificationScope modification_scope(chunk); |
| code->InvalidateEmbeddedObjects(); |
| } |
| |
| void Heap::InvalidateCodeDeoptimizationData(Code* code) { |
| MemoryChunk* chunk = MemoryChunk::FromAddress(code->address()); |
| CodePageMemoryModificationScope modification_scope(chunk); |
| code->set_deoptimization_data(empty_fixed_array()); |
| } |
| |
| void Heap::DeoptMarkedAllocationSites() { |
| // TODO(hpayer): If iterating over the allocation sites list becomes a |
| // performance issue, use a cache data structure in heap instead. |
| Object* list_element = allocation_sites_list(); |
| while (list_element->IsAllocationSite()) { |
| AllocationSite* site = AllocationSite::cast(list_element); |
| if (site->deopt_dependent_code()) { |
| site->dependent_code()->MarkCodeForDeoptimization( |
| isolate_, DependentCode::kAllocationSiteTenuringChangedGroup); |
| site->set_deopt_dependent_code(false); |
| } |
| list_element = site->weak_next(); |
| } |
| Deoptimizer::DeoptimizeMarkedCode(isolate_); |
| } |
| |
| |
| void Heap::GarbageCollectionEpilogue() { |
| TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE); |
| // In release mode, we only zap the from space under heap verification. |
| if (Heap::ShouldZapGarbage()) { |
| ZapFromSpace(); |
| } |
| |
| #ifdef VERIFY_HEAP |
| if (FLAG_verify_heap) { |
| Verify(); |
| } |
| #endif |
| |
| AllowHeapAllocation for_the_rest_of_the_epilogue; |
| |
| #ifdef DEBUG |
| if (FLAG_print_global_handles) isolate_->global_handles()->Print(); |
| if (FLAG_print_handles) PrintHandles(); |
| if (FLAG_gc_verbose) Print(); |
| if (FLAG_code_stats) ReportCodeStatistics("After GC"); |
| if (FLAG_check_handle_count) CheckHandleCount(); |
| #endif |
| |
| UpdateMaximumCommitted(); |
| |
| isolate_->counters()->alive_after_last_gc()->Set( |
| static_cast<int>(SizeOfObjects())); |
| |
| isolate_->counters()->string_table_capacity()->Set( |
| string_table()->Capacity()); |
| isolate_->counters()->number_of_symbols()->Set( |
| string_table()->NumberOfElements()); |
| |
| if (CommittedMemory() > 0) { |
| isolate_->counters()->external_fragmentation_total()->AddSample( |
| static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory())); |
| |
| isolate_->counters()->heap_sample_total_committed()->AddSample( |
| static_cast<int>(CommittedMemory() / KB)); |
| isolate_->counters()->heap_sample_total_used()->AddSample( |
| static_cast<int>(SizeOfObjects() / KB)); |
| isolate_->counters()->heap_sample_map_space_committed()->AddSample( |
| static_cast<int>(map_space()->CommittedMemory() / KB)); |
| isolate_->counters()->heap_sample_code_space_committed()->AddSample( |
| static_cast<int>(code_space()->CommittedMemory() / KB)); |
| |
| isolate_->counters()->heap_sample_maximum_committed()->AddSample( |
| static_cast<int>(MaximumCommittedMemory() / KB)); |
| } |
| |
| #define UPDATE_COUNTERS_FOR_SPACE(space) \ |
| isolate_->counters()->space##_bytes_available()->Set( \ |
| static_cast<int>(space()->Available())); \ |
| isolate_->counters()->space##_bytes_committed()->Set( \ |
| static_cast<int>(space()->CommittedMemory())); \ |
| isolate_->counters()->space##_bytes_used()->Set( \ |
| static_cast<int>(space()->SizeOfObjects())); |
| #define UPDATE_FRAGMENTATION_FOR_SPACE(space) \ |
| if (space()->CommittedMemory() > 0) { \ |
| isolate_->counters()->external_fragmentation_##space()->AddSample( \ |
| static_cast<int>(100 - \ |
| (space()->SizeOfObjects() * 100.0) / \ |
| space()->CommittedMemory())); \ |
| } |
| #define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \ |
| UPDATE_COUNTERS_FOR_SPACE(space) \ |
| UPDATE_FRAGMENTATION_FOR_SPACE(space) |
| |
| UPDATE_COUNTERS_FOR_SPACE(new_space) |
| UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_space) |
| UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space) |
| UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space) |
| UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space) |
| #undef UPDATE_COUNTERS_FOR_SPACE |
| #undef UPDATE_FRAGMENTATION_FOR_SPACE |
| #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE |
| |
| #ifdef DEBUG |
| ReportStatisticsAfterGC(); |
| #endif // DEBUG |
| |
| last_gc_time_ = MonotonicallyIncreasingTimeInMs(); |
| |
| { |
| TRACE_GC(tracer(), GCTracer::Scope::HEAP_EPILOGUE_REDUCE_NEW_SPACE); |
| ReduceNewSpaceSize(); |
| } |
| } |
| |
| |
| void Heap::PreprocessStackTraces() { |
| WeakFixedArray::Iterator iterator(weak_stack_trace_list()); |
| FixedArray* elements; |
| while ((elements = iterator.Next<FixedArray>()) != nullptr) { |
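    // Stack frames are stored in groups of four elements; the code object at
    // j + 2 and the code offset at j + 3 are folded into a source position
    // that replaces the code object.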
| for (int j = 1; j < elements->length(); j += 4) { |
| Object* maybe_code = elements->get(j + 2); |
| // If GC happens while adding a stack trace to the weak fixed array, |
| // which has been copied into a larger backing store, we may run into |
| // a stack trace that has already been preprocessed. Guard against this. |
| if (!maybe_code->IsAbstractCode()) break; |
| AbstractCode* abstract_code = AbstractCode::cast(maybe_code); |
| int offset = Smi::ToInt(elements->get(j + 3)); |
| int pos = abstract_code->SourcePosition(offset); |
| elements->set(j + 2, Smi::FromInt(pos)); |
| } |
| } |
  // We must not compact the weak fixed list here, as we may be in the middle
  // of writing to it when the GC was triggered. Instead, we reset the root
  // value.
| set_weak_stack_trace_list(Smi::kZero); |
| } |
| |
| |
| class GCCallbacksScope { |
| public: |
| explicit GCCallbacksScope(Heap* heap) : heap_(heap) { |
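    // Track the nesting depth of GC callback scopes; CheckReenter() is true
    // only for the outermost scope, so callers avoid invoking GC callbacks
    // reentrantly.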
| heap_->gc_callbacks_depth_++; |
| } |
| ~GCCallbacksScope() { heap_->gc_callbacks_depth_--; } |
| |
| bool CheckReenter() { return heap_->gc_callbacks_depth_ == 1; } |
| |
| private: |
| Heap* heap_; |
| }; |
| |
| |
| void Heap::HandleGCRequest() { |
| if (FLAG_stress_scavenge > 0 && stress_scavenge_observer_->HasRequestedGC()) { |
| CollectAllGarbage(NEW_SPACE, GarbageCollectionReason::kTesting); |
| stress_scavenge_observer_->RequestedGCDone(); |
| } else if (HighMemoryPressure()) { |
| incremental_marking()->reset_request_type(); |
| CheckMemoryPressure(); |
| } else if (incremental_marking()->request_type() == |
| IncrementalMarking::COMPLETE_MARKING) { |
| incremental_marking()->reset_request_type(); |
| CollectAllGarbage(current_gc_flags_, |
| GarbageCollectionReason::kFinalizeMarkingViaStackGuard, |
| current_gc_callback_flags_); |
| } else if (incremental_marking()->request_type() == |
| IncrementalMarking::FINALIZATION && |
| incremental_marking()->IsMarking() && |
| !incremental_marking()->finalize_marking_completed()) { |
| incremental_marking()->reset_request_type(); |
| FinalizeIncrementalMarking( |
| GarbageCollectionReason::kFinalizeMarkingViaStackGuard); |
| } |
| } |
| |
| |
| void Heap::ScheduleIdleScavengeIfNeeded(int bytes_allocated) { |
| scavenge_job_->ScheduleIdleTaskIfNeeded(this, bytes_allocated); |
| } |
| |
| void Heap::FinalizeIncrementalMarking(GarbageCollectionReason gc_reason) { |
| if (FLAG_trace_incremental_marking) { |
| isolate()->PrintWithTimestamp( |
| "[IncrementalMarking] (%s).\n", |
| Heap::GarbageCollectionReasonToString(gc_reason)); |
| } |
| |
| HistogramTimerScope incremental_marking_scope( |
| isolate()->counters()->gc_incremental_marking_finalize()); |
| TRACE_EVENT0("v8", "V8.GCIncrementalMarkingFinalize"); |
| TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE); |
| |
| { |
| GCCallbacksScope scope(this); |
| if (scope.CheckReenter()) { |
| AllowHeapAllocation allow_allocation; |
| TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_PROLOGUE); |
| VMState<EXTERNAL> state(isolate_); |
| HandleScope handle_scope(isolate_); |
| CallGCPrologueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags); |
| } |
| } |
| incremental_marking()->FinalizeIncrementally(); |
| { |
| GCCallbacksScope scope(this); |
| if (scope.CheckReenter()) { |
| AllowHeapAllocation allow_allocation; |
| TRACE_GC(tracer(), GCTracer::Scope::MC_INCREMENTAL_EXTERNAL_EPILOGUE); |
| VMState<EXTERNAL> state(isolate_); |
| HandleScope handle_scope(isolate_); |
| CallGCEpilogueCallbacks(kGCTypeIncrementalMarking, kNoGCCallbackFlags); |
| } |
| } |
| } |
| |
| |
| HistogramTimer* Heap::GCTypeTimer(GarbageCollector collector) { |
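  // Pick the histogram timer matching the collector about to run: the
  // scavenger timer for young-generation GCs, one of the finalize timers while
  // incremental marking is in progress, and the full compactor timer otherwise.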
| if (IsYoungGenerationCollector(collector)) { |
| return isolate_->counters()->gc_scavenger(); |
| } else { |
| if (!incremental_marking()->IsStopped()) { |
| if (ShouldReduceMemory()) { |
| return isolate_->counters()->gc_finalize_reduce_memory(); |
| } else { |
| return isolate_->counters()->gc_finalize(); |
| } |
| } else { |
| return isolate_->counters()->gc_compactor(); |
| } |
| } |
| } |
| |
| void Heap::CollectAllGarbage(int flags, GarbageCollectionReason gc_reason, |
| const v8::GCCallbackFlags gc_callback_flags) { |
| // Since we are ignoring the return value, the exact choice of space does |
| // not matter, so long as we do not specify NEW_SPACE, which would not |
| // cause a full GC. |
| set_current_gc_flags(flags); |
| CollectGarbage(OLD_SPACE, gc_reason, gc_callback_flags); |
| set_current_gc_flags(kNoGCFlags); |
| } |
| |
| void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) { |
| // Since we are ignoring the return value, the exact choice of space does |
| // not matter, so long as we do not specify NEW_SPACE, which would not |
| // cause a full GC. |
  // A major GC invokes weak handle callbacks on weakly reachable handles, but
  // does not collect weakly reachable objects until the next major GC.
  // Therefore, if we collect aggressively and a weak handle callback has been
  // invoked, we rerun the major GC to release objects that have become
  // garbage.
  // Note: as weak callbacks can execute arbitrary code, we cannot hope that
  // eventually there will be no weak callback invocations. Therefore, stop
  // recollecting after several attempts.
| if (gc_reason == GarbageCollectionReason::kLastResort) { |
| InvokeOutOfMemoryCallback(); |
| } |
| RuntimeCallTimerScope runtime_timer( |
| isolate(), RuntimeCallCounterId::kGC_Custom_AllAvailableGarbage); |
| if (isolate()->concurrent_recompilation_enabled()) { |
| // The optimizing compiler may be unnecessarily holding on to memory. |
| DisallowHeapAllocation no_recursive_gc; |
| isolate()->optimizing_compile_dispatcher()->Flush( |
| OptimizingCompileDispatcher::BlockingBehavior::kDontBlock); |
| } |
| isolate()->ClearSerializerData(); |
| set_current_gc_flags(kMakeHeapIterableMask | kReduceMemoryFootprintMask); |
| isolate_->compilation_cache()->Clear(); |
| const int kMaxNumberOfAttempts = 7; |
| const int kMinNumberOfAttempts = 2; |
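  // Keep collecting while the last GC reports that another round is likely to
  // free more memory, but perform at least kMinNumberOfAttempts and at most
  // kMaxNumberOfAttempts rounds.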
| for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) { |
| if (!CollectGarbage(OLD_SPACE, gc_reason, |
| v8::kGCCallbackFlagCollectAllAvailableGarbage) && |
| attempt + 1 >= kMinNumberOfAttempts) { |
| break; |
| } |
| } |
| |
| set_current_gc_flags(kNoGCFlags); |
| new_space_->Shrink(); |
| UncommitFromSpace(); |
| } |
| |
| void Heap::ReportExternalMemoryPressure() { |
| const GCCallbackFlags kGCCallbackFlagsForExternalMemory = |
| static_cast<GCCallbackFlags>( |
| kGCCallbackFlagSynchronousPhantomCallbackProcessing | |
| kGCCallbackFlagCollectAllExternalMemory); |
| if (external_memory_ > |
| (external_memory_at_last_mark_compact_ + external_memory_hard_limit())) { |
| CollectAllGarbage( |
| kReduceMemoryFootprintMask | kFinalizeIncrementalMarkingMask, |
| GarbageCollectionReason::kExternalMemoryPressure, |
| static_cast<GCCallbackFlags>(kGCCallbackFlagCollectAllAvailableGarbage | |
| kGCCallbackFlagsForExternalMemory)); |
| return; |
| } |
| if (incremental_marking()->IsStopped()) { |
| if (incremental_marking()->CanBeActivated()) { |
| StartIncrementalMarking(i::Heap::kNoGCFlags, |
| GarbageCollectionReason::kExternalMemoryPressure, |
| kGCCallbackFlagsForExternalMemory); |
| } else { |
| CollectAllGarbage(i::Heap::kNoGCFlags, |
| GarbageCollectionReason::kExternalMemoryPressure, |
| kGCCallbackFlagsForExternalMemory); |
| } |
| } else { |
    // Incremental marking is turned on and has already been started.
| const double kMinStepSize = 5; |
| const double kMaxStepSize = 10; |
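    // Scale the marking step duration with the external memory pressure,
    // clamped to the [kMinStepSize, kMaxStepSize] ms range.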
| const double ms_step = |
| Min(kMaxStepSize, |
| Max(kMinStepSize, static_cast<double>(external_memory_) / |
| external_memory_limit_ * kMinStepSize)); |
| const double deadline = MonotonicallyIncreasingTimeInMs() + ms_step; |
| // Extend the gc callback flags with external memory flags. |
| current_gc_callback_flags_ = static_cast<GCCallbackFlags>( |
| current_gc_callback_flags_ | kGCCallbackFlagsForExternalMemory); |
| incremental_marking()->AdvanceIncrementalMarking( |
| deadline, IncrementalMarking::GC_VIA_STACK_GUARD, StepOrigin::kV8); |
| } |
| } |
| |
| void Heap::EnsureFillerObjectAtTop() { |
| // There may be an allocation memento behind objects in new space. Upon |
| // evacuation of a non-full new space (or if we are on the last page) there |
| // may be uninitialized memory behind top. We fill the remainder of the page |
| // with a filler. |
| Address to_top = new_space_->top(); |
| Page* page = Page::FromAddress(to_top - kPointerSize); |
| if (page->Contains(to_top)) { |
| int remaining_in_page = static_cast<int>(page->area_end() - to_top); |
| CreateFillerObjectAt(to_top, remaining_in_page, ClearRecordedSlots::kNo); |
| } |
| } |
| |
| bool Heap::CollectGarbage(AllocationSpace space, |
| GarbageCollectionReason gc_reason, |
| const v8::GCCallbackFlags gc_callback_flags) { |
| // The VM is in the GC state until exiting this function. |
| VMState<GC> state(isolate()); |
| |
| const char* collector_reason = nullptr; |
| GarbageCollector collector = SelectGarbageCollector(space, &collector_reason); |
| |
| #ifdef V8_ENABLE_ALLOCATION_TIMEOUT |
| // Reset the allocation timeout, but make sure to allow at least a few |
| // allocations after a collection. The reason for this is that we have a lot |
| // of allocation sequences and we assume that a garbage collection will allow |
| // the subsequent allocation attempts to go through. |
| if (FLAG_random_gc_interval > 0 || FLAG_gc_interval >= 0) { |
| allocation_timeout_ = Max(6, NextAllocationTimeout(allocation_timeout_)); |
| } |
| #endif |
| |
| EnsureFillerObjectAtTop(); |
| |
| if (IsYoungGenerationCollector(collector) && |
| !incremental_marking()->IsStopped()) { |
| if (FLAG_trace_incremental_marking) { |
| isolate()->PrintWithTimestamp( |
| "[IncrementalMarking] Scavenge during marking.\n"); |
| } |
| } |
| |
| bool next_gc_likely_to_collect_more = false; |
| size_t committed_memory_before = 0; |
| |
| if (collector == MARK_COMPACTOR) { |
| committed_memory_before = CommittedOldGenerationMemory(); |
| } |
| |
| { |
| tracer()->Start(collector, gc_reason, collector_reason); |
| DCHECK(AllowHeapAllocation::IsAllowed()); |
| DisallowHeapAllocation no_allocation_during_gc; |
| GarbageCollectionPrologue(); |
| |
| { |
| HistogramTimer* gc_type_timer = GCTypeTimer(collector); |
| HistogramTimerScope histogram_timer_scope(gc_type_timer); |
| TRACE_EVENT0("v8", gc_type_timer->name()); |
| |
| next_gc_likely_to_collect_more = |
| PerformGarbageCollection(collector, gc_callback_flags); |
| } |
| |
| GarbageCollectionEpilogue(); |
| if (collector == MARK_COMPACTOR && FLAG_track_detached_contexts) { |
| isolate()->CheckDetachedContextsAfterGC(); |
| } |
| |
| if (collector == MARK_COMPACTOR) { |
| size_t committed_memory_after = CommittedOldGenerationMemory(); |
| size_t used_memory_after = PromotedSpaceSizeOfObjects(); |
| MemoryReducer::Event event; |
| event.type = MemoryReducer::kMarkCompact; |
| event.time_ms = MonotonicallyIncreasingTimeInMs(); |
| // Trigger one more GC if |
| // - this GC decreased committed memory, |
| // - there is high fragmentation, |
| // - there are live detached contexts. |
| event.next_gc_likely_to_collect_more = |
| (committed_memory_before > committed_memory_after + MB) || |
| HasHighFragmentation(used_memory_after, committed_memory_after) || |
| (detached_contexts()->length() > 0); |
| event.committed_memory = committed_memory_after; |
| if (deserialization_complete_) { |
| memory_reducer_->NotifyMarkCompact(event); |
| } |
| memory_pressure_level_.SetValue(MemoryPressureLevel::kNone); |
| } |
| |
| tracer()->Stop(collector); |
| } |
| |
| if (collector == MARK_COMPACTOR && |
| (gc_callback_flags & (kGCCallbackFlagForced | |
| kGCCallbackFlagCollectAllAvailableGarbage)) != 0) { |
| isolate()->CountUsage(v8::Isolate::kForcedGC); |
| } |
| |
| // Start incremental marking for the next cycle. The heap snapshot |
| // generator needs incremental marking to stay off after it aborted. |
| // We do this only for scavenger to avoid a loop where mark-compact |
| // causes another mark-compact. |
| if (IsYoungGenerationCollector(collector) && |
| !ShouldAbortIncrementalMarking()) { |
| StartIncrementalMarkingIfAllocationLimitIsReached( |
| kNoGCFlags, kGCCallbackScheduleIdleGarbageCollection); |
| } |
| |
| return next_gc_likely_to_collect_more; |
| } |
| |
| |
int Heap::NotifyContextDisposed(bool dependent_context) {
  if (!dependent_context) {
| tracer()->ResetSurvivalEvents(); |
| old_generation_size_configured_ = false; |
| MemoryReducer::Event event; |
| event.type = MemoryReducer::kPossibleGarbage; |
| event.time_ms = MonotonicallyIncreasingTimeInMs(); |
| memory_reducer_->NotifyPossibleGarbage(event); |
| } |
| if (isolate()->concurrent_recompilation_enabled()) { |
| // Flush the queued recompilation tasks. |
| isolate()->optimizing_compile_dispatcher()->Flush( |
| OptimizingCompileDispatcher::BlockingBehavior::kDontBlock); |
| } |
| number_of_disposed_maps_ = retained_maps()->Length(); |
| tracer()->AddContextDisposalTime(MonotonicallyIncreasingTimeInMs()); |
| return ++contexts_disposed_; |
| } |
| |
| void Heap::StartIncrementalMarking(int gc_flags, |
| GarbageCollectionReason gc_reason, |
| GCCallbackFlags gc_callback_flags) { |
| DCHECK(incremental_marking()->IsStopped()); |
| set_current_gc_flags(gc_flags); |
| current_gc_callback_flags_ = gc_callback_flags; |
| incremental_marking()->Start(gc_reason); |
| } |
| |
| void Heap::StartIncrementalMarkingIfAllocationLimitIsReached( |
| int gc_flags, const GCCallbackFlags gc_callback_flags) { |
| if (incremental_marking()->IsStopped()) { |
| IncrementalMarkingLimit reached_limit = IncrementalMarkingLimitReached(); |
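    // The soft limit only schedules a job that starts marking later; the hard
    // limit starts incremental marking right away.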
| if (reached_limit == IncrementalMarkingLimit::kSoftLimit) { |
| incremental_marking()->incremental_marking_job()->ScheduleTask(this); |
| } else if (reached_limit == IncrementalMarkingLimit::kHardLimit) { |
| StartIncrementalMarking(gc_flags, |
| GarbageCollectionReason::kAllocationLimit, |
| gc_callback_flags); |
| } |
| } |
| } |
| |
| void Heap::StartIdleIncrementalMarking( |
| GarbageCollectionReason gc_reason, |
| const GCCallbackFlags gc_callback_flags) { |
| gc_idle_time_handler_->ResetNoProgressCounter(); |
| StartIncrementalMarking(kReduceMemoryFootprintMask, gc_reason, |
| gc_callback_flags); |
| } |
| |
| |
| void Heap::MoveElements(FixedArray* array, int dst_index, int src_index, |
| int len) { |
| if (len == 0) return; |
| |
| DCHECK(array->map() != fixed_cow_array_map()); |
| Object** dst = array->data_start() + dst_index; |
| Object** src = array->data_start() + src_index; |
| if (FLAG_concurrent_marking && incremental_marking()->IsMarking()) { |
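    // The concurrent marker may be scanning this array, so copy the elements
    // one by one with relaxed atomic accesses; the copy direction is chosen
    // so that overlapping ranges are handled correctly.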
| if (dst < src) { |
| for (int i = 0; i < len; i++) { |
| base::AsAtomicPointer::Relaxed_Store( |
| dst + i, base::AsAtomicPointer::Relaxed_Load(src + i)); |
| } |
| } else { |
| for (int i = len - 1; i >= 0; i--) { |
| base::AsAtomicPointer::Relaxed_Store( |
| dst + i, base::AsAtomicPointer::Relaxed_Load(src + i)); |
| } |
| } |
| } else { |
| MemMove(dst, src, len * kPointerSize); |
| } |
| FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(this, array, dst_index, len); |
| } |
| |
| |
| #ifdef VERIFY_HEAP |
| // Helper class for verifying the string table. |
| class StringTableVerifier : public ObjectVisitor { |
| public: |
| void VisitPointers(HeapObject* host, Object** start, Object** end) override { |
| // Visit all HeapObject pointers in [start, end). |
| for (Object** p = start; p < end; p++) { |
| if ((*p)->IsHeapObject()) { |
| HeapObject* object = HeapObject::cast(*p); |
| Isolate* isolate = object->GetIsolate(); |
| // Check that the string is actually internalized. |
| CHECK(object->IsTheHole(isolate) || object->IsUndefined(isolate) || |
| object->IsInternalizedString()); |
| } |
| } |
| } |
| }; |
| |
| |
| static void VerifyStringTable(Heap* heap) { |
| StringTableVerifier verifier; |
| heap->string_table()->IterateElements(&verifier); |
| } |
| #endif // VERIFY_HEAP |
| |
| bool Heap::ReserveSpace(Reservation* reservations, std::vector<Address>* maps) { |
| bool gc_performed = true; |
| int counter = 0; |
| static const int kThreshold = 20; |
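  // Each failed space reservation below triggers a GC and restarts the outer
  // loop; give up after kThreshold rounds.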
| while (gc_performed && counter++ < kThreshold) { |
| gc_performed = false; |
| for (int space = NEW_SPACE; space < SerializerDeserializer::kNumberOfSpaces; |
| space++) { |
| Reservation* reservation = &reservations[space]; |
| DCHECK_LE(1, reservation->size()); |
| if (reservation->at(0).size == 0) continue; |
| bool perform_gc = false; |
| if (space == MAP_SPACE) { |
| // We allocate each map individually to avoid fragmentation. |
| maps->clear(); |
| DCHECK_LE(reservation->size(), 2); |
| int reserved_size = 0; |
| for (const Chunk& c : *reservation) reserved_size += c.size; |
| DCHECK_EQ(0, reserved_size % Map::kSize); |
| int num_maps = reserved_size / Map::kSize; |
| for (int i = 0; i < num_maps; i++) { |
| // The deserializer will update the skip list. |
| AllocationResult allocation = map_space()->AllocateRawUnaligned( |
| Map::kSize, PagedSpace::IGNORE_SKIP_LIST); |
| HeapObject* free_space = nullptr; |
| if (allocation.To(&free_space)) { |
| // Mark with a free list node, in case we have a GC before |
| // deserializing. |
| Address free_space_address = free_space->address(); |
| CreateFillerObjectAt(free_space_address, Map::kSize, |
| ClearRecordedSlots::kNo); |
| maps->push_back(free_space_address); |
| } else { |
| perform_gc = true; |
| break; |
| } |
| } |
| } else if (space == LO_SPACE) { |
| // Just check that we can allocate during deserialization. |
| DCHECK_LE(reservation->size(), 2); |
| int reserved_size = 0; |
| for (const Chunk& c : *reservation) reserved_size += c.size; |
| perform_gc = !CanExpandOldGeneration(reserved_size); |
| } else { |
| for (auto& chunk : *reservation) { |
| AllocationResult allocation; |
| int size = chunk.size; |
| DCHECK_LE(static_cast<size_t>(size), |
| MemoryAllocator::PageAreaSize( |
| static_cast<AllocationSpace>(space))); |
| if (space == NEW_SPACE) { |
| allocation = new_space()->AllocateRawUnaligned(size); |
| } else { |
| // The deserializer will update the skip list. |
| allocation = paged_space(space)->AllocateRawUnaligned( |
| size, PagedSpace::IGNORE_SKIP_LIST); |
| } |
| HeapObject* free_space = nullptr; |
| if (allocation.To(&free_space)) { |
| // Mark with a free list node, in case we have a GC before |
| // deserializing. |
| Address free_space_address = free_space->address(); |
| CreateFillerObjectAt(free_space_address, size, |
| ClearRecordedSlots::kNo); |
| DCHECK_GT(SerializerDeserializer::kNumberOfPreallocatedSpaces, |
| space); |
| chunk.start = free_space_address; |
| chunk.end = free_space_address + size; |
| } else { |
| perform_gc = true; |
| break; |
| } |
| } |
| } |
| if (perform_gc) { |
        // We cannot perform a GC with an uninitialized isolate. This check
| // fails for example if the max old space size is chosen unwisely, |
| // so that we cannot allocate space to deserialize the initial heap. |
| if (!deserialization_complete_) { |
| V8::FatalProcessOutOfMemory( |
| "insufficient memory to create an Isolate"); |
| } |
| if (space == NEW_SPACE) { |
| CollectGarbage(NEW_SPACE, GarbageCollectionReason::kDeserializer); |
| } else { |
| if (counter > 1) { |
| CollectAllGarbage( |
| kReduceMemoryFootprintMask | kAbortIncrementalMarkingMask, |
| GarbageCollectionReason::kDeserializer); |
| } else { |
| CollectAllGarbage(kAbortIncrementalMarkingMask, |
| GarbageCollectionReason::kDeserializer); |
| } |
| } |
| gc_performed = true; |
| break; // Abort for-loop over spaces and retry. |
| } |
| } |
| } |
| |
| return !gc_performed; |
| } |
| |
| |
| void Heap::EnsureFromSpaceIsCommitted() { |
| if (new_space_->CommitFromSpaceIfNeeded()) return; |
| |
| // Committing memory to from space failed. |
| // Memory is exhausted and we will die. |
| V8::FatalProcessOutOfMemory("Committing semi space failed."); |
| } |
| |
| |
| void Heap::UpdateSurvivalStatistics(int start_new_space_size) { |
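  // promotion_ratio_ and semi_space_copied_rate_ are percentages of the new
  // space size at the start of this GC; promotion_rate_ relates the promoted
  // size to the objects that survived the previous scavenge in the semi-space.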
| if (start_new_space_size == 0) return; |
| |
| promotion_ratio_ = (static_cast<double>(promoted_objects_size_) / |
| static_cast<double>(start_new_space_size) * 100); |
| |
| if (previous_semi_space_copied_object_size_ > 0) { |
| promotion_rate_ = |
| (static_cast<double>(promoted_objects_size_) / |
| static_cast<double>(previous_semi_space_copied_object_size_) * 100); |
| } else { |
| promotion_rate_ = 0; |
| } |
| |
| semi_space_copied_rate_ = |
| (static_cast<double>(semi_space_copied_object_size_) / |
| static_cast<double>(start_new_space_size) * 100); |
| |
| double survival_rate = promotion_ratio_ + semi_space_copied_rate_; |
| tracer()->AddSurvivalRatio(survival_rate); |
| } |
| |
| bool Heap::PerformGarbageCollection( |
| GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) { |
| int freed_global_handles = 0; |
| |
| if (!IsYoungGenerationCollector(collector)) { |
| PROFILE(isolate_, CodeMovingGCEvent()); |
| } |
| |
| #ifdef VERIFY_HEAP |
| if (FLAG_verify_heap) { |
| VerifyStringTable(this); |
| } |
| #endif |
| |
| GCType gc_type = |
| collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge; |
| |
| { |
| GCCallbacksScope scope(this); |
| if (scope.CheckReenter()) { |
| AllowHeapAllocation allow_allocation; |
| TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_PROLOGUE); |
| VMState<EXTERNAL> state(isolate_); |
| HandleScope handle_scope(isolate_); |
| CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags); |
| } |
| } |
| |
| EnsureFromSpaceIsCommitted(); |
| |
| size_t start_new_space_size = Heap::new_space()->Size(); |
| |
| { |
| Heap::SkipStoreBufferScope skip_store_buffer_scope(store_buffer_); |
| |
| switch (collector) { |
| case MARK_COMPACTOR: |
| UpdateOldGenerationAllocationCounter(); |
| // Perform mark-sweep with optional compaction. |
| MarkCompact(); |
| old_generation_size_configured_ = true; |
| // This should be updated before PostGarbageCollectionProcessing, which |
| // can cause another GC. Take into account the objects promoted during |
| // GC. |
| old_generation_allocation_counter_at_last_gc_ += |
| static_cast<size_t>(promoted_objects_size_); |
| old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects(); |
| break; |
| case MINOR_MARK_COMPACTOR: |
| MinorMarkCompact(); |
| break; |
| case SCAVENGER: |
| if ((fast_promotion_mode_ && |
| CanExpandOldGeneration(new_space()->Size()))) { |
| tracer()->NotifyYoungGenerationHandling( |
| YoungGenerationHandling::kFastPromotionDuringScavenge); |
| EvacuateYoungGeneration(); |
| } else { |
| tracer()->NotifyYoungGenerationHandling( |
| YoungGenerationHandling::kRegularScavenge); |
| |
| Scavenge(); |
| } |
| break; |
| } |
| |
| ProcessPretenuringFeedback(); |
| } |
| |
| UpdateSurvivalStatistics(static_cast<int>(start_new_space_size)); |
| ConfigureInitialOldGenerationSize(); |
| |
| if (collector != MARK_COMPACTOR) { |
| // Objects that died in the new space might have been accounted |
| // as bytes marked ahead of schedule by the incremental marker. |
| incremental_marking()->UpdateMarkedBytesAfterScavenge( |
| start_new_space_size - SurvivedNewSpaceObjectSize()); |
| } |
| |
| if (!fast_promotion_mode_ || collector == MARK_COMPACTOR) { |
| ComputeFastPromotionMode(promotion_ratio_ + semi_space_copied_rate_); |
| } |
| |
| isolate_->counters()->objs_since_last_young()->Set(0); |
| |
| gc_post_processing_depth_++; |
| { |
| AllowHeapAllocation allow_allocation; |
| TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES); |
| freed_global_handles = |
| isolate_->global_handles()->PostGarbageCollectionProcessing( |
| collector, gc_callback_flags); |
| } |
| gc_post_processing_depth_--; |
| |
| isolate_->eternal_handles()->PostGarbageCollectionProcessing(this); |
| |
| // Update relocatables. |
| Relocatable::PostGarbageCollectionProcessing(isolate_); |
| |
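|   // Adjust the old generation allocation limit: a full GC computes a fresh |
|   // limit from GC speed and mutator throughput, while a young GC with a |
|   // low allocation rate merely dampens the existing limit. |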
| double gc_speed = tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond(); |
| double mutator_speed = |
| tracer()->CurrentOldGenerationAllocationThroughputInBytesPerMillisecond(); |
| size_t old_gen_size = PromotedSpaceSizeOfObjects(); |
| if (collector == MARK_COMPACTOR) { |
| // Register the amount of external allocated memory. |
| external_memory_at_last_mark_compact_ = external_memory_; |
| external_memory_limit_ = external_memory_ + kExternalAllocationSoftLimit; |
| SetOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed); |
| } else if (HasLowYoungGenerationAllocationRate() && |
| old_generation_size_configured_) { |
| DampenOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed); |
| } |
| |
| { |
| GCCallbacksScope scope(this); |
| if (scope.CheckReenter()) { |
| AllowHeapAllocation allow_allocation; |
| TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_EPILOGUE); |
| VMState<EXTERNAL> state(isolate_); |
| HandleScope handle_scope(isolate_); |
| CallGCEpilogueCallbacks(gc_type, gc_callback_flags); |
| } |
| } |
| |
| #ifdef VERIFY_HEAP |
| if (FLAG_verify_heap) { |
| VerifyStringTable(this); |
| } |
| #endif |
| |
| return freed_global_handles > 0; |
| } |
| |
| |
| void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) { |
| RuntimeCallTimerScope runtime_timer( |
| isolate(), RuntimeCallCounterId::kGCPrologueCallback); |
| for (const GCCallbackTuple& info : gc_prologue_callbacks_) { |
| if (gc_type & info.gc_type) { |
| v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate()); |
| info.callback(isolate, gc_type, flags, info.data); |
| } |
| } |
| } |
| |
| void Heap::CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags) { |
| RuntimeCallTimerScope runtime_timer( |
| isolate(), RuntimeCallCounterId::kGCEpilogueCallback); |
| for (const GCCallbackTuple& info : gc_epilogue_callbacks_) { |
| if (gc_type & info.gc_type) { |
| v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate()); |
| info.callback(isolate, gc_type, flags, info.data); |
| } |
| } |
| } |
| |
| |
| void Heap::MarkCompact() { |
| PauseAllocationObserversScope pause_observers(this); |
| |
| SetGCState(MARK_COMPACT); |
| |
| LOG(isolate_, ResourceEvent("markcompact", "begin")); |
| |
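|   // Remember the object size before the collection; it is used below to |
|   // evaluate old space pretenuring decisions. |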
| uint64_t size_of_objects_before_gc = SizeOfObjects(); |
| |
|   CodeSpaceMemoryModificationScope code_modification(this); |
| |
| mark_compact_collector()->Prepare(); |
| |
| ms_count_++; |
| |
| MarkCompactPrologue(); |
| |
| mark_compact_collector()->CollectGarbage(); |
| |
| LOG(isolate_, ResourceEvent("markcompact", "end")); |
| |
| MarkCompactEpilogue(); |
| |
| if (FLAG_allocation_site_pretenuring) { |
| EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc); |
| } |
| } |
| |
| void Heap::MinorMarkCompact() { |
| DCHECK(FLAG_minor_mc); |
| |
| SetGCState(MINOR_MARK_COMPACT); |
| LOG(isolate_, ResourceEvent("MinorMarkCompact", "begin")); |
| |
| TRACE_GC(tracer(), GCTracer::Scope::MINOR_MC); |
| AlwaysAllocateScope always_allocate(isolate()); |
| PauseAllocationObserversScope pause_observers(this); |
| IncrementalMarking::PauseBlackAllocationScope pause_black_allocation( |
| incremental_marking()); |
|   CodeSpaceMemoryModificationScope code_modification(this); |
| ConcurrentMarking::PauseScope pause_scope(concurrent_marking()); |
| |
| minor_mark_compact_collector()->CollectGarbage(); |
| |
| LOG(isolate_, ResourceEvent("MinorMarkCompact", "end")); |
| SetGCState(NOT_IN_GC); |
| } |
| |
| void Heap::MarkCompactEpilogue() { |
| TRACE_GC(tracer(), GCTracer::Scope::MC_EPILOGUE); |
| SetGCState(NOT_IN_GC); |
| |
| isolate_->counters()->objs_since_last_full()->Set(0); |
| |
| incremental_marking()->Epilogue(); |
| |
| PreprocessStackTraces(); |
| DCHECK(incremental_marking()->IsStopped()); |
| } |
| |
| |
| void Heap::MarkCompactPrologue() { |
| TRACE_GC(tracer(), GCTracer::Scope::MC_PROLOGUE); |
| isolate_->context_slot_cache()->Clear(); |
| isolate_->descriptor_lookup_cache()->Clear(); |
| RegExpResultsCache::Clear(string_split_cache()); |
| RegExpResultsCache::Clear(regexp_multiple_cache()); |
| |
| isolate_->compilation_cache()->MarkCompactPrologue(); |
| |
| FlushNumberStringCache(); |
| } |
| |
| |
| void Heap::CheckNewSpaceExpansionCriteria() { |
| if (FLAG_experimental_new_space_growth_heuristic) { |
| if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() && |
| survived_last_scavenge_ * 100 / new_space_->TotalCapacity() >= 10) { |
|       // Grow the size of new space if there is room to grow, and at least |
|       // 10% of its total capacity survived the last scavenge. |
| new_space_->Grow(); |
| survived_since_last_expansion_ = 0; |
| } |
| } else if (new_space_->TotalCapacity() < new_space_->MaximumCapacity() && |
| survived_since_last_expansion_ > new_space_->TotalCapacity()) { |
| // Grow the size of new space if there is room to grow, and enough data |
| // has survived scavenge since the last expansion. |
| new_space_->Grow(); |
| survived_since_last_expansion_ = 0; |
| } |
| } |
| |
| static bool IsUnscavengedHeapObject(Heap* heap, Object** p) { |
| return heap->InFromSpace(*p) && |
| !HeapObject::cast(*p)->map_word().IsForwardingAddress(); |
| } |
| |
| class ScavengeWeakObjectRetainer : public WeakObjectRetainer { |
| public: |
| explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) {} |
| |
| virtual Object* RetainAs(Object* object) { |
| if (!heap_->InFromSpace(object)) { |
| return object; |
| } |
| |
| MapWord map_word = HeapObject::cast(object)->map_word(); |
| if (map_word.IsForwardingAddress()) { |
| return map_word.ToForwardingAddress(); |
| } |
| return nullptr; |
| } |
| |
| private: |
| Heap* heap_; |
| }; |
| |
| void Heap::EvacuateYoungGeneration() { |
| TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_FAST_PROMOTE); |
| base::LockGuard<base::Mutex> guard(relocation_mutex()); |
| ConcurrentMarking::PauseScope pause_scope(concurrent_marking()); |
| if (!FLAG_concurrent_marking) { |
| DCHECK(fast_promotion_mode_); |
| DCHECK(CanExpandOldGeneration(new_space()->Size())); |
| } |
| |
| mark_compact_collector()->sweeper()->EnsureIterabilityCompleted(); |
| |
| SetGCState(SCAVENGE); |
| LOG(isolate_, ResourceEvent("scavenge", "begin")); |
| |
| // Move pages from new->old generation. |
| PageRange range(new_space()->bottom(), new_space()->top()); |
| for (auto it = range.begin(); it != range.end();) { |
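|     // Advance the iterator first; unlinking and converting the page would |
|     // otherwise invalidate it. |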
| Page* p = (*++it)->prev_page(); |
| p->Unlink(); |
| Page::ConvertNewToOld(p); |
| if (incremental_marking()->IsMarking()) |
| mark_compact_collector()->RecordLiveSlotsOnPage(p); |
| } |
| |
| // Reset new space. |
| if (!new_space()->Rebalance()) { |
| FatalProcessOutOfMemory("NewSpace::Rebalance"); |
| } |
| new_space()->ResetLinearAllocationArea(); |
| new_space()->set_age_mark(new_space()->top()); |
| |
| // Fix up special trackers. |
| external_string_table_.PromoteAllNewSpaceStrings(); |
|   // GlobalHandles are updated in PostGarbageCollectionProcessing. |
| |
| IncrementYoungSurvivorsCounter(new_space()->Size()); |
| IncrementPromotedObjectsSize(new_space()->Size()); |
| IncrementSemiSpaceCopiedObjectSize(0); |
| |
| LOG(isolate_, ResourceEvent("scavenge", "end")); |
| SetGCState(NOT_IN_GC); |
| } |
| |
| static bool IsLogging(Isolate* isolate) { |
| return FLAG_verify_predictable || isolate->logger()->is_logging() || |
| isolate->is_profiling() || |
| (isolate->heap_profiler() != nullptr && |
| isolate->heap_profiler()->is_tracking_object_moves()); |
| } |
| |
| class PageScavengingItem final : public ItemParallelJob::Item { |
| public: |
| explicit PageScavengingItem(MemoryChunk* chunk) : chunk_(chunk) {} |
| virtual ~PageScavengingItem() {} |
| |
| void Process(Scavenger* scavenger) { scavenger->ScavengePage(chunk_); } |
| |
| private: |
| MemoryChunk* const chunk_; |
| }; |
| |
| class ScavengingTask final : public ItemParallelJob::Task { |
| public: |
| ScavengingTask(Heap* heap, Scavenger* scavenger, OneshotBarrier* barrier) |
| : ItemParallelJob::Task(heap->isolate()), |
| heap_(heap), |
| scavenger_(scavenger), |
| barrier_(barrier) {} |
| |
| void RunInParallel() final { |
| TRACE_BACKGROUND_GC( |
| heap_->tracer(), |
| GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL); |
| double scavenging_time = 0.0; |
| { |
| barrier_->Start(); |
| TimedScope scope(&scavenging_time); |
| PageScavengingItem* item = nullptr; |
| while ((item = GetItem<PageScavengingItem>()) != nullptr) { |
| item->Process(scavenger_); |
| item->MarkFinished(); |
| } |
| do { |
| scavenger_->Process(barrier_); |
| } while (!barrier_->Wait()); |
| scavenger_->Process(); |
| } |
| if (FLAG_trace_parallel_scavenge) { |
| PrintIsolate(heap_->isolate(), |
| "scavenge[%p]: time=%.2f copied=%zu promoted=%zu\n", |
| static_cast<void*>(this), scavenging_time, |
| scavenger_->bytes_copied(), scavenger_->bytes_promoted()); |
| } |
|   } |
| |
| private: |
| Heap* const heap_; |
| Scavenger* const scavenger_; |
| OneshotBarrier* const barrier_; |
| }; |
| |
| int Heap::NumberOfScavengeTasks() { |
| if (!FLAG_parallel_scavenge) return 1; |
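|   // Use roughly one task per MB of new space capacity, capped both by the |
|   // static task limit and by the number of available background threads, |
|   // but always at least one task. |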
| const int num_scavenge_tasks = |
| static_cast<int>(new_space()->TotalCapacity()) / MB; |
| return Max( |
| 1, |
| Min(Min(num_scavenge_tasks, kMaxScavengerTasks), |
| static_cast<int>( |
| V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()))); |
| } |
| |
| void Heap::Scavenge() { |
| TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE); |
| base::LockGuard<base::Mutex> guard(relocation_mutex()); |
| ConcurrentMarking::PauseScope pause_scope(concurrent_marking()); |
|   // There are soft limits in the allocation code, designed to trigger a |
|   // mark-sweep collection by failing allocations. There is no sense in |
|   // trying to trigger one during a scavenge: scavenge allocations should |
|   // always succeed. |
| AlwaysAllocateScope scope(isolate()); |
| |
| // Bump-pointer allocations done during scavenge are not real allocations. |
| // Pause the inline allocation steps. |
| PauseAllocationObserversScope pause_observers(this); |
| |
| IncrementalMarking::PauseBlackAllocationScope pause_black_allocation( |
| incremental_marking()); |
| |
| mark_compact_collector()->sweeper()->EnsureIterabilityCompleted(); |
| |
| SetGCState(SCAVENGE); |
| |
| // Implements Cheney's copying algorithm |
| LOG(isolate_, ResourceEvent("scavenge", "begin")); |
| |
| // Flip the semispaces. After flipping, to space is empty, from space has |
| // live objects. |
| new_space_->Flip(); |
| new_space_->ResetLinearAllocationArea(); |
| |
| ItemParallelJob job(isolate()->cancelable_task_manager(), |
| ¶llel_scavenge_semaphore_); |
| const int kMainThreadId = 0; |
| Scavenger* scavengers[kMaxScavengerTasks]; |
| const bool is_logging = IsLogging(isolate()); |
| const int num_scavenge_tasks = NumberOfScavengeTasks(); |
| OneshotBarrier barrier; |
| Scavenger::CopiedList copied_list(num_scavenge_tasks); |
| Scavenger::PromotionList promotion_list(num_scavenge_tasks); |
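|   // Set up one Scavenger per task. All scavengers drain the shared copied |
|   // and promotion work lists and synchronize termination via |barrier|. |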
| for (int i = 0; i < num_scavenge_tasks; i++) { |
| scavengers[i] = |
| new Scavenger(this, is_logging, &copied_list, &promotion_list, i); |
| job.AddTask(new ScavengingTask(this, scavengers[i], &barrier)); |
| } |
| |
| { |
| Sweeper* sweeper = mark_compact_collector()->sweeper(); |
| // Pause the concurrent sweeper. |
| Sweeper::PauseOrCompleteScope pause_scope(sweeper); |
| // Filter out pages from the sweeper that need to be processed for old to |
| // new slots by the Scavenger. After processing, the Scavenger adds back |
|     // pages that are still unswept. This way the Scavenger has exclusive |
| // access to the slots of a page and can completely avoid any locks on |
| // the page itself. |
| Sweeper::FilterSweepingPagesScope filter_scope(sweeper, pause_scope); |
| filter_scope.FilterOldSpaceSweepingPages( |
| [](Page* page) { return !page->ContainsSlots<OLD_TO_NEW>(); }); |
| RememberedSet<OLD_TO_NEW>::IterateMemoryChunks( |
| this, [&job](MemoryChunk* chunk) { |
| job.AddItem(new PageScavengingItem(chunk)); |
| }); |
| |
| RootScavengeVisitor root_scavenge_visitor(this, scavengers[kMainThreadId]); |
| |
| { |
| // Identify weak unmodified handles. Requires an unmodified graph. |
| TRACE_GC( |
| tracer(), |
| GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_IDENTIFY); |
| isolate()->global_handles()->IdentifyWeakUnmodifiedObjects( |
| &JSObject::IsUnmodifiedApiObject); |
| } |
| { |
| // Copy roots. |
| TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_ROOTS); |
| IterateRoots(&root_scavenge_visitor, VISIT_ALL_IN_SCAVENGE); |
| } |
| { |
| // Weak collections are held strongly by the Scavenger. |
| TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK); |
| IterateEncounteredWeakCollections(&root_scavenge_visitor); |
| } |
| { |
| // Parallel phase scavenging all copied and promoted objects. |
| TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL); |
| job.Run(); |
| DCHECK(copied_list.IsGlobalEmpty()); |
| DCHECK(promotion_list.IsGlobalEmpty()); |
| } |
| { |
| // Scavenge weak global handles. |
| TRACE_GC(tracer(), |
| GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_PROCESS); |
| isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending( |
| &IsUnscavengedHeapObject); |
| isolate() |
| ->global_handles() |
| ->IterateNewSpaceWeakUnmodifiedRootsForFinalizers( |
| &root_scavenge_visitor); |
| scavengers[kMainThreadId]->Process(); |
| |
| DCHECK(copied_list.IsGlobalEmpty()); |
| DCHECK(promotion_list.IsGlobalEmpty()); |
| isolate() |
| ->global_handles() |
| ->IterateNewSpaceWeakUnmodifiedRootsForPhantomHandles( |
| &root_scavenge_visitor, &IsUnscavengedHeapObject); |
| } |
| |
| for (int i = 0; i < num_scavenge_tasks; i++) { |
| scavengers[i]->Finalize(); |
| delete scavengers[i]; |
| } |
| } |
| |
| UpdateNewSpaceReferencesInExternalStringTable( |
| &UpdateNewSpaceReferenceInExternalStringTableEntry); |
| |
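|   // Entries on the incremental marking worklist may still point into |
|   // from-space; update them to the objects' new locations. |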
| incremental_marking()->UpdateMarkingWorklistAfterScavenge(); |
| |
| if (FLAG_concurrent_marking) { |
|     // Ensure that the concurrent marker does not track pages that are |
|     // going to be unmapped. |
| for (Page* p : PageRange(new_space()->FromSpaceStart(), |
| new_space()->FromSpaceEnd())) { |
| concurrent_marking()->ClearLiveness(p); |
| } |
| } |
| |
| ScavengeWeakObjectRetainer weak_object_retainer(this); |
| ProcessYoungWeakReferences(&weak_object_retainer); |
| |
| // Set age mark. |
| new_space_->set_age_mark(new_space_->top()); |
| |
| { |
| TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_PROCESS_ARRAY_BUFFERS); |
| ArrayBufferTracker::PrepareToFreeDeadInNewSpace(this); |
| } |
| array_buffer_collector()->FreeAllocationsOnBackgroundThread(); |
| |
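|   // Free up OLD_TO_NEW remembered-set buckets that are now empty; buckets |
|   // on pages that have not been swept yet are only pre-freed. |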
| RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(this, [](MemoryChunk* chunk) { |
| if (chunk->SweepingDone()) { |
| RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk); |
| } else { |
| RememberedSet<OLD_TO_NEW>::PreFreeEmptyBuckets(chunk); |
| } |
| }); |
| |
| // Update how much has survived scavenge. |
| IncrementYoungSurvivorsCounter(SurvivedNewSpaceObjectSize()); |
| |
| // Scavenger may find new wrappers by iterating objects promoted onto a black |
| // page. |
| local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer(); |
| |
| LOG(isolate_, ResourceEvent("scavenge", "end")); |
| |
| SetGCState(NOT_IN_GC); |
| } |
| |
| void Heap::ComputeFastPromotionMode(double survival_rate) { |
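|   // Fast promotion is enabled only when new space is already at its |
|   // maximum capacity and most of it survived the last scavenge, and never |
|   // when memory usage should be reduced. |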
| const size_t survived_in_new_space = |
| survived_last_scavenge_ * 100 / new_space_->Capacity(); |
| fast_promotion_mode_ = |
| !FLAG_optimize_for_size && FLAG_fast_promotion_new_space && |
| !ShouldReduceMemory() && new_space_->IsAtMaximumCapacity() && |
| survived_in_new_space >= kMinPromotedPercentForFastPromotionMode; |
| if (FLAG_trace_gc_verbose) { |
| PrintIsolate( |
| isolate(), "Fast promotion mode: %s survival rate: %" PRIuS "%%\n", |
| fast_promotion_mode_ ? "true" : "false", survived_in_new_space); |
| } |
| } |
| |
| String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap, |
| Object** p) { |
| MapWord first_word = HeapObject::cast(*p)->map_word(); |
| |
| if (!first_word.IsForwardingAddress()) { |
| // Unreachable external string can be finalized. |
| String* string = String::cast(*p); |
| if (!string->IsExternalString()) { |
| // Original external string has been internalized. |
| DCHECK(string->IsThinString()); |
| return nullptr; |
| } |
| heap->FinalizeExternalString(string); |
| return nullptr; |
| } |
| |
| // String is still reachable. |
| String* string = String::cast(first_word.ToForwardingAddress()); |
| if (string->IsThinString()) string = ThinString::cast(string)->actual(); |
| // Internalization can replace external strings with non-external strings. |
| return string->IsExternalString() ? string : nullptr; |
| } |
| |
| void Heap::ExternalStringTable::Verify() { |
| #ifdef DEBUG |
| for (size_t i = 0; i < new_space_strings_.size(); ++i) { |
| Object* obj = Object::cast(new_space_strings_[i]); |
| DCHECK(heap_->InNewSpace(obj)); |
| DCHECK(!obj->IsTheHole(heap_->isolate())); |
| } |
| for (size_t i = 0; i < old_space_strings_.size(); ++i) { |
| Object* obj = Object::cast(old_space_strings_[i]); |
| DCHECK(!heap_->InNewSpace(obj)); |
| DCHECK(!obj->IsTheHole(heap_->isolate())); |
| } |
| #endif |
| } |
| |
| void Heap::ExternalStringTable::UpdateNewSpaceReferences( |
| Heap::ExternalStringTableUpdaterCallback updater_func) { |
| if (new_space_strings_.empty()) return; |
| |
| Object** start = new_space_strings_.data(); |
| Object** end = start + new_space_strings_.size(); |
| Object** last = start; |
| |
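|   // Compact surviving new-space entries in place via |last|; strings that |
|   // were promoted move to the old-space list instead. |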
| for (Object** p = start; p < end; ++p) { |
| String* target = updater_func(heap_, p); |
| |
| if (target == nullptr) continue; |
| |
| DCHECK(target->IsExternalString()); |
| |
| if (heap_->InNewSpace(target)) { |
| // String is still in new space. Update the table entry. |
| *last = target; |
| ++last; |
| } else { |
| // String got promoted. Move it to the old string list. |
| old_space_strings_.push_back(target); |
| } |
| } |
| |
| DCHECK_LE(last, end); |
| new_space_strings_.resize(static_cast<size_t>(last - start)); |
| #ifdef VERIFY_HEAP |
| if (FLAG_verify_heap) { |
| Verify(); |
| } |
| #endif |
| } |
| |
| void Heap::ExternalStringTable::PromoteAllNewSpaceStrings() { |
| old_space_strings_.reserve(old_space_strings_.size() + |
| new_space_strings_.size()); |
| std::move(std::begin(new_space_strings_), std::end(new_space_strings_), |
| std::back_inserter(old_space_strings_)); |
| new_space_strings_.clear(); |
| } |
| |
| void Heap::ExternalStringTable::IterateNewSpaceStrings(RootVisitor* v) { |
| if (!new_space_strings_.empty()) { |
| v->VisitRootPointers(Root::kExternalStringsTable, new_space_strings_.data(), |
| new_space_strings_.data() + new_space_strings_.size()); |
| } |
| } |
| |
| void Heap::ExternalStringTable::IterateAll(RootVisitor* v) { |
| IterateNewSpaceStrings(v); |
| if (!old_space_strings_.empty()) { |
| v->VisitRootPointers(Root::kExternalStringsTable, old_space_strings_.data(), |
| old_space_strings_.data() + old_space_strings_.size()); |
| } |
| } |
| |
| void Heap::UpdateNewSpaceReferencesInExternalStringTable( |
| ExternalStringTableUpdaterCallback updater_func) { |
| external_string_table_.UpdateNewSpaceReferences(updater_func); |
| } |
| |
| void Heap::ExternalStringTable::UpdateReferences( |
| Heap::ExternalStringTableUpdaterCallback updater_func) { |
| if (old_space_strings_.size() > 0) { |
| Object** start = old_space_strings_.data(); |
| Object** end = start + old_space_strings_.size(); |
| for (Object** p = start; p < end; ++p) *p = updater_func(heap_, p); |
| } |
| |
| UpdateNewSpaceReferences(updater_func); |
| } |
| |
| void Heap::UpdateReferencesInExternalStringTable( |
| ExternalStringTableUpdaterCallback updater_func) { |
| external_string_table_.UpdateReferences(updater_func); |
| } |
| |
| |
| void Heap::ProcessAllWeakReferences(WeakObjectRetainer* retainer) { |
| ProcessNativeContexts(retainer); |
| ProcessAllocationSites(retainer); |
| } |
| |
| |
| void Heap::ProcessYoungWeakReferences(WeakObjectRetainer* retainer) { |
| ProcessNativeContexts(retainer); |
| } |
| |
| |
| void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) { |
| Object* head = VisitWeakList<Context>(this, native_contexts_list(), retainer); |
| // Update the head of the list of contexts. |
| set_native_contexts_list(head); |
| } |
| |
| |
| void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) { |
| Object* allocation_site_obj = |
| VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer); |
| set_allocation_sites_list(allocation_site_obj); |
| } |
| |
| void Heap::ProcessWeakListRoots(WeakObjectRetainer* retainer) { |
| set_native_contexts_list(retainer->RetainAs(native_contexts_list())); |
| set_allocation_sites_list(retainer->RetainAs(allocation_sites_list())); |
| } |
| |
| void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) { |
| DisallowHeapAllocation no_allocation_scope; |
| Object* cur = allocation_sites_list(); |
| bool marked = false; |
| while (cur->IsAllocationSite()) { |
| AllocationSite* casted = AllocationSite::cast(cur); |
| if (casted->GetPretenureMode() == flag) { |
| casted->ResetPretenureDecision(); |
| casted->set_deopt_dependent_code(true); |
| marked = true; |
| RemoveAllocationSitePretenuringFeedback(casted); |
| } |
| cur = casted->weak_next(); |
| } |
| if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites(); |
| } |
| |
| |
| void Heap::EvaluateOldSpaceLocalPretenuring( |
| uint64_t size_of_objects_before_gc) { |
| uint64_t size_of_objects_after_gc = SizeOfObjects(); |
| double old_generation_survival_rate = |
| (static_cast<double>(size_of_objects_after_gc) * 100) / |
| static_cast<double>(size_of_objects_before_gc); |
| |
| if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) { |
|     // Too many objects died in the old generation; pretenuring of the |
|     // wrong allocation sites may be the cause. We have to deopt all |
|     // dependent code registered in the allocation sites to re-evaluate |
|     // our pretenuring decisions. |
| ResetAllAllocationSitesDependentCode(TENURED); |
| if (FLAG_trace_pretenuring) { |
| PrintF( |
| "Deopt all allocation sites dependent code due to low survival " |
| "rate in the old generation %f\n", |
| old_generation_survival_rate); |
| } |
| } |
| } |
| |
| |
| void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) { |
| DisallowHeapAllocation no_allocation; |
| // All external strings are listed in the external string table. |
| |
| class ExternalStringTableVisitorAdapter : public RootVisitor { |
| public: |
| explicit ExternalStringTableVisitorAdapter( |
| v8::ExternalResourceVisitor* visitor) |
| : visitor_(visitor) {} |
| virtual void VisitRootPointers(Root root, Object** start, Object** end) { |
| for (Object** p = start; p < end; p++) { |
| DCHECK((*p)->IsExternalString()); |
| visitor_->VisitExternalString( |
| Utils::ToLocal(Handle<String>(String::cast(*p)))); |
| } |
| } |
| |
| private: |
| v8::ExternalResourceVisitor* visitor_; |
| } external_string_table_visitor(visitor); |
| |
| external_string_table_.IterateAll(&external_string_table_visitor); |
| } |
| |
| STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) == |
| 0); // NOLINT |
| STATIC_ASSERT((FixedTypedArrayBase::kDataOffset & kDoubleAlignmentMask) == |
| 0); // NOLINT |
| #ifdef V8_HOST_ARCH_32_BIT |
| STATIC_ASSERT((HeapNumber::kValueOffset & kDoubleAlignmentMask) != |
| 0); // NOLINT |
| #endif |
| |
| |
| int Heap::GetMaximumFillToAlign(AllocationAlignment alignment) { |
| switch (alignment) { |
| case kWordAligned: |
| return 0; |
| case kDoubleAligned: |
| case kDoubleUnaligned: |
| return kDoubleSize - kPointerSize; |
| default: |
| UNREACHABLE(); |
| } |
| return 0; |
| } |
| |
| |
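| // Returns the number of filler bytes needed in front of |address| so that |
| // an object allocated there gets the requested |alignment|. For example, |
| // on a 32-bit target a kDoubleAligned request at an address ending in 0x4 |
| // needs kPointerSize bytes of filler. |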
| int Heap::GetFillToAlign(Address address, AllocationAlignment alignment) { |
| intptr_t offset = OffsetFrom(address); |
| if (alignment == kDoubleAligned && (offset & kDoubleAlignmentMask) != 0) |
| return kPointerSize; |
| if (alignment == kDoubleUnaligned && (offset & kDoubleAlignmentMask) == 0) |
| return kDoubleSize - kPointerSize; // No fill if double is always aligned. |
| return 0; |
| } |
| |
| |
| HeapObject* Heap::PrecedeWithFiller(HeapObject* object, int filler_size) { |
| CreateFillerObjectAt(object->address(), filler_size, ClearRecordedSlots::kNo); |
| return HeapObject::FromAddress(object->address() + filler_size); |
| } |
| |
| |
| HeapObject* Heap::AlignWithFiller(HeapObject* object, int object_size, |
| int allocation_size, |
| AllocationAlignment alignment) { |
| int filler_size = allocation_size - object_size; |
| DCHECK_LT(0, filler_size); |
| int pre_filler = GetFillToAlign(object->address(), alignment); |
| if (pre_filler) { |
| object = PrecedeWithFiller(object, pre_filler); |
| filler_size -= pre_filler; |
| } |
| if (filler_size) |
| CreateFillerObjectAt(object->address() + object_size, filler_size, |
| ClearRecordedSlots::kNo); |
| return object; |
| } |
| |
| void Heap::RegisterNewArrayBuffer(JSArrayBuffer* buffer) { |
| ArrayBufferTracker::RegisterNew(this, buffer); |
| } |
| |
| |
| void Heap::UnregisterArrayBuffer(JSArrayBuffer* buffer) { |
| ArrayBufferTracker::Unregister(this, buffer); |
| } |
| |
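| // Until a full GC has configured the old generation size, scale the |
| // allocation limit by the average survival ratio observed so far, but |
| // never below the minimum growing step. |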
| void Heap::ConfigureInitialOldGenerationSize() { |
| if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) { |
| old_generation_allocation_limit_ = |
| Max(MinimumAllocationLimitGrowingStep(), |
| static_cast<size_t>( |
| static_cast<double>(old_generation_allocation_limit_) * |
| (tracer()->AverageSurvivalRatio() / 100))); |
| } |
| } |
| |
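| // Partial maps are allocated during bootstrapping, before the full set of |
| // root maps exists; fields that reference other roots are filled in later. |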
| AllocationResult Heap::AllocatePartialMap(InstanceType instance_type, |
| int instance_size) { |
| Object* result = nullptr; |
| AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE); |
| if (!allocation.To(&result)) return allocation; |
| // Map::cast cannot be used due to uninitialized map field. |
| Map* map = reinterpret_cast<Map*>(result); |
| map->set_map_after_allocation(reinterpret_cast<Map*>(root(kMetaMapRootIndex)), |
| SKIP_WRITE_BARRIER); |
| map->set_instance_type(instance_type); |
| map->set_instance_size(instance_size); |
| // Initialize to only containing tagged fields. |
| if (FLAG_unbox_double_fields) { |
| map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout()); |
| } |
| // GetVisitorId requires a properly initialized LayoutDescriptor. |
| map->set_visitor_id(Map::GetVisitorId(map)); |
| map->set_inobject_properties_start_or_constructor_function_index(0); |
| DCHECK(!map->IsJSObjectMap()); |
| map->SetInObjectUnusedPropertyFields(0); |
| map->set_bit_field(0); |
| map->set_bit_field2(0); |
| int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) | |
| Map::OwnsDescriptorsBit::encode(true) | |
| Map::ConstructionCounterBits::encode(Map::kNoSlackTracking); |
| map->set_bit_field3(bit_field3); |
| map->set_weak_cell_cache(Smi::kZero); |
| map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND); |
| return map; |
| } |
| |
| AllocationResult Heap::AllocateMap(InstanceType instance_type, |
| int instance_size, |
| ElementsKind elements_kind, |
| int inobject_properties) { |
| STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE); |
| DCHECK_IMPLIES(instance_type >= FIRST_JS_OBJECT_TYPE && |
| !Map::CanHaveFastTransitionableElementsKind(instance_type), |
| IsDictionaryElementsKind(elements_kind) || |
| IsTerminalElementsKind(elements_kind)); |
| HeapObject* result = nullptr; |
| AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE); |
| if (!allocation.To(&result)) return allocation; |
| |
| isolate()->counters()->maps_created()->Increment(); |
| result->set_map_after_allocation(meta_map(), SKIP_WRITE_BARRIER); |
| Map* map = Map::cast(result); |
| map->set_instance_type(instance_type); |
| map->set_prototype(null_value(), SKIP_WRITE_BARRIER); |
| map->set_constructor_or_backpointer(null_value(), SKIP_WRITE_BARRIER); |
| map->set_instance_size(instance_size); |
| if (map->IsJSObjectMap()) { |
| map->SetInObjectPropertiesStartInWords(instance_size / kPointerSize - |
| inobject_properties); |
| DCHECK_EQ(map->GetInObjectProperties(), inobject_properties); |
| } else { |
| DCHECK_EQ(inobject_properties, 0); |
| map->set_inobject_properties_start_or_constructor_function_index(0); |
| } |
| map->set_dependent_code(DependentCode::cast(empty_fixed_array()), |
| SKIP_WRITE_BARRIER); |
| map->set_weak_cell_cache(Smi::kZero); |
| map->set_raw_transitions(Smi::kZero); |
| map->SetInObjectUnusedPropertyFields(inobject_properties); |
| map->set_instance_descriptors(empty_descriptor_array()); |
| if (FLAG_unbox_double_fields) { |
| map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout()); |
| } |
| // Must be called only after |instance_type|, |instance_size| and |
| // |layout_descriptor| are set. |
| map->set_visitor_id(Map::GetVisitorId(map)); |
| map->set_bit_field(0); |
| map->set_bit_field2(Map::IsExtensibleBit::kMask); |
| int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) | |
| Map::OwnsDescriptorsBit::encode(true) | |
| Map::ConstructionCounterBits::encode(Map::kNoSlackTracking); |
| map->set_bit_field3(bit_field3); |
| map->set_elements_kind(elements_kind); |
| map->set_new_target_is_base(true); |
| if (FLAG_trace_maps) LOG(isolate(), MapCreate(map)); |
| return map; |
| } |
| |
| |
| AllocationResult Heap::AllocateFillerObject(int size, bool double_align, |
| AllocationSpace space) { |
| HeapObject* obj = nullptr; |
| { |
| AllocationAlignment align = double_align ? kDoubleAligned : kWordAligned; |
| AllocationResult allocation = AllocateRaw(size, space, align); |
| if (!allocation.To(&obj)) return allocation; |
| } |
| #ifdef DEBUG |
| MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address()); |
| DCHECK(chunk->owner()->identity() == space); |
| #endif |
| CreateFillerObjectAt(obj->address(), size, ClearRecordedSlots::kNo); |
| return obj; |
| } |
| |
| |
| AllocationResult Heap::AllocateHeapNumber(MutableMode mode, |
| PretenureFlag pretenure) { |
| // Statically ensure that it is safe to allocate heap numbers in paged |
| // spaces. |
| int size = HeapNumber::kSize; |
| STATIC_ASSERT(HeapNumber::kSize <= kMaxRegularHeapObjectSize); |
| |
| AllocationSpace space = SelectSpace(pretenure); |
| |
| HeapObject* result = nullptr; |
| { |
| AllocationResult allocation = AllocateRaw(size, space, kDoubleUnaligned); |
| if (!allocation.To(&result)) return allocation; |
| } |
| |
| Map* map = mode == MUTABLE ? mutable_heap_number_map() : heap_number_map(); |
| HeapObject::cast(result)->set_map_after_allocation(map, SKIP_WRITE_BARRIER); |
| return result; |
| } |
| |
| AllocationResult Heap::AllocateBigInt(int length) { |
| if (length < 0 || length > BigInt::kMaxLength) { |
| v8::internal::Heap::FatalProcessOutOfMemory("invalid BigInt length", true); |
| } |
| int size = BigInt::SizeFor(length); |
| AllocationSpace space = SelectSpace(NOT_TENURED); |
| HeapObject* result = nullptr; |
| { |
| AllocationResult allocation = AllocateRaw(size, space); |
| if (!allocation.To(&result)) return allocation; |
| } |
| result->set_map_after_allocation(bigint_map(), SKIP_WRITE_BARRIER); |
| return result; |
| } |
| |
| AllocationResult Heap::AllocateCell(Object* value) { |
| int size = Cell::kSize; |
| STATIC_ASSERT(Cell::kSize <= kMaxRegularHeapObjectSize); |
| |
| HeapObject* result = nullptr; |
| { |
| AllocationResult allocation = AllocateRaw(size, OLD_SPACE); |
| if (!allocation.To(&result)) return allocation; |
| } |
| result->set_map_after_allocation(cell_map(), SKIP_WRITE_BARRIER); |
| Cell::cast(result)->set_value(value); |
| return result; |
| } |
| |
| AllocationResult Heap::AllocatePropertyCell(Name* name) { |
| DCHECK(name->IsUniqueName()); |
| int size = PropertyCell::kSize; |
| STATIC_ASSERT(PropertyCell::kSize <= kMaxRegularHeapObjectSize); |
| |
| HeapObject* result = nullptr; |
| AllocationResult allocation = AllocateRaw(size, OLD_SPACE); |
| if (!allocation.To(&result)) return allocation; |
| |
| result->set_map_after_allocation(global_property_cell_map(), |
| SKIP_WRITE_BARRIER); |
| PropertyCell* cell = PropertyCell::cast(result); |
| cell->set_dependent_code(DependentCode::cast(empty_fixed_array()), |
| SKIP_WRITE_BARRIER); |
| cell->set_property_details(PropertyDetails(Smi::kZero)); |
| cell->set_name(name); |
| cell->set_value(the_hole_value()); |
| return result; |
| } |
| |
| |
| AllocationResult Heap::AllocateWeakCell(HeapObject* value) { |
| int size = WeakCell::kSize; |
| STATIC_ASSERT(WeakCell::kSize <= kMaxRegularHeapObjectSize); |
| HeapObject* result = nullptr; |
| { |
| AllocationResult allocation = AllocateRaw(size, OLD_SPACE); |
| if (!allocation.To(&result)) return allocation; |
| } |
| result->set_map_after_allocation(weak_cell_map(), SKIP_WRITE_BARRIER); |
| WeakCell::cast(result)->initialize(value); |
| return result; |
| } |
| |
| |
| AllocationResult Heap::AllocateTransitionArray(int capacity) { |
| DCHECK_LT(0, capacity); |
| HeapObject* raw_array = nullptr; |
| { |
| AllocationResult allocation = AllocateRawFixedArray(capacity, TENURED); |
| if (!allocation.To(&raw_array)) return allocation; |
| } |
| raw_array->set_map_after_allocation(transition_array_map(), |
| SKIP_WRITE_BARRIER); |
| TransitionArray* array = TransitionArray::cast(raw_array); |
| array->set_length(capacity); |
| MemsetPointer(array->data_start(), undefined_value(), capacity); |
| // Transition arrays are tenured. When black allocation is on we have to |
| // add the transition array to the list of encountered_transition_arrays. |
| if (incremental_marking()->black_allocation()) { |
| mark_compact_collector()->AddTransitionArray(array); |
| } |
| return array; |
| } |
| |
| void Heap::CreateJSEntryStub() { |
| JSEntryStub stub(isolate(), StackFrame::ENTRY); |
| set_js_entry_code(*stub.GetCode()); |
| } |
| |
| |
| void Heap::CreateJSConstructEntryStub() { |
| JSEntryStub stub(isolate(), StackFrame::CONSTRUCT_ENTRY); |
| set_js_construct_entry_code(*stub.GetCode()); |
| } |
| |
| void Heap::CreateJSRunMicrotasksEntryStub() { |
| JSEntryStub stub(isolate(), JSEntryStub::SpecialTarget::kRunMicrotasks); |
| set_js_run_microtasks_entry_code(*stub.GetCode()); |
| } |
| |
| void Heap::CreateFixedStubs() { |
| // Here we create roots for fixed stubs. They are needed at GC |
| // for cooking and uncooking (check out frames.cc). |
|   // This eliminates the need for doing dictionary lookups in the |
|   // stub cache for these stubs. |
| HandleScope scope(isolate()); |
| // Canonicalize handles, so that we can share constant pool entries pointing |
| // to code targets without dereferencing their handles. |
| CanonicalHandleScope canonical(isolate()); |
| |
| // Create stubs that should be there, so we don't unexpectedly have to |
| // create them if we need them during the creation of another stub. |
| // Stub creation mixes raw pointers and handles in an unsafe manner so |
| // we cannot create stubs while we are creating stubs. |
| CodeStub::GenerateStubsAheadOfTime(isolate()); |
| |
| // MacroAssembler::Abort calls (usually enabled with --debug-code) depend on |
| // CEntryStub, so we need to call GenerateStubsAheadOfTime before JSEntryStub |
| // is created. |
| |
| // gcc-4.4 has problem generating correct code of following snippet: |
| // { JSEntryStub stub; |
| // js_entry_code_ = *stub.GetCode(); |
| // } |
| // { JSConstructEntryStub stub; |
| // js_construct_entry_code_ = *stub.GetCode(); |
| // } |
| // To workaround the problem, make separate functions without inlining. |
| Heap::CreateJSEntryStub(); |
| Heap::CreateJSConstructEntryStub(); |
| Heap::CreateJSRunMicrotasksEntryStub(); |
| } |
| |
| bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) { |
| switch (root_index) { |
| case kNumberStringCacheRootIndex: |
| case kCodeStubsRootIndex: |
| case kScriptListRootIndex: |
| case kMaterializedObjectsRootIndex: |
| case kMicrotaskQueueRootIndex: |
| case kDetachedContextsRootIndex: |
| case kWeakObjectToCodeTableRootIndex: |
| case kWeakNewSpaceObjectToCodeListRootIndex: |
| case kRetainedMapsRootIndex: |
| case kRetainingPathTargetsRootIndex: |
| case kFeedbackVectorsForProfilingToolsRootIndex: |
| case kNoScriptSharedFunctionInfosRootIndex: |
| case kWeakStackTraceListRootIndex: |
| case kSerializedObjectsRootIndex: |
| case kSerializedGlobalProxySizesRootIndex: |
| case kPublicSymbolTableRootIndex: |
| case kApiSymbolTableRootIndex: |
| case kApiPrivateSymbolTableRootIndex: |
| case kMessageListenersRootIndex: |
| case kDeserializeLazyHandlerRootIndex: |
| case kDeserializeLazyHandlerWideRootIndex: |
| case kDeserializeLazyHandlerExtraWideRootIndex: |
| // Smi values |
| #define SMI_ENTRY(type, name, Name) case k##Name##RootIndex: |
| SMI_ROOT_LIST(SMI_ENTRY) |
| #undef SMI_ENTRY |
| // String table |
| case kStringTableRootIndex: |
| return true; |
| |
| default: |
| return false; |
| } |
| } |
| |
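| // A root can be treated as a constant if it is never overwritten after |
| // initialization and is not allocated in new space. Such roots are |
| // expected to be immovable, which the DCHECK below verifies. |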
| bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) { |
| bool can_be = !RootCanBeWrittenAfterInitialization(root_index) && |
| !InNewSpace(root(root_index)); |
| DCHECK_IMPLIES(can_be, IsImmovable(HeapObject::cast(root(root_index)))); |
| return can_be; |
| } |
| |
| int Heap::FullSizeNumberStringCacheLength() { |
| // Compute the size of the number string cache based on the max newspace size. |
| // The number string cache has a minimum size based on twice the initial cache |
| // size to ensure that it is bigger after being made 'full size'. |
| size_t number_string_cache_size = max_semi_space_size_ / 512; |
| number_string_cache_size = |
| Max(static_cast<size_t>(kInitialNumberStringCacheSize * 2), |
| Min<size_t>(0x4000u, number_string_cache_size)); |
| // There is a string and a number per entry so the length is twice the number |
| // of entries. |
| return static_cast<int>(number_string_cache_size * 2); |
| } |
| |
| |
| void Heap::FlushNumberStringCache() { |
| // Flush the number to string cache. |
| int len = number_string_cache()->length(); |
| for (int i = 0; i < len; i++) { |
| number_string_cache()->set_undefined(i); |
| } |
| } |
| |
| |
| Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) { |
| return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]); |
| } |
| |
| |
| Heap::RootListIndex Heap::RootIndexForFixedTypedArray( |
| ExternalArrayType array_type) { |
| switch (array_type) { |
| #define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \ |
| case kExternal##Type##Array: \ |
| return kFixed##Type##ArrayMapRootIndex; |
| |
| TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX) |
| #undef ARRAY_TYPE_TO_ROOT_INDEX |
| |
| default: |
| UNREACHABLE(); |
| } |
| } |
| |
| |
| Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray( |
| ElementsKind elementsKind) { |
| switch (elementsKind) { |
| #define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \ |
| case TYPE##_ELEMENTS: \ |
| return kEmptyFixed##Type##ArrayRootIndex; |
| |
| TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX) |
| #undef ELEMENT_KIND_TO_ROOT_INDEX |
| default: |
| UNREACHABLE(); |
| } |
| } |
| |
| FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(const Map* map) { |
| return FixedTypedArrayBase::cast( |
| roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]); |
| } |
| |
| |
| AllocationResult Heap::AllocateForeign(Address address, |
| PretenureFlag pretenure) { |
| // Statically ensure that it is safe to allocate foreigns in paged spaces. |
| STATIC_ASSERT(Foreign::kSize <= kMaxRegularHeapObjectSize); |
| AllocationSpace space = (pretenure == TENURED) ? OLD_SPACE : NEW_SPACE; |
| Foreign* result = nullptr; |
| AllocationResult allocation = Allocate(foreign_map(), space); |
| if (!allocation.To(&result)) return allocation; |
| result->set_foreign_address(address); |
| return result; |
| } |
| |
| AllocationResult Heap::AllocateSmallOrderedHashSet(int capacity, |
| PretenureFlag pretenure) { |
| DCHECK_EQ(0, capacity % SmallOrderedHashSet::kLoadFactor); |
| CHECK_GE(SmallOrderedHashSet::kMaxCapacity, capacity); |
| |
| int size = SmallOrderedHashSet::Size(capacity); |
| AllocationSpace space = SelectSpace(pretenure); |
| HeapObject* result = nullptr; |
| { |
| AllocationResult allocation = AllocateRaw(size, space); |
| if (!allocation.To(&result)) return allocation; |
| } |
| |
| result->set_map_after_allocation(small_ordered_hash_set_map(), |
| SKIP_WRITE_BARRIER); |
| Handle<SmallOrderedHashSet> table(SmallOrderedHashSet::cast(result)); |
| table->Initialize(isolate(), capacity); |
| return result; |
| } |
| |
| AllocationResult Heap::AllocateSmallOrderedHashMap(int capacity, |
| PretenureFlag pretenure) { |
| DCHECK_EQ(0, capacity % SmallOrderedHashMap::kLoadFactor); |
| CHECK_GE(SmallOrderedHashMap::kMaxCapacity, capacity); |
| |
| int size = SmallOrderedHashMap::Size(capacity); |
| AllocationSpace space = SelectSpace(pretenure); |
| HeapObject* result = nullptr; |
| { |
| AllocationResult allocation = AllocateRaw(size, space); |
| if (!allocation.To(&result)) return allocation; |
| } |
| |
| result->set_map_after_allocation(small_ordered_hash_map_map(), |
| SKIP_WRITE_BARRIER); |
| Handle<SmallOrderedHashMap> table(SmallOrderedHashMap::cast(result)); |
| table->Initialize(isolate(), capacity); |
| return result; |
| } |
| |
| AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) { |
| if (length < 0 || length > ByteArray::kMaxLength) { |
| v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true); |
| } |
| int size = ByteArray::SizeFor(length); |
| AllocationSpace space = SelectSpace(pretenure); |
| HeapObject* result = nullptr; |
| { |
| AllocationResult allocation = AllocateRaw(size, space); |
| if (!allocation.To(&result)) return allocation; |
| } |
| |
| result->set_map_after_allocation(byte_array_map(), SKIP_WRITE_BARRIER); |
| ByteArray::cast(result)->set_length(length); |
| ByteArray::cast(result)->clear_padding(); |
| return result; |
| } |
| |
| |
| AllocationResult Heap::AllocateBytecodeArray(int length, |
| const byte* const raw_bytecodes, |
| int frame_size, |
| int parameter_count, |
| FixedArray* constant_pool) { |
| if (length < 0 || length > BytecodeArray::kMaxLength) { |
| v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true); |
| } |
|   // Bytecode array is pretenured, so the constant pool array should be too. |
| DCHECK(!InNewSpace(constant_pool)); |
| |
| int size = BytecodeArray::SizeFor(length); |
| HeapObject* result = nullptr; |
| { |
| AllocationResult allocation = AllocateRaw(size, OLD_SPACE); |
| if (!allocation.To(&result)) return allocation; |
| } |
| |
| result->set_map_after_allocation(bytecode_array_map(), SKIP_WRITE_BARRIER); |
| BytecodeArray* instance = BytecodeArray::cast(result); |
| instance->set_length(length); |
| instance->set_frame_size(frame_size); |
| instance->set_parameter_count(parameter_count); |
| instance->set_incoming_new_target_or_generator_register( |
| interpreter::Register::invalid_value()); |
| instance->set_interrupt_budget(interpreter::Interpreter::kInterruptBudget); |
| instance->set_osr_loop_nesting_level(0); |
| instance->set_bytecode_age(BytecodeArray::kNoAgeBytecodeAge); |
| instance->set_constant_pool(constant_pool); |
| instance->set_handler_table(empty_fixed_array()); |
| instance->set_source_position_table(empty_byte_array()); |
| CopyBytes(instance->GetFirstBytecodeAddress(), raw_bytecodes, length); |
| instance->clear_padding(); |
| |
| return result; |
| } |
| |
| HeapObject* Heap::CreateFillerObjectAt(Address addr, int size, |
| ClearRecordedSlots mode) { |
| if (size == 0) return nullptr; |
| HeapObject* filler = HeapObject::FromAddress(addr); |
| if (size == kPointerSize) { |
| filler->set_map_after_allocation( |
| reinterpret_cast<Map*>(root(kOnePointerFillerMapRootIndex)), |
| SKIP_WRITE_BARRIER); |
| } else if (size == 2 * kPointerSize) { |
| filler->set_map_after_allocation( |
| reinterpret_cast<Map*>(root(kTwoPointerFillerMapRootIndex)), |
| SKIP_WRITE_BARRIER); |
| } else { |
| DCHECK_GT(size, 2 * kPointerSize); |
| filler->set_map_after_allocation( |
| reinterpret_cast<Map*>(root(kFreeSpaceMapRootIndex)), |
| SKIP_WRITE_BARRIER); |
| FreeSpace::cast(filler)->relaxed_write_size(size); |
| } |
| if (mode == ClearRecordedSlots::kYes) { |
| ClearRecordedSlotRange(addr, addr + size); |
| } |
| |
|   // At this point, we may be deserializing the heap from a snapshot, in |
|   // which case some maps have not been created yet and the filler's map is |
|   // nullptr. |
| DCHECK((filler->map() == nullptr && !deserialization_complete_) || |
| filler->map()->IsMap()); |
| return filler; |
| } |
| |
| |
| bool Heap::CanMoveObjectStart(HeapObject* object) { |
| if (!FLAG_move_object_start) return false; |
| |
| // Sampling heap profiler may have a reference to the object. |
| if (isolate()->heap_profiler()->is_sampling_allocations()) return false; |
| |
| Address address = object->address(); |
| |
| if (lo_space()->Contains(object)) return false; |
| |
| // We can move the object start if the page was already swept. |
| return Page::FromAddress(address)->SweepingDone(); |
| } |
| |
| bool Heap::IsImmovable(HeapObject* object) { |
| MemoryChunk* chunk = MemoryChunk::FromAddress(object->address()); |
| return
|