// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_INCREMENTAL_MARKING_H_
#define V8_HEAP_INCREMENTAL_MARKING_H_

#include "src/base/platform/mutex.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-job.h"
#include "src/heap/mark-compact.h"
#include "src/tasks/cancelable-task.h"

namespace v8 {
namespace internal {

class HeapObject;
class MarkBit;
class Map;
class Object;
class PagedSpace;

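// StepOrigin records what triggered a marking step: kV8 for steps driven from
// within V8 (for example the allocation path) and kTask for steps driven by a
// posted incremental marking task. StepResult reports whether the step found
// no immediate work, left work remaining, or is waiting for finalization.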
enum class StepOrigin { kV8, kTask };
enum class StepResult {
  kNoImmediateWork,
  kMoreWorkRemaining,
  kWaitingForFinalization
};

class V8_EXPORT_PRIVATE IncrementalMarking final {
 public:
  enum State : uint8_t { STOPPED, SWEEPING, MARKING, COMPLETE };

  enum CompletionAction { GC_VIA_STACK_GUARD, NO_GC_VIA_STACK_GUARD };

  enum GCRequestType { NONE, COMPLETE_MARKING, FINALIZATION };

  using MarkingState = MarkCompactCollector::MarkingState;
  using AtomicMarkingState = MarkCompactCollector::AtomicMarkingState;
  using NonAtomicMarkingState = MarkCompactCollector::NonAtomicMarkingState;

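  // RAII scope that pauses black allocation while it is alive and restarts it
  // on destruction if it was active when the scope was entered.
  //
  // Illustrative use (hypothetical call site):
  //
  //   {
  //     IncrementalMarking::PauseBlackAllocationScope pause(
  //         heap->incremental_marking());
  //     // ... work that must not allocate objects as black ...
  //   }  // Black allocation resumes here if it was previously active.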
  class PauseBlackAllocationScope {
   public:
    explicit PauseBlackAllocationScope(IncrementalMarking* marking)
        : marking_(marking), paused_(false) {
      if (marking_->black_allocation()) {
        paused_ = true;
        marking_->PauseBlackAllocation();
      }
    }

    ~PauseBlackAllocationScope() {
      if (paused_) {
        marking_->StartBlackAllocation();
      }
    }

   private:
    IncrementalMarking* marking_;
    bool paused_;
  };

  // It's hard to know how much work the incremental marker should do to make
  // progress in the face of the mutator creating new work for it. We start
  // off at a moderate rate of work and gradually increase the speed of the
  // incremental marker until it completes.
  // Do some marking every time this much memory has been allocated or this
  // many heavy (color-checking) write barriers have been invoked.
  static const size_t kYoungGenerationAllocatedThreshold = 64 * KB;
  static const size_t kOldGenerationAllocatedThreshold = 256 * KB;
  static const size_t kMinStepSizeInBytes = 64 * KB;

  static constexpr double kStepSizeInMs = 1;
  static constexpr double kMaxStepSizeInMs = 5;

#ifndef DEBUG
  static constexpr size_t kV8ActivationThreshold = 8 * MB;
  static constexpr size_t kGlobalActivationThreshold = 16 * MB;
#else
  static constexpr size_t kV8ActivationThreshold = 0;
  static constexpr size_t kGlobalActivationThreshold = 0;
#endif

#ifdef V8_ATOMIC_MARKING_STATE
  static const AccessMode kAtomicity = AccessMode::ATOMIC;
#else
  static const AccessMode kAtomicity = AccessMode::NON_ATOMIC;
#endif

  IncrementalMarking(Heap* heap, WeakObjects* weak_objects);

  MarkingState* marking_state() { return &marking_state_; }

  AtomicMarkingState* atomic_marking_state() { return &atomic_marking_state_; }

  NonAtomicMarkingState* non_atomic_marking_state() {
    return &non_atomic_marking_state_;
  }

  void NotifyLeftTrimming(HeapObject from, HeapObject to);

  V8_INLINE void TransferColor(HeapObject from, HeapObject to);

  State state() const {
    DCHECK(state_ == STOPPED || FLAG_incremental_marking);
    return state_;
  }

  bool finalize_marking_completed() const {
    return finalize_marking_completed_;
  }

  void SetWeakClosureWasOverApproximatedForTesting(bool val) {
    finalize_marking_completed_ = val;
  }

  inline bool IsStopped() const { return state() == STOPPED; }

  inline bool IsSweeping() const { return state() == SWEEPING; }

  inline bool IsMarking() const { return state() >= MARKING; }

  inline bool IsMarkingIncomplete() const { return state() == MARKING; }

  inline bool IsComplete() const { return state() == COMPLETE; }

  inline bool IsReadyToOverApproximateWeakClosure() const {
    return request_type_ == FINALIZATION && !finalize_marking_completed_;
  }

  inline bool NeedsFinalization() {
    return IsMarking() &&
           (request_type_ == FINALIZATION || request_type_ == COMPLETE_MARKING);
  }

  GCRequestType request_type() const { return request_type_; }

  void reset_request_type() { request_type_ = NONE; }

  bool CanBeActivated();

  bool WasActivated();

  void Start(GarbageCollectionReason gc_reason);

  void FinalizeIncrementally();

  void UpdateMarkingWorklistAfterScavenge();
  void UpdateMarkedBytesAfterScavenge(size_t dead_bytes_in_new_space);

  void Hurry();

  void Finalize();

  void Stop();

  void FinalizeMarking(CompletionAction action);

  void MarkingComplete(CompletionAction action);

  void Epilogue();

  // Performs incremental marking steps and returns before the deadline_in_ms
  // is reached. It may return earlier if the marker is already ahead of the
  // marking schedule, which is indicated with StepResult::kNoImmediateWork.
  StepResult AdvanceWithDeadline(double deadline_in_ms,
                                 CompletionAction completion_action,
                                 StepOrigin step_origin);
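  //
  // Illustrative AdvanceWithDeadline call pattern (hypothetical caller, e.g.
  // an idle-time task; not part of this header):
  //
  //   StepResult result = heap->incremental_marking()->AdvanceWithDeadline(
  //       deadline_in_ms, IncrementalMarking::GC_VIA_STACK_GUARD,
  //       StepOrigin::kTask);
  //   if (result == StepResult::kWaitingForFinalization) {
  //     // Incremental work is exhausted; request the finalization pause.
  //   }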

  void FinalizeSweeping();
  bool ContinueConcurrentSweeping();
  void SupportConcurrentSweeping();

  StepResult Step(double max_step_size_in_ms, CompletionAction action,
                  StepOrigin step_origin);

  bool ShouldDoEmbedderStep();
  StepResult EmbedderStep(double expected_duration_ms, double* duration_ms);

  V8_INLINE void RestartIfNotMarking();

  // Returns true if the function succeeds in transitioning the object
  // from white to grey.
  V8_INLINE bool WhiteToGreyAndPush(HeapObject obj);
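  //
  // Illustrative WhiteToGreyAndPush use in a marking slow path (hypothetical
  // call site):
  //
  //   if (incremental_marking->WhiteToGreyAndPush(value)) {
  //     // |value| was white; it is now grey and queued for visiting.
  //   }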

  // This function is used to color the object black before it undergoes an
  // unsafe layout change. This is part of the synchronization protocol with
  // the concurrent marker.
  void MarkBlackAndVisitObjectDueToLayoutChange(HeapObject obj);

  void MarkBlackBackground(HeapObject obj, int object_size);

  bool IsCompacting() { return IsMarking() && is_compacting_; }

  void ProcessBlackAllocatedObject(HeapObject obj);

  Heap* heap() const { return heap_; }

  IncrementalMarkingJob* incremental_marking_job() {
    return &incremental_marking_job_;
  }

  bool black_allocation() { return black_allocation_; }

  void StartBlackAllocationForTesting() {
    if (!black_allocation_) {
      StartBlackAllocation();
    }
  }

  MarkingWorklists::Local* local_marking_worklists() const {
    return collector_->local_marking_worklists();
  }

  void Deactivate();

  // Ensures that the given region is black allocated if it is in the old
  // generation.
  void EnsureBlackAllocated(Address allocated, size_t size);

  bool IsBelowActivationThresholds() const;

  void IncrementLiveBytesBackground(MemoryChunk* chunk, intptr_t by) {
    base::MutexGuard guard(&background_live_bytes_mutex_);
    background_live_bytes_[chunk] += by;
  }
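  //
  // Illustrative IncrementLiveBytesBackground use from a background thread
  // (hypothetical call site; obtaining the chunk via
  // MemoryChunk::FromHeapObject() is an assumption):
  //
  //   incremental_marking->IncrementLiveBytesBackground(
  //       MemoryChunk::FromHeapObject(obj), static_cast<intptr_t>(size));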

 private:
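  // Allocation observer used to drive incremental marking from the allocation
  // path; one observer is installed per generation (see
  // new_generation_observer_ and old_generation_observer_ below).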
  class Observer : public AllocationObserver {
   public:
    Observer(IncrementalMarking* incremental_marking, intptr_t step_size)
        : AllocationObserver(step_size),
          incremental_marking_(incremental_marking) {}

    void Step(int bytes_allocated, Address, size_t) override;

   private:
    IncrementalMarking* incremental_marking_;
  };

  void StartMarking();

  void StartBlackAllocation();
  void PauseBlackAllocation();
  void FinishBlackAllocation();

  void MarkRoots();
  bool ShouldRetainMap(Map map, int age);
  // Retain dying maps for <FLAG_retain_maps_for_n_gc> garbage collections to
  // increase the chances of reusing the map transition tree in the future.
  void RetainMaps();

  void PublishWriteBarrierWorklists();

  // Updates scheduled_bytes_to_mark_ to ensure marking progress based on
  // time.
  void ScheduleBytesToMarkBasedOnTime(double time_ms);
  // Updates scheduled_bytes_to_mark_ to ensure marking progress based on
  // allocations.
  void ScheduleBytesToMarkBasedOnAllocation();
  // Helper functions for ScheduleBytesToMarkBasedOnAllocation.
  size_t StepSizeToKeepUpWithAllocations();
  size_t StepSizeToMakeProgress();
  void AddScheduledBytesToMark(size_t bytes_to_mark);

  // Schedules more bytes to mark so that the marker is no longer ahead
  // of schedule.
  void FastForwardSchedule();
  void FastForwardScheduleIfCloseToFinalization();

  // Fetches marked byte counters from the concurrent marker.
  void FetchBytesMarkedConcurrently();

  // Returns the bytes to mark in the current step based on the scheduled
  // bytes and already marked bytes.
  size_t ComputeStepSizeInBytes(StepOrigin step_origin);

  void AdvanceOnAllocation();

  void SetState(State s) {
    state_ = s;
    heap_->SetIsMarkingFlag(s >= MARKING);
  }

  double CurrentTimeToMarkingTask() const;

  Heap* const heap_;
  MarkCompactCollector* const collector_;
  WeakObjects* weak_objects_;

  double start_time_ms_ = 0.0;
  double time_to_force_completion_ = 0.0;
  size_t initial_old_generation_size_ = 0;
  size_t old_generation_allocation_counter_ = 0;
  size_t bytes_marked_ = 0;
  size_t scheduled_bytes_to_mark_ = 0;
  double schedule_update_time_ms_ = 0.0;
  // A sample of concurrent_marking()->TotalMarkedBytes() at the last
  // incremental marking step. It is used for updating bytes_marked_ with the
  // contribution of concurrent marking.
  size_t bytes_marked_concurrently_ = 0;

  // Must use SetState() above to update state_.
  // Atomic since the main thread can complete marking (= changing state_),
  // while a background thread's slow allocation path will check whether
  // incremental marking is currently running.
  std::atomic<State> state_;

  bool is_compacting_ = false;
  bool was_activated_ = false;
  bool black_allocation_ = false;
  bool finalize_marking_completed_ = false;
  IncrementalMarkingJob incremental_marking_job_;

  std::atomic<GCRequestType> request_type_{NONE};

  Observer new_generation_observer_;
  Observer old_generation_observer_;

  MarkingState marking_state_;
  AtomicMarkingState atomic_marking_state_;
  NonAtomicMarkingState non_atomic_marking_state_;

  base::Mutex background_live_bytes_mutex_;
  std::unordered_map<MemoryChunk*, intptr_t> background_live_bytes_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
};
}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_INCREMENTAL_MARKING_H_