blob: 0f5f13cdf7a99036ad358927a2194fa4a6351cd3 [file] [log] [blame]
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
4
5#ifndef V8_HEAP_WORKLIST_H_
6#define V8_HEAP_WORKLIST_H_
7
#include <atomic>
#include <cstddef>
#include <utility>

#include "src/base/atomic-utils.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "testing/gtest/include/gtest/gtest_prod.h"  // nogncheck
16
17namespace v8 {
18namespace internal {
19
// A concurrent worklist based on segments. Each task gets private
// push and pop segments. Empty pop segments are swapped with their
// corresponding push segments. Full push segments are published to a global
// pool of segments and replaced with empty segments.
//
// Work stealing is best effort, i.e., there is no way to inform other tasks
// when new items become available.
27template <typename EntryType, int SEGMENT_SIZE>
28class Worklist {
29 public:
30 class View {
31 public:
32 View(Worklist<EntryType, SEGMENT_SIZE>* worklist, int task_id)
33 : worklist_(worklist), task_id_(task_id) {}
34
35 // Pushes an entry onto the worklist.
36 bool Push(EntryType entry) { return worklist_->Push(task_id_, entry); }
37
38 // Pops an entry from the worklist.
39 bool Pop(EntryType* entry) { return worklist_->Pop(task_id_, entry); }
40
41 // Returns true if the local portion of the worklist is empty.
42 bool IsLocalEmpty() { return worklist_->IsLocalEmpty(task_id_); }
43
44 // Returns true if the worklist is empty. Can only be used from the main
45 // thread without concurrent access.
46 bool IsEmpty() { return worklist_->IsEmpty(); }
47
48 bool IsGlobalPoolEmpty() { return worklist_->IsGlobalPoolEmpty(); }
49
50 size_t LocalPushSegmentSize() {
51 return worklist_->LocalPushSegmentSize(task_id_);
52 }
53
54 void FlushToGlobal() { worklist_->FlushToGlobal(task_id_); }
55
56 private:
57 Worklist<EntryType, SEGMENT_SIZE>* worklist_;
58 int task_id_;
59 };
60
61 static const int kMaxNumTasks = 8;
62 static const size_t kSegmentCapacity = SEGMENT_SIZE;
63
64 Worklist() : Worklist(kMaxNumTasks) {}
65
66 explicit Worklist(int num_tasks) : num_tasks_(num_tasks) {
67 DCHECK_LE(num_tasks, kMaxNumTasks);
68 for (int i = 0; i < num_tasks_; i++) {
69 private_push_segment(i) = NewSegment();
70 private_pop_segment(i) = NewSegment();
71 }
72 }
73
74 ~Worklist() {
75 CHECK(IsEmpty());
76 for (int i = 0; i < num_tasks_; i++) {
77 DCHECK_NOT_NULL(private_push_segment(i));
78 DCHECK_NOT_NULL(private_pop_segment(i));
79 delete private_push_segment(i);
80 delete private_pop_segment(i);
81 }
82 }
83
84 // Swaps content with the given worklist. Local buffers need to
85 // be empty, not thread safe.
86 void Swap(Worklist<EntryType, SEGMENT_SIZE>& other) {
87 CHECK(AreLocalsEmpty());
88 CHECK(other.AreLocalsEmpty());
89
90 global_pool_.Swap(other.global_pool_);
91 }
92
93 bool Push(int task_id, EntryType entry) {
94 DCHECK_LT(task_id, num_tasks_);
95 DCHECK_NOT_NULL(private_push_segment(task_id));
96 if (!private_push_segment(task_id)->Push(entry)) {
97 PublishPushSegmentToGlobal(task_id);
98 bool success = private_push_segment(task_id)->Push(entry);
99 USE(success);
100 DCHECK(success);
101 }
102 return true;
103 }
104
105 bool Pop(int task_id, EntryType* entry) {
106 DCHECK_LT(task_id, num_tasks_);
107 DCHECK_NOT_NULL(private_pop_segment(task_id));
108 if (!private_pop_segment(task_id)->Pop(entry)) {
109 if (!private_push_segment(task_id)->IsEmpty()) {
110 Segment* tmp = private_pop_segment(task_id);
111 private_pop_segment(task_id) = private_push_segment(task_id);
112 private_push_segment(task_id) = tmp;
113 } else if (!StealPopSegmentFromGlobal(task_id)) {
114 return false;
115 }
116 bool success = private_pop_segment(task_id)->Pop(entry);
117 USE(success);
118 DCHECK(success);
119 }
120 return true;
121 }
122
123 size_t LocalPushSegmentSize(int task_id) {
124 return private_push_segment(task_id)->Size();
125 }
126
127 bool IsLocalEmpty(int task_id) {
128 return private_pop_segment(task_id)->IsEmpty() &&
129 private_push_segment(task_id)->IsEmpty();
130 }
131
132 bool IsGlobalPoolEmpty() { return global_pool_.IsEmpty(); }
133
134 bool IsEmpty() {
135 if (!AreLocalsEmpty()) return false;
136 return global_pool_.IsEmpty();
137 }
138
139 bool AreLocalsEmpty() {
140 for (int i = 0; i < num_tasks_; i++) {
141 if (!IsLocalEmpty(i)) return false;
142 }
143 return true;
144 }
145
146 size_t LocalSize(int task_id) {
147 return private_pop_segment(task_id)->Size() +
148 private_push_segment(task_id)->Size();
149 }
150
151 // Thread-safe but may return an outdated result.
152 size_t GlobalPoolSize() const { return global_pool_.Size(); }
153
154 // Clears all segments. Frees the global segment pool.
155 //
156 // Assumes that no other tasks are running.
157 void Clear() {
158 for (int i = 0; i < num_tasks_; i++) {
159 private_pop_segment(i)->Clear();
160 private_push_segment(i)->Clear();
161 }
162 global_pool_.Clear();
163 }
164
165 // Calls the specified callback on each element of the deques and replaces
166 // the element with the result of the callback.
167 // The signature of the callback is
168 // bool Callback(EntryType old, EntryType* new).
169 // If the callback returns |false| then the element is removed from the
170 // worklist. Otherwise the |new| entry is updated.
171 //
172 // Assumes that no other tasks are running.
173 template <typename Callback>
174 void Update(Callback callback) {
175 for (int i = 0; i < num_tasks_; i++) {
176 private_pop_segment(i)->Update(callback);
177 private_push_segment(i)->Update(callback);
178 }
179 global_pool_.Update(callback);
180 }
181
182 // Calls the specified callback on each element of the deques.
183 // The signature of the callback is:
184 // void Callback(EntryType entry).
185 //
186 // Assumes that no other tasks are running.
187 template <typename Callback>
188 void Iterate(Callback callback) {
189 for (int i = 0; i < num_tasks_; i++) {
190 private_pop_segment(i)->Iterate(callback);
191 private_push_segment(i)->Iterate(callback);
192 }
193 global_pool_.Iterate(callback);
194 }
195
196 template <typename Callback>
197 void IterateGlobalPool(Callback callback) {
198 global_pool_.Iterate(callback);
199 }
200
201 void FlushToGlobal(int task_id) {
202 PublishPushSegmentToGlobal(task_id);
203 PublishPopSegmentToGlobal(task_id);
204 }
205
206 void MergeGlobalPool(Worklist* other) {
207 global_pool_.Merge(&other->global_pool_);
208 }
209
210 private:
211 FRIEND_TEST(WorkListTest, SegmentCreate);
212 FRIEND_TEST(WorkListTest, SegmentPush);
213 FRIEND_TEST(WorkListTest, SegmentPushPop);
214 FRIEND_TEST(WorkListTest, SegmentIsEmpty);
215 FRIEND_TEST(WorkListTest, SegmentIsFull);
216 FRIEND_TEST(WorkListTest, SegmentClear);
217 FRIEND_TEST(WorkListTest, SegmentFullPushFails);
218 FRIEND_TEST(WorkListTest, SegmentEmptyPopFails);
219 FRIEND_TEST(WorkListTest, SegmentUpdateFalse);
220 FRIEND_TEST(WorkListTest, SegmentUpdate);
221
222 class Segment {
223 public:
224 static const size_t kCapacity = kSegmentCapacity;
225
226 Segment() : index_(0) {}
227
228 bool Push(EntryType entry) {
229 if (IsFull()) return false;
230 entries_[index_++] = entry;
231 return true;
232 }
233
234 bool Pop(EntryType* entry) {
235 if (IsEmpty()) return false;
236 *entry = entries_[--index_];
237 return true;
238 }
239
240 size_t Size() const { return index_; }
241 bool IsEmpty() const { return index_ == 0; }
242 bool IsFull() const { return index_ == kCapacity; }
243 void Clear() { index_ = 0; }
244
245 template <typename Callback>
246 void Update(Callback callback) {
247 size_t new_index = 0;
248 for (size_t i = 0; i < index_; i++) {
249 if (callback(entries_[i], &entries_[new_index])) {
250 new_index++;
251 }
252 }
253 index_ = new_index;
254 }
255
256 template <typename Callback>
257 void Iterate(Callback callback) const {
258 for (size_t i = 0; i < index_; i++) {
259 callback(entries_[i]);
260 }
261 }
262
263 Segment* next() const { return next_; }
264 void set_next(Segment* segment) { next_ = segment; }
265
266 private:
267 Segment* next_;
268 size_t index_;
269 EntryType entries_[kCapacity];
270 };
271
272 struct PrivateSegmentHolder {
273 Segment* private_push_segment;
274 Segment* private_pop_segment;
275 char cache_line_padding[64];
276 };
277
278 class GlobalPool {
279 public:
280 GlobalPool() : top_(nullptr) {}
281
282 // Swaps contents, not thread safe.
283 void Swap(GlobalPool& other) {
284 Segment* temp = top_;
285 set_top(other.top_);
286 other.set_top(temp);
287 size_t other_size = other.size_.exchange(
288 size_.load(std::memory_order_relaxed), std::memory_order_relaxed);
289 size_.store(other_size, std::memory_order_relaxed);
290 }
291
292 V8_INLINE void Push(Segment* segment) {
293 base::MutexGuard guard(&lock_);
294 segment->set_next(top_);
295 set_top(segment);
296 size_.fetch_add(1, std::memory_order_relaxed);
297 }
298
299 V8_INLINE bool Pop(Segment** segment) {
300 base::MutexGuard guard(&lock_);
301 if (top_ != nullptr) {
302 DCHECK_LT(0U, size_);
303 size_.fetch_sub(1, std::memory_order_relaxed);
304 *segment = top_;
305 set_top(top_->next());
306 return true;
307 }
308 return false;
309 }
310
311 V8_INLINE bool IsEmpty() {
312 return base::AsAtomicPointer::Relaxed_Load(&top_) == nullptr;
313 }
314
315 V8_INLINE size_t Size() const {
316 // It is safe to read |size_| without a lock since this variable is
317 // atomic, keeping in mind that threads may not immediately see the new
318 // value when it is updated.
319 return size_.load(std::memory_order_relaxed);
320 }
321
322 void Clear() {
323 base::MutexGuard guard(&lock_);
324 size_.store(0, std::memory_order_relaxed);
325 Segment* current = top_;
326 while (current != nullptr) {
327 Segment* tmp = current;
328 current = current->next();
329 delete tmp;
330 }
331 set_top(nullptr);
332 }
333
334 // See Worklist::Update.
335 template <typename Callback>
336 void Update(Callback callback) {
337 base::MutexGuard guard(&lock_);
338 Segment* prev = nullptr;
339 Segment* current = top_;
340 size_t num_deleted = 0;
341 while (current != nullptr) {
342 current->Update(callback);
343 if (current->IsEmpty()) {
344 DCHECK_LT(0U, size_);
345 ++num_deleted;
346 if (prev == nullptr) {
347 top_ = current->next();
348 } else {
349 prev->set_next(current->next());
350 }
351 Segment* tmp = current;
352 current = current->next();
353 delete tmp;
354 } else {
355 prev = current;
356 current = current->next();
357 }
358 }
359 size_.fetch_sub(num_deleted, std::memory_order_relaxed);
360 }
361
362 // See Worklist::Iterate.
363 template <typename Callback>
364 void Iterate(Callback callback) {
365 base::MutexGuard guard(&lock_);
366 for (Segment* current = top_; current != nullptr;
367 current = current->next()) {
368 current->Iterate(callback);
369 }
370 }
371
372 void Merge(GlobalPool* other) {
373 Segment* top = nullptr;
374 size_t other_size = 0;
375 {
376 base::MutexGuard guard(&other->lock_);
377 if (!other->top_) return;
378 top = other->top_;
379 other_size = other->size_.load(std::memory_order_relaxed);
380 other->size_.store(0, std::memory_order_relaxed);
381 other->set_top(nullptr);
382 }
383
384 // It's safe to iterate through these segments because the top was
385 // extracted from |other|.
386 Segment* end = top;
387 while (end->next()) end = end->next();
388
389 {
390 base::MutexGuard guard(&lock_);
391 size_.fetch_add(other_size, std::memory_order_relaxed);
392 end->set_next(top_);
393 set_top(top);
394 }
395 }
396
397 private:
398 void set_top(Segment* segment) {
399 base::AsAtomicPointer::Relaxed_Store(&top_, segment);
400 }
401
402 base::Mutex lock_;
403 Segment* top_;
404 std::atomic<size_t> size_{0};
405 };
406
407 V8_INLINE Segment*& private_push_segment(int task_id) {
408 return private_segments_[task_id].private_push_segment;
409 }
410
411 V8_INLINE Segment*& private_pop_segment(int task_id) {
412 return private_segments_[task_id].private_pop_segment;
413 }
414
415 V8_INLINE void PublishPushSegmentToGlobal(int task_id) {
416 if (!private_push_segment(task_id)->IsEmpty()) {
417 global_pool_.Push(private_push_segment(task_id));
418 private_push_segment(task_id) = NewSegment();
419 }
420 }
421
422 V8_INLINE void PublishPopSegmentToGlobal(int task_id) {
423 if (!private_pop_segment(task_id)->IsEmpty()) {
424 global_pool_.Push(private_pop_segment(task_id));
425 private_pop_segment(task_id) = NewSegment();
426 }
427 }
428
429 V8_INLINE bool StealPopSegmentFromGlobal(int task_id) {
430 if (global_pool_.IsEmpty()) return false;
431 Segment* new_segment = nullptr;
432 if (global_pool_.Pop(&new_segment)) {
433 delete private_pop_segment(task_id);
434 private_pop_segment(task_id) = new_segment;
435 return true;
436 }
437 return false;
438 }
439
440 V8_INLINE Segment* NewSegment() {
441 // Bottleneck for filtering in crash dumps.
442 return new Segment();
443 }
444
445 PrivateSegmentHolder private_segments_[kMaxNumTasks];
446 GlobalPool global_pool_;
447 int num_tasks_;
448};
449
450} // namespace internal
451} // namespace v8
452
453#endif // V8_HEAP_WORKLIST_H_