// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/cppgc/concurrent-marker.h"

#include "include/cppgc/platform.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/liveness-broker.h"
#include "src/heap/cppgc/marking-state.h"
#include "src/heap/cppgc/marking-visitor.h"
namespace cppgc {
namespace internal {
namespace {
static constexpr double kMarkingScheduleRatioBeforeConcurrentPriorityIncrease =
    0.5;

static constexpr size_t kDefaultDeadlineCheckInterval = 750u;
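
// Drains |worklist_local| with |callback|, checking after every
// |kDeadlineCheckInterval| items whether the job should yield. Before each
// check, the bytes marked since the last check are reported to the
// incremental marking schedule. Returns false if the drain was interrupted
// by a yield request.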
template <size_t kDeadlineCheckInterval = kDefaultDeadlineCheckInterval,
typename WorklistLocal, typename Callback>
bool DrainWorklistWithYielding(
JobDelegate* job_delegate, ConcurrentMarkingState& marking_state,
IncrementalMarkingSchedule& incremental_marking_schedule,
WorklistLocal& worklist_local, Callback callback) {
return DrainWorklistWithPredicate<kDeadlineCheckInterval>(
[&incremental_marking_schedule, &marking_state, job_delegate]() {
incremental_marking_schedule.AddConcurrentlyMarkedBytes(
marking_state.RecentlyMarkedBytes());
return job_delegate->ShouldYield();
},
worklist_local, callback);
}
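
// Estimates the remaining concurrent marking work as the combined size of
// the worklists that concurrent markers drain.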
size_t WorkSizeForConcurrentMarking(MarkingWorklists& marking_worklists) {
return marking_worklists.marking_worklist()->Size() +
marking_worklists.write_barrier_worklist()->Size() +
marking_worklists.previously_not_fully_constructed_worklist()->Size();
}

// Checks whether worklists' global pools hold any segment a concurrent marker
// can steal. This is called before the concurrent marker holds any Locals, so
// no need to check local segments.
bool HasWorkForConcurrentMarking(MarkingWorklists& marking_worklists) {
return !marking_worklists.marking_worklist()->IsEmpty() ||
!marking_worklists.write_barrier_worklist()->IsEmpty() ||
!marking_worklists.previously_not_fully_constructed_worklist()
->IsEmpty();
}
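
// Job task that runs on worker threads. Each worker operates on its own
// local worklist views and visitor while sharing the global worklists.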
class ConcurrentMarkingTask final : public v8::JobTask {
 public:
  explicit ConcurrentMarkingTask(ConcurrentMarkerBase&);

  void Run(JobDelegate* delegate) final;

  size_t GetMaxConcurrency(size_t) const final;

 private:
  void ProcessWorklists(JobDelegate*, ConcurrentMarkingState&, Visitor&);

  const ConcurrentMarkerBase& concurrent_marker_;
};

ConcurrentMarkingTask::ConcurrentMarkingTask(
ConcurrentMarkerBase& concurrent_marker)
: concurrent_marker_(concurrent_marker) {}
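
// Per-worker entry point: bail out if there is nothing to steal from the
// global worklists, otherwise set up a worker-local marking state and
// visitor, drain the worklists, and publish the results.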
void ConcurrentMarkingTask::Run(JobDelegate* job_delegate) {
if (!HasWorkForConcurrentMarking(concurrent_marker_.marking_worklists()))
return;
ConcurrentMarkingState concurrent_marking_state(
concurrent_marker_.heap(), concurrent_marker_.marking_worklists(),
concurrent_marker_.heap().compactor().compaction_worklists());
std::unique_ptr<Visitor> concurrent_marking_visitor =
concurrent_marker_.CreateConcurrentMarkingVisitor(
concurrent_marking_state);
ProcessWorklists(job_delegate, concurrent_marking_state,
                   *concurrent_marking_visitor);
concurrent_marker_.incremental_marking_schedule().AddConcurrentlyMarkedBytes(
concurrent_marking_state.RecentlyMarkedBytes());
concurrent_marking_state.Publish();
}
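
// The scheduler may run up to this many workers concurrently. Including
// |current_worker_count| keeps already-running workers alive even when the
// global worklists are momentarily empty, since work may still be pending in
// their local views.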
size_t ConcurrentMarkingTask::GetMaxConcurrency(
size_t current_worker_count) const {
return WorkSizeForConcurrentMarking(concurrent_marker_.marking_worklists()) +
current_worker_count;
}
void ConcurrentMarkingTask::ProcessWorklists(
JobDelegate* job_delegate, ConcurrentMarkingState& concurrent_marking_state,
Visitor& concurrent_marking_visitor) {
do {
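    // Objects that were still in construction when first encountered can now
    // be traced through the GCInfo recorded in their headers.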
if (!DrainWorklistWithYielding(
job_delegate, concurrent_marking_state,
concurrent_marker_.incremental_marking_schedule(),
concurrent_marking_state
.previously_not_fully_constructed_worklist(),
[&concurrent_marking_state,
&concurrent_marking_visitor](HeapObjectHeader* header) {
BasePage::FromPayload(header)->SynchronizedLoad();
concurrent_marking_state.AccountMarkedBytes(*header);
DynamicallyTraceMarkedObject<AccessMode::kAtomic>(
concurrent_marking_visitor, *header);
})) {
return;
}
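    // Regular marking items carry their trace callback directly, so no
    // dynamic GCInfo lookup is needed.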
if (!DrainWorklistWithYielding(
job_delegate, concurrent_marking_state,
concurrent_marker_.incremental_marking_schedule(),
concurrent_marking_state.marking_worklist(),
[&concurrent_marking_state, &concurrent_marking_visitor](
const MarkingWorklists::MarkingItem& item) {
BasePage::FromPayload(item.base_object_payload)
->SynchronizedLoad();
const HeapObjectHeader& header =
HeapObjectHeader::FromPayload(item.base_object_payload);
DCHECK(!header.IsInConstruction<AccessMode::kAtomic>());
DCHECK(header.IsMarked<AccessMode::kAtomic>());
concurrent_marking_state.AccountMarkedBytes(header);
item.callback(&concurrent_marking_visitor,
item.base_object_payload);
})) {
return;
}
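    // Objects recorded by the write barrier are traced dynamically, like
    // in-construction objects above.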
if (!DrainWorklistWithYielding(
job_delegate, concurrent_marking_state,
concurrent_marker_.incremental_marking_schedule(),
concurrent_marking_state.write_barrier_worklist(),
[&concurrent_marking_state,
&concurrent_marking_visitor](HeapObjectHeader* header) {
BasePage::FromPayload(header)->SynchronizedLoad();
concurrent_marking_state.AccountMarkedBytes(*header);
DynamicallyTraceMarkedObject<AccessMode::kAtomic>(
concurrent_marking_visitor, *header);
})) {
return;
}
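    // For ephemeron pairs, the value is traced only if its key is already
    // marked.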
if (!DrainWorklistWithYielding(
job_delegate, concurrent_marking_state,
concurrent_marker_.incremental_marking_schedule(),
concurrent_marking_state.ephemeron_pairs_for_processing_worklist(),
[&concurrent_marking_state](
const MarkingWorklists::EphemeronPairItem& item) {
concurrent_marking_state.ProcessEphemeron(item.key,
item.value_desc);
})) {
return;
}
} while (
!concurrent_marking_state.marking_worklist().IsLocalAndGlobalEmpty());
}

}  // namespace

ConcurrentMarkerBase::ConcurrentMarkerBase(
HeapBase& heap, MarkingWorklists& marking_worklists,
IncrementalMarkingSchedule& incremental_marking_schedule,
cppgc::Platform* platform)
: heap_(heap),
marking_worklists_(marking_worklists),
incremental_marking_schedule_(incremental_marking_schedule),
platform_(platform) {}
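
// Posts the concurrent marking job at user-visible priority; the priority
// may later be raised by IncreaseMarkingPriorityIfNeeded().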
void ConcurrentMarkerBase::Start() {
DCHECK(platform_);
concurrent_marking_handle_ =
platform_->PostJob(v8::TaskPriority::kUserVisible,
std::make_unique<ConcurrentMarkingTask>(*this));
}

void ConcurrentMarkerBase::Cancel() {
if (concurrent_marking_handle_ && concurrent_marking_handle_->IsValid())
concurrent_marking_handle_->Cancel();
}

void ConcurrentMarkerBase::JoinForTesting() {
if (concurrent_marking_handle_ && concurrent_marking_handle_->IsValid())
concurrent_marking_handle_->Join();
}

bool ConcurrentMarkerBase::IsActive() const {
return concurrent_marking_handle_ && concurrent_marking_handle_->IsRunning();
}

ConcurrentMarkerBase::~ConcurrentMarkerBase() {
CHECK_IMPLIES(concurrent_marking_handle_,
!concurrent_marking_handle_->IsValid());
}
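
// Called on the mutator thread after each incremental marking step. If the
// global worklists still hold work, wakes up concurrent markers and reports
// that marking is not yet done; otherwise reports whether the job has become
// inactive.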
bool ConcurrentMarkerBase::NotifyIncrementalMutatorStepCompleted() {
DCHECK(concurrent_marking_handle_);
if (HasWorkForConcurrentMarking(marking_worklists_)) {
// Notifies the scheduler that max concurrency might have increased.
// This will adjust the number of markers if necessary.
IncreaseMarkingPriorityIfNeeded();
concurrent_marking_handle_->NotifyConcurrencyIncrease();
return false;
}
return !concurrent_marking_handle_->IsActive();
}
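
// Raises the job priority to user-blocking when concurrent markers have made
// no progress for a significant fraction of the expected marking time; see
// the detailed comment below.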
void ConcurrentMarkerBase::IncreaseMarkingPriorityIfNeeded() {
if (!concurrent_marking_handle_->UpdatePriorityEnabled()) return;
if (concurrent_marking_priority_increased_) return;
  // If concurrent tasks aren't executed, it might delay GC finalization.
  // As long as GC is active, so is the write barrier, which incurs a
  // performance cost. Marking is estimated to take overall
  // |IncrementalMarkingSchedule::kEstimatedMarkingTimeMs|. If concurrent
  // marking tasks have not reported any progress (i.e. the concurrently
  // marked bytes count has not changed) in over
  // |kMarkingScheduleRatioBeforeConcurrentPriorityIncrease| of that expected
  // duration, we increase the concurrent task priority for the duration of
  // the current GC. This is meant to prevent the GC from exceeding its
  // expected end time.
  size_t current_concurrently_marked_bytes =
      incremental_marking_schedule_.GetConcurrentlyMarkedBytes();
  if (current_concurrently_marked_bytes > last_concurrently_marked_bytes_) {
    last_concurrently_marked_bytes_ = current_concurrently_marked_bytes;
last_concurrently_marked_bytes_update_ = v8::base::TimeTicks::Now();
} else if ((v8::base::TimeTicks::Now() -
last_concurrently_marked_bytes_update_)
.InMilliseconds() >
kMarkingScheduleRatioBeforeConcurrentPriorityIncrease *
IncrementalMarkingSchedule::kEstimatedMarkingTimeMs) {
concurrent_marking_handle_->UpdatePriority(
cppgc::TaskPriority::kUserBlocking);
concurrent_marking_priority_increased_ = true;
}
}
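
// ConcurrentMarker uses the default concurrent marking visitor; subclasses
// of ConcurrentMarkerBase may provide a different one.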
std::unique_ptr<Visitor> ConcurrentMarker::CreateConcurrentMarkingVisitor(
ConcurrentMarkingState& marking_state) const {
return std::make_unique<ConcurrentMarkingVisitor>(heap(), marking_state);
}

}  // namespace internal
}  // namespace cppgc