// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/backend/register-allocator.h"
#include <iomanip>
#include "src/base/iterator.h"
#include "src/base/small-vector.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/tick-counter.h"
#include "src/compiler/backend/spill-placer.h"
#include "src/compiler/linkage.h"
#include "src/strings/string-stream.h"
#include "src/utils/vector.h"
namespace v8 {
namespace internal {
namespace compiler {
#define TRACE_COND(cond, ...) \
do { \
if (cond) PrintF(__VA_ARGS__); \
} while (false)
#define TRACE(...) TRACE_COND(data()->is_trace_alloc(), __VA_ARGS__)
namespace {
static constexpr int kFloat32Bit =
RepresentationBit(MachineRepresentation::kFloat32);
static constexpr int kSimd128Bit =
RepresentationBit(MachineRepresentation::kSimd128);
const InstructionBlock* GetContainingLoop(const InstructionSequence* sequence,
const InstructionBlock* block) {
RpoNumber index = block->loop_header();
if (!index.IsValid()) return nullptr;
return sequence->InstructionBlockAt(index);
}
const InstructionBlock* GetInstructionBlock(const InstructionSequence* code,
LifetimePosition pos) {
return code->GetInstructionBlock(pos.ToInstructionIndex());
}
Instruction* GetLastInstruction(InstructionSequence* code,
const InstructionBlock* block) {
return code->InstructionAt(block->last_instruction_index());
}
} // namespace
void LiveRangeBoundArray::Initialize(Zone* zone, TopLevelLiveRange* range) {
size_t max_child_count = range->GetMaxChildCount();
start_ = zone->NewArray<LiveRangeBound>(max_child_count);
length_ = 0;
LiveRangeBound* curr = start_;
// The primary loop in ResolveControlFlow is not responsible for inserting
// connecting moves for spilled ranges.
for (LiveRange* i = range; i != nullptr; i = i->next(), ++curr, ++length_) {
new (curr) LiveRangeBound(i, i->spilled());
}
}
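// Binary search for the bound whose [start_, end_) interval contains
// `position`. The children of a live range are sorted and do not overlap,
// so the usual bisection invariant holds; the search assumes `position`
// lies inside one of the children.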
LiveRangeBound* LiveRangeBoundArray::Find(
const LifetimePosition position) const {
size_t left_index = 0;
size_t right_index = length_;
while (true) {
size_t current_index = left_index + (right_index - left_index) / 2;
DCHECK(right_index > current_index);
LiveRangeBound* bound = &start_[current_index];
if (bound->start_ <= position) {
if (position < bound->end_) return bound;
DCHECK(left_index < current_index);
left_index = current_index;
} else {
right_index = current_index;
}
}
}
LiveRangeBound* LiveRangeBoundArray::FindPred(const InstructionBlock* pred) {
LifetimePosition pred_end = LifetimePosition::InstructionFromInstructionIndex(
pred->last_instruction_index());
return Find(pred_end);
}
LiveRangeBound* LiveRangeBoundArray::FindSucc(const InstructionBlock* succ) {
LifetimePosition succ_start = LifetimePosition::GapFromInstructionIndex(
succ->first_instruction_index());
return Find(succ_start);
}
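// Decides whether the edge pred->block needs a connecting move for this
// value: returns false if the same child range covers both sides, or if the
// range is spilled (and thus skipped) on entry to `block`; otherwise fills
// in the covering child on each side of the edge.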
bool LiveRangeBoundArray::FindConnectableSubranges(
const InstructionBlock* block, const InstructionBlock* pred,
FindResult* result) const {
LifetimePosition pred_end = LifetimePosition::InstructionFromInstructionIndex(
pred->last_instruction_index());
LiveRangeBound* bound = Find(pred_end);
result->pred_cover_ = bound->range_;
LifetimePosition cur_start = LifetimePosition::GapFromInstructionIndex(
block->first_instruction_index());
if (bound->CanCover(cur_start)) {
// Both blocks are covered by the same range, so there is nothing to
// connect.
return false;
}
bound = Find(cur_start);
if (bound->skip_) {
return false;
}
result->cur_cover_ = bound->range_;
DCHECK(result->pred_cover_ != nullptr && result->cur_cover_ != nullptr);
return (result->cur_cover_ != result->pred_cover_);
}
LiveRangeFinder::LiveRangeFinder(const TopTierRegisterAllocationData* data,
Zone* zone)
: data_(data),
bounds_length_(static_cast<int>(data_->live_ranges().size())),
bounds_(zone->NewArray<LiveRangeBoundArray>(bounds_length_)),
zone_(zone) {
for (int i = 0; i < bounds_length_; ++i) {
new (&bounds_[i]) LiveRangeBoundArray();
}
}
LiveRangeBoundArray* LiveRangeFinder::ArrayFor(int operand_index) {
DCHECK(operand_index < bounds_length_);
TopLevelLiveRange* range = data_->live_ranges()[operand_index];
DCHECK(range != nullptr && !range->IsEmpty());
LiveRangeBoundArray* array = &bounds_[operand_index];
if (array->ShouldInitialize()) {
array->Initialize(zone_, range);
}
return array;
}
using DelayedInsertionMapKey = std::pair<ParallelMove*, InstructionOperand>;
struct DelayedInsertionMapCompare {
bool operator()(const DelayedInsertionMapKey& a,
const DelayedInsertionMapKey& b) const {
if (a.first == b.first) {
return a.second.Compare(b.second);
}
return a.first < b.first;
}
};
using DelayedInsertionMap = ZoneMap<DelayedInsertionMapKey, InstructionOperand,
DelayedInsertionMapCompare>;
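// Derive the use type and the register-beneficial flag from the policy of
// the operand this use position refers to, if it is still unallocated.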
UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand,
void* hint, UsePositionHintType hint_type)
: operand_(operand), hint_(hint), next_(nullptr), pos_(pos), flags_(0) {
DCHECK_IMPLIES(hint == nullptr, hint_type == UsePositionHintType::kNone);
bool register_beneficial = true;
UsePositionType type = UsePositionType::kRegisterOrSlot;
if (operand_ != nullptr && operand_->IsUnallocated()) {
const UnallocatedOperand* unalloc = UnallocatedOperand::cast(operand_);
if (unalloc->HasRegisterPolicy()) {
type = UsePositionType::kRequiresRegister;
} else if (unalloc->HasSlotPolicy()) {
type = UsePositionType::kRequiresSlot;
register_beneficial = false;
} else if (unalloc->HasRegisterOrSlotOrConstantPolicy()) {
type = UsePositionType::kRegisterOrSlotOrConstant;
register_beneficial = false;
} else {
register_beneficial = !unalloc->HasRegisterOrSlotPolicy();
}
}
flags_ = TypeField::encode(type) | HintTypeField::encode(hint_type) |
RegisterBeneficialField::encode(register_beneficial) |
AssignedRegisterField::encode(kUnassignedRegister);
DCHECK(pos_.IsValid());
}
bool UsePosition::HasHint() const {
int hint_register;
return HintRegister(&hint_register);
}
bool UsePosition::HintRegister(int* register_code) const {
if (hint_ == nullptr) return false;
switch (HintTypeField::decode(flags_)) {
case UsePositionHintType::kNone:
case UsePositionHintType::kUnresolved:
return false;
case UsePositionHintType::kUsePos: {
UsePosition* use_pos = reinterpret_cast<UsePosition*>(hint_);
int assigned_register = AssignedRegisterField::decode(use_pos->flags_);
if (assigned_register == kUnassignedRegister) return false;
*register_code = assigned_register;
return true;
}
case UsePositionHintType::kOperand: {
InstructionOperand* operand =
reinterpret_cast<InstructionOperand*>(hint_);
*register_code = LocationOperand::cast(operand)->register_code();
return true;
}
case UsePositionHintType::kPhi: {
TopTierRegisterAllocationData::PhiMapValue* phi =
reinterpret_cast<TopTierRegisterAllocationData::PhiMapValue*>(hint_);
int assigned_register = phi->assigned_register();
if (assigned_register == kUnassignedRegister) return false;
*register_code = assigned_register;
return true;
}
}
UNREACHABLE();
}
UsePositionHintType UsePosition::HintTypeForOperand(
const InstructionOperand& op) {
switch (op.kind()) {
case InstructionOperand::CONSTANT:
case InstructionOperand::IMMEDIATE:
return UsePositionHintType::kNone;
case InstructionOperand::UNALLOCATED:
return UsePositionHintType::kUnresolved;
case InstructionOperand::ALLOCATED:
if (op.IsRegister() || op.IsFPRegister()) {
return UsePositionHintType::kOperand;
} else {
DCHECK(op.IsStackSlot() || op.IsFPStackSlot());
return UsePositionHintType::kNone;
}
case InstructionOperand::PENDING:
case InstructionOperand::INVALID:
break;
}
UNREACHABLE();
}
void UsePosition::SetHint(UsePosition* use_pos) {
DCHECK_NOT_NULL(use_pos);
hint_ = use_pos;
flags_ = HintTypeField::update(flags_, UsePositionHintType::kUsePos);
}
void UsePosition::ResolveHint(UsePosition* use_pos) {
DCHECK_NOT_NULL(use_pos);
if (HintTypeField::decode(flags_) != UsePositionHintType::kUnresolved) return;
hint_ = use_pos;
flags_ = HintTypeField::update(flags_, UsePositionHintType::kUsePos);
}
void UsePosition::set_type(UsePositionType type, bool register_beneficial) {
DCHECK_IMPLIES(type == UsePositionType::kRequiresSlot, !register_beneficial);
DCHECK_EQ(kUnassignedRegister, AssignedRegisterField::decode(flags_));
flags_ = TypeField::encode(type) |
RegisterBeneficialField::encode(register_beneficial) |
HintTypeField::encode(HintTypeField::decode(flags_)) |
AssignedRegisterField::encode(kUnassignedRegister);
}
UseInterval* UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
DCHECK(Contains(pos) && pos != start());
UseInterval* after = zone->New<UseInterval>(pos, end_);
after->next_ = next_;
next_ = nullptr;
end_ = pos;
return after;
}
void LifetimePosition::Print() const { StdoutStream{} << *this << std::endl; }
std::ostream& operator<<(std::ostream& os, const LifetimePosition pos) {
os << '@' << pos.ToInstructionIndex();
if (pos.IsGapPosition()) {
os << 'g';
} else {
os << 'i';
}
if (pos.IsStart()) {
os << 's';
} else {
os << 'e';
}
return os;
}
LiveRange::LiveRange(int relative_id, MachineRepresentation rep,
TopLevelLiveRange* top_level)
: relative_id_(relative_id),
bits_(0),
last_interval_(nullptr),
first_interval_(nullptr),
first_pos_(nullptr),
top_level_(top_level),
next_(nullptr),
current_interval_(nullptr),
last_processed_use_(nullptr),
current_hint_position_(nullptr) {
DCHECK(AllocatedOperand::IsSupportedRepresentation(rep));
bits_ = AssignedRegisterField::encode(kUnassignedRegister) |
RepresentationField::encode(rep) |
ControlFlowRegisterHint::encode(kUnassignedRegister);
}
void LiveRange::VerifyPositions() const {
// Walk the positions, verifying that each is in an interval.
UseInterval* interval = first_interval_;
for (UsePosition* pos = first_pos_; pos != nullptr; pos = pos->next()) {
CHECK(Start() <= pos->pos());
CHECK(pos->pos() <= End());
CHECK_NOT_NULL(interval);
while (!interval->Contains(pos->pos()) && interval->end() != pos->pos()) {
interval = interval->next();
CHECK_NOT_NULL(interval);
}
}
}
void LiveRange::VerifyIntervals() const {
DCHECK(first_interval()->start() == Start());
LifetimePosition last_end = first_interval()->end();
for (UseInterval* interval = first_interval()->next(); interval != nullptr;
interval = interval->next()) {
DCHECK(last_end <= interval->start());
last_end = interval->end();
}
DCHECK(last_end == End());
}
void LiveRange::set_assigned_register(int reg) {
DCHECK(!HasRegisterAssigned() && !spilled());
bits_ = AssignedRegisterField::update(bits_, reg);
}
void LiveRange::UnsetAssignedRegister() {
DCHECK(HasRegisterAssigned() && !spilled());
bits_ = AssignedRegisterField::update(bits_, kUnassignedRegister);
}
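// Re-attach the next child range to this one: splice its use intervals and
// use positions onto the end of this range's lists and unlink it from the
// chain of children.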
void LiveRange::AttachToNext() {
DCHECK_NOT_NULL(next_);
DCHECK_NE(TopLevel()->last_child_covers_, next_);
last_interval_->set_next(next_->first_interval());
next_->first_interval_ = nullptr;
last_interval_ = next_->last_interval_;
next_->last_interval_ = nullptr;
if (first_pos() == nullptr) {
first_pos_ = next_->first_pos();
} else {
UsePosition* ptr = first_pos_;
while (ptr->next() != nullptr) {
ptr = ptr->next();
}
ptr->set_next(next_->first_pos());
}
next_->first_pos_ = nullptr;
LiveRange* old_next = next_;
next_ = next_->next_;
old_next->next_ = nullptr;
}
void LiveRange::Unspill() {
DCHECK(spilled());
set_spilled(false);
bits_ = AssignedRegisterField::update(bits_, kUnassignedRegister);
}
void LiveRange::Spill() {
DCHECK(!spilled());
DCHECK(!TopLevel()->HasNoSpillType());
set_spilled(true);
bits_ = AssignedRegisterField::update(bits_, kUnassignedRegister);
}
RegisterKind LiveRange::kind() const {
return IsFloatingPoint(representation()) ? RegisterKind::kDouble
: RegisterKind::kGeneral;
}
UsePosition* LiveRange::FirstHintPosition(int* register_index) {
if (!first_pos_) return nullptr;
if (current_hint_position_) {
if (current_hint_position_->pos() < first_pos_->pos()) {
current_hint_position_ = first_pos_;
}
if (current_hint_position_->pos() > End()) {
current_hint_position_ = nullptr;
}
}
bool needs_revisit = false;
UsePosition* pos = current_hint_position_;
for (; pos != nullptr; pos = pos->next()) {
if (pos->HintRegister(register_index)) {
break;
}
// Phi and use position hints can be assigned during allocation which
// would invalidate the cached hint position. Make sure we revisit them.
needs_revisit = needs_revisit ||
pos->hint_type() == UsePositionHintType::kPhi ||
pos->hint_type() == UsePositionHintType::kUsePos;
}
if (!needs_revisit) {
current_hint_position_ = pos;
}
#ifdef DEBUG
UsePosition* pos_check = first_pos_;
for (; pos_check != nullptr; pos_check = pos_check->next()) {
if (pos_check->HasHint()) {
break;
}
}
CHECK_EQ(pos, pos_check);
#endif
return pos;
}
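// Returns the first use position at or after `start`, resuming the walk
// from the cached last_processed_use_ when possible so that repeated
// queries with non-decreasing positions stay cheap.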
UsePosition* LiveRange::NextUsePosition(LifetimePosition start) const {
UsePosition* use_pos = last_processed_use_;
if (use_pos == nullptr || use_pos->pos() > start) {
use_pos = first_pos();
}
while (use_pos != nullptr && use_pos->pos() < start) {
use_pos = use_pos->next();
}
last_processed_use_ = use_pos;
return use_pos;
}
UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial(
LifetimePosition start) const {
UsePosition* pos = NextUsePosition(start);
while (pos != nullptr && !pos->RegisterIsBeneficial()) {
pos = pos->next();
}
return pos;
}
LifetimePosition LiveRange::NextLifetimePositionRegisterIsBeneficial(
const LifetimePosition& start) const {
UsePosition* next_use = NextUsePositionRegisterIsBeneficial(start);
if (next_use == nullptr) return End();
return next_use->pos();
}
UsePosition* LiveRange::PreviousUsePositionRegisterIsBeneficial(
LifetimePosition start) const {
UsePosition* pos = first_pos();
UsePosition* prev = nullptr;
while (pos != nullptr && pos->pos() < start) {
if (pos->RegisterIsBeneficial()) prev = pos;
pos = pos->next();
}
return prev;
}
UsePosition* LiveRange::NextUsePositionSpillDetrimental(
LifetimePosition start) const {
UsePosition* pos = NextUsePosition(start);
while (pos != nullptr && pos->type() != UsePositionType::kRequiresRegister &&
!pos->SpillDetrimental()) {
pos = pos->next();
}
return pos;
}
UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) const {
UsePosition* pos = NextUsePosition(start);
while (pos != nullptr && pos->type() != UsePositionType::kRequiresRegister) {
pos = pos->next();
}
return pos;
}
UsePosition* LiveRange::NextSlotPosition(LifetimePosition start) const {
for (UsePosition* pos = NextUsePosition(start); pos != nullptr;
pos = pos->next()) {
if (pos->type() != UsePositionType::kRequiresSlot) continue;
return pos;
}
return nullptr;
}
bool LiveRange::CanBeSpilled(LifetimePosition pos) const {
// We cannot spill a live range that has a use requiring a register
// at the current or the immediate next position.
UsePosition* use_pos = NextRegisterPosition(pos);
if (use_pos == nullptr) return true;
return use_pos->pos() > pos.NextStart().End();
}
bool LiveRange::IsTopLevel() const { return top_level_ == this; }
InstructionOperand LiveRange::GetAssignedOperand() const {
DCHECK(!IsEmpty());
if (HasRegisterAssigned()) {
DCHECK(!spilled());
return AllocatedOperand(LocationOperand::REGISTER, representation(),
assigned_register());
}
DCHECK(spilled());
DCHECK(!HasRegisterAssigned());
if (TopLevel()->HasSpillOperand()) {
InstructionOperand* op = TopLevel()->GetSpillOperand();
DCHECK(!op->IsUnallocated());
return *op;
}
return TopLevel()->GetSpillRangeOperand();
}
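// Returns a use interval from which a forward search for `position` can
// start: the cached current interval if it does not begin after `position`,
// otherwise the first interval of the range.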
UseInterval* LiveRange::FirstSearchIntervalForPosition(
LifetimePosition position) const {
if (current_interval_ == nullptr) return first_interval_;
if (current_interval_->start() > position) {
current_interval_ = nullptr;
return first_interval_;
}
return current_interval_;
}
void LiveRange::AdvanceLastProcessedMarker(
UseInterval* to_start_of, LifetimePosition but_not_past) const {
if (to_start_of == nullptr) return;
if (to_start_of->start() > but_not_past) return;
LifetimePosition start = current_interval_ == nullptr
? LifetimePosition::Invalid()
: current_interval_->start();
if (to_start_of->start() > start) {
current_interval_ = to_start_of;
}
}
LiveRange* LiveRange::SplitAt(LifetimePosition position, Zone* zone) {
int new_id = TopLevel()->GetNextChildId();
LiveRange* child = zone->New<LiveRange>(new_id, representation(), TopLevel());
child->set_bundle(bundle_);
// If we split, we do so because we're about to switch registers or move
// to/from a slot, so there's no value in connecting hints.
DetachAt(position, child, zone, DoNotConnectHints);
child->top_level_ = TopLevel();
child->next_ = next_;
next_ = child;
return child;
}
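// Splits the use intervals and use positions of this range at `position`,
// transferring the later part to `result`. Returns the last use position
// that remains in this range, or nullptr if none does.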
UsePosition* LiveRange::DetachAt(LifetimePosition position, LiveRange* result,
Zone* zone,
HintConnectionOption connect_hints) {
DCHECK(Start() < position);
DCHECK(End() > position);
DCHECK(result->IsEmpty());
// Find the last interval that ends before the position. If the
// position is contained in one of the intervals in the chain, we
// split that interval and use the first part.
UseInterval* current = FirstSearchIntervalForPosition(position);
// If the split position coincides with the beginning of a use interval,
// we need to split use positions in a special way.
bool split_at_start = false;
if (current->start() == position) {
// When splitting at start we need to locate the previous use interval.
current = first_interval_;
}
UseInterval* after = nullptr;
while (current != nullptr) {
if (current->Contains(position)) {
after = current->SplitAt(position, zone);
break;
}
UseInterval* next = current->next();
if (next->start() >= position) {
split_at_start = (next->start() == position);
after = next;
current->set_next(nullptr);
break;
}
current = next;
}
DCHECK_NOT_NULL(after);
// Partition original use intervals to the two live ranges.
UseInterval* before = current;
result->last_interval_ =
(last_interval_ == before)
? after // Only interval in the range after split.
: last_interval_; // Last interval of the original range.
result->first_interval_ = after;
last_interval_ = before;
// Find the last use position before the split and the first use
// position after it.
UsePosition* use_after = first_pos();
UsePosition* use_before = nullptr;
if (split_at_start) {
// The split position coincides with the beginning of a use interval (the
// end of a lifetime hole). Use at this position should be attributed to
// the split child because split child owns use interval covering it.
while (use_after != nullptr && use_after->pos() < position) {
use_before = use_after;
use_after = use_after->next();
}
} else {
while (use_after != nullptr && use_after->pos() <= position) {
use_before = use_after;
use_after = use_after->next();
}
}
// Partition original use positions to the two live ranges.
if (use_before != nullptr) {
use_before->set_next(nullptr);
} else {
first_pos_ = nullptr;
}
result->first_pos_ = use_after;
result->current_hint_position_ = current_hint_position_;
// Discard cached iteration state. It might be pointing
// to a use that no longer belongs to this live range.
last_processed_use_ = nullptr;
current_interval_ = nullptr;
if (connect_hints == ConnectHints && use_before != nullptr &&
use_after != nullptr) {
use_after->SetHint(use_before);
result->current_hint_position_ = use_after;
}
#ifdef DEBUG
VerifyChildStructure();
result->VerifyChildStructure();
#endif
return use_before;
}
void LiveRange::UpdateParentForAllChildren(TopLevelLiveRange* new_top_level) {
LiveRange* child = this;
for (; child != nullptr; child = child->next()) {
child->top_level_ = new_top_level;
}
}
void LiveRange::ConvertUsesToOperand(const InstructionOperand& op,
const InstructionOperand& spill_op) {
for (UsePosition* pos = first_pos(); pos != nullptr; pos = pos->next()) {
DCHECK(Start() <= pos->pos() && pos->pos() <= End());
if (!pos->HasOperand()) continue;
switch (pos->type()) {
case UsePositionType::kRequiresSlot:
DCHECK(spill_op.IsStackSlot() || spill_op.IsFPStackSlot());
InstructionOperand::ReplaceWith(pos->operand(), &spill_op);
break;
case UsePositionType::kRequiresRegister:
DCHECK(op.IsRegister() || op.IsFPRegister());
V8_FALLTHROUGH;
case UsePositionType::kRegisterOrSlot:
case UsePositionType::kRegisterOrSlotOrConstant:
InstructionOperand::ReplaceWith(pos->operand(), &op);
break;
}
}
}
// This implements an ordering on live ranges so that they are ordered by their
// start positions. This is needed for the correctness of the register
// allocation algorithm. If two live ranges start at the same offset then there
// is a tie breaker based on where the value is first used. This part of the
// ordering is merely a heuristic.
bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
LifetimePosition start = Start();
LifetimePosition other_start = other->Start();
if (start == other_start) {
// Prefer the register that has a control flow hint to make sure it gets
// allocated first. This allows the control-flow-aware allocation to
// just put ranges back into the queue without other ranges interfering.
if (controlflow_hint() < other->controlflow_hint()) {
return true;
}
// The other has a smaller hint.
if (controlflow_hint() > other->controlflow_hint()) {
return false;
}
// Both have the same hint or no hint at all. Use first use position.
UsePosition* pos = first_pos();
UsePosition* other_pos = other->first_pos();
// To make the order total, handle the case where both positions are null.
if (pos == other_pos) return TopLevel()->vreg() < other->TopLevel()->vreg();
if (pos == nullptr) return false;
if (other_pos == nullptr) return true;
// To make the order total, handle the case where both positions are equal.
if (pos->pos() == other_pos->pos())
return TopLevel()->vreg() < other->TopLevel()->vreg();
return pos->pos() < other_pos->pos();
}
return start < other_start;
}
void LiveRange::SetUseHints(int register_index) {
for (UsePosition* pos = first_pos(); pos != nullptr; pos = pos->next()) {
if (!pos->HasOperand()) continue;
switch (pos->type()) {
case UsePositionType::kRequiresSlot:
break;
case UsePositionType::kRequiresRegister:
case UsePositionType::kRegisterOrSlot:
case UsePositionType::kRegisterOrSlotOrConstant:
pos->set_assigned_register(register_index);
break;
}
}
}
bool LiveRange::CanCover(LifetimePosition position) const {
if (IsEmpty()) return false;
return Start() <= position && position < End();
}
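// Unlike CanCover, this also checks that `position` does not fall into a
// lifetime hole between two use intervals.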
bool LiveRange::Covers(LifetimePosition position) const {
if (!CanCover(position)) return false;
UseInterval* start_search = FirstSearchIntervalForPosition(position);
for (UseInterval* interval = start_search; interval != nullptr;
interval = interval->next()) {
DCHECK(interval->next() == nullptr ||
interval->next()->start() >= interval->start());
AdvanceLastProcessedMarker(interval, position);
if (interval->Contains(position)) return true;
if (interval->start() > position) return false;
}
return false;
}
LifetimePosition LiveRange::NextEndAfter(LifetimePosition position) const {
UseInterval* start_search = FirstSearchIntervalForPosition(position);
while (start_search->end() < position) {
start_search = start_search->next();
}
return start_search->end();
}
LifetimePosition LiveRange::NextStartAfter(LifetimePosition position) {
UseInterval* start_search = FirstSearchIntervalForPosition(position);
while (start_search->start() < position) {
start_search = start_search->next();
}
next_start_ = start_search->start();
return next_start_;
}
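// Finds the first position at which this range and `other` are live at the
// same time, by walking both sorted interval lists in lockstep and
// intersecting them pairwise. Returns an invalid position if they never
// overlap.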
LifetimePosition LiveRange::FirstIntersection(LiveRange* other) const {
UseInterval* b = other->first_interval();
if (b == nullptr) return LifetimePosition::Invalid();
LifetimePosition advance_last_processed_up_to = b->start();
UseInterval* a = FirstSearchIntervalForPosition(b->start());
while (a != nullptr && b != nullptr) {
if (a->start() > other->End()) break;
if (b->start() > End()) break;
LifetimePosition cur_intersection = a->Intersect(b);
if (cur_intersection.IsValid()) {
return cur_intersection;
}
if (a->start() < b->start()) {
a = a->next();
if (a == nullptr || a->start() > other->End()) break;
AdvanceLastProcessedMarker(a, advance_last_processed_up_to);
} else {
b = b->next();
}
}
return LifetimePosition::Invalid();
}
void LiveRange::Print(const RegisterConfiguration* config,
bool with_children) const {
StdoutStream os;
PrintableLiveRange wrapper;
wrapper.register_configuration_ = config;
for (const LiveRange* i = this; i != nullptr; i = i->next()) {
wrapper.range_ = i;
os << wrapper << std::endl;
if (!with_children) break;
}
}
void LiveRange::Print(bool with_children) const {
Print(RegisterConfiguration::Default(), with_children);
}
bool LiveRange::RegisterFromBundle(int* hint) const {
if (bundle_ == nullptr || bundle_->reg() == kUnassignedRegister) return false;
*hint = bundle_->reg();
return true;
}
void LiveRange::UpdateBundleRegister(int reg) const {
if (bundle_ == nullptr || bundle_->reg() != kUnassignedRegister) return;
bundle_->set_reg(reg);
}
struct TopLevelLiveRange::SpillMoveInsertionList : ZoneObject {
SpillMoveInsertionList(int gap_index, InstructionOperand* operand,
SpillMoveInsertionList* next)
: gap_index(gap_index), operand(operand), next(next) {}
const int gap_index;
InstructionOperand* const operand;
SpillMoveInsertionList* next;
};
TopLevelLiveRange::TopLevelLiveRange(int vreg, MachineRepresentation rep)
: LiveRange(0, rep, this),
vreg_(vreg),
last_child_id_(0),
spill_operand_(nullptr),
spill_move_insertion_locations_(nullptr),
spilled_in_deferred_blocks_(false),
has_preassigned_slot_(false),
spill_start_index_(kMaxInt),
last_pos_(nullptr),
last_child_covers_(this) {
bits_ |= SpillTypeField::encode(SpillType::kNoSpillType);
}
void TopLevelLiveRange::RecordSpillLocation(Zone* zone, int gap_index,
InstructionOperand* operand) {
DCHECK(HasNoSpillType());
spill_move_insertion_locations_ = zone->New<SpillMoveInsertionList>(
gap_index, operand, spill_move_insertion_locations_);
}
void TopLevelLiveRange::CommitSpillMoves(TopTierRegisterAllocationData* data,
const InstructionOperand& op) {
DCHECK_IMPLIES(op.IsConstant(),
GetSpillMoveInsertionLocations(data) == nullptr);
if (HasGeneralSpillRange()) {
SetLateSpillingSelected(false);
}
InstructionSequence* sequence = data->code();
Zone* zone = sequence->zone();
for (SpillMoveInsertionList* to_spill = GetSpillMoveInsertionLocations(data);
to_spill != nullptr; to_spill = to_spill->next) {
Instruction* instr = sequence->InstructionAt(to_spill->gap_index);
ParallelMove* move =
instr->GetOrCreateParallelMove(Instruction::START, zone);
move->AddMove(*to_spill->operand, op);
instr->block()->mark_needs_frame();
}
}
void TopLevelLiveRange::FilterSpillMoves(TopTierRegisterAllocationData* data,
const InstructionOperand& op) {
DCHECK_IMPLIES(op.IsConstant(),
GetSpillMoveInsertionLocations(data) == nullptr);
bool might_be_duplicated = has_slot_use() || spilled();
InstructionSequence* sequence = data->code();
SpillMoveInsertionList* previous = nullptr;
for (SpillMoveInsertionList* to_spill = GetSpillMoveInsertionLocations(data);
to_spill != nullptr; previous = to_spill, to_spill = to_spill->next) {
Instruction* instr = sequence->InstructionAt(to_spill->gap_index);
ParallelMove* move = instr->GetParallelMove(Instruction::START);
// Skip insertion if it's possible that the move exists already as a
// constraint move from a fixed output register to a slot.
bool found = false;
if (move != nullptr && (might_be_duplicated || has_preassigned_slot())) {
for (MoveOperands* move_op : *move) {
if (move_op->IsEliminated()) continue;
if (move_op->source().Equals(*to_spill->operand) &&
move_op->destination().Equals(op)) {
found = true;
if (has_preassigned_slot()) move_op->Eliminate();
break;
}
}
}
if (found || has_preassigned_slot()) {
// Remove the item from the list.
if (previous == nullptr) {
spill_move_insertion_locations_ = to_spill->next;
} else {
previous->next = to_spill->next;
}
// Even though this location doesn't need a spill instruction, the
// block does require a frame.
instr->block()->mark_needs_frame();
}
}
}
void TopLevelLiveRange::SetSpillOperand(InstructionOperand* operand) {
DCHECK(HasNoSpillType());
DCHECK(!operand->IsUnallocated() && !operand->IsImmediate());
set_spill_type(SpillType::kSpillOperand);
spill_operand_ = operand;
}
void TopLevelLiveRange::SetSpillRange(SpillRange* spill_range) {
DCHECK(!HasSpillOperand());
DCHECK(spill_range);
spill_range_ = spill_range;
}
AllocatedOperand TopLevelLiveRange::GetSpillRangeOperand() const {
SpillRange* spill_range = GetSpillRange();
int index = spill_range->assigned_slot();
return AllocatedOperand(LocationOperand::STACK_SLOT, representation(), index);
}
void TopLevelLiveRange::VerifyChildrenInOrder() const {
LifetimePosition last_end = End();
for (const LiveRange* child = this->next(); child != nullptr;
child = child->next()) {
DCHECK(last_end <= child->Start());
last_end = child->End();
}
}
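// Returns the child live range covering `pos`, restarting the search from
// the cached result of the previous query unless `pos` has moved backwards
// past it.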
LiveRange* TopLevelLiveRange::GetChildCovers(LifetimePosition pos) {
LiveRange* child = last_child_covers_;
DCHECK_NE(child, nullptr);
if (pos < child->Start()) {
// Cached value has advanced too far; start from the top.
child = this;
}
LiveRange* previous_child = nullptr;
while (child != nullptr && child->End() <= pos) {
previous_child = child;
child = child->next();
}
// If we've walked past the end, cache the last child instead. This allows
// future calls that are also past the end to be fast, since they will know
// that there is no need to reset the search to the beginning.
last_child_covers_ = child == nullptr ? previous_child : child;
return !child || !child->Covers(pos) ? nullptr : child;
}
void TopLevelLiveRange::Verify() const {
VerifyChildrenInOrder();
for (const LiveRange* child = this; child != nullptr; child = child->next()) {
VerifyChildStructure();
}
}
void TopLevelLiveRange::ShortenTo(LifetimePosition start, bool trace_alloc) {
TRACE_COND(trace_alloc, "Shorten live range %d to [%d\n", vreg(),
start.value());
DCHECK_NOT_NULL(first_interval_);
DCHECK(first_interval_->start() <= start);
DCHECK(start < first_interval_->end());
first_interval_->set_start(start);
}
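// Ensures the range covers [start, end[: existing leading intervals that
// begin no later than `end` are absorbed into a single new interval, whose
// end is extended if an absorbed interval reached further.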
void TopLevelLiveRange::EnsureInterval(LifetimePosition start,
LifetimePosition end, Zone* zone,
bool trace_alloc) {
TRACE_COND(trace_alloc, "Ensure live range %d in interval [%d %d[\n", vreg(),
start.value(), end.value());
LifetimePosition new_end = end;
while (first_interval_ != nullptr && first_interval_->start() <= end) {
if (first_interval_->end() > end) {
new_end = first_interval_->end();
}
first_interval_ = first_interval_->next();
}
UseInterval* new_interval = zone->New<UseInterval>(start, new_end);
new_interval->set_next(first_interval_);
first_interval_ = new_interval;
if (new_interval->next() == nullptr) {
last_interval_ = new_interval;
}
}
void TopLevelLiveRange::AddUseInterval(LifetimePosition start,
LifetimePosition end, Zone* zone,
bool trace_alloc) {
TRACE_COND(trace_alloc, "Add to live range %d interval [%d %d[\n", vreg(),
start.value(), end.value());
if (first_interval_ == nullptr) {
UseInterval* interval = zone->New<UseInterval>(start, end);
first_interval_ = interval;
last_interval_ = interval;
} else {
if (end == first_interval_->start()) {
first_interval_->set_start(start);
} else if (end < first_interval_->start()) {
UseInterval* interval = zone->New<UseInterval>(start, end);
interval->set_next(first_interval_);
first_interval_ = interval;
} else {
// The order of instruction processing (see ProcessInstructions) guarantees
// that each new use interval either precedes, intersects with, or touches
// the last added interval.
DCHECK(start <= first_interval_->end());
first_interval_->set_start(std::min(start, first_interval_->start()));
first_interval_->set_end(std::max(end, first_interval_->end()));
}
}
}
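// Inserts `use_pos` into the use position list, which is kept sorted by
// lifetime position, and updates the cached hint position if this use
// carries the earliest hint seen so far.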
void TopLevelLiveRange::AddUsePosition(UsePosition* use_pos, bool trace_alloc) {
LifetimePosition pos = use_pos->pos();
TRACE_COND(trace_alloc, "Add to live range %d use position %d\n", vreg(),
pos.value());
UsePosition* prev_hint = nullptr;
UsePosition* prev = nullptr;
UsePosition* current = first_pos_;
while (current != nullptr && current->pos() < pos) {
prev_hint = current->HasHint() ? current : prev_hint;
prev = current;
current = current->next();
}
if (prev == nullptr) {
use_pos->set_next(first_pos_);
first_pos_ = use_pos;
} else {
use_pos->set_next(prev->next());
prev->set_next(use_pos);
}
if (prev_hint == nullptr && use_pos->HasHint()) {
current_hint_position_ = use_pos;
}
}
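// Walks the two sorted interval lists in lockstep, always advancing the list
// whose current interval starts earlier, and reports an intersection as soon
// as that interval's end overlaps the start of the other.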
static bool AreUseIntervalsIntersecting(UseInterval* interval1,
UseInterval* interval2) {
while (interval1 != nullptr && interval2 != nullptr) {
if (interval1->start() < interval2->start()) {
if (interval1->end() > interval2->start()) {
return true;
}
interval1 = interval1->next();
} else {
if (interval2->end() > interval1->start()) {
return true;
}
interval2 = interval2->next();
}
}
return false;
}
std::ostream& operator<<(std::ostream& os,
const PrintableLiveRange& printable_range) {
const LiveRange* range = printable_range.range_;
os << "Range: " << range->TopLevel()->vreg() << ":" << range->relative_id()
<< " ";
if (range->TopLevel()->is_phi()) os << "phi ";
if (range->TopLevel()->is_non_loop_phi()) os << "nlphi ";
os << "{" << std::endl;
UseInterval* interval = range->first_interval();
UsePosition* use_pos = range->first_pos();
while (use_pos != nullptr) {
if (use_pos->HasOperand()) {
os << *use_pos->operand() << use_pos->pos() << " ";
}
use_pos = use_pos->next();
}
os << std::endl;
while (interval != nullptr) {
os << '[' << interval->start() << ", " << interval->end() << ')'
<< std::endl;
interval = interval->next();
}
os << "}";
return os;
}
namespace {
void PrintBlockRow(std::ostream& os, const InstructionBlocks& blocks) {
os << " ";
for (auto block : blocks) {
LifetimePosition start_pos = LifetimePosition::GapFromInstructionIndex(
block->first_instruction_index());
LifetimePosition end_pos = LifetimePosition::GapFromInstructionIndex(
block->last_instruction_index())
.NextFullStart();
int length = end_pos.value() - start_pos.value();
constexpr int kMaxPrefixLength = 32;
char buffer[kMaxPrefixLength];
int rpo_number = block->rpo_number().ToInt();
const char* deferred_marker = block->IsDeferred() ? "(deferred)" : "";
int max_prefix_length = std::min(length, kMaxPrefixLength);
int prefix = snprintf(buffer, max_prefix_length, "[-B%d-%s", rpo_number,
deferred_marker);
os << buffer;
int remaining = length - std::min(prefix, max_prefix_length) - 1;
for (int i = 0; i < remaining; ++i) os << '-';
os << ']';
}
os << '\n';
}
} // namespace
void LinearScanAllocator::PrintRangeRow(std::ostream& os,
const TopLevelLiveRange* toplevel) {
int position = 0;
os << std::setw(3) << toplevel->vreg() << ": ";
const char* kind_string;
switch (toplevel->spill_type()) {
case TopLevelLiveRange::SpillType::kSpillRange:
kind_string = "ss";
break;
case TopLevelLiveRange::SpillType::kDeferredSpillRange:
kind_string = "sd";
break;
case TopLevelLiveRange::SpillType::kSpillOperand:
kind_string = "so";
break;
default:
kind_string = "s?";
}
for (const LiveRange* range = toplevel; range != nullptr;
range = range->next()) {
for (UseInterval* interval = range->first_interval(); interval != nullptr;
interval = interval->next()) {
LifetimePosition start = interval->start();
LifetimePosition end = interval->end();
CHECK_GE(start.value(), position);
for (; start.value() > position; position++) {
os << ' ';
}
int length = end.value() - start.value();
constexpr int kMaxPrefixLength = 32;
char buffer[kMaxPrefixLength];
int max_prefix_length = std::min(length + 1, kMaxPrefixLength);
int prefix;
if (range->spilled()) {
prefix = snprintf(buffer, max_prefix_length, "|%s", kind_string);
} else {
prefix = snprintf(buffer, max_prefix_length, "|%s",
RegisterName(range->assigned_register()));
}
os << buffer;
position += std::min(prefix, max_prefix_length - 1);
CHECK_GE(end.value(), position);
const char line_style = range->spilled() ? '-' : '=';
for (; end.value() > position; position++) {
os << line_style;
}
}
}
os << '\n';
}
void LinearScanAllocator::PrintRangeOverview(std::ostream& os) {
PrintBlockRow(os, code()->instruction_blocks());
for (auto const toplevel : data()->fixed_live_ranges()) {
if (toplevel == nullptr) continue;
PrintRangeRow(os, toplevel);
}
int rowcount = 0;
for (auto toplevel : data()->live_ranges()) {
if (!CanProcessRange(toplevel)) continue;
if (rowcount++ % 10 == 0) PrintBlockRow(os, code()->instruction_blocks());
PrintRangeRow(os, toplevel);
}
}
SpillRange::SpillRange(TopLevelLiveRange* parent, Zone* zone)
: live_ranges_(zone),
assigned_slot_(kUnassignedSlot),
byte_width_(ByteWidthForStackSlot(parent->representation())) {
// Spill ranges are created for top level ranges. This is so that, when
// merging decisions are made, we consider the full extent of the virtual
// register, and avoid clobbering it.
UseInterval* result = nullptr;
UseInterval* node = nullptr;
// Copy the intervals for all ranges.
for (LiveRange* range = parent; range != nullptr; range = range->next()) {
UseInterval* src = range->first_interval();
while (src != nullptr) {
UseInterval* new_node = zone->New<UseInterval>(src->start(), src->end());
if (result == nullptr) {
result = new_node;
} else {
node->set_next(new_node);
}
node = new_node;
src = src->next();
}
}
use_interval_ = result;
live_ranges().push_back(parent);
end_position_ = node->end();
parent->SetSpillRange(this);
}
bool SpillRange::IsIntersectingWith(SpillRange* other) const {
if (this->use_interval_ == nullptr || other->use_interval_ == nullptr ||
this->End() <= other->use_interval_->start() ||
other->End() <= this->use_interval_->start()) {
return false;
}
return AreUseIntervalsIntersecting(use_interval_, other->use_interval_);
}
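// Two spill ranges can share a stack slot if neither has a slot assigned
// yet, their byte widths match, and their use intervals never intersect.
// On success, the other range's intervals and live ranges are folded into
// this one.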
bool SpillRange::TryMerge(SpillRange* other) {
if (HasSlot() || other->HasSlot()) return false;
if (byte_width() != other->byte_width() || IsIntersectingWith(other))
return false;
LifetimePosition max = LifetimePosition::MaxPosition();
if (End() < other->End() && other->End() != max) {
end_position_ = other->End();
}
other->end_position_ = max;
MergeDisjointIntervals(other->use_interval_);
other->use_interval_ = nullptr;
for (TopLevelLiveRange* range : other->live_ranges()) {
DCHECK(range->GetSpillRange() == other);
range->SetSpillRange(this);
}
live_ranges().insert(live_ranges().end(), other->live_ranges().begin(),
other->live_ranges().end());
other->live_ranges().clear();
return true;
}
void SpillRange::MergeDisjointIntervals(UseInterval* other) {
UseInterval* tail = nullptr;
UseInterval* current = use_interval_;
while (other != nullptr) {
// Make sure the 'current' list starts first
if (current == nullptr || current->start() > other->start()) {
std::swap(current, other);
}
// Check disjointness
DCHECK(other == nullptr || current->end() <= other->start());
// Append the 'current' node to the result accumulator and move forward
if (tail == nullptr) {
use_interval_ = current;
} else {
tail->set_next(current);
}
tail = current;
current = current->next();
}
// Other list is empty => we are done
}
void SpillRange::Print() const {
StdoutStream os;
os << "{" << std::endl;
for (TopLevelLiveRange* range : live_ranges()) {
os << range->vreg() << " ";
}
os << std::endl;
for (UseInterval* i = interval(); i != nullptr; i = i->next()) {
os << '[' << i->start() << ", " << i->end() << ')' << std::endl;
}
os << "}" << std::endl;
}
TopTierRegisterAllocationData::PhiMapValue::PhiMapValue(
PhiInstruction* phi, const InstructionBlock* block, Zone* zone)
: phi_(phi),
block_(block),
incoming_operands_(zone),
assigned_register_(kUnassignedRegister) {
incoming_operands_.reserve(phi->operands().size());
}
void TopTierRegisterAllocationData::PhiMapValue::AddOperand(
InstructionOperand* operand) {
incoming_operands_.push_back(operand);
}
void TopTierRegisterAllocationData::PhiMapValue::CommitAssignment(
const InstructionOperand& assigned) {
for (InstructionOperand* operand : incoming_operands_) {
InstructionOperand::ReplaceWith(operand, &assigned);
}
}
TopTierRegisterAllocationData::TopTierRegisterAllocationData(
const RegisterConfiguration* config, Zone* zone, Frame* frame,
InstructionSequence* code, RegisterAllocationFlags flags,
TickCounter* tick_counter, const char* debug_name)
: RegisterAllocationData(Type::kTopTier),
allocation_zone_(zone),
frame_(frame),
code_(code),
debug_name_(debug_name),
config_(config),
phi_map_(allocation_zone()),
live_in_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()),
live_out_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()),
live_ranges_(code->VirtualRegisterCount() * 2, nullptr,
allocation_zone()),
fixed_live_ranges_(kNumberOfFixedRangesPerRegister *
this->config()->num_general_registers(),
nullptr, allocation_zone()),
fixed_float_live_ranges_(allocation_zone()),
fixed_double_live_ranges_(kNumberOfFixedRangesPerRegister *
this->config()->num_double_registers(),
nullptr, allocation_zone()),
fixed_simd128_live_ranges_(allocation_zone()),
spill_ranges_(code->VirtualRegisterCount(), nullptr, allocation_zone()),
delayed_references_(allocation_zone()),
assigned_registers_(nullptr),
assigned_double_registers_(nullptr),
virtual_register_count_(code->VirtualRegisterCount()),
preassigned_slot_ranges_(zone),
spill_state_(code->InstructionBlockCount(), ZoneVector<LiveRange*>(zone),
zone),
flags_(flags),
tick_counter_(tick_counter) {
if (!kSimpleFPAliasing) {
fixed_float_live_ranges_.resize(
kNumberOfFixedRangesPerRegister * this->config()->num_float_registers(),
nullptr);
fixed_simd128_live_ranges_.resize(
kNumberOfFixedRangesPerRegister *
this->config()->num_simd128_registers(),
nullptr);
}
assigned_registers_ = code_zone()->New<BitVector>(
this->config()->num_general_registers(), code_zone());
assigned_double_registers_ = code_zone()->New<BitVector>(
this->config()->num_double_registers(), code_zone());
fixed_register_use_ = code_zone()->New<BitVector>(
this->config()->num_general_registers(), code_zone());
fixed_fp_register_use_ = code_zone()->New<BitVector>(
this->config()->num_double_registers(), code_zone());
this->frame()->SetAllocatedRegisters(assigned_registers_);
this->frame()->SetAllocatedDoubleRegisters(assigned_double_registers_);
}
MoveOperands* TopTierRegisterAllocationData::AddGapMove(
int index, Instruction::GapPosition position,
const InstructionOperand& from, const InstructionOperand& to) {
Instruction* instr = code()->InstructionAt(index);
ParallelMove* moves = instr->GetOrCreateParallelMove(position, code_zone());
return moves->AddMove(from, to);
}
MachineRepresentation TopTierRegisterAllocationData::RepresentationFor(
int virtual_register) {
DCHECK_LT(virtual_register, code()->VirtualRegisterCount());
return code()->GetRepresentation(virtual_register);
}
TopLevelLiveRange* TopTierRegisterAllocationData::GetOrCreateLiveRangeFor(
int index) {
if (index >= static_cast<int>(live_ranges().size())) {
live_ranges().resize(index + 1, nullptr);
}
TopLevelLiveRange* result = live_ranges()[index];
if (result == nullptr) {
result = NewLiveRange(index, RepresentationFor(index));
live_ranges()[index] = result;
}
return result;
}
TopLevelLiveRange* TopTierRegisterAllocationData::NewLiveRange(
int index, MachineRepresentation rep) {
return allocation_zone()->New<TopLevelLiveRange>(index, rep);
}
int TopTierRegisterAllocationData::GetNextLiveRangeId() {
int vreg = virtual_register_count_++;
if (vreg >= static_cast<int>(live_ranges().size())) {
live_ranges().resize(vreg + 1, nullptr);
}
return vreg;
}
TopLevelLiveRange* TopTierRegisterAllocationData::NextLiveRange(
MachineRepresentation rep) {
int vreg = GetNextLiveRangeId();
TopLevelLiveRange* ret = NewLiveRange(vreg, rep);
return ret;
}
TopTierRegisterAllocationData::PhiMapValue*
TopTierRegisterAllocationData::InitializePhiMap(const InstructionBlock* block,
PhiInstruction* phi) {
TopTierRegisterAllocationData::PhiMapValue* map_value =
allocation_zone()->New<TopTierRegisterAllocationData::PhiMapValue>(
phi, block, allocation_zone());
auto res =
phi_map_.insert(std::make_pair(phi->virtual_register(), map_value));
DCHECK(res.second);
USE(res);
return map_value;
}
TopTierRegisterAllocationData::PhiMapValue*
TopTierRegisterAllocationData::GetPhiMapValueFor(int virtual_register) {
auto it = phi_map_.find(virtual_register);
DCHECK(it != phi_map_.end());
return it->second;
}
TopTierRegisterAllocationData::PhiMapValue*
TopTierRegisterAllocationData::GetPhiMapValueFor(TopLevelLiveRange* top_range) {
return GetPhiMapValueFor(top_range->vreg());
}
bool TopTierRegisterAllocationData::ExistsUseWithoutDefinition() {
bool found = false;
BitVector::Iterator iterator(live_in_sets()[0]);
while (!iterator.Done()) {
found = true;
int operand_index = iterator.Current();
PrintF("Register allocator error: live v%d reached first block.\n",
operand_index);
LiveRange* range = GetOrCreateLiveRangeFor(operand_index);
PrintF(" (first use is at %d)\n", range->first_pos()->pos().value());
if (debug_name() == nullptr) {
PrintF("\n");
} else {
PrintF(" (function: %s)\n", debug_name());
}
iterator.Advance();
}
return found;
}
// If a range is defined in a deferred block, we can expect the entire range
// to only cover positions in deferred blocks. Otherwise, a block on the
// hot path would be dominated by a deferred block, meaning it is unreachable
// without passing through the deferred block, which is contradictory.
// In particular, when such a range contributes a result back on the hot
// path, it will be as one of the inputs of a phi. In that case, the value
// will be transferred via a move in the Gap::END of the last instruction
// of a deferred block.
bool TopTierRegisterAllocationData::RangesDefinedInDeferredStayInDeferred() {
const size_t live_ranges_size = live_ranges().size();
for (const TopLevelLiveRange* range : live_ranges()) {
CHECK_EQ(live_ranges_size,
live_ranges().size()); // TODO(neis): crbug.com/831822
if (range == nullptr || range->IsEmpty() ||
!code()
->GetInstructionBlock(range->Start().ToInstructionIndex())
->IsDeferred()) {
continue;
}
for (const UseInterval* i = range->first_interval(); i != nullptr;
i = i->next()) {
int first = i->FirstGapIndex();
int last = i->LastGapIndex();
for (int instr = first; instr <= last;) {
const InstructionBlock* block = code()->GetInstructionBlock(instr);
if (!block->IsDeferred()) return false;
instr = block->last_instruction_index() + 1;
}
}
}
return true;
}
SpillRange* TopTierRegisterAllocationData::AssignSpillRangeToLiveRange(
TopLevelLiveRange* range, SpillMode spill_mode) {
using SpillType = TopLevelLiveRange::SpillType;
DCHECK(!range->HasSpillOperand());
SpillRange* spill_range = range->GetAllocatedSpillRange();
if (spill_range == nullptr) {
spill_range = allocation_zone()->New<SpillRange>(range, allocation_zone());
}
if (spill_mode == SpillMode::kSpillDeferred &&
(range->spill_type() != SpillType::kSpillRange)) {
range->set_spill_type(SpillType::kDeferredSpillRange);
} else {
range->set_spill_type(SpillType::kSpillRange);
}
spill_ranges()[range->vreg()] = spill_range;
return spill_range;
}
void TopTierRegisterAllocationData::MarkFixedUse(MachineRepresentation rep,
int index) {
switch (rep) {
case MachineRepresentation::kFloat32:
case MachineRepresentation::kSimd128:
if (kSimpleFPAliasing) {
fixed_fp_register_use_->Add(index);
} else {
int alias_base_index = -1;
int aliases = config()->GetAliases(
rep, index, MachineRepresentation::kFloat64, &alias_base_index);
DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
while (aliases--) {
int aliased_reg = alias_base_index + aliases;
fixed_fp_register_use_->Add(aliased_reg);
}
}
break;
case MachineRepresentation::kFloat64:
fixed_fp_register_use_->Add(index);
break;
default:
DCHECK(!IsFloatingPoint(rep));
fixed_register_use_->Add(index);
break;
}
}
bool TopTierRegisterAllocationData::HasFixedUse(MachineRepresentation rep,
int index) {
switch (rep) {
case MachineRepresentation::kFloat32:
case MachineRepresentation::kSimd128:
if (kSimpleFPAliasing) {
return fixed_fp_register_use_->Contains(index);
} else {
int alias_base_index = -1;
int aliases = config()->GetAliases(
rep, index, MachineRepresentation::kFloat64, &alias_base_index);
DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
bool result = false;
while (aliases-- && !result) {
int aliased_reg = alias_base_index + aliases;
result |= fixed_fp_register_use_->Contains(aliased_reg);
}
return result;
}
break;
case MachineRepresentation::kFloat64:
return fixed_fp_register_use_->Contains(index);
break;
default:
DCHECK(!IsFloatingPoint(rep));
return fixed_register_use_->Contains(index);
break;
}
}
void TopTierRegisterAllocationData::MarkAllocated(MachineRepresentation rep,
int index) {
switch (rep) {
case MachineRepresentation::kFloat32:
case MachineRepresentation::kSimd128:
if (kSimpleFPAliasing) {
assigned_double_registers_->Add(index);
} else {
int alias_base_index = -1;
int aliases = config()->GetAliases(
rep, index, MachineRepresentation::kFloat64, &alias_base_index);
DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
while (aliases--) {
int aliased_reg = alias_base_index + aliases;
assigned_double_registers_->Add(aliased_reg);
}
}
break;
case MachineRepresentation::kFloat64:
assigned_double_registers_->Add(index);
break;
default:
DCHECK(!IsFloatingPoint(rep));
assigned_registers_->Add(index);
break;
}
}
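// A position is a block boundary if it is the gap position at the very end
// of the instruction stream or the gap before the first instruction of a
// block.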
bool TopTierRegisterAllocationData::IsBlockBoundary(
LifetimePosition pos) const {
return pos.IsFullStart() &&
(static_cast<size_t>(pos.ToInstructionIndex()) ==
code()->instructions().size() ||
code()->GetInstructionBlock(pos.ToInstructionIndex())->code_start() ==
pos.ToInstructionIndex());
}
ConstraintBuilder::ConstraintBuilder(TopTierRegisterAllocationData* data)
: data_(data) {}
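// Replaces a fixed-policy unallocated operand with the concrete allocated
// operand it demands (stack slot, general register, or FP register),
// recording fixed register uses and reference-map entries where needed.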
InstructionOperand* ConstraintBuilder::AllocateFixed(
UnallocatedOperand* operand, int pos, bool is_tagged, bool is_input) {
TRACE("Allocating fixed reg for op %d\n", operand->virtual_register());
DCHECK(operand->HasFixedPolicy());
InstructionOperand allocated;
MachineRepresentation rep = InstructionSequence::DefaultRepresentation();
int virtual_register = operand->virtual_register();
if (virtual_register != InstructionOperand::kInvalidVirtualRegister) {
rep = data()->RepresentationFor(virtual_register);
}
if (operand->HasFixedSlotPolicy()) {
allocated = AllocatedOperand(AllocatedOperand::STACK_SLOT, rep,
operand->fixed_slot_index());
} else if (operand->HasFixedRegisterPolicy()) {
DCHECK(!IsFloatingPoint(rep));
DCHECK(data()->config()->IsAllocatableGeneralCode(
operand->fixed_register_index()));
allocated = AllocatedOperand(AllocatedOperand::REGISTER, rep,
operand->fixed_register_index());
} else if (operand->HasFixedFPRegisterPolicy()) {
DCHECK(IsFloatingPoint(rep));
DCHECK_NE(InstructionOperand::kInvalidVirtualRegister, virtual_register);
allocated = AllocatedOperand(AllocatedOperand::REGISTER, rep,
operand->fixed_register_index());
} else {
UNREACHABLE();
}
if (is_input && allocated.IsAnyRegister()) {
data()->MarkFixedUse(rep, operand->fixed_register_index());
}
InstructionOperand::ReplaceWith(operand, &allocated);
if (is_tagged) {
TRACE("Fixed reg is tagged at %d\n", pos);
Instruction* instr = code()->InstructionAt(pos);
if (instr->HasReferenceMap()) {
instr->reference_map()->RecordReference(*AllocatedOperand::cast(operand));
}
}
return operand;
}
void ConstraintBuilder::MeetRegisterConstraints() {
for (InstructionBlock* block : code()->instruction_blocks()) {
data_->tick_counter()->TickAndMaybeEnterSafepoint();
MeetRegisterConstraints(block);
}
}
void ConstraintBuilder::MeetRegisterConstraints(const InstructionBlock* block) {
int start = block->first_instruction_index();
int end = block->last_instruction_index();
DCHECK_NE(-1, start);
for (int i = start; i <= end; ++i) {
MeetConstraintsBefore(i);
if (i != end) MeetConstraintsAfter(i);
}
// Meet register constraints for the last instruction in the block.
MeetRegisterConstraintsForLastInstructionInBlock(block);
}
void ConstraintBuilder::MeetRegisterConstraintsForLastInstructionInBlock(
const InstructionBlock* block) {
int end = block->last_instruction_index();
Instruction* last_instruction = code()->InstructionAt(end);
for (size_t i = 0; i < last_instruction->OutputCount(); i++) {
InstructionOperand* output_operand = last_instruction->OutputAt(i);
DCHECK(!output_operand->IsConstant());
UnallocatedOperand* output = UnallocatedOperand::cast(output_operand);
int output_vreg = output->virtual_register();
TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(output_vreg);
bool assigned = false;
if (output->HasFixedPolicy()) {
AllocateFixed(output, -1, false, false);
// This value is produced on the stack; we never need to spill it.
if (output->IsStackSlot()) {
DCHECK(LocationOperand::cast(output)->index() <
data()->frame()->GetSpillSlotCount());
range->SetSpillOperand(LocationOperand::cast(output));
range->SetSpillStartIndex(end);
assigned = true;
}
for (const RpoNumber& succ : block->successors()) {
const InstructionBlock* successor = code()->InstructionBlockAt(succ);
DCHECK_EQ(1, successor->PredecessorCount());
int gap_index = successor->first_instruction_index();
// Create an unconstrained operand for the same virtual register
// and insert a gap move from the fixed output to the operand.
UnallocatedOperand output_copy(UnallocatedOperand::REGISTER_OR_SLOT,
output_vreg);
data()->AddGapMove(gap_index, Instruction::START, *output, output_copy);
}
}
if (!assigned) {
for (const RpoNumber& succ : block->successors()) {
const InstructionBlock* successor = code()->InstructionBlockAt(succ);
DCHECK_EQ(1, successor->PredecessorCount());
int gap_index = successor->first_instruction_index();
range->RecordSpillLocation(allocation_zone(), gap_index, output);
range->SetSpillStartIndex(gap_index);
}
}
}
}
void ConstraintBuilder::MeetConstraintsAfter(int instr_index) {
Instruction* first = code()->InstructionAt(instr_index);
// Handle fixed temporaries.
for (size_t i = 0; i < first->TempCount(); i++) {
UnallocatedOperand* temp = UnallocatedOperand::cast(first->TempAt(i));
if (temp->HasFixedPolicy()) AllocateFixed(temp, instr_index, false, false);
}
// Handle constant/fixed output operands.
for (size_t i = 0; i < first->OutputCount(); i++) {
InstructionOperand* output = first->OutputAt(i);
if (output->IsConstant()) {
int output_vreg = ConstantOperand::cast(output)->virtual_register();
TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(output_vreg);
range->SetSpillStartIndex(instr_index + 1);
range->SetSpillOperand(output);
continue;
}
UnallocatedOperand* first_output = UnallocatedOperand::cast(output);
TopLevelLiveRange* range =
data()->GetOrCreateLiveRangeFor(first_output->virtual_register());
bool assigned = false;
if (first_output->HasFixedPolicy()) {
int output_vreg = first_output->virtual_register();
UnallocatedOperand output_copy(UnallocatedOperand::REGISTER_OR_SLOT,
output_vreg);
bool is_tagged = code()->IsReference(output_vreg);
if (first_output->HasSecondaryStorage()) {
range->MarkHasPreassignedSlot();
data()->preassigned_slot_ranges().push_back(
std::make_pair(range, first_output->GetSecondaryStorage()));
}
AllocateFixed(first_output, instr_index, is_tagged, false);
// This value is produced on the stack; we never need to spill it.
if (first_output->IsStackSlot()) {
DCHECK(LocationOperand::cast(first_output)->index() <
data()->frame()->GetTotalFrameSlotCount());
range->SetSpillOperand(LocationOperand::cast(first_output));
range->SetSpillStartIndex(instr_index + 1);
assigned = true;
}
data()->AddGapMove(instr_index + 1, Instruction::START, *first_output,
output_copy);
}
// Make sure we add a gap move for spilling (if we have not done
// so already).
if (!assigned) {
range->RecordSpillLocation(allocation_zone(), instr_index + 1,
first_output);
range->SetSpillStartIndex(instr_index + 1);
}
}
}
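// Handles constraints that take effect in the gap before `instr_index`:
// fixed inputs get a gap move from an unconstrained copy into the fixed
// location, and "same as input" outputs are tied to their first input.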
void ConstraintBuilder::MeetConstraintsBefore(int instr_index) {
Instruction* second = code()->InstructionAt(instr_index);
// Handle fixed input operands of second instruction.
for (size_t i = 0; i < second->InputCount(); i++) {
InstructionOperand* input = second->InputAt(i);
if (input->IsImmediate()) {
continue; // Ignore immediates.
}
UnallocatedOperand* cur_input = UnallocatedOperand::cast(input);
if (cur_input->HasFixedPolicy()) {
int input_vreg = cur_input->virtual_register();
UnallocatedOperand input_copy(UnallocatedOperand::REGISTER_OR_SLOT,
input_vreg);
bool is_tagged = code()->IsReference(input_vreg);
AllocateFixed(cur_input, instr_index, is_tagged, true);
data()->AddGapMove(instr_index, Instruction::END, input_copy, *cur_input);
}
}
// Handle "output same as input" for second instruction.
for (size_t i = 0; i < second->OutputCount(); i++) {
InstructionOperand* output = second->OutputAt(i);
if (!output->IsUnallocated()) continue;
UnallocatedOperand* second_output = UnallocatedOperand::cast(output);
if (!second_output->HasSameAsInputPolicy()) continue;
DCHECK_EQ(0, i); // Only valid for first output.
UnallocatedOperand* cur_input =
UnallocatedOperand::cast(second->InputAt(0));
int output_vreg = second_output->virtual_register();
int input_vreg = cur_input->virtual_register();
UnallocatedOperand input_copy(UnallocatedOperand::REGISTER_OR_SLOT,
input_vreg);
*cur_input =
UnallocatedOperand(*cur_input, second_output->virtual_register());
MoveOperands* gap_move = data()->AddGapMove(instr_index, Instruction::END,
input_copy, *cur_input);
DCHECK_NOT_NULL(gap_move);
if (code()->IsReference(input_vreg) && !code()->IsReference(output_vreg)) {
if (second->HasReferenceMap()) {
TopTierRegisterAllocationData::DelayedReference delayed_reference = {
second->reference_map(), &gap_move->source()};
data()->delayed_references().push_back(delayed_reference);
}
}
}
}
void ConstraintBuilder::ResolvePhis() {
// Process the blocks in reverse order.
for (InstructionBlock* block : base::Reversed(code()->instruction_blocks())) {
data_->tick_counter()->TickAndMaybeEnterSafepoint();
ResolvePhis(block);
}
}
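// Summary comment (added): lowers the phis of |block| into explicit gap
// moves. Each phi input becomes a move at the end of the corresponding
// predecessor block, the move destinations are registered with the phi map
// for later commitment, and the phi's live range gets a spill location at
// the start of the block.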
void ConstraintBuilder::ResolvePhis(const InstructionBlock* block) {
for (PhiInstruction* phi : block->phis()) {
int phi_vreg = phi->virtual_register();
TopTierRegisterAllocationData::PhiMapValue* map_value =
data()->InitializePhiMap(block, phi);
InstructionOperand& output = phi->output();
// Map the destination operands, so the commitment phase can find them.
for (size_t i = 0; i < phi->operands().size(); ++i) {
InstructionBlock* cur_block =
code()->InstructionBlockAt(block->predecessors()[i]);
UnallocatedOperand input(UnallocatedOperand::REGISTER_OR_SLOT,
phi->operands()[i]);
MoveOperands* move = data()->AddGapMove(
cur_block->last_instruction_index(), Instruction::END, input, output);
map_value->AddOperand(&move->destination());
DCHECK(!code()
->InstructionAt(cur_block->last_instruction_index())
->HasReferenceMap());
}
TopLevelLiveRange* live_range = data()->GetOrCreateLiveRangeFor(phi_vreg);
int gap_index = block->first_instruction_index();
live_range->RecordSpillLocation(allocation_zone(), gap_index, &output);
live_range->SetSpillStartIndex(gap_index);
// We use the phi-ness of some nodes in some later heuristics.
live_range->set_is_phi(true);
live_range->set_is_non_loop_phi(!block->IsLoopHeader());
}
}
LiveRangeBuilder::LiveRangeBuilder(TopTierRegisterAllocationData* data,
Zone* local_zone)
: data_(data), phi_hints_(local_zone) {}
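// Summary comment (added): returns the cached live-out set of |block|,
// computing it on demand as the union of the successors' live-in sets plus
// the phi inputs flowing along the corresponding edges. Backward edges are
// skipped here; loops are handled separately in ProcessLoopHeader.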
BitVector* LiveRangeBuilder::ComputeLiveOut(
const InstructionBlock* block, TopTierRegisterAllocationData* data) {
size_t block_index = block->rpo_number().ToSize();
BitVector* live_out = data->live_out_sets()[block_index];
if (live_out == nullptr) {
// Compute the live-out set for the given block, ignoring backward
// successor edges.
Zone* zone = data->allocation_zone();
const InstructionSequence* code = data->code();
live_out = zone->New<BitVector>(code->VirtualRegisterCount(), zone);
// Process all successor blocks.
for (const RpoNumber& succ : block->successors()) {
// Add values live on entry to the successor.
if (succ <= block->rpo_number()) continue;
BitVector* live_in = data->live_in_sets()[succ.ToSize()];
if (live_in != nullptr) live_out->Union(*live_in);
// All phi input operands corresponding to this successor edge are live
// out from this block.
const InstructionBlock* successor = code->InstructionBlockAt(succ);
size_t index = successor->PredecessorIndexOf(block->rpo_number());
DCHECK(index < successor->PredecessorCount());
for (PhiInstruction* phi : successor->phis()) {
live_out->Add(phi->operands()[index]);
}
}
data->live_out_sets()[block_index] = live_out;
}
return live_out;
}
void LiveRangeBuilder::AddInitialIntervals(const InstructionBlock* block,
BitVector* live_out) {
// Add an interval that includes the entire block to the live range for
// each live_out value.
LifetimePosition start = LifetimePosition::GapFromInstructionIndex(
block->first_instruction_index());
LifetimePosition end = LifetimePosition::InstructionFromInstructionIndex(
block->last_instruction_index())
.NextStart();
BitVector::Iterator iterator(live_out);
while (!iterator.Done()) {
int operand_index = iterator.Current();
TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index);
range->AddUseInterval(start, end, allocation_zone(),
data()->is_trace_alloc());
iterator.Advance();
}
}
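// Summary comment (added): fixed FP live ranges use negative IDs stacked
// below the IDs reserved for fixed general registers: float64 ranges come
// first, then float32, then simd128, with each block offset by
// kNumberOfFixedRangesPerRegister times the number of registers of the
// preceding kind.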
int LiveRangeBuilder::FixedFPLiveRangeID(int index, MachineRepresentation rep) {
int result = -index - 1;
switch (rep) {
case MachineRepresentation::kSimd128:
result -=
kNumberOfFixedRangesPerRegister * config()->num_float_registers();
V8_FALLTHROUGH;
case MachineRepresentation::kFloat32:
result -=
kNumberOfFixedRangesPerRegister * config()->num_double_registers();
V8_FALLTHROUGH;
case MachineRepresentation::kFloat64:
result -=
kNumberOfFixedRangesPerRegister * config()->num_general_registers();
break;
default:
UNREACHABLE();
}
return result;
}
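// Summary comment (added): returns (creating it on first use) the fixed live
// range for the general register |index|. Ranges used for deferred-block
// spilling are stored at an offset of num_general_registers() in the fixed
// range table.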
TopLevelLiveRange* LiveRangeBuilder::FixedLiveRangeFor(int index,
SpillMode spill_mode) {
int offset = spill_mode == SpillMode::kSpillAtDefinition
? 0
: config()->num_general_registers();
DCHECK(index < config()->num_general_registers());
TopLevelLiveRange* result = data()->fixed_live_ranges()[offset + index];
if (result == nullptr) {
MachineRepresentation rep = InstructionSequence::DefaultRepresentation();
result = data()->NewLiveRange(FixedLiveRangeID(offset + index), rep);
DCHECK(result->IsFixed());
result->set_assigned_register(index);
data()->MarkAllocated(rep, index);
if (spill_mode == SpillMode::kSpillDeferred) {
result->set_deferred_fixed();
}
data()->fixed_live_ranges()[offset + index] = result;
}
return result;
}
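// Summary comment (added): floating-point counterpart of FixedLiveRangeFor.
// On architectures without simple FP aliasing, float32 and simd128 registers
// have their own fixed range tables in addition to the float64 one.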
TopLevelLiveRange* LiveRangeBuilder::FixedFPLiveRangeFor(
int index, MachineRepresentation rep, SpillMode spill_mode) {
int num_regs = config()->num_double_registers();
ZoneVector<TopLevelLiveRange*>* live_ranges =
&data()->fixed_double_live_ranges();
if (!kSimpleFPAliasing) {
switch (rep) {
case MachineRepresentation::kFloat32:
num_regs = config()->num_float_registers();
live_ranges = &data()->fixed_float_live_ranges();
break;
case MachineRepresentation::kSimd128:
num_regs = config()->num_simd128_registers();
live_ranges = &data()->fixed_simd128_live_ranges();
break;
default:
break;
}
}
int offset = spill_mode == SpillMode::kSpillAtDefinition ? 0 : num_regs;
DCHECK(index < num_regs);
USE(num_regs);
TopLevelLiveRange* result = (*live_ranges)[offset + index];
if (result == nullptr) {
result = data()->NewLiveRange(FixedFPLiveRangeID(offset + index, rep), rep);
DCHECK(result->IsFixed());
result->set_assigned_register(index);
data()->MarkAllocated(rep, index);
if (spill_mode == SpillMode::kSpillDeferred) {
result->set_deferred_fixed();
}
(*live_ranges)[offset + index] = result;
}
return result;
}
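// Summary comment (added): maps an operand to the live range it belongs to,
// i.e. virtual-register ranges for unallocated and constant operands, fixed
// ranges for register operands, and nullptr for anything else.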
TopLevelLiveRange* LiveRangeBuilder::LiveRangeFor(InstructionOperand* operand,
SpillMode spill_mode) {
if (operand->IsUnallocated()) {
return data()->GetOrCreateLiveRangeFor(
UnallocatedOperand::cast(operand)->virtual_register());
} else if (operand->IsConstant()) {
return data()->GetOrCreateLiveRangeFor(
ConstantOperand::cast(operand)->virtual_register());
} else if (operand->IsRegister()) {
return FixedLiveRangeFor(
LocationOperand::cast(operand)->GetRegister().code(), spill_mode);
} else if (operand->IsFPRegister()) {
LocationOperand* op = LocationOperand::cast(operand);
return FixedFPLiveRangeFor(op->register_code(), op->representation(),
spill_mode);
} else {
return nullptr;
}
}
UsePosition* LiveRangeBuilder::NewUsePosition(LifetimePosition pos,
InstructionOperand* operand,
void* hint,
UsePositionHintType hint_type) {
return allocation_zone()->New<UsePosition>(pos, operand, hint, hint_type);
}
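// Summary comment (added): records a definition of |operand| at |position|.
// Either a fresh one-position interval is added (definition without a use)
// or the existing first interval is shortened to start at |position|. A use
// position is attached only if the operand is still unallocated.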
UsePosition* LiveRangeBuilder::Define(LifetimePosition position,
InstructionOperand* operand, void* hint,
UsePositionHintType hint_type,
SpillMode spill_mode) {
TopLevelLiveRange* range = LiveRangeFor(operand, spill_mode);
if (range == nullptr) return nullptr;
if (range->IsEmpty() || range->Start() > position) {
// Can happen if there is a definition without a use.
range->AddUseInterval(position, position.NextStart(), allocation_zone(),
data()->is_trace_alloc());
range->AddUsePosition(NewUsePosition(position.NextStart()),
data()->is_trace_alloc());
} else {
range->ShortenTo(position, data()->is_trace_alloc());
}
if (!operand->IsUnallocated()) return nullptr;
UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
UsePosition* use_pos =
NewUsePosition(position, unalloc_operand, hint, hint_type);
range->AddUsePosition(use_pos, data()->is_trace_alloc());
return use_pos;
}
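// Summary comment (added): records a use of |operand| at |position| and
// extends its live range back to |block_start|. A use position (carrying the
// hint) is created only for unallocated operands.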
UsePosition* LiveRangeBuilder::Use(LifetimePosition block_start,
LifetimePosition position,
InstructionOperand* operand, void* hint,
UsePositionHintType hint_type,
SpillMode spill_mode) {
TopLevelLiveRange* range = LiveRangeFor(operand, spill_mode);
if (range == nullptr) return nullptr;
UsePosition* use_pos = nullptr;
if (operand->IsUnallocated()) {
UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
use_pos = NewUsePosition(position, unalloc_operand, hint, hint_type);
range->AddUsePosition(use_pos, data()->is_trace_alloc());
}
range->AddUseInterval(block_start, position, allocation_zone(),
data()->is_trace_alloc());
return use_pos;
}
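// Summary comment (added): walks the instructions of |block| backwards,
// maintaining the |live| set and building use intervals and use positions
// for outputs, clobbered registers, inputs, temps, and the gap moves of each
// instruction.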
void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
BitVector* live) {
int block_start = block->first_instruction_index();
LifetimePosition block_start_position =
LifetimePosition::GapFromInstructionIndex(block_start);
bool fixed_float_live_ranges = false;
bool fixed_simd128_live_ranges = false;
if (!kSimpleFPAliasing) {
int mask = data()->code()->representation_mask();
fixed_float_live_ranges = (mask & kFloat32Bit) != 0;
fixed_simd128_live_ranges = (mask & kSimd128Bit) != 0;
}
SpillMode spill_mode = SpillModeForBlock(block);
for (int index = block->last_instruction_index(); index >= block_start;
index--) {
LifetimePosition curr_position =
LifetimePosition::InstructionFromInstructionIndex(index);
Instruction* instr = code()->InstructionAt(index);
DCHECK_NOT_NULL(instr);
DCHECK(curr_position.IsInstructionPosition());
// Process outputs, inputs, and temps of this instruction.
for (size_t i = 0; i < instr->OutputCount(); i++) {
InstructionOperand* output = instr->OutputAt(i);
if (output->IsUnallocated()) {
// Unsupported.
DCHECK(!UnallocatedOperand::cast(output)->HasSlotPolicy());
int out_vreg = UnallocatedOperand::cast(output)->virtual_register();
live->Remove(out_vreg);
} else if (output->IsConstant()) {
int out_vreg = ConstantOperand::cast(output)->virtual_register();
live->Remove(out_vreg);
}
if (block->IsHandler() && index == block_start && output->IsAllocated() &&
output->IsRegister() &&
AllocatedOperand::cast(output)->GetRegister() ==
v8::internal::kReturnRegister0) {
// The register defined here is blocked from gap start - it is the
// exception value.
// TODO(mtrofin): should we explore an explicit opcode for
// the first instruction in the handler?
Define(LifetimePosition::GapFromInstructionIndex(index), output,
spill_mode);
} else {
Define(curr_position, output, spill_mode);
}
}
if (instr->ClobbersRegisters()) {
for (int i = 0; i < config()->num_allocatable_general_registers(); ++i) {
// Create a UseInterval at this instruction for all fixed registers
// (including the instruction outputs). Adding another UseInterval here
// is OK because AddUseInterval will just merge it with the existing
// one at the end of the range.
int code = config()->GetAllocatableGeneralCode(i);
TopLevelLiveRange* range = FixedLiveRangeFor(code, spill_mode);
range->AddUseInterval(curr_position, curr_position.End(),
allocation_zone(), data()->is_trace_alloc());
}
}
if (instr->ClobbersDoubleRegisters()) {
for (int i = 0; i < config()->num_allocatable_double_registers(); ++i) {
// Add a UseInterval for all DoubleRegisters. See comment above for
// general registers.
int code = config()->GetAllocatableDoubleCode(i);
TopLevelLiveRange* range = FixedFPLiveRangeFor(
code, MachineRepresentation::kFloat64, spill_mode);
range->AddUseInterval(curr_position, curr_position.End(),
allocation_zone(), data()->is_trace_alloc());
}
// Clobber fixed float registers on archs with non-simple aliasing.
if (!kSimpleFPAliasing) {
if (fixed_float_live_ranges) {
for (int i = 0; i < config()->num_allocatable_float_registers();
++i) {
// Add a UseInterval for all FloatRegisters. See comment above for
// general registers.
int code = config()->GetAllocatableFloatCode(i);
TopLevelLiveRange* range = FixedFPLiveRangeFor(
code, MachineRepresentation::kFloat32, spill_mode);
range->AddUseInterval(curr_position, curr_position.End(),
allocation_zone(), data()->is_trace_alloc());
}
}
if (fixed_simd128_live_ranges) {
for (int i = 0; i < config()->num_allocatable_simd128_registers();
++i) {
int code = config()->GetAllocatableSimd128Code(i);
TopLevelLiveRange* range = FixedFPLiveRangeFor(
code, MachineRepresentation::kSimd128, spill_mode);
range->AddUseInterval(curr_position, curr_position.End(),
allocation_zone(), data()->is_trace_alloc());
}
}
}
}
for (size_t i = 0; i < instr->InputCount(); i++) {
InstructionOperand* input = instr->InputAt(i);
if (input->IsImmediate()) {
continue; // Ignore immediates.
}
LifetimePosition use_pos;
if (input->IsUnallocated() &&
UnallocatedOperand::cast(input)->IsUsedAtStart()) {
use_pos = curr_position;
} else {
use_pos = curr_position.End();
}
if (input->IsUnallocated()) {
UnallocatedOperand* unalloc = UnallocatedOperand::cast(input);
int vreg = unalloc->virtual_register();
live->Add(vreg);
if (unalloc->HasSlotPolicy()) {
data()->GetOrCreateLiveRangeFor(vreg)->register_slot_use(
block->IsDeferred()
? TopLevelLiveRange::SlotUseKind::kDeferredSlotUse
: TopLevelLiveRange::SlotUseKind::kGeneralSlotUse);
}
}
Use(block_start_position, use_pos, input, spill_mode);
}
for (size_t i = 0; i < instr->TempCount(); i++) {
InstructionOperand* temp = instr->TempAt(i);
// Unsupported.
DCHECK_IMPLIES(temp->IsUnallocated(),
!UnallocatedOperand::cast(temp)->HasSlotPolicy());
if (instr->ClobbersTemps()) {
if (temp->IsRegister()) continue;
if (temp->IsUnallocated()) {
UnallocatedOperand* temp_unalloc = UnallocatedOperand::cast(temp);
if (temp_unalloc->HasFixedPolicy()) {
continue;
}
}
}
Use(block_start_position, curr_position.End(), temp, spill_mode);
Define(curr_position, temp, spill_mode);
}
// Process the moves of the instruction's gaps, making their sources live.
const Instruction::GapPosition kPositions[] = {Instruction::END,
Instruction::START};
curr_position = curr_position.PrevStart();
DCHECK(curr_position.IsGapPosition());
for (const Instruction::GapPosition& position : kPositions) {
ParallelMove* move = instr->GetParallelMove(position);
if (move == nullptr) continue;
if (position == Instruction::END) {
curr_position = curr_position.End();
} else {
curr_position = curr_position.Start();
}
for (MoveOperands* cur : *move) {
InstructionOperand& from = cur->source();
InstructionOperand& to = cur->destination();
void* hint = &to;
UsePositionHintType hint_type = UsePosition::HintTypeForOperand(to);
UsePosition* to_use = nullptr;
int phi_vreg = -1;
if (to.IsUnallocated()) {
int to_vreg = UnallocatedOperand::cast(to).virtual_register();
TopLevelLiveRange* to_range =
data()->GetOrCreateLiveRangeFor(to_vreg);
if (to_range->is_phi()) {
phi_vreg = to_vreg;
if (to_range->is_non_loop_phi()) {
hint = to_range->current_hint_position();
hint_type = hint == nullptr ? UsePositionHintType::kNone
: UsePositionHintType::kUsePos;
} else {
hint_type = UsePositionHintType::kPhi;
hint = data()->GetPhiMapValueFor(to_vreg);
}
} else {
if (live->Contains(to_vreg)) {
to_use =
Define(curr_position, &to, &from,
UsePosition::HintTypeForOperand(from), spill_mode);
live->Remove(to_vreg);
} else {
cur->Eliminate();
continue;
}
}
} else {
Define(curr_position, &to, spill_mode);
}
UsePosition* from_use = Use(block_start_position, curr_position, &from,
hint, hint_type, spill_mode);
// Mark range live.
if (from.IsUnallocated()) {
live->Add(UnallocatedOperand::cast(from).virtual_register());
}
// When the value is moved to a register to meet input constraints,
// we should treat this use like a register use in the backward
// spilling heuristics, even though this use is not register
// beneficial at the AllocateBlockedReg stage.
if (to.IsAnyRegister() ||
(to.IsUnallocated() &&
UnallocatedOperand::cast(&to)->HasRegisterPolicy())) {
from_use->set_spill_detrimental();
}
// Resolve use position hints just created.
if (to_use != nullptr && from_use != nullptr) {
to_use->ResolveHint(from_use);
from_use->ResolveHint(to_use);
}
DCHECK_IMPLIES(to_use != nullptr, to_use->IsResolved());
DCHECK_IMPLIES(from_use != nullptr, from_use->IsResolved());
// Potentially resolve phi hint.
if (phi_vreg != -1) ResolvePhiHint(&from, from_use);
}
}
}
}
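// Summary comment (added): removes phi outputs from the |live| set and picks
// a register hint for each phi from the gap moves of an earlier-rpo
// predecessor, preferring hints from non-deferred blocks, from
// already-allocated operands, and from empty blocks.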
void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block,
BitVector* live) {
for (PhiInstruction* phi : block->phis()) {
// The live range interval already ends at the first instruction of the
// block.
int phi_vreg = phi->virtual_register();
live->Remove(phi_vreg);
// Select a hint from a predecessor block that precedes this block in the
// rpo order. In order of priority:
// - Avoid hints from deferred blocks.
// - Prefer hints from allocated (or explicit) operands.
// - Prefer hints from empty blocks (containing just parallel moves and a
// jump). In these cases, if we can elide the moves, the jump threader
// is likely to be able to elide the jump.
// The enforcement of hinting in rpo order is required because hint
// resolution that happens later in the compiler pipeline visits
// instructions in reverse rpo order, relying on the fact that phis are
// encountered before their hints.
InstructionOperand* hint = nullptr;
int hint_preference = 0;
// The cost of hinting increases with the number of predecessors. At the
// same time, the typical benefit decreases, since this hinting only
// optimises the execution path through one predecessor. A limit of 2 is
// sufficient to hit the common if/else pattern.
int predecessor_limit = 2;
for (RpoNumber predecessor : block->predecessors()) {
const InstructionBlock* predecessor_block =
code()->InstructionBlockAt(predecessor);
DCHECK_EQ(predecessor_block->rpo_number(), predecessor);
// Only take hints from earlier rpo numbers.
if (predecessor >= block->rpo_number()) continue;
// Look up the predecessor instruction.
const Instruction* predecessor_instr =
GetLastInstruction(code(), predecessor_block);
InstructionOperand* predecessor_hint = nullptr;
// Phis are assigned in the END position of the last instruction in each
// predecessor block.
for (MoveOperands* move :
*predecessor_instr->GetParallelMove(Instruction::END)) {
InstructionOperand& to = move->destination();
if (to.IsUnallocated() &&
UnallocatedOperand::cast(to).virtual_register() == phi_vreg) {
predecessor_hint = &move->source();
break;
}
}
DCHECK_NOT_NULL(predecessor_hint);
// For each predecessor, generate a score according to the priorities
// described above, and pick the best one. Flags in higher-order bits have
// a higher priority than those in lower-order bits.
int predecessor_hint_preference = 0;
const int kNotDeferredBlockPreference = (1 << 2);
const int kMoveIsAllocatedPreference = (1 << 1);
const int kBlockIsEmptyPreference = (1 << 0);
// - Avoid hints from deferred blocks.
if (!predecessor_block->IsDeferred()) {
predecessor_hint_preference |= kNotDeferredBlockPreference;
}
// - Prefer hints from allocated operands.
//
// Already-allocated operands are typically assigned using the parallel
// moves on the last instruction. For example:
//
// gap (v101 = [x0|R|w32]) (v100 = v101)
// ArchJmp
// ...
// phi: v100 = v101 v102
//
// We have already found the END move, so look for a matching START move
// from an allocated operand.
//
// Note that we cannot simply look up data()->live_ranges()[vreg] here
// because the live ranges are still being built when this function is
// called.
// TODO(v8): Find a way to separate hinting from live range analysis in
// BuildLiveRanges so that we can use the O(1) live-range look-up.
auto moves = predecessor_instr->GetParallelMove(Instruction::START);
if (moves != nullptr) {
for (MoveOperands* move : *moves) {
InstructionOperand& to = move->destination();
if (predecessor_hint->Equals(to)) {
if (move->source().IsAllocated()) {
predecessor_hint_preference |= kMoveIsAllocatedPreference;
}
break;
}
}
}
// - Prefer hints from empty blocks.
if (predecessor_block->last_instruction_index() ==
predecessor_block->first_instruction_index()) {
predecessor_hint_preference |= kBlockIsEmptyPreference;
}
if ((hint == nullptr) ||
(predecessor_hint_preference > hint_preference)) {
// Take the hint from this predecessor.
hint = predecessor_hint;
hint_preference = predecessor_hint_preference;
}
if (--predecessor_limit <= 0) break;
}
DCHECK_NOT_NULL(hint);
LifetimePosition block_start = LifetimePosition::GapFromInstructionIndex(
block->first_instruction_index());
UsePosition* use_pos = Define(block_start, &phi->output(), hint,
UsePosition::HintTypeForOperand(*hint),
SpillModeForBlock(block));
MapPhiHint(hint, use_pos);
}
}
void LiveRangeBuilder::ProcessLoopHeader(const InstructionBlock* block,
BitVector* live) {
DCHECK(block->IsLoopHeader());
// Add a live range stretching from the first loop instruction to the last
// for each value live on entry to the header.
BitVector::Iterator iterator(live);
LifetimePosition start = LifetimePosition::GapFromInstructionIndex(
block->first_instruction_index());
LifetimePosition end = LifetimePosition::GapFromInstructionIndex(
code()->LastLoopInstructionIndex(block))
.NextFullStart();
while (!iterator.Done()) {
int operand_index = iterator.Current();
TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index);
range->EnsureInterval(start, end, allocation_zone(),
data()->is_trace_alloc());
iterator.Advance();
}
// Insert all values into the live in sets of all blocks in the loop.
for (int i = block->rpo_number().ToInt() + 1; i < block->loop_end().ToInt();
++i) {
live_in_sets()[i]->Union(*live);
}
}
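// Summary comment (added): main driver of live range construction. One
// backward pass over the blocks builds intervals and use positions, followed
// by a postprocessing pass that assigns spill ranges to slot uses and
// adjusts use kinds for ranges spilled to constants.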
void LiveRangeBuilder::BuildLiveRanges() {
// Process the blocks in reverse order.
for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0;
--block_id) {
data_->tick_counter()->TickAndMaybeEnterSafepoint();
InstructionBlock* block =
code()->InstructionBlockAt(RpoNumber::FromInt(block_id));
BitVector* live = ComputeLiveOut(block, data());
// Initially consider all live_out values live for the entire block. We
// will shorten these intervals if necessary.
AddInitialIntervals(block, live);
// Process the instructions in reverse order, generating and killing
// live values.
ProcessInstructions(block, live);
// All phi output operands are killed by this block.
ProcessPhis(block, live);
// Now live is the live_in set for this block, except that values which
// are only live along backward successor edges are not yet included.
if (block->IsLoopHeader()) ProcessLoopHeader(block, live);
live_in_sets()[block_id] = live;
}
// Postprocess the ranges.
const size_t live_ranges_size = data()->live_ranges().size();
for (TopLevelLiveRange* range : data()->live_ranges()) {
data_->tick_counter()->TickAndMaybeEnterSafepoint();
CHECK_EQ(live_ranges_size,
data()->live_ranges().size()); // TODO(neis): crbug.com/831822
if (range == nullptr) continue;
// Give slots to all ranges with a non-fixed slot use.
if (range->has_slot_use() && range->HasNoSpillType()) {
SpillMode spill_mode =
range->slot_use_kind() ==
TopLevelLiveRange::SlotUseKind::kDeferredSlotUse
? SpillMode::kSpillDeferred
: SpillMode::kSpillAtDefinition;
data()->AssignSpillRangeToLiveRange(range, spill_mode);
}
// TODO(bmeurer): This is a horrible hack to make sure that for constant
// live ranges, every use requires the constant to be in a register.
// Without this hack, all uses with "any" policy would get the constant
// operand assigned.
if (range->HasSpillOperand() && range->GetSpillOperand()->IsConstant()) {
for (UsePosition* pos = range->first_pos(); pos != nullptr;
pos = pos->next()) {
if (pos->type() == UsePositionType::kRequiresSlot ||
pos->type() == UsePositionType::kRegisterOrSlotOrConstant) {
continue;
}
UsePositionType new_type = UsePositionType::kRegisterOrSlot;
// Can't mark phis as needing a register.
if (!pos->pos().IsGapPosition()) {
new_type = UsePositionType::kRequiresRegister;
}
pos->set_type(new_type, true);
}
}
range->ResetCurrentHintPosition();
}
for (auto preassigned : data()->preassigned_slot_ranges()) {
TopLevelLiveRange* range = preassigned.first;
int slot_id = preassigned.second;
SpillRange* spill = range->HasSpillRange()
? range->GetSpillRange()
: data()->AssignSpillRangeToLiveRange(
range, SpillMode::kSpillAtDefinition);
spill->set_assigned_slot(slot_id);
}
#ifdef DEBUG
Verify();
#endif
}
void LiveRangeBuilder::MapPhiHint(InstructionOperand* operand,
UsePosition* use_pos) {
DCHECK(!use_pos->IsResolved());
auto res = phi_hints_.insert(std::make_pair(operand, use_pos));
DCHECK(res.second);
USE(res);
}
void LiveRangeBuilder::ResolvePhiHint(InstructionOperand* operand,
UsePosition* use_pos) {
auto it = phi_hints_.find(operand);
if (it == phi_hints_.end()) return;
DCHECK(!it->second->IsResolved());
it->second->ResolveHint(use_pos);
}
void LiveRangeBuilder::Verify() const {
for (auto& hint : phi_hints_) {
CHECK(hint.second->IsResolved());
}
for (const TopLevelLiveRange* current : data()->live_ranges()) {
if (current != nullptr && !current->IsEmpty()) {
// New LiveRanges should not be split.
CHECK_NULL(current->next());
// General integrity check.
current->Verify();
const UseInterval* first = current->first_interval();
if (first->next() == nullptr) continue;
// Consecutive intervals should not end and start in the same block,
// otherwise the intervals should have been joined, because the
// variable is live throughout that block.
CHECK(NextIntervalStartsInDifferentBlocks(first));
for (const UseInterval* i = first->next(); i != nullptr; i = i->next()) {
// Except for the first interval, all other intervals must start at
// a block boundary, otherwise data wouldn't flow to them.
CHECK(IntervalStartsAtBlockBoundary(i));
// The last instruction of each predecessor of the block in which the
// interval starts must be covered by the range.
CHECK(IntervalPredecessorsCoveredByRange(i, current));
if (i->next() != nullptr) {
// Check the consecutive intervals property, except for the last
// interval, where it doesn't apply.
CHECK(NextIntervalStartsInDifferentBlocks(i));
}
}
}
}
}
bool LiveRangeBuilder::IntervalStartsAtBlockBoundary(
const UseInterval* interval) const {
LifetimePosition start = interval->start();
if (!start.IsFullStart()) return false;
int instruction_index = start.ToInstructionIndex();
const InstructionBlock* block =
data()->code()->GetInstructionBlock(instruction_index);
return block->first_instruction_index() == instruction_index;
}
bool LiveRangeBuilder::IntervalPredecessorsCoveredByRange(
const UseInterval* interval, const TopLevelLiveRange* range) const {
LifetimePosition start = interval->start();
int instruction_index = start.ToInstructionIndex();
const InstructionBlock* block =
data()->code()->GetInstructionBlock(instruction_index);
for (RpoNumber pred_index : block->predecessors()) {
const InstructionBlock* predecessor =
data()->code()->InstructionBlockAt(pred_index);
LifetimePosition last_pos = LifetimePosition::GapFromInstructionIndex(
predecessor->last_instruction_index());
last_pos = last_pos.NextStart().End();
if (!range->Covers(last_pos)) return false;
}
return true;
}
bool LiveRangeBuilder::NextIntervalStartsInDifferentBlocks(
const UseInterval* interval) const {
DCHECK_NOT_NULL(interval->next());
LifetimePosition end = interval->end();
LifetimePosition next_start = interval->next()->start();
// Since end is not covered, but the previous position is, move back
// one position.
end = end.IsStart() ? end.PrevStart().End() : end.Start();
int last_covered_index = end.ToInstructionIndex();
const InstructionBlock* block =
data()->code()->GetInstructionBlock(last_covered_index);
const InstructionBlock* next_block =
data()->code()->GetInstructionBlock(next_start.ToInstructionIndex());
return block->rpo_number() < next_block->rpo_number();
}
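// Summary comment (added): groups each phi's output range with its
// non-overlapping input ranges into a LiveRangeBundle, and flags phis whose
// inputs interfere across a back-edge so that spilling them at the loop
// header is not preferred.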
void BundleBuilder::BuildBundles() {
TRACE("Build bundles\n");
// Process the blocks in reverse order.
for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0;
--block_id) {
InstructionBlock* block =
code()->InstructionBlockAt(RpoNumber::FromInt(block_id));
TRACE("Block B%d\n", block_id);
for (auto phi : block->phis()) {
LiveRange* out_range =
data()->GetOrCreateLiveRangeFor(phi->virtual_register());
LiveRangeBundle* out = out_range->get_bundle();
if (out == nullptr) {
out = data()->allocation_zone()->New<LiveRangeBundle>(
data()->allocation_zone(), next_bundle_id_++);
out->TryAddRange(out_range);
}
TRACE("Processing phi for v%d with %d:%d\n", phi->virtual_register(),
out_range->TopLevel()->vreg(), out_range->relative_id());
bool phi_interferes_with_backedge_input = false;
for (auto input : phi->operands()) {
LiveRange* input_range = data()->GetOrCreateLiveRangeFor(input);
TRACE("Input value v%d with range %d:%d\n", input,
input_range->TopLevel()->vreg(), input_range->relative_id());
LiveRangeBundle* input_bundle = input_range->get_bundle();
if (input_bundle != nullptr) {
TRACE("Merge\n");
if (out->TryMerge(input_bundle, data()->is_trace_alloc())) {
TRACE("Merged %d and %d to %d\n", phi->virtual_register(), input,
out->id());
} else if (input_range->Start() > out_range->Start()) {
// We are only interested in values defined after the phi, because
// those are values that will go over a back-edge.
phi_interferes_with_backedge_input = true;
}
} else {
TRACE("Add\n");
if (out->TryAddRange(input_range)) {
TRACE("Added %d and %d to %d\n", phi->virtual_register(), input,
out->id());
} else if (input_range->Start() > out_range->Start()) {
// We are only interested in values defined after the phi, because
// those are values that will go over a back-edge.
phi_interferes_with_backedge_input = true;
}
}
}
// Spilling the phi at the loop header is not beneficial if there is
// a back-edge with an input for the phi that interferes with the phi's
// value, because if that input gets spilled, it might introduce
// a stack-to-stack move at the back-edge.
if (phi_interferes_with_backedge_input)
out_range->TopLevel()->set_spilling_at_loop_header_not_beneficial();
}
TRACE("Done block B%d\n", block_id);
}
}
bool LiveRangeBundle::TryAddRange(LiveRange* range) {
DCHECK_NULL(range->get_bundle());
// We may only add a new live range if its use intervals do not
// overlap with existing intervals in the bundle.
if (UsesOverlap(range->first_interval())) return false;
ranges_.insert(range);
range->set_bundle(this);
InsertUses(range->first_interval());
return true;
}
bool LiveRangeBundle::TryMerge(LiveRangeBundle* other, bool trace_alloc) {
if (other == this) return true;
auto iter1 = uses_.begin();
auto iter2 = other->uses_.begin();
while (iter1 != uses_.end() && iter2 != other->uses_.end()) {
if (iter1->start >= iter2->end) {
++iter2;
} else if (iter2->start >= iter1->end) {
++iter1;
} else {
TRACE_COND(trace_alloc, "No merge %d:%d %d:%d\n", iter1->start,
iter1->end, iter2->start, iter2->end);
return false;
}
}
// Uses are disjoint, merging is possible.