| // Copyright 2014 the V8 project authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style license that can be |
| // found in the LICENSE file. |
| |
| #include "src/compiler/register-allocator.h" |
| |
| #include "src/assembler-inl.h" |
| #include "src/base/adapters.h" |
| #include "src/compiler/linkage.h" |
| #include "src/string-stream.h" |
| |
| namespace v8 { |
| namespace internal { |
| namespace compiler { |
| |
| #define TRACE(...) \ |
| do { \ |
| if (FLAG_trace_alloc) PrintF(__VA_ARGS__); \ |
| } while (false) |
| |
| |
| namespace { |
| |
| static const int kFloatRepBit = |
| 1 << static_cast<int>(MachineRepresentation::kFloat32); |
| static const int kSimd128RepBit = |
| 1 << static_cast<int>(MachineRepresentation::kSimd128); |
| |
| void RemoveElement(ZoneVector<LiveRange*>* v, LiveRange* range) { |
| auto it = std::find(v->begin(), v->end(), range); |
| DCHECK(it != v->end()); |
| v->erase(it); |
| } |
| |
| int GetRegisterCount(const RegisterConfiguration* cfg, RegisterKind kind) { |
| return kind == FP_REGISTERS ? cfg->num_double_registers() |
| : cfg->num_general_registers(); |
| } |
| |
| |
| int GetAllocatableRegisterCount(const RegisterConfiguration* cfg, |
| RegisterKind kind) { |
| return kind == FP_REGISTERS ? cfg->num_allocatable_double_registers() |
| : cfg->num_allocatable_general_registers(); |
| } |
| |
| |
| const int* GetAllocatableRegisterCodes(const RegisterConfiguration* cfg, |
| RegisterKind kind) { |
| return kind == FP_REGISTERS ? cfg->allocatable_double_codes() |
| : cfg->allocatable_general_codes(); |
| } |
| |
| |
| const InstructionBlock* GetContainingLoop(const InstructionSequence* sequence, |
| const InstructionBlock* block) { |
| RpoNumber index = block->loop_header(); |
| if (!index.IsValid()) return nullptr; |
| return sequence->InstructionBlockAt(index); |
| } |
| |
| |
| const InstructionBlock* GetInstructionBlock(const InstructionSequence* code, |
| LifetimePosition pos) { |
| return code->GetInstructionBlock(pos.ToInstructionIndex()); |
| } |
| |
| |
| Instruction* GetLastInstruction(InstructionSequence* code, |
| const InstructionBlock* block) { |
| return code->InstructionAt(block->last_instruction_index()); |
| } |
| |
| // TODO(dcarney): fix frame to allow frame accesses to half size location. |
| int GetByteWidth(MachineRepresentation rep) { |
| switch (rep) { |
| case MachineRepresentation::kBit: |
| case MachineRepresentation::kWord8: |
| case MachineRepresentation::kWord16: |
| case MachineRepresentation::kWord32: |
| case MachineRepresentation::kTaggedSigned: |
| case MachineRepresentation::kTaggedPointer: |
| case MachineRepresentation::kTagged: |
| case MachineRepresentation::kFloat32: |
| return kPointerSize; |
| case MachineRepresentation::kWord64: |
| case MachineRepresentation::kFloat64: |
| return kDoubleSize; |
| case MachineRepresentation::kSimd128: |
| return kSimd128Size; |
| case MachineRepresentation::kNone: |
| break; |
| } |
| UNREACHABLE(); |
| } |
| |
| } // namespace |
| |
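| // Caches the [start, end) extent of a single live range child, plus a flag |
| // indicating that the child is spilled and can be skipped when searching |
| // for ranges to connect. |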
| class LiveRangeBound { |
| public: |
| explicit LiveRangeBound(LiveRange* range, bool skip) |
| : range_(range), start_(range->Start()), end_(range->End()), skip_(skip) { |
| DCHECK(!range->IsEmpty()); |
| } |
| |
| bool CanCover(LifetimePosition position) { |
| return start_ <= position && position < end_; |
| } |
| |
| LiveRange* const range_; |
| const LifetimePosition start_; |
| const LifetimePosition end_; |
| const bool skip_; |
| |
| private: |
| DISALLOW_COPY_AND_ASSIGN(LiveRangeBound); |
| }; |
| |
| |
| struct FindResult { |
| LiveRange* cur_cover_; |
| LiveRange* pred_cover_; |
| }; |
| |
| |
| class LiveRangeBoundArray { |
| public: |
| LiveRangeBoundArray() : length_(0), start_(nullptr) {} |
| |
| bool ShouldInitialize() { return start_ == nullptr; } |
| |
| void Initialize(Zone* zone, TopLevelLiveRange* range) { |
| length_ = range->GetChildCount(); |
| |
| start_ = zone->NewArray<LiveRangeBound>(length_); |
| LiveRangeBound* curr = start_; |
| // Normally, spilled ranges do not need connecting moves, because the spill |
| // location has been assigned at definition. For ranges spilled in deferred |
| // blocks, that is not the case, so we need to connect the spilled children. |
| for (LiveRange *i = range; i != nullptr; i = i->next(), ++curr) { |
| new (curr) LiveRangeBound(i, i->spilled()); |
| } |
| } |
| |
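| // Binary search for the bound whose [start, end) interval covers |
| // |position|. The position must be covered by one of the bounds. |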
| LiveRangeBound* Find(const LifetimePosition position) const { |
| size_t left_index = 0; |
| size_t right_index = length_; |
| while (true) { |
| size_t current_index = left_index + (right_index - left_index) / 2; |
| DCHECK(right_index > current_index); |
| LiveRangeBound* bound = &start_[current_index]; |
| if (bound->start_ <= position) { |
| if (position < bound->end_) return bound; |
| DCHECK(left_index < current_index); |
| left_index = current_index; |
| } else { |
| right_index = current_index; |
| } |
| } |
| } |
| |
| LiveRangeBound* FindPred(const InstructionBlock* pred) { |
| LifetimePosition pred_end = |
| LifetimePosition::InstructionFromInstructionIndex( |
| pred->last_instruction_index()); |
| return Find(pred_end); |
| } |
| |
| LiveRangeBound* FindSucc(const InstructionBlock* succ) { |
| LifetimePosition succ_start = LifetimePosition::GapFromInstructionIndex( |
| succ->first_instruction_index()); |
| return Find(succ_start); |
| } |
| |
| bool FindConnectableSubranges(const InstructionBlock* block, |
| const InstructionBlock* pred, |
| FindResult* result) const { |
| LifetimePosition pred_end = |
| LifetimePosition::InstructionFromInstructionIndex( |
| pred->last_instruction_index()); |
| LiveRangeBound* bound = Find(pred_end); |
| result->pred_cover_ = bound->range_; |
| LifetimePosition cur_start = LifetimePosition::GapFromInstructionIndex( |
| block->first_instruction_index()); |
| |
| if (bound->CanCover(cur_start)) { |
| // Both blocks are covered by the same range, so there is nothing to |
| // connect. |
| return false; |
| } |
| bound = Find(cur_start); |
| if (bound->skip_) { |
| return false; |
| } |
| result->cur_cover_ = bound->range_; |
| DCHECK(result->pred_cover_ != nullptr && result->cur_cover_ != nullptr); |
| return (result->cur_cover_ != result->pred_cover_); |
| } |
| |
| private: |
| size_t length_; |
| LiveRangeBound* start_; |
| |
| DISALLOW_COPY_AND_ASSIGN(LiveRangeBoundArray); |
| }; |
| |
| |
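| // Lazily builds one LiveRangeBoundArray per top-level live range, so that |
| // the child range covering a given position can be located quickly. |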
| class LiveRangeFinder { |
| public: |
| explicit LiveRangeFinder(const RegisterAllocationData* data, Zone* zone) |
| : data_(data), |
| bounds_length_(static_cast<int>(data_->live_ranges().size())), |
| bounds_(zone->NewArray<LiveRangeBoundArray>(bounds_length_)), |
| zone_(zone) { |
| for (int i = 0; i < bounds_length_; ++i) { |
| new (&bounds_[i]) LiveRangeBoundArray(); |
| } |
| } |
| |
| LiveRangeBoundArray* ArrayFor(int operand_index) { |
| DCHECK(operand_index < bounds_length_); |
| TopLevelLiveRange* range = data_->live_ranges()[operand_index]; |
| DCHECK(range != nullptr && !range->IsEmpty()); |
| LiveRangeBoundArray* array = &bounds_[operand_index]; |
| if (array->ShouldInitialize()) { |
| array->Initialize(zone_, range); |
| } |
| return array; |
| } |
| |
| private: |
| const RegisterAllocationData* const data_; |
| const int bounds_length_; |
| LiveRangeBoundArray* const bounds_; |
| Zone* const zone_; |
| |
| DISALLOW_COPY_AND_ASSIGN(LiveRangeFinder); |
| }; |
| |
| |
| typedef std::pair<ParallelMove*, InstructionOperand> DelayedInsertionMapKey; |
| |
| |
| struct DelayedInsertionMapCompare { |
| bool operator()(const DelayedInsertionMapKey& a, |
| const DelayedInsertionMapKey& b) const { |
| if (a.first == b.first) { |
| return a.second.Compare(b.second); |
| } |
| return a.first < b.first; |
| } |
| }; |
| |
| |
| typedef ZoneMap<DelayedInsertionMapKey, InstructionOperand, |
| DelayedInsertionMapCompare> DelayedInsertionMap; |
| |
| |
| UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand, |
| void* hint, UsePositionHintType hint_type) |
| : operand_(operand), hint_(hint), next_(nullptr), pos_(pos), flags_(0) { |
| DCHECK_IMPLIES(hint == nullptr, hint_type == UsePositionHintType::kNone); |
| bool register_beneficial = true; |
| UsePositionType type = UsePositionType::kAny; |
| if (operand_ != nullptr && operand_->IsUnallocated()) { |
| const UnallocatedOperand* unalloc = UnallocatedOperand::cast(operand_); |
| if (unalloc->HasRegisterPolicy()) { |
| type = UsePositionType::kRequiresRegister; |
| } else if (unalloc->HasSlotPolicy()) { |
| type = UsePositionType::kRequiresSlot; |
| register_beneficial = false; |
| } else { |
| register_beneficial = !unalloc->HasAnyPolicy(); |
| } |
| } |
| flags_ = TypeField::encode(type) | HintTypeField::encode(hint_type) | |
| RegisterBeneficialField::encode(register_beneficial) | |
| AssignedRegisterField::encode(kUnassignedRegister); |
| DCHECK(pos_.IsValid()); |
| } |
| |
| |
| bool UsePosition::HasHint() const { |
| int hint_register; |
| return HintRegister(&hint_register); |
| } |
| |
| |
| bool UsePosition::HintRegister(int* register_code) const { |
| if (hint_ == nullptr) return false; |
| switch (HintTypeField::decode(flags_)) { |
| case UsePositionHintType::kNone: |
| case UsePositionHintType::kUnresolved: |
| return false; |
| case UsePositionHintType::kUsePos: { |
| UsePosition* use_pos = reinterpret_cast<UsePosition*>(hint_); |
| int assigned_register = AssignedRegisterField::decode(use_pos->flags_); |
| if (assigned_register == kUnassignedRegister) return false; |
| *register_code = assigned_register; |
| return true; |
| } |
| case UsePositionHintType::kOperand: { |
| InstructionOperand* operand = |
| reinterpret_cast<InstructionOperand*>(hint_); |
| *register_code = LocationOperand::cast(operand)->register_code(); |
| return true; |
| } |
| case UsePositionHintType::kPhi: { |
| RegisterAllocationData::PhiMapValue* phi = |
| reinterpret_cast<RegisterAllocationData::PhiMapValue*>(hint_); |
| int assigned_register = phi->assigned_register(); |
| if (assigned_register == kUnassignedRegister) return false; |
| *register_code = assigned_register; |
| return true; |
| } |
| } |
| UNREACHABLE(); |
| } |
| |
| |
| UsePositionHintType UsePosition::HintTypeForOperand( |
| const InstructionOperand& op) { |
| switch (op.kind()) { |
| case InstructionOperand::CONSTANT: |
| case InstructionOperand::IMMEDIATE: |
| case InstructionOperand::EXPLICIT: |
| return UsePositionHintType::kNone; |
| case InstructionOperand::UNALLOCATED: |
| return UsePositionHintType::kUnresolved; |
| case InstructionOperand::ALLOCATED: |
| if (op.IsRegister() || op.IsFPRegister()) { |
| return UsePositionHintType::kOperand; |
| } else { |
| DCHECK(op.IsStackSlot() || op.IsFPStackSlot()); |
| return UsePositionHintType::kNone; |
| } |
| case InstructionOperand::INVALID: |
| break; |
| } |
| UNREACHABLE(); |
| } |
| |
| void UsePosition::SetHint(UsePosition* use_pos) { |
| DCHECK_NOT_NULL(use_pos); |
| hint_ = use_pos; |
| flags_ = HintTypeField::update(flags_, UsePositionHintType::kUsePos); |
| } |
| |
| void UsePosition::ResolveHint(UsePosition* use_pos) { |
| DCHECK_NOT_NULL(use_pos); |
| if (HintTypeField::decode(flags_) != UsePositionHintType::kUnresolved) return; |
| hint_ = use_pos; |
| flags_ = HintTypeField::update(flags_, UsePositionHintType::kUsePos); |
| } |
| |
| |
| void UsePosition::set_type(UsePositionType type, bool register_beneficial) { |
| DCHECK_IMPLIES(type == UsePositionType::kRequiresSlot, !register_beneficial); |
| DCHECK_EQ(kUnassignedRegister, AssignedRegisterField::decode(flags_)); |
| flags_ = TypeField::encode(type) | |
| RegisterBeneficialField::encode(register_beneficial) | |
| HintTypeField::encode(HintTypeField::decode(flags_)) | |
| AssignedRegisterField::encode(kUnassignedRegister); |
| } |
| |
| |
| UseInterval* UseInterval::SplitAt(LifetimePosition pos, Zone* zone) { |
| DCHECK(Contains(pos) && pos != start()); |
| UseInterval* after = new (zone) UseInterval(pos, end_); |
| after->next_ = next_; |
| next_ = nullptr; |
| end_ = pos; |
| return after; |
| } |
| |
| |
| void LifetimePosition::Print() const { |
| OFStream os(stdout); |
| os << *this << std::endl; |
| } |
| |
| |
| std::ostream& operator<<(std::ostream& os, const LifetimePosition pos) { |
| os << '@' << pos.ToInstructionIndex(); |
| if (pos.IsGapPosition()) { |
| os << 'g'; |
| } else { |
| os << 'i'; |
| } |
| if (pos.IsStart()) { |
| os << 's'; |
| } else { |
| os << 'e'; |
| } |
| return os; |
| } |
| |
| LiveRange::LiveRange(int relative_id, MachineRepresentation rep, |
| TopLevelLiveRange* top_level) |
| : relative_id_(relative_id), |
| bits_(0), |
| last_interval_(nullptr), |
| first_interval_(nullptr), |
| first_pos_(nullptr), |
| top_level_(top_level), |
| next_(nullptr), |
| current_interval_(nullptr), |
| last_processed_use_(nullptr), |
| current_hint_position_(nullptr), |
| splitting_pointer_(nullptr) { |
| DCHECK(AllocatedOperand::IsSupportedRepresentation(rep)); |
| bits_ = AssignedRegisterField::encode(kUnassignedRegister) | |
| RepresentationField::encode(rep); |
| } |
| |
| |
| void LiveRange::VerifyPositions() const { |
| // Walk the positions, verifying that each is in an interval. |
| UseInterval* interval = first_interval_; |
| for (UsePosition* pos = first_pos_; pos != nullptr; pos = pos->next()) { |
| CHECK(Start() <= pos->pos()); |
| CHECK(pos->pos() <= End()); |
| CHECK_NOT_NULL(interval); |
| while (!interval->Contains(pos->pos()) && interval->end() != pos->pos()) { |
| interval = interval->next(); |
| CHECK_NOT_NULL(interval); |
| } |
| } |
| } |
| |
| |
| void LiveRange::VerifyIntervals() const { |
| DCHECK(first_interval()->start() == Start()); |
| LifetimePosition last_end = first_interval()->end(); |
| for (UseInterval* interval = first_interval()->next(); interval != nullptr; |
| interval = interval->next()) { |
| DCHECK(last_end <= interval->start()); |
| last_end = interval->end(); |
| } |
| DCHECK(last_end == End()); |
| } |
| |
| |
| void LiveRange::set_assigned_register(int reg) { |
| DCHECK(!HasRegisterAssigned() && !spilled()); |
| bits_ = AssignedRegisterField::update(bits_, reg); |
| } |
| |
| |
| void LiveRange::UnsetAssignedRegister() { |
| DCHECK(HasRegisterAssigned() && !spilled()); |
| bits_ = AssignedRegisterField::update(bits_, kUnassignedRegister); |
| } |
| |
| |
| void LiveRange::Spill() { |
| DCHECK(!spilled()); |
| DCHECK(!TopLevel()->HasNoSpillType()); |
| set_spilled(true); |
| bits_ = AssignedRegisterField::update(bits_, kUnassignedRegister); |
| } |
| |
| |
| RegisterKind LiveRange::kind() const { |
| return IsFloatingPoint(representation()) ? FP_REGISTERS : GENERAL_REGISTERS; |
| } |
| |
| |
| UsePosition* LiveRange::FirstHintPosition(int* register_index) const { |
| for (UsePosition* pos = first_pos_; pos != nullptr; pos = pos->next()) { |
| if (pos->HintRegister(register_index)) return pos; |
| } |
| return nullptr; |
| } |
| |
| |
| UsePosition* LiveRange::NextUsePosition(LifetimePosition start) const { |
| UsePosition* use_pos = last_processed_use_; |
| if (use_pos == nullptr || use_pos->pos() > start) { |
| use_pos = first_pos(); |
| } |
| while (use_pos != nullptr && use_pos->pos() < start) { |
| use_pos = use_pos->next(); |
| } |
| last_processed_use_ = use_pos; |
| return use_pos; |
| } |
| |
| |
| UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial( |
| LifetimePosition start) const { |
| UsePosition* pos = NextUsePosition(start); |
| while (pos != nullptr && !pos->RegisterIsBeneficial()) { |
| pos = pos->next(); |
| } |
| return pos; |
| } |
| |
| LifetimePosition LiveRange::NextLifetimePositionRegisterIsBeneficial( |
| const LifetimePosition& start) const { |
| UsePosition* next_use = NextUsePositionRegisterIsBeneficial(start); |
| if (next_use == nullptr) return End(); |
| return next_use->pos(); |
| } |
| |
| UsePosition* LiveRange::PreviousUsePositionRegisterIsBeneficial( |
| LifetimePosition start) const { |
| UsePosition* pos = first_pos(); |
| UsePosition* prev = nullptr; |
| while (pos != nullptr && pos->pos() < start) { |
| if (pos->RegisterIsBeneficial()) prev = pos; |
| pos = pos->next(); |
| } |
| return prev; |
| } |
| |
| |
| UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) const { |
| UsePosition* pos = NextUsePosition(start); |
| while (pos != nullptr && pos->type() != UsePositionType::kRequiresRegister) { |
| pos = pos->next(); |
| } |
| return pos; |
| } |
| |
| |
| UsePosition* LiveRange::NextSlotPosition(LifetimePosition start) const { |
| for (UsePosition* pos = NextUsePosition(start); pos != nullptr; |
| pos = pos->next()) { |
| if (pos->type() != UsePositionType::kRequiresSlot) continue; |
| return pos; |
| } |
| return nullptr; |
| } |
| |
| |
| bool LiveRange::CanBeSpilled(LifetimePosition pos) const { |
| // We cannot spill a live range that has a use requiring a register |
| // at the current or the immediate next position. |
| UsePosition* use_pos = NextRegisterPosition(pos); |
| if (use_pos == nullptr) return true; |
| return use_pos->pos() > pos.NextStart().End(); |
| } |
| |
| |
| bool LiveRange::IsTopLevel() const { return top_level_ == this; } |
| |
| |
| InstructionOperand LiveRange::GetAssignedOperand() const { |
| if (HasRegisterAssigned()) { |
| DCHECK(!spilled()); |
| return AllocatedOperand(LocationOperand::REGISTER, representation(), |
| assigned_register()); |
| } |
| DCHECK(spilled()); |
| DCHECK(!HasRegisterAssigned()); |
| if (TopLevel()->HasSpillOperand()) { |
| InstructionOperand* op = TopLevel()->GetSpillOperand(); |
| DCHECK(!op->IsUnallocated()); |
| return *op; |
| } |
| return TopLevel()->GetSpillRangeOperand(); |
| } |
| |
| |
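| // Returns the interval from which a search for |position| may start: the |
| // cached current_interval_ if it does not begin after |position|, otherwise |
| // the first interval of the range. |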
| UseInterval* LiveRange::FirstSearchIntervalForPosition( |
| LifetimePosition position) const { |
| if (current_interval_ == nullptr) return first_interval_; |
| if (current_interval_->start() > position) { |
| current_interval_ = nullptr; |
| return first_interval_; |
| } |
| return current_interval_; |
| } |
| |
| |
| void LiveRange::AdvanceLastProcessedMarker( |
| UseInterval* to_start_of, LifetimePosition but_not_past) const { |
| if (to_start_of == nullptr) return; |
| if (to_start_of->start() > but_not_past) return; |
| LifetimePosition start = current_interval_ == nullptr |
| ? LifetimePosition::Invalid() |
| : current_interval_->start(); |
| if (to_start_of->start() > start) { |
| current_interval_ = to_start_of; |
| } |
| } |
| |
| |
| LiveRange* LiveRange::SplitAt(LifetimePosition position, Zone* zone) { |
| int new_id = TopLevel()->GetNextChildId(); |
| LiveRange* child = new (zone) LiveRange(new_id, representation(), TopLevel()); |
| // If we split, we do so because we're about to switch registers or move |
| // to/from a slot, so there's no value in connecting hints. |
| DetachAt(position, child, zone, DoNotConnectHints); |
| |
| child->top_level_ = TopLevel(); |
| child->next_ = next_; |
| next_ = child; |
| return child; |
| } |
| |
| UsePosition* LiveRange::DetachAt(LifetimePosition position, LiveRange* result, |
| Zone* zone, |
| HintConnectionOption connect_hints) { |
| DCHECK(Start() < position); |
| DCHECK(End() > position); |
| DCHECK(result->IsEmpty()); |
| // Find the last interval that ends before the position. If the |
| // position is contained in one of the intervals in the chain, we |
| // split that interval and use the first part. |
| UseInterval* current = FirstSearchIntervalForPosition(position); |
| |
| // If the split position coincides with the beginning of a use interval, |
| // we need to split use positions in a special way. |
| bool split_at_start = false; |
| |
| if (current->start() == position) { |
| // When splitting at start we need to locate the previous use interval. |
| current = first_interval_; |
| } |
| |
| UseInterval* after = nullptr; |
| while (current != nullptr) { |
| if (current->Contains(position)) { |
| after = current->SplitAt(position, zone); |
| break; |
| } |
| UseInterval* next = current->next(); |
| if (next->start() >= position) { |
| split_at_start = (next->start() == position); |
| after = next; |
| current->set_next(nullptr); |
| break; |
| } |
| current = next; |
| } |
| DCHECK_NOT_NULL(after); |
| |
| // Partition original use intervals to the two live ranges. |
| UseInterval* before = current; |
| result->last_interval_ = |
| (last_interval_ == before) |
| ? after // Only interval in the range after split. |
| : last_interval_; // Last interval of the original range. |
| result->first_interval_ = after; |
| last_interval_ = before; |
| |
| // Find the last use position before the split and the first use |
| // position after it. |
| UsePosition* use_after = |
| splitting_pointer_ == nullptr || splitting_pointer_->pos() > position |
| ? first_pos() |
| : splitting_pointer_; |
| UsePosition* use_before = nullptr; |
| if (split_at_start) { |
| // The split position coincides with the beginning of a use interval (the |
| // end of a lifetime hole). A use at this position should be attributed to |
| // the split child, because the split child owns the use interval covering |
| // it. |
| while (use_after != nullptr && use_after->pos() < position) { |
| use_before = use_after; |
| use_after = use_after->next(); |
| } |
| } else { |
| while (use_after != nullptr && use_after->pos() <= position) { |
| use_before = use_after; |
| use_after = use_after->next(); |
| } |
| } |
| |
| // Partition original use positions to the two live ranges. |
| if (use_before != nullptr) { |
| use_before->set_next(nullptr); |
| } else { |
| first_pos_ = nullptr; |
| } |
| result->first_pos_ = use_after; |
| |
| // Discard cached iteration state. It might be pointing |
| // to the use that no longer belongs to this live range. |
| last_processed_use_ = nullptr; |
| current_interval_ = nullptr; |
| |
| if (connect_hints == ConnectHints && use_before != nullptr && |
| use_after != nullptr) { |
| use_after->SetHint(use_before); |
| } |
| #ifdef DEBUG |
| VerifyChildStructure(); |
| result->VerifyChildStructure(); |
| #endif |
| return use_before; |
| } |
| |
| |
| void LiveRange::UpdateParentForAllChildren(TopLevelLiveRange* new_top_level) { |
| LiveRange* child = this; |
| for (; child != nullptr; child = child->next()) { |
| child->top_level_ = new_top_level; |
| } |
| } |
| |
| |
| void LiveRange::ConvertUsesToOperand(const InstructionOperand& op, |
| const InstructionOperand& spill_op) { |
| for (UsePosition* pos = first_pos(); pos != nullptr; pos = pos->next()) { |
| DCHECK(Start() <= pos->pos() && pos->pos() <= End()); |
| if (!pos->HasOperand()) continue; |
| switch (pos->type()) { |
| case UsePositionType::kRequiresSlot: |
| DCHECK(spill_op.IsStackSlot() || spill_op.IsFPStackSlot()); |
| InstructionOperand::ReplaceWith(pos->operand(), &spill_op); |
| break; |
| case UsePositionType::kRequiresRegister: |
| DCHECK(op.IsRegister() || op.IsFPRegister()); |
| // Fall through. |
| case UsePositionType::kAny: |
| InstructionOperand::ReplaceWith(pos->operand(), &op); |
| break; |
| } |
| } |
| } |
| |
| |
| // This implements an ordering on live ranges so that they are ordered by their |
| // start positions. This is needed for the correctness of the register |
| // allocation algorithm. If two live ranges start at the same offset then there |
| // is a tie breaker based on where the value is first used. This part of the |
| // ordering is merely a heuristic. |
| bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const { |
| LifetimePosition start = Start(); |
| LifetimePosition other_start = other->Start(); |
| if (start == other_start) { |
| UsePosition* pos = first_pos(); |
| if (pos == nullptr) return false; |
| UsePosition* other_pos = other->first_pos(); |
| if (other_pos == nullptr) return true; |
| return pos->pos() < other_pos->pos(); |
| } |
| return start < other_start; |
| } |
| |
| |
| void LiveRange::SetUseHints(int register_index) { |
| for (UsePosition* pos = first_pos(); pos != nullptr; pos = pos->next()) { |
| if (!pos->HasOperand()) continue; |
| switch (pos->type()) { |
| case UsePositionType::kRequiresSlot: |
| break; |
| case UsePositionType::kRequiresRegister: |
| case UsePositionType::kAny: |
| pos->set_assigned_register(register_index); |
| break; |
| } |
| } |
| } |
| |
| |
| bool LiveRange::CanCover(LifetimePosition position) const { |
| if (IsEmpty()) return false; |
| return Start() <= position && position < End(); |
| } |
| |
| |
| bool LiveRange::Covers(LifetimePosition position) const { |
| if (!CanCover(position)) return false; |
| UseInterval* start_search = FirstSearchIntervalForPosition(position); |
| for (UseInterval* interval = start_search; interval != nullptr; |
| interval = interval->next()) { |
| DCHECK(interval->next() == nullptr || |
| interval->next()->start() >= interval->start()); |
| AdvanceLastProcessedMarker(interval, position); |
| if (interval->Contains(position)) return true; |
| if (interval->start() > position) return false; |
| } |
| return false; |
| } |
| |
| |
| LifetimePosition LiveRange::FirstIntersection(LiveRange* other) const { |
| UseInterval* b = other->first_interval(); |
| if (b == nullptr) return LifetimePosition::Invalid(); |
| LifetimePosition advance_last_processed_up_to = b->start(); |
| UseInterval* a = FirstSearchIntervalForPosition(b->start()); |
| while (a != nullptr && b != nullptr) { |
| if (a->start() > other->End()) break; |
| if (b->start() > End()) break; |
| LifetimePosition cur_intersection = a->Intersect(b); |
| if (cur_intersection.IsValid()) { |
| return cur_intersection; |
| } |
| if (a->start() < b->start()) { |
| a = a->next(); |
| if (a == nullptr || a->start() > other->End()) break; |
| AdvanceLastProcessedMarker(a, advance_last_processed_up_to); |
| } else { |
| b = b->next(); |
| } |
| } |
| return LifetimePosition::Invalid(); |
| } |
| |
| void LiveRange::Print(const RegisterConfiguration* config, |
| bool with_children) const { |
| OFStream os(stdout); |
| PrintableLiveRange wrapper; |
| wrapper.register_configuration_ = config; |
| for (const LiveRange* i = this; i != nullptr; i = i->next()) { |
| wrapper.range_ = i; |
| os << wrapper << std::endl; |
| if (!with_children) break; |
| } |
| } |
| |
| |
| void LiveRange::Print(bool with_children) const { |
| Print(RegisterConfiguration::Default(), with_children); |
| } |
| |
| |
| struct TopLevelLiveRange::SpillMoveInsertionList : ZoneObject { |
| SpillMoveInsertionList(int gap_index, InstructionOperand* operand, |
| SpillMoveInsertionList* next) |
| : gap_index(gap_index), operand(operand), next(next) {} |
| const int gap_index; |
| InstructionOperand* const operand; |
| SpillMoveInsertionList* const next; |
| }; |
| |
| |
| TopLevelLiveRange::TopLevelLiveRange(int vreg, MachineRepresentation rep) |
| : LiveRange(0, rep, this), |
| vreg_(vreg), |
| last_child_id_(0), |
| splintered_from_(nullptr), |
| spill_operand_(nullptr), |
| spill_move_insertion_locations_(nullptr), |
| spilled_in_deferred_blocks_(false), |
| spill_start_index_(kMaxInt), |
| last_pos_(nullptr), |
| splinter_(nullptr), |
| has_preassigned_slot_(false) { |
| bits_ |= SpillTypeField::encode(SpillType::kNoSpillType); |
| } |
| |
| |
| #if DEBUG |
| int TopLevelLiveRange::debug_virt_reg() const { |
| return IsSplinter() ? splintered_from()->vreg() : vreg(); |
| } |
| #endif |
| |
| |
| void TopLevelLiveRange::RecordSpillLocation(Zone* zone, int gap_index, |
| InstructionOperand* operand) { |
| DCHECK(HasNoSpillType()); |
| spill_move_insertion_locations_ = new (zone) SpillMoveInsertionList( |
| gap_index, operand, spill_move_insertion_locations_); |
| } |
| |
| void TopLevelLiveRange::CommitSpillMoves(InstructionSequence* sequence, |
| const InstructionOperand& op, |
| bool might_be_duplicated) { |
| DCHECK_IMPLIES(op.IsConstant(), GetSpillMoveInsertionLocations() == nullptr); |
| Zone* zone = sequence->zone(); |
| |
| for (SpillMoveInsertionList* to_spill = GetSpillMoveInsertionLocations(); |
| to_spill != nullptr; to_spill = to_spill->next) { |
| Instruction* instr = sequence->InstructionAt(to_spill->gap_index); |
| ParallelMove* move = |
| instr->GetOrCreateParallelMove(Instruction::START, zone); |
| // Skip insertion if it's possible that the move exists already as a |
| // constraint move from a fixed output register to a slot. |
| if (might_be_duplicated || has_preassigned_slot()) { |
| bool found = false; |
| for (MoveOperands* move_op : *move) { |
| if (move_op->IsEliminated()) continue; |
| if (move_op->source().Equals(*to_spill->operand) && |
| move_op->destination().Equals(op)) { |
| found = true; |
| if (has_preassigned_slot()) move_op->Eliminate(); |
| break; |
| } |
| } |
| if (found) continue; |
| } |
| if (!has_preassigned_slot()) { |
| move->AddMove(*to_spill->operand, op); |
| } |
| } |
| } |
| |
| |
| void TopLevelLiveRange::SetSpillOperand(InstructionOperand* operand) { |
| DCHECK(HasNoSpillType()); |
| DCHECK(!operand->IsUnallocated() && !operand->IsImmediate()); |
| set_spill_type(SpillType::kSpillOperand); |
| spill_operand_ = operand; |
| } |
| |
| |
| void TopLevelLiveRange::SetSpillRange(SpillRange* spill_range) { |
| DCHECK(!HasSpillOperand()); |
| DCHECK(spill_range); |
| spill_range_ = spill_range; |
| } |
| |
| |
| AllocatedOperand TopLevelLiveRange::GetSpillRangeOperand() const { |
| SpillRange* spill_range = GetSpillRange(); |
| int index = spill_range->assigned_slot(); |
| return AllocatedOperand(LocationOperand::STACK_SLOT, representation(), index); |
| } |
| |
| |
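| // Detaches the [start, end) part of this range and appends it to the |
| // splinter range, re-linking the remaining intervals and use positions |
| // around the removed region. |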
| void TopLevelLiveRange::Splinter(LifetimePosition start, LifetimePosition end, |
| Zone* zone) { |
| DCHECK(start != Start() || end != End()); |
| DCHECK(start < end); |
| |
| TopLevelLiveRange splinter_temp(-1, representation()); |
| UsePosition* last_in_splinter = nullptr; |
| // Live ranges defined in deferred blocks stay in deferred blocks, so we |
| // don't need to splinter them. That means that start should always be |
| // after the beginning of the range. |
| DCHECK(start > Start()); |
| |
| if (end >= End()) { |
| DCHECK(start > Start()); |
| DetachAt(start, &splinter_temp, zone, ConnectHints); |
| next_ = nullptr; |
| } else { |
| DCHECK(start < End() && Start() < end); |
| |
| const int kInvalidId = std::numeric_limits<int>::max(); |
| |
| UsePosition* last = DetachAt(start, &splinter_temp, zone, ConnectHints); |
| |
| LiveRange end_part(kInvalidId, this->representation(), nullptr); |
| // The last chunk exits the deferred region, and we don't want to connect |
| // hints here, because the non-deferred region shouldn't be affected |
| // by allocation decisions on the deferred path. |
| last_in_splinter = |
| splinter_temp.DetachAt(end, &end_part, zone, DoNotConnectHints); |
| |
| next_ = end_part.next_; |
| last_interval_->set_next(end_part.first_interval_); |
| // The next splinter will happen either at or after the current interval. |
| // We can optimize DetachAt by setting current_interval_ accordingly, |
| // which will then be picked up by FirstSearchIntervalForPosition. |
| current_interval_ = last_interval_; |
| last_interval_ = end_part.last_interval_; |
| |
| if (first_pos_ == nullptr) { |
| first_pos_ = end_part.first_pos_; |
| } else { |
| splitting_pointer_ = last; |
| if (last != nullptr) last->set_next(end_part.first_pos_); |
| } |
| } |
| |
| if (splinter()->IsEmpty()) { |
| splinter()->first_interval_ = splinter_temp.first_interval_; |
| splinter()->last_interval_ = splinter_temp.last_interval_; |
| } else { |
| splinter()->last_interval_->set_next(splinter_temp.first_interval_); |
| splinter()->last_interval_ = splinter_temp.last_interval_; |
| } |
| if (splinter()->first_pos() == nullptr) { |
| splinter()->first_pos_ = splinter_temp.first_pos_; |
| } else { |
| splinter()->last_pos_->set_next(splinter_temp.first_pos_); |
| } |
| if (last_in_splinter != nullptr) { |
| splinter()->last_pos_ = last_in_splinter; |
| } else { |
| if (splinter()->first_pos() != nullptr && |
| splinter()->last_pos_ == nullptr) { |
| splinter()->last_pos_ = splinter()->first_pos(); |
| for (UsePosition* pos = splinter()->first_pos(); pos != nullptr; |
| pos = pos->next()) { |
| splinter()->last_pos_ = pos; |
| } |
| } |
| } |
| #if DEBUG |
| Verify(); |
| splinter()->Verify(); |
| #endif |
| } |
| |
| |
| void TopLevelLiveRange::SetSplinteredFrom(TopLevelLiveRange* splinter_parent) { |
| splintered_from_ = splinter_parent; |
| if (!HasSpillOperand() && splinter_parent->spill_range_ != nullptr) { |
| SetSpillRange(splinter_parent->spill_range_); |
| } |
| } |
| |
| |
| void TopLevelLiveRange::UpdateSpillRangePostMerge(TopLevelLiveRange* merged) { |
| DCHECK(merged->TopLevel() == this); |
| |
| if (HasNoSpillType() && merged->HasSpillRange()) { |
| set_spill_type(merged->spill_type()); |
| DCHECK_LT(0, GetSpillRange()->live_ranges().size()); |
| merged->spill_range_ = nullptr; |
| merged->bits_ = |
| SpillTypeField::update(merged->bits_, SpillType::kNoSpillType); |
| } |
| } |
| |
| |
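| // Re-interleaves the children of |other|, a splinter of this range, back |
| // into this range's chain of children, keeping the chain sorted by start |
| // position and splitting children where the two chains overlap. |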
| void TopLevelLiveRange::Merge(TopLevelLiveRange* other, Zone* zone) { |
| DCHECK(Start() < other->Start()); |
| DCHECK(other->splintered_from() == this); |
| |
| LiveRange* first = this; |
| LiveRange* second = other; |
| DCHECK(first->Start() < second->Start()); |
| while (first != nullptr && second != nullptr) { |
| DCHECK(first != second); |
| // Make sure the ranges are in order each time we iterate. |
| if (second->Start() < first->Start()) { |
| LiveRange* tmp = second; |
| second = first; |
| first = tmp; |
| continue; |
| } |
| |
| if (first->End() <= second->Start()) { |
| if (first->next() == nullptr || |
| first->next()->Start() > second->Start()) { |
| // First is in order before second. |
| LiveRange* temp = first->next(); |
| first->next_ = second; |
| first = temp; |
| } else { |
| // First is in order before its successor (or second), so advance first. |
| first = first->next(); |
| } |
| continue; |
| } |
| |
| DCHECK(first->Start() < second->Start()); |
| // If first and second intersect, split first. |
| if (first->Start() < second->End() && second->Start() < first->End()) { |
| LiveRange* temp = first->SplitAt(second->Start(), zone); |
| CHECK(temp != first); |
| temp->set_spilled(first->spilled()); |
| if (!temp->spilled()) |
| temp->set_assigned_register(first->assigned_register()); |
| |
| first->next_ = second; |
| first = temp; |
| continue; |
| } |
| DCHECK(first->End() <= second->Start()); |
| } |
| |
| TopLevel()->UpdateParentForAllChildren(TopLevel()); |
| TopLevel()->UpdateSpillRangePostMerge(other); |
| TopLevel()->set_has_slot_use(TopLevel()->has_slot_use() || |
| other->has_slot_use()); |
| |
| #if DEBUG |
| Verify(); |
| #endif |
| } |
| |
| |
| void TopLevelLiveRange::VerifyChildrenInOrder() const { |
| LifetimePosition last_end = End(); |
| for (const LiveRange* child = this->next(); child != nullptr; |
| child = child->next()) { |
| DCHECK(last_end <= child->Start()); |
| last_end = child->End(); |
| } |
| } |
| |
| |
| void TopLevelLiveRange::Verify() const { |
| VerifyChildrenInOrder(); |
| for (const LiveRange* child = this; child != nullptr; child = child->next()) { |
| VerifyChildStructure(); |
| } |
| } |
| |
| |
| void TopLevelLiveRange::ShortenTo(LifetimePosition start) { |
| TRACE("Shorten live range %d to [%d\n", vreg(), start.value()); |
| DCHECK_NOT_NULL(first_interval_); |
| DCHECK(first_interval_->start() <= start); |
| DCHECK(start < first_interval_->end()); |
| first_interval_->set_start(start); |
| } |
| |
| |
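| // Guarantees that the range covers at least [start, end) by absorbing any |
| // existing leading intervals that begin no later than |end| and prepending |
| // a single interval that spans the result. |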
| void TopLevelLiveRange::EnsureInterval(LifetimePosition start, |
| LifetimePosition end, Zone* zone) { |
| TRACE("Ensure live range %d in interval [%d %d[\n", vreg(), start.value(), |
| end.value()); |
| LifetimePosition new_end = end; |
| while (first_interval_ != nullptr && first_interval_->start() <= end) { |
| if (first_interval_->end() > end) { |
| new_end = first_interval_->end(); |
| } |
| first_interval_ = first_interval_->next(); |
| } |
| |
| UseInterval* new_interval = new (zone) UseInterval(start, new_end); |
| new_interval->set_next(first_interval_); |
| first_interval_ = new_interval; |
| if (new_interval->next() == nullptr) { |
| last_interval_ = new_interval; |
| } |
| } |
| |
| |
| void TopLevelLiveRange::AddUseInterval(LifetimePosition start, |
| LifetimePosition end, Zone* zone) { |
| TRACE("Add to live range %d interval [%d %d[\n", vreg(), start.value(), |
| end.value()); |
| if (first_interval_ == nullptr) { |
| UseInterval* interval = new (zone) UseInterval(start, end); |
| first_interval_ = interval; |
| last_interval_ = interval; |
| } else { |
| if (end == first_interval_->start()) { |
| first_interval_->set_start(start); |
| } else if (end < first_interval_->start()) { |
| UseInterval* interval = new (zone) UseInterval(start, end); |
| interval->set_next(first_interval_); |
| first_interval_ = interval; |
| } else { |
| // The order in which instructions are processed (see ProcessInstructions) |
| // guarantees that each new use interval either precedes, intersects with, |
| // or touches the last added interval. |
| DCHECK(start <= first_interval_->end()); |
| first_interval_->set_start(Min(start, first_interval_->start())); |
| first_interval_->set_end(Max(end, first_interval_->end())); |
| } |
| } |
| } |
| |
| |
| void TopLevelLiveRange::AddUsePosition(UsePosition* use_pos) { |
| LifetimePosition pos = use_pos->pos(); |
| TRACE("Add to live range %d use position %d\n", vreg(), pos.value()); |
| UsePosition* prev_hint = nullptr; |
| UsePosition* prev = nullptr; |
| UsePosition* current = first_pos_; |
| while (current != nullptr && current->pos() < pos) { |
| prev_hint = current->HasHint() ? current : prev_hint; |
| prev = current; |
| current = current->next(); |
| } |
| |
| if (prev == nullptr) { |
| use_pos->set_next(first_pos_); |
| first_pos_ = use_pos; |
| } else { |
| use_pos->set_next(prev->next()); |
| prev->set_next(use_pos); |
| } |
| |
| if (prev_hint == nullptr && use_pos->HasHint()) { |
| current_hint_position_ = use_pos; |
| } |
| } |
| |
| |
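| // Walks two interval chains, each sorted by start position, in lockstep and |
| // returns true if any interval of one chain overlaps an interval of the |
| // other. |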
| static bool AreUseIntervalsIntersecting(UseInterval* interval1, |
| UseInterval* interval2) { |
| while (interval1 != nullptr && interval2 != nullptr) { |
| if (interval1->start() < interval2->start()) { |
| if (interval1->end() > interval2->start()) { |
| return true; |
| } |
| interval1 = interval1->next(); |
| } else { |
| if (interval2->end() > interval1->start()) { |
| return true; |
| } |
| interval2 = interval2->next(); |
| } |
| } |
| return false; |
| } |
| |
| |
| std::ostream& operator<<(std::ostream& os, |
| const PrintableLiveRange& printable_range) { |
| const LiveRange* range = printable_range.range_; |
| os << "Range: " << range->TopLevel()->vreg() << ":" << range->relative_id() |
| << " "; |
| if (range->TopLevel()->is_phi()) os << "phi "; |
| if (range->TopLevel()->is_non_loop_phi()) os << "nlphi "; |
| |
| os << "{" << std::endl; |
| UseInterval* interval = range->first_interval(); |
| UsePosition* use_pos = range->first_pos(); |
| PrintableInstructionOperand pio; |
| pio.register_configuration_ = printable_range.register_configuration_; |
| while (use_pos != nullptr) { |
| if (use_pos->HasOperand()) { |
| pio.op_ = *use_pos->operand(); |
| os << pio << use_pos->pos() << " "; |
| } |
| use_pos = use_pos->next(); |
| } |
| os << std::endl; |
| |
| while (interval != nullptr) { |
| os << '[' << interval->start() << ", " << interval->end() << ')' |
| << std::endl; |
| interval = interval->next(); |
| } |
| os << "}"; |
| return os; |
| } |
| |
| SpillRange::SpillRange(TopLevelLiveRange* parent, Zone* zone) |
| : live_ranges_(zone), |
| assigned_slot_(kUnassignedSlot), |
| byte_width_(GetByteWidth(parent->representation())) { |
| // Spill ranges are created for top level, non-splintered ranges. This is so |
| // that, when merging decisions are made, we consider the full extent of the |
| // virtual register, and avoid clobbering it. |
| DCHECK(!parent->IsSplinter()); |
| UseInterval* result = nullptr; |
| UseInterval* node = nullptr; |
| // Copy the intervals for all ranges. |
| for (LiveRange* range = parent; range != nullptr; range = range->next()) { |
| UseInterval* src = range->first_interval(); |
| while (src != nullptr) { |
| UseInterval* new_node = new (zone) UseInterval(src->start(), src->end()); |
| if (result == nullptr) { |
| result = new_node; |
| } else { |
| node->set_next(new_node); |
| } |
| node = new_node; |
| src = src->next(); |
| } |
| } |
| use_interval_ = result; |
| live_ranges().push_back(parent); |
| end_position_ = node->end(); |
| parent->SetSpillRange(this); |
| } |
| |
| bool SpillRange::IsIntersectingWith(SpillRange* other) const { |
| if (this->use_interval_ == nullptr || other->use_interval_ == nullptr || |
| this->End() <= other->use_interval_->start() || |
| other->End() <= this->use_interval_->start()) { |
| return false; |
| } |
| return AreUseIntervalsIntersecting(use_interval_, other->use_interval_); |
| } |
| |
| |
| bool SpillRange::TryMerge(SpillRange* other) { |
| if (HasSlot() || other->HasSlot()) return false; |
| if (byte_width() != other->byte_width() || IsIntersectingWith(other)) |
| return false; |
| |
| LifetimePosition max = LifetimePosition::MaxPosition(); |
| if (End() < other->End() && other->End() != max) { |
| end_position_ = other->End(); |
| } |
| other->end_position_ = max; |
| |
| MergeDisjointIntervals(other->use_interval_); |
| other->use_interval_ = nullptr; |
| |
| for (TopLevelLiveRange* range : other->live_ranges()) { |
| DCHECK(range->GetSpillRange() == other); |
| range->SetSpillRange(this); |
| } |
| |
| live_ranges().insert(live_ranges().end(), other->live_ranges().begin(), |
| other->live_ranges().end()); |
| other->live_ranges().clear(); |
| |
| return true; |
| } |
| |
| |
| void SpillRange::MergeDisjointIntervals(UseInterval* other) { |
| UseInterval* tail = nullptr; |
| UseInterval* current = use_interval_; |
| while (other != nullptr) { |
| // Make sure the 'current' list starts first |
| if (current == nullptr || current->start() > other->start()) { |
| std::swap(current, other); |
| } |
| // Check disjointness |
| DCHECK(other == nullptr || current->end() <= other->start()); |
| // Append the 'current' node to the result accumulator and move forward |
| if (tail == nullptr) { |
| use_interval_ = current; |
| } else { |
| tail->set_next(current); |
| } |
| tail = current; |
| current = current->next(); |
| } |
| // Other list is empty => we are done |
| } |
| |
| |
| void SpillRange::Print() const { |
| OFStream os(stdout); |
| os << "{" << std::endl; |
| for (TopLevelLiveRange* range : live_ranges()) { |
| os << range->vreg() << " "; |
| } |
| os << std::endl; |
| |
| for (UseInterval* i = interval(); i != nullptr; i = i->next()) { |
| os << '[' << i->start() << ", " << i->end() << ')' << std::endl; |
| } |
| os << "}" << std::endl; |
| } |
| |
| |
| RegisterAllocationData::PhiMapValue::PhiMapValue(PhiInstruction* phi, |
| const InstructionBlock* block, |
| Zone* zone) |
| : phi_(phi), |
| block_(block), |
| incoming_operands_(zone), |
| assigned_register_(kUnassignedRegister) { |
| incoming_operands_.reserve(phi->operands().size()); |
| } |
| |
| |
| void RegisterAllocationData::PhiMapValue::AddOperand( |
| InstructionOperand* operand) { |
| incoming_operands_.push_back(operand); |
| } |
| |
| |
| void RegisterAllocationData::PhiMapValue::CommitAssignment( |
| const InstructionOperand& assigned) { |
| for (InstructionOperand* operand : incoming_operands_) { |
| InstructionOperand::ReplaceWith(operand, &assigned); |
| } |
| } |
| |
| RegisterAllocationData::RegisterAllocationData( |
| const RegisterConfiguration* config, Zone* zone, Frame* frame, |
| InstructionSequence* code, const char* debug_name) |
| : allocation_zone_(zone), |
| frame_(frame), |
| code_(code), |
| debug_name_(debug_name), |
| config_(config), |
| phi_map_(allocation_zone()), |
| live_in_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()), |
| live_out_sets_(code->InstructionBlockCount(), nullptr, allocation_zone()), |
| live_ranges_(code->VirtualRegisterCount() * 2, nullptr, |
| allocation_zone()), |
| fixed_live_ranges_(this->config()->num_general_registers(), nullptr, |
| allocation_zone()), |
| fixed_float_live_ranges_(allocation_zone()), |
| fixed_double_live_ranges_(this->config()->num_double_registers(), nullptr, |
| allocation_zone()), |
| fixed_simd128_live_ranges_(allocation_zone()), |
| spill_ranges_(code->VirtualRegisterCount(), nullptr, allocation_zone()), |
| delayed_references_(allocation_zone()), |
| assigned_registers_(nullptr), |
| assigned_double_registers_(nullptr), |
| virtual_register_count_(code->VirtualRegisterCount()), |
| preassigned_slot_ranges_(zone) { |
| if (!kSimpleFPAliasing) { |
| fixed_float_live_ranges_.resize(this->config()->num_float_registers(), |
| nullptr); |
| fixed_simd128_live_ranges_.resize(this->config()->num_simd128_registers(), |
| nullptr); |
| } |
| |
| assigned_registers_ = new (code_zone()) |
| BitVector(this->config()->num_general_registers(), code_zone()); |
| assigned_double_registers_ = new (code_zone()) |
| BitVector(this->config()->num_double_registers(), code_zone()); |
| this->frame()->SetAllocatedRegisters(assigned_registers_); |
| this->frame()->SetAllocatedDoubleRegisters(assigned_double_registers_); |
| } |
| |
| |
| MoveOperands* RegisterAllocationData::AddGapMove( |
| int index, Instruction::GapPosition position, |
| const InstructionOperand& from, const InstructionOperand& to) { |
| Instruction* instr = code()->InstructionAt(index); |
| ParallelMove* moves = instr->GetOrCreateParallelMove(position, code_zone()); |
| return moves->AddMove(from, to); |
| } |
| |
| |
| MachineRepresentation RegisterAllocationData::RepresentationFor( |
| int virtual_register) { |
| DCHECK_LT(virtual_register, code()->VirtualRegisterCount()); |
| return code()->GetRepresentation(virtual_register); |
| } |
| |
| |
| TopLevelLiveRange* RegisterAllocationData::GetOrCreateLiveRangeFor(int index) { |
| if (index >= static_cast<int>(live_ranges().size())) { |
| live_ranges().resize(index + 1, nullptr); |
| } |
| TopLevelLiveRange* result = live_ranges()[index]; |
| if (result == nullptr) { |
| result = NewLiveRange(index, RepresentationFor(index)); |
| live_ranges()[index] = result; |
| } |
| return result; |
| } |
| |
| |
| TopLevelLiveRange* RegisterAllocationData::NewLiveRange( |
| int index, MachineRepresentation rep) { |
| return new (allocation_zone()) TopLevelLiveRange(index, rep); |
| } |
| |
| |
| int RegisterAllocationData::GetNextLiveRangeId() { |
| int vreg = virtual_register_count_++; |
| if (vreg >= static_cast<int>(live_ranges().size())) { |
| live_ranges().resize(vreg + 1, nullptr); |
| } |
| return vreg; |
| } |
| |
| |
| TopLevelLiveRange* RegisterAllocationData::NextLiveRange( |
| MachineRepresentation rep) { |
| int vreg = GetNextLiveRangeId(); |
| TopLevelLiveRange* ret = NewLiveRange(vreg, rep); |
| return ret; |
| } |
| |
| |
| RegisterAllocationData::PhiMapValue* RegisterAllocationData::InitializePhiMap( |
| const InstructionBlock* block, PhiInstruction* phi) { |
| RegisterAllocationData::PhiMapValue* map_value = new (allocation_zone()) |
| RegisterAllocationData::PhiMapValue(phi, block, allocation_zone()); |
| auto res = |
| phi_map_.insert(std::make_pair(phi->virtual_register(), map_value)); |
| DCHECK(res.second); |
| USE(res); |
| return map_value; |
| } |
| |
| |
| RegisterAllocationData::PhiMapValue* RegisterAllocationData::GetPhiMapValueFor( |
| int virtual_register) { |
| auto it = phi_map_.find(virtual_register); |
| DCHECK(it != phi_map_.end()); |
| return it->second; |
| } |
| |
| |
| RegisterAllocationData::PhiMapValue* RegisterAllocationData::GetPhiMapValueFor( |
| TopLevelLiveRange* top_range) { |
| return GetPhiMapValueFor(top_range->vreg()); |
| } |
| |
| |
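| // Reports every virtual register that is live on entry to the first block, |
| // i.e. is used before it is ever defined, and returns whether any exist. |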
| bool RegisterAllocationData::ExistsUseWithoutDefinition() { |
| bool found = false; |
| BitVector::Iterator iterator(live_in_sets()[0]); |
| while (!iterator.Done()) { |
| found = true; |
| int operand_index = iterator.Current(); |
| PrintF("Register allocator error: live v%d reached first block.\n", |
| operand_index); |
| LiveRange* range = GetOrCreateLiveRangeFor(operand_index); |
| PrintF(" (first use is at %d)\n", range->first_pos()->pos().value()); |
| if (debug_name() == nullptr) { |
| PrintF("\n"); |
| } else { |
| PrintF(" (function: %s)\n", debug_name()); |
| } |
| iterator.Advance(); |
| } |
| return found; |
| } |
| |
| |
| // If a range is defined in a deferred block, we can expect the entire range |
| // to cover only positions in deferred blocks. Otherwise, a block on the |
| // hot path would be dominated by a deferred block, meaning it would be |
| // unreachable without passing through the deferred block, which is a |
| // contradiction. In particular, when such a range contributes a result back |
| // on the hot path, it does so as one of the inputs of a phi. In that case, |
| // the value is transferred via a move at the Gap::END position of the last |
| // instruction of a deferred block. |
| bool RegisterAllocationData::RangesDefinedInDeferredStayInDeferred() { |
| for (const TopLevelLiveRange* range : live_ranges()) { |
| if (range == nullptr || range->IsEmpty() || |
| !code() |
| ->GetInstructionBlock(range->Start().ToInstructionIndex()) |
| ->IsDeferred()) { |
| continue; |
| } |
| for (const UseInterval* i = range->first_interval(); i != nullptr; |
| i = i->next()) { |
| int first = i->FirstGapIndex(); |
| int last = i->LastGapIndex(); |
| for (int instr = first; instr <= last;) { |
| const InstructionBlock* block = code()->GetInstructionBlock(instr); |
| if (!block->IsDeferred()) return false; |
| instr = block->last_instruction_index() + 1; |
| } |
| } |
| } |
| return true; |
| } |
| |
| SpillRange* RegisterAllocationData::AssignSpillRangeToLiveRange( |
| TopLevelLiveRange* range) { |
| DCHECK(!range->HasSpillOperand()); |
| |
| SpillRange* spill_range = range->GetAllocatedSpillRange(); |
| if (spill_range == nullptr) { |
| DCHECK(!range->IsSplinter()); |
| spill_range = new (allocation_zone()) SpillRange(range, allocation_zone()); |
| } |
| range->set_spill_type(TopLevelLiveRange::SpillType::kSpillRange); |
| |
| int spill_range_index = |
| range->IsSplinter() ? range->splintered_from()->vreg() : range->vreg(); |
| |
| spill_ranges()[spill_range_index] = spill_range; |
| |
| return spill_range; |
| } |
| |
| |
| SpillRange* RegisterAllocationData::CreateSpillRangeForLiveRange( |
| TopLevelLiveRange* range) { |
| DCHECK(!range->HasSpillOperand()); |
| DCHECK(!range->IsSplinter()); |
| SpillRange* spill_range = |
| new (allocation_zone()) SpillRange(range, allocation_zone()); |
| return spill_range; |
| } |
| |
| void RegisterAllocationData::MarkAllocated(MachineRepresentation rep, |
| int index) { |
| switch (rep) { |
| case MachineRepresentation::kFloat32: |
| case MachineRepresentation::kSimd128: |
| if (kSimpleFPAliasing) { |
| assigned_double_registers_->Add(index); |
| } else { |
| int alias_base_index = -1; |
| int aliases = config()->GetAliases( |
| rep, index, MachineRepresentation::kFloat64, &alias_base_index); |
| DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1)); |
| while (aliases--) { |
| int aliased_reg = alias_base_index + aliases; |
| assigned_double_registers_->Add(aliased_reg); |
| } |
| } |
| break; |
| case MachineRepresentation::kFloat64: |
| assigned_double_registers_->Add(index); |
| break; |
| default: |
| DCHECK(!IsFloatingPoint(rep)); |
| assigned_registers_->Add(index); |
| break; |
| } |
| } |
| |
| bool RegisterAllocationData::IsBlockBoundary(LifetimePosition pos) const { |
| return pos.IsFullStart() && |
| code()->GetInstructionBlock(pos.ToInstructionIndex())->code_start() == |
| pos.ToInstructionIndex(); |
| } |
| |
| |
| ConstraintBuilder::ConstraintBuilder(RegisterAllocationData* data) |
| : data_(data) {} |
| |
| |
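| // Replaces |operand| in place with the fixed register, FP register, or |
| // stack slot demanded by its policy, and records a reference map entry for |
| // tagged values when the instruction has a reference map. |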
| InstructionOperand* ConstraintBuilder::AllocateFixed( |
| UnallocatedOperand* operand, int pos, bool is_tagged) { |
| TRACE("Allocating fixed reg for op %d\n", operand->virtual_register()); |
| DCHECK(operand->HasFixedPolicy()); |
| InstructionOperand allocated; |
| MachineRepresentation rep = InstructionSequence::DefaultRepresentation(); |
| int virtual_register = operand->virtual_register(); |
| if (virtual_register != InstructionOperand::kInvalidVirtualRegister) { |
| rep = data()->RepresentationFor(virtual_register); |
| } |
| if (operand->HasFixedSlotPolicy()) { |
| allocated = AllocatedOperand(AllocatedOperand::STACK_SLOT, rep, |
| operand->fixed_slot_index()); |
| } else if (operand->HasFixedRegisterPolicy()) { |
| DCHECK(!IsFloatingPoint(rep)); |
| DCHECK(data()->config()->IsAllocatableGeneralCode( |
| operand->fixed_register_index())); |
| allocated = AllocatedOperand(AllocatedOperand::REGISTER, rep, |
| operand->fixed_register_index()); |
| } else if (operand->HasFixedFPRegisterPolicy()) { |
| DCHECK(IsFloatingPoint(rep)); |
| DCHECK_NE(InstructionOperand::kInvalidVirtualRegister, virtual_register); |
| allocated = AllocatedOperand(AllocatedOperand::REGISTER, rep, |
| operand->fixed_register_index()); |
| } else { |
| UNREACHABLE(); |
| } |
| InstructionOperand::ReplaceWith(operand, &allocated); |
| if (is_tagged) { |
| TRACE("Fixed reg is tagged at %d\n", pos); |
| Instruction* instr = code()->InstructionAt(pos); |
| if (instr->HasReferenceMap()) { |
| instr->reference_map()->RecordReference(*AllocatedOperand::cast(operand)); |
| } |
| } |
| return operand; |
| } |
| |
| |
| void ConstraintBuilder::MeetRegisterConstraints() { |
| for (InstructionBlock* block : code()->instruction_blocks()) { |
| MeetRegisterConstraints(block); |
| } |
| } |
| |
| |
| void ConstraintBuilder::MeetRegisterConstraints(const InstructionBlock* block) { |
| int start = block->first_instruction_index(); |
| int end = block->last_instruction_index(); |
| DCHECK_NE(-1, start); |
| for (int i = start; i <= end; ++i) { |
| MeetConstraintsBefore(i); |
| if (i != end) MeetConstraintsAfter(i); |
| } |
| // Meet register constraints for the last instruction in the block. |
| MeetRegisterConstraintsForLastInstructionInBlock(block); |
| } |
| |
| |
| void ConstraintBuilder::MeetRegisterConstraintsForLastInstructionInBlock( |
| const InstructionBlock* block) { |
| int end = block->last_instruction_index(); |
| Instruction* last_instruction = code()->InstructionAt(end); |
| for (size_t i = 0; i < last_instruction->OutputCount(); i++) { |
| InstructionOperand* output_operand = last_instruction->OutputAt(i); |
| DCHECK(!output_operand->IsConstant()); |
| UnallocatedOperand* output = UnallocatedOperand::cast(output_operand); |
| int output_vreg = output->virtual_register(); |
| TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(output_vreg); |
| bool assigned = false; |
| if (output->HasFixedPolicy()) { |
| AllocateFixed(output, -1, false); |
| // This value is produced on the stack; we never need to spill it. |
| if (output->IsStackSlot()) { |
| DCHECK(LocationOperand::cast(output)->index() < |
| data()->frame()->GetSpillSlotCount()); |
| range->SetSpillOperand(LocationOperand::cast(output)); |
| range->SetSpillStartIndex(end); |
| assigned = true; |
| } |
| |
| for (const RpoNumber& succ : block->successors()) { |
| const InstructionBlock* successor = code()->InstructionBlockAt(succ); |
| DCHECK_EQ(1, successor->PredecessorCount()); |
| int gap_index = successor->first_instruction_index(); |
| // Create an unconstrained operand for the same virtual register |
| // and insert a gap move from the fixed output to the operand. |
| UnallocatedOperand output_copy(UnallocatedOperand::ANY, output_vreg); |
| data()->AddGapMove(gap_index, Instruction::START, *output, output_copy); |
| } |
| } |
| |
| if (!assigned) { |
| for (const RpoNumber& succ : block->successors()) { |
| const InstructionBlock* successor = code()->InstructionBlockAt(succ); |
| DCHECK_EQ(1, successor->PredecessorCount()); |
| int gap_index = successor->first_instruction_index(); |
| range->RecordSpillLocation(allocation_zone(), gap_index, output); |
| range->SetSpillStartIndex(gap_index); |
| } |
| } |
| } |
| } |
| |
| |
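| // Handles fixed temps and constant/fixed outputs of the instruction at |
| // |instr_index|, inserting a gap move from each fixed output to an |
| // unconstrained copy in the following gap and recording spill locations. |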
| void ConstraintBuilder::MeetConstraintsAfter(int instr_index) { |
| Instruction* first = code()->InstructionAt(instr_index); |
| // Handle fixed temporaries. |
| for (size_t i = 0; i < first->TempCount(); i++) { |
| UnallocatedOperand* temp = UnallocatedOperand::cast(first->TempAt(i)); |
| if (temp->HasFixedPolicy()) AllocateFixed(temp, instr_index, false); |
| } |
| // Handle constant/fixed output operands. |
| for (size_t i = 0; i < first->OutputCount(); i++) { |
| InstructionOperand* output = first->OutputAt(i); |
| if (output->IsConstant()) { |
| int output_vreg = ConstantOperand::cast(output)->virtual_register(); |
| TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(output_vreg); |
| range->SetSpillStartIndex(instr_index + 1); |
| range->SetSpillOperand(output); |
| continue; |
| } |
| UnallocatedOperand* first_output = UnallocatedOperand::cast(output); |
| TopLevelLiveRange* range = |
| data()->GetOrCreateLiveRangeFor(first_output->virtual_register()); |
| bool assigned = false; |
| if (first_output->HasFixedPolicy()) { |
| int output_vreg = first_output->virtual_register(); |
| UnallocatedOperand output_copy(UnallocatedOperand::ANY, output_vreg); |
| bool is_tagged = code()->IsReference(output_vreg); |
| if (first_output->HasSecondaryStorage()) { |
| range->MarkHasPreassignedSlot(); |
| data()->preassigned_slot_ranges().push_back( |
| std::make_pair(range, first_output->GetSecondaryStorage())); |
| } |
| AllocateFixed(first_output, instr_index, is_tagged); |
| |
| // This value is produced on the stack; we never need to spill it. |
| if (first_output->IsStackSlot()) { |
| DCHECK(LocationOperand::cast(first_output)->index() < |
| data()->frame()->GetTotalFrameSlotCount()); |
| range->SetSpillOperand(LocationOperand::cast(first_output)); |
| range->SetSpillStartIndex(instr_index + 1); |
| assigned = true; |
| } |
| data()->AddGapMove(instr_index + 1, Instruction::START, *first_output, |
| output_copy); |
| } |
| // Make sure we add a gap move for spilling (if we have not done |
| // so already). |
| if (!assigned) { |
| range->RecordSpillLocation(allocation_zone(), instr_index + 1, |
| first_output); |
| range->SetSpillStartIndex(instr_index + 1); |
| } |
| } |
| } |
| |
| |
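| // Sketch of the "output same as input" rewrite performed below: for |
| //   v2 = op v1, ... |
| // where the first output must reuse the first input's location, the input |
| // operand is renamed to v2 and a gap move |
| //   v2  <-  v1 |
| // is added at the END gap position of the instruction, so v1's value is |
| // copied into whatever location gets assigned to v2. |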
| void ConstraintBuilder::MeetConstraintsBefore(int instr_index) { |
| Instruction* second = code()->InstructionAt(instr_index); |
| // Handle fixed input operands of second instruction. |
| for (size_t i = 0; i < second->InputCount(); i++) { |
| InstructionOperand* input = second->InputAt(i); |
| if (input->IsImmediate() || input->IsExplicit()) { |
| continue; // Ignore immediates and explicitly reserved registers. |
| } |
| UnallocatedOperand* cur_input = UnallocatedOperand::cast(input); |
| if (cur_input->HasFixedPolicy()) { |
| int input_vreg = cur_input->virtual_register(); |
| UnallocatedOperand input_copy(UnallocatedOperand::ANY, input_vreg); |
| bool is_tagged = code()->IsReference(input_vreg); |
| AllocateFixed(cur_input, instr_index, is_tagged); |
| data()->AddGapMove(instr_index, Instruction::END, input_copy, *cur_input); |
| } |
| } |
| // Handle "output same as input" for second instruction. |
| for (size_t i = 0; i < second->OutputCount(); i++) { |
| InstructionOperand* output = second->OutputAt(i); |
| if (!output->IsUnallocated()) continue; |
| UnallocatedOperand* second_output = UnallocatedOperand::cast(output); |
| if (!second_output->HasSameAsInputPolicy()) continue; |
| DCHECK_EQ(0, i); // Only valid for first output. |
| UnallocatedOperand* cur_input = |
| UnallocatedOperand::cast(second->InputAt(0)); |
| int output_vreg = second_output->virtual_register(); |
| int input_vreg = cur_input->virtual_register(); |
| UnallocatedOperand input_copy(UnallocatedOperand::ANY, input_vreg); |
| *cur_input = |
| UnallocatedOperand(*cur_input, second_output->virtual_register()); |
| MoveOperands* gap_move = data()->AddGapMove(instr_index, Instruction::END, |
| input_copy, *cur_input); |
| if (code()->IsReference(input_vreg) && !code()->IsReference(output_vreg)) { |
| if (second->HasReferenceMap()) { |
| RegisterAllocationData::DelayedReference delayed_reference = { |
| second->reference_map(), &gap_move->source()}; |
| data()->delayed_references().push_back(delayed_reference); |
| } |
| } else if (!code()->IsReference(input_vreg) && |
| code()->IsReference(output_vreg)) { |
| // The input is assumed to have a tagged representation already at the |
| // instruction, before the pointer map can be used. That is, the pointer |
| // map at the instruction will include the output operand (whose value |
| // at the beginning of the instruction equals the input operand). If |
| // this is not desired, the pointer map at this instruction needs to be |
| // adjusted manually. |
| } |
| } |
| } |
| |
| |
| void ConstraintBuilder::ResolvePhis() { |
| // Process the blocks in reverse order. |
| for (InstructionBlock* block : base::Reversed(code()->instruction_blocks())) { |
| ResolvePhis(block); |
| } |
| } |
| |
| |
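| // Sketch: a phi such as |
| //   v100 = phi(v101, v102) |
| // in a block with predecessors P1 and P2 is lowered below into the gap moves |
| //   v100 <- v101   at the END gap of P1's last instruction |
| //   v100 <- v102   at the END gap of P2's last instruction |
| // and v100 additionally records a spill location at the start of the block. |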
| void ConstraintBuilder::ResolvePhis(const InstructionBlock* block) { |
| for (PhiInstruction* phi : block->phis()) { |
| int phi_vreg = phi->virtual_register(); |
| RegisterAllocationData::PhiMapValue* map_value = |
| data()->InitializePhiMap(block, phi); |
| InstructionOperand& output = phi->output(); |
| // Map the destination operands, so the commitment phase can find them. |
| for (size_t i = 0; i < phi->operands().size(); ++i) { |
| InstructionBlock* cur_block = |
| code()->InstructionBlockAt(block->predecessors()[i]); |
| UnallocatedOperand input(UnallocatedOperand::ANY, phi->operands()[i]); |
| MoveOperands* move = data()->AddGapMove( |
| cur_block->last_instruction_index(), Instruction::END, input, output); |
| map_value->AddOperand(&move->destination()); |
| DCHECK(!code() |
| ->InstructionAt(cur_block->last_instruction_index()) |
| ->HasReferenceMap()); |
| } |
| TopLevelLiveRange* live_range = data()->GetOrCreateLiveRangeFor(phi_vreg); |
| int gap_index = block->first_instruction_index(); |
| live_range->RecordSpillLocation(allocation_zone(), gap_index, &output); |
| live_range->SetSpillStartIndex(gap_index); |
| // We use the phi-ness of some nodes in some later heuristics. |
| live_range->set_is_phi(true); |
| live_range->set_is_non_loop_phi(!block->IsLoopHeader()); |
| } |
| } |
| |
| |
| LiveRangeBuilder::LiveRangeBuilder(RegisterAllocationData* data, |
| Zone* local_zone) |
| : data_(data), phi_hints_(local_zone) {} |
| |
| |
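| // Informally, the set computed below is |
| //   live_out(B) = union over forward successors S of live_in(S) |
| //                 + the phi inputs in S flowing along the edge B -> S. |
| // Backward (loop) edges are skipped here; ProcessLoopHeader later accounts |
| // for values that are live across the whole loop. |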
| BitVector* LiveRangeBuilder::ComputeLiveOut(const InstructionBlock* block, |
| RegisterAllocationData* data) { |
| size_t block_index = block->rpo_number().ToSize(); |
| BitVector* live_out = data->live_out_sets()[block_index]; |
| if (live_out == nullptr) { |
| // Compute live-out for the given block, ignoring backward successor |
| // edges. |
| Zone* zone = data->allocation_zone(); |
| const InstructionSequence* code = data->code(); |
| |
| live_out = new (zone) BitVector(code->VirtualRegisterCount(), zone); |
| |
| // Process all successor blocks. |
| for (const RpoNumber& succ : block->successors()) { |
| // Add values live on entry to the successor. |
| if (succ <= block->rpo_number()) continue; |
| BitVector* live_in = data->live_in_sets()[succ.ToSize()]; |
| if (live_in != nullptr) live_out->Union(*live_in); |
| |
| // All phi input operands corresponding to this successor edge are live |
| // out from this block. |
| const InstructionBlock* successor = code->InstructionBlockAt(succ); |
| size_t index = successor->PredecessorIndexOf(block->rpo_number()); |
| DCHECK(index < successor->PredecessorCount()); |
| for (PhiInstruction* phi : successor->phis()) { |
| live_out->Add(phi->operands()[index]); |
| } |
| } |
| data->live_out_sets()[block_index] = live_out; |
| } |
| return live_out; |
| } |
| |
| |
| void LiveRangeBuilder::AddInitialIntervals(const InstructionBlock* block, |
| BitVector* live_out) { |
| // Add an interval that includes the entire block to the live range for |
| // each live_out value. |
| LifetimePosition start = LifetimePosition::GapFromInstructionIndex( |
| block->first_instruction_index()); |
| LifetimePosition end = LifetimePosition::InstructionFromInstructionIndex( |
| block->last_instruction_index()) |
| .NextStart(); |
| BitVector::Iterator iterator(live_out); |
| while (!iterator.Done()) { |
| int operand_index = iterator.Current(); |
| TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index); |
| range->AddUseInterval(start, end, allocation_zone()); |
| iterator.Advance(); |
| } |
| } |
| |
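| // FixedFPLiveRangeID packs the fixed FP ranges of the different register |
| // banks into disjoint negative ID spaces below the general-register IDs. |
| // For illustration, with hypothetical counts of 16 general, 16 double and |
| // 32 float registers: |
| //   kFloat64 index i  ->  -(i + 16) - 1 |
| //   kFloat32 index i  ->  -(i + 16 + 16) - 1 |
| //   kSimd128 index i  ->  -(i + 16 + 16 + 32) - 1 |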
| int LiveRangeBuilder::FixedFPLiveRangeID(int index, MachineRepresentation rep) { |
| int result = -index - 1; |
| switch (rep) { |
| case MachineRepresentation::kSimd128: |
| result -= config()->num_float_registers(); |
| // Fall through. |
| case MachineRepresentation::kFloat32: |
| result -= config()->num_double_registers(); |
| // Fall through. |
| case MachineRepresentation::kFloat64: |
| result -= config()->num_general_registers(); |
| break; |
| default: |
| UNREACHABLE(); |
| break; |
| } |
| return result; |
| } |
| |
| TopLevelLiveRange* LiveRangeBuilder::FixedLiveRangeFor(int index) { |
| DCHECK(index < config()->num_general_registers()); |
| TopLevelLiveRange* result = data()->fixed_live_ranges()[index]; |
| if (result == nullptr) { |
| MachineRepresentation rep = InstructionSequence::DefaultRepresentation(); |
| result = data()->NewLiveRange(FixedLiveRangeID(index), rep); |
| DCHECK(result->IsFixed()); |
| result->set_assigned_register(index); |
| data()->MarkAllocated(rep, index); |
| data()->fixed_live_ranges()[index] = result; |
| } |
| return result; |
| } |
| |
| TopLevelLiveRange* LiveRangeBuilder::FixedFPLiveRangeFor( |
| int index, MachineRepresentation rep) { |
| int num_regs = config()->num_double_registers(); |
| ZoneVector<TopLevelLiveRange*>* live_ranges = |
| &data()->fixed_double_live_ranges(); |
| if (!kSimpleFPAliasing) { |
| switch (rep) { |
| case MachineRepresentation::kFloat32: |
| num_regs = config()->num_float_registers(); |
| live_ranges = &data()->fixed_float_live_ranges(); |
| break; |
| case MachineRepresentation::kSimd128: |
| num_regs = config()->num_simd128_registers(); |
| live_ranges = &data()->fixed_simd128_live_ranges(); |
| break; |
| default: |
| break; |
| } |
| } |
| |
| DCHECK(index < num_regs); |
| USE(num_regs); |
| TopLevelLiveRange* result = (*live_ranges)[index]; |
| if (result == nullptr) { |
| result = data()->NewLiveRange(FixedFPLiveRangeID(index, rep), rep); |
| DCHECK(result->IsFixed()); |
| result->set_assigned_register(index); |
| data()->MarkAllocated(rep, index); |
| (*live_ranges)[index] = result; |
| } |
| return result; |
| } |
| |
| TopLevelLiveRange* LiveRangeBuilder::LiveRangeFor(InstructionOperand* operand) { |
| if (operand->IsUnallocated()) { |
| return data()->GetOrCreateLiveRangeFor( |
| UnallocatedOperand::cast(operand)->virtual_register()); |
| } else if (operand->IsConstant()) { |
| return data()->GetOrCreateLiveRangeFor( |
| ConstantOperand::cast(operand)->virtual_register()); |
| } else if (operand->IsRegister()) { |
| return FixedLiveRangeFor( |
| LocationOperand::cast(operand)->GetRegister().code()); |
| } else if (operand->IsFPRegister()) { |
| LocationOperand* op = LocationOperand::cast(operand); |
| return FixedFPLiveRangeFor(op->register_code(), op->representation()); |
| } else { |
| return nullptr; |
| } |
| } |
| |
| |
| UsePosition* LiveRangeBuilder::NewUsePosition(LifetimePosition pos, |
| InstructionOperand* operand, |
| void* hint, |
| UsePositionHintType hint_type) { |
| return new (allocation_zone()) UsePosition(pos, operand, hint, hint_type); |
| } |
| |
| |
| UsePosition* LiveRangeBuilder::Define(LifetimePosition position, |
| InstructionOperand* operand, void* hint, |
| UsePositionHintType hint_type) { |
| TopLevelLiveRange* range = LiveRangeFor(operand); |
| if (range == nullptr) return nullptr; |
| |
| if (range->IsEmpty() || range->Start() > position) { |
| // Can happen if there is a definition without use. |
| range->AddUseInterval(position, position.NextStart(), allocation_zone()); |
| range->AddUsePosition(NewUsePosition(position.NextStart())); |
| } else { |
| range->ShortenTo(position); |
| } |
| if (!operand->IsUnallocated()) return nullptr; |
| UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand); |
| UsePosition* use_pos = |
| NewUsePosition(position, unalloc_operand, hint, hint_type); |
| range->AddUsePosition(use_pos); |
| return use_pos; |
| } |
| |
| |
| UsePosition* LiveRangeBuilder::Use(LifetimePosition block_start, |
| LifetimePosition position, |
| InstructionOperand* operand, void* hint, |
| UsePositionHintType hint_type) { |
| TopLevelLiveRange* range = LiveRangeFor(operand); |
| if (range == nullptr) return nullptr; |
| UsePosition* use_pos = nullptr; |
| if (operand->IsUnallocated()) { |
| UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand); |
| use_pos = NewUsePosition(position, unalloc_operand, hint, hint_type); |
| range->AddUsePosition(use_pos); |
| } |
| range->AddUseInterval(block_start, position, allocation_zone()); |
| return use_pos; |
| } |
| |
| |
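| // ProcessInstructions walks a block backwards. Roughly: an output kills its |
| // virtual register (it is removed from the live set and its range is |
| // shortened to start at the definition), an input makes its virtual register |
| // live (it is added to the live set and its range is extended up to the |
| // use), and gap moves are treated as a definition of the destination plus a |
| // use of the source, which also yields allocation hints. |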
| void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block, |
| BitVector* live) { |
| int block_start = block->first_instruction_index(); |
| LifetimePosition block_start_position = |
| LifetimePosition::GapFromInstructionIndex(block_start); |
| bool fixed_float_live_ranges = false; |
| bool fixed_simd128_live_ranges = false; |
| if (!kSimpleFPAliasing) { |
| int mask = data()->code()->representation_mask(); |
| fixed_float_live_ranges = (mask & kFloatRepBit) != 0; |
| fixed_simd128_live_ranges = (mask & kSimd128RepBit) != 0; |
| } |
| |
| for (int index = block->last_instruction_index(); index >= block_start; |
| index--) { |
| LifetimePosition curr_position = |
| LifetimePosition::InstructionFromInstructionIndex(index); |
| Instruction* instr = code()->InstructionAt(index); |
| DCHECK_NOT_NULL(instr); |
| DCHECK(curr_position.IsInstructionPosition()); |
| // Process output, inputs, and temps of this instruction. |
| for (size_t i = 0; i < instr->OutputCount(); i++) { |
| InstructionOperand* output = instr->OutputAt(i); |
| if (output->IsUnallocated()) { |
| // Unsupported. |
| DCHECK(!UnallocatedOperand::cast(output)->HasSlotPolicy()); |
| int out_vreg = UnallocatedOperand::cast(output)->virtual_register(); |
| live->Remove(out_vreg); |
| } else if (output->IsConstant()) { |
| int out_vreg = ConstantOperand::cast(output)->virtual_register(); |
| live->Remove(out_vreg); |
| } |
| if (block->IsHandler() && index == block_start && output->IsAllocated() && |
| output->IsRegister() && |
| AllocatedOperand::cast(output)->GetRegister() == |
| v8::internal::kReturnRegister0) { |
| // The register defined here is blocked from the gap start onwards: it |
| // holds the exception value. |
| // TODO(mtrofin): should we explore an explicit opcode for |
| // the first instruction in the handler? |
| Define(LifetimePosition::GapFromInstructionIndex(index), output); |
| } else { |
| Define(curr_position, output); |
| } |
| } |
| |
| if (instr->ClobbersRegisters()) { |
| for (int i = 0; i < config()->num_allocatable_general_registers(); ++i) { |
| // Create a UseInterval at this instruction for all fixed registers |
| // (including the instruction outputs). Adding another UseInterval here |
| // is OK because AddUseInterval will just merge it with the existing |
| // one at the end of the range. |
| int code = config()->GetAllocatableGeneralCode(i); |
| TopLevelLiveRange* range = FixedLiveRangeFor(code); |
| range->AddUseInterval(curr_position, curr_position.End(), |
| allocation_zone()); |
| } |
| } |
| |
| if (instr->ClobbersDoubleRegisters()) { |
| for (int i = 0; i < config()->num_allocatable_double_registers(); ++i) { |
| // Add a UseInterval for all DoubleRegisters. See comment above for |
| // general registers. |
| int code = config()->GetAllocatableDoubleCode(i); |
| TopLevelLiveRange* range = |
| FixedFPLiveRangeFor(code, MachineRepresentation::kFloat64); |
| range->AddUseInterval(curr_position, curr_position.End(), |
| allocation_zone()); |
| } |
| // Clobber fixed float registers on archs with non-simple aliasing. |
| if (!kSimpleFPAliasing) { |
| if (fixed_float_live_ranges) { |
| for (int i = 0; i < config()->num_allocatable_float_registers(); |
| ++i) { |
| // Add a UseInterval for all FloatRegisters. See comment above for |
| // general registers. |
| int code = config()->GetAllocatableFloatCode(i); |
| TopLevelLiveRange* range = |
| FixedFPLiveRangeFor(code, MachineRepresentation::kFloat32); |
| range->AddUseInterval(curr_position, curr_position.End(), |
| allocation_zone()); |
| } |
| } |
| if (fixed_simd128_live_ranges) { |
| for (int i = 0; i < config()->num_allocatable_simd128_registers(); |
| ++i) { |
| int code = config()->GetAllocatableSimd128Code(i); |
| TopLevelLiveRange* range = |
| FixedFPLiveRangeFor(code, MachineRepresentation::kSimd128); |
| range->AddUseInterval(curr_position, curr_position.End(), |
| allocation_zone()); |
| } |
| } |
| } |
| } |
| |
| for (size_t i = 0; i < instr->InputCount(); i++) { |
| InstructionOperand* input = instr->InputAt(i); |
| if (input->IsImmediate() || input->IsExplicit()) { |
| continue; // Ignore immediates and explicitly reserved registers. |
| } |
| LifetimePosition use_pos; |
| if (input->IsUnallocated() && |
| UnallocatedOperand::cast(input)->IsUsedAtStart()) { |
| use_pos = curr_position; |
| } else { |
| use_pos = curr_position.End(); |
| } |
| |
| if (input->IsUnallocated()) { |
| UnallocatedOperand* unalloc = UnallocatedOperand::cast(input); |
| int vreg = unalloc->virtual_register(); |
| live->Add(vreg); |
| if (unalloc->HasSlotPolicy()) { |
| data()->GetOrCreateLiveRangeFor(vreg)->set_has_slot_use(true); |
| } |
| } |
| Use(block_start_position, use_pos, input); |
| } |
| |
| for (size_t i = 0; i < instr->TempCount(); i++) { |
| InstructionOperand* temp = instr->TempAt(i); |
| // Unsupported. |
| DCHECK_IMPLIES(temp->IsUnallocated(), |
| !UnallocatedOperand::cast(temp)->HasSlotPolicy()); |
| if (instr->ClobbersTemps()) { |
| if (temp->IsRegister()) continue; |
| if (temp->IsUnallocated()) { |
| UnallocatedOperand* temp_unalloc = UnallocatedOperand::cast(temp); |
| if (temp_unalloc->HasFixedPolicy()) { |
| continue; |
| } |
| } |
| } |
| Use(block_start_position, curr_position.End(), temp); |
| Define(curr_position, temp); |
| } |
| |
| // Process the moves of the instruction's gaps, making their sources live. |
| const Instruction::GapPosition kPositions[] = {Instruction::END, |
| Instruction::START}; |
| curr_position = curr_position.PrevStart(); |
| DCHECK(curr_position.IsGapPosition()); |
| for (const Instruction::GapPosition& position : kPositions) { |
| ParallelMove* move = instr->GetParallelMove(position); |
| if (move == nullptr) continue; |
| if (position == Instruction::END) { |
| curr_position = curr_position.End(); |
| } else { |
| curr_position = curr_position.Start(); |
| } |
| for (MoveOperands* cur : *move) { |
| InstructionOperand& from = cur->source(); |
| InstructionOperand& to = cur->destination(); |
| void* hint = &to; |
| UsePositionHintType hint_type = UsePosition::HintTypeForOperand(to); |
| UsePosition* to_use = nullptr; |
| int phi_vreg = -1; |
| if (to.IsUnallocated()) { |
| int to_vreg = UnallocatedOperand::cast(to).virtual_register(); |
| TopLevelLiveRange* to_range = |
| data()->GetOrCreateLiveRangeFor(to_vreg); |
| if (to_range->is_phi()) { |
| phi_vreg = to_vreg; |
| if (to_range->is_non_loop_phi()) { |
| hint = to_range->current_hint_position(); |
| hint_type = hint == nullptr ? UsePositionHintType::kNone |
| : UsePositionHintType::kUsePos; |
| } else { |
| hint_type = UsePositionHintType::kPhi; |
| hint = data()->GetPhiMapValueFor(to_vreg); |
| } |
| } else { |
| if (live->Contains(to_vreg)) { |
| to_use = Define(curr_position, &to, &from, |
| UsePosition::HintTypeForOperand(from)); |
| live->Remove(to_vreg); |
| } else { |
| cur->Eliminate(); |
| continue; |
| } |
| } |
| } else { |
| Define(curr_position, &to); |
| } |
| UsePosition* from_use = |
| Use(block_start_position, curr_position, &from, hint, hint_type); |
| // Mark range live. |
| if (from.IsUnallocated()) { |
| live->Add(UnallocatedOperand::cast(from).virtual_register()); |
| } |
| // Resolve use position hints just created. |
| if (to_use != nullptr && from_use != nullptr) { |
| to_use->ResolveHint(from_use); |
| from_use->ResolveHint(to_use); |
| } |
| DCHECK_IMPLIES(to_use != nullptr, to_use->IsResolved()); |
| DCHECK_IMPLIES(from_use != nullptr, from_use->IsResolved()); |
| // Potentially resolve phi hint. |
| if (phi_vreg != -1) ResolvePhiHint(&from, from_use); |
| } |
| } |
| } |
| } |
| |
| void LiveRangeBuilder::ProcessPhis(const InstructionBlock* block, |
| BitVector* live) { |
| for (PhiInstruction* phi : block->phis()) { |
| // The live range interval already ends at the first instruction of the |
| // block. |
| int phi_vreg = phi->virtual_register(); |
| live->Remove(phi_vreg); |
| // Select a hint from a predecessor block that precedes this block in the |
| // rpo order. In order of priority: |
| // - Avoid hints from deferred blocks. |
| // - Prefer hints from allocated (or explicit) operands. |
| // - Prefer hints from empty blocks (containing just parallel moves and a |
| // jump). In these cases, if we can elide the moves, the jump threader |
| // is likely to be able to elide the jump. |
| // The enforcement of hinting in rpo order is required because hint |
| // resolution that happens later in the compiler pipeline visits |
| // instructions in reverse rpo order, relying on the fact that phis are |
| // encountered before their hints. |
| InstructionOperand* hint = nullptr; |
| int hint_preference = 0; |
| |
| // The cost of hinting increases with the number of predecessors. At the |
| // same time, the typical benefit decreases, since this hinting only |
| // optimises the execution path through one predecessor. A limit of 2 is |
| // sufficient to hit the common if/else pattern. |
| int predecessor_limit = 2; |
| |
| for (RpoNumber predecessor : block->predecessors()) { |
| const InstructionBlock* predecessor_block = |
| code()->InstructionBlockAt(predecessor); |
| DCHECK_EQ(predecessor_block->rpo_number(), predecessor); |
| |
| // Only take hints from earlier rpo numbers. |
| if (predecessor >= block->rpo_number()) continue; |
| |
| // Look up the predecessor instruction. |
| const Instruction* predecessor_instr = |
| GetLastInstruction(code(), predecessor_block); |
| InstructionOperand* predecessor_hint = nullptr; |
| // Phis are assigned in the END position of the last instruction in each |
| // predecessor block. |
| for (MoveOperands* move : |
| *predecessor_instr->GetParallelMove(Instruction::END)) { |
| InstructionOperand& to = move->destination(); |
| if (to.IsUnallocated() && |
| UnallocatedOperand::cast(to).virtual_register() == phi_vreg) { |
| predecessor_hint = &move->source(); |
| break; |
| } |
| } |
| DCHECK_NOT_NULL(predecessor_hint); |
| |
| // For each predecessor, generate a score according to the priorities |
| // described above, and pick the best one. Flags in higher-order bits have |
| // a higher priority than those in lower-order bits. |
| int predecessor_hint_preference = 0; |
| const int kNotDeferredBlockPreference = (1 << 2); |
| const int kMoveIsAllocatedPreference = (1 << 1); |
| const int kBlockIsEmptyPreference = (1 << 0); |
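| // For example, a non-deferred, non-empty predecessor whose hint comes |
| // from an already-allocated operand scores |
| //   kNotDeferredBlockPreference | kMoveIsAllocatedPreference = 6, |
| // beating a deferred single-instruction predecessor, which only scores |
| //   kBlockIsEmptyPreference = 1. |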
| |
| // - Avoid hints from deferred blocks. |
| if (!predecessor_block->IsDeferred()) { |
| predecessor_hint_preference |= kNotDeferredBlockPreference; |
| } |
| |
| // - Prefer hints from allocated (or explicit) operands. |
| // |
| // Already-allocated or explicit operands are typically assigned using |
| // the parallel moves on the last instruction. For example: |
| // |
| // gap (v101 = [x0|R|w32]) (v100 = v101) |
| // ArchJmp |
| // ... |
| // phi: v100 = v101 v102 |
| // |
| // We have already found the END move, so look for a matching START move |
| // from an allocated (or explicit) operand. |
| // |
| // Note that we cannot simply look up data()->live_ranges()[vreg] here |
| // because the live ranges are still being built when this function is |
| // called. |
| // TODO(v8): Find a way to separate hinting from live range analysis in |
| // BuildLiveRanges so that we can use the O(1) live-range look-up. |
| auto moves = predecessor_instr->GetParallelMove(Instruction::START); |
| if (moves != nullptr) { |
| for (MoveOperands* move : *moves) { |
| InstructionOperand& to = move->destination(); |
| if (predecessor_hint->Equals(to)) { |
| if (move->source().IsAllocated() || move->source().IsExplicit()) { |
| predecessor_hint_preference |= kMoveIsAllocatedPreference; |
| } |
| break; |
| } |
| } |
| } |
| |
| // - Prefer hints from empty blocks. |
| if (predecessor_block->last_instruction_index() == |
| predecessor_block->first_instruction_index()) { |
| predecessor_hint_preference |= kBlockIsEmptyPreference; |
| } |
| |
| if ((hint == nullptr) || |
| (predecessor_hint_preference > hint_preference)) { |
| // Take the hint from this predecessor. |
| hint = predecessor_hint; |
| hint_preference = predecessor_hint_preference; |
| } |
| |
| if (--predecessor_limit <= 0) break; |
| } |
| DCHECK_NOT_NULL(hint); |
| |
| LifetimePosition block_start = LifetimePosition::GapFromInstructionIndex( |
| block->first_instruction_index()); |
| UsePosition* use_pos = Define(block_start, &phi->output(), hint, |
| UsePosition::HintTypeForOperand(*hint)); |
| MapPhiHint(hint, use_pos); |
| } |
| } |
| |
| |
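| // Sketch: every value live into a loop header gets an interval spanning from |
| // the header's first instruction to just past the loop's last instruction, |
| // and is added to the live-in set of every block in the loop, so it cannot |
| // be clobbered anywhere inside the loop. |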
| void LiveRangeBuilder::ProcessLoopHeader(const InstructionBlock* block, |
| BitVector* live) { |
| DCHECK(block->IsLoopHeader()); |
| // Add a live range stretching from the first loop instruction to the last |
| // for each value live on entry to the header. |
| BitVector::Iterator iterator(live); |
| LifetimePosition start = LifetimePosition::GapFromInstructionIndex( |
| block->first_instruction_index()); |
| LifetimePosition end = LifetimePosition::GapFromInstructionIndex( |
| code()->LastLoopInstructionIndex(block)) |
| .NextFullStart(); |
| while (!iterator.Done()) { |
| int operand_index = iterator.Current(); |
| TopLevelLiveRange* range = data()->GetOrCreateLiveRangeFor(operand_index); |
| range->EnsureInterval(start, end, allocation_zone()); |
| iterator.Advance(); |
| } |
| // Insert all values into the live in sets of all blocks in the loop. |
| for (int i = block->rpo_number().ToInt() + 1; i < block->loop_end().ToInt(); |
| ++i) { |
| live_in_sets()[i]->Union(*live); |
| } |
| } |
| |
| |
| void LiveRangeBuilder::BuildLiveRanges() { |
| // Process the blocks in reverse order. |
| for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0; |
| --block_id) { |
| InstructionBlock* block = |
| code()->InstructionBlockAt(RpoNumber::FromInt(block_id)); |
| BitVector* live = ComputeLiveOut(block, data()); |
| // Initially consider all live_out values live for the entire block. We |
| // will shorten these intervals if necessary. |
| AddInitialIntervals(block, live); |
| // Process the instructions in reverse order, generating and killing |
| // live values. |
| ProcessInstructions(block, live); |
| // All phi output operands are killed by this block. |
| ProcessPhis(block, live); |
| // Now live is the live_in set for this block, excluding values that are |
| // live out on backward successor edges. |
| if (block->IsLoopHeader()) ProcessLoopHeader(block, live); |
| live_in_sets()[block_id] = live; |
| } |
| // Postprocess the ranges. |
| for (TopLevelLiveRange* range : data()->live_ranges()) { |
| if (range == nullptr) continue; |
| // Give slots to all ranges with a non-fixed slot use. |
| if (range->has_slot_use() && range->HasNoSpillType()) { |
| data()->AssignSpillRangeToLiveRange(range); |
| } |
| // TODO(bmeurer): This is a horrible hack to make sure that for constant |
| // live ranges, every use requires the constant to be in a register. |
| // Without this hack, all uses with "any" policy would get the constant |
| // operand assigned. |
| if (range->HasSpillOperand() && range->GetSpillOperand()->IsConstant()) { |
| for (UsePosition* pos = range->first_pos(); pos != nullptr; |
| pos = pos->next()) { |
| if (pos->type() == UsePositionType::kRequiresSlot) continue; |
| UsePositionType new_type = UsePositionType::kAny; |
| // Can't mark phis as needing a register. |
| if (!pos->pos().IsGapPosition()) { |
| new_type = UsePositionType::kRequiresRegister; |
| } |
| pos->set_type(new_type, true); |
| } |
| } |
| } |
| for (auto preassigned : data()->preassigned_slot_ranges()) { |
| TopLevelLiveRange* range = preassigned.first; |
| int slot_id = preassigned.second; |
| SpillRange* spill = range->HasSpillRange() |
| ? range->GetSpillRange() |
| : data()->AssignSpillRangeToLiveRange(range); |
| spill->set_assigned_slot(slot_id); |
| } |
| #ifdef DEBUG |
| Verify(); |
| #endif |
| } |
| |
| |
| void LiveRangeBuilder::MapPhiHint(InstructionOperand* operand, |
| UsePosition* use_pos) { |
| DCHECK(!use_pos->IsResolved()); |
| auto res = phi_hints_.insert(std::make_pair(operand, use_pos)); |
| DCHECK(res.second); |
| USE(res); |
| } |
| |
| |
| void LiveRangeBuilder::ResolvePhiHint(InstructionOperand* operand, |
| UsePosition* use_pos) { |
| auto it = phi_hints_.find(operand); |
| if (it == phi_hints_.end()) return; |
| DCHECK(!it->second->IsResolved()); |
| it->second->ResolveHint(use_pos); |
| } |
| |
| |
| void LiveRangeBuilder::Verify() const { |
| for (auto& hint : phi_hints_) { |
| CHECK(hint.second->IsResolved()); |
| } |
| for (const TopLevelLiveRange* current : data()->live_ranges()) { |
| if (current != nullptr && !current->IsEmpty()) { |
| // New LiveRanges should not be split. |
| CHECK_NULL(current->next()); |
| // General integrity check. |
| current->Verify(); |
| const UseInterval* first = current->first_interval(); |
| if (first->next() == nullptr) continue; |
| |
| // Consecutive intervals should not end and start in the same block, |
| // otherwise the intervals should have been joined, because the |
| // variable is live throughout that block. |
| CHECK(NextIntervalStartsInDifferentBlocks(first)); |
| |
| for (const UseInterval* i = first->next(); i != nullptr; i = i->next()) { |
| // Except for the first interval, the other intervals must start at |
| // a block boundary, otherwise data wouldn't flow to them. |
| CHECK(IntervalStartsAtBlockBoundary(i)); |
| // The last instruction of each predecessor of the block in which the |
| // interval starts must be covered by the range. |
| CHECK(IntervalPredecessorsCoveredByRange(i, current)); |
| if (i->next() != nullptr) { |
| // Check the consecutive intervals property, except for the last |
| // interval, where it doesn't apply. |
| CHECK(NextIntervalStartsInDifferentBlocks(i)); |
| } |
| } |
| } |
| } |
| } |
| |
| bool LiveRangeBuilder::IntervalStartsAtBlockBoundary( |
| const UseInterval* interval) const { |
| LifetimePosition start = interval->start(); |
| if (!start.IsFullStart()) return false; |
| int instruction_index = start.ToInstructionIndex(); |
| const InstructionBlock* block = |
| data()->code()->GetInstructionBlock(instruction_index); |
| return block->first_instruction_index() == instruction_index; |
| } |
| |
| bool LiveRangeBuilder::IntervalPredecessorsCoveredByRange( |
| const UseInterval* interval, const TopLevelLiveRange* range) const { |
| LifetimePosition start = interval->start(); |
| int instruction_index = start.ToInstructionIndex(); |
| const InstructionBlock* block = |
| data()->code()->GetInstructionBlock(instruction_index); |
| for (RpoNumber pred_index : block->predecessors()) { |
| const InstructionBlock* predecessor = |
| data()->code()->InstructionBlockAt(pred_index); |
| LifetimePosition last_pos = LifetimePosition::GapFromInstructionIndex( |
| predecessor->last_instruction_index()); |
| last_pos = last_pos.NextStart().End(); |
| if (!range->Covers(last_pos)) return false; |
| } |
| return true; |
| } |
| |
| bool LiveRangeBuilder::NextIntervalStartsInDifferentBlocks( |
| const UseInterval* interval) const { |
| DCHECK_NOT_NULL(interval->next()); |
| LifetimePosition end = interval->end(); |
| LifetimePosition next_start = interval->next()->start(); |
| // Since the end itself is not covered but the previous position is, move |
| // back one position. |
| end = end.IsStart() ? end.PrevStart().End() : end.Start(); |
| int last_covered_index = end.ToInstructionIndex(); |
| const InstructionBlock* block = |
| data()->code()->GetInstructionBlock(last_covered_index); |
| const InstructionBlock* next_block = |
| data()->code()->GetInstructionBlock(next_start.ToInstructionIndex()); |
| return block->rpo_number() < next_block->rpo_number(); |
| } |
| |
| RegisterAllocator::RegisterAllocator(RegisterAllocationData* data, |
| RegisterKind kind) |
| : data_(data), |
| mode_(kind), |
| num_registers_(GetRegisterCount(data->config(), kind)), |
| num_allocatable_registers_( |
| GetAllocatableRegisterCount(data->config(), kind)), |
| allocatable_register_codes_( |
| GetAllocatableRegisterCodes(data->config(), kind)), |
| check_fp_aliasing_(false) { |
| if (!kSimpleFPAliasing && kind == FP_REGISTERS) { |
| check_fp_aliasing_ = (data->code()->representation_mask() & |
| (kFloatRepBit | kSimd128RepBit)) != 0; |
| } |
| } |
| |
| LifetimePosition RegisterAllocator::GetSplitPositionForInstruction( |
| const LiveRange* range, int instruction_index) { |
| LifetimePosition ret = LifetimePosition::Invalid(); |
| |
| ret = LifetimePosition::GapFromInstructionIndex(instruction_index); |
| if (range->Start() >= ret || ret >= range->End()) { |
| return LifetimePosition::Invalid(); |
| } |
| return ret; |
| } |
| |
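| // Policy sketch for a range defined by a memory operand: if it has no use |
| // that needs (or benefits from) a register, spill the whole range; otherwise |
| // split just before the first such use and spill only the leading part, so |
| // the value stays in memory until a register becomes useful. |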
| void RegisterAllocator::SplitAndSpillRangesDefinedByMemoryOperand() { |
| size_t initial_range_count = data()->live_ranges().size(); |
| for (size_t i = 0; i < initial_range_count; ++i) { |
| TopLevelLiveRange* range = data()->live_ranges()[i]; |
| if (!CanProcessRange(range)) continue; |
| if (range->HasNoSpillType() || |
| (range->HasSpillRange() && !range->has_slot_use())) { |
| continue; |
| } |
| LifetimePosition start = range->Start(); |
| TRACE("Live range %d:%d is defined by a spill operand.\n", |
| range->TopLevel()->vreg(), range->relative_id()); |
| LifetimePosition next_pos = start; |
| if (next_pos.IsGapPosition()) { |
| next_pos = next_pos.NextStart(); |
| } |
| |
| // With splinters, we can be more strict and skip over positions |
| // not strictly needing registers. |
| UsePosition* pos = |
| range->IsSplinter() |
| ? range->NextRegisterPosition(next_pos) |
| : range->NextUsePositionRegisterIsBeneficial(next_pos); |
| // If the range already has a spill operand and it doesn't need a |
| // register immediately, split it and spill the first part of the range. |
| if (pos == nullptr) { |
| Spill(range); |
| } else if (pos->pos() > range->Start().NextStart()) { |
| // Do not spill the live range eagerly if the use position that can |
| // benefit from the register is too close to the start of the live range. |
| LifetimePosition split_pos = GetSplitPositionForInstruction( |
| range, pos->pos().ToInstructionIndex()); |
| // There is no place to split, so we can't split and spill. |
| if (!split_pos.IsValid()) continue; |
| |
| split_pos = |
| FindOptimalSplitPos(range->Start().NextFullStart(), split_pos); |
| |
| SplitRangeAt(range, split_pos); |
| Spill(range); |
| } |
| } |
| } |
| |
| |
| LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range, |
| LifetimePosition pos) { |
| DCHECK(!range->TopLevel()->IsFixed()); |
| TRACE("Splitting live range %d:%d at %d\n", range->TopLevel()->vreg(), |
| range->relative_id(), pos.value()); |
| |
| if (pos <= range->Start()) return range; |
| |
| // We can't properly connect live ranges if splitting occurred at the end |
| // of a block. |
| DCHECK(pos.IsStart() || pos.IsGapPosition() || |
| (GetInstructionBlock(code(), pos)->last_instruction_index() != |
| pos.ToInstructionIndex())); |
| |
| LiveRange* result = range->SplitAt(pos, allocation_zone()); |
| return result; |
| } |
| |
| |
| LiveRange* RegisterAllocator::SplitBetween(LiveRange* range, |
| LifetimePosition start, |
| LifetimePosition end) { |
| DCHECK(!range->TopLevel()->IsFixed()); |
| TRACE("Splitting live range %d:%d in position between [%d, %d]\n", |
| range->TopLevel()->vreg(), range->relative_id(), start.value(), |
| end.value()); |
| |
| LifetimePosition split_pos = FindOptimalSplitPos(start, end); |
| DCHECK(split_pos >= start); |
| return SplitRangeAt(range, split_pos); |
| } |
| |
| |
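| // The search below prefers splitting outside loops. Informally: if the |
| // interval [start, end] crosses loop boundaries, split at the header of the |
| // outermost loop entered after the range's start; otherwise split at end. |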
| LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start, |
| LifetimePosition end) { |
| int start_instr = start.ToInstructionIndex(); |
| int end_instr = end.ToInstructionIndex(); |
| DCHECK(start_instr <= end_instr); |
| |
| // We have no choice. |
| if (start_instr == end_instr) return end; |
| |
| const InstructionBlock* start_block = GetInstructionBlock(code(), start); |
| const InstructionBlock* end_block = GetInstructionBlock(code(), end); |
| |
| if (end_block == start_block) { |
| // The interval is split in the same basic block. Split at the latest |
| // possible position. |
| return end; |
| } |
| |
| const InstructionBlock* block = end_block; |
| // Find header of outermost loop. |
| do { |
| const InstructionBlock* loop = GetContainingLoop(code(), block); |
| if (loop == nullptr || |
| loop->rpo_number().ToInt() <= start_block->rpo_number().ToInt()) { |
| // No more loops, or the loop starts before the lifetime start. |
| break; |
| } |
| block = loop; |
| } while (true); |
| |
| // We did not find any suitable outer loop. Split at the latest possible |
| // position unless end_block is a loop header itself. |
| if (block == end_block && !end_block->IsLoopHeader()) return end; |
| |
| return LifetimePosition::GapFromInstructionIndex( |
| block->first_instruction_index()); |
| } |
| |
| |
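| // Heuristic sketch: when spilling inside a loop, hoist the spill position up |
| // to the header of each enclosing loop covered by the range, provided there |
| // is no register-beneficial use between that header and the original |
| // position; this keeps the memory store off the loop's back edge. |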
| LifetimePosition RegisterAllocator::FindOptimalSpillingPos( |
| LiveRange* range, LifetimePosition pos) { |
| const InstructionBlock* block = GetInstructionBlock(code(), pos.Start()); |
| const InstructionBlock* loop_header = |
| block->IsLoopHeader() ? block : GetContainingLoop(code(), block); |
| |
| if (loop_header == nullptr) return pos; |
| |
| const UsePosition* prev_use = |
| range->PreviousUsePositionRegisterIsBeneficial(pos); |
| |
| while (loop_header != nullptr) { |
| // We are going to spill the live range inside the loop. If possible, try |
| // to move the spilling position backwards to the loop header; this |
| // reduces the number of memory moves on the back edge. |
| LifetimePosition loop_start = LifetimePosition::GapFromInstructionIndex( |
| loop_header->first_instruction_index()); |
| |
| if (range->Covers(loop_start)) { |
| if (prev_use == nullptr || prev_use->pos() < loop_start) { |
| // No register-beneficial use inside the loop before the position. |
| pos = loop_start; |
| } |
| } |
| |
| // Try hoisting out to an outer loop. |
| loop_header = GetContainingLoop(code(), loop_header); |
| } |
| |
| return pos; |
| } |
| |
| |
| void RegisterAllocator::Spill(LiveRange* range) { |
| DCHECK(!range->spilled()); |
| TopLevelLiveRange* first = range->TopLevel(); |
| TRACE("Spilling live range %d:%d\n", first->vreg(), range->relative_id()); |
| |
| if (first->HasNoSpillType()) { |
| data()->AssignSpillRangeToLiveRange(first); |
| } |
| range->Spill(); |
| } |
| |
| const char* RegisterAllocator::RegisterName(int register_code) const { |
| if (mode() == GENERAL_REGISTERS) { |
| return data()->config()->GetGeneralRegisterName(register_code); |
| } else { |
| return data()->config()->GetDoubleRegisterName(register_code); |
| } |
| } |
| |
| |
| LinearScanAllocator::LinearScanAllocator(RegisterAllocationData* data, |
| RegisterKind kind, Zone* local_zone) |
| : RegisterAllocator(data, kind), |
| unhandled_live_ranges_(local_zone), |
| active_live_ranges_(local_zone), |
| inactive_live_ranges_(local_zone) { |
| unhandled_live_ranges().reserve( |
| static_cast<size_t>(code()->VirtualRegisterCount() * 2)); |
| active_live_ranges().reserve(8); |
| inactive_live_ranges().reserve(8); |
| // TryAllocateFreeReg and AllocateBlockedReg assume this |
| // when allocating local arrays. |
| DCHECK_GE(RegisterConfiguration::kMaxFPRegisters, |
| this->data()->config()->num_general_registers()); |
| } |
| |
| |
| void LinearScanAllocator::AllocateRegisters() { |
| DCHECK(unhandled_live_ranges().empty()); |
| DCHECK(active_live_ranges().empty()); |
|