blob: 6101811adb849b6a54df6758cd479aff2937bcd6 [file] [log] [blame]
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/IonAnalysis.h"
#include "jit/AliasAnalysis.h"
#include "jit/BaselineInspector.h"
#include "jit/BaselineJIT.h"
#include "jit/Ion.h"
#include "jit/IonBuilder.h"
#include "jit/IonOptimizationLevels.h"
#include "jit/LIR.h"
#include "jit/Lowering.h"
#include "jit/MIRGraph.h"
#include "jsobjinlines.h"
#include "jsopcodeinlines.h"
#include "jsscriptinlines.h"
#include "jit/shared/Lowering-shared-inl.h"
using namespace js;
using namespace js::jit;
using mozilla::DebugOnly;
typedef Vector<MPhi*, 16, SystemAllocPolicy> MPhiVector;
// Mark the phi inputs of |succ| that flow in from |block| (and, transitively,
// through phis that only feed other phis) as having removed uses, but only
// when the phi's value is still observable.  |worklist| is caller-provided
// scratch space that must be empty on entry; it is left empty on success.
// Returns false only on OOM while growing the worklist.
static bool
FlagPhiInputsAsHavingRemovedUses(MBasicBlock* block, MBasicBlock* succ, MPhiVector& worklist)
{
    // When removing an edge between 2 blocks, we might remove the ability of
    // later phases to figure out that the uses of a Phi should be considered as
    // a use of all its inputs. Thus we need to mark the Phi inputs as having
    // removed uses iff the phi has any uses.
    //
    //
    //    +--------------------+         +---------------------+
    //    |12 MFoo 6           |         |32 MBar 5            |
    //    |                    |         |                     |
    //    |   ...              |         |   ...               |
    //    |                    |         |                     |
    //    |25 MGoto Block 4    |         |43 MGoto Block 4     |
    //    +--------------------+         +---------------------+
    //        |                                   |
    //        |                                   |
    //        |                                   |
    //        |      +-----X------------------------+
    //        |          Edge                       |
    //        |          Removed                    |
    //        |                                     |
    //        |       +------------v-----------+
    //        |       |50 MPhi 12 32           |
    //        |       |                        |
    //        |       |   ...                  |
    //        |       |                        |
    //        |       |70 MReturn 50           |
    //        |       +------------------------+
    //        |
    //   - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    //        |
    //        v
    //
    //    ^   +--------------------+         +---------------------+
    //   /!\  |12 MConst opt-out   |         |32 MBar 5            |
    //   '---'|                    |         |                     |
    //        |   ...              |         |   ...               |
    //        |78 MBail            |         |                     |
    //        |80 MUnreachable     |         |43 MGoto Block 4     |
    //        +--------------------+         +---------------------+
    //                                             |
    //                                             |
    //                                             |
    //                              +---------------+
    //                              |
    //                              |
    //                              |
    //                 +------------v-----------+
    //                 |50 MPhi 32              |
    //                 |                        |
    //                 |   ...                  |
    //                 |                        |
    //                 |70 MReturn 50           |
    //                 +------------------------+
    //
    //
    // If the inputs of the Phi are not flagged as having removed uses, then
    // later compilation phase might optimize them out. The problem is that a
    // bailout will use this value and give it back to baseline, which will then
    // use the OptimizedOut magic value in a computation.

    // Conservative upper limit for the number of Phi instructions which are
    // visited while looking for uses.
    const size_t conservativeUsesLimit = 128;

    MOZ_ASSERT(worklist.empty());
    // Index of |block| in the predecessor list of |succ|: selects which phi
    // operand comes from the edge being removed.
    size_t predIndex = succ->getPredecessorIndex(block);
    MPhiIterator end = succ->phisEnd();
    MPhiIterator it = succ->phisBegin();
    for (; it != end; it++) {
        MPhi* phi = *it;

        // We are looking to mark the Phi inputs which are used across the edge
        // between the |block| and its successor |succ|.
        MDefinition* def = phi->getOperand(predIndex);
        if (def->isUseRemoved())
            continue;       // Already flagged; nothing more to learn.

        phi->setInWorklist();
        if (!worklist.append(phi))
            return false;

        // Fill the work list with all the Phi nodes uses until we reach either:
        //  - A resume point which uses the Phi as an observable operand.
        //  - An explicit use of the Phi instruction.
        //  - An implicit use of the Phi instruction.
        bool isUsed = false;
        for (size_t idx = 0; !isUsed && idx < worklist.length(); idx++) {
            phi = worklist[idx];
            MUseIterator usesEnd(phi->usesEnd());
            for (MUseIterator use(phi->usesBegin()); use != usesEnd; use++) {
                MNode* consumer = (*use)->consumer();
                if (consumer->isResumePoint()) {
                    MResumePoint* rp = consumer->toResumePoint();
                    if (rp->isObservableOperand(*use)) {
                        // The phi is observable via a resume point operand.
                        isUsed = true;
                        break;
                    }
                    continue;
                }

                MDefinition* cdef = consumer->toDefinition();
                if (!cdef->isPhi()) {
                    // The phi is explicitly used.
                    isUsed = true;
                    break;
                }

                // The consumer is itself a phi: enqueue it (once) so its own
                // uses get scanned too.
                phi = cdef->toPhi();
                if (phi->isInWorklist())
                    continue;

                if (phi->isUseRemoved() || phi->isImplicitlyUsed()) {
                    // The phi is implicitly used.
                    isUsed = true;
                    break;
                }

                phi->setInWorklist();
                if (!worklist.append(phi))
                    return false;
            }

            // Use a conservative upper bound to avoid iterating too many times
            // on very large graphs.
            if (idx >= conservativeUsesLimit) {
                // Too many phis visited: conservatively assume the value is
                // used rather than scan further.
                isUsed = true;
                break;
            }
        }

        if (isUsed)
            def->setUseRemoved();

        // Remove all the InWorklist flags so the scratch vector is clean for
        // the next phi of |succ|.
        while (!worklist.empty()) {
            phi = worklist.popCopy();
            phi->setNotInWorklist();
        }
    }

    return true;
}
// Flag every operand reachable from |block| — instruction operands, observable
// resume point operands, and phi inputs in the successors — as having removed
// uses, so later passes keep those definitions alive for bailouts.  Returns
// false on OOM.
static bool
FlagAllOperandsAsHavingRemovedUses(MBasicBlock* block)
{
    // Every operand of every instruction in the block gets flagged.
    for (MInstructionIterator insIter = block->begin(), insEnd = block->end();
         insIter != insEnd;
         insIter++)
    {
        MInstruction* ins = *insIter;
        for (size_t opIndex = 0, opCount = ins->numOperands(); opIndex < opCount; opIndex++)
            ins->getOperand(opIndex)->setUseRemovedUnchecked();

        // Same treatment for observable operands of any attached resume
        // point.  (Its caller chain matches the entry resume point's, so it
        // does not need a separate walk here.)
        if (MResumePoint* resume = ins->resumePoint()) {
            for (size_t opIndex = 0, opCount = resume->numOperands(); opIndex < opCount; opIndex++) {
                if (resume->isObservableOperand(opIndex))
                    resume->getOperand(opIndex)->setUseRemovedUnchecked();
            }
        }
    }

    // Flag observable operands of the entry resume point, including those of
    // every caller frame in its chain.
    MResumePoint* resume = block->entryResumePoint();
    do {
        for (size_t opIndex = 0, opCount = resume->numOperands(); opIndex < opCount; opIndex++) {
            if (resume->isObservableOperand(opIndex))
                resume->getOperand(opIndex)->setUseRemovedUnchecked();
        }
        resume = resume->caller();
    } while (resume);

    // Finally, flag the phi inputs flowing from |block| into each successor.
    MPhiVector worklist;
    for (size_t succIndex = 0, succCount = block->numSuccessors(); succIndex < succCount; succIndex++) {
        if (!FlagPhiInputsAsHavingRemovedUses(block, block->getSuccessor(succIndex), worklist))
            return false;
    }

    return true;
}
// Unlink |block| from the predecessor list of each of its live successors.
// Successors already marked dead are left untouched.
static void
RemoveFromSuccessors(MBasicBlock* block)
{
    // Walk the successor list back-to-front, as the original did.
    for (size_t index = block->numSuccessors(); index-- != 0; ) {
        MBasicBlock* succ = block->getSuccessor(index);
        if (succ->isDead())
            continue;
        JitSpew(JitSpew_Prune, "Remove block edge %d -> %d.", block->id(), succ->id());
        succ->removePredecessor(block);
    }
}
// Replace the body of |block| with a bailout taken on first execution,
// terminated by an Unreachable control instruction.  The instructions before
// the safe insertion point (e.g. phis' spill slots handling) are preserved.
static void
ConvertToBailingBlock(TempAllocator& alloc, MBasicBlock* block)
{
    // Add a bailout instruction.
    MBail* bail = MBail::New(alloc, Bailout_FirstExecution);
    MInstruction* bailPoint = block->safeInsertTop();
    // Reuse the cached insertion point instead of recomputing it with a second
    // safeInsertTop() call (the original called it twice for the same value).
    block->insertBefore(bailPoint, bail);

    // Discard all remaining instructions.
    MInstructionIterator clearStart = block->begin(bailPoint);
    block->discardAllInstructionsStartingAt(clearStart);
    if (block->outerResumePoint())
        block->clearOuterResumePoint();

    // And replace the last instruction by the unreachable control instruction.
    block->end(MUnreachable::New(alloc));
}
// Prune branches that baseline hit counts show were never executed, replacing
// them with first-execution bailouts, and remove blocks that thereby become
// unreachable.  Returns false on OOM.
bool
jit::PruneUnusedBranches(MIRGenerator* mir, MIRGraph& graph)
{
    MOZ_ASSERT(!mir->compilingAsmJS(), "AsmJS compilation have no code coverage support.");

    // We do a reverse-post-order traversal, marking basic blocks when the block
    // have to be converted into bailing blocks, and flagging block as
    // unreachable if all predecessors are flagged as bailing or unreachable.
    bool someUnreachable = false;
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
        JitSpew(JitSpew_Prune, "Investigate Block %d:", block->id());

        // Do not touch entry basic blocks.
        if (*block == graph.osrBlock() || *block == graph.entryBlock())
            continue;

        // Compute if all the predecessors of this block are either bailling out
        // or are already flagged as unreachable.
        bool isUnreachable = true;
        bool isLoopHeader = block->isLoopHeader();
        size_t numPred = block->numPredecessors();
        size_t i = 0;
        for (; i < numPred; i++) {
            MBasicBlock* pred = block->getPredecessor(i);

            // The backedge is visited after the loop header, but if the loop
            // header is unreachable, then we can assume that the backedge would
            // be unreachable too.
            if (isLoopHeader && pred == block->backedge())
                continue;

            // Break if any of the predecessor can continue in this block.
            if (!pred->isMarked() && !pred->unreachable()) {
                isUnreachable = false;
                break;
            }
        }

        // Compute if the block should bailout, based on the trivial heuristic
        // which is that if the block never got visited before, then it is
        // likely to not be visited after.
        bool shouldBailout =
            block->getHitState() == MBasicBlock::HitState::Count &&
            block->getHitCount() == 0;

        // Check if the predecessors got accessed a large number of times in
        // comparisons of the current block, in order to know if our attempt at
        // removing this block is not premature.
        if (shouldBailout) {
            size_t p = numPred;
            size_t predCount = 0;
            bool isLoopExit = false;
            while (p--) {
                MBasicBlock* pred = block->getPredecessor(p);
                if (pred->getHitState() == MBasicBlock::HitState::Count)
                    predCount += pred->getHitCount();
                isLoopExit |= pred->isLoopHeader() && pred->backedge() != *block;
            }

            // This assumes that instructions are numbered in sequence, which is
            // the case after IonBuilder creation of basic blocks.
            size_t numInst = block->rbegin()->id() - block->begin()->id();

            // This sum is not homogeneous but gives good results on benchmarks
            // like Kraken and Octane. The current block has not been executed
            // yet, and ...
            //
            //   1. If the number of times the predecessor got executed is
            //      larger, then we are less likely to hit this block.
            //
            //   2. If the block is large, then this is likely a corner case,
            //      and thus we are less likely to hit this block.
            if (predCount + numInst < 75)
                shouldBailout = false;

            // If this is the exit block of a loop, then keep this basic
            // block. This heuristic is useful as a bailout is often much more
            // costly than a simple exit sequence.
            if (isLoopExit)
                shouldBailout = false;
        }

        // Continue to the next basic block if the current basic block can
        // remain unchanged.
        if (!isUnreachable && !shouldBailout)
            continue;

        JitSpewIndent indent(JitSpew_Prune);
        someUnreachable = true;
        if (isUnreachable) {
            JitSpew(JitSpew_Prune, "Mark block %d as unreachable.", block->id());
            block->setUnreachable();
            // If the block is unreachable, then there is no need to convert it
            // to a bailing block.
        } else if (shouldBailout) {
            JitSpew(JitSpew_Prune, "Mark block %d as bailing block.", block->id());
            block->markUnchecked();
        }

        // When removing a loop header, we should ensure that its backedge is
        // removed first, otherwise this triggers an assertion in
        // removePredecessorsWithoutPhiOperands.
        if (block->isLoopHeader()) {
            JitSpew(JitSpew_Prune, "Mark block %d as bailing block. (loop backedge)", block->backedge()->id());
            block->backedge()->markUnchecked();
        }
    }

    // Returns early if nothing changed.
    if (!someUnreachable)
        return true;

    JitSpew(JitSpew_Prune, "Convert basic block to bailing blocks, and remove unreachable blocks:");
    JitSpewIndent indent(JitSpew_Prune);

    // As we are going to remove edges and basic block, we have to mark
    // instructions which would be needed by baseline if we were to bailout.
    for (PostorderIterator it(graph.poBegin()); it != graph.poEnd();) {
        MBasicBlock* block = *it++;
        if (!block->isMarked() && !block->unreachable())
            continue;

        // FlagAllOperandsAsHavingRemovedUses is fallible (its phi worklist can
        // fail to grow on OOM); propagate the failure instead of discarding
        // the return value as the original code did.
        if (!FlagAllOperandsAsHavingRemovedUses(block))
            return false;
    }

    // Remove the blocks in post-order such that consumers are visited before
    // the predecessors, the only exception being the Phi nodes of loop headers.
    for (PostorderIterator it(graph.poBegin()); it != graph.poEnd();) {
        MBasicBlock* block = *it++;
        if (!block->isMarked() && !block->unreachable())
            continue;

        JitSpew(JitSpew_Prune, "Remove / Replace block %d.", block->id());
        JitSpewIndent indent(JitSpew_Prune);

        // As we are going to replace/remove the last instruction, we first have
        // to remove this block from the predecessor list of its successors.
        RemoveFromSuccessors(block);

        // Convert the current basic block to a bailing block which ends with an
        // Unreachable control instruction.
        if (block->isMarked()) {
            JitSpew(JitSpew_Prune, "Convert Block %d to a bailing block.", block->id());
            if (!graph.alloc().ensureBallast())
                return false;
            ConvertToBailingBlock(graph.alloc(), block);
            block->unmark();
        }

        // Remove all instructions.
        if (block->unreachable()) {
            JitSpew(JitSpew_Prune, "Remove Block %d.", block->id());
            JitSpewIndent indent(JitSpew_Prune);
            graph.removeBlock(block);
        }
    }

    return true;
}
// Split each critical edge leaving |block| by interposing a fresh block whose
// only job is to jump to the original target.  Returns false on OOM.
static bool
SplitCriticalEdgesForBlock(MIRGraph& graph, MBasicBlock* block)
{
    // A block with fewer than two successors has no critical outgoing edge.
    if (block->numSuccessors() < 2)
        return true;

    for (size_t succIndex = 0; succIndex < block->numSuccessors(); succIndex++) {
        MBasicBlock* target = block->getSuccessor(succIndex);

        // The edge is critical only if the target also has multiple
        // predecessors.
        if (target->numPredecessors() < 2)
            continue;

        // Create a new block inheriting from the predecessor and terminate it
        // with a plain goto to the original target.
        MBasicBlock* middle = MBasicBlock::NewSplitEdge(graph, block->info(), block);
        if (!middle)
            return false;
        middle->setLoopDepth(block->loopDepth());
        graph.insertBlockAfter(block, middle);
        middle->end(MGoto::New(graph.alloc(), target));

        // The entry resume point won't properly reflect state at the start of
        // the split edge, so remove it. Split edges start out empty, but might
        // have fallible code moved into them later. Rather than immediately
        // figure out a valid resume point and pc we can use for the split edge,
        // we wait until lowering (see LIRGenerator::visitBlock), where this
        // will be easier.
        if (MResumePoint* entryRp = middle->entryResumePoint()) {
            entryRp->releaseUses();
            middle->clearEntryResumePoint();
        }

        // Reroute both ends of the edge through the new block.
        block->replaceSuccessor(succIndex, middle);
        target->replacePredecessor(block, middle);
    }
    return true;
}
// A critical edge is an edge which is neither its successor's only predecessor
// nor its predecessor's only successor. Critical edges must be split to
// prevent copy-insertion and code motion from affecting other edges.
bool
jit::SplitCriticalEdges(MIRGraph& graph)
{
    for (MBasicBlockIterator it(graph.begin()); it != graph.end(); it++) {
        if (!SplitCriticalEdgesForBlock(graph, *it))
            return false;
    }
    return true;
}
// Return whether a block simply computes the specified constant value.
static bool
BlockComputesConstant(MBasicBlock* block, MDefinition* value)
{
    // Look for values with no uses. This is used to eliminate constant
    // computing blocks in condition statements, and the phi which used to
    // consume the constant has already been removed.
    if (value->hasUses())
        return false;

    // |value| must be a constant defined in this very block.
    if (!value->isConstant() || value->block() != block)
        return false;

    if (!block->phisEmpty())
        return false;

    // NOTE(review): as written, this loop returns false unless an instruction
    // is simultaneously |value| AND a goto, which no single instruction can
    // be — so any block actually containing the constant plus its terminating
    // goto fails here, and the function only degrades to "never foldable".
    // This looks like it may have been intended as
    // |*iter != value && !iter->isGoto()| ("block contains nothing but the
    // constant and a goto") — confirm intent against upstream before changing.
    for (MInstructionIterator iter = block->begin(); iter != block->end(); ++iter) {
        if (*iter != value || !iter->isGoto())
            return false;
    }

    return true;
}
// Find phis that are redundant:
//
// 1) phi(a, a)
//      can get replaced by a
//
// 2) phi(filtertypeset(a, type1), filtertypeset(a, type1))
//      equals filtertypeset(a, type1)
//
// 3) phi(a, filtertypeset(a, type1))
//      equals filtertypeset(a, type1 union type(a))
//      equals filtertypeset(a, type(a))
//      equals a
//
// 4) phi(filtertypeset(a, type1), filtertypeset(a, type2))
//      equals filtertypeset(a, type1 union type2)
//
//      This is the special case. We can only replace this with 'a' iff
//      type(a) == type1 union type2. Since optimizations could have
//      happened based on a more specific phi type.
//
// (Note: the "Redudant" typo in the name is kept, as callers elsewhere in
// this file use it.)
static bool
IsPhiRedudantFilter(MPhi* phi)
{
    // Handle (1) and (2): all operands are literally the same definition.
    if (phi->operandIfRedundant())
        return true;

    // Handle (3).  Strip one level of FilterTypeSet from the first operand to
    // find the underlying value |a|; |onlyFilters| tracks whether |a| ever
    // appears unfiltered among the operands.
    bool onlyFilters = false;
    MDefinition* a = phi->getOperand(0);
    if (a->isFilterTypeSet()) {
        a = a->toFilterTypeSet()->input();
        onlyFilters = true;
    }

    for (size_t i = 1; i < phi->numOperands(); i++) {
        MDefinition* operand = phi->getOperand(i);
        if (operand == a) {
            // A bare |a| operand widens the union to type(a) — case (3).
            onlyFilters = false;
            continue;
        }
        if (operand->isFilterTypeSet() && operand->toFilterTypeSet()->input() == a)
            continue;
        // Some operand is unrelated to |a|: the phi is not redundant.
        return false;
    }
    if (!onlyFilters)
        return true;

    // Handle (4): every operand filters |a|, so the phi is redundant only if
    // the union of the filtered types equals the type of |a| itself.
    MOZ_ASSERT(onlyFilters);
    return EqualTypes(a->type(), a->resultTypeSet(),
                      phi->type(), phi->resultTypeSet());
}
// Determine whether phiBlock/testBlock simply compute a phi and perform a
// test on it.  On success, *pphi and *ptest receive the phi and the test;
// on failure they are left null.
static bool
BlockIsSingleTest(MBasicBlock* phiBlock, MBasicBlock* testBlock, MPhi** pphi, MTest** ptest)
{
    *pphi = nullptr;
    *ptest = nullptr;

    if (phiBlock != testBlock) {
        MOZ_ASSERT(phiBlock->numSuccessors() == 1 && phiBlock->getSuccessor(0) == testBlock);
        // When distinct, the phi block may hold nothing but its goto to the
        // test block.
        if (!phiBlock->begin()->isGoto())
            return false;
    }

    // The test block must open with a test of a phi defined in phiBlock.
    MInstruction* ins = *testBlock->begin();
    if (!ins->isTest())
        return false;
    MTest* test = ins->toTest();
    if (!test->input()->isPhi())
        return false;
    MPhi* phi = test->input()->toPhi();
    if (phi->block() != phiBlock)
        return false;

    // The phi may only be consumed by the test itself, or by resume points
    // inside the two blocks that are about to be folded away.
    for (MUseIterator iter = phi->usesBegin(); iter != phi->usesEnd(); ++iter) {
        MUse* use = *iter;
        if (use->consumer() == test)
            continue;
        if (use->consumer()->isResumePoint()) {
            MBasicBlock* useBlock = use->consumer()->block();
            if (useBlock == phiBlock || useBlock == testBlock)
                continue;
        }
        return false;
    }

    // Any other phi in phiBlock must be a redundant filter phi, which the
    // caller (MaybeFoldConditionBlock) knows how to patch up.
    for (MPhiIterator iter = phiBlock->phisBegin(); iter != phiBlock->phisEnd(); ++iter) {
        if (*iter == phi)
            continue;
        if (IsPhiRedudantFilter(*iter))
            continue;
        return false;
    }

    // A distinct test block must itself be phi-free.
    if (phiBlock != testBlock && !testBlock->phisEmpty())
        return false;

    *pphi = phi;
    *ptest = test;

    return true;
}
// Change block so that it ends in a goto to the specific target block.
// existingPred is an existing predecessor of the block; the new edge copies
// its phi inputs.
static void
UpdateGotoSuccessor(TempAllocator& alloc, MBasicBlock* block, MBasicBlock* target,
                    MBasicBlock* existingPred)
{
    // |block| must currently end in a goto.
    MInstruction* last = block->lastIns();
    MOZ_ASSERT(last->isGoto());

    // Detach the old edge and drop the old terminator.
    last->toGoto()->target()->removePredecessor(block);
    block->discardLastIns();

    // Terminate with a fresh goto and register the new edge.
    block->end(MGoto::New(alloc, target));
    target->addPredecessorSameInputsAs(block, existingPred);
}
// Change block so that it ends in a test of the specified value, going to
// either ifTrue or ifFalse. existingPred is an existing predecessor of ifTrue
// or ifFalse with the same values incoming to ifTrue/ifFalse as block.
// existingPred is not required to be a predecessor of ifTrue/ifFalse if block
// already ends in a test going to that block on a true/false result.
static void
UpdateTestSuccessors(TempAllocator& alloc, MBasicBlock* block,
                     MDefinition* value, MBasicBlock* ifTrue, MBasicBlock* ifFalse,
                     MBasicBlock* existingPred)
{
    MInstruction* ins = block->lastIns();
    if (ins->isTest()) {
        // Retarget the existing test in place, patching only the edges whose
        // destination actually changes.  Phi inputs for each new edge are
        // copied from |existingPred|.
        MTest* test = ins->toTest();
        MOZ_ASSERT(test->input() == value);

        if (ifTrue != test->ifTrue()) {
            test->ifTrue()->removePredecessor(block);
            ifTrue->addPredecessorSameInputsAs(block, existingPred);
            MOZ_ASSERT(test->ifTrue() == test->getSuccessor(0));
            test->replaceSuccessor(0, ifTrue);
        }

        if (ifFalse != test->ifFalse()) {
            test->ifFalse()->removePredecessor(block);
            ifFalse->addPredecessorSameInputsAs(block, existingPred);
            MOZ_ASSERT(test->ifFalse() == test->getSuccessor(1));
            test->replaceSuccessor(1, ifFalse);
        }

        return;
    }

    // Otherwise the block ends in a goto: detach that edge, then terminate
    // with a fresh test and register |block| as a predecessor of both targets.
    MOZ_ASSERT(ins->isGoto());
    ins->toGoto()->target()->removePredecessor(block);
    block->discardLastIns();

    MTest* test = MTest::New(alloc, value, ifTrue, ifFalse);
    block->end(test);

    ifTrue->addPredecessorSameInputsAs(block, existingPred);
    ifFalse->addPredecessorSameInputsAs(block, existingPred);
}
// Fold the diamond CFG pattern produced by testing a conditional expression
// ('if (a ? b : c)').  Returns false only on OOM from edge splitting; all
// "pattern does not match" exits return true without modifying the graph.
static bool
MaybeFoldConditionBlock(MIRGraph& graph, MBasicBlock* initialBlock)
{
    // Optimize the MIR graph to improve the code generated for conditional
    // operations. A test like 'if (a ? b : c)' normally requires four blocks,
    // with a phi for the intermediate value. This can be improved to use three
    // blocks with no phi value, and if either b or c is constant,
    // e.g. 'if (a ? b : 0)', then the block associated with that constant
    // can be eliminated.

    /*
     * Look for a diamond pattern:
     *
     *        initialBlock
     *          /     \
     *  trueBranch  falseBranch
     *          \     /
     *          phiBlock
     *             |
     *         testBlock
     *
     * Where phiBlock contains a single phi combining values pushed onto the
     * stack by trueBranch and falseBranch, and testBlock contains a test on
     * that phi. phiBlock and testBlock may be the same block; generated code
     * will use different blocks if the (?:) op is in an inlined function.
     */

    MInstruction* ins = initialBlock->lastIns();
    if (!ins->isTest())
        return true;
    MTest* initialTest = ins->toTest();

    // Both branches must be straight-line, single-entry/single-exit blocks
    // converging on the same phi block with exactly two predecessors.
    MBasicBlock* trueBranch = initialTest->ifTrue();
    if (trueBranch->numPredecessors() != 1 || trueBranch->numSuccessors() != 1)
        return true;
    MBasicBlock* falseBranch = initialTest->ifFalse();
    if (falseBranch->numPredecessors() != 1 || falseBranch->numSuccessors() != 1)
        return true;
    MBasicBlock* phiBlock = trueBranch->getSuccessor(0);
    if (phiBlock != falseBranch->getSuccessor(0))
        return true;
    if (phiBlock->numPredecessors() != 2)
        return true;

    // Folding a loop backedge would corrupt the loop structure.
    if (initialBlock->isLoopBackedge() || trueBranch->isLoopBackedge() || falseBranch->isLoopBackedge())
        return true;

    // Allow phiBlock and testBlock to be distinct (inlined (?:) case).
    MBasicBlock* testBlock = phiBlock;
    if (testBlock->numSuccessors() == 1) {
        if (testBlock->isLoopBackedge())
            return true;
        testBlock = testBlock->getSuccessor(0);
        if (testBlock->numPredecessors() != 1)
            return true;
    }

    // Make sure the test block does not have any outgoing loop backedges.
    if (!SplitCriticalEdgesForBlock(graph, testBlock))
        return false;

    MPhi* phi;
    MTest* finalTest;
    if (!BlockIsSingleTest(phiBlock, testBlock, &phi, &finalTest))
        return true;

    // The phi operand flowing in from each branch.
    MDefinition* trueResult = phi->getOperand(phiBlock->indexForPredecessor(trueBranch));
    MDefinition* falseResult = phi->getOperand(phiBlock->indexForPredecessor(falseBranch));

    // OK, we found the desired pattern, now transform the graph.

    // Patch up phis that filter their input: redirect their uses straight to
    // the underlying value (see IsPhiRedudantFilter).
    for (MPhiIterator iter = phiBlock->phisBegin(); iter != phiBlock->phisEnd(); ++iter) {
        if (*iter == phi)
            continue;

        MOZ_ASSERT(IsPhiRedudantFilter(*iter));
        MDefinition* redundant = (*iter)->operandIfRedundant();

        if (!redundant) {
            redundant = (*iter)->getOperand(0);
            if (redundant->isFilterTypeSet())
                redundant = redundant->toFilterTypeSet()->input();
        }

        (*iter)->replaceAllUsesWith(redundant);
    }

    // Remove the phi from phiBlock.
    phiBlock->discardPhi(*phiBlock->phisBegin());

    // If either trueBranch or falseBranch just computes a constant for the
    // test, determine the block that branch will end up jumping to and eliminate
    // the branch. Otherwise, change the end of the block to a test that jumps
    // directly to successors of testBlock, rather than to testBlock itself.
    MBasicBlock* trueTarget = trueBranch;
    if (BlockComputesConstant(trueBranch, trueResult)) {
        trueTarget = trueResult->constantToBoolean()
                     ? finalTest->ifTrue()
                     : finalTest->ifFalse();
        phiBlock->removePredecessor(trueBranch);
        graph.removeBlock(trueBranch);
    } else if (initialTest->input() == trueResult) {
        // Same condition tested twice: the true branch must go to ifTrue.
        UpdateGotoSuccessor(graph.alloc(), trueBranch, finalTest->ifTrue(), testBlock);
    } else {
        UpdateTestSuccessors(graph.alloc(), trueBranch, trueResult,
                             finalTest->ifTrue(), finalTest->ifFalse(), testBlock);
    }

    MBasicBlock* falseTarget = falseBranch;
    if (BlockComputesConstant(falseBranch, falseResult)) {
        falseTarget = falseResult->constantToBoolean()
                      ? finalTest->ifTrue()
                      : finalTest->ifFalse();
        phiBlock->removePredecessor(falseBranch);
        graph.removeBlock(falseBranch);
    } else if (initialTest->input() == falseResult) {
        // Same condition tested twice: the false branch must go to ifFalse.
        UpdateGotoSuccessor(graph.alloc(), falseBranch, finalTest->ifFalse(), testBlock);
    } else {
        UpdateTestSuccessors(graph.alloc(), falseBranch, falseResult,
                             finalTest->ifTrue(), finalTest->ifFalse(), testBlock);
    }

    // Short circuit the initial test to skip any constant branch eliminated above.
    UpdateTestSuccessors(graph.alloc(), initialBlock, initialTest->input(),
                         trueTarget, falseTarget, testBlock);

    // Remove phiBlock, if different from testBlock.
    if (phiBlock != testBlock) {
        testBlock->removePredecessor(phiBlock);
        graph.removeBlock(phiBlock);
    }

    // Remove testBlock itself.
    finalTest->ifTrue()->removePredecessor(testBlock);
    finalTest->ifFalse()->removePredecessor(testBlock);
    graph.removeBlock(testBlock);

    return true;
}
// Run MaybeFoldConditionBlock over every block in the graph, folding the
// four-block conditional-expression diamond where it occurs.  Returns false
// on OOM.
bool
jit::FoldTests(MIRGraph& graph)
{
    for (MBasicBlockIterator it(graph.begin()); it != graph.end(); it++) {
        MBasicBlock* current = *it;
        if (!MaybeFoldConditionBlock(graph, current))
            return false;
    }
    return true;
}
// If we will pop the top of the stack immediately after resuming, then don't
// preserve the top value in the resume point: replace it with the block's
// optimized-out sentinel constant.
static void
EliminateTriviallyDeadResumePointOperands(MIRGraph& graph, MResumePoint* rp)
{
    // Only applies when resuming at a bytecode which pops the stack top.
    if (rp->mode() != MResumePoint::ResumeAt || *rp->pc() != JSOP_POP)
        return;

    size_t top = rp->stackDepth() - 1;
    MOZ_ASSERT(!rp->isObservableOperand(top));

    // Swapping one constant for another buys nothing.
    MDefinition* current = rp->getOperand(top);
    if (current->isConstant())
        return;

    rp->replaceOperand(top, rp->block()->optimizedOutConstant(graph.alloc()));
}
// Operands to a resume point which are dead at the point of the resume can be
// replaced with a magic value. This analysis supports limited detection of
// dead operands, pruning those which are defined in the resume point's basic
// block and have no uses outside the block or at points later than the resume
// point.
//
// This is intended to ensure that extra resume points within a basic block
// will not artificially extend the lifetimes of any SSA values. This could
// otherwise occur if the new resume point captured a value which is created
// between the old and new resume point and is dead at the new resume point.
bool
jit::EliminateDeadResumePointOperands(MIRGenerator* mir, MIRGraph& graph)
{
    // If we are compiling try blocks, locals and arguments may be observable
    // from catch or finally blocks (which Ion does not compile). For now just
    // disable the pass in this case.
    if (graph.hasTryBlock())
        return true;

    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Eliminate Dead Resume Point Operands (main loop)"))
            return false;

        if (MResumePoint* rp = block->entryResumePoint())
            EliminateTriviallyDeadResumePointOperands(graph, rp);

        // The logic below can get confused on infinite loops.
        if (block->isLoopHeader() && block->backedge() == *block)
            continue;

        for (MInstructionIterator ins = block->begin(); ins != block->end(); ins++) {
            if (MResumePoint* rp = ins->resumePoint())
                EliminateTriviallyDeadResumePointOperands(graph, rp);

            // No benefit to replacing constant operands with other constants.
            if (ins->isConstant())
                continue;

            // Scanning uses does not give us sufficient information to tell
            // where instructions that are involved in box/unbox operations or
            // parameter passing might be live. Rewriting uses of these terms
            // in resume points may affect the interpreter's behavior. Rather
            // than doing a more sophisticated analysis, just ignore these.
            if (ins->isUnbox() || ins->isParameter() || ins->isTypeBarrier() ||
                ins->isComputeThis() || ins->isFilterTypeSet())
            {
                continue;
            }

            // Early intermediate values captured by resume points, such as
            // TypedObject, ArrayState and its allocation, may be legitimately
            // dead in Ion code, but are still needed if we bail out. They can
            // recover on bailout.
            if (ins->isNewDerivedTypedObject() || ins->isRecoveredOnBailout()) {
                MOZ_ASSERT(ins->canRecoverOnBailout());
                continue;
            }

            // If the instruction's behavior has been constant folded into a
            // separate instruction, we can't determine precisely where the
            // instruction becomes dead and can't eliminate its uses.
            if (ins->isImplicitlyUsed() || ins->isUseRemoved())
                continue;

            // Check if this instruction's result is only used within the
            // current block, and keep track of its last use in a definition
            // (not resume point). This requires the instructions in the block
            // to be numbered, ensured by running this immediately after alias
            // analysis.  UINT32_MAX marks "cannot prune" below.
            uint32_t maxDefinition = 0;
            for (MUseIterator uses(ins->usesBegin()); uses != ins->usesEnd(); uses++) {
                MNode* consumer = uses->consumer();
                if (consumer->isResumePoint()) {
                    // If the instruction's is captured by one of the resume point, then
                    // it might be observed indirectly while the frame is live on the
                    // stack, so it has to be computed.
                    MResumePoint* resume = consumer->toResumePoint();
                    if (resume->isObservableOperand(*uses)) {
                        maxDefinition = UINT32_MAX;
                        break;
                    }
                    continue;
                }

                MDefinition* def = consumer->toDefinition();
                if (def->block() != *block || def->isBox() || def->isPhi()) {
                    maxDefinition = UINT32_MAX;
                    break;
                }
                maxDefinition = Max(maxDefinition, def->id());
            }
            if (maxDefinition == UINT32_MAX)
                continue;

            // Walk the uses a second time, removing any in resume points after
            // the last use in a definition.
            for (MUseIterator uses(ins->usesBegin()); uses != ins->usesEnd(); ) {
                MUse* use = *uses++;
                if (use->consumer()->isDefinition())
                    continue;
                MResumePoint* mrp = use->consumer()->toResumePoint();
                if (mrp->block() != *block ||
                    !mrp->instruction() ||
                    mrp->instruction() == *ins ||
                    mrp->instruction()->id() <= maxDefinition)
                {
                    continue;
                }

                // Store an optimized out magic value in place of all dead
                // resume point operands. Making any such substitution can in
                // general alter the interpreter's behavior, even though the
                // code is dead, as the interpreter will still execute opcodes
                // whose effects cannot be observed. If the magic value value
                // were to flow to, say, a dead property access the
                // interpreter could throw an exception; we avoid this problem
                // by removing dead operands before removing dead code.
                MConstant* constant = MConstant::New(graph.alloc(), MagicValue(JS_OPTIMIZED_OUT));
                block->insertBefore(*(block->begin()), constant);
                use->replaceProducer(constant);
            }
        }
    }

    return true;
}
// Test whether |def| would be needed if it had no uses.
bool
js::jit::DeadIfUnused(const MDefinition* def)
{
    // Effectful, guarding, and control instructions must stay regardless of
    // uses; so must instructions holding a resume point.
    if (def->isEffectful() || def->isGuard() || def->isGuardRangeBailouts())
        return false;
    if (def->isControlInstruction())
        return false;
    if (def->isInstruction() && def->toInstruction()->resumePoint())
        return false;
    return true;
}
// Test whether |def| may be safely discarded, due to being dead or due to being
// located in a basic block which has itself been marked for discarding.
bool
js::jit::IsDiscardable(const MDefinition* def)
{
    if (def->hasUses())
        return false;
    return DeadIfUnused(def) || def->block()->isMarked();
}
// Instructions are useless if they are unused and have no side effects.
// This pass eliminates useless instructions.
// The graph itself is unchanged.
bool
jit::EliminateDeadCode(MIRGenerator* mir, MIRGraph& graph)
{
    // Traverse in postorder so that we hit uses before definitions.
    // Traverse instruction list backwards for the same reason.
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Eliminate Dead Code (main loop)"))
            return false;

        // Walk instructions bottom-up, discarding any that are dead; advance
        // the iterator before discarding so it stays valid.
        MInstructionReverseIterator iter = block->rbegin();
        while (iter != block->rend()) {
            MInstruction* inst = *iter++;
            if (js::jit::IsDiscardable(inst))
                block->discard(inst);
        }
    }

    return true;
}
// Return whether removing |phi| could change observable behavior, under the
// given observability policy.
static inline bool
IsPhiObservable(MPhi* phi, Observability observe)
{
    // If the phi has uses which are not reflected in SSA, then behavior in the
    // interpreter may be affected by removing the phi.
    if (phi->isImplicitlyUsed() || phi->isUseRemoved())
        return true;

    // Check for uses of this phi node outside of other phi nodes.
    // Note that, initially, we skip reading resume points, which we
    // don't count as actual uses. If the only uses are resume points,
    // then the SSA name is never consumed by the program. However,
    // after optimizations have been performed, it's possible that the
    // actual uses in the program have been (incorrectly) optimized
    // away, so we must be more conservative and consider resume
    // points as well.
    for (MUseIterator use(phi->usesBegin()); use != phi->usesEnd(); use++) {
        MNode* consumer = use->consumer();
        if (!consumer->isResumePoint()) {
            // A use by any non-phi definition makes the phi observable.
            if (!consumer->toDefinition()->isPhi())
                return true;
            continue;
        }

        // Resume point uses: conservatively observable, or observable when
        // the operand is observable in the resume point itself.
        if (observe == ConservativeObservability)
            return true;
        if (consumer->toResumePoint()->isObservableOperand(*use))
            return true;
    }
    return false;
}
// Handles cases like:
//    x is phi(a, x) --> a
//    x is phi(a, a) --> a
// Returns the replacement definition, or nullptr when the phi is not
// redundant.
static inline MDefinition*
IsPhiRedundant(MPhi* phi)
{
    MDefinition* replacement = phi->operandIfRedundant();
    if (!replacement)
        return nullptr;

    // Propagate the ImplicitlyUsed flag if |phi| is replaced with another phi.
    if (phi->isImplicitlyUsed())
        replacement->setImplicitlyUsedUnchecked();

    return replacement;
}
bool
jit::EliminatePhis(MIRGenerator* mir, MIRGraph& graph,
                   Observability observe)
{
    // Eliminates redundant or unobservable phis from the graph. A
    // redundant phi is something like b = phi(a, a) or b = phi(a, b),
    // both of which can be replaced with a. An unobservable phi is
    // one whose value is never used in the program.
    //
    // Note that we must be careful not to eliminate phis representing
    // values that the interpreter will require later. When the graph
    // is first constructed, we can be more aggressive, because there
    // is a greater correspondence between the CFG and the bytecode.
    // After optimizations such as GVN have been performed, however,
    // the bytecode and CFG may not correspond as closely to one
    // another. In that case, we must be more conservative. The flag
    // |conservativeObservability| is used to indicate that eliminate
    // phis is being run after some optimizations have been performed,
    // and thus we should use more conservative rules about
    // observability. The particular danger is that we can optimize
    // away uses of a phi because we think they are not executable,
    // but the foundation for that assumption is false TI information
    // that will eventually be invalidated. Therefore, if
    // |conservativeObservability| is set, we will consider any use
    // from a resume point to be observable. Otherwise, we demand a
    // use from an actual instruction.

    Vector<MPhi*, 16, SystemAllocPolicy> worklist;

    // Add all observable phis to a worklist. We use the "in worklist" bit to
    // mean "this phi is live".
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Eliminate Phis (populate loop)"))
            return false;

        MPhiIterator iter = block->phisBegin();
        while (iter != block->phisEnd()) {
            // Advance before any potential discardPhi invalidates |iter|.
            MPhi* phi = *iter++;

            // Flag all as unused, only observable phis would be marked as used
            // when processed by the work list.
            phi->setUnused();

            // If the phi is redundant, remove it here.
            if (MDefinition* redundant = IsPhiRedundant(phi)) {
                phi->justReplaceAllUsesWith(redundant);
                block->discardPhi(phi);
                continue;
            }

            // Enqueue observable Phis.
            if (IsPhiObservable(phi, observe)) {
                phi->setInWorklist();
                if (!worklist.append(phi))
                    return false;
            }
        }
    }

    // Iteratively mark all phis reachable from live phis.
    while (!worklist.empty()) {
        if (mir->shouldCancel("Eliminate Phis (worklist)"))
            return false;

        MPhi* phi = worklist.popCopy();
        MOZ_ASSERT(phi->isUnused());
        phi->setNotInWorklist();

        // The removal of Phis can produce newly redundant phis.
        if (MDefinition* redundant = IsPhiRedundant(phi)) {
            // Add to the worklist the used phis which are impacted.
            for (MUseDefIterator it(phi); it; it++) {
                if (it.def()->isPhi()) {
                    MPhi* use = it.def()->toPhi();
                    if (!use->isUnused()) {
                        // Force the user back through the worklist: after the
                        // replacement below its operands have changed.
                        use->setUnusedUnchecked();
                        use->setInWorklist();
                        if (!worklist.append(use))
                            return false;
                    }
                }
            }
            phi->justReplaceAllUsesWith(redundant);
        } else {
            // Otherwise flag them as used.
            phi->setNotUnused();
        }

        // The current phi is/was used, so all its operands are used.
        for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
            MDefinition* in = phi->getOperand(i);
            if (!in->isPhi() || !in->isUnused() || in->isInWorklist())
                continue;
            in->setInWorklist();
            if (!worklist.append(in->toPhi()))
                return false;
        }
    }

    // Sweep dead phis.
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        MPhiIterator iter = block->phisBegin();
        while (iter != block->phisEnd()) {
            MPhi* phi = *iter++;
            if (phi->isUnused()) {
                // Detach the phi from any remaining (resume point) uses
                // before discarding it.
                phi->optimizeOutAllUses(graph.alloc());
                block->discardPhi(phi);
            }
        }
    }

    return true;
}
namespace {

// The type analysis algorithm inserts conversions and box/unbox instructions
// to make the IR graph well-typed for future passes.
//
// Phi adjustment: If a phi's inputs are all the same type, the phi is
// specialized to return that type.
//
// Input adjustment: Each input is asked to apply conversion operations to its
// inputs. This may include Box, Unbox, or other instruction-specific type
// conversion operations.
//
class TypeAnalyzer
{
    MIRGenerator* mir;
    MIRGraph& graph;
    // Phis whose type was (re)specialized and whose users still have to be
    // revisited. Membership is tracked via the phi's InWorklist flag.
    Vector<MPhi*, 0, SystemAllocPolicy> phiWorklist_;

    TempAllocator& alloc() const {
        return graph.alloc();
    }

    // Enqueue |phi| unless it is already on the worklist.
    bool addPhiToWorklist(MPhi* phi) {
        if (phi->isInWorklist())
            return true;
        if (!phiWorklist_.append(phi))
            return false;
        phi->setInWorklist();
        return true;
    }
    // Pop the most recently pushed phi and clear its InWorklist flag.
    MPhi* popPhi() {
        MPhi* phi = phiWorklist_.popCopy();
        phi->setNotInWorklist();
        return phi;
    }

    bool respecialize(MPhi* phi, MIRType type);
    bool propagateSpecialization(MPhi* phi);
    bool specializePhis();
    void replaceRedundantPhi(MPhi* phi);
    void adjustPhiInputs(MPhi* phi);
    bool adjustInputs(MDefinition* def);
    bool insertConversions();

    // Float32 specialization pipeline; see the comment block above
    // markPhiConsumers for the overall algorithm.
    bool checkFloatCoherency();
    bool graphContainsFloat32();
    bool markPhiConsumers();
    bool markPhiProducers();
    bool specializeValidFloatOps();
    bool tryEmitFloatOperations();

  public:
    TypeAnalyzer(MIRGenerator* mir, MIRGraph& graph)
      : mir(mir), graph(graph)
    { }

    // Entry point: runs the whole type analysis over |graph|.
    bool analyze();
};

} /* anonymous namespace */
// Try to specialize this phi based on its non-cyclic inputs.
//
// Returns the common MIRType of the observed operands: MIRType_Double when
// int32 and double mix, MIRType_Float32 when every earlier operand could
// produce a Float32, MIRType_Value on an unreconcilable mix, or MIRType_None
// when nothing is known yet (all inputs are phis that are themselves still
// unspecialized). *hasInputsWithEmptyTypes is set when at least one operand
// was skipped because its typeset is empty (never observed).
static MIRType
GuessPhiType(MPhi* phi, bool* hasInputsWithEmptyTypes)
{
#ifdef DEBUG
    // Check that different magic constants aren't flowing together. Ignore
    // JS_OPTIMIZED_OUT, since an operand could be legitimately optimized
    // away.
    MIRType magicType = MIRType_None;
    for (size_t i = 0; i < phi->numOperands(); i++) {
        MDefinition* in = phi->getOperand(i);
        if (in->type() == MIRType_MagicOptimizedArguments ||
            in->type() == MIRType_MagicHole ||
            in->type() == MIRType_MagicIsConstructing)
        {
            if (magicType == MIRType_None)
                magicType = in->type();
            MOZ_ASSERT(magicType == in->type());
        }
    }
#endif

    *hasInputsWithEmptyTypes = false;

    MIRType type = MIRType_None;
    bool convertibleToFloat32 = false;
    bool hasPhiInputs = false;
    for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
        MDefinition* in = phi->getOperand(i);
        if (in->isPhi()) {
            hasPhiInputs = true;
            if (!in->toPhi()->triedToSpecialize())
                continue;
            if (in->type() == MIRType_None) {
                // The operand is a phi we tried to specialize, but we were
                // unable to guess its type. propagateSpecialization will
                // propagate the type to this phi when it becomes known.
                continue;
            }
        }

        // Ignore operands which we've never observed.
        if (in->resultTypeSet() && in->resultTypeSet()->empty()) {
            *hasInputsWithEmptyTypes = true;
            continue;
        }

        if (type == MIRType_None) {
            // First observed operand: adopt its type.
            type = in->type();
            if (in->canProduceFloat32())
                convertibleToFloat32 = true;
            continue;
        }
        if (type != in->type()) {
            if (convertibleToFloat32 && in->type() == MIRType_Float32) {
                // If we only saw definitions that can be converted into Float32 before and
                // encounter a Float32 value, promote previous values to Float32
                type = MIRType_Float32;
            } else if (IsNumberType(type) && IsNumberType(in->type())) {
                // Specialize phis with int32 and double operands as double.
                type = MIRType_Double;
                convertibleToFloat32 &= in->canProduceFloat32();
            } else {
                return MIRType_Value;
            }
        }
    }

    if (type == MIRType_None && !hasPhiInputs) {
        // All inputs are non-phis with empty typesets. Use MIRType_Value
        // in this case, as it's impossible to get better type information.
        MOZ_ASSERT(*hasInputsWithEmptyTypes);
        type = MIRType_Value;
    }

    return type;
}
bool
TypeAnalyzer::respecialize(MPhi* phi, MIRType type)
{
    // Re-specializing to the current type is a no-op; otherwise update the
    // phi's type and queue it so its users get revisited.
    if (type == phi->type())
        return true;
    phi->specialize(type);
    return addPhiToWorklist(phi);
}
// Push |phi|'s now-known type to every phi that uses it, widening the users'
// types (int32/float32 -> float32, int32/double -> double, otherwise Value)
// and re-enqueueing any user whose type changed.
bool
TypeAnalyzer::propagateSpecialization(MPhi* phi)
{
    MOZ_ASSERT(phi->type() != MIRType_None);

    // Verify that this specialization matches any phis depending on it.
    for (MUseDefIterator iter(phi); iter; iter++) {
        if (!iter.def()->isPhi())
            continue;
        MPhi* use = iter.def()->toPhi();
        if (!use->triedToSpecialize())
            continue;
        if (use->type() == MIRType_None) {
            // We tried to specialize this phi, but were unable to guess its
            // type. Now that we know the type of one of its operands, we can
            // specialize it.
            if (!respecialize(use, phi->type()))
                return false;
            continue;
        }
        if (use->type() != phi->type()) {
            // Specialize phis with int32 that can be converted to float and float operands as floats.
            if ((use->type() == MIRType_Int32 && use->canProduceFloat32() && phi->type() == MIRType_Float32) ||
                (phi->type() == MIRType_Int32 && phi->canProduceFloat32() && use->type() == MIRType_Float32))
            {
                if (!respecialize(use, MIRType_Float32))
                    return false;
                continue;
            }

            // Specialize phis with int32 and double operands as double.
            if (IsNumberType(use->type()) && IsNumberType(phi->type())) {
                if (!respecialize(use, MIRType_Double))
                    return false;
                continue;
            }

            // This phi in our use chain can now no longer be specialized.
            if (!respecialize(use, MIRType_Value))
                return false;
        }
    }

    return true;
}
// Give each phi an initial type guess (GuessPhiType), then run the
// propagation worklist until all phi types stabilize.
bool
TypeAnalyzer::specializePhis()
{
    Vector<MPhi*, 0, SystemAllocPolicy> phisWithEmptyInputTypes;

    for (PostorderIterator block(graph.poBegin()); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Specialize Phis (main loop)"))
            return false;

        for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); phi++) {
            bool hasInputsWithEmptyTypes;
            MIRType type = GuessPhiType(*phi, &hasInputsWithEmptyTypes);
            phi->specialize(type);
            if (type == MIRType_None) {
                // We tried to guess the type but failed because all operands are
                // phis we still have to visit. Set the triedToSpecialize flag but
                // don't propagate the type to other phis, propagateSpecialization
                // will do that once we know the type of one of the operands.

                // Edge case: when this phi has a non-phi input with an empty
                // typeset, it's possible for two phis to have a cyclic
                // dependency and they will both have MIRType_None. Specialize
                // such phis to MIRType_Value later on.
                if (hasInputsWithEmptyTypes && !phisWithEmptyInputTypes.append(*phi))
                    return false;
                continue;
            }
            if (!propagateSpecialization(*phi))
                return false;
        }
    }

    do {
        // Drain the propagation worklist first.
        while (!phiWorklist_.empty()) {
            if (mir->shouldCancel("Specialize Phis (worklist)"))
                return false;

            MPhi* phi = popPhi();
            if (!propagateSpecialization(phi))
                return false;
        }

        // When two phis have a cyclic dependency and inputs that have an empty
        // typeset (which are ignored by GuessPhiType), we may still have to
        // specialize these to MIRType_Value.
        while (!phisWithEmptyInputTypes.empty()) {
            if (mir->shouldCancel("Specialize Phis (phisWithEmptyInputTypes)"))
                return false;

            MPhi* phi = phisWithEmptyInputTypes.popCopy();
            if (phi->type() == MIRType_None) {
                phi->specialize(MIRType_Value);
                if (!propagateSpecialization(phi))
                    return false;
            }
        }
    } while (!phiWorklist_.empty());

    return true;
}
// Rewrite |phi|'s operands so that every input matches the phi's specialized
// type, inserting conversions (MToDouble/MToFloat32), boxes, or fallible
// unboxes at the end of the predecessor blocks as needed. For Value-typed
// phis, every typed input is boxed instead.
void
TypeAnalyzer::adjustPhiInputs(MPhi* phi)
{
    MIRType phiType = phi->type();
    MOZ_ASSERT(phiType != MIRType_None);

    // If we specialized a type that's not Value, there are 3 cases:
    // 1. Every input is of that type.
    // 2. Every observed input is of that type (i.e., some inputs haven't been executed yet).
    // 3. Inputs were doubles and int32s, and was specialized to double.
    if (phiType != MIRType_Value) {
        for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
            MDefinition* in = phi->getOperand(i);
            if (in->type() == phiType)
                continue;

            if (in->isBox() && in->toBox()->input()->type() == phiType) {
                // The input is a box whose payload already has the right
                // type: use the payload directly.
                phi->replaceOperand(i, in->toBox()->input());
            } else {
                MInstruction* replacement;

                if (phiType == MIRType_Double && IsFloatType(in->type())) {
                    // Convert int32 or float32 operands to double.
                    replacement = MToDouble::New(alloc(), in);
                } else if (phiType == MIRType_Float32) {
                    if (in->type() == MIRType_Int32 || in->type() == MIRType_Double) {
                        replacement = MToFloat32::New(alloc(), in);
                    } else {
                        // See comment below
                        if (in->type() != MIRType_Value) {
                            MBox* box = MBox::New(alloc(), in);
                            in->block()->insertBefore(in->block()->lastIns(), box);
                            in = box;
                        }

                        MUnbox* unbox = MUnbox::New(alloc(), in, MIRType_Double, MUnbox::Fallible);
                        in->block()->insertBefore(in->block()->lastIns(), unbox);
                        // Feed the unboxed double into the conversion. The
                        // previous code passed the boxed |in| here, which left
                        // the fallible unbox dead and handed a Value-typed
                        // input to MToFloat32.
                        replacement = MToFloat32::New(alloc(), unbox);
                    }
                } else {
                    // If we know this branch will fail to convert to phiType,
                    // insert a box that'll immediately fail in the fallible unbox
                    // below.
                    if (in->type() != MIRType_Value) {
                        MBox* box = MBox::New(alloc(), in);
                        in->block()->insertBefore(in->block()->lastIns(), box);
                        in = box;
                    }

                    // Be optimistic and insert unboxes when the operand is a
                    // value.
                    replacement = MUnbox::New(alloc(), in, phiType, MUnbox::Fallible);
                }

                in->block()->insertBefore(in->block()->lastIns(), replacement);
                phi->replaceOperand(i, replacement);
            }
        }

        return;
    }

    // Box every typed input.
    for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
        MDefinition* in = phi->getOperand(i);
        if (in->type() == MIRType_Value)
            continue;

        if (in->isUnbox() && phi->typeIncludes(in->toUnbox()->input())) {
            // The input is being explicitly unboxed, so sneak past and grab
            // the original box.
            phi->replaceOperand(i, in->toUnbox()->input());
        } else {
            MDefinition* box = AlwaysBoxAt(alloc(), in->block()->lastIns(), in);
            phi->replaceOperand(i, box);
        }
    }
}
bool
TypeAnalyzer::adjustInputs(MDefinition* def)
{
    // Only instructions carry a type policy; phis and other definition kinds
    // are handled elsewhere, so there is nothing to do for them.
    if (!def->isInstruction())
        return true;

    MInstruction* ins = def->toInstruction();
    TypePolicy* policy = ins->typePolicy();
    return !policy || policy->adjustInputs(alloc(), ins);
}
void
TypeAnalyzer::replaceRedundantPhi(MPhi* phi)
{
    // A phi of a singleton value type always produces the same value, so it
    // can be replaced wholesale by the corresponding constant.
    MBasicBlock* block = phi->block();
    js::Value value;
    switch (phi->type()) {
      case MIRType_Undefined:
        value = UndefinedValue();
        break;
      case MIRType_Null:
        value = NullValue();
        break;
      case MIRType_MagicOptimizedArguments:
        value = MagicValue(JS_OPTIMIZED_ARGUMENTS);
        break;
      case MIRType_MagicOptimizedOut:
        value = MagicValue(JS_OPTIMIZED_OUT);
        break;
      case MIRType_MagicUninitializedLexical:
        value = MagicValue(JS_UNINITIALIZED_LEXICAL);
        break;
      default:
        MOZ_CRASH("unexpected type");
    }
    MConstant* constant = MConstant::New(alloc(), value);
    // Place the constant at the top of the block; the conversion pass will
    // insert a box where one is required.
    block->insertBefore(*(block->begin()), constant);
    phi->justReplaceAllUsesWith(constant);
}
bool
TypeAnalyzer::insertConversions()
{
    // Instructions are processed in reverse postorder: all defs are seen
    // before their uses. This ensures that output adjustment (which may
    // rewrite inputs of uses) does not conflict with input adjustment.
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
        if (mir->shouldCancel("Insert Conversions"))
            return false;

        for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd()); iter != end; ) {
            MPhi* phi = *iter++;
            // Phis of a singleton value type carry no information: replace
            // them with the corresponding constant and drop the phi.
            if (phi->type() == MIRType_Undefined ||
                phi->type() == MIRType_Null ||
                phi->type() == MIRType_MagicOptimizedArguments ||
                phi->type() == MIRType_MagicOptimizedOut ||
                phi->type() == MIRType_MagicUninitializedLexical)
            {
                replaceRedundantPhi(phi);
                block->discardPhi(phi);
            } else {
                adjustPhiInputs(phi);
            }
        }

        // AdjustInputs can add/remove/mutate instructions before and after the
        // current instruction. Only increment the iterator after it is finished.
        for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++) {
            if (!adjustInputs(*iter))
                return false;
        }
    }
    return true;
}
// This function tries to emit Float32 specialized operations whenever it's possible.
// MIR nodes are flagged as:
// - Producers, when they can create Float32 that might need to be coerced into a Double.
// Loads in Float32 arrays and conversions to Float32 are producers.
// - Consumers, when they can have Float32 as inputs and validate a legal use of a Float32.
// Stores in Float32 arrays and conversions to Float32 are consumers.
// - Float32 commutative, when using the Float32 instruction instead of the Double instruction
// does not result in a compound loss of precision. This is the case for +, -, /, * with 2
// operands, for instance. However, an addition with 3 operands is not commutative anymore,
// so an intermediate coercion is needed.
// Except for phis, all these flags are known after Ion building, so they cannot change during
// the process.
//
// The idea behind the algorithm is easy: whenever we can prove that a commutative operation
// has only producers as inputs and consumers as uses, we can specialize the operation as a
// float32 operation. Otherwise, we have to convert all float32 inputs to doubles. Even
// if a lot of conversions are produced, GVN will take care of eliminating the redundant ones.
//
// Phis have a special status. Phis need to be flagged as producers or consumers as they can
// be inputs or outputs of commutative instructions. Fortunately, producers and consumers
// properties are such that we can deduce the property using all non phis inputs first (which form
// an initial phi graph) and then propagate all properties from one phi to another using a
// fixed point algorithm. The algorithm is ensured to terminate as each iteration has less or as
// many flagged phis as the previous iteration (so the worst steady state case is all phis being
// flagged as false).
//
// In a nutshell, the algorithm applies three passes:
// 1 - Determine which phis are consumers. Each phi gets an initial value by making a global AND on
// all its non-phi uses. Then each phi propagates its value to other phis. If after propagation,
// the flag value changed, we have to reapply the algorithm on all phi operands, as a phi is a
// consumer if all of its uses are consumers.
// 2 - Determine which phis are producers. It's the same algorithm, except that we have to reapply
// the algorithm on all phi uses, as a phi is a producer if all of its operands are producers.
// 3 - Go through all commutative operations and ensure their inputs are all producers and their
// uses are all consumers.
// Pass 1 of the Float32 analysis: decide, for every phi, whether all of its
// uses can consume a Float32 value. Each phi starts with the AND over its
// non-phi uses; a fixed point then invalidates any phi that has a phi use
// which is not a consumer, and the invalidation flows up through the phi's
// operands.
bool
TypeAnalyzer::markPhiConsumers()
{
    MOZ_ASSERT(phiWorklist_.empty());

    // Iterate in postorder so worklist is initialized to RPO.
    for (PostorderIterator block(graph.poBegin()); block != graph.poEnd(); ++block) {
        if (mir->shouldCancel("Ensure Float32 commutativity - Consumer Phis - Initial state"))
            return false;

        for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); ++phi) {
            MOZ_ASSERT(!phi->isInWorklist());
            bool canConsumeFloat32 = true;
            // Phi uses are optimistically treated as consumers here; the
            // fixed point below corrects any wrong guesses.
            for (MUseDefIterator use(*phi); canConsumeFloat32 && use; use++) {
                MDefinition* usedef = use.def();
                canConsumeFloat32 &= usedef->isPhi() || usedef->canConsumeFloat32(use.use());
            }
            phi->setCanConsumeFloat32(canConsumeFloat32);
            if (canConsumeFloat32 && !addPhiToWorklist(*phi))
                return false;
        }
    }

    while (!phiWorklist_.empty()) {
        if (mir->shouldCancel("Ensure Float32 commutativity - Consumer Phis - Fixed point"))
            return false;

        MPhi* phi = popPhi();
        MOZ_ASSERT(phi->canConsumeFloat32(nullptr /* unused */));

        bool validConsumer = true;
        for (MUseDefIterator use(phi); use; use++) {
            MDefinition* def = use.def();
            if (def->isPhi() && !def->canConsumeFloat32(use.use())) {
                validConsumer = false;
                break;
            }
        }

        if (validConsumer)
            continue;

        // Propagate invalidated phis
        phi->setCanConsumeFloat32(false);
        for (size_t i = 0, e = phi->numOperands(); i < e; ++i) {
            MDefinition* input = phi->getOperand(i);
            if (input->isPhi() && !input->isInWorklist() && input->canConsumeFloat32(nullptr /* unused */))
            {
                if (!addPhiToWorklist(input->toPhi()))
                    return false;
            }
        }
    }
    return true;
}
// Pass 2 of the Float32 analysis: the dual of markPhiConsumers. A phi is a
// producer when all of its operands can produce Float32; invalidation here
// flows down through the phi's uses instead of its operands.
bool
TypeAnalyzer::markPhiProducers()
{
    MOZ_ASSERT(phiWorklist_.empty());

    // Iterate in reverse postorder so worklist is initialized to PO.
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); ++block) {
        if (mir->shouldCancel("Ensure Float32 commutativity - Producer Phis - initial state"))
            return false;

        for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); ++phi) {
            MOZ_ASSERT(!phi->isInWorklist());
            bool canProduceFloat32 = true;
            // Phi operands are optimistically treated as producers here; the
            // fixed point below corrects any wrong guesses.
            for (size_t i = 0, e = phi->numOperands(); canProduceFloat32 && i < e; ++i) {
                MDefinition* input = phi->getOperand(i);
                canProduceFloat32 &= input->isPhi() || input->canProduceFloat32();
            }
            phi->setCanProduceFloat32(canProduceFloat32);
            if (canProduceFloat32 && !addPhiToWorklist(*phi))
                return false;
        }
    }

    while (!phiWorklist_.empty()) {
        if (mir->shouldCancel("Ensure Float32 commutativity - Producer Phis - Fixed point"))
            return false;

        MPhi* phi = popPhi();
        MOZ_ASSERT(phi->canProduceFloat32());

        bool validProducer = true;
        for (size_t i = 0, e = phi->numOperands(); i < e; ++i) {
            MDefinition* input = phi->getOperand(i);
            if (input->isPhi() && !input->canProduceFloat32()) {
                validProducer = false;
                break;
            }
        }

        if (validProducer)
            continue;

        // Propagate invalidated phis
        phi->setCanProduceFloat32(false);
        for (MUseDefIterator use(phi); use; use++) {
            MDefinition* def = use.def();
            if (def->isPhi() && !def->isInWorklist() && def->canProduceFloat32())
            {
                if (!addPhiToWorklist(def->toPhi()))
                    return false;
            }
        }
    }
    return true;
}
bool
TypeAnalyzer::specializeValidFloatOps()
{
    // Pass 3 of the Float32 analysis: visit every commutative instruction not
    // already typed as Float32 and let it try to respecialize itself.
    // trySpecializeFloat32 only succeeds when all of the instruction's inputs
    // are producers and all of its uses are consumers.
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); ++block) {
        if (mir->shouldCancel("Ensure Float32 commutativity - Instructions"))
            return false;

        for (MInstructionIterator ins(block->begin()); ins != block->end(); ++ins) {
            if (!ins->isFloat32Commutative() || ins->type() == MIRType_Float32)
                continue;

            ins->trySpecializeFloat32(alloc());
        }
    }
    return true;
}
bool
TypeAnalyzer::graphContainsFloat32()
{
    // Scan all definitions and stop at the first one typed as Float32.
    // Cancellation is reported as "not found"; the cancel flag is observed
    // again by a later pass.
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); ++block) {
        if (mir->shouldCancel("Ensure Float32 commutativity - Graph contains Float32"))
            return false;

        for (MDefinitionIterator def(*block); def; def++) {
            if (def->type() == MIRType_Float32)
                return true;
        }
    }
    return false;
}
bool
TypeAnalyzer::tryEmitFloatOperations()
{
    // Asm.js uses the ahead of time type checks to specialize operations, no need to check
    // them again at this point.
    if (mir->compilingAsmJS())
        return true;

    // The whole pass is pointless unless some definition in the graph is
    // already typed as Float32.
    if (!graphContainsFloat32())
        return true;

    // Run the three phases in order; each returns false only on OOM or
    // cancellation, which aborts the remaining phases.
    return markPhiConsumers() &&
           markPhiProducers() &&
           specializeValidFloatOps();
}
// Debug-only sanity check: after the Float32 passes, every Float32-typed
// definition must flow only into uses that are consistent with Float32.
// In release builds this is a no-op that always succeeds.
bool
TypeAnalyzer::checkFloatCoherency()
{
#ifdef DEBUG
    // Asserts that all Float32 instructions are flowing into Float32 consumers or specialized
    // operations
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); ++block) {
        if (mir->shouldCancel("Check Float32 coherency"))
            return false;

        for (MDefinitionIterator def(*block); def; def++) {
            if (def->type() != MIRType_Float32)
                continue;

            for (MUseDefIterator use(*def); use; use++) {
                MDefinition* consumer = use.def();
                MOZ_ASSERT(consumer->isConsistentFloat32Use(use.use()));
            }
        }
    }
#endif
    return true;
}
bool
TypeAnalyzer::analyze()
{
    // Run the analysis phases in order; the first failure (OOM or
    // cancellation) aborts the rest, matching the short-circuit here.
    return tryEmitFloatOperations() &&
           specializePhis() &&
           insertConversions() &&
           checkFloatCoherency();
}
bool
jit::ApplyTypeInformation(MIRGenerator* mir, MIRGraph& graph)
{
    // Run the full type-analysis pass over the graph.
    TypeAnalyzer analyzer(mir, graph);
    return analyzer.analyze();
}
bool
jit::MakeMRegExpHoistable(MIRGraph& graph)
{
for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
for (MDefinitionIterator iter(*block); iter; iter++) {
if (!iter->isRegExp())
continue;
MRegExp* regexp = iter->toRegExp();
// Test if MRegExp is hoistable by looking at all uses.
bool hoistable = true;
for (MUseIterator i = regexp->usesBegin(); i != regexp->usesEnd(); i++) {
// Ignore resume points. At this point all uses are listed.
// No DCE or GVN or something has happened.
if (i->consumer()->isResumePoint())
continue;
MOZ_ASSERT(i->consumer()->isDefinition());
// All MRegExp* MIR's don't adjust the regexp.
MDefinition* use = i->consumer()->toDefinition();
if (use->isRegExpReplace())
continue;
if (use->isRegExpExec())
continue;
if (use->isRegExpTest())
continue;
hoistable = false;
break;
}
if (!hoistable)
continue;
// Make MRegExp hoistable
regexp->setMovable();
// That would be incorrect for global/sticky, because lastIndex could be wrong.
// Therefore setting the lastIndex to 0. That is faster than a not movable regexp.
RegExpObject* source = regexp->source();
if (source->sticky() || source->global()) {
MOZ_ASSERT(regexp->mustClone());
MConstant* zero = MConstant::New(graph.alloc(), Int32Value(0));
regexp->block()->insertAfter(regexp, zero);
MStoreFixedSlot* lastIndex =
MStoreFixedSlot::New(graph.alloc(), regexp, RegExpObject::lastIndexSlot(), zero);
regexp->block()->insertAfter(zero, lastIndex);
}
}
}
return true;
}
bool
jit::RenumberBlocks(MIRGraph& graph)
{
    // Assign consecutive block ids following the reverse-postorder layout.
    size_t nextId = 0;
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); ++block)
        block->setId(nextId++);

    return true;
}
// A utility for code which deletes blocks. Renumber the remaining blocks,
// recompute dominators, and optionally recompute AliasAnalysis dependencies.
bool
jit::AccountForCFGChanges(MIRGenerator* mir, MIRGraph& graph, bool updateAliasAnalysis)
{
    // Reassign block ids in RPO and drop the now-stale dominator information.
    size_t nextId = 0;
    for (ReversePostorderIterator block(graph.rpoBegin()), end(graph.rpoEnd()); block != end; ++block) {
        block->clearDominatorInfo();
        block->setId(nextId++);
    }

    // Rebuild the dominator tree from scratch.
    if (!BuildDominatorTree(graph))
        return false;

    // If requested, rerun alias analysis on the updated graph.
    if (updateAliasAnalysis && !AliasAnalysis(mir, graph).analyze())
        return false;

    AssertExtendedGraphCoherency(graph);
    return true;
}
// Remove all blocks not marked with isMarked(). Unmark all remaining blocks.
// Alias analysis dependencies may be invalid after calling this function.
bool
jit::RemoveUnmarkedBlocks(MIRGenerator* mir, MIRGraph& graph, uint32_t numMarkedBlocks)
{
    if (numMarkedBlocks == graph.numBlocks()) {
        // If all blocks are marked, no blocks need removal. Just clear the
        // marks. We'll still need to update the dominator tree below though,
        // since we may have removed edges even if we didn't remove any blocks.
        graph.unmarkBlocks();
    } else {
        // Find unmarked blocks and remove them.
        for (ReversePostorderIterator iter(graph.rpoBegin()); iter != graph.rpoEnd();) {
            // Advance before a potential removal invalidates |iter|.
            MBasicBlock* block = *iter++;

            if (block->isMarked()) {
                block->unmark();
                continue;
            }

            // The block is unreachable. Clear out the loop header flag, as
            // we're doing the sweep of a mark-and-sweep here, so we no longer
            // need to worry about whether an unmarked block is a loop or not.
            if (block->isLoopHeader())
                block->clearLoopHeader();

            // Detach the block from its successors' predecessor lists before
            // deleting it (and its phis) from the graph.
            for (size_t i = 0, e = block->numSuccessors(); i != e; ++i)
                block->getSuccessor(i)->removePredecessor(block);
            graph.removeBlockIncludingPhis(block);
        }
    }

    // Renumber the blocks and update the dominator tree.
    return AccountForCFGChanges(mir, graph, /*updateAliasAnalysis=*/false);
}
// A Simple, Fast Dominance Algorithm by Cooper et al.
// Modified to support empty intersections for OSR, and in RPO.
//
// Walks both fingers up the (partially-built) dominator tree until they meet;
// the meeting point is the nearest common dominator of |block1| and |block2|.
static MBasicBlock*
IntersectDominators(MBasicBlock* block1, MBasicBlock* block2)
{
    MBasicBlock* finger1 = block1;
    MBasicBlock* finger2 = block2;

    MOZ_ASSERT(finger1);
    MOZ_ASSERT(finger2);

    // In the original paper, the block ID comparisons are on the postorder index.
    // This implementation iterates in RPO, so the comparisons are reversed.

    // For this function to be called, the block must have multiple predecessors.
    // If a finger is then found to be self-dominating, it must therefore be
    // reachable from multiple roots through non-intersecting control flow.
    // nullptr is returned in this case, to denote an empty intersection.

    while (finger1->id() != finger2->id()) {
        // Raise whichever finger sits deeper (larger RPO id) toward the root.
        while (finger1->id() > finger2->id()) {
            MBasicBlock* idom = finger1->immediateDominator();
            if (idom == finger1)
                return nullptr; // Empty intersection.
            finger1 = idom;
        }

        while (finger2->id() > finger1->id()) {
            MBasicBlock* idom = finger2->immediateDominator();
            if (idom == finger2)
                return nullptr; // Empty intersection.
            finger2 = idom;
        }
    }
    return finger1;
}
void
jit::ClearDominatorTree(MIRGraph& graph)
{
    // Reset per-block dominator information across the whole graph.
    for (MBasicBlockIterator block = graph.begin(); block != graph.end(); ++block)
        block->clearDominatorInfo();
}
// Compute each block's immediate dominator by iterating the Cooper-Harvey-
// Kennedy dataflow equations over the blocks in RPO until a fixed point.
// Roots (the entry block and any OSR block) self-dominate, as do blocks
// reachable from multiple roots through non-intersecting control flow.
static void
ComputeImmediateDominators(MIRGraph& graph)
{
    // The default start block is a root and therefore only self-dominates.
    MBasicBlock* startBlock = graph.entryBlock();
    startBlock->setImmediateDominator(startBlock);

    // Any OSR block is a root and therefore only self-dominates.
    MBasicBlock* osrBlock = graph.osrBlock();
    if (osrBlock)
        osrBlock->setImmediateDominator(osrBlock);

    bool changed = true;

    while (changed) {
        changed = false;

        ReversePostorderIterator block = graph.rpoBegin();

        // For each block in RPO, intersect all dominators.
        for (; block != graph.rpoEnd(); block++) {
            // If a node has once been found to have no exclusive dominator,
            // it will never have an exclusive dominator, so it may be skipped.
            if (block->immediateDominator() == *block)
                continue;

            // A block with no predecessors is not reachable from any entry, so
            // it self-dominates.
            if (MOZ_UNLIKELY(block->numPredecessors() == 0)) {
                block->setImmediateDominator(*block);
                continue;
            }

            MBasicBlock* newIdom = block->getPredecessor(0);

            // Find the first common dominator.
            for (size_t i = 1; i < block->numPredecessors(); i++) {
                MBasicBlock* pred = block->getPredecessor(i);
                // Skip predecessors that have not been processed yet.
                if (pred->immediateDominator() == nullptr)
                    continue;

                newIdom = IntersectDominators(pred, newIdom);

                // If there is no common dominator, the block self-dominates.
                if (newIdom == nullptr) {
                    block->setImmediateDominator(*block);
                    changed = true;
                    break;
                }
            }

            if (newIdom && block->immediateDominator() != newIdom) {
                block->setImmediateDominator(newIdom);
                changed = true;
            }
        }
    }

#ifdef DEBUG
    // Assert that all blocks have dominator information.
    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
        MOZ_ASSERT(block->immediateDominator() != nullptr);
    }
#endif
}
// Build the full dominator tree: compute immediate dominators, accumulate
// the per-block dominated-block counts, and assign each block its pre-order
// index within the tree. Returns false on OOM.
bool
jit::BuildDominatorTree(MIRGraph& graph)
{
    ComputeImmediateDominators(graph);

    Vector<MBasicBlock*, 4, JitAllocPolicy> worklist(graph.alloc());

    // Traversing through the graph in post-order means that every non-phi use
    // of a definition is visited before the def itself. Since a def
    // dominates its uses, by the time we reach a particular
    // block, we have processed all of its dominated children, so
    // block->numDominated() is accurate.
    for (PostorderIterator i(graph.poBegin()); i != graph.poEnd(); i++) {
        MBasicBlock* child = *i;
        MBasicBlock* parent = child->immediateDominator();

        // Dominance is defined such that blocks always dominate themselves.
        child->addNumDominated(1);

        // If the block only self-dominates, it has no definite parent.
        // Add it to the worklist as a root for pre-order traversal.
        // This includes all roots. Order does not matter.
        if (child == parent) {
            if (!worklist.append(child))
                return false;
            continue;
        }

        if (!parent->addImmediatelyDominatedBlock(child))
            return false;

        // Fold the child's subtree size into the parent's count.
        parent->addNumDominated(child->numDominated());
    }

#ifdef DEBUG
    // If compiling with OSR, many blocks will self-dominate.
    // Without OSR, there is only one root block which dominates all.
    if (!graph.osrBlock())
        MOZ_ASSERT(graph.entryBlock()->numDominated() == graph.numBlocks());
#endif
    // Now, iterate through the dominator tree in pre-order and annotate every
    // block with its index in the traversal.
    size_t index = 0;
    while (!worklist.empty()) {
        MBasicBlock* block = worklist.popCopy();
        block->setDomIndex(index);

        if (!worklist.append(block->immediatelyDominatedBlocksBegin(),
                             block->immediatelyDominatedBlocksEnd())) {
            return false;
        }
        index++;
    }

    return true;
}
bool
jit::BuildPhiReverseMapping(MIRGraph& graph)
{
    // Build a mapping such that given a basic block, whose successor has one or
    // more phis, we can find our specific input to that phi. To make this fast
    // mapping work we rely on a specific property of our structured control
    // flow graph: For a block with phis, its predecessors each have only one
    // successor with phis. Consider each case:
    // * Blocks with less than two predecessors cannot have phis.
    // * Breaks. A break always has exactly one successor, and the break
    //   catch block has exactly one predecessor for each break, as
    //   well as a final predecessor for the actual loop exit.
    // * Continues. A continue always has exactly one successor, and the
    //   continue catch block has exactly one predecessor for each
    //   continue, as well as a final predecessor for the actual
    //   loop continuation. The continue itself has exactly one
    //   successor.
    // * An if. Each branch as exactly one predecessor.
    // * A switch. Each branch has exactly one predecessor.
    // * Loop tail. A new block is always created for the exit, and if a
    //   break statement is present, the exit block will forward
    //   directly to the break block.
    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
        if (block->phisEmpty())
            continue;

        // Assert on the above.
        for (size_t j = 0; j < block->numPredecessors(); j++) {
            MBasicBlock* pred = block->getPredecessor(j);

#ifdef DEBUG
            size_t numSuccessorsWithPhis = 0;
            for (size_t k = 0; k < pred->numSuccessors(); k++) {
                MBasicBlock* successor = pred->getSuccessor(k);
                if (!successor->phisEmpty())
                    numSuccessorsWithPhis++;
            }
            MOZ_ASSERT(numSuccessorsWithPhis <= 1);
#endif

            // Record on the predecessor which phi-bearing successor it feeds,
            // and at which phi-operand position (|j|).
            pred->setSuccessorWithPhis(*block, j);
        }
    }

    return true;
}
#ifdef DEBUG
static bool
CheckSuccessorImpliesPredecessor(MBasicBlock* A, MBasicBlock* B)
{
    // Given that B is a successor of A, check that A appears in B's
    // predecessor list.
    size_t numPreds = B->numPredecessors();
    for (size_t i = 0; i < numPreds; ++i) {
        if (B->getPredecessor(i) == A)
            return true;
    }
    return false;
}
static bool
CheckPredecessorImpliesSuccessor(MBasicBlock* A, MBasicBlock* B)
{
    // Given that B is a predecessor of A, check that A appears in B's
    // successor list.
    size_t numSuccs = B->numSuccessors();
    for (size_t i = 0; i < numSuccs; ++i) {
        if (B->getSuccessor(i) == A)
            return true;
    }
    return false;
}
// If you have issues with the usesBalance assertions, then define the macro
// _DEBUG_CHECK_OPERANDS_USES_BALANCE to spew information on the error output.
// This output can then be processed with the following awk script to filter and
// highlight which checks are missing or if there is an unexpected operand /
// use.
//
// define _DEBUG_CHECK_OPERANDS_USES_BALANCE 1
/*
$ ./js 2>stderr.log
$ gawk '
/^==Check/ { context = ""; state = $2; }
/^[a-z]/ { context = context "\n\t" $0; }
/^==End/ {
if (state == "Operand") {
list[context] = list[context] - 1;
} else if (state == "Use") {
list[context] = list[context] + 1;
}
}
END {
for (ctx in list) {
if (list[ctx] > 0) {
print "Missing operand check", ctx, "\n"
}
if (list[ctx] < 0) {
print "Missing use check", ctx, "\n"
}
};
}' < stderr.log
*/
static void
CheckOperand(const MNode* consumer, const MUse* use, int32_t* usesBalance)
{
MOZ_ASSERT(use->hasProducer());
MDefinition* producer = use->producer();
MOZ_ASSERT(!producer->isDiscarded());
MOZ_ASSERT(producer->block() != nullptr);
MOZ_ASSERT(use->consumer() == consumer);
#ifdef _DEBUG_CHECK_OPERANDS_USES_BALANCE
fprintf(stderr, "==Check Operand\n");
use->producer()->dump(stderr);
fprintf(stderr, " index: %" PRIuSIZE "\n", use->consumer()->indexOf(use));
use->consumer()->dump(stderr);
fprintf(stderr, "==End\n");
#endif
--*usesBalance;
}
static void
CheckUse(const MDefinition* producer, const MUse* use, int32_t* usesBalance)
{
    // Check the use side of an operand/use edge: the consumer must live in a
    // live block and its operand slot at use->index() must hold |producer|.
    // Each use-list entry increments the balance, matching the decrement
    // performed by CheckOperand for the corresponding operand slot.
    //
    // Fix: assert that the consumer's block is non-null BEFORE dereferencing
    // it; the original checked block() != nullptr only after block()->isDead()
    // had already dereferenced the pointer, so the null check could never
    // fire.
    MOZ_ASSERT(use->consumer()->block() != nullptr);
    MOZ_ASSERT(!use->consumer()->block()->isDead());
    MOZ_ASSERT_IF(use->consumer()->isDefinition(),
                  !use->consumer()->toDefinition()->isDiscarded());
    MOZ_ASSERT(use->consumer()->getOperand(use->index()) == producer);
#ifdef _DEBUG_CHECK_OPERANDS_USES_BALANCE
    fprintf(stderr, "==Check Use\n");
    use->producer()->dump(stderr);
    fprintf(stderr, " index: %" PRIuSIZE "\n", use->consumer()->indexOf(use));
    use->consumer()->dump(stderr);
    fprintf(stderr, "==End\n");
#endif
    ++*usesBalance;
}
#endif // DEBUG
// Debug-only sanity check of the MIR graph: root-block invariants,
// predecessor/successor list symmetry, resume point wiring, phi arity, and
// a global balance between operand slots and use-list entries.
void
jit::AssertBasicGraphCoherency(MIRGraph& graph)
{
#ifdef DEBUG
    // The entry block is a root: nothing jumps to it and it carries no phis.
    MOZ_ASSERT(graph.entryBlock()->numPredecessors() == 0);
    MOZ_ASSERT(graph.entryBlock()->phisEmpty());
    MOZ_ASSERT(!graph.entryBlock()->unreachable());

    // The OSR block, when present, is a second, distinct root.
    if (MBasicBlock* osrBlock = graph.osrBlock()) {
        MOZ_ASSERT(osrBlock->numPredecessors() == 0);
        MOZ_ASSERT(osrBlock->phisEmpty());
        MOZ_ASSERT(osrBlock != graph.entryBlock());
        MOZ_ASSERT(!osrBlock->unreachable());
    }

    if (MResumePoint* resumePoint = graph.entryResumePoint())
        MOZ_ASSERT(resumePoint->block() == graph.entryBlock());

    // Assert successor and predecessor list coherency.
    uint32_t count = 0;
    // Incremented once per use-list entry (CheckUse) and decremented once per
    // operand slot (CheckOperand); a coherent graph has exactly one use-list
    // entry per operand, so the balance must come back to zero.
    int32_t usesBalance = 0;
    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
        count++;
        MOZ_ASSERT(&block->graph() == &graph);
        MOZ_ASSERT(!block->isDead());
        MOZ_ASSERT_IF(block->outerResumePoint() != nullptr,
                      block->entryResumePoint() != nullptr);

        // Successor and predecessor lists must mirror each other.
        for (size_t i = 0; i < block->numSuccessors(); i++)
            MOZ_ASSERT(CheckSuccessorImpliesPredecessor(*block, block->getSuccessor(i)));
        for (size_t i = 0; i < block->numPredecessors(); i++)
            MOZ_ASSERT(CheckPredecessorImpliesSuccessor(*block, block->getPredecessor(i)));

        // Entry and outer resume points belong to the block itself, not to
        // any instruction.
        if (block->entryResumePoint()) {
            MOZ_ASSERT(!block->entryResumePoint()->instruction());
            MOZ_ASSERT(block->entryResumePoint()->block() == *block);
        }
        if (block->outerResumePoint()) {
            MOZ_ASSERT(!block->outerResumePoint()->instruction());
            MOZ_ASSERT(block->outerResumePoint()->block() == *block);
        }
        for (MResumePointIterator iter(block->resumePointsBegin()); iter != block->resumePointsEnd(); iter++) {
            // We cannot yet assert that if there is no instruction then this
            // is the entry resume point, because we are still storing resume
            // points in the InlinePropertyTable.
            MOZ_ASSERT_IF(iter->instruction(), iter->instruction()->block() == *block);
            for (uint32_t i = 0, e = iter->numOperands(); i < e; i++)
                CheckOperand(*iter, iter->getUseFor(i), &usesBalance);
        }
        for (MPhiIterator phi(block->phisBegin()); phi != block->phisEnd(); phi++) {
            // A phi has exactly one operand per predecessor edge.
            MOZ_ASSERT(phi->numOperands() == block->numPredecessors());
            MOZ_ASSERT(!phi->isRecoveredOnBailout());
            MOZ_ASSERT(phi->type() != MIRType_None);
            MOZ_ASSERT(phi->dependency() == nullptr);
        }
        for (MDefinitionIterator iter(*block); iter; iter++) {
            MOZ_ASSERT(iter->block() == *block);
            MOZ_ASSERT_IF(iter->hasUses(), iter->type() != MIRType_None);
            MOZ_ASSERT(!iter->isDiscarded());
            // Start/parameter/OSR instructions may only appear in the root
            // blocks (entry and OSR entry).
            MOZ_ASSERT_IF(iter->isStart(),
                          *block == graph.entryBlock() || *block == graph.osrBlock());
            MOZ_ASSERT_IF(iter->isParameter(),
                          *block == graph.entryBlock() || *block == graph.osrBlock());
            MOZ_ASSERT_IF(iter->isOsrEntry(), *block == graph.osrBlock());
            MOZ_ASSERT_IF(iter->isOsrValue(), *block == graph.osrBlock());

            // Assert that use chains are valid for this instruction.
            for (uint32_t i = 0, end = iter->numOperands(); i < end; i++)
                CheckOperand(*iter, iter->getUseFor(i), &usesBalance);
            for (MUseIterator use(iter->usesBegin()); use != iter->usesEnd(); use++)
                CheckUse(*iter, *use, &usesBalance);

            // An instruction's resume point must point back at it and at a
            // block that carries an entry resume point.
            if (iter->isInstruction()) {
                if (MResumePoint* resume = iter->toInstruction()->resumePoint()) {
                    MOZ_ASSERT(resume->instruction() == *iter);
                    MOZ_ASSERT(resume->block() == *block);
                    MOZ_ASSERT(resume->block()->entryResumePoint() != nullptr);
                }
            }

            if (iter->isRecoveredOnBailout())
                MOZ_ASSERT(!iter->hasLiveDefUses());
        }

        // The control instruction is not visited by the MDefinitionIterator.
        MControlInstruction* control = block->lastIns();
        MOZ_ASSERT(control->block() == *block);
        MOZ_ASSERT(!control->hasUses());
        MOZ_ASSERT(control->type() == MIRType_None);
        MOZ_ASSERT(!control->isDiscarded());
        MOZ_ASSERT(!control->isRecoveredOnBailout());
        MOZ_ASSERT(control->resumePoint() == nullptr);
        for (uint32_t i = 0, end = control->numOperands(); i < end; i++)
            CheckOperand(control, control->getUseFor(i), &usesBalance);
    }

    // In case of issues, see the _DEBUG_CHECK_OPERANDS_USES_BALANCE macro above.
    MOZ_ASSERT(usesBalance <= 0, "More use checks than operand checks");
    MOZ_ASSERT(usesBalance >= 0, "More operand checks than use checks");
    MOZ_ASSERT(graph.numBlocks() == count);
#endif
}
#ifdef DEBUG
static void
AssertReversePostorder(MIRGraph& graph)
{
    // Walk the block list in RPO, marking each block as it is reached. Every
    // predecessor of a block must already be marked when the block is
    // visited, except for a loop backedge, which legitimately appears after
    // its header.
    for (ReversePostorderIterator it(graph.rpoBegin()); it != graph.rpoEnd(); ++it) {
        MBasicBlock* current = *it;
        MOZ_ASSERT(!current->isMarked());

        size_t numPreds = current->numPredecessors();
        for (size_t p = 0; p < numPreds; ++p) {
            MBasicBlock* pred = current->getPredecessor(p);
            if (pred->isMarked())
                continue;
            MOZ_ASSERT(pred->isLoopBackedge());
            MOZ_ASSERT(current->backedge() == pred);
        }

        current->mark();
    }

    graph.unmarkBlocks();
}
#endif
#ifdef DEBUG
// Debug-only validation of the dominator tree: immediate-dominator links,
// parent/child list agreement, and the numDominated() subtree-size
// bookkeeping on every block.
static void
AssertDominatorTree(MIRGraph& graph)
{
    // Check dominators.
    // The entry block (and the OSR block, if present) are tree roots and are
    // their own immediate dominator. Without OSR there is a single root which
    // dominates every block in the graph.
    MOZ_ASSERT(graph.entryBlock()->immediateDominator() == graph.entryBlock());
    if (MBasicBlock* osrBlock = graph.osrBlock())
        MOZ_ASSERT(osrBlock->immediateDominator() == osrBlock);
    else
        MOZ_ASSERT(graph.entryBlock()->numDominated() == graph.numBlocks());
    // |i| counts the blocks not yet visited (including the current one); a
    // block's dominated subtree can only contain itself and later blocks, so
    // its size is bounded by |i|.
    size_t i = graph.numBlocks();
    // Sum of the subtree sizes of all tree roots; must equal numBlocks().
    size_t totalNumDominated = 0;
    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
        MOZ_ASSERT(block->dominates(*block));
        MBasicBlock* idom = block->immediateDominator();
        MOZ_ASSERT(idom->dominates(*block));
        MOZ_ASSERT(idom == *block || idom->id() < block->id());
        if (idom == *block) {
            // A self-dominating block is a tree root; account for its whole
            // subtree.
            totalNumDominated += block->numDominated();
        } else {
            // A non-root block must be listed among its immediate dominator's
            // children.
            bool foundInParent = false;
            for (size_t j = 0; j < idom->numImmediatelyDominatedBlocks(); j++) {
                if (idom->getImmediatelyDominatedBlock(j) == *block) {
                    foundInParent = true;
                    break;
                }
            }
            MOZ_ASSERT(foundInParent);
        }
        // numDominated() must be 1 (for the block itself) plus the sizes of
        // the immediately dominated children's subtrees.
        size_t numDominated = 1;
        for (size_t j = 0; j < block->numImmediatelyDominatedBlocks(); j++) {
            MBasicBlock* dom = block->getImmediatelyDominatedBlock(j);
            MOZ_ASSERT(block->dominates(dom));
            MOZ_ASSERT(dom->id() > block->id());
            MOZ_ASSERT(dom->immediateDominator() == *block);
            numDominated += dom->numDominated();
        }
        MOZ_ASSERT(block->numDominated() == numDominated);
        MOZ_ASSERT(block->numDominated() <= i);
        // A block with no successors dominates only itself.
        MOZ_ASSERT(block->numSuccessors() != 0 || block->numDominated() == 1);
        i--;
    }
    MOZ_ASSERT(i == 0);
    MOZ_ASSERT(totalNumDominated == graph.numBlocks());
}
#endif
void
jit::AssertGraphCoherency(MIRGraph& graph)
{
#ifdef DEBUG
    // Check the invariants that hold at any point of the compilation
    // pipeline: basic graph coherency plus the RPO ordering of the block
    // list. Skipped unless graph-consistency checking is enabled.
    if (JitOptions.checkGraphConsistency) {
        AssertBasicGraphCoherency(graph);
        AssertReversePostorder(graph);
    }
#endif
}
#ifdef DEBUG
static bool
IsResumableMIRType(MIRType type)
{
    // Mirrors what CodeGeneratorShared::encodeAllocation can encode into a
    // snapshot. Kept as an exhaustive switch (no default) so adding a new
    // MIRType forces an update here.
    switch (type) {
      case MIRType_MagicHole:
      case MIRType_MagicIsConstructing:
      case MIRType_ObjectOrNull:
      case MIRType_None:
      case MIRType_Slots:
      case MIRType_Elements:
      case MIRType_Pointer:
      case MIRType_Shape:
      case MIRType_ObjectGroup:
      case MIRType_Doublex2: // NYI, see also RSimdBox::recover
      case MIRType_SinCosDouble:
        return false;

      case MIRType_Undefined:
      case MIRType_Null:
      case MIRType_Boolean:
      case MIRType_Int32:
      case MIRType_Double:
      case MIRType_Float32:
      case MIRType_String:
      case MIRType_Symbol:
      case MIRType_Object:
      case MIRType_MagicOptimizedArguments:
      case MIRType_MagicOptimizedOut:
      case MIRType_MagicUninitializedLexical:
      case MIRType_Value:
      case MIRType_Float32x4:
      case MIRType_Int32x4:
        return true;
    }
    MOZ_CRASH("Unknown MIRType.");
}
static void
AssertResumableOperands(MNode* node)
{
    // Every operand of |node| must be encodable in a snapshot, unless it is
    // itself recovered on bailout.
    size_t numOps = node->numOperands();
    for (size_t i = 0; i < numOps; ++i) {
        MDefinition* operand = node->getOperand(i);
        if (operand->isRecoveredOnBailout())
            continue;
        MOZ_ASSERT(IsResumableMIRType(operand->type()),
                   "Resume point cannot encode its operands");
    }
}
static void
AssertIfResumableInstruction(MDefinition* def)
{
    // Only definitions flagged as recovered-on-bailout need to have
    // resumable operands.
    if (def->isRecoveredOnBailout())
        AssertResumableOperands(def);
}
static void
AssertResumePointDominatedByOperands(MResumePoint* resume)
{
    // Every operand of a resume point must be defined in a block dominating
    // the resume point's block, except magic optimized-arguments values
    // which are exempted.
    size_t numOps = resume->numOperands();
    for (size_t i = 0; i < numOps; ++i) {
        MDefinition* operand = resume->getOperand(i);
        if (operand->type() == MIRType_MagicOptimizedArguments)
            continue;
        MOZ_ASSERT(operand->block()->dominates(resume->block()),
                   "Resume point is not dominated by its operands");
    }
}
#endif // DEBUG
void
jit::AssertExtendedGraphCoherency(MIRGraph& graph)
{
    // Checks the basic GraphCoherency but also other conditions that
    // do not hold immediately (such as the fact that critical edges
    // are split)
#ifdef DEBUG
    if (!JitOptions.checkGraphConsistency)
        return;
    AssertGraphCoherency(graph);
    AssertDominatorTree(graph);
    // Block ids must be dense and match the iteration order of the list.
    uint32_t idx = 0;
    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
        MOZ_ASSERT(block->id() == idx++);
        // No critical edges:
        // a block with several successors may only jump to blocks with a
        // single predecessor.
        if (block->numSuccessors() > 1)
            for (size_t i = 0; i < block->numSuccessors(); i++)
                MOZ_ASSERT(block->getSuccessor(i)->numPredecessors() == 1);
        // A loop header has exactly two predecessors: the entry edge at
        // index 0 and the backedge at index 1, and the backedge jumps only
        // back to the header.
        if (block->isLoopHeader()) {
            MOZ_ASSERT(block->numPredecessors() == 2);
            MBasicBlock* backedge = block->getPredecessor(1);
            MOZ_ASSERT(backedge->id() >= block->id());
            MOZ_ASSERT(backedge->numSuccessors() == 1);
            MOZ_ASSERT(backedge->getSuccessor(0) == *block);
        }
        // Each predecessor's phi bookkeeping (set up by
        // BuildPhiReverseMapping) must point back at this block with the
        // right operand index.
        if (!block->phisEmpty()) {
            for (size_t i = 0; i < block->numPredecessors(); i++) {
                MBasicBlock* pred = block->getPredecessor(i);
                MOZ_ASSERT(pred->successorWithPhis() == *block);
                MOZ_ASSERT(pred->positionInPhiSuccessor() == i);
            }
        }
        // At most one successor may carry phis, and the cached
        // successorWithPhis() pointer must agree with the recount.
        uint32_t successorWithPhis = 0;
        for (size_t i = 0; i < block->numSuccessors(); i++)
            if (!block->getSuccessor(i)->phisEmpty())
                successorWithPhis++;
        MOZ_ASSERT(successorWithPhis <= 1);
        MOZ_ASSERT((successorWithPhis != 0) == (block->successorWithPhis() != nullptr));
        // Verify that phi operands dominate the corresponding CFG predecessor
        // edges.
        for (MPhiIterator iter(block->phisBegin()), end(block->phisEnd()); iter != end; ++iter) {
            MPhi* phi = *iter;
            for (size_t i = 0, e = phi->numOperands(); i < e; ++i) {
                // We sometimes see a phi with a magic-optimized-arguments
                // operand defined in the normal entry block, while the phi is
                // also reachable from the OSR entry (auto-regress/bug779818.js)
                if (phi->getOperand(i)->type() == MIRType_MagicOptimizedArguments)
                    continue;
                MOZ_ASSERT(phi->getOperand(i)->block()->dominates(block->getPredecessor(i)),
                           "Phi input is not dominated by its operand");
            }
        }
        // Verify that instructions are dominated by their operands.
        for (MInstructionIterator iter(block->begin()), end(block->end()); iter != end; ++iter) {
            MInstruction* ins = *iter;
            for (size_t i = 0, e = ins->numOperands(); i < e; ++i) {
                MDefinition* op = ins->getOperand(i);
                MBasicBlock* opBlock = op->block();
                MOZ_ASSERT(opBlock->dominates(*block),
                           "Instruction is not dominated by its operands");
                // If the operand is an instruction in the same block, check
                // that it comes first.
                if (opBlock == *block && !op->isPhi()) {
                    MInstructionIterator opIter = block->begin(op->toInstruction());
                    do {
                        ++opIter;
                        MOZ_ASSERT(opIter != block->end(),
                                   "Operand in same block as instruction does not precede");
                    } while (*opIter != ins);
                }
            }
            AssertIfResumableInstruction(ins);
            if (MResumePoint* resume = ins->resumePoint()) {
                AssertResumePointDominatedByOperands(resume);
                AssertResumableOperands(resume);
            }
        }
        // Verify that the block resume points are dominated by their operands.
        if (MResumePoint* resume = block->entryResumePoint()) {
            AssertResumePointDominatedByOperands(resume);
            AssertResumableOperands(resume);
        }
        if (MResumePoint* resume = block->outerResumePoint()) {
            AssertResumePointDominatedByOperands(resume);
            AssertResumableOperands(resume);
        }
    }
#endif
}
// Cached information about a bounds check, keyed (in BoundsCheckMap below)
// by a hash that ignores constant offsets in the index, so later checks on
// the same base index can locate an earlier check to reuse.
struct BoundsCheckInfo
{
    MBoundsCheck* check;  // The previously seen bounds check.
    uint32_t validEnd;    // Presumably the instruction-id limit up to which
                          // |check| may still be reused — confirm against the
                          // code that populates this map (below this chunk).
};
typedef HashMap<uint32_t,
                BoundsCheckInfo,
                DefaultHasher<uint32_t>,
                JitAllocPolicy> BoundsCheckMap;
// Compute a hash for bounds checks which ignores constant offsets in the index.
static HashNumber
BoundsCheckHashIgnoreOffset(MBoundsCheck* check)