blob: 5e5c085095701919751615da011eb8826d999d2b [file] [log] [blame]
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/mips/CodeGenerator-mips.h"
#include "mozilla/MathAlgorithms.h"
#include "jscntxt.h"
#include "jscompartment.h"
#include "jsnum.h"
#include "jit/PerfSpewer.h"
#include "jit/CodeGenerator.h"
#include "jit/IonFrames.h"
#include "jit/IonCompartment.h"
#include "jit/MIR.h"
#include "jit/MIRGraph.h"
#include "vm/Shape.h"
#include "jsscriptinlines.h"
#include "jit/shared/CodeGenerator-shared-inl.h"
#include "jit/MoveEmitter.h"
using namespace js;
using namespace js::jit;
// shared
// Construct the MIPS code generator. deoptLabel_ starts out NULL and is
// allocated lazily by visitOutOfLineBailout() the first time a non-table
// bailout is emitted.
CodeGeneratorMIPS::CodeGeneratorMIPS(MIRGenerator *gen, LIRGraph *graph, MacroAssembler *masm)
  : CodeGeneratorShared(gen, graph, masm),
    deoptLabel_(NULL)
{
}
// Emit the function prologue: reserve the stack frame and, for asm.js,
// save the return address. Also allocates the label bound by
// generateEpilogue().
bool
CodeGeneratorMIPS::generatePrologue()
{
    if (!gen->compilingAsmJS()) {
        // reserveStack() also updates MacroAssembler::framePushed().
        masm.reserveStack(frameSize());
        masm.checkStackAlignment();
    } else {
        // asm.js saves 'ra' explicitly; reserveStack() updates
        // MacroAssembler::framePushed() as a side effect.
        masm.Push(ra);
        masm.reserveStack(frameDepth_);
    }

    returnLabel_ = new HeapLabel();
    return true;
}
// Emit the function epilogue: bind the shared return label, release the
// frame reserved by generatePrologue(), and return.
bool
CodeGeneratorMIPS::generateEpilogue()
{
    // Every return path in the function jumps here.
    masm.bind(returnLabel_);
#if JS_TRACE_LOGGING
    masm.tracelogStop();
#endif
    if (!gen->compilingAsmJS()) {
        // Release the stack space allocated in the prologue.
        masm.freeStack(frameSize());
        MOZ_ASSERT(masm.framePushed() == 0);
        masm.ret();
    } else {
        // asm.js: release the frame, restore 'ra', and do an ABI return.
        masm.freeStack(frameDepth_);
        masm.Pop(ra);
        masm.abiret();
        MOZ_ASSERT(masm.framePushed() == 0);
    }
    return true;
}
// Emit a conditional double-compare branch to the label of 'mir'.
// Fix: the computed 'label' local was previously unused and
// mir->lir()->label() was evaluated a second time in the branch call;
// use the local instead.
// NOTE(review): 'fmt' is currently unused — only double-precision
// compares are emitted here; the parameter presumably exists for
// interface parity with single-precision support. Confirm before
// removing it.
void
CodeGeneratorMIPS::branchToBlock(Assembler::FloatFormat fmt, FloatRegister lhs, FloatRegister rhs,
                                 MBasicBlock *mir, Assembler::DoubleCondition cond)
{
    Label *label = mir->lir()->label();
    masm.branchDouble(cond, lhs, rhs, label);
}
// Double-dispatch hook: route this out-of-line path to the code
// generator's bailout emitter.
bool
OutOfLineBailout::accept(CodeGeneratorMIPS *codegen)
{
    return codegen->visitOutOfLineBailout(this);
}
// Branch on whether a 32-bit integer operand is non-zero.
bool
CodeGeneratorMIPS::visitTestIAndBranch(LTestIAndBranch *test)
{
    emitBranch(ToRegister(test->getOperand(0)), Imm32(0), Assembler::NonZero,
               test->ifTrue(), test->ifFalse());
    return true;
}
// Materialize the boolean result of an int32 comparison into the output
// register. The right-hand side may be a constant, a register, or a
// stack slot.
bool
CodeGeneratorMIPS::visitCompare(LCompare *comp)
{
    Assembler::Condition cond = JSOpToCondition(comp->mir()->compareType(), comp->jsop());
    Register lhsReg = ToRegister(comp->getOperand(0));
    Register outReg = ToRegister(comp->getDef(0));
    const LAllocation *rhs = comp->getOperand(1);

    if (rhs->isConstant()) {
        masm.cmp32Set(cond, lhsReg, Imm32(ToInt32(rhs)), outReg);
    } else if (rhs->isGeneralReg()) {
        masm.cmp32Set(cond, lhsReg, ToRegister(rhs), outReg);
    } else {
        masm.cmp32Set(cond, lhsReg, ToAddress(rhs), outReg);
    }
    return true;
}
// Fused int32 compare-and-branch. The right-hand side may be a constant,
// a register, or a stack slot.
bool
CodeGeneratorMIPS::visitCompareAndBranch(LCompareAndBranch *comp)
{
    Assembler::Condition cond = JSOpToCondition(comp->mir()->compareType(), comp->jsop());
    Register lhsReg = ToRegister(comp->left());
    const LAllocation *rhs = comp->right();
    MBasicBlock *onTrue = comp->ifTrue();
    MBasicBlock *onFalse = comp->ifFalse();

    if (rhs->isConstant())
        emitBranch(lhsReg, Imm32(ToInt32(rhs)), cond, onTrue, onFalse);
    else if (rhs->isGeneralReg())
        emitBranch(lhsReg, ToRegister(rhs), cond, onTrue, onFalse);
    else
        emitBranch(lhsReg, ToAddress(rhs), cond, onTrue, onFalse);
    return true;
}
// Generate the shared out-of-line code, then — if any non-table bailout
// was emitted — the common deopt tail that jumps to the generic bailout
// handler.
bool
CodeGeneratorMIPS::generateOutOfLineCode()
{
    if (!CodeGeneratorShared::generateOutOfLineCode())
        return false;

    // deoptLabel_ is allocated lazily by visitOutOfLineBailout(); without
    // it there is nothing more to emit.
    if (!deoptLabel_)
        return true;

    // All non-table-based bailouts land here.
    masm.bind(deoptLabel_);

    // Store the frame size in 'ra' so the bailout thunk can push it and
    // recover the IonScript. 'ra' is used because generateBailoutTable
    // implicitly does the same.
    masm.move32(Imm32(frameSize()), ra);

    IonCompartment *ion = GetIonContext()->compartment->ionCompartment();
    IonCode *handler = ion->getGenericBailoutHandler();
    masm.branch(handler);

    return true;
}
// Redirect 'label' (an emitted-but-unbound branch target) to out-of-line
// bailout code for 'snapshot'. In parallel execution mode no recovery is
// attempted: the branch is retargeted to an abort path instead.
bool
CodeGeneratorMIPS::bailoutFrom(Label *label, LSnapshot *snapshot)
{
    JS_ASSERT(label->used() && !label->bound());

    CompileInfo &info = snapshot->mir()->block()->info();
    switch (info.executionMode()) {
      case ParallelExecution: {
        // in parallel mode, make no attempt to recover, just signal an error.
        OutOfLineParallelAbort* ool = oolParallelAbort(ParallelBailoutUnsupported,
                                                       snapshot->mir()->block(),
                                                       snapshot->mir()->pc());
        masm.retarget(label, ool->entry());
        return true;
      }
      case SequentialExecution:
        break;
      default:
        JS_NOT_REACHED("No such execution mode");
    }

    // Record the snapshot in the snapshot buffer; its offset is stored by
    // the out-of-line bailout path (visitOutOfLineBailout).
    if (!encode(snapshot))
        return false;

    // Though the assembler doesn't track all frame pushes, at least make sure
    // the known value makes sense. We can't use bailout tables if the stack
    // isn't properly aligned to the static frame size.
    JS_ASSERT_IF(frameClass_ != FrameSizeClass::None(),
                 frameClass_.frameSize() == masm.framePushed());

    // We don't use table bailouts because retargeting is easier this way.
    OutOfLineBailout *ool = new OutOfLineBailout(snapshot, masm.framePushed());
    if (!addOutOfLineCode(ool)) {
        return false;
    }

    masm.retarget(label, ool->entry());
    return true;
}
// Emit an unconditional bailout for 'snapshot': jump through a fresh
// label so that the shared bailoutFrom() machinery can retarget it to
// the out-of-line bailout path.
bool
CodeGeneratorMIPS::bailout(LSnapshot *snapshot)
{
    Label label;
    masm.jump(&label);
    return bailoutFrom(&label, snapshot);
}
// Out-of-line bailout tail: store the snapshot offset on the stack and
// jump to the common deopt label emitted by generateOutOfLineCode().
bool
CodeGeneratorMIPS::visitOutOfLineBailout(OutOfLineBailout *ool)
{
    // Lazily allocate the deopt label shared by all such paths.
    if (!deoptLabel_) {
        deoptLabel_ = new HeapLabel();
    }
    // Push snapshotOffset and make sure stack is aligned: two words are
    // reserved but only the first holds the offset.
    masm.subPtr(Imm32(2 * sizeof(void *)), StackPointer);
    masm.storePtr(ImmWord(ool->snapshot()->snapshotOffset()), Address(StackPointer, 0));

    masm.jump(deoptLabel_);
    return true;
}
// Emit Math.min/Math.max for doubles, with IEEE-correct handling of NaN
// (result is NaN) and of signed zeros (max(-0,0) == 0, min(-0,0) == -0).
// The register allocator pins first == output.
bool
CodeGeneratorMIPS::visitMinMaxD(LMinMaxD *ins)
{
    FloatRegister first = ToFloatRegister(ins->first());
    FloatRegister second = ToFloatRegister(ins->second());
    FloatRegister output = ToFloatRegister(ins->output());

    MOZ_ASSERT(first == output);

    Assembler::DoubleCondition cond = ins->mir()->isMax()
                                      ? Assembler::DoubleLessThanOrEqual
                                      : Assembler::DoubleGreaterThanOrEqual;
    Label nan, equal, returnSecond, done;

    // First or second is NaN, result is NaN.
    masm.ma_bc1d(first, second, &nan, Assembler::DoubleUnordered, ShortJump);
    // Make sure we handle -0 and 0 right: -0 == 0 compares equal, so the
    // equal case needs special treatment below.
    masm.ma_bc1d(first, second, &equal, Assembler::DoubleEqual, ShortJump);
    // 'cond' holds when 'second' should win (first <= second for max,
    // first >= second for min).
    masm.ma_bc1d(first, second, &returnSecond, cond, ShortJump);
    masm.ma_b(&done, ShortJump);

    // Check for zero.
    masm.bind(&equal);
    masm.loadConstantDouble(0.0, ScratchFloatReg);
    // First wasn't 0 or -0, so just return it (first == second here).
    masm.ma_bc1d(first, ScratchFloatReg, &done, Assembler::DoubleNotEqualOrUnordered, ShortJump);

    // So now both operands are either -0 or 0.
    if (ins->mir()->isMax()) {
        // -0 + -0 = -0 and -0 + 0 = 0.
        masm.addDouble(second, first);
    } else {
        // min: compute -((-first) - second) so that a -0 on either side
        // survives into the result.
        masm.negateDouble(first);
        masm.subDouble(second, first);
        masm.negateDouble(first);
    }
    masm.ma_b(&done, ShortJump);

    masm.bind(&nan);
    masm.loadStaticDouble(&js_NaN, output);
    masm.ma_b(&done, ShortJump);

    masm.bind(&returnSecond);
    masm.moveDouble(second, output);

    masm.bind(&done);
    return true;
}
// Double absolute value. abs.d works in place: the register allocator
// pins input == output.
bool
CodeGeneratorMIPS::visitAbsD(LAbsD *ins)
{
    FloatRegister in = ToFloatRegister(ins->input());
    MOZ_ASSERT(in == ToFloatRegister(ins->output()));
    masm.as_absd(in, in);
    return true;
}
// Double square root: sqrt.d src -> dest.
bool
CodeGeneratorMIPS::visitSqrtD(LSqrtD *ins)
{
    FloatRegister src = ToFloatRegister(ins->input());
    FloatRegister dest = ToFloatRegister(ins->output());
    masm.as_sqrtd(dest, src);
    return true;
}
// 32-bit integer addition. With a snapshot attached the add is fallible
// and bails out on signed overflow; without one, MIR has proven overflow
// impossible and a plain add is emitted.
bool
CodeGeneratorMIPS::visitAddI(LAddI *ins)
{
    const LAllocation *rhs = ins->getOperand(1);
    Register lhsReg = ToRegister(ins->getOperand(0));
    Register destReg = ToRegister(ins->getDef(0));

    MOZ_ASSERT(rhs->isConstant() || rhs->isGeneralReg());

    // Infallible path: no snapshot means no overflow check is needed.
    if (!ins->snapshot()) {
        if (rhs->isConstant())
            masm.ma_addu(destReg, lhsReg, Imm32(ToInt32(rhs)));
        else
            masm.as_addu(destReg, lhsReg, ToRegister(rhs));
        return true;
    }

    // Fallible path: branch to an out-of-line bailout on overflow.
    Label overflow;
    if (rhs->isConstant())
        masm.ma_addTestOverflow(destReg, lhsReg, Imm32(ToInt32(rhs)), &overflow);
    else
        masm.ma_addTestOverflow(destReg, lhsReg, ToRegister(rhs), &overflow);

    return bailoutFrom(&overflow, ins->snapshot());
}
// 32-bit integer subtraction. Mirrors visitAddI: fallible (with
// snapshot) subtractions bail out on signed overflow, infallible ones
// use a plain subu.
bool
CodeGeneratorMIPS::visitSubI(LSubI *ins)
{
    const LAllocation *rhs = ins->getOperand(1);
    Register lhsReg = ToRegister(ins->getOperand(0));
    Register destReg = ToRegister(ins->getDef(0));

    MOZ_ASSERT(rhs->isConstant() || rhs->isGeneralReg());

    // Infallible path: no snapshot means no overflow check is needed.
    if (!ins->snapshot()) {
        if (rhs->isConstant())
            masm.ma_subu(destReg, lhsReg, Imm32(ToInt32(rhs)));
        else
            masm.as_subu(destReg, lhsReg, ToRegister(rhs));
        return true;
    }

    // Fallible path: branch to an out-of-line bailout on overflow.
    Label overflow;
    if (rhs->isConstant())
        masm.ma_subTestOverflow(destReg, lhsReg, Imm32(ToInt32(rhs)), &overflow);
    else
        masm.ma_subTestOverflow(destReg, lhsReg, ToRegister(rhs), &overflow);

    return bailoutFrom(&overflow, ins->snapshot());
}
// 32-bit integer multiplication with strength reduction for constant
// right-hand sides and optional overflow / negative-zero bailouts.
bool
CodeGeneratorMIPS::visitMulI(LMulI *ins)
{
    const LAllocation *lhs = ins->lhs();
    const LAllocation *rhs = ins->rhs();
    Register dest = ToRegister(ins->output());
    MMul *mul = ins->mir();

    MOZ_ASSERT_IF(mul->mode() == MMul::Integer, !mul->canBeNegativeZero() && !mul->canOverflow());

    if (rhs->isConstant()) {
        int32_t constant = ToInt32(rhs);
        Register src = ToRegister(lhs);

        // Bailout on -0.0: with constant == 0 the result is -0 when
        // src < 0; with constant < 0 it is -0 when src == 0.
        if (mul->canBeNegativeZero() && constant <= 0) {
            Assembler::Condition cond = (constant == 0) ? Assembler::LessThan : Assembler::Equal;
            if (!bailoutCmp32(cond, src, Imm32(0), ins->snapshot()))
                return false;
        }

        switch (constant) {
          case -1:
            // -INT32_MIN overflows int32; bail in that case.
            if (mul->canOverflow()) {
                if (!bailoutCmp32(Assembler::Equal, src, Imm32(INT32_MIN), ins->snapshot()))
                    return false;
            }
            masm.ma_negu(dest, src);
            break;
          case 0:
            masm.move32(Imm32(0), dest);
            break;
          case 1:
            masm.move32(src, dest);
            break;
          case 2:
            // x * 2 == x + x, with the add's overflow check if needed.
            if (mul->canOverflow()) {
                Label mulTwoOverflow;
                masm.ma_addTestOverflow(dest, src, src, &mulTwoOverflow);

                if (!bailoutFrom(&mulTwoOverflow, ins->snapshot()))
                    return false;
            } else {
                masm.as_addu(dest, src, src);
            }
            break;
          default:
            uint32_t shift;
            JS_FLOOR_LOG2(shift, constant);

            if (!mul->canOverflow() && (constant > 0)) {
                // If it cannot overflow, we can do lots of optimizations.
                uint32_t rest = constant - (1 << shift);

                // See if the constant has one bit set, meaning it can be
                // encoded as a bitshift.
                if ((1 << shift) == constant) {
                    masm.ma_sll(dest, src, Imm32(shift));
                    return true;
                }

                // If the constant cannot be encoded as (1<<C1), see if it can
                // be encoded as (1<<C1) | (1<<C2), which can be computed
                // using an add and a shift: ((x << (C1-C2)) + x) << C2.
                uint32_t shift_rest;
                JS_FLOOR_LOG2(shift_rest, rest);
                if (src != dest && (1u << shift_rest) == rest) {
                    masm.ma_sll(dest, src, Imm32(shift - shift_rest));
                    masm.add32(src, dest);
                    if (shift_rest != 0)
                        masm.ma_sll(dest, dest, Imm32(shift_rest));
                    return true;
                }
            }

            if (mul->canOverflow() && (constant > 0) && (src != dest)) {
                // To stay on the safe side, only optimize things that are a
                // power of 2.

                if ((1 << shift) == constant) {
                    // dest = lhs * pow(2, shift)
                    masm.ma_sll(dest, src, Imm32(shift));
                    // At runtime, check (lhs == dest >> shift), if this does
                    // not hold, some bits were lost due to overflow, and the
                    // computation should be resumed as a double.
                    masm.ma_sra(ScratchRegister, dest, Imm32(shift));
                    if (!bailoutCmp32(Assembler::NotEqual, src, ScratchRegister, ins->snapshot()))
                        return false;
                    return true;
                }
            }

            // General constant multiply: either with an overflow branch or
            // a plain mult/mflo pair.
            if (mul->canOverflow()) {
                Label mulConstOverflow;
                masm.ma_mul_branch_overflow(dest, ToRegister(lhs), Imm32(ToInt32(rhs)),
                                            &mulConstOverflow);

                if (!bailoutFrom(&mulConstOverflow, ins->snapshot()))
                    return false;
            } else {
                masm.ma_mult(src, Imm32(ToInt32(rhs)));
                masm.as_mflo(dest);
            }
            break;
        }
    } else {
        // Register * register.
        Label multRegOverflow;

        if (mul->canOverflow()) {
            masm.ma_mul_branch_overflow(dest, ToRegister(lhs), ToRegister(rhs), &multRegOverflow);
            if (!bailoutFrom(&multRegOverflow, ins->snapshot()))
                return false;
        } else {
            masm.as_mult(ToRegister(lhs), ToRegister(rhs));
            masm.as_mflo(dest);
        }

        if (mul->canBeNegativeZero()) {
            // A zero result only needs a bailout when it should have been -0.
            Label done;
            masm.ma_b(dest, dest, &done, Assembler::NonZero, ShortJump);

            // Result is -0 if lhs or rhs is negative.
            // In that case result must be double value so bailout
            Register scratch = SecondScratchReg;
            masm.ma_or(scratch, ToRegister(lhs), ToRegister(rhs));
            if (!bailoutCmp32(Assembler::Signed, scratch, scratch, ins->snapshot()))
                return false;

            masm.bind(&done);
        }
    }

    return true;
}
// 32-bit signed integer division with JS semantics: guards for division
// by zero, INT32_MIN / -1 overflow, negative zero (0 / -Y), and a
// non-zero remainder, each either truncated to an int32 result or bailed
// out to resume as a double.
bool
CodeGeneratorMIPS::visitDivI(LDivI *ins)
{
    // Extract the registers from this instruction
    Register lhs = ToRegister(ins->lhs());
    Register rhs = ToRegister(ins->rhs());
    Register dest = ToRegister(ins->output());
    Register temp = ToRegister(ins->getTemp(0));
    MDiv *mir = ins->mir();

    Label done;

    // Handle divide by zero.
    if (mir->canBeDivideByZero()) {
        if (mir->canTruncateInfinities()) {
            // Truncated division by zero is zero (Infinity|0 == 0)
            Label notzero;
            masm.ma_b(rhs, rhs, &notzero, Assembler::NonZero, ShortJump);
            masm.move32(Imm32(0), dest);
            masm.ma_b(&done, ShortJump);
            masm.bind(&notzero);
        } else {
            MOZ_ASSERT(mir->fallible());
            if (!bailoutCmp32(Assembler::Zero, rhs, rhs, ins->snapshot()))
                return false;
        }
    }

    // Handle an integer overflow exception from -2147483648 / -1.
    if (mir->canBeNegativeOverflow()) {
        Label notMinInt;
        masm.move32(Imm32(INT32_MIN), temp);
        masm.ma_b(lhs, temp, &notMinInt, Assembler::NotEqual, ShortJump);

        masm.move32(Imm32(-1), temp);
        if (mir->canTruncateOverflow()) {
            // (-INT32_MIN)|0 == INT32_MIN
            Label skip;
            masm.ma_b(rhs, temp, &skip, Assembler::NotEqual, ShortJump);
            masm.move32(Imm32(INT32_MIN), dest);
            masm.ma_b(&done, ShortJump);
            masm.bind(&skip);
        } else {
            MOZ_ASSERT(mir->fallible());
            if (!bailoutCmp32(Assembler::Equal, rhs, temp, ins->snapshot()))
                return false;
        }
        masm.bind(&notMinInt);
    }

    // Handle negative 0. (0/-Y)
    if (!mir->canTruncateNegativeZero() && mir->canBeNegativeZero()) {
        Label nonzero;
        masm.ma_b(lhs, lhs, &nonzero, Assembler::NonZero, ShortJump);
        if (!bailoutCmp32(Assembler::LessThan, rhs, Imm32(0), ins->snapshot()))
            return false;
        masm.bind(&nonzero);
    }
    // Note: above safety checks could not be verified as Ion seems to be
    // smarter and requires double arithmetic in such cases.

    // All regular. Lets call div.
    if (mir->canTruncateRemainder()) {
        masm.as_div(lhs, rhs);
        masm.as_mflo(dest);
    } else {
        // A non-zero remainder means the true result is not an int32:
        // bail out so it can be recomputed as a double.
        MOZ_ASSERT(mir->fallible());

        Label remainderNonZero;
        masm.ma_div_branch_overflow(dest, lhs, rhs, &remainderNonZero);
        if (!bailoutFrom(&remainderNonZero, ins->snapshot()))
            return false;
    }

    masm.bind(&done);
    return true;
}
// Signed division by a power of two, implemented with an arithmetic
// shift plus a rounding adjustment for negative numerators.
bool
CodeGeneratorMIPS::visitDivPowTwoI(LDivPowTwoI *ins)
{
    Register lhs = ToRegister(ins->numerator());
    Register dest = ToRegister(ins->output());
    Register tmp = ToRegister(ins->getTemp(0));
    int32_t shift = ins->shift();

    if (shift != 0) {
        MDiv *mir = ins->mir();
        if (!mir->isTruncated()) {
            // If the remainder is going to be != 0, bailout since this must
            // be a double. The low 'shift' bits of lhs are the remainder;
            // shifting them to the top makes the test a compare with zero.
            masm.ma_sll(tmp, lhs, Imm32(32 - shift));
            if (!bailoutCmp32(Assembler::NonZero, tmp, tmp, ins->snapshot()))
                return false;
        }

        // Adjust the value so that shifting produces a correctly rounded result
        // when the numerator is negative. See 10-1 "Signed Division by a Known
        // Power of 2" in Henry S. Warren, Jr.'s Hacker's Delight.
        if (shift > 1) {
            // tmp = (lhs >> 31) >>> (32 - shift): 2^shift - 1 when lhs is
            // negative, 0 otherwise.
            masm.ma_sra(tmp, lhs, Imm32(31));
            masm.ma_srl(tmp, tmp, Imm32(32 - shift));
            masm.add32(lhs, tmp);
        } else {
            // shift == 1: the adjustment is just the sign bit.
            masm.ma_srl(tmp, lhs, Imm32(32 - shift));
            masm.add32(lhs, tmp);
        }

        // Do the shift.
        masm.ma_sra(dest, tmp, Imm32(shift));
    } else {
        // Division by 1 is the identity.
        masm.move32(lhs, dest);
    }

    return true;
}
// 32-bit signed modulus with JS semantics. Guards INT_MIN % -1, X % 0,
// and results that should be -0, truncating or bailing out depending on
// the MIR flags. NOTE(review): uses INT_MIN where visitDivI uses
// INT32_MIN — same value on this target, but the spelling is
// inconsistent; confirm and unify.
bool
CodeGeneratorMIPS::visitModI(LModI *ins)
{
    // Extract the registers from this instruction
    Register lhs = ToRegister(ins->lhs());
    Register rhs = ToRegister(ins->rhs());
    Register dest = ToRegister(ins->output());
    Register callTemp = ToRegister(ins->callTemp());
    MMod *mir = ins->mir();
    Label done, prevent;

    // Save the dividend: the sign check at the end needs the original lhs.
    masm.move32(lhs, callTemp);

    // Prevent INT_MIN % -1;
    // The integer division will give INT_MIN, but we want -(double)INT_MIN.
    if (mir->canBeNegativeDividend()) {
        masm.ma_b(lhs, Imm32(INT_MIN), &prevent, Assembler::NotEqual, ShortJump);
        if (mir->isTruncated()) {
            // (INT_MIN % -1)|0 == 0
            Label skip;
            masm.ma_b(rhs, Imm32(-1), &skip, Assembler::NotEqual, ShortJump);
            masm.move32(Imm32(0), dest);
            masm.ma_b(&done, ShortJump);
            masm.bind(&skip);
        } else {
            MOZ_ASSERT(mir->fallible());
            if (!bailoutCmp32(Assembler::Equal, rhs, Imm32(-1), ins->snapshot()))
                return false;
        }
        masm.bind(&prevent);
    }

    // 0/X (with X < 0) is bad because both of these values *should* be
    // doubles, and the result should be -0.0, which cannot be represented in
    // integers. X/0 is bad because it will give garbage (or abort), when it
    // should give either \infty, -\infty or NAN.

    // Prevent 0 / X (with X < 0) and X / 0
    // testing X / Y. Compare Y with 0.
    // There are three cases: (Y < 0), (Y == 0) and (Y > 0)
    // If (Y < 0), then we compare X with 0, and bail if X == 0
    // If (Y == 0), then we simply want to bail.
    // if (Y > 0), we don't bail.

    if (mir->canBeDivideByZero()) {
        if (mir->isTruncated()) {
            // Truncated X % 0 is 0.
            Label skip;
            masm.ma_b(rhs, Imm32(0), &skip, Assembler::NotEqual, ShortJump);
            masm.move32(Imm32(0), dest);
            masm.ma_b(&done, ShortJump);
            masm.bind(&skip);
        } else {
            MOZ_ASSERT(mir->fallible());
            if (!bailoutCmp32(Assembler::Equal, rhs, Imm32(0), ins->snapshot()))
                return false;
        }
    }

    if (mir->canBeNegativeDividend()) {
        Label notNegative;
        masm.ma_b(rhs, Imm32(0), &notNegative, Assembler::GreaterThan, ShortJump);
        if (mir->isTruncated()) {
            // NaN|0 == 0 and (0 % -X)|0 == 0
            Label skip;
            masm.ma_b(lhs, Imm32(0), &skip, Assembler::NotEqual, ShortJump);
            masm.move32(Imm32(0), dest);
            masm.ma_b(&done, ShortJump);
            masm.bind(&skip);
        } else {
            MOZ_ASSERT(mir->fallible());
            if (!bailoutCmp32(Assembler::Equal, lhs, Imm32(0), ins->snapshot()))
                return false;
        }
        masm.bind(&notNegative);
    }

    // The remainder of a MIPS div lands in the 'hi' register.
    masm.as_div(lhs, rhs);
    masm.as_mfhi(dest);

    // If X%Y == 0 and X < 0, then we *actually* wanted to return -0.0
    if (mir->canBeNegativeDividend()) {
        if (mir->isTruncated()) {
            // -0.0|0 == 0
        } else {
            MOZ_ASSERT(mir->fallible());
            // See if X < 0
            masm.ma_b(dest, Imm32(0), &done, Assembler::NotEqual, ShortJump);
            if (!bailoutCmp32(Assembler::Signed, callTemp, Imm32(0), ins->snapshot()))
                return false;
        }
    }
    masm.bind(&done);
    return true;
}
// Modulus by a power of two via bit masking. Negative inputs are
// negated, masked, and negated back so the result keeps the dividend's
// sign (JS semantics), with a bailout when the result should be -0.
bool
CodeGeneratorMIPS::visitModPowTwoI(LModPowTwoI *ins)
{
    Register in = ToRegister(ins->getOperand(0));
    Register out = ToRegister(ins->getDef(0));
    MMod *mir = ins->mir();
    Label negative, done;

    masm.move32(in, out);
    // 0 % 2^k == 0: nothing to do.
    masm.ma_b(in, in, &done, Assembler::Zero, ShortJump);
    // Switch based on sign of the lhs.
    // Positive numbers are just a bitmask
    masm.ma_b(in, in, &negative, Assembler::Signed, ShortJump);
    {
        masm.and32(Imm32((1 << ins->shift()) - 1), out);
        masm.ma_b(&done, ShortJump);
    }

    // Negative numbers need a negate, bitmask, negate
    {
        masm.bind(&negative);
        masm.neg32(out);
        masm.and32(Imm32((1 << ins->shift()) - 1), out);
        masm.neg32(out);
    }
    if (mir->canBeNegativeDividend()) {
        if (!mir->isTruncated()) {
            // A zero result from a negative dividend is really -0, which
            // is not representable as an int32: bail out.
            JS_ASSERT(mir->fallible());
            if (!bailoutCmp32(Assembler::Equal, out, zero, ins->snapshot()))
                return false;
        } else {
            // -0|0 == 0
        }
    }
    masm.bind(&done);
    return true;
}
// Modulus by a power-of-two mask, delegated to the ma_mod_mask helper.
// When the dividend can be negative and the result is not truncated, the
// helper is given a bailout label (presumably taken when the true result
// would not be a valid int32, e.g. -0).
bool
CodeGeneratorMIPS::visitModMaskI(LModMaskI *ins)
{
    Register src = ToRegister(ins->getOperand(0));
    Register dest = ToRegister(ins->getDef(0));
    Register tmp = ToRegister(ins->getTemp(0));
    MMod *mir = ins->mir();

    bool needsBailout = mir->canBeNegativeDividend() && !mir->isTruncated();
    if (!needsBailout) {
        masm.ma_mod_mask(src, dest, tmp, ins->shift(), NULL);
        return true;
    }

    JS_ASSERT(mir->fallible());
    Label bail;
    masm.ma_mod_mask(src, dest, tmp, ins->shift(), &bail);
    return bailoutFrom(&bail, ins->snapshot());
}
// Bitwise NOT. The lowering never feeds a constant operand here.
bool
CodeGeneratorMIPS::visitBitNotI(LBitNotI *ins)
{
    const LAllocation *src = ins->getOperand(0);
    MOZ_ASSERT(!src->isConstant());

    masm.ma_not(ToRegister(ins->getDef(0)), ToRegister(src));
    return true;
}
// Bitwise OR / XOR / AND. The right-hand side is either an imm32 or an
// integer register.
bool
CodeGeneratorMIPS::visitBitOpI(LBitOpI *ins)
{
    Register lhsReg = ToRegister(ins->getOperand(0));
    Register destReg = ToRegister(ins->getDef(0));
    const LAllocation *rhs = ins->getOperand(1);
    bool rhsIsConst = rhs->isConstant();

    switch (ins->bitop()) {
      case JSOP_BITOR:
        if (rhsIsConst)
            masm.ma_or(destReg, lhsReg, Imm32(ToInt32(rhs)));
        else
            masm.ma_or(destReg, lhsReg, ToRegister(rhs));
        break;
      case JSOP_BITXOR:
        if (rhsIsConst)
            masm.ma_xor(destReg, lhsReg, Imm32(ToInt32(rhs)));
        else
            masm.ma_xor(destReg, lhsReg, ToRegister(rhs));
        break;
      case JSOP_BITAND:
        if (rhsIsConst)
            masm.ma_and(destReg, lhsReg, Imm32(ToInt32(rhs)));
        else
            masm.ma_and(destReg, lhsReg, ToRegister(rhs));
        break;
      default:
        JS_NOT_REACHED("unexpected binary opcode");
    }

    return true;
}
// 32-bit shifts (<<, >>, >>>). Shift amounts are masked to 0-31 per JS
// semantics. An unsigned shift by zero can produce a value above
// INT32_MAX, which is not a valid int32 result, so that case bails out
// when fallible.
bool
CodeGeneratorMIPS::visitShiftI(LShiftI *ins)
{
    Register lhs = ToRegister(ins->lhs());
    const LAllocation *rhs = ins->rhs();
    Register dest = ToRegister(ins->output());

    if (rhs->isConstant()) {
        int32_t shift = ToInt32(rhs) & 0x1F;
        switch (ins->bitop()) {
          case JSOP_LSH:
            if (shift)
                masm.ma_sll(dest, lhs, Imm32(shift));
            else
                masm.move32(lhs, dest);
            break;
          case JSOP_RSH:
            if (shift)
                masm.ma_sra(dest, lhs, Imm32(shift));
            else
                masm.move32(lhs, dest);
            break;
          case JSOP_URSH:
            if (shift) {
                masm.ma_srl(dest, lhs, Imm32(shift));
            } else {
                // x >>> 0 can overflow.
                masm.move32(lhs, dest);
                if (ins->mir()->toUrsh()->fallible()) {
                    // A negative dest means the unsigned value exceeds
                    // INT32_MAX: bail out.
                    if (!bailoutCmp32(Assembler::LessThan, dest, Imm32(0), ins->snapshot()))
                        return false;
                }
            }
            break;
          default:
            JS_NOT_REACHED("Unexpected shift op");
        }
    } else {
        // The shift amounts should be AND'ed into the 0-31 range
        masm.ma_and(dest, ToRegister(rhs), Imm32(0x1F));

        switch (ins->bitop()) {
          case JSOP_LSH:
            masm.ma_sll(dest, lhs, dest);
            break;
          case JSOP_RSH:
            masm.ma_sra(dest, lhs, dest);
            break;
          case JSOP_URSH:
            masm.ma_srl(dest, lhs, dest);
            if (ins->mir()->toUrsh()->fallible()) {
                // x >>> 0 can overflow.
                if (!bailoutCmp32(Assembler::LessThan, dest, Imm32(0), ins->snapshot()))
                    return false;
            }
            break;
          default:
            JS_NOT_REACHED("Unexpected shift op");
        }
    }

    return true;
}
// Unsigned right shift with a double result: the shifted value may
// exceed INT32_MAX, so it is converted from uint32 to double instead of
// being kept as an int32.
bool
CodeGeneratorMIPS::visitUrshD(LUrshD *ins)
{
    Register lhsReg = ToRegister(ins->lhs());
    Register tempReg = ToRegister(ins->temp());
    const LAllocation *rhs = ins->rhs();
    FloatRegister outReg = ToFloatRegister(ins->output());

    if (rhs->isConstant())
        masm.ma_srl(tempReg, lhsReg, Imm32(ToInt32(rhs)));
    else
        masm.ma_srl(tempReg, lhsReg, ToRegister(rhs));

    masm.convertUInt32ToDouble(tempReg, outReg);
    return true;
}
// Math.pow(x, 0.5). Unlike sqrt, pow(-Infinity, 0.5) is +Infinity, and
// pow(-0, 0.5) is +0, so both cases are handled before the sqrt.
bool
CodeGeneratorMIPS::visitPowHalfD(LPowHalfD *ins)
{
    FloatRegister input = ToFloatRegister(ins->input());
    FloatRegister output = ToFloatRegister(ins->output());

    Label done, skip;

    // Masm.pow(-Infinity, 0.5) == Infinity.
    masm.loadConstantDouble(js_NegativeInfinity, ScratchFloatReg);
    masm.ma_bc1d(input, ScratchFloatReg, &skip, Assembler::DoubleNotEqualOrUnordered, ShortJump);
    // input == -Infinity: negate the -Infinity in the scratch register to
    // produce +Infinity.
    masm.as_negd(output, ScratchFloatReg);
    masm.ma_b(&done, ShortJump);

    masm.bind(&skip);
    // Math.pow(-0, 0.5) == 0 == Math.pow(0, 0.5).
    // Adding 0 converts any -0 to 0.
    masm.loadConstantDouble(0.0, ScratchFloatReg);
    masm.as_addd(output, input, ScratchFloatReg);
    masm.as_sqrtd(output, output);

    masm.bind(&done);
    return true;
}
typedef MoveResolver::MoveOperand MoveOperand;

// Translate an LAllocation into a MoveOperand for the move resolver:
// a general register, a float register, or a stack slot addressed off
// StackPointer. Fix: ToStackOffset(a) was computed twice (once inside
// the assert, once for the offset); compute it once and use it in both.
MoveOperand
CodeGeneratorMIPS::toMoveOperand(const LAllocation *a) const
{
    if (a->isGeneralReg())
        return MoveOperand(ToRegister(a));
    if (a->isFloatReg())
        return MoveOperand(ToFloatRegister(a));

    // Stack slot: require word alignment.
    int32_t offset = ToStackOffset(a);
    MOZ_ASSERT((offset & 3) == 0);

    // The way the stack slots work, we assume that everything from
    // depth == 0 downwards is writable. However, since our frame is included
    // in this, ensure that the frame gets skipped.
    if (gen->compilingAsmJS())
        offset -= AlignmentMidPrologue;

    return MoveOperand(StackPointer, offset);
}
// Out-of-line path holding the jump table for an LTableSwitch. The table
// itself is emitted by visitOutOfLineTableSwitch(); jumpLabel_ marks its
// start so emitTableSwitchDispatch() can index into it.
// Fix: the single-argument constructor is now 'explicit' to prevent
// accidental implicit conversion from MTableSwitch*.
class js::jit::OutOfLineTableSwitch : public OutOfLineCodeBase<CodeGeneratorMIPS>
{
    MTableSwitch *mir_;
    CodeLabel jumpLabel_;

    // Double-dispatch hook invoked by the OOL code emitter.
    bool accept(CodeGeneratorMIPS *codegen) {
        return codegen->visitOutOfLineTableSwitch(this);
    }

  public:
    explicit OutOfLineTableSwitch(MTableSwitch *mir)
      : mir_(mir)
    {}

    MTableSwitch *mir() const {
        return mir_;
    }

    CodeLabel *jumpLabel() {
        return &jumpLabel_;
    }
};
// Emit the jump table for a table switch: one absolute-address load plus
// branch per case. The addresses are CodeLabels so they can be patched
// once final code offsets are known.
bool
CodeGeneratorMIPS::visitOutOfLineTableSwitch(OutOfLineTableSwitch *ool)
{
    MTableSwitch *mir = ool->mir();

    masm.align(sizeof(void*));
    masm.bind(ool->jumpLabel()->src());
    if (!masm.addCodeLabel(*ool->jumpLabel()))
        return false;

    for (size_t i = 0; i < mir->numCases(); i++) {
        LBlock *caseblock = mir->getCase(i)->lir();
        Label *caseheader = caseblock->label();
        uint32_t caseoffset = caseheader->offset();

        // The entries of the jump table need to be absolute addresses and thus
        // must be patched after codegen is finished.
        CodeLabel cl;
        masm.ma_li(ScratchRegister, cl.dest());
        masm.branch(ScratchRegister);
        cl.src()->bind(caseoffset);
        if (!masm.addCodeLabel(cl))
            return false;
    }

    return true;
}
// Dispatch a table switch: normalize the index, bounds-check against the
// default case, then compute the address of the matching jump-table
// entry and branch through it.
bool
CodeGeneratorMIPS::emitTableSwitchDispatch(MTableSwitch *mir, const Register &index,
                                           const Register &address)
{
    Label *defaultcase = mir->getDefault()->lir()->label();

    // Lower value with low value
    if (mir->low() != 0)
        masm.subPtr(Imm32(mir->low()), index);

    // Jump to default case if input is out of range
    int32_t cases = mir->numCases();
    masm.branchPtr(Assembler::AboveOrEqual, index, ImmWord(cases), defaultcase);

    // To fill in the CodeLabels for the case entries, we need to first
    // generate the case entries (we don't yet know their offsets in the
    // instruction stream).
    OutOfLineTableSwitch *ool = new OutOfLineTableSwitch(mir);
    if (!addOutOfLineCode(ool))
        return false;

    // Compute the position where a pointer to the right case stands.
    masm.ma_li(address, ool->jumpLabel()->dest());
    // Scale the index by 16 — presumably each table entry emitted by
    // visitOutOfLineTableSwitch (address load + branch) occupies 16 bytes;
    // TODO confirm against ma_li/branch encodings.
    masm.lshiftPtr(Imm32(4), index);
    masm.addPtr(index, address);

    masm.branch(address);
    return true;
}
// Binary double-precision arithmetic (+, -, *, /), dispatched on the JS
// opcode.
bool
CodeGeneratorMIPS::visitMathD(LMathD *math)
{
    FloatRegister lhs = ToFloatRegister(math->getOperand(0));
    FloatRegister rhs = ToFloatRegister(math->getOperand(1));
    FloatRegister out = ToFloatRegister(math->getDef(0));

    switch (math->jsop()) {
      case JSOP_ADD:
        masm.as_addd(out, lhs, rhs);
        break;
      case JSOP_SUB:
        masm.as_subd(out, lhs, rhs);
        break;
      case JSOP_MUL:
        masm.as_muld(out, lhs, rhs);
        break;
      case JSOP_DIV:
        masm.as_divd(out, lhs, rhs);
        break;
      default:
        JS_NOT_REACHED("unexpected opcode");
    }
    return true;
}
// Math.floor to int32. Bails out for NaN and -0 (distinguished from +0
// via the high word of the double), and for results that clamp to
// INT_MIN/INT_MAX, which indicate the input was out of int32 range.
bool
CodeGeneratorMIPS::visitFloor(LFloor *lir)
{
    FloatRegister input = ToFloatRegister(lir->input());
    FloatRegister scratch = ScratchFloatReg;
    Register output = ToRegister(lir->output());

    Label skipCheck, done;

    // If Nan, 0 or -0 check for bailout
    masm.loadConstantDouble(0.0, scratch);
    masm.ma_bc1d(input, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);

    // If high part is not zero, it is NaN or -0, so we bail.
    masm.moveFromDoubleHi(input, SecondScratchReg);
    if (!bailoutCmp32(Assembler::NotEqual, SecondScratchReg, Imm32(0), lir->snapshot()))
        return false;

    // Input was zero, so return zero.
    masm.move32(Imm32(0), output);
    masm.ma_b(&done, ShortJump);

    masm.bind(&skipCheck);
    // floor.w.d rounds toward -Infinity and leaves the int32 result in
    // the low word of the float register.
    masm.as_floorwd(scratch, input);
    masm.moveFromDoubleLo(scratch, output);

    // INT_MIN/INT_MAX results mean the conversion saturated: bail out.
    if (!bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot()))
        return false;

    if (!bailoutCmp32(Assembler::Equal, output, Imm32(INT_MAX), lir->snapshot()))
        return false;

    masm.bind(&done);

    return true;
}
// Math.round to int32: floor(input + 0.5), with bailouts for NaN, -0,
// out-of-range results, and negative inputs that would round to -0.
bool
CodeGeneratorMIPS::visitRound(LRound *lir)
{
    FloatRegister input = ToFloatRegister(lir->input());
    FloatRegister temp = ToFloatRegister(lir->temp());
    FloatRegister scratch = ScratchFloatReg;
    Register output = ToRegister(lir->output());

    Label bail, negative, end, skipCheck;

    // Load 0.5 in the temp register.
    masm.loadConstantDouble(0.5, temp);

    // Branch to a slow path for negative inputs. Doesn't catch NaN or -0.
    masm.loadConstantDouble(0.0, scratch);
    masm.ma_bc1d(input, scratch, &negative, Assembler::DoubleLessThan, ShortJump);

    // If Nan, 0 or -0 check for bailout
    masm.ma_bc1d(input, scratch, &skipCheck, Assembler::DoubleNotEqual, ShortJump);

    // If high part is not zero, it is NaN or -0, so we bail.
    masm.moveFromDoubleHi(input, SecondScratchReg);
    if (!bailoutCmp32(Assembler::NotEqual, SecondScratchReg, Imm32(0), lir->snapshot()))
        return false;

    // Input was zero, so return zero.
    masm.move32(Imm32(0), output);
    masm.ma_b(&end, ShortJump);

    masm.bind(&skipCheck);
    // Positive input: floor(input + 0.5).
    masm.loadConstantDouble(0.5, scratch);
    masm.addDouble(input, scratch);
    masm.as_floorwd(scratch, scratch);

    masm.moveFromDoubleLo(scratch, output);

    // INT_MIN/INT_MAX results mean the conversion saturated: bail out.
    if (!bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot()))
        return false;

    if (!bailoutCmp32(Assembler::Equal, output, Imm32(INT_MAX), lir->snapshot()))
        return false;

    masm.jump(&end);

    // Input is negative, but isn't -0.
    masm.bind(&negative);
    masm.addDouble(input, temp);

    // If input + 0.5 >= 0, input is a negative number >= -0.5 and the
    // result is -0.
    masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, temp, scratch, &bail);
    if (!bailoutFrom(&bail, lir->snapshot()))
        return false;

    // Truncate and round toward zero.
    // This is off-by-one for everything but integer-valued inputs.
    masm.as_floorwd(scratch, temp);
    masm.moveFromDoubleLo(scratch, output);

    if (!bailoutCmp32(Assembler::Equal, output, Imm32(INT_MIN), lir->snapshot()))
        return false;

    masm.bind(&end);
    return true;
}
// Truncate (round toward zero) a double to int32 via the shared helper.
bool
CodeGeneratorMIPS::visitTruncateDToInt32(LTruncateDToInt32 *ins)
{
    FloatRegister in = ToFloatRegister(ins->input());
    Register out = ToRegister(ins->output());
    return emitTruncateDouble(in, out);
}
static const uint32_t FrameSizes[] = { 128, 256, 512, 1024 };

// Map a frame depth to the smallest size class that can hold it; frames
// deeper than every class get FrameSizeClass::None().
FrameSizeClass
FrameSizeClass::FromDepth(uint32_t frameDepth)
{
    uint32_t numClasses = JS_ARRAY_LENGTH(FrameSizes);
    for (uint32_t i = 0; i < numClasses; i++) {
        if (frameDepth < FrameSizes[i])
            return FrameSizeClass(i);
    }
    return FrameSizeClass::None();
}
// One-past-the-last valid frame size class.
FrameSizeClass
FrameSizeClass::ClassLimit()
{
    return FrameSizeClass(JS_ARRAY_LENGTH(FrameSizes));
}
// Return the frame size in bytes for this size class; only valid for a
// real class (not None/ClassLimit).
uint32_t
FrameSizeClass::frameSize() const
{
    MOZ_ASSERT(class_ != NO_FRAME_SIZE_CLASS_ID);
    MOZ_ASSERT(class_ < JS_ARRAY_LENGTH(FrameSizes));

    return FrameSizes[class_];
}
// NUNBOX32: build the (type, payload) register pair for the Value
// operand starting at 'pos'.
ValueOperand
CodeGeneratorMIPS::ToValue(LInstruction *ins, size_t pos)
{
    return ValueOperand(ToRegister(ins->getOperand(pos + TYPE_INDEX)),
                        ToRegister(ins->getOperand(pos + PAYLOAD_INDEX)));
}
// NUNBOX32: build the (type, payload) register pair for the
// instruction's Value output.
ValueOperand
CodeGeneratorMIPS::ToOutValue(LInstruction *ins)
{
    return ValueOperand(ToRegister(ins->getDef(TYPE_INDEX)),
                        ToRegister(ins->getDef(PAYLOAD_INDEX)));
}
// NUNBOX32: build the (type, payload) register pair from the temps
// starting at 'pos'.
ValueOperand
CodeGeneratorMIPS::ToTempValue(LInstruction *ins, size_t pos)
{
    return ValueOperand(ToRegister(ins->getTemp(pos + TYPE_INDEX)),
                        ToRegister(ins->getTemp(pos + PAYLOAD_INDEX)));
}
// Materialize a constant js::Value into its type/payload register pair.
bool
CodeGeneratorMIPS::visitValue(LValue *value)
{
    masm.moveValue(value->value(), ToOutValue(value));
    return true;
}
// Box a non-constant value. On NUNBOX32 the input operand and the output
// payload share a virtual register, so only the type tag needs writing.
bool
CodeGeneratorMIPS::visitBox(LBox *box)
{
    MOZ_ASSERT(!box->getOperand(0)->isConstant());

    const LDefinition *typeDef = box->getDef(TYPE_INDEX);
    masm.move32(Imm32(MIRTypeToTag(box->type())), ToRegister(typeDef));
    return true;
}
// Unbox a Value. The payload already sits in the right register, so only
// a type-tag guard is emitted, and only when the unbox is fallible.
// Note that for unbox, the type and payload indexes are switched on the
// inputs.
bool
CodeGeneratorMIPS::visitUnbox(LUnbox *unbox)
{
    MUnbox *mir = unbox->mir();
    if (!mir->fallible())
        return true;

    Register typeReg = ToRegister(unbox->type());
    return bailoutCmp32(Assembler::NotEqual, typeReg, Imm32(MIRTypeToTag(mir->type())),
                        unbox->snapshot());
}
// Load a double constant into the output FPU register.
bool
CodeGeneratorMIPS::visitDouble(LDouble *ins)
{
    masm.loadConstantDouble(ins->getDouble(), ToFloatRegister(ins->getDef(0)));
    return true;
}
Register
CodeGeneratorMIPS::splitTagForTest(const ValueOperand &value)
{
    // On NUNBOX32 the type tag already lives in its own register, so no
    // extraction code is needed.
    return value.typeReg();
}
bool
CodeGeneratorMIPS::visitTestDAndBranch(LTestDAndBranch *test)
{
    // Branch on the truthiness of a double.
    FloatRegister input = ToFloatRegister(test->input());
    MBasicBlock *ifTrue = test->ifTrue();
    MBasicBlock *ifFalse = test->ifFalse();
    masm.loadConstantDouble(0.0, ScratchFloatReg);
    // If 0, or NaN, the result is false.
    if (isNextBlock(ifFalse->lir())) {
        // ifFalse is the fall-through block: branch only when truthy.
        branchToBlock(Assembler::DoubleFloat, input, ScratchFloatReg, ifTrue,
                      Assembler::DoubleNotEqual);
    } else {
        // Branch to ifFalse on 0/NaN (equal-or-unordered), else jump to ifTrue.
        branchToBlock(Assembler::DoubleFloat, input, ScratchFloatReg, ifFalse,
                      Assembler::DoubleEqualOrUnordered);
        jumpToBlock(ifTrue);
    }
    return true;
}
bool
CodeGeneratorMIPS::visitCompareD(LCompareD *comp)
{
    // Set the output register to the 0/1 result of a double comparison.
    Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->mir()->jsop());
    masm.ma_cmp_set_double(ToRegister(comp->output()),
                           ToFloatRegister(comp->left()),
                           ToFloatRegister(comp->right()),
                           cond);
    return true;
}
bool
CodeGeneratorMIPS::visitCompareDAndBranch(LCompareDAndBranch *comp)
{
    // Compare two doubles and branch on the result.
    FloatRegister lhs = ToFloatRegister(comp->left());
    FloatRegister rhs = ToFloatRegister(comp->right());
    Assembler::DoubleCondition cond = JSOpToDoubleCondition(comp->cmpMir()->jsop());
    MBasicBlock *ifTrue = comp->ifTrue();
    MBasicBlock *ifFalse = comp->ifFalse();
    if (isNextBlock(ifFalse->lir())) {
        // ifFalse is the fall-through block: a single branch suffices.
        branchToBlock(Assembler::DoubleFloat, lhs, rhs, ifTrue, cond);
    } else {
        // Branch to ifFalse on the inverted condition, then jump to ifTrue.
        branchToBlock(Assembler::DoubleFloat, lhs, rhs, ifFalse,
                      Assembler::InvertCondition(cond));
        jumpToBlock(ifTrue);
    }
    return true;
}
bool
CodeGeneratorMIPS::visitCompareB(LCompareB *lir)
{
    // Strict equality of a Value against a boolean constant or register.
    MCompare *mir = lir->mir();
    const ValueOperand lhs = ToValue(lir, LCompareB::Lhs);
    const LAllocation *rhs = lir->rhs();
    const Register output = ToRegister(lir->output());
    MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);
    Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
    Label notBoolean, done;
    masm.branchTestBoolean(Assembler::NotEqual, lhs, &notBoolean);
    {
        // Same type: compare the boolean payloads.
        if (rhs->isConstant())
            masm.cmp32Set(cond, lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()), output);
        else
            masm.cmp32Set(cond, lhs.payloadReg(), ToRegister(rhs), output);
        masm.jump(&done);
    }
    masm.bind(&notBoolean);
    {
        // lhs is not a boolean, so strict-eq is false / strict-ne is true.
        masm.move32(Imm32(mir->jsop() == JSOP_STRICTNE), output);
    }
    masm.bind(&done);
    return true;
}
bool
CodeGeneratorMIPS::visitCompareBAndBranch(LCompareBAndBranch *lir)
{
    // Strict equality of a Value against a boolean, branching on the result.
    MCompare *mir = lir->mir();
    const ValueOperand lhs = ToValue(lir, LCompareBAndBranch::Lhs);
    const LAllocation *rhs = lir->rhs();
    MOZ_ASSERT(mir->jsop() == JSOP_STRICTEQ || mir->jsop() == JSOP_STRICTNE);
    // A non-boolean lhs decides the comparison immediately: STRICTEQ goes to
    // ifFalse, STRICTNE goes to ifTrue.
    MBasicBlock *mirNotBoolean = (mir->jsop() == JSOP_STRICTEQ) ? lir->ifFalse() : lir->ifTrue();
    branchToBlock(lhs.typeReg(), ImmType(JSVAL_TYPE_BOOLEAN), mirNotBoolean, Assembler::NotEqual);
    // Both are booleans: branch on the payload comparison.
    Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
    if (rhs->isConstant())
        emitBranch(lhs.payloadReg(), Imm32(rhs->toConstant()->toBoolean()), cond, lir->ifTrue(),
                   lir->ifFalse());
    else
        emitBranch(lhs.payloadReg(), ToRegister(rhs), cond, lir->ifTrue(), lir->ifFalse());
    return true;
}
bool
CodeGeneratorMIPS::visitCompareV(LCompareV *lir)
{
    // Equality comparison of two boxed Values, producing a 0/1 result.
    MCompare *mir = lir->mir();
    Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
    const ValueOperand lhs = ToValue(lir, LCompareV::LhsInput);
    const ValueOperand rhs = ToValue(lir, LCompareV::RhsInput);
    const Register output = ToRegister(lir->output());
    MOZ_ASSERT(IsEqualityOp(mir->jsop()));
    Label notEqual, done;
    // Differing type tags decide the comparison without looking at payloads.
    masm.ma_b(lhs.typeReg(), rhs.typeReg(), &notEqual, Assembler::NotEqual, ShortJump);
    {
        // Same tags: compare the payload words.
        masm.cmp32Set(cond, lhs.payloadReg(), rhs.payloadReg(), output);
        masm.ma_b(&done, ShortJump);
    }
    masm.bind(&notEqual);
    {
        // Tags differ: equality is false, inequality is true.
        masm.move32(Imm32(cond == Assembler::NotEqual), output);
    }
    masm.bind(&done);
    return true;
}
bool
CodeGeneratorMIPS::visitCompareVAndBranch(LCompareVAndBranch *lir)
{
    // Equality comparison of two boxed Values, branching on the result.
    MCompare *mir = lir->mir();
    Assembler::Condition cond = JSOpToCondition(mir->compareType(), mir->jsop());
    const ValueOperand lhs = ToValue(lir, LCompareVAndBranch::LhsInput);
    const ValueOperand rhs = ToValue(lir, LCompareVAndBranch::RhsInput);
    MOZ_ASSERT(mir->jsop() == JSOP_EQ || mir->jsop() == JSOP_STRICTEQ ||
               mir->jsop() == JSOP_NE || mir->jsop() == JSOP_STRICTNE);
    // Differing type tags decide the comparison: Equal goes to ifFalse,
    // NotEqual goes to ifTrue.
    MBasicBlock *notEqual = (cond == Assembler::Equal) ? lir->ifFalse() : lir->ifTrue();
    branchToBlock(lhs.typeReg(), rhs.typeReg(), notEqual, Assembler::NotEqual);
    // Same tags: branch on the payload comparison.
    emitBranch(lhs.payloadReg(), rhs.payloadReg(), cond, lir->ifTrue(), lir->ifFalse());
    return true;
}
bool
CodeGeneratorMIPS::visitUInt32ToDouble(LUInt32ToDouble *lir)
{
    // Convert an unsigned 32-bit integer to a double.
    Register input = ToRegister(lir->input());
    FloatRegister output = ToFloatRegister(lir->output());
    masm.convertUInt32ToDouble(input, output);
    return true;
}
bool
CodeGeneratorMIPS::visitNotI(LNotI *ins)
{
    // Logical not of an int32: output = (input == 0) ? 1 : 0.
    Register in = ToRegister(ins->input());
    Register out = ToRegister(ins->output());
    masm.cmp32Set(Assembler::Equal, in, Imm32(0), out);
    return true;
}
bool
CodeGeneratorMIPS::visitNotD(LNotD *ins)
{
    // Since this operation is not, we want to set a bit if
    // the double is falsey, which means 0.0, -0.0 or NaN.
    FloatRegister in = ToFloatRegister(ins->input());
    Register dest = ToRegister(ins->output());
    Label falsey, done;
    masm.loadConstantDouble(0.0, ScratchFloatReg);
    // Branch when in == 0.0 (covers -0.0) or in is NaN (unordered).
    masm.ma_bc1d(in, ScratchFloatReg, &falsey, Assembler::DoubleEqualOrUnordered, ShortJump);
    // Truthy input: !x is 0.
    masm.move32(Imm32(0), dest);
    masm.ma_b(&done, ShortJump);
    masm.bind(&falsey);
    // Falsey input: !x is 1.
    masm.move32(Imm32(1), dest);
    masm.bind(&done);
    return true;
}
bool
CodeGeneratorMIPS::visitLoadSlotV(LLoadSlotV *load)
{
    // Load a full boxed Value from a fixed slot of the slots array.
    Register base = ToRegister(load->input());
    int32_t offset = load->mir()->slot() * sizeof(js::Value);
    masm.loadValue(Address(base, offset), ToOutValue(load));
    return true;
}
bool
CodeGeneratorMIPS::visitLoadSlotT(LLoadSlotT *load)
{
    // Load a slot whose type is statically known, so only the needed part
    // of the Value is read.
    Register base = ToRegister(load->input());
    int32_t offset = load->mir()->slot() * sizeof(js::Value);
    if (load->mir()->type() == MIRType_Double)
        // Slot may hold an int32 or a double; loadInt32OrDouble handles both.
        masm.loadInt32OrDouble(Address(base, offset), ToFloatRegister(load->output()));
    else
        // Non-double: only the payload word is needed.
        masm.load32(Address(base, offset + NUNBOX32_PAYLOAD_OFFSET), ToRegister(load->output()));
    return true;
}
bool
CodeGeneratorMIPS::visitStoreSlotT(LStoreSlotT *store)
{
    // Store a statically-typed value into a slot, emitting the GC pre-barrier
    // and writing the type tag only when it may change.
    Register base = ToRegister(store->slots());
    int32_t offset = store->mir()->slot() * sizeof(js::Value);
    const LAllocation *value = store->value();
    MIRType valueType = store->mir()->value()->type();
    if (store->mir()->needsBarrier())
        emitPreBarrier(Address(base, offset), store->mir()->slotType());
    if (valueType == MIRType_Double) {
        // A double overwrites the whole Value; no separate tag write needed.
        masm.storeDouble(ToFloatRegister(value), Address(base, offset));
        return true;
    }
    // Store the type tag if needed.
    if (valueType != store->mir()->slotType())
        masm.storeTypeTag(ImmType(ValueTypeFromMIRType(valueType)), Address(base, offset));
    // Store the payload.
    if (value->isConstant())
        masm.storePayload(*value->toConstant(), Address(base, offset));
    else
        masm.storePayload(ToRegister(value), Address(base, offset));
    return true;
}
bool
CodeGeneratorMIPS::visitLoadElementT(LLoadElementT *load)
{
    // Load a statically-typed element from a dense-elements array; the index
    // may be a constant or a register.
    Register base = ToRegister(load->elements());
    if (load->mir()->type() == MIRType_Double) {
        FloatRegister fpreg = ToFloatRegister(load->output());
        if (load->index()->isConstant()) {
            Address source(base, ToInt32(load->index()) * sizeof(Value));
            if (load->mir()->loadDoubles())
                // Elements are known to be doubles; load directly.
                masm.loadDouble(source, fpreg);
            else
                // Element may be an int32 or a double; convert as needed.
                masm.loadInt32OrDouble(source, fpreg);
        } else {
            Register index = ToRegister(load->index());
            if (load->mir()->loadDoubles())
                masm.loadDouble(BaseIndex(base, index, TimesEight), fpreg);
            else
                masm.loadInt32OrDouble(base, index, fpreg);
        }
    } else {
        // Non-double output: only the payload word is loaded.
        if (load->index()->isConstant()) {
            Address source(base, ToInt32(load->index()) * sizeof(Value));
            masm.load32(source, ToRegister(load->output()));
        } else {
            BaseIndex source(base, ToRegister(load->index()), TimesEight);
            masm.load32(source, ToRegister(load->output()));
        }
    }
    MOZ_ASSERT(!load->mir()->needsHoleCheck());
    return true;
}
void
CodeGeneratorMIPS::storeElementTyped(const LAllocation *value, MIRType valueType,
                                     MIRType elementType, const Register &elements,
                                     const LAllocation *index)
{
    // Store a statically-typed value into a dense element, handling both
    // constant and register indexes.
    if (index->isConstant()) {
        Address dest = Address(elements, ToInt32(index) * sizeof(Value));
        if (valueType == MIRType_Double) {
            // A double overwrites the whole Value; no tag write needed.
            masm.storeDouble(ToFloatRegister(value), Address(dest.base, dest.offset));
            return;
        }
        // Store the type tag if needed.
        if (valueType != elementType)
            masm.storeTypeTag(ImmType(ValueTypeFromMIRType(valueType)), dest);
        // Store the payload.
        if (value->isConstant())
            masm.storePayload(*value->toConstant(), dest);
        else
            masm.storePayload(ToRegister(value), dest);
    } else {
        Register indexReg = ToRegister(index);
        if (valueType == MIRType_Double) {
            masm.storeDouble(ToFloatRegister(value), BaseIndex(elements, indexReg, TimesEight));
            return;
        }
        // Store the type tag if needed.
        if (valueType != elementType)
            masm.storeTypeTag(ImmType(ValueTypeFromMIRType(valueType)), elements, indexReg);
        // Store the payload.
        if (value->isConstant())
            masm.storePayload(*value->toConstant(), elements, indexReg);
        else
            masm.storePayload(ToRegister(value), elements, indexReg);
    }
}
bool
CodeGeneratorMIPS::visitGuardShape(LGuardShape *guard)
{
    // Bail out of Ion when the object's shape differs from the expected one.
    Register obj = ToRegister(guard->input());
    Register shapeReg = ToRegister(guard->tempInt());
    masm.loadPtr(Address(obj, JSObject::offsetOfShape()), shapeReg);
    return bailoutCmpPtr(Assembler::NotEqual, shapeReg, ImmGCPtr(guard->mir()->shape()),
                         guard->snapshot());
}
bool
CodeGeneratorMIPS::visitGuardObjectType(LGuardObjectType *guard)
{
    // Bail out of Ion depending on the object's type object; bailOnEquality()
    // flips the sense of the guard.
    Register obj = ToRegister(guard->input());
    Register typeReg = ToRegister(guard->tempInt());
    masm.loadPtr(Address(obj, JSObject::offsetOfType()), typeReg);
    Assembler::Condition cond;
    if (guard->mir()->bailOnEquality())
        cond = Assembler::Equal;
    else
        cond = Assembler::NotEqual;
    return bailoutCmpPtr(cond, typeReg, ImmGCPtr(guard->mir()->typeObject()), guard->snapshot());
}
bool
CodeGeneratorMIPS::visitGuardClass(LGuardClass *guard)
{
    // Bail out of Ion when the object's class does not match the expected
    // Class. The Class pointer is compared as a 32-bit immediate, which is
    // valid on 32-bit MIPS.
    Register obj = ToRegister(guard->input());
    Register tmp = ToRegister(guard->tempInt());
    masm.loadObjClass(obj, tmp);
    // Return the bailout result directly, consistent with visitGuardShape
    // and visitGuardObjectType (the original if/return-false/return-true
    // was an equivalent but redundant form).
    return bailoutCmpPtr(Assembler::NotEqual, tmp, Imm32((uint32_t)guard->mir()->getClass()),
                         guard->snapshot());
}
bool
CodeGeneratorMIPS::visitImplicitThis(LImplicitThis *lir)
{
    // Compute the implicit |this| for a call to |callee|.
    Register callee = ToRegister(lir->callee());
    const ValueOperand out = ToOutValue(lir);
    // The implicit |this| is always |undefined| if the function's environment
    // is the current global.
    masm.loadPtr(Address(callee, JSFunction::offsetOfEnvironment()), out.typeReg());
    GlobalObject *global = &gen->info().script()->global();
    // TODO: OOL stub path.
    // Bail out (rather than computing a non-global |this|) when the
    // environment is not the global object.
    if (!bailoutCmpPtr(Assembler::NotEqual, out.typeReg(), ImmGCPtr(global), lir->snapshot()))
        return false;
    masm.moveValue(UndefinedValue(), out);
    return true;
}
// VM-call wrapper for the interrupt callback; used by visitInterruptCheck's
// out-of-line path when the runtime's interrupt flag is observed set.
typedef bool (*InterruptCheckFn)(JSContext *);
static const VMFunction InterruptCheckInfo = FunctionInfo<InterruptCheckFn>(InterruptCheck);
bool
CodeGeneratorMIPS::generateInvalidateEpilogue()
{
    // Emit the code run when this Ion script is invalidated: record the
    // return address and IonScript pointer, then jump to the invalidation
    // thunk.
    // Ensure that there is enough space in the buffer for the OsiPoint
    // patching to occur. Otherwise, we could overwrite the invalidation
    // epilogue.
    for (size_t i = 0; i < sizeof(void *); i += Assembler::nopSize())
        masm.nop();
    masm.bind(&invalidate_);
    // Push the return address of the point that we bailed out at to the stack
    masm.Push(ra);
    // Push the Ion script onto the stack (when we determine what that
    // pointer is).
    invalidateEpilogueData_ = masm.pushWithPatch(ImmWord(uintptr_t(-1)));
    IonCode* thunk = GetIonContext()->compartment->ionCompartment()->getInvalidationThunk();
    masm.branch(thunk);
    // We should never reach this point in JIT code -- the invalidation thunk
    // should pop the invalidated JS frame and return directly to its caller.
    // masm.assumeUnreachable("Should have returned directly to its caller instead of here.");
    masm.breakpoint();
    return true;
}
void
ParallelGetPropertyIC::initializeAddCacheState(LInstruction *ins, AddCacheState *addState)
{
    // Can always use the scratch register on MIPS (the original comment said
    // "ARM" — copied from the ARM port).
    JS_ASSERT(ins->isGetPropertyCacheV() || ins->isGetPropertyCacheT());
    addState->dispatchScratch = ScratchRegister;
}
bool
CodeGeneratorMIPS::visitAsmJSStoreHeap(LAsmJSStoreHeap *ins)
{
    // Store a value into the asm.js heap. Constant pointers were validated
    // at compile time and store directly; register pointers are bounds-checked
    // and out-of-range stores are silently skipped.
    //
    // BUG FIX: the original code had an unconditional (unchecked) store path
    // ending in `return true;` that made the bounds-checked path below it
    // dead code — non-constant stores skipped the bounds check and never
    // registered the heap access. The dead unchecked path is removed so the
    // checked path (mirroring visitAsmJSLoadHeap) actually executes.
    const MAsmJSStoreHeap *mir = ins->mir();
    const LAllocation *value = ins->value();
    const LAllocation *ptr = ins->ptr();
    bool isSigned;
    int size;
    bool isFloat = false;
    switch (mir->viewType()) {
      case ArrayBufferView::TYPE_INT8:    isSigned = true;  size = 8;  break;
      case ArrayBufferView::TYPE_UINT8:   isSigned = false; size = 8;  break;
      case ArrayBufferView::TYPE_INT16:   isSigned = true;  size = 16; break;
      case ArrayBufferView::TYPE_UINT16:  isSigned = false; size = 16; break;
      case ArrayBufferView::TYPE_INT32:   isSigned = true;  size = 32; break;
      case ArrayBufferView::TYPE_UINT32:  isSigned = false; size = 32; break;
      case ArrayBufferView::TYPE_FLOAT64: isFloat = true;   size = 64; break;
      case ArrayBufferView::TYPE_FLOAT32: isFloat = true;   size = 32; break;
      default: JS_NOT_REACHED("unexpected array type");
    }
    if (ptr->isConstant()) {
        // Constant offsets need no dynamic bounds check.
        int32_t ptrImm = ptr->toConstant()->toInt32();
        MOZ_ASSERT(ptrImm >= 0);
        if (isFloat) {
            if (size == 32) {
                JS_NOT_REACHED("No 32-bit floats in SpiderMonkey 24.");
            } else {
                masm.storeDouble(ToFloatRegister(value), Address(HeapReg, ptrImm));
            }
        } else {
            masm.ma_store(ToRegister(value), Address(HeapReg, ptrImm),
                          static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
        }
        return true;
    }
    Register ptrReg = ToRegister(ptr);
    // ma_BoundsCheck loads the (patchable) heap length into ScratchRegister;
    // the returned BufferOffset is recorded so the length can be patched.
    BufferOffset bo = masm.ma_BoundsCheck(ScratchRegister);
    Label rejoin;
    // Skip the store when the pointer is outside the heap.
    masm.ma_b(ptrReg, ScratchRegister, &rejoin, Assembler::AboveOrEqual, ShortJump);
    // Offset is ok, let's store value.
    if (isFloat) {
        if (size == 32) {
            JS_NOT_REACHED("No 32-bit floats in SpiderMonkey 24.");
        } else {
            masm.storeDouble(ToFloatRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne));
        }
    } else {
        masm.ma_store(ToRegister(value), BaseIndex(HeapReg, ptrReg, TimesOne),
                      static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
    }
    masm.bind(&rejoin);
    return gen->noteHeapAccess(AsmJSHeapAccess(bo.getOffset()));
}
bool
CodeGeneratorMIPS::visitAsmJSPassStackArg(LAsmJSPassStackArg *ins)
{
    // Write an outgoing asm.js call argument into its stack slot.
    const MAsmJSPassStackArg *mir = ins->mir();
    const LAllocation *arg = ins->arg();
    Address dst(StackPointer, mir->spOffset());
    if (arg->isConstant()) {
        masm.storePtr(ImmWord(ToInt32(arg)), dst);
    } else if (arg->isGeneralReg()) {
        masm.storePtr(ToRegister(arg), dst);
    } else {
        masm.storeDouble(ToFloatRegister(arg), dst);
    }
    return true;
}
bool
CodeGeneratorMIPS::visitUDiv(LUDiv *ins)
{
    // Unsigned 32-bit division; the quotient is read from LO via mflo.
    Register lhs = ToRegister(ins->lhs());
    Register rhs = ToRegister(ins->rhs());
    Register output = ToRegister(ins->output());
    Label done;
    if (ins->mir()->canBeDivideByZero()) {
        if (ins->mir()->isTruncated()) {
            // Truncated semantics: x / 0 produces 0 instead of bailing.
            Label notzero;
            masm.ma_b(rhs, rhs, &notzero, Assembler::NonZero, ShortJump);
            masm.move32(Imm32(0), output);
            masm.ma_b(&done, ShortJump);
            masm.bind(&notzero);
        } else {
            // Non-truncated: divide-by-zero must bail out of Ion.
            MOZ_ASSERT(ins->mir()->fallible());
            if (!bailoutCmp32(Assembler::Equal, rhs, Imm32(0), ins->snapshot()))
                return false;
        }
    }
    masm.as_divu(lhs, rhs);
    masm.as_mflo(output);
    if (!ins->mir()->isTruncated()) {
        // Non-truncated result must be representable as a non-negative int32;
        // bail out when the sign bit is set.
        if (!bailoutCmp32(Assembler::LessThan, output, Imm32(0), ins->snapshot()))
            return false;
    }
    masm.bind(&done);
    return true;
}
bool
CodeGeneratorMIPS::visitUMod(LUMod *ins)
{
    // Unsigned 32-bit modulus; the remainder is read from HI via mfhi.
    Register lhs = ToRegister(ins->lhs());
    Register rhs = ToRegister(ins->rhs());
    Register output = ToRegister(ins->output());
    Label done;
    if (ins->mir()->canBeDivideByZero()) {
        if (ins->mir()->isTruncated()) {
            // Infinity|0 == 0
            Label notzero;
            masm.ma_b(rhs, rhs, &notzero, Assembler::NonZero, ShortJump);
            masm.move32(Imm32(0), output);
            masm.ma_b(&done, ShortJump);
            masm.bind(&notzero);
        } else {
            // Non-truncated: modulus-by-zero must bail out of Ion.
            MOZ_ASSERT(ins->mir()->fallible());
            if (!bailoutCmp32(Assembler::Equal, rhs, Imm32(0), ins->snapshot()))
                return false;
        }
    }
    masm.as_divu(lhs, rhs);
    masm.as_mfhi(output);
    if (!ins->mir()->isTruncated()) {
        // Non-truncated result must be a non-negative int32; bail otherwise.
        if (!bailoutCmp32(Assembler::LessThan, output, Imm32(0), ins->snapshot()))
            return false;
    }
    masm.bind(&done);
    return true;
}
bool
CodeGeneratorMIPS::visitEffectiveAddress(LEffectiveAddress *ins)
{
    // Compute base + (index << scale) + displacement into the output register.
    const MEffectiveAddress *mir = ins->mir();
    BaseIndex address(ToRegister(ins->base()), ToRegister(ins->index()),
                      mir->scale(), mir->displacement());
    masm.computeEffectiveAddress(address, ToRegister(ins->output()));
    return true;
}
bool
CodeGeneratorMIPS::visitAsmJSLoadGlobalVar(LAsmJSLoadGlobalVar *ins)
{
    // Load an asm.js global variable (int32 or double) relative to GlobalReg.
    const MAsmJSLoadGlobalVar *mir = ins->mir();
    Address src(GlobalReg, mir->globalDataOffset());
    if (mir->type() == MIRType_Int32)
        masm.load32(src, ToRegister(ins->output()));
    else
        masm.loadDouble(src, ToFloatRegister(ins->output()));
    return true;
}
bool
CodeGeneratorMIPS::visitAsmJSStoreGlobalVar(LAsmJSStoreGlobalVar *ins)
{
    // Store an asm.js global variable (int32 or double) relative to GlobalReg.
    const MAsmJSStoreGlobalVar *mir = ins->mir();
    MIRType type = mir->value()->type();
    MOZ_ASSERT(IsNumberType(type));
    Address dst(GlobalReg, mir->globalDataOffset());
    if (type == MIRType_Int32)
        masm.store32(ToRegister(ins->value()), dst);
    else
        masm.storeDouble(ToFloatRegister(ins->value()), dst);
    return true;
}
bool
CodeGeneratorMIPS::visitAsmJSLoadFuncPtr(LAsmJSLoadFuncPtr *ins)
{
    // Load an entry from the asm.js function-pointer table located at
    // globalDataOffset() within the global data area.
    const MAsmJSLoadFuncPtr *mir = ins->mir();
    Register index = ToRegister(ins->index());
    Register out = ToRegister(ins->output());
    // NOTE(review): the LIR temp (ins->temp()) was fetched into a local but
    // never used; the BaseIndex addressing below does not need it, so the
    // dead local is removed. Confirm the LIR node's temp can be dropped too.
    unsigned addr = mir->globalDataOffset();
    BaseIndex source(GlobalReg, index, TimesFour, addr);
    masm.load32(source, out);
    return true;
}
bool
CodeGeneratorMIPS::visitAsmJSLoadFFIFunc(LAsmJSLoadFFIFunc *ins)
{
    // Load the FFI function pointer from the global data area.
    const MAsmJSLoadFFIFunc *mir = ins->mir();
    Register out = ToRegister(ins->output());
    masm.loadPtr(Address(GlobalReg, mir->globalDataOffset()), out);
    return true;
}
bool
CodeGeneratorMIPS::visitNegI(LNegI *ins)
{
    // Integer negation via ma_negu.
    masm.ma_negu(ToRegister(ins->output()), ToRegister(ins->input()));
    return true;
}
bool
CodeGeneratorMIPS::visitNegD(LNegD *ins)
{
    // Double negation via the FPU neg.d instruction.
    masm.as_negd(ToFloatRegister(ins->output()), ToFloatRegister(ins->input()));
    return true;
}
bool
CodeGeneratorMIPS::visitOsrValue(LOsrValue *value)
{
    // Load a boxed Value out of the OSR frame at the recorded frame offset.
    Register frameReg = ToRegister(value->getOperand(0));
    const ptrdiff_t frameOffset = value->mir()->frameOffset();
    masm.loadValue(Address(frameReg, frameOffset), ToOutValue(value));
    return true;
}
bool
CodeGeneratorMIPS::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic *ins)
{
    // Not yet implemented on MIPS.
    JS_NOT_REACHED("NYI");
    // JS_NOT_REACHED may compile to nothing in non-asserting builds; an
    // explicit return keeps this bool function from falling off the end (UB).
    return false;
}
bool
CodeGeneratorMIPS::visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic *ins)
{
    // Not yet implemented on MIPS.
    JS_NOT_REACHED("NYI");
    // JS_NOT_REACHED may compile to nothing in non-asserting builds; an
    // explicit return keeps this bool function from falling off the end (UB).
    return false;
}
bool
CodeGeneratorMIPS::visitInterruptCheck(LInterruptCheck *lir)
{
    // Poll the runtime's interrupt flag; when it is non-zero, call the
    // InterruptCheck VM function via an out-of-line path.
    OutOfLineCode *ool = oolCallVM(InterruptCheckInfo, lir, (ArgList()), StoreNothing());
    if (!ool)
        return false;
    masm.branch32(Assembler::NotEqual,
                  AbsoluteAddress((void*)&gen->compartment->rt->interrupt), Imm32(0),
                  ool->entry());
    masm.bind(ool->rejoin());
    return true;
}
bool
CodeGeneratorMIPS::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
{
    // Load a value from the asm.js heap. Constant pointers load directly;
    // register pointers are bounds-checked, with out-of-range loads yielding
    // NaN (float views) or 0 (integer views).
    const MAsmJSLoadHeap *mir = ins->mir();
    const LAllocation *ptr = ins->ptr();
    const LDefinition *out = ins->output();
    bool isSigned;
    int size;
    bool isFloat = false;
    switch (mir->viewType()) {
      case ArrayBufferView::TYPE_INT8:    isSigned = true;  size = 8;  break;
      case ArrayBufferView::TYPE_UINT8:   isSigned = false; size = 8;  break;
      case ArrayBufferView::TYPE_INT16:   isSigned = true;  size = 16; break;
      case ArrayBufferView::TYPE_UINT16:  isSigned = false; size = 16; break;
      case ArrayBufferView::TYPE_INT32:   isSigned = true;  size = 32; break;
      case ArrayBufferView::TYPE_UINT32:  isSigned = false; size = 32; break;
      case ArrayBufferView::TYPE_FLOAT64: isFloat = true;   size = 64; break;
      case ArrayBufferView::TYPE_FLOAT32: isFloat = true;   size = 32; break;
      default: JS_NOT_REACHED("unexpected array type");
    }
    if (ptr->isConstant()) {
        // Constant offsets need no dynamic bounds check.
        int32_t ptrImm = ptr->toConstant()->toInt32();
        MOZ_ASSERT(ptrImm >= 0);
        if (isFloat) {
            masm.loadDouble(Address(HeapReg, ptrImm), ToFloatRegister(out));
        } else {
            masm.ma_load(ToRegister(out), Address(HeapReg, ptrImm),
                         static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
        }
        return true;
    }
    Register ptrReg = ToRegister(ptr);
    // ma_BoundsCheck loads the (patchable) heap length into ScratchRegister;
    // the BufferOffset is recorded below so the length can be patched.
    BufferOffset bo = masm.ma_BoundsCheck(ScratchRegister);
    Label outOfRange;
    Label done;
    masm.ma_b(ptrReg, ScratchRegister, &outOfRange, Assembler::AboveOrEqual, ShortJump);
    // Offset is ok, let's load value.
    if (isFloat) {
        if (size == 32)
            JS_NOT_REACHED("No 32-bit floats in SpiderMonkey 24.");
        else
            masm.loadDouble(BaseIndex(HeapReg, ptrReg, TimesOne), ToFloatRegister(out));
    } else {
        masm.ma_load(ToRegister(out), BaseIndex(HeapReg, ptrReg, TimesOne),
                     static_cast<LoadStoreSize>(size), isSigned ? SignExtend : ZeroExtend);
    }
    masm.ma_b(&done, ShortJump);
    masm.bind(&outOfRange);
    // Offset is out of range. Load default values.
    if (isFloat) {
        masm.moveDouble(NANReg, ToFloatRegister(out));
    } else {
        masm.move32(Imm32(0), ToRegister(out));
    }
    masm.bind(&done);
    return gen->noteHeapAccess(AsmJSHeapAccess(bo.getOffset()));
}
bool
CodeGeneratorMIPS::visitBoxDouble(LBoxDouble *box)
{
    // Split a double FPU register into the type/payload GPR pair of a Value.
    FloatRegister src = ToFloatRegister(box->getOperand(0));
    ValueOperand dest(ToRegister(box->getDef(TYPE_INDEX)),
                      ToRegister(box->getDef(PAYLOAD_INDEX)));
    masm.ma_mv(src, dest);
    return true;
}
bool
CodeGeneratorMIPS::visitMoveGroup(LMoveGroup *group)
{
    // Emit a group of parallel moves, letting the MoveResolver order them so
    // cycles and overlaps are handled correctly.
    if (!group->numMoves())
        return true;
    MoveResolver &resolver = masm.moveResolver();
    for (size_t i = 0; i < group->numMoves(); i++) {
        const LMove &move = group->getMove(i);
        const LAllocation *from = move.from();
        const LAllocation *to = move.to();
        // No bogus moves.
        JS_ASSERT(*from != *to);
        JS_ASSERT(!from->isConstant());
        JS_ASSERT(from->isDouble() == to->isDouble());
        // General-purpose and double moves use different register classes.
        MoveResolver::Move::Kind kind = from->isDouble() ? MoveResolver::Move::DOUBLE : MoveResolver::Move::GENERAL;
        if (!resolver.addMove(toMoveOperand(from), toMoveOperand(to), kind))
            return false;
    }
    if (!resolver.resolve())
        return false;
    MoveEmitter emitter(masm);
    emitter.emit(resolver);
    emitter.finish();
    return true;
}