blob: d921f1b3fa6eab8164237619185d0056e09e4249 [file] [log] [blame]
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/CodeGenerator.h"
#include "mozilla/Assertions.h"
#include "mozilla/Attributes.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/MathAlgorithms.h"
#include "mozilla/SizePrintfMacros.h"
#include "jslibmath.h"
#include "jsmath.h"
#include "jsnum.h"
#include "jsprf.h"
#include "builtin/Eval.h"
#include "builtin/TypedObject.h"
#include "gc/Nursery.h"
#include "irregexp/NativeRegExpMacroAssembler.h"
#include "jit/AtomicOperations.h"
#include "jit/BaselineCompiler.h"
#include "jit/IonBuilder.h"
#include "jit/IonCaches.h"
#include "jit/IonOptimizationLevels.h"
#include "jit/JitcodeMap.h"
#include "jit/JitSpewer.h"
#include "jit/Linker.h"
#include "jit/Lowering.h"
#include "jit/MIRGenerator.h"
#include "jit/MoveEmitter.h"
#include "jit/RangeAnalysis.h"
#include "jit/SharedICHelpers.h"
#include "vm/MatchPairs.h"
#include "vm/RegExpStatics.h"
#include "vm/TraceLogging.h"
#include "jsboolinlines.h"
#include "jit/MacroAssembler-inl.h"
#include "jit/shared/CodeGenerator-shared-inl.h"
#include "jit/shared/Lowering-shared-inl.h"
#include "vm/Interpreter-inl.h"
using namespace js;
using namespace js::jit;
using mozilla::DebugOnly;
using mozilla::FloatingPoint;
using mozilla::Maybe;
using mozilla::NegativeInfinity;
using mozilla::PositiveInfinity;
using mozilla::UniquePtr;
using JS::GenericNaN;
namespace js {
namespace jit {
// This out-of-line cache is used to do a double dispatch including it-self and
// the wrapped IonCache: OutOfLineCodeBase routes back into the CodeGenerator,
// which then dispatches on the concrete IC kind through IonCacheVisitor.
class OutOfLineUpdateCache :
  public OutOfLineCodeBase<CodeGenerator>,
  public IonCacheVisitor
{
  private:
    LInstruction* lir_;     // Instruction the cache is attached to.
    size_t cacheIndex_;     // Index of the wrapped IonCache in the codegen.
    RepatchLabel entry_;    // Patchable entry jump into this OOL path.

  public:
    OutOfLineUpdateCache(LInstruction* lir, size_t cacheIndex)
      : lir_(lir),
        cacheIndex_(cacheIndex)
    { }

    void bind(MacroAssembler* masm) {
        // The binding of the initial jump is done in
        // CodeGenerator::visitOutOfLineCache.
    }

    size_t getCacheIndex() const {
        return cacheIndex_;
    }
    LInstruction* lir() const {
        return lir_;
    }
    RepatchLabel& entry() {
        return entry_;
    }

    void accept(CodeGenerator* codegen) {
        codegen->visitOutOfLineCache(this);
    }

    // ICs' visit functions delegating the work to the CodeGen visit functions.
#define VISIT_CACHE_FUNCTION(op)                                        \
    void visit##op##IC(CodeGenerator* codegen) {                        \
        CodeGenerator::DataPtr<op##IC> ic(codegen, getCacheIndex());    \
        codegen->visit##op##IC(this, ic);                               \
    }

    IONCACHE_KIND_LIST(VISIT_CACHE_FUNCTION)
#undef VISIT_CACHE_FUNCTION
};
// This function is declared here because it needs to instantiate an
// OutOfLineUpdateCache, but we want to keep it visible inside the
// CodeGeneratorShared such that we can specialize inline caches in function of
// the architecture.
void
CodeGeneratorShared::addCache(LInstruction* lir, size_t cacheIndex)
{
    // SIZE_MAX is the sentinel used when allocating the cache failed; record
    // the OOM on the assembler instead of emitting anything.
    if (cacheIndex == SIZE_MAX) {
        masm.setOOM();
        return;
    }

    DataPtr<IonCache> cache(this, cacheIndex);
    MInstruction* mir = lir->mirRaw()->toInstruction();
    // With a resume point the cache knows the script/pc it belongs to;
    // without one it must be flagged idempotent.
    if (mir->resumePoint())
        cache->setScriptedLocation(mir->block()->info().script(),
                                   mir->resumePoint()->pc());
    else
        cache->setIdempotent();

    OutOfLineUpdateCache* ool = new(alloc()) OutOfLineUpdateCache(lir, cacheIndex);
    addOutOfLineCode(ool, mir);

    // Emit the patchable jump into the OOL path; execution rejoins here.
    cache->emitInitialJump(masm, ool->entry());
    masm.bind(ool->rejoin());
}
// Emit the out-of-line portion of an IonCache: record the fallback location,
// bind the entry label, then let the concrete IC kind generate its code via
// the visitor double dispatch.
void
CodeGenerator::visitOutOfLineCache(OutOfLineUpdateCache* ool)
{
    DataPtr<IonCache> cache(this, ool->getCacheIndex());

    // Register the location of the OOL path in the IC.
    cache->setFallbackLabel(masm.labelForPatch());
    masm.bind(&ool->entry());

    // Dispatch to ICs' accept functions.
    cache->accept(this, ool);
}
// Typed accessor for MNewStringObject's template object, downcast to
// StringObject via as<>().
StringObject*
MNewStringObject::templateObj() const {
    return &templateObj_->as<StringObject>();
}
// Construct a CodeGenerator over an already-lowered LIR graph; per-link state
// (script counts, SIMD template refresh flags) starts empty.
CodeGenerator::CodeGenerator(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masm)
  : CodeGeneratorSpecific(gen, graph, masm)
  , ionScriptLabels_(gen->alloc())
  , scriptCounts_(nullptr)
  , simdRefreshTemplatesDuringLink_(0)
{
}
CodeGenerator::~CodeGenerator()
{
    // Outside asm.js compilation, all absolute links must be resolved by now.
    MOZ_ASSERT_IF(!gen->compilingAsmJS(), masm.numAsmJSAbsoluteLinks() == 0);
    js_delete(scriptCounts_);
}
// VM fallback used when a string operand must be converted to a number.
typedef bool (*StringToNumberFn)(ExclusiveContext*, JSString*, double*);
static const VMFunction StringToNumberInfo = FunctionInfo<StringToNumberFn>(StringToNumber);
// Convert a boxed Value to an int32. NORMAL mode bails out on anything that
// is not losslessly convertible; TRUNCATE mode (bitwise-op semantics) also
// handles doubles and, via an OOL VM call, strings.
void
CodeGenerator::visitValueToInt32(LValueToInt32* lir)
{
    ValueOperand operand = ToValue(lir, LValueToInt32::Input);
    Register output = ToRegister(lir->output());
    FloatRegister temp = ToFloatRegister(lir->tempFloat());

    MDefinition* input;
    if (lir->mode() == LValueToInt32::NORMAL)
        input = lir->mirNormal()->input();
    else
        input = lir->mirTruncate()->input();

    Label fails;
    if (lir->mode() == LValueToInt32::TRUNCATE) {
        OutOfLineCode* oolDouble = oolTruncateDouble(temp, output, lir->mir());

        // We can only handle strings in truncation contexts, like bitwise
        // operations.
        Label* stringEntry;
        Label* stringRejoin;
        Register stringReg;
        if (input->mightBeType(MIRType_String)) {
            stringReg = ToRegister(lir->temp());
            // StringToNumber leaves the numeric result in |temp|.
            OutOfLineCode* oolString = oolCallVM(StringToNumberInfo, lir, ArgList(stringReg),
                                                 StoreFloatRegisterTo(temp));
            stringEntry = oolString->entry();
            stringRejoin = oolString->rejoin();
        } else {
            stringReg = InvalidReg;
            stringEntry = nullptr;
            stringRejoin = nullptr;
        }

        masm.truncateValueToInt32(operand, input, stringEntry, stringRejoin, oolDouble->entry(),
                                  stringReg, temp, output, &fails);
        masm.bind(oolDouble->rejoin());
    } else {
        masm.convertValueToInt32(operand, input, temp, output, &fails,
                                 lir->mirNormal()->canBeNegativeZero(),
                                 lir->mirNormal()->conversion());
    }

    // Any conversion failure leaves Ion code entirely.
    bailoutFrom(&fails, lir->snapshot());
}
// Convert a boxed Value to a double. The set of accepted tags depends on the
// MToDouble conversion mode; any other tag bails out.
void
CodeGenerator::visitValueToDouble(LValueToDouble* lir)
{
    MToDouble* mir = lir->mir();
    ValueOperand operand = ToValue(lir, LValueToDouble::Input);
    FloatRegister output = ToFloatRegister(lir->output());

    Register tag = masm.splitTagForTest(operand);

    Label isDouble, isInt32, isBool, isNull, isUndefined, done;
    bool hasBoolean = false, hasNull = false, hasUndefined = false;

    masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
    masm.branchTestInt32(Assembler::Equal, tag, &isInt32);

    if (mir->conversion() != MToFPInstruction::NumbersOnly) {
        masm.branchTestBoolean(Assembler::Equal, tag, &isBool);
        masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
        hasBoolean = true;
        hasUndefined = true;
        if (mir->conversion() != MToFPInstruction::NonNullNonStringPrimitives) {
            masm.branchTestNull(Assembler::Equal, tag, &isNull);
            hasNull = true;
        }
    }

    // Control reaches here only when no accepted tag matched above.
    bailout(lir->snapshot());

    if (hasNull) {
        // null converts to +0.0.
        masm.bind(&isNull);
        masm.loadConstantDouble(0.0, output);
        masm.jump(&done);
    }

    if (hasUndefined) {
        // undefined converts to NaN.
        masm.bind(&isUndefined);
        masm.loadConstantDouble(GenericNaN(), output);
        masm.jump(&done);
    }

    if (hasBoolean) {
        masm.bind(&isBool);
        masm.boolValueToDouble(operand, output);
        masm.jump(&done);
    }

    masm.bind(&isInt32);
    masm.int32ValueToDouble(operand, output);
    masm.jump(&done);

    masm.bind(&isDouble);
    masm.unboxDouble(operand, output);
    masm.bind(&done);
}
// Convert a boxed Value to a float32. Mirrors visitValueToDouble, except the
// double case must narrow to float32 afterwards.
void
CodeGenerator::visitValueToFloat32(LValueToFloat32* lir)
{
    MToFloat32* mir = lir->mir();
    ValueOperand operand = ToValue(lir, LValueToFloat32::Input);
    FloatRegister output = ToFloatRegister(lir->output());

    Register tag = masm.splitTagForTest(operand);

    Label isDouble, isInt32, isBool, isNull, isUndefined, done;
    bool hasBoolean = false, hasNull = false, hasUndefined = false;

    masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
    masm.branchTestInt32(Assembler::Equal, tag, &isInt32);

    if (mir->conversion() != MToFPInstruction::NumbersOnly) {
        masm.branchTestBoolean(Assembler::Equal, tag, &isBool);
        masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
        hasBoolean = true;
        hasUndefined = true;
        if (mir->conversion() != MToFPInstruction::NonNullNonStringPrimitives) {
            masm.branchTestNull(Assembler::Equal, tag, &isNull);
            hasNull = true;
        }
    }

    // Control reaches here only when no accepted tag matched above.
    bailout(lir->snapshot());

    if (hasNull) {
        masm.bind(&isNull);
        masm.loadConstantFloat32(0.0f, output);
        masm.jump(&done);
    }

    if (hasUndefined) {
        masm.bind(&isUndefined);
        masm.loadConstantFloat32(float(GenericNaN()), output);
        masm.jump(&done);
    }

    if (hasBoolean) {
        masm.bind(&isBool);
        masm.boolValueToFloat32(operand, output);
        masm.jump(&done);
    }

    masm.bind(&isInt32);
    masm.int32ValueToFloat32(operand, output);
    masm.jump(&done);

    masm.bind(&isDouble);
    // ARM and MIPS may not have a double register available if we've
    // allocated output as a float32.
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32)
    masm.unboxDouble(operand, ScratchDoubleReg);
    masm.convertDoubleToFloat32(ScratchDoubleReg, output);
#else
    masm.unboxDouble(operand, output);
    masm.convertDoubleToFloat32(output, output);
#endif
    masm.bind(&done);
}
void
CodeGenerator::visitInt32ToDouble(LInt32ToDouble* lir)
{
    // int32 -> double is always exact, so this conversion cannot fail.
    Register src = ToRegister(lir->input());
    FloatRegister dst = ToFloatRegister(lir->output());
    masm.convertInt32ToDouble(src, dst);
}
void
CodeGenerator::visitFloat32ToDouble(LFloat32ToDouble* lir)
{
    // Widening float32 -> double conversion; always exact, never fails.
    FloatRegister src = ToFloatRegister(lir->input());
    FloatRegister dst = ToFloatRegister(lir->output());
    masm.convertFloat32ToDouble(src, dst);
}
void
CodeGenerator::visitDoubleToFloat32(LDoubleToFloat32* lir)
{
    // Narrowing double -> float32 conversion.
    FloatRegister src = ToFloatRegister(lir->input());
    FloatRegister dst = ToFloatRegister(lir->output());
    masm.convertDoubleToFloat32(src, dst);
}
void
CodeGenerator::visitInt32ToFloat32(LInt32ToFloat32* lir)
{
    // int32 -> float32 conversion; no failure path is needed here.
    Register src = ToRegister(lir->input());
    FloatRegister dst = ToFloatRegister(lir->output());
    masm.convertInt32ToFloat32(src, dst);
}
void
CodeGenerator::visitDoubleToInt32(LDoubleToInt32* lir)
{
    // Convert a double to an int32. The masm helper jumps to |bail| when the
    // conversion fails (passing canBeNegativeZero so -0 is rejected only when
    // it is observable), and we leave Ion code in that case.
    FloatRegister in = ToFloatRegister(lir->input());
    Register out = ToRegister(lir->output());
    Label bail;
    masm.convertDoubleToInt32(in, out, &bail, lir->mir()->canBeNegativeZero());
    bailoutFrom(&bail, lir->snapshot());
}
void
CodeGenerator::visitFloat32ToInt32(LFloat32ToInt32* lir)
{
    // Convert a float32 to an int32, bailing out of Ion when the masm helper
    // reports a failed conversion (canBeNegativeZero controls -0 handling).
    FloatRegister in = ToFloatRegister(lir->input());
    Register out = ToRegister(lir->output());
    Label bail;
    masm.convertFloat32ToInt32(in, out, &bail, lir->mir()->canBeNegativeZero());
    bailoutFrom(&bail, lir->snapshot());
}
// Out-of-line helper: call js::EmulatesUndefined on |objreg| through the C++
// ABI and branch on the boolean result.
void
CodeGenerator::emitOOLTestObject(Register objreg,
                                 Label* ifEmulatesUndefined,
                                 Label* ifDoesntEmulateUndefined,
                                 Register scratch)
{
    // Volatile registers must survive the ABI call.
    saveVolatile(scratch);
    masm.setupUnalignedABICall(scratch);
    masm.passABIArg(objreg);
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, js::EmulatesUndefined));
    masm.storeCallResult(scratch);
    restoreVolatile(scratch);

    masm.branchIfTrueBool(scratch, ifEmulatesUndefined);
    masm.jump(ifDoesntEmulateUndefined);
}
// Base out-of-line code generator for all tests of the truthiness of an
// object, where the object might not be truthy. (Recall that per spec all
// objects are truthy, but we implement the JSCLASS_EMULATES_UNDEFINED class
// flag to permit objects to look like |undefined| in certain contexts,
// including in object truthiness testing.) We check truthiness inline except
// when we're testing it on a proxy (or if TI guarantees us that the specified
// object will never emulate |undefined|), in which case out-of-line code will
// call EmulatesUndefined for a conclusive answer.
class OutOfLineTestObject : public OutOfLineCodeBase<CodeGenerator>
{
    Register objreg_;   // Object to test.
    Register scratch_;  // Scratch register used by the OOL ABI call.

    Label* ifEmulatesUndefined_;
    Label* ifDoesntEmulateUndefined_;

#ifdef DEBUG
    // setInputAndTargets must run exactly once before accept is invoked.
    bool initialized() { return ifEmulatesUndefined_ != nullptr; }
#endif

  public:
    OutOfLineTestObject()
#ifdef DEBUG
      : ifEmulatesUndefined_(nullptr), ifDoesntEmulateUndefined_(nullptr)
#endif
    { }

    void accept(CodeGenerator* codegen) final override {
        MOZ_ASSERT(initialized());
        codegen->emitOOLTestObject(objreg_, ifEmulatesUndefined_, ifDoesntEmulateUndefined_,
                                   scratch_);
    }

    // Specify the register where the object to be tested is found, labels to
    // jump to if the object is truthy or falsy, and a scratch register for
    // use in the out-of-line path.
    void setInputAndTargets(Register objreg, Label* ifEmulatesUndefined, Label* ifDoesntEmulateUndefined,
                            Register scratch)
    {
        MOZ_ASSERT(!initialized());
        MOZ_ASSERT(ifEmulatesUndefined);
        objreg_ = objreg;
        scratch_ = scratch;
        ifEmulatesUndefined_ = ifEmulatesUndefined;
        ifDoesntEmulateUndefined_ = ifDoesntEmulateUndefined;
    }
};
// A subclass of OutOfLineTestObject containing two extra labels, for use when
// the ifTruthy/ifFalsy labels are needed in inline code as well as out-of-line
// code. The user should bind these labels in inline code, and specify them as
// targets via setInputAndTargets, as appropriate.
class OutOfLineTestObjectWithLabels : public OutOfLineTestObject
{
    Label label1_;
    Label label2_;

  public:
    OutOfLineTestObjectWithLabels() { }

    Label* label1() { return &label1_; }
    Label* label2() { return &label2_; }
};
// Kernel of the emulates-undefined test: wire up the OOL path, then emit the
// inline fast-path class-flag check. Control falls through (no jump, no bind)
// when the object does not emulate undefined.
void
CodeGenerator::testObjectEmulatesUndefinedKernel(Register objreg,
                                                 Label* ifEmulatesUndefined,
                                                 Label* ifDoesntEmulateUndefined,
                                                 Register scratch, OutOfLineTestObject* ool)
{
    ool->setInputAndTargets(objreg, ifEmulatesUndefined, ifDoesntEmulateUndefined, scratch);

    // Perform a fast-path check of the object's class flags if the object's
    // not a proxy. Let out-of-line code handle the slow cases that require
    // saving registers, making a function call, and restoring registers.
    masm.branchTestObjectTruthy(false, objreg, scratch, ool->entry(), ifEmulatesUndefined);
}
// Emulates-undefined test that binds |ifDoesntEmulateUndefined| to the
// fallthrough path; the label therefore must not be bound yet.
void
CodeGenerator::branchTestObjectEmulatesUndefined(Register objreg,
                                                 Label* ifEmulatesUndefined,
                                                 Label* ifDoesntEmulateUndefined,
                                                 Register scratch, OutOfLineTestObject* ool)
{
    MOZ_ASSERT(!ifDoesntEmulateUndefined->bound(),
               "ifDoesntEmulateUndefined will be bound to the fallthrough path");

    testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined, ifDoesntEmulateUndefined,
                                      scratch, ool);
    masm.bind(ifDoesntEmulateUndefined);
}
// Emulates-undefined test that jumps (rather than binds) to
// |ifDoesntEmulateUndefined|, for callers whose labels live elsewhere.
void
CodeGenerator::testObjectEmulatesUndefined(Register objreg,
                                           Label* ifEmulatesUndefined,
                                           Label* ifDoesntEmulateUndefined,
                                           Register scratch, OutOfLineTestObject* ool)
{
    testObjectEmulatesUndefinedKernel(objreg, ifEmulatesUndefined, ifDoesntEmulateUndefined,
                                      scratch, ool);
    masm.jump(ifDoesntEmulateUndefined);
}
// Emit an inline truthiness test of |value|: branch to |ifFalsy| for falsy
// values and fall through (without jumping) for truthy ones. Only tags the
// MIR says are possible are tested; |ool| must be non-null exactly when the
// value might be an object that could emulate undefined.
void
CodeGenerator::testValueTruthyKernel(const ValueOperand& value,
                                     const LDefinition* scratch1, const LDefinition* scratch2,
                                     FloatRegister fr,
                                     Label* ifTruthy, Label* ifFalsy,
                                     OutOfLineTestObject* ool,
                                     MDefinition* valueMIR)
{
    // Count the number of possible type tags we might have, so we'll know when
    // we've checked them all and hence can avoid emitting a tag check for the
    // last one. In particular, whenever tagCount is 1 that means we've tried
    // all but one of them already so we know exactly what's left based on the
    // mightBe* booleans.
    bool mightBeUndefined = valueMIR->mightBeType(MIRType_Undefined);
    bool mightBeNull = valueMIR->mightBeType(MIRType_Null);
    bool mightBeBoolean = valueMIR->mightBeType(MIRType_Boolean);
    bool mightBeInt32 = valueMIR->mightBeType(MIRType_Int32);
    bool mightBeObject = valueMIR->mightBeType(MIRType_Object);
    bool mightBeString = valueMIR->mightBeType(MIRType_String);
    bool mightBeSymbol = valueMIR->mightBeType(MIRType_Symbol);
    bool mightBeDouble = valueMIR->mightBeType(MIRType_Double);
    int tagCount = int(mightBeUndefined) + int(mightBeNull) +
        int(mightBeBoolean) + int(mightBeInt32) + int(mightBeObject) +
        int(mightBeString) + int(mightBeSymbol) + int(mightBeDouble);

    MOZ_ASSERT_IF(!valueMIR->emptyResultTypeSet(), tagCount > 0);

    // If we know we're null or undefined, we're definitely falsy, no
    // need to even check the tag.
    if (int(mightBeNull) + int(mightBeUndefined) == tagCount) {
        masm.jump(ifFalsy);
        return;
    }

    Register tag = masm.splitTagForTest(value);

    if (mightBeUndefined) {
        MOZ_ASSERT(tagCount > 1);
        masm.branchTestUndefined(Assembler::Equal, tag, ifFalsy);
        --tagCount;
    }

    if (mightBeNull) {
        MOZ_ASSERT(tagCount > 1);
        masm.branchTestNull(Assembler::Equal, tag, ifFalsy);
        --tagCount;
    }

    if (mightBeBoolean) {
        MOZ_ASSERT(tagCount != 0);
        Label notBoolean;
        // When this is the last possible tag, the tag check is unnecessary.
        if (tagCount != 1)
            masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
        masm.branchTestBooleanTruthy(false, value, ifFalsy);
        if (tagCount != 1)
            masm.jump(ifTruthy);
        // Else just fall through to truthiness.
        masm.bind(&notBoolean);
        --tagCount;
    }

    if (mightBeInt32) {
        MOZ_ASSERT(tagCount != 0);
        Label notInt32;
        if (tagCount != 1)
            masm.branchTestInt32(Assembler::NotEqual, tag, &notInt32);
        masm.branchTestInt32Truthy(false, value, ifFalsy);
        if (tagCount != 1)
            masm.jump(ifTruthy);
        // Else just fall through to truthiness.
        masm.bind(&notInt32);
        --tagCount;
    }

    if (mightBeObject) {
        MOZ_ASSERT(tagCount != 0);
        if (ool) {
            Label notObject;

            if (tagCount != 1)
                masm.branchTestObject(Assembler::NotEqual, tag, &notObject);

            Register objreg = masm.extractObject(value, ToRegister(scratch1));
            testObjectEmulatesUndefined(objreg, ifFalsy, ifTruthy, ToRegister(scratch2), ool);

            masm.bind(&notObject);
        } else {
            // No OOL path: the object cannot emulate undefined (see the
            // OutOfLineTestObject class comment), so any object is truthy.
            if (tagCount != 1)
                masm.branchTestObject(Assembler::Equal, tag, ifTruthy);
            // Else just fall through to truthiness.
        }
        --tagCount;
    } else {
        MOZ_ASSERT(!ool,
                   "We better not have an unused OOL path, since the code generator will try to "
                   "generate code for it but we never set up its labels, which will cause null "
                   "derefs of those labels.");
    }

    if (mightBeString) {
        // Test if a string is non-empty.
        MOZ_ASSERT(tagCount != 0);
        Label notString;
        if (tagCount != 1)
            masm.branchTestString(Assembler::NotEqual, tag, &notString);
        masm.branchTestStringTruthy(false, value, ifFalsy);
        if (tagCount != 1)
            masm.jump(ifTruthy);
        // Else just fall through to truthiness.
        masm.bind(&notString);
        --tagCount;
    }

    if (mightBeSymbol) {
        // All symbols are truthy.
        MOZ_ASSERT(tagCount != 0);
        if (tagCount != 1)
            masm.branchTestSymbol(Assembler::Equal, tag, ifTruthy);
        // Else fall through to ifTruthy.
        --tagCount;
    }

    if (mightBeDouble) {
        MOZ_ASSERT(tagCount == 1);
        // If we reach here the value is a double.
        masm.unboxDouble(value, fr);
        masm.branchTestDoubleTruthy(false, fr, ifFalsy);
        --tagCount;
    }

    MOZ_ASSERT(tagCount == 0);

    // Fall through for truthy.
}
// Full truthiness test: same as testValueTruthyKernel, but ends with an
// explicit jump to |ifTruthy| rather than falling through.
void
CodeGenerator::testValueTruthy(const ValueOperand& value,
                               const LDefinition* scratch1, const LDefinition* scratch2,
                               FloatRegister fr,
                               Label* ifTruthy, Label* ifFalsy,
                               OutOfLineTestObject* ool,
                               MDefinition* valueMIR)
{
    testValueTruthyKernel(value, scratch1, scratch2, fr, ifTruthy, ifFalsy, ool, valueMIR);
    masm.jump(ifTruthy);
}
// Return a label suitable for branching to |block|. When the target is a
// backedge that needs a patchable jump (see
// labelForBackedgeWithImplicitCheck), an inline trampoline is emitted and a
// heap-allocated label to it is returned instead.
Label*
CodeGenerator::getJumpLabelForBranch(MBasicBlock* block)
{
    // Skip past trivial blocks.
    block = skipTrivialBlocks(block);

    if (!labelForBackedgeWithImplicitCheck(block))
        return block->lir()->label();

    // We need to use a patchable jump for this backedge, but want to treat
    // this as a normal label target to simplify codegen. Efficiency isn't so
    // important here as these tests are extremely unlikely to be used in loop
    // backedges, so emit inline code for the patchable jump. Heap allocating
    // the label allows it to be used by out of line blocks.
    Label* res = alloc().lifoAlloc()->newInfallible<Label>();
    Label after;
    masm.jump(&after);
    masm.bind(res);
    jumpToBlock(block);
    masm.bind(&after);
    return res;
}
// Branch on the truthiness of an object (or object-or-null) operand.
void
CodeGenerator::visitTestOAndBranch(LTestOAndBranch* lir)
{
    MIRType inputType = lir->mir()->input()->type();
    MOZ_ASSERT(inputType == MIRType_ObjectOrNull || lir->mir()->operandMightEmulateUndefined(),
               "If the object couldn't emulate undefined, this should have been folded.");

    Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
    Label* falsy = getJumpLabelForBranch(lir->ifFalsy());
    Register input = ToRegister(lir->input());

    if (lir->mir()->operandMightEmulateUndefined()) {
        // A null pointer is falsy; real objects need the emulates-undefined
        // test.
        if (inputType == MIRType_ObjectOrNull)
            masm.branchTestPtr(Assembler::Zero, input, input, falsy);

        OutOfLineTestObject* ool = new(alloc()) OutOfLineTestObject();
        addOutOfLineCode(ool, lir->mir());

        testObjectEmulatesUndefined(input, falsy, truthy, ToRegister(lir->temp()), ool);
    } else {
        // Cannot emulate undefined: the only falsy possibility is null.
        MOZ_ASSERT(inputType == MIRType_ObjectOrNull);
        testZeroEmitBranch(Assembler::NotEqual, input, lir->ifTruthy(), lir->ifFalsy());
    }
}
// Branch on the truthiness of a boxed Value operand.
void
CodeGenerator::visitTestVAndBranch(LTestVAndBranch* lir)
{
    OutOfLineTestObject* ool = nullptr;
    MDefinition* input = lir->mir()->input();
    // Unfortunately, it's possible that someone (e.g. phi elimination) switched
    // out our input after we did cacheOperandMightEmulateUndefined. So we
    // might think it can emulate undefined _and_ know that it can't be an
    // object.
    if (lir->mir()->operandMightEmulateUndefined() && input->mightBeType(MIRType_Object)) {
        ool = new(alloc()) OutOfLineTestObject();
        addOutOfLineCode(ool, lir->mir());
    }

    Label* truthy = getJumpLabelForBranch(lir->ifTruthy());
    Label* falsy = getJumpLabelForBranch(lir->ifFalsy());

    testValueTruthy(ToValue(lir, LTestVAndBranch::Input),
                    lir->temp1(), lir->temp2(),
                    ToFloatRegister(lir->tempFloat()),
                    truthy, falsy, ool, input);
}
// Dispatch to a case block by comparing the input against each case's
// function pointer (or, when available, the function's ObjectGroup). The last
// case or the fallback is reached with an unconditional jump, so no compare
// is emitted for it.
void
CodeGenerator::visitFunctionDispatch(LFunctionDispatch* lir)
{
    MFunctionDispatch* mir = lir->mir();
    Register input = ToRegister(lir->input());
    Label* lastLabel;
    size_t casesWithFallback;

    // Determine if the last case is fallback or an ordinary case.
    if (!mir->hasFallback()) {
        MOZ_ASSERT(mir->numCases() > 0);
        casesWithFallback = mir->numCases();
        lastLabel = skipTrivialBlocks(mir->getCaseBlock(mir->numCases() - 1))->lir()->label();
    } else {
        casesWithFallback = mir->numCases() + 1;
        lastLabel = skipTrivialBlocks(mir->getFallback())->lir()->label();
    }

    // Compare function pointers, except for the last case.
    for (size_t i = 0; i < casesWithFallback - 1; i++) {
        MOZ_ASSERT(i < mir->numCases());

        LBlock* target = skipTrivialBlocks(mir->getCaseBlock(i))->lir();
        if (ObjectGroup* funcGroup = mir->getCaseObjectGroup(i)) {
            // Match on the object's group rather than its identity.
            masm.branchPtr(Assembler::Equal, Address(input, JSObject::offsetOfGroup()),
                           ImmGCPtr(funcGroup), target->label());
        } else {
            JSFunction* func = mir->getCase(i);
            masm.branchPtr(Assembler::Equal, input, ImmGCPtr(func), target->label());
        }
    }

    // Jump to the last case.
    masm.jump(lastLabel);
}
// Dispatch on the input object's ObjectGroup, using the InlinePropertyTable
// to map each group to its case block. Branch emission is delayed by one
// entry so the final branch can be inverted and retargeted at the fallback
// block, saving a branch + jump pair.
void
CodeGenerator::visitObjectGroupDispatch(LObjectGroupDispatch* lir)
{
    MObjectGroupDispatch* mir = lir->mir();
    Register input = ToRegister(lir->input());
    Register temp = ToRegister(lir->temp());

    // Load the incoming ObjectGroup in temp.
    masm.loadPtr(Address(input, JSObject::offsetOfGroup()), temp);

    // Compare ObjectGroups.
    MacroAssembler::BranchGCPtr lastBranch;
    LBlock* lastBlock = nullptr;
    InlinePropertyTable* propTable = mir->propTable();
    for (size_t i = 0; i < mir->numCases(); i++) {
        JSFunction* func = mir->getCase(i);
        LBlock* target = skipTrivialBlocks(mir->getCaseBlock(i))->lir();

        // One function may appear under several groups; emit one branch per
        // matching property-table entry.
        DebugOnly<bool> found = false;
        for (size_t j = 0; j < propTable->numEntries(); j++) {
            if (propTable->getFunction(j) != func)
                continue;

            // Emit the branch deferred from the previous iteration.
            if (lastBranch.isInitialized())
                lastBranch.emit(masm);

            ObjectGroup* group = propTable->getObjectGroup(j);
            lastBranch = MacroAssembler::BranchGCPtr(Assembler::Equal, temp, ImmGCPtr(group),
                                                     target->label());
            lastBlock = target;
            found = true;
        }
        MOZ_ASSERT(found);
    }

    // Jump to fallback block if we have an unknown ObjectGroup. If there's no
    // fallback block, we should have handled all cases.

    if (!mir->hasFallback()) {
        MOZ_ASSERT(lastBranch.isInitialized());
#ifdef DEBUG
        // Verify in debug builds that the final group comparison matches.
        Label ok;
        lastBranch.relink(&ok);
        lastBranch.emit(masm);
        masm.assumeUnreachable("Unexpected ObjectGroup");
        masm.bind(&ok);
#endif
        if (!isNextBlock(lastBlock))
            masm.jump(lastBlock->label());
        return;
    }

    LBlock* fallback = skipTrivialBlocks(mir->getFallback())->lir();
    if (!lastBranch.isInitialized()) {
        if (!isNextBlock(fallback))
            masm.jump(fallback->label());
        return;
    }

    // Invert the deferred branch: not-equal goes to the fallback, equal falls
    // through (or jumps) to the last case block.
    lastBranch.invertCondition();
    lastBranch.relink(fallback->label());
    lastBranch.emit(masm);

    if (!isNextBlock(lastBlock))
        masm.jump(lastBlock->label());
}
// Map a boolean register to the interned "true"/"false" atoms.
void
CodeGenerator::visitBooleanToString(LBooleanToString* lir)
{
    Register input = ToRegister(lir->input());
    Register output = ToRegister(lir->output());
    const JSAtomState& names = GetJitContext()->runtime->names();
    Label true_, done;

    masm.branchTest32(Assembler::NonZero, input, input, &true_);
    masm.movePtr(ImmGCPtr(names.false_), output);
    masm.jump(&done);

    masm.bind(&true_);
    masm.movePtr(ImmGCPtr(names.true_), output);

    masm.bind(&done);
}
// Load the interned static string for int32 |input| into |output|, or jump to
// |ool| when the value has no static string. AboveOrEqual is an unsigned
// condition, so negative inputs also take the |ool| path.
void
CodeGenerator::emitIntToString(Register input, Register output, Label* ool)
{
    masm.branch32(Assembler::AboveOrEqual, input, Imm32(StaticStrings::INT_STATIC_LIMIT), ool);

    // Fast path for small integers: index directly into the static table.
    masm.movePtr(ImmPtr(&GetJitContext()->runtime->staticStrings().intStaticTable), output);
    masm.loadPtr(BaseIndex(output, input, ScalePointer), output);
}
// VM fallback for int32-to-string when the static-string fast path misses.
typedef JSFlatString* (*IntToStringFn)(ExclusiveContext*, int);
static const VMFunction IntToStringInfo = FunctionInfo<IntToStringFn>(Int32ToString<CanGC>);
// Convert an int32 to a string: static string table inline, Int32ToString in
// the VM otherwise.
void
CodeGenerator::visitIntToString(LIntToString* lir)
{
    Register input = ToRegister(lir->input());
    Register output = ToRegister(lir->output());

    OutOfLineCode* ool = oolCallVM(IntToStringInfo, lir, ArgList(input),
                                   StoreRegisterTo(output));

    emitIntToString(input, output, ool->entry());

    masm.bind(ool->rejoin());
}
// VM fallback for double-to-string when the int32 fast path does not apply.
typedef JSString* (*DoubleToStringFn)(ExclusiveContext*, double);
static const VMFunction DoubleToStringInfo = FunctionInfo<DoubleToStringFn>(NumberToString<CanGC>);
// Convert a double to a string. Fast path: if the double converts exactly to
// an int32, reuse the int-to-string static table; otherwise call
// NumberToString in the VM.
void
CodeGenerator::visitDoubleToString(LDoubleToString* lir)
{
    FloatRegister input = ToFloatRegister(lir->input());
    Register temp = ToRegister(lir->tempInt());
    Register output = ToRegister(lir->output());

    OutOfLineCode* ool = oolCallVM(DoubleToStringInfo, lir, ArgList(input),
                                   StoreRegisterTo(output));

    // Try double to integer conversion and run integer to string code.
    masm.convertDoubleToInt32(input, temp, ool->entry(), true);
    emitIntToString(temp, output, ool->entry());

    masm.bind(ool->rejoin());
}
// Generic VM fallback converting a primitive Value to a string.
typedef JSString* (*PrimitiveToStringFn)(JSContext*, HandleValue);
static const VMFunction PrimitiveToStringInfo = FunctionInfo<PrimitiveToStringFn>(ToStringSlow);
// Convert a primitive Value to a string. Cheap cases (strings, small ints,
// named atoms for undefined/null/booleans) are handled inline; doubles and
// symbols go to ToStringSlow in the VM; objects bail out of Ion.
void
CodeGenerator::visitValueToString(LValueToString* lir)
{
    ValueOperand input = ToValue(lir, LValueToString::Input);
    Register output = ToRegister(lir->output());

    OutOfLineCode* ool = oolCallVM(PrimitiveToStringInfo, lir, ArgList(input),
                                   StoreRegisterTo(output));

    Label done;
    Register tag = masm.splitTagForTest(input);
    const JSAtomState& names = GetJitContext()->runtime->names();

    // String
    if (lir->mir()->input()->mightBeType(MIRType_String)) {
        Label notString;
        masm.branchTestString(Assembler::NotEqual, tag, &notString);
        masm.unboxString(input, output);
        masm.jump(&done);
        masm.bind(&notString);
    }

    // Integer
    if (lir->mir()->input()->mightBeType(MIRType_Int32)) {
        Label notInteger;
        masm.branchTestInt32(Assembler::NotEqual, tag, &notInteger);
        Register unboxed = ToTempUnboxRegister(lir->tempToUnbox());
        unboxed = masm.extractInt32(input, unboxed);
        emitIntToString(unboxed, output, ool->entry());
        masm.jump(&done);
        masm.bind(&notInteger);
    }

    // Double
    if (lir->mir()->input()->mightBeType(MIRType_Double)) {
        // Note: no fastpath. Need two extra registers and can only convert doubles
        // that fit integers and are smaller than StaticStrings::INT_STATIC_LIMIT.
        masm.branchTestDouble(Assembler::Equal, tag, ool->entry());
    }

    // Undefined
    if (lir->mir()->input()->mightBeType(MIRType_Undefined)) {
        Label notUndefined;
        masm.branchTestUndefined(Assembler::NotEqual, tag, &notUndefined);
        masm.movePtr(ImmGCPtr(names.undefined), output);
        masm.jump(&done);
        masm.bind(&notUndefined);
    }

    // Null
    if (lir->mir()->input()->mightBeType(MIRType_Null)) {
        Label notNull;
        masm.branchTestNull(Assembler::NotEqual, tag, &notNull);
        masm.movePtr(ImmGCPtr(names.null), output);
        masm.jump(&done);
        masm.bind(&notNull);
    }

    // Boolean
    if (lir->mir()->input()->mightBeType(MIRType_Boolean)) {
        Label notBoolean, true_;
        masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
        masm.branchTestBooleanTruthy(true, input, &true_);
        masm.movePtr(ImmGCPtr(names.false_), output);
        masm.jump(&done);
        masm.bind(&true_);
        masm.movePtr(ImmGCPtr(names.true_), output);
        masm.jump(&done);
        masm.bind(&notBoolean);
    }

    // Object
    if (lir->mir()->input()->mightBeType(MIRType_Object)) {
        // Bail.
        MOZ_ASSERT(lir->mir()->fallible());
        Label bail;
        masm.branchTestObject(Assembler::Equal, tag, &bail);
        bailoutFrom(&bail, lir->snapshot());
    }

    // Symbol
    if (lir->mir()->input()->mightBeType(MIRType_Symbol))
        masm.branchTestSymbol(Assembler::Equal, tag, ool->entry());

#ifdef DEBUG
    // Every possible tag is handled above; anything else is a compiler bug.
    masm.assumeUnreachable("Unexpected type for MValueToString.");
#endif

    masm.bind(&done);
    masm.bind(ool->rejoin());
}
// VM fallback converting an arbitrary Value to an object.
typedef JSObject* (*ToObjectFn)(JSContext*, HandleValue, bool);
static const VMFunction ToObjectInfo = FunctionInfo<ToObjectFn>(ToObjectSlow);
// Convert a Value to "object or null": objects and null pass through via
// unboxNonDouble; any other value calls ToObjectSlow in the VM.
void
CodeGenerator::visitValueToObjectOrNull(LValueToObjectOrNull* lir)
{
    ValueOperand input = ToValue(lir, LValueToObjectOrNull::Input);
    Register output = ToRegister(lir->output());

    OutOfLineCode* ool = oolCallVM(ToObjectInfo, lir, ArgList(input, Imm32(0)),
                                   StoreRegisterTo(output));

    Label done;
    masm.branchTestObject(Assembler::Equal, input, &done);
    masm.branchTestNull(Assembler::NotEqual, input, ool->entry());

    masm.bind(&done);
    masm.unboxNonDouble(input, output);

    masm.bind(ool->rejoin());
}
// VM call used to clone the source RegExp object for an MRegExp.
typedef JSObject* (*CloneRegExpObjectFn)(JSContext*, JSObject*);
static const VMFunction CloneRegExpObjectInfo =
    FunctionInfo<CloneRegExpObjectFn>(CloneRegExpObject);
// Materialize a RegExp literal by cloning its source object in the VM.
void
CodeGenerator::visitRegExp(LRegExp* lir)
{
    pushArg(ImmGCPtr(lir->mir()->source()));
    callVM(CloneRegExpObjectInfo, lir);
}
// Amount of space to reserve on the stack when executing RegExps inline:
// an InputOutputData, a MatchPairs header, and the maximum number of
// MatchPair entries.
static const size_t RegExpReservedStack = sizeof(irregexp::InputOutputData)
                                        + sizeof(MatchPairs)
                                        + RegExpObject::MaxPairCount * sizeof(MatchPair);
// Offset of the MatchPair vector within the reserved RegExp stack area, which
// is laid out as: InputOutputData | MatchPairs | MatchPair[MaxPairCount].
static size_t
RegExpPairsVectorStartOffset(size_t inputOutputDataStartOffset)
{
    return inputOutputDataStartOffset + sizeof(irregexp::InputOutputData) + sizeof(MatchPairs);
}
// Stack address of the pairCount field of the MatchPairs that follows the
// InputOutputData in the reserved RegExp stack area.
static Address
RegExpPairCountAddress(MacroAssembler& masm, size_t inputOutputDataStartOffset)
{
    return Address(masm.getStackPointer(), inputOutputDataStartOffset
                                           + sizeof(irregexp::InputOutputData)
                                           + MatchPairs::offsetOfPairCount());
}
// Prepare an InputOutputData and optional MatchPairs which space has been
// allocated for on the stack, and try to execute a RegExp on a string input.
// If the RegExp was successfully executed and matched the input, fallthrough,
// otherwise jump to notFound or failure.
static bool
PrepareAndExecuteRegExp(JSContext* cx, MacroAssembler& masm, Register regexp, Register input,
Register temp1, Register temp2, Register temp3,
size_t inputOutputDataStartOffset,
RegExpShared::CompilationMode mode,
Label* notFound, Label* failure)
{
size_t matchPairsStartOffset = inputOutputDataStartOffset + sizeof(irregexp::InputOutputData);
size_t pairsVectorStartOffset = RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
Address inputStartAddress(masm.getStackPointer(),
inputOutputDataStartOffset + offsetof(irregexp::InputOutputData, inputStart));
Address inputEndAddress(masm.getStackPointer(),
inputOutputDataStartOffset + offsetof(irregexp::InputOutputData, inputEnd));
Address matchesPointerAddress(masm.getStackPointer(),
inputOutputDataStartOffset + offsetof(irregexp::InputOutputData, matches));
Address startIndexAddress(masm.getStackPointer(),
inputOutputDataStartOffset + offsetof(irregexp::InputOutputData, startIndex));
Address matchResultAddress(masm.getStackPointer(),
inputOutputDataStartOffset + offsetof(irregexp::InputOutputData, result));
Address pairCountAddress = RegExpPairCountAddress(masm, inputOutputDataStartOffset);
Address pairsPointerAddress(masm.getStackPointer(),
matchPairsStartOffset + MatchPairs::offsetOfPairs());
Address pairsVectorAddress(masm.getStackPointer(), pairsVectorStartOffset);
RegExpStatics* res = cx->global()->getRegExpStatics(cx);
if (!res)
return false;
#ifdef JS_USE_LINK_REGISTER
if (mode != RegExpShared::MatchOnly)
masm.pushReturnAddress();
#endif
if (mode == RegExpShared::Normal) {
// First, fill in a skeletal MatchPairs instance on the stack. This will be
// passed to the OOL stub in the caller if we aren't able to execute the
// RegExp inline, and that stub needs to be able to determine whether the
// execution finished successfully.
masm.store32(Imm32(1), pairCountAddress);
masm.store32(Imm32(-1), pairsVectorAddress);
masm.computeEffectiveAddress(pairsVectorAddress, temp1);
masm.storePtr(temp1, pairsPointerAddress);
}
// Check for a linear input string.
masm.branchIfRope(input, failure);
// Get the RegExpShared for the RegExp.
masm.loadPtr(Address(regexp, NativeObject::getFixedSlotOffset(RegExpObject::PRIVATE_SLOT)), temp1);
masm.branchPtr(Assembler::Equal, temp1, ImmWord(0), failure);
// Don't handle RegExps which read and write to lastIndex.
masm.branchTest32(Assembler::NonZero, Address(temp1, RegExpShared::offsetOfFlags()),
Imm32(StickyFlag | GlobalFlag), failure);
if (mode == RegExpShared::Normal) {
// Don't handle RegExps with excessive parens.
masm.load32(Address(temp1, RegExpShared::offsetOfParenCount()), temp2);
masm.branch32(Assembler::AboveOrEqual, temp2, Imm32(RegExpObject::MaxPairCount), failure);
// Fill in the paren count in the MatchPairs on the stack.
masm.add32(Imm32(1), temp2);
masm.store32(temp2, pairCountAddress);
}
// Load the code pointer for the type of input string we have, and compute
// the input start/end pointers in the InputOutputData.
Register codePointer = temp1;
{
masm.loadStringChars(input, temp2);
masm.storePtr(temp2, inputStartAddress);
masm.loadStringLength(input, temp3);
Label isLatin1, done;
masm.branchTest32(Assembler::NonZero, Address(input, JSString::offsetOfFlags()),
Imm32(JSString::LATIN1_CHARS_BIT), &isLatin1);
{
masm.lshiftPtr(Imm32(1), temp3);
masm.loadPtr(Address(temp1, RegExpShared::offsetOfJitCode(mode, false)), codePointer);
}
masm.jump(&done);
{
masm.bind(&isLatin1);
masm.loadPtr(Address(temp1, RegExpShared::offsetOfJitCode(mode, true)), codePointer);
}
masm.bind(&done);
masm.addPtr(temp3, temp2);
masm.storePtr(temp2, inputEndAddress);
}
// Check the RegExpShared has been compiled for this type of input.
masm.branchPtr(Assembler::Equal, codePointer, ImmWord(0), failure);
masm.loadPtr(Address(codePointer, JitCode::offsetOfCode()), codePointer);
// Finish filling in the InputOutputData instance on the stack.
if (mode == RegExpShared::Normal) {
masm.computeEffectiveAddress(Address(masm.getStackPointer(), matchPairsStartOffset), temp2);
masm.storePtr(temp2, matchesPointerAddress);
}
masm.storePtr(ImmWord(0), startIndexAddress);
masm.store32(Imm32(0), matchResultAddress);
// Save any volatile inputs.
LiveGeneralRegisterSet volatileRegs;
if (input.volatile_())
volatileRegs.add(input);
if (regexp.volatile_())
volatileRegs.add(regexp);
// Execute the RegExp.
masm.computeEffectiveAddress(Address(masm.getStackPointer(), inputOutputDataStartOffset), temp2);
masm.PushRegsInMask(volatileRegs);
masm.setupUnalignedABICall(temp3);
masm.passABIArg(temp2);
masm.callWithABI(codePointer);
masm.PopRegsInMask(volatileRegs);
Label success;
masm.branch32(Assembler::Equal, matchResultAddress,
Imm32(RegExpRunStatus_Success_NotFound), notFound);
masm.branch32(Assembler::Equal, matchResultAddress,
Imm32(RegExpRunStatus_Error), failure);
// Lazily update the RegExpStatics.
masm.movePtr(ImmPtr(res), temp1);
Address pendingInputAddress(temp1, RegExpStatics::offsetOfPendingInput());
Address matchesInputAddress(temp1, RegExpStatics::offsetOfMatchesInput());
Address lazySourceAddress(temp1, RegExpStatics::offsetOfLazySource());
masm.patchableCallPreBarrier(pendingInputAddress, MIRType_String);
masm.patchableCallPreBarrier(matchesInputAddress, MIRType_String);
masm.patchableCallPreBarrier(lazySourceAddress, MIRType_String);
masm.storePtr(input, pendingInputAddress);
masm.storePtr(input, matchesInputAddress);
masm.storePtr(ImmWord(0), Address(temp1, RegExpStatics::offsetOfLazyIndex()));
masm.store32(Imm32(1), Address(temp1, RegExpStatics::offsetOfPendingLazyEvaluation()));
masm.loadPtr(Address(regexp, NativeObject::getFixedSlotOffset(RegExpObject::PRIVATE_SLOT)), temp2);
masm.loadPtr(Address(temp2, RegExpShared::offsetOfSource()), temp3);
masm.storePtr(temp3, lazySourceAddress);
masm.load32(Address(temp2, RegExpShared::offsetOfFlags()), temp3);
masm.store32(temp3, Address(temp1, RegExpStatics::offsetOfLazyFlags()));
return true;
}
// Forward declaration: emits a character-copying loop from |from| to |to|
// for |len| characters, with the given source/destination character widths
// (1 or 2 bytes). Used by CreateDependentString below; defined later in
// this file.
static void
CopyStringChars(MacroAssembler& masm, Register to, Register from, Register len,
                Register byteOpScratch, size_t fromWidth, size_t toWidth);
// Allocate a string for the substring of |base| delimited by the int32
// start/limit indices found at |startIndexAddress|/|limitIndexAddress|,
// leaving the result in |string|. Zero-length matches reuse the empty atom;
// sufficiently short matches become thin/fat inline strings with their
// characters copied; longer matches become dependent strings that share
// |base|'s characters. Jumps to |failure| if GC string allocation fails.
static void
CreateDependentString(MacroAssembler& masm, const JSAtomState& names,
                      bool latin1, Register string,
                      Register base, Register temp1, Register temp2,
                      BaseIndex startIndexAddress, BaseIndex limitIndexAddress,
                      Label* failure)
{
    // Compute the string length: temp1 = limit - start.
    masm.load32(startIndexAddress, temp2);
    masm.load32(limitIndexAddress, temp1);
    masm.sub32(temp2, temp1);

    Label done, nonEmpty;

    // Zero length matches use the empty string.
    masm.branchTest32(Assembler::NonZero, temp1, temp1, &nonEmpty);
    masm.movePtr(ImmGCPtr(names.empty), string);
    masm.jump(&done);

    masm.bind(&nonEmpty);
    Label notInline;

    int32_t maxInlineLength = latin1
                              ? (int32_t) JSFatInlineString::MAX_LENGTH_LATIN1
                              : (int32_t) JSFatInlineString::MAX_LENGTH_TWO_BYTE;
    masm.branch32(Assembler::Above, temp1, Imm32(maxInlineLength), &notInline);

    {
        // Make a thin or fat inline string.
        Label stringAllocated, fatInline;

        int32_t maxThinInlineLength = latin1
                                      ? (int32_t) JSThinInlineString::MAX_LENGTH_LATIN1
                                      : (int32_t) JSThinInlineString::MAX_LENGTH_TWO_BYTE;
        masm.branch32(Assembler::Above, temp1, Imm32(maxThinInlineLength), &fatInline);

        int32_t thinFlags = (latin1 ? JSString::LATIN1_CHARS_BIT : 0) | JSString::INIT_THIN_INLINE_FLAGS;
        masm.newGCString(string, temp2, failure);
        masm.store32(Imm32(thinFlags), Address(string, JSString::offsetOfFlags()));
        masm.jump(&stringAllocated);

        masm.bind(&fatInline);

        int32_t fatFlags = (latin1 ? JSString::LATIN1_CHARS_BIT : 0) | JSString::INIT_FAT_INLINE_FLAGS;
        masm.newGCFatInlineString(string, temp2, failure);
        masm.store32(Imm32(fatFlags), Address(string, JSString::offsetOfFlags()));

        masm.bind(&stringAllocated);
        masm.store32(temp1, Address(string, JSString::offsetOfLength()));

        // Spill |string| and |base| so their registers can be repurposed
        // while copying characters; they are popped back below.
        masm.push(string);
        masm.push(base);

        // Adjust the start index address for the above pushes.
        MOZ_ASSERT(startIndexAddress.base == masm.getStackPointer());
        BaseIndex newStartIndexAddress = startIndexAddress;
        newStartIndexAddress.offset += 2 * sizeof(void*);

        // Load chars pointer for the new string.
        masm.addPtr(ImmWord(JSInlineString::offsetOfInlineStorage()), string);

        // Load the source characters pointer, offset by the start index.
        masm.loadStringChars(base, base);
        masm.load32(newStartIndexAddress, temp2);
        if (latin1)
            masm.addPtr(temp2, base);
        else
            masm.computeEffectiveAddress(BaseIndex(base, temp2, TimesTwo), base);

        CopyStringChars(masm, string, base, temp1, temp2, latin1 ? 1 : 2, latin1 ? 1 : 2);

        // Null-terminate.
        if (latin1)
            masm.store8(Imm32(0), Address(string, 0));
        else
            masm.store16(Imm32(0), Address(string, 0));

        masm.pop(base);
        masm.pop(string);
    }

    masm.jump(&done);
    masm.bind(&notInline);

    {
        // Make a dependent string pointing into |base|'s character storage.
        int32_t flags = (latin1 ? JSString::LATIN1_CHARS_BIT : 0) | JSString::DEPENDENT_FLAGS;

        masm.newGCString(string, temp2, failure);
        masm.store32(Imm32(flags), Address(string, JSString::offsetOfFlags()));
        masm.store32(temp1, Address(string, JSString::offsetOfLength()));

        masm.loadPtr(Address(base, JSString::offsetOfNonInlineChars()), temp1);
        masm.load32(startIndexAddress, temp2);
        if (latin1)
            masm.addPtr(temp2, temp1);
        else
            masm.computeEffectiveAddress(BaseIndex(temp1, temp2, TimesTwo), temp1);
        masm.storePtr(temp1, Address(string, JSString::offsetOfNonInlineChars()));
        masm.storePtr(base, Address(string, JSDependentString::offsetOfBase()));

        // Follow any base pointer if the input is itself a dependent string.
        // Watch for undepended strings, which have a base pointer but don't
        // actually share their characters with it.
        Label noBase;
        masm.branchTest32(Assembler::Zero, Address(base, JSString::offsetOfFlags()),
                          Imm32(JSString::HAS_BASE_BIT), &noBase);
        masm.branchTest32(Assembler::NonZero, Address(base, JSString::offsetOfFlags()),
                          Imm32(JSString::FLAT_BIT), &noBase);
        masm.loadPtr(Address(base, JSDependentString::offsetOfBase()), temp1);
        masm.storePtr(temp1, Address(string, JSDependentString::offsetOfBase()));
        masm.bind(&noBase);
    }

    masm.bind(&done);
}
// Generate the stub called by LRegExpExec. On a successful inline match it
// returns a freshly allocated match-result array object in JSReturnOperand;
// null means "no match"; undefined tells the caller to take the OOL VM-call
// path instead. Returns nullptr on OOM.
JitCode*
JitCompartment::generateRegExpExecStub(JSContext* cx)
{
    Register regexp = CallTempReg0;
    Register input = CallTempReg1;
    ValueOperand result = JSReturnOperand;

    // We are free to clobber all registers, as LRegExpExec is a call instruction.
    AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
    regs.take(input);
    regs.take(regexp);

    // temp5 is used in single byte instructions when creating dependent
    // strings, and has restrictions on which register it can be on some
    // platforms.
    Register temp5;
    {
        AllocatableGeneralRegisterSet oregs = regs;
        do {
            temp5 = oregs.takeAny();
        } while (!MacroAssembler::canUseInSingleByteInstruction(temp5));
        regs.take(temp5);
    }

    Register temp1 = regs.takeAny();
    Register temp2 = regs.takeAny();
    Register temp3 = regs.takeAny();
    Register temp4 = regs.takeAny();

    ArrayObject* templateObject = cx->compartment()->regExps.getOrCreateMatchResultTemplateObject(cx);
    if (!templateObject)
        return nullptr;

    // The template object should have enough space for the maximum number of
    // pairs this stub can handle.
    MOZ_ASSERT(ObjectElements::VALUES_PER_HEADER + RegExpObject::MaxPairCount ==
               gc::GetGCKindSlots(templateObject->asTenured().getAllocKind()));

    MacroAssembler masm(cx);

    // The InputOutputData is placed above the return address on the stack.
    size_t inputOutputDataStartOffset = sizeof(void*);

    Label notFound, oolEntry;
    if (!PrepareAndExecuteRegExp(cx, masm, regexp, input, temp1, temp2, temp3,
                                 inputOutputDataStartOffset, RegExpShared::Normal,
                                 &notFound, &oolEntry))
    {
        return nullptr;
    }

    // Construct the result array; bail to the OOL path if nursery
    // allocation fails.
    Register object = temp1;
    masm.createGCObject(object, temp2, templateObject, gc::DefaultHeap, &oolEntry);

    Register matchIndex = temp2;
    masm.move32(Imm32(0), matchIndex);

    size_t pairsVectorStartOffset = RegExpPairsVectorStartOffset(inputOutputDataStartOffset);
    Address pairsVectorAddress(masm.getStackPointer(), pairsVectorStartOffset);
    Address pairCountAddress = RegExpPairCountAddress(masm, inputOutputDataStartOffset);

    size_t elementsOffset = NativeObject::offsetOfFixedElements();
    BaseIndex stringAddress(object, matchIndex, TimesEight, elementsOffset);

    // MatchPair is a (start, limit) int32 pair; the TimesEight scaling of
    // matchIndex below relies on its size.
    JS_STATIC_ASSERT(sizeof(MatchPair) == 8);
    BaseIndex stringIndexAddress(masm.getStackPointer(), matchIndex, TimesEight,
                                 pairsVectorStartOffset + offsetof(MatchPair, start));
    BaseIndex stringLimitAddress(masm.getStackPointer(), matchIndex, TimesEight,
                                 pairsVectorStartOffset + offsetof(MatchPair, limit));

    // Loop to construct the match strings. There are two different loops,
    // depending on whether the input is latin1.
    {
        Label isLatin1, done;
        masm.branchTest32(Assembler::NonZero, Address(input, JSString::offsetOfFlags()),
                          Imm32(JSString::LATIN1_CHARS_BIT), &isLatin1);

        // Emit both loop bodies; the two-byte variant (isLatin == 0) comes
        // first and jumps over the latin1 variant via &done.
        for (int isLatin = 0; isLatin <= 1; isLatin++) {
            if (isLatin)
                masm.bind(&isLatin1);

            Label matchLoop;
            masm.bind(&matchLoop);

            // A negative start index marks an unmatched capture group,
            // which becomes |undefined| in the result array.
            Label isUndefined, storeDone;
            masm.branch32(Assembler::LessThan, stringIndexAddress, Imm32(0), &isUndefined);

            CreateDependentString(masm, cx->names(), isLatin, temp3, input, temp4, temp5,
                                  stringIndexAddress, stringLimitAddress, &oolEntry);
            masm.storeValue(JSVAL_TYPE_STRING, temp3, stringAddress);

            masm.jump(&storeDone);
            masm.bind(&isUndefined);

            masm.storeValue(UndefinedValue(), stringAddress);
            masm.bind(&storeDone);

            masm.add32(Imm32(1), matchIndex);
            masm.branch32(Assembler::LessThanOrEqual, pairCountAddress, matchIndex, &done);
            masm.jump(&matchLoop);
        }

        masm.bind(&done);
    }

    // Fill in the rest of the output object.
    masm.store32(matchIndex, Address(object, elementsOffset + ObjectElements::offsetOfInitializedLength()));
    masm.store32(matchIndex, Address(object, elementsOffset + ObjectElements::offsetOfLength()));

    masm.loadPtr(Address(object, NativeObject::offsetOfSlots()), temp2);

    // Slot 0 holds .index and slot 1 holds .input, per the template
    // object's layout.
    MOZ_ASSERT(templateObject->numFixedSlots() == 0);
    MOZ_ASSERT(templateObject->lookupPure(cx->names().index)->slot() == 0);
    MOZ_ASSERT(templateObject->lookupPure(cx->names().input)->slot() == 1);

    masm.load32(pairsVectorAddress, temp3);
    masm.storeValue(JSVAL_TYPE_INT32, temp3, Address(temp2, 0));
    masm.storeValue(JSVAL_TYPE_STRING, input, Address(temp2, sizeof(Value)));

    // All done!
    masm.tagValue(JSVAL_TYPE_OBJECT, object, result);
    masm.ret();

    masm.bind(&notFound);
    masm.moveValue(NullValue(), result);
    masm.ret();

    // Use an undefined value to signal to the caller that the OOL stub needs to be called.
    masm.bind(&oolEntry);
    masm.moveValue(UndefinedValue(), result);
    masm.ret();

    Linker linker(masm);
    AutoFlushICache afc("RegExpExecStub");
    JitCode* code = linker.newCode<CanGC>(cx, OTHER_CODE);
    if (!code)
        return nullptr;

#ifdef JS_ION_PERF
    writePerfSpewerJitCodeProfile(code, "RegExpExecStub");
#endif

    // The stub contains patchable pre-barrier sites; enable them if the
    // zone currently needs incremental barriers.
    if (cx->zone()->needsIncrementalBarrier())
        code->togglePreBarriers(true);

    return code;
}
// Out-of-line path for LRegExpExec: performs the full VM call when the
// inline RegExpExec stub could not complete.
class OutOfLineRegExpExec : public OutOfLineCodeBase<CodeGenerator>
{
    LRegExpExec* lir_;

  public:
    explicit OutOfLineRegExpExec(LRegExpExec* lir)
      : lir_(lir)
    { }

    LRegExpExec* lir() const {
        return lir_;
    }

    void accept(CodeGenerator* codegen) {
        codegen->visitOutOfLineRegExpExec(this);
    }
};
// VM fallback used by the OOL path of LRegExpExec; the typedef must match
// the signature of regexp_exec_raw.
typedef bool (*RegExpExecRawFn)(JSContext* cx, HandleObject regexp,
                                HandleString input, MatchPairs* pairs, MutableHandleValue output);
static const VMFunction RegExpExecRawInfo = FunctionInfo<RegExpExecRawFn>(regexp_exec_raw);
// OOL path for LRegExpExec: call regexp_exec_raw with a pointer to the
// MatchPairs that live on the stack just above the InputOutputData.
void
CodeGenerator::visitOutOfLineRegExpExec(OutOfLineRegExpExec* ool)
{
    LRegExpExec* lir = ool->lir();
    Register input = ToRegister(lir->string());
    Register regexp = ToRegister(lir->regexp());

    // Any register not holding an operand can serve as scratch.
    AllocatableGeneralRegisterSet available(GeneralRegisterSet::All());
    available.take(input);
    available.take(regexp);
    Register matchPairsReg = available.takeAny();

    Address matchPairsAddress(masm.getStackPointer(),
                              sizeof(irregexp::InputOutputData));
    masm.computeEffectiveAddress(matchPairsAddress, matchPairsReg);

    // Arguments are pushed in reverse order of the callee's signature.
    pushArg(matchPairsReg);
    pushArg(input);
    pushArg(regexp);
    callVM(RegExpExecRawInfo, lir);

    masm.jump(ool->rejoin());
}
// Inline path for LRegExpExec: reserve the stub's stack area, call the
// per-compartment RegExpExec stub, and divert to the OOL VM call when the
// stub returns the undefined sentinel.
void
CodeGenerator::visitRegExpExec(LRegExpExec* lir)
{
    // Lowering pins the operands and result to the registers the stub uses.
    MOZ_ASSERT(ToRegister(lir->regexp()) == CallTempReg0);
    MOZ_ASSERT(ToRegister(lir->string()) == CallTempReg1);
    MOZ_ASSERT(GetValueOutput(lir) == JSReturnOperand);

    masm.reserveStack(RegExpReservedStack);

    OutOfLineRegExpExec* ool = new(alloc()) OutOfLineRegExpExec(lir);
    addOutOfLineCode(ool, lir->mir());

    JitCode* stub = gen->compartment->jitCompartment()->regExpExecStubNoBarrier();
    masm.call(stub);

    // Undefined means "inline execution failed; take the VM path".
    masm.branchTestUndefined(Assembler::Equal, JSReturnOperand, ool->entry());
    masm.bind(ool->rejoin());

    masm.freeStack(RegExpReservedStack);
}
// Sentinel returned by the RegExp test stub (instead of the usual 0/1) when
// inline execution failed and the caller must fall back to the VM.
static const int32_t RegExpTestFailedValue = 2;
// Generate the stub called by LRegExpTest: a match-only RegExp execution.
// Leaves 1 in ReturnReg on a match, 0 on no match, and RegExpTestFailedValue
// when the inline path could not run. Returns nullptr on OOM.
JitCode*
JitCompartment::generateRegExpTestStub(JSContext* cx)
{
    Register regexp = CallTempReg2;
    Register input = CallTempReg3;
    Register result = ReturnReg;

    MOZ_ASSERT(regexp != result && input != result);

    // We are free to clobber all registers, as LRegExpTest is a call instruction.
    AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
    regs.take(input);
    regs.take(regexp);
    Register temp1 = regs.takeAny();
    Register temp2 = regs.takeAny();
    Register temp3 = regs.takeAny();

    MacroAssembler masm(cx);

#ifdef JS_USE_LINK_REGISTER
    masm.pushReturnAddress();
#endif
    // Match-only mode needs just the InputOutputData; no MatchPairs vector
    // is reserved here.
    masm.reserveStack(sizeof(irregexp::InputOutputData));

    Label notFound, oolEntry;
    if (!PrepareAndExecuteRegExp(cx, masm, regexp, input, temp1, temp2, temp3, 0,
                                 RegExpShared::MatchOnly, &notFound, &oolEntry))
    {
        return nullptr;
    }

    Label done;

    masm.move32(Imm32(1), result);
    masm.jump(&done);

    masm.bind(&notFound);
    masm.move32(Imm32(0), result);
    masm.jump(&done);

    masm.bind(&oolEntry);
    masm.move32(Imm32(RegExpTestFailedValue), result);

    masm.bind(&done);
    masm.freeStack(sizeof(irregexp::InputOutputData));
    masm.ret();

    Linker linker(masm);
    AutoFlushICache afc("RegExpTestStub");

    JitCode* code = linker.newCode<CanGC>(cx, OTHER_CODE);
    if (!code)
        return nullptr;

#ifdef JS_ION_PERF
    writePerfSpewerJitCodeProfile(code, "RegExpTestStub");
#endif

    // Enable the stub's patchable pre-barrier sites if the zone currently
    // needs incremental barriers.
    if (cx->zone()->needsIncrementalBarrier())
        code->togglePreBarriers(true);

    return code;
}
// Out-of-line path for LRegExpTest: performs the full VM call when the
// inline RegExpTest stub signals failure.
class OutOfLineRegExpTest : public OutOfLineCodeBase<CodeGenerator>
{
    LRegExpTest* lir_;

  public:
    explicit OutOfLineRegExpTest(LRegExpTest* lir)
      : lir_(lir)
    { }

    LRegExpTest* lir() const {
        return lir_;
    }

    void accept(CodeGenerator* codegen) {
        codegen->visitOutOfLineRegExpTest(this);
    }
};
// VM fallback used by the OOL path of LRegExpTest; the typedef must match
// the signature of regexp_test_raw.
typedef bool (*RegExpTestRawFn)(JSContext* cx, HandleObject regexp,
                                HandleString input, bool* result);
static const VMFunction RegExpTestRawInfo = FunctionInfo<RegExpTestRawFn>(regexp_test_raw);
// OOL path for LRegExpTest: call regexp_test_raw. Arguments are pushed in
// reverse order of the callee's signature.
void
CodeGenerator::visitOutOfLineRegExpTest(OutOfLineRegExpTest* ool)
{
    LRegExpTest* lir = ool->lir();

    pushArg(ToRegister(lir->string()));
    pushArg(ToRegister(lir->regexp()));
    callVM(RegExpTestRawInfo, lir);

    masm.jump(ool->rejoin());
}
// Inline path for LRegExpTest: call the per-compartment RegExpTest stub and
// divert to the OOL VM call when it returns the failure sentinel.
void
CodeGenerator::visitRegExpTest(LRegExpTest* lir)
{
    // Lowering pins the operands and result to the registers the stub uses.
    MOZ_ASSERT(ToRegister(lir->regexp()) == CallTempReg2);
    MOZ_ASSERT(ToRegister(lir->string()) == CallTempReg3);
    MOZ_ASSERT(ToRegister(lir->output()) == ReturnReg);

    OutOfLineRegExpTest* ool = new(alloc()) OutOfLineRegExpTest(lir);
    addOutOfLineCode(ool, lir->mir());

    JitCode* stub = gen->compartment->jitCompartment()->regExpTestStubNoBarrier();
    masm.call(stub);

    masm.branch32(Assembler::Equal, ReturnReg, Imm32(RegExpTestFailedValue), ool->entry());
    masm.bind(ool->rejoin());
}
// VM call used by LRegExpReplace; the typedef must match RegExpReplace's
// signature: (cx, string, pattern, replacement).
typedef JSString* (*RegExpReplaceFn)(JSContext*, HandleString, HandleObject, HandleString);
static const VMFunction RegExpReplaceInfo = FunctionInfo<RegExpReplaceFn>(RegExpReplace);
// Push the arguments for RegExpReplace(cx, string, pattern, replacement) in
// reverse order and make the VM call. Constant string operands are pushed
// as immediate GC pointers; everything else comes from a register.
void
CodeGenerator::visitRegExpReplace(LRegExpReplace* lir)
{
    if (!lir->replacement()->isConstant())
        pushArg(ToRegister(lir->replacement()));
    else
        pushArg(ImmGCPtr(lir->replacement()->toConstant()->toString()));

    pushArg(ToRegister(lir->pattern()));

    if (!lir->string()->isConstant())
        pushArg(ToRegister(lir->string()));
    else
        pushArg(ImmGCPtr(lir->string()->toConstant()->toString()));

    callVM(RegExpReplaceInfo, lir);
}
// VM call used by LStringReplace; the typedef must match StringReplace's
// signature: (cx, string, pattern, replacement).
typedef JSString* (*StringReplaceFn)(JSContext*, HandleString, HandleString, HandleString);
static const VMFunction StringReplaceInfo = FunctionInfo<StringReplaceFn>(StringReplace);
// Push the arguments for StringReplace(cx, string, pattern, replacement) in
// reverse order and make the VM call. Constant string operands are pushed
// as immediate GC pointers; everything else comes from a register.
void
CodeGenerator::visitStringReplace(LStringReplace* lir)
{
    if (!lir->replacement()->isConstant())
        pushArg(ToRegister(lir->replacement()));
    else
        pushArg(ImmGCPtr(lir->replacement()->toConstant()->toString()));

    if (!lir->pattern()->isConstant())
        pushArg(ToRegister(lir->pattern()));
    else
        pushArg(ImmGCPtr(lir->pattern()->toConstant()->toString()));

    if (!lir->string()->isConstant())
        pushArg(ToRegister(lir->string()));
    else
        pushArg(ImmGCPtr(lir->string()->toConstant()->toString()));

    callVM(StringReplaceInfo, lir);
}
// Emit a call from Ion code into a shared (Baseline-style) fallback IC stub
// of the given kind, recording the IC entry for later linking and marking a
// safepoint at the call.
void
CodeGenerator::emitSharedStub(ICStub::Kind kind, LInstruction* lir)
{
    JSScript* script = lir->mirRaw()->block()->info().script();
    jsbytecode* pc = lir->mirRaw()->toInstruction()->resumePoint()->pc();

#ifdef JS_USE_LINK_REGISTER
    // Some architectures don't push the return address on the stack but
    // use the link register. In that case the stack isn't aligned. Push
    // to make sure we are aligned.
    masm.Push(Imm32(0));
#endif

    // Create descriptor signifying end of Ion frame.
    uint32_t descriptor = MakeFrameDescriptor(masm.framePushed(), JitFrame_IonJS);
    masm.Push(Imm32(descriptor));

    // Call into the stubcode.
    CodeOffset patchOffset;
    IonICEntry entry(script->pcToOffset(pc), ICEntry::Kind_Op, script);
    EmitCallIC(&patchOffset, masm);
    entry.setReturnOffset(CodeOffset(masm.currentOffset()));

    SharedStub sharedStub(kind, entry, patchOffset);
    masm.propagateOOM(sharedStubs_.append(sharedStub));

    // Fix up upon return: pop the descriptor, plus the alignment word on
    // link-register architectures.
    uint32_t callOffset = masm.currentOffset();
#ifdef JS_USE_LINK_REGISTER
    masm.freeStack(sizeof(intptr_t) * 2);
#else
    masm.freeStack(sizeof(intptr_t));
#endif
    markSafepointAt(callOffset, lir);
}
// Select the shared fallback IC kind from the bytecode op at the resume
// point: arithmetic ops share one fallback stub, comparison ops another.
void
CodeGenerator::visitBinarySharedStub(LBinarySharedStub* lir)
{
    JSOp jsop = JSOp(*lir->mirRaw()->toInstruction()->resumePoint()->pc());

    ICStub::Kind kind;
    switch (jsop) {
      case JSOP_ADD:
      case JSOP_SUB:
      case JSOP_MUL:
      case JSOP_DIV:
      case JSOP_MOD:
        kind = ICStub::Kind::BinaryArith_Fallback;
        break;
      case JSOP_LT:
      case JSOP_LE:
      case JSOP_GT:
      case JSOP_GE:
      case JSOP_EQ:
      case JSOP_NE:
      case JSOP_STRICTEQ:
      case JSOP_STRICTNE:
        kind = ICStub::Kind::Compare_Fallback;
        break;
      default:
        MOZ_CRASH("Unsupported jsop in shared stubs.");
    }

    emitSharedStub(kind, lir);
}
// Select the shared fallback IC kind from the bytecode op at the resume
// point: unary arithmetic ops use one fallback stub, property gets another.
void
CodeGenerator::visitUnarySharedStub(LUnarySharedStub* lir)
{
    JSOp jsop = JSOp(*lir->mir()->resumePoint()->pc());

    ICStub::Kind kind;
    switch (jsop) {
      case JSOP_BITNOT:
      case JSOP_NEG:
        kind = ICStub::Kind::UnaryArith_Fallback;
        break;
      case JSOP_CALLPROP:
      case JSOP_GETPROP:
      case JSOP_LENGTH:
        kind = ICStub::Kind::GetProp_Fallback;
        break;
      default:
        MOZ_CRASH("Unsupported jsop in shared stubs.");
    }

    emitSharedStub(kind, lir);
}
// VM call used to clone a lambda; the typedef must match js::Lambda's
// signature: (cx, fun, scopeChain).
typedef JSObject* (*LambdaFn)(JSContext*, HandleFunction, HandleObject);
static const VMFunction LambdaInfo = FunctionInfo<LambdaFn>(js::Lambda);
// Singleton lambdas always go through the VM (js::Lambda); arguments are
// pushed in reverse order of the callee's signature.
void
CodeGenerator::visitLambdaForSingleton(LLambdaForSingleton* lir)
{
    Register scopeChain = ToRegister(lir->scopeChain());

    pushArg(scopeChain);
    pushArg(ImmGCPtr(lir->mir()->info().fun));
    callVM(LambdaInfo, lir);
}
// Inline-allocate a clone of the template function and initialize it,
// falling back to a js::Lambda VM call if allocation fails.
void
CodeGenerator::visitLambda(LLambda* lir)
{
    Register scopeChain = ToRegister(lir->scopeChain());
    Register output = ToRegister(lir->output());
    Register tempReg = ToRegister(lir->temp());
    const LambdaFunctionInfo& info = lir->mir()->info();

    // OOL fallback: clone through the VM, storing the result in |output|.
    OutOfLineCode* ool = oolCallVM(LambdaInfo, lir, ArgList(ImmGCPtr(info.fun), scopeChain),
                                   StoreRegisterTo(output));

    // Singleton functions are handled by LLambdaForSingleton instead.
    MOZ_ASSERT(!info.singletonType);

    masm.createGCObject(output, tempReg, info.fun, gc::DefaultHeap, ool->entry());

    emitLambdaInit(output, scopeChain, info);

    if (info.flags & JSFunction::EXTENDED) {
        MOZ_ASSERT(info.fun->allowSuperProperty());
        static_assert(FunctionExtended::NUM_EXTENDED_SLOTS == 2, "All slots must be initialized");
        // Clear both extended slots so the object is fully initialized.
        masm.storeValue(UndefinedValue(), Address(output, FunctionExtended::offsetOfExtendedSlot(0)));
        masm.storeValue(UndefinedValue(), Address(output, FunctionExtended::offsetOfExtendedSlot(1)));
    }

    masm.bind(ool->rejoin());
}
// Out-of-line path for LLambdaArrow: clones the arrow function through the
// VM. It has two entries: the normal OOL entry (reached after the inline
// path spilled part of newTarget, which must be popped first) and
// |entryNoPop_| for callers that jump here without having pushed anything.
class OutOfLineLambdaArrow : public OutOfLineCodeBase<CodeGenerator>
{
  public:
    LLambdaArrow* lir;
    // Alternate entry that skips the restoring pop (see visitOutOfLineLambdaArrow).
    Label entryNoPop_;

    explicit OutOfLineLambdaArrow(LLambdaArrow* lir)
      : lir(lir)
    { }

    void accept(CodeGenerator* codegen) {
        codegen->visitOutOfLineLambdaArrow(this);
    }

    Label* entryNoPop() {
        return &entryNoPop_;
    }
};
// VM call used to clone an arrow function; the typedef must match
// js::LambdaArrow's signature: (cx, fun, scopeChain, newTarget).
typedef JSObject* (*LambdaArrowFn)(JSContext*, HandleFunction, HandleObject, HandleValue);
static const VMFunction LambdaArrowInfo = FunctionInfo<LambdaArrowFn>(js::LambdaArrow);
// OOL path for LLambdaArrow: restore the spilled half of newTarget (when
// entered from the inline path), then clone the arrow function via the VM.
void
CodeGenerator::visitOutOfLineLambdaArrow(OutOfLineLambdaArrow* ool)
{
    Register scopeChain = ToRegister(ool->lir->scopeChain());
    ValueOperand newTarget = ToValue(ool->lir, LLambdaArrow::NewTargetValue);
    Register output = ToRegister(ool->lir->output());
    const LambdaFunctionInfo& info = ool->lir->mir()->info();

    // When we get here, we may need to restore part of the newTarget,
    // which has been conscripted into service as a temp register.
    masm.pop(newTarget.scratchReg());

    // entryNoPop skips the pop above for callers that did not spill.
    masm.bind(ool->entryNoPop());

    saveLive(ool->lir);

    // Arguments are pushed in reverse order of the callee's signature.
    pushArg(newTarget);
    pushArg(scopeChain);
    pushArg(ImmGCPtr(info.fun));

    callVM(LambdaArrowInfo, ool->lir);
    StoreRegisterTo(output).generate(this);

    restoreLiveIgnore(ool->lir, StoreRegisterTo(output).clobbered());

    masm.jump(ool->rejoin());
}
// Inline-allocate an arrow function clone, storing |new.target| in its
// first extended slot; falls back to the OOL VM call on allocation failure
// or for singleton-typed functions.
void
CodeGenerator::visitLambdaArrow(LLambdaArrow* lir)
{
    Register scopeChain = ToRegister(lir->scopeChain());
    ValueOperand newTarget = ToValue(lir, LLambdaArrow::NewTargetValue);
    Register output = ToRegister(lir->output());
    const LambdaFunctionInfo& info = lir->mir()->info();

    OutOfLineLambdaArrow* ool = new (alloc()) OutOfLineLambdaArrow(lir);
    addOutOfLineCode(ool, lir->mir());

    MOZ_ASSERT(!info.useSingletonForClone);

    if (info.singletonType) {
        // If the function has a singleton type, this instruction will only be
        // executed once so we don't bother inlining it.
        //
        // Use entryNoPop: nothing was spilled, so the OOL path must not pop.
        masm.jump(ool->entryNoPop());
        masm.bind(ool->rejoin());
        return;
    }

    // There's not enough registers on x86 with the profiler enabled to request
    // a temp. Instead, spill part of one of the values, being prepared to
    // restore it if necessary on the out of line path.
    Register tempReg = newTarget.scratchReg();
    masm.push(newTarget.scratchReg());

    // On failure the OOL entry (not entryNoPop) pops the spilled register.
    masm.createGCObject(output, tempReg, info.fun, gc::DefaultHeap, ool->entry());

    masm.pop(newTarget.scratchReg());

    emitLambdaInit(output, scopeChain, info);

    // Initialize extended slots. Lexical |this| is stored in the first one.
    MOZ_ASSERT(info.flags & JSFunction::EXTENDED);
    static_assert(FunctionExtended::NUM_EXTENDED_SLOTS == 2, "All slots must be initialized");
    static_assert(FunctionExtended::ARROW_NEWTARGET_SLOT == 0,
                  "|new.target| must be stored in first slot");
    masm.storeValue(newTarget, Address(output, FunctionExtended::offsetOfExtendedSlot(0)));
    masm.storeValue(UndefinedValue(), Address(output, FunctionExtended::offsetOfExtendedSlot(1)));

    masm.bind(ool->rejoin());
}
// Initialize the fields of a freshly allocated function clone in |output|
// from the template's LambdaFunctionInfo.
void
CodeGenerator::emitLambdaInit(Register output, Register scopeChain,
                              const LambdaFunctionInfo& info)
{
    // Initialize nargs and flags. We do this with a single uint32 to avoid
    // 16-bit writes.
    //
    // The union (rather than manual shifting) reproduces the struct's
    // in-memory halfword layout, which the assert below ties to the
    // JSFunction field offsets.
    union {
        struct S {
            uint16_t nargs;
            uint16_t flags;
        } s;
        uint32_t word;
    } u;
    u.s.nargs = info.nargs;
    u.s.flags = info.flags;

    MOZ_ASSERT(JSFunction::offsetOfFlags() == JSFunction::offsetOfNargs() + 2);
    masm.store32(Imm32(u.word), Address(output, JSFunction::offsetOfNargs()));
    masm.storePtr(ImmGCPtr(info.scriptOrLazyScript),
                  Address(output, JSFunction::offsetOfNativeOrScript()));
    masm.storePtr(scopeChain, Address(output, JSFunction::offsetOfEnvironment()));
    masm.storePtr(ImmGCPtr(info.fun->displayAtom()), Address(output, JSFunction::offsetOfAtom()));
}
// Record the OSI (on-stack invalidation) point and attach its offset to the
// associated safepoint.
void
CodeGenerator::visitOsiPoint(LOsiPoint* lir)
{
    // Note: markOsiPoint ensures enough space exists between the last
    // LOsiPoint and this one to patch adjacent call instructions.

    MOZ_ASSERT(masm.framePushed() == frameSize());

    uint32_t osiCallPointOffset = markOsiPoint(lir);

    LSafepoint* safepoint = lir->associatedSafepoint();
    MOZ_ASSERT(!safepoint->osiCallPointOffset());
    safepoint->setOsiCallPointOffset(osiCallPointOffset);

#ifdef DEBUG
    // There should be no movegroups or other instructions between
    // an instruction and its OsiPoint. This is necessary because
    // we use the OsiPoint's snapshot from within VM calls.
    for (LInstructionReverseIterator iter(current->rbegin(lir)); iter != current->rend(); iter++) {
        if (*iter == lir)
            continue;
        MOZ_ASSERT(!iter->isMoveGroup());
        MOZ_ASSERT(iter->safepoint() == safepoint);
        break;
    }
#endif

#ifdef CHECK_OSIPOINT_REGISTERS
    if (shouldVerifyOsiPointRegs(safepoint))
        verifyOsiPointRegs(safepoint);
#endif
}
// Unconditional branch: emit a jump to the target block's label.
void
CodeGenerator::visitGoto(LGoto* lir)
{
    jumpToBlock(lir->target());
}
// Out-of-line path to execute any move groups between the start of a loop
// header and its interrupt check, then invoke the interrupt handler.
class OutOfLineInterruptCheckImplicit : public OutOfLineCodeBase<CodeGenerator>
{
  public:
    // Loop header block whose leading move groups must be replayed.
    LBlock* block;
    // The implicit interrupt check this OOL path services.
    LInterruptCheckImplicit* lir;

    OutOfLineInterruptCheckImplicit(LBlock* block, LInterruptCheckImplicit* lir)
      : block(block), lir(lir)
    { }

    void accept(CodeGenerator* codegen) {
        codegen->visitOutOfLineInterruptCheckImplicit(this);
    }
};
// VM call that runs the interrupt handler; must match InterruptCheck's
// signature.
typedef bool (*InterruptCheckFn)(JSContext*);
static const VMFunction InterruptCheckInfo = FunctionInfo<InterruptCheckFn>(InterruptCheck);
// OOL path for the implicit (backedge-patched) interrupt check: replay the
// loop header's leading move groups, then call the interrupt handler.
void
CodeGenerator::visitOutOfLineInterruptCheckImplicit(OutOfLineInterruptCheckImplicit* ool)
{
#ifdef CHECK_OSIPOINT_REGISTERS
    // This path is entered from the patched back-edge of the loop. This
    // means that the JitActivation flags used for checking the validity of
    // the OSI points are not reset by the path generated by generateBody,
    // so we have to reset them here.
    resetOsiPointRegs(ool->lir->safepoint());
#endif

    LInstructionIterator iter = ool->block->begin();
    for (; iter != ool->block->end(); iter++) {
        if (iter->isMoveGroup()) {
            // Replay this move group that precedes the interrupt check at the
            // start of the loop header. Any incoming jumps here will be from
            // the backedge and will skip over the move group emitted inline.
            visitMoveGroup(iter->toMoveGroup());
        } else {
            break;
        }
    }
    // The first non-move-group instruction must be the check itself.
    MOZ_ASSERT(*iter == ool->lir);

    saveLive(ool->lir);
    callVM(InterruptCheckInfo, ool->lir);
    restoreLive(ool->lir);
    masm.jump(ool->rejoin());
}
// Implicit interrupt check: no inline code is emitted; the OOL entry is
// recorded on the LIR node so backedge patching can direct jumps to it.
void
CodeGenerator::visitInterruptCheckImplicit(LInterruptCheckImplicit* lir)
{
    OutOfLineInterruptCheckImplicit* ool =
        new(alloc()) OutOfLineInterruptCheckImplicit(current, lir);
    addOutOfLineCode(ool, lir->mir());

    lir->setOolEntry(ool->entry());
    masm.bind(ool->rejoin());
}
// Dispatch a table switch. An int32 input is used directly; a double input
// is first converted, falling back to the default case when it does not fit
// in an int32.
void
CodeGenerator::visitTableSwitch(LTableSwitch* ins)
{
    MTableSwitch* mir = ins->mir();
    Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();

    const LAllocation* temp;
    if (mir->getOperand(0)->type() == MIRType_Int32) {
        temp = ins->index();
    } else {
        temp = ins->tempInt()->output();

        // The input is a double, so try and convert it to an integer.
        // If it does not fit in an integer, take the default case.
        masm.convertDoubleToInt32(ToFloatRegister(ins->index()), ToRegister(temp), defaultcase, false);
    }

    emitTableSwitchDispatch(mir, ToRegister(temp), ToRegisterOrInvalid(ins->tempPointer()));
}
// Dispatch a table switch on a boxed Value: non-numbers take the default
// case; doubles are converted to int32 (default case if they don't fit).
void
CodeGenerator::visitTableSwitchV(LTableSwitchV* ins)
{
    MTableSwitch* mir = ins->mir();
    Label* defaultcase = skipTrivialBlocks(mir->getDefault())->lir()->label();

    Register index = ToRegister(ins->tempInt());
    ValueOperand value = ToValue(ins, LTableSwitchV::InputValue);
    Register tag = masm.extractTag(value, index);
    masm.branchTestNumber(Assembler::NotEqual, tag, defaultcase);

    Label unboxInt, isInt;
    masm.branchTestInt32(Assembler::Equal, tag, &unboxInt);
    {
        // Double case: convert, taking the default case on failure.
        FloatRegister floatIndex = ToFloatRegister(ins->tempFloat());
        masm.unboxDouble(value, floatIndex);
        masm.convertDoubleToInt32(floatIndex, index, defaultcase, false);
        masm.jump(&isInt);
    }

    masm.bind(&unboxInt);
    masm.unboxInt32(value, index);

    masm.bind(&isInt);

    emitTableSwitchDispatch(mir, index, ToRegisterOrInvalid(ins->tempPointer()));
}
// VM call used by LCloneLiteral; must match DeepCloneObjectLiteral's
// signature: (cx, obj, newKind).
typedef JSObject* (*DeepCloneObjectLiteralFn)(JSContext*, HandleObject, NewObjectKind);
static const VMFunction DeepCloneObjectLiteralInfo =
    FunctionInfo<DeepCloneObjectLiteralFn>(DeepCloneObjectLiteral);
// Clone an object literal through the VM. Arguments are pushed in reverse
// order: the NewObjectKind (always TenuredObject) then the literal.
void
CodeGenerator::visitCloneLiteral(LCloneLiteral* lir)
{
    pushArg(ImmWord(TenuredObject));
    pushArg(ToRegister(lir->getObjectLiteral()));
    callVM(DeepCloneObjectLiteralInfo, lir);
}
// No code is emitted for parameters.
void
CodeGenerator::visitParameter(LParameter* lir)
{
}
// Load the callee JSFunction out of this frame's callee token, which sits
// in the frame header above the frame's fixed slots.
void
CodeGenerator::visitCallee(LCallee* lir)
{
    Register output = ToRegister(lir->output());
    Address calleeToken(masm.getStackPointer(),
                        frameSize() + JitFrameLayout::offsetOfCalleeToken());

    masm.loadFunctionFromCalleeToken(calleeToken, output);
}
// Produce a 0/1 flag from the callee token's low tag bit, which records
// whether the current call is a constructing call.
void
CodeGenerator::visitIsConstructing(LIsConstructing* lir)
{
    Register output = ToRegister(lir->output());
    Address calleeToken(masm.getStackPointer(), frameSize() + JitFrameLayout::offsetOfCalleeToken());
    masm.loadPtr(calleeToken, output);

    // We must be inside a function.
    MOZ_ASSERT(current->mir()->info().script()->functionNonDelazifying());

    // The low bit indicates whether this call is constructing, just clear the
    // other bits.
    static_assert(CalleeToken_Function == 0x0, "CalleeTokenTag value should match");
    static_assert(CalleeToken_FunctionConstructing == 0x1, "CalleeTokenTag value should match");
    masm.andPtr(Imm32(0x1), output);
}
// No code is emitted for the start instruction.
void
CodeGenerator::visitStart(LStart* lir)
{
}
// Return from the script: the value is already in the JS return
// register(s) (asserted below); jump to the shared epilogue.
void
CodeGenerator::visitReturn(LReturn* lir)
{
#if defined(JS_NUNBOX32)
    DebugOnly<LAllocation*> type = lir->getOperand(TYPE_INDEX);
    DebugOnly<LAllocation*> payload = lir->getOperand(PAYLOAD_INDEX);

    MOZ_ASSERT(ToRegister(type) == JSReturnReg_Type);
    MOZ_ASSERT(ToRegister(payload) == JSReturnReg_Data);
#elif defined(JS_PUNBOX64)
    DebugOnly<LAllocation*> result = lir->getOperand(0);
    MOZ_ASSERT(ToRegister(result) == JSReturnReg);
#endif
    // Don't emit a jump to the return label if this is the last block.
    if (current->mir() != *gen->graph().poBegin())
        masm.jump(&returnLabel_);
}
// On-stack-replacement entry point: record the entry offset, then set up a
// fresh Ion frame on top of the Baseline frame we are entering from.
void
CodeGenerator::visitOsrEntry(LOsrEntry* lir)
{
    Register temp = ToRegister(lir->temp());

    // Remember the OSR entry offset into the code buffer.
    masm.flushBuffer();
    setOsrEntryOffset(masm.size());

#ifdef JS_TRACE_LOGGING
    emitTracelogStopEvent(TraceLogger_Baseline);
    emitTracelogStartEvent(TraceLogger_IonMonkey);
#endif

    // If profiling, save the current frame pointer to a per-thread global field.
    if (isProfilerInstrumentationEnabled())
        masm.profilerEnterFrame(masm.getStackPointer(), temp);

    // Allocate the full frame for this function.
    // Note we have a new entry here. So we reset MacroAssembler::framePushed()
    // to 0, before reserving the stack.
    MOZ_ASSERT(masm.framePushed() == frameSize());
    masm.setFramePushed(0);

    // Ensure that the Ion frame is properly aligned.
    masm.assertStackAlignment(JitStackAlignment, 0);

    masm.reserveStack(frameSize());
}
void
CodeGenerator::visitOsrScopeChain(LOsrScopeChain* lir)
{
    // Read the scope chain slot out of the baseline frame we are OSR-entering from.
    Register frameReg = ToRegister(lir->getOperand(0));
    Register dest = ToRegister(lir->getDef(0));
    masm.loadPtr(Address(frameReg, BaselineFrame::reverseOffsetOfScopeChain()), dest);
}
void
CodeGenerator::visitOsrArgumentsObject(LOsrArgumentsObject* lir)
{
    // Read the arguments-object slot out of the baseline frame being OSR'd from.
    Register frameReg = ToRegister(lir->getOperand(0));
    Register dest = ToRegister(lir->getDef(0));
    masm.loadPtr(Address(frameReg, BaselineFrame::reverseOffsetOfArgsObj()), dest);
}
void
CodeGenerator::visitOsrValue(LOsrValue* value)
{
    // Copy a boxed Value out of the baseline frame at the offset the MIR
    // node recorded for it.
    Register frameReg = ToRegister(value->getOperand(0));
    Address src(frameReg, value->mir()->frameOffset());
    masm.loadValue(src, ToOutValue(value));
}
void
CodeGenerator::visitOsrReturnValue(LOsrReturnValue* lir)
{
    // Default the output to undefined; if the baseline frame has a cached
    // return value (HAS_RVAL flag set), overwrite with it.
    Register frameReg = ToRegister(lir->getOperand(0));
    const ValueOperand out = ToOutValue(lir);

    masm.moveValue(UndefinedValue(), out);

    Label noRval;
    masm.branchTest32(Assembler::Zero,
                      Address(frameReg, BaselineFrame::reverseOffsetOfFlags()),
                      Imm32(BaselineFrame::HAS_RVAL), &noRval);
    masm.loadValue(Address(frameReg, BaselineFrame::reverseOffsetOfReturnValue()), out);
    masm.bind(&noRval);
}
void
CodeGenerator::visitStackArgT(LStackArgT* lir)
{
    // Store a typed argument into its outgoing argument stack slot. The
    // store form depends on where the argument currently lives:
    //  - float register: store the raw double,
    //  - general register: box it with the tag implied by its MIRType,
    //  - otherwise: it is a constant, store the boxed constant directly.
    const LAllocation* arg = lir->getArgument();
    MIRType argType = lir->type();
    uint32_t argslot = lir->argslot();
    MOZ_ASSERT(argslot - 1u < graph.argumentSlotCount());

    int32_t stack_offset = StackOffsetOfPassedArg(argslot);
    Address dest(masm.getStackPointer(), stack_offset);

    if (arg->isFloatReg())
        masm.storeDouble(ToFloatRegister(arg), dest);
    else if (arg->isRegister())
        masm.storeValue(ValueTypeFromMIRType(argType), ToRegister(arg), dest);
    else
        masm.storeValue(*(arg->toConstant()), dest);
}
void
CodeGenerator::visitStackArgV(LStackArgV* lir)
{
    // Store a boxed Value argument into its outgoing argument stack slot.
    uint32_t slot = lir->argslot();
    MOZ_ASSERT(slot - 1u < graph.argumentSlotCount());

    Address dest(masm.getStackPointer(), StackOffsetOfPassedArg(slot));
    masm.storeValue(ToValue(lir, 0), dest);
}
void
CodeGenerator::visitMoveGroup(LMoveGroup* group)
{
    if (!group->numMoves())
        return;

    // Feed every move in the group to the MoveResolver, which schedules them
    // as a parallel-move (handling cycles and ordering), then emit the
    // resolved sequence with the platform MoveEmitter.
    MoveResolver& resolver = masm.moveResolver();

    for (size_t i = 0; i < group->numMoves(); i++) {
        const LMove& move = group->getMove(i);

        LAllocation from = move.from();
        LAllocation to = move.to();
        LDefinition::Type type = move.type();

        // No bogus moves.
        MOZ_ASSERT(from != to);
        MOZ_ASSERT(!from.isConstant());

        // Map the LIR storage type onto the MoveOp category that decides
        // which instruction class performs the copy.
        MoveOp::Type moveType;
        switch (type) {
          case LDefinition::OBJECT:
          case LDefinition::SLOTS:
#ifdef JS_NUNBOX32
          case LDefinition::TYPE:
          case LDefinition::PAYLOAD:
#else
          case LDefinition::BOX:
#endif
          case LDefinition::GENERAL:    moveType = MoveOp::GENERAL;   break;
          case LDefinition::INT32:      moveType = MoveOp::INT32;     break;
          case LDefinition::FLOAT32:    moveType = MoveOp::FLOAT32;   break;
          case LDefinition::DOUBLE:     moveType = MoveOp::DOUBLE;    break;
          case LDefinition::INT32X4:    moveType = MoveOp::INT32X4;   break;
          case LDefinition::FLOAT32X4:  moveType = MoveOp::FLOAT32X4; break;
          default: MOZ_CRASH("Unexpected move type");
        }

        masm.propagateOOM(resolver.addMove(toMoveOperand(from), toMoveOperand(to), moveType));
    }

    masm.propagateOOM(resolver.resolve());

    MoveEmitter emitter(masm);

#ifdef JS_CODEGEN_X86
    // x86 is register-starved: use the group's scratch register if one was
    // allocated, otherwise reorder memory-to-memory moves so the emitter can
    // cope without one.
    if (group->maybeScratchRegister().isGeneralReg())
        emitter.setScratchRegister(group->maybeScratchRegister().toGeneralReg()->reg());
    else
        resolver.sortMemoryToMemoryMoves();
#endif

    emitter.emit(resolver);
    emitter.finish();
}
void
CodeGenerator::visitInteger(LInteger* lir)
{
    // Materialize a constant int32 in the output register.
    Register dest = ToRegister(lir->output());
    masm.move32(Imm32(lir->getValue()), dest);
}
void
CodeGenerator::visitPointer(LPointer* lir)
{
    // Materialize a constant pointer. GC things are emitted via ImmGCPtr,
    // raw pointers via ImmPtr.
    Register dest = ToRegister(lir->output());
    if (lir->kind() != LPointer::GC_THING)
        masm.movePtr(ImmPtr(lir->ptr()), dest);
    else
        masm.movePtr(ImmGCPtr(lir->gcptr()), dest);
}
void
CodeGenerator::visitKeepAliveObject(LKeepAliveObject* lir)
{
    // No-op. The LIR node exists only to extend the object's live range;
    // no code is emitted.
}
void
CodeGenerator::visitSlots(LSlots* lir)
{
    // Load the object's dynamic-slots pointer into the output register.
    Register obj = ToRegister(lir->object());
    Register dest = ToRegister(lir->output());
    masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), dest);
}
void
CodeGenerator::visitLoadSlotT(LLoadSlotT* lir)
{
    // Load a dynamic slot and unbox it straight into a typed register.
    Address slot(ToRegister(lir->slots()), lir->mir()->slot() * sizeof(js::Value));
    masm.loadUnboxedValue(slot, lir->mir()->type(), ToAnyRegister(lir->output()));
}
void
CodeGenerator::visitLoadSlotV(LLoadSlotV* lir)
{
    // Load a dynamic slot as a boxed Value.
    Address slot(ToRegister(lir->input()), lir->mir()->slot() * sizeof(js::Value));
    masm.loadValue(slot, ToOutValue(lir));
}
void
CodeGenerator::visitStoreSlotT(LStoreSlotT* lir)
{
    // Store a typed value into a dynamic slot, emitting a pre-write barrier
    // first when the MIR node requires one.
    Register base = ToRegister(lir->slots());
    int32_t offset = lir->mir()->slot() * sizeof(js::Value);
    Address dest(base, offset);

    if (lir->mir()->needsBarrier())
        emitPreBarrier(dest);

    MIRType valueType = lir->mir()->value()->type();

    if (valueType == MIRType_ObjectOrNull) {
        // ObjectOrNull has its own store helper (the value is an object
        // pointer or nullptr, not a fully boxed Value).
        masm.storeObjectOrNull(ToRegister(lir->value()), dest);
    } else {
        // Wrap the value (constant or typed register) and store it with the
        // representation dictated by the slot's type.
        ConstantOrRegister value;
        if (lir->value()->isConstant())
            value = ConstantOrRegister(*lir->value()->toConstant());
        else
            value = TypedOrValueRegister(valueType, ToAnyRegister(lir->value()));
        masm.storeUnboxedValue(value, valueType, dest, lir->mir()->slotType());
    }
}
void
CodeGenerator::visitStoreSlotV(LStoreSlotV* lir)
{
    // Store a boxed Value into a dynamic slot, with a pre-write barrier
    // when the MIR node requires one.
    Address slot(ToRegister(lir->slots()), lir->mir()->slot() * sizeof(Value));

    if (lir->mir()->needsBarrier())
        emitPreBarrier(slot);

    masm.storeValue(ToValue(lir, LStoreSlotV::Value), slot);
}
// Emit guards verifying that |obj| matches |guard|, branching to |miss| on
// mismatch. For group (unboxed-object) guards with a shape, the expando
// object is additionally loaded into |scratch| and its shape checked; with
// |checkNullExpando| set and no shape, the object must have no expando.
static void
GuardReceiver(MacroAssembler& masm, const ReceiverGuard& guard,
              Register obj, Register scratch, Label* miss, bool checkNullExpando)
{
    if (guard.group) {
        masm.branchTestObjGroup(Assembler::NotEqual, obj, guard.group, miss);

        Address expandoAddress(obj, UnboxedPlainObject::offsetOfExpando());
        if (guard.shape) {
            // Expando must exist and have the guarded shape; it is left in
            // scratch for the caller to use.
            masm.loadPtr(expandoAddress, scratch);
            masm.branchPtr(Assembler::Equal, scratch, ImmWord(0), miss);
            masm.branchTestObjShape(Assembler::NotEqual, scratch, guard.shape, miss);
        } else if (checkNullExpando) {
            masm.branchPtr(Assembler::NotEqual, expandoAddress, ImmWord(0), miss);
        }
    } else {
        masm.branchTestObjShape(Assembler::NotEqual, obj, guard.shape, miss);
    }
}
// Emit an inline polymorphic property load: try each guarded receiver in
// turn, loading the property for whichever matches; bail out if none do.
void
CodeGenerator::emitGetPropertyPolymorphic(LInstruction* ins, Register obj, Register scratch,
                                          const TypedOrValueRegister& output)
{
    MGetPropertyPolymorphic* mir = ins->mirRaw()->toGetPropertyPolymorphic();

    Label done;

    for (size_t i = 0; i < mir->numReceivers(); i++) {
        ReceiverGuard receiver = mir->receiver(i);

        Label next;
        GuardReceiver(masm, receiver, obj, scratch, &next, /* checkNullExpando = */ false);

        if (receiver.shape) {
            // If this is an unboxed expando access, GuardReceiver loaded the
            // expando object into scratch.
            Register target = receiver.group ? scratch : obj;

            Shape* shape = mir->shape(i);
            if (shape->slot() < shape->numFixedSlots()) {
                // Fixed slot.
                masm.loadTypedOrValue(Address(target, NativeObject::getFixedSlotOffset(shape->slot())),
                                      output);
            } else {
                // Dynamic slot.
                uint32_t offset = (shape->slot() - shape->numFixedSlots()) * sizeof(js::Value);
                masm.loadPtr(Address(target, NativeObject::offsetOfSlots()), scratch);
                masm.loadTypedOrValue(Address(scratch, offset), output);
            }
        } else {
            // Unboxed property: read directly from the object's inline data
            // at the offset recorded in the layout.
            const UnboxedLayout::Property* property =
                receiver.group->unboxedLayout().lookup(mir->name());
            Address propertyAddr(obj, UnboxedPlainObject::offsetOfData() + property->offset);

            masm.loadUnboxedProperty(propertyAddr, property->type, output);
        }

        if (i == mir->numReceivers() - 1) {
            // Last receiver: a guard miss means no case matched — bail out.
            bailoutFrom(&next, ins->snapshot());
        } else {
            masm.jump(&done);
            masm.bind(&next);
        }
    }

    masm.bind(&done);
}
void
CodeGenerator::visitGetPropertyPolymorphicV(LGetPropertyPolymorphicV* ins)
{
    // Boxed-output form: the output Value's scratch register doubles as the
    // scratch needed by the polymorphic load.
    ValueOperand out = GetValueOutput(ins);
    emitGetPropertyPolymorphic(ins, ToRegister(ins->obj()), out.scratchReg(), out);
}
void
CodeGenerator::visitGetPropertyPolymorphicT(LGetPropertyPolymorphicT* ins)
{
    // Typed-output form. A double output lives in a float register, so a
    // separate GPR temp is needed; otherwise the output GPR can serve as
    // the scratch register itself.
    TypedOrValueRegister output(ins->mir()->type(), ToAnyRegister(ins->output()));

    Register scratch;
    if (output.type() == MIRType_Double)
        scratch = ToRegister(ins->temp());
    else
        scratch = output.typedReg().gpr();

    emitGetPropertyPolymorphic(ins, ToRegister(ins->obj()), scratch, output);
}
// Emit a patchable pre-write barrier for an unboxed field when its type
// (object or string) requires one; other unboxed types need no barrier.
template <typename T>
static void
EmitUnboxedPreBarrier(MacroAssembler &masm, T address, JSValueType type)
{
    switch (type) {
      case JSVAL_TYPE_OBJECT:
        masm.patchableCallPreBarrier(address, MIRType_Object);
        break;
      case JSVAL_TYPE_STRING:
        masm.patchableCallPreBarrier(address, MIRType_String);
        break;
      default:
        MOZ_ASSERT(!UnboxedTypeNeedsPreBarrier(type));
        break;
    }
}
// Emit an inline polymorphic property store: try each guarded receiver in
// turn, storing |value| for whichever matches; bail out if none do.
void
CodeGenerator::emitSetPropertyPolymorphic(LInstruction* ins, Register obj, Register scratch,
                                          const ConstantOrRegister& value)
{
    MSetPropertyPolymorphic* mir = ins->mirRaw()->toSetPropertyPolymorphic();

    Label done;
    for (size_t i = 0; i < mir->numReceivers(); i++) {
        ReceiverGuard receiver = mir->receiver(i);

        Label next;
        GuardReceiver(masm, receiver, obj, scratch, &next, /* checkNullExpando = */ false);

        if (receiver.shape) {
            // If this is an unboxed expando access, GuardReceiver loaded the
            // expando object into scratch.
            Register target = receiver.group ? scratch : obj;

            Shape* shape = mir->shape(i);
            if (shape->slot() < shape->numFixedSlots()) {
                // Fixed slot.
                Address addr(target, NativeObject::getFixedSlotOffset(shape->slot()));
                if (mir->needsBarrier())
                    emitPreBarrier(addr);
                masm.storeConstantOrRegister(value, addr);
            } else {
                // Dynamic slot.
                masm.loadPtr(Address(target, NativeObject::offsetOfSlots()), scratch);
                Address addr(scratch, (shape->slot() - shape->numFixedSlots()) * sizeof(js::Value));
                if (mir->needsBarrier())
                    emitPreBarrier(addr);
                masm.storeConstantOrRegister(value, addr);
            }
        } else {
            // Unboxed property: barrier the old value if its type requires
            // it, then store into the object's inline data.
            const UnboxedLayout::Property* property =
                receiver.group->unboxedLayout().lookup(mir->name());
            Address propertyAddr(obj, UnboxedPlainObject::offsetOfData() + property->offset);

            EmitUnboxedPreBarrier(masm, propertyAddr, property->type);
            masm.storeUnboxedProperty(propertyAddr, property->type, value, nullptr);
        }

        if (i == mir->numReceivers() - 1) {
            // Last receiver: a guard miss means no case matched — bail out.
            bailoutFrom(&next, ins->snapshot());
        } else {
            masm.jump(&done);
            masm.bind(&next);
        }
    }

    masm.bind(&done);
}
void
CodeGenerator::visitSetPropertyPolymorphicV(LSetPropertyPolymorphicV* ins)
{
    // Boxed-value store: wrap the value operand and dispatch to the shared
    // polymorphic store emitter.
    ValueOperand val = ToValue(ins, LSetPropertyPolymorphicV::Value);
    emitSetPropertyPolymorphic(ins, ToRegister(ins->obj()), ToRegister(ins->temp()),
                               TypedOrValueRegister(val));
}
void
CodeGenerator::visitSetPropertyPolymorphicT(LSetPropertyPolymorphicT* ins)
{
    // Typed-value store: the value is either a known constant or a typed
    // register; wrap accordingly before dispatching to the shared emitter.
    auto* mirValue = ins->mir()->value();

    ConstantOrRegister value;
    if (mirValue->isConstant())
        value = ConstantOrRegister(mirValue->toConstant()->value());
    else
        value = TypedOrValueRegister(mirValue->type(), ToAnyRegister(ins->value()));

    emitSetPropertyPolymorphic(ins, ToRegister(ins->obj()), ToRegister(ins->temp()), value);
}
void
CodeGenerator::visitElements(LElements* lir)
{
    // Load the elements pointer; unboxed arrays keep it at a different
    // offset than native objects.
    size_t offset = lir->mir()->unboxed() ? UnboxedArrayObject::offsetOfElements()
                                          : NativeObject::offsetOfElements();
    masm.loadPtr(Address(ToRegister(lir->object()), offset), ToRegister(lir->output()));
}
typedef bool (*ConvertElementsToDoublesFn)(JSContext*, uintptr_t);
static const VMFunction ConvertElementsToDoublesInfo =
FunctionInfo<ConvertElementsToDoublesFn>(ObjectElements::ConvertEle