| /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- |
| * vim: set ts=8 sts=4 et sw=4 tw=99: |
| * This Source Code Form is subject to the terms of the Mozilla Public |
| * License, v. 2.0. If a copy of the MPL was not distributed with this |
| * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
| |
| #include "mozilla/Assertions.h" |
| #include "mozilla/Attributes.h" |
| #include "mozilla/DebugOnly.h" |
| #include "mozilla/Util.h" |
| |
| #include "PerfSpewer.h" |
| #include "CodeGenerator.h" |
| #include "IonLinker.h" |
| #include "IonSpewer.h" |
| #include "MIRGenerator.h" |
| #include "shared/CodeGenerator-shared-inl.h" |
| #include "jsnum.h" |
| #include "jsmath.h" |
| #include "ParallelFunctions.h" |
| #include "ExecutionModeInlines.h" |
| #include "builtin/Eval.h" |
| #include "gc/Nursery.h" |
| #include "vm/ForkJoin.h" |
| #include "ParallelArrayAnalysis.h" |
| |
| #include "jsscriptinlines.h" |
| |
| #include "vm/Interpreter-inl.h" |
| #include "vm/StringObject-inl.h" |
| |
| using namespace js; |
| using namespace js::jit; |
| |
| using mozilla::DebugOnly; |
| using mozilla::Maybe; |
| |
| namespace js { |
| namespace jit { |
| |
// This out-of-line cache performs a double dispatch: accept() re-enters the
// CodeGenerator, which then dispatches again on the kind of the wrapped
// IonCache through the visit##op##IC functions generated below.
class OutOfLineUpdateCache :
  public OutOfLineCodeBase<CodeGenerator>,
  public IonCacheVisitor
{
  private:
    // Instruction owning the cache; provides the snapshot/safepoint context.
    LInstruction *lir_;
    // Index of the wrapped IonCache in the code generator's cache list.
    size_t cacheIndex_;
    // Architecture-specific description of the initial (inline) jump.
    AddCacheState state_;

  public:
    OutOfLineUpdateCache(LInstruction *lir, size_t cacheIndex)
      : lir_(lir),
        cacheIndex_(cacheIndex)
    { }

    void bind(MacroAssembler *masm) {
        // The binding of the initial jump is done in
        // CodeGenerator::visitOutOfLineCache.
    }

    size_t getCacheIndex() const {
        return cacheIndex_;
    }
    LInstruction *lir() const {
        return lir_;
    }
    AddCacheState &state() {
        return state_;
    }

    bool accept(CodeGenerator *codegen) {
        return codegen->visitOutOfLineCache(this);
    }

    // ICs' visit functions delegating the work to the CodeGen visit functions.
#define VISIT_CACHE_FUNCTION(op)                                \
    bool visit##op##IC(CodeGenerator *codegen, op##IC *ic) {    \
        return codegen->visit##op##IC(this, ic);                \
    }

    IONCACHE_KIND_LIST(VISIT_CACHE_FUNCTION)
#undef VISIT_CACHE_FUNCTION
};
| |
// This function is declared here because it needs to instantiate an
// OutOfLineUpdateCache, but we want to keep it visible inside
// CodeGeneratorShared so that inline caches can be specialized per
// architecture.
bool
CodeGeneratorShared::addCache(LInstruction *lir, size_t cacheIndex)
{
    IonCache *cache = static_cast<IonCache *>(getCache(cacheIndex));
    MInstruction *mir = lir->mirRaw()->toInstruction();
    // With a resume point the cache records its scripted location; without
    // one it must behave idempotently.
    if (mir->resumePoint())
        cache->setScriptedLocation(mir->block()->info().script(),
                                   mir->resumePoint()->pc());
    else
        cache->setIdempotent();

    OutOfLineUpdateCache *ool = new OutOfLineUpdateCache(lir, cacheIndex);
    if (!addOutOfLineCode(ool))
        return false;

    // OOL-specific state depends on the type of cache.
    cache->initializeAddCacheState(lir, &ool->state());

    // Emit the inline jump to the OOL path, then bind the point the OOL
    // path rejoins to.
    cache->emitInitialJump(masm, ool->state());
    masm.bind(ool->rejoin());

    return true;
}
| |
// Out-of-line counterpart of addCache: binds the OOL entry and forwards to
// the kind-specific IC visitor.
bool
CodeGenerator::visitOutOfLineCache(OutOfLineUpdateCache *ool)
{
    size_t cacheIndex = ool->getCacheIndex();
    IonCache *cache = static_cast<IonCache *>(getCache(cacheIndex));

    // Register the location of the OOL path in the IC.
    cache->setFallbackLabel(masm.labelForPatch());
    cache->bindInitialJump(masm, ool->state());

    // Dispatch to ICs' accept functions.
    return cache->accept(this, ool);
}
| |
// Returns the template object as a StringObject.
StringObject *
MNewStringObject::templateObj() const {
    return &templateObj_->as<StringObject>();
}
| |
// Delegates to the platform-specific base; unassociated script counts start
// out unallocated.
CodeGenerator::CodeGenerator(MIRGenerator *gen, LIRGraph *graph, MacroAssembler *masm)
  : CodeGeneratorSpecific(gen, graph, masm),
    unassociatedScriptCounts_(NULL)
{
}
| |
CodeGenerator::~CodeGenerator()
{
    // Free any script counts that were never associated with a script.
    js_delete(unassociatedScriptCounts_);
}
| |
| bool |
| CodeGenerator::visitValueToInt32(LValueToInt32 *lir) |
| { |
| ValueOperand operand = ToValue(lir, LValueToInt32::Input); |
| Register output = ToRegister(lir->output()); |
| |
| Register tag = masm.splitTagForTest(operand); |
| |
| Label done, simple, isInt32, isBool, notDouble; |
| // Type-check switch. |
| masm.branchTestInt32(Assembler::Equal, tag, &isInt32); |
| masm.branchTestBoolean(Assembler::Equal, tag, &isBool); |
| masm.branchTestDouble(Assembler::NotEqual, tag, ¬Double); |
| |
| // If the value is a double, see if it fits in a 32-bit int. We need to ask |
| // the platform-specific codegenerator to do this. |
| FloatRegister temp = ToFloatRegister(lir->tempFloat()); |
| masm.unboxDouble(operand, temp); |
| |
| Label fails; |
| switch (lir->mode()) { |
| case LValueToInt32::TRUNCATE: |
| if (!emitTruncateDouble(temp, output)) |
| return false; |
| break; |
| default: |
| JS_ASSERT(lir->mode() == LValueToInt32::NORMAL); |
| masm.convertDoubleToInt32(temp, output, &fails, lir->mir()->canBeNegativeZero()); |
| break; |
| } |
| masm.jump(&done); |
| |
| masm.bind(¬Double); |
| |
| if (lir->mode() == LValueToInt32::NORMAL) { |
| // If the value is not null, it's a string, object, or undefined, |
| // which we can't handle here. |
| masm.branchTestNull(Assembler::NotEqual, tag, &fails); |
| } else { |
| // Test for string or object - then fallthrough to null, which will |
| // also handle undefined. |
| masm.branchTestObject(Assembler::Equal, tag, &fails); |
| masm.branchTestString(Assembler::Equal, tag, &fails); |
| } |
| |
| if (fails.used() && !bailoutFrom(&fails, lir->snapshot())) |
| return false; |
| |
| // The value is null - just emit 0. |
| masm.mov(Imm32(0), output); |
| masm.jump(&done); |
| |
| // Just unbox a bool, the result is 0 or 1. |
| masm.bind(&isBool); |
| masm.unboxBoolean(operand, output); |
| masm.jump(&done); |
| |
| // Integers can be unboxed. |
| masm.bind(&isInt32); |
| masm.unboxInt32(operand, output); |
| |
| masm.bind(&done); |
| |
| return true; |
| } |
| |
| static const double DoubleZero = 0.0; |
| |
// Convert a boxed Value to a double in |output|. The set of convertible tags
// depends on the MIR conversion mode; any other tag bails out.
bool
CodeGenerator::visitValueToDouble(LValueToDouble *lir)
{
    MToDouble *mir = lir->mir();
    ValueOperand operand = ToValue(lir, LValueToDouble::Input);
    FloatRegister output = ToFloatRegister(lir->output());

    Register tag = masm.splitTagForTest(operand);

    Label isDouble, isInt32, isBool, isNull, isUndefined, done;
    bool hasBoolean = false, hasNull = false, hasUndefined = false;

    masm.branchTestDouble(Assembler::Equal, tag, &isDouble);
    masm.branchTestInt32(Assembler::Equal, tag, &isInt32);

    // Depending on the conversion mode, some non-number primitives can be
    // converted too; remember which cases were emitted.
    if (mir->conversion() != MToDouble::NumbersOnly) {
        masm.branchTestBoolean(Assembler::Equal, tag, &isBool);
        masm.branchTestUndefined(Assembler::Equal, tag, &isUndefined);
        hasBoolean = true;
        hasUndefined = true;
        if (mir->conversion() != MToDouble::NonNullNonStringPrimitives) {
            masm.branchTestNull(Assembler::Equal, tag, &isNull);
            hasNull = true;
        }
    }

    // Any tag not branched on above (e.g. string or object) falls through
    // to this bailout.
    if (!bailout(lir->snapshot()))
        return false;

    if (hasNull) {
        // null converts to +0.0.
        masm.bind(&isNull);
        masm.loadStaticDouble(&DoubleZero, output);
        masm.jump(&done);
    }

    if (hasUndefined) {
        // undefined converts to NaN.
        masm.bind(&isUndefined);
        masm.loadStaticDouble(&js_NaN, output);
        masm.jump(&done);
    }

    if (hasBoolean) {
        // true/false convert to 1.0/0.0.
        masm.bind(&isBool);
        masm.boolValueToDouble(operand, output);
        masm.jump(&done);
    }

    masm.bind(&isInt32);
    masm.int32ValueToDouble(operand, output);
    masm.jump(&done);

    masm.bind(&isDouble);
    masm.unboxDouble(operand, output);
    masm.bind(&done);

    return true;
}
| |
| bool |
| CodeGenerator::visitInt32ToDouble(LInt32ToDouble *lir) |
| { |
| masm.convertInt32ToDouble(ToRegister(lir->input()), ToFloatRegister(lir->output())); |
| return true; |
| } |
| |
| bool |
| CodeGenerator::visitDoubleToInt32(LDoubleToInt32 *lir) |
| { |
| Label fail; |
| FloatRegister input = ToFloatRegister(lir->input()); |
| Register output = ToRegister(lir->output()); |
| masm.convertDoubleToInt32(input, output, &fail, lir->mir()->canBeNegativeZero()); |
| if (!bailoutFrom(&fail, lir->snapshot())) |
| return false; |
| return true; |
| } |
| |
// Out-of-line path: call ObjectEmulatesUndefined(obj) through the C ABI and
// branch on the result.
void
CodeGenerator::emitOOLTestObject(Register objreg, Label *ifTruthy, Label *ifFalsy, Register scratch)
{
    // Save/restore the volatile registers around the ABI call, since this
    // runs in the middle of arbitrary register allocation.
    saveVolatile(scratch);
    masm.setupUnalignedABICall(1, scratch);
    masm.passABIArg(objreg);
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ObjectEmulatesUndefined));
    masm.storeCallResult(scratch);
    restoreVolatile(scratch);

    // A non-zero result means the object emulates undefined, i.e. is falsy.
    masm.branchTest32(Assembler::NonZero, scratch, scratch, ifFalsy);
    masm.jump(ifTruthy);
}
| |
// Base out-of-line code generator for all tests of the truthiness of an
// object, where the object might not be truthy. (Recall that per spec all
// objects are truthy, but we implement the JSCLASS_EMULATES_UNDEFINED class
// flag to permit objects to look like |undefined| in certain contexts,
// including in object truthiness testing.) We check truthiness inline except
// when we're testing it on a proxy (or if TI guarantees us that the specified
// object will never emulate |undefined|), in which case out-of-line code will
// call ObjectEmulatesUndefined for a conclusive answer.
class OutOfLineTestObject : public OutOfLineCodeBase<CodeGenerator>
{
    Register objreg_;
    Register scratch_;

    Label *ifTruthy_;
    Label *ifFalsy_;

#ifdef DEBUG
    // setInputAndTargets requires a non-null ifTruthy, so it doubles as the
    // "has been initialized" flag.
    bool initialized() { return ifTruthy_ != NULL; }
#endif

  public:
    OutOfLineTestObject()
#ifdef DEBUG
      : ifTruthy_(NULL), ifFalsy_(NULL)
#endif
    { }

    bool accept(CodeGenerator *codegen) MOZ_FINAL MOZ_OVERRIDE {
        MOZ_ASSERT(initialized());
        codegen->emitOOLTestObject(objreg_, ifTruthy_, ifFalsy_, scratch_);
        return true;
    }

    // Specify the register where the object to be tested is found, labels to
    // jump to if the object is truthy or falsy, and a scratch register for
    // use in the out-of-line path.
    void setInputAndTargets(Register objreg, Label *ifTruthy, Label *ifFalsy, Register scratch) {
        MOZ_ASSERT(!initialized());
        MOZ_ASSERT(ifTruthy);
        objreg_ = objreg;
        scratch_ = scratch;
        ifTruthy_ = ifTruthy;
        ifFalsy_ = ifFalsy;
    }
};
| |
// A subclass of OutOfLineTestObject containing two extra labels, for use when
// the ifTruthy/ifFalsy labels are needed in inline code as well as out-of-line
// code. The user should bind these labels in inline code, and specify them as
// targets via setInputAndTargets, as appropriate.
class OutOfLineTestObjectWithLabels : public OutOfLineTestObject
{
    Label label1_;
    Label label2_;

  public:
    OutOfLineTestObjectWithLabels() { }

    Label *label1() { return &label1_; }
    Label *label2() { return &label2_; }
};
| |
// Test the truthiness of |objreg|, with the slow (emulates-undefined) cases
// delegated to |ool|.
void
CodeGenerator::testObjectTruthy(Register objreg, Label *ifTruthy, Label *ifFalsy, Register scratch,
                                OutOfLineTestObject *ool)
{
    ool->setInputAndTargets(objreg, ifTruthy, ifFalsy, scratch);

    // Perform a fast-path check of the object's class flags if the object's
    // not a proxy. Let out-of-line code handle the slow cases that require
    // saving registers, making a function call, and restoring registers.
    Assembler::Condition cond = masm.branchTestObjectTruthy(true, objreg, scratch, ool->entry());
    masm.j(cond, ifTruthy);
    masm.jump(ifFalsy);
}
| |
// Emit a truthiness test for a boxed Value, jumping to ifTruthy/ifFalsy.
// |ool| is required only when the value might be an object that emulates
// |undefined|.
void
CodeGenerator::testValueTruthy(const ValueOperand &value,
                               const LDefinition *scratch1, const LDefinition *scratch2,
                               FloatRegister fr,
                               Label *ifTruthy, Label *ifFalsy,
                               OutOfLineTestObject *ool)
{
    Register tag = masm.splitTagForTest(value);
    Assembler::Condition cond;

    // Eventually we will want some sort of type filter here. For now, just
    // emit all easy cases. For speed we use the cached tag for all comparison,
    // except for doubles, which we test last (as the operation can clobber the
    // tag, which may be in ScratchReg).
    masm.branchTestUndefined(Assembler::Equal, tag, ifFalsy);
    masm.branchTestNull(Assembler::Equal, tag, ifFalsy);

    // Booleans are truthy iff true.
    Label notBoolean;
    masm.branchTestBoolean(Assembler::NotEqual, tag, &notBoolean);
    masm.branchTestBooleanTruthy(false, value, ifFalsy);
    masm.jump(ifTruthy);
    masm.bind(&notBoolean);

    // Int32 values are truthy iff non-zero.
    Label notInt32;
    masm.branchTestInt32(Assembler::NotEqual, tag, &notInt32);
    cond = masm.testInt32Truthy(false, value);
    masm.j(cond, ifFalsy);
    masm.jump(ifTruthy);
    masm.bind(&notInt32);

    if (ool) {
        // The object might emulate |undefined|: run the full object
        // truthiness test (with its out-of-line slow path).
        Label notObject;

        masm.branchTestObject(Assembler::NotEqual, tag, &notObject);

        Register objreg = masm.extractObject(value, ToRegister(scratch1));
        testObjectTruthy(objreg, ifTruthy, ifFalsy, ToRegister(scratch2), ool);

        masm.bind(&notObject);
    } else {
        // Without an OOL path, objects are unconditionally truthy.
        masm.branchTestObject(Assembler::Equal, tag, ifTruthy);
    }

    // Test if a string is non-empty.
    Label notString;
    masm.branchTestString(Assembler::NotEqual, tag, &notString);
    cond = masm.testStringTruthy(false, value);
    masm.j(cond, ifFalsy);
    masm.jump(ifTruthy);
    masm.bind(&notString);

    // If we reach here the value is a double.
    masm.unboxDouble(value, fr);
    cond = masm.testDoubleTruthy(false, fr);
    masm.j(cond, ifFalsy);
    masm.jump(ifTruthy);
}
| |
| bool |
| CodeGenerator::visitTestOAndBranch(LTestOAndBranch *lir) |
| { |
| MOZ_ASSERT(lir->mir()->operandMightEmulateUndefined(), |
| "Objects which can't emulate undefined should have been constant-folded"); |
| |
| OutOfLineTestObject *ool = new OutOfLineTestObject(); |
| if (!addOutOfLineCode(ool)) |
| return false; |
| |
| testObjectTruthy(ToRegister(lir->input()), lir->ifTruthy(), lir->ifFalsy(), |
| ToRegister(lir->temp()), ool); |
| return true; |
| |
| } |
| |
| bool |
| CodeGenerator::visitTestVAndBranch(LTestVAndBranch *lir) |
| { |
| OutOfLineTestObject *ool = NULL; |
| if (lir->mir()->operandMightEmulateUndefined()) { |
| ool = new OutOfLineTestObject(); |
| if (!addOutOfLineCode(ool)) |
| return false; |
| } |
| |
| testValueTruthy(ToValue(lir, LTestVAndBranch::Input), |
| lir->temp1(), lir->temp2(), |
| ToFloatRegister(lir->tempFloat()), |
| lir->ifTruthy(), lir->ifFalsy(), ool); |
| return true; |
| } |
| |
| bool |
| CodeGenerator::visitFunctionDispatch(LFunctionDispatch *lir) |
| { |
| MFunctionDispatch *mir = lir->mir(); |
| Register input = ToRegister(lir->input()); |
| Label *lastLabel; |
| size_t casesWithFallback; |
| |
| // Determine if the last case is fallback or an ordinary case. |
| if (!mir->hasFallback()) { |
| JS_ASSERT(mir->numCases() > 0); |
| casesWithFallback = mir->numCases(); |
| lastLabel = mir->getCaseBlock(mir->numCases() - 1)->lir()->label(); |
| } else { |
| casesWithFallback = mir->numCases() + 1; |
| lastLabel = mir->getFallback()->lir()->label(); |
| } |
| |
| // Compare function pointers, except for the last case. |
| for (size_t i = 0; i < casesWithFallback - 1; i++) { |
| JS_ASSERT(i < mir->numCases()); |
| JSFunction *func = mir->getCase(i); |
| LBlock *target = mir->getCaseBlock(i)->lir(); |
| masm.branchPtr(Assembler::Equal, input, ImmGCPtr(func), target->label()); |
| } |
| |
| // Jump to the last case. |
| masm.jump(lastLabel); |
| |
| return true; |
| } |
| |
| bool |
| CodeGenerator::visitTypeObjectDispatch(LTypeObjectDispatch *lir) |
| { |
| MTypeObjectDispatch *mir = lir->mir(); |
| Register input = ToRegister(lir->input()); |
| Register temp = ToRegister(lir->temp()); |
| |
| // Hold the incoming TypeObject. |
| masm.loadPtr(Address(input, JSObject::offsetOfType()), temp); |
| |
| // Compare TypeObjects. |
| InlinePropertyTable *propTable = mir->propTable(); |
| for (size_t i = 0; i < mir->numCases(); i++) { |
| JSFunction *func = mir->getCase(i); |
| LBlock *target = mir->getCaseBlock(i)->lir(); |
| |
| DebugOnly<bool> found = false; |
| for (size_t j = 0; j < propTable->numEntries(); j++) { |
| if (propTable->getFunction(j) != func) |
| continue; |
| types::TypeObject *typeObj = propTable->getTypeObject(j); |
| masm.branchPtr(Assembler::Equal, temp, ImmGCPtr(typeObj), target->label()); |
| found = true; |
| } |
| JS_ASSERT(found); |
| } |
| |
| // Unknown function: jump to fallback block. |
| LBlock *fallback = mir->getFallback()->lir(); |
| masm.jump(fallback->label()); |
| return true; |
| } |
| |
// Dispatch among inlined callees, either by TypeObject (when an inline
// property table is available) or by direct function-pointer comparison.
bool
CodeGenerator::visitPolyInlineDispatch(LPolyInlineDispatch *lir)
{
    MPolyInlineDispatch *mir = lir->mir();
    Register inputReg = ToRegister(lir->input());

    InlinePropertyTable *inlinePropTable = mir->propTable();
    if (inlinePropTable) {
        // Temporary register is only assigned in the TypeObject case.
        Register tempReg = ToRegister(lir->temp());
        masm.loadPtr(Address(inputReg, JSObject::offsetOfType()), tempReg);

        // Detect functions by TypeObject.
        for (size_t i = 0; i < inlinePropTable->numEntries(); i++) {
            types::TypeObject *typeObj = inlinePropTable->getTypeObject(i);
            JSFunction *func = inlinePropTable->getFunction(i);
            LBlock *target = mir->getFunctionBlock(func)->lir();
            masm.branchPtr(Assembler::Equal, tempReg, ImmGCPtr(typeObj), target->label());
        }

        // Unknown function: jump to fallback block.
        LBlock *fallback = mir->fallbackPrepBlock()->lir();
        masm.jump(fallback->label());
        return true;
    }

    // Compare function pointers directly.
    // NOTE(review): this assumes numCallees() >= 1; if it could be zero,
    // numCallees() - 1 would wrap around — confirm the MIR builder
    // guarantees at least one callee.
    for (size_t i = 0; i < mir->numCallees() - 1; i++) {
        JSFunction *func = mir->getFunction(i);
        LBlock *target = mir->getFunctionBlock(i)->lir();
        masm.branchPtr(Assembler::Equal, inputReg, ImmGCPtr(func), target->label());
    }

    // There's no fallback case, so a final guard isn't necessary.
    LBlock *target = mir->getFunctionBlock(mir->numCallees() - 1)->lir();
    masm.jump(target->label());
    return true;
}
| |
// VM fallback used when an int32 falls outside the static-strings table.
typedef JSFlatString *(*IntToStringFn)(JSContext *, int);
static const VMFunction IntToStringInfo =
    FunctionInfo<IntToStringFn>(Int32ToString<CanGC>);
| |
// Convert an int32 to a string, using the runtime's static int strings as a
// fast path and a VM call otherwise.
bool
CodeGenerator::visitIntToString(LIntToString *lir)
{
    Register input = ToRegister(lir->input());
    Register output = ToRegister(lir->output());

    // Slow path: call Int32ToString in the VM.
    OutOfLineCode *ool = oolCallVM(IntToStringInfo, lir, (ArgList(), input),
                                   StoreRegisterTo(output));
    if (!ool)
        return false;

    // Unsigned comparison: negative inputs compare above the limit as well,
    // so they also take the VM call.
    masm.branch32(Assembler::AboveOrEqual, input, Imm32(StaticStrings::INT_STATIC_LIMIT),
                  ool->entry());

    // Fast path: index directly into the static int-string table.
    masm.movePtr(ImmWord(&gen->compartment->rt->staticStrings.intStaticTable), output);
    masm.loadPtr(BaseIndex(output, input, ScalePointer), output);

    masm.bind(ool->rejoin());
    return true;
}
| |
// VM call that clones the source RegExp object.
typedef JSObject *(*CloneRegExpObjectFn)(JSContext *, JSObject *, JSObject *);
static const VMFunction CloneRegExpObjectInfo =
    FunctionInfo<CloneRegExpObjectFn>(CloneRegExpObject);
| |
bool
CodeGenerator::visitRegExp(LRegExp *lir)
{
    JSObject *proto = lir->mir()->getRegExpPrototype();

    // Arguments are pushed in reverse order of CloneRegExpObject's signature.
    pushArg(ImmGCPtr(proto));
    pushArg(ImmGCPtr(lir->mir()->source()));
    return callVM(CloneRegExpObjectInfo, lir);
}
| |
// VM call backing the regexp test operation.
typedef bool (*RegExpTestRawFn)(JSContext *cx, HandleObject regexp,
                                HandleString input, JSBool *result);
static const VMFunction RegExpTestRawInfo = FunctionInfo<RegExpTestRawFn>(regexp_test_raw);
| |
| bool |
| CodeGenerator::visitRegExpTest(LRegExpTest *lir) |
| { |
| pushArg(ToRegister(lir->string())); |
| pushArg(ToRegister(lir->regexp())); |
| return callVM(RegExpTestRawInfo, lir); |
| } |
| |
// VM call that clones a function with the given scope chain.
typedef JSObject *(*LambdaFn)(JSContext *, HandleFunction, HandleObject);
static const VMFunction LambdaInfo =
    FunctionInfo<LambdaFn>(js::Lambda);
| |
| bool |
| CodeGenerator::visitLambdaForSingleton(LLambdaForSingleton *lir) |
| { |
| pushArg(ToRegister(lir->scopeChain())); |
| pushArg(ImmGCPtr(lir->mir()->fun())); |
| return callVM(LambdaInfo, lir); |
| } |
| |
// Clone a function inline: allocate the GC thing directly and initialize
// its fields, falling back to the VM Lambda call if allocation fails.
bool
CodeGenerator::visitLambda(LLambda *lir)
{
    Register scopeChain = ToRegister(lir->scopeChain());
    Register output = ToRegister(lir->output());
    JSFunction *fun = lir->mir()->fun();

    // Out-of-line fallback: clone via the VM.
    OutOfLineCode *ool = oolCallVM(LambdaInfo, lir, (ArgList(), ImmGCPtr(fun), scopeChain),
                                   StoreRegisterTo(output));
    if (!ool)
        return false;

    JS_ASSERT(gen->compartment == fun->compartment());
    JS_ASSERT(!fun->hasSingletonType());

    // Inline-allocate the clone (jumping to the OOL path on failure), then
    // fill in its fields.
    masm.newGCThing(output, fun, ool->entry());
    masm.initGCThing(output, fun);

    emitLambdaInit(output, scopeChain, fun);

    masm.bind(ool->rejoin());
    return true;
}
| |
// Store the cloned function's nargs/flags, script, environment, and atom
// into the freshly allocated object at |output|.
void
CodeGenerator::emitLambdaInit(const Register &output,
                              const Register &scopeChain,
                              JSFunction *fun)
{
    // Initialize nargs and flags. We do this with a single uint32 to avoid
    // 16-bit writes.
    union {
        struct S {
            uint16_t nargs;
            uint16_t flags;
        } s;
        uint32_t word;
    } u;
    u.s.nargs = fun->nargs;
    // Mask off the EXTENDED flag for the clone.
    u.s.flags = fun->flags & ~JSFunction::EXTENDED;

    // The single 32-bit store below is only correct if |flags| immediately
    // follows |nargs| in memory.
    JS_STATIC_ASSERT(offsetof(JSFunction, flags) == offsetof(JSFunction, nargs) + 2);
    masm.store32(Imm32(u.word), Address(output, offsetof(JSFunction, nargs)));
    masm.storePtr(ImmGCPtr(fun->nonLazyScript()),
                  Address(output, JSFunction::offsetOfNativeOrScript()));
    masm.storePtr(scopeChain, Address(output, JSFunction::offsetOfEnvironment()));
    masm.storePtr(ImmGCPtr(fun->displayAtom()), Address(output, JSFunction::offsetOfAtom()));
}
| |
| bool |
| CodeGenerator::visitParLambda(LParLambda *lir) |
| { |
| Register resultReg = ToRegister(lir->output()); |
| Register parSliceReg = ToRegister(lir->parSlice()); |
| Register scopeChainReg = ToRegister(lir->scopeChain()); |
| Register tempReg1 = ToRegister(lir->getTemp0()); |
| Register tempReg2 = ToRegister(lir->getTemp1()); |
| JSFunction *fun = lir->mir()->fun(); |
| |
| JS_ASSERT(scopeChainReg != resultReg); |
| |
| emitParAllocateGCThing(lir, resultReg, parSliceReg, tempReg1, tempReg2, fun); |
| emitLambdaInit(resultReg, scopeChainReg, fun); |
| return true; |
| } |
| |
bool
CodeGenerator::visitLabel(LLabel *lir)
{
    // Bind the block's entry label at the current assembler position.
    masm.bind(lir->label());
    return true;
}
| |
bool
CodeGenerator::visitNop(LNop *lir)
{
    // No code is emitted for LNop.
    return true;
}
| |
bool
CodeGenerator::visitMop(LMop *lir)
{
    // No code is emitted for LMop.
    return true;
}
| |
bool
CodeGenerator::visitOsiPoint(LOsiPoint *lir)
{
    // Note: markOsiPoint ensures enough space exists between the last
    // LOsiPoint and this one to patch adjacent call instructions.

    JS_ASSERT(masm.framePushed() == frameSize());

    uint32_t osiCallPointOffset;
    if (!markOsiPoint(lir, &osiCallPointOffset))
        return false;

    // Record the offset on the associated safepoint; it must not have been
    // set already.
    LSafepoint *safepoint = lir->associatedSafepoint();
    JS_ASSERT(!safepoint->osiCallPointOffset());
    safepoint->setOsiCallPointOffset(osiCallPointOffset);
    return true;
}
| |
| bool |
| CodeGenerator::visitGoto(LGoto *lir) |
| { |
| LBlock *target = lir->target()->lir(); |
| |
| // No jump necessary if we can fall through to the next block. |
| if (isNextBlock(target)) |
| return true; |
| |
| masm.jump(target->label()); |
| return true; |
| } |
| |
bool
CodeGenerator::visitTableSwitch(LTableSwitch *ins)
{
    MTableSwitch *mir = ins->mir();
    Label *defaultcase = mir->getDefault()->lir()->label();
    const LAllocation *temp;

    if (ins->index()->isDouble()) {
        temp = ins->tempInt();

        // The input is a double, so try and convert it to an integer.
        // If it does not fit in an integer, take the default case.
        masm.convertDoubleToInt32(ToFloatRegister(ins->index()), ToRegister(temp), defaultcase, false);
    } else {
        // Already an int32: use it as the dispatch index directly.
        temp = ins->index();
    }

    // Platform-specific dispatch emission.
    return emitTableSwitchDispatch(mir, ToRegister(temp), ToRegisterOrInvalid(ins->tempPointer()));
}
| |
// Table switch on a boxed Value: non-numbers and non-int32 doubles take the
// default case.
bool
CodeGenerator::visitTableSwitchV(LTableSwitchV *ins)
{
    MTableSwitch *mir = ins->mir();
    Label *defaultcase = mir->getDefault()->lir()->label();

    Register index = ToRegister(ins->tempInt());
    ValueOperand value = ToValue(ins, LTableSwitchV::InputValue);
    Register tag = masm.extractTag(value, index);
    masm.branchTestNumber(Assembler::NotEqual, tag, defaultcase);

    Label unboxInt, isInt;
    masm.branchTestInt32(Assembler::Equal, tag, &unboxInt);
    {
        // Double input: convert to int32, defaulting when it does not fit.
        FloatRegister floatIndex = ToFloatRegister(ins->tempFloat());
        masm.unboxDouble(value, floatIndex);
        masm.convertDoubleToInt32(floatIndex, index, defaultcase, false);
        masm.jump(&isInt);
    }

    masm.bind(&unboxInt);
    masm.unboxInt32(value, index);

    masm.bind(&isInt);

    return emitTableSwitchDispatch(mir, index, ToRegisterOrInvalid(ins->tempPointer()));
}
| |
bool
CodeGenerator::visitParameter(LParameter *lir)
{
    // No code is emitted for LParameter.
    return true;
}
| |
bool
CodeGenerator::visitCallee(LCallee *lir)
{
    // Load the callee token out of the JS frame and strip its tag bits to
    // recover the callee pointer. (The old comment claiming this reads the
    // number of actual arguments was incorrect.)
    Register callee = ToRegister(lir->output());
    Address ptr(StackPointer, frameSize() + IonJSFrameLayout::offsetOfCalleeToken());

    masm.loadPtr(ptr, callee);
    masm.clearCalleeTag(callee, gen->info().executionMode());
    return true;
}
| |
bool
CodeGenerator::visitStart(LStart *lir)
{
    // No code is emitted for LStart.
    return true;
}
| |
bool
CodeGenerator::visitReturn(LReturn *lir)
{
#if defined(JS_NUNBOX32)
    // On 32-bit, the return Value occupies a type/payload register pair.
    DebugOnly<LAllocation *> type = lir->getOperand(TYPE_INDEX);
    DebugOnly<LAllocation *> payload = lir->getOperand(PAYLOAD_INDEX);
    JS_ASSERT(ToRegister(type) == JSReturnReg_Type);
    JS_ASSERT(ToRegister(payload) == JSReturnReg_Data);
#elif defined(JS_PUNBOX64)
    // On 64-bit, the boxed return Value fits in a single register.
    DebugOnly<LAllocation *> result = lir->getOperand(0);
    JS_ASSERT(ToRegister(result) == JSReturnReg);
#endif
    // Don't emit a jump to the return label if this is the last block.
    if (current->mir() != *gen->graph().poBegin())
        masm.jump(returnLabel_);
    return true;
}
| |
bool
CodeGenerator::visitOsrEntry(LOsrEntry *lir)
{
    // Remember the OSR entry offset into the code buffer.
    masm.flushBuffer();
    setOsrEntryOffset(masm.size());

    // Allocate the full frame for this function.
    uint32_t size = frameSize();
    if (size != 0)
        masm.subPtr(Imm32(size), StackPointer);
    return true;
}
| |
| bool |
| CodeGenerator::visitOsrScopeChain(LOsrScopeChain *lir) |
| { |
| const LAllocation *frame = lir->getOperand(0); |
| const LDefinition *object = lir->getDef(0); |
| |
| const ptrdiff_t frameOffset = StackFrame::offsetOfScopeChain(); |
| |
| masm.loadPtr(Address(ToRegister(frame), frameOffset), ToRegister(object)); |
| return true; |
| } |
| |
| bool |
| CodeGenerator::visitStackArgT(LStackArgT *lir) |
| { |
| const LAllocation *arg = lir->getArgument(); |
| MIRType argType = lir->mir()->getArgument()->type(); |
| uint32_t argslot = lir->argslot(); |
| |
| int32_t stack_offset = StackOffsetOfPassedArg(argslot); |
| Address dest(StackPointer, stack_offset); |
| |
| if (arg->isFloatReg()) |
| masm.storeDouble(ToFloatRegister(arg), dest); |
| else if (arg->isRegister()) |
| masm.storeValue(ValueTypeFromMIRType(argType), ToRegister(arg), dest); |
| else |
| masm.storeValue(*(arg->toConstant()), dest); |
| |
| return pushedArgumentSlots_.append(StackOffsetToSlot(stack_offset)); |
| } |
| |
| bool |
| CodeGenerator::visitStackArgV(LStackArgV *lir) |
| { |
| ValueOperand val = ToValue(lir, 0); |
| uint32_t argslot = lir->argslot(); |
| int32_t stack_offset = StackOffsetOfPassedArg(argslot); |
| |
| masm.storeValue(val, Address(StackPointer, stack_offset)); |
| return pushedArgumentSlots_.append(StackOffsetToSlot(stack_offset)); |
| } |
| |
bool
CodeGenerator::visitInteger(LInteger *lir)
{
    // Materialize a constant int32 into the output register.
    masm.move32(Imm32(lir->getValue()), ToRegister(lir->output()));
    return true;
}
| |
| bool |
| CodeGenerator::visitPointer(LPointer *lir) |
| { |
| if (lir->kind() == LPointer::GC_THING) |
| masm.movePtr(ImmGCPtr(lir->gcptr()), ToRegister(lir->output())); |
| else |
| masm.movePtr(ImmWord(lir->ptr()), ToRegister(lir->output())); |
| return true; |
| } |
| |
| bool |
| CodeGenerator::visitSlots(LSlots *lir) |
| { |
| Address slots(ToRegister(lir->object()), JSObject::offsetOfSlots()); |
| masm.loadPtr(slots, ToRegister(lir->output())); |
| return true; |
| } |
| |
bool
CodeGenerator::visitStoreSlotV(LStoreSlotV *store)
{
    Register base = ToRegister(store->slots());
    int32_t offset = store->mir()->slot() * sizeof(Value);

    const ValueOperand value = ToValue(store, LStoreSlotV::Value);

    // GC pre-barrier on the previous slot contents, when required by the MIR.
    if (store->mir()->needsBarrier())
        emitPreBarrier(Address(base, offset), MIRType_Value);

    masm.storeValue(value, Address(base, offset));
    return true;
}
| |
// Emit a shape-guarded chain: compare the object's shape against each shape
// recorded on the MIR node and load the property from the matching fixed or
// dynamic slot. Bails out if no shape matches.
bool
CodeGenerator::emitGetPropertyPolymorphic(LInstruction *ins, Register obj, Register scratch,
                                          const TypedOrValueRegister &output)
{
    MGetPropertyPolymorphic *mir = ins->mirRaw()->toGetPropertyPolymorphic();
    JS_ASSERT(mir->numShapes() > 1);

    masm.loadObjShape(obj, scratch);

    Label done;
    for (size_t i = 0; i < mir->numShapes(); i++) {
        Label next;
        masm.branchPtr(Assembler::NotEqual, scratch, ImmGCPtr(mir->objShape(i)), &next);

        Shape *shape = mir->shape(i);
        if (shape->slot() < shape->numFixedSlots()) {
            // Fixed slot.
            masm.loadTypedOrValue(Address(obj, JSObject::getFixedSlotOffset(shape->slot())),
                                  output);
        } else {
            // Dynamic slot: reuse |scratch| for the slots pointer — the
            // shape is no longer needed at this point.
            uint32_t offset = (shape->slot() - shape->numFixedSlots()) * sizeof(js::Value);
            masm.loadPtr(Address(obj, JSObject::offsetOfSlots()), scratch);
            masm.loadTypedOrValue(Address(scratch, offset), output);
        }

        masm.jump(&done);
        masm.bind(&next);
    }

    // Bailout if no shape matches.
    if (!bailout(ins->snapshot()))
        return false;

    masm.bind(&done);
    return true;
}
| |
bool
CodeGenerator::visitGetPropertyPolymorphicV(LGetPropertyPolymorphicV *ins)
{
    Register obj = ToRegister(ins->obj());
    // The boxed output's scratch register is free to hold the shape guard.
    ValueOperand output = GetValueOutput(ins);
    return emitGetPropertyPolymorphic(ins, obj, output.scratchReg(), output);
}
| |
bool
CodeGenerator::visitGetPropertyPolymorphicT(LGetPropertyPolymorphicT *ins)
{
    Register obj = ToRegister(ins->obj());
    TypedOrValueRegister output(ins->mir()->type(), ToAnyRegister(ins->output()));
    // A double output lives in a float register, so a dedicated GPR temp is
    // needed; otherwise the typed output's own GPR serves as scratch.
    Register temp = (output.type() == MIRType_Double)
                    ? ToRegister(ins->temp())
                    : output.typedReg().gpr();
    return emitGetPropertyPolymorphic(ins, obj, temp, output);
}
| |
// Emit a shape-guarded chain of stores: compare the object's shape against
// each shape recorded on the MIR node and store |value| into the matching
// fixed or dynamic slot, with a GC pre-barrier when the MIR requires one.
// Bails out if no shape matches.
bool
CodeGenerator::emitSetPropertyPolymorphic(LInstruction *ins, Register obj, Register scratch,
                                          const ConstantOrRegister &value)
{
    MSetPropertyPolymorphic *mir = ins->mirRaw()->toSetPropertyPolymorphic();
    JS_ASSERT(mir->numShapes() > 1);

    masm.loadObjShape(obj, scratch);

    Label done;
    for (size_t i = 0; i < mir->numShapes(); i++) {
        Label next;
        masm.branchPtr(Assembler::NotEqual, scratch, ImmGCPtr(mir->objShape(i)), &next);

        Shape *shape = mir->shape(i);
        if (shape->slot() < shape->numFixedSlots()) {
            // Fixed slot.
            Address addr(obj, JSObject::getFixedSlotOffset(shape->slot()));
            if (mir->needsBarrier())
                emitPreBarrier(addr, MIRType_Value);
            masm.storeConstantOrRegister(value, addr);
        } else {
            // Dynamic slot: reuse |scratch| for the slots pointer — the
            // shape is no longer needed at this point.
            masm.loadPtr(Address(obj, JSObject::offsetOfSlots()), scratch);
            Address addr(scratch, (shape->slot() - shape->numFixedSlots()) * sizeof(js::Value));
            if (mir->needsBarrier())
                emitPreBarrier(addr, MIRType_Value);
            masm.storeConstantOrRegister(value, addr);
        }

        masm.jump(&done);
        masm.bind(&next);
    }

    // Bailout if no shape matches.
    if (!bailout(ins->snapshot()))
        return false;

    masm.bind(&done);
    return true;
}
| |
| bool |
| CodeGenerator::visitSetPropertyPolymorphicV(LSetPropertyPolymorphicV *ins) |
| { |
| Register obj = ToRegister(ins->obj()); |
| Register temp = ToRegister(ins->temp()); |
| ValueOperand value = ToValue(ins, LSetPropertyPolymorphicV::Value); |
| return emitSetPropertyPolymorphic(ins, obj, temp, TypedOrValueRegister(value)); |
| } |
| |
| bool |
| CodeGenerator::visitSetPropertyPolymorphicT(LSetPropertyPolymorphicT *ins) |
| { |
| Register obj = ToRegister(ins->obj()); |
| Register temp = ToRegister(ins->temp()); |
| |
| ConstantOrRegister value; |
| if (ins->mir()->value()->isConstant()) |
| value = ConstantOrRegister(ins->mir()->value()->toConstant()->value()); |
| else |
| value = TypedOrValueRegister(ins->mir()->value()->type(), ToAnyRegister(ins->value())); |
| |
| return emitSetPropertyPolymorphic(ins, obj, temp, value); |
| } |
| |
| bool |
| CodeGenerator::visitElements(LElements *lir) |
| { |
| Address elements(ToRegister(lir->object()), JSObject::offsetOfElements()); |
| masm.loadPtr(elements, ToRegister(lir->output())); |
| return true; |
| } |
| |
// VM-call wrapper for ObjectElements::ConvertElementsToDoubles, used by
// the out-of-line path of visitConvertElementsToDoubles.
typedef bool (*ConvertElementsToDoublesFn)(JSContext *, uintptr_t);
static const VMFunction ConvertElementsToDoublesInfo =
    FunctionInfo<ConvertElementsToDoublesFn>(ObjectElements::ConvertElementsToDoubles);
| |
bool
CodeGenerator::visitConvertElementsToDoubles(LConvertElementsToDoubles *lir)
{
    Register elements = ToRegister(lir->elements());

    // Out-of-line path: call into the VM to convert the elements in place.
    OutOfLineCode *ool = oolCallVM(ConvertElementsToDoublesInfo, lir,
                                   (ArgList(), elements), StoreNothing());
    if (!ool)
        return false;

    // Take the OOL path when the CONVERT_DOUBLE_ELEMENTS flag is clear.
    Address convertedAddress(elements, ObjectElements::offsetOfFlags());
    Imm32 bit(ObjectElements::CONVERT_DOUBLE_ELEMENTS);
    masm.branchTest32(Assembler::Zero, convertedAddress, bit, ool->entry());
    masm.bind(ool->rejoin());
    return true;
}
| |
| bool |
| CodeGenerator::visitFunctionEnvironment(LFunctionEnvironment *lir) |
| { |
| Address environment(ToRegister(lir->function()), JSFunction::offsetOfEnvironment()); |
| masm.loadPtr(environment, ToRegister(lir->output())); |
| return true; |
| } |
| |
// Fetch the current parallel slice via an ABI call to ParForkJoinSlice.
bool
CodeGenerator::visitParSlice(LParSlice *lir)
{
    const Register tempReg = ToRegister(lir->getTempReg());

    masm.setupUnalignedABICall(0, tempReg);
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ParForkJoinSlice));
    // The ABI call leaves its result in ReturnReg, which the LIR has fixed
    // as this instruction's output; no move is needed.
    JS_ASSERT(ToRegister(lir->output()) == ReturnReg);
    return true;
}
| |
// Check that a parallel write to |object| is permitted, aborting the
// parallel execution (out of line) if ParWriteGuard returns false.
bool
CodeGenerator::visitParWriteGuard(LParWriteGuard *lir)
{
    // Write guards are only emitted when compiling for parallel execution.
    JS_ASSERT(gen->info().executionMode() == ParallelExecution);

    const Register tempReg = ToRegister(lir->getTempReg());
    masm.setupUnalignedABICall(2, tempReg);
    masm.passABIArg(ToRegister(lir->parSlice()));
    masm.passABIArg(ToRegister(lir->object()));
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ParWriteGuard));

    OutOfLineParallelAbort *bail = oolParallelAbort(ParallelBailoutIllegalWrite, lir);
    if (!bail)
        return false;

    // branch to the OOL failure code if false is returned
    masm.branchIfFalseBool(ReturnReg, bail->entry());
    return true;
}
| |
// Debugging aid: spill the value to the stack and pass its address to
// ParDumpValue through the ABI.
bool
CodeGenerator::visitParDump(LParDump *lir)
{
    ValueOperand value = ToValue(lir, 0);
    masm.reserveStack(sizeof(Value));
    masm.storeValue(value, Address(StackPointer, 0));
    // Pass a pointer to the spilled Value.
    masm.movePtr(StackPointer, CallTempReg0);
    masm.setupUnalignedABICall(1, CallTempReg1);
    masm.passABIArg(CallTempReg0);
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ParDumpValue));
    masm.freeStack(sizeof(Value));
    return true;
}
| |
// Bail out unless the input value is contained in the MIR's result type
// set.
bool
CodeGenerator::visitTypeBarrier(LTypeBarrier *lir)
{
    ValueOperand operand = ToValue(lir, LTypeBarrier::Input);
    Register scratch = ToTempUnboxRegister(lir->temp());

    // guardTypeSet branches to |matched| on success; the explicit jump
    // funnels every fall-through failure path into |miss|, which bails.
    Label matched, miss;
    masm.guardTypeSet(operand, lir->mir()->resultTypeSet(), scratch, &matched, &miss);
    masm.jump(&miss);
    if (!bailoutFrom(&miss, lir->snapshot()))
        return false;
    masm.bind(&matched);
    return true;
}
| |
// Bail out unless the input value is contained in the monitored type set.
// Same guard structure as visitTypeBarrier.
bool
CodeGenerator::visitMonitorTypes(LMonitorTypes *lir)
{
    ValueOperand operand = ToValue(lir, LMonitorTypes::Input);
    Register scratch = ToTempUnboxRegister(lir->temp());

    Label matched, miss;
    masm.guardTypeSet(operand, lir->mir()->typeSet(), scratch, &matched, &miss);
    masm.jump(&miss);
    if (!bailoutFrom(&miss, lir->snapshot()))
        return false;
    masm.bind(&matched);
    return true;
}
| |
| #ifdef JSGC_GENERATIONAL |
// Out-of-line path to update the store buffer.
class OutOfLineCallPostWriteBarrier : public OutOfLineCodeBase<CodeGenerator>
{
    // Instruction whose live registers must be saved around the VM call.
    LInstruction *lir_;
    // The object being written to; may be a constant or a register.
    const LAllocation *object_;

  public:
    OutOfLineCallPostWriteBarrier(LInstruction *lir, const LAllocation *object)
      : lir_(lir), object_(object)
    { }

    bool accept(CodeGenerator *codegen) {
        return codegen->visitOutOfLineCallPostWriteBarrier(this);
    }

    LInstruction *lir() const {
        return lir_;
    }
    const LAllocation *object() const {
        return object_;
    }
};
| |
// Out-of-line code: call PostWriteBarrier(runtime, object) through the
// ABI, preserving the instruction's live registers around the call.
bool
CodeGenerator::visitOutOfLineCallPostWriteBarrier(OutOfLineCallPostWriteBarrier *ool)
{
    saveLive(ool->lir());

    const LAllocation *obj = ool->object();

    // Candidate registers for the call; avoid the one already holding the
    // object, if any.
    GeneralRegisterSet regs;
    regs.add(CallTempReg0);
    regs.add(CallTempReg1);
    regs.add(CallTempReg2);

    Register objreg;
    if (obj->isConstant()) {
        // Materialize a constant object pointer into a scratch register.
        objreg = regs.takeAny();
        masm.movePtr(ImmGCPtr(&obj->toConstant()->toObject()), objreg);
    } else {
        objreg = ToRegister(obj);
        if (regs.has(objreg))
            regs.take(objreg);
    }

    Register runtimereg = regs.takeAny();
    masm.mov(ImmWord(GetIonContext()->compartment->rt), runtimereg);

    masm.setupUnalignedABICall(2, regs.takeAny());
    masm.passABIArg(runtimereg);
    masm.passABIArg(objreg);
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, PostWriteBarrier));

    restoreLive(ool->lir());

    masm.jump(ool->rejoin());
    return true;
}
| #endif |
| |
// Generational-GC post barrier for storing an object |value| into
// |object|. The barrier is only required when a nursery pointer is stored
// into a tenured object; the inline range checks filter out the other
// cases before taking the out-of-line VM call.
bool
CodeGenerator::visitPostWriteBarrierO(LPostWriteBarrierO *lir)
{
#ifdef JSGC_GENERATIONAL
    OutOfLineCallPostWriteBarrier *ool = new OutOfLineCallPostWriteBarrier(lir, lir->object());
    if (!addOutOfLineCode(ool))
        return false;

    Nursery &nursery = GetIonContext()->compartment->rt->gcNursery;

    if (lir->object()->isConstant()) {
        // Constant objects must be tenured; no runtime check needed.
        JSObject *obj = &lir->object()->toConstant()->toObject();
        JS_ASSERT(!nursery.isInside(obj));
        /*
        if (nursery.isInside(obj))
            return true;
        */
    } else {
        // If the object itself is in the nursery [start, heapEnd), no
        // barrier is needed: skip to the rejoin point.
        Label tenured;
        Register objreg = ToRegister(lir->object());
        masm.branchPtr(Assembler::Below, objreg, ImmWord(nursery.start()), &tenured);
        masm.branchPtr(Assembler::Below, objreg, ImmWord(nursery.heapEnd()), ool->rejoin());
        masm.bind(&tenured);
    }

    // Only call the barrier when the stored value is inside the nursery.
    Register valuereg = ToRegister(lir->value());
    masm.branchPtr(Assembler::Below, valuereg, ImmWord(nursery.start()), ool->rejoin());
    masm.branchPtr(Assembler::Below, valuereg, ImmWord(nursery.heapEnd()), ool->entry());

    masm.bind(ool->rejoin());
#endif
    return true;
}
| |
// Generational-GC post barrier for storing a boxed Value. Identical to
// visitPostWriteBarrierO except that non-object values are filtered out
// first, and the object payload must be unboxed before the nursery check.
bool
CodeGenerator::visitPostWriteBarrierV(LPostWriteBarrierV *lir)
{
#ifdef JSGC_GENERATIONAL
    OutOfLineCallPostWriteBarrier *ool = new OutOfLineCallPostWriteBarrier(lir, lir->object());
    if (!addOutOfLineCode(ool))
        return false;

    // Only object values can point into the nursery.
    ValueOperand value = ToValue(lir, LPostWriteBarrierV::Input);
    masm.branchTestObject(Assembler::NotEqual, value, ool->rejoin());

    Nursery &nursery = GetIonContext()->compartment->rt->gcNursery;

    if (lir->object()->isConstant()) {
        // Constant objects must be tenured; no runtime check needed.
        JSObject *obj = &lir->object()->toConstant()->toObject();
        JS_ASSERT(!nursery.isInside(obj));
        /*
        if (nursery.isInside(obj))
            return true;
        */
    } else {
        // If the object itself is in the nursery [start, heapEnd), no
        // barrier is needed: skip to the rejoin point.
        Label tenured;
        Register objreg = ToRegister(lir->object());
        masm.branchPtr(Assembler::Below, objreg, ImmWord(nursery.start()), &tenured);
        masm.branchPtr(Assembler::Below, objreg, ImmWord(nursery.heapEnd()), ool->rejoin());
        masm.bind(&tenured);
    }

    // Only call the barrier when the unboxed object is inside the nursery.
    Register valuereg = masm.extractObject(value, ToTempUnboxRegister(lir->temp()));
    masm.branchPtr(Assembler::Below, valuereg, ImmWord(nursery.start()), ool->rejoin());
    masm.branchPtr(Assembler::Below, valuereg, ImmWord(nursery.heapEnd()), ool->entry());

    masm.bind(ool->rejoin());
#endif
    return true;
}
| |
// Call a known native (C++) function: build a fake exit frame, invoke the
// native through the ABI, and unbox the result from vp[0].
bool
CodeGenerator::visitCallNative(LCallNative *call)
{
    JSFunction *target = call->getSingleTarget();
    JS_ASSERT(target);
    JS_ASSERT(target->isNative());

    int callargslot = call->argslot();
    int unusedStack = StackOffsetOfPassedArg(callargslot);

    // Registers used for callWithABI() argument-passing.
    const Register argJSContextReg = ToRegister(call->getArgJSContextReg());
    const Register argUintNReg = ToRegister(call->getArgUintNReg());
    const Register argVpReg = ToRegister(call->getArgVpReg());

    // Misc. temporary registers.
    const Register tempReg = ToRegister(call->getTempReg());

    DebugOnly<uint32_t> initialStack = masm.framePushed();

    masm.checkStackAlignment();

    // Native functions have the signature:
    //  bool (*)(JSContext *, unsigned, Value *vp)
    // Where vp[0] is space for an outparam, vp[1] is |this|, and vp[2] onward
    // are the function arguments.

    // Allocate space for the outparam, moving the StackPointer to what will be &vp[1].
    masm.adjustStack(unusedStack);

    // Push a Value containing the callee object: natives are allowed to access their callee before
    // setting the return value. The StackPointer is moved to &vp[0].
    masm.Push(ObjectValue(*target));

    // Preload arguments into registers.
    masm.loadJSContext(argJSContextReg);
    masm.move32(Imm32(call->numStackArgs()), argUintNReg);
    masm.movePtr(StackPointer, argVpReg);

    masm.Push(argUintNReg);

    // Construct native exit frame.
    uint32_t safepointOffset;
    if (!masm.buildFakeExitFrame(tempReg, &safepointOffset))
        return false;
    masm.enterFakeExitFrame();

    if (!markSafepointAt(safepointOffset, call))
        return false;

    // Construct and execute call.
    masm.setupUnalignedABICall(3, tempReg);
    masm.passABIArg(argJSContextReg);
    masm.passABIArg(argUintNReg);
    masm.passABIArg(argVpReg);
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, target->native()));

    // Test for failure: natives return false on error.
    Label success, exception;
    masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, &exception);

    // Load the outparam vp[0] into output register(s).
    masm.loadValue(Address(StackPointer, IonNativeExitFrameLayout::offsetOfResult()), JSReturnOperand);
    masm.jump(&success);

    // Handle exception case.
    {
        masm.bind(&exception);
        masm.handleException();
    }
    masm.bind(&success);

    // The next instruction is removing the footer of the exit frame, so there
    // is no need for leaveFakeExitFrame.

    // Move the StackPointer back to its original location, unwinding the native exit frame.
    masm.adjustStack(IonNativeExitFrameLayout::Size() - unusedStack);
    JS_ASSERT(masm.framePushed() == initialStack);

    dropArguments(call->numStackArgs() + 1);
    return true;
}
| |
// Call a DOM native method using the JSJitInfo fast-call convention:
// (JSContext *, HandleObject, void *private, const JSJitMethodCallArgs &).
// Builds a DOM-method exit frame and, for fallible natives, checks the
// boolean return before loading the result from vp[0].
bool
CodeGenerator::visitCallDOMNative(LCallDOMNative *call)
{
    JSFunction *target = call->getSingleTarget();
    JS_ASSERT(target);
    JS_ASSERT(target->isNative());
    JS_ASSERT(target->jitInfo());
    JS_ASSERT(call->mir()->isDOMFunction());

    int callargslot = call->argslot();
    int unusedStack = StackOffsetOfPassedArg(callargslot);

    // Registers used for callWithABI() argument-passing.
    const Register argJSContext = ToRegister(call->getArgJSContext());
    const Register argObj = ToRegister(call->getArgObj());
    const Register argPrivate = ToRegister(call->getArgPrivate());
    const Register argArgs = ToRegister(call->getArgArgs());

    DebugOnly<uint32_t> initialStack = masm.framePushed();

    masm.checkStackAlignment();

    // DOM methods have the signature:
    //  bool (*)(JSContext *, HandleObject, void *private, const JSJitMethodCallArgs& args)
    // Where args is initialized from an argc and a vp, vp[0] is space for an
    // outparam and the callee, vp[1] is |this|, and vp[2] onward are the
    // function arguments.  Note that args stores the argv, not the vp, and
    // argv == vp + 2.

    // Nestle the stack up against the pushed arguments, leaving StackPointer at
    // &vp[1]
    masm.adjustStack(unusedStack);
    // argObj is filled with the extracted object, then returned.
    Register obj = masm.extractObject(Address(StackPointer, 0), argObj);
    JS_ASSERT(obj == argObj);

    // Push a Value containing the callee object: natives are allowed to access their callee before
    // setting the return value. After this the StackPointer points to &vp[0].
    masm.Push(ObjectValue(*target));

    // Now compute the argv value.  Since StackPointer is pointing to &vp[0] and
    // argv is &vp[2] we just need to add 2*sizeof(Value) to the current
    // StackPointer.
    JS_STATIC_ASSERT(JSJitMethodCallArgsTraits::offsetOfArgv == 0);
    JS_STATIC_ASSERT(JSJitMethodCallArgsTraits::offsetOfArgc ==
                     IonDOMMethodExitFrameLayoutTraits::offsetOfArgcFromArgv);
    masm.computeEffectiveAddress(Address(StackPointer, 2 * sizeof(Value)), argArgs);

    // GetReservedSlot(obj, DOM_OBJECT_SLOT).toPrivate()
    masm.loadPrivate(Address(obj, JSObject::getFixedSlotOffset(0)), argPrivate);

    // Push argc from the call instruction into what will become the IonExitFrame
    masm.Push(Imm32(call->numStackArgs()));

    // Push our argv onto the stack
    masm.Push(argArgs);
    // And store our JSJitMethodCallArgs* in argArgs.
    masm.movePtr(StackPointer, argArgs);

    // Push |this| object for passing HandleObject. We push after argc to
    // maintain the same sp-relative location of the object pointer with other
    // DOMExitFrames.
    masm.Push(argObj);
    masm.movePtr(StackPointer, argObj);

    // Construct native exit frame.
    uint32_t safepointOffset;
    if (!masm.buildFakeExitFrame(argJSContext, &safepointOffset))
        return false;
    masm.enterFakeExitFrame(ION_FRAME_DOMMETHOD);

    if (!markSafepointAt(safepointOffset, call))
        return false;

    // Construct and execute call.
    masm.setupUnalignedABICall(4, argJSContext);

    masm.loadJSContext(argJSContext);

    masm.passABIArg(argJSContext);
    masm.passABIArg(argObj);
    masm.passABIArg(argPrivate);
    masm.passABIArg(argArgs);
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, target->jitInfo()->method));

    if (target->jitInfo()->isInfallible) {
        // Infallible natives cannot fail: load the result directly.
        masm.loadValue(Address(StackPointer, IonDOMMethodExitFrameLayout::offsetOfResult()),
                       JSReturnOperand);
    } else {
        // Test for failure.
        Label success, exception;
        masm.branchIfFalseBool(ReturnReg, &exception);

        // Load the outparam vp[0] into output register(s).
        masm.loadValue(Address(StackPointer, IonDOMMethodExitFrameLayout::offsetOfResult()),
                       JSReturnOperand);
        masm.jump(&success);

        // Handle exception case.
        {
            masm.bind(&exception);
            masm.handleException();
        }
        masm.bind(&success);
    }

    // The next instruction is removing the footer of the exit frame, so there
    // is no need for leaveFakeExitFrame.

    // Move the StackPointer back to its original location, unwinding the native exit frame.
    masm.adjustStack(IonDOMMethodExitFrameLayout::Size() - unusedStack);
    JS_ASSERT(masm.framePushed() == initialStack);

    dropArguments(call->numStackArgs() + 1);
    return true;
}
| |
// VM-call wrapper for GetIntrinsicValue, used by visitCallGetIntrinsicValue.
typedef bool (*GetIntrinsicValueFn)(JSContext *cx, HandlePropertyName, MutableHandleValue);
static const VMFunction GetIntrinsicValueInfo =
    FunctionInfo<GetIntrinsicValueFn>(GetIntrinsicValue);
| |
| bool |
| CodeGenerator::visitCallGetIntrinsicValue(LCallGetIntrinsicValue *lir) |
| { |
| pushArg(ImmGCPtr(lir->mir()->name())); |
| return callVM(GetIntrinsicValueInfo, lir); |
| } |
| |
// VM-call wrapper for InvokeFunction, used by the uncompiled-function
// fallback paths of the call instructions below.
typedef bool (*InvokeFunctionFn)(JSContext *, HandleFunction, uint32_t, Value *, Value *);
static const VMFunction InvokeFunctionInfo = FunctionInfo<InvokeFunctionFn>(InvokeFunction);
| |
// Fallback call path: invoke |calleereg| through the interpreter via the
// InvokeFunction VM call, with the argument vector already on the stack.
bool
CodeGenerator::emitCallInvokeFunction(LInstruction *call, Register calleereg,
                                      uint32_t argc, uint32_t unusedStack)
{
    // Nestle %esp up to the argument vector.
    // Each path must account for framePushed_ separately, for callVM to be valid.
    masm.freeStack(unusedStack);

    pushArg(StackPointer); // argv.
    pushArg(Imm32(argc));  // argc.
    pushArg(calleereg);    // JSFunction *.

    if (!callVM(InvokeFunctionInfo, call))
        return false;

    // Un-nestle %esp from the argument vector. No prefix was pushed.
    masm.reserveStack(unusedStack);
    return true;
}
| |
// Call a function whose target is not known at compile time. Guards that
// the callee is a scripted function, calls its jitcode (going through the
// arguments rectifier on argc underflow), and falls back to a VM Invoke
// for native or not-yet-compiled callees.
bool
CodeGenerator::visitCallGeneric(LCallGeneric *call)
{
    Register calleereg = ToRegister(call->getFunction());
    Register objreg    = ToRegister(call->getTempObject());
    Register nargsreg  = ToRegister(call->getNargsReg());
    uint32_t unusedStack = StackOffsetOfPassedArg(call->argslot());
    ExecutionMode executionMode = gen->info().executionMode();
    Label uncompiled, thunk, makeCall, end;

    // Known-target case is handled by LCallKnown.
    JS_ASSERT(!call->hasSingleTarget());

    // Generate an ArgumentsRectifier.
    IonCompartment *ion = gen->ionCompartment();
    IonCode *argumentsRectifier = ion->getArgumentsRectifier(executionMode);

    masm.checkStackAlignment();

    // Guard that calleereg is actually a function object.
    masm.loadObjClass(calleereg, nargsreg);
    masm.cmpPtr(nargsreg, ImmWord(&JSFunction::class_));
    if (!bailoutIf(Assembler::NotEqual, call->snapshot()))
        return false;

    // Guard that calleereg is an interpreted function with a JSScript:
    masm.branchIfFunctionHasNoScript(calleereg, &uncompiled);

    // Knowing that calleereg is a non-native function, load the JSScript.
    masm.loadPtr(Address(calleereg, JSFunction::offsetOfNativeOrScript()), objreg);

    // Load script jitcode.
    masm.loadBaselineOrIonRaw(objreg, objreg, executionMode, &uncompiled);

    // Nestle the StackPointer up to the argument vector.
    masm.freeStack(unusedStack);

    // Construct the IonFramePrefix.
    uint32_t descriptor = MakeFrameDescriptor(masm.framePushed(), IonFrame_OptimizedJS);
    masm.Push(Imm32(call->numActualArgs()));
    masm.tagCallee(calleereg, executionMode);
    masm.Push(calleereg);
    // Clear the tag after pushing it, as we load nargs below.
    masm.clearCalleeTag(calleereg, executionMode);
    masm.Push(Imm32(descriptor));

    // Check whether the provided arguments satisfy target argc.
    masm.load16ZeroExtend(Address(calleereg, offsetof(JSFunction, nargs)), nargsreg);
    masm.cmp32(nargsreg, Imm32(call->numStackArgs()));
    masm.j(Assembler::Above, &thunk);

    masm.jump(&makeCall);

    // Argument fixup needed. Load the ArgumentsRectifier.
    masm.bind(&thunk);
    {
        JS_ASSERT(ArgumentsRectifierReg != objreg);
        masm.movePtr(ImmGCPtr(argumentsRectifier), objreg); // Necessary for GC marking.
        masm.loadPtr(Address(objreg, IonCode::offsetOfCode()), objreg);
        masm.move32(Imm32(call->numStackArgs()), ArgumentsRectifierReg);
    }

    // Finally call the function in objreg.
    masm.bind(&makeCall);
    uint32_t callOffset = masm.callIon(objreg);
    if (!markSafepointAt(callOffset, call))
        return false;

    // Increment to remove IonFramePrefix; decrement to fill FrameSizeClass.
    // The return address has already been removed from the Ion frame.
    int prefixGarbage = sizeof(IonJSFrameLayout) - sizeof(void *);
    masm.adjustStack(prefixGarbage - unusedStack);
    masm.jump(&end);

    // Handle uncompiled or native functions.
    masm.bind(&uncompiled);
    switch (executionMode) {
      case SequentialExecution:
        if (!emitCallInvokeFunction(call, calleereg, call->numActualArgs(), unusedStack))
            return false;
        break;

      case ParallelExecution:
        if (!emitParCallToUncompiledScript(call, calleereg))
            return false;
        break;
    }

    masm.bind(&end);

    // If the return value of the constructing function is Primitive,
    // replace the return value with the Object from CreateThis.
    if (call->mir()->isConstructing()) {
        Label notPrimitive;
        masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand, &notPrimitive);
        masm.loadValue(Address(StackPointer, unusedStack), JSReturnOperand);
        masm.bind(&notPrimitive);
    }

    if (!checkForParallelBailout(call))
        return false;

    dropArguments(call->numStackArgs() + 1);
    return true;
}
| |
// Generates a call to ParCallToUncompiledScript() and then bails out.
// |calleeReg| should contain the JSFunction*.
bool
CodeGenerator::emitParCallToUncompiledScript(LInstruction *lir,
                                             Register calleeReg)
{
    OutOfLineCode *bail = oolParallelAbort(ParallelBailoutCalledToUncompiledScript, lir);
    if (!bail)
        return false;

    // Report the callee to the VM, then unconditionally take the abort path.
    masm.movePtr(calleeReg, CallTempReg0);
    masm.setupUnalignedABICall(1, CallTempReg1);
    masm.passABIArg(CallTempReg0);
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ParCallToUncompiledScript));
    masm.jump(bail->entry());
    return true;
}
| |
// Call a function with a single known target. Skips the class guard of
// visitCallGeneric; if the target's script can never be Ion-compiled, the
// call is emitted as a plain VM Invoke instead.
bool
CodeGenerator::visitCallKnown(LCallKnown *call)
{
    Register calleereg = ToRegister(call->getFunction());
    Register objreg    = ToRegister(call->getTempObject());
    uint32_t unusedStack = StackOffsetOfPassedArg(call->argslot());
    JSFunction *target = call->getSingleTarget();
    ExecutionMode executionMode = gen->info().executionMode();
    Label end, uncompiled;

    // Native single targets are handled by LCallNative.
    JS_ASSERT(!target->isNative());
    // Missing arguments must have been explicitly appended by the IonBuilder.
    JS_ASSERT(target->nargs <= call->numStackArgs());

    masm.checkStackAlignment();

    // If the function is known to be uncompilable, just emit the call to
    // Invoke in sequential mode, else mark as cannot compile.
    JS_ASSERT(call->mir()->hasRootedScript());
    JSScript *targetScript = target->nonLazyScript();
    if (GetIonScript(targetScript, executionMode) == ION_DISABLED_SCRIPT) {
        // No Invoke fallback exists in parallel mode; fail the compile.
        if (executionMode == ParallelExecution)
            return false;

        if (!emitCallInvokeFunction(call, calleereg, call->numActualArgs(), unusedStack))
            return false;

        // For constructors, a primitive return value is replaced with the
        // object from CreateThis (stored on the stack).
        if (call->mir()->isConstructing()) {
            Label notPrimitive;
            masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand, &notPrimitive);
            masm.loadValue(Address(StackPointer, unusedStack), JSReturnOperand);
            masm.bind(&notPrimitive);
        }

        dropArguments(call->numStackArgs() + 1);
        return true;
    }

    // The calleereg is known to be a non-native function, but might point to
    // a LazyScript instead of a JSScript.
    masm.branchIfFunctionHasNoScript(calleereg, &uncompiled);

    // Knowing that calleereg is a non-native function, load the JSScript.
    masm.loadPtr(Address(calleereg, JSFunction::offsetOfNativeOrScript()), objreg);

    // Load script jitcode.
    if (call->mir()->needsArgCheck())
        masm.loadBaselineOrIonRaw(objreg, objreg, executionMode, &uncompiled);
    else
        masm.loadBaselineOrIonNoArgCheck(objreg, objreg, executionMode, &uncompiled);

    // Nestle the StackPointer up to the argument vector.
    masm.freeStack(unusedStack);

    // Construct the IonFramePrefix.
    uint32_t descriptor = MakeFrameDescriptor(masm.framePushed(), IonFrame_OptimizedJS);
    masm.tagCallee(calleereg, executionMode);
    masm.Push(Imm32(call->numActualArgs()));
    masm.Push(calleereg);
    // Clear the tag after pushing it.
    masm.clearCalleeTag(calleereg, executionMode);
    masm.Push(Imm32(descriptor));

    // Finally call the function in objreg.
    uint32_t callOffset = masm.callIon(objreg);
    if (!markSafepointAt(callOffset, call))
        return false;

    // Increment to remove IonFramePrefix; decrement to fill FrameSizeClass.
    // The return address has already been removed from the Ion frame.
    int prefixGarbage = sizeof(IonJSFrameLayout) - sizeof(void *);
    masm.adjustStack(prefixGarbage - unusedStack);
    masm.jump(&end);

    // Handle uncompiled functions.
    masm.bind(&uncompiled);
    switch (executionMode) {
      case SequentialExecution:
        if (!emitCallInvokeFunction(call, calleereg, call->numActualArgs(), unusedStack))
            return false;
        break;

      case ParallelExecution:
        if (!emitParCallToUncompiledScript(call, calleereg))
            return false;
        break;
    }

    masm.bind(&end);

    if (!checkForParallelBailout(call))
        return false;

    // If the return value of the constructing function is Primitive,
    // replace the return value with the Object from CreateThis.
    if (call->mir()->isConstructing()) {
        Label notPrimitive;
        masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand, &notPrimitive);
        masm.loadValue(Address(StackPointer, unusedStack), JSReturnOperand);
        masm.bind(&notPrimitive);
    }

    dropArguments(call->numStackArgs() + 1);
    return true;
}
| |
bool
CodeGenerator::checkForParallelBailout(LInstruction *lir)
{
    // In parallel mode, if we call another ion-compiled function and
    // it returns JS_ION_ERROR, that indicates a bailout that we have
    // to propagate up the stack.
    ExecutionMode executionMode = gen->info().executionMode();
    if (executionMode == ParallelExecution) {
        OutOfLinePropagateParallelAbort *bail = oolPropagateParallelAbort(lir);
        if (!bail)
            return false;
        // The error sentinel is a magic value in the return operand.
        masm.branchTestMagic(Assembler::Equal, JSReturnOperand, bail->entry());
    }
    return true;
}
| |
// fun.apply() fallback: invoke through the VM with the arguments already
// pushed on the stack. |extraStackSize| (the size of the pushed argument
// area) is preserved across the call so the caller can pop it afterwards.
bool
CodeGenerator::emitCallInvokeFunction(LApplyArgsGeneric *apply, Register extraStackSize)
{
    Register objreg = ToRegister(apply->getTempObject());
    JS_ASSERT(objreg != extraStackSize);

    // Push the space used by the arguments.
    masm.movePtr(StackPointer, objreg);
    masm.Push(extraStackSize);

    pushArg(objreg);                           // argv.
    pushArg(ToRegister(apply->getArgc()));     // argc.
    pushArg(ToRegister(apply->getFunction())); // JSFunction *.

    // This specialization of callVM restores the extraStackSize after the call.
    if (!callVM(InvokeFunctionInfo, apply, &extraStackSize))
        return false;

    masm.Pop(extraStackSize);
    return true;
}
| |
// Do not bailout after the execution of this function since the stack no longer
// correspond to what is expected by the snapshots.
//
// Copies the caller's actual arguments (plus |this|) onto the stack for
// fun.apply(), and leaves the total pushed size in |extraStackSpace|.
void
CodeGenerator::emitPushArguments(LApplyArgsGeneric *apply, Register extraStackSpace)
{
    // Holds the number of arguments to copy (from getArgc()).
    Register argcreg = ToRegister(apply->getArgc());

    Register copyreg = ToRegister(apply->getTempObject());
    size_t argvOffset = frameSize() + IonJSFrameLayout::offsetOfActualArgs();
    Label end;

    // Initialize the loop counter AND Compute the stack usage (if == 0)
    masm.movePtr(argcreg, extraStackSpace);
    masm.branchTestPtr(Assembler::Zero, argcreg, argcreg, &end);

    // Copy arguments.
    {
        Register count = extraStackSpace; // <- argcreg
        Label loop;
        masm.bind(&loop);

        // We remove sizeof(void*) from argvOffset because without it we target
        // the address after the memory area that we want to copy.
        BaseIndex disp(StackPointer, argcreg, ScaleFromElemWidth(sizeof(Value)), argvOffset - sizeof(void*));

        // Do not use Push here, because it would count as 1 in framePushed
        // instead of 0. These pushes are accounted for by argcreg alone.
        masm.loadPtr(disp, copyreg);
        masm.push(copyreg);

        // Handle 32 bits architectures: a Value is two words, so copy the
        // second word as well.
        if (sizeof(Value) == 2 * sizeof(void*)) {
            masm.loadPtr(disp, copyreg);
            masm.push(copyreg);
        }

        masm.decBranchPtr(Assembler::NonZero, count, Imm32(1), &loop);
    }

    // Compute the stack usage: argc scaled by the size of a Value.
    masm.movePtr(argcreg, extraStackSpace);
    masm.lshiftPtr(Imm32::ShiftOf(ScaleFromElemWidth(sizeof(Value))), extraStackSpace);

    // Join with all arguments copied and the extra stack usage computed.
    masm.bind(&end);

    // Push |this|.
    masm.addPtr(Imm32(sizeof(Value)), extraStackSpace);
    masm.pushValue(ToValue(apply, LApplyArgsGeneric::ThisIndex));
}
| |
// Undo emitPushArguments: free the stack space it reserved.
void
CodeGenerator::emitPopArguments(LApplyArgsGeneric *apply, Register extraStackSpace)
{
    // Pop |this| and Arguments.
    masm.freeStack(extraStackSpace);
}
| |
// Implements fun.apply(thisv, arguments): re-push the caller's actual
// arguments, then call the function through its Ion code (using the
// arguments rectifier on underflow) or fall back to a VM Invoke.
bool
CodeGenerator::visitApplyArgsGeneric(LApplyArgsGeneric *apply)
{
    // Holds the function object.
    Register calleereg = ToRegister(apply->getFunction());

    // Temporary register for modifying the function object.
    Register objreg = ToRegister(apply->getTempObject());
    Register copyreg = ToRegister(apply->getTempCopy());

    // Holds the number of arguments (from getArgc()).
    Register argcreg = ToRegister(apply->getArgc());

    // Unless already known, guard that calleereg is actually a function object.
    if (!apply->hasSingleTarget()) {
        masm.loadObjClass(calleereg, objreg);
        masm.cmpPtr(objreg, ImmWord(&JSFunction::class_));
        if (!bailoutIf(Assembler::NotEqual, apply->snapshot()))
            return false;
    }

    // Copy the arguments of the current function.
    emitPushArguments(apply, copyreg);

    masm.checkStackAlignment();

    // If the function is known to be uncompilable, only emit the call to InvokeFunction.
    ExecutionMode executionMode = gen->info().executionMode();
    if (apply->hasSingleTarget()) {
        JSFunction *target = apply->getSingleTarget();
        if (!CanIonCompile(target, executionMode)) {
            if (!emitCallInvokeFunction(apply, copyreg))
                return false;
            emitPopArguments(apply, copyreg);
            return true;
        }
    }

    Label end, invoke;

    // Guard that calleereg is an interpreted function with a JSScript:
    if (!apply->hasSingleTarget()) {
        masm.branchIfFunctionHasNoScript(calleereg, &invoke);
    } else {
        // Native single targets are handled by LCallNative.
        JS_ASSERT(!apply->getSingleTarget()->isNative());
    }

    // Knowing that calleereg is a non-native function, load the JSScript.
    masm.loadPtr(Address(calleereg, JSFunction::offsetOfNativeOrScript()), objreg);

    // Load script jitcode.
    masm.loadBaselineOrIonRaw(objreg, objreg, executionMode, &invoke);

    // Call with an Ion frame or a rectifier frame.
    {
        // Create the frame descriptor.
        unsigned pushed = masm.framePushed();
        masm.addPtr(Imm32(pushed), copyreg);
        masm.makeFrameDescriptor(copyreg, IonFrame_OptimizedJS);

        masm.Push(argcreg);
        masm.Push(calleereg);
        masm.Push(copyreg); // descriptor

        Label underflow, rejoin;

        // Check whether the provided arguments satisfy target argc.
        if (!apply->hasSingleTarget()) {
            masm.load16ZeroExtend(Address(calleereg, offsetof(JSFunction, nargs)), copyreg);
            masm.cmp32(argcreg, copyreg);
            masm.j(Assembler::Below, &underflow);
        } else {
            masm.cmp32(argcreg, Imm32(apply->getSingleTarget()->nargs));
            masm.j(Assembler::Below, &underflow);
        }

        // Skip the construction of the rectifier frame because we have no
        // underflow.
        masm.jump(&rejoin);

        // Argument fixup needed. Get ready to call the argumentsRectifier.
        {
            masm.bind(&underflow);

            // Hardcode the address of the argumentsRectifier code.
            IonCompartment *ion = gen->ionCompartment();
            IonCode *argumentsRectifier = ion->getArgumentsRectifier(executionMode);

            JS_ASSERT(ArgumentsRectifierReg != objreg);
            masm.movePtr(ImmGCPtr(argumentsRectifier), objreg); // Necessary for GC marking.
            masm.loadPtr(Address(objreg, IonCode::offsetOfCode()), objreg);
            masm.movePtr(argcreg, ArgumentsRectifierReg);
        }

        masm.bind(&rejoin);

        // Finally call the function in objreg, as assigned by one of the paths above.
        uint32_t callOffset = masm.callIon(objreg);
        if (!markSafepointAt(callOffset, apply))
            return false;

        // Recover the number of arguments from the frame descriptor.
        masm.loadPtr(Address(StackPointer, 0), copyreg);
        masm.rshiftPtr(Imm32(FRAMESIZE_SHIFT), copyreg);
        masm.subPtr(Imm32(pushed), copyreg);

        // Increment to remove IonFramePrefix; decrement to fill FrameSizeClass.
        // The return address has already been removed from the Ion frame.
        int prefixGarbage = sizeof(IonJSFrameLayout) - sizeof(void *);
        masm.adjustStack(prefixGarbage);
        masm.jump(&end);
    }

    // Handle uncompiled or native functions.
    {
        masm.bind(&invoke);
        if (!emitCallInvokeFunction(apply, copyreg))
            return false;
    }

    // Pop arguments and continue.
    masm.bind(&end);
    emitPopArguments(apply, copyreg);

    return true;
}
| |
| bool |
| CodeGenerator::visitGetDynamicName(LGetDynamicName *lir) |
| { |
| Register scopeChain = ToRegister(lir->getScopeChain()); |
| Register name = ToRegister(lir->getName()); |
| Register temp1 = ToRegister(lir->temp1()); |
| Register temp2 = ToRegister(lir->temp2()); |
| Register temp3 = ToRegister(lir->temp3()); |
| |
| masm.loadJSContext(temp3); |
| |
| /* Make space for the outparam. */ |
| masm.adjustStack(-int32_t(sizeof(Value))); |
| masm.movePtr(StackPointer, temp2); |
| |
| masm.setupUnalignedABICall(4, temp1); |
| masm.passABIArg(temp3); |
| masm.passABIArg(scopeChain); |
| masm.passABIArg(name); |
| masm.passABIArg(temp2); |
| masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, GetDynamicName)); |
| |
| const ValueOperand out = ToOutValue(lir); |
| |
| masm.loadValue(Address(StackPointer, 0), out); |
| masm.adjustStack(sizeof(Value)); |
| |
| Assembler::Condition cond = masm.testUndefined(Assembler::Equal, out); |
| return bailoutIf(cond, lir->snapshot()); |
| } |
| |
| bool |
| CodeGenerator::visitFilterArguments(LFilterArguments *lir) |
| { |
| Register string = ToRegister(lir->getString()); |
| Register temp1 = ToRegister(lir->temp1()); |
| Register temp2 = ToRegister(lir->temp2()); |
| |
| masm.loadJSContext(temp2); |
| |
| masm.setupUnalignedABICall(2, temp1); |
| masm.passABIArg(temp2); |
| masm.passABIArg(string); |
| masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, FilterArguments)); |
| |
| Label bail; |
| masm.branch32(Assembler::Equal, ReturnReg, Imm32(0), &bail); |
| return bailoutFrom(&bail, lir->snapshot()); |
| } |
| |
// Signature of DirectEvalFromIon, the VM entry point used for direct eval.
typedef bool (*DirectEvalFn)(JSContext *, HandleObject, HandleScript, HandleValue, HandleString,
                             jsbytecode *, MutableHandleValue);
static const VMFunction DirectEvalInfo = FunctionInfo<DirectEvalFn>(DirectEvalFromIon);

bool
CodeGenerator::visitCallDirectEval(LCallDirectEval *lir)
{
    // Emit a VM call to DirectEvalFromIon for a direct eval() call site.
    Register scopeChain = ToRegister(lir->getScopeChain());
    Register string = ToRegister(lir->getString());

    // Arguments are pushed in reverse order of the C++ signature.
    pushArg(ImmWord(lir->mir()->pc()));
    pushArg(string);
    pushArg(ToValue(lir, LCallDirectEval::ThisValueInput));
    pushArg(ImmGCPtr(gen->info().script()));
    pushArg(scopeChain);

    return callVM(DirectEvalInfo, lir);
}
| |
// Registers safe for use before generatePrologue().
static const uint32_t EntryTempMask = Registers::TempMask & ~(1 << OsrFrameReg.code());

bool
CodeGenerator::generateArgumentsChecks()
{
    // Type-check the actual arguments against the type sets inferred for the
    // formals, bailing out to the interpreter on any mismatch.
    MIRGraph &mir = gen->graph();
    MResumePoint *rp = mir.entryResumePoint();

    // Reserve the amount of stack the actual frame will use. We have to undo
    // this before falling through to the method proper though, because the
    // monomorphic call case will bypass this entire path.
    masm.reserveStack(frameSize());

    // No registers are allocated yet, so it's safe to grab anything.
    Register temp = GeneralRegisterSet(EntryTempMask).getAny();

    CompileInfo &info = gen->info();

    Label miss;
    for (uint32_t i = info.startArgSlot(); i < info.endArgSlot(); i++) {
        // All initial parameters are guaranteed to be MParameters.
        MParameter *param = rp->getOperand(i)->toParameter();
        const types::TypeSet *types = param->resultTypeSet();
        if (!types || types->unknown())
            continue;

        // Calculate the offset on the stack of the argument.
        // (i - info.startArgSlot()) - Compute index of arg within arg vector.
        // ... * sizeof(Value) - Scale by value size.
        // ArgToStackOffset(...) - Compute displacement within arg vector.
        int32_t offset = ArgToStackOffset((i - info.startArgSlot()) * sizeof(Value));
        Label matched;
        // guardTypeSet branches to |matched| on success and |miss| on an
        // immediate mismatch; any fallthrough is also treated as a miss.
        masm.guardTypeSet(Address(StackPointer, offset), types, temp, &matched, &miss);
        masm.jump(&miss);
        masm.bind(&matched);
    }

    if (miss.used() && !bailoutFrom(&miss, graph.entrySnapshot()))
        return false;

    // Undo the stack reservation made above.
    masm.freeStack(frameSize());

    return true;
}
| |
| // Out-of-line path to report over-recursed error and fail. |
| class CheckOverRecursedFailure : public OutOfLineCodeBase<CodeGenerator> |
| { |
| LCheckOverRecursed *lir_; |
| |
| public: |
| CheckOverRecursedFailure(LCheckOverRecursed *lir) |
| : lir_(lir) |
| { } |
| |
| bool accept(CodeGenerator *codegen) { |
| return codegen->visitCheckOverRecursedFailure(this); |
| } |
| |
| LCheckOverRecursed *lir() const { |
| return lir_; |
| } |
| }; |
| |
bool
CodeGenerator::visitCheckOverRecursed(LCheckOverRecursed *lir)
{
    // Ensure that this frame will not cross the stack limit.
    // This is a weak check, justified by Ion using the C stack: we must always
    // be some distance away from the actual limit, since if the limit is
    // crossed, an error must be thrown, which requires more frames.
    //
    // It must always be possible to trespass past the stack limit.
    // Ion may legally place frames very close to the limit. Calling additional
    // C functions may then violate the limit without any checking.

    JSRuntime *rt = gen->compartment->rt;

    // Since Ion frames exist on the C stack, the stack limit may be
    // dynamically set by JS_SetThreadStackLimit() and JS_SetNativeStackQuota().
    uintptr_t *limitAddr = &rt->mainThread.ionStackLimit;

    CheckOverRecursedFailure *ool = new CheckOverRecursedFailure(lir);
    if (!addOutOfLineCode(ool))
        return false;

    // Conditional forward (unlikely) branch to failure. The stack grows
    // down, so limit >= sp means we have run past the limit.
    masm.branchPtr(Assembler::AboveOrEqual, AbsoluteAddress(limitAddr), StackPointer, ool->entry());
    masm.bind(ool->rejoin());

    return true;
}
| |
| typedef bool (*DefVarOrConstFn)(JSContext *, HandlePropertyName, unsigned, HandleObject); |
| static const VMFunction DefVarOrConstInfo = |
| FunctionInfo<DefVarOrConstFn>(DefVarOrConst); |
| |
| bool |
| CodeGenerator::visitDefVar(LDefVar *lir) |
| { |
| Register scopeChain = ToRegister(lir->scopeChain()); |
| |
| pushArg(scopeChain); // JSObject * |
| pushArg(Imm32(lir->mir()->attrs())); // unsigned |
| pushArg(ImmGCPtr(lir->mir()->name())); // PropertyName * |
| |
| if (!callVM(DefVarOrConstInfo, lir)) |
| return false; |
| |
| return true; |
| } |
| |
// Signature of DefFunOperation, the VM entry point for function definition.
typedef bool (*DefFunOperationFn)(JSContext *, HandleScript, HandleObject, HandleFunction);
static const VMFunction DefFunOperationInfo = FunctionInfo<DefFunOperationFn>(DefFunOperation);

bool
CodeGenerator::visitDefFun(LDefFun *lir)
{
    // Define a function binding by calling DefFunOperation in the VM.
    Register scopeChain = ToRegister(lir->scopeChain());

    // Arguments are pushed in reverse order of the C++ signature.
    pushArg(ImmGCPtr(lir->mir()->fun()));
    pushArg(scopeChain);
    pushArg(ImmGCPtr(current->mir()->info().script()));

    return callVM(DefFunOperationInfo, lir);
}
| |
// Signature of CheckOverRecursed, the VM entry point that reports the
// over-recursion error.
typedef bool (*ReportOverRecursedFn)(JSContext *);
static const VMFunction CheckOverRecursedInfo =
    FunctionInfo<ReportOverRecursedFn>(CheckOverRecursed);

bool
CodeGenerator::visitCheckOverRecursedFailure(CheckOverRecursedFailure *ool)
{
    // The OOL path is hit if the recursion depth has been exceeded.
    // Throw an InternalError for over-recursion.

    // LFunctionEnvironment can appear before LCheckOverRecursed, so we have
    // to save all live registers to avoid crashes if CheckOverRecursed triggers
    // a GC.
    saveLive(ool->lir());

    if (!callVM(CheckOverRecursedInfo, ool->lir()))
        return false;

    // Restore the saved registers and resume the inline path.
    restoreLive(ool->lir());
    masm.jump(ool->rejoin());
    return true;
}
| |
| // Out-of-line path to report over-recursed error and fail. |
| class ParCheckOverRecursedFailure : public OutOfLineCodeBase<CodeGenerator> |
| { |
| LParCheckOverRecursed *lir_; |
| |
| public: |
| ParCheckOverRecursedFailure(LParCheckOverRecursed *lir) |
| : lir_(lir) |
| { } |
| |
| bool accept(CodeGenerator *codegen) { |
| return codegen->visitParCheckOverRecursedFailure(this); |
| } |
| |
| LParCheckOverRecursed *lir() const { |
| return lir_; |
| } |
| }; |
| |
bool
CodeGenerator::visitParCheckOverRecursed(LParCheckOverRecursed *lir)
{
    // See above: unlike visitCheckOverRecursed(), this code runs in
    // parallel mode and hence uses the ionStackLimit from the current
    // thread state. Also, we must check the interrupt flags because
    // on interrupt or abort, only the stack limit for the main thread
    // is reset, not the worker threads. See comment in vm/ForkJoin.h
    // for more details.

    Register parSliceReg = ToRegister(lir->parSlice());
    Register tempReg = ToRegister(lir->getTempReg());

    // Load this worker's limit: slice->perThreadData->ionStackLimit.
    masm.loadPtr(Address(parSliceReg, offsetof(ForkJoinSlice, perThreadData)), tempReg);
    masm.loadPtr(Address(tempReg, offsetof(PerThreadData, ionStackLimit)), tempReg);

    // Conditional forward (unlikely) branch to failure.
    ParCheckOverRecursedFailure *ool = new ParCheckOverRecursedFailure(lir);
    if (!addOutOfLineCode(ool))
        return false;
    // The stack grows down, so sp <= limit means we have run out of stack.
    masm.branchPtr(Assembler::BelowOrEqual, StackPointer, tempReg, ool->entry());
    masm.parCheckInterruptFlags(tempReg, ool->entry());
    masm.bind(ool->rejoin());

    return true;
}
| |
bool
CodeGenerator::visitParCheckOverRecursedFailure(ParCheckOverRecursedFailure *ool)
{
    // OOL path: call ParCheckOverRecursed in C++; a false return means the
    // parallel execution must abort, which we propagate via |bail|.
    OutOfLinePropagateParallelAbort *bail = oolPropagateParallelAbort(ool->lir());
    if (!bail)
        return false;

    // Avoid saving/restoring the temp register since we will put the
    // ReturnReg into it below and we don't want to clobber that
    // during PopRegsInMask():
    LParCheckOverRecursed *lir = ool->lir();
    Register tempReg = ToRegister(lir->getTempReg());
    RegisterSet saveSet(lir->safepoint()->liveRegs());
    saveSet.maybeTake(tempReg);

    masm.PushRegsInMask(saveSet);
    masm.movePtr(ToRegister(lir->parSlice()), CallTempReg0);
    masm.setupUnalignedABICall(1, CallTempReg1);
    masm.passABIArg(CallTempReg0);
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ParCheckOverRecursed));
    // Stash the boolean result in tempReg, which PopRegsInMask() won't touch.
    masm.movePtr(ReturnReg, tempReg);
    masm.PopRegsInMask(saveSet);
    masm.branchIfFalseBool(tempReg, bail->entry());
    masm.jump(ool->rejoin());

    return true;
}
| |
// Out-of-line path taken when the parallel interrupt/abort flags are set;
// it calls into C++ to service the interrupt.
class OutOfLineParCheckInterrupt : public OutOfLineCodeBase<CodeGenerator>
{
  public:
    // The guarded instruction; public because the visitor reads it directly.
    LParCheckInterrupt *const lir;

    OutOfLineParCheckInterrupt(LParCheckInterrupt *lir)
      : lir(lir)
    { }

    bool accept(CodeGenerator *codegen) {
        return codegen->visitOutOfLineParCheckInterrupt(this);
    }
};
| |
bool
CodeGenerator::visitParCheckInterrupt(LParCheckInterrupt *lir)
{
    // Test the parallel interrupt/abort flags, diverting to an OOL C++ call
    // when either is set.
    OutOfLineParCheckInterrupt *ool = new OutOfLineParCheckInterrupt(lir);
    if (!addOutOfLineCode(ool))
        return false;

    // We must check two flags:
    // - runtime->interrupt
    // - runtime->parallelAbort
    // See vm/ForkJoin.h for discussion on why we use this design.

    Register tempReg = ToRegister(lir->getTempReg());
    masm.parCheckInterruptFlags(tempReg, ool->entry());
    masm.bind(ool->rejoin());
    return true;
}
| |
bool
CodeGenerator::visitOutOfLineParCheckInterrupt(OutOfLineParCheckInterrupt *ool)
{
    // OOL path: call ParCheckInterrupt in C++; a false return means the
    // parallel execution must abort, which we propagate via |bail|.
    OutOfLinePropagateParallelAbort *bail = oolPropagateParallelAbort(ool->lir);
    if (!bail)
        return false;

    // Avoid saving/restoring the temp register since we will put the
    // ReturnReg into it below and we don't want to clobber that
    // during PopRegsInMask():
    LParCheckInterrupt *lir = ool->lir;
    Register tempReg = ToRegister(lir->getTempReg());
    RegisterSet saveSet(lir->safepoint()->liveRegs());
    saveSet.maybeTake(tempReg);

    masm.PushRegsInMask(saveSet);
    masm.movePtr(ToRegister(ool->lir->parSlice()), CallTempReg0);
    masm.setupUnalignedABICall(1, CallTempReg1);
    masm.passABIArg(CallTempReg0);
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ParCheckInterrupt));
    // Stash the boolean result in tempReg, which PopRegsInMask() won't touch.
    masm.movePtr(ReturnReg, tempReg);
    masm.PopRegsInMask(saveSet);
    masm.branchIfFalseBool(tempReg, bail->entry());
    masm.jump(ool->rejoin());

    return true;
}
| |
| IonScriptCounts * |
| CodeGenerator::maybeCreateScriptCounts() |
| { |
| // If scripts are being profiled, create a new IonScriptCounts and attach |
| // it to the script. This must be done on the main thread. |
| JSContext *cx = GetIonContext()->cx; |
| if (!cx) |
| return NULL; |
| |
| IonScriptCounts *counts = NULL; |
| |
| CompileInfo *outerInfo = &gen->info(); |
| JSScript *script = outerInfo->script(); |
| |
| if (cx->runtime()->profilingScripts) { |
| if (script && !script->hasScriptCounts && !script->initScriptCounts(cx)) |
| return NULL; |
| } else if (!script) { |
| return NULL; |
| } |
| |
| if (script && !script->hasScriptCounts) |
| return NULL; |
| |
| counts = js_new<IonScriptCounts>(); |
| if (!counts || !counts->init(graph.numBlocks())) { |
| js_delete(counts); |
| return NULL; |
| } |
| |
| if (script) |
| script->addIonCounts(counts); |
| |
| for (size_t i = 0; i < graph.numBlocks(); i++) { |
| MBasicBlock *block = graph.getBlock(i)->mir(); |
| |
| uint32_t offset = 0; |
| if (script) { |
| // Find a PC offset in the outermost script to use. If this block |
| // is from an inlined script, find a location in the outer script |
| // to associate information about the inlining with. |
| MResumePoint *resume = block->entryResumePoint(); |
| while (resume->caller()) |
| resume = resume->caller(); |
| DebugOnly<uint32_t> offset = resume->pc() - script->code; |
| JS_ASSERT(offset < script->length); |
| } |
| |
| if (!counts->block(i).init(block->id(), offset, block->numSuccessors())) |
| return NULL; |
| for (size_t j = 0; j < block->numSuccessors(); j++) |
| counts->block(i).setSuccessor(j, block->getSuccessor(j)->id()); |
| } |
| |
| if (!script) { |
| // Compiling code for Asm.js. Leave the counts on the CodeGenerator to |
| // be picked up by the AsmJSModule after generation finishes. |
| unassociatedScriptCounts_ = counts; |
| } |
| |
| return counts; |
| } |
| |
// Structure for managing the state tracked for a block by script counters.
struct ScriptCountBlockState
{
    // Counters for the block being generated; hit count, text, and byte
    // totals are published here by the destructor.
    IonBlockCounts &block;
    MacroAssembler &masm;

    // Captures the disassembly text emitted while this state is installed.
    Sprinter printer;

    uint32_t instructionBytes;
    uint32_t spillBytes;

    // Pointer to instructionBytes, spillBytes, or NULL, depending on the last
    // instruction processed.
    uint32_t *last;
    // masm.size() when |last| was selected; bytes assembled since then are
    // attributed to *last.
    uint32_t lastLength;

  public:
    ScriptCountBlockState(IonBlockCounts *block, MacroAssembler *masm)
      : block(*block), masm(*masm),
        printer(GetIonContext()->cx),
        instructionBytes(0), spillBytes(0), last(NULL), lastLength(0)
    {
    }

    bool init()
    {
        if (!printer.init())
            return false;

        // Bump the hit count for the block at the start. This code is not
        // included in either the text for the block or the instruction byte
        // counts.
        masm.inc64(AbsoluteAddress(block.addressOfHitCount()));

        // Collect human readable assembly for the code generated in the block.
        masm.setPrinter(&printer);

        return true;
    }

    void visitInstruction(LInstruction *ins)
    {
        // Attribute the bytes assembled since the previous instruction to
        // that instruction's counter before selecting the new one.
        if (last)
            *last += masm.size() - lastLength;
        lastLength = masm.size();
        last = ins->isMoveGroup() ? &spillBytes : &instructionBytes;

        // Prefix stream of assembly instructions with their LIR instruction
        // name and any associated high level info.
        if (const char *extra = ins->extraName())
            printer.printf("[%s:%s]\n", ins->opName(), extra);
        else
            printer.printf("[%s]\n", ins->opName());
    }

    ~ScriptCountBlockState()
    {
        // Detach the printer and flush the final instruction's byte count
        // before publishing the collected data on the block counters.
        masm.setPrinter(NULL);

        if (last)
            *last += masm.size() - lastLength;

        block.setCode(printer.string());
        block.setInstructionBytes(instructionBytes);
        block.setSpillBytes(spillBytes);
    }
};
| |
bool
CodeGenerator::generateBody()
{
    // Emit machine code for every LIR instruction of every block, in order,
    // threading through optional script-count instrumentation and perf spew.
    IonScriptCounts *counts = maybeCreateScriptCounts();

    for (size_t i = 0; i < graph.numBlocks(); i++) {
        current = graph.getBlock(i);

        LInstructionIterator iter = current->begin();

        // Separately visit the label at the start of every block, so that
        // count instrumentation is inserted after the block label is bound.
        if (!iter->accept(this))
            return false;
        iter++;

        mozilla::Maybe<ScriptCountBlockState> blockCounts;
        if (counts) {
            blockCounts.construct(&counts->block(i), &masm);
            if (!blockCounts.ref().init())
                return false;
        }

        if (PerfBlockEnabled())
            perfSpewer_.startBasicBlock(current->mir(), masm);

        for (; iter != current->end(); iter++) {
            IonSpew(IonSpew_Codegen, "instruction %s", iter->opName());

            if (counts)
                blockCounts.ref().visitInstruction(*iter);

            // Record pushed argument slots in this instruction's safepoint
            // so they are visible to the GC.
            if (iter->safepoint() && pushedArgumentSlots_.length()) {
                if (!markArgumentSlots(iter->safepoint()))
                    return false;
            }

            if (!callTraceLIR(i, *iter))
                return false;

            if (!iter->accept(this))
                return false;
        }
        if (masm.oom())
            return false;

        if (PerfBlockEnabled())
            perfSpewer_.endBasicBlock(masm);
    }

    JS_ASSERT(pushedArgumentSlots_.empty());
    return true;
}
| |
| // Out-of-line object allocation for LNewParallelArray. |
| class OutOfLineNewParallelArray : public OutOfLineCodeBase<CodeGenerator> |
| { |
| LNewParallelArray *lir_; |
| |
| public: |
| OutOfLineNewParallelArray(LNewParallelArray *lir) |
| : lir_(lir) |
| { } |
| |
| bool accept(CodeGenerator *codegen) { |
| return codegen->visitOutOfLineNewParallelArray(this); |
| } |
| |
| LNewParallelArray *lir() const { |
| return lir_; |
| } |
| }; |
| |
// Signature of NewInitParallelArray, the VM fallback for parallel array
// allocation.
typedef JSObject *(*NewInitParallelArrayFn)(JSContext *, HandleObject);
static const VMFunction NewInitParallelArrayInfo =
    FunctionInfo<NewInitParallelArrayFn>(NewInitParallelArray);

bool
CodeGenerator::visitNewParallelArrayVMCall(LNewParallelArray *lir)
{
    // VM-call fallback used by the OOL path of visitNewParallelArray.
    JS_ASSERT(gen->info().executionMode() == SequentialExecution);

    Register objReg = ToRegister(lir->output());

    // This runs out-of-line from inline code, so live registers must be
    // preserved manually around the VM call.
    JS_ASSERT(!lir->isCall());
    saveLive(lir);

    pushArg(ImmGCPtr(lir->mir()->templateObject()));
    if (!callVM(NewInitParallelArrayInfo, lir))
        return false;

    // Move the VM result into the instruction's output register if needed.
    if (ReturnReg != objReg)
        masm.movePtr(ReturnReg, objReg);

    restoreLive(lir);
    return true;
}
| |
| // Out-of-line object allocation for LNewArray. |
| class OutOfLineNewArray : public OutOfLineCodeBase<CodeGenerator> |
| { |
| LNewArray *lir_; |
| |
| public: |
| OutOfLineNewArray(LNewArray *lir) |
| : lir_(lir) |
| { } |
| |
| bool accept(CodeGenerator *codegen) { |
| return codegen->visitOutOfLineNewArray(this); |
| } |
| |
| LNewArray *lir() const { |
| return lir_; |
| } |
| }; |
| |
// Signature of NewInitArray, the VM fallback for array allocation.
typedef JSObject *(*NewInitArrayFn)(JSContext *, uint32_t, types::TypeObject *);
static const VMFunction NewInitArrayInfo =
    FunctionInfo<NewInitArrayFn>(NewInitArray);

bool
CodeGenerator::visitNewArrayCallVM(LNewArray *lir)
{
    // VM-call fallback used both directly (shouldUseVM) and from the OOL
    // path of visitNewArray.
    JS_ASSERT(gen->info().executionMode() == SequentialExecution);

    Register objReg = ToRegister(lir->output());

    // This may run out-of-line from inline code, so live registers must be
    // preserved manually around the VM call.
    JS_ASSERT(!lir->isCall());
    saveLive(lir);

    // Singleton-typed templates pass a NULL type so the VM assigns one.
    JSObject *templateObject = lir->mir()->templateObject();
    types::TypeObject *type = templateObject->hasSingletonType() ? NULL : templateObject->type();

    pushArg(ImmGCPtr(type));
    pushArg(Imm32(lir->mir()->count()));

    if (!callVM(NewInitArrayInfo, lir))
        return false;

    // Move the VM result into the instruction's output register if needed.
    if (ReturnReg != objReg)
        masm.movePtr(ReturnReg, objReg);

    restoreLive(lir);

    return true;
}
| |
bool
CodeGenerator::visitNewSlots(LNewSlots *lir)
{
    // Allocate a slots array by calling the NewSlots C++ helper with
    // (runtime, nslots), bailing out if the allocation fails.
    Register temp1 = ToRegister(lir->temp1());
    Register temp2 = ToRegister(lir->temp2());
    Register temp3 = ToRegister(lir->temp3());
    Register output = ToRegister(lir->output());

    masm.mov(ImmWord(gen->compartment->rt), temp1);
    masm.mov(Imm32(lir->mir()->nslots()), temp2);

    masm.setupUnalignedABICall(2, temp3);
    masm.passABIArg(temp1);
    masm.passABIArg(temp2);
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, NewSlots));

    // NOTE(review): the result is tested in |output| without an explicit
    // move from ReturnReg -- this assumes the register allocator fixes
    // LNewSlots's output to ReturnReg; confirm against the LIR definition.
    masm.testPtr(output, output);
    if (!bailoutIf(Assembler::Zero, lir->snapshot()))
        return false;

    return true;
}
| |
| bool CodeGenerator::visitAtan2D(LAtan2D *lir) |
| { |
| Register temp = ToRegister(lir->temp()); |
| FloatRegister y = ToFloatRegister(lir->y()); |
| FloatRegister x = ToFloatRegister(lir->x()); |
| |
| masm.setupUnalignedABICall(2, temp); |
| masm.passABIArg(y); |
| masm.passABIArg(x); |
| masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ecmaAtan2), MacroAssembler::DOUBLE); |
| |
| JS_ASSERT(ToFloatRegister(lir->output()) == ReturnFloatReg); |
| return true; |
| } |
| |
bool
CodeGenerator::visitNewParallelArray(LNewParallelArray *lir)
{
    // Inline-allocate a ParallelArray from its template object, with an OOL
    // VM-call fallback when the inline allocation fails.
    Register objReg = ToRegister(lir->output());
    JSObject *templateObject = lir->mir()->templateObject();

    OutOfLineNewParallelArray *ool = new OutOfLineNewParallelArray(lir);
    if (!addOutOfLineCode(ool))
        return false;

    // Allocate and copy-initialize from the template; on allocation failure
    // control transfers to ool->entry().
    masm.newGCThing(objReg, templateObject, ool->entry());
    masm.initGCThing(objReg, templateObject);

    masm.bind(ool->rejoin());
    return true;
}
| |
| bool |
| CodeGenerator::visitOutOfLineNewParallelArray(OutOfLineNewParallelArray *ool) |
| { |
| if (!visitNewParallelArrayVMCall(ool->lir())) |
| return false; |
| masm.jump(ool->rejoin()); |
| return true; |
| } |
| |
bool
CodeGenerator::visitNewArray(LNewArray *lir)
{
    // Allocate a dense array from its template object: inline allocation
    // with an OOL VM-call fallback, or a straight VM call when required.
    JS_ASSERT(gen->info().executionMode() == SequentialExecution);
    Register objReg = ToRegister(lir->output());
    JSObject *templateObject = lir->mir()->templateObject();
    // |count| is only read by the assertion below in debug builds.
    DebugOnly<uint32_t> count = lir->mir()->count();

    JS_ASSERT(count < JSObject::NELEMENTS_LIMIT);

    if (lir->mir()->shouldUseVM())
        return visitNewArrayCallVM(lir);

    OutOfLineNewArray *ool = new OutOfLineNewArray(lir);
    if (!addOutOfLineCode(ool))
        return false;

    // Allocate and copy-initialize from the template; on allocation failure
    // control transfers to ool->entry().
    masm.newGCThing(objReg, templateObject, ool->entry());
    masm.initGCThing(objReg, templateObject);

    masm.bind(ool->rejoin());
    return true;
}
| |
| bool |
| CodeGenerator::visitOutOfLineNewArray(OutOfLineNewArray *ool) |
| { |
| if (!visitNewArrayCallVM(ool->lir())) |
| return false; |
| masm.jump(ool->rejoin()); |
| return true; |
| } |
| |
| // Out-of-line object allocation for JSOP_NEWOBJECT. |
| class OutOfLineNewObject : public OutOfLineCodeBase<CodeGenerator> |
| { |
| LNewObject *lir_; |
| |
| public: |
| OutOfLineNewObject(LNewObject *lir) |
| : lir_(lir) |
| { } |
| |
| bool accept(CodeGenerator *codegen) { |
| return codegen->visitOutOfLineNewObject(this); |
| } |
| |
| LNewObject *lir() const { |
| return lir_; |
| } |
| }; |
| |
// Signatures of the two VM fallbacks for object allocation; see the comment
// in visitNewObjectVMCall for when each is used.
typedef JSObject *(*NewInitObjectFn)(JSContext *, HandleObject);
static const VMFunction NewInitObjectInfo = FunctionInfo<NewInitObjectFn>(NewInitObject);

typedef JSObject *(*NewInitObjectWithClassPrototypeFn)(JSContext *, HandleObject);
static const VMFunction NewInitObjectWithClassPrototypeInfo =
    FunctionInfo<NewInitObjectWithClassPrototypeFn>(NewInitObjectWithClassPrototype);

bool
CodeGenerator::visitNewObjectVMCall(LNewObject *lir)
{
    // VM-call fallback used both directly (shouldUseVM) and from the OOL
    // path of visitNewObject.
    JS_ASSERT(gen->info().executionMode() == SequentialExecution);

    Register objReg = ToRegister(lir->output());

    // This may run out-of-line from inline code, so live registers must be
    // preserved manually around the VM call.
    JS_ASSERT(!lir->isCall());
    saveLive(lir);

    pushArg(ImmGCPtr(lir->mir()->templateObject()));

    // If we're making a new object with a class prototype (that is, an object
    // that derives its class from its prototype instead of being
    // ObjectClass'd) from self-hosted code, we need a different init
    // function.
    if (lir->mir()->templateObjectIsClassPrototype()) {
        if (!callVM(NewInitObjectWithClassPrototypeInfo, lir))
            return false;
    } else if (!callVM(NewInitObjectInfo, lir)) {
        return false;
    }

    // Move the VM result into the instruction's output register if needed.
    if (ReturnReg != objReg)
        masm.movePtr(ReturnReg, objReg);

    restoreLive(lir);
    return true;
}
| |
bool
CodeGenerator::visitNewObject(LNewObject *lir)
{
    // Allocate a plain object from its template: inline allocation with an
    // OOL VM-call fallback, or a straight VM call when required.
    JS_ASSERT(gen->info().executionMode() == SequentialExecution);
    Register objReg = ToRegister(lir->output());
    JSObject *templateObject = lir->mir()->templateObject();

    if (lir->mir()->shouldUseVM())
        return visitNewObjectVMCall(lir);

    OutOfLineNewObject *ool = new OutOfLineNewObject(lir);
    if (!addOutOfLineCode(ool))
        return false;

    // Allocate and copy-initialize from the template; on allocation failure
    // control transfers to ool->entry().
    masm.newGCThing(objReg, templateObject, ool->entry());
    masm.initGCThing(objReg, templateObject);

    masm.bind(ool->rejoin());
    return true;
}
| |
| bool |
| CodeGenerator::visitOutOfLineNewObject(OutOfLineNewObject *ool) |
| { |
| if (!visitNewObjectVMCall(ool->lir())) |
| return false; |
| masm.jump(ool->rejoin()); |
| return true; |
| } |
| |
// Signature of DeclEnvObject::createTemplateObject, the VM fallback for
// DeclEnv allocation.
typedef js::DeclEnvObject *(*NewDeclEnvObjectFn)(JSContext *, HandleFunction, gc::InitialHeap);
static const VMFunction NewDeclEnvObjectInfo =
    FunctionInfo<NewDeclEnvObjectFn>(DeclEnvObject::createTemplateObject);

bool
CodeGenerator::visitNewDeclEnvObject(LNewDeclEnvObject *lir)
{
    // Allocate a DeclEnvObject inline from its template object, with an OOL
    // VM call as fallback.
    Register obj = ToRegister(lir->output());
    JSObject *templateObj = lir->mir()->templateObj();
    CompileInfo &info = lir->mir()->block()->info();

    // If we have a template object, we can inline call object creation.
    OutOfLineCode *ool = oolCallVM(NewDeclEnvObjectInfo, lir,
                                   (ArgList(), ImmGCPtr(info.fun()), Imm32(gc::DefaultHeap)),
                                   StoreRegisterTo(obj));
    if (!ool)
        return false;

    // Allocate and copy-initialize from the template; on allocation failure
    // control transfers to ool->entry().
    masm.newGCThing(obj, templateObj, ool->entry());
    masm.initGCThing(obj, templateObj);
    masm.bind(ool->rejoin());
    return true;
}
| |
// Signature of NewCallObject, the VM fallback for CallObject allocation.
typedef JSObject *(*NewCallObjectFn)(JSContext *, HandleScript, HandleShape,
                                     HandleTypeObject, HeapSlot *);
static const VMFunction NewCallObjectInfo =
    FunctionInfo<NewCallObjectFn>(NewCallObject);

bool
CodeGenerator::visitNewCallObject(LNewCallObject *lir)
{
    // Allocate a CallObject: inline allocation when possible, otherwise (or
    // when a singleton type is required) via the NewCallObject VM call.
    Register obj = ToRegister(lir->output());

    JSObject *templateObj = lir->mir()->templateObject();

    // If we have a template object, we can inline call object creation.
    // The two oolCallVM variants differ only in the final argument: the
    // dynamic slots pointer when present, NULL otherwise.
    OutOfLineCode *ool;
    if (lir->slots()->isRegister()) {
        ool = oolCallVM(NewCallObjectInfo, lir,
                        (ArgList(), ImmGCPtr(lir->mir()->block()->info().script()),
                                    ImmGCPtr(templateObj->lastProperty()),
                                    ImmGCPtr(templateObj->hasLazyType() ? NULL : templateObj->type()),
                                    ToRegister(lir->slots())),
                        StoreRegisterTo(obj));
    } else {
        ool = oolCallVM(NewCallObjectInfo, lir,
                        (ArgList(), ImmGCPtr(lir->mir()->block()->info().script()),
                                    ImmGCPtr(templateObj->lastProperty()),
                                    ImmGCPtr(templateObj->hasLazyType() ? NULL : templateObj->type()),
                                    ImmWord((void *)NULL)),
                        StoreRegisterTo(obj));
    }
    if (!ool)
        return false;

    if (lir->mir()->needsSingletonType()) {
        // Objects can only be given singleton types in VM calls.
        masm.jump(ool->entry());
    } else {
        masm.newGCThing(obj, templateObj, ool->entry());
        masm.initGCThing(obj, templateObj);

        // Install the dynamic slots pointer allocated by LNewSlots.
        if (lir->slots()->isRegister())
            masm.storePtr(ToRegister(lir->slots()), Address(obj, JSObject::offsetOfSlots()));
    }

    masm.bind(ool->rejoin());
    return true;
}
| |
bool
CodeGenerator::visitParNewCallObject(LParNewCallObject *lir)
{
    // Parallel-mode CallObject allocation: allocate from the current
    // ForkJoinSlice and copy-initialize from the template object.
    Register resultReg = ToRegister(lir->output());
    Register parSliceReg = ToRegister(lir->parSlice());
    Register tempReg1 = ToRegister(lir->getTemp0());
    Register tempReg2 = ToRegister(lir->getTemp1());
    JSObject *templateObj = lir->mir()->templateObj();

    emitParAllocateGCThing(lir, resultReg, parSliceReg, tempReg1, tempReg2, templateObj);

    // NB: !lir->slots()->isRegister() implies that there is no slots
    // array at all, and the memory is already zeroed when copying
    // from the template object

    if (lir->slots()->isRegister()) {
        Register slotsReg = ToRegister(lir->slots());
        JS_ASSERT(slotsReg != resultReg);
        masm.storePtr(slotsReg, Address(resultReg, JSObject::offsetOfSlots()));
    }

    return true;
}
| |
| bool |
| CodeGenerator::visitParNewDenseArray(LParNewDenseArray *lir) |
| { |
| Register parSliceReg = ToRegister(lir->parSlice()); |
| Register lengthReg = ToRegister(lir->length()); |
| Register tempReg0 = ToRegister(lir->getTemp0()); |
| Register tempReg1 = ToRegister(lir->getTemp1()); |
| Register tempReg2 = ToRegister(lir-&
|