/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/BaselineIC.h"
#include "mozilla/DebugOnly.h"
#include "mozilla/SizePrintfMacros.h"
#include "mozilla/TemplateLib.h"
#include "jslibmath.h"
#include "jstypes.h"
#include "builtin/Eval.h"
#include "builtin/SIMD.h"
#include "jit/BaselineDebugModeOSR.h"
#include "jit/BaselineJIT.h"
#include "jit/JitSpewer.h"
#include "jit/Linker.h"
#include "jit/Lowering.h"
#ifdef JS_ION_PERF
# include "jit/PerfSpewer.h"
#endif
#include "jit/SharedICHelpers.h"
#include "jit/VMFunctions.h"
#include "js/Conversions.h"
#include "js/TraceableVector.h"
#include "vm/Opcodes.h"
#include "vm/TypedArrayCommon.h"
#include "jsboolinlines.h"
#include "jsscriptinlines.h"
#include "jit/JitFrames-inl.h"
#include "jit/MacroAssembler-inl.h"
#include "jit/shared/Lowering-shared-inl.h"
#include "vm/Interpreter-inl.h"
#include "vm/ScopeObject-inl.h"
#include "vm/StringObject-inl.h"
#include "vm/UnboxedObject-inl.h"
using mozilla::DebugOnly;
namespace js {
namespace jit {
//
// WarmUpCounter_Fallback
//
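// Attempt to trigger Ion compilation for |script|: at a JSOP_LOOPENTRY pc we
// request an OSR-capable compile, otherwise (for function frames) a compile
// of the whole function for later entry. On success at a loop entry,
// *jitcodePtr is set to the Ion OSR entry point; it is left null in every
// other case. Returns false only on an unrecoverable error.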
static bool
EnsureCanEnterIon(JSContext* cx, ICWarmUpCounter_Fallback* stub, BaselineFrame* frame,
HandleScript script, jsbytecode* pc, void** jitcodePtr)
{
MOZ_ASSERT(jitcodePtr);
MOZ_ASSERT(!*jitcodePtr);
bool isLoopEntry = (JSOp(*pc) == JSOP_LOOPENTRY);
MethodStatus stat;
if (isLoopEntry) {
MOZ_ASSERT(LoopEntryCanIonOsr(pc));
JitSpew(JitSpew_BaselineOSR, " Compile at loop entry!");
stat = CanEnterAtBranch(cx, script, frame, pc);
} else if (frame->isFunctionFrame()) {
JitSpew(JitSpew_BaselineOSR, " Compile function from top for later entry!");
stat = CompileFunctionForBaseline(cx, script, frame);
} else {
return true;
}
if (stat == Method_Error) {
JitSpew(JitSpew_BaselineOSR, " Compile with Ion errored!");
return false;
}
if (stat == Method_CantCompile)
JitSpew(JitSpew_BaselineOSR, " Can't compile with Ion!");
else if (stat == Method_Skipped)
JitSpew(JitSpew_BaselineOSR, " Skipped compile with Ion!");
else if (stat == Method_Compiled)
JitSpew(JitSpew_BaselineOSR, " Compiled with Ion!");
else
MOZ_CRASH("Invalid MethodStatus!");
// Failed to compile. Reset warm-up counter and return.
if (stat != Method_Compiled) {
// TODO: If stat == Method_CantCompile, insert stub that just skips the
// warm-up counter entirely, instead of resetting it.
bool bailoutExpected = script->hasIonScript() && script->ionScript()->bailoutExpected();
if (stat == Method_CantCompile || bailoutExpected) {
JitSpew(JitSpew_BaselineOSR, " Reset WarmUpCounter cantCompile=%s bailoutExpected=%s!",
stat == Method_CantCompile ? "yes" : "no",
bailoutExpected ? "yes" : "no");
script->resetWarmUpCounter();
}
return true;
}
if (isLoopEntry) {
IonScript* ion = script->ionScript();
MOZ_ASSERT(cx->runtime()->spsProfiler.enabled() == ion->hasProfilingInstrumentation());
MOZ_ASSERT(ion->osrPc() == pc);
JitSpew(JitSpew_BaselineOSR, " OSR possible!");
*jitcodePtr = ion->method()->raw() + ion->osrEntryOffset();
}
return true;
}
//
// The following data is kept in a temporary heap-allocated buffer, stored in
// JitRuntime (high memory addresses at top, low at bottom):
//
// +----->+=================================+ -- <---- High Address
// | | | |
// | | ...BaselineFrame... | |-- Copy of BaselineFrame + stack values
// | | | |
// | +---------------------------------+ |
// | | | |
// | | ...Locals/Stack... | |
// | | | |
// | +=================================+ --
// | | Padding(Maybe Empty) |
// | +=================================+ --
// +------|-- baselineFrame | |-- IonOsrTempData
// | jitcode | |
// +=================================+ -- <---- Low Address
//
// A pointer to the IonOsrTempData is returned.
struct IonOsrTempData
{
void* jitcode;
uint8_t* baselineFrame;
};
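// Allocate the temporary OSR buffer described above and copy the
// BaselineFrame plus its locals and stack values into it. Returns a pointer
// to the IonOsrTempData header at the low end of the buffer, or nullptr on
// allocation failure.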
static IonOsrTempData*
PrepareOsrTempData(JSContext* cx, ICWarmUpCounter_Fallback* stub, BaselineFrame* frame,
HandleScript script, jsbytecode* pc, void* jitcode)
{
size_t numLocalsAndStackVals = frame->numValueSlots();
// Calculate the amount of space to allocate:
// BaselineFrame space:
// (sizeof(Value) * (numLocals + numStackVals))
// + sizeof(BaselineFrame)
//
// IonOsrTempData space:
// sizeof(IonOsrTempData)
size_t frameSpace = sizeof(BaselineFrame) + sizeof(Value) * numLocalsAndStackVals;
size_t ionOsrTempDataSpace = sizeof(IonOsrTempData);
size_t totalSpace = AlignBytes(frameSpace, sizeof(Value)) +
AlignBytes(ionOsrTempDataSpace, sizeof(Value));
    IonOsrTempData* info =
        (IonOsrTempData*)cx->runtime()->getJitRuntime(cx)->allocateOsrTempData(totalSpace);
if (!info)
return nullptr;
memset(info, 0, totalSpace);
info->jitcode = jitcode;
// Copy the BaselineFrame + local/stack Values to the buffer. Arguments and
// |this| are not copied but left on the stack: the Baseline and Ion frame
// share the same frame prefix and Ion won't clobber these values. Note
// that info->baselineFrame will point to the *end* of the frame data, like
// the frame pointer register in baseline frames.
uint8_t* frameStart = (uint8_t*)info + AlignBytes(ionOsrTempDataSpace, sizeof(Value));
info->baselineFrame = frameStart + frameSpace;
memcpy(frameStart, (uint8_t*)frame - numLocalsAndStackVals * sizeof(Value), frameSpace);
JitSpew(JitSpew_BaselineOSR, "Allocated IonOsrTempData at %p", (void*) info);
JitSpew(JitSpew_BaselineOSR, "Jitcode is %p", info->jitcode);
// All done.
return info;
}
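// Fallback for the warm-up counter IC: if Ion is enabled and the script is
// compilable, try to get Ion code for it. When Ion code with a matching OSR
// entry is available at this loop entry, *infoPtr receives the prepared temp
// data used to jump into Ion; otherwise it stays null and execution continues
// in Baseline.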
static bool
DoWarmUpCounterFallback(JSContext* cx, BaselineFrame* frame, ICWarmUpCounter_Fallback* stub,
IonOsrTempData** infoPtr)
{
MOZ_ASSERT(infoPtr);
*infoPtr = nullptr;
// A TI OOM will disable TI and Ion.
if (!jit::IsIonEnabled(cx))
return true;
RootedScript script(cx, frame->script());
jsbytecode* pc = stub->icEntry()->pc(script);
bool isLoopEntry = JSOp(*pc) == JSOP_LOOPENTRY;
MOZ_ASSERT(!isLoopEntry || LoopEntryCanIonOsr(pc));
FallbackICSpew(cx, stub, "WarmUpCounter(%d)", isLoopEntry ? int(script->pcToOffset(pc)) : int(-1));
if (!script->canIonCompile()) {
// TODO: ASSERT that ion-compilation-disabled checker stub doesn't exist.
// TODO: Clear all optimized stubs.
// TODO: Add a ion-compilation-disabled checker IC stub
script->resetWarmUpCounter();
return true;
}
MOZ_ASSERT(!script->isIonCompilingOffThread());
// If Ion script exists, but PC is not at a loop entry, then Ion will be entered for
// this script at an appropriate LOOPENTRY or the next time this function is called.
if (script->hasIonScript() && !isLoopEntry) {
JitSpew(JitSpew_BaselineOSR, "IonScript exists, but not at loop entry!");
// TODO: ASSERT that a ion-script-already-exists checker stub doesn't exist.
// TODO: Clear all optimized stubs.
// TODO: Add a ion-script-already-exists checker stub.
return true;
}
// Ensure that Ion-compiled code is available.
JitSpew(JitSpew_BaselineOSR,
"WarmUpCounter for %s:%" PRIuSIZE " reached %d at pc %p, trying to switch to Ion!",
script->filename(), script->lineno(), (int) script->getWarmUpCount(), (void*) pc);
void* jitcode = nullptr;
if (!EnsureCanEnterIon(cx, stub, frame, script, pc, &jitcode))
return false;
    // Jitcode should only be set here if we're at a loop entry.
MOZ_ASSERT_IF(!isLoopEntry, !jitcode);
if (!jitcode)
return true;
// Prepare the temporary heap copy of the fake InterpreterFrame and actual args list.
JitSpew(JitSpew_BaselineOSR, "Got jitcode. Preparing for OSR into ion.");
IonOsrTempData* info = PrepareOsrTempData(cx, stub, frame, script, pc, jitcode);
if (!info)
return false;
*infoPtr = info;
return true;
}
typedef bool (*DoWarmUpCounterFallbackFn)(JSContext*, BaselineFrame*,
ICWarmUpCounter_Fallback*, IonOsrTempData** infoPtr);
static const VMFunction DoWarmUpCounterFallbackInfo =
FunctionInfo<DoWarmUpCounterFallbackFn>(DoWarmUpCounterFallback);
bool
ICWarmUpCounter_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
{
MOZ_ASSERT(engine_ == Engine::Baseline);
// Push a stub frame so that we can perform a non-tail call.
enterStubFrame(masm, R1.scratchReg());
Label noCompiledCode;
// Call DoWarmUpCounterFallback to compile/check-for Ion-compiled function
{
// Push IonOsrTempData pointer storage
masm.subFromStackPtr(Imm32(sizeof(void*)));
masm.push(masm.getStackPointer());
// Push stub pointer.
masm.push(ICStubReg);
pushFramePtr(masm, R0.scratchReg());
if (!callVM(DoWarmUpCounterFallbackInfo, masm))
return false;
// Pop IonOsrTempData pointer.
masm.pop(R0.scratchReg());
leaveStubFrame(masm);
        // If no JitCode was found, just exit the IC.
masm.branchPtr(Assembler::Equal, R0.scratchReg(), ImmPtr(nullptr), &noCompiledCode);
}
// Get a scratch register.
AllocatableGeneralRegisterSet regs(availableGeneralRegs(0));
Register osrDataReg = R0.scratchReg();
regs.take(osrDataReg);
regs.takeUnchecked(OsrFrameReg);
Register scratchReg = regs.takeAny();
// At this point, stack looks like:
// +-> [...Calling-Frame...]
// | [...Actual-Args/ThisV/ArgCount/Callee...]
// | [Descriptor]
// | [Return-Addr]
// +---[Saved-FramePtr] <-- BaselineFrameReg points here.
// [...Baseline-Frame...]
// Restore the stack pointer to point to the saved frame pointer.
masm.moveToStackPtr(BaselineFrameReg);
// Discard saved frame pointer, so that the return address is on top of
// the stack.
masm.pop(scratchReg);
#ifdef DEBUG
// If profiler instrumentation is on, ensure that lastProfilingFrame is
// the frame currently being OSR-ed
{
Label checkOk;
AbsoluteAddress addressOfEnabled(cx->runtime()->spsProfiler.addressOfEnabled());
masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0), &checkOk);
masm.loadPtr(AbsoluteAddress((void*)&cx->runtime()->jitActivation), scratchReg);
masm.loadPtr(Address(scratchReg, JitActivation::offsetOfLastProfilingFrame()), scratchReg);
        // It may be the case that we entered the baseline frame with
        // profiling turned off, then profiling was enabled during a call
        // within a loop (i.e. in a callee frame), and we then returned to
        // this frame and are now OSR-ing with profiling turned on. In this
        // case, allow lastProfilingFrame to be null.
masm.branchPtr(Assembler::Equal, scratchReg, ImmWord(0), &checkOk);
masm.branchStackPtr(Assembler::Equal, scratchReg, &checkOk);
masm.assumeUnreachable("Baseline OSR lastProfilingFrame mismatch.");
masm.bind(&checkOk);
}
#endif
// Jump into Ion.
masm.loadPtr(Address(osrDataReg, offsetof(IonOsrTempData, jitcode)), scratchReg);
masm.loadPtr(Address(osrDataReg, offsetof(IonOsrTempData, baselineFrame)), OsrFrameReg);
masm.jump(scratchReg);
// No jitcode available, do nothing.
masm.bind(&noCompiledCode);
EmitReturnFromIC(masm);
return true;
}
//
// TypeUpdate_Fallback
//
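// Called when the type-update check guarding an optimized SetProp/SetElem
// stub fails: record the written value's type for the affected property,
// then attach an update stub so this value type passes the check next time.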
static bool
DoTypeUpdateFallback(JSContext* cx, BaselineFrame* frame, ICUpdatedStub* stub, HandleValue objval,
HandleValue value)
{
FallbackICSpew(cx, stub->getChainFallback(), "TypeUpdate(%s)",
ICStub::KindString(stub->kind()));
RootedScript script(cx, frame->script());
RootedObject obj(cx, &objval.toObject());
RootedId id(cx);
    switch (stub->kind()) {
case ICStub::SetElem_DenseOrUnboxedArray:
case ICStub::SetElem_DenseOrUnboxedArrayAdd: {
id = JSID_VOID;
AddTypePropertyId(cx, obj, id, value);
break;
}
case ICStub::SetProp_Native:
case ICStub::SetProp_NativeAdd:
case ICStub::SetProp_Unboxed: {
MOZ_ASSERT(obj->isNative() || obj->is<UnboxedPlainObject>());
jsbytecode* pc = stub->getChainFallback()->icEntry()->pc(script);
if (*pc == JSOP_SETALIASEDVAR || *pc == JSOP_INITALIASEDLEXICAL)
id = NameToId(ScopeCoordinateName(cx->runtime()->scopeCoordinateNameCache, script, pc));
else
id = NameToId(script->getName(pc));
AddTypePropertyId(cx, obj, id, value);
break;
}
case ICStub::SetProp_TypedObject: {
MOZ_ASSERT(obj->is<TypedObject>());
jsbytecode* pc = stub->getChainFallback()->icEntry()->pc(script);
id = NameToId(script->getName(pc));
if (stub->toSetProp_TypedObject()->isObjectReference()) {
// Ignore all values being written except plain objects. Null
// is included implicitly in type information for this property,
// and non-object non-null values will cause the stub to fail to
// match shortly and we will end up doing the assignment in the VM.
if (value.isObject())
AddTypePropertyId(cx, obj, id, value);
} else {
// Ignore undefined values, which are included implicitly in type
// information for this property.
if (!value.isUndefined())
AddTypePropertyId(cx, obj, id, value);
}
break;
}
default:
MOZ_CRASH("Invalid stub");
}
return stub->addUpdateStubForValue(cx, script, obj, id, value);
}
typedef bool (*DoTypeUpdateFallbackFn)(JSContext*, BaselineFrame*, ICUpdatedStub*, HandleValue,
HandleValue);
const VMFunction DoTypeUpdateFallbackInfo =
FunctionInfo<DoTypeUpdateFallbackFn>(DoTypeUpdateFallback, NonTailCall);
bool
ICTypeUpdate_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
{
MOZ_ASSERT(engine_ == Engine::Baseline);
// Just store false into R1.scratchReg() and return.
masm.move32(Imm32(0), R1.scratchReg());
EmitReturnFromIC(masm);
return true;
}
bool
ICTypeUpdate_PrimitiveSet::Compiler::generateStubCode(MacroAssembler& masm)
{
MOZ_ASSERT(engine_ == Engine::Baseline);
Label success;
if ((flags_ & TypeToFlag(JSVAL_TYPE_INT32)) && !(flags_ & TypeToFlag(JSVAL_TYPE_DOUBLE)))
masm.branchTestInt32(Assembler::Equal, R0, &success);
if (flags_ & TypeToFlag(JSVAL_TYPE_DOUBLE))
masm.branchTestNumber(Assembler::Equal, R0, &success);
if (flags_ & TypeToFlag(JSVAL_TYPE_UNDEFINED))
masm.branchTestUndefined(Assembler::Equal, R0, &success);
if (flags_ & TypeToFlag(JSVAL_TYPE_BOOLEAN))
masm.branchTestBoolean(Assembler::Equal, R0, &success);
if (flags_ & TypeToFlag(JSVAL_TYPE_STRING))
masm.branchTestString(Assembler::Equal, R0, &success);
if (flags_ & TypeToFlag(JSVAL_TYPE_SYMBOL))
masm.branchTestSymbol(Assembler::Equal, R0, &success);
// Currently, we will never generate primitive stub checks for object. However,
// when we do get to the point where we want to collapse our monitor chains of
// objects and singletons down (when they get too long) to a generic "any object"
// in coordination with the typeset doing the same thing, this will need to
// be re-enabled.
/*
if (flags_ & TypeToFlag(JSVAL_TYPE_OBJECT))
masm.branchTestObject(Assembler::Equal, R0, &success);
*/
MOZ_ASSERT(!(flags_ & TypeToFlag(JSVAL_TYPE_OBJECT)));
if (flags_ & TypeToFlag(JSVAL_TYPE_NULL))
masm.branchTestNull(Assembler::Equal, R0, &success);
EmitStubGuardFailure(masm);
// Type matches, load true into R1.scratchReg() and return.
masm.bind(&success);
masm.mov(ImmWord(1), R1.scratchReg());
EmitReturnFromIC(masm);
return true;
}
bool
ICTypeUpdate_SingleObject::Compiler::generateStubCode(MacroAssembler& masm)
{
MOZ_ASSERT(engine_ == Engine::Baseline);
Label failure;
masm.branchTestObject(Assembler::NotEqual, R0, &failure);
// Guard on the object's identity.
Register obj = masm.extractObject(R0, R1.scratchReg());
Address expectedObject(ICStubReg, ICTypeUpdate_SingleObject::offsetOfObject());
masm.branchPtr(Assembler::NotEqual, expectedObject, obj, &failure);
// Identity matches, load true into R1.scratchReg() and return.
masm.mov(ImmWord(1), R1.scratchReg());
EmitReturnFromIC(masm);
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
bool
ICTypeUpdate_ObjectGroup::Compiler::generateStubCode(MacroAssembler& masm)
{
MOZ_ASSERT(engine_ == Engine::Baseline);
Label failure;
masm.branchTestObject(Assembler::NotEqual, R0, &failure);
// Guard on the object's ObjectGroup.
Register obj = masm.extractObject(R0, R1.scratchReg());
masm.loadPtr(Address(obj, JSObject::offsetOfGroup()), R1.scratchReg());
Address expectedGroup(ICStubReg, ICTypeUpdate_ObjectGroup::offsetOfGroup());
masm.branchPtr(Assembler::NotEqual, expectedGroup, R1.scratchReg(), &failure);
// Group matches, load true into R1.scratchReg() and return.
masm.mov(ImmWord(1), R1.scratchReg());
EmitReturnFromIC(masm);
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
typedef bool (*DoCallNativeGetterFn)(JSContext*, HandleFunction, HandleObject, MutableHandleValue);
static const VMFunction DoCallNativeGetterInfo =
FunctionInfo<DoCallNativeGetterFn>(DoCallNativeGetter);
//
// NewArray_Fallback
//
static bool
DoNewArray(JSContext* cx, BaselineFrame* frame, ICNewArray_Fallback* stub, uint32_t length,
MutableHandleValue res)
{
FallbackICSpew(cx, stub, "NewArray");
RootedObject obj(cx);
if (stub->templateObject()) {
RootedObject templateObject(cx, stub->templateObject());
obj = NewArrayOperationWithTemplate(cx, templateObject);
if (!obj)
return false;
} else {
RootedScript script(cx, frame->script());
jsbytecode* pc = stub->icEntry()->pc(script);
obj = NewArrayOperation(cx, script, pc, length);
if (!obj)
return false;
        if (!obj->isSingleton() && !obj->group()->maybePreliminaryObjects()) {
JSObject* templateObject = NewArrayOperation(cx, script, pc, length, TenuredObject);
if (!templateObject)
return false;
stub->setTemplateObject(templateObject);
}
}
res.setObject(*obj);
return true;
}
typedef bool(*DoNewArrayFn)(JSContext*, BaselineFrame*, ICNewArray_Fallback*, uint32_t,
MutableHandleValue);
static const VMFunction DoNewArrayInfo = FunctionInfo<DoNewArrayFn>(DoNewArray, TailCall);
bool
ICNewArray_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
{
MOZ_ASSERT(engine_ == Engine::Baseline);
EmitRestoreTailCallReg(masm);
masm.push(R0.scratchReg()); // length
masm.push(ICStubReg); // stub.
pushFramePtr(masm, R0.scratchReg());
return tailCallVM(DoNewArrayInfo, masm);
}
//
// NewObject_Fallback
//
// Unlike typical baseline IC stubs, the code for NewObject_WithTemplate is
// specialized for the template object being allocated.
static JitCode*
GenerateNewObjectWithTemplateCode(JSContext* cx, JSObject* templateObject)
{
JitContext jctx(cx, nullptr);
MacroAssembler masm;
#ifdef JS_CODEGEN_ARM
masm.setSecondScratchReg(BaselineSecondScratchReg);
#endif
Label failure;
Register objReg = R0.scratchReg();
Register tempReg = R1.scratchReg();
masm.movePtr(ImmGCPtr(templateObject->group()), tempReg);
masm.branchTest32(Assembler::NonZero, Address(tempReg, ObjectGroup::offsetOfFlags()),
Imm32(OBJECT_FLAG_PRE_TENURE), &failure);
masm.branchPtr(Assembler::NotEqual, AbsoluteAddress(cx->compartment()->addressOfMetadataCallback()),
ImmWord(0), &failure);
masm.createGCObject(objReg, tempReg, templateObject, gc::DefaultHeap, &failure);
masm.tagValue(JSVAL_TYPE_OBJECT, objReg, R0);
EmitReturnFromIC(masm);
masm.bind(&failure);
EmitStubGuardFailure(masm);
Linker linker(masm);
AutoFlushICache afc("GenerateNewObjectWithTemplateCode");
return linker.newCode<CanGC>(cx, BASELINE_CODE);
}
static bool
DoNewObject(JSContext* cx, BaselineFrame* frame, ICNewObject_Fallback* stub, MutableHandleValue res)
{
FallbackICSpew(cx, stub, "NewObject");
RootedObject obj(cx);
RootedObject templateObject(cx, stub->templateObject());
if (templateObject) {
MOZ_ASSERT(!templateObject->group()->maybePreliminaryObjects());
obj = NewObjectOperationWithTemplate(cx, templateObject);
} else {
RootedScript script(cx, frame->script());
jsbytecode* pc = stub->icEntry()->pc(script);
obj = NewObjectOperation(cx, script, pc);
if (obj && !obj->isSingleton() && !obj->group()->maybePreliminaryObjects()) {
JSObject* templateObject = NewObjectOperation(cx, script, pc, TenuredObject);
if (!templateObject)
return false;
if (templateObject->is<UnboxedPlainObject>() ||
!templateObject->as<PlainObject>().hasDynamicSlots())
{
JitCode* code = GenerateNewObjectWithTemplateCode(cx, templateObject);
if (!code)
return false;
ICStubSpace* space =
ICStubCompiler::StubSpaceForKind(ICStub::NewObject_WithTemplate, script);
ICStub* templateStub = ICStub::New<ICNewObject_WithTemplate>(cx, space, code);
if (!templateStub)
return false;
stub->addNewStub(templateStub);
}
stub->setTemplateObject(templateObject);
}
}
if (!obj)
return false;
res.setObject(*obj);
return true;
}
typedef bool(*DoNewObjectFn)(JSContext*, BaselineFrame*, ICNewObject_Fallback*, MutableHandleValue);
static const VMFunction DoNewObjectInfo = FunctionInfo<DoNewObjectFn>(DoNewObject, TailCall);
bool
ICNewObject_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
{
MOZ_ASSERT(engine_ == Engine::Baseline);
EmitRestoreTailCallReg(masm);
masm.push(ICStubReg); // stub.
pushFramePtr(masm, R0.scratchReg());
return tailCallVM(DoNewObjectInfo, masm);
}
//
// ToBool_Fallback
//
static bool
DoToBoolFallback(JSContext* cx, BaselineFrame* frame, ICToBool_Fallback* stub, HandleValue arg,
MutableHandleValue ret)
{
FallbackICSpew(cx, stub, "ToBool");
bool cond = ToBoolean(arg);
ret.setBoolean(cond);
// Check to see if a new stub should be generated.
if (stub->numOptimizedStubs() >= ICToBool_Fallback::MAX_OPTIMIZED_STUBS) {
// TODO: Discard all stubs in this IC and replace with inert megamorphic stub.
// But for now we just bail.
return true;
}
MOZ_ASSERT(!arg.isBoolean());
JSScript* script = frame->script();
// Try to generate new stubs.
if (arg.isInt32()) {
JitSpew(JitSpew_BaselineIC, " Generating ToBool(Int32) stub.");
ICToBool_Int32::Compiler compiler(cx);
ICStub* int32Stub = compiler.getStub(compiler.getStubSpace(script));
if (!int32Stub)
return false;
stub->addNewStub(int32Stub);
return true;
}
if (arg.isDouble() && cx->runtime()->jitSupportsFloatingPoint) {
JitSpew(JitSpew_BaselineIC, " Generating ToBool(Double) stub.");
ICToBool_Double::Compiler compiler(cx);
ICStub* doubleStub = compiler.getStub(compiler.getStubSpace(script));
if (!doubleStub)
return false;
stub->addNewStub(doubleStub);
return true;
}
if (arg.isString()) {
JitSpew(JitSpew_BaselineIC, " Generating ToBool(String) stub");
ICToBool_String::Compiler compiler(cx);
ICStub* stringStub = compiler.getStub(compiler.getStubSpace(script));
if (!stringStub)
return false;
stub->addNewStub(stringStub);
return true;
}
if (arg.isNull() || arg.isUndefined()) {
ICToBool_NullUndefined::Compiler compiler(cx);
ICStub* nilStub = compiler.getStub(compiler.getStubSpace(script));
if (!nilStub)
return false;
stub->addNewStub(nilStub);
return true;
}
if (arg.isObject()) {
JitSpew(JitSpew_BaselineIC, " Generating ToBool(Object) stub.");
ICToBool_Object::Compiler compiler(cx);
ICStub* objStub = compiler.getStub(compiler.getStubSpace(script));
if (!objStub)
return false;
stub->addNewStub(objStub);
return true;
}
return true;
}
typedef bool (*DoToBoolFallbackFn)(JSContext*, BaselineFrame*, ICToBool_Fallback*, HandleValue,
                                   MutableHandleValue);
static const VMFunction DoToBoolFallbackInfo =
    FunctionInfo<DoToBoolFallbackFn>(DoToBoolFallback, TailCall);
bool
ICToBool_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
{
MOZ_ASSERT(engine_ == Engine::Baseline);
MOZ_ASSERT(R0 == JSReturnOperand);
// Restore the tail call register.
EmitRestoreTailCallReg(masm);
// Push arguments.
masm.pushValue(R0);
masm.push(ICStubReg);
pushFramePtr(masm, R0.scratchReg());
    return tailCallVM(DoToBoolFallbackInfo, masm);
}
//
// ToBool_Int32
//
bool
ICToBool_Int32::Compiler::generateStubCode(MacroAssembler& masm)
{
MOZ_ASSERT(engine_ == Engine::Baseline);
Label failure;
masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
Label ifFalse;
masm.branchTestInt32Truthy(false, R0, &ifFalse);
masm.moveValue(BooleanValue(true), R0);
EmitReturnFromIC(masm);
masm.bind(&ifFalse);
masm.moveValue(BooleanValue(false), R0);
EmitReturnFromIC(masm);
// Failure case - jump to next stub
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
//
// ToBool_String
//
bool
ICToBool_String::Compiler::generateStubCode(MacroAssembler& masm)
{
MOZ_ASSERT(engine_ == Engine::Baseline);
Label failure;
masm.branchTestString(Assembler::NotEqual, R0, &failure);
Label ifFalse;
masm.branchTestStringTruthy(false, R0, &ifFalse);
masm.moveValue(BooleanValue(true), R0);
EmitReturnFromIC(masm);
masm.bind(&ifFalse);
masm.moveValue(BooleanValue(false), R0);
EmitReturnFromIC(masm);
// Failure case - jump to next stub
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
//
// ToBool_NullUndefined
//
bool
ICToBool_NullUndefined::Compiler::generateStubCode(MacroAssembler& masm)
{
MOZ_ASSERT(engine_ == Engine::Baseline);
Label failure, ifFalse;
masm.branchTestNull(Assembler::Equal, R0, &ifFalse);
masm.branchTestUndefined(Assembler::NotEqual, R0, &failure);
masm.bind(&ifFalse);
masm.moveValue(BooleanValue(false), R0);
EmitReturnFromIC(masm);
// Failure case - jump to next stub
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
//
// ToBool_Double
//
bool
ICToBool_Double::Compiler::generateStubCode(MacroAssembler& masm)
{
MOZ_ASSERT(engine_ == Engine::Baseline);
Label failure, ifTrue;
masm.branchTestDouble(Assembler::NotEqual, R0, &failure);
masm.unboxDouble(R0, FloatReg0);
masm.branchTestDoubleTruthy(true, FloatReg0, &ifTrue);
masm.moveValue(BooleanValue(false), R0);
EmitReturnFromIC(masm);
masm.bind(&ifTrue);
masm.moveValue(BooleanValue(true), R0);
EmitReturnFromIC(masm);
// Failure case - jump to next stub
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
//
// ToBool_Object
//
bool
ICToBool_Object::Compiler::generateStubCode(MacroAssembler& masm)
{
MOZ_ASSERT(engine_ == Engine::Baseline);
Label failure, ifFalse, slowPath;
masm.branchTestObject(Assembler::NotEqual, R0, &failure);
Register objReg = masm.extractObject(R0, ExtractTemp0);
Register scratch = R1.scratchReg();
masm.branchTestObjectTruthy(false, objReg, scratch, &slowPath, &ifFalse);
    // If the object doesn't emulate undefined, it evaluates to true.
masm.moveValue(BooleanValue(true), R0);
EmitReturnFromIC(masm);
masm.bind(&ifFalse);
masm.moveValue(BooleanValue(false), R0);
EmitReturnFromIC(masm);
masm.bind(&slowPath);
masm.setupUnalignedABICall(scratch);
masm.passABIArg(objReg);
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, js::EmulatesUndefined));
masm.convertBoolToInt32(ReturnReg, ReturnReg);
masm.xor32(Imm32(1), ReturnReg);
masm.tagValue(JSVAL_TYPE_BOOLEAN, ReturnReg, R0);
EmitReturnFromIC(masm);
// Failure case - jump to next stub
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
//
// ToNumber_Fallback
//
static bool
DoToNumberFallback(JSContext* cx, ICToNumber_Fallback* stub, HandleValue arg, MutableHandleValue ret)
{
FallbackICSpew(cx, stub, "ToNumber");
ret.set(arg);
return ToNumber(cx, ret);
}
typedef bool (*DoToNumberFallbackFn)(JSContext*, ICToNumber_Fallback*, HandleValue, MutableHandleValue);
static const VMFunction DoToNumberFallbackInfo =
FunctionInfo<DoToNumberFallbackFn>(DoToNumberFallback, TailCall, PopValues(1));
bool
ICToNumber_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
{
MOZ_ASSERT(engine_ == Engine::Baseline);
MOZ_ASSERT(R0 == JSReturnOperand);
// Restore the tail call register.
EmitRestoreTailCallReg(masm);
// Ensure stack is fully synced for the expression decompiler.
masm.pushValue(R0);
// Push arguments.
masm.pushValue(R0);
masm.push(ICStubReg);
return tailCallVM(DoToNumberFallbackInfo, masm);
}
//
// GetElem_Fallback
//
static Shape*
LastPropertyForSetProp(JSObject* obj)
{
if (obj->isNative())
return obj->as<NativeObject>().lastProperty();
if (obj->is<UnboxedPlainObject>()) {
UnboxedExpandoObject* expando = obj->as<UnboxedPlainObject>().maybeExpando();
return expando ? expando->lastProperty() : nullptr;
}
return nullptr;
}
static bool
IsCacheableSetPropWriteSlot(JSObject* obj, Shape* oldShape, Shape* propertyShape)
{
// Object shape must not have changed during the property set.
if (LastPropertyForSetProp(obj) != oldShape)
return false;
if (!propertyShape->hasSlot() ||
!propertyShape->hasDefaultSetter() ||
!propertyShape->writable())
{
return false;
}
return true;
}
static bool
IsCacheableSetPropAddSlot(JSContext* cx, JSObject* obj, Shape* oldShape,
jsid id, Shape* propertyShape, size_t* protoChainDepth)
{
// The property must be the last added property of the object.
if (LastPropertyForSetProp(obj) != propertyShape)
return false;
// Object must be extensible, oldShape must be immediate parent of current shape.
if (!obj->nonProxyIsExtensible() || propertyShape->previous() != oldShape)
return false;
// Basic shape checks.
if (propertyShape->inDictionary() ||
!propertyShape->hasSlot() ||
!propertyShape->hasDefaultSetter() ||
!propertyShape->writable())
{
return false;
}
// Watch out for resolve or addProperty hooks.
if (ClassMayResolveId(cx->names(), obj->getClass(), id, obj) ||
obj->getClass()->addProperty)
{
return false;
}
size_t chainDepth = 0;
// Walk up the object prototype chain and ensure that all prototypes are
// native, and that all prototypes have no setter defined on the property.
for (JSObject* proto = obj->getProto(); proto; proto = proto->getProto()) {
chainDepth++;
// if prototype is non-native, don't optimize
if (!proto->isNative())
return false;
// if prototype defines this property in a non-plain way, don't optimize
Shape* protoShape = proto->as<NativeObject>().lookup(cx, id);
if (protoShape && !protoShape->hasDefaultSetter())
return false;
// Otherwise, if there's no such property, watch out for a resolve hook
// that would need to be invoked and thus prevent inlining of property
// addition.
if (ClassMayResolveId(cx->names(), proto->getClass(), id, proto))
return false;
}
// Only add a IC entry if the dynamic slots didn't change when the shapes
// changed. Need to ensure that a shape change for a subsequent object
// won't involve reallocating the slot array.
if (NativeObject::dynamicSlotsCount(propertyShape) != NativeObject::dynamicSlotsCount(oldShape))
return false;
*protoChainDepth = chainDepth;
return true;
}
static bool
IsCacheableSetPropCall(JSContext* cx, JSObject* obj, JSObject* holder, Shape* shape,
bool* isScripted, bool* isTemporarilyUnoptimizable)
{
MOZ_ASSERT(isScripted);
if (!shape || !IsCacheableProtoChain(obj, holder))
return false;
if (shape->hasSlot() || shape->hasDefaultSetter())
return false;
if (!shape->hasSetterValue())
return false;
if (!shape->setterValue().isObject() || !shape->setterObject()->is<JSFunction>())
return false;
JSFunction* func = &shape->setterObject()->as<JSFunction>();
if (func->isNative()) {
*isScripted = false;
return true;
}
if (!func->hasJITCode()) {
*isTemporarilyUnoptimizable = true;
return false;
}
*isScripted = true;
return true;
}
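// Check whether the stub chain already contains a native GetElem stub
// matching this (receiver, holder, key) combination, keyed on either a
// PropertyName* or a JS::Symbol*.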
template <class T>
static bool
GetElemNativeStubExists(ICGetElem_Fallback* stub, HandleObject obj, HandleObject holder,
Handle<T> key, bool needsAtomize)
{
bool indirect = (obj.get() != holder.get());
MOZ_ASSERT_IF(indirect, holder->isNative());
for (ICStubConstIterator iter = stub->beginChainConst(); !iter.atEnd(); iter++) {
if (iter->kind() != ICStub::GetElem_NativeSlotName &&
iter->kind() != ICStub::GetElem_NativeSlotSymbol &&
iter->kind() != ICStub::GetElem_NativePrototypeSlotName &&
iter->kind() != ICStub::GetElem_NativePrototypeSlotSymbol &&
iter->kind() != ICStub::GetElem_NativePrototypeCallNativeName &&
iter->kind() != ICStub::GetElem_NativePrototypeCallNativeSymbol &&
iter->kind() != ICStub::GetElem_NativePrototypeCallScriptedName &&
iter->kind() != ICStub::GetElem_NativePrototypeCallScriptedSymbol)
{
continue;
}
if (indirect && (iter->kind() != ICStub::GetElem_NativePrototypeSlotName &&
iter->kind() != ICStub::GetElem_NativePrototypeSlotSymbol &&
iter->kind() != ICStub::GetElem_NativePrototypeCallNativeName &&
iter->kind() != ICStub::GetElem_NativePrototypeCallNativeSymbol &&
iter->kind() != ICStub::GetElem_NativePrototypeCallScriptedName &&
iter->kind() != ICStub::GetElem_NativePrototypeCallScriptedSymbol))
{
continue;
}
        if (mozilla::IsSame<T, JS::Symbol*>::value !=
            static_cast<ICGetElemNativeStub*>(*iter)->isSymbol())
{
continue;
}
ICGetElemNativeStubImpl<T>* getElemNativeStub =
reinterpret_cast<ICGetElemNativeStubImpl<T>*>(*iter);
if (key != getElemNativeStub->key())
continue;
if (ReceiverGuard(obj) != getElemNativeStub->receiverGuard())
continue;
// If the new stub needs atomization, and the old stub doesn't atomize, then
// an appropriate stub doesn't exist.
if (needsAtomize && !getElemNativeStub->needsAtomize())
continue;
// For prototype gets, check the holder and holder shape.
if (indirect) {
if (iter->isGetElem_NativePrototypeSlotName() ||
iter->isGetElem_NativePrototypeSlotSymbol()) {
ICGetElem_NativePrototypeSlot<T>* protoStub =
reinterpret_cast<ICGetElem_NativePrototypeSlot<T>*>(*iter);
if (holder != protoStub->holder())
continue;
if (holder->as<NativeObject>().lastProperty() != protoStub->holderShape())
continue;
} else {
MOZ_ASSERT(iter->isGetElem_NativePrototypeCallNativeName() ||
iter->isGetElem_NativePrototypeCallNativeSymbol() ||
iter->isGetElem_NativePrototypeCallScriptedName() ||
iter->isGetElem_NativePrototypeCallScriptedSymbol());
ICGetElemNativePrototypeCallStub<T>* protoStub =
reinterpret_cast<ICGetElemNativePrototypeCallStub<T>*>(*iter);
if (holder != protoStub->holder())
continue;
if (holder->as<NativeObject>().lastProperty() != protoStub->holderShape())
continue;
}
}
return true;
}
return false;
}
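// Unlink existing native GetElem stubs that the new stub would supersede:
// stubs for the same key whose holder shape has gone stale, and
// non-atomizing stubs when the new stub must atomize its key.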
template <class T>
static void
RemoveExistingGetElemNativeStubs(JSContext* cx, ICGetElem_Fallback* stub, HandleObject obj,
HandleObject holder, Handle<T> key, bool needsAtomize)
{
bool indirect = (obj.get() != holder.get());
MOZ_ASSERT_IF(indirect, holder->isNative());
for (ICStubIterator iter = stub->beginChain(); !iter.atEnd(); iter++) {
switch (iter->kind()) {
case ICStub::GetElem_NativeSlotName:
case ICStub::GetElem_NativeSlotSymbol:
if (indirect)
continue;
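            // Fall through.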
case ICStub::GetElem_NativePrototypeSlotName:
case ICStub::GetElem_NativePrototypeSlotSymbol:
case ICStub::GetElem_NativePrototypeCallNativeName:
case ICStub::GetElem_NativePrototypeCallNativeSymbol:
case ICStub::GetElem_NativePrototypeCallScriptedName:
case ICStub::GetElem_NativePrototypeCallScriptedSymbol:
break;
default:
continue;
}
        if (mozilla::IsSame<T, JS::Symbol*>::value !=
            static_cast<ICGetElemNativeStub*>(*iter)->isSymbol())
{
continue;
}
ICGetElemNativeStubImpl<T>* getElemNativeStub =
reinterpret_cast<ICGetElemNativeStubImpl<T>*>(*iter);
if (key != getElemNativeStub->key())
continue;
if (ReceiverGuard(obj) != getElemNativeStub->receiverGuard())
continue;
// For prototype gets, check the holder and holder shape.
if (indirect) {
if (iter->isGetElem_NativePrototypeSlotName() ||
iter->isGetElem_NativePrototypeSlotSymbol()) {
ICGetElem_NativePrototypeSlot<T>* protoStub =
reinterpret_cast<ICGetElem_NativePrototypeSlot<T>*>(*iter);
if (holder != protoStub->holder())
continue;
// If the holder matches, but the holder's lastProperty doesn't match, then
// this stub is invalid anyway. Unlink it.
if (holder->as<NativeObject>().lastProperty() != protoStub->holderShape()) {
iter.unlink(cx);
continue;
}
} else {
MOZ_ASSERT(iter->isGetElem_NativePrototypeCallNativeName() ||
iter->isGetElem_NativePrototypeCallNativeSymbol() ||
iter->isGetElem_NativePrototypeCallScriptedName() ||
iter->isGetElem_NativePrototypeCallScriptedSymbol());
ICGetElemNativePrototypeCallStub<T>* protoStub =
reinterpret_cast<ICGetElemNativePrototypeCallStub<T>*>(*iter);
if (holder != protoStub->holder())
continue;
// If the holder matches, but the holder's lastProperty doesn't match, then
// this stub is invalid anyway. Unlink it.
if (holder->as<NativeObject>().lastProperty() != protoStub->holderShape()) {
iter.unlink(cx);
continue;
}
}
}
// If the new stub needs atomization, and the old stub doesn't atomize, then
// remove the old stub.
if (needsAtomize && !getElemNativeStub->needsAtomize()) {
iter.unlink(cx);
continue;
}
// Should never get here, because this means a matching stub exists, and if
// a matching stub exists, this procedure should never have been called.
MOZ_CRASH("Procedure should never have been called.");
}
}
static bool
TypedArrayGetElemStubExists(ICGetElem_Fallback* stub, HandleObject obj)
{
for (ICStubConstIterator iter = stub->beginChainConst(); !iter.atEnd(); iter++) {
if (!iter->isGetElem_TypedArray())
continue;
if (obj->maybeShape() == iter->toGetElem_TypedArray()->shape())
return true;
}
return false;
}
static bool
ArgumentsGetElemStubExists(ICGetElem_Fallback* stub, ICGetElem_Arguments::Which which)
{
for (ICStubConstIterator iter = stub->beginChainConst(); !iter.atEnd(); iter++) {
if (!iter->isGetElem_Arguments())
continue;
if (iter->toGetElem_Arguments()->which() == which)
return true;
}
return false;
}
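// Extract the stub key from |id|. The specializations below return the
// Symbol or the non-index PropertyName, or nullptr when |id| is not of the
// requested kind.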
template <class T>
static T
getKey(jsid id)
{
MOZ_ASSERT_UNREACHABLE("Key has to be PropertyName or Symbol");
return false;
}
template <>
JS::Symbol* getKey<JS::Symbol*>(jsid id)
{
if (!JSID_IS_SYMBOL(id))
return nullptr;
return JSID_TO_SYMBOL(id);
}
template <>
PropertyName* getKey<PropertyName*>(jsid id)
{
uint32_t dummy;
if (!JSID_IS_ATOM(id) || JSID_TO_ATOM(id)->isIndex(&dummy))
return nullptr;
return JSID_TO_ATOM(id)->asPropertyName();
}
static bool
IsOptimizableElementPropertyName(JSContext* cx, HandleValue key, MutableHandleId idp)
{
if (!key.isString())
return false;
// Convert to interned property name.
if (!ValueToId<CanGC>(cx, key, idp))
return false;
uint32_t dummy;
if (!JSID_IS_ATOM(idp) || JSID_TO_ATOM(idp)->isIndex(&dummy))
return false;
return true;
}
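// Determine whether the incoming string key must be atomized before it can
// be compared against the atom stored in the stub. Symbols never need
// atomization.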
template <class T>
static bool
checkAtomize(HandleValue key)
{
MOZ_ASSERT_UNREACHABLE("Key has to be PropertyName or Symbol");
return false;
}
template <>
bool checkAtomize<JS::Symbol*>(HandleValue key)
{
return false;
}
template <>
bool checkAtomize<PropertyName*>(HandleValue key)
{
return !key.toString()->isAtom();
}
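// Try to attach a GetElem stub for a string- or symbol-keyed value property
// on a native or unboxed plain object: either an unboxed field, or a
// fixed/dynamic slot on the object itself or on a native prototype.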
template <class T>
static bool
TryAttachNativeOrUnboxedGetValueElemStub(JSContext* cx, HandleScript script, jsbytecode* pc,
ICGetElem_Fallback* stub, HandleObject obj,
HandleValue keyVal, bool* attached)
{
MOZ_ASSERT(keyVal.isString() || keyVal.isSymbol());
// Convert to id.
RootedId id(cx);
if (!ValueToId<CanGC>(cx, keyVal, &id))
return false;
Rooted<T> key(cx, getKey<T>(id));
if (!key)
return true;
bool needsAtomize = checkAtomize<T>(keyVal);
RootedShape shape(cx);
RootedObject holder(cx);
if (!EffectlesslyLookupProperty(cx, obj, id, &holder, &shape))
return false;
if (!holder || (holder != obj && !holder->isNative()))
return true;
// If a suitable stub already exists, nothing else to do.
if (GetElemNativeStubExists<T>(stub, obj, holder, key, needsAtomize))
return true;
// Remove any existing stubs that may interfere with the new stub being added.
RemoveExistingGetElemNativeStubs<T>(cx, stub, obj, holder, key, needsAtomize);
ICStub* monitorStub = stub->fallbackMonitorStub()->firstMonitorStub();
if (obj->is<UnboxedPlainObject>() && holder == obj) {
const UnboxedLayout::Property* property = obj->as<UnboxedPlainObject>().layout().lookup(id);
// Once unboxed objects support symbol-keys, we need to change the following accordingly
MOZ_ASSERT_IF(!keyVal.isString(), !property);
if (property) {
if (!cx->runtime()->jitSupportsFloatingPoint)
return true;
RootedPropertyName name(cx, JSID_TO_ATOM(id)->asPropertyName());
ICGetElemNativeCompiler<PropertyName*> compiler(cx, ICStub::GetElem_UnboxedPropertyName,
monitorStub, obj, holder,
name,
ICGetElemNativeStub::UnboxedProperty,
needsAtomize, property->offset +
UnboxedPlainObject::offsetOfData(),
property->type);
ICStub* newStub = compiler.getStub(compiler.getStubSpace(script));
if (!newStub)
return false;
stub->addNewStub(newStub);
*attached = true;
return true;
}
Shape* shape = obj->as<UnboxedPlainObject>().maybeExpando()->lookup(cx, id);
if (!shape->hasDefaultGetter() || !shape->hasSlot())
return true;
bool isFixedSlot;
uint32_t offset;
GetFixedOrDynamicSlotOffset(shape, &isFixedSlot, &offset);
ICGetElemNativeStub::AccessType acctype =
isFixedSlot ? ICGetElemNativeStub::FixedSlot
: ICGetElemNativeStub::DynamicSlot;
ICGetElemNativeCompiler<T> compiler(cx, getGetElemStubKind<T>(ICStub::GetElem_NativeSlotName),
monitorStub, obj, holder, key,
acctype, needsAtomize, offset);
ICStub* newStub = compiler.getStub(compiler.getStubSpace(script));
if (!newStub)
return false;
stub->addNewStub(newStub);
*attached = true;
return true;
}
if (!holder->isNative())
return true;
if (IsCacheableGetPropReadSlot(obj, holder, shape)) {
bool isFixedSlot;
uint32_t offset;
GetFixedOrDynamicSlotOffset(shape, &isFixedSlot, &offset);
ICStub::Kind kind = (obj == holder) ? ICStub::GetElem_NativeSlotName
: ICStub::GetElem_NativePrototypeSlotName;
kind = getGetElemStubKind<T>(kind);
JitSpew(JitSpew_BaselineIC, " Generating GetElem(Native %s%s slot) stub "
"(obj=%p, holder=%p, holderShape=%p)",
(obj == holder) ? "direct" : "prototype",
needsAtomize ? " atomizing" : "",
obj.get(), holder.get(), holder->as<NativeObject>().lastProperty());
        ICGetElemNativeStub::AccessType acctype = isFixedSlot
                                                  ? ICGetElemNativeStub::FixedSlot
                                                  : ICGetElemNativeStub::DynamicSlot;
ICGetElemNativeCompiler<T> compiler(cx, kind, monitorStub, obj, holder, key,
acctype, needsAtomize, offset);
ICStub* newStub = compiler.getStub(compiler.getStubSpace(script));
if (!newStub)
return false;
stub->addNewStub(newStub);
*attached = true;
return true;
}
return true;
}
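// Try to attach a GetElem stub that calls a scripted or native getter found
// on the prototype chain of a native receiver. Own-property getters are not
// handled here.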
template <class T>
static bool
TryAttachNativeGetAccessorElemStub(JSContext* cx, HandleScript script, jsbytecode* pc,
ICGetElem_Fallback* stub, HandleNativeObject obj,
HandleValue keyVal, bool* attached,
bool* isTemporarilyUnoptimizable)
{
MOZ_ASSERT(!*attached);
MOZ_ASSERT(keyVal.isString() || keyVal.isSymbol());
RootedId id(cx);
if (!ValueToId<CanGC>(cx, keyVal, &id))
return false;
Rooted<T> key(cx, getKey<T>(id));
if (!key)
return true;
bool needsAtomize = checkAtomize<T>(keyVal);
RootedShape shape(cx);
RootedObject baseHolder(cx);
if (!EffectlesslyLookupProperty(cx, obj, id, &baseHolder, &shape))
return false;
    if (!baseHolder || !baseHolder->isNative())
return true;
HandleNativeObject holder = baseHolder.as<NativeObject>();
bool getterIsScripted = false;
if (IsCacheableGetPropCall(cx, obj, baseHolder, shape, &getterIsScripted,
isTemporarilyUnoptimizable, /*isDOMProxy=*/false))
{
RootedFunction getter(cx, &shape->getterObject()->as<JSFunction>());
// For now, we do not handle own property getters
if (obj == holder)
return true;
// If a suitable stub already exists, nothing else to do.
if (GetElemNativeStubExists<T>(stub, obj, holder, key, needsAtomize))
return true;
// Remove any existing stubs that may interfere with the new stub being added.
RemoveExistingGetElemNativeStubs<T>(cx, stub, obj, holder, key, needsAtomize);
ICStub* monitorStub = stub->fallbackMonitorStub()->firstMonitorStub();
ICStub::Kind kind = getterIsScripted ? ICStub::GetElem_NativePrototypeCallScriptedName
: ICStub::GetElem_NativePrototypeCallNativeName;
kind = getGetElemStubKind<T>(kind);
if (getterIsScripted) {
JitSpew(JitSpew_BaselineIC,
" Generating GetElem(Native %s%s call scripted %s:%" PRIuSIZE ") stub "
"(obj=%p, shape=%p, holder=%p, holderShape=%p)",
(obj == holder) ? "direct" : "prototype",
needsAtomize ? " atomizing" : "",
getter->nonLazyScript()->filename(), getter->nonLazyScript()->lineno(),
obj.get(), obj->lastProperty(), holder.get(), holder->lastProperty());
} else {
JitSpew(JitSpew_BaselineIC,
" Generating GetElem(Native %s%s call native) stub "
"(obj=%p, shape=%p, holder=%p, holderShape=%p)",
(obj == holder) ? "direct" : "prototype",
needsAtomize ? " atomizing" : "",
obj.get(), obj->lastProperty(), holder.get(), holder->lastProperty());
}
        ICGetElemNativeStub::AccessType acctype = getterIsScripted
                                                  ? ICGetElemNativeStub::ScriptedGetter
                                                  : ICGetElemNativeStub::NativeGetter;
ICGetElemNativeCompiler<T> compiler(cx, kind, monitorStub, obj, holder, key, acctype,
needsAtomize, getter, script->pcToOffset(pc));
ICStub* newStub = compiler.getStub(compiler.getStubSpace(script));
if (!newStub)
return false;
stub->addNewStub(newStub);
*attached = true;
return true;
}
return true;
}
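// Test for a TypedObject that is an array of scalar elements; such objects
// can share the typed-array element stubs.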
static bool
IsPrimitiveArrayTypedObject(JSObject* obj)
{
if (!obj->is<TypedObject>())
return false;
TypeDescr& descr = obj->as<TypedObject>().typeDescr();
return descr.is<ArrayTypeDescr>() &&
descr.as<ArrayTypeDescr>().elementType().is<ScalarTypeDescr>();
}
static Scalar::Type
PrimitiveArrayTypedObjectType(JSObject* obj)
{
MOZ_ASSERT(IsPrimitiveArrayTypedObject(obj));
TypeDescr& descr = obj->as<TypedObject>().typeDescr();
return descr.as<ArrayTypeDescr>().elementType().as<ScalarTypeDescr>().type();
}
static Scalar::Type
TypedThingElementType(JSObject* obj)
{
return IsAnyTypedArray(obj)
? AnyTypedArrayType(obj)
: PrimitiveArrayTypedObjectType(obj);
}
static bool
TypedThingRequiresFloatingPoint(JSObject* obj)
{
Scalar::Type type = TypedThingElementType(obj);
return type == Scalar::Uint32 ||
type == Scalar::Float32 ||
type == Scalar::Float64;
}
static bool
IsNativeDenseElementAccess(HandleObject obj, HandleValue key)
{
if (obj->isNative() && key.isInt32() && key.toInt32() >= 0 && !IsAnyTypedArray(obj.get()))
return true;
return false;
}
static bool
IsNativeOrUnboxedDenseElementAccess(HandleObject obj, HandleValue key)
{
if (!obj->isNative() && !obj->is<UnboxedArrayObject>())
return false;
if (key.isInt32() && key.toInt32() >= 0 && !IsAnyTypedArray(obj.get()))
return true;
return false;
}
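// Main dispatch for attaching an optimized GetElem stub. Candidates, in
// order: String[Int32], magic arguments, arguments objects, native dense
// elements, named native/unboxed properties, unboxed arrays, and typed
// arrays/typed objects.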
static bool
TryAttachGetElemStub(JSContext* cx, JSScript* script, jsbytecode* pc, ICGetElem_Fallback* stub,
HandleValue lhs, HandleValue rhs, HandleValue res, bool* attached)
{
// Check for String[i] => Char accesses.
if (lhs.isString() && rhs.isInt32() && res.isString() &&
!stub->hasStub(ICStub::GetElem_String))
{
// NoSuchMethod handling doesn't apply to string targets.
JitSpew(JitSpew_BaselineIC, " Generating GetElem(String[Int32]) stub");
ICGetElem_String::Compiler compiler(cx);
ICStub* stringStub = compiler.getStub(compiler.getStubSpace(script));
if (!stringStub)
return false;
stub->addNewStub(stringStub);
*attached = true;
return true;
}
if (lhs.isMagic(JS_OPTIMIZED_ARGUMENTS) && rhs.isInt32() &&
!ArgumentsGetElemStubExists(stub, ICGetElem_Arguments::Magic))
{
JitSpew(JitSpew_BaselineIC, " Generating GetElem(MagicArgs[Int32]) stub");
ICGetElem_Arguments::Compiler compiler(cx, stub->fallbackMonitorStub()->firstMonitorStub(),
ICGetElem_Arguments::Magic);
ICStub* argsStub = compiler.getStub(compiler.getStubSpace(script));
if (!argsStub)
return false;
stub->addNewStub(argsStub);
*attached = true;
return true;
}
// Otherwise, GetElem is only optimized on objects.
if (!lhs.isObject())
return true;
RootedObject obj(cx, &lhs.toObject());
// Check for ArgumentsObj[int] accesses
if (obj->is<ArgumentsObject>() && rhs.isInt32()) {
ICGetElem_Arguments::Which which = ICGetElem_Arguments::Mapped;
if (obj->is<UnmappedArgumentsObject>())
which = ICGetElem_Arguments::Unmapped;
if (!ArgumentsGetElemStubExists(stub, which)) {
JitSpew(JitSpew_BaselineIC, " Generating GetElem(ArgsObj[Int32]) stub");
ICGetElem_Arguments::Compiler compiler(
cx, stub->fallbackMonitorStub()->firstMonitorStub(), which);
ICStub* argsStub = compiler.getStub(compiler.getStubSpace(script));
if (!argsStub)
return false;
stub->addNewStub(argsStub);
*attached = true;
return true;
}
}
// Check for NativeObject[int] dense accesses.
if (IsNativeDenseElementAccess(obj, rhs)) {
JitSpew(JitSpew_BaselineIC, " Generating GetElem(Native[Int32] dense) stub");
ICGetElem_Dense::Compiler compiler(cx, stub->fallbackMonitorStub()->firstMonitorStub(),
obj->as<NativeObject>().lastProperty());
ICStub* denseStub = compiler.getStub(compiler.getStubSpace(script));
if (!denseStub)
return false;
stub->addNewStub(denseStub);
*attached = true;
return true;
}
// Check for NativeObject[id] and UnboxedPlainObject[id] shape-optimizable accesses.
if (obj->isNative() || obj->is<UnboxedPlainObject>()) {
RootedScript rootedScript(cx, script);
if (rhs.isString()) {
if (!TryAttachNativeOrUnboxedGetValueElemStub<PropertyName*>(cx, rootedScript, pc, stub,
obj, rhs, attached))
{
return false;
}
} else if (rhs.isSymbol()) {
if (!TryAttachNativeOrUnboxedGetValueElemStub<JS::Symbol*>(cx, rootedScript, pc, stub,
obj, rhs, attached))
{
return false;
}
}
if (*attached)
return true;
script = rootedScript;
}
// Check for UnboxedArray[int] accesses.
if (obj->is<UnboxedArrayObject>() && rhs.isInt32() && rhs.toInt32() >= 0) {
JitSpew(JitSpew_BaselineIC, " Generating GetElem(UnboxedArray[Int32]) stub");
ICGetElem_UnboxedArray::Compiler compiler(cx, stub->fallbackMonitorStub()->firstMonitorStub(),
obj->group());
ICStub* unboxedStub = compiler.getStub(compiler.getStubSpace(script));
if (!unboxedStub)
return false;
stub->addNewStub(unboxedStub);
*attached = true;
return true;
}
// Check for TypedArray[int] => Number and TypedObject[int] => Number accesses.
if ((IsAnyTypedArray(obj.get()) || IsPrimitiveArrayTypedObject(obj)) &&
rhs.isNumber() &&
res.isNumber() &&
!TypedArrayGetElemStubExists(stub, obj))
{
if (!cx->runtime()->jitSupportsFloatingPoint &&
(TypedThingRequiresFloatingPoint(obj) || rhs.isDouble()))
{
return true;
}
// Don't attach typed object stubs if they might be neutered, as the
// stub will always bail out.
if (IsPrimitiveArrayTypedObject(obj) && cx->compartment()->neuteredTypedObjects)
return true;
JitSpew(JitSpew_BaselineIC, " Generating GetElem(TypedArray[Int32]) stub");
ICGetElem_TypedArray::Compiler compiler(cx, obj->maybeShape(), TypedThingElementType(obj));
ICStub* typedArrayStub = compiler.getStub(compiler.getStubSpace(script));
if (!typedArrayStub)
return false;
stub->addNewStub(typedArrayStub);
*attached = true;
return true;
}
// GetElem operations on non-native objects cannot be cached by either
// Baseline or Ion. Indicate this in the cache so that Ion does not
// generate a cache for this op.
if (!obj->isNative())
stub->noteNonNativeAccess();
// GetElem operations which could access negative indexes generally can't
// be optimized without the potential for bailouts, as we can't statically
// determine that an object has no properties on such indexes.
if (rhs.isNumber() && rhs.toNumber() < 0)
stub->noteNegativeIndex();
return true;
}
static bool
DoGetElemFallback(JSContext* cx, BaselineFrame* frame, ICGetElem_Fallback* stub_, HandleValue lhs,
HandleValue rhs, MutableHandleValue res)
{
// This fallback stub may trigger debug mode toggling.
DebugModeOSRVolatileStub<ICGetElem_Fallback*> stub(frame, stub_);
RootedScript script(cx, frame->script());
jsbytecode* pc = stub->icEntry()->pc(frame->script());
JSOp op = JSOp(*pc);
FallbackICSpew(cx, stub, "GetElem(%s)", CodeName[op]);
MOZ_ASSERT(op == JSOP_GETELEM || op == JSOP_CALLELEM);
// Don't pass lhs directly, we need it when generating stubs.
RootedValue lhsCopy(cx, lhs);
bool isOptimizedArgs = false;
if (lhs.isMagic(JS_OPTIMIZED_ARGUMENTS)) {
// Handle optimized arguments[i] access.
if (!GetElemOptimizedArguments(cx, frame, &lhsCopy, rhs, res, &isOptimizedArgs))
return false;
if (isOptimizedArgs)
TypeScript::Monitor(cx, frame->script(), pc, res);
}
bool attached = false;
if (stub->numOptimizedStubs() >= ICGetElem_Fallback::MAX_OPTIMIZED_STUBS) {
// TODO: Discard all stubs in this IC and replace with inert megamorphic stub.
// But for now we just bail.
stub->noteUnoptimizableAccess();
attached = true;
}
// Try to attach an optimized getter stub.
bool isTemporarilyUnoptimizable = false;
    if (!attached && lhs.isObject() && lhs.toObject().isNative()) {
if (rhs.isString()) {
RootedScript rootedScript(cx, frame->script());
RootedNativeObject obj(cx, &lhs.toObject().as<NativeObject>());
if (!TryAttachNativeGetAccessorElemStub<PropertyName*>(cx, rootedScript, pc, stub,
obj, rhs, &attached,
&isTemporarilyUnoptimizable))
{
return false;
}
script = rootedScript;
} else if (rhs.isSymbol()) {
RootedScript rootedScript(cx, frame->script());
RootedNativeObject obj(cx, &lhs.toObject().as<NativeObject>());
if (!TryAttachNativeGetAccessorElemStub<JS::Symbol*>(cx, rootedScript, pc, stub,
obj, rhs, &attached,
&isTemporarilyUnoptimizable))
{
return false;
}
script = rootedScript;
}
}
if (!isOptimizedArgs) {
if (!GetElementOperation(cx, op, &lhsCopy, rhs, res))
return false;
TypeScript::Monitor(cx, frame->script(), pc, res);
}
// Check if debug mode toggling made the stub invalid.
if (stub.invalid())
return true;
// Add a type monitor stub for the resulting value.
if (!stub->addMonitorStubForValue(cx, frame->script(), res,
ICStubCompiler::Engine::Baseline))
{
return false;
}
if (attached)
return true;
// Try to attach an optimized stub.
if (!TryAttachGetElemStub(cx, frame->script(), pc, stub, lhs, rhs, res, &attached))
return false;
if (!attached && !isTemporarilyUnoptimizable)
stub->noteUnoptimizableAccess();
return true;
}
typedef bool (*DoGetElemFallbackFn)(JSContext*, BaselineFrame*, ICGetElem_Fallback*,
HandleValue, HandleValue, MutableHandleValue);
static const VMFunction DoGetElemFallbackInfo =
FunctionInfo<DoGetElemFallbackFn>(DoGetElemFallback, TailCall, PopValues(2));
bool
ICGetElem_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
{
MOZ_ASSERT(engine_ == Engine::Baseline);
MOZ_ASSERT(R0 == JSReturnOperand);
// Restore the tail call register.
EmitRestoreTailCallReg(masm);
// Ensure stack is fully synced for the expression decompiler.
masm.pushValue(R0);
masm.pushValue(R1);
// Push arguments.
masm.pushValue(R1);
masm.pushValue(R0);
masm.push(ICStubReg);
pushFramePtr(masm, R0.scratchReg());
return tailCallVM(DoGetElemFallbackInfo, masm);
}
//
// GetElem_NativeSlot
//
static bool
DoAtomizeString(JSContext* cx, HandleString string, MutableHandleValue result)
{
JitSpew(JitSpew_BaselineIC, " AtomizeString called");
RootedValue key(cx, StringValue(string));
// Convert to interned property name.
RootedId id(cx);
if (!ValueToId<CanGC>(cx, key, &id))
return false;
if (!JSID_IS_ATOM(id)) {
result.set(key);
return true;
}
result.set(StringValue(JSID_TO_ATOM(id)));
return true;
}
typedef bool (*DoAtomizeStringFn)(JSContext*, HandleString, MutableHandleValue);
static const VMFunction DoAtomizeStringInfo = FunctionInfo<DoAtomizeStringFn>(DoAtomizeString);
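// Emit a stub-frame call to DoCallNativeGetter with the receiver in |objReg|,
// leaving the getter's result in the return operand.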
template <class T>
bool
ICGetElemNativeCompiler<T>::emitCallNative(MacroAssembler& masm, Register objReg)
{
AllocatableGeneralRegisterSet regs(availableGeneralRegs(0));
regs.takeUnchecked(objReg);
regs.takeUnchecked(ICTailCallReg);
enterStubFrame(masm, regs.getAny());
// Push object.
masm.push(objReg);
// Push native callee.
masm.loadPtr(Address(ICStubReg, ICGetElemNativeGetterStub<T>::offsetOfGetter()), objReg);
masm.push(objReg);
regs.add(objReg);
// Call helper.
if (!callVM(DoCallNativeGetterInfo, masm))
return false;
leaveStubFrame(masm);
return true;
}
template <class T>
bool
ICGetElemNativeCompiler<T>::emitCallScripted(MacroAssembler& masm, Register objReg)
{
AllocatableGeneralRegisterSet regs(availableGeneralRegs(0));
regs.takeUnchecked(objReg);
regs.takeUnchecked(ICTailCallReg);
// Enter stub frame.
enterStubFrame(masm, regs.getAny());
// Align the stack such that the JitFrameLayout is aligned on
// JitStackAlignment.
masm.alignJitStackBasedOnNArgs(0);
// Push |this| for getter (target object).
{
ValueOperand val = regs.takeAnyValue();
masm.tagValue(JSVAL_TYPE_OBJECT, objReg, val);
masm.Push(val);
regs.add(val);
}
regs.add(objReg);
Register callee = regs.takeAny();
masm.loadPtr(Address(ICStubReg, ICGetElemNativeGetterStub<T>::offsetOfGetter()), callee);
// Push argc, callee, and descriptor.
{
Register callScratch = regs.takeAny();
EmitBaselineCreateStubFrameDescriptor(masm, callScratch);
masm.Push(Imm32(0)); // ActualArgc is 0
masm.Push(callee);
masm.Push(callScratch);
regs.add(callScratch);
}
Register code = regs.takeAnyExcluding(ArgumentsRectifierReg);
masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrScript()), code);
masm.loadBaselineOrIonRaw(code, code, nullptr);
Register scratch = regs.takeAny();
// Handle arguments underflow.
Label noUnderflow;
masm.load16ZeroExtend(Address(callee, JSFunction::offsetOfNargs()), scratch);
masm.branch32(Assembler::Equal, scratch, Imm32(0), &noUnderflow);
{
// Call the arguments rectifier.
MOZ_ASSERT(ArgumentsRectifierReg != code);
JitCode* argumentsRectifier =
cx->runtime()->jitRuntime()->getArgumentsRectifier();
masm.movePtr(ImmGCPtr(argumentsRectifier), code);
masm.loadPtr(Address(code, JitCode::offsetOfCode()), code);
masm.movePtr(ImmWord(0), ArgumentsRectifierReg);
}
masm.bind(&noUnderflow);
masm.callJit(code);
leaveStubFrame(masm, true);
return true;
}
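// Guard that the incoming key in R1 matches the key baked into the stub:
// pointer identity for Symbols, and pointer identity for PropertyNames after
// optionally atomizing the incoming string.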
template <class T>
bool
ICGetElemNativeCompiler<T>::emitCheckKey(MacroAssembler& masm, Label& failure)
{
MOZ_ASSERT_UNREACHABLE("Key has to be PropertyName or Symbol");
return false;
}
template <>
bool
ICGetElemNativeCompiler<JS::Symbol*>::emitCheckKey(MacroAssembler& masm, Label& failure)
{
MOZ_ASSERT(!needsAtomize_);
masm.branchTestSymbol(Assembler::NotEqual, R1, &failure);
Address symbolAddr(ICStubReg, ICGetElemNativeStubImpl<JS::Symbol*>::offsetOfKey());
Register symExtract = masm.extractObject(R1, ExtractTemp1);
masm.branchPtr(Assembler::NotEqual, symbolAddr, symExtract, &failure);
return true;
}
template <>
bool
ICGetElemNativeCompiler<PropertyName*>::emitCheckKey(MacroAssembler& masm, Label& failure)
{
masm.branchTestString(Assembler::NotEqual, R1, &failure);
    // Check key identity. Don't automatically fail if this check fails, since
    // the incoming key may be a non-interned string. Switch to a slowpath
    // VM-call based check.
Address nameAddr(ICStubReg, ICGetElemNativeStubImpl<PropertyName*>::offsetOfKey());
Register strExtract = masm.extractString(R1, ExtractTemp1);
// If needsAtomize_ is true, and the string is not already an atom, then atomize the
// string before proceeding.
if (needsAtomize_) {
Label skipAtomize;
// If string is already an atom, skip the atomize.
masm.branchTest32(Assembler::NonZero,
Address(strExtract, JSString::offsetOfFlags()),
Imm32(JSString::ATOM_BIT),
&skipAtomize);
// Stow R0.
EmitStowICValues(masm, 1);
enterStubFrame(masm, R0.scratchReg());
// Atomize the string into a new value.
masm.push(strExtract);
if (!callVM(DoAtomizeStringInfo, masm))
return false;
// Atomized string is now in JSReturnOperand (R0).
// Leave stub frame, move atomized string into R1.
MOZ_ASSERT(R0 == JSReturnOperand);
leaveStubFrame(masm);
masm.moveValue(JSReturnOperand, R1);
// Unstow R0
EmitUnstowICValues(masm, 1);
// Extract string from R1 again.
DebugOnly<Register> strExtract2 = masm.extractString(R1, ExtractTemp1);
MOZ_ASSERT(Register(strExtract2) == strExtract);
masm.bind(&skipAtomize);
}
// Key has been atomized if necessary. Do identity check on string pointer.
masm.branchPtr(Assembler::NotEqual, nameAddr, strExtract, &failure);
return true;
}
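// Generate code for a native-object GetElem stub: guard the receiver's
// shape/group, check the key against the stub's stored name or symbol
// (atomizing an incoming string first if necessary), locate the holder
// (the receiver itself, its unboxed expando, or a shape-guarded prototype),
// and then either load the value from a fixed/dynamic slot or unboxed
// storage, or call a native or scripted getter.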
template <class T>
bool
ICGetElemNativeCompiler<T>::generateStubCode(MacroAssembler& masm)
{
MOZ_ASSERT(engine_ == Engine::Baseline);
Label failure;
Label failurePopR1;
bool popR1 = false;
masm.branchTestObject(Assembler::NotEqual, R0, &failure);
AllocatableGeneralRegisterSet regs(availableGeneralRegs(2));
Register scratchReg = regs.takeAny();
// Unbox object.
Register objReg = masm.extractObject(R0, ExtractTemp0);
// Check object shape/group.
GuardReceiverObject(masm, ReceiverGuard(obj_), objReg, scratchReg,
ICGetElemNativeStub::offsetOfReceiverGuard(), &failure);
// Since this stub sometimes enters a stub frame, we manually set this to true (lie).
#ifdef DEBUG
entersStubFrame_ = true;
#endif
if (!emitCheckKey(masm, failure))
return false;
Register holderReg;
if (obj_ == holder_) {
holderReg = objReg;
if (obj_->is<UnboxedPlainObject>() && acctype_ != ICGetElemNativeStub::UnboxedProperty) {
// The property will be loaded off the unboxed expando.
masm.push(R1.scratchReg());
popR1 = true;
holderReg = R1.scratchReg();
masm.loadPtr(Address(objReg, UnboxedPlainObject::offsetOfExpando()), holderReg);
}
} else {
// Shape guard holder.
if (regs.empty()) {
masm.push(R1.scratchReg());
popR1 = true;
holderReg = R1.scratchReg();
} else {
holderReg = regs.takeAny();
}
if (kind == ICStub::GetElem_NativePrototypeCallNativeName ||
kind == ICStub::GetElem_NativePrototypeCallNativeSymbol ||
kind == ICStub::GetElem_NativePrototypeCallScriptedName ||
kind == ICStub::GetElem_NativePrototypeCallScriptedSymbol)
{
masm.loadPtr(Address(ICStubReg,
ICGetElemNativePrototypeCallStub<T>::offsetOfHolder()),
holderReg);
masm.loadPtr(Address(ICStubReg,
ICGetElemNativePrototypeCallStub<T>::offsetOfHolderShape()),
scratchReg);
} else {
masm.loadPtr(Address(ICStubReg,
ICGetElem_NativePrototypeSlot<T>::offsetOfHolder()),
holderReg);
masm.loadPtr(Address(ICStubReg,
ICGetElem_NativePrototypeSlot<T>::offsetOfHolderShape()),
scratchReg);
}
masm.branchTestObjShape(Assembler::NotEqual, holderReg, scratchReg,
popR1 ? &failurePopR1 : &failure);
}
if (acctype_ == ICGetElemNativeStub::DynamicSlot ||
acctype_ == ICGetElemNativeStub::FixedSlot)
{
masm.load32(Address(ICStubReg, ICGetElemNativeSlotStub<T>::offsetOfOffset()),
scratchReg);
// Load from object.
if (acctype_ == ICGetElemNativeStub::DynamicSlot)
masm.addPtr(Address(holderReg, NativeObject::offsetOfSlots()), scratchReg);
else
masm.addPtr(holderReg, scratchReg);
Address valAddr(scratchReg, 0);
masm.loadValue(valAddr, R0);
if (popR1)
masm.addToStackPtr(ImmWord(sizeof(size_t)));
} else if (acctype_ == ICGetElemNativeStub::UnboxedProperty) {
masm.load32(Address(ICStubReg, ICGetElemNativeSlotStub<T>::offsetOfOffset()),
scratchReg);
masm.loadUnboxedProperty(BaseIndex(objReg, scratchReg, TimesOne), unboxedType_,
TypedOrValueRegister(R0));
if (popR1)
masm.addToStackPtr(ImmWord(sizeof(size_t)));
} else {
MOZ_ASSERT(acctype_ == ICGetElemNativeStub::NativeGetter ||
acctype_ == ICGetElemNativeStub::ScriptedGetter);
MOZ_ASSERT(kind == ICStub::GetElem_NativePrototypeCallNativeName ||
kind == ICStub::GetElem_NativePrototypeCallNativeSymbol ||
kind == ICStub::GetElem_NativePrototypeCallScriptedName ||
kind == ICStub::GetElem_NativePrototypeCallScriptedSymbol);
if (acctype_ == ICGetElemNativeStub::NativeGetter) {
// If calling a native getter, there is no chance of failure now.
// GetElem key (R1) is no longer needed.
if (popR1)
masm.addToStackPtr(ImmWord(sizeof(size_t)));
emitCallNative(masm, objReg);
} else {
MOZ_ASSERT(acctype_ == ICGetElemNativeStub::ScriptedGetter);
// Load function in scratchReg and ensure that it has a jit script.
masm.loadPtr(Address(ICStubReg, ICGetElemNativeGetterStub<T>::offsetOfGetter()),
scratchReg);
masm.branchIfFunctionHasNoScript(scratchReg, popR1 ? &failurePopR1 : &failure);
masm.loadPtr(Address(scratchReg, JSFunction::offsetOfNativeOrScript()), scratchReg);
masm.loadBaselineOrIonRaw(scratchReg, scratchReg, popR1 ? &failurePopR1 : &failure);
// At this point, we are guaranteed to successfully complete.
if (popR1)
masm.addToStackPtr(Imm32(sizeof(size_t)));
emitCallScripted(masm, objReg);
}
}
// Enter type monitor IC to type-check result.
EmitEnterTypeMonitorIC(masm);
// Failure case - jump to next stub
if (popR1) {
masm.bind(&failurePopR1);
masm.pop(R1.scratchReg());
}
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
//
// GetElem_String
//
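// Fast path for string[int32]: only linear (non-rope) strings are handled,
// and only characters below StaticStrings::UNIT_STATIC_LIMIT, so the result
// can be fetched from the static strings table without allocating.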
bool
ICGetElem_String::Compiler::generateStubCode(MacroAssembler& masm)
{
MOZ_ASSERT(engine_ == Engine::Baseline);
Label failure;
masm.branchTestString(Assembler::NotEqual, R0, &failure);
masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
AllocatableGeneralRegisterSet regs(availableGeneralRegs(2));
Register scratchReg = regs.takeAny();
// Unbox string in R0.
Register str = masm.extractString(R0, ExtractTemp0);
// Check for non-linear strings.
masm.branchIfRope(str, &failure);
// Unbox key.
Register key = masm.extractInt32(R1, ExtractTemp1);
// Bounds check.
masm.branch32(Assembler::BelowOrEqual, Address(str, JSString::offsetOfLength()),
key, &failure);
// Get char code.
masm.loadStringChar(str, key, scratchReg);
// Check if char code >= UNIT_STATIC_LIMIT.
masm.branch32(Assembler::AboveOrEqual, scratchReg, Imm32(StaticStrings::UNIT_STATIC_LIMIT),
&failure);
// Load static string.
masm.movePtr(ImmPtr(&cx->staticStrings().unitStaticTable), str);
masm.loadPtr(BaseIndex(str, scratchReg, ScalePointer), str);
// Return.
masm.tagValue(JSVAL_TYPE_STRING, str, R0);
EmitReturnFromIC(masm);
// Failure case - jump to next stub
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
//
// GetElem_Dense
//
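// Fast path for dense element loads: shape-guard the object, bounds-check the
// index against the initialized length, and fail on holes (magic values), so
// the prototype chain never needs to be consulted here.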
bool
ICGetElem_Dense::Compiler::generateStubCode(MacroAssembler& masm)
{
MOZ_ASSERT(engine_ == Engine::Baseline);
Label failure;
masm.branchTestObject(Assembler::NotEqual, R0, &failure);
masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
AllocatableGeneralRegisterSet regs(availableGeneralRegs(2));
Register scratchReg = regs.takeAny();
// Unbox R0 and shape guard.
Register obj = masm.extractObject(R0, ExtractTemp0);
masm.loadPtr(Address(ICStubReg, ICGetElem_Dense::offsetOfShape()), scratchReg);
masm.branchTestObjShape(Assembler::NotEqual, obj, scratchReg, &failure);
// Load obj->elements.
masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratchReg);
// Unbox key.
Register key = masm.extractInt32(R1, ExtractTemp1);
// Bounds check.
Address initLength(scratchReg, ObjectElements::offsetOfInitializedLength());
masm.branch32(Assembler::BelowOrEqual, initLength, key, &failure);
// Hole check and load value.
BaseObjectElementIndex element(scratchReg, key);
masm.branchTestMagic(Assembler::Equal, element, &failure);
// Load value from element location.
masm.loadValue(element, R0);
// Enter type monitor IC to type-check result.
EmitEnterTypeMonitorIC(masm);
// Failure case - jump to next stub
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
//
// GetElem_UnboxedArray
//
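// Fast path for unboxed array loads. The initialized length is packed into
// the same word as the capacity index, so it is masked out before the bounds
// check. Only object-typed elements need type monitoring; for all other
// element types the result type is statically known.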
bool
ICGetElem_UnboxedArray::Compiler::generateStubCode(MacroAssembler& masm)
{
MOZ_ASSERT(engine_ == Engine::Baseline);
Label failure;
masm.branchTestObject(Assembler::NotEqual, R0, &failure);
masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
AllocatableGeneralRegisterSet regs(availableGeneralRegs(2));
Register scratchReg = regs.takeAny();
// Unbox R0 and group guard.
Register obj = masm.extractObject(R0, ExtractTemp0);
masm.loadPtr(Address(ICStubReg, ICGetElem_UnboxedArray::offsetOfGroup()), scratchReg);
masm.branchTestObjGroup(Assembler::NotEqual, obj, scratchReg, &failure);
// Unbox key.
Register key = masm.extractInt32(R1, ExtractTemp1);
// Bounds check.
masm.load32(Address(obj, UnboxedArrayObject::offsetOfCapacityIndexAndInitializedLength()),
scratchReg);
masm.and32(Imm32(UnboxedArrayObject::InitializedLengthMask), scratchReg);
masm.branch32(Assembler::BelowOrEqual, scratchReg, key, &failure);
// Load obj->elements.
masm.loadPtr(Address(obj, UnboxedArrayObject::offsetOfElements()), scratchReg);
// Load value.
size_t width = UnboxedTypeSize(elementType_);
BaseIndex addr(scratchReg, key, ScaleFromElemWidth(width));
masm.loadUnboxedProperty(addr, elementType_, R0);
// Only monitor the result if its type might change.
if (elementType_ == JSVAL_TYPE_OBJECT)
EmitEnterTypeMonitorIC(masm);
else
EmitReturnFromIC(masm);
// Failure case - jump to next stub
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
//
// GetElem_TypedArray
//
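// Load the element count of a typed thing. Typed arrays keep their length in
// a slot; outline and inline typed objects recover it from the array type
// descriptor hanging off the object's group.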
static void
LoadTypedThingLength(MacroAssembler& masm, TypedThingLayout layout, Register obj, Register result)
{
switch (layout) {
case Layout_TypedArray:
masm.unboxInt32(Address(obj, TypedArrayObject::lengthOffset()), result);
break;
case Layout_OutlineTypedObject:
case Layout_InlineTypedObject:
masm.loadPtr(Address(obj, JSObject::offsetOfGroup()), result);
masm.loadPtr(Address(result, ObjectGroup::offsetOfAddendum()), result);
masm.unboxInt32(Address(result, ArrayTypeDescr::offsetOfLength()), result);
break;
default:
MOZ_CRASH();
}
}
bool
ICGetElem_TypedArray::Compiler::generateStubCode(MacroAssembler& masm)
{
MOZ_ASSERT(engine_ == Engine::Baseline);
Label failure;
if (layout_ != Layout_TypedArray)
CheckForNeuteredTypedObject(cx, masm, &failure);
masm.branchTestObject(Assembler::NotEqual, R0, &failure);
AllocatableGeneralRegisterSet regs(availableGeneralRegs(2));
Register scratchReg = regs.takeAny();
// Unbox R0 and shape guard.
Register obj = masm.extractObject(R0, ExtractTemp0);
masm.loadPtr(Address(ICStubReg, ICGetElem_TypedArray::offsetOfShape()), scratchReg);
masm.branchTestObjShape(Assembler::NotEqual, obj, scratchReg, &failure);
// Ensure the index is an integer.
if (cx->runtime()->jitSupportsFloatingPoint) {
Label isInt32;
masm.branchTestInt32(Assembler::Equal, R1, &isInt32);
{
// If the index is a double, try to convert it to int32. It's okay
// to convert -0 to 0: the shape check ensures the object is a typed
// array so the difference is not observable.
masm.branchTestDouble(Assembler::NotEqual, R1, &failure);
masm.unboxDouble(R1, FloatReg0);
masm.convertDoubleToInt32(FloatReg0, scratchReg, &failure, /* negZeroCheck = */false);
masm.tagValue(JSVAL_TYPE_INT32, scratchReg, R1);
}
masm.bind(&isInt32);
} else {
masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
}
// Unbox key.
Register key = masm.extractInt32(R1, ExtractTemp1);
// Bounds check.
LoadTypedThingLength(masm, layout_, obj, scratchReg);
masm.branch32(Assembler::BelowOrEqual, scratchReg, key, &failure);
// Load the elements vector.
LoadTypedThingData(masm, layout_, obj, scratchReg);
// Load the value.
BaseIndex source(scratchReg, key, ScaleFromElemWidth(Scalar::byteSize(type_)));
masm.loadFromTypedArray(type_, source, R0, false, scratchReg, &failure);
// TODO: Allow loading doubles from uint32 arrays, but this requires monitoring.
EmitReturnFromIC(masm);
// Failure case - jump to next stub
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
//
// GetElem_Arguments
//
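// GetElem on arguments comes in three flavors. Magic handles the lazy
// JS_OPTIMIZED_ARGUMENTS value by reading the argument directly out of the
// baseline frame. Mapped and Unmapped guard on the corresponding
// ArgumentsObject class and must additionally check that the length has not
// been overridden and that the element has not been deleted or forwarded to
// a call slot.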
bool
ICGetElem_Arguments::Compiler::generateStubCode(MacroAssembler& masm)
{
MOZ_ASSERT(engine_ == Engine::Baseline);
Label failure;
if (which_ == ICGetElem_Arguments::Magic) {
// Ensure that this is a magic arguments value.
masm.branchTestMagicValue(Assembler::NotEqual, R0, JS_OPTIMIZED_ARGUMENTS, &failure);
// Ensure that the frame has not since materialized an arguments object.
masm.branchTest32(Assembler::NonZero,
Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFlags()),
Imm32(BaselineFrame::HAS_ARGS_OBJ),
&failure);
// Ensure that index is an integer.
masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
Register idx = masm.extractInt32(R1, ExtractTemp1);
AllocatableGeneralRegisterSet regs(availableGeneralRegs(2));
Register scratch = regs.takeAny();
// Load num actual arguments
Address actualArgs(BaselineFrameReg, BaselineFrame::offsetOfNumActualArgs());
masm.loadPtr(actualArgs, scratch);
// Ensure idx < argc
masm.branch32(Assembler::AboveOrEqual, idx, scratch, &failure);
// Load argval
masm.movePtr(BaselineFrameReg, scratch);
masm.addPtr(Imm32(BaselineFrame::offsetOfArg(0)), scratch);
BaseValueIndex element(scratch, idx);
masm.loadValue(element, R0);
// Enter type monitor IC to type-check result.
EmitEnterTypeMonitorIC(masm);
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
MOZ_ASSERT(which_ == ICGetElem_Arguments::Mapped ||
which_ == ICGetElem_Arguments::Unmapped);
const Class* clasp = (which_ == ICGetElem_Arguments::Mapped)
? &MappedArgumentsObject::class_
: &UnmappedArgumentsObject::class_;
AllocatableGeneralRegisterSet regs(availableGeneralRegs(2));
Register scratchReg = regs.takeAny();
// Guard on input being an arguments object.
masm.branchTestObject(Assembler::NotEqual, R0, &failure);
Register objReg = masm.extractObject(R0, ExtractTemp0);
masm.branchTestObjClass(Assembler::NotEqual, objReg, scratchReg, clasp, &failure);
// Guard on index being int32
masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
Register idxReg = masm.extractInt32(R1, ExtractTemp1);
// Get initial ArgsObj length value.
masm.unboxInt32(Address(objReg, ArgumentsObject::getInitialLengthSlotOffset()), scratchReg);
// Test if length has been overridden.
masm.branchTest32(Assembler::NonZero,
scratchReg,
Imm32(ArgumentsObject::LENGTH_OVERRIDDEN_BIT),
&failure);
// Length has not been overridden: shift out the packed bits and ensure idx < length.
masm.rshiftPtr(Imm32(ArgumentsObject::PACKED_BITS_COUNT), scratchReg);
masm.branch32(Assembler::AboveOrEqual, idxReg, scratchReg, &failure);
// Length check succeeded; now check the deleted-elements bit. We clobber potential
// type regs now. Inputs will have to be reconstructed if we fail after this point,
// but that's unlikely.
Label failureReconstructInputs;
regs = availableGeneralRegs(0);
regs.takeUnchecked(objReg);
regs.takeUnchecked(idxReg);
regs.take(scratchReg);
Register argData = regs.takeAny();
Register tempReg = regs.takeAny();
// Load ArgumentsData
masm.loadPrivate(Address(objReg, ArgumentsObject::getDataSlotOffset()), argData);
// Load deletedBits bitArray pointer into scratchReg
masm.loadPtr(Address(argData, offsetof(ArgumentsData, deletedBits)), scratchReg);
// In tempReg, calculate index of word containing bit: (idx >> logBitsPerWord)
masm.movePtr(idxReg, tempReg);
const uint32_t shift = mozilla::tl::FloorLog2<(sizeof(size_t) * JS_BITS_PER_BYTE)>::value;
MOZ_ASSERT(shift == 5 || shift == 6);
masm.rshiftPtr(Imm32(shift), tempReg);
masm.loadPtr(BaseIndex(scratchReg, tempReg, ScaleFromElemWidth(sizeof(size_t))), scratchReg);
// Don't bother testing the specific bit; if any bit is set in the word, fail.
masm.branchPtr(Assembler::NotEqual, scratchReg, ImmPtr(nullptr), &failureReconstructInputs);
// Load the value. Use scratchReg and tempReg to form a ValueOperand to load into.
masm.addPtr(Imm32(ArgumentsData::offsetOfArgs()), argData);
regs.add(scratchReg);
regs.add(tempReg);
ValueOperand tempVal = regs.takeAnyValue();
masm.loadValue(BaseValueIndex(argData, idxReg), tempVal);
// Make sure that this is not a FORWARD_TO_CALL_SLOT magic value.
masm.branchTestMagic(Assembler::Equal, tempVal, &failureReconstructInputs);
// Copy value from temp to R0.
masm.moveValue(tempVal, R0);
// Type-check result
EmitEnterTypeMonitorIC(masm);
// Failed, but the inputs were unboxed into an object and an int32; re-box
// them into Values before jumping to the next stub.
masm.bind(&failureReconstructInputs);
masm.tagValue(JSVAL_TYPE_OBJECT, objReg, R0);
masm.tagValue(JSVAL_TYPE_INT32, idxReg, R1);
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
//
// SetElem_Fallback
//
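// Check whether the shapes recorded in a SetElem_DenseOrUnboxedArrayAdd stub
// still describe |obj| and its prototype chain: shape(0) is the receiver's
// own shape, and shape(i + 1) the last property of the i'th prototype.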
static bool
SetElemAddHasSameShapes(ICSetElem_DenseOrUnboxedArrayAdd* stub, JSObject* obj)
{
static const size_t MAX_DEPTH = ICSetElem_DenseOrUnboxedArrayAdd::MAX_PROTO_CHAIN_DEPTH;
ICSetElem_DenseOrUnboxedArrayAddImpl<MAX_DEPTH>* nstub = stub->toImplUnchecked<MAX_DEPTH>();
if (obj->maybeShape() != nstub->shape(0))
return false;
JSObject* proto = obj->getProto();
for (size_t i = 0; i < stub->protoChainDepth(); i++) {
if (!proto->isNative())
return false;
if (proto->as<NativeObject>().lastProperty() != nstub->shape(i + 1))
return false;
proto = proto->getProto();
if (!proto) {
if (i != stub->protoChainDepth() - 1)
return false;
break;
}
}
return true;
}
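// Scan the fallback stub's chain for an existing stub of the given kind that
// already covers |obj|'s group and shape(s), so duplicates are not attached.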
static bool
DenseOrUnboxedArraySetElemStubExists(JSContext* cx, ICStub::Kind kind,
ICSetElem_Fallback* stub, HandleObject obj)
{
MOZ_ASSERT(kind == ICStub::SetElem_DenseOrUnboxedArray ||
kind == ICStub::SetElem_DenseOrUnboxedArrayAdd);
for (ICStubConstIterator iter = stub->beginChainConst(); !iter.atEnd(); iter++) {
if (kind == ICStub::SetElem_DenseOrUnboxedArray && iter->isSetElem_DenseOrUnboxedArray()) {
ICSetElem_DenseOrUnboxedArray* nstub = iter->toSetElem_DenseOrUnboxedArray();
if (obj->maybeShape() == nstub->shape() && obj->getGroup(cx) == nstub->group())
return true;
}
if (kind == ICStub::SetElem_DenseOrUnboxedArrayAdd &&
    iter->isSetElem_DenseOrUnboxedArrayAdd())
{
ICSetElem_DenseOrUnboxedArrayAdd* nstub = iter->toSetElem_DenseOrUnboxedArrayAdd();
if (obj->getGroup(cx) == nstub->group() && SetElemAddHasSameShapes(nstub, obj))
return true;
}
}
return false;
}
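// Check for an existing TypedArray SetElem stub that matches |obj|'s shape
// and the expected out-of-bounds behavior.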
static bool
TypedArraySetElemStubExists(ICSetElem_Fallback* stub, HandleObject obj, bool expectOOB)
{
for (ICStubConstIterator iter = stub->beginChainConst(); !iter.atEnd(); iter++) {
if (!iter->isSetElem_TypedArray())
continue;
ICSetElem_TypedArray* taStub = iter->toSetElem_TypedArray();
if (obj->maybeShape() == taStub->shape() && taStub->expectOutOfBounds() == expectOOB)
return true;
}
return false;
}
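// Unlink an existing in-bounds-only TypedArray SetElem stub for |obj| so it
// can be replaced by one that also tolerates out-of-bounds indexes.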
static bool
RemoveExistingTypedArraySetElemStub(JSContext* cx, ICSetElem_Fallback* stub, HandleObject obj)
{
for (ICStubIterator iter = stub->beginChain(); !iter.atEnd(); iter++) {
if (!iter->isSetElem_TypedArray())
continue;
if (obj->maybeShape() != iter->toSetElem_TypedArray()->shape())
continue;
// TypedArraySetElem stubs are only removed using this procedure when they
// are being replaced with one that expects out-of-bounds indexes.
MOZ_ASSERT(!iter->toSetElem_TypedArray()->expectOutOfBounds());
iter.unlink(cx);
return true;
}
return false;
}
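// Decide whether this dense or unboxed-array SetElem can be optimized, and if
// so, whether it is a plain in-bounds overwrite or an "adding" store that
// grew the initialized length by exactly one. Roughly: optimize iff the shape
// and capacity are unchanged and index < initLength, and treat it as adding
// iff initLength == oldInitLength + 1 with index == oldInitLength. The adding
// case is only safe when nothing on the prototype chain can intercept the
// indexed write, so the chain is walked and its depth reported through
// |protoDepthOut|.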
static bool
CanOptimizeDenseOrUnboxedArraySetElem(JSObject* obj, uint32_t index,
Shape* oldShape, uint32_t oldCapacity, uint32_t oldInitLength,
bool* isAddingCaseOut, size_t* protoDepthOut)
{
uint32_t initLength = GetAnyBoxedOrUnboxedInitializedLength(obj);
uint32_t capacity = GetAnyBoxedOrUnboxedCapacity(obj);
*isAddingCaseOut = false;
*protoDepthOut = 0;
// Some initial sanity checks.
if (initLength < oldInitLength || capacity < oldCapacity)
return false;
// Unboxed arrays need to be able to emit floating point code.
if (obj->is<UnboxedArrayObject>() && !obj->runtimeFromMainThread()->jitSupportsFloatingPoint)
return false;
Shape* shape = obj->maybeShape();
// Cannot optimize if the shape changed.
if (oldShape != shape)
return false;
// Cannot optimize if the capacity changed.
if (oldCapacity != capacity)
return false;
// Cannot optimize if the index doesn't fit within the new initialized length.
if (index >= initLength)
return false;
// Cannot optimize if the element at the target index is a hole after the set.
if (obj->isNative() && !obj->as<NativeObject>().containsDenseElement(index))
return false;
// At this point, if we know that the initLength did not change, then
// an optimized set is possible.
if (oldInitLength == initLength)
return true;
// If it did change, ensure that it changed specifically by incrementing by 1
// to accommodate this particular indexed set.
if (oldInitLength + 1 != initLength)
return false;
if (index != oldInitLength)
return false;
// The checks are not yet complete. The object may have a setter definition
// that handles this particular integer write, either directly, on a
// prototype, or on the target object of a prototype that is a proxy.
// Scan the prototype and shape chain to make sure that this is not the case.
if (obj->isIndexed())
return false;
JSObject* curObj = obj->getProto();
while (curObj) {
++*protoDepthOut;
if (!curObj->isNative() || curObj->isIndexed())
return false;
curObj = curObj->getProto();
}
if (*protoDepthOut > ICSetElem_DenseOrUnboxedArrayAdd::MAX_PROTO_CHAIN_DEPTH)
return false;
*isAddingCaseOut = true;
return true;
}
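// Fallback for the SETELEM family of ops: perform the assignment through the
// generic VM paths, then, using the capacity and initialized length captured
// before the store, try to attach an optimized dense/unboxed-array or
// typed-array stub for subsequent hits.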
static bool
DoSetElemFallback(JSContext* cx, BaselineFrame* frame, ICSetElem_Fallback* stub_, Value* stack,
HandleValue objv, HandleValue index, HandleValue rhs)
{
// This fallback stub may trigger debug mode toggling.
DebugModeOSRVolatileStub<ICSetElem_Fallback*> stub(frame, stub_);
RootedScript script(cx, frame->script());
jsbytecode* pc = stub->icEntry()->pc(script);
JSOp op = JSOp(*pc);
FallbackICSpew(cx, stub, "SetElem(%s)", CodeName[JSOp(*pc)]);
MOZ_ASSERT(op == JSOP_SETELEM ||
op == JSOP_STRICTSETELEM ||
op == JSOP_INITELEM ||
op == JSOP_INITHIDDENELEM ||
op == JSOP_INITELEM_ARRAY ||
op == JSOP_INITELEM_INC);
RootedObject obj(cx, ToObjectFromStack(cx, objv));
if (!obj)
return false;
RootedShape oldShape(cx, obj->maybeShape());
// Capture the old capacity and initialized length before performing the set.
uint32_t oldCapacity = 0;
uint32_t oldInitLength = 0;
if (index.isInt32() && index.toInt32() >= 0) {
oldCapacity = GetAnyBoxedOrUnboxedCapacity(obj);
oldInitLength = GetAnyBoxedOrUnboxedInitializedLength(obj);
}
if (op == JSOP_INITELEM || op == JSOP_INITHIDDENELEM) {
if (!InitElemOperation(cx, pc, obj, index, rhs))
return false;
} else if (op == JSOP_INITELEM_ARRAY) {
MOZ_ASSERT(uint32_t(index.toInt32()) <= INT32_MAX,
"the bytecode emitter must fail to compile code that would "
"produce JSOP_INITELEM_ARRAY with an index exceeding "
"int32_t range");
MOZ_ASSERT(uint32_t(index.toInt32()) == GET_UINT32(pc));
if (!InitArrayElemOperation(cx, pc, obj, index.toInt32(), rhs))
return false;
} else if (op == JSOP_INITELEM_INC) {
if (!InitArrayElemOperation(cx, pc, obj, index.toInt32(), rhs))
return false;
} else {
if (!SetObjectElement(cx, obj, index, rhs, JSOp(*pc) == JSOP_STRICTSETELEM, script, pc))
return false;
}
// Don't try to attach stubs for JSOP_INITHIDDENELEM: the optimized stubs
// cannot currently encode the different (non-enumerable) property attributes.
if (op == JSOP_INITHIDDENELEM)
return true;
// Overwrite the object on the stack (pushed for the decompiler) with the rhs.
MOZ_ASSERT(stack[2] == objv);
stack[2] = rhs;
// Check if debug mode toggling made the stub invalid.
if (stub.invalid())
return true;
if (stub->numOptimizedStubs() >= ICSetElem_Fallback::MAX_OPTIMIZED_STUBS) {
// TODO: Discard all stubs in this IC and replace with inert megamorphic stub.
// But for now we just bail.
return true;
}
// Try to generate new stubs.
if (IsNativeOrUnboxedDenseElementAccess(obj, index) && !rhs.isMagic(JS_ELEMENTS_HOLE)) {
bool addingCase;
size_t protoDepth;
if (CanOptimizeDenseOrUnboxedArraySetElem(obj, index.toInt32(),
oldShape, oldCapacity, oldInitLength,
&addingCase, &protoDepth))
{
RootedShape shape(cx, obj->maybeShape());
RootedObjectGroup group(cx, obj->getGroup(cx));
if (!group)
return false;
if (addingCase &&
!DenseOrUnboxedArraySetElemStubExists(cx, ICStub::SetElem_DenseOrUnboxedArrayAdd,
stub, obj))
{
JitSpew(JitSpew_BaselineIC,
" Generating SetElem_DenseOrUnboxedArrayAdd stub "
"(shape=%p, group=%p, protoDepth=%u)",
shape.get(), group.get(), protoDepth);
ICSetElemDenseOrUnboxedArrayAddCompiler compiler(cx, obj, protoDepth);
ICUpdatedStub* newStub = compiler.getStub(compiler.getStubSpace(script));
if (!newStub)
return false;
if (compiler.needsUpdateStubs() &&
!newStub->addUpdateStubForValue(cx, script, obj, JSID_VOIDHANDLE, rhs))
{
return false;
}
stub->addNewStub(newStub);
} else if (!addingCase &&
!DenseOrUnboxedArraySetElemStubExists(cx,
ICStub::SetElem_DenseOrUnboxedArray,
stub, obj))
{
JitSpew(JitSpew_BaselineIC,
" Generating SetElem_DenseOrUnboxedArray stub (shape=%p, group=%p)",
shape.get(), group.get());
ICSetElem_DenseOrUnboxedArray::Compiler compiler(cx, shape, group);
ICUpdatedStub* newStub = compiler.getStub(compiler.getStubSpace(script));
if (!newStub)
return false;
if (compiler.needsUpdateStubs() &&
!newStub->addUpdateStubForValue(cx, script, obj, JSID_VOIDHANDLE, rhs))
{
return false;
}
stub->addNewStub(newStub);
}
}
return true;
}
if ((IsAnyTypedArray(obj.get()) || IsPrimitiveArrayTypedObject(obj)) &&
index.isNumber() &&
rhs.isNumber())
{
if (!cx->runtime()->jitSupportsFloatingPoint &&
(TypedThingRequiresFloatingPoint(obj) || index.isDouble()))
{
return true;
}
bool expectOutOfBounds;
double idx = index.toNumber();
if (IsAnyTypedArray(obj)) {
expectOutOfBounds = (idx < 0 || idx >= double(AnyTypedArrayLength(obj)));
} else {
// Typed objects throw on out of bounds accesses. Don't attach
// a stub in this case.
if (idx < 0 || idx >= double(obj->as<TypedObject>().length()))
return true;
expectOutOfBounds = false;
// Don't attach stubs if typed objects in the compartment might be
// neutered, as the stub will always bail out.
if (cx->compartment()->neuteredTypedObjects)
return true;
}
if (!TypedArraySetElemStubExists(stub, obj, expectOutOfBounds)) {
// Remove any existing TypedArraySetElemStub that doesn't handle out-of-bounds
if (expectOutOfBounds)
RemoveExistingTypedArraySetElemStub(cx, stub, obj);
Shape* shape = obj->maybeShape();
Scalar::Type type = TypedThingElementType(obj);
JitSpew(JitSpew_BaselineIC,
" Generating SetElem_TypedArray stub (shape=%p, type=%u, oob=%s)",
shape, type, expectOutOfBounds ?