/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
* vim: set ts=8 sts=4 et sw=4 tw=99:
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "jit/BaselineJIT.h"
#include "builtin/Eval.h"
#include "jit/BaselineCompiler.h"
#include "jit/BaselineHelpers.h"
#include "jit/BaselineIC.h"
#include "jit/IonLinker.h"
#include "jit/IonSpewer.h"
#include "jit/VMFunctions.h"
#include "builtin/Iterator-inl.h"
#include "jit/IonFrames-inl.h"
#include "vm/Interpreter-inl.h"
namespace js {
namespace jit {
#ifdef DEBUG
void
FallbackICSpew(JSContext *cx, ICFallbackStub *stub, const char *fmt, ...)
{
if (IonSpewEnabled(IonSpew_BaselineICFallback)) {
RootedScript script(cx, GetTopIonJSScript(cx));
jsbytecode *pc = stub->icEntry()->pc(script);
char fmtbuf[100];
va_list args;
va_start(args, fmt);
vsnprintf(fmtbuf, 100, fmt, args);
va_end(args);
IonSpew(IonSpew_BaselineICFallback,
"Fallback hit for (%s:%d) (pc=%d,line=%d,uses=%d,stubs=%d): %s",
script->filename(),
script->lineno,
(int) (pc - script->code),
PCToLineNumber(script, pc),
script->getUseCount(),
(int) stub->numOptimizedStubs(),
fmtbuf);
}
}
void
TypeFallbackICSpew(JSContext *cx, ICTypeMonitor_Fallback *stub, const char *fmt, ...)
{
if (IonSpewEnabled(IonSpew_BaselineICFallback)) {
RootedScript script(cx, GetTopIonJSScript(cx));
jsbytecode *pc = stub->icEntry()->pc(script);
char fmtbuf[100];
va_list args;
va_start(args, fmt);
vsnprintf(fmtbuf, 100, fmt, args);
va_end(args);
IonSpew(IonSpew_BaselineICFallback,
"Type monitor fallback hit for (%s:%d) (pc=%d,line=%d,uses=%d,stubs=%d): %s",
script->filename(),
script->lineno,
(int) (pc - script->code),
PCToLineNumber(script, pc),
script->getUseCount(),
(int) stub->numOptimizedMonitorStubs(),
fmtbuf);
}
}
#else
#define FallbackICSpew(...)
#define TypeFallbackICSpew(...)
#endif
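// Each ICEntry points at a chain of optimized stubs that is always terminated
// by the entry's fallback stub:
//
//   ICEntry --> stub0 --> stub1 --> ... --> fallback stub
//
// Following next() links from any stub therefore reaches the fallback stub,
// which is what getChainFallback() below relies on.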
ICFallbackStub *
ICEntry::fallbackStub() const
{
return firstStub()->getChainFallback();
}
ICStubConstIterator &
ICStubConstIterator::operator++()
{
JS_ASSERT(currentStub_ != NULL);
currentStub_ = currentStub_->next();
return *this;
}
ICStubIterator::ICStubIterator(ICFallbackStub *fallbackStub, bool end)
: icEntry_(fallbackStub->icEntry()),
fallbackStub_(fallbackStub),
previousStub_(NULL),
currentStub_(end ? fallbackStub : icEntry_->firstStub()),
unlinked_(false)
{ }
ICStubIterator &
ICStubIterator::operator++()
{
JS_ASSERT(currentStub_->next() != NULL);
if (!unlinked_)
previousStub_ = currentStub_;
currentStub_ = currentStub_->next();
unlinked_ = false;
return *this;
}
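// Unlink the stub at the current iterator position from the chain. A minimal
// usage sketch (mirroring unlinkStubsWithKind below; shouldDrop is a
// hypothetical predicate):
//
//   for (ICStubIterator iter = fallbackStub->beginChain(); !iter.atEnd(); iter++) {
//       if (shouldDrop(*iter))
//           iter.unlink(zone);
//   }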
void
ICStubIterator::unlink(Zone *zone)
{
JS_ASSERT(currentStub_->next() != NULL);
JS_ASSERT(currentStub_ != fallbackStub_);
JS_ASSERT(!unlinked_);
fallbackStub_->unlinkStub(zone, previousStub_, currentStub_);
// Mark the current iterator position as unlinked, so operator++ works properly.
unlinked_ = true;
}
void
ICStub::markCode(JSTracer *trc, const char *name)
{
IonCode *stubIonCode = ionCode();
MarkIonCodeUnbarriered(trc, &stubIonCode, name);
}
void
ICStub::updateCode(IonCode *code)
{
// Write barrier on the old code.
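// (With incremental GC a pre-barrier is needed so the collector sees the
// edge to the old IonCode before it is overwritten.)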
#ifdef JSGC_INCREMENTAL
IonCode::writeBarrierPre(ionCode());
#endif
stubCode_ = code->raw();
}
/* static */ void
ICStub::trace(JSTracer *trc)
{
markCode(trc, "baseline-stub-ioncode");
// If the stub is a monitored fallback stub, then mark the monitor ICs hanging
// off of that stub. We don't need to worry about the regular monitored stubs,
// because they always have a monitored fallback stub that references the same
// stub chain.
if (isMonitoredFallback()) {
ICTypeMonitor_Fallback *lastMonStub = toMonitoredFallbackStub()->fallbackMonitorStub();
for (ICStubConstIterator iter = lastMonStub->firstMonitorStub(); !iter.atEnd(); iter++) {
JS_ASSERT_IF(iter->next() == NULL, *iter == lastMonStub);
iter->markCode(trc, "baseline-monitor-stub-ioncode");
}
}
if (isUpdated()) {
for (ICStubConstIterator iter = toUpdatedStub()->firstUpdateStub(); !iter.atEnd(); iter++) {
JS_ASSERT_IF(iter->next() == NULL, iter->isTypeUpdate_Fallback());
iter->markCode(trc, "baseline-update-stub-ioncode");
}
}
switch (kind()) {
case ICStub::Call_Scripted: {
ICCall_Scripted *callStub = toCall_Scripted();
MarkScript(trc, &callStub->calleeScript(), "baseline-callscripted-callee");
break;
}
case ICStub::Call_Native: {
ICCall_Native *callStub = toCall_Native();
MarkObject(trc, &callStub->callee(), "baseline-callnative-callee");
break;
}
case ICStub::GetElem_Native: {
ICGetElem_Native *getElemStub = toGetElem_Native();
MarkShape(trc, &getElemStub->shape(), "baseline-getelem-native-shape");
gc::MarkValue(trc, &getElemStub->idval(), "baseline-getelem-native-idval");
break;
}
case ICStub::GetElem_NativePrototype: {
ICGetElem_NativePrototype *getElemStub = toGetElem_NativePrototype();
MarkShape(trc, &getElemStub->shape(), "baseline-getelem-nativeproto-shape");
gc::MarkValue(trc, &getElemStub->idval(), "baseline-getelem-nativeproto-idval");
MarkObject(trc, &getElemStub->holder(), "baseline-getelem-nativeproto-holder");
MarkShape(trc, &getElemStub->holderShape(), "baseline-getelem-nativeproto-holdershape");
break;
}
case ICStub::GetElem_Dense: {
ICGetElem_Dense *getElemStub = toGetElem_Dense();
MarkShape(trc, &getElemStub->shape(), "baseline-getelem-dense-shape");
break;
}
case ICStub::GetElem_TypedArray: {
ICGetElem_TypedArray *getElemStub = toGetElem_TypedArray();
MarkShape(trc, &getElemStub->shape(), "baseline-getelem-typedarray-shape");
break;
}
case ICStub::SetElem_Dense: {
ICSetElem_Dense *setElemStub = toSetElem_Dense();
MarkShape(trc, &setElemStub->shape(), "baseline-setelem-dense-shape");
MarkTypeObject(trc, &setElemStub->type(), "baseline-setelem-dense-type");
break;
}
case ICStub::SetElem_DenseAdd: {
ICSetElem_DenseAdd *setElemStub = toSetElem_DenseAdd();
MarkTypeObject(trc, &setElemStub->type(), "baseline-setelem-denseadd-type");
JS_STATIC_ASSERT(ICSetElem_DenseAdd::MAX_PROTO_CHAIN_DEPTH == 4);
switch (setElemStub->protoChainDepth()) {
case 0: setElemStub->toImpl<0>()->traceShapes(trc); break;
case 1: setElemStub->toImpl<1>()->traceShapes(trc); break;
case 2: setElemStub->toImpl<2>()->traceShapes(trc); break;
case 3: setElemStub->toImpl<3>()->traceShapes(trc); break;
case 4: setElemStub->toImpl<4>()->traceShapes(trc); break;
default: JS_NOT_REACHED("Invalid proto stub.");
}
break;
}
case ICStub::SetElem_TypedArray: {
ICSetElem_TypedArray *setElemStub = toSetElem_TypedArray();
MarkShape(trc, &setElemStub->shape(), "baseline-setelem-typedarray-shape");
break;
}
case ICStub::TypeMonitor_SingleObject: {
ICTypeMonitor_SingleObject *monitorStub = toTypeMonitor_SingleObject();
MarkObject(trc, &monitorStub->object(), "baseline-monitor-singleobject");
break;
}
case ICStub::TypeMonitor_TypeObject: {
ICTypeMonitor_TypeObject *monitorStub = toTypeMonitor_TypeObject();
MarkTypeObject(trc, &monitorStub->type(), "baseline-monitor-typeobject");
break;
}
case ICStub::TypeUpdate_SingleObject: {
ICTypeUpdate_SingleObject *updateStub = toTypeUpdate_SingleObject();
MarkObject(trc, &updateStub->object(), "baseline-update-singleobject");
break;
}
case ICStub::TypeUpdate_TypeObject: {
ICTypeUpdate_TypeObject *updateStub = toTypeUpdate_TypeObject();
MarkTypeObject(trc, &updateStub->type(), "baseline-update-typeobject");
break;
}
case ICStub::Profiler_PushFunction: {
ICProfiler_PushFunction *pushFunStub = toProfiler_PushFunction();
MarkScript(trc, &pushFunStub->script(), "baseline-profilerpushfunction-stub-script");
break;
}
case ICStub::GetName_Global: {
ICGetName_Global *globalStub = toGetName_Global();
MarkShape(trc, &globalStub->shape(), "baseline-global-stub-shape");
break;
}
case ICStub::GetName_Scope0:
static_cast<ICGetName_Scope<0>*>(this)->traceScopes(trc);
break;
case ICStub::GetName_Scope1:
static_cast<ICGetName_Scope<1>*>(this)->traceScopes(trc);
break;
case ICStub::GetName_Scope2:
static_cast<ICGetName_Scope<2>*>(this)->traceScopes(trc);
break;
case ICStub::GetName_Scope3:
static_cast<ICGetName_Scope<3>*>(this)->traceScopes(trc);
break;
case ICStub::GetName_Scope4:
static_cast<ICGetName_Scope<4>*>(this)->traceScopes(trc);
break;
case ICStub::GetName_Scope5:
static_cast<ICGetName_Scope<5>*>(this)->traceScopes(trc);
break;
case ICStub::GetName_Scope6:
static_cast<ICGetName_Scope<6>*>(this)->traceScopes(trc);
break;
case ICStub::GetIntrinsic_Constant: {
ICGetIntrinsic_Constant *constantStub = toGetIntrinsic_Constant();
gc::MarkValue(trc, &constantStub->value(), "baseline-getintrinsic-constant-value");
break;
}
case ICStub::GetProp_String: {
ICGetProp_String *propStub = toGetProp_String();
MarkShape(trc, &propStub->stringProtoShape(), "baseline-getpropstring-stub-shape");
break;
}
case ICStub::GetProp_Native: {
ICGetProp_Native *propStub = toGetProp_Native();
MarkShape(trc, &propStub->shape(), "baseline-getpropnative-stub-shape");
break;
}
case ICStub::GetProp_NativePrototype: {
ICGetProp_NativePrototype *propStub = toGetProp_NativePrototype();
MarkShape(trc, &propStub->shape(), "baseline-getpropnativeproto-stub-shape");
MarkObject(trc, &propStub->holder(), "baseline-getpropnativeproto-stub-holder");
MarkShape(trc, &propStub->holderShape(), "baseline-getpropnativeproto-stub-holdershape");
break;
}
case ICStub::GetProp_CallDOMProxyNative:
case ICStub::GetProp_CallDOMProxyWithGenerationNative: {
ICGetPropCallDOMProxyNativeStub *propStub;
if (kind() == ICStub::GetProp_CallDOMProxyNative)
propStub = toGetProp_CallDOMProxyNative();
else
propStub = toGetProp_CallDOMProxyWithGenerationNative();
MarkShape(trc, &propStub->shape(), "baseline-getproplistbasenative-stub-shape");
if (propStub->expandoShape()) {
MarkShape(trc, &propStub->expandoShape(),
"baseline-getproplistbasenative-stub-expandoshape");
}
MarkObject(trc, &propStub->holder(), "baseline-getproplistbasenative-stub-holder");
MarkShape(trc, &propStub->holderShape(), "baseline-getproplistbasenative-stub-holdershape");
MarkObject(trc, &propStub->getter(), "baseline-getproplistbasenative-stub-getter");
break;
}
case ICStub::GetProp_DOMProxyShadowed: {
ICGetProp_DOMProxyShadowed *propStub = toGetProp_DOMProxyShadowed();
MarkShape(trc, &propStub->shape(), "baseline-getproplistbaseshadowed-stub-shape");
MarkString(trc, &propStub->name(), "baseline-getproplistbaseshadowed-stub-name");
break;
}
case ICStub::GetProp_CallScripted: {
ICGetProp_CallScripted *callStub = toGetProp_CallScripted();
MarkShape(trc, &callStub->shape(), "baseline-getpropcallscripted-stub-shape");
MarkObject(trc, &callStub->holder(), "baseline-getpropcallscripted-stub-holder");
MarkShape(trc, &callStub->holderShape(), "baseline-getpropcallscripted-stub-holdershape");
MarkObject(trc, &callStub->getter(), "baseline-getpropcallscripted-stub-getter");
break;
}
case ICStub::GetProp_CallNative: {
ICGetProp_CallNative *callStub = toGetProp_CallNative();
MarkShape(trc, &callStub->shape(), "baseline-getpropcallnative-stub-shape");
MarkObject(trc, &callStub->holder(), "baseline-getpropcallnative-stub-holder");
MarkShape(trc, &callStub->holderShape(), "baseline-getpropcallnative-stub-holdershape");
MarkObject(trc, &callStub->getter(), "baseline-getpropcallnative-stub-getter");
break;
}
case ICStub::SetProp_Native: {
ICSetProp_Native *propStub = toSetProp_Native();
MarkShape(trc, &propStub->shape(), "baseline-setpropnative-stub-shape");
MarkTypeObject(trc, &propStub->type(), "baseline-setpropnative-stub-type");
break;
}
case ICStub::SetProp_NativeAdd: {
ICSetProp_NativeAdd *propStub = toSetProp_NativeAdd();
MarkTypeObject(trc, &propStub->type(), "baseline-setpropnativeadd-stub-type");
MarkShape(trc, &propStub->newShape(), "baseline-setpropnativeadd-stub-newshape");
JS_STATIC_ASSERT(ICSetProp_NativeAdd::MAX_PROTO_CHAIN_DEPTH == 4);
switch (propStub->protoChainDepth()) {
case 0: propStub->toImpl<0>()->traceShapes(trc); break;
case 1: propStub->toImpl<1>()->traceShapes(trc); break;
case 2: propStub->toImpl<2>()->traceShapes(trc); break;
case 3: propStub->toImpl<3>()->traceShapes(trc); break;
case 4: propStub->toImpl<4>()->traceShapes(trc); break;
default: JS_NOT_REACHED("Invalid proto stub.");
}
break;
}
case ICStub::SetProp_CallScripted: {
ICSetProp_CallScripted *callStub = toSetProp_CallScripted();
MarkShape(trc, &callStub->shape(), "baseline-setpropcallscripted-stub-shape");
MarkObject(trc, &callStub->holder(), "baseline-setpropcallscripted-stub-holder");
MarkShape(trc, &callStub->holderShape(), "baseline-setpropcallscripted-stub-holdershape");
MarkObject(trc, &callStub->setter(), "baseline-setpropcallscripted-stub-setter");
break;
}
case ICStub::SetProp_CallNative: {
ICSetProp_CallNative *callStub = toSetProp_CallNative();
MarkShape(trc, &callStub->shape(), "baseline-setpropcallnative-stub-shape");
MarkObject(trc, &callStub->holder(), "baseline-setpropcallnative-stub-holder");
MarkShape(trc, &callStub->holderShape(), "baseline-setpropcallnative-stub-holdershape");
MarkObject(trc, &callStub->setter(), "baseline-setpropcallnative-stub-setter");
break;
}
default:
break;
}
}
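// Unlink |stub| from this fallback stub's chain. Sketch of the common case:
//
//   before: entry --> prev --> stub --> next --> ... --> fallback
//   after:  entry --> prev --> next --> ... --> fallback
//
// If |stub| was the last optimized stub (i.e. stub->next() == this), the
// fallback's lastStubPtrAddr_ must also be repointed at the preceding link.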
void
ICFallbackStub::unlinkStub(Zone *zone, ICStub *prev, ICStub *stub)
{
JS_ASSERT(stub->next());
// If stub is the last optimized stub, update lastStubPtrAddr.
if (stub->next() == this) {
JS_ASSERT(lastStubPtrAddr_ == stub->addressOfNext());
if (prev)
lastStubPtrAddr_ = prev->addressOfNext();
else
lastStubPtrAddr_ = icEntry()->addressOfFirstStub();
*lastStubPtrAddr_ = this;
} else {
if (prev) {
JS_ASSERT(prev->next() == stub);
prev->setNext(stub->next());
} else {
JS_ASSERT(icEntry()->firstStub() == stub);
icEntry()->setFirstStub(stub->next());
}
}
JS_ASSERT(numOptimizedStubs_ > 0);
numOptimizedStubs_--;
if (zone->needsBarrier()) {
// We are removing edges from ICStub to gcthings. Perform one final trace
// of the stub for incremental GC, as it must know about those edges.
stub->trace(zone->barrierTracer());
}
if (ICStub::CanMakeCalls(stub->kind()) && stub->isMonitored()) {
// This stub can make calls so we can return to it if it's on the stack.
// We just have to reset its firstMonitorStub_ field to avoid a stale
// pointer when purgeOptimizedStubs destroys all optimized monitor
// stubs (unlinked stubs won't be updated).
ICTypeMonitor_Fallback *monitorFallback = toMonitoredFallbackStub()->fallbackMonitorStub();
stub->toMonitoredStub()->resetFirstMonitorStub(monitorFallback);
}
#ifdef DEBUG
// Poison stub code to ensure we don't call this stub again. However, if this
// stub can make calls, a pointer to it may be stored in a stub frame on the
// stack, so we can't touch the stubCode_ or GC will crash when marking this
// pointer.
if (!ICStub::CanMakeCalls(stub->kind()))
stub->stubCode_ = (uint8_t *)0xbad;
#endif
}
void
ICFallbackStub::unlinkStubsWithKind(JSContext *cx, ICStub::Kind kind)
{
for (ICStubIterator iter = beginChain(); !iter.atEnd(); iter++) {
if (iter->kind() == kind)
iter.unlink(cx->zone());
}
}
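// Throw away all optimized monitor stubs, leaving the fallback monitor stub
// (this stub) as the only entry in the chain, and, when this chain hangs off
// a main fallback stub, repoint the firstMonitorStub_ of every monitored main
// stub back at it.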
void
ICTypeMonitor_Fallback::resetMonitorStubChain(Zone *zone)
{
if (zone->needsBarrier()) {
// We are removing edges from monitored stubs to gcthings (IonCode).
// Perform one final trace of all monitor stubs for incremental GC,
// as it must know about those edges.
if (hasFallbackStub_) {
for (ICStub *s = firstMonitorStub_; !s->isTypeMonitor_Fallback(); s = s->next())
s->trace(zone->barrierTracer());
}
}
firstMonitorStub_ = this;
numOptimizedMonitorStubs_ = 0;
if (hasFallbackStub_) {
lastMonitorStubPtrAddr_ = NULL;
// Reset firstMonitorStub_ field of all monitored stubs.
for (ICStubConstIterator iter = mainFallbackStub_->beginChainConst();
!iter.atEnd(); iter++)
{
if (!iter->isMonitored())
continue;
iter->toMonitoredStub()->resetFirstMonitorStub(this);
}
} else {
icEntry_->setFirstStub(this);
lastMonitorStubPtrAddr_ = icEntry_->addressOfFirstStub();
}
}
ICMonitoredStub::ICMonitoredStub(Kind kind, IonCode *stubCode, ICStub *firstMonitorStub)
: ICStub(kind, ICStub::Monitored, stubCode),
firstMonitorStub_(firstMonitorStub)
{
// If the first monitored stub is an ICTypeMonitor_Fallback stub, then
// double check that _its_ firstMonitorStub is the same as this one.
JS_ASSERT_IF(firstMonitorStub_->isTypeMonitor_Fallback(),
firstMonitorStub_->toTypeMonitor_Fallback()->firstMonitorStub() ==
firstMonitorStub_);
}
bool
ICMonitoredFallbackStub::initMonitoringChain(JSContext *cx, ICStubSpace *space)
{
JS_ASSERT(fallbackMonitorStub_ == NULL);
ICTypeMonitor_Fallback::Compiler compiler(cx, this);
ICTypeMonitor_Fallback *stub = compiler.getStub(space);
if (!stub)
return false;
fallbackMonitorStub_ = stub;
return true;
}
bool
ICMonitoredFallbackStub::addMonitorStubForValue(JSContext *cx, HandleScript script, HandleValue val)
{
return fallbackMonitorStub_->addMonitorStubForValue(cx, script, val);
}
bool
ICUpdatedStub::initUpdatingChain(JSContext *cx, ICStubSpace *space)
{
JS_ASSERT(firstUpdateStub_ == NULL);
ICTypeUpdate_Fallback::Compiler compiler(cx);
ICTypeUpdate_Fallback *stub = compiler.getStub(space);
if (!stub)
return false;
firstUpdateStub_ = stub;
return true;
}
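// Stub code is shared across stubs: getKey() (presumably the stub kind plus
// any compiler-specific flags) keys a cache in the IonCompartment, so stubs
// that differ only in their GC-thing payloads reuse a single IonCode.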
IonCode *
ICStubCompiler::getStubCode()
{
IonCompartment *ion = cx->compartment()->ionCompartment();
// Check for existing cached stubcode.
uint32_t stubKey = getKey();
IonCode *stubCode = ion->getStubCode(stubKey);
if (stubCode)
return stubCode;
// Compile new stubcode.
MacroAssembler masm;
#ifdef JS_CPU_ARM
masm.setSecondScratchReg(BaselineSecondScratchReg);
#endif
AutoFlushCache afc("ICStubCompiler::getStubCode", cx->runtime()->ionRuntime());
if (!generateStubCode(masm))
return NULL;
Linker linker(masm);
Rooted<IonCode *> newStubCode(cx, linker.newCode(cx, JSC::BASELINE_CODE));
if (!newStubCode)
return NULL;
// After generating code, run postGenerateStubCode()
if (!postGenerateStubCode(masm, newStubCode))
return NULL;
// All barriers are emitted off by default; enable them if needed.
if (cx->zone()->needsBarrier())
newStubCode->togglePreBarriers(true);
// Cache newly compiled stubcode.
if (!ion->putStubCode(stubKey, newStubCode))
return NULL;
JS_ASSERT(entersStubFrame_ == ICStub::CanMakeCalls(kind));
return newStubCode;
}
bool
ICStubCompiler::tailCallVM(const VMFunction &fun, MacroAssembler &masm)
{
IonCompartment *ion = cx->compartment()->ionCompartment();
IonCode *code = ion->getVMWrapper(fun);
if (!code)
return false;
uint32_t argSize = fun.explicitStackSlots() * sizeof(void *);
EmitTailCallVM(code, masm, argSize);
return true;
}
bool
ICStubCompiler::callVM(const VMFunction &fun, MacroAssembler &masm)
{
IonCompartment *ion = cx->compartment()->ionCompartment();
IonCode *code = ion->getVMWrapper(fun);
if (!code)
return false;
EmitCallVM(code, masm);
return true;
}
bool
ICStubCompiler::callTypeUpdateIC(MacroAssembler &masm, uint32_t objectOffset)
{
IonCompartment *ion = cx->compartment()->ionCompartment();
IonCode *code = ion->getVMWrapper(DoTypeUpdateFallbackInfo);
if (!code)
return false;
EmitCallTypeUpdateIC(masm, code, objectOffset);
return true;
}
void
ICStubCompiler::enterStubFrame(MacroAssembler &masm, Register scratch)
{
EmitEnterStubFrame(masm, scratch);
#ifdef DEBUG
entersStubFrame_ = true;
#endif
}
void
ICStubCompiler::leaveStubFrame(MacroAssembler &masm, bool calledIntoIon)
{
JS_ASSERT(entersStubFrame_);
EmitLeaveStubFrame(masm, calledIntoIon);
}
void
ICStubCompiler::guardProfilingEnabled(MacroAssembler &masm, Register scratch, Label *skip)
{
// This should only be called from the following stubs.
JS_ASSERT(kind == ICStub::Call_Scripted || kind == ICStub::Call_AnyScripted ||
kind == ICStub::Call_Native || kind == ICStub::GetProp_CallScripted ||
kind == ICStub::GetProp_CallNative || kind == ICStub::GetProp_CallDOMProxyNative ||
kind == ICStub::Call_ScriptedApplyArguments ||
kind == ICStub::GetProp_CallDOMProxyWithGenerationNative ||
kind == ICStub::GetProp_DOMProxyShadowed ||
kind == ICStub::SetProp_CallScripted || kind == ICStub::SetProp_CallNative);
// Guard on bit in frame that indicates if the SPS frame was pushed in the first
// place. This code is expected to be called from within a stub that has already
// entered a stub frame.
JS_ASSERT(entersStubFrame_);
masm.loadPtr(Address(BaselineFrameReg, 0), scratch);
masm.branchTest32(Assembler::Zero,
Address(scratch, BaselineFrame::reverseOffsetOfFlags()),
Imm32(BaselineFrame::HAS_PUSHED_SPS_FRAME),
skip);
// Check if profiling is enabled
uint32_t *enabledAddr = cx->runtime()->spsProfiler.addressOfEnabled();
masm.branch32(Assembler::Equal, AbsoluteAddress(enabledAddr), Imm32(0), skip);
}
#ifdef JSGC_GENERATIONAL
inline bool
ICStubCompiler::emitPostWriteBarrierSlot(MacroAssembler &masm, Register obj, Register scratch,
GeneralRegisterSet saveRegs)
{
Nursery &nursery = cx->runtime()->gcNursery;
Label skipBarrier;
Label isTenured;
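// The nursery occupies the contiguous range [start, heapEnd). Stores into
// objects that are themselves in the nursery need no post barrier (the
// nursery is scanned in full at minor GC); stores into tenured objects must
// be recorded so a minor GC can find the new edge.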
masm.branchPtr(Assembler::Below, obj, ImmWord(nursery.start()), &isTenured);
masm.branchPtr(Assembler::Below, obj, ImmWord(nursery.heapEnd()), &skipBarrier);
masm.bind(&isTenured);
// void PostWriteBarrier(JSRuntime *rt, JSObject *obj);
#if defined(JS_CPU_ARM) || defined(JS_CPU_MIPS)
saveRegs.add(BaselineTailCallReg);
#endif
saveRegs = GeneralRegisterSet::Intersect(saveRegs, GeneralRegisterSet::Volatile());
masm.PushRegsInMask(saveRegs);
masm.setupUnalignedABICall(2, scratch);
masm.movePtr(ImmWord(cx->runtime()), scratch);
masm.passABIArg(scratch);
masm.passABIArg(obj);
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, PostWriteBarrier));
masm.PopRegsInMask(saveRegs);
masm.bind(&skipBarrier);
return true;
}
#endif // JSGC_GENERATIONAL
//
// UseCount_Fallback
//
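// Walk from the current exit frame, over the baseline stub frame pushed by
// this IC, to the baseline JS frame whose use count triggered the fallback,
// and report whether that frame is a constructing frame.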
static bool
IsTopFrameConstructing(JSContext *cx)
{
JS_ASSERT(cx->currentlyRunningInJit());
JitActivationIterator activations(cx->runtime());
IonFrameIterator iter(activations);
JS_ASSERT(iter.type() == IonFrame_Exit);
++iter;
JS_ASSERT(iter.type() == IonFrame_BaselineStub);
++iter;
JS_ASSERT(iter.isBaselineJS());
return iter.isConstructing();
}
static bool
EnsureCanEnterIon(JSContext *cx, ICUseCount_Fallback *stub, BaselineFrame *frame,
HandleScript script, jsbytecode *pc, void **jitcodePtr)
{
JS_ASSERT(jitcodePtr);
JS_ASSERT(!*jitcodePtr);
bool isLoopEntry = (JSOp(*pc) == JSOP_LOOPENTRY);
bool isConstructing = IsTopFrameConstructing(cx);
MethodStatus stat;
if (isLoopEntry) {
IonSpew(IonSpew_BaselineOSR, " Compile at loop entry!");
stat = CanEnterAtBranch(cx, script, frame, pc, isConstructing);
} else if (frame->isFunctionFrame()) {
IonSpew(IonSpew_BaselineOSR, " Compile function from top for later entry!");
stat = CompileFunctionForBaseline(cx, script, frame, isConstructing);
} else {
return true;
}
if (stat == Method_Error) {
IonSpew(IonSpew_BaselineOSR, " Compile with Ion errored!");
return false;
}
if (stat == Method_CantCompile)
IonSpew(IonSpew_BaselineOSR, " Can't compile with Ion!");
else if (stat == Method_Skipped)
IonSpew(IonSpew_BaselineOSR, " Skipped compile with Ion!");
else if (stat == Method_Compiled)
IonSpew(IonSpew_BaselineOSR, " Compiled with Ion!");
else
JS_NOT_REACHED("Invalid MethodStatus!");
// Failed to compile. Reset use count and return.
if (stat != Method_Compiled) {
// TODO: If stat == Method_CantCompile, insert stub that just skips the useCount
// entirely, instead of resetting it.
bool bailoutExpected = script->hasIonScript() && script->ionScript()->bailoutExpected();
if (stat == Method_CantCompile || bailoutExpected) {
IonSpew(IonSpew_BaselineOSR, " Reset UseCount cantCompile=%s bailoutExpected=%s!",
stat == Method_CantCompile ? "yes" : "no",
bailoutExpected ? "yes" : "no");
script->resetUseCount();
}
return true;
}
if (isLoopEntry) {
IonSpew(IonSpew_BaselineOSR, " OSR possible!");
IonScript *ion = script->ionScript();
*jitcodePtr = ion->method()->raw() + ion->osrEntryOffset();
}
return true;
}
//
// The following data is kept in a temporary heap-allocated buffer, stored in
// IonRuntime (high memory addresses at top, low at bottom):
//
// +=================================+ -- <---- High Address
// | | |
// | ...Locals/Stack... | |
// | | |
// +---------------------------------+ |
// | | |
// | ...StackFrame... | |-- Fake StackFrame
// | | |
// +----> +---------------------------------+ |
// | | | |
// | | ...Args/This... | |
// | | | |
// | +=================================+ --
// | | Padding(Maybe Empty) |
// | +=================================+ --
// +------|-- stackFrame | |-- IonOsrTempData
// | jitcode | |
// +=================================+ -- <---- Low Address
//
// A pointer to the IonOsrTempData is returned.
struct IonOsrTempData
{
void *jitcode;
uint8_t *stackFrame;
};
static IonOsrTempData *
PrepareOsrTempData(JSContext *cx, ICUseCount_Fallback *stub, BaselineFrame *frame,
HandleScript script, jsbytecode *pc, void *jitcode)
{
// Calculate the (numLocals + numStackVals), and the number of formal args.
size_t numLocalsAndStackVals = frame->numValueSlots();
size_t numFormalArgs = frame->isFunctionFrame() ? frame->numFormalArgs() : 0;
// Calculate the amount of space to allocate:
// StackFrame space:
// (sizeof(Value) * (numLocals + numStackVals))
// + sizeof(StackFrame)
// + (sizeof(Value) * (numFormalArgs + 1)) // +1 for ThisV
//
// IonOsrTempData space:
// sizeof(IonOsrTempData)
size_t stackFrameSpace = (sizeof(Value) * numLocalsAndStackVals) + sizeof(StackFrame)
+ (sizeof(Value) * (numFormalArgs + 1));
size_t ionOsrTempDataSpace = sizeof(IonOsrTempData);
size_t totalSpace = AlignBytes(stackFrameSpace, sizeof(Value)) +
AlignBytes(ionOsrTempDataSpace, sizeof(Value));
IonOsrTempData *info = (IonOsrTempData *)cx->runtime()->getIonRuntime(cx)->allocateOsrTempData(totalSpace);
if (!info)
return NULL;
memset(info, 0, totalSpace);
info->jitcode = jitcode;
uint8_t *stackFrameStart = (uint8_t *)info + AlignBytes(ionOsrTempDataSpace, sizeof(Value));
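// stackFrame points past the copied args and thisv, at the fake StackFrame
// itself (see the diagram above: Args/This sit at lower addresses than the
// StackFrame).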
info->stackFrame = stackFrameStart + (numFormalArgs * sizeof(Value)) + sizeof(Value);
//
// Initialize the fake StackFrame.
//
// Copy formal args and thisv.
memcpy(stackFrameStart, frame->argv() - 1, (numFormalArgs + 1) * sizeof(Value));
// Initialize ScopeChain, Exec, and Flags fields in StackFrame struct.
uint8_t *stackFrame = info->stackFrame;
*((JSObject **) (stackFrame + StackFrame::offsetOfScopeChain())) = frame->scopeChain();
if (frame->isFunctionFrame()) {
// Store the function in exec field, and StackFrame::FUNCTION for flags.
*((JSFunction **) (stackFrame + StackFrame::offsetOfExec())) = frame->fun();
*((uint32_t *) (stackFrame + StackFrame::offsetOfFlags())) = StackFrame::FUNCTION;
} else {
*((JSScript **) (stackFrame + StackFrame::offsetOfExec())) = frame->script();
*((uint32_t *) (stackFrame + StackFrame::offsetOfFlags())) = 0;
}
// Do locals and stack values. Note that in the fake StackFrame, these go from
// low to high addresses, while on the C stack they go from high to low
// addresses. So we can't use a single memcpy; copy the values slot by slot
// and let valueSlot() account for the reversed layout.
Value *stackFrameLocalsStart = (Value *) (stackFrame + sizeof(StackFrame));
for (size_t i = 0; i < numLocalsAndStackVals; i++)
stackFrameLocalsStart[i] = *(frame->valueSlot(i));
IonSpew(IonSpew_BaselineOSR, "Allocated IonOsrTempData at %p", (void *) info);
IonSpew(IonSpew_BaselineOSR, "Jitcode is %p", info->jitcode);
// All done.
return info;
}
static bool
DoUseCountFallback(JSContext *cx, ICUseCount_Fallback *stub, BaselineFrame *frame,
IonOsrTempData **infoPtr)
{
JS_ASSERT(infoPtr);
*infoPtr = NULL;
// A TI OOM will disable TI and Ion.
if (!jit::IsIonEnabled(cx))
return true;
RootedScript script(cx, frame->script());
jsbytecode *pc = stub->icEntry()->pc(script);
bool isLoopEntry = JSOp(*pc) == JSOP_LOOPENTRY;
FallbackICSpew(cx, stub, "UseCount(%d)", isLoopEntry ? int(pc - script->code) : int(-1));
if (!script->canIonCompile()) {
// TODO: ASSERT that ion-compilation-disabled checker stub doesn't exist.
// TODO: Clear all optimized stubs.
// TODO: Add an ion-compilation-disabled checker IC stub.
script->resetUseCount();
return true;
}
JS_ASSERT(!script->isIonCompilingOffThread());
// If Ion script exists, but PC is not at a loop entry, then Ion will be entered for
// this script at an appropriate LOOPENTRY or the next time this function is called.
if (script->hasIonScript() && !isLoopEntry) {
IonSpew(IonSpew_BaselineOSR, "IonScript exists, but not at loop entry!");
// TODO: ASSERT that an ion-script-already-exists checker stub doesn't exist.
// TODO: Clear all optimized stubs.
// TODO: Add a ion-script-already-exists checker stub.
return true;
}
// Ensure that Ion-compiled code is available.
IonSpew(IonSpew_BaselineOSR,
"UseCount for %s:%d reached %d at pc %p, trying to switch to Ion!",
script->filename(), script->lineno, (int) script->getUseCount(), (void *) pc);
void *jitcode = NULL;
if (!EnsureCanEnterIon(cx, stub, frame, script, pc, &jitcode))
return false;
// Jitcode should only be set here if not at loop entry.
JS_ASSERT_IF(!isLoopEntry, !jitcode);
if (!jitcode)
return true;
// Prepare the temporary heap copy of the fake StackFrame and actual args list.
IonSpew(IonSpew_BaselineOSR, "Got jitcode. Preparing for OSR into ion.");
IonOsrTempData *info = PrepareOsrTempData(cx, stub, frame, script, pc, jitcode);
if (!info)
return false;
*infoPtr = info;
return true;
}
typedef bool (*DoUseCountFallbackFn)(JSContext *, ICUseCount_Fallback *, BaselineFrame *frame,
IonOsrTempData **infoPtr);
static const VMFunction DoUseCountFallbackInfo =
FunctionInfo<DoUseCountFallbackFn>(DoUseCountFallback);
bool
ICUseCount_Fallback::Compiler::generateStubCode(MacroAssembler &masm)
{
// enterStubFrame is going to clobber BaselineFrameReg, so save it in
// R0.scratchReg() first.
masm.movePtr(BaselineFrameReg, R0.scratchReg());
// Push a stub frame so that we can perform a non-tail call.
enterStubFrame(masm, R1.scratchReg());
Label noCompiledCode;
// Call DoUseCountFallback to compile/check-for Ion-compiled function
{
// Push IonOsrTempData pointer storage
masm.subPtr(Imm32(sizeof(void *)), BaselineStackReg);
masm.push(BaselineStackReg);
// Push IonJSFrameLayout pointer.
masm.loadBaselineFramePtr(R0.scratchReg(), R0.scratchReg());
masm.push(R0.scratchReg());
// Push stub pointer.
masm.push(BaselineStubReg);
if (!callVM(DoUseCountFallbackInfo, masm))
return false;
// Pop IonOsrTempData pointer.
masm.pop(R0.scratchReg());
leaveStubFrame(masm);
// If no IonCode was found, then just exit the IC.
masm.branchPtr(Assembler::Equal, R0.scratchReg(), ImmWord((void*) NULL), &noCompiledCode);
}
// Get a scratch register.
GeneralRegisterSet regs(availableGeneralRegs(0));
Register osrDataReg = R0.scratchReg();
regs.take(osrDataReg);
regs.takeUnchecked(OsrFrameReg);
Register scratchReg = regs.takeAny();
// At this point, stack looks like:
// +-> [...Calling-Frame...]
// | [...Actual-Args/ThisV/ArgCount/Callee...]
// | [Descriptor]
// | [Return-Addr]
// +---[Saved-FramePtr] <-- BaselineFrameReg points here.
// [...Baseline-Frame...]
// Restore the stack pointer to point to the saved frame pointer.
masm.movePtr(BaselineFrameReg, BaselineStackReg);
// Discard saved frame pointer, so that the return address is on top of
// the stack.
masm.pop(scratchReg);
// Jump into Ion.
masm.loadPtr(Address(osrDataReg, offsetof(IonOsrTempData, jitcode)), scratchReg);
masm.loadPtr(Address(osrDataReg, offsetof(IonOsrTempData, stackFrame)), OsrFrameReg);
masm.jump(scratchReg);
// No jitcode available, do nothing.
masm.bind(&noCompiledCode);
EmitReturnFromIC(masm);
return true;
}
//
// ICProfile_Fallback
//
static bool
DoProfilerFallback(JSContext *cx, BaselineFrame *frame, ICProfiler_Fallback *stub)
{
RootedScript script(cx, frame->script());
RootedFunction func(cx, frame->maybeFun());
mozilla::DebugOnly<ICEntry *> icEntry = stub->icEntry();
FallbackICSpew(cx, stub, "Profiler");
SPSProfiler *profiler = &cx->runtime()->spsProfiler;
// Manually enter SPS this time.
JS_ASSERT(profiler->enabled());
if (!cx->runtime()->spsProfiler.enter(cx, script, func))
return false;
frame->setPushedSPSFrame();
// Unlink any existing PushFunction stub (which may hold a stale 'const char *'
// pointing to the profile string).
JS_ASSERT_IF(icEntry->firstStub() != stub,
icEntry->firstStub()->isProfiler_PushFunction() &&
icEntry->firstStub()->next() == stub);
stub->unlinkStubsWithKind(cx, ICStub::Profiler_PushFunction);
JS_ASSERT(icEntry->firstStub() == stub);
// Generate the string to use to identify this stack frame.
const char *string = profiler->profileString(cx, script, func);
if (string == NULL)
return false;
IonSpew(IonSpew_BaselineIC, " Generating Profiler_PushFunction stub for %s:%d",
script->filename(), script->lineno);
// Create a new optimized stub.
ICProfiler_PushFunction::Compiler compiler(cx, string, script);
ICStub *optStub = compiler.getStub(compiler.getStubSpace(script));
if (!optStub)
return false;
stub->addNewStub(optStub);
return true;
}
typedef bool (*DoProfilerFallbackFn)(JSContext *, BaselineFrame *frame, ICProfiler_Fallback *);
static const VMFunction DoProfilerFallbackInfo =
FunctionInfo<DoProfilerFallbackFn>(DoProfilerFallback);
bool
ICProfiler_Fallback::Compiler::generateStubCode(MacroAssembler &masm)
{
EmitRestoreTailCallReg(masm);
masm.push(BaselineStubReg); // Push stub.
masm.pushBaselineFramePtr(BaselineFrameReg, R0.scratchReg()); // Push frame.
return tailCallVM(DoProfilerFallbackInfo, masm);
}
bool
ICProfiler_PushFunction::Compiler::generateStubCode(MacroAssembler &masm)
{
Register scratch = R0.scratchReg();
Register scratch2 = R1.scratchReg();
// Profiling should be enabled if we ever reach here.
#ifdef DEBUG
Label spsEnabled;
uint32_t *enabledAddr = cx->runtime()->spsProfiler.addressOfEnabled();
masm.branch32(Assembler::NotEqual, AbsoluteAddress(enabledAddr), Imm32(0), &spsEnabled);
masm.breakpoint();
masm.bind(&spsEnabled);
#endif
// Push SPS entry.
masm.spsPushFrame(&cx->runtime()->spsProfiler,
Address(BaselineStubReg, ICProfiler_PushFunction::offsetOfStr()),
Address(BaselineStubReg, ICProfiler_PushFunction::offsetOfScript()),
scratch,
scratch2);
// Mark frame as having profiler entry pushed.
Address flagsOffset(BaselineFrameReg, BaselineFrame::reverseOffsetOfFlags());
masm.or32(Imm32(BaselineFrame::HAS_PUSHED_SPS_FRAME), flagsOffset);
EmitReturnFromIC(masm);
return true;
}
//
// TypeMonitor_Fallback
//
bool
ICTypeMonitor_Fallback::addMonitorStubForValue(JSContext *cx, HandleScript script, HandleValue val)
{
bool wasDetachedMonitorChain = lastMonitorStubPtrAddr_ == NULL;
JS_ASSERT_IF(wasDetachedMonitorChain, numOptimizedMonitorStubs_ == 0);
if (numOptimizedMonitorStubs_ >= MAX_OPTIMIZED_STUBS) {
// TODO: if the TypeSet becomes unknown or has the AnyObject type,
// replace stubs with a single stub to handle these.
return true;
}
if (val.isPrimitive()) {
JS_ASSERT(!val.isMagic());
JSValueType type = val.isDouble() ? JSVAL_TYPE_DOUBLE : val.extractNonDoubleType();
// Check for existing TypeMonitor stub.
ICTypeMonitor_PrimitiveSet *existingStub = NULL;
for (ICStubConstIterator iter = firstMonitorStub(); !iter.atEnd(); iter++) {
if (iter->isTypeMonitor_PrimitiveSet()) {
existingStub = iter->toTypeMonitor_PrimitiveSet();
if (existingStub->containsType(type))
return true;
}
}
ICTypeMonitor_PrimitiveSet::Compiler compiler(cx, existingStub, type);
ICStub *stub = existingStub ? compiler.updateStub()
: compiler.getStub(compiler.getStubSpace(script));
if (!stub)
return false;
IonSpew(IonSpew_BaselineIC, " %s TypeMonitor stub %p for primitive type %d",
existingStub ? "Modified existing" : "Created new", stub, type);
if (!existingStub) {
JS_ASSERT(!hasStub(TypeMonitor_PrimitiveSet));
addOptimizedMonitorStub(stub);
}
} else if (val.toObject().hasSingletonType()) {
RootedObject obj(cx, &val.toObject());
// Check for existing TypeMonitor stub.
for (ICStubConstIterator iter = firstMonitorStub(); !iter.atEnd(); iter++) {
if (iter->isTypeMonitor_SingleObject() &&
iter->toTypeMonitor_SingleObject()->object() == obj)
{
return true;
}
}
ICTypeMonitor_SingleObject::Compiler compiler(cx, obj);
ICStub *stub = compiler.getStub(compiler.getStubSpace(script));
if (!stub)
return false;
IonSpew(IonSpew_BaselineIC, " Added TypeMonitor stub %p for singleton %p",
stub, obj.get());
addOptimizedMonitorStub(stub);
} else {
RootedTypeObject type(cx, val.toObject().type());
// Check for existing TypeMonitor stub.
for (ICStubConstIterator iter = firstMonitorStub(); !iter.atEnd(); iter++) {
if (iter->isTypeMonitor_TypeObject() &&
iter->toTypeMonitor_TypeObject()->type() == type)
{
return true;
}
}
ICTypeMonitor_TypeObject::Compiler compiler(cx, type);
ICStub *stub = compiler.getStub(compiler.getStubSpace(script));
if (!stub)
return false;
IonSpew(IonSpew_BaselineIC, " Added TypeMonitor stub %p for TypeObject %p",
stub, type.get());
addOptimizedMonitorStub(stub);
}
bool firstMonitorStubAdded = wasDetachedMonitorChain && (numOptimizedMonitorStubs_ > 0);
if (firstMonitorStubAdded) {
// Was an empty monitor chain before, but a new stub was added. This is the
// only time that any main stubs' firstMonitorStub fields need to be updated to
// refer to the newly added monitor stub.
ICStub *firstStub = mainFallbackStub_->icEntry()->firstStub();
for (ICStubConstIterator iter = firstStub; !iter.atEnd(); iter++) {
// Non-monitored stubs are used if the result always has the same type,
// e.g. a StringLength stub will always return int32.
if (!iter->isMonitored())
continue;
// Since we just added the first optimized monitoring stub, any
// existing main stub's |firstMonitorStub| MUST be pointing to the fallback
// monitor stub (i.e. this stub).
JS_ASSERT(iter->toMonitoredStub()->firstMonitorStub() == this);
iter->toMonitoredStub()->updateFirstMonitorStub(firstMonitorStub_);
}
}
return true;
}
static bool
DoTypeMonitorFallback(JSContext *cx, BaselineFrame *frame, ICTypeMonitor_Fallback *stub,
HandleValue value, MutableHandleValue res)
{
RootedScript script(cx, frame->script());
jsbytecode *pc = stub->icEntry()->pc(script);
TypeFallbackICSpew(cx, stub, "TypeMonitor");
uint32_t argument;
if (stub->monitorsThis()) {
JS_ASSERT(pc == script->code);
types::TypeScript::SetThis(cx, script, value);
} else if (stub->monitorsArgument(&argument)) {
JS_ASSERT(pc == script->code);
types::TypeScript::SetArgument(cx, script, argument, value);
} else {
types::TypeScript::Monitor(cx, script, pc, value);
}
if (!stub->addMonitorStubForValue(cx, script, value))
return false;
// Copy input value to res.
res.set(value);
return true;
}
typedef bool (*DoTypeMonitorFallbackFn)(JSContext *, BaselineFrame *, ICTypeMonitor_Fallback *,
HandleValue, MutableHandleValue);
static const VMFunction DoTypeMonitorFallbackInfo =
FunctionInfo<DoTypeMonitorFallbackFn>(DoTypeMonitorFallback);
bool
ICTypeMonitor_Fallback::Compiler::generateStubCode(MacroAssembler &masm)
{
JS_ASSERT(R0 == JSReturnOperand);
// Restore the tail call register.
EmitRestoreTailCallReg(masm);
masm.pushValue(R0);
masm.push(BaselineStubReg);
masm.pushBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
return tailCallVM(DoTypeMonitorFallbackInfo, masm);
}
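// The optimized monitor stubs below all have the same shape: guard that the
// value in R0 matches the monitored type(s); on success return from the IC,
// on failure jump to EmitStubGuardFailure, which falls through to the next
// stub in the chain.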
bool
ICTypeMonitor_PrimitiveSet::Compiler::generateStubCode(MacroAssembler &masm)
{
Label success;
if ((flags_ & TypeToFlag(JSVAL_TYPE_INT32)) && !(flags_ & TypeToFlag(JSVAL_TYPE_DOUBLE)))
masm.branchTestInt32(Assembler::Equal, R0, &success);
if (flags_ & TypeToFlag(JSVAL_TYPE_DOUBLE))
masm.branchTestNumber(Assembler::Equal, R0, &success);
if (flags_ & TypeToFlag(JSVAL_TYPE_UNDEFINED))
masm.branchTestUndefined(Assembler::Equal, R0, &success);
if (flags_ & TypeToFlag(JSVAL_TYPE_BOOLEAN))
masm.branchTestBoolean(Assembler::Equal, R0, &success);
if (flags_ & TypeToFlag(JSVAL_TYPE_STRING))
masm.branchTestString(Assembler::Equal, R0, &success);
// Currently, we will never generate primitive stub checks for object. However,
// when we do get to the point where we want to collapse our monitor chains of
// objects and singletons down (when they get too long) to a generic "any object"
// in coordination with the typeset doing the same thing, this will need to
// be re-enabled.
/*
if (flags_ & TypeToFlag(JSVAL_TYPE_OBJECT))
masm.branchTestObject(Assembler::Equal, R0, &success);
*/
JS_ASSERT(!(flags_ & TypeToFlag(JSVAL_TYPE_OBJECT)));
if (flags_ & TypeToFlag(JSVAL_TYPE_NULL))
masm.branchTestNull(Assembler::Equal, R0, &success);
EmitStubGuardFailure(masm);
masm.bind(&success);
EmitReturnFromIC(masm);
return true;
}
bool
ICTypeMonitor_SingleObject::Compiler::generateStubCode(MacroAssembler &masm)
{
Label failure;
masm.branchTestObject(Assembler::NotEqual, R0, &failure);
// Guard on the object's identity.
Register obj = masm.extractObject(R0, ExtractTemp0);
Address expectedObject(BaselineStubReg, ICTypeMonitor_SingleObject::offsetOfObject());
masm.branchPtr(Assembler::NotEqual, expectedObject, obj, &failure);
EmitReturnFromIC(masm);
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
bool
ICTypeMonitor_TypeObject::Compiler::generateStubCode(MacroAssembler &masm)
{
Label failure;
masm.branchTestObject(Assembler::NotEqual, R0, &failure);
// Guard on the object's TypeObject.
Register obj = masm.extractObject(R0, ExtractTemp0);
masm.loadPtr(Address(obj, JSObject::offsetOfType()), R1.scratchReg());
Address expectedType(BaselineStubReg, ICTypeMonitor_TypeObject::offsetOfType());
masm.branchPtr(Assembler::NotEqual, expectedType, R1.scratchReg(), &failure);
EmitReturnFromIC(masm);
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
bool
ICUpdatedStub::addUpdateStubForValue(JSContext *cx, HandleScript script, HandleObject obj,
HandleId id, HandleValue val)
{
if (numOptimizedStubs_ >= MAX_OPTIMIZED_STUBS) {
// TODO: if the TypeSet becomes unknown or has the AnyObject type,
// replace stubs with a single stub to handle these.
return true;
}
if (!obj->getType(cx))
return false;
types::EnsureTrackPropertyTypes(cx, obj, id);
if (val.isPrimitive()) {
JSValueType type = val.isDouble() ? JSVAL_TYPE_DOUBLE : val.extractNonDoubleType();
// Check for existing TypeUpdate stub.
ICTypeUpdate_PrimitiveSet *existingStub = NULL;
for (ICStubConstIterator iter = firstUpdateStub_; !iter.atEnd(); iter++) {
if (iter->isTypeUpdate_PrimitiveSet()) {
existingStub = iter->toTypeUpdate_PrimitiveSet();
if (existingStub->containsType(type))
return true;
}
}
ICTypeUpdate_PrimitiveSet::Compiler compiler(cx, existingStub, type);
ICStub *stub = existingStub ? compiler.updateStub()
: compiler.getStub(compiler.getStubSpace(script));
if (!stub)
return false;
if (!existingStub) {
JS_ASSERT(!hasTypeUpdateStub(TypeUpdate_PrimitiveSet));
addOptimizedUpdateStub(stub);
}
IonSpew(IonSpew_BaselineIC, " %s TypeUpdate stub %p for primitive type %d",
existingStub ? "Modified existing" : "Created new", stub, type);
} else if (val.toObject().hasSingletonType()) {
RootedObject obj(cx, &val.toObject());
// Check for existing TypeUpdate stub.
for (ICStubConstIterator iter = firstUpdateStub_; !iter.atEnd(); iter++) {
if (iter->isTypeUpdate_SingleObject() &&
iter->toTypeUpdate_SingleObject()->object() == obj)
{
return true;
}
}
ICTypeUpdate_SingleObject::Compiler compiler(cx, obj);
ICStub *stub = compiler.getStub(compiler.getStubSpace(script));
if (!stub)
return false;
IonSpew(IonSpew_BaselineIC, " Added TypeUpdate stub %p for singleton %p", stub, obj.get());
addOptimizedUpdateStub(stub);
} else {
RootedTypeObject type(cx, val.toObject().type());
// Check for existing TypeUpdate stub.
for (ICStubConstIterator iter = firstUpdateStub_; !iter.atEnd(); iter++) {
if (iter->isTypeUpdate_TypeObject() &&
iter->toTypeUpdate_TypeObject()->type() == type)
{
return true;
}
}
ICTypeUpdate_TypeObject::Compiler compiler(cx, type);
ICStub *stub = compiler.getStub(compiler.getStubSpace(script));
if (!stub)
return false;
IonSpew(IonSpew_BaselineIC, " Added TypeUpdate stub %p for TypeObject %p",
stub, type.get());
addOptimizedUpdateStub(stub);
}
return true;
}
//
// TypeUpdate_Fallback
//
static bool
DoTypeUpdateFallback(JSContext *cx, BaselineFrame *frame, ICUpdatedStub *stub, HandleValue objval,
HandleValue value)
{
FallbackICSpew(cx, stub->getChainFallback(), "TypeUpdate(%s)",
ICStub::KindString(stub->kind()));
RootedScript script(cx, frame->script());
RootedObject obj(cx, &objval.toObject());
RootedId id(cx);
switch (stub->kind()) {
case ICStub::SetElem_Dense:
case ICStub::SetElem_DenseAdd: {
JS_ASSERT(obj->isNative());
id = JSID_VOID;
types::AddTypePropertyId(cx, obj, id, value);
break;
}
case ICStub::SetProp_Native:
case ICStub::SetProp_NativeAdd: {
JS_ASSERT(obj->isNative());
jsbytecode *pc = stub->getChainFallback()->icEntry()->pc(script);
if (*pc == JSOP_SETALIASEDVAR)
id = NameToId(ScopeCoordinateName(cx, script, pc));
else
id = NameToId(script->getName(pc));
types::AddTypePropertyId(cx, obj, id, value);
break;
}
default:
JS_NOT_REACHED("Invalid stub");
return false;
}
return stub->addUpdateStubForValue(cx, script, obj, id, value);
}
typedef bool (*DoTypeUpdateFallbackFn)(JSContext *, BaselineFrame *, ICUpdatedStub *, HandleValue,
HandleValue);
const VMFunction DoTypeUpdateFallbackInfo =
FunctionInfo<DoTypeUpdateFallbackFn>(DoTypeUpdateFallback);
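// Type-update stubs report their result in R1.scratchReg(): an optimized stub
// that matches the value loads true (1) and returns, while reaching this
// fallback stub loads false (0), telling the calling stub's inline path to
// invoke DoTypeUpdateFallback through the VM.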
bool
ICTypeUpdate_Fallback::Compiler::generateStubCode(MacroAssembler &masm)
{
// Just store false into R1.scratchReg() and return.
masm.move32(Imm32(0), R1.scratchReg());
EmitReturnFromIC(masm);
return true;
}
bool
ICTypeUpdate_PrimitiveSet::Compiler::generateStubCode(MacroAssembler &masm)
{
Label success;
if ((flags_ & TypeToFlag(JSVAL_TYPE_INT32)) && !(flags_ & TypeToFlag(JSVAL_TYPE_DOUBLE)))
masm.branchTestInt32(Assembler::Equal, R0, &success);
if (flags_ & TypeToFlag(JSVAL_TYPE_DOUBLE))
masm.branchTestNumber(Assembler::Equal, R0, &success);
if (flags_ & TypeToFlag(JSVAL_TYPE_UNDEFINED))
masm.branchTestUndefined(Assembler::Equal, R0, &success);
if (flags_ & TypeToFlag(JSVAL_TYPE_BOOLEAN))
masm.branchTestBoolean(Assembler::Equal, R0, &success);
if (flags_ & TypeToFlag(JSVAL_TYPE_STRING))
masm.branchTestString(Assembler::Equal, R0, &success);
// Currently, we will never generate primitive stub checks for object. However,
// when we do get to the point where we want to collapse our monitor chains of
// objects and singletons down (when they get too long) to a generic "any object"
// in coordination with the typeset doing the same thing, this will need to
// be re-enabled.
/*
if (flags_ & TypeToFlag(JSVAL_TYPE_OBJECT))
masm.branchTestObject(Assembler::Equal, R0, &success);
*/
JS_ASSERT(!(flags_ & TypeToFlag(JSVAL_TYPE_OBJECT)));
if (flags_ & TypeToFlag(JSVAL_TYPE_NULL))
masm.branchTestNull(Assembler::Equal, R0, &success);
EmitStubGuardFailure(masm);
// Type matches, load true into R1.scratchReg() and return.
masm.bind(&success);
masm.mov(Imm32(1), R1.scratchReg());
EmitReturnFromIC(masm);
return true;
}
bool
ICTypeUpdate_SingleObject::Compiler::generateStubCode(MacroAssembler &masm)
{
Label failure;
masm.branchTestObject(Assembler::NotEqual, R0, &failure);
// Guard on the object's identity.
Register obj = masm.extractObject(R0, R1.scratchReg());
Address expectedObject(BaselineStubReg, ICTypeUpdate_SingleObject::offsetOfObject());
masm.branchPtr(Assembler::NotEqual, expectedObject, obj, &failure);
// Identity matches, load true into R1.scratchReg() and return.
masm.mov(Imm32(1), R1.scratchReg());
EmitReturnFromIC(masm);
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
bool
ICTypeUpdate_TypeObject::Compiler::generateStubCode(MacroAssembler &masm)
{
Label failure;
masm.branchTestObject(Assembler::NotEqual, R0, &failure);
// Guard on the object's TypeObject.
Register obj = masm.extractObject(R0, R1.scratchReg());
masm.loadPtr(Address(obj, JSObject::offsetOfType()), R1.scratchReg());
Address expectedType(BaselineStubReg, ICTypeUpdate_TypeObject::offsetOfType());
masm.branchPtr(Assembler::NotEqual, expectedType, R1.scratchReg(), &failure);
// Type matches, load true into R1.scratchReg() and return.
masm.mov(Imm32(1), R1.scratchReg());
EmitReturnFromIC(masm);
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
//
// This_Fallback
//
static bool
DoThisFallback(JSContext *cx, ICThis_Fallback *stub, HandleValue thisv, MutableHandleValue ret)
{
FallbackICSpew(cx, stub, "This");
ret.set(thisv);
bool modified;
if (!BoxNonStrictThis(cx, ret, &modified))
return false;
return true;
}
typedef bool (*DoThisFallbackFn)(JSContext *, ICThis_Fallback *, HandleValue, MutableHandleValue);
static const VMFunction DoThisFallbackInfo = FunctionInfo<DoThisFallbackFn>(DoThisFallback);
bool
ICThis_Fallback::Compiler::generateStubCode(MacroAssembler &masm)
{
JS_ASSERT(R0 == JSReturnOperand);
// Restore the tail call register.
EmitRestoreTailCallReg(masm);
masm.pushValue(R0);
masm.push(BaselineStubReg);
return tailCallVM(DoThisFallbackInfo, masm);
}
//
// NewArray_Fallback
//
static bool
DoNewArray(JSContext *cx, ICNewArray_Fallback *stub, uint32_t length,
HandleTypeObject type, MutableHandleValue res)
{
FallbackICSpew(cx, stub, "NewArray");
JSObject *obj = NewInitArray(cx, length, type);
if (!obj)
return false;
res.setObject(*obj);
return true;
}
typedef bool(*DoNewArrayFn)(JSContext *, ICNewArray_Fallback *, uint32_t, HandleTypeObject,
MutableHandleValue);
static const VMFunction DoNewArrayInfo = FunctionInfo<DoNewArrayFn>(DoNewArray);
bool
ICNewArray_Fallback::Compiler::generateStubCode(MacroAssembler &masm)
{
EmitRestoreTailCallReg(masm);
masm.push(R1.scratchReg()); // type
masm.push(R0.scratchReg()); // length
masm.push(BaselineStubReg); // stub.
return tailCallVM(DoNewArrayInfo, masm);
}
//
// NewObject_Fallback
//
static bool
DoNewObject(JSContext *cx, ICNewObject_Fallback *stub, HandleObject templateObject,
MutableHandleValue res)
{
FallbackICSpew(cx, stub, "NewObject");
JSObject *obj = NewInitObject(cx, templateObject);
if (!obj)
return false;
res.setObject(*obj);
return true;
}
typedef bool(*DoNewObjectFn)(JSContext *, ICNewObject_Fallback *, HandleObject,
MutableHandleValue);
static const VMFunction DoNewObjectInfo = FunctionInfo<DoNewObjectFn>(DoNewObject);
bool
ICNewObject_Fallback::Compiler::generateStubCode(MacroAssembler &masm)
{
EmitRestoreTailCallReg(masm);
masm.push(R0.scratchReg()); // template
masm.push(BaselineStubReg); // stub.
return tailCallVM(DoNewObjectInfo, masm);
}
//
// Compare_Fallback
//
static bool
DoCompareFallback(JSContext *cx, BaselineFrame *frame, ICCompare_Fallback *stub, HandleValue lhs,
HandleValue rhs, MutableHandleValue ret)
{
jsbytecode *pc = stub->icEntry()->pc(frame->script());
JSOp op = JSOp(*pc);
FallbackICSpew(cx, stub, "Compare(%s)", js_CodeName[op]);
// Case operations in a CONDSWITCH perform strict equality.
if (op == JSOP_CASE)
op = JSOP_STRICTEQ;
// Don't pass lhs/rhs directly, we need the original values when
// generating stubs.
RootedValue lhsCopy(cx, lhs);
RootedValue rhsCopy(cx, rhs);
// Perform the compare operation.
JSBool out;
switch (op) {
case JSOP_LT:
if (!LessThan(cx, &lhsCopy, &rhsCopy, &out))
return false;
break;
case JSOP_LE:
if (!LessThanOrEqual(cx, &lhsCopy, &rhsCopy, &out))
return false;
break;
case JSOP_GT:
if (!GreaterThan(cx, &lhsCopy, &rhsCopy, &out))
return false;
break;
case JSOP_GE:
if (!GreaterThanOrEqual(cx, &lhsCopy, &rhsCopy, &out))
return false;
break;
case JSOP_EQ:
if (!LooselyEqual<true>(cx, &lhsCopy, &rhsCopy, &out))
return false;
break;
case JSOP_NE:
if (!LooselyEqual<false>(cx, &lhsCopy, &rhsCopy, &out))
return false;
break;
case JSOP_STRICTEQ:
if (!StrictlyEqual<true>(cx, &lhsCopy, &rhsCopy, &out))
return false;
break;
case JSOP_STRICTNE:
if (!StrictlyEqual<false>(cx, &lhsCopy, &rhsCopy, &out))
return false;
break;
default:
JS_ASSERT(!"Unhandled baseline compare op");
return false;
}
ret.setBoolean(out);
// Check to see if a new stub should be generated.
if (stub->numOptimizedStubs() >= ICCompare_Fallback::MAX_OPTIMIZED_STUBS) {
// TODO: Discard all stubs in this IC and replace with inert megamorphic stub.
// But for now we just bail.
return true;
}
JSScript *script = frame->script();
// Try to generate new stubs.
if (lhs.isInt32() && rhs.isInt32()) {
IonSpew(IonSpew_BaselineIC, " Generating %s(Int32, Int32) stub", js_CodeName[op]);
ICCompare_Int32::Compiler compiler(cx, op);
ICStub *int32Stub = compiler.getStub(compiler.getStubSpace(script));
if (!int32Stub)
return false;
stub->addNewStub(int32Stub);
return true;
}
if (!cx->runtime()->jitSupportsFloatingPoint && (lhs.isNumber() || rhs.isNumber()))
return true;
if (lhs.isNumber() && rhs.isNumber()) {
IonSpew(IonSpew_BaselineIC, " Generating %s(Number, Number) stub", js_CodeName[op]);
// Unlink int32 stubs; it's faster to always use the double stub.
stub->unlinkStubsWithKind(cx, ICStub::Compare_Int32);
ICCompare_Double::Compiler compiler(cx, op);
ICStub *doubleStub = compiler.getStub(compiler.getStubSpace(script));
if (!doubleStub)
return false;
stub->addNewStub(doubleStub);
return true;
}
if ((lhs.isNumber() && rhs.isUndefined()) ||
(lhs.isUndefined() && rhs.isNumber()))
{
IonSpew(IonSpew_BaselineIC, " Generating %s(%s, %s) stub", js_CodeName[op],
rhs.isUndefined() ? "Number" : "Undefined",
rhs.isUndefined() ? "Undefined" : "Number");
ICCompare_NumberWithUndefined::Compiler compiler(cx, op, lhs.isUndefined());
ICStub *doubleStub = compiler.getStub(compiler.getStubSpace(script));
if (!doubleStub)
return false;
stub->addNewStub(doubleStub);
return true;
}
if (lhs.isBoolean() && rhs.isBoolean()) {
IonSpew(IonSpew_BaselineIC, " Generating %s(Boolean, Boolean) stub", js_CodeName[op]);
ICCompare_Boolean::Compiler compiler(cx, op);
ICStub *booleanStub = compiler.getStub(compiler.getStubSpace(script));
if (!booleanStub)
return false;
stub->addNewStub(booleanStub);
return true;
}
if ((lhs.isBoolean() && rhs.isInt32()) || (lhs.isInt32() && rhs.isBoolean())) {
IonSpew(IonSpew_BaselineIC, " Generating %s(%s, %s) stub", js_CodeName[op],
rhs.isInt32() ? "Boolean" : "Int32",
rhs.isInt32() ? "Int32" : "Boolean");
ICCompare_Int32WithBoolean::Compiler compiler(cx, op, lhs.isInt32());
ICStub *optStub = compiler.getStub(compiler.getStubSpace(script));
if (!optStub)
return false;
stub->addNewStub(optStub);
return true;
}
if (IsEqualityOp(op)) {
if (lhs.isString() && rhs.isString() && !stub->hasStub(ICStub::Compare_String)) {
IonSpew(IonSpew_BaselineIC, " Generating %s(String, String) stub", js_CodeName[op]);
ICCompare_String::Compiler compiler(cx, op);
ICStub *stringStub = compiler.getStub(compiler.getStubSpace(script));
if (!stringStub)
return false;
stub->addNewStub(stringStub);
return true;
}
if (lhs.isObject() && rhs.isObject()) {
JS_ASSERT(!stub->hasStub(ICStub::Compare_Object));
IonSpew(IonSpew_BaselineIC, " Generating %s(Object, Object) stub", js_CodeName[op]);
ICCompare_Object::Compiler compiler(cx, op);
ICStub *objectStub = compiler.getStub(compiler.getStubSpace(script));
if (!objectStub)
return false;
stub->addNewStub(objectStub);
return true;
}
if ((lhs.isObject() || lhs.isNull() || lhs.isUndefined()) &&
(rhs.isObject() || rhs.isNull() || rhs.isUndefined()) &&
!stub->hasStub(ICStub::Compare_ObjectWithUndefined))
{
IonSpew(IonSpew_BaselineIC, " Generating %s(Obj/Null/Undef, Obj/Null/Undef) stub",
js_CodeName[op]);
bool lhsIsUndefined = lhs.isNull() || lhs.isUndefined();
bool compareWithNull = lhs.isNull() || rhs.isNull();
ICCompare_ObjectWithUndefined::Compiler compiler(cx, op,
lhsIsUndefined, compareWithNull);
ICStub *objectStub = compiler.getStub(compiler.getStubSpace(script));
if (!objectStub)
return false;
stub->addNewStub(objectStub);
return true;
}
}
return true;
}
typedef bool (*DoCompareFallbackFn)(JSContext *, BaselineFrame *, ICCompare_Fallback *,
HandleValue, HandleValue, MutableHandleValue);
static const VMFunction DoCompareFallbackInfo =
FunctionInfo<DoCompareFallbackFn>(DoCompareFallback, PopValues(2));
bool
ICCompare_Fallback::Compiler::generateStubCode(MacroAssembler &masm)
{
JS_ASSERT(R0 == JSReturnOperand);
// Restore the tail call register.
EmitRestoreTailCallReg(masm);
// Ensure stack is fully synced for the expression decompiler.
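// (These two copies keep the stack synced; they are popped again by the
// PopValues(2) in DoCompareFallbackInfo above. The pushes below are the
// actual VM-call arguments.)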
masm.pushValue(R0);
masm.pushValue(R1);
// Push arguments.
masm.pushValue(R1);
masm.pushValue(R0);
masm.push(BaselineStubReg);
masm.pushBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
return tailCallVM(DoCompareFallbackInfo, masm);
}
//
// Compare_String
//
bool
ICCompare_String::Compiler::generateStubCode(MacroAssembler &masm)
{
Label failure;
masm.branchTestString(Assembler::NotEqual, R0, &failure);
masm.branchTestString(Assembler::NotEqual, R1, &failure);
JS_ASSERT(IsEqualityOp(op));
Register left = masm.extractString(R0, ExtractTemp0);
Register right = masm.extractString(R1, ExtractTemp1);
GeneralRegisterSet regs(availableGeneralRegs(2));
Register scratchReg = regs.takeAny();
// x86 doesn't have the luxury of a second scratch.
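// If no free register remains, temporarily repurpose BaselineStubReg and
// restore it from the pushed copy on both the success and the failure path.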
Register scratchReg2;
if (regs.empty()) {
scratchReg2 = BaselineStubReg;
masm.push(BaselineStubReg);
} else {
scratchReg2 = regs.takeAny();
}
JS_ASSERT(scratchReg2 != scratchReg);
Label inlineCompareFailed;
masm.compareStrings(op, left, right, scratchReg2, scratchReg, &inlineCompareFailed);
masm.tagValue(JSVAL_TYPE_BOOLEAN, scratchReg2, R0);
if (scratchReg2 == BaselineStubReg)
masm.pop(BaselineStubReg);
EmitReturnFromIC(masm);
masm.bind(&inlineCompareFailed);
if (scratchReg2 == BaselineStubReg)
masm.pop(BaselineStubReg);
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
//
// Compare_Boolean
//
bool
ICCompare_Boolean::Compiler::generateStubCode(MacroAssembler &masm)
{
Label failure;
masm.branchTestBoolean(Assembler::NotEqual, R0, &failure);
masm.branchTestBoolean(Assembler::NotEqual, R1, &failure);
Register left = masm.extractInt32(R0, ExtractTemp0);
Register right = masm.extractInt32(R1, ExtractTemp1);
// Compare payload regs of R0 and R1.
Assembler::Condition cond = JSOpToCondition(op, /* signed = */true);
#if defined(JS_CPU_MIPS)
masm.cmp32Set(cond, left, right, left);
#else
masm.cmp32(left, right);
masm.emitSet(cond, left);
#endif
// Box the result and return
masm.tagValue(JSVAL_TYPE_BOOLEAN, left, R0);
EmitReturnFromIC(masm);
// Failure case - jump to next stub
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
//
// Compare_NumberWithUndefined
//
bool
ICCompare_NumberWithUndefined::Compiler::generateStubCode(MacroAssembler &masm)
{
ValueOperand numberOperand, undefinedOperand;
if (lhsIsUndefined) {
numberOperand = R1;
undefinedOperand = R0;
} else {
numberOperand = R0;
undefinedOperand = R1;
}
Label failure;
masm.branchTestNumber(Assembler::NotEqual, numberOperand, &failure);
masm.branchTestUndefined(Assembler::NotEqual, undefinedOperand, &failure);
// Comparing a number with undefined will always be true for NE/STRICTNE,
// and always be false for other compare ops.
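// e.g. (JS semantics): 5 != undefined -> true, 5 == undefined -> false,
// and 5 < undefined -> false because undefined coerces to NaN.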
masm.moveValue(BooleanValue(op == JSOP_NE || op == JSOP_STRICTNE), R0);
EmitReturnFromIC(masm);
// Failure case - jump to next stub
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
//
// Compare_Object
//
bool
ICCompare_Object::Compiler::generateStubCode(MacroAssembler &masm)
{
Label failure;
masm.branchTestObject(Assembler::NotEqual, R0, &failure);
masm.branchTestObject(Assembler::NotEqual, R1, &failure);
JS_ASSERT(IsEqualityOp(op));
Register left = masm.extractObject(R0, ExtractTemp0);
Register right = masm.extractObject(R1, ExtractTemp1);
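// Equality on two objects is pointer identity; no coercion occurs when
// both operands are objects.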
Label ifTrue;
masm.branchPtr(JSOpToCondition(op, /* signed = */true), left, right, &ifTrue);
masm.moveValue(BooleanValue(false), R0);
EmitReturnFromIC(masm);
masm.bind(&ifTrue);
masm.moveValue(BooleanValue(true), R0);
EmitReturnFromIC(masm);
// Failure case - jump to next stub
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
//
// Compare_ObjectWithUndefined
//
bool
ICCompare_ObjectWithUndefined::Compiler::generateStubCode(MacroAssembler &masm)
{
JS_ASSERT(IsEqualityOp(op));
ValueOperand objectOperand, undefinedOperand;
if (lhsIsUndefined) {
objectOperand = R1;
undefinedOperand = R0;
} else {
objectOperand = R0;
undefinedOperand = R1;
}
Label failure;
if (compareWithNull)
masm.branchTestNull(Assembler::NotEqual, undefinedOperand, &failure);
else
masm.branchTestUndefined(Assembler::NotEqual, undefinedOperand, &failure);
Label notObject;
masm.branchTestObject(Assembler::NotEqual, objectOperand, &notObject);
if (op == JSOP_STRICTEQ || op == JSOP_STRICTNE) {
// obj !== undefined for all objects.
masm.moveValue(BooleanValue(op == JSOP_STRICTNE), R0);
EmitReturnFromIC(masm);
} else {
// obj != undefined only where !obj->getClass()->emulatesUndefined()
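// The canonical emulates-undefined object is document.all: it compares
// loosely equal to null/undefined but behaves as an object otherwise.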
Label emulatesUndefined;
Register obj = masm.extractObject(objectOperand, ExtractTemp0);
masm.loadPtr(Address(obj, JSObject::offsetOfType()), obj);
masm.loadPtr(Address(obj, offsetof(types::TypeObject, clasp)), obj);
masm.branchTest32(Assembler::NonZero,
Address(obj, Class::offsetOfFlags()),
Imm32(JSCLASS_EMULATES_UNDEFINED),
&emulatesUndefined);
masm.moveValue(BooleanValue(op == JSOP_NE), R0);
EmitReturnFromIC(masm);
masm.bind(&emulatesUndefined);
masm.moveValue(BooleanValue(op == JSOP_EQ), R0);
EmitReturnFromIC(masm);
}
masm.bind(&notObject);
// Also support null == null or undefined == undefined comparisons.
if (compareWithNull)
masm.branchTestNull(Assembler::NotEqual, objectOperand, &failure);
else
masm.branchTestUndefined(Assembler::NotEqual, objectOperand, &failure);
masm.moveValue(BooleanValue(op == JSOP_STRICTEQ || op == JSOP_EQ), R0);
EmitReturnFromIC(masm);
// Failure case - jump to next stub
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
//
// Compare_Int32WithBoolean
//
bool
ICCompare_Int32WithBoolean::Compiler::generateStubCode(MacroAssembler &masm)
{
Label failure;
ValueOperand int32Val;
ValueOperand boolVal;
if (lhsIsInt32_) {
int32Val = R0;
boolVal = R1;
} else {
boolVal = R0;
int32Val = R1;
}
masm.branchTestBoolean(Assembler::NotEqual, boolVal, &failure);
masm.branchTestInt32(Assembler::NotEqual, int32Val, &failure);
if (op_ == JSOP_STRICTEQ || op_ == JSOP_STRICTNE) {
// Ints and booleans are never strictly equal, always strictly not equal.
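// e.g. 1 == true is true after coercion, but 1 === true is false.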
masm.moveValue(BooleanValue(op_ == JSOP_STRICTNE), R0);
EmitReturnFromIC(masm);
} else {
Register boolReg = masm.extractBoolean(boolVal, ExtractTemp0);
Register int32Reg = masm.extractInt32(int32Val, ExtractTemp1);
// Compare payload regs of R0 and R1.
Assembler::Condition cond = JSOpToCondition(op_, /* signed = */true);
#if defined(JS_CPU_MIPS)
masm.cmp32Set(
cond,
lhsIsInt32_ ? int32Reg : boolReg,
lhsIsInt32_ ? boolReg : int32Reg,
R0.scratchReg()
);
#else
masm.cmp32(lhsIsInt32_ ? int32Reg : boolReg,
lhsIsInt32_ ? boolReg : int32Reg);
masm.emitSet(cond, R0.scratchReg());
#endif
// Box the result and return
masm.tagValue(JSVAL_TYPE_BOOLEAN, R0.scratchReg(), R0);
EmitReturnFromIC(masm);
}
// Failure case - jump to next stub
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
//
// ToBool_Fallback
//
static bool
DoToBoolFallback(JSContext *cx, BaselineFrame *frame, ICToBool_Fallback *stub, HandleValue arg,
MutableHandleValue ret)
{
FallbackICSpew(cx, stub, "ToBool");
bool cond = ToBoolean(arg);
ret.setBoolean(cond);
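// For reference, ToBoolean is false exactly for: false, +0, -0, NaN, "",
// null, undefined, and objects whose class emulates undefined.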
// Check to see if a new stub should be generated.
if (stub->numOptimizedStubs() >= ICToBool_Fallback::MAX_OPTIMIZED_STUBS) {
// TODO: Discard all stubs in this IC and replace with inert megamorphic stub.
// But for now we just bail.
return true;
}
JS_ASSERT(!arg.isBoolean());
JSScript *script = frame->script();
// Try to generate new stubs.
if (arg.isInt32()) {
IonSpew(IonSpew_BaselineIC, " Generating ToBool(Int32) stub.");
ICToBool_Int32::Compiler compiler(cx);
ICStub *int32Stub = compiler.getStub(compiler.getStubSpace(script));
if (!int32Stub)
return false;
stub->addNewStub(int32Stub);
return true;
}
if (arg.isDouble() && cx->runtime()->jitSupportsFloatingPoint) {
IonSpew(IonSpew_BaselineIC, " Generating ToBool(Double) stub.");
ICToBool_Double::Compiler compiler(cx);
ICStub *doubleStub = compiler.getStub(compiler.getStubSpace(script));
if (!doubleStub)
return false;
stub->addNewStub(doubleStub);
return true;
}
if (arg.isString()) {
IonSpew(IonSpew_BaselineIC, "  Generating ToBool(String) stub.");
ICToBool_String::Compiler compiler(cx);
ICStub *stringStub = compiler.getStub(compiler.getStubSpace(script));
if (!stringStub)
return false;
stub->addNewStub(stringStub);
return true;
}
if (arg.isNull() || arg.isUndefined()) {
IonSpew(IonSpew_BaselineIC, "  Generating ToBool(NullUndefined) stub.");
ICToBool_NullUndefined::Compiler compiler(cx);
ICStub *nilStub = compiler.getStub(compiler.getStubSpace(script));
if (!nilStub)
return false;
stub->addNewStub(nilStub);
return true;
}
if (arg.isObject()) {
IonSpew(IonSpew_BaselineIC, " Generating ToBool(Object) stub.");
ICToBool_Object::Compiler compiler(cx);
ICStub *objStub = compiler.getStub(compiler.getStubSpace(script));
if (!objStub)
return false;
stub->addNewStub(objStub);
return true;
}
return true;
}
typedef bool (*DoToBoolFallbackFn)(JSContext *, BaselineFrame *, ICToBool_Fallback *, HandleValue,
MutableHandleValue);
static const VMFunction DoToBoolFallbackInfo = FunctionInfo<DoToBoolFallbackFn>(DoToBoolFallback);
bool
ICToBool_Fallback::Compiler::generateStubCode(MacroAssembler &masm)
{
JS_ASSERT(R0 == JSReturnOperand);
// Restore the tail call register.
EmitRestoreTailCallReg(masm);
// Push arguments.
masm.pushValue(R0);
masm.push(BaselineStubReg);
masm.pushBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
return tailCallVM(DoToBoolFallbackInfo, masm);
}
//
// ToBool_Int32
//
bool
ICToBool_Int32::Compiler::generateStubCode(MacroAssembler &masm)
{
Label failure;
masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
Label ifFalse;
#if defined(JS_CPU_MIPS)
masm.branchTestInt32Truthy(false, R0, &ifFalse);
#else
Assembler::Condition cond = masm.testInt32Truthy(false, R0);
masm.j(cond, &ifFalse);
#endif
masm.moveValue(BooleanValue(true), R0);
EmitReturnFromIC(masm);
masm.bind(&ifFalse);
masm.moveValue(BooleanValue(false), R0);
EmitReturnFromIC(masm);
// Failure case - jump to next stub
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
//
// ToBool_String
//
bool
ICToBool_String::Compiler::generateStubCode(MacroAssembler &masm)
{
Label failure;
masm.branchTestString(Assembler::NotEqual, R0, &failure);
Label ifFalse;
#if defined(JS_CPU_MIPS)
masm.branchTestStringTruthy(false, R0, &ifFalse);
#else
Assembler::Condition cond = masm.testStringTruthy(false, R0);
masm.j(cond, &ifFalse);
#endif
masm.moveValue(BooleanValue(true), R0);
EmitReturnFromIC(masm);
masm.bind(&ifFalse);
masm.moveValue(BooleanValue(false), R0);
EmitReturnFromIC(masm);
// Failure case - jump to next stub
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
//
// ToBool_NullUndefined
//
bool
ICToBool_NullUndefined::Compiler::generateStubCode(MacroAssembler &masm)
{
Label failure, ifFalse;
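// Both null and undefined convert to false; any other type falls through
// to the next stub.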
masm.branchTestNull(Assembler::Equal, R0, &ifFalse);
masm.branchTestUndefined(Assembler::NotEqual, R0, &failure);
masm.bind(&ifFalse);
masm.moveValue(BooleanValue(false), R0);
EmitReturnFromIC(masm);
// Failure case - jump to next stub
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
//
// ToBool_Double
//
bool
ICToBool_Double::Compiler::generateStubCode(MacroAssembler &masm)
{
Label failure, ifTrue;
masm.branchTestDouble(Assembler::NotEqual, R0, &failure);
masm.unboxDouble(R0, FloatReg0);
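// ToBoolean on a double is false exactly for +0, -0, and NaN; the
// truthiness test below must honor all three.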
#if defined(JS_CPU_MIPS)
masm.branchTestDoubleTruthy(true, FloatReg0, &ifTrue);
#else
Assembler::Condition cond = masm.testDoubleTruthy(true, FloatReg0);
masm.j(cond, &ifTrue);
#endif
masm.moveValue(BooleanValue(false), R0);
EmitReturnFromIC(masm);
masm.bind(&ifTrue);
masm.moveValue(BooleanValue(true), R0);
EmitReturnFromIC(masm);
// Failure case - jump to next stub
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
//
// ToBool_Object
//
bool
ICToBool_Object::Compiler::generateStubCode(MacroAssembler &masm)
{
Label failure, ifFalse, slowPath;
masm.branchTestObject(Assembler::NotEqual, R0, &failure);
Register objReg = masm.extractObject(R0, ExtractTemp0);
Register scratch = R1.scratchReg();
#if defined(JS_CPU_MIPS)
masm.branchTestObjectTruthy(false, objReg, scratch, &slowPath, &ifFalse);
#else
Assembler::Condition cond = masm.branchTestObjectTruthy(false, objReg, scratch, &slowPath);
masm.j(cond, &ifFalse);
#endif
// If the object doesn't emulate undefined, it evaluates to true.
masm.moveValue(BooleanValue(true), R0);
EmitReturnFromIC(masm);
masm.bind(&ifFalse);
masm.moveValue(BooleanValue(false), R0);
EmitReturnFromIC(masm);
masm.bind(&slowPath);
masm.setupUnalignedABICall(1, scratch);
masm.passABIArg(objReg);
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ObjectEmulatesUndefined));
#if defined(JS_CPU_MIPS)
masm.convertBoolToInt32(ReturnReg, ReturnReg);
#endif
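// ObjectEmulatesUndefined() returns true for falsy objects (e.g.
// document.all), which is the inverse of the ToBoolean result, hence the
// xor with 1.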
masm.xor32(Imm32(1), ReturnReg);
masm.tagValue(JSVAL_TYPE_BOOLEAN, ReturnReg, R0);
EmitReturnFromIC(masm);
// Failure case - jump to next stub
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
//
// ToNumber_Fallback
//
static bool
DoToNumberFallback(JSContext *cx, ICToNumber_Fallback *stub, HandleValue arg, MutableHandleValue ret)
{
FallbackICSpew(cx, stub, "ToNumber");
ret.set(arg);
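// ToNumber coerces in place, e.g. "42" -> 42, "" -> 0, null -> 0, and
// undefined -> NaN (JS semantics).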
return ToNumber(cx, ret.address());
}
typedef bool (*DoToNumberFallbackFn)(JSContext *, ICToNumber_Fallback *, HandleValue, MutableHandleValue);
static const VMFunction DoToNumberFallbackInfo =
FunctionInfo<DoToNumberFallbackFn>(DoToNumberFallback, PopValues(1));
bool
ICToNumber_Fallback::Compiler::generateStubCode(MacroAssembler &masm)
{
JS_ASSERT(R0 == JSReturnOperand);
// Restore the tail call register.
EmitRestoreTailCallReg(masm);
// Ensure stack is fully synced for the expression decompiler.
masm.pushValue(R0);
// Push arguments.
masm.pushValue(R0);
masm.push(BaselineStubReg);
return tailCallVM(DoToNumberFallbackInfo, masm);
}
//
// BinaryArith_Fallback
//
// Disable PGO (see bug 851490).
#if defined(_MSC_VER)
# pragma optimize("g", off)
#endif
static bool
DoBinaryArithFallback(JSContext *cx, BaselineFrame *frame, ICBinaryArith_Fallback *stub,
HandleValue lhs, HandleValue rhs, MutableHandleValue ret)
{
RootedScript script(cx, frame->script());
jsbytecode *pc = stub->icEntry()->pc(script);
JSOp op = JSOp(*pc);
FallbackICSpew(cx, stub, "BinaryArith(%s,%d,%d)", js_CodeName[op],
int(lhs.isDouble() ? JSVAL_TYPE_DOUBLE : lhs.extractNonDoubleType()),
int(rhs.isDouble() ? JSVAL_TYPE_DOUBLE : rhs.extractNonDoubleType()));
// Don't pass lhs/rhs directly; we need the original values when
// generating stubs.
RootedValue lhsCopy(cx, lhs);
RootedValue rhsCopy(cx, rhs);
// Perform the arith operation.
switch (op) {
case JSOP_ADD:
// Do an add.
if (!AddValues(cx, script, pc, &lhsCopy, &rhsCopy, ret.address()))
return false;
break;
case JSOP_SUB:
if (!SubValues(cx, script, pc, &lhsCopy, &rhsCopy, ret.address()))
return false;
break;
case JSOP_MUL:
if (!MulValues(cx, script, pc, &lhsCopy, &rhsCopy, ret.address()))
return false;
break;
case JSOP_DIV:
if (!DivValues(cx, script, pc, &lhsCopy, &rhsCopy, ret.address()))
return false;
break;
case JSOP_MOD:
if (!ModValues(cx, script, pc, &lhsCopy, &rhsCopy, ret.address()))
return false;
break;
case JSOP_BITOR: {
int32_t result;
if (!BitOr(cx, lhs, rhs, &result))
return false;
ret.setInt32(result);
break;
}
case JSOP_BITXOR: {
int32_t result;
if (!BitXor(cx, lhs, rhs, &result))
return false;
ret.setInt32(result);
break;
}
case JSOP_BITAND: {
int32_t result;
if (!BitAnd(cx, lhs, rhs, &result))
return false;
ret.setInt32(result);
break;
}
case JSOP_LSH: {
int32_t result;
if (!BitLsh(cx, lhs, rhs, &result))
return false;
ret.setInt32(result);
break;
}
case JSOP_RSH: {
int32_t result;
if (!BitRsh(cx, lhs, rhs, &result))
return false;
ret.setInt32(result);
break;
}
case JSOP_URSH: {
if (!UrshOperation(cx, script, pc, lhs, rhs, ret.address()))
return false;
break;
}
default:
JS_NOT_REACHED("Unhandled baseline arith op");
return false;
}
if (ret.isDouble())
stub->setSawDoubleResult();
// Check to see if a new stub should be generated.
if (stub->numOptimizedStubs() >= ICBinaryArith_Fallback::MAX_OPTIMIZED_STUBS) {
// TODO: Discard all stubs in this IC and replace with inert megamorphic stub.
// But for now we just bail.
return true;
}
// Handle string concat.
if (op == JSOP_ADD) {
if (lhs.isString() && rhs.isString()) {
IonSpew(IonSpew_BaselineIC, " Generating %s(String, String) stub", js_CodeName[op]);
JS_ASSERT(ret.isString());
ICBinaryArith_StringConcat::Compiler compiler(cx);
ICStub *strcatStub = compiler.getStub(compiler.getStubSpace(script));
if (!strcatStub)
return false;
stub->addNewStub(strcatStub);
return true;
}
if ((lhs.isString() && rhs.isObject()) || (lhs.isObject() && rhs.isString())) {
IonSpew(IonSpew_BaselineIC, " Generating %s(%s, %s) stub", js_CodeName[op],
lhs.isString() ? "String" : "Object",
lhs.isString() ? "Object" : "String");
JS_ASSERT(ret.isString());
ICBinaryArith_StringObjectConcat::Compiler compiler(cx, lhs.isString());
ICStub *strcatStub = compiler.getStub(compiler.getStubSpace(script));
if (!strcatStub)
return false;
stub->addNewStub(strcatStub);
return true;
}
}
if (((lhs.isBoolean() && (rhs.isBoolean() || rhs.isInt32())) ||
(rhs.isBoolean() && (lhs.isBoolean() || lhs.isInt32()))) &&
(op == JSOP_ADD || op == JSOP_SUB || op == JSOP_BITOR || op == JSOP_BITAND ||
op == JSOP_BITXOR))
{
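// Booleans coerce to 0/1 here, e.g. true + 1 == 2 and true | false == 1
// (JS semantics).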
IonSpew(IonSpew_BaselineIC, " Generating %s(%s, %s) stub", js_CodeName[op],
lhs.isBoolean() ? "Boolean" : "Int32", rhs.isBoolean() ? "Boolean" : "Int32");
ICBinaryArith_BooleanWithInt32::Compiler compiler(cx, op, lhs.isBoolean(), rhs.isBoolean());
ICStub *arithStub = compiler.getStub(compiler.getStubSpace(script));
if (!arithStub)
return false;
stub->addNewStub(arithStub);
return true;
}
// Handle only int32 or double.
if (!lhs.isNumber() || !rhs.isNumber())
return true;
JS_ASSERT(ret.isNumber());
if (lhs.isDouble() || rhs.isDouble() || ret.isDouble()) {
if (!cx->runtime()->jitSupportsFloatingPoint)
return true;
switch (op) {
case JSOP_ADD:
case JSOP_SUB:
case JSOP_MUL:
case JSOP_DIV:
case JSOP_MOD: {
// Unlink int32 stubs; it's faster to always use the double stub.
stub->unlinkStubsWithKind(cx, ICStub::BinaryArith_Int32);
IonSpew(IonSpew_BaselineIC, " Generating %s(Double, Double) stub", js_CodeName[op]);
ICBinaryArith_Double::Compiler compiler(cx, op);
ICStub *doubleStub = compiler.getStub(compiler.getStubSpace(script));
if (!doubleStub)
return false;
stub->addNewStub(doubleStub);
return true;
}
default:
break;
}
}
if (lhs.isInt32() && rhs.isInt32()) {
bool allowDouble = ret.isDouble();
if (allowDouble)
stub->unlinkStubsWithKind(cx, ICStub::BinaryArith_Int32);
IonSpew(IonSpew_BaselineIC, " Generating %s(Int32, Int32%s) stub", js_CodeName[op],
allowDouble ? " => Double" : "");
ICBinaryArith_Int32::Compiler compilerInt32(cx, op, allowDouble);
ICStub *int32Stub = compilerInt32.getStub(compilerInt32.getStubSpace(script));
if (!int32Stub)
return false;
stub->addNewStub(int32Stub);
return true;
}
// Handle Double <BITOP> Int32 or Int32 <BITOP> Double case.
if (((lhs.isDouble() && rhs.isInt32()) || (lhs.isInt32() && rhs.isDouble())) &&
ret.isInt32())
{
switch (op) {
case JSOP_BITOR:
case JSOP_BITXOR:
case JSOP_BITAND: {
IonSpew(IonSpew_BaselineIC, " Generating %s(%s, %s) stub", js_CodeName[op],
lhs.isDouble() ? "Double" : "Int32",
lhs.isDouble() ? "Int32" : "Double");
ICBinaryArith_DoubleWithInt32::Compiler compiler(cx, op, lhs.isDouble());
ICStub *optStub = compiler.getStub(compiler.getStubSpace(script));
if (!optStub)
return false;
stub->addNewStub(optStub);
return true;
}
default:
break;
}
}
return true;
}
#if defined(_MSC_VER)
# pragma optimize("", on)
#endif
typedef bool (*DoBinaryArithFallbackFn)(JSContext *, BaselineFrame *, ICBinaryArith_Fallback *,
HandleValue, HandleValue, MutableHandleValue);
static const VMFunction DoBinaryArithFallbackInfo =
FunctionInfo<DoBinaryArithFallbackFn>(DoBinaryArithFallback, PopValues(2));
bool
ICBinaryArith_Fallback::Compiler::generateStubCode(MacroAssembler &masm)
{
JS_ASSERT(R0 == JSReturnOperand);
// Restore the tail call register.
EmitRestoreTailCallReg(masm);
// Ensure stack is fully synced for the expression decompiler.
masm.pushValue(R0);
masm.pushValue(R1);
// Push arguments.
masm.pushValue(R1);
masm.pushValue(R0);
masm.push(BaselineStubReg);
masm.pushBaselineFramePtr(BaselineFrameReg, R0.scratchReg());
return tailCallVM(DoBinaryArithFallbackInfo, masm);
}
static bool
DoConcatStrings(JSContext *cx, HandleValue lhs, HandleValue rhs, MutableHandleValue res)
{
JS_ASSERT(lhs.isString());
JS_ASSERT(rhs.isString());
JSString *lstr = lhs.toString();
JSString *rstr = rhs.toString();
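// Try the no-GC fast path first; if it fails to allocate, root the
// strings and retry with GC allowed.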
JSString *result = ConcatStrings<NoGC>(cx, lstr, rstr);
if (result) {
res.set(StringValue(result));
return true;
}
RootedString rootedl(cx, lstr), rootedr(cx, rstr);
result = ConcatStrings<CanGC>(cx, rootedl, rootedr);
if (!result)
return false;
res.set(StringValue(result));
return true;
}
typedef bool (*DoConcatStringsFn)(JSContext *, HandleValue, HandleValue, MutableHandleValue);
static const VMFunction DoConcatStringsInfo = FunctionInfo<DoConcatStringsFn>(DoConcatStrings);
bool
ICBinaryArith_StringConcat::Compiler::generateStubCode(MacroAssembler &masm)
{
Label failure;
masm.branchTestString(Assembler::NotEqual, R0, &failure);
masm.branchTestString(Assembler::NotEqual, R1, &failure);
// Restore the tail call register.
EmitRestoreTailCallReg(masm);
masm.pushValue(R1);
masm.pushValue(R0);
if (!tailCallVM(DoConcatStringsInfo, masm))
return false;
// Failure case - jump to next stub
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
static JSString *
ConvertObjectToStringForConcat(JSContext *cx, HandleValue obj)
{
JS_ASSERT(obj.isObject());
RootedValue rootedObj(cx, obj);
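// ToPrimitive may run arbitrary user code (valueOf/toString); e.g. "" + {}
// yields "[object Object]" via Object.prototype.toString.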
if (!ToPrimitive(cx, &rootedObj))
return NULL;
return ToString<CanGC>(cx, rootedObj);
}
static bool
DoConcatStringObject(JSContext *cx, bool lhsIsString, HandleValue lhs, HandleValue rhs,
MutableHandleValue res)
{
JSString *lstr = NULL;
JSString *rstr = NULL;
if (lhsIsString) {
// Convert rhs first.
JS_ASSERT(lhs.isString() && rhs.isObject());
rstr = ConvertObjectToStringForConcat(cx, rhs);
if (!rstr)
return false;
// lhs is already string.
lstr = lhs.toString();
} else {
JS_ASSERT(rhs.isString() && lhs.isObject());
// Convert lhs first.
lstr = ConvertObjectToStringForConcat(cx, lhs);
if (!lstr)
return false;
// rhs is already string.
rstr = rhs.toString();
}
JSString *str = ConcatStrings<NoGC>(cx, lstr, rstr);
if (!str) {
RootedString nlstr(cx, lstr), nrstr(cx, rstr);
str = ConcatStrings<CanGC>(cx, nlstr, nrstr);
if (!str)
return false;
}
// Technically we should call TypeScript::MonitorString for this PC; however,
// it was already called when this stub was attached, so it's safe to skip here.
res.setString(str);
return true;
}
typedef bool (*DoConcatStringObjectFn)(JSContext *, bool lhsIsString, HandleValue, HandleValue,
MutableHandleValue);
static const VMFunction DoConcatStringObjectInfo =
FunctionInfo<DoConcatStringObjectFn>(DoConcatStringObject, PopValues(2));
bool
ICBinaryArith_StringObjectConcat::Compiler::generateStubCode(MacroAssembler &masm)
{
Label failure;
if (lhsIsString_) {
masm.branchTestString(Assembler::NotEqual, R0, &failure);
masm.branchTestObject(Assembler::NotEqual, R1, &failure);
} else {
masm.branchTestObject(Assembler::NotEqual, R0, &failure);
masm.branchTestString(Assembler::NotEqual, R1, &failure);
}
// Restore the tail call register.
EmitRestoreTailCallReg(masm);
// Sync for the decompiler.
masm.pushValue(R0);
masm.pushValue(R1);
// Push arguments.
masm.pushValue(R1);
masm.pushValue(R0);
masm.push(Imm32(lhsIsString_));
if (!tailCallVM(DoConcatStringObjectInfo, masm))
return false;
// Failure case - jump to next stub
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
bool
ICBinaryArith_Double::Compiler::generateStubCode(MacroAssembler &masm)
{
Label failure;
masm.ensureDouble(R0, FloatReg0, &failure);
masm.ensureDouble(R1, FloatReg1, &failure);
switch (op) {
case JSOP_ADD:
masm.addDouble(FloatReg1, FloatReg0);
break;
case JSOP_SUB:
masm.subDouble(FloatReg1, FloatReg0);
break;
case JSOP_MUL:
masm.mulDouble(FloatReg1, FloatReg0);
break;
case JSOP_DIV:
masm.divDouble(FloatReg1, FloatReg0);
break;
case JSOP_MOD:
masm.setupUnalignedABICall(2, R0.scratchReg());
masm.passABIArg(FloatReg0);
masm.passABIArg(FloatReg1);
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, NumberMod), MacroAssembler::DOUBLE);
JS_ASSERT(ReturnFloatReg == FloatReg0);
break;
default:
JS_NOT_REACHED("Unexpected op");
return false;
}
masm.boxDouble(FloatReg0, R0);
EmitReturnFromIC(masm);
// Failure case - jump to next stub
masm.bind(&failure);
EmitStubGuardFailure(masm);
return true;
}
bool
ICBinaryArith_BooleanWithInt32::Compiler::generateStubCode(MacroAssembler &masm)
{
Label failure;
if (lhsIsBool_)
masm.branchTestBoolean(Assembler::NotEqual, R0, &failure);
else
masm.branchTestInt32(Assembler::NotEqual, R0, &failure);
if (rhsIsBool_)
masm.branchTestBoolean(Assembler::NotEqual, R1, &failure);
else
masm.branchTestInt32(Assembler::NotEqual, R1, &failure);
Register lhsReg = lhsIsBool_ ? masm.extractBoolean(R0, ExtractTemp0)
: masm.extractInt32(R0, ExtractTemp0);
Register rhsReg = rhsIsBool_ ? masm.extractBoolean(R1, ExtractTemp1)
: masm.extractInt32(R1, ExtractTemp1);
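// Boolean payloads are stored as 0 or 1, so once unboxed they can feed the
// int32 arithmetic below directly.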
JS_ASSERT(op_ == JSOP_ADD || op_ == JSOP_SUB ||
op_ == JSOP_BITOR || op_ == JSOP_BITXOR || op_ == JSOP_BITAND);
switch (op_) {
case JSOP_ADD: {
Label fixOverflow;
#if defined(JS_CPU_MIPS)
masm.branchAdd32(Assembler::Overflow, rhsReg, lhsReg, &fixOverflow);
masm.tagValue(JSVAL_TYPE_INT32, lhsReg, R0);
EmitReturnFromIC(masm);
masm.bind