blob: 69a5fa6856e57256e70a18721c96ab0182313b32 [file] [log] [blame]
/*
* Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
#include "DFGSpeculativeJIT.h"
#if ENABLE(DFG_JIT)
#include "Arguments.h"
#include "DFGCallArrayAllocatorSlowPathGenerator.h"
#include "DFGSlowPathGenerator.h"
#include "LinkBuffer.h"
namespace JSC { namespace DFG {
// Constructs the speculative code generator for one compilation. All tables
// (generation info, block heads, arguments, locals) are sized from the code
// block and graph owned by the JITCompiler.
SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
    : m_compileOkay(true) // Cleared when speculation fails and compilation must be abandoned.
    , m_jit(jit)
    , m_compileIndex(0)
    , m_indexInBlock(0)
    , m_generationInfo(m_jit.codeBlock()->m_numCalleeRegisters)
    , m_blockHeads(jit.graph().m_blocks.size())
    , m_arguments(jit.codeBlock()->numParameters())
    , m_variables(jit.graph().m_localVars)
    , m_lastSetOperand(std::numeric_limits<int>::max()) // Sentinel: no operand has been set yet.
    , m_state(m_jit.graph())
    , m_stream(&jit.codeBlock()->variableEventStream())
    , m_minifiedGraph(&jit.codeBlock()->minifiedDFG())
    , m_isCheckingArgumentTypes(false)
{
}
// The slow path generators are stored as raw owning pointers (appended via
// leakPtr() in addSlowPathGenerator), so they must be deleted explicitly here.
SpeculativeJIT::~SpeculativeJIT()
{
    WTF::deleteAllValues(m_slowPathGenerators);
}
// Emits inline-allocation code for a JSArray with the given structure and
// initial public length, leaving the array in resultGPR and its butterfly in
// storageGPR. Fast-path failures fall through to a slow-path call
// (operationNewArrayWithSize) via CallArrayAllocatorSlowPathGenerator.
void SpeculativeJIT::emitAllocateJSArray(Structure* structure, GPRReg resultGPR, GPRReg storageGPR, unsigned numElements)
{
    // Only the "simple" indexing shapes are supported here.
    ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));

    GPRTemporary scratch(this);
    GPRReg scratchGPR = scratch.gpr();

    // Allocate at least BASE_VECTOR_LEN slots even for smaller arrays.
    unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);

    JITCompiler::JumpList slowCases;

    // Allocate the butterfly (indexing header + vector), then rewind the
    // pointer so storageGPR points at the start of the element vector.
    slowCases.append(
        emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
    m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
    emitAllocateBasicJSObject<JSArray, MarkedBlock::None>(
        TrustedImmPtr(structure), resultGPR, scratchGPR,
        storageGPR, sizeof(JSArray), slowCases);

    // Initialize the indexing header.
    // I'm assuming that two 32-bit stores are better than a 64-bit store.
    // I have no idea if that's true. And it probably doesn't matter anyway.
    m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
    m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));

    if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
        // Double arrays use QNaN as the hole value, so fill the unused tail of
        // the vector with it.
#if USE(JSVALUE64)
        m_jit.move(TrustedImm64(bitwise_cast<int64_t>(QNaN)), scratchGPR);
        for (unsigned i = numElements; i < vectorLength; ++i)
            m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
#else
        // 32-bit: store the tag and payload halves of the QNaN separately.
        EncodedValueDescriptor value;
        value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, QNaN));
        for (unsigned i = numElements; i < vectorLength; ++i) {
            m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
            m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
        }
#endif
    }

    // I want a slow path that also loads out the storage pointer, and that's
    // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
    // of work for a very small piece of functionality. :-/
    addSlowPathGenerator(adoptPtr(
        new CallArrayAllocatorSlowPathGenerator(
            slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
            structure, numElements)));
}
// Registers an OSR exit that fires when jumpToFail is taken. nodeIndex
// identifies the node whose value profile the exit may use. No-op once
// compilation has been abandoned (m_compileOkay is false).
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex, MacroAssembler::Jump jumpToFail)
{
    if (!m_compileOkay)
        return;
    ASSERT(at(m_compileIndex).canExit() || m_isCheckingArgumentTypes);
    m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(nodeIndex), jumpToFail, this, m_stream->size()));
}
// Edge overload: unwraps the edge to its node index and forwards.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
{
    ASSERT(at(m_compileIndex).canExit() || m_isCheckingArgumentTypes);
    NodeIndex useIndex = nodeUse.index();
    speculationCheck(kind, jsValueSource, useIndex, jumpToFail);
}
// JumpList overload: registers one OSR exit per jump in the list.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex, const MacroAssembler::JumpList& jumpsToFail)
{
    ASSERT(at(m_compileIndex).canExit() || m_isCheckingArgumentTypes);
    Vector<MacroAssembler::Jump, 16> jumps = jumpsToFail.jumps();
    for (unsigned jumpIndex = 0; jumpIndex < jumps.size(); ++jumpIndex)
        speculationCheck(kind, jsValueSource, nodeIndex, jumps[jumpIndex]);
}
// Edge + JumpList overload: unwraps the edge and forwards.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
{
    ASSERT(at(m_compileIndex).canExit() || m_isCheckingArgumentTypes);
    NodeIndex useIndex = nodeUse.index();
    speculationCheck(kind, jsValueSource, useIndex, jumpsToFail);
}
// Like the plain speculationCheck, but also records a SpeculationRecovery
// describing how to undo a partially-performed operation before exiting. The
// recovery index passed to the OSRExit is numberOfSpeculationRecoveries()
// after appending, i.e. a 1-based reference to the recovery just added.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    if (!m_compileOkay)
        return;
    ASSERT(at(m_compileIndex).canExit() || m_isCheckingArgumentTypes);
    m_jit.codeBlock()->appendSpeculationRecovery(recovery);
    m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(nodeIndex), jumpToFail, this, m_stream->size(), m_jit.codeBlock()->numberOfSpeculationRecoveries()));
}
// Edge + recovery overload: unwraps the edge and forwards.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    ASSERT(at(m_compileIndex).canExit() || m_isCheckingArgumentTypes);
    NodeIndex useIndex = nodeUse.index();
    speculationCheck(kind, jsValueSource, useIndex, jumpToFail, recovery);
}
// Recovery + direction overload: emits a backward check first, then retargets
// the just-appended exit if a forward speculation was requested.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery, SpeculationDirection direction)
{
    speculationCheck(kind, jsValueSource, nodeIndex, jumpToFail, recovery);
    if (direction != ForwardSpeculation)
        return;
    convertLastOSRExitToForward();
}
// Registers an OSR exit triggered by a jump-replacement watchpoint instead of
// an emitted branch: the exit is appended with an empty Jump, then bound to a
// watchpoint planted at the current code location. Returns a pointer into the
// code block's watchpoint table, or 0 if compilation was already abandoned.
JumpReplacementWatchpoint* SpeculativeJIT::speculationWatchpoint(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex)
{
    if (!m_compileOkay)
        return 0;
    ASSERT(at(m_compileIndex).canExit() || m_isCheckingArgumentTypes);
    OSRExit& exit = m_jit.codeBlock()->osrExit(
        m_jit.codeBlock()->appendOSRExit(
            OSRExit(kind, jsValueSource,
                m_jit.graph().methodOfGettingAValueProfileFor(nodeIndex),
                JITCompiler::Jump(), this, m_stream->size())));
    exit.m_watchpointIndex = m_jit.codeBlock()->appendWatchpoint(
        JumpReplacementWatchpoint(m_jit.watchpointLabel()));
    return &m_jit.codeBlock()->watchpoint(exit.m_watchpointIndex);
}
// Convenience overload: no value source or node is associated with the exit.
JumpReplacementWatchpoint* SpeculativeJIT::speculationWatchpoint(ExitKind kind)
{
    JumpReplacementWatchpoint* watchpoint = speculationWatchpoint(kind, JSValueSource(), NoNode);
    return watchpoint;
}
// Retargets the most recently appended OSR exit so that, instead of
// re-executing the current bytecode (backward exit), execution resumes at the
// bytecode of the NEXT code origin (forward exit). If valueRecovery is given,
// the exit also records how to materialize the value being SetLocal'd so the
// baseline frame sees it. In some inlined-return situations this gives up and
// leaves the exit backward, which is safe because re-execution is harmless
// there.
void SpeculativeJIT::convertLastOSRExitToForward(const ValueRecovery& valueRecovery)
{
    if (!valueRecovery) {
        // No recovery supplied.
        // Check that either the current node is a SetLocal, or the preceding node was a
        // SetLocal with the same code origin.
        if (at(m_compileIndex).op() != SetLocal) {
            Node* setLocal = &at(m_jit.graph().m_blocks[m_block]->at(m_indexInBlock - 1));
            ASSERT_UNUSED(setLocal, setLocal->op() == SetLocal);
            ASSERT_UNUSED(setLocal, setLocal->codeOrigin == at(m_compileIndex).codeOrigin);
        }

        // Find the next node: scan forward for the first node with a different
        // code origin, and retarget the exit to that origin.
        unsigned indexInBlock = m_indexInBlock + 1;
        Node* node = 0;
        for (;;) {
            if (indexInBlock == m_jit.graph().m_blocks[m_block]->size()) {
                // This is an inline return. Give up and do a backwards speculation. This is safe
                // because an inline return has its own bytecode index and it's always safe to
                // reexecute that bytecode.
                // NOTE(review): if the very first iteration hits the end of the
                // block, 'node' is still null here and this ASSERT dereferences
                // it — appears to rely on that case never occurring; confirm.
                ASSERT(node->op() == Jump);
                return;
            }
            node = &at(m_jit.graph().m_blocks[m_block]->at(indexInBlock));
            if (node->codeOrigin != at(m_compileIndex).codeOrigin)
                break;
            indexInBlock++;
        }

        ASSERT(node->codeOrigin != at(m_compileIndex).codeOrigin);
        OSRExit& exit = m_jit.codeBlock()->lastOSRExit();
        exit.m_codeOrigin = node->codeOrigin;
        return;
    }

    // With a recovery: locate the SetLocal that follows the current node,
    // skipping an intervening Int32ToDouble and/or Flush/Phantom.
    unsigned setLocalIndexInBlock = m_indexInBlock + 1;
    Node* setLocal = &at(m_jit.graph().m_blocks[m_block]->at(setLocalIndexInBlock));
    bool hadInt32ToDouble = false;
    if (setLocal->op() == Int32ToDouble) {
        setLocal = &at(m_jit.graph().m_blocks[m_block]->at(++setLocalIndexInBlock));
        hadInt32ToDouble = true;
    }
    if (setLocal->op() == Flush || setLocal->op() == Phantom)
        setLocal = &at(m_jit.graph().m_blocks[m_block]->at(++setLocalIndexInBlock));

    // The SetLocal must (possibly through the Int32ToDouble) consume the
    // current node's value.
    if (hadInt32ToDouble)
        ASSERT(at(setLocal->child1()).child1() == m_compileIndex);
    else
        ASSERT(setLocal->child1() == m_compileIndex);
    ASSERT(setLocal->op() == SetLocal);
    ASSERT(setLocal->codeOrigin == at(m_compileIndex).codeOrigin);

    Node* nextNode = &at(m_jit.graph().m_blocks[m_block]->at(setLocalIndexInBlock + 1));
    if (nextNode->op() == Jump && nextNode->codeOrigin == at(m_compileIndex).codeOrigin) {
        // We're at an inlined return. Use a backward speculation instead.
        return;
    }
    ASSERT(nextNode->codeOrigin != at(m_compileIndex).codeOrigin);

    // Retarget the exit and record how to recover the SetLocal's value.
    OSRExit& exit = m_jit.codeBlock()->lastOSRExit();
    exit.m_codeOrigin = nextNode->codeOrigin;
    exit.m_lastSetOperand = setLocal->local();
    exit.m_valueRecoveryOverride = adoptRef(
        new ValueRecoveryOverride(setLocal->local(), valueRecovery));
}
// Plants a speculation watchpoint and immediately converts its exit to a
// forward exit.
JumpReplacementWatchpoint* SpeculativeJIT::forwardSpeculationWatchpoint(ExitKind kind)
{
    JumpReplacementWatchpoint* watchpoint = speculationWatchpoint(kind);
    convertLastOSRExitToForward();
    return watchpoint;
}
// Direction-aware watchpoint: plant a backward watchpoint, then retarget the
// exit forward if requested.
JumpReplacementWatchpoint* SpeculativeJIT::speculationWatchpoint(ExitKind kind, SpeculationDirection direction)
{
    JumpReplacementWatchpoint* watchpoint = speculationWatchpoint(kind);
    if (direction != ForwardSpeculation)
        return watchpoint;
    convertLastOSRExitToForward();
    return watchpoint;
}
// Emits a speculation check whose exit resumes at the next code origin
// (forward), materializing the given value recovery in the baseline frame.
void SpeculativeJIT::forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex, MacroAssembler::Jump jumpToFail, const ValueRecovery& valueRecovery)
{
    ASSERT(at(m_compileIndex).canExit() || m_isCheckingArgumentTypes);
    speculationCheck(kind, jsValueSource, nodeIndex, jumpToFail);
    convertLastOSRExitToForward(valueRecovery);
}
// JumpList overload: registers one forward speculation check per jump.
void SpeculativeJIT::forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex, const MacroAssembler::JumpList& jumpsToFail, const ValueRecovery& valueRecovery)
{
    ASSERT(at(m_compileIndex).canExit() || m_isCheckingArgumentTypes);
    Vector<MacroAssembler::Jump, 16> jumps = jumpsToFail.jumps();
    for (unsigned jumpIndex = 0; jumpIndex < jumps.size(); ++jumpIndex)
        forwardSpeculationCheck(kind, jsValueSource, nodeIndex, jumps[jumpIndex], valueRecovery);
}
// Direction dispatcher: route to the forward or backward flavor of the check.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex, MacroAssembler::Jump jumpToFail, SpeculationDirection direction)
{
    if (direction != ForwardSpeculation) {
        speculationCheck(kind, jsValueSource, nodeIndex, jumpToFail);
        return;
    }
    forwardSpeculationCheck(kind, jsValueSource, nodeIndex, jumpToFail);
}
// Emits an unconditional OSR exit at the current point and marks the rest of
// this compilation as dead (m_compileOkay = false): speculation has failed so
// badly that no code after this point will ever run.
void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, NodeIndex nodeIndex)
{
    ASSERT(at(m_compileIndex).canExit() || m_isCheckingArgumentTypes);
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("SpeculativeJIT was terminated.\n");
#endif
    if (!m_compileOkay)
        return;
    // An always-taken jump into the exit.
    speculationCheck(kind, jsValueRegs, nodeIndex, m_jit.jump());
    m_compileOkay = false;
}
// Edge overload: unwraps the edge and forwards.
void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
{
    ASSERT(at(m_compileIndex).canExit() || m_isCheckingArgumentTypes);
    NodeIndex useIndex = nodeUse.index();
    terminateSpeculativeExecution(kind, jsValueRegs, useIndex);
}
// Direction-aware termination: like terminateSpeculativeExecution above, but
// the unconditional exit honors the requested speculation direction.
void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, NodeIndex nodeIndex, SpeculationDirection direction)
{
    ASSERT(at(m_compileIndex).canExit() || m_isCheckingArgumentTypes);
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("SpeculativeJIT was terminated.\n");
#endif
    if (!m_compileOkay)
        return;
    speculationCheck(kind, jsValueRegs, nodeIndex, m_jit.jump(), direction);
    m_compileOkay = false;
}
// Takes ownership of the generator; it is freed in ~SpeculativeJIT via
// deleteAllValues.
void SpeculativeJIT::addSlowPathGenerator(PassOwnPtr<SlowPathGenerator> slowPathGenerator)
{
    SlowPathGenerator* generator = slowPathGenerator.leakPtr();
    m_slowPathGenerators.append(generator);
}
// Emits the code for every registered slow path. The size is re-read each
// iteration so generators appended during generation are also run.
void SpeculativeJIT::runSlowPathGenerators()
{
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("Running %lu slow path generators.\n", m_slowPathGenerators.size());
#endif
    for (unsigned generatorIndex = 0; generatorIndex < m_slowPathGenerators.size(); ++generatorIndex)
        m_slowPathGenerators[generatorIndex]->generate(this);
}
// On Windows (stdcall) we need to wrap fmod; on other platforms we can call it
// directly. On ARMv7 we wrap it so that the function pointer has the low bit
// set (points to Thumb code), which the DFG asserts for all operation
// pointers.
#if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
static double DFG_OPERATION fmodAsDFGOperation(double x, double y)
{
    return fmod(x, y);
}
#else
#define fmodAsDFGOperation fmod
#endif
// Resets all per-value generation info and empties both register banks,
// returning the allocator to a pristine state.
void SpeculativeJIT::clearGenerationInfo()
{
    for (unsigned index = 0; index < m_generationInfo.size(); ++index)
        m_generationInfo[index] = GenerationInfo();
    m_gprs = RegisterBank<GPRInfo>();
    m_fprs = RegisterBank<FPRInfo>();
}
// Maps a typed-array ArrayMode to the corresponding descriptor owned by the
// global data. Returns 0 for any non-typed-array mode.
const TypedArrayDescriptor* SpeculativeJIT::typedArrayDescriptor(ArrayMode arrayMode)
{
    switch (arrayMode.type()) {
    case Array::Int8Array:
        return &m_jit.globalData()->int8ArrayDescriptor();
    case Array::Int16Array:
        return &m_jit.globalData()->int16ArrayDescriptor();
    case Array::Int32Array:
        return &m_jit.globalData()->int32ArrayDescriptor();
    case Array::Uint8Array:
        return &m_jit.globalData()->uint8ArrayDescriptor();
    case Array::Uint8ClampedArray:
        return &m_jit.globalData()->uint8ClampedArrayDescriptor();
    case Array::Uint16Array:
        return &m_jit.globalData()->uint16ArrayDescriptor();
    case Array::Uint32Array:
        return &m_jit.globalData()->uint32ArrayDescriptor();
    case Array::Float32Array:
        return &m_jit.globalData()->float32ArrayDescriptor();
    case Array::Float64Array:
        return &m_jit.globalData()->float64ArrayDescriptor();
    default:
        return 0;
    }
}
// Emits a branch taken when tempGPR (holding the object's indexing type byte)
// does NOT match the given shape — or, with invert, when it DOES match.
// tempGPR is clobbered. The arrayClass decides whether the IsArray bit is part
// of the comparison.
JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape, bool invert)
{
    switch (arrayMode.arrayClass()) {
    case Array::OriginalArray: {
        // Callers are expected to have handled OriginalArray elsewhere.
        CRASH();
        JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
        return result;
    }

    case Array::Array:
        // Require both the IsArray bit and the exact shape.
        m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
        return m_jit.branch32(
            invert ? MacroAssembler::Equal : MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));

    default:
        // Non-array classes: only the shape bits matter.
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        return m_jit.branch32(invert ? MacroAssembler::Equal : MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
    }
}
// Emits branches taken when tempGPR (holding the indexing type byte) does not
// satisfy the given ArrayMode; with invert, taken when it does. tempGPR is
// clobbered. ArrayStorage/SlowPutArrayStorage use range checks because
// SlowPut accepts both shapes.
JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, bool invert)
{
    JITCompiler::JumpList result;

    switch (arrayMode.type()) {
    case Array::Int32:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape, invert);

    case Array::Double:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape, invert);

    case Array::Contiguous:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape, invert);

    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        ASSERT(!arrayMode.isJSArrayWithOriginalStructure());

        if (arrayMode.isJSArray()) {
            if (arrayMode.isSlowPut()) {
                if (invert) {
                    // Inverted SlowPut JSArray check: jump when IsArray is set
                    // AND the shape is within [ArrayStorage, SlowPutArrayStorage].
                    JITCompiler::Jump slow = m_jit.branchTest32(
                        MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray));
                    m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
                    m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
                    result.append(
                        m_jit.branch32(
                            MacroAssembler::BelowOrEqual, tempGPR,
                            TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));

                    slow.link(&m_jit);
                }

                // Non-inverted: fail if IsArray is clear, or if the shape is
                // outside the [ArrayStorage, SlowPutArrayStorage] range.
                // NOTE(review): when invert is true this tail also runs after
                // the inverted sequence above, with tempGPR already masked —
                // confirm this double-masking path is intentional.
                result.append(
                    m_jit.branchTest32(
                        MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
                m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
                m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
                result.append(
                    m_jit.branch32(
                        MacroAssembler::Above, tempGPR,
                        TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
                break;
            }

            // Plain ArrayStorage JSArray: IsArray + exact shape must match.
            m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
            result.append(
                m_jit.branch32(invert ? MacroAssembler::Equal : MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
            break;
        }

        // Non-JSArray: only the shape bits are examined.
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        if (arrayMode.isSlowPut()) {
            // Range check over the two acceptable storage shapes.
            m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
            result.append(
                m_jit.branch32(
                    invert ? MacroAssembler::BelowOrEqual : MacroAssembler::Above, tempGPR,
                    TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
            break;
        }
        result.append(
            m_jit.branch32(invert ? MacroAssembler::Equal : MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
        break;
    }
    default:
        CRASH();
        break;
    }

    return result;
}
// Emits the runtime check that the CheckArray node's base object actually has
// the array mode the node speculates on. Indexed-storage modes check the
// structure's indexing-type byte; other modes compare the structure's
// ClassInfo pointer. Skipped entirely if abstract interpretation already
// proved the check.
void SpeculativeJIT::checkArray(Node& node)
{
    ASSERT(node.arrayMode().isSpecific());
    ASSERT(!node.arrayMode().doesConversion());

    SpeculateCellOperand base(this, node.child1());
    GPRReg baseReg = base.gpr();

    const TypedArrayDescriptor* result = typedArrayDescriptor(node.arrayMode());

    if (node.arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node.child1()))) {
        // Statically proven; nothing to emit.
        noResult(m_compileIndex);
        return;
    }

    const ClassInfo* expectedClassInfo = 0;

    switch (node.arrayMode().type()) {
    case Array::String:
        expectedClassInfo = &JSString::s_info;
        break;
    case Array::Int32:
    case Array::Double:
    case Array::Contiguous:
    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        // Check the indexing-type byte from the structure.
        GPRTemporary temp(this);
        GPRReg tempGPR = temp.gpr();
        m_jit.loadPtr(
            MacroAssembler::Address(baseReg, JSCell::structureOffset()), tempGPR);
        m_jit.load8(MacroAssembler::Address(tempGPR, Structure::indexingTypeOffset()), tempGPR);
        speculationCheck(
            BadIndexingType, JSValueSource::unboxedCell(baseReg), NoNode,
            jumpSlowForUnwantedArrayMode(tempGPR, node.arrayMode()));

        noResult(m_compileIndex);
        return;
    }
    case Array::Arguments:
        expectedClassInfo = &Arguments::s_info;
        break;
    case Array::Int8Array:
    case Array::Int16Array:
    case Array::Int32Array:
    case Array::Uint8Array:
    case Array::Uint8ClampedArray:
    case Array::Uint16Array:
    case Array::Uint32Array:
    case Array::Float32Array:
    case Array::Float64Array:
        // Typed arrays: expect the class info from the descriptor.
        expectedClassInfo = result->m_classInfo;
        break;
    default:
        ASSERT_NOT_REACHED();
        break;
    }

    // Fall-through modes: compare the structure's ClassInfo pointer.
    GPRTemporary temp(this);
    m_jit.loadPtr(
        MacroAssembler::Address(baseReg, JSCell::structureOffset()), temp.gpr());
    speculationCheck(
        Uncountable, JSValueRegs(), NoNode,
        m_jit.branchPtr(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
            MacroAssembler::TrustedImmPtr(expectedClassInfo)));

    noResult(m_compileIndex);
}
// Emits code that converts the base object's storage to the mode the node
// requires (Arrayify/ArrayifyToStructure). Fast path: skip everything if the
// object already has the desired structure/shape. Slow path: call out to the
// appropriate operationEnsure* and then re-check. propertyReg, if valid, holds
// the index being accessed and guards against creating contiguous storage for
// sparse indices.
void SpeculativeJIT::arrayify(Node& node, GPRReg baseReg, GPRReg propertyReg)
{
    ASSERT(node.arrayMode().doesConversion());

    GPRTemporary temp(this);
    GPRTemporary structure;
    GPRReg tempGPR = temp.gpr();
    GPRReg structureGPR = InvalidGPRReg;

    // ArrayifyToStructure compares the structure pointer directly and needs no
    // extra register; the general case keeps the structure in its own GPR.
    if (node.op() != ArrayifyToStructure) {
        GPRTemporary realStructure(this);
        structure.adopt(realStructure);
        structureGPR = structure.gpr();
    }

    // We can skip all that comes next if we already have array storage.
    MacroAssembler::JumpList done;

    if (node.op() == ArrayifyToStructure) {
        done.append(m_jit.branchWeakPtr(
            JITCompiler::Equal,
            JITCompiler::Address(baseReg, JSCell::structureOffset()),
            node.structure()));
    } else {
        m_jit.loadPtr(
            MacroAssembler::Address(baseReg, JSCell::structureOffset()), structureGPR);

        m_jit.load8(
            MacroAssembler::Address(structureGPR, Structure::indexingTypeOffset()), tempGPR);

        // Inverted check: jump to 'done' when the shape already matches.
        done = jumpSlowForUnwantedArrayMode(tempGPR, node.arrayMode(), true);

        // Next check that the object does not intercept indexed accesses. If it does,
        // then this mode won't work.
        speculationCheck(
            BadIndexingType, JSValueSource::unboxedCell(baseReg), NoNode,
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(InterceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero)));
    }

    // If we're allegedly creating contiguous storage and the index is bogus, then
    // just don't.
    if (propertyReg != InvalidGPRReg) {
        switch (node.arrayMode().type()) {
        case Array::Int32:
        case Array::Double:
        case Array::Contiguous:
            speculationCheck(
                Uncountable, JSValueRegs(), NoNode,
                m_jit.branch32(
                    MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(MIN_SPARSE_ARRAY_INDEX)));
            break;
        default:
            break;
        }
    }

    // Now call out to create the array storage.
    silentSpillAllRegisters(tempGPR);
    switch (node.arrayMode().type()) {
    case Array::Int32:
        callOperation(operationEnsureInt32, tempGPR, baseReg);
        break;
    case Array::Double:
        callOperation(operationEnsureDouble, tempGPR, baseReg);
        break;
    case Array::Contiguous:
        if (node.arrayMode().conversion() == Array::RageConvert)
            callOperation(operationRageEnsureContiguous, tempGPR, baseReg);
        else
            callOperation(operationEnsureContiguous, tempGPR, baseReg);
        break;
    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage:
        callOperation(operationEnsureArrayStorage, tempGPR, baseReg);
        break;
    default:
        CRASH();
        break;
    }
    silentFillAllRegisters(tempGPR);

    if (node.op() == ArrayifyToStructure) {
        // The conversion must have produced exactly the expected structure.
        speculationCheck(
            BadIndexingType, JSValueSource::unboxedCell(baseReg), NoNode,
            m_jit.branchWeakPtr(
                JITCompiler::NotEqual,
                JITCompiler::Address(baseReg, JSCell::structureOffset()),
                node.structure()));
    } else {
        // Alas, we need to reload the structure because silent spilling does not save
        // temporaries. Nor would it be useful for it to do so. Either way we're talking
        // about a load.
        m_jit.loadPtr(
            MacroAssembler::Address(baseReg, JSCell::structureOffset()), structureGPR);

        // Finally, check that we have the kind of array storage that we wanted to get.
        // Note that this is a backwards speculation check, which will result in the
        // bytecode operation corresponding to this arrayification being reexecuted.
        // That's fine, since arrayification is not user-visible.
        m_jit.load8(
            MacroAssembler::Address(structureGPR, Structure::indexingTypeOffset()), structureGPR);
        speculationCheck(
            BadIndexingType, JSValueSource::unboxedCell(baseReg), NoNode,
            jumpSlowForUnwantedArrayMode(structureGPR, node.arrayMode()));
    }

    done.link(&m_jit);

    noResult(m_compileIndex);
}
// Operand-gathering wrapper: binds the base cell (and, when child2 is
// present, the integer index) and dispatches to the three-argument arrayify.
void SpeculativeJIT::arrayify(Node& node)
{
    ASSERT(node.arrayMode().isSpecific());

    SpeculateCellOperand base(this, node.child1());

    if (node.child2()) {
        SpeculateIntegerOperand property(this, node.child2());
        arrayify(node, base.gpr(), property.gpr());
        return;
    }

    arrayify(node, base.gpr(), InvalidGPRReg);
}
// Materializes the node's value as a storage pointer in a GPR, reloading it
// from its spill slot if necessary. Values that are not in DataFormatStorage
// are filled as cells (the cell pointer doubles as the storage pointer here).
GPRReg SpeculativeJIT::fillStorage(NodeIndex nodeIndex)
{
    Node& node = m_jit.graph()[nodeIndex];
    VirtualRegister virtualRegister = node.virtualRegister();
    GenerationInfo& info = m_generationInfo[virtualRegister];

    switch (info.registerFormat()) {
    case DataFormatNone: {
        if (info.spillFormat() == DataFormatStorage) {
            // Spilled storage pointer: reload it into a fresh register.
            GPRReg gpr = allocate();
            m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
            m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
            info.fillStorage(*m_stream, gpr);
            return gpr;
        }

        // Must be a cell; fill it as a cell and then return the pointer.
        return fillSpeculateCell(nodeIndex, BackwardSpeculation);
    }

    case DataFormatStorage: {
        // Already live in a register; just lock it.
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        return gpr;
    }

    default:
        return fillSpeculateCell(nodeIndex, BackwardSpeculation);
    }
}
// Consumes (releases a use of) each of the node's children, handling both
// the var-args child list and the fixed child1/child2/child3 slots.
void SpeculativeJIT::useChildren(Node& node)
{
    if (node.flags() & NodeHasVarArgs) {
        unsigned begin = node.firstChild();
        unsigned end = begin + node.numChildren();
        for (unsigned childIdx = begin; childIdx < end; ++childIdx) {
            Edge child = m_jit.graph().m_varArgChildren[childIdx];
            if (!!child)
                use(child);
        }
        return;
    }

    // Fixed children: a missing child implies all later children are missing.
    Edge first = node.child1();
    if (!first) {
        ASSERT(!node.child2() && !node.child3());
        return;
    }
    use(first);

    Edge second = node.child2();
    if (!second) {
        ASSERT(!node.child3());
        return;
    }
    use(second);

    Edge third = node.child3();
    if (!third)
        return;
    use(third);
}
// True when the value is an int32 constant or is currently represented as a
// strict (unboxed) integer.
bool SpeculativeJIT::isStrictInt32(NodeIndex nodeIndex)
{
    if (isInt32Constant(nodeIndex))
        return true;

    Node& target = m_jit.graph()[nodeIndex];
    GenerationInfo& info = m_generationInfo[target.virtualRegister()];
    return info.registerFormat() == DataFormatInteger;
}
// True when the value is provably an integer: an int32 constant, a node with
// an int32 result, or a value currently held in an integer representation.
bool SpeculativeJIT::isKnownInteger(NodeIndex nodeIndex)
{
    if (isInt32Constant(nodeIndex))
        return true;

    Node& target = m_jit.graph()[nodeIndex];
    if (target.hasInt32Result())
        return true;

    return m_generationInfo[target.virtualRegister()].isJSInteger();
}
// True when the value is provably numeric: a numeric constant, a node with a
// number result, or a value currently held as an integer or double.
bool SpeculativeJIT::isKnownNumeric(NodeIndex nodeIndex)
{
    if (isInt32Constant(nodeIndex) || isNumberConstant(nodeIndex))
        return true;

    Node& target = m_jit.graph()[nodeIndex];
    if (target.hasNumberResult())
        return true;

    GenerationInfo& info = m_generationInfo[target.virtualRegister()];
    return info.isJSInteger() || info.isJSDouble();
}
// True when the value's current representation is known to be a cell.
bool SpeculativeJIT::isKnownCell(NodeIndex nodeIndex)
{
    Node& target = m_jit.graph()[nodeIndex];
    GenerationInfo& info = m_generationInfo[target.virtualRegister()];
    return info.isJSCell();
}
// True when the value is provably not a cell: a non-cell constant, or a
// representation that is neither a cell nor an unknown JSValue.
bool SpeculativeJIT::isKnownNotCell(NodeIndex nodeIndex)
{
    Node& target = m_jit.graph()[nodeIndex];
    if (target.hasConstant() && !valueOfJSConstant(nodeIndex).isCell())
        return true;

    GenerationInfo& info = m_generationInfo[target.virtualRegister()];
    return !info.isJSCell() && !info.isUnknownJS();
}
// True when the value is provably not an int32: a non-int32 constant, or a
// representation already pinned to double, cell, or boolean.
bool SpeculativeJIT::isKnownNotInteger(NodeIndex nodeIndex)
{
    Node& target = m_jit.graph()[nodeIndex];
    if (target.hasConstant() && !valueOfJSConstant(nodeIndex).isInt32())
        return true;

    GenerationInfo& info = m_generationInfo[target.virtualRegister()];
    return info.isJSDouble() || info.isJSCell() || info.isJSBoolean();
}
// True when the value is provably not a number: a non-numeric constant, or a
// representation that is neither double, integer, nor unknown JSValue.
bool SpeculativeJIT::isKnownNotNumber(NodeIndex nodeIndex)
{
    Node& target = m_jit.graph()[nodeIndex];
    if (target.hasConstant() && !isNumberConstant(nodeIndex))
        return true;

    GenerationInfo& info = m_generationInfo[target.virtualRegister()];
    return !info.isJSDouble() && !info.isJSInteger() && !info.isUnknownJS();
}
// Static write-barrier helper: optionally counts the barrier (profiling
// builds) and marks the owner's card. All parameters are UNUSED_PARAM'd
// because the body compiles away unless GGC / profiling are enabled.
void SpeculativeJIT::writeBarrier(MacroAssembler& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2, WriteBarrierUseKind useKind)
{
    UNUSED_PARAM(jit);
    UNUSED_PARAM(owner);
    UNUSED_PARAM(scratch1);
    UNUSED_PARAM(scratch2);
    UNUSED_PARAM(useKind);

    // The scratch registers and owner must all be distinct.
    ASSERT(owner != scratch1);
    ASSERT(owner != scratch2);
    ASSERT(scratch1 != scratch2);

#if ENABLE(WRITE_BARRIER_PROFILING)
    JITCompiler::emitCount(jit, WriteBarrierCounters::jitCounterFor(useKind));
#endif
    markCellCard(jit, owner, scratch1, scratch2);
}
// Emits generational-GC card marking for the owner cell (no-op unless GGC is
// enabled): compute the owner's MarkedBlock, skip the store if the block's
// approximate mark filter says the owner is old, otherwise dirty the owner's
// card byte.
void SpeculativeJIT::markCellCard(MacroAssembler& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2)
{
    UNUSED_PARAM(jit);
    UNUSED_PARAM(owner);
    UNUSED_PARAM(scratch1);
    UNUSED_PARAM(scratch2);

#if ENABLE(GGC)
    // scratch1 = base address of the owner's MarkedBlock.
    jit.move(owner, scratch1);
    jit.andPtr(TrustedImm32(static_cast<int32_t>(MarkedBlock::blockMask)), scratch1);

    // scratch2 = index into the block's mark byte table.
    jit.move(owner, scratch2);
    // consume additional 8 bits as we're using an approximate filter
    jit.rshift32(TrustedImm32(MarkedBlock::atomShift + 8), scratch2);
    jit.andPtr(TrustedImm32(MarkedBlock::atomMask >> 8), scratch2);
    MacroAssembler::Jump filter = jit.branchTest8(MacroAssembler::Zero, MacroAssembler::BaseIndex(scratch1, scratch2, MacroAssembler::TimesOne, MarkedBlock::offsetOfMarks()));

    // Owner may be old: dirty its card.
    jit.move(owner, scratch2);
    jit.rshift32(TrustedImm32(MarkedBlock::cardShift), scratch2);
    jit.andPtr(TrustedImm32(MarkedBlock::cardMask), scratch2);
    jit.store8(TrustedImm32(1), MacroAssembler::BaseIndex(scratch1, scratch2, MacroAssembler::TimesOne, MarkedBlock::offsetOfCards()));
    filter.link(&jit);
#endif
}
// Write barrier for storing valueGPR into the object in ownerGPR. Statically
// elided when the value is known not to be a cell; otherwise (GGC builds)
// allocates scratch registers on demand, skips the card mark at runtime when
// the value turns out not to be a cell, and marks the owner's card.
void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind useKind, GPRReg scratch1, GPRReg scratch2)
{
    UNUSED_PARAM(ownerGPR);
    UNUSED_PARAM(valueGPR);
    UNUSED_PARAM(scratch1);
    UNUSED_PARAM(scratch2);
    UNUSED_PARAM(useKind);

    if (isKnownNotCell(valueUse.index()))
        return;

#if ENABLE(WRITE_BARRIER_PROFILING)
    JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
#endif

#if ENABLE(GGC)
    // Allocate scratches only if the caller did not provide them.
    GPRTemporary temp1;
    GPRTemporary temp2;
    if (scratch1 == InvalidGPRReg) {
        GPRTemporary scratchGPR(this);
        temp1.adopt(scratchGPR);
        scratch1 = temp1.gpr();
    }
    if (scratch2 == InvalidGPRReg) {
        GPRTemporary scratchGPR(this);
        temp2.adopt(scratchGPR);
        scratch2 = temp2.gpr();
    }

    // Runtime cell check, unless the value is statically known to be a cell.
    JITCompiler::Jump rhsNotCell;
    bool hadCellCheck = false;
    if (!isKnownCell(valueUse.index()) && !isCellSpeculation(m_jit.getSpeculation(valueUse.index()))) {
        hadCellCheck = true;
        rhsNotCell = m_jit.branchIfNotCell(valueGPR);
    }

    markCellCard(m_jit, ownerGPR, scratch1, scratch2);

    if (hadCellCheck)
        rhsNotCell.link(&m_jit);
#endif
}
// Write barrier for storing a compile-time-known cell into the object in
// ownerGPR. Statically elided when the value is already marked; otherwise
// (GGC builds) allocates scratches on demand and marks the owner's card.
void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, JSCell* value, WriteBarrierUseKind useKind, GPRReg scratch1, GPRReg scratch2)
{
    UNUSED_PARAM(ownerGPR);
    UNUSED_PARAM(value);
    UNUSED_PARAM(scratch1);
    UNUSED_PARAM(scratch2);
    UNUSED_PARAM(useKind);

    // A marked value can never trigger a barrier.
    if (Heap::isMarked(value))
        return;

#if ENABLE(WRITE_BARRIER_PROFILING)
    JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
#endif

#if ENABLE(GGC)
    // Allocate scratches only if the caller did not provide them.
    GPRTemporary temp1;
    GPRTemporary temp2;
    if (scratch1 == InvalidGPRReg) {
        GPRTemporary scratchGPR(this);
        temp1.adopt(scratchGPR);
        scratch1 = temp1.gpr();
    }
    if (scratch2 == InvalidGPRReg) {
        GPRTemporary scratchGPR(this);
        temp2.adopt(scratchGPR);
        scratch2 = temp2.gpr();
    }

    markCellCard(m_jit, ownerGPR, scratch1, scratch2);
#endif
}
// Write barrier for storing valueGPR into a compile-time-known owner cell.
// Since the owner is known, its card address can be computed at compile time
// and dirtied with a single store (GGC builds only). Elided when the value is
// known not to be a cell; guarded by a runtime cell check otherwise.
void SpeculativeJIT::writeBarrier(JSCell* owner, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind useKind, GPRReg scratch)
{
    UNUSED_PARAM(owner);
    UNUSED_PARAM(valueGPR);
    UNUSED_PARAM(scratch);
    UNUSED_PARAM(useKind);

    if (isKnownNotCell(valueUse.index()))
        return;

#if ENABLE(WRITE_BARRIER_PROFILING)
    JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
#endif

#if ENABLE(GGC)
    // Runtime cell check, unless the value is statically known to be a cell.
    JITCompiler::Jump rhsNotCell;
    bool hadCellCheck = false;
    if (!isKnownCell(valueUse.index()) && !isCellSpeculation(m_jit.getSpeculation(valueUse.index()))) {
        hadCellCheck = true;
        rhsNotCell = m_jit.branchIfNotCell(valueGPR);
    }

    // Allocate a scratch only if the caller did not provide one.
    GPRTemporary temp;
    if (scratch == InvalidGPRReg) {
        GPRTemporary scratchGPR(this);
        temp.adopt(scratchGPR);
        scratch = temp.gpr();
    }

    // Dirty the owner's card directly via its statically-known address.
    uint8_t* cardAddress = Heap::addressOfCardFor(owner);
    m_jit.move(JITCompiler::TrustedImmPtr(cardAddress), scratch);
    m_jit.store8(JITCompiler::TrustedImm32(1), JITCompiler::Address(scratch));

    if (hadCellCheck)
        rhsNotCell.link(&m_jit);
#endif
}
// Compiles a non-speculative comparison. If the compare feeds directly into a
// branch (peephole), fuse them and advance m_indexInBlock/m_compileIndex past
// the branch; returns true in that case so the caller skips the branch node.
bool SpeculativeJIT::nonSpeculativeCompare(Node& node, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction)
{
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        NodeIndex branchNodeIndex = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);

        // Fusing is only valid when the branch is the compare's sole use.
        ASSERT(node.adjustedRefCount() == 1);

        nonSpeculativePeepholeBranch(node, branchNodeIndex, cond, helperFunction);

        m_indexInBlock = branchIndexInBlock;
        m_compileIndex = branchNodeIndex;

        return true;
    }

    nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);

    return false;
}
// Compiles a non-speculative strict equality (===); invert selects !==. Same
// peephole branch fusion as nonSpeculativeCompare: returns true when the
// following branch was consumed.
bool SpeculativeJIT::nonSpeculativeStrictEq(Node& node, bool invert)
{
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        NodeIndex branchNodeIndex = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);

        // Fusing is only valid when the branch is the compare's sole use.
        ASSERT(node.adjustedRefCount() == 1);

        nonSpeculativePeepholeStrictEq(node, branchNodeIndex, invert);

        m_indexInBlock = branchIndexInBlock;
        m_compileIndex = branchNodeIndex;

        return true;
    }

    nonSpeculativeNonPeepholeStrictEq(node, invert);

    return false;
}
#ifndef NDEBUG
// Returns a fixed-width debug label for a DataFormat value, for use by dump().
// Out-of-range formats (anything at or past the end of the table, e.g.
// DataFormatStorage in configurations where it exceeds 15) yield "Err!"
// instead of reading out of bounds as the old code did.
static const char* dataFormatString(DataFormat format)
{
    // These values correspond to the DataFormat enum.
    static const char* const strings[] = {
        "[  ]",
        "[ i]",
        "[ d]",
        "[ c]",
        "Err!",
        "Err!",
        "Err!",
        "Err!",
        "[J ]",
        "[Ji]",
        "[Jd]",
        "[Jc]",
        "Err!",
        "Err!",
        "Err!",
        "Err!",
    };
    if (static_cast<unsigned>(format) >= sizeof(strings) / sizeof(strings[0]))
        return "Err!";
    return strings[format];
}
// Debug-only dump of the current register-allocation state: the GPR and FPR
// bank bindings, then one line per live virtual register showing its
// in-register format, spill format, and (when applicable) the machine
// register that holds it. 'label' wraps the output in <label>...</label>.
void SpeculativeJIT::dump(const char* label)
{
    if (label)
        dataLogF("<%s>\n", label);
    dataLogF(" gprs:\n");
    m_gprs.dump();
    dataLogF(" fprs:\n");
    m_fprs.dump();
    dataLogF(" VirtualRegisters:\n");
    for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
        GenerationInfo& info = m_generationInfo[i];
        if (info.alive())
            dataLogF(" % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
        else
            dataLogF(" % 3d:[__][__]", i);
        if (info.registerFormat() == DataFormatDouble)
            dataLogF(":fpr%d\n", info.fpr());
        else if (info.registerFormat() != DataFormatNone
#if USE(JSVALUE32_64)
            // On 32-bit, a JS-format value lives in a tag/payload register
            // pair, so there is no single GPR to report for it.
            && !(info.registerFormat() & DataFormatJS)
#endif
            ) {
            ASSERT(info.gpr() != InvalidGPRReg);
            dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
        } else
            dataLogF("\n");
    }
    if (label)
        dataLogF("</%s>\n", label);
}
#endif
#if DFG_ENABLE(CONSISTENCY_CHECK)
// Debug-only cross-check that the register banks and the per-virtual-register
// GenerationInfo agree: no register is left locked between nodes, every info
// that claims to occupy a register owns that register's name, and every named
// register maps back to an info stored in it. Dumps state and crashes on any
// mismatch.
void SpeculativeJIT::checkConsistency()
{
    bool failed = false;

    // No register should remain locked once a node has finished compiling.
    for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
        if (iter.isLocked()) {
            dataLogF("DFG_CONSISTENCY_CHECK failed: gpr %s is locked.\n", iter.debugName());
            failed = true;
        }
    }
    for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
        if (iter.isLocked()) {
            dataLogF("DFG_CONSISTENCY_CHECK failed: fpr %s is locked.\n", iter.debugName());
            failed = true;
        }
    }

    // Forward direction: each live value's claimed register must be named
    // after its virtual register.
    for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
        VirtualRegister virtualRegister = (VirtualRegister)i;
        GenerationInfo& info = m_generationInfo[virtualRegister];
        if (!info.alive())
            continue;
        switch (info.registerFormat()) {
        case DataFormatNone:
            break;
        case DataFormatJS:
        case DataFormatJSInteger:
        case DataFormatJSDouble:
        case DataFormatJSCell:
        case DataFormatJSBoolean:
#if USE(JSVALUE32_64)
            // On 32-bit, JS formats occupy a tag/payload pair; they are
            // validated from the register-bank side below instead. On 64-bit
            // they deliberately fall through to the single-GPR check.
            break;
#endif
        case DataFormatInteger:
        case DataFormatCell:
        case DataFormatBoolean:
        case DataFormatStorage: {
            GPRReg gpr = info.gpr();
            ASSERT(gpr != InvalidGPRReg);
            if (m_gprs.name(gpr) != virtualRegister) {
                dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for virtual register %d (gpr %s).\n", virtualRegister, GPRInfo::debugName(gpr));
                failed = true;
            }
            break;
        }
        case DataFormatDouble: {
            FPRReg fpr = info.fpr();
            ASSERT(fpr != InvalidFPRReg);
            if (m_fprs.name(fpr) != virtualRegister) {
                dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for virtual register %d (fpr %s).\n", virtualRegister, FPRInfo::debugName(fpr));
                failed = true;
            }
            break;
        }
        }
    }

    // Reverse direction: every register named after a virtual register must
    // actually be one of the registers its GenerationInfo reports.
    for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
        VirtualRegister virtualRegister = iter.name();
        if (virtualRegister == InvalidVirtualRegister)
            continue;
        GenerationInfo& info = m_generationInfo[virtualRegister];
#if USE(JSVALUE64)
        if (iter.regID() != info.gpr()) {
            dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
            failed = true;
        }
#else
        if (!(info.registerFormat() & DataFormatJS)) {
            if (iter.regID() != info.gpr()) {
                dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
                failed = true;
            }
        } else {
            // JS-format values own both a tag and a payload register.
            if (iter.regID() != info.tagGPR() && iter.regID() != info.payloadGPR()) {
                dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
                failed = true;
            }
        }
#endif
    }

    for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
        VirtualRegister virtualRegister = iter.name();
        if (virtualRegister == InvalidVirtualRegister)
            continue;
        GenerationInfo& info = m_generationInfo[virtualRegister];
        if (iter.regID() != info.fpr()) {
            dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for fpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
            failed = true;
        }
    }

    if (failed) {
        dump();
        CRASH();
    }
}
#endif
// Deferred temporary: constructed without an owning JIT or a register.
// Expected to receive both later via adopt().
GPRTemporary::GPRTemporary()
    : m_jit(0)
    , m_gpr(InvalidGPRReg)
{
}
// Allocate a fresh temporary GPR for the duration of this object's lifetime.
GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
    : m_jit(jit)
    , m_gpr(jit->allocate())
{
}
// Allocate a specific machine register as a temporary.
GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
    : m_jit(jit)
    , m_gpr(jit->allocate(specific))
{
}
// Allocate a temporary GPR, reusing op1's register if this is its last use.
GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateIntegerOperand& op1)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    m_gpr = m_jit->canReuse(op1.index()) ? m_jit->reuse(op1.gpr()) : m_jit->allocate();
}
// Allocate a temporary GPR, preferring to reuse op1's (then op2's) register
// when the operand is at its last use.
GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateIntegerOperand& op1, SpeculateIntegerOperand& op2)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    if (m_jit->canReuse(op1.index()))
        m_gpr = m_jit->reuse(op1.gpr());
    else
        m_gpr = m_jit->canReuse(op2.index()) ? m_jit->reuse(op2.gpr()) : m_jit->allocate();
}
// Allocate a temporary GPR, reusing op1's register if this is its last use.
GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateStrictInt32Operand& op1)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    m_gpr = m_jit->canReuse(op1.index()) ? m_jit->reuse(op1.gpr()) : m_jit->allocate();
}
// Allocate a temporary GPR, reusing op1's register if this is its last use.
GPRTemporary::GPRTemporary(SpeculativeJIT* jit, IntegerOperand& op1)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    m_gpr = m_jit->canReuse(op1.index()) ? m_jit->reuse(op1.gpr()) : m_jit->allocate();
}
// Allocate a temporary GPR, preferring to reuse op1's (then op2's) register
// when the operand is at its last use.
GPRTemporary::GPRTemporary(SpeculativeJIT* jit, IntegerOperand& op1, IntegerOperand& op2)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    if (m_jit->canReuse(op1.index()))
        m_gpr = m_jit->reuse(op1.gpr());
    else
        m_gpr = m_jit->canReuse(op2.index()) ? m_jit->reuse(op2.gpr()) : m_jit->allocate();
}
// Allocate a temporary GPR, reusing op1's register if this is its last use.
GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateCellOperand& op1)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    m_gpr = m_jit->canReuse(op1.index()) ? m_jit->reuse(op1.gpr()) : m_jit->allocate();
}
// Allocate a temporary GPR, reusing op1's register if this is its last use.
GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateBooleanOperand& op1)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    m_gpr = m_jit->canReuse(op1.index()) ? m_jit->reuse(op1.gpr()) : m_jit->allocate();
}
#if USE(JSVALUE64)
// Allocate a temporary GPR, reusing op1's register if this is its last use.
GPRTemporary::GPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    m_gpr = m_jit->canReuse(op1.index()) ? m_jit->reuse(op1.gpr()) : m_jit->allocate();
}
#else
// Allocate a temporary GPR; on 32-bit, a non-double JSValue operand at its
// last use can donate either its tag or payload register (per 'tag').
GPRTemporary::GPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1, bool tag)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    if (!op1.isDouble() && m_jit->canReuse(op1.index()))
        m_gpr = m_jit->reuse(tag ? op1.tagGPR() : op1.payloadGPR());
    else
        m_gpr = m_jit->allocate();
}
#endif
// Allocate a temporary GPR, reusing op1's register if this is its last use.
GPRTemporary::GPRTemporary(SpeculativeJIT* jit, StorageOperand& op1)
    : m_jit(jit)
    , m_gpr(InvalidGPRReg)
{
    m_gpr = m_jit->canReuse(op1.index()) ? m_jit->reuse(op1.gpr()) : m_jit->allocate();
}
// Transfer ownership of 'other''s register into this (previously empty)
// temporary, leaving 'other' disengaged so its destructor frees nothing.
void GPRTemporary::adopt(GPRTemporary& other)
{
    // This temporary must be empty, and the donor must be fully engaged.
    ASSERT(!m_jit);
    ASSERT(m_gpr == InvalidGPRReg);
    ASSERT(other.m_jit);
    ASSERT(other.m_gpr != InvalidGPRReg);
    m_jit = other.m_jit;
    m_gpr = other.m_gpr;
    other.m_jit = 0;
    other.m_gpr = InvalidGPRReg;
}
// Allocate a fresh temporary FPR for the duration of this object's lifetime.
FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
    : m_jit(jit)
    , m_fpr(jit->fprAllocate())
{
}
// Allocate a temporary FPR, reusing op1's register if this is its last use.
FPRTemporary::FPRTemporary(SpeculativeJIT* jit, DoubleOperand& op1)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    m_fpr = m_jit->canReuse(op1.index()) ? m_jit->reuse(op1.fpr()) : m_jit->fprAllocate();
}
// Allocate a temporary FPR, preferring to reuse op1's (then op2's) register
// when the operand is at its last use.
FPRTemporary::FPRTemporary(SpeculativeJIT* jit, DoubleOperand& op1, DoubleOperand& op2)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (m_jit->canReuse(op1.index()))
        m_fpr = m_jit->reuse(op1.fpr());
    else
        m_fpr = m_jit->canReuse(op2.index()) ? m_jit->reuse(op2.fpr()) : m_jit->fprAllocate();
}
// Allocate a temporary FPR, reusing op1's register if this is its last use.
FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    m_fpr = m_jit->canReuse(op1.index()) ? m_jit->reuse(op1.fpr()) : m_jit->fprAllocate();
}
// Allocate a temporary FPR, preferring to reuse op1's (then op2's) register
// when the operand is at its last use.
FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (m_jit->canReuse(op1.index()))
        m_fpr = m_jit->reuse(op1.fpr());
    else
        m_fpr = m_jit->canReuse(op2.index()) ? m_jit->reuse(op2.fpr()) : m_jit->fprAllocate();
}
#if USE(JSVALUE32_64)
// Allocate a temporary FPR; on 32-bit, a JSValue operand already holding a
// double can donate its FPR when this is its last use.
FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
    : m_jit(jit)
    , m_fpr(InvalidFPRReg)
{
    if (op1.isDouble() && m_jit->canReuse(op1.index()))
        m_fpr = m_jit->reuse(op1.fpr());
    else
        m_fpr = m_jit->fprAllocate();
}
#endif
// Compile a double compare fused with the following Branch node. Unlike the
// integer/object peephole branches, this does not swap taken/notTaken to fall
// through when taken is the next block — presumably because inverting a
// DoubleCondition is not straightforward in the presence of unordered (NaN)
// inputs; verify before "fixing".
void SpeculativeJIT::compilePeepHoleDoubleBranch(Node& node, NodeIndex branchNodeIndex, JITCompiler::DoubleCondition condition)
{
    Node& branchNode = at(branchNodeIndex);
    BlockIndex taken = branchNode.takenBlockIndex();
    BlockIndex notTaken = branchNode.notTakenBlockIndex();
    SpeculateDoubleOperand op1(this, node.child1());
    SpeculateDoubleOperand op2(this, node.child2());
    branchDouble(condition, op1.fpr(), op2.fpr(), taken);
    jump(notTaken);
}
// Compile an object-equality compare fused with the following Branch node.
// Both children are speculated to be non-string cells (strings are rejected
// via structure checks against the global string structure); when the
// masquerades-as-undefined watchpoint has fired, objects whose type-info
// flags include MasqueradesAsUndefined are rejected as well, so a plain
// pointer compare suffices for the equality itself.
void SpeculativeJIT::compilePeepHoleObjectEquality(Node& node, NodeIndex branchNodeIndex)
{
    Node& branchNode = at(branchNodeIndex);
    BlockIndex taken = branchNode.takenBlockIndex();
    BlockIndex notTaken = branchNode.notTakenBlockIndex();
    MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;

    // If the taken block is next, invert the condition and swap the targets so
    // we can fall through to it.
    if (taken == nextBlock()) {
        condition = MacroAssembler::NotEqual;
        BlockIndex tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    SpeculateCellOperand op1(this, node.child1());
    SpeculateCellOperand op2(this, node.child2());
    GPRReg op1GPR = op1.gpr();
    GPRReg op2GPR = op2.gpr();

    if (m_jit.graph().globalObjectFor(node.codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid()) {
        // Watchpoint still valid: register this compilation on it, and only
        // check that neither operand is a string.
        m_jit.graph().globalObjectFor(node.codeOrigin)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint());
        speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node.child1().index(),
            m_jit.branchPtr(
                MacroAssembler::Equal,
                MacroAssembler::Address(op1GPR, JSCell::structureOffset()),
                MacroAssembler::TrustedImmPtr(m_jit.globalData()->stringStructure.get())));
        speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node.child2().index(),
            m_jit.branchPtr(
                MacroAssembler::Equal,
                MacroAssembler::Address(op2GPR, JSCell::structureOffset()),
                MacroAssembler::TrustedImmPtr(m_jit.globalData()->stringStructure.get())));
    } else {
        // Watchpoint fired: additionally reject MasqueradesAsUndefined objects,
        // using a temporary to hold each operand's structure.
        GPRTemporary structure(this);
        GPRReg structureGPR = structure.gpr();

        m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR);
        speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node.child1().index(),
            m_jit.branchPtr(
                MacroAssembler::Equal,
                structureGPR,
                MacroAssembler::TrustedImmPtr(m_jit.globalData()->stringStructure.get())));
        speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node.child1().index(),
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));

        m_jit.loadPtr(MacroAssembler::Address(op2GPR, JSCell::structureOffset()), structureGPR);
        speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node.child2().index(),
            m_jit.branchPtr(
                MacroAssembler::Equal,
                structureGPR,
                MacroAssembler::TrustedImmPtr(m_jit.globalData()->stringStructure.get())));
        speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node.child2().index(),
            m_jit.branchTest8(
                MacroAssembler::NonZero,
                MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
                MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
    }

    // Object identity is pointer identity once the checks above have passed.
    branchPtr(condition, op1GPR, op2GPR, taken);
    jump(notTaken);
}
// Compile an int32 compare fused with the following Branch node, folding a
// constant operand into the branch immediate when possible.
void SpeculativeJIT::compilePeepHoleIntegerBranch(Node& node, NodeIndex branchNodeIndex, JITCompiler::RelationalCondition condition)
{
    Node& branchNode = at(branchNodeIndex);
    BlockIndex taken = branchNode.takenBlockIndex();
    BlockIndex notTaken = branchNode.notTakenBlockIndex();

    // The branch instruction will branch to the taken block.
    // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
    if (taken == nextBlock()) {
        condition = JITCompiler::invert(condition);
        BlockIndex tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    if (isInt32Constant(node.child1().index())) {
        // Constant on the left: emit an immediate/register compare.
        int32_t imm = valueOfInt32Constant(node.child1().index());
        SpeculateIntegerOperand op2(this, node.child2());
        branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
    } else if (isInt32Constant(node.child2().index())) {
        // Constant on the right: emit a register/immediate compare.
        SpeculateIntegerOperand op1(this, node.child1());
        int32_t imm = valueOfInt32Constant(node.child2().index());
        branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
    } else {
        // General register/register compare.
        SpeculateIntegerOperand op1(this, node.child1());
        SpeculateIntegerOperand op2(this, node.child2());
        branch32(condition, op1.gpr(), op2.gpr(), taken);
    }

    jump(notTaken);
}
// Returns true if the compare is fused with a subsequent branch.
// Dispatches on the speculated operand types, from most specific to least:
// integer, then double, then (for CompareEq only) the object-equality
// variants, falling back to the generic non-speculative peephole branch.
bool SpeculativeJIT::compilePeepHoleBranch(Node& node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_DFGOperation_EJJ operation)
{
    // Fused compare & branch.
    unsigned branchIndexInBlock = detectPeepHoleBranch();
    if (branchIndexInBlock != UINT_MAX) {
        NodeIndex branchNodeIndex = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);

        // detectPeepHoleBranch currently only permits the branch to be the very next node,
        // so can be no intervening nodes to also reference the compare.
        ASSERT(node.adjustedRefCount() == 1);

        if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2())))
            compilePeepHoleIntegerBranch(node, branchNodeIndex, condition);
        else if (Node::shouldSpeculateNumber(at(node.child1()), at(node.child2())))
            compilePeepHoleDoubleBranch(node, branchNodeIndex, doubleCondition);
        else if (node.op() == CompareEq) {
            if (at(node.child1()).shouldSpeculateString() || at(node.child2()).shouldSpeculateString()) {
                // NOTE(review): the nonSpeculativePeepholeBranch paths return
                // without executing the use() calls below — presumably that
                // helper consumes its children itself; verify before changing.
                nonSpeculativePeepholeBranch(node, branchNodeIndex, condition, operation);
                return true;
            }
            if (at(node.child1()).shouldSpeculateNonStringCell() && at(node.child2()).shouldSpeculateNonStringCellOrOther())
                compilePeepHoleObjectToObjectOrOtherEquality(node.child1(), node.child2(), branchNodeIndex);
            else if (at(node.child1()).shouldSpeculateNonStringCellOrOther() && at(node.child2()).shouldSpeculateNonStringCell())
                compilePeepHoleObjectToObjectOrOtherEquality(node.child2(), node.child1(), branchNodeIndex);
            else if (at(node.child1()).shouldSpeculateNonStringCell() && at(node.child2()).shouldSpeculateNonStringCell())
                compilePeepHoleObjectEquality(node, branchNodeIndex);
            else {
                nonSpeculativePeepholeBranch(node, branchNodeIndex, condition, operation);
                return true;
            }
        } else {
            nonSpeculativePeepholeBranch(node, branchNodeIndex, condition, operation);
            return true;
        }

        use(node.child1());
        use(node.child2());
        // Skip ahead: the fused branch has been compiled as part of this node.
        m_indexInBlock = branchIndexInBlock;
        m_compileIndex = branchNodeIndex;
        return true;
    }
    return false;
}
// Record, for the OSR exit machinery, that the given node's value comes into
// existence at this point in the variable-event stream. Nodes without a
// virtual register have no value to track.
void SpeculativeJIT::noticeOSRBirth(NodeIndex nodeIndex, Node& node)
{
    if (!node.hasVirtualRegister())
        return;

    VirtualRegister virtualRegister = node.virtualRegister();
    m_generationInfo[virtualRegister].noticeOSRBirth(*m_stream, nodeIndex, virtualRegister);
}
// Compile a dead SetLocal as a "mov hint": no code is emitted, but the OSR
// machinery is told that the child's value now logically lives in the local.
void SpeculativeJIT::compileMovHint(Node& node)
{
    ASSERT(node.op() == SetLocal);
    m_lastSetOperand = node.local();
    Node& child = at(node.child1());
    noticeOSRBirth(node.child1().index(), child);
    // A UInt32ToNumber child gets its own operand's birth recorded as well —
    // presumably because OSR sees through that conversion; verify.
    if (child.op() == UInt32ToNumber)
        noticeOSRBirth(child.child1().index(), at(child.child1()));
    m_stream->appendAndLog(VariableEvent::movHint(node.child1().index(), node.local()));
}
// Generate speculative code for one basic block: set up the value-source
// tracking for arguments and locals at the block head, then compile each node
// in order, interleaving abstract-interpreter state updates so every node
// sees the state produced by its predecessors.
void SpeculativeJIT::compile(BasicBlock& block)
{
    ASSERT(m_compileOkay);

    if (!block.isReachable)
        return;

    if (!block.cfaHasVisited) {
        // Don't generate code for basic blocks that are unreachable according to CFA.
        // But to be sure that nobody has generated a jump to this block, drop in a
        // breakpoint here.
#if !ASSERT_DISABLED
        m_jit.breakpoint();
#endif
        return;
    }

    m_blockHeads[m_block] = m_jit.label();
#if DFG_ENABLE(JIT_BREAK_ON_EVERY_BLOCK)
    m_jit.breakpoint();
#endif

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("Setting up state for block #%u: ", m_block);
#endif

    m_stream->appendAndLog(VariableEvent::reset());

    m_jit.jitAssertHasValidCallFrame();

    // At a block head, every argument is assumed to live in the JS stack.
    ASSERT(m_arguments.size() == block.variablesAtHead.numberOfArguments());
    for (size_t i = 0; i < m_arguments.size(); ++i) {
        ValueSource valueSource = ValueSource(ValueInJSStack);
        m_arguments[i] = valueSource;
        m_stream->appendAndLog(VariableEvent::setLocal(argumentToOperand(i), valueSource.dataFormat()));
    }

    m_state.reset();
    m_state.beginBasicBlock(&block);

    // Derive each local's value source from the node heading it in this block.
    ASSERT(m_variables.size() == block.variablesAtHead.numberOfLocals());
    for (size_t i = 0; i < m_variables.size(); ++i) {
        NodeIndex nodeIndex = block.variablesAtHead.local(i);
        ValueSource valueSource;
        if (nodeIndex == NoNode)
            valueSource = ValueSource(SourceIsDead);
        else if (at(nodeIndex).variableAccessData()->isArgumentsAlias())
            valueSource = ValueSource(ArgumentsSource);
        else if (at(nodeIndex).variableAccessData()->isCaptured())
            valueSource = ValueSource(ValueInJSStack);
        else if (!at(nodeIndex).refCount())
            valueSource = ValueSource(SourceIsDead);
        else if (at(nodeIndex).variableAccessData()->shouldUseDoubleFormat())
            valueSource = ValueSource(DoubleInJSStack);
        else
            valueSource = ValueSource::forSpeculation(at(nodeIndex).variableAccessData()->argumentAwarePrediction());
        m_variables[i] = valueSource;
        m_stream->appendAndLog(VariableEvent::setLocal(i, valueSource.dataFormat()));
    }

    m_lastSetOperand = std::numeric_limits<int>::max();
    m_codeOriginForOSR = CodeOrigin();

    // With edge code verification, the jumping block left its target's index
    // in regT0; trap if it does not match this block.
    if (DFG_ENABLE_EDGE_CODE_VERIFICATION) {
        JITCompiler::Jump verificationSucceeded =
            m_jit.branch32(JITCompiler::Equal, GPRInfo::regT0, TrustedImm32(m_block));
        m_jit.breakpoint();
        verificationSucceeded.link(&m_jit);
    }

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("\n");
#endif

    for (m_indexInBlock = 0; m_indexInBlock < block.size(); ++m_indexInBlock) {
        m_compileIndex = block[m_indexInBlock];
        m_jit.setForNode(m_compileIndex);
        Node& node = at(m_compileIndex);
        m_codeOriginForOSR = node.codeOrigin;
        if (!node.shouldGenerate()) {
            // Dead nodes produce no code, but some still need bookkeeping for
            // OSR exit and the minified graph.
#if DFG_ENABLE(DEBUG_VERBOSE)
            dataLogF("SpeculativeJIT skipping Node @%d (bc#%u) at JIT offset 0x%x ", (int)m_compileIndex, node.codeOrigin.bytecodeIndex, m_jit.debugOffset());
#endif
            switch (node.op()) {
            case JSConstant:
                m_minifiedGraph->append(MinifiedNode::fromNode(m_compileIndex, node));
                break;
            case WeakJSConstant:
                // Keep the referenced cell alive even though nothing uses it here.
                m_jit.addWeakReference(node.weakConstant());
                m_minifiedGraph->append(MinifiedNode::fromNode(m_compileIndex, node));
                break;
            case SetLocal:
                // A dead SetLocal becomes a mov hint for OSR exit.
                compileMovHint(node);
                break;
            case InlineStart: {
                // Record, for each argument of the inlined call frame, how its
                // value can be recovered.
                InlineCallFrame* inlineCallFrame = node.codeOrigin.inlineCallFrame;
                int argumentCountIncludingThis = inlineCallFrame->arguments.size();
                unsigned argumentPositionStart = node.argumentPositionStart();
                CodeBlock* codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);
                for (int i = 0; i < argumentCountIncludingThis; ++i) {
                    ValueRecovery recovery;
                    if (codeBlock->isCaptured(argumentToOperand(i)))
                        recovery = ValueRecovery::alreadyInJSStack();
                    else {
                        // Pick a stack format matching the argument's prediction.
                        ArgumentPosition& argumentPosition =
                            m_jit.graph().m_argumentPositions[argumentPositionStart + i];
                        ValueSource valueSource;
                        if (argumentPosition.shouldUseDoubleFormat())
                            valueSource = ValueSource(DoubleInJSStack);
                        else if (isInt32Speculation(argumentPosition.prediction()))
                            valueSource = ValueSource(Int32InJSStack);
                        else if (isCellSpeculation(argumentPosition.prediction()))
                            valueSource = ValueSource(CellInJSStack);
                        else if (isBooleanSpeculation(argumentPosition.prediction()))
                            valueSource = ValueSource(BooleanInJSStack);
                        else
                            valueSource = ValueSource(ValueInJSStack);
                        recovery = computeValueRecoveryFor(valueSource);
                    }
                    // The recovery should refer either to something that has already been
                    // stored into the stack at the right place, or to a constant,
                    // since the Arguments code isn't smart enough to handle anything else.
                    // The exception is the this argument, which we don't really need to be
                    // able to recover.
#if DFG_ENABLE(DEBUG_VERBOSE)
                    dataLogF("\nRecovery for argument %d: ", i);
                    recovery.dump(WTF::dataFile());
#endif
                    inlineCallFrame->arguments[i] = recovery;
                }
                break;
            }
            default:
                if (belongsInMinifiedGraph(node.op()))
                    m_minifiedGraph->append(MinifiedNode::fromNode(m_compileIndex, node));
                break;
            }
        } else {
#if DFG_ENABLE(DEBUG_VERBOSE)
            dataLogF("SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x ", (int)m_compileIndex, node.codeOrigin.bytecodeIndex, m_jit.debugOffset());
#endif
#if DFG_ENABLE(JIT_BREAK_ON_EVERY_NODE)
            m_jit.breakpoint();
#endif
#if DFG_ENABLE(XOR_DEBUG_AID)
            // Self-cancelling xor pair marks node boundaries in disassembly.
            m_jit.xorPtr(JITCompiler::TrustedImm32(m_compileIndex), GPRInfo::regT0);
            m_jit.xorPtr(JITCompiler::TrustedImm32(m_compileIndex), GPRInfo::regT0);
#endif
            checkConsistency();
            compile(node);
            if (!m_compileOkay) {
                // Speculation terminated inside this node; reset the flag and
                // abandon the rest of the block.
                m_compileOkay = true;
                clearGenerationInfo();
                return;
            }

            if (belongsInMinifiedGraph(node.op())) {
                m_minifiedGraph->append(MinifiedNode::fromNode(m_compileIndex, node));
                noticeOSRBirth(m_compileIndex, node);
            }

#if DFG_ENABLE(DEBUG_VERBOSE)
            // Report where the node's result ended up (register and format).
            if (node.hasResult()) {
                GenerationInfo& info = m_generationInfo[node.virtualRegister()];
                dataLogF("-> %s, vr#%d", dataFormatToString(info.registerFormat()), (int)node.virtualRegister());
                if (info.registerFormat() != DataFormatNone) {
                    if (info.registerFormat() == DataFormatDouble)
                        dataLogF(", %s", FPRInfo::debugName(info.fpr()));
#if USE(JSVALUE32_64)
                    else if (info.registerFormat() & DataFormatJS)
                        dataLogF(", %s %s", GPRInfo::debugName(info.tagGPR()), GPRInfo::debugName(info.payloadGPR()));
#endif
                    else
                        dataLogF(", %s", GPRInfo::debugName(info.gpr()));
                }
                dataLogF(" ");
            } else
                dataLogF(" ");
#endif
        }

#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLogF("\n");
#endif

        // Make sure that the abstract state is rematerialized for the next node.
        m_state.execute(m_indexInBlock);

        if (node.shouldGenerate())
            checkConsistency();
    }

    // Perform the most basic verification that children have been used correctly.
#if !ASSERT_DISABLED
    for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
        GenerationInfo& info = m_generationInfo[index];
        ASSERT(!info.alive());
    }
#endif
}
// If we are making type predictions about our arguments then
// we need to check that they are correct on function entry. Emits one
// speculation check per live, predicted argument; failing a check OSR-exits
// before any speculative code runs.
void SpeculativeJIT::checkArgumentTypes()
{
    ASSERT(!m_compileIndex);
    m_isCheckingArgumentTypes = true;
    m_codeOriginForOSR = CodeOrigin(0);

    // Before any code has run, every argument and local lives in the JS stack.
    for (size_t i = 0; i < m_arguments.size(); ++i)
        m_arguments[i] = ValueSource(ValueInJSStack);
    for (size_t i = 0; i < m_variables.size(); ++i)
        m_variables[i] = ValueSource(ValueInJSStack);

    for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
        NodeIndex nodeIndex = m_jit.graph().m_arguments[i];
        Node& node = at(nodeIndex);
        ASSERT(node.op() == SetArgument);
        if (!node.shouldGenerate()) {
            // The argument is dead. We don't do any checks for such arguments.
            continue;
        }

        VariableAccessData* variableAccessData = node.variableAccessData();
        VirtualRegister virtualRegister = variableAccessData->local();
        SpeculatedType predictedType = variableAccessData->prediction();
        JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));

#if USE(JSVALUE64)
        // 64-bit: checks are performed against the boxed value in the stack
        // slot, using the tag registers for the encoding invariants.
        if (isInt32Speculation(predictedType))
            speculationCheck(BadType, valueSource, nodeIndex, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
        else if (isBooleanSpeculation(predictedType)) {
            // Booleans are ValueFalse or ValueTrue; xoring with ValueFalse
            // leaves 0 or 1, so any bit other than bit 0 means "not boolean".
            GPRTemporary temp(this);
            m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
            m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
            speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
        } else if (isCellSpeculation(predictedType))
            speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
#else
        // 32-bit: the tag word alone identifies the type.
        if (isInt32Speculation(predictedType))
            speculationCheck(BadType, valueSource, nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
        else if (isBooleanSpeculation(predictedType))
            speculationCheck(BadType, valueSource, nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
        else if (isCellSpeculation(predictedType))
            speculationCheck(BadType, valueSource, nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
#endif
    }
    m_isCheckingArgumentTypes = false;
}
// Top-level entry: verify argument type predictions, then compile every basic
// block in order, and finally link the inter-block branches.
bool SpeculativeJIT::compile()
{
    checkArgumentTypes();

    // Edge code verification expects regT0 to carry the target block index.
    if (DFG_ENABLE_EDGE_CODE_VERIFICATION)
        m_jit.move(TrustedImm32(0), GPRInfo::regT0);

    ASSERT(!m_compileIndex);
    for (m_block = 0; m_block < m_jit.graph().m_blocks.size(); ++m_block) {
        m_jit.setForBlock(m_block);
        if (BasicBlock* block = m_jit.graph().m_blocks[m_block].get())
            compile(*block);
    }
    linkBranches();
    return true;
}
// Collect an entry label for every OSR-target block, emitting a trampoline
// only when edge code verification requires one.
void SpeculativeJIT::createOSREntries()
{
    for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().m_blocks.size(); ++blockIndex) {
        BasicBlock* block = m_jit.graph().m_blocks[blockIndex].get();
        if (!block)
            continue;
        if (!block->isOSRTarget)
            continue;

        // Currently we only need to create OSR entry trampolines when using edge code
        // verification. But in the future, we'll need this for other things as well (like
        // when we have global reg alloc).
        // If we don't need OSR entry trampolines, the block's head label itself
        // serves as the OSR entry point.
        if (!DFG_ENABLE_EDGE_CODE_VERIFICATION) {
            m_osrEntryHeads.append(m_blockHeads[blockIndex]);
            continue;
        }

        // Trampoline: load the block index into regT0 (checked at the block
        // head) and jump to the block.
        m_osrEntryHeads.append(m_jit.label());
        m_jit.move(TrustedImm32(blockIndex), GPRInfo::regT0);
        m_jit.jump().linkTo(m_blockHeads[blockIndex], &m_jit);
    }
}
// Hand every OSR-target block's entry label to the JIT so it can be resolved
// to a real machine-code address through the link buffer. Consumes the labels
// gathered by createOSREntries() in the same order.
void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
{
    unsigned osrEntryIndex = 0;
    for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().m_blocks.size(); ++blockIndex) {
        BasicBlock* block = m_jit.graph().m_blocks[blockIndex].get();
        if (!block || !block->isOSRTarget)
            continue;
        m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
    }
    ASSERT(osrEntryIndex == m_osrEntryHeads.size());
}
// Translate a ValueSource into a ValueRecovery for OSR exit: stack-resident
// sources carry their own recovery, constant nodes recover to their constant,
// and anything else yields an empty recovery.
ValueRecovery SpeculativeJIT::computeValueRecoveryFor(const ValueSource& valueSource)
{
    if (valueSource.isInJSStack())
        return valueSource.valueRecovery();

    ASSERT(valueSource.kind() == HaveNode);
    NodeIndex nodeIndex = valueSource.nodeIndex();
    if (isConstant(nodeIndex))
        return ValueRecovery::constant(valueOfJSConstant(nodeIndex));

    return ValueRecovery();
}
// Emit a PutByVal/PutByValAlias store of a speculated-double value into a
// double-typed array butterfly. 'base' and 'property' were loaded by the
// caller; var-arg children 2 and 3 of the node are the value and the storage.
// Fix: the PutByValAlias fast path previously redeclared 'propertyReg' and
// 'valueReg', shadowing the identical outer locals; the redundant shadowing
// declarations are removed.
void SpeculativeJIT::compileDoublePutByVal(Node& node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
{
    Edge child3 = m_jit.graph().varArgChild(node, 2);
    Edge child4 = m_jit.graph().varArgChild(node, 3);

    ArrayMode arrayMode = node.arrayMode();

    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();

    SpeculateDoubleOperand value(this, child3);
    FPRReg valueReg = value.fpr();

    // Double arrays must hold real numbers, so speculate that the value is not
    // NaN. NaN is the only value unordered with itself, hence the
    // self-comparison.
    if (!isRealNumberSpeculation(m_state.forNode(child3).m_type)) {
        // FIXME: We need a way of profiling these, and we need to hoist them into
        // SpeculateDoubleOperand.
        speculationCheck(
            BadType, JSValueRegs(), NoNode,
            m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
    }

    if (!m_compileOkay)
        return;

    StorageOperand storage(this, child4);
    GPRReg storageReg = storage.gpr();

    if (node.op() == PutByValAlias) {
        // The bounds check was already performed on the aliased access, so just
        // store the value to the array.
        m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));

        noResult(m_compileIndex);
        return;
    }

    GPRTemporary temporary;
    GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);

    // 'slowCase' is only assigned (and only consulted at the end) on the
    // non-in-bounds path; the two uses are guarded by mutually consistent
    // arrayMode checks.
    MacroAssembler::Jump slowCase;

    if (arrayMode.isInBounds()) {
        // In-bounds mode: any out-of-bounds index fails speculation outright.
        speculationCheck(
            StoreToHoleOrOutOfBounds, JSValueRegs(), NoNode,
            m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
    } else {
        // Otherwise: indices within the public length store directly; indices
        // within the vector length grow the public length; anything past the
        // vector length is a slow case (or a speculation failure when the mode
        // does not allow out-of-bounds stores).
        MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));

        slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
        if (!arrayMode.isOutOfBounds())
            speculationCheck(OutOfBounds, JSValueRegs(), NoNode, slowCase);

        // Grow the public length to cover the stored index.
        m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
        m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));

        inBounds.link(&m_jit);
    }

    m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));

    base.use();
    property.use();
    value.use();
    storage.use();

    if (arrayMode.isOutOfBounds()) {
        // Out-of-bounds stores fall back to a C++ helper.
        addSlowPathGenerator(
            slowPathCall(
                slowCase, this,
                m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
                NoResult, baseReg, propertyReg, valueReg));
    }

    noResult(m_compileIndex, UseChildrenCalledExplicitly);
}
// Compile String#charCodeAt-style access: load the index'th character of the
// string as an int32 result, with a speculation check that the index is in
// bounds and separate load paths for 8-bit and 16-bit string storage.
void SpeculativeJIT::compileGetCharCodeAt(Node& node)
{
    SpeculateCellOperand string(this, node.child1());
    SpeculateStrictInt32Operand index(this, node.child2());
    StorageOperand storage(this, node.child3());
    GPRReg stringReg = string.gpr();
    GPRReg indexReg = index.gpr();
    GPRReg storageReg = storage.gpr();

    ASSERT(speculationChecked(m_state.forNode(node.child1()).m_type, SpecString));

    // unsigned comparison so we can filter out negative indices and indices that are too large
    speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));

    GPRTemporary scratch(this);
    GPRReg scratchReg = scratch.gpr();

    // scratchReg first holds the StringImpl pointer, then the character.
    m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);

    // Load the character into scratchReg, choosing the 8-bit path when the
    // impl's flags say the string is 8-bit, else the 16-bit path.
    JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
    m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
    JITCompiler::Jump cont8Bit = m_jit.jump();
    is16Bit.link(&m_jit);
    m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
    cont8Bit.link(&m_jit);

    integerResult(scratchReg, m_compileIndex);
}
// Compile a GetByVal on a string: fetch the property'th character and produce
// the corresponding JSString cell from the global single-character string
// table, failing speculation for out-of-bounds indices, non-Latin-1
// characters, and characters absent from the table.
void SpeculativeJIT::compileGetByValOnString(Node& node)
{
    SpeculateCellOperand base(this, node.child1());
    SpeculateStrictInt32Operand property(this, node.child2());
    StorageOperand storage(this, node.child3());
    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();
    GPRReg storageReg = storage.gpr();

    ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node.child1())));

    // unsigned comparison so we can filter out negative indices and indices that are too large
    speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(baseReg, JSString::offsetOfLength())));

    GPRTemporary scratch(this);
    GPRReg scratchReg = scratch.gpr();

    // scratchReg first holds the StringImpl pointer, then the character code,
    // then the resulting JSString cell.
    m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);

    // Load the character into scratchReg
    JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
    m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
    JITCompiler::Jump cont8Bit = m_jit.jump();
    is16Bit.link(&m_jit);
    m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);

    // We only support ascii characters
    speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100)));

    // 8 bit string values don't need the isASCII check.
    cont8Bit.link(&m_jit);

    // Map the character code to its cached single-character JSString.
    GPRTemporary smallStrings(this);
    GPRReg smallStringsReg = smallStrings.gpr();
    m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.globalData()->smallStrings.singleCharacterStrings()), smallStringsReg);
    m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, scratchReg, MacroAssembler::ScalePtr, 0), scratchReg);

    // Fail speculation if the table has no cached string for this character.
    speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
    cellResult(scratchReg, m_compileIndex);
}
// Decide which code path a ToInt32 conversion should take for its operand,
// based on the operand's current representation: a constant, a double spill,
// or the format it currently occupies a register in. Terminates speculative
// execution for representations that cannot convert (non-numeric constants,
// known cells).
GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(NodeIndex nodeIndex)
{
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("checkGeneratedTypeForToInt32@%d ", nodeIndex);
#endif
    Node& node = at(nodeIndex);
    VirtualRegister virtualRegister = node.virtualRegister();
    GenerationInfo& info = m_generationInfo[virtualRegister];

    // Not currently in a register: classify by constant value or spill format
    // before falling through to the register-format switch.
    if (info.registerFormat() == DataFormatNone) {
        if (node.hasConstant()) {
            if (isInt32Constant(nodeIndex))
                return GeneratedOperandInteger;
            if (isNumberConstant(nodeIndex))
                return GeneratedOperandDouble;
            // Non-numeric constant: this speculation cannot succeed.
            terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
            return GeneratedOperandTypeUnknown;
        }
        if (info.spillFormat() == DataFormatDouble)
            return GeneratedOperandDouble;
    }

    switch (info.registerFormat()) {
    case DataFormatBoolean: // This type never occurs.
    case DataFormatStorage:
        ASSERT_NOT_REACHED();
    case DataFormatCell:
        // A known cell can never convert via this path.
        terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
        return GeneratedOperandTypeUnknown;
    case DataFormatNone:
    case DataFormatJSCell:
    case DataFormatJS:
    case DataFormatJSBoolean:
        return GeneratedOperandJSValue;
    case DataFormatJSInteger:
    case DataFormatInteger:
        return GeneratedOperandInteger;
    case DataFormatJSDouble:
    case DataFormatDouble:
        return GeneratedOperandDouble;
    default:
        ASSERT_NOT_REACHED();
        return GeneratedOperandTypeUnknown;
    }
}
// Convert the node's child to an int32. The code path is chosen from the
// child's speculated type: integer pass-through, double truncation (with a
// slow-path call to toInt32() when the truncation fails), generic boxed
// JSValue unboxing, or boolean masking. Anything else falls back to the
// non-speculative conversion.
void SpeculativeJIT::compileValueToInt32(Node& node)
{
    if (at(node.child1()).shouldSpeculateInteger()) {
        // Already an int32: just copy it into the result register.
        SpeculateIntegerOperand op1(this, node.child1());
        GPRTemporary result(this, op1);
        m_jit.move(op1.gpr(), result.gpr());
        integerResult(result.gpr(), m_compileIndex, op1.format());
        return;
    }
    
    if (at(node.child1()).shouldSpeculateNumber()) {
        switch (checkGeneratedTypeForToInt32(node.child1().index())) {
        case GeneratedOperandInteger: {
            SpeculateIntegerOperand op1(this, node.child1());
            GPRTemporary result(this, op1);
            m_jit.move(op1.gpr(), result.gpr());
            integerResult(result.gpr(), m_compileIndex, op1.format());
            return;
        }
        case GeneratedOperandDouble: {
            // Fast path: truncate the double directly; if the truncation
            // fails, a slow-path call to toInt32() produces the result.
            GPRTemporary result(this);
            DoubleOperand op1(this, node.child1());
            FPRReg fpr = op1.fpr();
            GPRReg gpr = result.gpr();
            JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
            
            addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr));

            integerResult(gpr, m_compileIndex);
            return;
        }
        case GeneratedOperandJSValue: {
            GPRTemporary result(this);
#if USE(JSVALUE64)
            JSValueOperand op1(this, node.child1());

            GPRReg gpr = op1.gpr();
            GPRReg resultGpr = result.gpr();
            FPRTemporary tempFpr(this);
            FPRReg fpr = tempFpr.fpr();

            JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);

            // Unless the abstract state already proves "number", OSR-exit on
            // values that are neither int32 nor double.
            if (!isNumberSpeculation(m_state.forNode(node.child1()).m_type))
                speculationCheck(BadType, JSValueRegs(gpr), node.child1().index(), m_jit.branchTest64(MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));

            // First, if we get here we have a double encoded as a JSValue
            m_jit.move(gpr, resultGpr);
            unboxDouble(resultGpr, fpr);

            // Calling out clobbers registers, so spill/fill around the call.
            silentSpillAllRegisters(resultGpr);
            callOperation(toInt32, resultGpr, fpr);
            silentFillAllRegisters(resultGpr);

            JITCompiler::Jump converted = m_jit.jump();

            isInteger.link(&m_jit);
            m_jit.zeroExtend32ToPtr(gpr, resultGpr);

            converted.link(&m_jit);
#else
            Node& childNode = at(node.child1().index());
            VirtualRegister virtualRegister = childNode.virtualRegister();
            GenerationInfo& info = m_generationInfo[virtualRegister];

            JSValueOperand op1(this, node.child1());

            GPRReg payloadGPR = op1.payloadGPR();
            GPRReg resultGpr = result.gpr();

            if (info.registerFormat() == DataFormatJSInteger)
                m_jit.move(payloadGPR, resultGpr);
            else {
                GPRReg tagGPR = op1.tagGPR();
                FPRTemporary tempFpr(this);
                FPRReg fpr = tempFpr.fpr();
                FPRTemporary scratch(this);

                JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));

                // Tags above LowestTag are non-numbers; exit unless proven numeric.
                if (!isNumberSpeculation(m_state.forNode(node.child1()).m_type))
                    speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), node.child1().index(), m_jit.branch32(MacroAssembler::AboveOrEqual, tagGPR, TrustedImm32(JSValue::LowestTag)));

                unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());

                silentSpillAllRegisters(resultGpr);
                callOperation(toInt32, resultGpr, fpr);
                silentFillAllRegisters(resultGpr);

                JITCompiler::Jump converted = m_jit.jump();

                isInteger.link(&m_jit);
                m_jit.move(payloadGPR, resultGpr);

                converted.link(&m_jit);
            }
#endif
            integerResult(resultGpr, m_compileIndex);
            return;
        }
        case GeneratedOperandTypeUnknown:
            // checkGeneratedTypeForToInt32() already terminated speculation.
            ASSERT_NOT_REACHED();
            break;
        }
    }
    
    if (at(node.child1()).shouldSpeculateBoolean()) {
        SpeculateBooleanOperand op1(this, node.child1());
        GPRTemporary result(this, op1);
        
        m_jit.move(op1.gpr(), result.gpr());
        // Mask down to the low bit, which carries the boolean's 0/1 value.
        m_jit.and32(JITCompiler::TrustedImm32(1), result.gpr());
        
        integerResult(result.gpr(), m_compileIndex);
        return;
    }
    
    // Do it the safe way.
    nonSpeculativeValueToInt32(node);
    return;
}
// Reinterpret an int32-typed value as a uint32 and produce a JS number.
// If the node cannot be speculated as int32, a double is produced
// unconditionally, adding 2^32 when the sign bit is set. Otherwise we
// speculate the value fits in int32 and use a roll-forward OSR exit when
// the sign bit turns out to be set.
void SpeculativeJIT::compileUInt32ToNumber(Node& node)
{
    if (!nodeCanSpeculateInteger(node.arithNodeFlags())) {
        // We know that this sometimes produces doubles. So produce a double every
        // time. This at least allows subsequent code to not have weird conditionals.
        
        IntegerOperand op1(this, node.child1());
        FPRTemporary result(this);
        
        GPRReg inputGPR = op1.gpr();
        FPRReg outputFPR = result.fpr();
        
        m_jit.convertInt32ToDouble(inputGPR, outputFPR);
        
        // If the sign bit was set, the value is really a uint32 in
        // [2^31, 2^32); compensate by adding 2^32 to the double.
        JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
        m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
        positive.link(&m_jit);
        
        doubleResult(outputFPR, m_compileIndex);
        return;
    }

    IntegerOperand op1(this, node.child1());
    GPRTemporary result(this); // For the benefit of OSR exit, force these to be in different registers. In reality the OSR exit compiler could find cases where you have uint32(%r1) followed by int32(%r1) and then use different registers, but that seems like too much effort.

    m_jit.move(op1.gpr(), result.gpr());

    // Test the operand is positive. This is a very special speculation check - we actually
    // use roll-forward speculation here, where if this fails, we jump to the baseline
    // instruction that follows us, rather than the one we're executing right now. We have
    // to do this because by this point, the original values necessary to compile whatever
    // operation the UInt32ToNumber originated from might be dead.
    forwardSpeculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)), ValueRecovery::uint32InGPR(result.gpr()));

    integerResult(result.gpr(), m_compileIndex, op1.format());
}
// Convert a double that is speculated to hold an exactly int32-representable
// value. Uses roll-forward speculation: when the conversion fails, the OSR
// exit recovers the original double from valueFPR.
void SpeculativeJIT::compileDoubleAsInt32(Node& node)
{
    SpeculateDoubleOperand op1(this, node.child1());
    FPRTemporary scratch(this);
    GPRTemporary result(this);
    
    FPRReg valueFPR = op1.fpr();
    FPRReg scratchFPR = scratch.fpr();
    GPRReg resultGPR = result.gpr();

    // branchConvertDoubleToInt32 collects every failure mode into failureCases.
    JITCompiler::JumpList failureCases;
    m_jit.branchConvertDoubleToInt32(valueFPR, resultGPR, failureCases, scratchFPR);
    forwardSpeculationCheck(Overflow, JSValueRegs(), NoNode, failureCases, ValueRecovery::inFPR(valueFPR));

    integerResult(resultGPR, m_compileIndex);
}
// Produce a double from the child value. Int32 constants on JSVALUE64 are
// loaded directly as double bit patterns; a proven int32 is converted with a
// single instruction; otherwise the boxed JSValue is unboxed, speculating
// (when the abstract state does not already prove it) that it is a number.
void SpeculativeJIT::compileInt32ToDouble(Node& node)
{
#if USE(JSVALUE64)
    // On JSVALUE64 we have a way of loading double constants in a more direct manner
    // than a int->double conversion. On 32_64, unfortunately, we currently don't have
    // any such mechanism - though we could have it, if we just provisioned some memory
    // in CodeBlock for the double form of integer constants.
    if (at(node.child1()).hasConstant()) {
        ASSERT(isInt32Constant(node.child1().index()));
        FPRTemporary result(this);
        GPRTemporary temp(this);
        // Materialize the double's bit pattern in a GPR, then move it to an FPR.
        m_jit.move(MacroAssembler::Imm64(reinterpretDoubleToInt64(valueOfNumberConstant(node.child1().index()))), temp.gpr());
        m_jit.move64ToDouble(temp.gpr(), result.fpr());
        doubleResult(result.fpr(), m_compileIndex);
        return;
    }
#endif
    
    if (isInt32Speculation(m_state.forNode(node.child1()).m_type)) {
        // Proven int32: plain conversion, no checks required.
        SpeculateIntegerOperand op1(this, node.child1());
        FPRTemporary result(this);
        m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
        doubleResult(result.fpr(), m_compileIndex);
        return;
    }
    
    JSValueOperand op1(this, node.child1());
    FPRTemporary result(this);
    
#if USE(JSVALUE64)
    GPRTemporary temp(this);

    GPRReg op1GPR = op1.gpr();
    GPRReg tempGPR = temp.gpr();
    FPRReg resultFPR = result.fpr();
    
    JITCompiler::Jump isInteger = m_jit.branch64(
        MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
    
    // Not statically known to be a number: OSR-exit on non-number values.
    if (!isNumberSpeculation(m_state.forNode(node.child1()).m_type)) {
        speculationCheck(
            BadType, JSValueRegs(op1GPR), node.child1(),
            m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
    }
    
    // Boxed double: strip the tag and move the payload into an FPR.
    m_jit.move(op1GPR, tempGPR);
    unboxDouble(tempGPR, resultFPR);
    JITCompiler::Jump done = m_jit.jump();
    
    isInteger.link(&m_jit);
    m_jit.convertInt32ToDouble(op1GPR, resultFPR);
    done.link(&m_jit);
#else
    FPRTemporary temp(this);
    
    GPRReg op1TagGPR = op1.tagGPR();
    GPRReg op1PayloadGPR = op1.payloadGPR();
    FPRReg tempFPR = temp.fpr();
    FPRReg resultFPR = result.fpr();
    
    JITCompiler::Jump isInteger = m_jit.branch32(
        MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
    
    // Tags at or above LowestTag are non-numbers; exit unless proven numeric.
    if (!isNumberSpeculation(m_state.forNode(node.child1()).m_type)) {
        speculationCheck(
            BadType, JSValueRegs(op1TagGPR, op1PayloadGPR), node.child1(),
            m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
    }
    
    unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
    JITCompiler::Jump done = m_jit.jump();
    
    isInteger.link(&m_jit);
    m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
    done.link(&m_jit);
#endif
    
    doubleResult(resultFPR, m_compileIndex);
}
// Clamp a double to the byte range [0, 255] after adding a 0.5 rounding
// bias. NaN fails the "> 0" comparison and therefore clamps to zero. Note
// that an in-range result still carries the +0.5 bias; callers truncate it
// to an integer afterwards.
static double clampDoubleToByte(double d)
{
    const double biased = d + 0.5;
    if (biased > 255)
        return 255;
    if (biased > 0)
        return biased;
    return 0; // Negative, too small, or NaN input.
}
// Emit code that clamps the int32 in |result| to the byte range [0, 255],
// in place.
static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
{
    // Unsigned comparison: values already in 0..255 need no clamping.
    MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
    // Signed comparison separates the remaining values into > 255 ...
    MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
    // ... and negative, which clamps to zero.
    jit.xorPtr(result, result);
    MacroAssembler::Jump clamped = jit.jump();
    tooBig.link(&jit);
    jit.move(JITCompiler::TrustedImm32(255), result);
    clamped.link(&jit);
    inBounds.link(&jit);
}
// Emit code that clamps the double in |source| to a byte value in |result|,
// mirroring the clamping rule of clampDoubleToByte(): NaN and non-positive
// inputs yield 0, inputs above 255 yield 255, and everything in between is
// rounded by adding 0.5 and truncating. |scratch| is clobbered.
static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
{
    // Unordered compare so we pick up NaN
    static const double zero = 0;
    static const double byteMax = 255;
    static const double half = 0.5;
    jit.loadDouble(&zero, scratch);
    MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
    jit.loadDouble(&byteMax, scratch);
    MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
    
    // In-range: add the 0.5 rounding bias and truncate.
    jit.loadDouble(&half, scratch);
    // FIXME: This should probably just use a floating point round!
    // https://bugs.webkit.org/show_bug.cgi?id=72054
    jit.addDouble(source, scratch);
    jit.truncateDoubleToInt32(scratch, result);   
    MacroAssembler::Jump truncatedInt = jit.jump();
    
    tooSmall.link(&jit);
    jit.xorPtr(result, result); // Zero the result register.
    MacroAssembler::Jump zeroed = jit.jump();
    
    tooBig.link(&jit);
    jit.move(JITCompiler::TrustedImm32(255), result);
    
    truncatedInt.link(&jit);
    zeroed.link(&jit);
}
// Emit a bounds-checked load from an int-typed array. |elementSize| is 1, 2,
// or 4 bytes; |signedness| selects sign- vs zero-extending loads. An
// out-of-bounds index OSR-exits. A 4-byte unsigned load whose value might
// exceed int32 range either speculates the sign bit is clear (roll-forward
// exit) or produces a double, adding 2^32 when the sign bit is set.
void SpeculativeJIT::compileGetByValOnIntTypedArray(const TypedArrayDescriptor& descriptor, Node& node, size_t elementSize, TypedArraySignedness signedness)
{
    SpeculateCellOperand base(this, node.child1());
    SpeculateStrictInt32Operand property(this, node.child2());
    StorageOperand storage(this, node.child3());

    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();
    GPRReg storageReg = storage.gpr();

    GPRTemporary result(this);
    GPRReg resultReg = result.gpr();

    ASSERT(node.arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node.child1())));

    // Unsigned comparison rejects both negative indices and indices >= length.
    speculationCheck(
        Uncountable, JSValueRegs(), NoNode,
        m_jit.branch32(
            MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(baseReg, descriptor.m_lengthOffset)));
    switch (elementSize) {
    case 1:
        if (signedness == SignedTypedArray)
            m_jit.load8Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
        else
            m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
        break;
    case 2:
        if (signedness == SignedTypedArray)
            m_jit.load16Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
        else
            m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
        break;
    case 4:
        m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
        break;
    default:
        CRASH();
    }
    // Narrow loads and signed loads always fit in an int32.
    if (elementSize < 4 || signedness == SignedTypedArray) {
        integerResult(resultReg, m_compileIndex);
        return;
    }
    
    ASSERT(elementSize == 4 && signedness == UnsignedTypedArray);
    if (node.shouldSpeculateInteger()) {
        // Speculate that the uint32's sign bit is clear; roll-forward exit
        // (recovering the uint32 from the GPR) if it is not.
        forwardSpeculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)), ValueRecovery::uint32InGPR(resultReg));
        integerResult(resultReg, m_compileIndex);
        return;
    }
    
    // Produce a double, adding 2^32 to compensate when the sign bit is set.
    FPRTemporary fresult(this);
    m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
    JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
    m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
    positive.link(&m_jit);
    doubleResult(fresult.fpr(), m_compileIndex);
}
// Emit a store into an int-typed array. The value operand is materialized as
// an int32 in a fresh temporary GPR via one of four paths: compile-time
// numeric constant, speculated int32, double with byte clamping
// (ClampRounding), or double with truncating conversion. When node.op() is
// PutByVal, an out-of-bounds index silently skips the store; other ops emit
// no bounds check here.
void SpeculativeJIT::compilePutByValForIntTypedArray(const TypedArrayDescriptor& descriptor, GPRReg base, GPRReg property, Node& node, size_t elementSize, TypedArraySignedness signedness, TypedArrayRounding rounding)
{
    StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
    GPRReg storageReg = storage.gpr();
    
    Edge valueUse = m_jit.graph().varArgChild(node, 2);
    
    GPRTemporary value;
    GPRReg valueGPR;
    
    if (at(valueUse).isConstant()) {
        JSValue jsValue = valueOfJSConstant(valueUse.index());
        if (!jsValue.isNumber()) {
            // A non-number constant can never be stored on this path.
            terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
            noResult(m_compileIndex);
            return;
        }
        double d = jsValue.asNumber();
        if (rounding == ClampRounding) {
            ASSERT(elementSize == 1);
            d = clampDoubleToByte(d);
        }
        // Conversion happens at compile time; just load the resulting int32.
        GPRTemporary scratch(this);
        GPRReg scratchReg = scratch.gpr();
        m_jit.move(Imm32(toInt32(d)), scratchReg);
        value.adopt(scratch);
        valueGPR = scratchReg;
    } else if (at(valueUse).shouldSpeculateInteger()) {
        SpeculateIntegerOperand valueOp(this, valueUse);
        GPRTemporary scratch(this);
        GPRReg scratchReg = scratch.gpr();
        m_jit.move(valueOp.gpr(), scratchReg);
        if (rounding == ClampRounding) {
            ASSERT(elementSize == 1);
            compileClampIntegerToByte(m_jit, scratchReg);
        }
        value.adopt(scratch);
        valueGPR = scratchReg;
    } else if (rounding == ClampRounding) {
        // Double value stored to a clamped byte array.
        ASSERT(elementSize == 1);
        SpeculateDoubleOperand valueOp(this, valueUse);
        GPRTemporary result(this);
        FPRTemporary floatScratch(this);
        FPRReg fpr = valueOp.fpr();
        GPRReg gpr = result.gpr();
        compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
        value.adopt(result);
        valueGPR = gpr;
    } else {
        // Double value with truncating conversion: NaN stores zero; a value
        // the fast truncation cannot handle goes to a toInt32() slow path.
        SpeculateDoubleOperand valueOp(this, valueUse);
        GPRTemporary result(this);
        FPRReg fpr = valueOp.fpr();
        GPRReg gpr = result.gpr();
        // Self-comparison fails only for NaN.
        MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
        m_jit.xorPtr(gpr, gpr);
        MacroAssembler::Jump fixed = m_jit.jump();
        notNaN.link(&m_jit);
        
        MacroAssembler::Jump failed;
        if (signedness == SignedTypedArray)
            failed = m_jit.branchTruncateDoubleToInt32(fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
        else
            failed = m_jit.branchTruncateDoubleToUint32(fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
        
        addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr));
        
        fixed.link(&m_jit);
        value.adopt(result);
        valueGPR = gpr;
    }
    
    // The value temporary must not alias any register we still need to read.
    ASSERT_UNUSED(valueGPR, valueGPR != property);
    ASSERT(valueGPR != base);
    ASSERT(valueGPR != storageReg);
    MacroAssembler::Jump outOfBounds;
    if (node.op() == PutByVal)
        outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, property, MacroAssembler::Address(base, descriptor.m_lengthOffset));

    switch (elementSize) {
    case 1:
        m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
        break;
    case 2:
        m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
        break;
    case 4:
        m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
        break;
    default:
        CRASH();
    }
    if (node.op() == PutByVal)
        outOfBounds.link(&m_jit);
    noResult(m_compileIndex);
}
// Emit a bounds-checked load from a float-typed array (elementSize 4 or 8).
// 4-byte loads are widened float->double; for 8-byte loads any NaN loaded
// from the array is replaced with the canonical QNaN constant (NaN
// canonicalization — presumably so arbitrary NaN bit patterns from the
// buffer never escape into value-tagged code; confirm against the JSValue
// encoding).
void SpeculativeJIT::compileGetByValOnFloatTypedArray(const TypedArrayDescriptor& descriptor, Node& node, size_t elementSize)
{
    SpeculateCellOperand base(this, node.child1());
    SpeculateStrictInt32Operand property(this, node.child2());
    StorageOperand storage(this, node.child3());

    GPRReg baseReg = base.gpr();
    GPRReg propertyReg = property.gpr();
    GPRReg storageReg = storage.gpr();

    ASSERT(node.arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node.child1())));

    FPRTemporary result(this);
    FPRReg resultReg = result.fpr();
    // Unsigned comparison rejects both negative indices and indices >= length.
    speculationCheck(
        Uncountable, JSValueRegs(), NoNode,
        m_jit.branch32(
            MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(baseReg, descriptor.m_lengthOffset)));
    switch (elementSize) {
    case 4:
        m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
        m_jit.convertFloatToDouble(resultReg, resultReg);
        break;
    case 8: {
        m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
        // A self-comparison fails only for NaN; overwrite with canonical QNaN.
        MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, resultReg, resultReg);
        static const double NaN = QNaN;
        m_jit.loadDouble(&NaN, resultReg);
        notNaN.link(&m_jit);
        break;
    }
    default:
        ASSERT_NOT_REACHED();
    }
    
    doubleResult(resultReg, m_compileIndex);
}
// Emit a store into a float-typed array (elementSize 4 or 8). For 4-byte
// elements the double value is narrowed to float first. When node.op() is
// PutByVal, an out-of-bounds index silently skips the store; other ops emit
// no bounds check here.
void SpeculativeJIT::compilePutByValForFloatTypedArray(const TypedArrayDescriptor& descriptor, GPRReg base, GPRReg property, Node& node, size_t elementSize)
{
    StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
    GPRReg storageReg = storage.gpr();
    
    Edge baseUse = m_jit.graph().varArgChild(node, 0);
    Edge valueUse = m_jit.graph().varArgChild(node, 2);
     
    SpeculateDoubleOperand valueOp(this, valueUse);
    
    ASSERT_UNUSED(baseUse, node.arrayMode().alreadyChecked(m_jit.graph(), m_jit.graph()[m_compileIndex], m_state.forNode(baseUse)));
    
    GPRTemporary result(this);
    
    MacroAssembler::Jump outOfBounds;
    if (node.op() == PutByVal)
        outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, property, MacroAssembler::Address(base, descriptor.m_lengthOffset));

    switch (elementSize) {
    case 4: {
        FPRTemporary scratch(this);
        // convertDoubleToFloat fully overwrites the scratch register, so the
        // previous moveDouble of the value into scratch was a dead
        // instruction and has been removed.
        m_jit.convertDoubleToFloat(valueOp.fpr(), scratch.fpr());
        m_jit.storeFloat(scratch.fpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
        break;
    }
    case 8:
        m_jit.storeDouble(valueOp.fpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
        break;
    default:
        ASSERT_NOT_REACHED();
    }
    if (node.op() == PutByVal)
        outOfBounds.link(&m_jit);
    noResult(m_compileIndex);
}
// Core of instanceof: verifies that |prototypeReg| holds an object (OSR-exit
// otherwise), then walks the prototype chain of the cell in |valueReg|,
// leaving the boolean answer in |scratchReg| — an encoded JSValue boolean on
// JSVALUE64, a 0/1 payload on 32_64. |scratchReg| is used as the chain
// cursor throughout the loop.
void SpeculativeJIT::compileInstanceOfForObject(Node&, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg)
{
    // Check that prototype is an object.
    m_jit.loadPtr(MacroAssembler::Address(prototypeReg, JSCell::structureOffset()), scratchReg);
    speculationCheck(BadType, JSValueRegs(), NoNode, m_jit.branchIfNotObject(scratchReg));
    
    // Initialize scratchReg with the value being checked.
    m_jit.move(valueReg, scratchReg);
    
    // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
    MacroAssembler::Label loop(&m_jit);
    m_jit.loadPtr(MacroAssembler::Address(scratchReg, JSCell::structureOffset()), scratchReg);
#if USE(JSVALUE64)
    m_jit.load64(MacroAssembler::Address(scratchReg, Structure::prototypeOffset()), scratchReg);
    MacroAssembler::Jump isInstance = m_jit.branch64(MacroAssembler::Equal, scratchReg, prototypeReg);
    // Keep looping while the loaded prototype is still a cell; a non-cell
    // means we fell off the end of the chain.
    m_jit.branchTest64(MacroAssembler::Zero, scratchReg, GPRInfo::tagMaskRegister).linkTo(loop, &m_jit);
#else
    // On 32_64 only the payload word of the prototype JSValue is loaded and
    // compared; a zero payload terminates the walk.
    m_jit.load32(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), scratchReg);
    MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
    m_jit.branchTest32(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
#endif
    
    // No match - result is false.
#if USE(JSVALUE64)
    m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
#else
    m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
#endif
    MacroAssembler::Jump putResult = m_jit.jump();
    
    isInstance.link(&m_jit);
#if USE(JSVALUE64)
    m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
#else
    m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
#endif
    
    putResult.link(&m_jit);
}
void SpeculativeJIT::compileInstanceOf(Node& node)
{
if ((!!(at(node.child1()).prediction() & ~SpecCell)
&& !!(m_state.forNode(node.child1()).m_type & ~SpecCell))
|| at(node.child1()).adjustedRefCount() == 1) {
// It might not be a cell. Speculate less aggressively.
// Or: it might only be used once (i.e. by us), so we get zero benefit
// from speculating any more aggressively than we absolutely need to.
JSValueOperand value(this, node.child1());
SpeculateCellOperand prototype(this, node.child2());
GPRTemporary scratch(this);
GPRReg prototypeReg = prototype.gpr();
GPRReg scratchReg = scratch.gpr();
#if USE(JSVALUE64)
GPRReg valueReg = value.gpr();
MacroAssembler::Jump isCell = m_jit.branchTest64(MacroAssembler::Zero, valueReg, GPRInfo::tagMaskRegister);
m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
#else
GPRReg valueTagReg = value.tagGPR();
GPRReg valueReg = value.payloadGPR();
MacroAssembler::Jump isCell = m_jit.branch32(MacroAssembler::Equal, valueTagReg, TrustedImm32(JSValue::CellTag));
m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
#endif
MacroAssembler::Jump done = m_jit.jump();
isCell.link(&m_jit);
compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg);
done.link(&m_jit);
#if USE(JSVALUE64)
jsValueResult(scratchReg, m_compileIndex, DataFormatJSBoolean);
#else
booleanResult(scratchReg, m_compileIndex);
#endif
return;
}