// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/code-stub-assembler.h"
#include "src/code-factory.h"
#include "src/frames-inl.h"
#include "src/frames.h"
namespace v8 {
namespace internal {
using compiler::Node;
template <class T>
using TNode = compiler::TNode<T>;
CodeStubAssembler::CodeStubAssembler(compiler::CodeAssemblerState* state)
: compiler::CodeAssembler(state) {
if (DEBUG_BOOL && FLAG_csa_trap_on_node != nullptr) {
HandleBreakOnNode();
}
}
void CodeStubAssembler::HandleBreakOnNode() {
// FLAG_csa_trap_on_node should be in the form "STUB,NODE", where STUB is a
// string specifying the name of a stub and NODE is a number specifying the
// node id.
const char* name = state()->name();
size_t name_length = strlen(name);
if (strncmp(FLAG_csa_trap_on_node, name, name_length) != 0) {
// Different name.
return;
}
size_t option_length = strlen(FLAG_csa_trap_on_node);
if (option_length < name_length + 2 ||
FLAG_csa_trap_on_node[name_length] != ',') {
// Option is too short or the stub name is not followed by a comma.
return;
}
const char* start = &FLAG_csa_trap_on_node[name_length + 1];
char* end;
int node_id = static_cast<int>(strtol(start, &end, 10));
if (start == end) {
// Bad node id.
return;
}
BreakOnNode(node_id);
}
void CodeStubAssembler::Assert(const NodeGenerator& condition_body,
const char* message, const char* file, int line,
Node* extra_node1, const char* extra_node1_name,
Node* extra_node2, const char* extra_node2_name,
Node* extra_node3, const char* extra_node3_name,
Node* extra_node4, const char* extra_node4_name,
Node* extra_node5,
const char* extra_node5_name) {
#if defined(DEBUG)
if (FLAG_debug_code) {
Check(condition_body, message, file, line, extra_node1, extra_node1_name,
extra_node2, extra_node2_name, extra_node3, extra_node3_name,
extra_node4, extra_node4_name, extra_node5, extra_node5_name);
}
#endif
}
#ifdef DEBUG
namespace {
void MaybePrintNodeWithName(CodeStubAssembler* csa, Node* node,
const char* node_name) {
if (node != nullptr) {
csa->CallRuntime(Runtime::kPrintWithNameForAssert, csa->SmiConstant(0),
csa->StringConstant(node_name), node);
}
}
} // namespace
#endif
void CodeStubAssembler::Check(const NodeGenerator& condition_body,
const char* message, const char* file, int line,
Node* extra_node1, const char* extra_node1_name,
Node* extra_node2, const char* extra_node2_name,
Node* extra_node3, const char* extra_node3_name,
Node* extra_node4, const char* extra_node4_name,
Node* extra_node5, const char* extra_node5_name) {
Label ok(this);
Label not_ok(this, Label::kDeferred);
if (message != nullptr && FLAG_code_comments) {
Comment("[ Assert: %s", message);
} else {
Comment("[ Assert");
}
Node* condition = condition_body();
DCHECK_NOT_NULL(condition);
Branch(condition, &ok, &not_ok);
BIND(&not_ok);
DCHECK_NOT_NULL(message);
char chars[1024];
Vector<char> buffer(chars);
if (file != nullptr) {
SNPrintF(buffer, "CSA_ASSERT failed: %s [%s:%d]\n", message, file, line);
} else {
SNPrintF(buffer, "CSA_ASSERT failed: %s\n", message);
}
Node* message_node = StringConstant(&(buffer[0]));
#ifdef DEBUG
// Only print the extra nodes in debug builds.
MaybePrintNodeWithName(this, extra_node1, extra_node1_name);
MaybePrintNodeWithName(this, extra_node2, extra_node2_name);
MaybePrintNodeWithName(this, extra_node3, extra_node3_name);
MaybePrintNodeWithName(this, extra_node4, extra_node4_name);
MaybePrintNodeWithName(this, extra_node5, extra_node5_name);
#endif
DebugAbort(message_node);
Unreachable();
BIND(&ok);
Comment("] Assert");
}
Node* CodeStubAssembler::Select(SloppyTNode<BoolT> condition,
const NodeGenerator& true_body,
const NodeGenerator& false_body,
MachineRepresentation rep) {
VARIABLE(value, rep);
Label vtrue(this), vfalse(this), end(this);
Branch(condition, &vtrue, &vfalse);
BIND(&vtrue);
{
value.Bind(true_body());
Goto(&end);
}
BIND(&vfalse);
{
value.Bind(false_body());
Goto(&end);
}
BIND(&end);
return value.value();
}
Node* CodeStubAssembler::SelectConstant(Node* condition, Node* true_value,
Node* false_value,
MachineRepresentation rep) {
return Select(condition, [=] { return true_value; },
[=] { return false_value; }, rep);
}
Node* CodeStubAssembler::SelectInt32Constant(Node* condition, int true_value,
int false_value) {
return SelectConstant(condition, Int32Constant(true_value),
Int32Constant(false_value),
MachineRepresentation::kWord32);
}
Node* CodeStubAssembler::SelectIntPtrConstant(Node* condition, int true_value,
int false_value) {
return SelectConstant(condition, IntPtrConstant(true_value),
IntPtrConstant(false_value),
MachineType::PointerRepresentation());
}
Node* CodeStubAssembler::SelectBooleanConstant(Node* condition) {
return SelectConstant(condition, TrueConstant(), FalseConstant(),
MachineRepresentation::kTagged);
}
Node* CodeStubAssembler::SelectSmiConstant(Node* condition, Smi* true_value,
Smi* false_value) {
return SelectConstant(condition, SmiConstant(true_value),
SmiConstant(false_value),
MachineRepresentation::kTaggedSigned);
}
Node* CodeStubAssembler::NoContextConstant() {
return SmiConstant(Context::kNoContext);
}
#define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \
compiler::TNode<std::remove_reference<decltype( \
*std::declval<Heap>().rootAccessorName())>::type> \
CodeStubAssembler::name##Constant() { \
return UncheckedCast<std::remove_reference<decltype( \
*std::declval<Heap>().rootAccessorName())>::type>( \
LoadRoot(Heap::k##rootIndexName##RootIndex)); \
}
HEAP_CONSTANT_LIST(HEAP_CONSTANT_ACCESSOR);
#undef HEAP_CONSTANT_ACCESSOR
#define HEAP_CONSTANT_TEST(rootIndexName, rootAccessorName, name) \
compiler::TNode<BoolT> CodeStubAssembler::Is##name( \
SloppyTNode<Object> value) { \
return WordEqual(value, name##Constant()); \
} \
compiler::TNode<BoolT> CodeStubAssembler::IsNot##name( \
SloppyTNode<Object> value) { \
return WordNotEqual(value, name##Constant()); \
}
HEAP_CONSTANT_LIST(HEAP_CONSTANT_TEST);
#undef HEAP_CONSTANT_TEST
Node* CodeStubAssembler::HashSeed() {
return LoadAndUntagToWord32Root(Heap::kHashSeedRootIndex);
}
Node* CodeStubAssembler::StaleRegisterConstant() {
return LoadRoot(Heap::kStaleRegisterRootIndex);
}
Node* CodeStubAssembler::IntPtrOrSmiConstant(int value, ParameterMode mode) {
if (mode == SMI_PARAMETERS) {
return SmiConstant(value);
} else {
DCHECK_EQ(INTPTR_PARAMETERS, mode);
return IntPtrConstant(value);
}
}
bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(Node* test,
ParameterMode mode) {
int32_t constant_test;
Smi* smi_test;
if (mode == INTPTR_PARAMETERS) {
if (ToInt32Constant(test, constant_test) && constant_test == 0) {
return true;
}
} else {
DCHECK_EQ(mode, SMI_PARAMETERS);
if (ToSmiConstant(test, smi_test) && smi_test->value() == 0) {
return true;
}
}
return false;
}
bool CodeStubAssembler::TryGetIntPtrOrSmiConstantValue(Node* maybe_constant,
int* value,
ParameterMode mode) {
int32_t int32_constant;
if (mode == INTPTR_PARAMETERS) {
if (ToInt32Constant(maybe_constant, int32_constant)) {
*value = int32_constant;
return true;
}
} else {
DCHECK_EQ(mode, SMI_PARAMETERS);
Smi* smi_constant;
if (ToSmiConstant(maybe_constant, smi_constant)) {
*value = Smi::ToInt(smi_constant);
return true;
}
}
return false;
}
Node* CodeStubAssembler::IntPtrRoundUpToPowerOfTwo32(Node* value) {
Comment("IntPtrRoundUpToPowerOfTwo32");
CSA_ASSERT(this, UintPtrLessThanOrEqual(value, IntPtrConstant(0x80000000u)));
value = IntPtrSub(value, IntPtrConstant(1));
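// Smear the highest set bit of {value - 1} into all lower bit positions, so
// that adding one below yields the next power of two.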
for (int i = 1; i <= 16; i *= 2) {
value = WordOr(value, WordShr(value, IntPtrConstant(i)));
}
return IntPtrAdd(value, IntPtrConstant(1));
}
Node* CodeStubAssembler::MatchesParameterMode(Node* value, ParameterMode mode) {
if (mode == SMI_PARAMETERS) {
return TaggedIsSmi(value);
} else {
return Int32Constant(1);
}
}
TNode<BoolT> CodeStubAssembler::WordIsPowerOfTwo(SloppyTNode<IntPtrT> value) {
// value && !(value & (value - 1))
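// The zero case is mapped to a nonzero value first, so that the final
// comparison against zero correctly yields false for it.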
return WordEqual(
Select(
WordEqual(value, IntPtrConstant(0)),
[=] { return IntPtrConstant(1); },
[=] { return WordAnd(value, IntPtrSub(value, IntPtrConstant(1))); },
MachineType::PointerRepresentation()),
IntPtrConstant(0));
}
TNode<Float64T> CodeStubAssembler::Float64Round(SloppyTNode<Float64T> x) {
Node* one = Float64Constant(1.0);
Node* one_half = Float64Constant(0.5);
Label return_x(this);
// Round up {x} towards Infinity.
VARIABLE(var_x, MachineRepresentation::kFloat64, Float64Ceil(x));
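// If {x} is at least halfway towards the rounded-up value, keep it;
// otherwise subtract one to round down instead.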
GotoIf(Float64LessThanOrEqual(Float64Sub(var_x.value(), one_half), x),
&return_x);
var_x.Bind(Float64Sub(var_x.value(), one));
Goto(&return_x);
BIND(&return_x);
return TNode<Float64T>::UncheckedCast(var_x.value());
}
TNode<Float64T> CodeStubAssembler::Float64Ceil(SloppyTNode<Float64T> x) {
if (IsFloat64RoundUpSupported()) {
return Float64RoundUp(x);
}
Node* one = Float64Constant(1.0);
Node* zero = Float64Constant(0.0);
Node* two_52 = Float64Constant(4503599627370496.0E0);
Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
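// Doubles at magnitude 2^52 and above are spaced at least one apart, so
// adding 2^52 to an {x} in [0,2^52[ and subtracting it again rounds {x} to
// a nearby integer; the comparisons below correct the rounding direction.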
VARIABLE(var_x, MachineRepresentation::kFloat64, x);
Label return_x(this), return_minus_x(this);
// Check if {x} is greater than zero.
Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
&if_xnotgreaterthanzero);
BIND(&if_xgreaterthanzero);
{
// Just return {x} unless it's in the range ]0,2^52[.
GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
// Round positive {x} towards Infinity.
var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
GotoIfNot(Float64LessThan(var_x.value(), x), &return_x);
var_x.Bind(Float64Add(var_x.value(), one));
Goto(&return_x);
}
BIND(&if_xnotgreaterthanzero);
{
// Just return {x} unless it's in the range ]-2^52,0[.
GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
GotoIfNot(Float64LessThan(x, zero), &return_x);
// Round negated {x} towards Infinity and return the result negated.
Node* minus_x = Float64Neg(x);
var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
GotoIfNot(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
var_x.Bind(Float64Sub(var_x.value(), one));
Goto(&return_minus_x);
}
BIND(&return_minus_x);
var_x.Bind(Float64Neg(var_x.value()));
Goto(&return_x);
BIND(&return_x);
return TNode<Float64T>::UncheckedCast(var_x.value());
}
TNode<Float64T> CodeStubAssembler::Float64Floor(SloppyTNode<Float64T> x) {
if (IsFloat64RoundDownSupported()) {
return Float64RoundDown(x);
}
Node* one = Float64Constant(1.0);
Node* zero = Float64Constant(0.0);
Node* two_52 = Float64Constant(4503599627370496.0E0);
Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
VARIABLE(var_x, MachineRepresentation::kFloat64, x);
Label return_x(this), return_minus_x(this);
// Check if {x} is greater than zero.
Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
&if_xnotgreaterthanzero);
BIND(&if_xgreaterthanzero);
{
// Just return {x} unless it's in the range ]0,2^52[.
GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
// Round positive {x} towards -Infinity.
var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
GotoIfNot(Float64GreaterThan(var_x.value(), x), &return_x);
var_x.Bind(Float64Sub(var_x.value(), one));
Goto(&return_x);
}
BIND(&if_xnotgreaterthanzero);
{
// Just return {x} unless it's in the range ]-2^52,0[.
GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
GotoIfNot(Float64LessThan(x, zero), &return_x);
// Round negated {x} towards -Infinity and return the result negated.
Node* minus_x = Float64Neg(x);
var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
GotoIfNot(Float64LessThan(var_x.value(), minus_x), &return_minus_x);
var_x.Bind(Float64Add(var_x.value(), one));
Goto(&return_minus_x);
}
BIND(&return_minus_x);
var_x.Bind(Float64Neg(var_x.value()));
Goto(&return_x);
BIND(&return_x);
return TNode<Float64T>::UncheckedCast(var_x.value());
}
TNode<Float64T> CodeStubAssembler::Float64RoundToEven(SloppyTNode<Float64T> x) {
if (IsFloat64RoundTiesEvenSupported()) {
return Float64RoundTiesEven(x);
}
// See ES#sec-touint8clamp for details.
Node* f = Float64Floor(x);
Node* f_and_half = Float64Add(f, Float64Constant(0.5));
VARIABLE(var_result, MachineRepresentation::kFloat64);
Label return_f(this), return_f_plus_one(this), done(this);
GotoIf(Float64LessThan(f_and_half, x), &return_f_plus_one);
GotoIf(Float64LessThan(x, f_and_half), &return_f);
{
Node* f_mod_2 = Float64Mod(f, Float64Constant(2.0));
Branch(Float64Equal(f_mod_2, Float64Constant(0.0)), &return_f,
&return_f_plus_one);
}
BIND(&return_f);
var_result.Bind(f);
Goto(&done);
BIND(&return_f_plus_one);
var_result.Bind(Float64Add(f, Float64Constant(1.0)));
Goto(&done);
BIND(&done);
return TNode<Float64T>::UncheckedCast(var_result.value());
}
TNode<Float64T> CodeStubAssembler::Float64Trunc(SloppyTNode<Float64T> x) {
if (IsFloat64RoundTruncateSupported()) {
return Float64RoundTruncate(x);
}
Node* one = Float64Constant(1.0);
Node* zero = Float64Constant(0.0);
Node* two_52 = Float64Constant(4503599627370496.0E0);
Node* minus_two_52 = Float64Constant(-4503599627370496.0E0);
VARIABLE(var_x, MachineRepresentation::kFloat64, x);
Label return_x(this), return_minus_x(this);
// Check if {x} is greater than 0.
Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this);
Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero,
&if_xnotgreaterthanzero);
BIND(&if_xgreaterthanzero);
{
if (IsFloat64RoundDownSupported()) {
var_x.Bind(Float64RoundDown(x));
} else {
// Just return {x} unless it's in the range ]0,2^52[.
GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x);
// Round positive {x} towards -Infinity.
var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52));
GotoIfNot(Float64GreaterThan(var_x.value(), x), &return_x);
var_x.Bind(Float64Sub(var_x.value(), one));
}
Goto(&return_x);
}
BIND(&if_xnotgreaterthanzero);
{
if (IsFloat64RoundUpSupported()) {
var_x.Bind(Float64RoundUp(x));
Goto(&return_x);
} else {
// Just return {x} unless it's in the range ]-2^52,0[.
GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x);
GotoIfNot(Float64LessThan(x, zero), &return_x);
// Round negated {x} towards -Infinity and return result negated.
Node* minus_x = Float64Neg(x);
var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52));
GotoIfNot(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x);
var_x.Bind(Float64Sub(var_x.value(), one));
Goto(&return_minus_x);
}
}
BIND(&return_minus_x);
var_x.Bind(Float64Neg(var_x.value()));
Goto(&return_x);
BIND(&return_x);
return TNode<Float64T>::UncheckedCast(var_x.value());
}
Node* CodeStubAssembler::SmiShiftBitsConstant() {
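// A Smi stores its payload shifted left past the tag bit; with 32-bit Smi
// values on 64-bit targets, kSmiShiftSize moves it into the upper word.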
return IntPtrConstant(kSmiShiftSize + kSmiTagSize);
}
TNode<Smi> CodeStubAssembler::SmiFromWord32(SloppyTNode<Int32T> value) {
TNode<IntPtrT> value_intptr = ChangeInt32ToIntPtr(value);
return BitcastWordToTaggedSigned(
WordShl(value_intptr, SmiShiftBitsConstant()));
}
TNode<Smi> CodeStubAssembler::SmiTag(SloppyTNode<IntPtrT> value) {
int32_t constant_value;
if (ToInt32Constant(value, constant_value) && Smi::IsValid(constant_value)) {
return SmiConstant(constant_value);
}
return BitcastWordToTaggedSigned(WordShl(value, SmiShiftBitsConstant()));
}
TNode<IntPtrT> CodeStubAssembler::SmiUntag(SloppyTNode<Smi> value) {
intptr_t constant_value;
if (ToIntPtrConstant(value, constant_value)) {
return IntPtrConstant(constant_value >> (kSmiShiftSize + kSmiTagSize));
}
return UncheckedCast<IntPtrT>(
WordSar(BitcastTaggedToWord(value), SmiShiftBitsConstant()));
}
TNode<Int32T> CodeStubAssembler::SmiToWord32(SloppyTNode<Smi> value) {
TNode<IntPtrT> result = SmiUntag(value);
return TruncateWordToWord32(result);
}
TNode<Float64T> CodeStubAssembler::SmiToFloat64(SloppyTNode<Smi> value) {
return ChangeInt32ToFloat64(SmiToWord32(value));
}
TNode<Smi> CodeStubAssembler::SmiMax(SloppyTNode<Smi> a, SloppyTNode<Smi> b) {
return SelectTaggedConstant(SmiLessThan(a, b), b, a);
}
TNode<Smi> CodeStubAssembler::SmiMin(SloppyTNode<Smi> a, SloppyTNode<Smi> b) {
return SelectTaggedConstant(SmiLessThan(a, b), a, b);
}
TNode<Object> CodeStubAssembler::NumberMax(SloppyTNode<Object> a,
SloppyTNode<Object> b) {
// TODO(danno): This could be optimized by specifically handling smi cases.
VARIABLE(result, MachineRepresentation::kTagged);
Label done(this), greater_than_equal_a(this), greater_than_equal_b(this);
GotoIfNumberGreaterThanOrEqual(a, b, &greater_than_equal_a);
GotoIfNumberGreaterThanOrEqual(b, a, &greater_than_equal_b);
result.Bind(NanConstant());
Goto(&done);
BIND(&greater_than_equal_a);
result.Bind(a);
Goto(&done);
BIND(&greater_than_equal_b);
result.Bind(b);
Goto(&done);
BIND(&done);
return TNode<Object>::UncheckedCast(result.value());
}
TNode<Object> CodeStubAssembler::NumberMin(SloppyTNode<Object> a,
SloppyTNode<Object> b) {
// TODO(danno): This could be optimized by specifically handling smi cases.
VARIABLE(result, MachineRepresentation::kTagged);
Label done(this), greater_than_equal_a(this), greater_than_equal_b(this);
GotoIfNumberGreaterThanOrEqual(a, b, &greater_than_equal_a);
GotoIfNumberGreaterThanOrEqual(b, a, &greater_than_equal_b);
result.Bind(NanConstant());
Goto(&done);
BIND(&greater_than_equal_a);
result.Bind(b);
Goto(&done);
BIND(&greater_than_equal_b);
result.Bind(a);
Goto(&done);
BIND(&done);
return TNode<Object>::UncheckedCast(result.value());
}
Node* CodeStubAssembler::SmiMod(Node* a, Node* b) {
VARIABLE(var_result, MachineRepresentation::kTagged);
Label return_result(this, &var_result),
return_minuszero(this, Label::kDeferred),
return_nan(this, Label::kDeferred);
// Untag {a} and {b}.
a = SmiToWord32(a);
b = SmiToWord32(b);
// Return NaN if {b} is zero.
GotoIf(Word32Equal(b, Int32Constant(0)), &return_nan);
// Check if {a} is non-negative.
Label if_aisnotnegative(this), if_aisnegative(this, Label::kDeferred);
Branch(Int32LessThanOrEqual(Int32Constant(0), a), &if_aisnotnegative,
&if_aisnegative);
BIND(&if_aisnotnegative);
{
// Fast case, don't need to check any other edge cases.
Node* r = Int32Mod(a, b);
var_result.Bind(SmiFromWord32(r));
Goto(&return_result);
}
BIND(&if_aisnegative);
{
if (SmiValuesAre32Bits()) {
// Check if {a} is kMinInt and {b} is -1 (only relevant if kMinInt is
// actually representable as a Smi).
Label join(this);
GotoIfNot(Word32Equal(a, Int32Constant(kMinInt)), &join);
GotoIf(Word32Equal(b, Int32Constant(-1)), &return_minuszero);
Goto(&join);
BIND(&join);
}
// Perform the integer modulus operation.
Node* r = Int32Mod(a, b);
// Check if {r} is zero, and if so return -0, because we have to
// take the sign of the left hand side {a}, which is negative.
GotoIf(Word32Equal(r, Int32Constant(0)), &return_minuszero);
// The remainder {r} can be outside the valid Smi range on 32-bit
// architectures, so we cannot just use SmiFromWord32(r) here.
var_result.Bind(ChangeInt32ToTagged(r));
Goto(&return_result);
}
BIND(&return_minuszero);
var_result.Bind(MinusZeroConstant());
Goto(&return_result);
BIND(&return_nan);
var_result.Bind(NanConstant());
Goto(&return_result);
BIND(&return_result);
return TNode<Object>::UncheckedCast(var_result.value());
}
TNode<Number> CodeStubAssembler::SmiMul(SloppyTNode<Smi> a,
SloppyTNode<Smi> b) {
TVARIABLE(Number, var_result);
VARIABLE(var_lhs_float64, MachineRepresentation::kFloat64);
VARIABLE(var_rhs_float64, MachineRepresentation::kFloat64);
Label return_result(this, &var_result);
// Both {a} and {b} are Smis. Convert them to integers and multiply.
Node* lhs32 = SmiToWord32(a);
Node* rhs32 = SmiToWord32(b);
Node* pair = Int32MulWithOverflow(lhs32, rhs32);
Node* overflow = Projection(1, pair);
// Check if the multiplication overflowed.
Label if_overflow(this, Label::kDeferred), if_notoverflow(this);
Branch(overflow, &if_overflow, &if_notoverflow);
BIND(&if_notoverflow);
{
// If the answer is zero, we may need to return -0.0, depending on the
// input.
Label answer_zero(this), answer_not_zero(this);
Node* answer = Projection(0, pair);
Node* zero = Int32Constant(0);
Branch(Word32Equal(answer, zero), &answer_zero, &answer_not_zero);
BIND(&answer_not_zero);
{
var_result = ChangeInt32ToTagged(answer);
Goto(&return_result);
}
BIND(&answer_zero);
{
Node* or_result = Word32Or(lhs32, rhs32);
Label if_should_be_negative_zero(this), if_should_be_zero(this);
Branch(Int32LessThan(or_result, zero), &if_should_be_negative_zero,
&if_should_be_zero);
BIND(&if_should_be_negative_zero);
{
var_result = MinusZeroConstant();
Goto(&return_result);
}
BIND(&if_should_be_zero);
{
var_result = SmiConstant(0);
Goto(&return_result);
}
}
}
BIND(&if_overflow);
{
var_lhs_float64.Bind(SmiToFloat64(a));
var_rhs_float64.Bind(SmiToFloat64(b));
Node* value = Float64Mul(var_lhs_float64.value(), var_rhs_float64.value());
var_result = AllocateHeapNumberWithValue(value);
Goto(&return_result);
}
BIND(&return_result);
return var_result;
}
Node* CodeStubAssembler::TrySmiDiv(Node* dividend, Node* divisor,
Label* bailout) {
// Both {dividend} and {divisor} are Smis. Bail out to floating point
// division if {divisor} is zero.
GotoIf(WordEqual(divisor, SmiConstant(0)), bailout);
// Do floating point division if {dividend} is zero and {divisor} is
// negative.
Label dividend_is_zero(this), dividend_is_not_zero(this);
Branch(WordEqual(dividend, SmiConstant(0)), &dividend_is_zero,
&dividend_is_not_zero);
BIND(&dividend_is_zero);
{
GotoIf(SmiLessThan(divisor, SmiConstant(0)), bailout);
Goto(&dividend_is_not_zero);
}
BIND(&dividend_is_not_zero);
Node* untagged_divisor = SmiToWord32(divisor);
Node* untagged_dividend = SmiToWord32(dividend);
// Do floating point division if {dividend} is kMinInt (or kMinInt - 1
// if the Smi size is 31) and {divisor} is -1.
Label divisor_is_minus_one(this), divisor_is_not_minus_one(this);
Branch(Word32Equal(untagged_divisor, Int32Constant(-1)),
&divisor_is_minus_one, &divisor_is_not_minus_one);
BIND(&divisor_is_minus_one);
{
GotoIf(Word32Equal(
untagged_dividend,
Int32Constant(kSmiValueSize == 32 ? kMinInt : (kMinInt >> 1))),
bailout);
Goto(&divisor_is_not_minus_one);
}
BIND(&divisor_is_not_minus_one);
Node* untagged_result = Int32Div(untagged_dividend, untagged_divisor);
Node* truncated = Int32Mul(untagged_result, untagged_divisor);
// Do floating point division if the remainder is not 0.
GotoIf(Word32NotEqual(untagged_dividend, truncated), bailout);
return SmiFromWord32(untagged_result);
}
TNode<Int32T> CodeStubAssembler::TruncateWordToWord32(
SloppyTNode<IntPtrT> value) {
if (Is64()) {
return TruncateInt64ToInt32(ReinterpretCast<Int64T>(value));
}
return ReinterpretCast<Int32T>(value);
}
TNode<BoolT> CodeStubAssembler::TaggedIsSmi(SloppyTNode<Object> a) {
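// Smis are tagged with a clear low bit, heap object pointers with a set
// one, so masking with kSmiTagMask distinguishes the two.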
return WordEqual(WordAnd(BitcastTaggedToWord(a), IntPtrConstant(kSmiTagMask)),
IntPtrConstant(0));
}
TNode<BoolT> CodeStubAssembler::TaggedIsNotSmi(SloppyTNode<Object> a) {
return WordNotEqual(
WordAnd(BitcastTaggedToWord(a), IntPtrConstant(kSmiTagMask)),
IntPtrConstant(0));
}
TNode<BoolT> CodeStubAssembler::TaggedIsPositiveSmi(SloppyTNode<Object> a) {
return WordEqual(WordAnd(BitcastTaggedToWord(a),
IntPtrConstant(kSmiTagMask | kSmiSignMask)),
IntPtrConstant(0));
}
TNode<BoolT> CodeStubAssembler::WordIsWordAligned(SloppyTNode<WordT> word) {
return WordEqual(IntPtrConstant(0),
WordAnd(word, IntPtrConstant(kPointerSize - 1)));
}
#if DEBUG
void CodeStubAssembler::Bind(Label* label, AssemblerDebugInfo debug_info) {
CodeAssembler::Bind(label, debug_info);
}
#else
void CodeStubAssembler::Bind(Label* label) { CodeAssembler::Bind(label); }
#endif // DEBUG
void CodeStubAssembler::BranchIfPrototypesHaveNoElements(
Node* receiver_map, Label* definitely_no_elements,
Label* possibly_elements) {
CSA_SLOW_ASSERT(this, IsMap(receiver_map));
VARIABLE(var_map, MachineRepresentation::kTagged, receiver_map);
Label loop_body(this, &var_map);
Node* empty_fixed_array = LoadRoot(Heap::kEmptyFixedArrayRootIndex);
Node* empty_slow_element_dictionary =
LoadRoot(Heap::kEmptySlowElementDictionaryRootIndex);
Goto(&loop_body);
BIND(&loop_body);
{
Node* map = var_map.value();
Node* prototype = LoadMapPrototype(map);
GotoIf(IsNull(prototype), definitely_no_elements);
Node* prototype_map = LoadMap(prototype);
Node* prototype_instance_type = LoadMapInstanceType(prototype_map);
// Pessimistically assume elements if a Proxy, Special API Object,
// or JSValue wrapper is found on the prototype chain. After this
// instance type check, it's not necessary to check for interceptors or
// access checks.
Label if_custom(this, Label::kDeferred), if_notcustom(this);
Branch(Int32LessThanOrEqual(prototype_instance_type,
Int32Constant(LAST_CUSTOM_ELEMENTS_RECEIVER)),
&if_custom, &if_notcustom);
BIND(&if_custom);
{
// For string JSValue wrappers we still support the checks as long
// as they wrap the empty string.
GotoIfNot(InstanceTypeEqual(prototype_instance_type, JS_VALUE_TYPE),
possibly_elements);
Node* prototype_value = LoadJSValueValue(prototype);
Branch(IsEmptyString(prototype_value), &if_notcustom, possibly_elements);
}
BIND(&if_notcustom);
{
Node* prototype_elements = LoadElements(prototype);
var_map.Bind(prototype_map);
GotoIf(WordEqual(prototype_elements, empty_fixed_array), &loop_body);
Branch(WordEqual(prototype_elements, empty_slow_element_dictionary),
&loop_body, possibly_elements);
}
}
}
void CodeStubAssembler::BranchIfJSReceiver(Node* object, Label* if_true,
Label* if_false) {
GotoIf(TaggedIsSmi(object), if_false);
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
Branch(IsJSReceiver(object), if_true, if_false);
}
void CodeStubAssembler::BranchIfJSObject(Node* object, Label* if_true,
Label* if_false) {
GotoIf(TaggedIsSmi(object), if_false);
STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
Branch(IsJSObject(object), if_true, if_false);
}
TNode<BoolT> CodeStubAssembler::IsFastJSArray(SloppyTNode<Object> object,
SloppyTNode<Context> context) {
Label if_true(this), if_false(this, Label::kDeferred), exit(this);
BranchIfFastJSArray(object, context, &if_true, &if_false);
TVARIABLE(BoolT, var_result);
BIND(&if_true);
{
var_result = ReinterpretCast<BoolT>(Int32Constant(1));
Goto(&exit);
}
BIND(&if_false);
{
var_result = ReinterpretCast<BoolT>(Int32Constant(0));
Goto(&exit);
}
BIND(&exit);
return var_result;
}
void CodeStubAssembler::BranchIfFastJSArray(Node* object, Node* context,
Label* if_true, Label* if_false) {
GotoIfForceSlowPath(if_false);
// Bailout if receiver is a Smi.
GotoIf(TaggedIsSmi(object), if_false);
Node* map = LoadMap(object);
GotoIfNot(IsJSArrayMap(map), if_false);
// Bailout if receiver has slow elements.
Node* elements_kind = LoadMapElementsKind(map);
GotoIfNot(IsFastElementsKind(elements_kind), if_false);
// Check the prototype chain if receiver does not have packed elements.
GotoIfNot(IsPrototypeInitialArrayPrototype(context, map), if_false);
Branch(IsNoElementsProtectorCellInvalid(), if_false, if_true);
}
void CodeStubAssembler::BranchIfFastJSArrayForCopy(Node* object, Node* context,
Label* if_true,
Label* if_false) {
GotoIf(IsSpeciesProtectorCellInvalid(), if_false);
BranchIfFastJSArray(object, context, if_true, if_false);
}
void CodeStubAssembler::GotoIfForceSlowPath(Label* if_true) {
#ifdef V8_ENABLE_FORCE_SLOW_PATH
Node* const force_slow_path_addr =
ExternalConstant(ExternalReference::force_slow_path(isolate()));
Node* const force_slow = Load(MachineType::Uint8(), force_slow_path_addr);
GotoIf(force_slow, if_true);
#endif
}
Node* CodeStubAssembler::AllocateRaw(Node* size_in_bytes, AllocationFlags flags,
Node* top_address, Node* limit_address) {
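// Inline bump-pointer allocation: load the space's current {top} and
// {limit}; if the requested size fits below the limit, bump {top},
// otherwise fall back to a runtime allocation.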
Node* top = Load(MachineType::Pointer(), top_address);
Node* limit = Load(MachineType::Pointer(), limit_address);
// If there's not enough space, call the runtime.
VARIABLE(result, MachineRepresentation::kTagged);
Label runtime_call(this, Label::kDeferred), no_runtime_call(this);
Label merge_runtime(this, &result);
bool needs_double_alignment = flags & kDoubleAlignment;
if (flags & kAllowLargeObjectAllocation) {
Label next(this);
GotoIf(IsRegularHeapObjectSize(size_in_bytes), &next);
Node* runtime_flags = SmiConstant(
Smi::FromInt(AllocateDoubleAlignFlag::encode(needs_double_alignment) |
AllocateTargetSpace::encode(AllocationSpace::LO_SPACE)));
Node* const runtime_result =
CallRuntime(Runtime::kAllocateInTargetSpace, NoContextConstant(),
SmiTag(size_in_bytes), runtime_flags);
result.Bind(runtime_result);
Goto(&merge_runtime);
BIND(&next);
}
VARIABLE(adjusted_size, MachineType::PointerRepresentation(), size_in_bytes);
if (needs_double_alignment) {
Label not_aligned(this), done_alignment(this, &adjusted_size);
Branch(WordAnd(top, IntPtrConstant(kDoubleAlignmentMask)), &not_aligned,
&done_alignment);
BIND(&not_aligned);
Node* not_aligned_size = IntPtrAdd(size_in_bytes, IntPtrConstant(4));
adjusted_size.Bind(not_aligned_size);
Goto(&done_alignment);
BIND(&done_alignment);
}
Node* new_top = IntPtrAdd(top, adjusted_size.value());
Branch(UintPtrGreaterThanOrEqual(new_top, limit), &runtime_call,
&no_runtime_call);
BIND(&runtime_call);
Node* runtime_result;
if (flags & kPretenured) {
Node* runtime_flags = SmiConstant(
Smi::FromInt(AllocateDoubleAlignFlag::encode(needs_double_alignment) |
AllocateTargetSpace::encode(AllocationSpace::OLD_SPACE)));
runtime_result =
CallRuntime(Runtime::kAllocateInTargetSpace, NoContextConstant(),
SmiTag(size_in_bytes), runtime_flags);
} else {
runtime_result = CallRuntime(Runtime::kAllocateInNewSpace,
NoContextConstant(), SmiTag(size_in_bytes));
}
result.Bind(runtime_result);
Goto(&merge_runtime);
// When there is enough space, return `top' and bump it up.
BIND(&no_runtime_call);
Node* no_runtime_result = top;
StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
new_top);
VARIABLE(address, MachineType::PointerRepresentation(), no_runtime_result);
if (needs_double_alignment) {
Label needs_filler(this), done_filling(this, &address);
Branch(IntPtrEqual(adjusted_size.value(), size_in_bytes), &done_filling,
&needs_filler);
BIND(&needs_filler);
// Store a filler and increase the address by kPointerSize.
StoreNoWriteBarrier(MachineRepresentation::kTagged, top,
LoadRoot(Heap::kOnePointerFillerMapRootIndex));
address.Bind(IntPtrAdd(no_runtime_result, IntPtrConstant(4)));
Goto(&done_filling);
BIND(&done_filling);
}
no_runtime_result = BitcastWordToTagged(
IntPtrAdd(address.value(), IntPtrConstant(kHeapObjectTag)));
result.Bind(no_runtime_result);
Goto(&merge_runtime);
BIND(&merge_runtime);
return result.value();
}
Node* CodeStubAssembler::AllocateRawUnaligned(Node* size_in_bytes,
AllocationFlags flags,
Node* top_address,
Node* limit_address) {
DCHECK_EQ(flags & kDoubleAlignment, 0);
return AllocateRaw(size_in_bytes, flags, top_address, limit_address);
}
Node* CodeStubAssembler::AllocateRawDoubleAligned(Node* size_in_bytes,
AllocationFlags flags,
Node* top_address,
Node* limit_address) {
#if defined(V8_HOST_ARCH_32_BIT)
return AllocateRaw(size_in_bytes, flags | kDoubleAlignment, top_address,
limit_address);
#elif defined(V8_HOST_ARCH_64_BIT)
// Allocation on a 64-bit machine is naturally double-aligned.
return AllocateRaw(size_in_bytes, flags & ~kDoubleAlignment, top_address,
limit_address);
#else
#error Architecture not supported
#endif
}
Node* CodeStubAssembler::AllocateInNewSpace(Node* size_in_bytes,
AllocationFlags flags) {
DCHECK(flags == kNone || flags == kDoubleAlignment);
CSA_ASSERT(this, IsRegularHeapObjectSize(size_in_bytes));
return Allocate(size_in_bytes, flags);
}
Node* CodeStubAssembler::Allocate(Node* size_in_bytes, AllocationFlags flags) {
Comment("Allocate");
bool const new_space = !(flags & kPretenured);
Node* top_address = ExternalConstant(
new_space
? ExternalReference::new_space_allocation_top_address(isolate())
: ExternalReference::old_space_allocation_top_address(isolate()));
DCHECK_EQ(kPointerSize,
ExternalReference::new_space_allocation_limit_address(isolate())
.address() -
ExternalReference::new_space_allocation_top_address(isolate())
.address());
DCHECK_EQ(kPointerSize,
ExternalReference::old_space_allocation_limit_address(isolate())
.address() -
ExternalReference::old_space_allocation_top_address(isolate())
.address());
Node* limit_address = IntPtrAdd(top_address, IntPtrConstant(kPointerSize));
if (flags & kDoubleAlignment) {
return AllocateRawDoubleAligned(size_in_bytes, flags, top_address,
limit_address);
} else {
return AllocateRawUnaligned(size_in_bytes, flags, top_address,
limit_address);
}
}
Node* CodeStubAssembler::AllocateInNewSpace(int size_in_bytes,
AllocationFlags flags) {
CHECK(flags == kNone || flags == kDoubleAlignment);
DCHECK_LE(size_in_bytes, kMaxRegularHeapObjectSize);
return CodeStubAssembler::Allocate(IntPtrConstant(size_in_bytes), flags);
}
Node* CodeStubAssembler::Allocate(int size_in_bytes, AllocationFlags flags) {
return CodeStubAssembler::Allocate(IntPtrConstant(size_in_bytes), flags);
}
Node* CodeStubAssembler::InnerAllocate(Node* previous, Node* offset) {
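// Returns a tagged pointer {offset} bytes into the existing {previous}
// allocation; no new memory is reserved.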
return BitcastWordToTagged(IntPtrAdd(BitcastTaggedToWord(previous), offset));
}
Node* CodeStubAssembler::InnerAllocate(Node* previous, int offset) {
return InnerAllocate(previous, IntPtrConstant(offset));
}
Node* CodeStubAssembler::IsRegularHeapObjectSize(Node* size) {
return UintPtrLessThanOrEqual(size,
IntPtrConstant(kMaxRegularHeapObjectSize));
}
void CodeStubAssembler::BranchIfToBooleanIsTrue(Node* value, Label* if_true,
Label* if_false) {
Label if_smi(this), if_notsmi(this), if_heapnumber(this, Label::kDeferred),
if_bigint(this, Label::kDeferred);
// Rule out false {value}.
GotoIf(WordEqual(value, FalseConstant()), if_false);
// Check if {value} is a Smi or a HeapObject.
Branch(TaggedIsSmi(value), &if_smi, &if_notsmi);
BIND(&if_smi);
{
// The {value} is a Smi, only need to check against zero.
BranchIfSmiEqual(value, SmiConstant(0), if_false, if_true);
}
BIND(&if_notsmi);
{
// Check if {value} is the empty string.
GotoIf(IsEmptyString(value), if_false);
// The {value} is a HeapObject, load its map.
Node* value_map = LoadMap(value);
// Only null, undefined and document.all have the undetectable bit set,
// so we can return false immediately when that bit is set.
GotoIf(IsUndetectableMap(value_map), if_false);
// We still need to handle numbers specially, but all other {value}s
// that make it here yield true.
GotoIf(IsHeapNumberMap(value_map), &if_heapnumber);
Branch(IsBigInt(value), &if_bigint, if_true);
BIND(&if_heapnumber);
{
// Load the floating point value of {value}.
Node* value_value = LoadObjectField(value, HeapNumber::kValueOffset,
MachineType::Float64());
// Check if the floating point {value} is neither 0.0, -0.0 nor NaN.
Branch(Float64LessThan(Float64Constant(0.0), Float64Abs(value_value)),
if_true, if_false);
}
BIND(&if_bigint);
{
Node* result =
CallRuntime(Runtime::kBigIntToBoolean, NoContextConstant(), value);
CSA_ASSERT(this, IsBoolean(result));
Branch(WordEqual(result, TrueConstant()), if_true, if_false);
}
}
}
Node* CodeStubAssembler::LoadFromFrame(int offset, MachineType rep) {
Node* frame_pointer = LoadFramePointer();
return Load(rep, frame_pointer, IntPtrConstant(offset));
}
Node* CodeStubAssembler::LoadFromParentFrame(int offset, MachineType rep) {
Node* frame_pointer = LoadParentFramePointer();
return Load(rep, frame_pointer, IntPtrConstant(offset));
}
Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset,
MachineType rep) {
return Load(rep, buffer, IntPtrConstant(offset));
}
Node* CodeStubAssembler::LoadObjectField(SloppyTNode<HeapObject> object,
int offset, MachineType rep) {
return Load(rep, object, IntPtrConstant(offset - kHeapObjectTag));
}
Node* CodeStubAssembler::LoadObjectField(SloppyTNode<HeapObject> object,
SloppyTNode<IntPtrT> offset,
MachineType rep) {
return Load(rep, object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)));
}
TNode<IntPtrT> CodeStubAssembler::LoadAndUntagObjectField(
SloppyTNode<HeapObject> object, int offset) {
if (Is64()) {
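// With 32-bit Smi payloads in the upper half of a 64-bit word, it suffices
// to load the payload half; on little-endian targets it lives at
// offset + kPointerSize / 2.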
#if V8_TARGET_LITTLE_ENDIAN
offset += kPointerSize / 2;
#endif
return ChangeInt32ToIntPtr(
LoadObjectField(object, offset, MachineType::Int32()));
} else {
return SmiToWord(LoadObjectField(object, offset, MachineType::AnyTagged()));
}
}
TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ObjectField(Node* object,
int offset) {
if (Is64()) {
#if V8_TARGET_LITTLE_ENDIAN
offset += kPointerSize / 2;
#endif
return UncheckedCast<Int32T>(
LoadObjectField(object, offset, MachineType::Int32()));
} else {
return SmiToWord32(
LoadObjectField(object, offset, MachineType::AnyTagged()));
}
}
TNode<IntPtrT> CodeStubAssembler::LoadAndUntagSmi(Node* base, int index) {
if (Is64()) {
#if V8_TARGET_LITTLE_ENDIAN
index += kPointerSize / 2;
#endif
return ChangeInt32ToIntPtr(
Load(MachineType::Int32(), base, IntPtrConstant(index)));
} else {
return SmiToWord(
Load(MachineType::AnyTagged(), base, IntPtrConstant(index)));
}
}
Node* CodeStubAssembler::LoadAndUntagToWord32Root(
Heap::RootListIndex root_index) {
Node* roots_array_start =
ExternalConstant(ExternalReference::roots_array_start(isolate()));
int index = root_index * kPointerSize;
if (Is64()) {
#if V8_TARGET_LITTLE_ENDIAN
index += kPointerSize / 2;
#endif
return Load(MachineType::Int32(), roots_array_start, IntPtrConstant(index));
} else {
return SmiToWord32(Load(MachineType::AnyTagged(), roots_array_start,
IntPtrConstant(index)));
}
}
Node* CodeStubAssembler::StoreAndTagSmi(Node* base, int offset, Node* value) {
if (Is64()) {
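// Store the 32-bit payload into the half-word that holds the Smi value and
// clear the other half; which half is which depends on endianness.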
int zero_offset = offset + kPointerSize / 2;
int payload_offset = offset;
#if V8_TARGET_LITTLE_ENDIAN
std::swap(zero_offset, payload_offset);
#endif
StoreNoWriteBarrier(MachineRepresentation::kWord32, base,
IntPtrConstant(zero_offset), Int32Constant(0));
return StoreNoWriteBarrier(MachineRepresentation::kWord32, base,
IntPtrConstant(payload_offset),
TruncateInt64ToInt32(value));
} else {
return StoreNoWriteBarrier(MachineRepresentation::kTaggedSigned, base,
IntPtrConstant(offset), SmiTag(value));
}
}
TNode<Float64T> CodeStubAssembler::LoadHeapNumberValue(
SloppyTNode<HeapNumber> object) {
return TNode<Float64T>::UncheckedCast(LoadObjectField(
object, HeapNumber::kValueOffset, MachineType::Float64()));
}
TNode<Map> CodeStubAssembler::LoadMap(SloppyTNode<HeapObject> object) {
return UncheckedCast<Map>(LoadObjectField(object, HeapObject::kMapOffset));
}
TNode<Int32T> CodeStubAssembler::LoadInstanceType(
SloppyTNode<HeapObject> object) {
return LoadMapInstanceType(LoadMap(object));
}
Node* CodeStubAssembler::HasInstanceType(Node* object,
InstanceType instance_type) {
return InstanceTypeEqual(LoadInstanceType(object), instance_type);
}
Node* CodeStubAssembler::DoesntHaveInstanceType(Node* object,
InstanceType instance_type) {
return Word32NotEqual(LoadInstanceType(object), Int32Constant(instance_type));
}
Node* CodeStubAssembler::TaggedDoesntHaveInstanceType(Node* any_tagged,
InstanceType type) {
/* return Phi <TaggedIsSmi(val), DoesntHaveInstanceType(val, type)> */
Node* tagged_is_smi = TaggedIsSmi(any_tagged);
return Select(tagged_is_smi, [=]() { return tagged_is_smi; },
[=]() { return DoesntHaveInstanceType(any_tagged, type); },
MachineRepresentation::kBit);
}
TNode<HeapObject> CodeStubAssembler::LoadFastProperties(
SloppyTNode<JSObject> object) {
CSA_SLOW_ASSERT(this, Word32Not(IsDictionaryMap(LoadMap(object))));
Node* properties = LoadObjectField(object, JSObject::kPropertiesOrHashOffset);
return SelectTaggedConstant<HeapObject>(
TaggedIsSmi(properties), EmptyFixedArrayConstant(), properties);
}
TNode<HeapObject> CodeStubAssembler::LoadSlowProperties(
SloppyTNode<JSObject> object) {
CSA_SLOW_ASSERT(this, IsDictionaryMap(LoadMap(object)));
Node* properties = LoadObjectField(object, JSObject::kPropertiesOrHashOffset);
return SelectTaggedConstant<HeapObject>(
TaggedIsSmi(properties), EmptyPropertyDictionaryConstant(), properties);
}
TNode<FixedArrayBase> CodeStubAssembler::LoadElements(
SloppyTNode<JSObject> object) {
return CAST(LoadObjectField(object, JSObject::kElementsOffset));
}
TNode<Object> CodeStubAssembler::LoadJSArrayLength(SloppyTNode<JSArray> array) {
CSA_ASSERT(this, IsJSArray(array));
return CAST(LoadObjectField(array, JSArray::kLengthOffset));
}
TNode<Smi> CodeStubAssembler::LoadFastJSArrayLength(
SloppyTNode<JSArray> array) {
TNode<Object> length = LoadJSArrayLength(array);
CSA_ASSERT(this, IsFastElementsKind(LoadMapElementsKind(LoadMap(array))));
// JSArray length is always a positive Smi for fast arrays.
CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
return UncheckedCast<Smi>(length);
}
TNode<Smi> CodeStubAssembler::LoadFixedArrayBaseLength(
SloppyTNode<FixedArrayBase> array) {
return CAST(LoadObjectField(array, FixedArrayBase::kLengthOffset));
}
TNode<IntPtrT> CodeStubAssembler::LoadAndUntagFixedArrayBaseLength(
SloppyTNode<FixedArrayBase> array) {
return LoadAndUntagObjectField(array, FixedArrayBase::kLengthOffset);
}
TNode<Int32T> CodeStubAssembler::LoadMapBitField(SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
return UncheckedCast<Int32T>(
LoadObjectField(map, Map::kBitFieldOffset, MachineType::Uint8()));
}
TNode<Int32T> CodeStubAssembler::LoadMapBitField2(SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
return UncheckedCast<Int32T>(
LoadObjectField(map, Map::kBitField2Offset, MachineType::Uint8()));
}
TNode<Uint32T> CodeStubAssembler::LoadMapBitField3(SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
return UncheckedCast<Uint32T>(
LoadObjectField(map, Map::kBitField3Offset, MachineType::Uint32()));
}
TNode<Int32T> CodeStubAssembler::LoadMapInstanceType(SloppyTNode<Map> map) {
return UncheckedCast<Int32T>(
LoadObjectField(map, Map::kInstanceTypeOffset, MachineType::Uint16()));
}
TNode<Int32T> CodeStubAssembler::LoadMapElementsKind(SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
Node* bit_field2 = LoadMapBitField2(map);
return Signed(DecodeWord32<Map::ElementsKindBits>(bit_field2));
}
TNode<DescriptorArray> CodeStubAssembler::LoadMapDescriptors(
SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
return CAST(LoadObjectField(map, Map::kDescriptorsOffset));
}
TNode<Object> CodeStubAssembler::LoadMapPrototype(SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
return CAST(LoadObjectField(map, Map::kPrototypeOffset));
}
TNode<PrototypeInfo> CodeStubAssembler::LoadMapPrototypeInfo(
SloppyTNode<Map> map, Label* if_no_proto_info) {
CSA_ASSERT(this, IsMap(map));
Node* prototype_info =
LoadObjectField(map, Map::kTransitionsOrPrototypeInfoOffset);
GotoIf(TaggedIsSmi(prototype_info), if_no_proto_info);
GotoIfNot(WordEqual(LoadMap(prototype_info),
LoadRoot(Heap::kPrototypeInfoMapRootIndex)),
if_no_proto_info);
return CAST(prototype_info);
}
TNode<IntPtrT> CodeStubAssembler::LoadMapInstanceSizeInWords(
SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
return ChangeInt32ToIntPtr(LoadObjectField(
map, Map::kInstanceSizeInWordsOffset, MachineType::Uint8()));
}
TNode<IntPtrT> CodeStubAssembler::LoadMapInobjectPropertiesStartInWords(
SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
// See Map::GetInObjectPropertiesStartInWords() for details.
CSA_ASSERT(this, IsJSObjectMap(map));
return ChangeInt32ToIntPtr(LoadObjectField(
map, Map::kInObjectPropertiesStartOrConstructorFunctionIndexOffset,
MachineType::Uint8()));
}
TNode<IntPtrT> CodeStubAssembler::LoadMapConstructorFunctionIndex(
SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
// See Map::GetConstructorFunctionIndex() for details.
CSA_ASSERT(this, IsPrimitiveInstanceType(LoadMapInstanceType(map)));
return ChangeInt32ToIntPtr(LoadObjectField(
map, Map::kInObjectPropertiesStartOrConstructorFunctionIndexOffset,
MachineType::Uint8()));
}
TNode<Object> CodeStubAssembler::LoadMapConstructor(SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
TVARIABLE(Object, result,
LoadObjectField(map, Map::kConstructorOrBackPointerOffset));
Label done(this), loop(this, &result);
Goto(&loop);
BIND(&loop);
{
GotoIf(TaggedIsSmi(result), &done);
Node* is_map_type =
InstanceTypeEqual(LoadInstanceType(CAST(result)), MAP_TYPE);
GotoIfNot(is_map_type, &done);
result =
LoadObjectField(CAST(result), Map::kConstructorOrBackPointerOffset);
Goto(&loop);
}
BIND(&done);
return result;
}
Node* CodeStubAssembler::LoadMapEnumLength(SloppyTNode<Map> map) {
CSA_SLOW_ASSERT(this, IsMap(map));
Node* bit_field3 = LoadMapBitField3(map);
return DecodeWordFromWord32<Map::EnumLengthBits>(bit_field3);
}
Node* CodeStubAssembler::LoadMapBackPointer(SloppyTNode<Map> map) {
Node* object = LoadObjectField(map, Map::kConstructorOrBackPointerOffset);
return Select(IsMap(object), [=] { return object; },
[=] { return UndefinedConstant(); },
MachineRepresentation::kTagged);
}
TNode<IntPtrT> CodeStubAssembler::LoadJSReceiverIdentityHash(
SloppyTNode<Object> receiver, Label* if_no_hash) {
TVARIABLE(IntPtrT, var_hash);
Label done(this), if_smi(this), if_property_array(this),
if_property_dictionary(this), if_fixed_array(this);
TNode<Object> properties_or_hash =
LoadObjectField(TNode<HeapObject>::UncheckedCast(receiver),
JSReceiver::kPropertiesOrHashOffset);
GotoIf(TaggedIsSmi(properties_or_hash), &if_smi);
TNode<HeapObject> properties =
TNode<HeapObject>::UncheckedCast(properties_or_hash);
TNode<Int32T> properties_instance_type = LoadInstanceType(properties);
GotoIf(InstanceTypeEqual(properties_instance_type, PROPERTY_ARRAY_TYPE),
&if_property_array);
Branch(InstanceTypeEqual(properties_instance_type, HASH_TABLE_TYPE),
&if_property_dictionary, &if_fixed_array);
BIND(&if_fixed_array);
{
var_hash = IntPtrConstant(PropertyArray::kNoHashSentinel);
Goto(&done);
}
BIND(&if_smi);
{
var_hash = SmiUntag(TNode<Smi>::UncheckedCast(properties_or_hash));
Goto(&done);
}
BIND(&if_property_array);
{
TNode<IntPtrT> length_and_hash = LoadAndUntagObjectField(
properties, PropertyArray::kLengthAndHashOffset);
var_hash = TNode<IntPtrT>::UncheckedCast(
DecodeWord<PropertyArray::HashField>(length_and_hash));
Goto(&done);
}
BIND(&if_property_dictionary);
{
var_hash = SmiUntag(
LoadFixedArrayElement(properties, NameDictionary::kObjectHashIndex));
Goto(&done);
}
BIND(&done);
if (if_no_hash != nullptr) {
GotoIf(
IntPtrEqual(var_hash, IntPtrConstant(PropertyArray::kNoHashSentinel)),
if_no_hash);
}
return var_hash;
}
TNode<Uint32T> CodeStubAssembler::LoadNameHashField(SloppyTNode<Name> name) {
CSA_ASSERT(this, IsName(name));
return LoadObjectField<Uint32T>(name, Name::kHashFieldOffset);
}
TNode<Uint32T> CodeStubAssembler::LoadNameHash(SloppyTNode<Name> name,
Label* if_hash_not_computed) {
TNode<Uint32T> hash_field = LoadNameHashField(name);
if (if_hash_not_computed != nullptr) {
GotoIf(IsSetWord32(hash_field, Name::kHashNotComputedMask),
if_hash_not_computed);
}
return Unsigned(Word32Shr(hash_field, Int32Constant(Name::kHashShift)));
}
TNode<IntPtrT> CodeStubAssembler::LoadStringLengthAsWord(
SloppyTNode<String> object) {
return SmiUntag(LoadStringLengthAsSmi(object));
}
TNode<Smi> CodeStubAssembler::LoadStringLengthAsSmi(
SloppyTNode<String> object) {
CSA_ASSERT(this, IsString(object));
return CAST(LoadObjectField(object, String::kLengthOffset,
MachineType::TaggedPointer()));
}
Node* CodeStubAssembler::PointerToSeqStringData(Node* seq_string) {
CSA_ASSERT(this, IsString(seq_string));
CSA_ASSERT(this,
IsSequentialStringInstanceType(LoadInstanceType(seq_string)));
STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
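// Both sequential string layouts place their character data directly after
// the same-sized header, so one constant covers either encoding.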
return IntPtrAdd(
BitcastTaggedToWord(seq_string),
IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag));
}
Node* CodeStubAssembler::LoadJSValueValue(Node* object) {
CSA_ASSERT(this, IsJSValue(object));
return LoadObjectField(object, JSValue::kValueOffset);
}
TNode<Object> CodeStubAssembler::LoadWeakCellValueUnchecked(Node* weak_cell) {
// TODO(ishell): fix callers.
return LoadObjectField(weak_cell, WeakCell::kValueOffset);
}
TNode<Object> CodeStubAssembler::LoadWeakCellValue(
SloppyTNode<WeakCell> weak_cell, Label* if_cleared) {
CSA_ASSERT(this, IsWeakCell(weak_cell));
TNode<Object> value = LoadWeakCellValueUnchecked(weak_cell);
if (if_cleared != nullptr) {
GotoIf(WordEqual(value, IntPtrConstant(0)), if_cleared);
}
return value;
}
Node* CodeStubAssembler::LoadFixedArrayElement(Node* object, Node* index_node,
int additional_offset,
ParameterMode parameter_mode) {
CSA_SLOW_ASSERT(this, IntPtrGreaterThanOrEqual(
ParameterToWord(index_node, parameter_mode),
IntPtrConstant(0)));
int32_t header_size =
FixedArray::kHeaderSize + additional_offset - kHeapObjectTag;
Node* offset = ElementOffsetFromIndex(index_node, HOLEY_ELEMENTS,
parameter_mode, header_size);
return Load(MachineType::AnyTagged(), object, offset);
}
Node* CodeStubAssembler::LoadFixedTypedArrayElement(
Node* data_pointer, Node* index_node, ElementsKind elements_kind,
ParameterMode parameter_mode) {
Node* offset =
ElementOffsetFromIndex(index_node, elements_kind, parameter_mode, 0);
MachineType type;
switch (elements_kind) {
case UINT8_ELEMENTS: /* fall through */
case UINT8_CLAMPED_ELEMENTS:
type = MachineType::Uint8();
break;
case INT8_ELEMENTS:
type = MachineType::Int8();
break;
case UINT16_ELEMENTS:
type = MachineType::Uint16();
break;
case INT16_ELEMENTS:
type = MachineType::Int16();
break;
case UINT32_ELEMENTS:
type = MachineType::Uint32();
break;
case INT32_ELEMENTS:
type = MachineType::Int32();
break;
case FLOAT32_ELEMENTS:
type = MachineType::Float32();
break;
case FLOAT64_ELEMENTS:
type = MachineType::Float64();
break;
default:
UNREACHABLE();
}
return Load(type, data_pointer, offset);
}
Node* CodeStubAssembler::LoadFixedTypedArrayElementAsTagged(
Node* data_pointer, Node* index_node, ElementsKind elements_kind,
ParameterMode parameter_mode) {
Node* value = LoadFixedTypedArrayElement(data_pointer, index_node,
elements_kind, parameter_mode);
switch (elements_kind) {
case ElementsKind::INT8_ELEMENTS:
case ElementsKind::UINT8_CLAMPED_ELEMENTS:
case ElementsKind::UINT8_ELEMENTS:
case ElementsKind::INT16_ELEMENTS:
case ElementsKind::UINT16_ELEMENTS:
return SmiFromWord32(value);
case ElementsKind::INT32_ELEMENTS:
return ChangeInt32ToTagged(value);
case ElementsKind::UINT32_ELEMENTS:
return ChangeUint32ToTagged(value);
case ElementsKind::FLOAT32_ELEMENTS:
return AllocateHeapNumberWithValue(ChangeFloat32ToFloat64(value));
case ElementsKind::FLOAT64_ELEMENTS:
return AllocateHeapNumberWithValue(value);
default:
UNREACHABLE();
}
}
TNode<Object> CodeStubAssembler::LoadFeedbackVectorSlot(
Node* object, Node* slot_index_node, int additional_offset,
ParameterMode parameter_mode) {
CSA_SLOW_ASSERT(this, IsFeedbackVector(object));
CSA_SLOW_ASSERT(this, MatchesParameterMode(slot_index_node, parameter_mode));
int32_t header_size =
FeedbackVector::kFeedbackSlotsOffset + additional_offset - kHeapObjectTag;
Node* offset = ElementOffsetFromIndex(slot_index_node, HOLEY_ELEMENTS,
parameter_mode, header_size);
return UncheckedCast<Object>(Load(MachineType::AnyTagged(), object, offset));
}
Node* CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement(
Node* object, Node* index_node, int additional_offset,
ParameterMode parameter_mode) {
CSA_SLOW_ASSERT(this, IsFixedArraySubclass(object));
CSA_SLOW_ASSERT(this, MatchesParameterMode(index_node, parameter_mode));
int32_t header_size =
FixedArray::kHeaderSize + additional_offset - kHeapObjectTag;
#if V8_TARGET_LITTLE_ENDIAN
if (Is64()) {
header_size += kPointerSize / 2;
}
#endif
Node* offset = ElementOffsetFromIndex(index_node, HOLEY_ELEMENTS,
parameter_mode, header_size);
if (Is64()) {
return Load(MachineType::Int32(), object, offset);
} else {
return SmiToWord32(Load(MachineType::AnyTagged(), object, offset));
}
}
Node* CodeStubAssembler::LoadFixedDoubleArrayElement(
Node* object, Node* index_node, MachineType machine_type,
int additional_offset, ParameterMode parameter_mode, Label* if_hole) {
CSA_ASSERT(this, IsFixedDoubleArray(object));
CSA_SLOW_ASSERT(this, MatchesParameterMode(index_node, parameter_mode));
int32_t header_size =
FixedDoubleArray::kHeaderSize + additional_offset - kHeapObjectTag;
Node* offset = ElementOffsetFromIndex(index_node, HOLEY_DOUBLE_ELEMENTS,
parameter_mode, header_size);
return LoadDoubleWithHoleCheck(object, offset, if_hole, machine_type);
}
Node* CodeStubAssembler::LoadDoubleWithHoleCheck(Node* base, Node* offset,
Label* if_hole,
MachineType machine_type) {
if (if_hole) {
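// The hole is encoded as a specific NaN bit pattern, so comparing the raw
// bits (or just the upper word on 32-bit targets) detects it exactly.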
// TODO(ishell): Compare only the upper part for the hole once the
// compiler is able to fold addition of already complex |offset| with
// |kIeeeDoubleExponentWordOffset| into one addressing mode.
if (Is64()) {
Node* element = Load(MachineType::Uint64(), base, offset);
GotoIf(Word64Equal(element, Int64Constant(kHoleNanInt64)), if_hole);
} else {
Node* element_upper = Load(
MachineType::Uint32(), base,
IntPtrAdd(offset, IntPtrConstant(kIeeeDoubleExponentWordOffset)));
GotoIf(Word32Equal(element_upper, Int32Constant(kHoleNanUpper32)),
if_hole);
}
}
if (machine_type.IsNone()) {
// This means the actual value is not needed.
return nullptr;
}
return Load(machine_type, base, offset);
}
TNode<Object> CodeStubAssembler::LoadContextElement(
SloppyTNode<Context> context, int slot_index) {
int offset = Context::SlotOffset(slot_index);
return UncheckedCast<Object>(
Load(MachineType::AnyTagged(), context, IntPtrConstant(offset)));
}
TNode<Object> CodeStubAssembler::LoadContextElement(
SloppyTNode<Context> context, SloppyTNode<IntPtrT> slot_index) {
Node* offset =
IntPtrAdd(TimesPointerSize(slot_index),
IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
return UncheckedCast<Object>(Load(MachineType::AnyTagged(), context, offset));
}
void CodeStubAssembler::StoreContextElement(SloppyTNode<Context> context,
int slot_index,
SloppyTNode<Object> value) {
int offset = Context::SlotOffset(slot_index);
Store(context, IntPtrConstant(offset), value);
}
void CodeStubAssembler::StoreContextElement(SloppyTNode<Context> context,
SloppyTNode<IntPtrT> slot_index,
SloppyTNode<Object> value) {
Node* offset =
IntPtrAdd(TimesPointerSize(slot_index),
IntPtrConstant(Context::kHeaderSize - kHeapObjectTag));
Store(context, offset, value);
}
void CodeStubAssembler::StoreContextElementNoWriteBarrier(
SloppyTNode<Context> context, int slot_index, SloppyTNode<Object> value) {
int offset = Context::SlotOffset(slot_index);
StoreNoWriteBarrier(MachineRepresentation::kTagged, context,
IntPtrConstant(offset), value);
}
TNode<Context> CodeStubAssembler::LoadNativeContext(
SloppyTNode<Context> context) {
return UncheckedCast<Context>(
LoadContextElement(context, Context::NATIVE_CONTEXT_INDEX));
}
TNode<Context> CodeStubAssembler::LoadModuleContext(
SloppyTNode<Context> context) {
Node* module_map = LoadRoot(Heap::kModuleContextMapRootIndex);
Variable cur_context(this, MachineRepresentation::kTaggedPointer);
cur_context.Bind(context);
Label context_found(this);
Variable* context_search_loop_variables[1] = {&cur_context};
Label context_search(this, 1, context_search_loop_variables);
// Loop until cur_context->map() is module_map.
Goto(&context_search);
BIND(&context_search);
{
CSA_ASSERT(this, Word32BinaryNot(IsNativeContext(cur_context.value())));
GotoIf(WordEqual(LoadMap(cur_context.value()), module_map), &context_found);
cur_context.Bind(
LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX));
Goto(&context_search);
}
BIND(&context_found);
return UncheckedCast<Context>(cur_context.value());
}
TNode<Map> CodeStubAssembler::LoadJSArrayElementsMap(
SloppyTNode<Int32T> kind, SloppyTNode<Context> native_context) {
CSA_ASSERT(this, IsFastElementsKind(kind));
CSA_ASSERT(this, IsNativeContext(native_context));
Node* offset = IntPtrAdd(IntPtrConstant(Context::FIRST_JS_ARRAY_MAP_SLOT),
ChangeInt32ToIntPtr(kind));
return UncheckedCast<Map>(LoadContextElement(native_context, offset));
}
TNode<Map> CodeStubAssembler::LoadJSArrayElementsMap(
ElementsKind kind, SloppyTNode<Context> native_context) {
CSA_ASSERT(this, IsNativeContext(native_context));
return UncheckedCast<Map>(
LoadContextElement(native_context, Context::ArrayMapIndex(kind)));
}
Node* CodeStubAssembler::LoadJSFunctionPrototype(Node* function,
Label* if_bailout) {
CSA_ASSERT(this, TaggedIsNotSmi(function));
CSA_ASSERT(this, IsJSFunction(function));
CSA_ASSERT(this, IsFunctionWithPrototypeSlotMap(LoadMap(function)));
CSA_ASSERT(this, IsClearWord32<Map::HasNonInstancePrototypeBit>(
LoadMapBitField(LoadMap(function))));
Node* proto_or_map =
LoadObjectField(function, JSFunction::kPrototypeOrInitialMapOffset);
GotoIf(IsTheHole(proto_or_map), if_bailout);
VARIABLE(var_result, MachineRepresentation::kTagged, proto_or_map);
Label done(this, &var_result);
GotoIfNot(IsMap(proto_or_map), &done);
var_result.Bind(LoadMapPrototype(proto_or_map));
Goto(&done);
BIND(&done);
return var_result.value();
}
void CodeStubAssembler::StoreHeapNumberValue(SloppyTNode<HeapNumber> object,
SloppyTNode<Float64T> value) {
StoreObjectFieldNoWriteBarrier(object, HeapNumber::kValueOffset, value,
MachineRepresentation::kFloat64);
}
Node* CodeStubAssembler::StoreObjectField(
Node* object, int offset, Node* value) {
DCHECK_NE(HeapObject::kMapOffset, offset); // Use StoreMap instead.
return Store(object, IntPtrConstant(offset - kHeapObjectTag), value);
}
Node* CodeStubAssembler::StoreObjectField(Node* object, Node* offset,
Node* value) {
int const_offset;
if (ToInt32Constant(offset, const_offset)) {
return StoreObjectField(object, const_offset, value);
}
return Store(object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)),
value);
}
Node* CodeStubAssembler::StoreObjectFieldNoWriteBarrier(
Node* object, int offset, Node* value, MachineRepresentation rep) {
return StoreNoWriteBarrier(rep, object,
IntPtrConstant(offset - kHeapObjectTag), value);
}
Node* CodeStubAssembler::StoreObjectFieldNoWriteBarrier(
Node* object, Node* offset, Node* value, MachineRepresentation rep) {
int const_offset;
if (ToInt32Constant(offset, const_offset)) {
return StoreObjectFieldNoWriteBarrier(object, const_offset, value, rep);
}
return StoreNoWriteBarrier(
rep, object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)), value);
}
Node* CodeStubAssembler::StoreMap(Node* object, Node* map) {
CSA_SLOW_ASSERT(this, IsMap(map));
return StoreWithMapWriteBarrier(
object, IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag), map);
}
Node* CodeStubAssembler::StoreMapNoWriteBarrier(
Node* object, Heap::RootListIndex map_root_index) {
return StoreMapNoWriteBarrier(object, LoadRoot(map_root_index));
}
Node* CodeStubAssembler::StoreMapNoWriteBarrier(Node* object, Node* map) {
CSA_SLOW_ASSERT(this, IsMap(map));
return StoreNoWriteBarrier(
MachineRepresentation::kTagged, object,
IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag), map);
}
Node* CodeStubAssembler::StoreObjectFieldRoot(Node* object, int offset,
Heap::RootListIndex root_index) {
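// Immortal immovable root objects can never be moved by the GC, so stores
// of such roots never need a write barrier.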
if (Heap::RootIsImmortalImmovable(root_index)) {
return StoreObjectFieldNoWriteBarrier(object, offset, LoadRoot(root_index));
} else {
return StoreObjectField(object, offset, LoadRoot(root_index));
}
}
Node* CodeStubAssembler::StoreFixedArrayElement(Node* object, Node* index_node,
Node* value,
WriteBarrierMode barrier_mode,
int additional_offset,
ParameterMode parameter_mode) {
CSA_SLOW_ASSERT(
this, Word32Or(IsHashTable(object),
Word32Or(IsFixedArray(object), IsPropertyArray(object))));
CSA_SLOW_ASSERT(this, MatchesParameterMode(index_node, parameter_mode));
DCHECK(barrier_mode == SKIP_WRITE_BARRIER ||
barrier_mode == UPDATE_WRITE_BARRIER);
STATIC_ASSERT(FixedArray::kHeaderSize == PropertyArray::kHeaderSize);
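// Thanks to the layout equivalence asserted above, a single header size
// covers all three accepted object kinds.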
int header_size =
FixedArray::kHeaderSize + additional_offset - kHeapObjectTag;
Node* offset = ElementOffsetFromIndex(index_node, HOLEY_ELEMENTS,
parameter_mode, header_size);
if (barrier_mode == SKIP_WRITE_BARRIER) {
return StoreNoWriteBarrier(MachineRepresentation::kTagged, object, offset,
value);
} else {
return Store(object, offset, value);
}
}
Node* CodeStubAssembler::StoreFixedDoubleArrayElement(
Node* object, Node* index_node, Node* value, ParameterMode parameter_mode) {
CSA_ASSERT(this, IsFixedDoubleArray(object));
CSA_SLOW_ASSERT(this, MatchesParameterMode(index_node, parameter_mode));
Node* offset =
ElementOffsetFromIndex(index_node, PACKED_DOUBLE_ELEMENTS, parameter_mode,
FixedArray::kHeaderSize - kHeapObjectTag);
MachineRepresentation rep = MachineRepresentation::kFloat64;
return StoreNoWriteBarrier(rep, object, offset, value);
}
Node* CodeStubAssembler::StoreFeedbackVectorSlot(Node* object,
Node* slot_index_node,
Node* value,
WriteBarrierMode barrier_mode,
int additional_offset,
ParameterMode parameter_mode) {
CSA_SLOW_ASSERT(this, IsFeedbackVector(object));
CSA_SLOW_ASSERT(this, MatchesParameterMode(slot_index_node, parameter_mode));
DCHECK(barrier_mode == SKIP_WRITE_BARRIER ||
barrier_mode == UPDATE_WRITE_BARRIER);
int header_size =
FeedbackVector::kFeedbackSlotsOffset + additional_offset - kHeapObjectTag;
Node* offset = ElementOffsetFromIndex(slot_index_node, HOLEY_ELEMENTS,
parameter_mode, header_size);
if (barrier_mode == SKIP_WRITE_BARRIER) {
return StoreNoWriteBarrier(MachineRepresentation::kTagged, object, offset,
value);
} else {
return Store(object, offset, value);
}
}
void CodeStubAssembler::EnsureArrayLengthWritable(Node* map, Label* bailout) {
// Check whether the length property is writable. The length property is the
// only default named property on arrays. It's nonconfigurable, hence
// guaranteed to stay the first property.
Node* descriptors = LoadMapDescriptors(map);
Node* details =
LoadFixedArrayElement(descriptors, DescriptorArray::ToDetailsIndex(0));
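// The attributes of the length property are encoded in the details word of
// its descriptor; bail out if the read-only bit is set.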
GotoIf(IsSetSmi(details, PropertyDetails::kAttributesReadOnlyMask), bailout);
}
Node* CodeStubAssembler::EnsureArrayPushable(Node* receiver, Label* bailout) {
// Disallow pushing onto prototypes. It might be the JSArray prototype.
// Disallow pushing onto non-extensible objects.
Comment("Disallow pushing onto prototypes");
Node* map = LoadMap(receiver);
Node* bit_field2 = LoadMapBitField2(map);
int mask = Map::IsPrototypeMapBit::kMask | Map::IsExtensibleBit::kMask;
Node* test = Word32And(bit_field2, Int32Constant(mask));
GotoIf(Word32NotEqual(test, Int32Constant(Map::IsExtensibleBit::kMask)),
bailout);
// Disallow pushing onto arrays in dictionary named property mode. We need
// to figure out whether the length property is still writable.
Comment("Disallow pushing onto arrays in dictionary named property mode");
GotoIf(IsDictionaryMap(map), bailout);
EnsureArrayLengthWritable(map, bailout);
Node* kind = DecodeWord32<Map::ElementsKindBits>(bit_field2);
return kind;
}
void CodeStubAssembler::PossiblyGrowElementsCapacity(
ParameterMode mode, ElementsKind kind, Node* array, Node* length,
Variable* var_elements, Node* growth, Label* bailout) {
Label fits(this, var_elements);
Node* capacity =
TaggedToParameter(LoadFixedArrayBaseLength(var_elements->value()), mode);
// |length| and |growth| are already in the representation selected by
// |mode|.
Node* new_length = IntPtrOrSmiAdd(growth, length, mode);
GotoIfNot(IntPtrOrSmiGreaterThan(new_length, capacity, mode), &fits);
Node* new_capacity = CalculateNewElementsCapacity(new_length, mode);
var_elements->Bind(GrowElementsCapacity(array, var_elements->value(), kind,
kind, capacity, new_capacity, mode,
bailout));
Goto(&fits);
BIND(&fits);
}
TNode<Smi> CodeStubAssembler::BuildAppendJSArray(ElementsKind kind,
SloppyTNode<JSArray> array,
CodeStubArguments* args,
TVariable<IntPtrT>* arg_index,
Label* bailout) {
CSA_SLOW_ASSERT(this, IsJSArray(array));
Comment("BuildAppendJSArray: %s", ElementsKindToString(kind));
Label pre_bailout(this);
Label success(this);
TVARIABLE(Smi, var_tagged_length);
ParameterMode mode = OptimalParameterMode();
VARIABLE(var_length, OptimalParameterRepresentation(),
TaggedToParameter(LoadFastJSArrayLength(array), mode));
VARIABLE(var_elements, MachineRepresentation::kTagged, LoadElements(array));
// Resize the capacity of the fixed array if it doesn't fit.
TNode<IntPtrT> first = *arg_index;
Node* growth = WordToParameter(
IntPtrSub(UncheckedCast<IntPtrT>(args->GetLength(INTPTR_PARAMETERS)),
first),
mode);
PossiblyGrowElementsCapacity(mode, kind, array, var_length.value(),
&var_elements, growth, &pre_bailout);
// Push each argument onto the end of the array now that there is enough
// capacity.
CodeStubAssembler::VariableList push_vars({&var_length}, zone());
Node* elements = var_elements.value();
args->ForEach(
push_vars,
[this, kind, mode, elements, &var_length, &pre_bailout](Node* arg) {
TryStoreArrayElement(kind, mode, &pre_bailout, elements,
var_length.value(), arg);
Increment(&var_length, 1, mode);
},
first, nullptr);
{
TNode<Smi> length = ParameterToTagged(var_length.value(), mode);
var_tagged_length = length;
StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
Goto(&success);
}
BIND(&pre_bailout);
{
TNode<Smi> length = ParameterToTagged(var_length.value(), mode);
var_tagged_length = length;
Node* diff = SmiSub(length, LoadFastJSArrayLength(array));
StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
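// Advance |arg_index| past the arguments that were successfully pushed so
// the caller's slow path can continue from there.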
*arg_index = IntPtrAdd(*arg_index, SmiUntag(diff));
Goto(bailout);
}
BIND(&success);
return var_tagged_length;
}
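// Usage sketch (illustrative only; the names below are hypothetical): a
// builtin such as Array.prototype.push can call
//   TVARIABLE(IntPtrT, arg_index, IntPtrConstant(0));
//   TNode<Smi> new_length = BuildAppendJSArray(
//       PACKED_SMI_ELEMENTS, array, &args, &arg_index, &smi_bailout);
// and, when |smi_bailout| is reached, resume with the remaining arguments
// starting at |arg_index| on a slower path.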
void CodeStubAssembler::TryStoreArrayElement(ElementsKind kind,
ParameterMode mode, Label* bailout,
Node* elements, Node* index,
Node* value) {
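// Values that don't fit the elements kind (non-Smi for Smi kinds,
// non-number for double kinds) take the bailout, leaving any elements-kind
// transition to the caller.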
if (IsSmiElementsKind(kind)) {
GotoIf(TaggedIsNotSmi(value), bailout);
} else if (IsDoubleElementsKind(kind)) {
GotoIfNotNumber(value, bailout);
}
if (IsDoubleElementsKind(kind)) {
Node* double_value = ChangeNumberToFloat64(value);
StoreFixedDoubleArrayElement(elements, index,
Float64SilenceNaN(double_value), mode);
} else {
WriteBarrierMode barrier_mode =
IsSmiElementsKind(kind) ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
StoreFixedArrayElement(elements, index, value, barrier_mode, 0, mode);
}
}
void CodeStubAssembler::BuildAppendJSArray(ElementsKind kind, Node* array,
Node* value, Label* bailout) {
CSA_SLOW_ASSERT(this, IsJSArray(array));
Comment("BuildAppendJSArray: %s", ElementsKindToString(kind));
ParameterMode mode = OptimalParameterMode();
VARIABLE(var_length, OptimalParameterRepresentation(),
TaggedToParameter(LoadFastJSArrayLength(array), mode));
VARIABLE(var_elements, MachineRepresentation::kTagged, LoadElements(array));
// Resize the capacity of the fixed array if it doesn't fit.
Node* growth = IntPtrOrSmiConstant(1, mode);
PossiblyGrowElementsCapacity(mode, kind, array, var_length.value(),
&var_elements, growth, bailout);
// Push the value onto the end of the array now that there is enough
// capacity.
TryStoreArrayElement(kind, mode, bailout, var_elements.value(),
var_length.value(), value);
Increment(&var_length, 1, mode);
Node* length = ParameterToTagged(var_length.value(), mode);
StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length);
}
Node* CodeStubAssembler::AllocateCellWithValue(Node* value,
WriteBarrierMode mode) {
Node* result = Allocate(Cell::kSize, kNone);
StoreMapNoWriteBarrier(result, Heap::kCellMapRootIndex);
StoreCellValue(result, value, mode);
return result;
}
Node* CodeStubAssembler::LoadCellValue(Node* cell) {
CSA_SLOW_ASSERT(this, HasInstanceType(cell, CELL_TYPE));
return LoadObjectField(cell, Cell::kValueOffset);
}
Node* CodeStubAssembler::StoreCellValue(Node* cell, Node* value,
WriteBarrierMode mode) {
CSA_SLOW_ASSERT(this, HasInstanceType(cell, CELL_TYPE));
DCHECK(mode == SKIP_WRITE_BARRIER || mode == UPDATE_WRITE_BARRIER);
if (mode == UPDATE_WRITE_BARRIER) {
return StoreObjectField(cell, Cell::kValueOffset, value);
} else {
return StoreObjectFieldNoWriteBarrier(cell, Cell::kValueOffset, value);
}
}
TNode<HeapNumber> CodeStubAssembler::AllocateHeapNumber(MutableMode mode) {
Node* result = Allocate(HeapNumber::kSize, kNone);
Heap::RootListIndex heap_map_index =
mode == IMMUTABLE ? Heap::kHeapNumberMapRootIndex
: Heap::kMutableHeapNumberMapRootIndex;
StoreMapNoWriteBarrier(result, heap_map_index);
return UncheckedCast<HeapNumber>(result);
}
TNode<HeapNumber> CodeStubAssembler::AllocateHeapNumberWithValue(
SloppyTNode<Float64T> value, MutableMode mode) {
TNode<HeapNumber> result = AllocateHeapNumber(mode);
StoreHeapNumberValue(result, value);
return result;
}
Node* CodeStubAssembler::AllocateSeqOneByteString(int length,
AllocationFlags flags) {
Comment("AllocateSeqOneByteString");
if (length == 0) {
return LoadRoot(Heap::kempty_stringRootIndex);
}
Node* result = Allocate(SeqOneByteString::SizeFor(length), flags);
DCHECK(Heap::RootIsImmortalImmovable(Heap::kOneByteStringMapRootIndex));
StoreMapNoWriteBarrier(result, Heap::kOneByteStringMapRootIndex);
StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kLengthOffset,
SmiConstant(length),
MachineRepresentation::kTagged);
StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldSlot,
IntPtrConstant(String::kEmptyHashField),
MachineType::PointerRepresentation());
return result;
}
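// Predicate used by the context assert in AllocateSeqOneByteString below,
// where callers may pass Smi zero in place of a real context.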
Node* CodeStubAssembler::IsZeroOrFixedArray(Node* object) {
Label out(this);
VARIABLE(var_result, MachineRepresentation::kWord32, Int32Constant(1));
GotoIf(WordEqual(object, SmiConstant(0)), &out);
GotoIf(IsFixedArray(object), &out);
var_result.Bind(Int32Constant(0));
Goto(&out);
BIND(&out);
return var_result.value();
}
Node* CodeStubAssembler::AllocateSeqOneByteString(Node* context,
TNode<Smi> length,
AllocationFlags flags) {
Comment("AllocateSeqOneByteString");
CSA_SLOW_ASSERT(this, IsZeroOrFixedArray(context));
VARIABLE(var_result, MachineRepresentation::kTagged);
// Compute the SeqOneByteString size and check if it fits into new space.
Label if_lengthiszero(this), if_sizeissmall(this),
if_notsizeissmall(this, Label::kDeferred), if_join(this);
GotoIf(SmiEqual(length, SmiConstant(0)), &if_lengthiszero);
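// Adding kObjectAlignmentMask and then masking off the low bits rounds the
// size up to object alignment, i.e. it computes
// RoundUp(kHeaderSize + length, kObjectAlignment).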
Node* raw_size = GetArrayAllocationSize(
SmiUntag(length), UINT8_ELEMENTS, INTPTR_PARAMETERS,
SeqOneByteString::kHeaderSize + kObjectAlignmentMask);
Node* size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask));
Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)),
&if_sizeissmall, &if_notsizeissmall);
BIND(&if_sizeissmall);
{
// Just allocate the SeqOneByteString in new space.
Node* result = AllocateInNewSpace(size, flags);
DCHECK(Heap::RootIsImmortalImmovable(Heap::kOneByteStringMapRootIndex));
StoreMapNoWriteBarrier(result, Heap::kOneByteStringMapRootIndex);
StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kLengthOffset,
length, MachineRepresentation::kTagged);
StoreObjectFieldNoWriteBarrier(result, SeqOneByteString::kHashFieldSlot,
IntPtrConstant(String::kEmptyHashField),
MachineType::PointerRepresentation());
var_result.Bind(result);
Goto(&if_join);
}
BIND(&if_notsizeissmall);
{
// We might need to allocate in large object space, go to the runtime.
Node* result =
CallRuntime(Runtime::kAllocateSeqOneByteString, context, length);
var_result.Bind(result);
Goto(&if_join);
}
BIND(&if_lengthiszero);
{
var_result.Bind(LoadRoot(Heap::kempty_stringRootIndex));
Goto(&if_join);
}
BIND(&if_join);
return var_result.value();
}
Node* CodeStubAssembler::AllocateSeqTwoByteString(int length,
AllocationFlags flags) {
Comment("AllocateSeqTwoByteString");
if (length == 0) {
return LoadRoot(Heap::kempty_stringRootIndex);
}
Node* result = Allocate(SeqTwoByteString::SizeFor(length), flags);
DCHECK(Heap::RootIsImmortalImmovable(Heap::kStringMapRootIndex));
StoreMapNoWriteBarrier(result, Heap::kStringMapRootIndex);
StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kLengthOffset,
SmiConstant(length),
MachineRepresentation::kTagged);
StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldSlot,
IntPtrConstant(String::kEmptyHashField),
MachineType::PointerRepresentation());
return result;
}
Node* CodeStubAssembler::AllocateSeqTwoByteString(Node* context,
TNode<Smi> length,
AllocationFlags flags) {
CSA_SLOW_ASSERT(this, IsFixedArray(context));
Comment("AllocateSeqTwoByteString");
VARIABLE(var_result, MachineRepresentation::kTagged);
// Compute the SeqTwoByteString size and check if it fits into new space.
Label if_lengthiszero(this), if_sizeissmall(this),
if_notsizeissmall(this, Label::kDeferred), if_join(this);
GotoIf(SmiEqual(length, SmiConstant(0)), &if_lengthiszero);
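// Round the size up to object alignment, as in AllocateSeqOneByteString.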
Node* raw_size = GetArrayAllocationSize(
SmiUntag(length), UINT16_ELEMENTS, INTPTR_PARAMETERS,
SeqTwoByteString::kHeaderSize + kObjectAlignmentMask);
Node* size = WordAnd(raw_size, IntPtrConstant(~kObjectAlignmentMask));
Branch(IntPtrLessThanOrEqual(size, IntPtrConstant(kMaxRegularHeapObjectSize)),
&if_sizeissmall, &if_notsizeissmall);
BIND(&if_sizeissmall);
{
// Just allocate the SeqTwoByteString in new space.
Node* result = AllocateInNewSpace(size, flags);
DCHECK(Heap::RootIsImmortalImmovable(Heap::kStringMapRootIndex));
StoreMapNoWriteBarrier(result, Heap::kStringMapRootIndex);
StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kLengthOffset,
length, MachineRepresentation::kTagged);
StoreObjectFieldNoWriteBarrier(result, SeqTwoByteString::kHashFieldSlot,
IntPtrConstant(String::kEmptyHashField),
MachineType::PointerRepresentation());
var_result.Bind(result);
Goto(&if_join);
}
BIND(&if_notsizeissmall);
{
// We might need to allocate in large object space, go to the runtime.
Node* result =
CallRuntime(Runtime::kAllocateSeqTwoByteString, context, length);
var_result.Bind(result);
Goto(&if_join);
}
BIND(&if_lengthiszero);
{
var_result.Bind(LoadRoot(Heap::kempty_stringRootIndex));
Goto(&if_join);
}
BIND(&if_join);
return var_result.value();
}
Node* CodeStubAssembler::AllocateSlicedString(
Heap::RootListIndex map_root_index, TNode<Smi> length, Node* parent,
Node* offset) {
CSA_ASSERT(this, IsString(parent));
CSA_ASSERT(this, TaggedIsSmi(offset));
Node* result = Allocate(SlicedString::kSize);
DCHECK(Heap::RootIsImmortalImmovable(map_root_index));
StoreMapNoWriteBarrier(result, map_root_index);
StoreObjectFieldNoWriteBarrier(result, SlicedString::kLengthOffset, length,
MachineRepresentation::kTagged);
StoreObjectFieldNoWriteBarrier(result, SlicedString::kHashFieldSlot,
IntPtrConstant(String::kEmptyHashField),
MachineType::PointerRepresentation());
StoreObjectFieldNoWriteBarrier(result, SlicedString::kParentOffset, parent,
MachineRepresentation::kTagged);
StoreObjectFieldNoWriteBarrier(result, SlicedString::kOffsetOffset, offset,
MachineRepresentation::kTagged);
return result;
}
Node* CodeStubAssembler::AllocateSlicedOneByteString(TNode<Smi> length,
Node* parent,
Node* offset) {
return AllocateSlicedString(Heap::kSlicedOneByteStringMapRootIndex, length,
parent, offset);
}
Node* CodeStubAssembler::AllocateSlicedTwoByteString(TNode<Smi> length,
Node* parent,
Node* offset) {
return AllocateSlicedString(Heap::kSlicedStringMapRootIndex, length, parent,
offset);
}
Node* CodeStubAssembler::AllocateConsString(Heap::RootListIndex map_root_index,
TNode<Smi> length, Node* first,
Node* second,
AllocationFlags flags) {
CSA_ASSERT(this, IsString(first));
CSA_ASSERT(this, IsString(second));
Node* result = Allocate(ConsString::kSize, flags);
DCHECK(Heap::RootIsImmortalImmovable(map_root_index));
StoreMapNoWriteBarrier(result, map_root_index);
StoreObjectFieldNoWriteBarrier(result, ConsString::kLengthOffset, length,
MachineRepresentation::kTagged);
StoreObjectFieldNoWriteBarrier(result, ConsString::kHashFieldSlot,
IntPtrConstant(String::kEmptyHashField),
MachineType::PointerRepresentation());
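// Objects allocated in new space cannot create old-to-new pointers with
// their initial stores, so the write barrier can be skipped; pretenured
// allocations must keep it.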
bool const new_space = !(flags & kPretenured);
if (new_space) {
StoreObjectFieldNoWriteBarrier(result, ConsString::kFirstOffset, first,
MachineRepresentation::kTagged);
StoreObjectFieldNoWriteBarrier(result, ConsString::kSecondOffset, second,
MachineRepresentation::kTagged);
} else {
StoreObjectField(result, ConsString::kFirstOffset, first);
StoreObjectField(result, ConsString::kSecondOffset, second);
}
return result;
}
Node* CodeStubAssembler::AllocateOneByteConsString(TNode<Smi> length,
Node* first, Node* second,
AllocationFlags flags) {
return AllocateConsString(Heap::kConsOneByteStringMapRootIndex, length, first,
second, flags);
}
Node* CodeStubAssembler::AllocateTwoByteConsString(TNode<Smi> length,
Node* first, Node* second,
AllocationFlags flags) {
return AllocateConsString(Heap::kConsStringMapRootIndex, length, first,
second, flags);
}
Node* CodeStubAssembler::NewConsString(Node* context, TNode<Smi> length,
Node* left, Node* right,
AllocationFlags flags) {
CSA_ASSERT(this, IsFixedArray(context));
CSA_ASSERT(this, IsString(left));
CSA_ASSERT(this, IsString(right));
// The result of adding two strings can be represented as a cons string.
Comment("Allocating ConsString");
Node* left_instance_type = LoadInstanceType(left);
Node* right_instance_type = LoadInstanceType(right);
// Compute intersection and difference of instance types.
Node* anded_instance_types =
Word32And(left_instance_type, right_instance_type);
Node* xored_instance_types =
Word32Xor(left_instance_type, right_instance_type);
// We create a one-byte cons string if
// 1. both strings are one-byte, or
// 2. at least one of the strings is two-byte, but happens to contain only
// one-byte characters.
// To do this, we check
// 1. if both strings are one-byte, or if the one-byte data hint is set in
// both strings, or
// 2. if one of the strings has the one-byte data hint set and the other
// string is one-byte.
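// For example: if both operands are one-byte, the AND keeps the encoding
// bit set and the first check below takes the one-byte path; if one operand
// is two-byte but carries the one-byte data hint while the other is
// one-byte, the XOR pattern check still selects the one-byte map.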
STATIC_ASSERT(kOneByteStringTag != 0);
STATIC_ASSERT(kOneByteDataHintTag != 0);
Label one_byte_map(this);
Label two_byte_map(this);
VARIABLE(result, MachineRepresentation::kTagged);
Label done(this, &result);
GotoIf(IsSetWord32(anded_instance_types,
kStringEncodingMask | kOneByteDataHintTag),
&one_byte_map);
Branch(Word32NotEqual(Word32And(xored_instance_types,
Int32Constant(kStringEncodingMask |
kOneByteDataHintMask)),
Int32Constant(kOneByteStringTag | kOneByteDataHintTag)),
&two_byte_map, &one_byte_map);
BIND(&one_byte_map);
Comment("One-byte ConsString");
result.Bind(AllocateOneByteConsString(length, left, right, flags));
Goto(&done);
BIND(&two_byte_map);
Comment("Two-byte ConsString");
result.Bind(AllocateTwoByteConsString(length, left, right, flags));
Goto(&done);
BIND(&done);
return result.value();
}
Node* CodeStubAssembler::AllocateNameDictionary(int at_least_space_for) {
return AllocateNameDictionary(IntPtrConstant(at_least_space_for));
}
Node* CodeStubAssembler::AllocateNameDictionary(Node* at_least_space_for) {
CSA_ASSERT(this, UintPtrLessThanOrEqual(
at_least_space_for,
IntPtrConstant(NameDictionary::kMaxCapacity)));
Node* capacity = HashTableComputeCapacity(at_least_space_for);
return AllocateNameDictionaryWithCapacity(capacity);
}
Node* CodeStubAssembler::AllocateNameDictionaryWithCapacity(Node* capacity) {
CSA_ASSERT(this, WordIsPowerOfTwo(capacity));
CSA_ASSERT(this, IntPtrGreaterThan(capacity, IntPtrConstant(0)));
Node* length = EntryToIndex<NameDictionary>(capacity);
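// Backing store size: one pointer per slot up to the end of the entries
// (EntryToIndex yields the index just past the last entry) plus the header.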
Node* store_size = IntPtrAdd(TimesPointerSize(length),
IntPtrConstant(NameDictionary::kHeaderSize));
Node* result = AllocateInNewSpace(store_size);
Comment("Initialize NameDictionary");
// Initialize FixedArray fields.
DCHECK(Heap::RootIsImmortalImmovable(Heap::kNameDictionaryMapRootIndex));
StoreMapNoWriteBarrier(result, Heap::kNameDictionaryMapRootIndex);
StoreObjectFieldNoWriteBarrier(result, FixedArray::kLengthOffset,
SmiFromWord(length));
// Initialize HashTable fields.
Node* zero = SmiConstant(0);
StoreFixedArrayElement(result, NameDictionary::kNumberOfElementsIndex, zero,
SKIP_WRITE_BARRIER);
StoreFixedArrayElement(result, NameDictionary::kNumberOfDeletedElementsIndex,
zero, SKIP_WRITE_BARRIER);
StoreFixedArrayElement(result, NameDictionary::kCapacityIndex,
SmiTag(capacity), SKIP_WRITE_BARRIER);
// Initialize Dictionary fields.
Node* filler = UndefinedConstant();
StoreFixedArrayElement(result, NameDictionary::kNextEnumerationIndexIndex,
SmiConstant(PropertyDetails::kInitialIndex),
SKIP_WRITE_BARRIER);
StoreFixedArrayElement(result, NameDictionary::kObjectHashIndex,
SmiConstant(PropertyArray::kNoHashSentinel),
SKIP_WRITE_BARRIER);
// Initialize NameDictionary elements.
Node* result_word = BitcastTaggedToWord(result);
Node* start_address = IntPtrAdd(
result_word, IntPtrConstant(NameDictionary::OffsetOfElementAt(
NameDictionary::kElementsStartIndex) -
kHeapObjectTag));
Node* end_address = IntPtrAdd(
result_word, IntPtrSub(store_size, IntPtrConstant(kHeapObjectTag)));
StoreFieldsNoWriteBarrier(start_address, end_address, filler);
return result;
}
Node* CodeStubAssembler::CopyNameDictionary(Node* dictionary,
Label* large_object_fallback) {
CSA_ASSERT(this, IsHashTable(dictionary));
Comment("Copy boilerplate property dict");
Node* capacity = SmiUntag(GetCapacity<NameDictionary>(dictionary));
CSA_ASSERT(this, IntPtrGreaterThanOrEqual(capacity, IntPtrConstant(0)));
GotoIf(UintPtrGreaterThan(
capacity, IntPtrConstant(NameDictionary::kMaxRegularCapacity)),
large_object_fallback);
Node* properties = AllocateNameDictionaryWithCapacity(capacity);
Node* length = SmiUntag(LoadFixedArrayBaseLength(dictionary));
CopyFixedArrayElements(PACKED_ELEMENTS, dictionary, properties, length,
SKIP_WRITE_BARRIER, INTPTR_PARAMETERS);
return properties;
}
Node* CodeStubAssembler::AllocateStruct(Node* map, AllocationFlags flags) {
Comment("AllocateStruct");
CSA_ASSERT(this, IsMap(map));
Node* size = TimesPointerSize(LoadMapInstanceSizeInWords(map));
Node* object = Allocate(size, flags);
StoreMapNoWriteBarrier(object, map);
InitializeStructBody(object, map, size, Struct::kHeaderSize);
return object;
}
void CodeStubAssembler::InitializeStructBody(Node* object, Node* map,
Node* size, int start_offset) {
CSA_SLOW_ASSERT(this, IsMap(map));
Comment("InitializeStructBody");
Node* filler = UndefinedConstant();
// Calculate the untagged field addresses.
object = BitcastTaggedToWord(object);
Node* start_address =
IntPtrAdd(object, IntPtrConstant(start_offset - kHeapObjectTag));
Node* end_address =
IntPtrSub(IntPtrAdd(object, size), IntPtrConstant(kHeapObjectTag));
StoreFieldsNoWriteBarrier(start_address, end_address, filler);
}
Node* CodeStubAssembler::AllocateJSObjectFromMap(
Node* map, Node* properties, Node* elements, AllocationFlags flags,
SlackTrackingMode slack_tracking_mode) {
CSA_ASSERT(this, IsMap(map));
CSA_ASSERT(this, Word32BinaryNot(IsJSFunctionMap(map)));
CSA_ASSERT(this, Word32BinaryNot(InstanceTypeEqual(LoadMapInstanceType(map),
JS_GLOBAL_OBJECT_TYPE)));
Node* instance_size = TimesPointerSize(LoadMapInstanceSizeInWords(map));
Node* object = AllocateInNewSpace(instance_size, flags);
StoreMapNoWriteBarrier(object, map);
InitializeJSObjectFromMap(object, map, instance_size, properties, elements,
slack_tracking_mode);
return object;
}
void CodeStubAssembler::InitializeJSObjectFromMap(
Node* object, Node* map, Node* instance_size, Node* properties,
Node* elements, SlackTrackingMode slack_tracking_mode) {
CSA_SLOW_ASSERT(this, IsMap(map));
// This helper assumes that the object is in new-space, as guarded by the
// check in AllocateJSObjectFromMap.
if (properties == nullptr) {
CSA_ASSERT(this, Word32BinaryNot(IsDictionaryMap(map)));
StoreObjectFieldRoot(object, JSObject::kPropertiesOrHashOffset,
Heap::kEmptyFixedArrayRootIndex);
} else {
CSA_ASSERT(this, Word32Or(Word32Or(IsPropertyArray(properties),
IsDictionary(properties)),
IsEmptyFixedArray(properties)));
StoreObjectFieldNoWriteBarrier(object, JSObject::kPropertiesOrHashOffset,
properties);
}
if (elements == nullptr) {
StoreObjectFieldRoot(object, JSObject::kElementsOffset,
Heap::kEmptyFixedArrayRootIndex);
} else {
CSA_ASSERT(this, IsFixedArray(elements));
StoreObjectFieldNoWriteBarrier(object, JSObject::kElementsOffset, elements);
}
if (slack_tracking_mode == kNoSlackTracking) {
InitializeJSObjectBodyNoSlackTracking(object, map, instance_size);
} else {
DCHECK_EQ(slack_tracking_mode, kWithSlackTracking);
InitializeJSObjectBodyWithSlackTracking(object, map, instance_size);
}
}
void CodeStubAssembler::InitializeJSObjectBodyNoSlackTracking(
Node* object, Node* map, Node* instance_size, int start_offset) {
STATIC_ASSERT(Map::kNoSlackTracking == 0);
CSA_ASSERT(
this, IsClearWord32<Map::ConstructionCounterBits>(LoadMapBitField3(map)));
InitializeFieldsWithRoot(object, IntPtrConstant(start_offset), instance_size,
Heap::kUndefinedValueRootIndex);
}
void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking(
Node* object, Node* map, Node* instance_size) {
CSA_SLOW_ASSERT(this, IsMap(map));
Comment("InitializeJSObjectBodyNoSlackTracking");
// Perform in-object slack tracking if requested.
int start_offset = JSObject::kHeaderSize;
Node* bit_field3 = LoadMapBitField3(map);
Label end(this), slack_tracking(this), complete(this, Label::kDeferred);
STATIC_ASSERT(Map::kNoSlackTracking == 0);
GotoIf(IsSetWord32<Map::ConstructionCounterBits>(bit_field3),
&slack_tracking);
Comment("No slack tracking");
InitializeJSObjectBodyNoSlackTracking(object, map, instance_size);
Goto(&end);
BIND(&slack_tracking);
{
Comment("Decrease construction counter");
// Slack tracking is only done on initial maps.
CSA_ASSERT(this, IsUndefined(LoadMapBackPointer(map)));
STATIC_ASSERT(Map::ConstructionCounterBits::kNext == 32);
Node* new_bit_field3 = Int32Sub(
bit_field3, Int32Constant(1 << Map::ConstructionCounterBits::kShift));
StoreObjectFieldNoWriteBarrier(map, Map::kBitField3Offset, new_bit_field3,
MachineRepresentation::kWord32);
STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
// The object still has in-object slack, therefore the |used_or_unused|
// field contains the "used" value.
Node* used_size = TimesPointerSize(ChangeUint32ToWord(
LoadObjectField(map, Map::kUsedOrUnusedInstanceSizeInWordsOffset,
MachineType::Uint8())));
Comment("iInitialize filler fields");
InitializeFieldsWithRoot(object, used_size, instance_size,
Heap::kOnePointerFillerMapRootIndex);
Comment("Initialize undefined fields");
InitializeFieldsWithRoot(object, IntPtrConstant(start_offset), used_size,
Heap::kUndefinedValueRootIndex);
STATIC_ASSERT(Map::kNoSlackTracking == 0);
GotoIf(IsClearWord32<Map::ConstructionCounterBits>(new_bit_field3),
&complete);
Goto(&end);
}
// Finalize the instance size.
BIND(&complete);
{
// CompleteInobjectSlackTrackingForMap doesn't allocate and thus doesn't
// need a context.
CallRuntime(Runtime::kCompleteInobjectSlackTrackingForMap,
NoContextConstant(), map);
Goto(&end);
}
BIND(&end);
}
void CodeStubAssembler::StoreFieldsNoWriteBarrier(Node* start_address,
Node* end_address,
Node* value) {
Comment("StoreFieldsNoWriteBarrier");
CSA_ASSERT(this, WordIsWordAligned(start_address));
CSA_ASSERT(this, WordIsWordAligned(end_address));
BuildFastLoop(start_address, end_address,
[this, value](Node* current) {
StoreNoWriteBarrier(MachineRepresentation::kTagged, current,
value);
},
kPointerSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
}
Node* CodeStubAssembler::AllocateUninitializedJSArrayWithoutElements(
Node* array_map, Node* length, Node* allocation_site) {
Comment("begin allocation of JSArray without elements");
CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
CSA_SLOW_ASSERT(this, IsMap(array_map));
int base_size = JSArray::kSize;
if (allocation_site != nullptr) {
base_size += AllocationMemento::kSize;
}
Node* size = IntPtrConstant(base_size);
Node* array =
AllocateUninitializedJSArray(array_map, length, allocation_site, size);
return array;
}
std::pair<Node*, Node*>
CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
ElementsKind kind, Node* array_map, Node* length, Node* allocation_site,
Node* capacity, ParameterMode capacity_mode) {
Comment("begin allocation of JSArray with elements");
CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length));
CSA_SLOW_ASSERT(this, IsMap(array_map));
int base_size = JSArray::kSize;
if (allocation_site != nullptr) {
base_size += AllocationMemento::kSize;
}
int elements_offset = base_size;
// Compute space for elements.
base_size += FixedArray::kHeaderSize;
Node* size = ElementOffsetFromIndex(capacity, kind, capacity_mode, base_size);
Node* array =
AllocateUninitializedJSArray(array_map, length, allocation_site, size);
Node* elements = InnerAllocate(array, elements_offset);
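// The elements store is folded into the same allocation, placed directly
// after the JSArray (and the AllocationMemento, if any).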
StoreObjectFieldNoWriteBarrier(array, JSObject::kElementsOffset, elements);
// Set up the elements object.
STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
Heap::RootListIndex elements_map_index =
IsDoubleElementsKind(kind) ? Heap::kFixedDoubleArrayMapRootIndex
: Heap::kFixedArrayMapRootIndex;
DCHECK(Heap::RootIsImmortalImmovable(elements_map_index));
StoreMapNoWriteBarrier(elements, elements_map_index);
Node* capacity_smi = ParameterToTagged(capacity, capacity_mode);
CSA_ASSERT(this, SmiGreaterThan(capacity_smi, SmiConstant(0)));
StoreObjectFieldNoWriteBarrier(elements, FixedArray::kLengthOffset,
capacity_smi);
return {array, elements};
}
Node* CodeStubAssembler::AllocateUninitializedJSArray(Node* array_map,