| // Copyright 2016 the V8 project authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style license that can be |
| // found in the LICENSE file. |
| |
| #include "src/codegen/code-stub-assembler.h" |
| |
| #include "src/codegen/code-factory.h" |
| #include "src/execution/frames-inl.h" |
| #include "src/execution/frames.h" |
| #include "src/heap/heap-inl.h" // For Page/MemoryChunk. TODO(jkummerow): Drop. |
| #include "src/logging/counters.h" |
| #include "src/objects/api-callbacks.h" |
| #include "src/objects/cell.h" |
| #include "src/objects/descriptor-array.h" |
| #include "src/objects/function-kind.h" |
| #include "src/objects/heap-number.h" |
| #include "src/objects/oddball.h" |
| #include "src/objects/ordered-hash-table-inl.h" |
| #include "src/objects/property-cell.h" |
| #include "src/wasm/wasm-objects.h" |
| |
| #if defined(V8_OS_STARBOARD) |
| #include "starboard/client_porting/poem/stdlib_poem.h" |
| #endif |
| |
| namespace v8 { |
| namespace internal { |
| |
| using compiler::Node; |
| template <class T> |
| using TNode = compiler::TNode<T>; |
| template <class T> |
| using SloppyTNode = compiler::SloppyTNode<T>; |
| |
| CodeStubAssembler::CodeStubAssembler(compiler::CodeAssemblerState* state) |
| : compiler::CodeAssembler(state), |
| TorqueGeneratedExportedMacrosAssembler(state) { |
| if (DEBUG_BOOL && FLAG_csa_trap_on_node != nullptr) { |
| HandleBreakOnNode(); |
| } |
| } |
| |
| void CodeStubAssembler::HandleBreakOnNode() { |
| // FLAG_csa_trap_on_node should be of the form "STUB,NODE", where STUB is a |
| // string naming a stub and NODE is a number specifying the node id. |
| const char* name = state()->name(); |
| size_t name_length = strlen(name); |
| if (strncmp(FLAG_csa_trap_on_node, name, name_length) != 0) { |
| // Different name. |
| return; |
| } |
| size_t option_length = strlen(FLAG_csa_trap_on_node); |
| if (option_length < name_length + 2 || |
| FLAG_csa_trap_on_node[name_length] != ',') { |
| // Option is too short or the stub name is not followed by ','. |
| return; |
| } |
| const char* start = &FLAG_csa_trap_on_node[name_length + 1]; |
| char* end; |
| int node_id = static_cast<int>(strtol(start, &end, 10)); |
| if (start == end) { |
| // Bad node id. |
| return; |
| } |
| BreakOnNode(node_id); |
| } |
| |
| void CodeStubAssembler::Assert(const BranchGenerator& branch, |
| const char* message, const char* file, int line, |
| std::initializer_list<ExtraNode> extra_nodes) { |
| #if defined(DEBUG) |
| if (FLAG_debug_code) { |
| Check(branch, message, file, line, extra_nodes); |
| } |
| #endif |
| } |
| |
| void CodeStubAssembler::Assert(const NodeGenerator& condition_body, |
| const char* message, const char* file, int line, |
| std::initializer_list<ExtraNode> extra_nodes) { |
| #if defined(DEBUG) |
| if (FLAG_debug_code) { |
| Check(condition_body, message, file, line, extra_nodes); |
| } |
| #endif |
| } |
| |
| void CodeStubAssembler::Check(const BranchGenerator& branch, |
| const char* message, const char* file, int line, |
| std::initializer_list<ExtraNode> extra_nodes) { |
| Label ok(this); |
| Label not_ok(this, Label::kDeferred); |
| if (message != nullptr && FLAG_code_comments) { |
| Comment("[ Assert: ", message); |
| } else { |
| Comment("[ Assert"); |
| } |
| branch(&ok, ¬_ok); |
| |
| BIND(¬_ok); |
| FailAssert(message, file, line, extra_nodes); |
| |
| BIND(&ok); |
| Comment("] Assert"); |
| } |
| |
| void CodeStubAssembler::Check(const NodeGenerator& condition_body, |
| const char* message, const char* file, int line, |
| std::initializer_list<ExtraNode> extra_nodes) { |
| BranchGenerator branch = [=](Label* ok, Label* not_ok) { |
| Node* condition = condition_body(); |
| DCHECK_NOT_NULL(condition); |
| Branch(condition, ok, not_ok); |
| }; |
| |
| Check(branch, message, file, line, extra_nodes); |
| } |
| |
| void CodeStubAssembler::FastCheck(TNode<BoolT> condition) { |
| Label ok(this), not_ok(this, Label::kDeferred); |
| Branch(condition, &ok, ¬_ok); |
| BIND(¬_ok); |
| { |
| DebugBreak(); |
| Goto(&ok); |
| } |
| BIND(&ok); |
| } |
| |
| void CodeStubAssembler::FailAssert( |
| const char* message, const char* file, int line, |
| std::initializer_list<ExtraNode> extra_nodes) { |
| DCHECK_NOT_NULL(message); |
| EmbeddedVector<char, 1024> chars; |
| if (file != nullptr) { |
| SNPrintF(chars, "%s [%s:%d]", message, file, line); |
| message = chars.begin(); |
| } |
| Node* message_node = StringConstant(message); |
| |
| #ifdef DEBUG |
| // Only print the extra nodes in debug builds. |
| for (auto& node : extra_nodes) { |
| CallRuntime(Runtime::kPrintWithNameForAssert, SmiConstant(0), |
| StringConstant(node.second), node.first); |
| } |
| #endif |
| |
| AbortCSAAssert(message_node); |
| Unreachable(); |
| } |
| |
| Node* CodeStubAssembler::SelectImpl(TNode<BoolT> condition, |
| const NodeGenerator& true_body, |
| const NodeGenerator& false_body, |
| MachineRepresentation rep) { |
| VARIABLE(value, rep); |
| Label vtrue(this), vfalse(this), end(this); |
| Branch(condition, &vtrue, &vfalse); |
| |
| BIND(&vtrue); |
| { |
| value.Bind(true_body()); |
| Goto(&end); |
| } |
| BIND(&vfalse); |
| { |
| value.Bind(false_body()); |
| Goto(&end); |
| } |
| |
| BIND(&end); |
| return value.value(); |
| } |
| |
| TNode<Int32T> CodeStubAssembler::SelectInt32Constant( |
| SloppyTNode<BoolT> condition, int true_value, int false_value) { |
| return SelectConstant<Int32T>(condition, Int32Constant(true_value), |
| Int32Constant(false_value)); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::SelectIntPtrConstant( |
| SloppyTNode<BoolT> condition, int true_value, int false_value) { |
| return SelectConstant<IntPtrT>(condition, IntPtrConstant(true_value), |
| IntPtrConstant(false_value)); |
| } |
| |
| TNode<Oddball> CodeStubAssembler::SelectBooleanConstant( |
| SloppyTNode<BoolT> condition) { |
| return SelectConstant<Oddball>(condition, TrueConstant(), FalseConstant()); |
| } |
| |
| TNode<Smi> CodeStubAssembler::SelectSmiConstant(SloppyTNode<BoolT> condition, |
| Smi true_value, |
| Smi false_value) { |
| return SelectConstant<Smi>(condition, SmiConstant(true_value), |
| SmiConstant(false_value)); |
| } |
| |
| TNode<Object> CodeStubAssembler::NoContextConstant() { |
| return SmiConstant(Context::kNoContext); |
| } |
| |
| #define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \ |
| compiler::TNode<std::remove_pointer<std::remove_reference<decltype( \ |
| std::declval<Heap>().rootAccessorName())>::type>::type> \ |
| CodeStubAssembler::name##Constant() { \ |
| return UncheckedCast<std::remove_pointer<std::remove_reference<decltype( \ |
| std::declval<Heap>().rootAccessorName())>::type>::type>( \ |
| LoadRoot(RootIndex::k##rootIndexName)); \ |
| } |
| HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR) |
| #undef HEAP_CONSTANT_ACCESSOR |
| |
| #define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \ |
| compiler::TNode<std::remove_pointer<std::remove_reference<decltype( \ |
| std::declval<ReadOnlyRoots>().rootAccessorName())>::type>::type> \ |
| CodeStubAssembler::name##Constant() { \ |
| return UncheckedCast<std::remove_pointer<std::remove_reference<decltype( \ |
| std::declval<ReadOnlyRoots>().rootAccessorName())>::type>::type>( \ |
| LoadRoot(RootIndex::k##rootIndexName)); \ |
| } |
| HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR) |
| #undef HEAP_CONSTANT_ACCESSOR |
| |
| #define HEAP_CONSTANT_TEST(rootIndexName, rootAccessorName, name) \ |
| compiler::TNode<BoolT> CodeStubAssembler::Is##name( \ |
| SloppyTNode<Object> value) { \ |
| return WordEqual(value, name##Constant()); \ |
| } \ |
| compiler::TNode<BoolT> CodeStubAssembler::IsNot##name( \ |
| SloppyTNode<Object> value) { \ |
| return WordNotEqual(value, name##Constant()); \ |
| } |
| HEAP_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_TEST) |
| #undef HEAP_CONSTANT_TEST |
| |
| Node* CodeStubAssembler::IntPtrOrSmiConstant(int value, ParameterMode mode) { |
| if (mode == SMI_PARAMETERS) { |
| return SmiConstant(value); |
| } else { |
| DCHECK_EQ(INTPTR_PARAMETERS, mode); |
| return IntPtrConstant(value); |
| } |
| } |
| |
| bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(Node* test, |
| ParameterMode mode) { |
| int32_t constant_test; |
| Smi smi_test; |
| if (mode == INTPTR_PARAMETERS) { |
| if (ToInt32Constant(test, constant_test) && constant_test == 0) { |
| return true; |
| } |
| } else { |
| DCHECK_EQ(mode, SMI_PARAMETERS); |
| if (ToSmiConstant(test, &smi_test) && smi_test.value() == 0) { |
| return true; |
| } |
| } |
| return false; |
| } |
| |
| bool CodeStubAssembler::TryGetIntPtrOrSmiConstantValue(Node* maybe_constant, |
| int* value, |
| ParameterMode mode) { |
| int32_t int32_constant; |
| if (mode == INTPTR_PARAMETERS) { |
| if (ToInt32Constant(maybe_constant, int32_constant)) { |
| *value = int32_constant; |
| return true; |
| } |
| } else { |
| DCHECK_EQ(mode, SMI_PARAMETERS); |
| Smi smi_constant; |
| if (ToSmiConstant(maybe_constant, &smi_constant)) { |
| *value = Smi::ToInt(smi_constant); |
| return true; |
| } |
| } |
| return false; |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::IntPtrRoundUpToPowerOfTwo32( |
| TNode<IntPtrT> value) { |
| Comment("IntPtrRoundUpToPowerOfTwo32"); |
| CSA_ASSERT(this, UintPtrLessThanOrEqual(value, IntPtrConstant(0x80000000u))); |
| value = Signed(IntPtrSub(value, IntPtrConstant(1))); |
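| // Smear the highest set bit of {value} - 1 into all lower bit positions, so |
| // that adding one below yields the smallest power of two >= the original |
| // {value}. |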
| for (int i = 1; i <= 16; i *= 2) { |
| value = Signed(WordOr(value, WordShr(value, IntPtrConstant(i)))); |
| } |
| return Signed(IntPtrAdd(value, IntPtrConstant(1))); |
| } |
| |
| Node* CodeStubAssembler::MatchesParameterMode(Node* value, ParameterMode mode) { |
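| // In SMI_PARAMETERS mode the value must pass a Smi check; in |
| // INTPTR_PARAMETERS mode any word is acceptable, so the result is a |
| // constant true. |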
| if (mode == SMI_PARAMETERS) { |
| return TaggedIsSmi(value); |
| } else { |
| return Int32Constant(1); |
| } |
| } |
| |
| TNode<BoolT> CodeStubAssembler::WordIsPowerOfTwo(SloppyTNode<IntPtrT> value) { |
| // value && !(value & (value - 1)) |
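| // The Select maps zero to a non-zero placeholder so that zero is not |
| // misreported as a power of two. |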
| return WordEqual( |
| Select<IntPtrT>( |
| WordEqual(value, IntPtrConstant(0)), |
| [=] { return IntPtrConstant(1); }, |
| [=] { return WordAnd(value, IntPtrSub(value, IntPtrConstant(1))); }), |
| IntPtrConstant(0)); |
| } |
| |
| TNode<Float64T> CodeStubAssembler::Float64Round(SloppyTNode<Float64T> x) { |
| Node* one = Float64Constant(1.0); |
| Node* one_half = Float64Constant(0.5); |
| |
| Label return_x(this); |
| |
| // Round up {x} towards Infinity. |
| VARIABLE(var_x, MachineRepresentation::kFloat64, Float64Ceil(x)); |
| |
| GotoIf(Float64LessThanOrEqual(Float64Sub(var_x.value(), one_half), x), |
| &return_x); |
| var_x.Bind(Float64Sub(var_x.value(), one)); |
| Goto(&return_x); |
| |
| BIND(&return_x); |
| return TNode<Float64T>::UncheckedCast(var_x.value()); |
| } |
| |
| TNode<Float64T> CodeStubAssembler::Float64Ceil(SloppyTNode<Float64T> x) { |
| if (IsFloat64RoundUpSupported()) { |
| return Float64RoundUp(x); |
| } |
| |
| Node* one = Float64Constant(1.0); |
| Node* zero = Float64Constant(0.0); |
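| // 2^52 is the smallest power of two at which the spacing between adjacent |
| // doubles is 1, so adding it to a smaller non-negative value and subtracting |
| // it again rounds that value to a nearby integer; the result is then |
| // adjusted below to implement the ceiling operation. |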
| Node* two_52 = Float64Constant(4503599627370496.0E0); |
| Node* minus_two_52 = Float64Constant(-4503599627370496.0E0); |
| |
| VARIABLE(var_x, MachineRepresentation::kFloat64, x); |
| Label return_x(this), return_minus_x(this); |
| |
| // Check if {x} is greater than zero. |
| Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this); |
| Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero, |
| &if_xnotgreaterthanzero); |
| |
| BIND(&if_xgreaterthanzero); |
| { |
| // Just return {x} unless it's in the range ]0,2^52[. |
| GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x); |
| |
| // Round positive {x} towards Infinity. |
| var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52)); |
| GotoIfNot(Float64LessThan(var_x.value(), x), &return_x); |
| var_x.Bind(Float64Add(var_x.value(), one)); |
| Goto(&return_x); |
| } |
| |
| BIND(&if_xnotgreaterthanzero); |
| { |
| // Just return {x} unless it's in the range ]-2^52,0[. |
| GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x); |
| GotoIfNot(Float64LessThan(x, zero), &return_x); |
| |
| // Round negated {x} towards Infinity and return the result negated. |
| Node* minus_x = Float64Neg(x); |
| var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52)); |
| GotoIfNot(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x); |
| var_x.Bind(Float64Sub(var_x.value(), one)); |
| Goto(&return_minus_x); |
| } |
| |
| BIND(&return_minus_x); |
| var_x.Bind(Float64Neg(var_x.value())); |
| Goto(&return_x); |
| |
| BIND(&return_x); |
| return TNode<Float64T>::UncheckedCast(var_x.value()); |
| } |
| |
| TNode<Float64T> CodeStubAssembler::Float64Floor(SloppyTNode<Float64T> x) { |
| if (IsFloat64RoundDownSupported()) { |
| return Float64RoundDown(x); |
| } |
| |
| Node* one = Float64Constant(1.0); |
| Node* zero = Float64Constant(0.0); |
| Node* two_52 = Float64Constant(4503599627370496.0E0); |
| Node* minus_two_52 = Float64Constant(-4503599627370496.0E0); |
| |
| VARIABLE(var_x, MachineRepresentation::kFloat64, x); |
| Label return_x(this), return_minus_x(this); |
| |
| // Check if {x} is greater than zero. |
| Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this); |
| Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero, |
| &if_xnotgreaterthanzero); |
| |
| BIND(&if_xgreaterthanzero); |
| { |
| // Just return {x} unless it's in the range ]0,2^52[. |
| GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x); |
| |
| // Round positive {x} towards -Infinity. |
| var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52)); |
| GotoIfNot(Float64GreaterThan(var_x.value(), x), &return_x); |
| var_x.Bind(Float64Sub(var_x.value(), one)); |
| Goto(&return_x); |
| } |
| |
| BIND(&if_xnotgreaterthanzero); |
| { |
| // Just return {x} unless it's in the range ]-2^52,0[. |
| GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x); |
| GotoIfNot(Float64LessThan(x, zero), &return_x); |
| |
| // Round negated {x} towards -Infinity and return the result negated. |
| Node* minus_x = Float64Neg(x); |
| var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52)); |
| GotoIfNot(Float64LessThan(var_x.value(), minus_x), &return_minus_x); |
| var_x.Bind(Float64Add(var_x.value(), one)); |
| Goto(&return_minus_x); |
| } |
| |
| BIND(&return_minus_x); |
| var_x.Bind(Float64Neg(var_x.value())); |
| Goto(&return_x); |
| |
| BIND(&return_x); |
| return TNode<Float64T>::UncheckedCast(var_x.value()); |
| } |
| |
| TNode<Float64T> CodeStubAssembler::Float64RoundToEven(SloppyTNode<Float64T> x) { |
| if (IsFloat64RoundTiesEvenSupported()) { |
| return Float64RoundTiesEven(x); |
| } |
| // See ES#sec-touint8clamp for details. |
| Node* f = Float64Floor(x); |
| Node* f_and_half = Float64Add(f, Float64Constant(0.5)); |
| |
| VARIABLE(var_result, MachineRepresentation::kFloat64); |
| Label return_f(this), return_f_plus_one(this), done(this); |
| |
| GotoIf(Float64LessThan(f_and_half, x), &return_f_plus_one); |
| GotoIf(Float64LessThan(x, f_and_half), &return_f); |
| { |
| Node* f_mod_2 = Float64Mod(f, Float64Constant(2.0)); |
| Branch(Float64Equal(f_mod_2, Float64Constant(0.0)), &return_f, |
| &return_f_plus_one); |
| } |
| |
| BIND(&return_f); |
| var_result.Bind(f); |
| Goto(&done); |
| |
| BIND(&return_f_plus_one); |
| var_result.Bind(Float64Add(f, Float64Constant(1.0))); |
| Goto(&done); |
| |
| BIND(&done); |
| return TNode<Float64T>::UncheckedCast(var_result.value()); |
| } |
| |
| TNode<Float64T> CodeStubAssembler::Float64Trunc(SloppyTNode<Float64T> x) { |
| if (IsFloat64RoundTruncateSupported()) { |
| return Float64RoundTruncate(x); |
| } |
| |
| Node* one = Float64Constant(1.0); |
| Node* zero = Float64Constant(0.0); |
| Node* two_52 = Float64Constant(4503599627370496.0E0); |
| Node* minus_two_52 = Float64Constant(-4503599627370496.0E0); |
| |
| VARIABLE(var_x, MachineRepresentation::kFloat64, x); |
| Label return_x(this), return_minus_x(this); |
| |
| // Check if {x} is greater than 0. |
| Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this); |
| Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero, |
| &if_xnotgreaterthanzero); |
| |
| BIND(&if_xgreaterthanzero); |
| { |
| if (IsFloat64RoundDownSupported()) { |
| var_x.Bind(Float64RoundDown(x)); |
| } else { |
| // Just return {x} unless it's in the range ]0,2^52[. |
| GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x); |
| |
| // Round positive {x} towards -Infinity. |
| var_x.Bind(Float64Sub(Float64Add(two_52, x), two_52)); |
| GotoIfNot(Float64GreaterThan(var_x.value(), x), &return_x); |
| var_x.Bind(Float64Sub(var_x.value(), one)); |
| } |
| Goto(&return_x); |
| } |
| |
| BIND(&if_xnotgreaterthanzero); |
| { |
| if (IsFloat64RoundUpSupported()) { |
| var_x.Bind(Float64RoundUp(x)); |
| Goto(&return_x); |
| } else { |
| // Just return {x} unless it's in the range ]-2^52,0[. |
| GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x); |
| GotoIfNot(Float64LessThan(x, zero), &return_x); |
| |
| // Round negated {x} towards -Infinity and return result negated. |
| Node* minus_x = Float64Neg(x); |
| var_x.Bind(Float64Sub(Float64Add(two_52, minus_x), two_52)); |
| GotoIfNot(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x); |
| var_x.Bind(Float64Sub(var_x.value(), one)); |
| Goto(&return_minus_x); |
| } |
| } |
| |
| BIND(&return_minus_x); |
| var_x.Bind(Float64Neg(var_x.value())); |
| Goto(&return_x); |
| |
| BIND(&return_x); |
| return TNode<Float64T>::UncheckedCast(var_x.value()); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::IsValidSmi(TNode<Smi> smi) { |
| if (SmiValuesAre31Bits() && kSystemPointerSize == kInt64Size) { |
| // Check that the Smi value is properly sign-extended. |
| TNode<IntPtrT> value = Signed(BitcastTaggedSignedToWord(smi)); |
| return WordEqual(value, ChangeInt32ToIntPtr(TruncateIntPtrToInt32(value))); |
| } |
| return Int32TrueConstant(); |
| } |
| |
| Node* CodeStubAssembler::SmiShiftBitsConstant() { |
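| // Smi values are stored shifted left by kSmiShiftSize + kSmiTagSize bits; |
| // this is the shift amount used by SmiTag and SmiUntag below. |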
| return IntPtrConstant(kSmiShiftSize + kSmiTagSize); |
| } |
| |
| TNode<Smi> CodeStubAssembler::SmiFromInt32(SloppyTNode<Int32T> value) { |
| TNode<IntPtrT> value_intptr = ChangeInt32ToIntPtr(value); |
| TNode<Smi> smi = |
| BitcastWordToTaggedSigned(WordShl(value_intptr, SmiShiftBitsConstant())); |
| return smi; |
| } |
| |
| TNode<BoolT> CodeStubAssembler::IsValidPositiveSmi(TNode<IntPtrT> value) { |
| intptr_t constant_value; |
| if (ToIntPtrConstant(value, constant_value)) { |
| return (static_cast<uintptr_t>(constant_value) <= |
| static_cast<uintptr_t>(Smi::kMaxValue)) |
| ? Int32TrueConstant() |
| : Int32FalseConstant(); |
| } |
| |
| return UintPtrLessThanOrEqual(value, IntPtrConstant(Smi::kMaxValue)); |
| } |
| |
| TNode<Smi> CodeStubAssembler::SmiTag(SloppyTNode<IntPtrT> value) { |
| int32_t constant_value; |
| if (ToInt32Constant(value, constant_value) && Smi::IsValid(constant_value)) { |
| return SmiConstant(constant_value); |
| } |
| TNode<Smi> smi = |
| BitcastWordToTaggedSigned(WordShl(value, SmiShiftBitsConstant())); |
| return smi; |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::SmiUntag(SloppyTNode<Smi> value) { |
| intptr_t constant_value; |
| if (ToIntPtrConstant(value, constant_value)) { |
| return IntPtrConstant(constant_value >> (kSmiShiftSize + kSmiTagSize)); |
| } |
| return Signed( |
| WordSar(BitcastTaggedSignedToWord(value), SmiShiftBitsConstant())); |
| } |
| |
| TNode<Int32T> CodeStubAssembler::SmiToInt32(SloppyTNode<Smi> value) { |
| TNode<IntPtrT> result = SmiUntag(value); |
| return TruncateIntPtrToInt32(result); |
| } |
| |
| TNode<Float64T> CodeStubAssembler::SmiToFloat64(SloppyTNode<Smi> value) { |
| return ChangeInt32ToFloat64(SmiToInt32(value)); |
| } |
| |
| TNode<Smi> CodeStubAssembler::SmiMax(TNode<Smi> a, TNode<Smi> b) { |
| return SelectConstant<Smi>(SmiLessThan(a, b), b, a); |
| } |
| |
| TNode<Smi> CodeStubAssembler::SmiMin(TNode<Smi> a, TNode<Smi> b) { |
| return SelectConstant<Smi>(SmiLessThan(a, b), a, b); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::TryIntPtrAdd(TNode<IntPtrT> a, |
| TNode<IntPtrT> b, |
| Label* if_overflow) { |
| TNode<PairT<IntPtrT, BoolT>> pair = IntPtrAddWithOverflow(a, b); |
| TNode<BoolT> overflow = Projection<1>(pair); |
| GotoIf(overflow, if_overflow); |
| return Projection<0>(pair); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::TryIntPtrSub(TNode<IntPtrT> a, |
| TNode<IntPtrT> b, |
| Label* if_overflow) { |
| TNode<PairT<IntPtrT, BoolT>> pair = IntPtrSubWithOverflow(a, b); |
| TNode<BoolT> overflow = Projection<1>(pair); |
| GotoIf(overflow, if_overflow); |
| return Projection<0>(pair); |
| } |
| |
| TNode<Int32T> CodeStubAssembler::TryInt32Mul(TNode<Int32T> a, TNode<Int32T> b, |
| Label* if_overflow) { |
| TNode<PairT<Int32T, BoolT>> pair = Int32MulWithOverflow(a, b); |
| TNode<BoolT> overflow = Projection<1>(pair); |
| GotoIf(overflow, if_overflow); |
| return Projection<0>(pair); |
| } |
| |
| TNode<Smi> CodeStubAssembler::TrySmiAdd(TNode<Smi> lhs, TNode<Smi> rhs, |
| Label* if_overflow) { |
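| // With 32-bit Smi values the addition (and its overflow check) can be done |
| // directly on the tagged words, since the payload occupies the upper half |
| // and the low bits are zero. With 31-bit Smis the values are untagged to |
| // 32 bits first and the result is re-tagged. |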
| if (SmiValuesAre32Bits()) { |
| return BitcastWordToTaggedSigned( |
| TryIntPtrAdd(BitcastTaggedSignedToWord(lhs), |
| BitcastTaggedSignedToWord(rhs), if_overflow)); |
| } else { |
| DCHECK(SmiValuesAre31Bits()); |
| TNode<PairT<Int32T, BoolT>> pair = Int32AddWithOverflow( |
| TruncateIntPtrToInt32(BitcastTaggedSignedToWord(lhs)), |
| TruncateIntPtrToInt32(BitcastTaggedSignedToWord(rhs))); |
| TNode<BoolT> overflow = Projection<1>(pair); |
| GotoIf(overflow, if_overflow); |
| TNode<Int32T> result = Projection<0>(pair); |
| return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(result)); |
| } |
| } |
| |
| TNode<Smi> CodeStubAssembler::TrySmiSub(TNode<Smi> lhs, TNode<Smi> rhs, |
| Label* if_overflow) { |
| if (SmiValuesAre32Bits()) { |
| TNode<PairT<IntPtrT, BoolT>> pair = IntPtrSubWithOverflow( |
| BitcastTaggedSignedToWord(lhs), BitcastTaggedSignedToWord(rhs)); |
| TNode<BoolT> overflow = Projection<1>(pair); |
| GotoIf(overflow, if_overflow); |
| TNode<IntPtrT> result = Projection<0>(pair); |
| return BitcastWordToTaggedSigned(result); |
| } else { |
| DCHECK(SmiValuesAre31Bits()); |
| TNode<PairT<Int32T, BoolT>> pair = Int32SubWithOverflow( |
| TruncateIntPtrToInt32(BitcastTaggedSignedToWord(lhs)), |
| TruncateIntPtrToInt32(BitcastTaggedSignedToWord(rhs))); |
| TNode<BoolT> overflow = Projection<1>(pair); |
| GotoIf(overflow, if_overflow); |
| TNode<Int32T> result = Projection<0>(pair); |
| return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(result)); |
| } |
| } |
| |
| TNode<Number> CodeStubAssembler::NumberMax(SloppyTNode<Number> a, |
| SloppyTNode<Number> b) { |
| // TODO(danno): This could be optimized by specifically handling smi cases. |
| TVARIABLE(Number, result); |
| Label done(this), greater_than_equal_a(this), greater_than_equal_b(this); |
| GotoIfNumberGreaterThanOrEqual(a, b, &greater_than_equal_a); |
| GotoIfNumberGreaterThanOrEqual(b, a, &greater_than_equal_b); |
| result = NanConstant(); |
| Goto(&done); |
| BIND(&greater_than_equal_a); |
| result = a; |
| Goto(&done); |
| BIND(&greater_than_equal_b); |
| result = b; |
| Goto(&done); |
| BIND(&done); |
| return result.value(); |
| } |
| |
| TNode<Number> CodeStubAssembler::NumberMin(SloppyTNode<Number> a, |
| SloppyTNode<Number> b) { |
| // TODO(danno): This could be optimized by specifically handling smi cases. |
| TVARIABLE(Number, result); |
| Label done(this), greater_than_equal_a(this), greater_than_equal_b(this); |
| GotoIfNumberGreaterThanOrEqual(a, b, &greater_than_equal_a); |
| GotoIfNumberGreaterThanOrEqual(b, a, &greater_than_equal_b); |
| result = NanConstant(); |
| Goto(&done); |
| BIND(&greater_than_equal_a); |
| result = b; |
| Goto(&done); |
| BIND(&greater_than_equal_b); |
| result = a; |
| Goto(&done); |
| BIND(&done); |
| return result.value(); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::ConvertToRelativeIndex( |
| TNode<Context> context, TNode<Object> index, TNode<IntPtrT> length) { |
| TVARIABLE(IntPtrT, result); |
| |
| TNode<Number> const index_int = |
| ToInteger_Inline(context, index, CodeStubAssembler::kTruncateMinusZero); |
| TNode<IntPtrT> zero = IntPtrConstant(0); |
| |
| Label done(this); |
| Label if_issmi(this), if_isheapnumber(this, Label::kDeferred); |
| Branch(TaggedIsSmi(index_int), &if_issmi, &if_isheapnumber); |
| |
| BIND(&if_issmi); |
| { |
| TNode<Smi> const index_smi = CAST(index_int); |
| result = Select<IntPtrT>( |
| IntPtrLessThan(SmiUntag(index_smi), zero), |
| [=] { return IntPtrMax(IntPtrAdd(length, SmiUntag(index_smi)), zero); }, |
| [=] { return IntPtrMin(SmiUntag(index_smi), length); }); |
| Goto(&done); |
| } |
| |
| BIND(&if_isheapnumber); |
| { |
| // If {index} is a heap number, it is definitely out of bounds. If it is |
| // negative, {index} = max({length} + {index}, 0) = 0. If it is positive, |
| // set {index} to {length}. |
| TNode<HeapNumber> const index_hn = CAST(index_int); |
| TNode<Float64T> const float_zero = Float64Constant(0.); |
| TNode<Float64T> const index_float = LoadHeapNumberValue(index_hn); |
| result = SelectConstant<IntPtrT>(Float64LessThan(index_float, float_zero), |
| zero, length); |
| Goto(&done); |
| } |
| BIND(&done); |
| return result.value(); |
| } |
| |
| TNode<Number> CodeStubAssembler::SmiMod(TNode<Smi> a, TNode<Smi> b) { |
| TVARIABLE(Number, var_result); |
| Label return_result(this, &var_result), |
| return_minuszero(this, Label::kDeferred), |
| return_nan(this, Label::kDeferred); |
| |
| // Untag {a} and {b}. |
| TNode<Int32T> int_a = SmiToInt32(a); |
| TNode<Int32T> int_b = SmiToInt32(b); |
| |
| // Return NaN if {b} is zero. |
| GotoIf(Word32Equal(int_b, Int32Constant(0)), &return_nan); |
| |
| // Check if {a} is non-negative. |
| Label if_aisnotnegative(this), if_aisnegative(this, Label::kDeferred); |
| Branch(Int32LessThanOrEqual(Int32Constant(0), int_a), &if_aisnotnegative, |
| &if_aisnegative); |
| |
| BIND(&if_aisnotnegative); |
| { |
| // Fast case, don't need to check any other edge cases. |
| TNode<Int32T> r = Int32Mod(int_a, int_b); |
| var_result = SmiFromInt32(r); |
| Goto(&return_result); |
| } |
| |
| BIND(&if_aisnegative); |
| { |
| if (SmiValuesAre32Bits()) { |
| // Check if {a} is kMinInt and {b} is -1 (only relevant if the |
| // kMinInt is actually representable as a Smi). |
| Label join(this); |
| GotoIfNot(Word32Equal(int_a, Int32Constant(kMinInt)), &join); |
| GotoIf(Word32Equal(int_b, Int32Constant(-1)), &return_minuszero); |
| Goto(&join); |
| BIND(&join); |
| } |
| |
| // Perform the integer modulus operation. |
| TNode<Int32T> r = Int32Mod(int_a, int_b); |
| |
| // Check if {r} is zero, and if so return -0, because we have to |
| // take the sign of the left hand side {a}, which is negative. |
| GotoIf(Word32Equal(r, Int32Constant(0)), &return_minuszero); |
| |
| // The remainder {r} can be outside the valid Smi range on 32bit |
| // architectures, so we cannot just say SmiFromInt32(r) here. |
| var_result = ChangeInt32ToTagged(r); |
| Goto(&return_result); |
| } |
| |
| BIND(&return_minuszero); |
| var_result = MinusZeroConstant(); |
| Goto(&return_result); |
| |
| BIND(&return_nan); |
| var_result = NanConstant(); |
| Goto(&return_result); |
| |
| BIND(&return_result); |
| return var_result.value(); |
| } |
| |
| TNode<Number> CodeStubAssembler::SmiMul(TNode<Smi> a, TNode<Smi> b) { |
| TVARIABLE(Number, var_result); |
| VARIABLE(var_lhs_float64, MachineRepresentation::kFloat64); |
| VARIABLE(var_rhs_float64, MachineRepresentation::kFloat64); |
| Label return_result(this, &var_result); |
| |
| // Both {a} and {b} are Smis. Convert them to integers and multiply. |
| Node* lhs32 = SmiToInt32(a); |
| Node* rhs32 = SmiToInt32(b); |
| Node* pair = Int32MulWithOverflow(lhs32, rhs32); |
| |
| Node* overflow = Projection(1, pair); |
| |
| // Check if the multiplication overflowed. |
| Label if_overflow(this, Label::kDeferred), if_notoverflow(this); |
| Branch(overflow, &if_overflow, &if_notoverflow); |
| BIND(&if_notoverflow); |
| { |
| // If the answer is zero, we may need to return -0.0, depending on the |
| // input. |
| Label answer_zero(this), answer_not_zero(this); |
| Node* answer = Projection(0, pair); |
| Node* zero = Int32Constant(0); |
| Branch(Word32Equal(answer, zero), &answer_zero, &answer_not_zero); |
| BIND(&answer_not_zero); |
| { |
| var_result = ChangeInt32ToTagged(answer); |
| Goto(&return_result); |
| } |
| BIND(&answer_zero); |
| { |
| Node* or_result = Word32Or(lhs32, rhs32); |
| Label if_should_be_negative_zero(this), if_should_be_zero(this); |
| Branch(Int32LessThan(or_result, zero), &if_should_be_negative_zero, |
| &if_should_be_zero); |
| BIND(&if_should_be_negative_zero); |
| { |
| var_result = MinusZeroConstant(); |
| Goto(&return_result); |
| } |
| BIND(&if_should_be_zero); |
| { |
| var_result = SmiConstant(0); |
| Goto(&return_result); |
| } |
| } |
| } |
| BIND(&if_overflow); |
| { |
| var_lhs_float64.Bind(SmiToFloat64(a)); |
| var_rhs_float64.Bind(SmiToFloat64(b)); |
| Node* value = Float64Mul(var_lhs_float64.value(), var_rhs_float64.value()); |
| var_result = AllocateHeapNumberWithValue(value); |
| Goto(&return_result); |
| } |
| |
| BIND(&return_result); |
| return var_result.value(); |
| } |
| |
| TNode<Smi> CodeStubAssembler::TrySmiDiv(TNode<Smi> dividend, TNode<Smi> divisor, |
| Label* bailout) { |
| // Both {dividend} and {divisor} are Smis. Bail out to floating point |
| // division if {divisor} is zero. |
| GotoIf(WordEqual(divisor, SmiConstant(0)), bailout); |
| |
| // Do floating point division if {dividend} is zero and {divisor} is |
| // negative. |
| Label dividend_is_zero(this), dividend_is_not_zero(this); |
| Branch(WordEqual(dividend, SmiConstant(0)), ÷nd_is_zero, |
| ÷nd_is_not_zero); |
| |
| BIND(÷nd_is_zero); |
| { |
| GotoIf(SmiLessThan(divisor, SmiConstant(0)), bailout); |
| Goto(÷nd_is_not_zero); |
| } |
| BIND(÷nd_is_not_zero); |
| |
| TNode<Int32T> untagged_divisor = SmiToInt32(divisor); |
| TNode<Int32T> untagged_dividend = SmiToInt32(dividend); |
| |
| // Do floating point division if {dividend} is kMinInt (or kMinInt / 2, the |
| // minimum Smi, if the Smi size is 31) and {divisor} is -1. |
| Label divisor_is_minus_one(this), divisor_is_not_minus_one(this); |
| Branch(Word32Equal(untagged_divisor, Int32Constant(-1)), |
| &divisor_is_minus_one, &divisor_is_not_minus_one); |
| |
| BIND(&divisor_is_minus_one); |
| { |
| GotoIf(Word32Equal( |
| untagged_dividend, |
| Int32Constant(kSmiValueSize == 32 ? kMinInt : (kMinInt >> 1))), |
| bailout); |
| Goto(&divisor_is_not_minus_one); |
| } |
| BIND(&divisor_is_not_minus_one); |
| |
| TNode<Int32T> untagged_result = Int32Div(untagged_dividend, untagged_divisor); |
| TNode<Int32T> truncated = Int32Mul(untagged_result, untagged_divisor); |
| |
| // Do floating point division if the remainder is not 0. |
| GotoIf(Word32NotEqual(untagged_dividend, truncated), bailout); |
| |
| return SmiFromInt32(untagged_result); |
| } |
| |
| TNode<Smi> CodeStubAssembler::SmiLexicographicCompare(TNode<Smi> x, |
| TNode<Smi> y) { |
| TNode<ExternalReference> smi_lexicographic_compare = |
| ExternalConstant(ExternalReference::smi_lexicographic_compare_function()); |
| TNode<ExternalReference> isolate_ptr = |
| ExternalConstant(ExternalReference::isolate_address(isolate())); |
| return CAST(CallCFunction(smi_lexicographic_compare, MachineType::AnyTagged(), |
| std::make_pair(MachineType::Pointer(), isolate_ptr), |
| std::make_pair(MachineType::AnyTagged(), x), |
| std::make_pair(MachineType::AnyTagged(), y))); |
| } |
| |
| TNode<Int32T> CodeStubAssembler::TruncateIntPtrToInt32( |
| SloppyTNode<IntPtrT> value) { |
| if (Is64()) { |
| return TruncateInt64ToInt32(ReinterpretCast<Int64T>(value)); |
| } |
| return ReinterpretCast<Int32T>(value); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::TaggedIsSmi(SloppyTNode<Object> a) { |
| return WordEqual(WordAnd(BitcastTaggedToWord(a), IntPtrConstant(kSmiTagMask)), |
| IntPtrConstant(0)); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::TaggedIsSmi(TNode<MaybeObject> a) { |
| return WordEqual( |
| WordAnd(BitcastMaybeObjectToWord(a), IntPtrConstant(kSmiTagMask)), |
| IntPtrConstant(0)); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::TaggedIsNotSmi(SloppyTNode<Object> a) { |
| // Although BitcastTaggedSignedToWord is generally unsafe on HeapObjects, we |
| // can nonetheless use it to inspect the Smi tag. The assumption here is that |
| // the GC will not exchange Smis for HeapObjects or vice-versa. |
| TNode<IntPtrT> a_bitcast = BitcastTaggedSignedToWord(UncheckedCast<Smi>(a)); |
| return WordNotEqual(WordAnd(a_bitcast, IntPtrConstant(kSmiTagMask)), |
| IntPtrConstant(0)); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::TaggedIsPositiveSmi(SloppyTNode<Object> a) { |
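| // A non-negative Smi has both the Smi tag bit and the sign bit (the most |
| // significant bit of the word) cleared. |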
| return WordEqual(WordAnd(BitcastTaggedToWord(a), |
| IntPtrConstant(kSmiTagMask | kSmiSignMask)), |
| IntPtrConstant(0)); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::WordIsAligned(SloppyTNode<WordT> word, |
| size_t alignment) { |
| DCHECK(base::bits::IsPowerOfTwo(alignment)); |
| return WordEqual(IntPtrConstant(0), |
| WordAnd(word, IntPtrConstant(alignment - 1))); |
| } |
| |
| #if DEBUG |
| void CodeStubAssembler::Bind(Label* label, AssemblerDebugInfo debug_info) { |
| CodeAssembler::Bind(label, debug_info); |
| } |
| #endif // DEBUG |
| |
| void CodeStubAssembler::Bind(Label* label) { CodeAssembler::Bind(label); } |
| |
| TNode<Float64T> CodeStubAssembler::LoadDoubleWithHoleCheck( |
| TNode<FixedDoubleArray> array, TNode<Smi> index, Label* if_hole) { |
| return LoadFixedDoubleArrayElement(array, index, MachineType::Float64(), 0, |
| SMI_PARAMETERS, if_hole); |
| } |
| |
| TNode<Float64T> CodeStubAssembler::LoadDoubleWithHoleCheck( |
| TNode<FixedDoubleArray> array, TNode<IntPtrT> index, Label* if_hole) { |
| return LoadFixedDoubleArrayElement(array, index, MachineType::Float64(), 0, |
| INTPTR_PARAMETERS, if_hole); |
| } |
| |
| void CodeStubAssembler::BranchIfPrototypesHaveNoElements( |
| Node* receiver_map, Label* definitely_no_elements, |
| Label* possibly_elements) { |
| CSA_SLOW_ASSERT(this, IsMap(receiver_map)); |
| VARIABLE(var_map, MachineRepresentation::kTagged, receiver_map); |
| Label loop_body(this, &var_map); |
| Node* empty_fixed_array = LoadRoot(RootIndex::kEmptyFixedArray); |
| Node* empty_slow_element_dictionary = |
| LoadRoot(RootIndex::kEmptySlowElementDictionary); |
| Goto(&loop_body); |
| |
| BIND(&loop_body); |
| { |
| Node* map = var_map.value(); |
| Node* prototype = LoadMapPrototype(map); |
| GotoIf(IsNull(prototype), definitely_no_elements); |
| Node* prototype_map = LoadMap(prototype); |
| TNode<Int32T> prototype_instance_type = LoadMapInstanceType(prototype_map); |
| |
| // Pessimistically assume elements if a Proxy, Special API Object, |
| // or JSPrimitiveWrapper wrapper is found on the prototype chain. After this |
| // instance type check, it's not necessary to check for interceptors or |
| // access checks. |
| Label if_custom(this, Label::kDeferred), if_notcustom(this); |
| Branch(IsCustomElementsReceiverInstanceType(prototype_instance_type), |
| &if_custom, &if_notcustom); |
| |
| BIND(&if_custom); |
| { |
| // For string JSPrimitiveWrapper wrappers we still support the checks as |
| // long as they wrap the empty string. |
| GotoIfNot( |
| InstanceTypeEqual(prototype_instance_type, JS_PRIMITIVE_WRAPPER_TYPE), |
| possibly_elements); |
| Node* prototype_value = LoadJSPrimitiveWrapperValue(prototype); |
| Branch(IsEmptyString(prototype_value), &if_notcustom, possibly_elements); |
| } |
| |
| BIND(&if_notcustom); |
| { |
| Node* prototype_elements = LoadElements(prototype); |
| var_map.Bind(prototype_map); |
| GotoIf(WordEqual(prototype_elements, empty_fixed_array), &loop_body); |
| Branch(WordEqual(prototype_elements, empty_slow_element_dictionary), |
| &loop_body, possibly_elements); |
| } |
| } |
| } |
| |
| void CodeStubAssembler::BranchIfJSReceiver(Node* object, Label* if_true, |
| Label* if_false) { |
| GotoIf(TaggedIsSmi(object), if_false); |
| STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE); |
| Branch(IsJSReceiver(object), if_true, if_false); |
| } |
| |
| void CodeStubAssembler::GotoIfForceSlowPath(Label* if_true) { |
| #ifdef V8_ENABLE_FORCE_SLOW_PATH |
| Node* const force_slow_path_addr = |
| ExternalConstant(ExternalReference::force_slow_path(isolate())); |
| Node* const force_slow = Load(MachineType::Uint8(), force_slow_path_addr); |
| |
| GotoIf(force_slow, if_true); |
| #endif |
| } |
| |
| void CodeStubAssembler::GotoIfDebugExecutionModeChecksSideEffects( |
| Label* if_true) { |
| STATIC_ASSERT(sizeof(DebugInfo::ExecutionMode) >= sizeof(int32_t)); |
| |
| TNode<ExternalReference> execution_mode_address = ExternalConstant( |
| ExternalReference::debug_execution_mode_address(isolate())); |
| TNode<Int32T> execution_mode = |
| UncheckedCast<Int32T>(Load(MachineType::Int32(), execution_mode_address)); |
| |
| GotoIf(Word32Equal(execution_mode, Int32Constant(DebugInfo::kSideEffects)), |
| if_true); |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::AllocateRaw(TNode<IntPtrT> size_in_bytes, |
| AllocationFlags flags, |
| TNode<RawPtrT> top_address, |
| TNode<RawPtrT> limit_address) { |
| Label if_out_of_memory(this, Label::kDeferred); |
| |
| // TODO(jgruber,jkummerow): Extract the slow paths (= probably everything |
| // but bump pointer allocation) into a builtin to save code space. The |
| // size_in_bytes check may be moved there as well since a non-smi |
| // size_in_bytes probably doesn't fit into the bump pointer region |
| // (double-check that). |
| |
| intptr_t size_in_bytes_constant; |
| bool size_in_bytes_is_constant = false; |
| if (ToIntPtrConstant(size_in_bytes, size_in_bytes_constant)) { |
| size_in_bytes_is_constant = true; |
| CHECK(Internals::IsValidSmi(size_in_bytes_constant)); |
| CHECK_GT(size_in_bytes_constant, 0); |
| } else { |
| GotoIfNot(IsValidPositiveSmi(size_in_bytes), &if_out_of_memory); |
| } |
| |
| TNode<RawPtrT> top = |
| UncheckedCast<RawPtrT>(Load(MachineType::Pointer(), top_address)); |
| TNode<RawPtrT> limit = |
| UncheckedCast<RawPtrT>(Load(MachineType::Pointer(), limit_address)); |
| |
| // If there's not enough space, call the runtime. |
| TVARIABLE(Object, result); |
| Label runtime_call(this, Label::kDeferred), no_runtime_call(this), out(this); |
| |
| bool needs_double_alignment = flags & kDoubleAlignment; |
| bool allow_large_object_allocation = flags & kAllowLargeObjectAllocation; |
| |
| if (allow_large_object_allocation) { |
| Label next(this); |
| GotoIf(IsRegularHeapObjectSize(size_in_bytes), &next); |
| |
| TNode<Smi> runtime_flags = SmiConstant(Smi::FromInt( |
| AllocateDoubleAlignFlag::encode(needs_double_alignment) | |
| AllowLargeObjectAllocationFlag::encode(allow_large_object_allocation))); |
| if (FLAG_young_generation_large_objects) { |
| result = |
| CallRuntime(Runtime::kAllocateInYoungGeneration, NoContextConstant(), |
| SmiTag(size_in_bytes), runtime_flags); |
| } else { |
| result = |
| CallRuntime(Runtime::kAllocateInOldGeneration, NoContextConstant(), |
| SmiTag(size_in_bytes), runtime_flags); |
| } |
| Goto(&out); |
| |
| BIND(&next); |
| } |
| |
| TVARIABLE(IntPtrT, adjusted_size, size_in_bytes); |
| |
| if (needs_double_alignment) { |
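| // If the current top is not double-aligned, reserve four extra bytes so |
| // that a one-word filler can be stored in front of the object below. |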
| Label next(this); |
| GotoIfNot(WordAnd(top, IntPtrConstant(kDoubleAlignmentMask)), &next); |
| |
| adjusted_size = IntPtrAdd(size_in_bytes, IntPtrConstant(4)); |
| Goto(&next); |
| |
| BIND(&next); |
| } |
| |
| TNode<IntPtrT> new_top = |
| IntPtrAdd(UncheckedCast<IntPtrT>(top), adjusted_size.value()); |
| |
| Branch(UintPtrGreaterThanOrEqual(new_top, limit), &runtime_call, |
| &no_runtime_call); |
| |
| BIND(&runtime_call); |
| { |
| TNode<Smi> runtime_flags = SmiConstant(Smi::FromInt( |
| AllocateDoubleAlignFlag::encode(needs_double_alignment) | |
| AllowLargeObjectAllocationFlag::encode(allow_large_object_allocation))); |
| if (flags & kPretenured) { |
| result = |
| CallRuntime(Runtime::kAllocateInOldGeneration, NoContextConstant(), |
| SmiTag(size_in_bytes), runtime_flags); |
| } else { |
| result = |
| CallRuntime(Runtime::kAllocateInYoungGeneration, NoContextConstant(), |
| SmiTag(size_in_bytes), runtime_flags); |
| } |
| Goto(&out); |
| } |
| |
| // When there is enough space, return `top' and bump it up. |
| BIND(&no_runtime_call); |
| { |
| StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address, |
| new_top); |
| |
| TVARIABLE(IntPtrT, address, UncheckedCast<IntPtrT>(top)); |
| |
| if (needs_double_alignment) { |
| Label next(this); |
| GotoIf(IntPtrEqual(adjusted_size.value(), size_in_bytes), &next); |
| |
| // Store a filler and increase the address by 4. |
| StoreNoWriteBarrier(MachineRepresentation::kTagged, top, |
| LoadRoot(RootIndex::kOnePointerFillerMap)); |
| address = IntPtrAdd(UncheckedCast<IntPtrT>(top), IntPtrConstant(4)); |
| Goto(&next); |
| |
| BIND(&next); |
| } |
| |
| result = BitcastWordToTagged( |
| IntPtrAdd(address.value(), IntPtrConstant(kHeapObjectTag))); |
| Goto(&out); |
| } |
| |
| if (!size_in_bytes_is_constant) { |
| BIND(&if_out_of_memory); |
| CallRuntime(Runtime::kFatalProcessOutOfMemoryInAllocateRaw, |
| NoContextConstant()); |
| Unreachable(); |
| } |
| |
| BIND(&out); |
| return UncheckedCast<HeapObject>(result.value()); |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::AllocateRawUnaligned( |
| TNode<IntPtrT> size_in_bytes, AllocationFlags flags, |
| TNode<RawPtrT> top_address, TNode<RawPtrT> limit_address) { |
| DCHECK_EQ(flags & kDoubleAlignment, 0); |
| return AllocateRaw(size_in_bytes, flags, top_address, limit_address); |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::AllocateRawDoubleAligned( |
| TNode<IntPtrT> size_in_bytes, AllocationFlags flags, |
| TNode<RawPtrT> top_address, TNode<RawPtrT> limit_address) { |
| #if defined(V8_HOST_ARCH_32_BIT) |
| return AllocateRaw(size_in_bytes, flags | kDoubleAlignment, top_address, |
| limit_address); |
| #elif defined(V8_HOST_ARCH_64_BIT) |
| #ifdef V8_COMPRESS_POINTERS |
| // TODO(ishell, v8:8875): Consider using aligned allocations once the |
| // allocation alignment inconsistency is fixed. For now we keep using |
| // unaligned access since both x64 and arm64 architectures (where pointer |
| // compression is supported) allow unaligned access to doubles and full words. |
| #endif // V8_COMPRESS_POINTERS |
| // Allocation on a 64-bit machine is naturally double-aligned. |
| return AllocateRaw(size_in_bytes, flags & ~kDoubleAlignment, top_address, |
| limit_address); |
| #else |
| #error Architecture not supported |
| #endif |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::AllocateInNewSpace( |
| TNode<IntPtrT> size_in_bytes, AllocationFlags flags) { |
| DCHECK(flags == kNone || flags == kDoubleAlignment); |
| CSA_ASSERT(this, IsRegularHeapObjectSize(size_in_bytes)); |
| return Allocate(size_in_bytes, flags); |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::Allocate(TNode<IntPtrT> size_in_bytes, |
| AllocationFlags flags) { |
| Comment("Allocate"); |
| bool const new_space = !(flags & kPretenured); |
| bool const allow_large_objects = flags & kAllowLargeObjectAllocation; |
| // For optimized allocations, we don't allow the allocation to happen in a |
| // different generation than requested. |
| bool const always_allocated_in_requested_space = |
| !new_space || !allow_large_objects || FLAG_young_generation_large_objects; |
| if (!allow_large_objects) { |
| intptr_t size_constant; |
| if (ToIntPtrConstant(size_in_bytes, size_constant)) { |
| CHECK_LE(size_constant, kMaxRegularHeapObjectSize); |
| } else { |
| CSA_ASSERT(this, IsRegularHeapObjectSize(size_in_bytes)); |
| } |
| } |
| if (!(flags & kDoubleAlignment) && always_allocated_in_requested_space) { |
| return OptimizedAllocate( |
| size_in_bytes, |
| new_space ? AllocationType::kYoung : AllocationType::kOld, |
| allow_large_objects ? AllowLargeObjects::kTrue |
| : AllowLargeObjects::kFalse); |
| } |
| TNode<ExternalReference> top_address = ExternalConstant( |
| new_space |
| ? ExternalReference::new_space_allocation_top_address(isolate()) |
| : ExternalReference::old_space_allocation_top_address(isolate())); |
| DCHECK_EQ(kSystemPointerSize, |
| ExternalReference::new_space_allocation_limit_address(isolate()) |
| .address() - |
| ExternalReference::new_space_allocation_top_address(isolate()) |
| .address()); |
| DCHECK_EQ(kSystemPointerSize, |
| ExternalReference::old_space_allocation_limit_address(isolate()) |
| .address() - |
| ExternalReference::old_space_allocation_top_address(isolate()) |
| .address()); |
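| // The allocation limit slot immediately follows the top slot (asserted |
| // above), so its address can be derived by adding kSystemPointerSize. |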
| TNode<IntPtrT> limit_address = |
| IntPtrAdd(ReinterpretCast<IntPtrT>(top_address), |
| IntPtrConstant(kSystemPointerSize)); |
| |
| if (flags & kDoubleAlignment) { |
| return AllocateRawDoubleAligned(size_in_bytes, flags, |
| ReinterpretCast<RawPtrT>(top_address), |
| ReinterpretCast<RawPtrT>(limit_address)); |
| } else { |
| return AllocateRawUnaligned(size_in_bytes, flags, |
| ReinterpretCast<RawPtrT>(top_address), |
| ReinterpretCast<RawPtrT>(limit_address)); |
| } |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::AllocateInNewSpace(int size_in_bytes, |
| AllocationFlags flags) { |
| CHECK(flags == kNone || flags == kDoubleAlignment); |
| DCHECK_LE(size_in_bytes, kMaxRegularHeapObjectSize); |
| return CodeStubAssembler::Allocate(IntPtrConstant(size_in_bytes), flags); |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::Allocate(int size_in_bytes, |
| AllocationFlags flags) { |
| return CodeStubAssembler::Allocate(IntPtrConstant(size_in_bytes), flags); |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::InnerAllocate(TNode<HeapObject> previous, |
| TNode<IntPtrT> offset) { |
| return UncheckedCast<HeapObject>( |
| BitcastWordToTagged(IntPtrAdd(BitcastTaggedToWord(previous), offset))); |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::InnerAllocate(TNode<HeapObject> previous, |
| int offset) { |
| return InnerAllocate(previous, IntPtrConstant(offset)); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::IsRegularHeapObjectSize(TNode<IntPtrT> size) { |
| return UintPtrLessThanOrEqual(size, |
| IntPtrConstant(kMaxRegularHeapObjectSize)); |
| } |
| |
| void CodeStubAssembler::BranchIfToBooleanIsTrue(Node* value, Label* if_true, |
| Label* if_false) { |
| Label if_smi(this), if_notsmi(this), if_heapnumber(this, Label::kDeferred), |
| if_bigint(this, Label::kDeferred); |
| // Rule out false {value}. |
| GotoIf(WordEqual(value, FalseConstant()), if_false); |
| |
| // Check if {value} is a Smi or a HeapObject. |
| Branch(TaggedIsSmi(value), &if_smi, &if_notsmi); |
| |
| BIND(&if_smi); |
| { |
| // The {value} is a Smi; we only need to check it against zero. |
| BranchIfSmiEqual(CAST(value), SmiConstant(0), if_false, if_true); |
| } |
| |
| BIND(&if_notsmi); |
| { |
| // Check if {value} is the empty string. |
| GotoIf(IsEmptyString(value), if_false); |
| |
| // The {value} is a HeapObject, load its map. |
| Node* value_map = LoadMap(value); |
| |
| // Only null, undefined and document.all have the undetectable bit set, |
| // so we can return false immediately when that bit is set. |
| GotoIf(IsUndetectableMap(value_map), if_false); |
| |
| // We still need to handle numbers specially, but all other {value}s |
| // that make it here yield true. |
| GotoIf(IsHeapNumberMap(value_map), &if_heapnumber); |
| Branch(IsBigInt(value), &if_bigint, if_true); |
| |
| BIND(&if_heapnumber); |
| { |
| // Load the floating point value of {value}. |
| Node* value_value = LoadObjectField(value, HeapNumber::kValueOffset, |
| MachineType::Float64()); |
| |
| // Check if the floating point {value} is neither 0.0, -0.0 nor NaN. |
| Branch(Float64LessThan(Float64Constant(0.0), Float64Abs(value_value)), |
| if_true, if_false); |
| } |
| |
| BIND(&if_bigint); |
| { |
| TNode<BigInt> bigint = CAST(value); |
| TNode<Word32T> bitfield = LoadBigIntBitfield(bigint); |
| TNode<Uint32T> length = DecodeWord32<BigIntBase::LengthBits>(bitfield); |
| Branch(Word32Equal(length, Int32Constant(0)), if_false, if_true); |
| } |
| } |
| } |
| |
| Node* CodeStubAssembler::LoadFromParentFrame(int offset, MachineType type) { |
| Node* frame_pointer = LoadParentFramePointer(); |
| return Load(type, frame_pointer, IntPtrConstant(offset)); |
| } |
| |
| Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset, |
| MachineType type) { |
| return Load(type, buffer, IntPtrConstant(offset)); |
| } |
| |
| Node* CodeStubAssembler::LoadObjectField(SloppyTNode<HeapObject> object, |
| int offset, MachineType type) { |
| CSA_ASSERT(this, IsStrong(object)); |
| return LoadFromObject(type, object, IntPtrConstant(offset - kHeapObjectTag)); |
| } |
| |
| Node* CodeStubAssembler::LoadObjectField(SloppyTNode<HeapObject> object, |
| SloppyTNode<IntPtrT> offset, |
| MachineType type) { |
| CSA_ASSERT(this, IsStrong(object)); |
| return LoadFromObject(type, object, |
| IntPtrSub(offset, IntPtrConstant(kHeapObjectTag))); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::LoadAndUntagObjectField( |
| SloppyTNode<HeapObject> object, int offset) { |
| if (SmiValuesAre32Bits()) { |
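| // With 32-bit Smi values the payload lives in the upper half of the tagged |
| // word, which on little-endian targets is 4 bytes past the field offset. |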
| #if V8_TARGET_LITTLE_ENDIAN |
| offset += 4; |
| #endif |
| return ChangeInt32ToIntPtr( |
| LoadObjectField(object, offset, MachineType::Int32())); |
| } else { |
| return SmiToIntPtr( |
| LoadObjectField(object, offset, MachineType::AnyTagged())); |
| } |
| } |
| |
| TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ObjectField(Node* object, |
| int offset) { |
| if (SmiValuesAre32Bits()) { |
| #if V8_TARGET_LITTLE_ENDIAN |
| offset += 4; |
| #endif |
| return UncheckedCast<Int32T>( |
| LoadObjectField(object, offset, MachineType::Int32())); |
| } else { |
| return SmiToInt32( |
| LoadObjectField(object, offset, MachineType::AnyTagged())); |
| } |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::LoadAndUntagSmi(Node* base, int index) { |
| if (SmiValuesAre32Bits()) { |
| #if V8_TARGET_LITTLE_ENDIAN |
| index += 4; |
| #endif |
| return ChangeInt32ToIntPtr( |
| Load(MachineType::Int32(), base, IntPtrConstant(index))); |
| } else { |
| return SmiToIntPtr( |
| Load(MachineType::AnyTagged(), base, IntPtrConstant(index))); |
| } |
| } |
| |
| void CodeStubAssembler::StoreAndTagSmi(Node* base, int offset, Node* value) { |
| if (SmiValuesAre32Bits()) { |
| int zero_offset = offset + 4; |
| int payload_offset = offset; |
| #if V8_TARGET_LITTLE_ENDIAN |
| std::swap(zero_offset, payload_offset); |
| #endif |
| StoreNoWriteBarrier(MachineRepresentation::kWord32, base, |
| IntPtrConstant(zero_offset), Int32Constant(0)); |
| StoreNoWriteBarrier(MachineRepresentation::kWord32, base, |
| IntPtrConstant(payload_offset), |
| TruncateInt64ToInt32(value)); |
| } else { |
| StoreNoWriteBarrier(MachineRepresentation::kTaggedSigned, base, |
| IntPtrConstant(offset), SmiTag(value)); |
| } |
| } |
| |
| TNode<Float64T> CodeStubAssembler::LoadHeapNumberValue( |
| SloppyTNode<HeapNumber> object) { |
| return TNode<Float64T>::UncheckedCast(LoadObjectField( |
| object, HeapNumber::kValueOffset, MachineType::Float64())); |
| } |
| |
| TNode<Map> CodeStubAssembler::GetStructMap(InstanceType instance_type) { |
| Handle<Map> map_handle(Map::GetStructMap(isolate(), instance_type), |
| isolate()); |
| return HeapConstant(map_handle); |
| } |
| |
| TNode<Map> CodeStubAssembler::LoadMap(SloppyTNode<HeapObject> object) { |
| return UncheckedCast<Map>(LoadObjectField(object, HeapObject::kMapOffset, |
| MachineType::TaggedPointer())); |
| } |
| |
| TNode<Uint16T> CodeStubAssembler::LoadInstanceType( |
| SloppyTNode<HeapObject> object) { |
| return LoadMapInstanceType(LoadMap(object)); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::HasInstanceType(SloppyTNode<HeapObject> object, |
| InstanceType instance_type) { |
| return InstanceTypeEqual(LoadInstanceType(object), instance_type); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::DoesntHaveInstanceType( |
| SloppyTNode<HeapObject> object, InstanceType instance_type) { |
| return Word32NotEqual(LoadInstanceType(object), Int32Constant(instance_type)); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::TaggedDoesntHaveInstanceType( |
| SloppyTNode<HeapObject> any_tagged, InstanceType type) { |
| /* return Phi <TaggedIsSmi(val), DoesntHaveInstanceType(val, type)> */ |
| TNode<BoolT> tagged_is_smi = TaggedIsSmi(any_tagged); |
| return Select<BoolT>( |
| tagged_is_smi, [=]() { return tagged_is_smi; }, |
| [=]() { return DoesntHaveInstanceType(any_tagged, type); }); |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::LoadFastProperties( |
| SloppyTNode<JSObject> object) { |
| CSA_SLOW_ASSERT(this, Word32BinaryNot(IsDictionaryMap(LoadMap(object)))); |
| TNode<Object> properties = LoadJSReceiverPropertiesOrHash(object); |
| return Select<HeapObject>( |
| TaggedIsSmi(properties), [=] { return EmptyFixedArrayConstant(); }, |
| [=] { return CAST(properties); }); |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::LoadSlowProperties( |
| SloppyTNode<JSObject> object) { |
| CSA_SLOW_ASSERT(this, IsDictionaryMap(LoadMap(object))); |
| TNode<Object> properties = LoadJSReceiverPropertiesOrHash(object); |
| return Select<HeapObject>( |
| TaggedIsSmi(properties), |
| [=] { return EmptyPropertyDictionaryConstant(); }, |
| [=] { return CAST(properties); }); |
| } |
| |
| TNode<Number> CodeStubAssembler::LoadJSArrayLength(SloppyTNode<JSArray> array) { |
| CSA_ASSERT(this, IsJSArray(array)); |
| return CAST(LoadObjectField(array, JSArray::kLengthOffset)); |
| } |
| |
| TNode<Object> CodeStubAssembler::LoadJSArgumentsObjectWithLength( |
| SloppyTNode<JSArgumentsObjectWithLength> array) { |
| return LoadObjectField(array, JSArgumentsObjectWithLength::kLengthOffset); |
| } |
| |
| TNode<Smi> CodeStubAssembler::LoadFastJSArrayLength( |
| SloppyTNode<JSArray> array) { |
| TNode<Object> length = LoadJSArrayLength(array); |
| CSA_ASSERT(this, Word32Or(IsFastElementsKind(LoadElementsKind(array)), |
| IsElementsKindInRange(LoadElementsKind(array), |
| PACKED_SEALED_ELEMENTS, |
| HOLEY_FROZEN_ELEMENTS))); |
| // JSArray length is always a positive Smi for fast arrays. |
| CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length)); |
| return UncheckedCast<Smi>(length); |
| } |
| |
| TNode<Smi> CodeStubAssembler::LoadFixedArrayBaseLength( |
| SloppyTNode<FixedArrayBase> array) { |
| CSA_SLOW_ASSERT(this, IsNotWeakFixedArraySubclass(array)); |
| return CAST(LoadObjectField(array, FixedArrayBase::kLengthOffset)); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::LoadAndUntagFixedArrayBaseLength( |
| SloppyTNode<FixedArrayBase> array) { |
| return LoadAndUntagObjectField(array, FixedArrayBase::kLengthOffset); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::LoadFeedbackVectorLength( |
| TNode<FeedbackVector> vector) { |
| return ChangeInt32ToIntPtr( |
| LoadObjectField<Int32T>(vector, FeedbackVector::kLengthOffset)); |
| } |
| |
| TNode<Smi> CodeStubAssembler::LoadWeakFixedArrayLength( |
| TNode<WeakFixedArray> array) { |
| return CAST(LoadObjectField(array, WeakFixedArray::kLengthOffset)); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::LoadAndUntagWeakFixedArrayLength( |
| SloppyTNode<WeakFixedArray> array) { |
| return LoadAndUntagObjectField(array, WeakFixedArray::kLengthOffset); |
| } |
| |
| TNode<Int32T> CodeStubAssembler::LoadNumberOfDescriptors( |
| TNode<DescriptorArray> array) { |
| return UncheckedCast<Int32T>( |
| LoadObjectField(array, DescriptorArray::kNumberOfDescriptorsOffset, |
| MachineType::Int16())); |
| } |
| |
| TNode<Int32T> CodeStubAssembler::LoadMapBitField(SloppyTNode<Map> map) { |
| CSA_SLOW_ASSERT(this, IsMap(map)); |
| return UncheckedCast<Int32T>( |
| LoadObjectField(map, Map::kBitFieldOffset, MachineType::Uint8())); |
| } |
| |
| TNode<Int32T> CodeStubAssembler::LoadMapBitField2(SloppyTNode<Map> map) { |
| CSA_SLOW_ASSERT(this, IsMap(map)); |
| return UncheckedCast<Int32T>( |
| LoadObjectField(map, Map::kBitField2Offset, MachineType::Uint8())); |
| } |
| |
| TNode<Uint32T> CodeStubAssembler::LoadMapBitField3(SloppyTNode<Map> map) { |
| CSA_SLOW_ASSERT(this, IsMap(map)); |
| return UncheckedCast<Uint32T>( |
| LoadObjectField(map, Map::kBitField3Offset, MachineType::Uint32())); |
| } |
| |
| TNode<Uint16T> CodeStubAssembler::LoadMapInstanceType(SloppyTNode<Map> map) { |
| return UncheckedCast<Uint16T>( |
| LoadObjectField(map, Map::kInstanceTypeOffset, MachineType::Uint16())); |
| } |
| |
| TNode<Int32T> CodeStubAssembler::LoadMapElementsKind(SloppyTNode<Map> map) { |
| CSA_SLOW_ASSERT(this, IsMap(map)); |
| Node* bit_field2 = LoadMapBitField2(map); |
| return Signed(DecodeWord32<Map::ElementsKindBits>(bit_field2)); |
| } |
| |
| TNode<Int32T> CodeStubAssembler::LoadElementsKind( |
| SloppyTNode<HeapObject> object) { |
| return LoadMapElementsKind(LoadMap(object)); |
| } |
| |
| TNode<DescriptorArray> CodeStubAssembler::LoadMapDescriptors( |
| SloppyTNode<Map> map) { |
| CSA_SLOW_ASSERT(this, IsMap(map)); |
| return CAST(LoadObjectField(map, Map::kInstanceDescriptorsOffset)); |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::LoadMapPrototype(SloppyTNode<Map> map) { |
| CSA_SLOW_ASSERT(this, IsMap(map)); |
| return CAST(LoadObjectField(map, Map::kPrototypeOffset)); |
| } |
| |
| TNode<PrototypeInfo> CodeStubAssembler::LoadMapPrototypeInfo( |
| SloppyTNode<Map> map, Label* if_no_proto_info) { |
| Label if_strong_heap_object(this); |
| CSA_ASSERT(this, IsMap(map)); |
| TNode<MaybeObject> maybe_prototype_info = |
| LoadMaybeWeakObjectField(map, Map::kTransitionsOrPrototypeInfoOffset); |
| TVARIABLE(Object, prototype_info); |
| DispatchMaybeObject(maybe_prototype_info, if_no_proto_info, if_no_proto_info, |
| if_no_proto_info, &if_strong_heap_object, |
| &prototype_info); |
| |
| BIND(&if_strong_heap_object); |
| GotoIfNot(WordEqual(LoadMap(CAST(prototype_info.value())), |
| LoadRoot(RootIndex::kPrototypeInfoMap)), |
| if_no_proto_info); |
| return CAST(prototype_info.value()); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::LoadMapInstanceSizeInWords( |
| SloppyTNode<Map> map) { |
| CSA_SLOW_ASSERT(this, IsMap(map)); |
| return ChangeInt32ToIntPtr(LoadObjectField( |
| map, Map::kInstanceSizeInWordsOffset, MachineType::Uint8())); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::LoadMapInobjectPropertiesStartInWords( |
| SloppyTNode<Map> map) { |
| CSA_SLOW_ASSERT(this, IsMap(map)); |
| // See Map::GetInObjectPropertiesStartInWords() for details. |
| CSA_ASSERT(this, IsJSObjectMap(map)); |
| return ChangeInt32ToIntPtr(LoadObjectField( |
| map, Map::kInObjectPropertiesStartOrConstructorFunctionIndexOffset, |
| MachineType::Uint8())); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::LoadMapConstructorFunctionIndex( |
| SloppyTNode<Map> map) { |
| CSA_SLOW_ASSERT(this, IsMap(map)); |
| // See Map::GetConstructorFunctionIndex() for details. |
| CSA_ASSERT(this, IsPrimitiveInstanceType(LoadMapInstanceType(map))); |
| return ChangeInt32ToIntPtr(LoadObjectField( |
| map, Map::kInObjectPropertiesStartOrConstructorFunctionIndexOffset, |
| MachineType::Uint8())); |
| } |
| |
| TNode<Object> CodeStubAssembler::LoadMapConstructor(SloppyTNode<Map> map) { |
| CSA_SLOW_ASSERT(this, IsMap(map)); |
| TVARIABLE(Object, result, |
| LoadObjectField(map, Map::kConstructorOrBackPointerOffset)); |
| |
| Label done(this), loop(this, &result); |
| Goto(&loop); |
| BIND(&loop); |
| { |
| GotoIf(TaggedIsSmi(result.value()), &done); |
| Node* is_map_type = |
| InstanceTypeEqual(LoadInstanceType(CAST(result.value())), MAP_TYPE); |
| GotoIfNot(is_map_type, &done); |
| result = LoadObjectField(CAST(result.value()), |
| Map::kConstructorOrBackPointerOffset); |
| Goto(&loop); |
| } |
| BIND(&done); |
| return result.value(); |
| } |
| |
| Node* CodeStubAssembler::LoadMapEnumLength(SloppyTNode<Map> map) { |
| CSA_SLOW_ASSERT(this, IsMap(map)); |
| Node* bit_field3 = LoadMapBitField3(map); |
| return DecodeWordFromWord32<Map::EnumLengthBits>(bit_field3); |
| } |
| |
| TNode<Object> CodeStubAssembler::LoadMapBackPointer(SloppyTNode<Map> map) { |
| TNode<HeapObject> object = |
| CAST(LoadObjectField(map, Map::kConstructorOrBackPointerOffset)); |
| return Select<Object>( |
| IsMap(object), [=] { return object; }, |
| [=] { return UndefinedConstant(); }); |
| } |
| |
| TNode<Uint32T> CodeStubAssembler::EnsureOnlyHasSimpleProperties( |
| TNode<Map> map, TNode<Int32T> instance_type, Label* bailout) { |
| // This check can have false positives, since it applies to any |
| // JSPrimitiveWrapper type. |
| GotoIf(IsCustomElementsReceiverInstanceType(instance_type), bailout); |
| |
| TNode<Uint32T> bit_field3 = LoadMapBitField3(map); |
| GotoIf(IsSetWord32(bit_field3, Map::IsDictionaryMapBit::kMask), bailout); |
| |
| return bit_field3; |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::LoadJSReceiverIdentityHash( |
| SloppyTNode<Object> receiver, Label* if_no_hash) { |
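// The identity hash lives in the properties-or-hash slot: either directly as
// a Smi, encoded in a PropertyArray's length-and-hash word, or stored at
// NameDictionary::kObjectHashIndex of a property dictionary. Anything else
// (e.g. the empty fixed array) means no hash has been installed yet.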
| TVARIABLE(IntPtrT, var_hash); |
| Label done(this), if_smi(this), if_property_array(this), |
| if_property_dictionary(this), if_fixed_array(this); |
| |
| TNode<Object> properties_or_hash = |
| LoadObjectField(TNode<HeapObject>::UncheckedCast(receiver), |
| JSReceiver::kPropertiesOrHashOffset); |
| GotoIf(TaggedIsSmi(properties_or_hash), &if_smi); |
| |
| TNode<HeapObject> properties = |
| TNode<HeapObject>::UncheckedCast(properties_or_hash); |
| TNode<Int32T> properties_instance_type = LoadInstanceType(properties); |
| |
| GotoIf(InstanceTypeEqual(properties_instance_type, PROPERTY_ARRAY_TYPE), |
| &if_property_array); |
| Branch(InstanceTypeEqual(properties_instance_type, NAME_DICTIONARY_TYPE), |
| &if_property_dictionary, &if_fixed_array); |
| |
| BIND(&if_fixed_array); |
| { |
| var_hash = IntPtrConstant(PropertyArray::kNoHashSentinel); |
| Goto(&done); |
| } |
| |
| BIND(&if_smi); |
| { |
| var_hash = SmiUntag(TNode<Smi>::UncheckedCast(properties_or_hash)); |
| Goto(&done); |
| } |
| |
| BIND(&if_property_array); |
| { |
| TNode<IntPtrT> length_and_hash = LoadAndUntagObjectField( |
| properties, PropertyArray::kLengthAndHashOffset); |
| var_hash = TNode<IntPtrT>::UncheckedCast( |
| DecodeWord<PropertyArray::HashField>(length_and_hash)); |
| Goto(&done); |
| } |
| |
| BIND(&if_property_dictionary); |
| { |
| var_hash = SmiUntag(CAST(LoadFixedArrayElement( |
| CAST(properties), NameDictionary::kObjectHashIndex))); |
| Goto(&done); |
| } |
| |
| BIND(&done); |
| if (if_no_hash != nullptr) { |
| GotoIf(IntPtrEqual(var_hash.value(), |
| IntPtrConstant(PropertyArray::kNoHashSentinel)), |
| if_no_hash); |
| } |
| return var_hash.value(); |
| } |
| |
| TNode<Uint32T> CodeStubAssembler::LoadNameHashField(SloppyTNode<Name> name) { |
| CSA_ASSERT(this, IsName(name)); |
| return LoadObjectField<Uint32T>(name, Name::kHashFieldOffset); |
| } |
| |
| TNode<Uint32T> CodeStubAssembler::LoadNameHash(SloppyTNode<Name> name, |
| Label* if_hash_not_computed) { |
| TNode<Uint32T> hash_field = LoadNameHashField(name); |
| if (if_hash_not_computed != nullptr) { |
| GotoIf(IsSetWord32(hash_field, Name::kHashNotComputedMask), |
| if_hash_not_computed); |
| } |
| return Unsigned(Word32Shr(hash_field, Int32Constant(Name::kHashShift))); |
| } |
| |
| TNode<Smi> CodeStubAssembler::LoadStringLengthAsSmi( |
| SloppyTNode<String> string) { |
| return SmiFromIntPtr(LoadStringLengthAsWord(string)); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::LoadStringLengthAsWord( |
| SloppyTNode<String> string) { |
| return Signed(ChangeUint32ToWord(LoadStringLengthAsWord32(string))); |
| } |
| |
| TNode<Uint32T> CodeStubAssembler::LoadStringLengthAsWord32( |
| SloppyTNode<String> string) { |
| CSA_ASSERT(this, IsString(string)); |
| return LoadObjectField<Uint32T>(string, String::kLengthOffset); |
| } |
| |
| Node* CodeStubAssembler::PointerToSeqStringData(Node* seq_string) { |
| CSA_ASSERT(this, IsString(seq_string)); |
| CSA_ASSERT(this, |
| IsSequentialStringInstanceType(LoadInstanceType(seq_string))); |
| STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); |
| return IntPtrAdd( |
| BitcastTaggedToWord(seq_string), |
| IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag)); |
| } |
| |
| Node* CodeStubAssembler::LoadJSPrimitiveWrapperValue(Node* object) { |
| CSA_ASSERT(this, IsJSPrimitiveWrapper(object)); |
| return LoadObjectField(object, JSPrimitiveWrapper::kValueOffset); |
| } |
| |
| void CodeStubAssembler::DispatchMaybeObject(TNode<MaybeObject> maybe_object, |
| Label* if_smi, Label* if_cleared, |
| Label* if_weak, Label* if_strong, |
| TVariable<Object>* extracted) { |
| Label inner_if_smi(this), inner_if_strong(this); |
| |
| GotoIf(TaggedIsSmi(maybe_object), &inner_if_smi); |
| |
| GotoIf(IsCleared(maybe_object), if_cleared); |
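// What remains is either a strong or a weak reference. Strong references
// have only the kHeapObjectTag bit set in their low tag bits; weak ones also
// carry the weak bit, which is masked off below before jumping to |if_weak|.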
| |
| GotoIf(Word32Equal(Word32And(TruncateIntPtrToInt32( |
| BitcastMaybeObjectToWord(maybe_object)), |
| Int32Constant(kHeapObjectTagMask)), |
| Int32Constant(kHeapObjectTag)), |
| &inner_if_strong); |
| |
| *extracted = |
| BitcastWordToTagged(WordAnd(BitcastMaybeObjectToWord(maybe_object), |
| IntPtrConstant(~kWeakHeapObjectMask))); |
| Goto(if_weak); |
| |
| BIND(&inner_if_smi); |
| *extracted = CAST(maybe_object); |
| Goto(if_smi); |
| |
| BIND(&inner_if_strong); |
| *extracted = CAST(maybe_object); |
| Goto(if_strong); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::IsStrong(TNode<MaybeObject> value) { |
| return WordEqual(WordAnd(BitcastMaybeObjectToWord(value), |
| IntPtrConstant(kHeapObjectTagMask)), |
| IntPtrConstant(kHeapObjectTag)); |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::GetHeapObjectIfStrong( |
| TNode<MaybeObject> value, Label* if_not_strong) { |
| GotoIfNot(IsStrong(value), if_not_strong); |
| return CAST(value); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::IsWeakOrCleared(TNode<MaybeObject> value) { |
| return Word32Equal( |
| Word32And(TruncateIntPtrToInt32(BitcastMaybeObjectToWord(value)), |
| Int32Constant(kHeapObjectTagMask)), |
| Int32Constant(kWeakHeapObjectTag)); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::IsCleared(TNode<MaybeObject> value) { |
| return Word32Equal(TruncateIntPtrToInt32(BitcastMaybeObjectToWord(value)), |
| Int32Constant(kClearedWeakHeapObjectLower32)); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::IsNotCleared(TNode<MaybeObject> value) { |
| return Word32NotEqual(TruncateIntPtrToInt32(BitcastMaybeObjectToWord(value)), |
| Int32Constant(kClearedWeakHeapObjectLower32)); |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::GetHeapObjectAssumeWeak( |
| TNode<MaybeObject> value) { |
| CSA_ASSERT(this, IsWeakOrCleared(value)); |
| CSA_ASSERT(this, IsNotCleared(value)); |
| return UncheckedCast<HeapObject>(BitcastWordToTagged(WordAnd( |
| BitcastMaybeObjectToWord(value), IntPtrConstant(~kWeakHeapObjectMask)))); |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::GetHeapObjectAssumeWeak( |
| TNode<MaybeObject> value, Label* if_cleared) { |
| GotoIf(IsCleared(value), if_cleared); |
| return GetHeapObjectAssumeWeak(value); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::IsWeakReferenceTo(TNode<MaybeObject> object, |
| TNode<Object> value) { |
| return WordEqual(WordAnd(BitcastMaybeObjectToWord(object), |
| IntPtrConstant(~kWeakHeapObjectMask)), |
| BitcastTaggedToWord(value)); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::IsStrongReferenceTo(TNode<MaybeObject> object, |
| TNode<Object> value) { |
| return WordEqual(BitcastMaybeObjectToWord(object), |
| BitcastTaggedToWord(value)); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::IsNotWeakReferenceTo(TNode<MaybeObject> object, |
| TNode<Object> value) { |
| return WordNotEqual(WordAnd(BitcastMaybeObjectToWord(object), |
| IntPtrConstant(~kWeakHeapObjectMask)), |
| BitcastTaggedToWord(value)); |
| } |
| |
| TNode<MaybeObject> CodeStubAssembler::MakeWeak(TNode<HeapObject> value) { |
| return ReinterpretCast<MaybeObject>(BitcastWordToTagged( |
| WordOr(BitcastTaggedToWord(value), IntPtrConstant(kWeakHeapObjectTag)))); |
| } |
| |
| template <> |
| TNode<IntPtrT> CodeStubAssembler::LoadArrayLength(TNode<FixedArray> array) { |
| return LoadAndUntagFixedArrayBaseLength(array); |
| } |
| |
| template <> |
| TNode<IntPtrT> CodeStubAssembler::LoadArrayLength(TNode<WeakFixedArray> array) { |
| return LoadAndUntagWeakFixedArrayLength(array); |
| } |
| |
| template <> |
| TNode<IntPtrT> CodeStubAssembler::LoadArrayLength(TNode<PropertyArray> array) { |
| return LoadPropertyArrayLength(array); |
| } |
| |
| template <> |
| TNode<IntPtrT> CodeStubAssembler::LoadArrayLength( |
| TNode<DescriptorArray> array) { |
| return IntPtrMul(ChangeInt32ToIntPtr(LoadNumberOfDescriptors(array)), |
| IntPtrConstant(DescriptorArray::kEntrySize)); |
| } |
| |
| template <> |
| TNode<IntPtrT> CodeStubAssembler::LoadArrayLength( |
| TNode<TransitionArray> array) { |
| return LoadAndUntagWeakFixedArrayLength(array); |
| } |
| |
| template <typename Array, typename T> |
| TNode<T> CodeStubAssembler::LoadArrayElement(TNode<Array> array, |
| int array_header_size, |
| Node* index_node, |
| int additional_offset, |
| ParameterMode parameter_mode, |
| LoadSensitivity needs_poisoning) { |
| CSA_ASSERT(this, IntPtrGreaterThanOrEqual( |
| ParameterToIntPtr(index_node, parameter_mode), |
| IntPtrConstant(0))); |
| DCHECK(IsAligned(additional_offset, kTaggedSize)); |
| int32_t header_size = array_header_size + additional_offset - kHeapObjectTag; |
| TNode<IntPtrT> offset = ElementOffsetFromIndex(index_node, HOLEY_ELEMENTS, |
| parameter_mode, header_size); |
| CSA_ASSERT(this, IsOffsetInBounds(offset, LoadArrayLength(array), |
| array_header_size)); |
| constexpr MachineType machine_type = MachineTypeOf<T>::value; |
| // TODO(gsps): Remove the Load case once LoadFromObject supports poisoning |
| if (needs_poisoning == LoadSensitivity::kSafe) { |
| return UncheckedCast<T>(LoadFromObject(machine_type, array, offset)); |
| } else { |
| return UncheckedCast<T>(Load(machine_type, array, offset, needs_poisoning)); |
| } |
| } |
| |
| template TNode<MaybeObject> |
| CodeStubAssembler::LoadArrayElement<TransitionArray>(TNode<TransitionArray>, |
| int, Node*, int, |
| ParameterMode, |
| LoadSensitivity); |
| |
| template TNode<MaybeObject> |
| CodeStubAssembler::LoadArrayElement<DescriptorArray>(TNode<DescriptorArray>, |
| int, Node*, int, |
| ParameterMode, |
| LoadSensitivity); |
| |
| void CodeStubAssembler::FixedArrayBoundsCheck(TNode<FixedArrayBase> array, |
| Node* index, |
| int additional_offset, |
| ParameterMode parameter_mode) { |
| if (!FLAG_fixed_array_bounds_checks) return; |
| DCHECK(IsAligned(additional_offset, kTaggedSize)); |
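// |additional_offset| is a byte offset; dividing by kTaggedSize turns it into
// an element-index delta for the bounds comparison below.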
| if (parameter_mode == ParameterMode::SMI_PARAMETERS) { |
| TNode<Smi> effective_index; |
| Smi constant_index; |
| bool index_is_constant = ToSmiConstant(index, &constant_index); |
| if (index_is_constant) { |
| effective_index = SmiConstant(Smi::ToInt(constant_index) + |
| additional_offset / kTaggedSize); |
| } else if (additional_offset != 0) { |
| effective_index = |
| SmiAdd(CAST(index), SmiConstant(additional_offset / kTaggedSize)); |
| } else { |
| effective_index = CAST(index); |
| } |
| CSA_CHECK(this, SmiBelow(effective_index, LoadFixedArrayBaseLength(array))); |
| } else { |
| // IntPtrAdd does constant-folding automatically. |
| TNode<IntPtrT> effective_index = |
| IntPtrAdd(UncheckedCast<IntPtrT>(index), |
| IntPtrConstant(additional_offset / kTaggedSize)); |
| CSA_CHECK(this, UintPtrLessThan(effective_index, |
| LoadAndUntagFixedArrayBaseLength(array))); |
| } |
| } |
| |
| TNode<Object> CodeStubAssembler::LoadFixedArrayElement( |
| TNode<FixedArray> object, Node* index_node, int additional_offset, |
| ParameterMode parameter_mode, LoadSensitivity needs_poisoning, |
| CheckBounds check_bounds) { |
| CSA_ASSERT(this, IsFixedArraySubclass(object)); |
| CSA_ASSERT(this, IsNotWeakFixedArraySubclass(object)); |
| if (NeedsBoundsCheck(check_bounds)) { |
| FixedArrayBoundsCheck(object, index_node, additional_offset, |
| parameter_mode); |
| } |
| TNode<MaybeObject> element = |
| LoadArrayElement(object, FixedArray::kHeaderSize, index_node, |
| additional_offset, parameter_mode, needs_poisoning); |
| return CAST(element); |
| } |
| |
| TNode<Object> CodeStubAssembler::LoadPropertyArrayElement( |
| TNode<PropertyArray> object, SloppyTNode<IntPtrT> index) { |
| int additional_offset = 0; |
| ParameterMode parameter_mode = INTPTR_PARAMETERS; |
| LoadSensitivity needs_poisoning = LoadSensitivity::kSafe; |
| return CAST(LoadArrayElement(object, PropertyArray::kHeaderSize, index, |
| additional_offset, parameter_mode, |
| needs_poisoning)); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::LoadPropertyArrayLength( |
| TNode<PropertyArray> object) { |
| TNode<IntPtrT> value = |
| LoadAndUntagObjectField(object, PropertyArray::kLengthAndHashOffset); |
| return Signed(DecodeWord<PropertyArray::LengthField>(value)); |
| } |
| |
| TNode<RawPtrT> CodeStubAssembler::LoadJSTypedArrayBackingStore( |
| TNode<JSTypedArray> typed_array) { |
| // Backing store = external_pointer + base_pointer. |
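// For off-heap buffers base_pointer is the Smi zero (word value 0), so the
// sum below reduces to the external pointer; for on-heap typed arrays the two
// parts combine to the address of the embedded data.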
| Node* external_pointer = |
| LoadObjectField(typed_array, JSTypedArray::kExternalPointerOffset, |
| MachineType::Pointer()); |
| Node* base_pointer = |
| LoadObjectField(typed_array, JSTypedArray::kBasePointerOffset); |
| return UncheckedCast<RawPtrT>( |
| IntPtrAdd(external_pointer, BitcastTaggedToWord(base_pointer))); |
| } |
| |
| TNode<BigInt> CodeStubAssembler::LoadFixedBigInt64ArrayElementAsTagged( |
| SloppyTNode<RawPtrT> data_pointer, SloppyTNode<IntPtrT> offset) { |
| if (Is64()) { |
| TNode<IntPtrT> value = UncheckedCast<IntPtrT>( |
| Load(MachineType::IntPtr(), data_pointer, offset)); |
| return BigIntFromInt64(value); |
| } else { |
| DCHECK(!Is64()); |
| #if defined(V8_TARGET_BIG_ENDIAN) |
| TNode<IntPtrT> high = UncheckedCast<IntPtrT>( |
| Load(MachineType::UintPtr(), data_pointer, offset)); |
| TNode<IntPtrT> low = UncheckedCast<IntPtrT>( |
| Load(MachineType::UintPtr(), data_pointer, |
| Int32Add(TruncateIntPtrToInt32(offset), |
| Int32Constant(kSystemPointerSize)))); |
| #else |
| TNode<IntPtrT> low = UncheckedCast<IntPtrT>( |
| Load(MachineType::UintPtr(), data_pointer, offset)); |
| TNode<IntPtrT> high = UncheckedCast<IntPtrT>( |
| Load(MachineType::UintPtr(), data_pointer, |
| Int32Add(TruncateIntPtrToInt32(offset), |
| Int32Constant(kSystemPointerSize)))); |
| #endif |
| return BigIntFromInt32Pair(low, high); |
| } |
| } |
| |
| TNode<BigInt> CodeStubAssembler::BigIntFromInt32Pair(TNode<IntPtrT> low, |
| TNode<IntPtrT> high) { |
| DCHECK(!Is64()); |
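// Converts a 64-bit two's-complement value, passed as two word-sized halves,
// into a canonical BigInt: zero becomes the zero BigInt, otherwise the
// magnitude is stored in one or two digits with the sign in the bitfield.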
| TVARIABLE(BigInt, var_result); |
| TVARIABLE(Word32T, var_sign, Int32Constant(BigInt::SignBits::encode(false))); |
| TVARIABLE(IntPtrT, var_high, high); |
| TVARIABLE(IntPtrT, var_low, low); |
| Label high_zero(this), negative(this), allocate_one_digit(this), |
| allocate_two_digits(this), if_zero(this), done(this); |
| |
| GotoIf(WordEqual(var_high.value(), IntPtrConstant(0)), &high_zero); |
| Branch(IntPtrLessThan(var_high.value(), IntPtrConstant(0)), &negative, |
| &allocate_two_digits); |
| |
| BIND(&high_zero); |
| Branch(WordEqual(var_low.value(), IntPtrConstant(0)), &if_zero, |
| &allocate_one_digit); |
| |
| BIND(&negative); |
| { |
| var_sign = Int32Constant(BigInt::SignBits::encode(true)); |
| // We must negate the value by computing "0 - (high|low)", performing |
| // both parts of the subtraction separately and manually taking care |
| // of the carry bit (which is 1 iff low != 0). |
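// For example, (high, low) = (0xFFFFFFFF, 0xFFFFFFFF) encodes -1: the high
// half becomes 0 - 0xFFFFFFFF - 1 = 0 and the low half 0 - 0xFFFFFFFF = 1
// (mod 2^32), so the magnitude 1 fits in a single digit.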
| var_high = IntPtrSub(IntPtrConstant(0), var_high.value()); |
| Label carry(this), no_carry(this); |
| Branch(WordEqual(var_low.value(), IntPtrConstant(0)), &no_carry, &carry); |
| BIND(&carry); |
| var_high = IntPtrSub(var_high.value(), IntPtrConstant(1)); |
| Goto(&no_carry); |
| BIND(&no_carry); |
| var_low = IntPtrSub(IntPtrConstant(0), var_low.value()); |
| // var_high was non-zero going into this block, but subtracting the |
| // carry bit from it could bring us back onto the "one digit" path. |
| Branch(WordEqual(var_high.value(), IntPtrConstant(0)), &allocate_one_digit, |
| &allocate_two_digits); |
| } |
| |
| BIND(&allocate_one_digit); |
| { |
| var_result = AllocateRawBigInt(IntPtrConstant(1)); |
| StoreBigIntBitfield(var_result.value(), |
| Word32Or(var_sign.value(), |
| Int32Constant(BigInt::LengthBits::encode(1)))); |
| StoreBigIntDigit(var_result.value(), 0, Unsigned(var_low.value())); |
| Goto(&done); |
| } |
| |
| BIND(&allocate_two_digits); |
| { |
| var_result = AllocateRawBigInt(IntPtrConstant(2)); |
| StoreBigIntBitfield(var_result.value(), |
| Word32Or(var_sign.value(), |
| Int32Constant(BigInt::LengthBits::encode(2)))); |
| StoreBigIntDigit(var_result.value(), 0, Unsigned(var_low.value())); |
| StoreBigIntDigit(var_result.value(), 1, Unsigned(var_high.value())); |
| Goto(&done); |
| } |
| |
| BIND(&if_zero); |
| var_result = AllocateBigInt(IntPtrConstant(0)); |
| Goto(&done); |
| |
| BIND(&done); |
| return var_result.value(); |
| } |
| |
| TNode<BigInt> CodeStubAssembler::BigIntFromInt64(TNode<IntPtrT> value) { |
| DCHECK(Is64()); |
| TVARIABLE(BigInt, var_result); |
| Label done(this), if_positive(this), if_negative(this), if_zero(this); |
| GotoIf(WordEqual(value, IntPtrConstant(0)), &if_zero); |
| var_result = AllocateRawBigInt(IntPtrConstant(1)); |
| Branch(IntPtrGreaterThan(value, IntPtrConstant(0)), &if_positive, |
| &if_negative); |
| |
| BIND(&if_positive); |
| { |
| StoreBigIntBitfield(var_result.value(), |
| Int32Constant(BigInt::SignBits::encode(false) | |
| BigInt::LengthBits::encode(1))); |
| StoreBigIntDigit(var_result.value(), 0, Unsigned(value)); |
| Goto(&done); |
| } |
| |
| BIND(&if_negative); |
| { |
| StoreBigIntBitfield(var_result.value(), |
| Int32Constant(BigInt::SignBits::encode(true) | |
| BigInt::LengthBits::encode(1))); |
| StoreBigIntDigit(var_result.value(), 0, |
| Unsigned(IntPtrSub(IntPtrConstant(0), value))); |
| Goto(&done); |
| } |
| |
| BIND(&if_zero); |
| { |
| var_result = AllocateBigInt(IntPtrConstant(0)); |
| Goto(&done); |
| } |
| |
| BIND(&done); |
| return var_result.value(); |
| } |
| |
TNode<BigInt> CodeStubAssembler::LoadFixedBigUint64ArrayElementAsTagged(
| SloppyTNode<RawPtrT> data_pointer, SloppyTNode<IntPtrT> offset) { |
| Label if_zero(this), done(this); |
| if (Is64()) { |
| TNode<UintPtrT> value = UncheckedCast<UintPtrT>( |
| Load(MachineType::UintPtr(), data_pointer, offset)); |
| return BigIntFromUint64(value); |
| } else { |
| DCHECK(!Is64()); |
| #if defined(V8_TARGET_BIG_ENDIAN) |
| TNode<UintPtrT> high = UncheckedCast<UintPtrT>( |
| Load(MachineType::UintPtr(), data_pointer, offset)); |
| TNode<UintPtrT> low = UncheckedCast<UintPtrT>( |
| Load(MachineType::UintPtr(), data_pointer, |
| Int32Add(TruncateIntPtrToInt32(offset), |
| Int32Constant(kSystemPointerSize)))); |
| #else |
| TNode<UintPtrT> low = UncheckedCast<UintPtrT>( |
| Load(MachineType::UintPtr(), data_pointer, offset)); |
| TNode<UintPtrT> high = UncheckedCast<UintPtrT>( |
| Load(MachineType::UintPtr(), data_pointer, |
| Int32Add(TruncateIntPtrToInt32(offset), |
| Int32Constant(kSystemPointerSize)))); |
| #endif |
| return BigIntFromUint32Pair(low, high); |
| } |
| } |
| |
| TNode<BigInt> CodeStubAssembler::BigIntFromUint32Pair(TNode<UintPtrT> low, |
| TNode<UintPtrT> high) { |
| DCHECK(!Is64()); |
| TVARIABLE(BigInt, var_result); |
| Label high_zero(this), if_zero(this), done(this); |
| |
| GotoIf(WordEqual(high, IntPtrConstant(0)), &high_zero); |
| var_result = AllocateBigInt(IntPtrConstant(2)); |
| StoreBigIntDigit(var_result.value(), 0, low); |
| StoreBigIntDigit(var_result.value(), 1, high); |
| Goto(&done); |
| |
| BIND(&high_zero); |
| GotoIf(WordEqual(low, IntPtrConstant(0)), &if_zero); |
| var_result = AllocateBigInt(IntPtrConstant(1)); |
| StoreBigIntDigit(var_result.value(), 0, low); |
| Goto(&done); |
| |
| BIND(&if_zero); |
| var_result = AllocateBigInt(IntPtrConstant(0)); |
| Goto(&done); |
| |
| BIND(&done); |
| return var_result.value(); |
| } |
| |
| TNode<BigInt> CodeStubAssembler::BigIntFromUint64(TNode<UintPtrT> value) { |
| DCHECK(Is64()); |
| TVARIABLE(BigInt, var_result); |
| Label done(this), if_zero(this); |
| GotoIf(WordEqual(value, IntPtrConstant(0)), &if_zero); |
| var_result = AllocateBigInt(IntPtrConstant(1)); |
| StoreBigIntDigit(var_result.value(), 0, value); |
| Goto(&done); |
| |
| BIND(&if_zero); |
| var_result = AllocateBigInt(IntPtrConstant(0)); |
| Goto(&done); |
| BIND(&done); |
| return var_result.value(); |
| } |
| |
| TNode<Numeric> CodeStubAssembler::LoadFixedTypedArrayElementAsTagged( |
| TNode<RawPtrT> data_pointer, Node* index_node, ElementsKind elements_kind, |
| ParameterMode parameter_mode) { |
| TNode<IntPtrT> offset = |
| ElementOffsetFromIndex(index_node, elements_kind, parameter_mode, 0); |
| switch (elements_kind) { |
| case UINT8_ELEMENTS: /* fall through */ |
| case UINT8_CLAMPED_ELEMENTS: |
| return SmiFromInt32(Load(MachineType::Uint8(), data_pointer, offset)); |
| case INT8_ELEMENTS: |
| return SmiFromInt32(Load(MachineType::Int8(), data_pointer, offset)); |
| case UINT16_ELEMENTS: |
| return SmiFromInt32(Load(MachineType::Uint16(), data_pointer, offset)); |
| case INT16_ELEMENTS: |
| return SmiFromInt32(Load(MachineType::Int16(), data_pointer, offset)); |
| case UINT32_ELEMENTS: |
| return ChangeUint32ToTagged( |
| Load(MachineType::Uint32(), data_pointer, offset)); |
| case INT32_ELEMENTS: |
| return ChangeInt32ToTagged( |
| Load(MachineType::Int32(), data_pointer, offset)); |
| case FLOAT32_ELEMENTS: |
| return AllocateHeapNumberWithValue(ChangeFloat32ToFloat64( |
| Load(MachineType::Float32(), data_pointer, offset))); |
| case FLOAT64_ELEMENTS: |
| return AllocateHeapNumberWithValue( |
| Load(MachineType::Float64(), data_pointer, offset)); |
| case BIGINT64_ELEMENTS: |
| return LoadFixedBigInt64ArrayElementAsTagged(data_pointer, offset); |
| case BIGUINT64_ELEMENTS: |
| return LoadFixedBigUint64ArrayElementAsTagged(data_pointer, offset); |
| default: |
| UNREACHABLE(); |
| } |
| } |
| |
| TNode<Numeric> CodeStubAssembler::LoadFixedTypedArrayElementAsTagged( |
| TNode<RawPtrT> data_pointer, TNode<Smi> index, |
| TNode<Int32T> elements_kind) { |
| TVARIABLE(Numeric, var_result); |
| Label done(this), if_unknown_type(this, Label::kDeferred); |
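// Build parallel arrays of elements kinds and labels, one entry per typed
// array type, and switch on the dynamic kind; each case re-dispatches to the
// compile-time-kind overload above.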
| int32_t elements_kinds[] = { |
| #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) TYPE##_ELEMENTS, |
| TYPED_ARRAYS(TYPED_ARRAY_CASE) |
| #undef TYPED_ARRAY_CASE |
| }; |
| |
| #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) Label if_##type##array(this); |
| TYPED_ARRAYS(TYPED_ARRAY_CASE) |
| #undef TYPED_ARRAY_CASE |
| |
| Label* elements_kind_labels[] = { |
| #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) &if_##type##array, |
| TYPED_ARRAYS(TYPED_ARRAY_CASE) |
| #undef TYPED_ARRAY_CASE |
| }; |
| STATIC_ASSERT(arraysize(elements_kinds) == arraysize(elements_kind_labels)); |
| |
| Switch(elements_kind, &if_unknown_type, elements_kinds, elements_kind_labels, |
| arraysize(elements_kinds)); |
| |
| BIND(&if_unknown_type); |
| Unreachable(); |
| |
| #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \ |
| BIND(&if_##type##array); \ |
| { \ |
| var_result = LoadFixedTypedArrayElementAsTagged( \ |
| data_pointer, index, TYPE##_ELEMENTS, SMI_PARAMETERS); \ |
| Goto(&done); \ |
| } |
| TYPED_ARRAYS(TYPED_ARRAY_CASE) |
| #undef TYPED_ARRAY_CASE |
| |
| BIND(&done); |
| return var_result.value(); |
| } |
| |
| void CodeStubAssembler::StoreJSTypedArrayElementFromTagged( |
| TNode<Context> context, TNode<JSTypedArray> typed_array, |
| TNode<Smi> index_node, TNode<Object> value, ElementsKind elements_kind) { |
| TNode<RawPtrT> data_pointer = LoadJSTypedArrayBackingStore(typed_array); |
| switch (elements_kind) { |
| case UINT8_ELEMENTS: |
| case UINT8_CLAMPED_ELEMENTS: |
| case INT8_ELEMENTS: |
| case UINT16_ELEMENTS: |
| case INT16_ELEMENTS: |
| StoreElement(data_pointer, elements_kind, index_node, |
| SmiToInt32(CAST(value)), SMI_PARAMETERS); |
| break; |
| case UINT32_ELEMENTS: |
| case INT32_ELEMENTS: |
| StoreElement(data_pointer, elements_kind, index_node, |
| TruncateTaggedToWord32(context, value), SMI_PARAMETERS); |
| break; |
| case FLOAT32_ELEMENTS: |
| StoreElement(data_pointer, elements_kind, index_node, |
| TruncateFloat64ToFloat32(LoadHeapNumberValue(CAST(value))), |
| SMI_PARAMETERS); |
| break; |
| case FLOAT64_ELEMENTS: |
| StoreElement(data_pointer, elements_kind, index_node, |
| LoadHeapNumberValue(CAST(value)), SMI_PARAMETERS); |
| break; |
| case BIGUINT64_ELEMENTS: |
| case BIGINT64_ELEMENTS: |
| StoreElement(data_pointer, elements_kind, index_node, |
| UncheckedCast<BigInt>(value), SMI_PARAMETERS); |
| break; |
| default: |
| UNREACHABLE(); |
| } |
| } |
| |
| TNode<MaybeObject> CodeStubAssembler::LoadFeedbackVectorSlot( |
| Node* object, Node* slot_index_node, int additional_offset, |
| ParameterMode parameter_mode) { |
| CSA_SLOW_ASSERT(this, IsFeedbackVector(object)); |
| CSA_SLOW_ASSERT(this, MatchesParameterMode(slot_index_node, parameter_mode)); |
| int32_t header_size = |
| FeedbackVector::kFeedbackSlotsOffset + additional_offset - kHeapObjectTag; |
| Node* offset = ElementOffsetFromIndex(slot_index_node, HOLEY_ELEMENTS, |
| parameter_mode, header_size); |
| CSA_SLOW_ASSERT( |
| this, IsOffsetInBounds(offset, LoadFeedbackVectorLength(CAST(object)), |
| FeedbackVector::kHeaderSize)); |
| return UncheckedCast<MaybeObject>( |
| Load(MachineType::AnyTagged(), object, offset)); |
| } |
| |
| template <typename Array> |
| TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ArrayElement( |
| TNode<Array> object, int array_header_size, Node* index_node, |
| int additional_offset, ParameterMode parameter_mode) { |
| CSA_SLOW_ASSERT(this, MatchesParameterMode(index_node, parameter_mode)); |
| DCHECK(IsAligned(additional_offset, kTaggedSize)); |
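// With 32-bit Smi values the payload occupies the upper half of the tagged
// word, so on little-endian targets the 32-bit load must start 4 bytes into
// the field; on big-endian targets the payload already comes first.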
| int endian_correction = 0; |
| #if V8_TARGET_LITTLE_ENDIAN |
| if (SmiValuesAre32Bits()) endian_correction = 4; |
| #endif |
| int32_t header_size = array_header_size + additional_offset - kHeapObjectTag + |
| endian_correction; |
| Node* offset = ElementOffsetFromIndex(index_node, HOLEY_ELEMENTS, |
| parameter_mode, header_size); |
| CSA_ASSERT(this, IsOffsetInBounds(offset, LoadArrayLength(object), |
| array_header_size + endian_correction)); |
| if (SmiValuesAre32Bits()) { |
| return UncheckedCast<Int32T>(Load(MachineType::Int32(), object, offset)); |
| } else { |
| return SmiToInt32(Load(MachineType::AnyTagged(), object, offset)); |
| } |
| } |
| |
| TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement( |
| TNode<FixedArray> object, Node* index_node, int additional_offset, |
| ParameterMode parameter_mode) { |
| CSA_SLOW_ASSERT(this, IsFixedArraySubclass(object)); |
| return LoadAndUntagToWord32ArrayElement(object, FixedArray::kHeaderSize, |
| index_node, additional_offset, |
| parameter_mode); |
| } |
| |
| TNode<MaybeObject> CodeStubAssembler::LoadWeakFixedArrayElement( |
| TNode<WeakFixedArray> object, Node* index, int additional_offset, |
| ParameterMode parameter_mode, LoadSensitivity needs_poisoning) { |
| return LoadArrayElement(object, WeakFixedArray::kHeaderSize, index, |
| additional_offset, parameter_mode, needs_poisoning); |
| } |
| |
| TNode<Float64T> CodeStubAssembler::LoadFixedDoubleArrayElement( |
| SloppyTNode<FixedDoubleArray> object, Node* index_node, |
| MachineType machine_type, int additional_offset, |
| ParameterMode parameter_mode, Label* if_hole) { |
| CSA_ASSERT(this, IsFixedDoubleArray(object)); |
| DCHECK(IsAligned(additional_offset, kTaggedSize)); |
| CSA_SLOW_ASSERT(this, MatchesParameterMode(index_node, parameter_mode)); |
| int32_t header_size = |
| FixedDoubleArray::kHeaderSize + additional_offset - kHeapObjectTag; |
| TNode<IntPtrT> offset = ElementOffsetFromIndex( |
| index_node, HOLEY_DOUBLE_ELEMENTS, parameter_mode, header_size); |
| CSA_ASSERT(this, IsOffsetInBounds( |
| offset, LoadAndUntagFixedArrayBaseLength(object), |
| FixedDoubleArray::kHeaderSize, HOLEY_DOUBLE_ELEMENTS)); |
| return LoadDoubleWithHoleCheck(object, offset, if_hole, machine_type); |
| } |
| |
| TNode<Object> CodeStubAssembler::LoadFixedArrayBaseElementAsTagged( |
| TNode<FixedArrayBase> elements, TNode<IntPtrT> index, |
| TNode<Int32T> elements_kind, Label* if_accessor, Label* if_hole) { |
| TVARIABLE(Object, var_result); |
| Label done(this), if_packed(this), if_holey(this), if_packed_double(this), |
| if_holey_double(this), if_dictionary(this, Label::kDeferred); |
| |
| int32_t kinds[] = {// Handled by if_packed. |
| PACKED_SMI_ELEMENTS, PACKED_ELEMENTS, |
| PACKED_SEALED_ELEMENTS, PACKED_FROZEN_ELEMENTS, |
| // Handled by if_holey. |
| HOLEY_SMI_ELEMENTS, HOLEY_ELEMENTS, HOLEY_SEALED_ELEMENTS, |
| HOLEY_FROZEN_ELEMENTS, |
| // Handled by if_packed_double. |
| PACKED_DOUBLE_ELEMENTS, |
| // Handled by if_holey_double. |
| HOLEY_DOUBLE_ELEMENTS}; |
Label* labels[] = {// Handled by if_packed.
&if_packed, &if_packed, &if_packed, &if_packed,
// Handled by if_holey.
&if_holey, &if_holey, &if_holey, &if_holey,
// Handled by if_packed_double.
&if_packed_double,
// Handled by if_holey_double.
&if_holey_double};
| Switch(elements_kind, &if_dictionary, kinds, labels, arraysize(kinds)); |
| |
| BIND(&if_packed); |
| { |
| var_result = LoadFixedArrayElement(CAST(elements), index, 0); |
| Goto(&done); |
| } |
| |
| BIND(&if_holey); |
| { |
| var_result = LoadFixedArrayElement(CAST(elements), index); |
| Branch(WordEqual(var_result.value(), TheHoleConstant()), if_hole, &done); |
| } |
| |
| BIND(&if_packed_double); |
| { |
| var_result = AllocateHeapNumberWithValue(LoadFixedDoubleArrayElement( |
| CAST(elements), index, MachineType::Float64())); |
| Goto(&done); |
| } |
| |
| BIND(&if_holey_double); |
| { |
| var_result = AllocateHeapNumberWithValue(LoadFixedDoubleArrayElement( |
| CAST(elements), index, MachineType::Float64(), 0, INTPTR_PARAMETERS, |
| if_hole)); |
| Goto(&done); |
| } |
| |
| BIND(&if_dictionary); |
| { |
| CSA_ASSERT(this, IsDictionaryElementsKind(elements_kind)); |
| var_result = BasicLoadNumberDictionaryElement(CAST(elements), index, |
| if_accessor, if_hole); |
| Goto(&done); |
| } |
| |
| BIND(&done); |
| return var_result.value(); |
| } |
| |
| TNode<Float64T> CodeStubAssembler::LoadDoubleWithHoleCheck( |
| SloppyTNode<Object> base, SloppyTNode<IntPtrT> offset, Label* if_hole, |
| MachineType machine_type) { |
| if (if_hole) { |
| // TODO(ishell): Compare only the upper part for the hole once the |
| // compiler is able to fold addition of already complex |offset| with |
| // |kIeeeDoubleExponentWordOffset| into one addressing mode. |
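// The hole is a dedicated NaN bit pattern (kHoleNanInt64); on 64-bit targets
// the whole word is compared, on 32-bit targets only the upper word
// (kHoleNanUpper32), which is enough to identify it.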
| if (Is64()) { |
| Node* element = Load(MachineType::Uint64(), base, offset); |
| GotoIf(Word64Equal(element, Int64Constant(kHoleNanInt64)), if_hole); |
| } else { |
| Node* element_upper = Load( |
| MachineType::Uint32(), base, |
| IntPtrAdd(offset, IntPtrConstant(kIeeeDoubleExponentWordOffset))); |
| GotoIf(Word32Equal(element_upper, Int32Constant(kHoleNanUpper32)), |
| if_hole); |
| } |
| } |
| if (machine_type.IsNone()) { |
| // This means the actual value is not needed. |
| return TNode<Float64T>(); |
| } |
| return UncheckedCast<Float64T>(Load(machine_type, base, offset)); |
| } |
| |
| TNode<Object> CodeStubAssembler::LoadContextElement( |
| SloppyTNode<Context> context, int slot_index) { |
| int offset = Context::SlotOffset(slot_index); |
| return UncheckedCast<Object>( |
| Load(MachineType::AnyTagged(), context, IntPtrConstant(offset))); |
| } |
| |
| TNode<Object> CodeStubAssembler::LoadContextElement( |
| SloppyTNode<Context> context, SloppyTNode<IntPtrT> slot_index) { |
| Node* offset = ElementOffsetFromIndex( |
| slot_index, PACKED_ELEMENTS, INTPTR_PARAMETERS, Context::SlotOffset(0)); |
| return UncheckedCast<Object>(Load(MachineType::AnyTagged(), context, offset)); |
| } |
| |
| TNode<Object> CodeStubAssembler::LoadContextElement(TNode<Context> context, |
| TNode<Smi> slot_index) { |
| Node* offset = ElementOffsetFromIndex(slot_index, PACKED_ELEMENTS, |
| SMI_PARAMETERS, Context::SlotOffset(0)); |
| return UncheckedCast<Object>(Load(MachineType::AnyTagged(), context, offset)); |
| } |
| |
| void CodeStubAssembler::StoreContextElement(SloppyTNode<Context> context, |
| int slot_index, |
| SloppyTNode<Object> value) { |
| int offset = Context::SlotOffset(slot_index); |
| Store(context, IntPtrConstant(offset), value); |
| } |
| |
| void CodeStubAssembler::StoreContextElement(SloppyTNode<Context> context, |
| SloppyTNode<IntPtrT> slot_index, |
| SloppyTNode<Object> value) { |
| Node* offset = IntPtrAdd(TimesTaggedSize(slot_index), |
| IntPtrConstant(Context::SlotOffset(0))); |
| Store(context, offset, value); |
| } |
| |
| void CodeStubAssembler::StoreContextElementNoWriteBarrier( |
| SloppyTNode<Context> context, int slot_index, SloppyTNode<Object> value) { |
| int offset = Context::SlotOffset(slot_index); |
| StoreNoWriteBarrier(MachineRepresentation::kTagged, context, |
| IntPtrConstant(offset), value); |
| } |
| |
| TNode<Context> CodeStubAssembler::LoadNativeContext( |
| SloppyTNode<Context> context) { |
| return UncheckedCast<Context>( |
| LoadContextElement(context, Context::NATIVE_CONTEXT_INDEX)); |
| } |
| |
| TNode<Context> CodeStubAssembler::LoadModuleContext( |
| SloppyTNode<Context> context) { |
| Node* module_map = LoadRoot(RootIndex::kModuleContextMap); |
| Variable cur_context(this, MachineRepresentation::kTaggedPointer); |
| cur_context.Bind(context); |
| |
| Label context_found(this); |
| |
| Variable* context_search_loop_variables[1] = {&cur_context}; |
| Label context_search(this, 1, context_search_loop_variables); |
| |
| // Loop until cur_context->map() is module_map. |
| Goto(&context_search); |
| BIND(&context_search); |
| { |
| CSA_ASSERT(this, Word32BinaryNot(IsNativeContext(cur_context.value()))); |
| GotoIf(WordEqual(LoadMap(cur_context.value()), module_map), &context_found); |
| |
| cur_context.Bind( |
| LoadContextElement(cur_context.value(), Context::PREVIOUS_INDEX)); |
| Goto(&context_search); |
| } |
| |
| BIND(&context_found); |
| return UncheckedCast<Context>(cur_context.value()); |
| } |
| |
| TNode<Map> CodeStubAssembler::LoadJSArrayElementsMap( |
| SloppyTNode<Int32T> kind, SloppyTNode<Context> native_context) { |
| CSA_ASSERT(this, IsFastElementsKind(kind)); |
| CSA_ASSERT(this, IsNativeContext(native_context)); |
| Node* offset = IntPtrAdd(IntPtrConstant(Context::FIRST_JS_ARRAY_MAP_SLOT), |
| ChangeInt32ToIntPtr(kind)); |
| return UncheckedCast<Map>(LoadContextElement(native_context, offset)); |
| } |
| |
| TNode<Map> CodeStubAssembler::LoadJSArrayElementsMap( |
| ElementsKind kind, SloppyTNode<Context> native_context) { |
| CSA_ASSERT(this, IsNativeContext(native_context)); |
| return UncheckedCast<Map>( |
| LoadContextElement(native_context, Context::ArrayMapIndex(kind))); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::IsGeneratorFunction( |
| TNode<JSFunction> function) { |
| TNode<SharedFunctionInfo> const shared_function_info = |
| CAST(LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset)); |
| |
| TNode<Uint32T> const function_kind = |
| DecodeWord32<SharedFunctionInfo::FunctionKindBits>(LoadObjectField( |
| shared_function_info, SharedFunctionInfo::kFlagsOffset, |
| MachineType::Uint32())); |
| |
| return TNode<BoolT>::UncheckedCast(Word32Or( |
| Word32Or( |
| Word32Or( |
| Word32Equal(function_kind, |
| Int32Constant(FunctionKind::kAsyncGeneratorFunction)), |
| Word32Equal( |
| function_kind, |
| Int32Constant(FunctionKind::kAsyncConciseGeneratorMethod))), |
| Word32Equal(function_kind, |
| Int32Constant(FunctionKind::kGeneratorFunction))), |
| Word32Equal(function_kind, |
| Int32Constant(FunctionKind::kConciseGeneratorMethod)))); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::HasPrototypeProperty(TNode<JSFunction> function, |
| TNode<Map> map) { |
| // (has_prototype_slot() && IsConstructor()) || |
| // IsGeneratorFunction(shared()->kind()) |
| uint32_t mask = |
| Map::HasPrototypeSlotBit::kMask | Map::IsConstructorBit::kMask; |
| return TNode<BoolT>::UncheckedCast( |
| Word32Or(IsAllSetWord32(LoadMapBitField(map), mask), |
| IsGeneratorFunction(function))); |
| } |
| |
| void CodeStubAssembler::GotoIfPrototypeRequiresRuntimeLookup( |
| TNode<JSFunction> function, TNode<Map> map, Label* runtime) { |
| // !has_prototype_property() || has_non_instance_prototype() |
| GotoIfNot(HasPrototypeProperty(function, map), runtime); |
| GotoIf(IsSetWord32<Map::HasNonInstancePrototypeBit>(LoadMapBitField(map)), |
| runtime); |
| } |
| |
| Node* CodeStubAssembler::LoadJSFunctionPrototype(Node* function, |
| Label* if_bailout) { |
| CSA_ASSERT(this, TaggedIsNotSmi(function)); |
| CSA_ASSERT(this, IsJSFunction(function)); |
| CSA_ASSERT(this, IsFunctionWithPrototypeSlotMap(LoadMap(function))); |
| CSA_ASSERT(this, IsClearWord32<Map::HasNonInstancePrototypeBit>( |
| LoadMapBitField(LoadMap(function)))); |
| Node* proto_or_map = |
| LoadObjectField(function, JSFunction::kPrototypeOrInitialMapOffset); |
| GotoIf(IsTheHole(proto_or_map), if_bailout); |
| |
| VARIABLE(var_result, MachineRepresentation::kTagged, proto_or_map); |
| Label done(this, &var_result); |
| GotoIfNot(IsMap(proto_or_map), &done); |
| |
| var_result.Bind(LoadMapPrototype(proto_or_map)); |
| Goto(&done); |
| |
| BIND(&done); |
| return var_result.value(); |
| } |
| |
| TNode<BytecodeArray> CodeStubAssembler::LoadSharedFunctionInfoBytecodeArray( |
| SloppyTNode<SharedFunctionInfo> shared) { |
| Node* function_data = |
| LoadObjectField(shared, SharedFunctionInfo::kFunctionDataOffset); |
| |
| VARIABLE(var_result, MachineRepresentation::kTagged, function_data); |
| Label done(this, &var_result); |
| |
| GotoIfNot(HasInstanceType(function_data, INTERPRETER_DATA_TYPE), &done); |
| Node* bytecode_array = |
| LoadObjectField(function_data, InterpreterData::kBytecodeArrayOffset); |
| var_result.Bind(bytecode_array); |
| Goto(&done); |
| |
| BIND(&done); |
| return CAST(var_result.value()); |
| } |
| |
| void CodeStubAssembler::StoreObjectByteNoWriteBarrier(TNode<HeapObject> object, |
| int offset, |
| TNode<Word32T> value) { |
| StoreNoWriteBarrier(MachineRepresentation::kWord8, object, |
| IntPtrConstant(offset - kHeapObjectTag), value); |
| } |
| |
|