| // Copyright 2016 the V8 project authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style license that can be |
| // found in the LICENSE file. |
| |
| #include "src/codegen/code-stub-assembler.h" |
| |
| #include "include/v8-internal.h" |
| #include "src/base/macros.h" |
| #include "src/codegen/code-factory.h" |
| #include "src/codegen/tnode.h" |
| #include "src/common/globals.h" |
| #include "src/execution/frames-inl.h" |
| #include "src/execution/frames.h" |
| #include "src/execution/protectors.h" |
| #include "src/heap/heap-inl.h" // For MemoryChunk. TODO(jkummerow): Drop. |
| #include "src/heap/memory-chunk.h" |
| #include "src/logging/counters.h" |
| #include "src/objects/api-callbacks.h" |
| #include "src/objects/cell.h" |
| #include "src/objects/descriptor-array.h" |
| #include "src/objects/function-kind.h" |
| #include "src/objects/heap-number.h" |
| #include "src/objects/js-generator.h" |
| #include "src/objects/oddball.h" |
| #include "src/objects/ordered-hash-table-inl.h" |
| #include "src/objects/property-cell.h" |
| #include "src/roots/roots.h" |
| #include "src/wasm/wasm-objects.h" |
| |
| namespace v8 { |
| namespace internal { |
| |
| using compiler::Node; |
| |
| CodeStubAssembler::CodeStubAssembler(compiler::CodeAssemblerState* state) |
| : compiler::CodeAssembler(state), |
| TorqueGeneratedExportedMacrosAssembler(state) { |
| if (DEBUG_BOOL && FLAG_csa_trap_on_node != nullptr) { |
| HandleBreakOnNode(); |
| } |
| } |
| |
| void CodeStubAssembler::HandleBreakOnNode() { |
| // FLAG_csa_trap_on_node should be in the form "STUB,NODE" where STUB is a |
| // string specifying the name of a stub and NODE is a number specifying the |
| // node id. |
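| // For example, running with --csa_trap_on_node="MyStub,42" (where MyStub is |
| // whatever name the stub was created with) calls BreakOnNode(42) while that |
| // stub is being built. |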
| const char* name = state()->name(); |
| size_t name_length = strlen(name); |
| if (strncmp(FLAG_csa_trap_on_node, name, name_length) != 0) { |
| // Different name. |
| return; |
| } |
| size_t option_length = strlen(FLAG_csa_trap_on_node); |
| if (option_length < name_length + 2 || |
| FLAG_csa_trap_on_node[name_length] != ',') { |
| // Option is too short. |
| return; |
| } |
| const char* start = &FLAG_csa_trap_on_node[name_length + 1]; |
| char* end; |
| int node_id = static_cast<int>(strtol(start, &end, 10)); |
| if (start == end) { |
| // Bad node id. |
| return; |
| } |
| BreakOnNode(node_id); |
| } |
| |
| void CodeStubAssembler::Assert(const BranchGenerator& branch, |
| const char* message, const char* file, int line, |
| std::initializer_list<ExtraNode> extra_nodes) { |
| #if defined(DEBUG) |
| if (FLAG_debug_code) { |
| Check(branch, message, file, line, extra_nodes); |
| } |
| #endif |
| } |
| |
| void CodeStubAssembler::Assert(const NodeGenerator<BoolT>& condition_body, |
| const char* message, const char* file, int line, |
| std::initializer_list<ExtraNode> extra_nodes) { |
| #if defined(DEBUG) |
| if (FLAG_debug_code) { |
| Check(condition_body, message, file, line, extra_nodes); |
| } |
| #endif |
| } |
| |
| void CodeStubAssembler::Assert(TNode<Word32T> condition_node, |
| const char* message, const char* file, int line, |
| std::initializer_list<ExtraNode> extra_nodes) { |
| #if defined(DEBUG) |
| if (FLAG_debug_code) { |
| Check(condition_node, message, file, line, extra_nodes); |
| } |
| #endif |
| } |
| |
| void CodeStubAssembler::Check(const BranchGenerator& branch, |
| const char* message, const char* file, int line, |
| std::initializer_list<ExtraNode> extra_nodes) { |
| Label ok(this); |
| Label not_ok(this, Label::kDeferred); |
| if (message != nullptr && FLAG_code_comments) { |
| Comment("[ Assert: ", message); |
| } else { |
| Comment("[ Assert"); |
| } |
| branch(&ok, ¬_ok); |
| |
| BIND(¬_ok); |
| std::vector<FileAndLine> file_and_line; |
| if (file != nullptr) { |
| file_and_line.push_back({file, line}); |
| } |
| FailAssert(message, file_and_line, extra_nodes); |
| |
| BIND(&ok); |
| Comment("] Assert"); |
| } |
| |
| void CodeStubAssembler::Check(const NodeGenerator<BoolT>& condition_body, |
| const char* message, const char* file, int line, |
| std::initializer_list<ExtraNode> extra_nodes) { |
| BranchGenerator branch = [=](Label* ok, Label* not_ok) { |
| TNode<BoolT> condition = condition_body(); |
| Branch(condition, ok, not_ok); |
| }; |
| |
| Check(branch, message, file, line, extra_nodes); |
| } |
| |
| void CodeStubAssembler::Check(TNode<Word32T> condition_node, |
| const char* message, const char* file, int line, |
| std::initializer_list<ExtraNode> extra_nodes) { |
| BranchGenerator branch = [=](Label* ok, Label* not_ok) { |
| Branch(condition_node, ok, not_ok); |
| }; |
| |
| Check(branch, message, file, line, extra_nodes); |
| } |
| |
| void CodeStubAssembler::IncrementCallCount( |
| TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot_id) { |
| Comment("increment call count"); |
| TNode<Smi> call_count = |
| CAST(LoadFeedbackVectorSlot(feedback_vector, slot_id, kTaggedSize)); |
| // The lowest {FeedbackNexus::CallCountField::kShift} bits of the call |
| // count are used as flags. To increment the call count by 1 we hence |
| // have to increment by 1 << {FeedbackNexus::CallCountField::kShift}. |
| TNode<Smi> new_count = SmiAdd( |
| call_count, SmiConstant(1 << FeedbackNexus::CallCountField::kShift)); |
| // Count is Smi, so we don't need a write barrier. |
| StoreFeedbackVectorSlot(feedback_vector, slot_id, new_count, |
| SKIP_WRITE_BARRIER, kTaggedSize); |
| } |
| |
| void CodeStubAssembler::FastCheck(TNode<BoolT> condition) { |
| Label ok(this), not_ok(this, Label::kDeferred); |
| Branch(condition, &ok, ¬_ok); |
| BIND(¬_ok); |
| Unreachable(); |
| BIND(&ok); |
| } |
| |
| void CodeStubAssembler::FailAssert( |
| const char* message, const std::vector<FileAndLine>& files_and_lines, |
| std::initializer_list<ExtraNode> extra_nodes) { |
| DCHECK_NOT_NULL(message); |
| EmbeddedVector<char, 1024> chars; |
| std::stringstream stream; |
| for (auto it = files_and_lines.rbegin(); it != files_and_lines.rend(); ++it) { |
| if (it->first != nullptr) { |
| stream << " [" << it->first << ":" << it->second << "]"; |
| #ifndef DEBUG |
| // To limit the size of these strings in release builds, we include only |
| // the innermost macro's file name and line number. |
| break; |
| #endif |
| } |
| } |
| std::string files_and_lines_text = stream.str(); |
| if (files_and_lines_text.size() != 0) { |
| SNPrintF(chars, "%s%s", message, files_and_lines_text.c_str()); |
| message = chars.begin(); |
| } |
| TNode<String> message_node = StringConstant(message); |
| |
| #ifdef DEBUG |
| // Only print the extra nodes in debug builds. |
| for (auto& node : extra_nodes) { |
| CallRuntime(Runtime::kPrintWithNameForAssert, SmiConstant(0), |
| StringConstant(node.second), node.first); |
| } |
| #endif |
| |
| AbortCSAAssert(message_node); |
| Unreachable(); |
| } |
| |
| TNode<Int32T> CodeStubAssembler::SelectInt32Constant(TNode<BoolT> condition, |
| int true_value, |
| int false_value) { |
| return SelectConstant<Int32T>(condition, Int32Constant(true_value), |
| Int32Constant(false_value)); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::SelectIntPtrConstant(TNode<BoolT> condition, |
| int true_value, |
| int false_value) { |
| return SelectConstant<IntPtrT>(condition, IntPtrConstant(true_value), |
| IntPtrConstant(false_value)); |
| } |
| |
| TNode<Oddball> CodeStubAssembler::SelectBooleanConstant( |
| TNode<BoolT> condition) { |
| return SelectConstant<Oddball>(condition, TrueConstant(), FalseConstant()); |
| } |
| |
| TNode<Smi> CodeStubAssembler::SelectSmiConstant(TNode<BoolT> condition, |
| Smi true_value, |
| Smi false_value) { |
| return SelectConstant<Smi>(condition, SmiConstant(true_value), |
| SmiConstant(false_value)); |
| } |
| |
| TNode<Smi> CodeStubAssembler::NoContextConstant() { |
| return SmiConstant(Context::kNoContext); |
| } |
| |
| #define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \ |
| TNode<std::remove_pointer<std::remove_reference<decltype( \ |
| std::declval<Heap>().rootAccessorName())>::type>::type> \ |
| CodeStubAssembler::name##Constant() { \ |
| return UncheckedCast<std::remove_pointer<std::remove_reference<decltype( \ |
| std::declval<Heap>().rootAccessorName())>::type>::type>( \ |
| LoadRoot(RootIndex::k##rootIndexName)); \ |
| } |
| HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR) |
| #undef HEAP_CONSTANT_ACCESSOR |
| |
| #define HEAP_CONSTANT_ACCESSOR(rootIndexName, rootAccessorName, name) \ |
| TNode<std::remove_pointer<std::remove_reference<decltype( \ |
| std::declval<ReadOnlyRoots>().rootAccessorName())>::type>::type> \ |
| CodeStubAssembler::name##Constant() { \ |
| return UncheckedCast<std::remove_pointer<std::remove_reference<decltype( \ |
| std::declval<ReadOnlyRoots>().rootAccessorName())>::type>::type>( \ |
| LoadRoot(RootIndex::k##rootIndexName)); \ |
| } |
| HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_ACCESSOR) |
| #undef HEAP_CONSTANT_ACCESSOR |
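| // Each HEAP_CONSTANT_ACCESSOR entry above expands, roughly, to a small |
| // accessor that loads the corresponding root and casts it to the static type |
| // of the Heap / ReadOnlyRoots getter; for instance, the UndefinedValue entry |
| // yields TNode<Oddball> UndefinedConstant(), loading RootIndex::kUndefinedValue. |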
| |
| #define HEAP_CONSTANT_TEST(rootIndexName, rootAccessorName, name) \ |
| TNode<BoolT> CodeStubAssembler::Is##name(SloppyTNode<Object> value) { \ |
| return TaggedEqual(value, name##Constant()); \ |
| } \ |
| TNode<BoolT> CodeStubAssembler::IsNot##name(SloppyTNode<Object> value) { \ |
| return TaggedNotEqual(value, name##Constant()); \ |
| } |
| HEAP_IMMOVABLE_OBJECT_LIST(HEAP_CONSTANT_TEST) |
| #undef HEAP_CONSTANT_TEST |
| |
| TNode<BInt> CodeStubAssembler::BIntConstant(int value) { |
| #if defined(BINT_IS_SMI) |
| return SmiConstant(value); |
| #elif defined(BINT_IS_INTPTR) |
| return IntPtrConstant(value); |
| #else |
| #error Unknown architecture. |
| #endif |
| } |
| |
| template <> |
| TNode<Smi> CodeStubAssembler::IntPtrOrSmiConstant<Smi>(int value) { |
| return SmiConstant(value); |
| } |
| |
| template <> |
| TNode<IntPtrT> CodeStubAssembler::IntPtrOrSmiConstant<IntPtrT>(int value) { |
| return IntPtrConstant(value); |
| } |
| |
| template <> |
| TNode<UintPtrT> CodeStubAssembler::IntPtrOrSmiConstant<UintPtrT>(int value) { |
| return Unsigned(IntPtrConstant(value)); |
| } |
| |
| template <> |
| TNode<RawPtrT> CodeStubAssembler::IntPtrOrSmiConstant<RawPtrT>(int value) { |
| return ReinterpretCast<RawPtrT>(IntPtrConstant(value)); |
| } |
| |
| bool CodeStubAssembler::TryGetIntPtrOrSmiConstantValue( |
| TNode<Smi> maybe_constant, int* value) { |
| Smi smi_constant; |
| if (ToSmiConstant(maybe_constant, &smi_constant)) { |
| *value = Smi::ToInt(smi_constant); |
| return true; |
| } |
| return false; |
| } |
| |
| bool CodeStubAssembler::TryGetIntPtrOrSmiConstantValue( |
| TNode<IntPtrT> maybe_constant, int* value) { |
| int32_t int32_constant; |
| if (ToInt32Constant(maybe_constant, &int32_constant)) { |
| *value = int32_constant; |
| return true; |
| } |
| return false; |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::IntPtrRoundUpToPowerOfTwo32( |
| TNode<IntPtrT> value) { |
| Comment("IntPtrRoundUpToPowerOfTwo32"); |
| CSA_ASSERT(this, UintPtrLessThanOrEqual(value, IntPtrConstant(0x80000000u))); |
| value = Signed(IntPtrSub(value, IntPtrConstant(1))); |
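| // The loop below smears the highest set bit of {value} into every lower bit |
| // position, so adding 1 afterwards yields the next power of two |
| // (e.g. 41 - 1 = 40 smears to 63, and 63 + 1 = 64). |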
| for (int i = 1; i <= 16; i *= 2) { |
| value = Signed(WordOr(value, WordShr(value, IntPtrConstant(i)))); |
| } |
| return Signed(IntPtrAdd(value, IntPtrConstant(1))); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::WordIsPowerOfTwo(SloppyTNode<IntPtrT> value) { |
| intptr_t constant; |
| if (ToIntPtrConstant(value, &constant)) { |
| return BoolConstant(base::bits::IsPowerOfTwo(constant)); |
| } |
| // value && !(value & (value - 1)) |
| return IntPtrEqual( |
| Select<IntPtrT>( |
| IntPtrEqual(value, IntPtrConstant(0)), |
| [=] { return IntPtrConstant(1); }, |
| [=] { return WordAnd(value, IntPtrSub(value, IntPtrConstant(1))); }), |
| IntPtrConstant(0)); |
| } |
| |
| TNode<Float64T> CodeStubAssembler::Float64Round(SloppyTNode<Float64T> x) { |
| TNode<Float64T> one = Float64Constant(1.0); |
| TNode<Float64T> one_half = Float64Constant(0.5); |
| |
| Label return_x(this); |
| |
| // Round up {x} towards Infinity. |
| TVARIABLE(Float64T, var_x, Float64Ceil(x)); |
| |
| GotoIf(Float64LessThanOrEqual(Float64Sub(var_x.value(), one_half), x), |
| &return_x); |
| var_x = Float64Sub(var_x.value(), one); |
| Goto(&return_x); |
| |
| BIND(&return_x); |
| return TNode<Float64T>::UncheckedCast(var_x.value()); |
| } |
| |
| TNode<Float64T> CodeStubAssembler::Float64Ceil(SloppyTNode<Float64T> x) { |
| if (IsFloat64RoundUpSupported()) { |
| return Float64RoundUp(x); |
| } |
| |
| TNode<Float64T> one = Float64Constant(1.0); |
| TNode<Float64T> zero = Float64Constant(0.0); |
| TNode<Float64T> two_52 = Float64Constant(4503599627370496.0E0); |
| TNode<Float64T> minus_two_52 = Float64Constant(-4503599627370496.0E0); |
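| // Adding and then subtracting 2^52 rounds the magnitude to an integer, |
| // because doubles with magnitude >= 2^52 cannot represent fractional bits. |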
| |
| TVARIABLE(Float64T, var_x, x); |
| Label return_x(this), return_minus_x(this); |
| |
| // Check if {x} is greater than zero. |
| Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this); |
| Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero, |
| &if_xnotgreaterthanzero); |
| |
| BIND(&if_xgreaterthanzero); |
| { |
| // Just return {x} unless it's in the range ]0,2^52[. |
| GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x); |
| |
| // Round positive {x} towards Infinity. |
| var_x = Float64Sub(Float64Add(two_52, x), two_52); |
| GotoIfNot(Float64LessThan(var_x.value(), x), &return_x); |
| var_x = Float64Add(var_x.value(), one); |
| Goto(&return_x); |
| } |
| |
| BIND(&if_xnotgreaterthanzero); |
| { |
| // Just return {x} unless it's in the range ]-2^52,0[. |
| GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x); |
| GotoIfNot(Float64LessThan(x, zero), &return_x); |
| |
| // Compute Floor(-x) by rounding the negated {x} towards -Infinity, then |
| // return the result negated to obtain Ceil(x). |
| TNode<Float64T> minus_x = Float64Neg(x); |
| var_x = Float64Sub(Float64Add(two_52, minus_x), two_52); |
| GotoIfNot(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x); |
| var_x = Float64Sub(var_x.value(), one); |
| Goto(&return_minus_x); |
| } |
| |
| BIND(&return_minus_x); |
| var_x = Float64Neg(var_x.value()); |
| Goto(&return_x); |
| |
| BIND(&return_x); |
| return TNode<Float64T>::UncheckedCast(var_x.value()); |
| } |
| |
| TNode<Float64T> CodeStubAssembler::Float64Floor(SloppyTNode<Float64T> x) { |
| if (IsFloat64RoundDownSupported()) { |
| return Float64RoundDown(x); |
| } |
| |
| TNode<Float64T> one = Float64Constant(1.0); |
| TNode<Float64T> zero = Float64Constant(0.0); |
| TNode<Float64T> two_52 = Float64Constant(4503599627370496.0E0); |
| TNode<Float64T> minus_two_52 = Float64Constant(-4503599627370496.0E0); |
| |
| TVARIABLE(Float64T, var_x, x); |
| Label return_x(this), return_minus_x(this); |
| |
| // Check if {x} is greater than zero. |
| Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this); |
| Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero, |
| &if_xnotgreaterthanzero); |
| |
| BIND(&if_xgreaterthanzero); |
| { |
| // Just return {x} unless it's in the range ]0,2^52[. |
| GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x); |
| |
| // Round positive {x} towards -Infinity. |
| var_x = Float64Sub(Float64Add(two_52, x), two_52); |
| GotoIfNot(Float64GreaterThan(var_x.value(), x), &return_x); |
| var_x = Float64Sub(var_x.value(), one); |
| Goto(&return_x); |
| } |
| |
| BIND(&if_xnotgreaterthanzero); |
| { |
| // Just return {x} unless it's in the range ]-2^52,0[. |
| GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x); |
| GotoIfNot(Float64LessThan(x, zero), &return_x); |
| |
| // Compute Ceil(-x) by rounding the negated {x} towards Infinity, then |
| // return the result negated to obtain Floor(x). |
| TNode<Float64T> minus_x = Float64Neg(x); |
| var_x = Float64Sub(Float64Add(two_52, minus_x), two_52); |
| GotoIfNot(Float64LessThan(var_x.value(), minus_x), &return_minus_x); |
| var_x = Float64Add(var_x.value(), one); |
| Goto(&return_minus_x); |
| } |
| |
| BIND(&return_minus_x); |
| var_x = Float64Neg(var_x.value()); |
| Goto(&return_x); |
| |
| BIND(&return_x); |
| return TNode<Float64T>::UncheckedCast(var_x.value()); |
| } |
| |
| TNode<Float64T> CodeStubAssembler::Float64RoundToEven(SloppyTNode<Float64T> x) { |
| if (IsFloat64RoundTiesEvenSupported()) { |
| return Float64RoundTiesEven(x); |
| } |
| // See ES#sec-touint8clamp for details. |
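| // Ties go to the even neighbor: e.g. 2.5 rounds to 2 and 3.5 rounds to 4. |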
| TNode<Float64T> f = Float64Floor(x); |
| TNode<Float64T> f_and_half = Float64Add(f, Float64Constant(0.5)); |
| |
| TVARIABLE(Float64T, var_result); |
| Label return_f(this), return_f_plus_one(this), done(this); |
| |
| GotoIf(Float64LessThan(f_and_half, x), &return_f_plus_one); |
| GotoIf(Float64LessThan(x, f_and_half), &return_f); |
| { |
| TNode<Float64T> f_mod_2 = Float64Mod(f, Float64Constant(2.0)); |
| Branch(Float64Equal(f_mod_2, Float64Constant(0.0)), &return_f, |
| &return_f_plus_one); |
| } |
| |
| BIND(&return_f); |
| var_result = f; |
| Goto(&done); |
| |
| BIND(&return_f_plus_one); |
| var_result = Float64Add(f, Float64Constant(1.0)); |
| Goto(&done); |
| |
| BIND(&done); |
| return TNode<Float64T>::UncheckedCast(var_result.value()); |
| } |
| |
| TNode<Float64T> CodeStubAssembler::Float64Trunc(SloppyTNode<Float64T> x) { |
| if (IsFloat64RoundTruncateSupported()) { |
| return Float64RoundTruncate(x); |
| } |
| |
| TNode<Float64T> one = Float64Constant(1.0); |
| TNode<Float64T> zero = Float64Constant(0.0); |
| TNode<Float64T> two_52 = Float64Constant(4503599627370496.0E0); |
| TNode<Float64T> minus_two_52 = Float64Constant(-4503599627370496.0E0); |
| |
| TVARIABLE(Float64T, var_x, x); |
| Label return_x(this), return_minus_x(this); |
| |
| // Check if {x} is greater than 0. |
| Label if_xgreaterthanzero(this), if_xnotgreaterthanzero(this); |
| Branch(Float64GreaterThan(x, zero), &if_xgreaterthanzero, |
| &if_xnotgreaterthanzero); |
| |
| BIND(&if_xgreaterthanzero); |
| { |
| if (IsFloat64RoundDownSupported()) { |
| var_x = Float64RoundDown(x); |
| } else { |
| // Just return {x} unless it's in the range ]0,2^52[. |
| GotoIf(Float64GreaterThanOrEqual(x, two_52), &return_x); |
| |
| // Round positive {x} towards -Infinity. |
| var_x = Float64Sub(Float64Add(two_52, x), two_52); |
| GotoIfNot(Float64GreaterThan(var_x.value(), x), &return_x); |
| var_x = Float64Sub(var_x.value(), one); |
| } |
| Goto(&return_x); |
| } |
| |
| BIND(&if_xnotgreaterthanzero); |
| { |
| if (IsFloat64RoundUpSupported()) { |
| var_x = Float64RoundUp(x); |
| Goto(&return_x); |
| } else { |
| // Just return {x} unless it's in the range ]-2^52,0[. |
| GotoIf(Float64LessThanOrEqual(x, minus_two_52), &return_x); |
| GotoIfNot(Float64LessThan(x, zero), &return_x); |
| |
| // Round negated {x} towards -Infinity and return the result negated. |
| TNode<Float64T> minus_x = Float64Neg(x); |
| var_x = Float64Sub(Float64Add(two_52, minus_x), two_52); |
| GotoIfNot(Float64GreaterThan(var_x.value(), minus_x), &return_minus_x); |
| var_x = Float64Sub(var_x.value(), one); |
| Goto(&return_minus_x); |
| } |
| } |
| |
| BIND(&return_minus_x); |
| var_x = Float64Neg(var_x.value()); |
| Goto(&return_x); |
| |
| BIND(&return_x); |
| return TNode<Float64T>::UncheckedCast(var_x.value()); |
| } |
| |
| template <> |
| TNode<Smi> CodeStubAssembler::TaggedToParameter(TNode<Smi> value) { |
| return value; |
| } |
| |
| template <> |
| TNode<IntPtrT> CodeStubAssembler::TaggedToParameter(TNode<Smi> value) { |
| return SmiUntag(value); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::TaggedIndexToIntPtr( |
| TNode<TaggedIndex> value) { |
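| // TaggedIndex values always use the 31-bit Smi encoding (one tag bit, no |
| // shift), regardless of the platform's Smi layout, so untagging is a plain |
| // arithmetic shift by kSmiTagSize. |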
| return Signed(WordSarShiftOutZeros(BitcastTaggedToWordForTagAndSmiBits(value), |
| IntPtrConstant(kSmiTagSize))); |
| } |
| |
| TNode<TaggedIndex> CodeStubAssembler::IntPtrToTaggedIndex( |
| TNode<IntPtrT> value) { |
| return ReinterpretCast<TaggedIndex>( |
| BitcastWordToTaggedSigned(WordShl(value, IntPtrConstant(kSmiTagSize)))); |
| } |
| |
| TNode<Smi> CodeStubAssembler::TaggedIndexToSmi(TNode<TaggedIndex> value) { |
| if (SmiValuesAre32Bits()) { |
| DCHECK_EQ(kSmiShiftSize, 31); |
| return BitcastWordToTaggedSigned( |
| WordShl(BitcastTaggedToWordForTagAndSmiBits(value), |
| IntPtrConstant(kSmiShiftSize))); |
| } |
| DCHECK(SmiValuesAre31Bits()); |
| DCHECK_EQ(kSmiShiftSize, 0); |
| return ReinterpretCast<Smi>(value); |
| } |
| |
| TNode<TaggedIndex> CodeStubAssembler::SmiToTaggedIndex(TNode<Smi> value) { |
| if (kSystemPointerSize == kInt32Size) { |
| return ReinterpretCast<TaggedIndex>(value); |
| } |
| if (SmiValuesAre32Bits()) { |
| DCHECK_EQ(kSmiShiftSize, 31); |
| return ReinterpretCast<TaggedIndex>(BitcastWordToTaggedSigned( |
| WordSar(BitcastTaggedToWordForTagAndSmiBits(value), |
| IntPtrConstant(kSmiShiftSize)))); |
| } |
| DCHECK(SmiValuesAre31Bits()); |
| DCHECK_EQ(kSmiShiftSize, 0); |
| // Just sign-extend the lower 32 bits. |
| TNode<Int32T> raw = |
| TruncateWordToInt32(BitcastTaggedToWordForTagAndSmiBits(value)); |
| return ReinterpretCast<TaggedIndex>( |
| BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(raw))); |
| } |
| |
| TNode<Smi> CodeStubAssembler::NormalizeSmiIndex(TNode<Smi> smi_index) { |
| if (COMPRESS_POINTERS_BOOL) { |
| TNode<Int32T> raw = |
| TruncateWordToInt32(BitcastTaggedToWordForTagAndSmiBits(smi_index)); |
| smi_index = BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(raw)); |
| } |
| return smi_index; |
| } |
| |
| TNode<Smi> CodeStubAssembler::SmiFromInt32(SloppyTNode<Int32T> value) { |
| if (COMPRESS_POINTERS_BOOL) { |
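| // With the Smi shift being exactly one bit (asserted below), value + value |
| // is the same as value << 1 and leaves the (zero) Smi tag bit in place. |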
| static_assert(!COMPRESS_POINTERS_BOOL || (kSmiShiftSize + kSmiTagSize == 1), |
| "Use shifting instead of add"); |
| return BitcastWordToTaggedSigned( |
| ChangeUint32ToWord(Int32Add(value, value))); |
| } |
| return SmiTag(ChangeInt32ToIntPtr(value)); |
| } |
| |
| TNode<Smi> CodeStubAssembler::SmiFromUint32(TNode<Uint32T> value) { |
| CSA_ASSERT(this, IntPtrLessThan(ChangeUint32ToWord(value), |
| IntPtrConstant(Smi::kMaxValue))); |
| return SmiFromInt32(Signed(value)); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::IsValidPositiveSmi(TNode<IntPtrT> value) { |
| intptr_t constant_value; |
| if (ToIntPtrConstant(value, &constant_value)) { |
| return (static_cast<uintptr_t>(constant_value) <= |
| static_cast<uintptr_t>(Smi::kMaxValue)) |
| ? Int32TrueConstant() |
| : Int32FalseConstant(); |
| } |
| |
| return UintPtrLessThanOrEqual(value, IntPtrConstant(Smi::kMaxValue)); |
| } |
| |
| TNode<Smi> CodeStubAssembler::SmiTag(SloppyTNode<IntPtrT> value) { |
| int32_t constant_value; |
| if (ToInt32Constant(value, &constant_value) && Smi::IsValid(constant_value)) { |
| return SmiConstant(constant_value); |
| } |
| if (COMPRESS_POINTERS_BOOL) { |
| return SmiFromInt32(TruncateIntPtrToInt32(value)); |
| } |
| TNode<Smi> smi = |
| BitcastWordToTaggedSigned(WordShl(value, SmiShiftBitsConstant())); |
| return smi; |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::SmiUntag(SloppyTNode<Smi> value) { |
| intptr_t constant_value; |
| if (ToIntPtrConstant(value, &constant_value)) { |
| return IntPtrConstant(constant_value >> (kSmiShiftSize + kSmiTagSize)); |
| } |
| TNode<IntPtrT> raw_bits = BitcastTaggedToWordForTagAndSmiBits(value); |
| if (COMPRESS_POINTERS_BOOL) { |
| // The upper 32 bits may contain garbage; make them well-defined by |
| // sign-extending the lower half. |
| raw_bits = ChangeInt32ToIntPtr(TruncateIntPtrToInt32(raw_bits)); |
| } |
| return Signed(WordSarShiftOutZeros(raw_bits, SmiShiftBitsConstant())); |
| } |
| |
| TNode<Int32T> CodeStubAssembler::SmiToInt32(SloppyTNode<Smi> value) { |
| if (COMPRESS_POINTERS_BOOL) { |
| return Signed(Word32SarShiftOutZeros( |
| TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(value)), |
| SmiShiftBitsConstant32())); |
| } |
| TNode<IntPtrT> result = SmiUntag(value); |
| return TruncateIntPtrToInt32(result); |
| } |
| |
| TNode<Float64T> CodeStubAssembler::SmiToFloat64(SloppyTNode<Smi> value) { |
| return ChangeInt32ToFloat64(SmiToInt32(value)); |
| } |
| |
| TNode<Smi> CodeStubAssembler::SmiMax(TNode<Smi> a, TNode<Smi> b) { |
| return SelectConstant<Smi>(SmiLessThan(a, b), b, a); |
| } |
| |
| TNode<Smi> CodeStubAssembler::SmiMin(TNode<Smi> a, TNode<Smi> b) { |
| return SelectConstant<Smi>(SmiLessThan(a, b), a, b); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::TryIntPtrAdd(TNode<IntPtrT> a, |
| TNode<IntPtrT> b, |
| Label* if_overflow) { |
| TNode<PairT<IntPtrT, BoolT>> pair = IntPtrAddWithOverflow(a, b); |
| TNode<BoolT> overflow = Projection<1>(pair); |
| GotoIf(overflow, if_overflow); |
| return Projection<0>(pair); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::TryIntPtrSub(TNode<IntPtrT> a, |
| TNode<IntPtrT> b, |
| Label* if_overflow) { |
| TNode<PairT<IntPtrT, BoolT>> pair = IntPtrSubWithOverflow(a, b); |
| TNode<BoolT> overflow = Projection<1>(pair); |
| GotoIf(overflow, if_overflow); |
| return Projection<0>(pair); |
| } |
| |
| TNode<Int32T> CodeStubAssembler::TryInt32Mul(TNode<Int32T> a, TNode<Int32T> b, |
| Label* if_overflow) { |
| TNode<PairT<Int32T, BoolT>> pair = Int32MulWithOverflow(a, b); |
| TNode<BoolT> overflow = Projection<1>(pair); |
| GotoIf(overflow, if_overflow); |
| return Projection<0>(pair); |
| } |
| |
| TNode<Smi> CodeStubAssembler::TrySmiAdd(TNode<Smi> lhs, TNode<Smi> rhs, |
| Label* if_overflow) { |
| if (SmiValuesAre32Bits()) { |
| return BitcastWordToTaggedSigned( |
| TryIntPtrAdd(BitcastTaggedToWordForTagAndSmiBits(lhs), |
| BitcastTaggedToWordForTagAndSmiBits(rhs), if_overflow)); |
| } else { |
| DCHECK(SmiValuesAre31Bits()); |
| TNode<PairT<Int32T, BoolT>> pair = Int32AddWithOverflow( |
| TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(lhs)), |
| TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(rhs))); |
| TNode<BoolT> overflow = Projection<1>(pair); |
| GotoIf(overflow, if_overflow); |
| TNode<Int32T> result = Projection<0>(pair); |
| return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(result)); |
| } |
| } |
| |
| TNode<Smi> CodeStubAssembler::TrySmiSub(TNode<Smi> lhs, TNode<Smi> rhs, |
| Label* if_overflow) { |
| if (SmiValuesAre32Bits()) { |
| TNode<PairT<IntPtrT, BoolT>> pair = |
| IntPtrSubWithOverflow(BitcastTaggedToWordForTagAndSmiBits(lhs), |
| BitcastTaggedToWordForTagAndSmiBits(rhs)); |
| TNode<BoolT> overflow = Projection<1>(pair); |
| GotoIf(overflow, if_overflow); |
| TNode<IntPtrT> result = Projection<0>(pair); |
| return BitcastWordToTaggedSigned(result); |
| } else { |
| DCHECK(SmiValuesAre31Bits()); |
| TNode<PairT<Int32T, BoolT>> pair = Int32SubWithOverflow( |
| TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(lhs)), |
| TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(rhs))); |
| TNode<BoolT> overflow = Projection<1>(pair); |
| GotoIf(overflow, if_overflow); |
| TNode<Int32T> result = Projection<0>(pair); |
| return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(result)); |
| } |
| } |
| |
| TNode<Smi> CodeStubAssembler::TrySmiAbs(TNode<Smi> a, Label* if_overflow) { |
| if (SmiValuesAre32Bits()) { |
| TNode<PairT<IntPtrT, BoolT>> pair = |
| IntPtrAbsWithOverflow(BitcastTaggedToWordForTagAndSmiBits(a)); |
| TNode<BoolT> overflow = Projection<1>(pair); |
| GotoIf(overflow, if_overflow); |
| TNode<IntPtrT> result = Projection<0>(pair); |
| return BitcastWordToTaggedSigned(result); |
| } else { |
| CHECK(SmiValuesAre31Bits()); |
| CHECK(IsInt32AbsWithOverflowSupported()); |
| TNode<PairT<Int32T, BoolT>> pair = Int32AbsWithOverflow( |
| TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(a))); |
| TNode<BoolT> overflow = Projection<1>(pair); |
| GotoIf(overflow, if_overflow); |
| TNode<Int32T> result = Projection<0>(pair); |
| return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(result)); |
| } |
| } |
| |
| TNode<Number> CodeStubAssembler::NumberMax(TNode<Number> a, TNode<Number> b) { |
| // TODO(danno): This could be optimized by specifically handling smi cases. |
| TVARIABLE(Number, result); |
| Label done(this), greater_than_equal_a(this), greater_than_equal_b(this); |
| GotoIfNumberGreaterThanOrEqual(a, b, &greater_than_equal_a); |
| GotoIfNumberGreaterThanOrEqual(b, a, &greater_than_equal_b); |
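| // Neither a >= b nor b >= a holds, so at least one operand is NaN. |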
| result = NanConstant(); |
| Goto(&done); |
| BIND(&greater_than_equal_a); |
| result = a; |
| Goto(&done); |
| BIND(&greater_than_equal_b); |
| result = b; |
| Goto(&done); |
| BIND(&done); |
| return result.value(); |
| } |
| |
| TNode<Number> CodeStubAssembler::NumberMin(TNode<Number> a, TNode<Number> b) { |
| // TODO(danno): This could be optimized by specifically handling smi cases. |
| TVARIABLE(Number, result); |
| Label done(this), greater_than_equal_a(this), greater_than_equal_b(this); |
| GotoIfNumberGreaterThanOrEqual(a, b, &greater_than_equal_a); |
| GotoIfNumberGreaterThanOrEqual(b, a, &greater_than_equal_b); |
| result = NanConstant(); |
| Goto(&done); |
| BIND(&greater_than_equal_a); |
| result = b; |
| Goto(&done); |
| BIND(&greater_than_equal_b); |
| result = a; |
| Goto(&done); |
| BIND(&done); |
| return result.value(); |
| } |
| |
| TNode<Number> CodeStubAssembler::SmiMod(TNode<Smi> a, TNode<Smi> b) { |
| TVARIABLE(Number, var_result); |
| Label return_result(this, &var_result), |
| return_minuszero(this, Label::kDeferred), |
| return_nan(this, Label::kDeferred); |
| |
| // Untag {a} and {b}. |
| TNode<Int32T> int_a = SmiToInt32(a); |
| TNode<Int32T> int_b = SmiToInt32(b); |
| |
| // Return NaN if {b} is zero. |
| GotoIf(Word32Equal(int_b, Int32Constant(0)), &return_nan); |
| |
| // Check if {a} is non-negative. |
| Label if_aisnotnegative(this), if_aisnegative(this, Label::kDeferred); |
| Branch(Int32LessThanOrEqual(Int32Constant(0), int_a), &if_aisnotnegative, |
| &if_aisnegative); |
| |
| BIND(&if_aisnotnegative); |
| { |
| // Fast case, don't need to check any other edge cases. |
| TNode<Int32T> r = Int32Mod(int_a, int_b); |
| var_result = SmiFromInt32(r); |
| Goto(&return_result); |
| } |
| |
| BIND(&if_aisnegative); |
| { |
| if (SmiValuesAre32Bits()) { |
| // Check if {a} is kMinInt and {b} is -1 (only relevant if kMinInt is |
| // actually representable as a Smi). |
| Label join(this); |
| GotoIfNot(Word32Equal(int_a, Int32Constant(kMinInt)), &join); |
| GotoIf(Word32Equal(int_b, Int32Constant(-1)), &return_minuszero); |
| Goto(&join); |
| BIND(&join); |
| } |
| |
| // Perform the integer modulus operation. |
| TNode<Int32T> r = Int32Mod(int_a, int_b); |
| |
| // Check if {r} is zero, and if so return -0, because we have to |
| // take the sign of the left hand side {a}, which is negative. |
| GotoIf(Word32Equal(r, Int32Constant(0)), &return_minuszero); |
| |
| // The remainder {r} can be outside the valid Smi range on 32-bit |
| // architectures, so we cannot just use SmiFromInt32(r) here. |
| var_result = ChangeInt32ToTagged(r); |
| Goto(&return_result); |
| } |
| |
| BIND(&return_minuszero); |
| var_result = MinusZeroConstant(); |
| Goto(&return_result); |
| |
| BIND(&return_nan); |
| var_result = NanConstant(); |
| Goto(&return_result); |
| |
| BIND(&return_result); |
| return var_result.value(); |
| } |
| |
| TNode<Number> CodeStubAssembler::SmiMul(TNode<Smi> a, TNode<Smi> b) { |
| TVARIABLE(Number, var_result); |
| TVARIABLE(Float64T, var_lhs_float64); |
| TVARIABLE(Float64T, var_rhs_float64); |
| Label return_result(this, &var_result); |
| |
| // Both {a} and {b} are Smis. Convert them to integers and multiply. |
| TNode<Int32T> lhs32 = SmiToInt32(a); |
| TNode<Int32T> rhs32 = SmiToInt32(b); |
| auto pair = Int32MulWithOverflow(lhs32, rhs32); |
| |
| TNode<BoolT> overflow = Projection<1>(pair); |
| |
| // Check if the multiplication overflowed. |
| Label if_overflow(this, Label::kDeferred), if_notoverflow(this); |
| Branch(overflow, &if_overflow, &if_notoverflow); |
| BIND(&if_notoverflow); |
| { |
| // If the answer is zero, we may need to return -0.0, depending on the |
| // input. |
| Label answer_zero(this), answer_not_zero(this); |
| TNode<Int32T> answer = Projection<0>(pair); |
| TNode<Int32T> zero = Int32Constant(0); |
| Branch(Word32Equal(answer, zero), &answer_zero, &answer_not_zero); |
| BIND(&answer_not_zero); |
| { |
| var_result = ChangeInt32ToTagged(answer); |
| Goto(&return_result); |
| } |
| BIND(&answer_zero); |
| { |
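| // A zero product must be -0.0 if either input is negative; the sign bit of |
| // the OR of both inputs tells us whether that is the case. |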
| TNode<Int32T> or_result = Word32Or(lhs32, rhs32); |
| Label if_should_be_negative_zero(this), if_should_be_zero(this); |
| Branch(Int32LessThan(or_result, zero), &if_should_be_negative_zero, |
| &if_should_be_zero); |
| BIND(&if_should_be_negative_zero); |
| { |
| var_result = MinusZeroConstant(); |
| Goto(&return_result); |
| } |
| BIND(&if_should_be_zero); |
| { |
| var_result = SmiConstant(0); |
| Goto(&return_result); |
| } |
| } |
| } |
| BIND(&if_overflow); |
| { |
| var_lhs_float64 = SmiToFloat64(a); |
| var_rhs_float64 = SmiToFloat64(b); |
| TNode<Float64T> value = |
| Float64Mul(var_lhs_float64.value(), var_rhs_float64.value()); |
| var_result = AllocateHeapNumberWithValue(value); |
| Goto(&return_result); |
| } |
| |
| BIND(&return_result); |
| return var_result.value(); |
| } |
| |
| TNode<Smi> CodeStubAssembler::TrySmiDiv(TNode<Smi> dividend, TNode<Smi> divisor, |
| Label* bailout) { |
| // Both {dividend} and {divisor} are Smis. Bail out to floating point |
| // division if {divisor} is zero. |
| GotoIf(TaggedEqual(divisor, SmiConstant(0)), bailout); |
| |
| // Do floating point division if {dividend} is zero and {divisor} is |
| // negative. |
| Label dividend_is_zero(this), dividend_is_not_zero(this); |
| Branch(TaggedEqual(dividend, SmiConstant(0)), ÷nd_is_zero, |
| ÷nd_is_not_zero); |
| |
| BIND(÷nd_is_zero); |
| { |
| GotoIf(SmiLessThan(divisor, SmiConstant(0)), bailout); |
| Goto(÷nd_is_not_zero); |
| } |
| BIND(÷nd_is_not_zero); |
| |
| TNode<Int32T> untagged_divisor = SmiToInt32(divisor); |
| TNode<Int32T> untagged_dividend = SmiToInt32(dividend); |
| |
| // Do floating point division if {dividend} is kMinInt (or kMinInt - 1 |
| // if the Smi size is 31) and {divisor} is -1. |
| Label divisor_is_minus_one(this), divisor_is_not_minus_one(this); |
| Branch(Word32Equal(untagged_divisor, Int32Constant(-1)), |
| &divisor_is_minus_one, &divisor_is_not_minus_one); |
| |
| BIND(&divisor_is_minus_one); |
| { |
| GotoIf(Word32Equal( |
| untagged_dividend, |
| Int32Constant(kSmiValueSize == 32 ? kMinInt : (kMinInt >> 1))), |
| bailout); |
| Goto(&divisor_is_not_minus_one); |
| } |
| BIND(&divisor_is_not_minus_one); |
| |
| TNode<Int32T> untagged_result = Int32Div(untagged_dividend, untagged_divisor); |
| TNode<Int32T> truncated = Int32Mul(untagged_result, untagged_divisor); |
| |
| // Do floating point division if the remainder is not 0. |
| GotoIf(Word32NotEqual(untagged_dividend, truncated), bailout); |
| |
| return SmiFromInt32(untagged_result); |
| } |
| |
| TNode<Smi> CodeStubAssembler::SmiLexicographicCompare(TNode<Smi> x, |
| TNode<Smi> y) { |
| TNode<ExternalReference> smi_lexicographic_compare = |
| ExternalConstant(ExternalReference::smi_lexicographic_compare_function()); |
| TNode<ExternalReference> isolate_ptr = |
| ExternalConstant(ExternalReference::isolate_address(isolate())); |
| return CAST(CallCFunction(smi_lexicographic_compare, MachineType::AnyTagged(), |
| std::make_pair(MachineType::Pointer(), isolate_ptr), |
| std::make_pair(MachineType::AnyTagged(), x), |
| std::make_pair(MachineType::AnyTagged(), y))); |
| } |
| |
| TNode<Int32T> CodeStubAssembler::TruncateWordToInt32(SloppyTNode<WordT> value) { |
| if (Is64()) { |
| return TruncateInt64ToInt32(ReinterpretCast<Int64T>(value)); |
| } |
| return ReinterpretCast<Int32T>(value); |
| } |
| |
| TNode<Int32T> CodeStubAssembler::TruncateIntPtrToInt32( |
| SloppyTNode<IntPtrT> value) { |
| if (Is64()) { |
| return TruncateInt64ToInt32(ReinterpretCast<Int64T>(value)); |
| } |
| return ReinterpretCast<Int32T>(value); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::TaggedIsSmi(TNode<MaybeObject> a) { |
| STATIC_ASSERT(kSmiTagMask < kMaxUInt32); |
| return Word32Equal( |
| Word32And(TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(a)), |
| Int32Constant(kSmiTagMask)), |
| Int32Constant(0)); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::TaggedIsNotSmi(TNode<MaybeObject> a) { |
| return Word32BinaryNot(TaggedIsSmi(a)); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::TaggedIsPositiveSmi(SloppyTNode<Object> a) { |
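| // A value is a positive Smi iff both the Smi tag bit and the sign bit are |
| // clear, so a single masked comparison suffices. |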
| #if defined(V8_HOST_ARCH_32_BIT) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) |
| return Word32Equal( |
| Word32And( |
| TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(a)), |
| Uint32Constant(static_cast<uint32_t>(kSmiTagMask | kSmiSignMask))), |
| Int32Constant(0)); |
| #else |
| return WordEqual(WordAnd(BitcastTaggedToWordForTagAndSmiBits(a), |
| IntPtrConstant(kSmiTagMask | kSmiSignMask)), |
| IntPtrConstant(0)); |
| #endif |
| } |
| |
| TNode<BoolT> CodeStubAssembler::WordIsAligned(SloppyTNode<WordT> word, |
| size_t alignment) { |
| DCHECK(base::bits::IsPowerOfTwo(alignment)); |
| DCHECK_LE(alignment, kMaxUInt32); |
| return Word32Equal( |
| Int32Constant(0), |
| Word32And(TruncateWordToInt32(word), |
| Uint32Constant(static_cast<uint32_t>(alignment) - 1))); |
| } |
| |
| #if DEBUG |
| void CodeStubAssembler::Bind(Label* label, AssemblerDebugInfo debug_info) { |
| CodeAssembler::Bind(label, debug_info); |
| } |
| #endif // DEBUG |
| |
| void CodeStubAssembler::Bind(Label* label) { CodeAssembler::Bind(label); } |
| |
| TNode<Float64T> CodeStubAssembler::LoadDoubleWithHoleCheck( |
| TNode<FixedDoubleArray> array, TNode<IntPtrT> index, Label* if_hole) { |
| return LoadFixedDoubleArrayElement(array, index, if_hole); |
| } |
| |
| void CodeStubAssembler::BranchIfJSReceiver(SloppyTNode<Object> object, |
| Label* if_true, Label* if_false) { |
| GotoIf(TaggedIsSmi(object), if_false); |
| STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE); |
| Branch(IsJSReceiver(CAST(object)), if_true, if_false); |
| } |
| |
| void CodeStubAssembler::GotoIfForceSlowPath(Label* if_true) { |
| #ifdef V8_ENABLE_FORCE_SLOW_PATH |
| const TNode<ExternalReference> force_slow_path_addr = |
| ExternalConstant(ExternalReference::force_slow_path(isolate())); |
| const TNode<Uint8T> force_slow = Load<Uint8T>(force_slow_path_addr); |
| |
| GotoIf(force_slow, if_true); |
| #endif |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::AllocateRaw(TNode<IntPtrT> size_in_bytes, |
| AllocationFlags flags, |
| TNode<RawPtrT> top_address, |
| TNode<RawPtrT> limit_address) { |
| Label if_out_of_memory(this, Label::kDeferred); |
| |
| // TODO(jgruber,jkummerow): Extract the slow paths (= probably everything |
| // but bump pointer allocation) into a builtin to save code space. The |
| // size_in_bytes check may be moved there as well since a non-smi |
| // size_in_bytes probably doesn't fit into the bump pointer region |
| // (double-check that). |
| |
| intptr_t size_in_bytes_constant; |
| bool size_in_bytes_is_constant = false; |
| if (ToIntPtrConstant(size_in_bytes, &size_in_bytes_constant)) { |
| size_in_bytes_is_constant = true; |
| CHECK(Internals::IsValidSmi(size_in_bytes_constant)); |
| CHECK_GT(size_in_bytes_constant, 0); |
| } else { |
| GotoIfNot(IsValidPositiveSmi(size_in_bytes), &if_out_of_memory); |
| } |
| |
| TNode<RawPtrT> top = Load<RawPtrT>(top_address); |
| TNode<RawPtrT> limit = Load<RawPtrT>(limit_address); |
| |
| // If there's not enough space, call the runtime. |
| TVARIABLE(Object, result); |
| Label runtime_call(this, Label::kDeferred), no_runtime_call(this), out(this); |
| |
| bool needs_double_alignment = flags & kDoubleAlignment; |
| bool allow_large_object_allocation = flags & kAllowLargeObjectAllocation; |
| |
| if (allow_large_object_allocation) { |
| Label next(this); |
| GotoIf(IsRegularHeapObjectSize(size_in_bytes), &next); |
| |
| TNode<Smi> runtime_flags = SmiConstant(Smi::FromInt( |
| AllocateDoubleAlignFlag::encode(needs_double_alignment) | |
| AllowLargeObjectAllocationFlag::encode(allow_large_object_allocation))); |
| if (FLAG_young_generation_large_objects) { |
| result = |
| CallRuntime(Runtime::kAllocateInYoungGeneration, NoContextConstant(), |
| SmiTag(size_in_bytes), runtime_flags); |
| } else { |
| result = |
| CallRuntime(Runtime::kAllocateInOldGeneration, NoContextConstant(), |
| SmiTag(size_in_bytes), runtime_flags); |
| } |
| Goto(&out); |
| |
| BIND(&next); |
| } |
| |
| TVARIABLE(IntPtrT, adjusted_size, size_in_bytes); |
| |
| if (needs_double_alignment) { |
| Label next(this); |
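| // If {top} is not double-aligned, reserve 4 extra bytes so that a one-word |
| // filler can be stored in front of the object (see the store further below). |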
| GotoIfNot(WordAnd(top, IntPtrConstant(kDoubleAlignmentMask)), &next); |
| |
| adjusted_size = IntPtrAdd(size_in_bytes, IntPtrConstant(4)); |
| Goto(&next); |
| |
| BIND(&next); |
| } |
| |
| TNode<IntPtrT> new_top = |
| IntPtrAdd(UncheckedCast<IntPtrT>(top), adjusted_size.value()); |
| |
| Branch(UintPtrGreaterThanOrEqual(new_top, limit), &runtime_call, |
| &no_runtime_call); |
| |
| BIND(&runtime_call); |
| { |
| TNode<Smi> runtime_flags = SmiConstant(Smi::FromInt( |
| AllocateDoubleAlignFlag::encode(needs_double_alignment) | |
| AllowLargeObjectAllocationFlag::encode(allow_large_object_allocation))); |
| if (flags & kPretenured) { |
| result = |
| CallRuntime(Runtime::kAllocateInOldGeneration, NoContextConstant(), |
| SmiTag(size_in_bytes), runtime_flags); |
| } else { |
| result = |
| CallRuntime(Runtime::kAllocateInYoungGeneration, NoContextConstant(), |
| SmiTag(size_in_bytes), runtime_flags); |
| } |
| Goto(&out); |
| } |
| |
| // When there is enough space, return {top} and bump it up. |
| BIND(&no_runtime_call); |
| { |
| StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address, |
| new_top); |
| |
| TVARIABLE(IntPtrT, address, UncheckedCast<IntPtrT>(top)); |
| |
| if (needs_double_alignment) { |
| Label next(this); |
| GotoIf(IntPtrEqual(adjusted_size.value(), size_in_bytes), &next); |
| |
| // Store a filler and increase the address by 4. |
| StoreNoWriteBarrier(MachineRepresentation::kTagged, top, |
| OnePointerFillerMapConstant()); |
| address = IntPtrAdd(UncheckedCast<IntPtrT>(top), IntPtrConstant(4)); |
| Goto(&next); |
| |
| BIND(&next); |
| } |
| |
| result = BitcastWordToTagged( |
| IntPtrAdd(address.value(), IntPtrConstant(kHeapObjectTag))); |
| Goto(&out); |
| } |
| |
| if (!size_in_bytes_is_constant) { |
| BIND(&if_out_of_memory); |
| CallRuntime(Runtime::kFatalProcessOutOfMemoryInAllocateRaw, |
| NoContextConstant()); |
| Unreachable(); |
| } |
| |
| BIND(&out); |
| return UncheckedCast<HeapObject>(result.value()); |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::AllocateRawUnaligned( |
| TNode<IntPtrT> size_in_bytes, AllocationFlags flags, |
| TNode<RawPtrT> top_address, TNode<RawPtrT> limit_address) { |
| DCHECK_EQ(flags & kDoubleAlignment, 0); |
| return AllocateRaw(size_in_bytes, flags, top_address, limit_address); |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::AllocateRawDoubleAligned( |
| TNode<IntPtrT> size_in_bytes, AllocationFlags flags, |
| TNode<RawPtrT> top_address, TNode<RawPtrT> limit_address) { |
| #if defined(V8_HOST_ARCH_32_BIT) |
| return AllocateRaw(size_in_bytes, flags | kDoubleAlignment, top_address, |
| limit_address); |
| #elif defined(V8_HOST_ARCH_64_BIT) |
| #ifdef V8_COMPRESS_POINTERS |
| // TODO(ishell, v8:8875): Consider using aligned allocations once the |
| // allocation alignment inconsistency is fixed. For now we keep using |
| // unaligned access since both x64 and arm64 architectures (where pointer |
| // compression is supported) allow unaligned access to doubles and full words. |
| #endif // V8_COMPRESS_POINTERS |
| // Allocation on a 64-bit machine is naturally double-aligned. |
| return AllocateRaw(size_in_bytes, flags & ~kDoubleAlignment, top_address, |
| limit_address); |
| #else |
| #error Architecture not supported |
| #endif |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::AllocateInNewSpace( |
| TNode<IntPtrT> size_in_bytes, AllocationFlags flags) { |
| DCHECK(flags == kNone || flags == kDoubleAlignment); |
| CSA_ASSERT(this, IsRegularHeapObjectSize(size_in_bytes)); |
| return Allocate(size_in_bytes, flags); |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::Allocate(TNode<IntPtrT> size_in_bytes, |
| AllocationFlags flags) { |
| Comment("Allocate"); |
| bool const new_space = !(flags & kPretenured); |
| bool const allow_large_objects = flags & kAllowLargeObjectAllocation; |
| // For optimized allocations, we don't allow the allocation to happen in a |
| // different generation than requested. |
| bool const always_allocated_in_requested_space = |
| !new_space || !allow_large_objects || FLAG_young_generation_large_objects; |
| if (!allow_large_objects) { |
| intptr_t size_constant; |
| if (ToIntPtrConstant(size_in_bytes, &size_constant)) { |
| CHECK_LE(size_constant, kMaxRegularHeapObjectSize); |
| } else { |
| CSA_ASSERT(this, IsRegularHeapObjectSize(size_in_bytes)); |
| } |
| } |
| if (!(flags & kDoubleAlignment) && always_allocated_in_requested_space) { |
| return OptimizedAllocate( |
| size_in_bytes, |
| new_space ? AllocationType::kYoung : AllocationType::kOld, |
| allow_large_objects ? AllowLargeObjects::kTrue |
| : AllowLargeObjects::kFalse); |
| } |
| TNode<ExternalReference> top_address = ExternalConstant( |
| new_space |
| ? ExternalReference::new_space_allocation_top_address(isolate()) |
| : ExternalReference::old_space_allocation_top_address(isolate())); |
| DCHECK_EQ(kSystemPointerSize, |
| ExternalReference::new_space_allocation_limit_address(isolate()) |
| .address() - |
| ExternalReference::new_space_allocation_top_address(isolate()) |
| .address()); |
| DCHECK_EQ(kSystemPointerSize, |
| ExternalReference::old_space_allocation_limit_address(isolate()) |
| .address() - |
| ExternalReference::old_space_allocation_top_address(isolate()) |
| .address()); |
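| // The allocation limit word is stored immediately after the top word (the |
| // DCHECKs above verify this), so its address can be derived by adding |
| // kSystemPointerSize instead of loading another external reference. |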
| TNode<IntPtrT> limit_address = |
| IntPtrAdd(ReinterpretCast<IntPtrT>(top_address), |
| IntPtrConstant(kSystemPointerSize)); |
| |
| if (flags & kDoubleAlignment) { |
| return AllocateRawDoubleAligned(size_in_bytes, flags, |
| ReinterpretCast<RawPtrT>(top_address), |
| ReinterpretCast<RawPtrT>(limit_address)); |
| } else { |
| return AllocateRawUnaligned(size_in_bytes, flags, |
| ReinterpretCast<RawPtrT>(top_address), |
| ReinterpretCast<RawPtrT>(limit_address)); |
| } |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::AllocateInNewSpace(int size_in_bytes, |
| AllocationFlags flags) { |
| CHECK(flags == kNone || flags == kDoubleAlignment); |
| DCHECK_LE(size_in_bytes, kMaxRegularHeapObjectSize); |
| return CodeStubAssembler::Allocate(IntPtrConstant(size_in_bytes), flags); |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::Allocate(int size_in_bytes, |
| AllocationFlags flags) { |
| return CodeStubAssembler::Allocate(IntPtrConstant(size_in_bytes), flags); |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::InnerAllocate(TNode<HeapObject> previous, |
| TNode<IntPtrT> offset) { |
| return UncheckedCast<HeapObject>( |
| BitcastWordToTagged(IntPtrAdd(BitcastTaggedToWord(previous), offset))); |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::InnerAllocate(TNode<HeapObject> previous, |
| int offset) { |
| return InnerAllocate(previous, IntPtrConstant(offset)); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::IsRegularHeapObjectSize(TNode<IntPtrT> size) { |
| return UintPtrLessThanOrEqual(size, |
| IntPtrConstant(kMaxRegularHeapObjectSize)); |
| } |
| |
| void CodeStubAssembler::BranchIfToBooleanIsTrue(SloppyTNode<Object> value, |
| Label* if_true, |
| Label* if_false) { |
| Label if_smi(this), if_notsmi(this), if_heapnumber(this, Label::kDeferred), |
| if_bigint(this, Label::kDeferred); |
| // Rule out false {value}. |
| GotoIf(TaggedEqual(value, FalseConstant()), if_false); |
| |
| // Check if {value} is a Smi or a HeapObject. |
| Branch(TaggedIsSmi(value), &if_smi, &if_notsmi); |
| |
| BIND(&if_smi); |
| { |
| // The {value} is a Smi, only need to check against zero. |
| BranchIfSmiEqual(CAST(value), SmiConstant(0), if_false, if_true); |
| } |
| |
| BIND(&if_notsmi); |
| { |
| TNode<HeapObject> value_heapobject = CAST(value); |
| |
| // Check if {value} is the empty string. |
| GotoIf(IsEmptyString(value_heapobject), if_false); |
| |
| // The {value} is a HeapObject, load its map. |
| TNode<Map> value_map = LoadMap(value_heapobject); |
| |
| // Only null, undefined and document.all have the undetectable bit set, |
| // so we can return false immediately when that bit is set. |
| GotoIf(IsUndetectableMap(value_map), if_false); |
| |
| // We still need to handle numbers specially, but all other {value}s |
| // that make it here yield true. |
| GotoIf(IsHeapNumberMap(value_map), &if_heapnumber); |
| Branch(IsBigInt(value_heapobject), &if_bigint, if_true); |
| |
| BIND(&if_heapnumber); |
| { |
| // Load the floating point value of {value}. |
| TNode<Float64T> value_value = |
| LoadObjectField<Float64T>(value_heapobject, HeapNumber::kValueOffset); |
| |
| // Check if the floating point {value} is neither 0.0, -0.0 nor NaN. |
| Branch(Float64LessThan(Float64Constant(0.0), Float64Abs(value_value)), |
| if_true, if_false); |
| } |
| |
| BIND(&if_bigint); |
| { |
| TNode<BigInt> bigint = CAST(value); |
| TNode<Word32T> bitfield = LoadBigIntBitfield(bigint); |
| TNode<Uint32T> length = DecodeWord32<BigIntBase::LengthBits>(bitfield); |
| Branch(Word32Equal(length, Int32Constant(0)), if_false, if_true); |
| } |
| } |
| } |
| |
| TNode<ExternalPointerT> CodeStubAssembler::ChangeUint32ToExternalPointer( |
| TNode<Uint32T> value) { |
| STATIC_ASSERT(kExternalPointerSize == kSystemPointerSize); |
| return ReinterpretCast<ExternalPointerT>(ChangeUint32ToWord(value)); |
| } |
| |
| TNode<Uint32T> CodeStubAssembler::ChangeExternalPointerToUint32( |
| TNode<ExternalPointerT> value) { |
| STATIC_ASSERT(kExternalPointerSize == kSystemPointerSize); |
| return Unsigned(TruncateWordToInt32(ReinterpretCast<UintPtrT>(value))); |
| } |
| |
| void CodeStubAssembler::InitializeExternalPointerField(TNode<HeapObject> object, |
| TNode<IntPtrT> offset) { |
| #ifdef V8_HEAP_SANDBOX |
| TNode<ExternalReference> external_pointer_table_address = ExternalConstant( |
| ExternalReference::external_pointer_table_address(isolate())); |
| TNode<Uint32T> table_length = UncheckedCast<Uint32T>( |
| Load(MachineType::Uint32(), external_pointer_table_address, |
| UintPtrConstant(Internals::kExternalPointerTableLengthOffset))); |
| TNode<Uint32T> table_capacity = UncheckedCast<Uint32T>( |
| Load(MachineType::Uint32(), external_pointer_table_address, |
| UintPtrConstant(Internals::kExternalPointerTableCapacityOffset))); |
| |
| Label grow_table(this, Label::kDeferred), finish(this); |
| |
| TNode<BoolT> compare = Uint32LessThan(table_length, table_capacity); |
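| // If the table is already at capacity, call into C++ to grow it before |
| // claiming the slot at index {table_length} for this object. |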
| Branch(compare, &finish, &grow_table); |
| |
| BIND(&grow_table); |
| { |
| TNode<ExternalReference> table_grow_function = ExternalConstant( |
| ExternalReference::external_pointer_table_grow_table_function()); |
| CallCFunction( |
| table_grow_function, MachineType::Pointer(), |
| std::make_pair(MachineType::Pointer(), external_pointer_table_address)); |
| Goto(&finish); |
| } |
| BIND(&finish); |
| |
| TNode<Uint32T> new_table_length = Uint32Add(table_length, Uint32Constant(1)); |
| StoreNoWriteBarrier( |
| MachineRepresentation::kWord32, external_pointer_table_address, |
| UintPtrConstant(Internals::kExternalPointerTableLengthOffset), |
| new_table_length); |
| |
| TNode<Uint32T> index = table_length; |
| TNode<ExternalPointerT> encoded = ChangeUint32ToExternalPointer(index); |
| StoreObjectFieldNoWriteBarrier<ExternalPointerT>(object, offset, encoded); |
| #endif |
| } |
| |
| TNode<RawPtrT> CodeStubAssembler::LoadExternalPointerFromObject( |
| TNode<HeapObject> object, TNode<IntPtrT> offset, |
| ExternalPointerTag external_pointer_tag) { |
| #ifdef V8_HEAP_SANDBOX |
| TNode<ExternalReference> external_pointer_table_address = ExternalConstant( |
| ExternalReference::external_pointer_table_address(isolate())); |
| TNode<RawPtrT> table = UncheckedCast<RawPtrT>( |
| Load(MachineType::Pointer(), external_pointer_table_address, |
| UintPtrConstant(Internals::kExternalPointerTableBufferOffset))); |
| |
| TNode<ExternalPointerT> encoded = |
| LoadObjectField<ExternalPointerT>(object, offset); |
| TNode<Word32T> index = ChangeExternalPointerToUint32(encoded); |
| // TODO(v8:10391, saelo): bounds check if table is not caged |
| TNode<IntPtrT> table_offset = ElementOffsetFromIndex( |
| ChangeUint32ToWord(index), SYSTEM_POINTER_ELEMENTS, 0); |
| |
| TNode<UintPtrT> entry = Load<UintPtrT>(table, table_offset); |
| if (external_pointer_tag != 0) { |
| TNode<UintPtrT> tag = UintPtrConstant(external_pointer_tag); |
| entry = UncheckedCast<UintPtrT>(WordXor(entry, tag)); |
| } |
| return UncheckedCast<RawPtrT>(UncheckedCast<WordT>(entry)); |
| #else |
| return LoadObjectField<RawPtrT>(object, offset); |
| #endif // V8_HEAP_SANDBOX |
| } |
| |
| void CodeStubAssembler::StoreExternalPointerToObject( |
| TNode<HeapObject> object, TNode<IntPtrT> offset, TNode<RawPtrT> pointer, |
| ExternalPointerTag external_pointer_tag) { |
| #ifdef V8_HEAP_SANDBOX |
| TNode<ExternalReference> external_pointer_table_address = ExternalConstant( |
| ExternalReference::external_pointer_table_address(isolate())); |
| TNode<RawPtrT> table = UncheckedCast<RawPtrT>( |
| Load(MachineType::Pointer(), external_pointer_table_address, |
| UintPtrConstant(Internals::kExternalPointerTableBufferOffset))); |
| |
| TNode<ExternalPointerT> encoded = |
| LoadObjectField<ExternalPointerT>(object, offset); |
| TNode<Word32T> index = ChangeExternalPointerToUint32(encoded); |
| // TODO(v8:10391, saelo): bounds check if table is not caged |
| TNode<IntPtrT> table_offset = ElementOffsetFromIndex( |
| ChangeUint32ToWord(index), SYSTEM_POINTER_ELEMENTS, 0); |
| |
| TNode<UintPtrT> value = UncheckedCast<UintPtrT>(pointer); |
| if (external_pointer_tag != 0) { |
| TNode<UintPtrT> tag = UintPtrConstant(external_pointer_tag); |
| value = UncheckedCast<UintPtrT>(WordXor(pointer, tag)); |
| } |
| StoreNoWriteBarrier(MachineType::PointerRepresentation(), table, table_offset, |
| value); |
| #else |
| StoreObjectFieldNoWriteBarrier<RawPtrT>(object, offset, pointer); |
| #endif // V8_HEAP_SANDBOX |
| } |
| |
| TNode<Object> CodeStubAssembler::LoadFromParentFrame(int offset) { |
| TNode<RawPtrT> frame_pointer = LoadParentFramePointer(); |
| return LoadFullTagged(frame_pointer, IntPtrConstant(offset)); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::LoadAndUntagObjectField( |
| TNode<HeapObject> object, int offset) { |
| if (SmiValuesAre32Bits()) { |
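| // With 32-bit Smi payloads the value lives in the upper half of the 8-byte |
| // field, so on little-endian targets we read it at offset + 4. |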
| #if V8_TARGET_LITTLE_ENDIAN |
| offset += 4; |
| #endif |
| return ChangeInt32ToIntPtr(LoadObjectField<Int32T>(object, offset)); |
| } else { |
| return SmiToIntPtr(LoadObjectField<Smi>(object, offset)); |
| } |
| } |
| |
| TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ObjectField( |
| TNode<HeapObject> object, int offset) { |
| if (SmiValuesAre32Bits()) { |
| #if V8_TARGET_LITTLE_ENDIAN |
| offset += 4; |
| #endif |
| return LoadObjectField<Int32T>(object, offset); |
| } else { |
| return SmiToInt32(LoadObjectField<Smi>(object, offset)); |
| } |
| } |
| |
| TNode<Float64T> CodeStubAssembler::LoadHeapNumberValue( |
| TNode<HeapObject> object) { |
| CSA_ASSERT(this, Word32Or(IsHeapNumber(object), IsOddball(object))); |
| STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset); |
| return LoadObjectField<Float64T>(object, HeapNumber::kValueOffset); |
| } |
| |
| TNode<Map> CodeStubAssembler::GetInstanceTypeMap(InstanceType instance_type) { |
| Handle<Map> map_handle( |
| Map::GetInstanceTypeMap(ReadOnlyRoots(isolate()), instance_type), |
| isolate()); |
| return HeapConstant(map_handle); |
| } |
| |
| TNode<Map> CodeStubAssembler::LoadMap(TNode<HeapObject> object) { |
| return LoadObjectField<Map>(object, HeapObject::kMapOffset); |
| } |
| |
| TNode<Uint16T> CodeStubAssembler::LoadInstanceType(TNode<HeapObject> object) { |
| return LoadMapInstanceType(LoadMap(object)); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::HasInstanceType(TNode<HeapObject> object, |
| InstanceType instance_type) { |
| return InstanceTypeEqual(LoadInstanceType(object), instance_type); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::DoesntHaveInstanceType( |
| TNode<HeapObject> object, InstanceType instance_type) { |
| return Word32NotEqual(LoadInstanceType(object), Int32Constant(instance_type)); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::TaggedDoesntHaveInstanceType( |
| TNode<HeapObject> any_tagged, InstanceType type) { |
| /* return Phi <TaggedIsSmi(val), DoesntHaveInstanceType(val, type)> */ |
| TNode<BoolT> tagged_is_smi = TaggedIsSmi(any_tagged); |
| return Select<BoolT>( |
| tagged_is_smi, [=]() { return tagged_is_smi; }, |
| [=]() { return DoesntHaveInstanceType(any_tagged, type); }); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::IsSpecialReceiverMap(TNode<Map> map) { |
| TNode<BoolT> is_special = |
| IsSpecialReceiverInstanceType(LoadMapInstanceType(map)); |
| uint32_t mask = Map::Bits1::HasNamedInterceptorBit::kMask | |
| Map::Bits1::IsAccessCheckNeededBit::kMask; |
| USE(mask); |
| // Interceptors or access checks imply special receiver. |
| CSA_ASSERT(this, |
| SelectConstant<BoolT>(IsSetWord32(LoadMapBitField(map), mask), |
| is_special, Int32TrueConstant())); |
| return is_special; |
| } |
| |
| TNode<Word32T> CodeStubAssembler::IsStringWrapperElementsKind(TNode<Map> map) { |
| TNode<Int32T> kind = LoadMapElementsKind(map); |
| return Word32Or( |
| Word32Equal(kind, Int32Constant(FAST_STRING_WRAPPER_ELEMENTS)), |
| Word32Equal(kind, Int32Constant(SLOW_STRING_WRAPPER_ELEMENTS))); |
| } |
| |
| void CodeStubAssembler::GotoIfMapHasSlowProperties(TNode<Map> map, |
| Label* if_slow) { |
| GotoIf(IsStringWrapperElementsKind(map), if_slow); |
| GotoIf(IsSpecialReceiverMap(map), if_slow); |
| GotoIf(IsDictionaryMap(map), if_slow); |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::LoadFastProperties( |
| TNode<JSReceiver> object) { |
| CSA_SLOW_ASSERT(this, Word32BinaryNot(IsDictionaryMap(LoadMap(object)))); |
| TNode<Object> properties = LoadJSReceiverPropertiesOrHash(object); |
| return Select<HeapObject>( |
| TaggedIsSmi(properties), [=] { return EmptyFixedArrayConstant(); }, |
| [=] { return CAST(properties); }); |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::LoadSlowProperties( |
| TNode<JSReceiver> object) { |
| CSA_SLOW_ASSERT(this, IsDictionaryMap(LoadMap(object))); |
| TNode<Object> properties = LoadJSReceiverPropertiesOrHash(object); |
| return Select<HeapObject>( |
| TaggedIsSmi(properties), |
| [=] { return EmptyPropertyDictionaryConstant(); }, |
| [=] { return CAST(properties); }); |
| } |
| |
| TNode<Object> CodeStubAssembler::LoadJSArgumentsObjectLength( |
| TNode<Context> context, TNode<JSArgumentsObject> array) { |
| CSA_ASSERT(this, IsJSArgumentsObjectWithLength(context, array)); |
| constexpr int offset = JSStrictArgumentsObject::kLengthOffset; |
| STATIC_ASSERT(offset == JSSloppyArgumentsObject::kLengthOffset); |
| return LoadObjectField(array, offset); |
| } |
| |
| TNode<Smi> CodeStubAssembler::LoadFastJSArrayLength(TNode<JSArray> array) { |
| TNode<Number> length = LoadJSArrayLength(array); |
| CSA_ASSERT(this, Word32Or(IsFastElementsKind(LoadElementsKind(array)), |
| IsElementsKindInRange( |
| LoadElementsKind(array), |
| FIRST_ANY_NONEXTENSIBLE_ELEMENTS_KIND, |
| LAST_ANY_NONEXTENSIBLE_ELEMENTS_KIND))); |
| // JSArray length is always a positive Smi for fast arrays. |
| CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length)); |
| return CAST(length); |
| } |
| |
| TNode<Smi> CodeStubAssembler::LoadFixedArrayBaseLength( |
| TNode<FixedArrayBase> array) { |
| CSA_SLOW_ASSERT(this, IsNotWeakFixedArraySubclass(array)); |
| return LoadObjectField<Smi>(array, FixedArrayBase::kLengthOffset); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::LoadAndUntagFixedArrayBaseLength( |
| TNode<FixedArrayBase> array) { |
| return LoadAndUntagObjectField(array, FixedArrayBase::kLengthOffset); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::LoadFeedbackVectorLength( |
| TNode<FeedbackVector> vector) { |
| return ChangeInt32ToIntPtr( |
| LoadObjectField<Int32T>(vector, FeedbackVector::kLengthOffset)); |
| } |
| |
| TNode<Smi> CodeStubAssembler::LoadWeakFixedArrayLength( |
| TNode<WeakFixedArray> array) { |
| return LoadObjectField<Smi>(array, WeakFixedArray::kLengthOffset); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::LoadAndUntagWeakFixedArrayLength( |
| TNode<WeakFixedArray> array) { |
| return LoadAndUntagObjectField(array, WeakFixedArray::kLengthOffset); |
| } |
| |
| TNode<Int32T> CodeStubAssembler::LoadNumberOfDescriptors( |
| TNode<DescriptorArray> array) { |
| return UncheckedCast<Int32T>(LoadObjectField<Int16T>( |
| array, DescriptorArray::kNumberOfDescriptorsOffset)); |
| } |
| |
| TNode<Int32T> CodeStubAssembler::LoadNumberOfOwnDescriptors(TNode<Map> map) { |
| TNode<Uint32T> bit_field3 = LoadMapBitField3(map); |
| return UncheckedCast<Int32T>( |
| DecodeWord32<Map::Bits3::NumberOfOwnDescriptorsBits>(bit_field3)); |
| } |
| |
| TNode<Int32T> CodeStubAssembler::LoadMapBitField(TNode<Map> map) { |
| return UncheckedCast<Int32T>( |
| LoadObjectField<Uint8T>(map, Map::kBitFieldOffset)); |
| } |
| |
| TNode<Int32T> CodeStubAssembler::LoadMapBitField2(TNode<Map> map) { |
| return UncheckedCast<Int32T>( |
| LoadObjectField<Uint8T>(map, Map::kBitField2Offset)); |
| } |
| |
| TNode<Uint32T> CodeStubAssembler::LoadMapBitField3(TNode<Map> map) { |
| return LoadObjectField<Uint32T>(map, Map::kBitField3Offset); |
| } |
| |
| TNode<Uint16T> CodeStubAssembler::LoadMapInstanceType(TNode<Map> map) { |
| return LoadObjectField<Uint16T>(map, Map::kInstanceTypeOffset); |
| } |
| |
| TNode<Int32T> CodeStubAssembler::LoadMapElementsKind(TNode<Map> map) { |
| TNode<Int32T> bit_field2 = LoadMapBitField2(map); |
| return Signed(DecodeWord32<Map::Bits2::ElementsKindBits>(bit_field2)); |
| } |
| |
| TNode<Int32T> CodeStubAssembler::LoadElementsKind(TNode<HeapObject> object) { |
| return LoadMapElementsKind(LoadMap(object)); |
| } |
| |
| TNode<DescriptorArray> CodeStubAssembler::LoadMapDescriptors(TNode<Map> map) { |
| return LoadObjectField<DescriptorArray>(map, Map::kInstanceDescriptorsOffset); |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::LoadMapPrototype(TNode<Map> map) { |
| return LoadObjectField<HeapObject>(map, Map::kPrototypeOffset); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::LoadMapInstanceSizeInWords(TNode<Map> map) { |
| return ChangeInt32ToIntPtr( |
| LoadObjectField<Uint8T>(map, Map::kInstanceSizeInWordsOffset)); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::LoadMapInobjectPropertiesStartInWords( |
| TNode<Map> map) { |
| // See Map::GetInObjectPropertiesStartInWords() for details. |
| CSA_ASSERT(this, IsJSObjectMap(map)); |
| return ChangeInt32ToIntPtr(LoadObjectField<Uint8T>( |
| map, Map::kInObjectPropertiesStartOrConstructorFunctionIndexOffset)); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::LoadMapConstructorFunctionIndex( |
| TNode<Map> map) { |
| // See Map::GetConstructorFunctionIndex() for details. |
| CSA_ASSERT(this, IsPrimitiveInstanceType(LoadMapInstanceType(map))); |
| return ChangeInt32ToIntPtr(LoadObjectField<Uint8T>( |
| map, Map::kInObjectPropertiesStartOrConstructorFunctionIndexOffset)); |
| } |
| |
| TNode<Object> CodeStubAssembler::LoadMapConstructor(TNode<Map> map) { |
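  // For maps created by transitions this slot holds a back pointer (another
  // Map) rather than the constructor, so follow back pointers until a
  // non-map value is reached.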
| TVARIABLE(Object, result, |
| LoadObjectField( |
| map, Map::kConstructorOrBackPointerOrNativeContextOffset)); |
| |
| Label done(this), loop(this, &result); |
| Goto(&loop); |
| BIND(&loop); |
| { |
| GotoIf(TaggedIsSmi(result.value()), &done); |
| TNode<BoolT> is_map_type = |
| InstanceTypeEqual(LoadInstanceType(CAST(result.value())), MAP_TYPE); |
| GotoIfNot(is_map_type, &done); |
| result = |
| LoadObjectField(CAST(result.value()), |
| Map::kConstructorOrBackPointerOrNativeContextOffset); |
| Goto(&loop); |
| } |
| BIND(&done); |
| return result.value(); |
| } |
| |
| TNode<WordT> CodeStubAssembler::LoadMapEnumLength(TNode<Map> map) { |
| TNode<Uint32T> bit_field3 = LoadMapBitField3(map); |
| return DecodeWordFromWord32<Map::Bits3::EnumLengthBits>(bit_field3); |
| } |
| |
| TNode<Object> CodeStubAssembler::LoadMapBackPointer(TNode<Map> map) { |
| TNode<HeapObject> object = CAST(LoadObjectField( |
| map, Map::kConstructorOrBackPointerOrNativeContextOffset)); |
| return Select<Object>( |
| IsMap(object), [=] { return object; }, |
| [=] { return UndefinedConstant(); }); |
| } |
| |
| TNode<Uint32T> CodeStubAssembler::EnsureOnlyHasSimpleProperties( |
| TNode<Map> map, TNode<Int32T> instance_type, Label* bailout) { |
| // This check can have false positives, since it applies to any |
| // JSPrimitiveWrapper type. |
| GotoIf(IsCustomElementsReceiverInstanceType(instance_type), bailout); |
| |
| TNode<Uint32T> bit_field3 = LoadMapBitField3(map); |
| GotoIf(IsSetWord32(bit_field3, Map::Bits3::IsDictionaryMapBit::kMask), |
| bailout); |
| |
| return bit_field3; |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::LoadJSReceiverIdentityHash( |
| SloppyTNode<Object> receiver, Label* if_no_hash) { |
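  // The kPropertiesOrHash slot holds one of: a Smi identity hash, a
  // PropertyArray (hash packed into its length-and-hash word), or a
  // NameDictionary (hash stored at kObjectHashIndex). Any other backing
  // store is treated as having no identity hash.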
| TVARIABLE(IntPtrT, var_hash); |
| Label done(this), if_smi(this), if_property_array(this), |
| if_property_dictionary(this), if_fixed_array(this); |
| |
| TNode<Object> properties_or_hash = |
| LoadObjectField(TNode<HeapObject>::UncheckedCast(receiver), |
| JSReceiver::kPropertiesOrHashOffset); |
| GotoIf(TaggedIsSmi(properties_or_hash), &if_smi); |
| |
| TNode<HeapObject> properties = |
| TNode<HeapObject>::UncheckedCast(properties_or_hash); |
| TNode<Uint16T> properties_instance_type = LoadInstanceType(properties); |
| |
| GotoIf(InstanceTypeEqual(properties_instance_type, PROPERTY_ARRAY_TYPE), |
| &if_property_array); |
| Branch(InstanceTypeEqual(properties_instance_type, NAME_DICTIONARY_TYPE), |
| &if_property_dictionary, &if_fixed_array); |
| |
| BIND(&if_fixed_array); |
| { |
| var_hash = IntPtrConstant(PropertyArray::kNoHashSentinel); |
| Goto(&done); |
| } |
| |
| BIND(&if_smi); |
| { |
| var_hash = SmiUntag(TNode<Smi>::UncheckedCast(properties_or_hash)); |
| Goto(&done); |
| } |
| |
| BIND(&if_property_array); |
| { |
| TNode<IntPtrT> length_and_hash = LoadAndUntagObjectField( |
| properties, PropertyArray::kLengthAndHashOffset); |
| var_hash = TNode<IntPtrT>::UncheckedCast( |
| DecodeWord<PropertyArray::HashField>(length_and_hash)); |
| Goto(&done); |
| } |
| |
| BIND(&if_property_dictionary); |
| { |
| var_hash = SmiUntag(CAST(LoadFixedArrayElement( |
| CAST(properties), NameDictionary::kObjectHashIndex))); |
| Goto(&done); |
| } |
| |
| BIND(&done); |
| if (if_no_hash != nullptr) { |
| GotoIf(IntPtrEqual(var_hash.value(), |
| IntPtrConstant(PropertyArray::kNoHashSentinel)), |
| if_no_hash); |
| } |
| return var_hash.value(); |
| } |
| |
| TNode<Uint32T> CodeStubAssembler::LoadNameHashAssumeComputed(TNode<Name> name) { |
| TNode<Uint32T> hash_field = LoadNameHashField(name); |
| CSA_ASSERT(this, IsClearWord32(hash_field, Name::kHashNotComputedMask)); |
| return Unsigned(Word32Shr(hash_field, Int32Constant(Name::kHashShift))); |
| } |
| |
| TNode<Uint32T> CodeStubAssembler::LoadNameHash(TNode<Name> name, |
| Label* if_hash_not_computed) { |
| TNode<Uint32T> hash_field = LoadNameHashField(name); |
| if (if_hash_not_computed != nullptr) { |
| GotoIf(IsSetWord32(hash_field, Name::kHashNotComputedMask), |
| if_hash_not_computed); |
| } |
| return Unsigned(Word32Shr(hash_field, Int32Constant(Name::kHashShift))); |
| } |
| |
| TNode<Smi> CodeStubAssembler::LoadStringLengthAsSmi(TNode<String> string) { |
| return SmiFromIntPtr(LoadStringLengthAsWord(string)); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::LoadStringLengthAsWord(TNode<String> string) { |
| return Signed(ChangeUint32ToWord(LoadStringLengthAsWord32(string))); |
| } |
| |
| TNode<Uint32T> CodeStubAssembler::LoadStringLengthAsWord32( |
| TNode<String> string) { |
| return LoadObjectField<Uint32T>(string, String::kLengthOffset); |
| } |
| |
| TNode<Object> CodeStubAssembler::LoadJSPrimitiveWrapperValue( |
| TNode<JSPrimitiveWrapper> object) { |
| return LoadObjectField(object, JSPrimitiveWrapper::kValueOffset); |
| } |
| |
| void CodeStubAssembler::DispatchMaybeObject(TNode<MaybeObject> maybe_object, |
| Label* if_smi, Label* if_cleared, |
| Label* if_weak, Label* if_strong, |
| TVariable<Object>* extracted) { |
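  // Dispatch on the representation of |maybe_object|: Smi, cleared weak
  // reference, strong reference, or weak reference. For the Smi, strong and
  // weak cases the referenced object is also written to |*extracted|.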
| Label inner_if_smi(this), inner_if_strong(this); |
| |
| GotoIf(TaggedIsSmi(maybe_object), &inner_if_smi); |
| |
| GotoIf(IsCleared(maybe_object), if_cleared); |
| |
| GotoIf(IsStrong(maybe_object), &inner_if_strong); |
| |
| *extracted = GetHeapObjectAssumeWeak(maybe_object); |
| Goto(if_weak); |
| |
| BIND(&inner_if_smi); |
| *extracted = CAST(maybe_object); |
| Goto(if_smi); |
| |
| BIND(&inner_if_strong); |
| *extracted = CAST(maybe_object); |
| Goto(if_strong); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::IsStrong(TNode<MaybeObject> value) { |
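  // A strong reference has its low pointer tag bits equal to kHeapObjectTag;
  // a weak reference has them equal to kWeakHeapObjectTag.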
| return Word32Equal(Word32And(TruncateIntPtrToInt32( |
| BitcastTaggedToWordForTagAndSmiBits(value)), |
| Int32Constant(kHeapObjectTagMask)), |
| Int32Constant(kHeapObjectTag)); |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::GetHeapObjectIfStrong( |
| TNode<MaybeObject> value, Label* if_not_strong) { |
| GotoIfNot(IsStrong(value), if_not_strong); |
| return CAST(value); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::IsWeakOrCleared(TNode<MaybeObject> value) { |
| return Word32Equal(Word32And(TruncateIntPtrToInt32( |
| BitcastTaggedToWordForTagAndSmiBits(value)), |
| Int32Constant(kHeapObjectTagMask)), |
| Int32Constant(kWeakHeapObjectTag)); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::IsCleared(TNode<MaybeObject> value) { |
| return Word32Equal(TruncateIntPtrToInt32(BitcastMaybeObjectToWord(value)), |
| Int32Constant(kClearedWeakHeapObjectLower32)); |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::GetHeapObjectAssumeWeak( |
| TNode<MaybeObject> value) { |
| CSA_ASSERT(this, IsWeakOrCleared(value)); |
| CSA_ASSERT(this, IsNotCleared(value)); |
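  // Clear the weak tag bit to recover the plain heap object pointer.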
| return UncheckedCast<HeapObject>(BitcastWordToTagged(WordAnd( |
| BitcastMaybeObjectToWord(value), IntPtrConstant(~kWeakHeapObjectMask)))); |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::GetHeapObjectAssumeWeak( |
| TNode<MaybeObject> value, Label* if_cleared) { |
| GotoIf(IsCleared(value), if_cleared); |
| return GetHeapObjectAssumeWeak(value); |
| } |
| |
| // This version generates |
| // (maybe_object & ~mask) == value |
// It works for a non-Smi |maybe_object| and for both Smi and HeapObject
// values, but requires a big constant for ~mask.
| TNode<BoolT> CodeStubAssembler::IsWeakReferenceToObject( |
| TNode<MaybeObject> maybe_object, TNode<Object> value) { |
| CSA_ASSERT(this, TaggedIsNotSmi(maybe_object)); |
| if (COMPRESS_POINTERS_BOOL) { |
| return Word32Equal( |
| Word32And(TruncateWordToInt32(BitcastMaybeObjectToWord(maybe_object)), |
| Uint32Constant(~static_cast<uint32_t>(kWeakHeapObjectMask))), |
| TruncateWordToInt32(BitcastTaggedToWord(value))); |
| } else { |
| return WordEqual(WordAnd(BitcastMaybeObjectToWord(maybe_object), |
| IntPtrConstant(~kWeakHeapObjectMask)), |
| BitcastTaggedToWord(value)); |
| } |
| } |
| |
| // This version generates |
| // maybe_object == (heap_object | mask) |
// It works for any |maybe_object| value and generates better code because it
// uses a small constant for the mask.
| TNode<BoolT> CodeStubAssembler::IsWeakReferenceTo( |
| TNode<MaybeObject> maybe_object, TNode<HeapObject> heap_object) { |
| if (COMPRESS_POINTERS_BOOL) { |
| return Word32Equal( |
| TruncateWordToInt32(BitcastMaybeObjectToWord(maybe_object)), |
| Word32Or(TruncateWordToInt32(BitcastTaggedToWord(heap_object)), |
| Int32Constant(kWeakHeapObjectMask))); |
| } else { |
| return WordEqual(BitcastMaybeObjectToWord(maybe_object), |
| WordOr(BitcastTaggedToWord(heap_object), |
| IntPtrConstant(kWeakHeapObjectMask))); |
| } |
| } |
| |
| TNode<MaybeObject> CodeStubAssembler::MakeWeak(TNode<HeapObject> value) { |
| return ReinterpretCast<MaybeObject>(BitcastWordToTagged( |
| WordOr(BitcastTaggedToWord(value), IntPtrConstant(kWeakHeapObjectTag)))); |
| } |
| |
| template <> |
| TNode<IntPtrT> CodeStubAssembler::LoadArrayLength(TNode<FixedArray> array) { |
| return LoadAndUntagFixedArrayBaseLength(array); |
| } |
| |
| template <> |
| TNode<IntPtrT> CodeStubAssembler::LoadArrayLength(TNode<WeakFixedArray> array) { |
| return LoadAndUntagWeakFixedArrayLength(array); |
| } |
| |
| template <> |
| TNode<IntPtrT> CodeStubAssembler::LoadArrayLength(TNode<PropertyArray> array) { |
| return LoadPropertyArrayLength(array); |
| } |
| |
| template <> |
| TNode<IntPtrT> CodeStubAssembler::LoadArrayLength( |
| TNode<DescriptorArray> array) { |
| return IntPtrMul(ChangeInt32ToIntPtr(LoadNumberOfDescriptors(array)), |
| IntPtrConstant(DescriptorArray::kEntrySize)); |
| } |
| |
| template <> |
| TNode<IntPtrT> CodeStubAssembler::LoadArrayLength( |
| TNode<TransitionArray> array) { |
| return LoadAndUntagWeakFixedArrayLength(array); |
| } |
| |
| template <typename Array, typename TIndex, typename TValue> |
| TNode<TValue> CodeStubAssembler::LoadArrayElement( |
| TNode<Array> array, int array_header_size, TNode<TIndex> index_node, |
| int additional_offset, LoadSensitivity needs_poisoning) { |
| // TODO(v8:9708): Do we want to keep both IntPtrT and UintPtrT variants? |
| static_assert(std::is_same<TIndex, Smi>::value || |
| std::is_same<TIndex, UintPtrT>::value || |
| std::is_same<TIndex, IntPtrT>::value, |
| "Only Smi, UintPtrT or IntPtrT indices are allowed"); |
| CSA_ASSERT(this, IntPtrGreaterThanOrEqual(ParameterToIntPtr(index_node), |
| IntPtrConstant(0))); |
| DCHECK(IsAligned(additional_offset, kTaggedSize)); |
| int32_t header_size = array_header_size + additional_offset - kHeapObjectTag; |
| TNode<IntPtrT> offset = |
| ElementOffsetFromIndex(index_node, HOLEY_ELEMENTS, header_size); |
| CSA_ASSERT(this, IsOffsetInBounds(offset, LoadArrayLength(array), |
| array_header_size)); |
| constexpr MachineType machine_type = MachineTypeOf<TValue>::value; |
| // TODO(gsps): Remove the Load case once LoadFromObject supports poisoning |
| if (needs_poisoning == LoadSensitivity::kSafe) { |
| return UncheckedCast<TValue>(LoadFromObject(machine_type, array, offset)); |
| } else { |
| return UncheckedCast<TValue>( |
| Load(machine_type, array, offset, needs_poisoning)); |
| } |
| } |
| |
| template V8_EXPORT_PRIVATE TNode<MaybeObject> |
| CodeStubAssembler::LoadArrayElement<TransitionArray, IntPtrT>( |
| TNode<TransitionArray>, int, TNode<IntPtrT>, int, LoadSensitivity); |
| |
| template <typename TIndex> |
| TNode<Object> CodeStubAssembler::LoadFixedArrayElement( |
| TNode<FixedArray> object, TNode<TIndex> index, int additional_offset, |
| LoadSensitivity needs_poisoning, CheckBounds check_bounds) { |
| // TODO(v8:9708): Do we want to keep both IntPtrT and UintPtrT variants? |
| static_assert(std::is_same<TIndex, Smi>::value || |
| std::is_same<TIndex, UintPtrT>::value || |
| std::is_same<TIndex, IntPtrT>::value, |
| "Only Smi, UintPtrT or IntPtrT indexes are allowed"); |
| CSA_ASSERT(this, IsFixedArraySubclass(object)); |
| CSA_ASSERT(this, IsNotWeakFixedArraySubclass(object)); |
| |
| if (NeedsBoundsCheck(check_bounds)) { |
| FixedArrayBoundsCheck(object, index, additional_offset); |
| } |
| TNode<MaybeObject> element = |
| LoadArrayElement(object, FixedArray::kHeaderSize, index, |
| additional_offset, needs_poisoning); |
| return CAST(element); |
| } |
| |
| template V8_EXPORT_PRIVATE TNode<Object> |
| CodeStubAssembler::LoadFixedArrayElement<Smi>(TNode<FixedArray>, TNode<Smi>, |
| int, LoadSensitivity, |
| CheckBounds); |
| template V8_EXPORT_PRIVATE TNode<Object> |
| CodeStubAssembler::LoadFixedArrayElement<UintPtrT>(TNode<FixedArray>, |
| TNode<UintPtrT>, int, |
| LoadSensitivity, |
| CheckBounds); |
| template V8_EXPORT_PRIVATE TNode<Object> |
| CodeStubAssembler::LoadFixedArrayElement<IntPtrT>(TNode<FixedArray>, |
| TNode<IntPtrT>, int, |
| LoadSensitivity, CheckBounds); |
| |
| void CodeStubAssembler::FixedArrayBoundsCheck(TNode<FixedArrayBase> array, |
| TNode<Smi> index, |
| int additional_offset) { |
| if (!FLAG_fixed_array_bounds_checks) return; |
| DCHECK(IsAligned(additional_offset, kTaggedSize)); |
| TNode<Smi> effective_index; |
| Smi constant_index; |
| bool index_is_constant = ToSmiConstant(index, &constant_index); |
| if (index_is_constant) { |
| effective_index = SmiConstant(Smi::ToInt(constant_index) + |
| additional_offset / kTaggedSize); |
| } else { |
| effective_index = |
| SmiAdd(index, SmiConstant(additional_offset / kTaggedSize)); |
| } |
| CSA_CHECK(this, SmiBelow(effective_index, LoadFixedArrayBaseLength(array))); |
| } |
| |
| void CodeStubAssembler::FixedArrayBoundsCheck(TNode<FixedArrayBase> array, |
| TNode<IntPtrT> index, |
| int additional_offset) { |
| if (!FLAG_fixed_array_bounds_checks) return; |
| DCHECK(IsAligned(additional_offset, kTaggedSize)); |
| // IntPtrAdd does constant-folding automatically. |
| TNode<IntPtrT> effective_index = |
| IntPtrAdd(index, IntPtrConstant(additional_offset / kTaggedSize)); |
| CSA_CHECK(this, UintPtrLessThan(effective_index, |
| LoadAndUntagFixedArrayBaseLength(array))); |
| } |
| |
| TNode<Object> CodeStubAssembler::LoadPropertyArrayElement( |
| TNode<PropertyArray> object, SloppyTNode<IntPtrT> index) { |
| int additional_offset = 0; |
| LoadSensitivity needs_poisoning = LoadSensitivity::kSafe; |
| return CAST(LoadArrayElement(object, PropertyArray::kHeaderSize, index, |
| additional_offset, needs_poisoning)); |
| } |
| |
| TNode<IntPtrT> CodeStubAssembler::LoadPropertyArrayLength( |
| TNode<PropertyArray> object) { |
| TNode<IntPtrT> value = |
| LoadAndUntagObjectField(object, PropertyArray::kLengthAndHashOffset); |
| return Signed(DecodeWord<PropertyArray::LengthField>(value)); |
| } |
| |
| TNode<RawPtrT> CodeStubAssembler::LoadJSTypedArrayDataPtr( |
| TNode<JSTypedArray> typed_array) { |
| // Data pointer = external_pointer + static_cast<Tagged_t>(base_pointer). |
| TNode<RawPtrT> external_pointer = |
| LoadJSTypedArrayExternalPointerPtr(typed_array); |
| |
| TNode<IntPtrT> base_pointer; |
| if (COMPRESS_POINTERS_BOOL) { |
| TNode<Int32T> compressed_base = |
| LoadObjectField<Int32T>(typed_array, JSTypedArray::kBasePointerOffset); |
    // Zero-extend TaggedT to WordT according to the current compression
    // scheme so that the addition with |external_pointer| (which already
    // contains the compensated offset value) below will decompress the
    // tagged value. See
    // JSTypedArray::ExternalPointerCompensationForOnHeapArray() for details.
| base_pointer = Signed(ChangeUint32ToWord(compressed_base)); |
| } else { |
| base_pointer = |
| LoadObjectField<IntPtrT>(typed_array, JSTypedArray::kBasePointerOffset); |
| } |
| return RawPtrAdd(external_pointer, base_pointer); |
| } |
| |
| TNode<BigInt> CodeStubAssembler::LoadFixedBigInt64ArrayElementAsTagged( |
| SloppyTNode<RawPtrT> data_pointer, SloppyTNode<IntPtrT> offset) { |
| if (Is64()) { |
| TNode<IntPtrT> value = Load<IntPtrT>(data_pointer, offset); |
| return BigIntFromInt64(value); |
| } else { |
| DCHECK(!Is64()); |
| #if defined(V8_TARGET_BIG_ENDIAN) |
| TNode<IntPtrT> high = Load<IntPtrT>(data_pointer, offset); |
| TNode<IntPtrT> low = Load<IntPtrT>( |
| data_pointer, IntPtrAdd(offset, IntPtrConstant(kSystemPointerSize))); |
| #else |
| TNode<IntPtrT> low = Load<IntPtrT>(data_pointer, offset); |
| TNode<IntPtrT> high = Load<IntPtrT>( |
| data_pointer, IntPtrAdd(offset, IntPtrConstant(kSystemPointerSize))); |
| #endif |
| return BigIntFromInt32Pair(low, high); |
| } |
| } |
| |
| TNode<BigInt> CodeStubAssembler::BigIntFromInt32Pair(TNode<IntPtrT> low, |
| TNode<IntPtrT> high) { |
| DCHECK(!Is64()); |
| TVARIABLE(BigInt, var_result); |
| TVARIABLE(Word32T, var_sign, Int32Constant(BigInt::SignBits::encode(false))); |
| TVARIABLE(IntPtrT, var_high, high); |
| TVARIABLE(IntPtrT, var_low, low); |
| Label high_zero(this), negative(this), allocate_one_digit(this), |
| allocate_two_digits(this), if_zero(this), done(this); |
| |
| GotoIf(IntPtrEqual(var_high.value(), IntPtrConstant(0)), &high_zero); |
| Branch(IntPtrLessThan(var_high.value(), IntPtrConstant(0)), &negative, |
| &allocate_two_digits); |
| |
| BIND(&high_zero); |
| Branch(IntPtrEqual(var_low.value(), IntPtrConstant(0)), &if_zero, |
| &allocate_one_digit); |
| |
| BIND(&negative); |
| { |
| var_sign = Int32Constant(BigInt::SignBits::encode(true)); |
| // We must negate the value by computing "0 - (high|low)", performing |
| // both parts of the subtraction separately and manually taking care |
| // of the carry bit (which is 1 iff low != 0). |
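    // For example, the 64-bit value -5 arrives as high = 0xFFFFFFFF,
    // low = 0xFFFFFFFB; negation yields high = 0 (after applying the carry)
    // and low = 5, i.e. a one-digit BigInt with the sign bit set.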
| var_high = IntPtrSub(IntPtrConstant(0), var_high.value()); |
| Label carry(this), no_carry(this); |
| Branch(IntPtrEqual(var_low.value(), IntPtrConstant(0)), &no_carry, &carry); |
| BIND(&carry); |
| var_high = IntPtrSub(var_high.value(), IntPtrConstant(1)); |
| Goto(&no_carry); |
| BIND(&no_carry); |
| var_low = IntPtrSub(IntPtrConstant(0), var_low.value()); |
| // var_high was non-zero going into this block, but subtracting the |
| // carry bit from it could bring us back onto the "one digit" path. |
| Branch(IntPtrEqual(var_high.value(), IntPtrConstant(0)), |
| &allocate_one_digit, &allocate_two_digits); |
| } |
| |
| BIND(&allocate_one_digit); |
| { |
| var_result = AllocateRawBigInt(IntPtrConstant(1)); |
| StoreBigIntBitfield(var_result.value(), |
| Word32Or(var_sign.value(), |
| Int32Constant(BigInt::LengthBits::encode(1)))); |
| StoreBigIntDigit(var_result.value(), 0, Unsigned(var_low.value())); |
| Goto(&done); |
| } |
| |
| BIND(&allocate_two_digits); |
| { |
| var_result = AllocateRawBigInt(IntPtrConstant(2)); |
| StoreBigIntBitfield(var_result.value(), |
| Word32Or(var_sign.value(), |
| Int32Constant(BigInt::LengthBits::encode(2)))); |
| StoreBigIntDigit(var_result.value(), 0, Unsigned(var_low.value())); |
| StoreBigIntDigit(var_result.value(), 1, Unsigned(var_high.value())); |
| Goto(&done); |
| } |
| |
| BIND(&if_zero); |
| var_result = AllocateBigInt(IntPtrConstant(0)); |
| Goto(&done); |
| |
| BIND(&done); |
| return var_result.value(); |
| } |
| |
| TNode<BigInt> CodeStubAssembler::BigIntFromInt64(TNode<IntPtrT> value) { |
| DCHECK(Is64()); |
| TVARIABLE(BigInt, var_result); |
| Label done(this), if_positive(this), if_negative(this), if_zero(this); |
| GotoIf(IntPtrEqual(value, IntPtrConstant(0)), &if_zero); |
| var_result = AllocateRawBigInt(IntPtrConstant(1)); |
| Branch(IntPtrGreaterThan(value, IntPtrConstant(0)), &if_positive, |
| &if_negative); |
| |
| BIND(&if_positive); |
| { |
| StoreBigIntBitfield(var_result.value(), |
| Int32Constant(BigInt::SignBits::encode(false) | |
| BigInt::LengthBits::encode(1))); |
| StoreBigIntDigit(var_result.value(), 0, Unsigned(value)); |
| Goto(&done); |
| } |
| |
| BIND(&if_negative); |
| { |
| StoreBigIntBitfield(var_result.value(), |
| Int32Constant(BigInt::SignBits::encode(true) | |
| BigInt::LengthBits::encode(1))); |
| StoreBigIntDigit(var_result.value(), 0, |
| Unsigned(IntPtrSub(IntPtrConstant(0), value))); |
| Goto(&done); |
| } |
| |
| BIND(&if_zero); |
| { |
| var_result = AllocateBigInt(IntPtrConstant(0)); |
| Goto(&done); |
| } |
| |
| BIND(&done); |
| return var_result.value(); |
| } |
| |
| TNode<BigInt> CodeStubAssembler::LoadFixedBigUint64ArrayElementAsTagged( |
| SloppyTNode<RawPtrT> data_pointer, SloppyTNode<IntPtrT> offset) { |
| Label if_zero(this), done(this); |
| if (Is64()) { |
| TNode<UintPtrT> value = Load<UintPtrT>(data_pointer, offset); |
| return BigIntFromUint64(value); |
| } else { |
| DCHECK(!Is64()); |
| #if defined(V8_TARGET_BIG_ENDIAN) |
| TNode<UintPtrT> high = Load<UintPtrT>(data_pointer, offset); |
| TNode<UintPtrT> low = Load<UintPtrT>( |
| data_pointer, IntPtrAdd(offset, IntPtrConstant(kSystemPointerSize))); |
| #else |
| TNode<UintPtrT> low = Load<UintPtrT>(data_pointer, offset); |
| TNode<UintPtrT> high = Load<UintPtrT>( |
| data_pointer, IntPtrAdd(offset, IntPtrConstant(kSystemPointerSize))); |
| #endif |
| return BigIntFromUint32Pair(low, high); |
| } |
| } |
| |
| TNode<BigInt> CodeStubAssembler::BigIntFromUint32Pair(TNode<UintPtrT> low, |
| TNode<UintPtrT> high) { |
| DCHECK(!Is64()); |
| TVARIABLE(BigInt, var_result); |
| Label high_zero(this), if_zero(this), done(this); |
| |
| GotoIf(IntPtrEqual(high, IntPtrConstant(0)), &high_zero); |
| var_result = AllocateBigInt(IntPtrConstant(2)); |
| StoreBigIntDigit(var_result.value(), 0, low); |
| StoreBigIntDigit(var_result.value(), 1, high); |
| Goto(&done); |
| |
| BIND(&high_zero); |
| GotoIf(IntPtrEqual(low, IntPtrConstant(0)), &if_zero); |
| var_result = AllocateBigInt(IntPtrConstant(1)); |
| StoreBigIntDigit(var_result.value(), 0, low); |
| Goto(&done); |
| |
| BIND(&if_zero); |
| var_result = AllocateBigInt(IntPtrConstant(0)); |
| Goto(&done); |
| |
| BIND(&done); |
| return var_result.value(); |
| } |
| |
| TNode<BigInt> CodeStubAssembler::BigIntFromUint64(TNode<UintPtrT> value) { |
| DCHECK(Is64()); |
| TVARIABLE(BigInt, var_result); |
| Label done(this), if_zero(this); |
| GotoIf(IntPtrEqual(value, IntPtrConstant(0)), &if_zero); |
| var_result = AllocateBigInt(IntPtrConstant(1)); |
| StoreBigIntDigit(var_result.value(), 0, value); |
| Goto(&done); |
| |
| BIND(&if_zero); |
| var_result = AllocateBigInt(IntPtrConstant(0)); |
| Goto(&done); |
| BIND(&done); |
| return var_result.value(); |
| } |
| |
| TNode<Numeric> CodeStubAssembler::LoadFixedTypedArrayElementAsTagged( |
| TNode<RawPtrT> data_pointer, TNode<UintPtrT> index, |
| ElementsKind elements_kind) { |
| TNode<IntPtrT> offset = |
| ElementOffsetFromIndex(Signed(index), elements_kind, 0); |
| switch (elements_kind) { |
| case UINT8_ELEMENTS: /* fall through */ |
| case UINT8_CLAMPED_ELEMENTS: |
| return SmiFromInt32(Load<Uint8T>(data_pointer, offset)); |
| case INT8_ELEMENTS: |
| return SmiFromInt32(Load<Int8T>(data_pointer, offset)); |
| case UINT16_ELEMENTS: |
| return SmiFromInt32(Load<Uint16T>(data_pointer, offset)); |
| case INT16_ELEMENTS: |
| return SmiFromInt32(Load<Int16T>(data_pointer, offset)); |
| case UINT32_ELEMENTS: |
| return ChangeUint32ToTagged(Load<Uint32T>(data_pointer, offset)); |
| case INT32_ELEMENTS: |
| return ChangeInt32ToTagged(Load<Int32T>(data_pointer, offset)); |
| case FLOAT32_ELEMENTS: |
| return AllocateHeapNumberWithValue( |
| ChangeFloat32ToFloat64(Load<Float32T>(data_pointer, offset))); |
| case FLOAT64_ELEMENTS: |
| return AllocateHeapNumberWithValue(Load<Float64T>(data_pointer, offset)); |
| case BIGINT64_ELEMENTS: |
| return LoadFixedBigInt64ArrayElementAsTagged(data_pointer, offset); |
| case BIGUINT64_ELEMENTS: |
| return LoadFixedBigUint64ArrayElementAsTagged(data_pointer, offset); |
| default: |
| UNREACHABLE(); |
| } |
| } |
| |
| TNode<Numeric> CodeStubAssembler::LoadFixedTypedArrayElementAsTagged( |
| TNode<RawPtrT> data_pointer, TNode<UintPtrT> index, |
| TNode<Int32T> elements_kind) { |
| TVARIABLE(Numeric, var_result); |
| Label done(this), if_unknown_type(this, Label::kDeferred); |
| int32_t elements_kinds[] = { |
| #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) TYPE##_ELEMENTS, |
| TYPED_ARRAYS(TYPED_ARRAY_CASE) |
| #undef TYPED_ARRAY_CASE |
| }; |
| |
| #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) Label if_##type##array(this); |
| TYPED_ARRAYS(TYPED_ARRAY_CASE) |
| #undef TYPED_ARRAY_CASE |
| |
| Label* elements_kind_labels[] = { |
| #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) &if_##type##array, |
| TYPED_ARRAYS(TYPED_ARRAY_CASE) |
| #undef TYPED_ARRAY_CASE |
| }; |
| STATIC_ASSERT(arraysize(elements_kinds) == arraysize(elements_kind_labels)); |
| |
| Switch(elements_kind, &if_unknown_type, elements_kinds, elements_kind_labels, |
| arraysize(elements_kinds)); |
| |
| BIND(&if_unknown_type); |
| Unreachable(); |
| |
| #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \ |
| BIND(&if_##type##array); \ |
| { \ |
| var_result = LoadFixedTypedArrayElementAsTagged(data_pointer, index, \ |
| TYPE##_ELEMENTS); \ |
| Goto(&done); \ |
| } |
| TYPED_ARRAYS(TYPED_ARRAY_CASE) |
| #undef TYPED_ARRAY_CASE |
| |
| BIND(&done); |
| return var_result.value(); |
| } |
| |
| template <typename TIndex> |
| TNode<MaybeObject> CodeStubAssembler::LoadFeedbackVectorSlot( |
| TNode<FeedbackVector> feedback_vector, TNode<TIndex> slot, |
| int additional_offset) { |
| int32_t header_size = FeedbackVector::kRawFeedbackSlotsOffset + |
| additional_offset - kHeapObjectTag; |
| TNode<IntPtrT> offset = |
| ElementOffsetFromIndex(slot, HOLEY_ELEMENTS, header_size); |
| CSA_SLOW_ASSERT( |
| this, IsOffsetInBounds(offset, LoadFeedbackVectorLength(feedback_vector), |
| FeedbackVector::kHeaderSize)); |
| return Load<MaybeObject>(feedback_vector, offset); |
| } |
| |
| template TNode<MaybeObject> CodeStubAssembler::LoadFeedbackVectorSlot( |
| TNode<FeedbackVector> feedback_vector, TNode<TaggedIndex> slot, |
| int additional_offset); |
| template TNode<MaybeObject> CodeStubAssembler::LoadFeedbackVectorSlot( |
| TNode<FeedbackVector> feedback_vector, TNode<IntPtrT> slot, |
| int additional_offset); |
| template TNode<MaybeObject> CodeStubAssembler::LoadFeedbackVectorSlot( |
| TNode<FeedbackVector> feedback_vector, TNode<UintPtrT> slot, |
| int additional_offset); |
| |
| template <typename Array> |
| TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32ArrayElement( |
| TNode<Array> object, int array_header_size, TNode<IntPtrT> index, |
| int additional_offset) { |
| DCHECK(IsAligned(additional_offset, kTaggedSize)); |
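  // With 32-bit Smis on a little-endian target the Smi payload lives in the
  // upper half of the tagged word, so the 32-bit load below is offset by 4
  // bytes (mirroring LoadAndUntagToWord32ObjectField).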
| int endian_correction = 0; |
| #if V8_TARGET_LITTLE_ENDIAN |
| if (SmiValuesAre32Bits()) endian_correction = 4; |
| #endif |
| int32_t header_size = array_header_size + additional_offset - kHeapObjectTag + |
| endian_correction; |
| TNode<IntPtrT> offset = |
| ElementOffsetFromIndex(index, HOLEY_ELEMENTS, header_size); |
| CSA_ASSERT(this, IsOffsetInBounds(offset, LoadArrayLength(object), |
| array_header_size + endian_correction)); |
| if (SmiValuesAre32Bits()) { |
| return Load<Int32T>(object, offset); |
| } else { |
| return SmiToInt32(Load(MachineType::TaggedSigned(), object, offset)); |
| } |
| } |
| |
| TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement( |
| TNode<FixedArray> object, TNode<IntPtrT> index, int additional_offset) { |
| CSA_SLOW_ASSERT(this, IsFixedArraySubclass(object)); |
| return LoadAndUntagToWord32ArrayElement(object, FixedArray::kHeaderSize, |
| index, additional_offset); |
| } |
| |
| TNode<MaybeObject> CodeStubAssembler::LoadWeakFixedArrayElement( |
| TNode<WeakFixedArray> object, TNode<IntPtrT> index, int additional_offset) { |
| return LoadArrayElement(object, WeakFixedArray::kHeaderSize, index, |
| additional_offset, LoadSensitivity::kSafe); |
| } |
| |
| TNode<Float64T> CodeStubAssembler::LoadFixedDoubleArrayElement( |
| TNode<FixedDoubleArray> object, TNode<IntPtrT> index, Label* if_hole, |
| MachineType machine_type) { |
| int32_t header_size = FixedDoubleArray::kHeaderSize - kHeapObjectTag; |
| TNode<IntPtrT> offset = |
| ElementOffsetFromIndex(index, HOLEY_DOUBLE_ELEMENTS, header_size); |
| CSA_ASSERT(this, IsOffsetInBounds( |
| offset, LoadAndUntagFixedArrayBaseLength(object), |
| FixedDoubleArray::kHeaderSize, HOLEY_DOUBLE_ELEMENTS)); |
| return LoadDoubleWithHoleCheck(object, offset, if_hole, machine_type); |
| } |
| |
| TNode<Object> CodeStubAssembler::LoadFixedArrayBaseElementAsTagged( |
| TNode<FixedArrayBase> elements, TNode<IntPtrT> index, |
| TNode<Int32T> elements_kind, Label* if_accessor, Label* if_hole) { |
| TVARIABLE(Object, var_result); |
| Label done(this), if_packed(this), if_holey(this), if_packed_double(this), |
| if_holey_double(this), if_dictionary(this, Label::kDeferred); |
| |
| int32_t kinds[] = { |
| // Handled by if_packed. |
| PACKED_SMI_ELEMENTS, PACKED_ELEMENTS, PACKED_NONEXTENSIBLE_ELEMENTS, |
| PACKED_SEALED_ELEMENTS, PACKED_FROZEN_ELEMENTS, |
| // Handled by if_holey. |
| HOLEY_SMI_ELEMENTS, HOLEY_ELEMENTS, HOLEY_NONEXTENSIBLE_ELEMENTS, |
| HOLEY_SEALED_ELEMENTS, HOLEY_FROZEN_ELEMENTS, |
| // Handled by if_packed_double. |
| PACKED_DOUBLE_ELEMENTS, |
| // Handled by if_holey_double. |
| HOLEY_DOUBLE_ELEMENTS}; |
| Label* labels[] = {// PACKED_{SMI,}_ELEMENTS |
| &if_packed, &if_packed, &if_packed, &if_packed, &if_packed, |
| // HOLEY_{SMI,}_ELEMENTS |
| &if_holey, &if_holey, &if_holey, &if_holey, &if_holey, |
| // PACKED_DOUBLE_ELEMENTS |
| &if_packed_double, |
| // HOLEY_DOUBLE_ELEMENTS |
| &if_holey_double}; |
| Switch(elements_kind, &if_dictionary, kinds, labels, arraysize(kinds)); |
| |
| BIND(&if_packed); |
| { |
| var_result = LoadFixedArrayElement(CAST(elements), index, 0); |
| Goto(&done); |
| } |
| |
| BIND(&if_holey); |
| { |
| var_result = LoadFixedArrayElement(CAST(elements), index); |
| Branch(TaggedEqual(var_result.value(), TheHoleConstant()), if_hole, &done); |
| } |
| |
| BIND(&if_packed_double); |
| { |
| var_result = AllocateHeapNumberWithValue( |
| LoadFixedDoubleArrayElement(CAST(elements), index)); |
| Goto(&done); |
| } |
| |
| BIND(&if_holey_double); |
| { |
| var_result = AllocateHeapNumberWithValue( |
| LoadFixedDoubleArrayElement(CAST(elements), index, if_hole)); |
| Goto(&done); |
| } |
| |
| BIND(&if_dictionary); |
| { |
| CSA_ASSERT(this, IsDictionaryElementsKind(elements_kind)); |
| var_result = BasicLoadNumberDictionaryElement(CAST(elements), index, |
| if_accessor, if_hole); |
| Goto(&done); |
| } |
| |
| BIND(&done); |
| return var_result.value(); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::IsDoubleHole(TNode<Object> base, |
| TNode<IntPtrT> offset) { |
| // TODO(ishell): Compare only the upper part for the hole once the |
| // compiler is able to fold addition of already complex |offset| with |
| // |kIeeeDoubleExponentWordOffset| into one addressing mode. |
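  // The hole is encoded as a dedicated NaN bit pattern (kHoleNanInt64);
  // doubles stored into FixedDoubleArrays are NaN-canonicalized, so comparing
  // only the upper word (kHoleNanUpper32) is sufficient on 32-bit targets.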
| if (Is64()) { |
| TNode<Uint64T> element = Load<Uint64T>(base, offset); |
| return Word64Equal(element, Int64Constant(kHoleNanInt64)); |
| } else { |
| TNode<Uint32T> element_upper = Load<Uint32T>( |
| base, IntPtrAdd(offset, IntPtrConstant(kIeeeDoubleExponentWordOffset))); |
| return Word32Equal(element_upper, Int32Constant(kHoleNanUpper32)); |
| } |
| } |
| |
| TNode<Float64T> CodeStubAssembler::LoadDoubleWithHoleCheck( |
| TNode<Object> base, TNode<IntPtrT> offset, Label* if_hole, |
| MachineType machine_type) { |
| if (if_hole) { |
| GotoIf(IsDoubleHole(base, offset), if_hole); |
| } |
| if (machine_type.IsNone()) { |
| // This means the actual value is not needed. |
| return TNode<Float64T>(); |
| } |
| return UncheckedCast<Float64T>(Load(machine_type, base, offset)); |
| } |
| |
| TNode<ScopeInfo> CodeStubAssembler::LoadScopeInfo(TNode<Context> context) { |
| return CAST(LoadContextElement(context, Context::SCOPE_INFO_INDEX)); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::LoadScopeInfoHasExtensionField( |
| TNode<ScopeInfo> scope_info) { |
| TNode<IntPtrT> value = |
| LoadAndUntagObjectField(scope_info, ScopeInfo::kFlagsOffset); |
| return IsSetWord<ScopeInfo::HasContextExtensionSlotBit>(value); |
| } |
| |
| void CodeStubAssembler::StoreContextElementNoWriteBarrier( |
| TNode<Context> context, int slot_index, SloppyTNode<Object> value) { |
| int offset = Context::SlotOffset(slot_index); |
| StoreNoWriteBarrier(MachineRepresentation::kTagged, context, |
| IntPtrConstant(offset), value); |
| } |
| |
| TNode<NativeContext> CodeStubAssembler::LoadNativeContext( |
| TNode<Context> context) { |
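  // Context maps store their native context in the
  // constructor-or-back-pointer-or-native-context slot, so a single load from
  // the context's map suffices instead of walking the context chain.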
| TNode<Map> map = LoadMap(context); |
| return CAST(LoadObjectField( |
| map, Map::kConstructorOrBackPointerOrNativeContextOffset)); |
| } |
| |
| TNode<Context> CodeStubAssembler::LoadModuleContext(TNode<Context> context) { |
| TNode<NativeContext> native_context = LoadNativeContext(context); |
| TNode<Map> module_map = CAST( |
| LoadContextElement(native_context, Context::MODULE_CONTEXT_MAP_INDEX)); |
| TVariable<Object> cur_context(context, this); |
| |
| Label context_found(this); |
| |
| Label context_search(this, &cur_context); |
| |
| // Loop until cur_context->map() is module_map. |
| Goto(&context_search); |
| BIND(&context_search); |
| { |
| CSA_ASSERT(this, Word32BinaryNot( |
| TaggedEqual(cur_context.value(), native_context))); |
| GotoIf(TaggedEqual(LoadMap(CAST(cur_context.value())), module_map), |
| &context_found); |
| |
| cur_context = |
| LoadContextElement(CAST(cur_context.value()), Context::PREVIOUS_INDEX); |
| Goto(&context_search); |
| } |
| |
| BIND(&context_found); |
| return UncheckedCast<Context>(cur_context.value()); |
| } |
| |
| TNode<Map> CodeStubAssembler::LoadObjectFunctionInitialMap( |
| TNode<NativeContext> native_context) { |
| TNode<JSFunction> object_function = |
| CAST(LoadContextElement(native_context, Context::OBJECT_FUNCTION_INDEX)); |
| return CAST(LoadJSFunctionPrototypeOrInitialMap(object_function)); |
| } |
| |
| TNode<Map> CodeStubAssembler::LoadSlowObjectWithNullPrototypeMap( |
| TNode<NativeContext> native_context) { |
| TNode<Map> map = CAST(LoadContextElement( |
| native_context, Context::SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP)); |
| return map; |
| } |
| |
| TNode<Map> CodeStubAssembler::LoadJSArrayElementsMap( |
| SloppyTNode<Int32T> kind, TNode<NativeContext> native_context) { |
| CSA_ASSERT(this, IsFastElementsKind(kind)); |
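  // The native context holds one JSArray map per fast ElementsKind in
  // consecutive slots starting at FIRST_JS_ARRAY_MAP_SLOT, so the map can be
  // looked up by indexing with the kind.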
| TNode<IntPtrT> offset = |
| IntPtrAdd(IntPtrConstant(Context::FIRST_JS_ARRAY_MAP_SLOT), |
| ChangeInt32ToIntPtr(kind)); |
| return UncheckedCast<Map>(LoadContextElement(native_context, offset)); |
| } |
| |
| TNode<Map> CodeStubAssembler::LoadJSArrayElementsMap( |
| ElementsKind kind, TNode<NativeContext> native_context) { |
| return UncheckedCast<Map>( |
| LoadContextElement(native_context, Context::ArrayMapIndex(kind))); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::IsGeneratorFunction( |
| TNode<JSFunction> function) { |
| const TNode<SharedFunctionInfo> shared_function_info = |
| LoadObjectField<SharedFunctionInfo>( |
| function, JSFunction::kSharedFunctionInfoOffset); |
| |
| const TNode<Uint32T> function_kind = |
| DecodeWord32<SharedFunctionInfo::FunctionKindBits>( |
| LoadObjectField<Uint32T>(shared_function_info, |
| SharedFunctionInfo::kFlagsOffset)); |
| |
| // See IsGeneratorFunction(FunctionKind kind). |
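  // The generator function kinds form a contiguous range in FunctionKind,
  // so a single range check suffices.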
| return IsInRange(function_kind, FunctionKind::kAsyncConciseGeneratorMethod, |
| FunctionKind::kConciseGeneratorMethod); |
| } |
| |
| TNode<BoolT> CodeStubAssembler::IsJSFunctionWithPrototypeSlot( |
| TNode<HeapObject> object) { |
| // Only JSFunction maps may have HasPrototypeSlotBit set. |
| return TNode<BoolT>::UncheckedCast( |
| IsSetWord32<Map::Bits1::HasPrototypeSlotBit>( |
| LoadMapBitField(LoadMap(object)))); |
| } |
| |
| void CodeStubAssembler::BranchIfHasPrototypeProperty( |
| TNode<JSFunction> function, TNode<Int32T> function_map_bit_field, |
| Label* if_true, Label* if_false) { |
| // (has_prototype_slot() && IsConstructor()) || |
| // IsGeneratorFunction(shared()->kind()) |
| uint32_t mask = Map::Bits1::HasPrototypeSlotBit::kMask | |
| Map::Bits1::IsConstructorBit::kMask; |
| |
| GotoIf(IsAllSetWord32(function_map_bit_field, mask), if_true); |
| Branch(IsGeneratorFunction(function), if_true, if_false); |
| } |
| |
| void CodeStubAssembler::GotoIfPrototypeRequiresRuntimeLookup( |
| TNode<JSFunction> function, TNode<Map> map, Label* runtime) { |
| // !has_prototype_property() || has_non_instance_prototype() |
| TNode<Int32T> map_bit_field = LoadMapBitField(map); |
| Label next_check(this); |
| BranchIfHasPrototypeProperty(function, map_bit_field, &next_check, runtime); |
| BIND(&next_check); |
| GotoIf(IsSetWord32<Map::Bits1::HasNonInstancePrototypeBit>(map_bit_field), |
| runtime); |
| } |
| |
| TNode<HeapObject> CodeStubAssembler::LoadJSFunctionPrototype( |
| TNode<JSFunction> function, Label* if_bailout) { |
| CSA_ASSERT(this, IsFunctionWithPrototypeSlotMap(LoadMap(function))); |
| CSA_ASSERT(this, IsClearWord32<Map::Bits1::HasNonInstancePrototypeBit>( |
| LoadMapBitField(LoadMap(function)))); |
| TNode<HeapObject> proto_or_map = LoadObjectField<HeapObject>( |
| function, |