| // Copyright 2012 the V8 project authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style license that can be |
| // found in the LICENSE file. |
| |
| #include "src/elements.h" |
| |
| #include "src/arguments.h" |
| #include "src/conversions.h" |
| #include "src/factory.h" |
| #include "src/frames.h" |
| #include "src/isolate-inl.h" |
| #include "src/messages.h" |
| #include "src/objects-inl.h" |
| #include "src/utils.h" |
| #include "src/zone/zone.h" |
| |
| // Each concrete ElementsAccessor can handle exactly one ElementsKind, |
| // several abstract ElementsAccessor classes are used to allow sharing |
| // common code. |
| // |
| // Inheritance hierarchy: |
| // - ElementsAccessorBase (abstract) |
| // - FastElementsAccessor (abstract) |
| // - FastSmiOrObjectElementsAccessor |
| // - FastPackedSmiElementsAccessor |
| // - FastHoleySmiElementsAccessor |
| // - FastPackedObjectElementsAccessor |
| // - FastHoleyObjectElementsAccessor |
| // - FastDoubleElementsAccessor |
| // - FastPackedDoubleElementsAccessor |
| // - FastHoleyDoubleElementsAccessor |
| // - TypedElementsAccessor: template, with instantiations: |
| // - FixedUint8ElementsAccessor |
| // - FixedInt8ElementsAccessor |
| // - FixedUint16ElementsAccessor |
| // - FixedInt16ElementsAccessor |
| // - FixedUint32ElementsAccessor |
| // - FixedInt32ElementsAccessor |
| // - FixedFloat32ElementsAccessor |
| // - FixedFloat64ElementsAccessor |
| // - FixedUint8ClampedElementsAccessor |
| // - DictionaryElementsAccessor |
| // - SloppyArgumentsElementsAccessor |
| // - FastSloppyArgumentsElementsAccessor |
| // - SlowSloppyArgumentsElementsAccessor |
| // - StringWrapperElementsAccessor |
| // - FastStringWrapperElementsAccessor |
| // - SlowStringWrapperElementsAccessor |
| |
| namespace v8 { |
| namespace internal { |
| |
| |
| namespace { |
| |
| |
// Sentinel passed as |packed_size| to the copy helpers when the packed
// length of the source elements is not known.
static const int kPackedSizeNotKnown = -1;

// Position within a backing store where elements are added or removed.
// NOTE(review): used by accessor code outside this chunk — usage not
// visible here.
enum Where { AT_START, AT_END };
| |
| |
// X-macro describing every concrete elements accessor. First argument in
// list is the accessor class, the second argument is the accessor
// ElementsKind, and the third is the backing store class. Use the fast
// element handler for smi-only arrays. The implementation is currently
// identical. Note that the order must match that of the ElementsKind enum
// for the |accessor_array[]| below to work.
#define ELEMENTS_LIST(V) \
  V(FastPackedSmiElementsAccessor, PACKED_SMI_ELEMENTS, FixedArray) \
  V(FastHoleySmiElementsAccessor, HOLEY_SMI_ELEMENTS, FixedArray) \
  V(FastPackedObjectElementsAccessor, PACKED_ELEMENTS, FixedArray) \
  V(FastHoleyObjectElementsAccessor, HOLEY_ELEMENTS, FixedArray) \
  V(FastPackedDoubleElementsAccessor, PACKED_DOUBLE_ELEMENTS, \
    FixedDoubleArray) \
  V(FastHoleyDoubleElementsAccessor, HOLEY_DOUBLE_ELEMENTS, FixedDoubleArray) \
  V(DictionaryElementsAccessor, DICTIONARY_ELEMENTS, NumberDictionary) \
  V(FastSloppyArgumentsElementsAccessor, FAST_SLOPPY_ARGUMENTS_ELEMENTS, \
    FixedArray) \
  V(SlowSloppyArgumentsElementsAccessor, SLOW_SLOPPY_ARGUMENTS_ELEMENTS, \
    FixedArray) \
  V(FastStringWrapperElementsAccessor, FAST_STRING_WRAPPER_ELEMENTS, \
    FixedArray) \
  V(SlowStringWrapperElementsAccessor, SLOW_STRING_WRAPPER_ELEMENTS, \
    FixedArray) \
  V(FixedUint8ElementsAccessor, UINT8_ELEMENTS, FixedUint8Array) \
  V(FixedInt8ElementsAccessor, INT8_ELEMENTS, FixedInt8Array) \
  V(FixedUint16ElementsAccessor, UINT16_ELEMENTS, FixedUint16Array) \
  V(FixedInt16ElementsAccessor, INT16_ELEMENTS, FixedInt16Array) \
  V(FixedUint32ElementsAccessor, UINT32_ELEMENTS, FixedUint32Array) \
  V(FixedInt32ElementsAccessor, INT32_ELEMENTS, FixedInt32Array) \
  V(FixedFloat32ElementsAccessor, FLOAT32_ELEMENTS, FixedFloat32Array) \
  V(FixedFloat64ElementsAccessor, FLOAT64_ELEMENTS, FixedFloat64Array) \
  V(FixedUint8ClampedElementsAccessor, UINT8_CLAMPED_ELEMENTS, \
    FixedUint8ClampedArray)
| |
// Primary traits template: any ElementsKind not specialized via
// ELEMENTS_TRAITS below falls back to the generic FixedArrayBase store.
template<ElementsKind Kind> class ElementsKindTraits {
 public:
  typedef FixedArrayBase BackingStore;
};
| |
// Specializes ElementsKindTraits for each (accessor, kind, store) triple in
// ELEMENTS_LIST. The trailing out-of-class statement provides a definition
// for the constexpr static member in case it is odr-used.
#define ELEMENTS_TRAITS(Class, KindParam, Store) \
  template <> \
  class ElementsKindTraits<KindParam> { \
   public: /* NOLINT */ \
    static constexpr ElementsKind Kind = KindParam; \
    typedef Store BackingStore; \
  }; \
  constexpr ElementsKind ElementsKindTraits<KindParam>::Kind;
ELEMENTS_LIST(ELEMENTS_TRAITS)
#undef ELEMENTS_TRAITS
| |
| |
// Throws a RangeError (kInvalidArrayLength) on |isolate| and returns an
// empty MaybeHandle, signalling a pending exception to the caller.
MUST_USE_RESULT
MaybeHandle<Object> ThrowArrayLengthRangeError(Isolate* isolate) {
  THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kInvalidArrayLength),
                  Object);
}
| |
| |
// Copies |copy_size| elements between two FixedArrays holding Smi/object
// elements, starting at the given offsets. A negative |raw_copy_size|
// encodes kCopyToEnd / kCopyToEndAndInitializeToHole; the latter also fills
// the destination tail past the copied region with the hole value.
void CopyObjectToObjectElements(FixedArrayBase* from_base,
                                ElementsKind from_kind, uint32_t from_start,
                                FixedArrayBase* to_base, ElementsKind to_kind,
                                uint32_t to_start, int raw_copy_size) {
  // Copy-on-write arrays must never be mutated in place.
  DCHECK(to_base->map() !=
         from_base->GetIsolate()->heap()->fixed_cow_array_map());
  DisallowHeapAllocation no_allocation;
  int copy_size = raw_copy_size;
  if (raw_copy_size < 0) {
    DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
           raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
    copy_size = Min(from_base->length() - from_start,
                    to_base->length() - to_start);
    if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
      // Hole-initialize everything in the destination past the copy.
      int start = to_start + copy_size;
      int length = to_base->length() - start;
      if (length > 0) {
        Heap* heap = from_base->GetHeap();
        MemsetPointer(FixedArray::cast(to_base)->data_start() + start,
                      heap->the_hole_value(), length);
      }
    }
  }
  DCHECK((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
         (copy_size + static_cast<int>(from_start)) <= from_base->length());
  if (copy_size == 0) return;
  FixedArray* from = FixedArray::cast(from_base);
  FixedArray* to = FixedArray::cast(to_base);
  DCHECK(IsSmiOrObjectElementsKind(from_kind));
  DCHECK(IsSmiOrObjectElementsKind(to_kind));

  // Only object->object copies can introduce heap-object references that
  // need a write barrier; if either side is a Smi kind the values are Smis
  // and the barrier can be skipped.
  WriteBarrierMode write_barrier_mode =
      (IsObjectElementsKind(from_kind) && IsObjectElementsKind(to_kind))
          ? UPDATE_WRITE_BARRIER
          : SKIP_WRITE_BARRIER;
  for (int i = 0; i < copy_size; i++) {
    Object* value = from->get(from_start + i);
    to->set(to_start + i, value, write_barrier_mode);
  }
}
| |
| |
// Copies keys [from_start, from_start + copy_size) out of a NumberDictionary
// into a FixedArray with Smi/object elements; keys absent from the
// dictionary become holes. A negative |raw_copy_size| encodes kCopyToEnd /
// kCopyToEndAndInitializeToHole.
static void CopyDictionaryToObjectElements(
    FixedArrayBase* from_base, uint32_t from_start, FixedArrayBase* to_base,
    ElementsKind to_kind, uint32_t to_start, int raw_copy_size) {
  DisallowHeapAllocation no_allocation;
  NumberDictionary* from = NumberDictionary::cast(from_base);
  int copy_size = raw_copy_size;
  if (raw_copy_size < 0) {
    DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
           raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
    // Copy up to and including the largest key present in the dictionary.
    copy_size = from->max_number_key() + 1 - from_start;
    if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
      // Hole-initialize the destination tail past the copied region.
      int start = to_start + copy_size;
      int length = to_base->length() - start;
      if (length > 0) {
        Heap* heap = from->GetHeap();
        MemsetPointer(FixedArray::cast(to_base)->data_start() + start,
                      heap->the_hole_value(), length);
      }
    }
  }
  DCHECK(to_base != from_base);
  DCHECK(IsSmiOrObjectElementsKind(to_kind));
  if (copy_size == 0) return;
  FixedArray* to = FixedArray::cast(to_base);
  uint32_t to_length = to->length();
  // Clamp to the destination capacity; the dictionary's max key may lie
  // beyond it.
  if (to_start + copy_size > to_length) {
    copy_size = to_length - to_start;
  }
  WriteBarrierMode write_barrier_mode =
      IsObjectElementsKind(to_kind) ? UPDATE_WRITE_BARRIER : SKIP_WRITE_BARRIER;
  Isolate* isolate = from->GetIsolate();
  for (int i = 0; i < copy_size; i++) {
    int entry = from->FindEntry(isolate, i + from_start);
    if (entry != NumberDictionary::kNotFound) {
      Object* value = from->ValueAt(entry);
      DCHECK(!value->IsTheHole(isolate));
      to->set(i + to_start, value, write_barrier_mode);
    } else {
      to->set_the_hole(isolate, i + to_start);
    }
  }
}
| |
| |
// Copies unboxed doubles out of a FixedDoubleArray into a FixedArray,
// boxing each value as a HeapNumber (which allocates). A negative
// |raw_copy_size| encodes kCopyToEnd / kCopyToEndAndInitializeToHole.
//
// NOTE: this method violates the handlified function signature convention:
// raw pointer parameters in the function that allocates.
// See ElementsAccessorBase::CopyElements() for details.
static void CopyDoubleToObjectElements(FixedArrayBase* from_base,
                                       uint32_t from_start,
                                       FixedArrayBase* to_base,
                                       uint32_t to_start, int raw_copy_size) {
  int copy_size = raw_copy_size;
  if (raw_copy_size < 0) {
    DisallowHeapAllocation no_allocation;
    DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
           raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
    copy_size = Min(from_base->length() - from_start,
                    to_base->length() - to_start);
    if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
      // Also initialize the area that will be copied over since HeapNumber
      // allocation below can cause an incremental marking step, requiring all
      // existing heap objects to be properly initialized.
      int start = to_start;
      int length = to_base->length() - start;
      if (length > 0) {
        Heap* heap = from_base->GetHeap();
        MemsetPointer(FixedArray::cast(to_base)->data_start() + start,
                      heap->the_hole_value(), length);
      }
    }
  }

  DCHECK((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
         (copy_size + static_cast<int>(from_start)) <= from_base->length());
  if (copy_size == 0) return;

  // From here on, the code below could actually allocate. Therefore the raw
  // values are wrapped into handles.
  Isolate* isolate = from_base->GetIsolate();
  Handle<FixedDoubleArray> from(FixedDoubleArray::cast(from_base), isolate);
  Handle<FixedArray> to(FixedArray::cast(to_base), isolate);

  // Use an outer loop to not waste too much time on creating HandleScopes.
  // On the other hand we might overflow a single handle scope depending on
  // the copy_size.
  // Batches of 100 elements share one HandleScope.
  int offset = 0;
  while (offset < copy_size) {
    HandleScope scope(isolate);
    offset += 100;
    for (int i = offset - 100; i < offset && i < copy_size; ++i) {
      Handle<Object> value =
          FixedDoubleArray::get(*from, i + from_start, isolate);
      to->set(i + to_start, *value, UPDATE_WRITE_BARRIER);
    }
  }
}
| |
| |
| static void CopyDoubleToDoubleElements(FixedArrayBase* from_base, |
| uint32_t from_start, |
| FixedArrayBase* to_base, |
| uint32_t to_start, int raw_copy_size) { |
| DisallowHeapAllocation no_allocation; |
| int copy_size = raw_copy_size; |
| if (raw_copy_size < 0) { |
| DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd || |
| raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole); |
| copy_size = Min(from_base->length() - from_start, |
| to_base->length() - to_start); |
| if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) { |
| for (int i = to_start + copy_size; i < to_base->length(); ++i) { |
| FixedDoubleArray::cast(to_base)->set_the_hole(i); |
| } |
| } |
| } |
| DCHECK((copy_size + static_cast<int>(to_start)) <= to_base->length() && |
| (copy_size + static_cast<int>(from_start)) <= from_base->length()); |
| if (copy_size == 0) return; |
| FixedDoubleArray* from = FixedDoubleArray::cast(from_base); |
| FixedDoubleArray* to = FixedDoubleArray::cast(to_base); |
| Address to_address = to->address() + FixedDoubleArray::kHeaderSize; |
| Address from_address = from->address() + FixedDoubleArray::kHeaderSize; |
| to_address += kDoubleSize * to_start; |
| from_address += kDoubleSize * from_start; |
| int words_per_double = (kDoubleSize / kPointerSize); |
| CopyWords(reinterpret_cast<Object**>(to_address), |
| reinterpret_cast<Object**>(from_address), |
| static_cast<size_t>(words_per_double * copy_size)); |
| } |
| |
| |
| static void CopySmiToDoubleElements(FixedArrayBase* from_base, |
| uint32_t from_start, |
| FixedArrayBase* to_base, uint32_t to_start, |
| int raw_copy_size) { |
| DisallowHeapAllocation no_allocation; |
| int copy_size = raw_copy_size; |
| if (raw_copy_size < 0) { |
| DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd || |
| raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole); |
| copy_size = from_base->length() - from_start; |
| if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) { |
| for (int i = to_start + copy_size; i < to_base->length(); ++i) { |
| FixedDoubleArray::cast(to_base)->set_the_hole(i); |
| } |
| } |
| } |
| DCHECK((copy_size + static_cast<int>(to_start)) <= to_base->length() && |
| (copy_size + static_cast<int>(from_start)) <= from_base->length()); |
| if (copy_size == 0) return; |
| FixedArray* from = FixedArray::cast(from_base); |
| FixedDoubleArray* to = FixedDoubleArray::cast(to_base); |
| Object* the_hole = from->GetHeap()->the_hole_value(); |
| for (uint32_t from_end = from_start + static_cast<uint32_t>(copy_size); |
| from_start < from_end; from_start++, to_start++) { |
| Object* hole_or_smi = from->get(from_start); |
| if (hole_or_smi == the_hole) { |
| to->set_the_hole(to_start); |
| } else { |
| to->set(to_start, Smi::ToInt(hole_or_smi)); |
| } |
| } |
| } |
| |
| |
// Copies |packed_size| Smis from a packed FixedArray into a
// FixedDoubleArray. Because the source is packed, no hole checks are needed
// in the copy loop. A negative |raw_copy_size| encodes kCopyToEnd /
// kCopyToEndAndInitializeToHole.
static void CopyPackedSmiToDoubleElements(FixedArrayBase* from_base,
                                          uint32_t from_start,
                                          FixedArrayBase* to_base,
                                          uint32_t to_start, int packed_size,
                                          int raw_copy_size) {
  DisallowHeapAllocation no_allocation;
  int copy_size = raw_copy_size;
  uint32_t to_end;
  if (raw_copy_size < 0) {
    DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
           raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
    // Only the packed prefix of the source needs copying.
    copy_size = packed_size - from_start;
    if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
      // Hole-fill everything in the destination past the copied region.
      to_end = to_base->length();
      for (uint32_t i = to_start + copy_size; i < to_end; ++i) {
        FixedDoubleArray::cast(to_base)->set_the_hole(i);
      }
    } else {
      to_end = to_start + static_cast<uint32_t>(copy_size);
    }
  } else {
    to_end = to_start + static_cast<uint32_t>(copy_size);
  }
  DCHECK(static_cast<int>(to_end) <= to_base->length());
  DCHECK(packed_size >= 0 && packed_size <= copy_size);
  DCHECK((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
         (copy_size + static_cast<int>(from_start)) <= from_base->length());
  if (copy_size == 0) return;
  FixedArray* from = FixedArray::cast(from_base);
  FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
  // Packed source: every slot up to |packed_size| is a Smi, never the hole.
  for (uint32_t from_end = from_start + static_cast<uint32_t>(packed_size);
       from_start < from_end; from_start++, to_start++) {
    Object* smi = from->get(from_start);
    DCHECK(!smi->IsTheHole(from->GetIsolate()));
    to->set(to_start, Smi::ToInt(smi));
  }
}
| |
| |
| static void CopyObjectToDoubleElements(FixedArrayBase* from_base, |
| uint32_t from_start, |
| FixedArrayBase* to_base, |
| uint32_t to_start, int raw_copy_size) { |
| DisallowHeapAllocation no_allocation; |
| int copy_size = raw_copy_size; |
| if (raw_copy_size < 0) { |
| DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd || |
| raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole); |
| copy_size = from_base->length() - from_start; |
| if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) { |
| for (int i = to_start + copy_size; i < to_base->length(); ++i) { |
| FixedDoubleArray::cast(to_base)->set_the_hole(i); |
| } |
| } |
| } |
| DCHECK((copy_size + static_cast<int>(to_start)) <= to_base->length() && |
| (copy_size + static_cast<int>(from_start)) <= from_base->length()); |
| if (copy_size == 0) return; |
| FixedArray* from = FixedArray::cast(from_base); |
| FixedDoubleArray* to = FixedDoubleArray::cast(to_base); |
| Object* the_hole = from->GetHeap()->the_hole_value(); |
| for (uint32_t from_end = from_start + copy_size; |
| from_start < from_end; from_start++, to_start++) { |
| Object* hole_or_object = from->get(from_start); |
| if (hole_or_object == the_hole) { |
| to->set_the_hole(to_start); |
| } else { |
| to->set(to_start, hole_or_object->Number()); |
| } |
| } |
| } |
| |
| |
| static void CopyDictionaryToDoubleElements(FixedArrayBase* from_base, |
| uint32_t from_start, |
| FixedArrayBase* to_base, |
| uint32_t to_start, |
| int raw_copy_size) { |
| DisallowHeapAllocation no_allocation; |
| NumberDictionary* from = NumberDictionary::cast(from_base); |
| int copy_size = raw_copy_size; |
| if (copy_size < 0) { |
| DCHECK(copy_size == ElementsAccessor::kCopyToEnd || |
| copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole); |
| copy_size = from->max_number_key() + 1 - from_start; |
| if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) { |
| for (int i = to_start + copy_size; i < to_base->length(); ++i) { |
| FixedDoubleArray::cast(to_base)->set_the_hole(i); |
| } |
| } |
| } |
| if (copy_size == 0) return; |
| FixedDoubleArray* to = FixedDoubleArray::cast(to_base); |
| uint32_t to_length = to->length(); |
| if (to_start + copy_size > to_length) { |
| copy_size = to_length - to_start; |
| } |
| Isolate* isolate = from->GetIsolate(); |
| for (int i = 0; i < copy_size; i++) { |
| int entry = from->FindEntry(isolate, i + from_start); |
| if (entry != NumberDictionary::kNotFound) { |
| to->set(i + to_start, from->ValueAt(entry)->Number()); |
| } else { |
| to->set_the_hole(i + to_start); |
| } |
| } |
| } |
| |
// Prints the location of the topmost JavaScript frame to stdout (used for
// debug tracing). If the top frame is the Function.prototype.apply builtin,
// it is skipped so the frame that called apply is reported instead.
static void TraceTopFrame(Isolate* isolate) {
  StackFrameIterator it(isolate);
  if (it.done()) {
    PrintF("unknown location (no JavaScript frames present)");
    return;
  }
  StackFrame* raw_frame = it.frame();
  if (raw_frame->is_internal()) {
    Code* apply_builtin =
        isolate->builtins()->builtin(Builtins::kFunctionPrototypeApply);
    if (raw_frame->unchecked_code() == apply_builtin) {
      PrintF("apply from ");
      it.Advance();
      raw_frame = it.frame();
    }
  }
  JavaScriptFrame::PrintTop(isolate, stdout, false, true);
}
| |
| static void SortIndices( |
| Handle<FixedArray> indices, uint32_t sort_size, |
| WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER) { |
| struct { |
| bool operator()(const base::AtomicElement<Object*>& elementA, |
| const base::AtomicElement<Object*>& elementB) { |
| const Object* a = elementA.value(); |
| const Object* b = elementB.value(); |
| if (a->IsSmi() || !a->IsUndefined(HeapObject::cast(a)->GetIsolate())) { |
| if (!b->IsSmi() && b->IsUndefined(HeapObject::cast(b)->GetIsolate())) { |
| return true; |
| } |
| return a->Number() < b->Number(); |
| } |
| return !b->IsSmi() && b->IsUndefined(HeapObject::cast(b)->GetIsolate()); |
| } |
| } cmp; |
| // Use AtomicElement wrapper to ensure that std::sort uses atomic load and |
| // store operations that are safe for concurrent marking. |
| base::AtomicElement<Object*>* start = |
| reinterpret_cast<base::AtomicElement<Object*>*>( |
| indices->GetFirstElementAddress()); |
| std::sort(start, start + sort_size, cmp); |
| if (write_barrier_mode != SKIP_WRITE_BARRIER) { |
| FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(indices->GetIsolate()->heap(), *indices, |
| 0, sort_size); |
| } |
| } |
| |
// Generic Array.prototype.includes fallback: walks [start_from, length)
// through the full property-lookup machinery (prototype chain, accessors)
// and compares with SameValueZero. Absent properties match a search for
// undefined. Returns Nothing<bool>() if a getter throws.
static Maybe<bool> IncludesValueSlowPath(Isolate* isolate,
                                         Handle<JSObject> receiver,
                                         Handle<Object> value,
                                         uint32_t start_from, uint32_t length) {
  bool search_for_hole = value->IsUndefined(isolate);
  for (uint32_t k = start_from; k < length; ++k) {
    LookupIterator it(isolate, receiver, k);
    if (!it.IsFound()) {
      // Missing elements read as undefined, so they match undefined.
      if (search_for_hole) return Just(true);
      continue;
    }
    Handle<Object> element_k;
    ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, element_k,
                                     Object::GetProperty(&it), Nothing<bool>());

    if (value->SameValueZero(*element_k)) return Just(true);
  }

  return Just(false);
}
| |
// Generic Array.prototype.indexOf fallback: walks [start_from, length)
// through the full property-lookup machinery and compares with strict
// equality. Returns the first matching index, -1 if none, or
// Nothing<int64_t>() if a getter throws. Absent properties never match.
static Maybe<int64_t> IndexOfValueSlowPath(Isolate* isolate,
                                           Handle<JSObject> receiver,
                                           Handle<Object> value,
                                           uint32_t start_from,
                                           uint32_t length) {
  for (uint32_t k = start_from; k < length; ++k) {
    LookupIterator it(isolate, receiver, k);
    if (!it.IsFound()) {
      continue;
    }
    Handle<Object> element_k;
    ASSIGN_RETURN_ON_EXCEPTION_VALUE(
        isolate, element_k, Object::GetProperty(&it), Nothing<int64_t>());

    if (value->StrictEquals(*element_k)) return Just<int64_t>(k);
  }

  return Just<int64_t>(-1);
}
| |
// The InternalElementsAccessor is a helper class to expose otherwise protected
// methods to its subclasses. Namely, we don't want to publicly expose methods
// that take an entry (instead of an index) as an argument.
class InternalElementsAccessor : public ElementsAccessor {
 public:
  explicit InternalElementsAccessor(const char* name)
      : ElementsAccessor(name) {}

  // Maps a property index to the backing-store entry for |holder|.
  virtual uint32_t GetEntryForIndex(Isolate* isolate, JSObject* holder,
                                    FixedArrayBase* backing_store,
                                    uint32_t index) = 0;

  // Returns the PropertyDetails stored for |entry|.
  virtual PropertyDetails GetDetails(JSObject* holder, uint32_t entry) = 0;
};
| |
| // Base class for element handler implementations. Contains the |
| // the common logic for objects with different ElementsKinds. |
| // Subclasses must specialize method for which the element |
| // implementation differs from the base class implementation. |
| // |
| // This class is intended to be used in the following way: |
| // |
| // class SomeElementsAccessor : |
| // public ElementsAccessorBase<SomeElementsAccessor, |
| // BackingStoreClass> { |
| // ... |
| // } |
| // |
| // This is an example of the Curiously Recurring Template Pattern (see |
| // http://en.wikipedia.org/wiki/Curiously_recurring_template_pattern). We use |
| // CRTP to guarantee aggressive compile time optimizations (i.e. inlining and |
| // specialization of SomeElementsAccessor methods). |
| template <typename Subclass, typename ElementsTraitsParam> |
| class ElementsAccessorBase : public InternalElementsAccessor { |
| public: |
  // Forwards the accessor's name to the protected base class.
  explicit ElementsAccessorBase(const char* name)
      : InternalElementsAccessor(name) {}

  typedef ElementsTraitsParam ElementsTraits;
  typedef typename ElementsTraitsParam::BackingStore BackingStore;

  // The ElementsKind this accessor is specialized for (from the traits).
  static ElementsKind kind() { return ElementsTraits::Kind; }

  // Default: nothing to validate. Subclasses override to check their
  // backing-store contents.
  static void ValidateContents(JSObject* holder, int length) {}
| |
  // Computes the relevant element count (the JSArray length if it is a Smi,
  // otherwise the backing-store length) and delegates content validation to
  // the subclass.
  static void ValidateImpl(JSObject* holder) {
    FixedArrayBase* fixed_array_base = holder->elements();
    if (!fixed_array_base->IsHeapObject()) return;
    // Arrays that have been shifted in place can't be verified.
    if (fixed_array_base->IsFiller()) return;
    int length = 0;
    if (holder->IsJSArray()) {
      Object* length_obj = JSArray::cast(holder)->length();
      if (length_obj->IsSmi()) {
        length = Smi::ToInt(length_obj);
      }
    } else {
      length = fixed_array_base->length();
    }
    Subclass::ValidateContents(holder, length);
  }

  void Validate(JSObject* holder) final {
    DisallowHeapAllocation no_gc;
    Subclass::ValidateImpl(holder);
  }
| |
  // Returns true if every index in [start, end) has an element, i.e. there
  // are no holes in that range. Packed kinds are packed by definition.
  static bool IsPackedImpl(JSObject* holder, FixedArrayBase* backing_store,
                           uint32_t start, uint32_t end) {
    DisallowHeapAllocation no_gc;
    if (IsFastPackedElementsKind(kind())) return true;
    Isolate* isolate = backing_store->GetIsolate();
    for (uint32_t i = start; i < end; i++) {
      if (!Subclass::HasElementImpl(isolate, holder, i, backing_store,
                                    ALL_PROPERTIES)) {
        return false;
      }
    }
    return true;
  }
| |
  // If |array| is currently holey but contains no holes up to its length,
  // transitions it to the corresponding packed ElementsKind. Used to tighten
  // the kind of freshly built result arrays.
  static void TryTransitionResultArrayToPacked(Handle<JSArray> array) {
    if (!IsHoleyOrDictionaryElementsKind(kind())) return;
    Handle<FixedArrayBase> backing_store(array->elements());
    int length = Smi::ToInt(array->length());
    if (!Subclass::IsPackedImpl(*array, *backing_store, 0, length)) {
      return;
    }
    ElementsKind packed_kind = GetPackedElementsKind(kind());
    Handle<Map> new_map =
        JSObject::GetElementsTransitionMap(array, packed_kind);
    JSObject::MigrateToMap(array, new_map);
    if (FLAG_trace_elements_transitions) {
      JSObject::PrintElementsTransition(stdout, array, kind(), backing_store,
                                        packed_kind, backing_store);
    }
  }
| |
  bool HasElement(JSObject* holder, uint32_t index,
                  FixedArrayBase* backing_store, PropertyFilter filter) final {
    return Subclass::HasElementImpl(holder->GetIsolate(), holder, index,
                                    backing_store, filter);
  }

  // An element exists iff the index maps to a valid entry; kMaxUInt32 is the
  // "not found" sentinel of GetEntryForIndexImpl.
  static bool HasElementImpl(Isolate* isolate, JSObject* holder, uint32_t index,
                             FixedArrayBase* backing_store,
                             PropertyFilter filter = ALL_PROPERTIES) {
    return Subclass::GetEntryForIndexImpl(isolate, holder, backing_store, index,
                                          filter) != kMaxUInt32;
  }

  bool HasEntry(JSObject* holder, uint32_t entry) final {
    return Subclass::HasEntryImpl(holder->GetIsolate(), holder->elements(),
                                  entry);
  }

  // No generic implementation; subclasses that support entry queries
  // override this.
  static bool HasEntryImpl(Isolate* isolate, FixedArrayBase* backing_store,
                           uint32_t entry) {
    UNIMPLEMENTED();
  }

  bool HasAccessors(JSObject* holder) final {
    return Subclass::HasAccessorsImpl(holder, holder->elements());
  }

  // Default: plain element kinds never store accessor (getter/setter) pairs.
  static bool HasAccessorsImpl(JSObject* holder,
                               FixedArrayBase* backing_store) {
    return false;
  }
| |
  Handle<Object> Get(Handle<JSObject> holder, uint32_t entry) final {
    return Subclass::GetInternalImpl(holder, entry);
  }

  static Handle<Object> GetInternalImpl(Handle<JSObject> holder,
                                        uint32_t entry) {
    return Subclass::GetImpl(holder->GetIsolate(), holder->elements(), entry);
  }

  // Translates |entry| back to an index and reads the value from the
  // backing store.
  static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase* backing_store,
                                uint32_t entry) {
    uint32_t index = GetIndexForEntryImpl(backing_store, entry);
    return handle(BackingStore::cast(backing_store)->get(index), isolate);
  }

  void Set(Handle<JSObject> holder, uint32_t entry, Object* value) final {
    Subclass::SetImpl(holder, entry, value);
  }
| |
  void Reconfigure(Handle<JSObject> object, Handle<FixedArrayBase> store,
                   uint32_t entry, Handle<Object> value,
                   PropertyAttributes attributes) final {
    Subclass::ReconfigureImpl(object, store, entry, value, attributes);
  }

  // The default *Impl bodies below are UNREACHABLE(): each concrete accessor
  // that supports the corresponding operation overrides them; reaching a
  // default indicates a dispatch bug.
  static void ReconfigureImpl(Handle<JSObject> object,
                              Handle<FixedArrayBase> store, uint32_t entry,
                              Handle<Object> value,
                              PropertyAttributes attributes) {
    UNREACHABLE();
  }

  void Add(Handle<JSObject> object, uint32_t index, Handle<Object> value,
           PropertyAttributes attributes, uint32_t new_capacity) final {
    Subclass::AddImpl(object, index, value, attributes, new_capacity);
  }

  static void AddImpl(Handle<JSObject> object, uint32_t index,
                      Handle<Object> value, PropertyAttributes attributes,
                      uint32_t new_capacity) {
    UNREACHABLE();
  }

  uint32_t Push(Handle<JSArray> receiver, Arguments* args,
                uint32_t push_size) final {
    return Subclass::PushImpl(receiver, args, push_size);
  }

  static uint32_t PushImpl(Handle<JSArray> receiver, Arguments* args,
                           uint32_t push_sized) {
    UNREACHABLE();
  }

  uint32_t Unshift(Handle<JSArray> receiver, Arguments* args,
                   uint32_t unshift_size) final {
    return Subclass::UnshiftImpl(receiver, args, unshift_size);
  }

  static uint32_t UnshiftImpl(Handle<JSArray> receiver, Arguments* args,
                              uint32_t unshift_size) {
    UNREACHABLE();
  }
| |
  Handle<JSObject> Slice(Handle<JSObject> receiver, uint32_t start,
                         uint32_t end) final {
    return Subclass::SliceImpl(receiver, start, end);
  }

  // Variant that writes the sliced elements into a pre-allocated |result|.
  Handle<JSObject> Slice(Handle<JSObject> receiver, uint32_t start,
                         uint32_t end, Handle<JSObject> result) final {
    return Subclass::SliceWithResultImpl(receiver, start, end, result);
  }

  // As above: default impls are UNREACHABLE(); overridden by accessors that
  // support the operation.
  static Handle<JSObject> SliceImpl(Handle<JSObject> receiver, uint32_t start,
                                    uint32_t end) {
    UNREACHABLE();
  }

  static Handle<JSObject> SliceWithResultImpl(Handle<JSObject> receiver,
                                              uint32_t start, uint32_t end,
                                              Handle<JSObject> result) {
    UNREACHABLE();
  }

  Handle<JSArray> Splice(Handle<JSArray> receiver, uint32_t start,
                         uint32_t delete_count, Arguments* args,
                         uint32_t add_count) final {
    return Subclass::SpliceImpl(receiver, start, delete_count, args, add_count);
  }

  static Handle<JSArray> SpliceImpl(Handle<JSArray> receiver,
                                    uint32_t start, uint32_t delete_count,
                                    Arguments* args, uint32_t add_count) {
    UNREACHABLE();
  }

  Handle<Object> Pop(Handle<JSArray> receiver) final {
    return Subclass::PopImpl(receiver);
  }

  static Handle<Object> PopImpl(Handle<JSArray> receiver) {
    UNREACHABLE();
  }

  Handle<Object> Shift(Handle<JSArray> receiver) final {
    return Subclass::ShiftImpl(receiver);
  }

  static Handle<Object> ShiftImpl(Handle<JSArray> receiver) {
    UNREACHABLE();
  }
| |
  void SetLength(Handle<JSArray> array, uint32_t length) final {
    Subclass::SetLengthImpl(array->GetIsolate(), array, length,
                            handle(array->elements()));
  }

  // Sets the length of a fast-kind JSArray: may transition to a holey kind
  // when growing, trim or hole-fill the backing store when shrinking, or
  // grow-and-convert when the new length exceeds capacity. Callers must have
  // ruled out lengths that would normalize to dictionary mode.
  static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
                            uint32_t length,
                            Handle<FixedArrayBase> backing_store) {
    DCHECK(!array->SetLengthWouldNormalize(length));
    DCHECK(IsFastElementsKind(array->GetElementsKind()));
    uint32_t old_length = 0;
    CHECK(array->length()->ToArrayIndex(&old_length));

    if (old_length < length) {
      // Growing exposes previously-nonexistent indices as holes.
      ElementsKind kind = array->GetElementsKind();
      if (!IsHoleyElementsKind(kind)) {
        kind = GetHoleyElementsKind(kind);
        JSObject::TransitionElementsKind(array, kind);
      }
    }

    // Check whether the backing store should be shrunk.
    uint32_t capacity = backing_store->length();
    old_length = Min(old_length, capacity);
    if (length == 0) {
      array->initialize_elements();
    } else if (length <= capacity) {
      if (IsSmiOrObjectElementsKind(kind())) {
        // Ensure the store is writable (may replace a COW array — hence the
        // re-fetch below).
        JSObject::EnsureWritableFastElements(array);
        if (array->elements() != *backing_store) {
          backing_store = handle(array->elements(), isolate);
        }
      }
      if (2 * length + JSObject::kMinAddedElementsCapacity <= capacity) {
        // If more than half the elements won't be used, trim the array.
        // Do not trim from short arrays to prevent frequent trimming on
        // repeated pop operations.
        // Leave some space to allow for subsequent push operations.
        int elements_to_trim = length + 1 == old_length
                                   ? (capacity - length) / 2
                                   : capacity - length;
        isolate->heap()->RightTrimFixedArray(*backing_store, elements_to_trim);
        // Fill the non-trimmed elements with holes.
        BackingStore::cast(*backing_store)
            ->FillWithHoles(length,
                            std::min(old_length, capacity - elements_to_trim));
      } else {
        // Otherwise, fill the unused tail with holes.
        BackingStore::cast(*backing_store)->FillWithHoles(length, old_length);
      }
    } else {
      // Check whether the backing store should be expanded.
      capacity = Max(length, JSObject::NewElementsCapacity(capacity));
      Subclass::GrowCapacityAndConvertImpl(array, capacity);
    }

    array->set_length(Smi::FromInt(length));
    JSObject::ValidateElements(*array);
  }
| |
  uint32_t NumberOfElements(JSObject* receiver) final {
    return Subclass::NumberOfElementsImpl(receiver, receiver->elements());
  }

  // Subclasses provide the count; reaching this default is a dispatch bug.
  static uint32_t NumberOfElementsImpl(JSObject* receiver,
                                       FixedArrayBase* backing_store) {
    UNREACHABLE();
  }

  // Upper bound for element indices: the JSArray length when the receiver is
  // an array (always a Smi for fast kinds), otherwise the store capacity.
  static uint32_t GetMaxIndex(JSObject* receiver, FixedArrayBase* elements) {
    if (receiver->IsJSArray()) {
      DCHECK(JSArray::cast(receiver)->length()->IsSmi());
      return static_cast<uint32_t>(
          Smi::ToInt(JSArray::cast(receiver)->length()));
    }
    return Subclass::GetCapacityImpl(receiver, elements);
  }

  static uint32_t GetMaxNumberOfEntries(JSObject* receiver,
                                        FixedArrayBase* elements) {
    return Subclass::GetMaxIndex(receiver, elements);
  }
| |
  // Convenience overload: copy everything to the end and hole-initialize the
  // remainder of the new store.
  static Handle<FixedArrayBase> ConvertElementsWithCapacity(
      Handle<JSObject> object, Handle<FixedArrayBase> old_elements,
      ElementsKind from_kind, uint32_t capacity) {
    return ConvertElementsWithCapacity(
        object, old_elements, from_kind, capacity, 0, 0,
        ElementsAccessor::kCopyToEndAndInitializeToHole);
  }

  // Convenience overload with an explicit copy size and zero offsets.
  static Handle<FixedArrayBase> ConvertElementsWithCapacity(
      Handle<JSObject> object, Handle<FixedArrayBase> old_elements,
      ElementsKind from_kind, uint32_t capacity, int copy_size) {
    return ConvertElementsWithCapacity(object, old_elements, from_kind,
                                       capacity, 0, 0, copy_size);
  }

  // Allocates a new backing store of |capacity| matching this accessor's
  // kind (double vs. Smi/object) and copies the old contents into it.
  // NOTE(review): the non-double store is allocated *uninitialized*, so
  // CopyElementsImpl is expected to write every slot — confirm per subclass.
  static Handle<FixedArrayBase> ConvertElementsWithCapacity(
      Handle<JSObject> object, Handle<FixedArrayBase> old_elements,
      ElementsKind from_kind, uint32_t capacity, uint32_t src_index,
      uint32_t dst_index, int copy_size) {
    Isolate* isolate = object->GetIsolate();
    Handle<FixedArrayBase> new_elements;
    if (IsDoubleElementsKind(kind())) {
      new_elements = isolate->factory()->NewFixedDoubleArray(capacity);
    } else {
      new_elements = isolate->factory()->NewUninitializedFixedArray(capacity);
    }

    // Packed JSArrays let the copy skip per-element hole checks.
    int packed_size = kPackedSizeNotKnown;
    if (IsFastPackedElementsKind(from_kind) && object->IsJSArray()) {
      packed_size = Smi::ToInt(JSArray::cast(*object)->length());
    }

    Subclass::CopyElementsImpl(*old_elements, src_index, *new_elements,
                               from_kind, dst_index, packed_size, copy_size);

    return new_elements;
  }
| |
  // Transitions |object| to |to_map|'s elements kind. A holey source kind
  // forces the holey variant of the target kind. Only fast->fast transitions
  // are supported; the backing store is rebuilt only when the physical
  // representation (double vs. tagged) actually changes.
  static void TransitionElementsKindImpl(Handle<JSObject> object,
                                         Handle<Map> to_map) {
    Handle<Map> from_map = handle(object->map());
    ElementsKind from_kind = from_map->elements_kind();
    ElementsKind to_kind = to_map->elements_kind();
    // Never lose holeyness when transitioning.
    if (IsHoleyElementsKind(from_kind)) {
      to_kind = GetHoleyElementsKind(to_kind);
    }
    if (from_kind != to_kind) {
      // This method should never be called for any other case.
      DCHECK(IsFastElementsKind(from_kind));
      DCHECK(IsFastElementsKind(to_kind));
      DCHECK_NE(TERMINAL_FAST_ELEMENTS_KIND, from_kind);

      Handle<FixedArrayBase> from_elements(object->elements());
      if (object->elements() == object->GetHeap()->empty_fixed_array() ||
          IsDoubleElementsKind(from_kind) == IsDoubleElementsKind(to_kind)) {
        // No change is needed to the elements() buffer, the transition
        // only requires a map change.
        JSObject::MigrateToMap(object, to_map);
      } else {
        DCHECK(
            (IsSmiElementsKind(from_kind) && IsDoubleElementsKind(to_kind)) ||
            (IsDoubleElementsKind(from_kind) && IsObjectElementsKind(to_kind)));
        uint32_t capacity = static_cast<uint32_t>(object->elements()->length());
        Handle<FixedArrayBase> elements = ConvertElementsWithCapacity(
            object, from_elements, from_kind, capacity);
        JSObject::SetMapAndElements(object, to_map, elements);
      }
      if (FLAG_trace_elements_transitions) {
        JSObject::PrintElementsTransition(stdout, object, from_kind,
                                          from_elements, to_kind,
                                          handle(object->elements()));
      }
    }
  }
| |
  // Grows |object|'s backing store to |capacity|, converting the element
  // representation to this accessor's kind() if it differs.
  static void GrowCapacityAndConvertImpl(Handle<JSObject> object,
                                         uint32_t capacity) {
    ElementsKind from_kind = object->GetElementsKind();
    if (IsSmiOrObjectElementsKind(from_kind)) {
      // Array optimizations rely on the prototype lookups of Array objects
      // always returning undefined. If there is a store to the initial
      // prototype object, make sure all of these optimizations are invalidated.
      object->GetIsolate()->UpdateNoElementsProtectorOnSetLength(object);
    }
    Handle<FixedArrayBase> old_elements(object->elements());
    // This method should only be called if there's a reason to update the
    // elements.
    DCHECK(IsDoubleElementsKind(from_kind) != IsDoubleElementsKind(kind()) ||
           IsDictionaryElementsKind(from_kind) ||
           static_cast<uint32_t>(old_elements->length()) < capacity);
    Subclass::BasicGrowCapacityAndConvertImpl(object, old_elements, from_kind,
                                              kind(), capacity);
  }
| |
  // Shared tail of GrowCapacityAndConvert: allocate and fill the new store,
  // install the transitioned map, and update any allocation site.
  static void BasicGrowCapacityAndConvertImpl(
      Handle<JSObject> object, Handle<FixedArrayBase> old_elements,
      ElementsKind from_kind, ElementsKind to_kind, uint32_t capacity) {
    Handle<FixedArrayBase> elements =
        ConvertElementsWithCapacity(object, old_elements, from_kind, capacity);

    // Growing into a holey or dictionary source must keep the target holey.
    if (IsHoleyOrDictionaryElementsKind(from_kind))
      to_kind = GetHoleyElementsKind(to_kind);
    Handle<Map> new_map = JSObject::GetElementsTransitionMap(object, to_kind);
    JSObject::SetMapAndElements(object, new_map, elements);

    // Transition through the allocation site as well if present.
    JSObject::UpdateAllocationSite(object, to_kind);

    if (FLAG_trace_elements_transitions) {
      JSObject::PrintElementsTransition(stdout, object, from_kind, old_elements,
                                        to_kind, elements);
    }
  }
| |
  // ElementsAccessor interface: dispatch to the subclass implementation.
  void TransitionElementsKind(Handle<JSObject> object, Handle<Map> map) final {
    Subclass::TransitionElementsKindImpl(object, map);
  }
| |
  // ElementsAccessor interface: dispatch to the subclass implementation.
  void GrowCapacityAndConvert(Handle<JSObject> object,
                              uint32_t capacity) final {
    Subclass::GrowCapacityAndConvertImpl(object, capacity);
  }
| |
  // Grows the backing store so that |index| fits, without changing the
  // elements kind. Returns false when the caller must fall back to the
  // runtime (prototype maps, slow-elements thresholds, or a pending
  // allocation-site transition).
  bool GrowCapacity(Handle<JSObject> object, uint32_t index) final {
    // This function is intended to be called from optimized code. We don't
    // want to trigger lazy deopts there, so refuse to handle cases that would.
    if (object->map()->is_prototype_map() ||
        object->WouldConvertToSlowElements(index)) {
      return false;
    }
    Handle<FixedArrayBase> old_elements(object->elements());
    uint32_t new_capacity = JSObject::NewElementsCapacity(index + 1);
    DCHECK(static_cast<uint32_t>(old_elements->length()) < new_capacity);
    Handle<FixedArrayBase> elements =
        ConvertElementsWithCapacity(object, old_elements, kind(), new_capacity);

    DCHECK_EQ(object->GetElementsKind(), kind());
    // Transition through the allocation site as well if present.
    // kCheckOnly: a required transition means the optimized caller's
    // assumptions are stale, so bail out instead of performing it here.
    if (JSObject::UpdateAllocationSite<AllocationSiteUpdateMode::kCheckOnly>(
            object, kind())) {
      return false;
    }

    object->set_elements(*elements);
    return true;
  }
| |
  // ElementsAccessor interface: dispatch to the subclass implementation.
  void Delete(Handle<JSObject> obj, uint32_t entry) final {
    Subclass::DeleteImpl(obj, entry);
  }
| |
  // Default implementation: element copying must be provided by subclasses
  // that support it; reaching the base version is a bug.
  static void CopyElementsImpl(FixedArrayBase* from, uint32_t from_start,
                               FixedArrayBase* to, ElementsKind from_kind,
                               uint32_t to_start, int packed_size,
                               int copy_size) {
    UNREACHABLE();
  }
| |
  // ElementsAccessor interface: copies |copy_size| elements from
  // |from_holder| into |to|, clamping the packed prefix size to the copy
  // size when both are known.
  void CopyElements(JSObject* from_holder, uint32_t from_start,
                    ElementsKind from_kind, Handle<FixedArrayBase> to,
                    uint32_t to_start, int copy_size) final {
    int packed_size = kPackedSizeNotKnown;
    bool is_packed = IsFastPackedElementsKind(from_kind) &&
        from_holder->IsJSArray();
    if (is_packed) {
      packed_size = Smi::ToInt(JSArray::cast(from_holder)->length());
      if (copy_size >= 0 && packed_size > copy_size) {
        packed_size = copy_size;
      }
    }
    FixedArrayBase* from = from_holder->elements();
    // NOTE: the Subclass::CopyElementsImpl() methods
    // violate the handlified function signature convention:
    // raw pointer parameters in the function that allocates. This is done
    // intentionally to avoid ArrayConcat() builtin performance degradation.
    //
    // Details: The idea is that allocations actually happen only in case of
    // copying from object with fast double elements to object with object
    // elements. In all the other cases there are no allocations performed and
    // handle creation causes noticeable performance degradation of the builtin.
    Subclass::CopyElementsImpl(from, from_start, *to, from_kind, to_start,
                               packed_size, copy_size);
  }
| |
| void CopyElements(Handle<FixedArrayBase> source, ElementsKind source_kind, |
| Handle<FixedArrayBase> destination, int size) { |
| Subclass::CopyElementsImpl(*source, 0, *destination, source_kind, 0, |
| kPackedSizeNotKnown, size); |
| } |
| |
  // ElementsAccessor interface: dispatch to the subclass implementation.
  Object* CopyElements(Handle<JSReceiver> source, Handle<JSObject> destination,
                       size_t length, uint32_t offset) final {
    return Subclass::CopyElementsHandleImpl(source, destination, length,
                                            offset);
  }
| |
  // Default implementation: must be overridden by subclasses that support
  // handle-based copying; reaching the base version is a bug.
  static Object* CopyElementsHandleImpl(Handle<JSReceiver> source,
                                        Handle<JSObject> destination,
                                        size_t length, uint32_t offset) {
    UNREACHABLE();
  }
| |
| Handle<NumberDictionary> Normalize(Handle<JSObject> object) final { |
| return Subclass::NormalizeImpl(object, handle(object->elements())); |
| } |
| |
  // Default implementation: normalization is only meaningful for kinds that
  // override it; reaching the base version is a bug.
  static Handle<NumberDictionary> NormalizeImpl(
      Handle<JSObject> object, Handle<FixedArrayBase> elements) {
    UNREACHABLE();
  }
| |
  // ElementsAccessor interface: dispatch to the subclass implementation.
  Maybe<bool> CollectValuesOrEntries(Isolate* isolate, Handle<JSObject> object,
                                     Handle<FixedArray> values_or_entries,
                                     bool get_entries, int* nof_items,
                                     PropertyFilter filter) {
    return Subclass::CollectValuesOrEntriesImpl(
        isolate, object, values_or_entries, get_entries, nof_items, filter);
  }
| |
  // Collects element values (or [index, value] entries when |get_entries|)
  // into |values_or_entries| and reports the count via |nof_items|. Getter
  // calls may change the elements kind mid-iteration; when that happens the
  // remaining keys are processed on a generic per-property lookup path.
  static Maybe<bool> CollectValuesOrEntriesImpl(
      Isolate* isolate, Handle<JSObject> object,
      Handle<FixedArray> values_or_entries, bool get_entries, int* nof_items,
      PropertyFilter filter) {
    DCHECK_EQ(*nof_items, 0);
    KeyAccumulator accumulator(isolate, KeyCollectionMode::kOwnOnly,
                               ALL_PROPERTIES);
    Subclass::CollectElementIndicesImpl(
        object, handle(object->elements(), isolate), &accumulator);
    Handle<FixedArray> keys = accumulator.GetKeys();

    int count = 0;
    int i = 0;
    ElementsKind original_elements_kind = object->GetElementsKind();

    // Fast path: elements kind is unchanged, so this accessor's Impl
    // functions remain valid for the object.
    for (; i < keys->length(); ++i) {
      Handle<Object> key(keys->get(i), isolate);
      uint32_t index;
      if (!key->ToUint32(&index)) continue;

      DCHECK_EQ(object->GetElementsKind(), original_elements_kind);
      uint32_t entry = Subclass::GetEntryForIndexImpl(
          isolate, *object, object->elements(), index, filter);
      // kMaxUInt32 signals "absent or filtered out".
      if (entry == kMaxUInt32) continue;
      PropertyDetails details = Subclass::GetDetailsImpl(*object, entry);

      Handle<Object> value;
      if (details.kind() == kData) {
        value = Subclass::GetImpl(isolate, object->elements(), entry);
      } else {
        // This might modify the elements and/or change the elements kind.
        LookupIterator it(isolate, object, index, LookupIterator::OWN);
        ASSIGN_RETURN_ON_EXCEPTION_VALUE(
            isolate, value, Object::GetProperty(&it), Nothing<bool>());
      }
      if (get_entries) value = MakeEntryPair(isolate, index, value);
      values_or_entries->set(count++, *value);
      if (object->GetElementsKind() != original_elements_kind) break;
    }

    // Slow path caused by changes in elements kind during iteration.
    for (; i < keys->length(); i++) {
      Handle<Object> key(keys->get(i), isolate);
      uint32_t index;
      if (!key->ToUint32(&index)) continue;

      if (filter & ONLY_ENUMERABLE) {
        // Re-fetch the accessor: the kind change invalidated Subclass.
        InternalElementsAccessor* accessor =
            reinterpret_cast<InternalElementsAccessor*>(
                object->GetElementsAccessor());
        uint32_t entry = accessor->GetEntryForIndex(isolate, *object,
                                                    object->elements(), index);
        if (entry == kMaxUInt32) continue;
        PropertyDetails details = accessor->GetDetails(*object, entry);
        if (!details.IsEnumerable()) continue;
      }

      Handle<Object> value;
      LookupIterator it(isolate, object, index, LookupIterator::OWN);
      ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, value, Object::GetProperty(&it),
                                       Nothing<bool>());

      if (get_entries) value = MakeEntryPair(isolate, index, value);
      values_or_entries->set(count++, *value);
    }

    *nof_items = count;
    return Just(true);
  }
| |
| void CollectElementIndices(Handle<JSObject> object, |
| Handle<FixedArrayBase> backing_store, |
| KeyAccumulator* keys) final { |
| if (keys->filter() & ONLY_ALL_CAN_READ) return; |
| Subclass::CollectElementIndicesImpl(object, backing_store, keys); |
| } |
| |
| static void CollectElementIndicesImpl(Handle<JSObject> object, |
| Handle<FixedArrayBase> backing_store, |
| KeyAccumulator* keys) { |
| DCHECK_NE(DICTIONARY_ELEMENTS, kind()); |
| // Non-dictionary elements can't have all-can-read accessors. |
| uint32_t length = Subclass::GetMaxIndex(*object, *backing_store); |
| PropertyFilter filter = keys->filter(); |
| Isolate* isolate = keys->isolate(); |
| Factory* factory = isolate->factory(); |
| for (uint32_t i = 0; i < length; i++) { |
| if (Subclass::HasElementImpl(isolate, *object, i, *backing_store, |
| filter)) { |
| keys->AddKey(factory->NewNumberFromUint(i)); |
| } |
| } |
| } |
| |
| static Handle<FixedArray> DirectCollectElementIndicesImpl( |
| Isolate* isolate, Handle<JSObject> object, |
| Handle<FixedArrayBase> backing_store, GetKeysConversion convert, |
| PropertyFilter filter, Handle<FixedArray> list, uint32_t* nof_indices, |
| uint32_t insertion_index = 0) { |
| uint32_t length = Subclass::GetMaxIndex(*object, *backing_store); |
| for (uint32_t i = 0; i < length; i++) { |
| if (Subclass::HasElementImpl(isolate, *object, i, *backing_store, |
| filter)) { |
| if (convert == GetKeysConversion::kConvertToString) { |
| Handle<String> index_string = isolate->factory()->Uint32ToString(i); |
| list->set(insertion_index, *index_string); |
| } else { |
| list->set(insertion_index, Smi::FromInt(i), SKIP_WRITE_BARRIER); |
| } |
| insertion_index++; |
| } |
| } |
| *nof_indices = insertion_index; |
| return list; |
| } |
| |
  // ElementsAccessor interface: dispatch to the subclass implementation.
  MaybeHandle<FixedArray> PrependElementIndices(
      Handle<JSObject> object, Handle<FixedArrayBase> backing_store,
      Handle<FixedArray> keys, GetKeysConversion convert,
      PropertyFilter filter) final {
    return Subclass::PrependElementIndicesImpl(object, backing_store, keys,
                                               convert, filter);
  }
| |
  // Builds one FixedArray holding this object's element indices followed by
  // the already-collected property |keys|. Dictionary and sloppy-arguments
  // kinds sort their indices (and convert them to strings only afterwards);
  // over-estimated result arrays are shrunk at the end.
  static MaybeHandle<FixedArray> PrependElementIndicesImpl(
      Handle<JSObject> object, Handle<FixedArrayBase> backing_store,
      Handle<FixedArray> keys, GetKeysConversion convert,
      PropertyFilter filter) {
    Isolate* isolate = object->GetIsolate();
    uint32_t nof_property_keys = keys->length();
    uint32_t initial_list_length =
        Subclass::GetMaxNumberOfEntries(*object, *backing_store);

    initial_list_length += nof_property_keys;
    // The second condition catches unsigned overflow of the addition above.
    if (initial_list_length > FixedArray::kMaxLength ||
        initial_list_length < nof_property_keys) {
      return isolate->Throw<FixedArray>(isolate->factory()->NewRangeError(
          MessageTemplate::kInvalidArrayLength));
    }

    // Collect the element indices into a new list.
    MaybeHandle<FixedArray> raw_array =
        isolate->factory()->TryNewFixedArray(initial_list_length);
    Handle<FixedArray> combined_keys;

    // If we have a holey backing store try to precisely estimate the backing
    // store size as a last emergency measure if we cannot allocate the big
    // array.
    if (!raw_array.ToHandle(&combined_keys)) {
      if (IsHoleyOrDictionaryElementsKind(kind())) {
        // If we overestimate the result list size we might end up in the
        // large-object space which doesn't free memory on shrinking the list.
        // Hence we try to estimate the final size for holey backing stores more
        // precisely here.
        initial_list_length =
            Subclass::NumberOfElementsImpl(*object, *backing_store);
        initial_list_length += nof_property_keys;
      }
      combined_keys = isolate->factory()->NewFixedArray(initial_list_length);
    }

    uint32_t nof_indices = 0;
    bool needs_sorting = IsDictionaryElementsKind(kind()) ||
                         IsSloppyArgumentsElementsKind(kind());
    combined_keys = Subclass::DirectCollectElementIndicesImpl(
        isolate, object, backing_store,
        needs_sorting ? GetKeysConversion::kKeepNumbers : convert, filter,
        combined_keys, &nof_indices);

    if (needs_sorting) {
      SortIndices(combined_keys, nof_indices);
      // Indices from dictionary elements should only be converted after
      // sorting.
      if (convert == GetKeysConversion::kConvertToString) {
        for (uint32_t i = 0; i < nof_indices; i++) {
          Handle<Object> index_string = isolate->factory()->Uint32ToString(
              combined_keys->get(i)->Number());
          combined_keys->set(i, *index_string);
        }
      }
    }

    // Copy over the passed-in property keys.
    CopyObjectToObjectElements(*keys, PACKED_ELEMENTS, 0, *combined_keys,
                               PACKED_ELEMENTS, nof_indices, nof_property_keys);

    // For holey elements and arguments we might have to shrink the collected
    // keys since the estimates might be off.
    if (IsHoleyOrDictionaryElementsKind(kind()) ||
        IsSloppyArgumentsElementsKind(kind())) {
      // Shrink combined_keys to the final size.
      int final_size = nof_indices + nof_property_keys;
      DCHECK_LE(final_size, combined_keys->length());
      combined_keys->Shrink(final_size);
    }

    return combined_keys;
  }
| |
  // ElementsAccessor interface: dispatch to the subclass implementation.
  void AddElementsToKeyAccumulator(Handle<JSObject> receiver,
                                   KeyAccumulator* accumulator,
                                   AddKeyConversion convert) final {
    Subclass::AddElementsToKeyAccumulatorImpl(receiver, accumulator, convert);
  }
| |
  // Default capacity: the physical length of the backing store.
  static uint32_t GetCapacityImpl(JSObject* holder,
                                  FixedArrayBase* backing_store) {
    return backing_store->length();
  }
| |
  // ElementsAccessor interface: dispatch to the subclass implementation.
  uint32_t GetCapacity(JSObject* holder, FixedArrayBase* backing_store) final {
    return Subclass::GetCapacityImpl(holder, backing_store);
  }
| |
  // Default implementation: Array.prototype.fill support must be provided by
  // subclasses; reaching the base version is a bug.
  static Object* FillImpl(Isolate* isolate, Handle<JSObject> receiver,
                          Handle<Object> obj_value, uint32_t start,
                          uint32_t end) {
    UNREACHABLE();
  }
| |
  // ElementsAccessor interface: dispatch to the subclass implementation.
  Object* Fill(Isolate* isolate, Handle<JSObject> receiver,
               Handle<Object> obj_value, uint32_t start, uint32_t end) {
    return Subclass::FillImpl(isolate, receiver, obj_value, start, end);
  }
| |
  // Default Array.prototype.includes: defer to the generic slow path.
  static Maybe<bool> IncludesValueImpl(Isolate* isolate,
                                       Handle<JSObject> receiver,
                                       Handle<Object> value,
                                       uint32_t start_from, uint32_t length) {
    return IncludesValueSlowPath(isolate, receiver, value, start_from, length);
  }
| |
  // ElementsAccessor interface: dispatch to the subclass implementation.
  Maybe<bool> IncludesValue(Isolate* isolate, Handle<JSObject> receiver,
                            Handle<Object> value, uint32_t start_from,
                            uint32_t length) final {
    return Subclass::IncludesValueImpl(isolate, receiver, value, start_from,
                                       length);
  }
| |
  // Default Array.prototype.indexOf: defer to the generic slow path.
  static Maybe<int64_t> IndexOfValueImpl(Isolate* isolate,
                                         Handle<JSObject> receiver,
                                         Handle<Object> value,
                                         uint32_t start_from, uint32_t length) {
    return IndexOfValueSlowPath(isolate, receiver, value, start_from, length);
  }
| |
  // ElementsAccessor interface: dispatch to the subclass implementation.
  Maybe<int64_t> IndexOfValue(Isolate* isolate, Handle<JSObject> receiver,
                              Handle<Object> value, uint32_t start_from,
                              uint32_t length) final {
    return Subclass::IndexOfValueImpl(isolate, receiver, value, start_from,
                                      length);
  }
| |
  // Default implementation: lastIndexOf support must be provided by
  // subclasses; reaching the base version is a bug.
  static Maybe<int64_t> LastIndexOfValueImpl(Isolate* isolate,
                                             Handle<JSObject> receiver,
                                             Handle<Object> value,
                                             uint32_t start_from) {
    UNREACHABLE();
  }
| |
  // ElementsAccessor interface: dispatch to the subclass implementation.
  Maybe<int64_t> LastIndexOfValue(Isolate* isolate, Handle<JSObject> receiver,
                                  Handle<Object> value,
                                  uint32_t start_from) final {
    return Subclass::LastIndexOfValueImpl(isolate, receiver, value, start_from);
  }
| |
  // Default implementation: in-place reversal must be provided by subclasses.
  static void ReverseImpl(JSObject* receiver) { UNREACHABLE(); }
| |
  // ElementsAccessor interface: dispatch to the subclass implementation.
  void Reverse(JSObject* receiver) final { Subclass::ReverseImpl(receiver); }
| |
  // For fast element kinds, entries and element indices coincide.
  static uint32_t GetIndexForEntryImpl(FixedArrayBase* backing_store,
                                       uint32_t entry) {
    return entry;
  }
| |
| static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject* holder, |
| FixedArrayBase* backing_store, |
| uint32_t index, PropertyFilter filter) { |
| uint32_t length = Subclass::GetMaxIndex(holder, backing_store); |
| if (IsHoleyOrDictionaryElementsKind(kind())) { |
| return index < length && |
| !BackingStore::cast(backing_store) |
| ->is_the_hole(isolate, index) |
| ? index |
| : kMaxUInt32; |
| } else { |
| return index < length ? index : kMaxUInt32; |
| } |
| } |
| |
  // ElementsAccessor interface: entry lookup without attribute filtering.
  uint32_t GetEntryForIndex(Isolate* isolate, JSObject* holder,
                            FixedArrayBase* backing_store,
                            uint32_t index) final {
    return Subclass::GetEntryForIndexImpl(isolate, holder, backing_store, index,
                                          ALL_PROPERTIES);
  }
| |
  // Fast elements are plain writable, enumerable, configurable data
  // properties.
  static PropertyDetails GetDetailsImpl(FixedArrayBase* backing_store,
                                        uint32_t entry) {
    return PropertyDetails(kData, NONE, PropertyCellType::kNoCell);
  }
| |
  // Holder-based variant; same default details as the store-based overload.
  static PropertyDetails GetDetailsImpl(JSObject* holder, uint32_t entry) {
    return PropertyDetails(kData, NONE, PropertyCellType::kNoCell);
  }
| |
  // ElementsAccessor interface: dispatch to the subclass implementation.
  PropertyDetails GetDetails(JSObject* holder, uint32_t entry) final {
    return Subclass::GetDetailsImpl(holder, entry);
  }
| |
| Handle<FixedArray> CreateListFromArrayLike(Isolate* isolate, |
| Handle<JSObject> object, |
| uint32_t length) final { |
| return Subclass::CreateListFromArrayLikeImpl(isolate, object, length); |
| }; |
| |
  // Default implementation: must be overridden by subclasses that support
  // array-like snapshotting; reaching the base version is a bug.
  static Handle<FixedArray> CreateListFromArrayLikeImpl(Isolate* isolate,
                                                        Handle<JSObject> object,
                                                        uint32_t length) {
    UNREACHABLE();
  }
| |
| private: |
| DISALLOW_COPY_AND_ASSIGN(ElementsAccessorBase); |
| }; |
| |
| |
| class DictionaryElementsAccessor |
| : public ElementsAccessorBase<DictionaryElementsAccessor, |
| ElementsKindTraits<DICTIONARY_ELEMENTS> > { |
| public: |
  // Forwards the accessor name to the base class; no state of its own.
  explicit DictionaryElementsAccessor(const char* name)
      : ElementsAccessorBase<DictionaryElementsAccessor,
                             ElementsKindTraits<DICTIONARY_ELEMENTS> >(name) {}
| |
  // Dictionaries have no dense index bound; callers must use entry counts.
  static uint32_t GetMaxIndex(JSObject* receiver, FixedArrayBase* elements) {
    // We cannot properly estimate this for dictionaries.
    UNREACHABLE();
  }
| |
  // For dictionaries the entry count is exact, unlike the index-based upper
  // bound used by fast kinds.
  static uint32_t GetMaxNumberOfEntries(JSObject* receiver,
                                        FixedArrayBase* backing_store) {
    return NumberOfElementsImpl(receiver, backing_store);
  }
| |
| static uint32_t NumberOfElementsImpl(JSObject* receiver, |
| FixedArrayBase* backing_store) { |
| NumberDictionary* dict = NumberDictionary::cast(backing_store); |
| return dict->NumberOfElements(); |
| } |
| |
  // Sets the length of a dictionary-mode JSArray. Elements at or beyond the
  // new length are deleted, except that a non-configurable element bumps the
  // effective length back up to just past it.
  static void SetLengthImpl(Isolate* isolate, Handle<JSArray> array,
                            uint32_t length,
                            Handle<FixedArrayBase> backing_store) {
    Handle<NumberDictionary> dict =
        Handle<NumberDictionary>::cast(backing_store);
    int capacity = dict->Capacity();
    uint32_t old_length = 0;
    CHECK(array->length()->ToArrayLength(&old_length));
    {
      DisallowHeapAllocation no_gc;
      if (length < old_length) {
        if (dict->requires_slow_elements()) {
          // Find last non-deletable element in range of elements to be
          // deleted and adjust range accordingly.
          for (int entry = 0; entry < capacity; entry++) {
            Object* index = dict->KeyAt(entry);
            if (dict->IsKey(isolate, index)) {
              uint32_t number = static_cast<uint32_t>(index->Number());
              if (length <= number && number < old_length) {
                PropertyDetails details = dict->DetailsAt(entry);
                if (!details.IsConfigurable()) length = number + 1;
              }
            }
          }
        }

        if (length == 0) {
          // Flush the backing store.
          array->initialize_elements();
        } else {
          // Remove elements that should be deleted.
          int removed_entries = 0;
          for (int entry = 0; entry < capacity; entry++) {
            Object* index = dict->KeyAt(entry);
            if (dict->IsKey(isolate, index)) {
              uint32_t number = static_cast<uint32_t>(index->Number());
              if (length <= number && number < old_length) {
                dict->ClearEntry(entry);
                removed_entries++;
              }
            }
          }

          // Update the number of elements.
          dict->ElementsRemoved(removed_entries);
        }
      }
    }

    // The adjusted length may exceed the Smi range; store it as a HeapNumber
    // when necessary.
    Handle<Object> length_obj = isolate->factory()->NewNumberFromUint(length);
    array->set_length(*length_obj);
  }
| |
  // Dictionary elements are never block-copied; conversions go through
  // element-wise paths instead.
  static void CopyElementsImpl(FixedArrayBase* from, uint32_t from_start,
                               FixedArrayBase* to, ElementsKind from_kind,
                               uint32_t to_start, int packed_size,
                               int copy_size) {
    UNREACHABLE();
  }
| |
| static Handle<JSObject> SliceImpl(Handle<JSObject> receiver, uint32_t start, |
| uint32_t end) { |
| Isolate* isolate = receiver->GetIsolate(); |
| uint32_t result_length = end < start ? 0u : end - start; |
| |
| // Result must also be a dictionary. |
| Handle<JSArray> result_array = |
| isolate->factory()->NewJSArray(0, HOLEY_ELEMENTS); |
| JSObject::NormalizeElements(result_array); |
| result_array->set_length(Smi::FromInt(result_length)); |
| Handle<NumberDictionary> source_dict( |
| NumberDictionary::cast(receiver->elements())); |
| int entry_count = source_dict->Capacity(); |
| for (int i = 0; i < entry_count; i++) { |
| Object* key = source_dict->KeyAt(i); |
| if (!source_dict->ToKey(isolate, i, &key)) continue; |
| uint64_t key_value = NumberToInt64(key); |
| if (key_value >= start && key_value < end) { |
| Handle<NumberDictionary> dest_dict( |
| NumberDictionary::cast(result_array->elements())); |
| Handle<Object> value(source_dict->ValueAt(i), isolate); |
| PropertyDetails details = source_dict->DetailsAt(i); |
| PropertyAttributes attr = details.attributes(); |
| AddImpl(result_array, static_cast<uint32_t>(key_value) - start, value, |
| attr, 0); |
| } |
| } |
| |
| return result_array; |
| } |
| |
| static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) { |
| Handle<NumberDictionary> dict(NumberDictionary::cast(obj->elements())); |
| dict = NumberDictionary::DeleteEntry(dict, entry); |
| obj->set_elements(*dict); |
| } |
| |
| static bool HasAccessorsImpl(JSObject* holder, |
| FixedArrayBase* backing_store) { |
| DisallowHeapAllocation no_gc; |
| NumberDictionary* dict = NumberDictionary::cast(backing_store); |
| if (!dict->requires_slow_elements()) return false; |
| int capacity = dict->Capacity(); |
| Isolate* isolate = dict->GetIsolate(); |
| for (int i = 0; i < capacity; i++) { |
| Object* key = dict->KeyAt(i); |
| if (!dict->IsKey(isolate, key)) continue; |
| PropertyDetails details = dict->DetailsAt(i); |
| if (details.kind() == kAccessor) return true; |
| } |
| return false; |
| } |
| |
| static Object* GetRaw(FixedArrayBase* store, uint32_t entry) { |
| NumberDictionary* backing_store = NumberDictionary::cast(store); |
| return backing_store->ValueAt(entry); |
| } |
| |
| static Handle<Object> GetImpl(Isolate* isolate, FixedArrayBase* backing_store, |
| uint32_t entry) { |
| return handle(GetRaw(backing_store, entry), isolate); |
| } |
| |
| static inline void SetImpl(Handle<JSObject> holder, uint32_t entry, |
| Object* value) { |
| SetImpl(holder->elements(), entry, value); |
| } |
| |
| static inline void SetImpl(FixedArrayBase* backing_store, uint32_t entry, |
| Object* value) { |
| NumberDictionary::cast(backing_store)->ValueAtPut(entry, value); |
| } |
| |
  // Rewrites the value and attributes of an existing dictionary entry,
  // preserving its enumeration (dictionary) index.
  static void ReconfigureImpl(Handle<JSObject> object,
                              Handle<FixedArrayBase> store, uint32_t entry,
                              Handle<Object> value,
                              PropertyAttributes attributes) {
    NumberDictionary* dictionary = NumberDictionary::cast(*store);
    // Non-default attributes pin the object in slow (dictionary) mode.
    if (attributes != NONE) object->RequireSlowElements(dictionary);
    dictionary->ValueAtPut(entry, *value);
    PropertyDetails details = dictionary->DetailsAt(entry);
    details = PropertyDetails(kData, attributes, PropertyCellType::kNoCell,
                              details.dictionary_index());

    dictionary->DetailsAtPut(entry, details);
  }
| |
  // Adds a new indexed data property to the element dictionary, normalizing
  // fast backing stores first if necessary. |new_capacity| is unused here;
  // dictionaries grow on demand.
  static void AddImpl(Handle<JSObject> object, uint32_t index,
                      Handle<Object> value, PropertyAttributes attributes,
                      uint32_t new_capacity) {
    PropertyDetails details(kData, attributes, PropertyCellType::kNoCell);
    Handle<NumberDictionary> dictionary =
        object->HasFastElements() || object->HasFastStringWrapperElements()
            ? JSObject::NormalizeElements(object)
            : handle(NumberDictionary::cast(object->elements()));
    Handle<NumberDictionary> new_dictionary =
        NumberDictionary::Add(dictionary, index, value, details);
    new_dictionary->UpdateMaxNumberKey(index, object);
    if (attributes != NONE) object->RequireSlowElements(*new_dictionary);
    // Add() may have grown (reallocated) the dictionary; only then does the
    // object need its elements pointer updated.
    if (dictionary.is_identical_to(new_dictionary)) return;
    object->set_elements(*new_dictionary);
  }
| |
| static bool HasEntryImpl(Isolate* isolate, FixedArrayBase* store, |
| uint32_t entry) { |
| DisallowHeapAllocation no_gc; |
| NumberDictionary* dict = NumberDictionary::cast(store); |
| Object* index = dict->KeyAt(entry); |
| return !index->IsTheHole(isolate); |
| } |
| |
| static uint32_t GetIndexForEntryImpl(FixedArrayBase* store, uint32_t entry) { |
| DisallowHeapAllocation no_gc; |
| NumberDictionary* dict = NumberDictionary::cast(store); |
| uint32_t result = 0; |
| CHECK(dict->KeyAt(entry)->ToArrayIndex(&result)); |
| return result; |
| } |
| |
| static uint32_t GetEntryForIndexImpl(Isolate* isolate, JSObject* holder, |
| FixedArrayBase* store, uint32_t index, |
| PropertyFilter filter) { |
| DisallowHeapAllocation no_gc; |
| NumberDictionary* dictionary = NumberDictionary::cast(store); |
| int entry = dictionary->FindEntry(isolate, index); |
| if (entry == NumberDictionary::kNotFound) return kMaxUInt32; |
| if (filter != ALL_PROPERTIES) { |
| PropertyDetails details = dictionary->DetailsAt(entry); |
| PropertyAttributes attr = details.attributes(); |
| if ((attr & filter) != 0) return kMaxUInt32; |
| } |
| return static_cast<uint32_t>(entry); |
| } |
| |
| static PropertyDetails GetDetailsImpl(JSObject* holder, uint32_t entry) { |
| return GetDetailsImpl(holder->elements(), entry); |
| } |
| |
| static PropertyDetails GetDetailsImpl(FixedArrayBase* backing_store, |
| uint32_t entry) { |
| return NumberDictionary::cast(backing_store)->DetailsAt(entry); |
| } |
| |
| static uint32_t FilterKey(Handle<NumberDictionary> dictionary, int entry, |
| Object* raw_key, PropertyFilter filter) { |
| DCHECK(raw_key->IsNumber()); |
| DCHECK_LE(raw_key->Number(), kMaxUInt32); |
| PropertyDetails details = dictionary->DetailsAt(entry); |
| PropertyAttributes attr = details.attributes(); |
| if ((attr & filter) != 0) return kMaxUInt32; |
| return static_cast<uint32_t>(raw_key->Number()); |
| } |
| |
| static uint32_t GetKeyForEntryImpl(Isolate* isolate, |
| Handle<NumberDictionary> dictionary, |
| int entry, PropertyFilter filter) { |
| DisallowHeapAllocation no_gc; |
| Object* raw_key = dictionary->KeyAt(entry); |
| if (!dictionary->IsKey(isolate, raw_key)) return kMaxUInt32; |
| return FilterKey(dictionary, entry, raw_key, filter); |
| } |
| |
  // Collects the dictionary's element indices into |keys| in ascending
  // order. Filtered-out entries are recorded as shadowing keys so prototype
  // walks treat them as present-but-hidden.
  static void CollectElementIndicesImpl(Handle<JSObject> object,
                                        Handle<FixedArrayBase> backing_store,
                                        KeyAccumulator* keys) {
    if (keys->filter() & SKIP_STRINGS) return;
    Isolate* isolate = keys->isolate();
    Handle<NumberDictionary> dictionary =
        Handle<NumberDictionary>::cast(backing_store);
    int capacity = dictionary->Capacity();
    Handle<FixedArray> elements = isolate->factory()->NewFixedArray(
        GetMaxNumberOfEntries(*object, *backing_store));
    int insertion_index = 0;
    PropertyFilter filter = keys->filter();
    for (int i = 0; i < capacity; i++) {
      Object* raw_key = dictionary->KeyAt(i);
      if (!dictionary->IsKey(isolate, raw_key)) continue;
      uint32_t key = FilterKey(dictionary, i, raw_key, filter);
      if (key == kMaxUInt32) {
        keys->AddShadowingKey(raw_key);
        continue;
      }
      elements->set(insertion_index, raw_key);
      insertion_index++;
    }
    // Dictionary iteration order is not index order; sort before adding.
    SortIndices(elements, insertion_index);
    for (int i = 0; i < insertion_index; i++) {
      keys->AddKey(elements->get(i));
    }
  }
| |
| static Handle<FixedArray> DirectCollectElementIndicesImpl( |
| Isolate* isolate, Handle<JSObject> object, |
| Handle<FixedArrayBase> backing_store, GetKeysConversion convert, |
| PropertyFilter filter, Handle<FixedArray> list, uint32_t* nof_indices, |
| uint32_t insertion_index = 0) { |
| if (filter & SKIP_STRINGS) return list; |
| if (filter & ONLY_ALL_CAN_READ) return list; |
| |
| Handle<NumberDictionary> dictionary = |
| Handle<NumberDictionary>::cast(backing_store); |
| uint32_t capacity = dictionary->Capacity(); |
| for (uint32_t i = 0; i < capacity; i++) { |
| uint32_t key = GetKeyForEntryImpl(isolate, dictionary, i, filter); |
| if (key == kMaxUInt32) continue; |
| Handle<Object> index = isolate->factory()->NewNumberFromUint(key); |
| list->set(insertion_index, *index); |
| insertion_index++; |
| } |
| *nof_indices = insertion_index; |
| return list; |
| } |
| |
| static void AddElementsToKeyAccumulatorImpl(Handle<JSObject> receiver, |
| KeyAccumulator* accumulator, |
| AddKeyConversion convert) { |
| Isolate* isolate = accumulator->isolate(); |
| Handle<NumberDictionary> dictionary( |
| NumberDictionary::cast(receiver->elements()), isolate); |
| int capacity = dictionary->Capacity(); |
| for (int i = 0; i < capacity; i++) { |
| Object* k = dictionary->KeyAt(i); |
| if (!dictionary->IsKey(isolate, k)) continue; |
| Object* value = dictionary->ValueAt(i); |
| DCHECK(!value->IsTheHole(isolate)); |
| DCHECK(!value->IsAccessorPair()); |
| DCHECK(!value->IsAccessorInfo()); |
| accumulator->AddKey(value, convert); |
| } |
| } |
| |
  // Allocation-free attempt at Array.prototype.includes on a dictionary-mode
  // receiver. Returns true and fills |result| when the scan completed;
  // returns false (caller must take the generic path) when an accessor
  // element is present, because getters must be observed in index order.
  static bool IncludesValueFastPath(Isolate* isolate, Handle<JSObject> receiver,
                                    Handle<Object> value, uint32_t start_from,
                                    uint32_t length, Maybe<bool>* result) {
    DisallowHeapAllocation no_gc;
    NumberDictionary* dictionary = NumberDictionary::cast(receiver->elements());
    int capacity = dictionary->Capacity();
    Object* the_hole = isolate->heap()->the_hole_value();
    Object* undefined = isolate->heap()->undefined_value();

    // Scan for accessor properties. If accessors are present, then elements
    // must be accessed in order via the slow path.
    bool found = false;
    for (int i = 0; i < capacity; ++i) {
      Object* k = dictionary->KeyAt(i);
      // Empty and deleted dictionary slots hold the hole / undefined.
      if (k == the_hole) continue;
      if (k == undefined) continue;

      uint32_t index;
      // Ignore keys outside the searched range [start_from, length).
      if (!k->ToArrayIndex(&index) || index < start_from || index >= length) {
        continue;
      }

      if (dictionary->DetailsAt(i).kind() == kAccessor) {
        // Restart from beginning in slow path, otherwise we may observably
        // access getters out of order
        return false;
      } else if (!found) {
        // Keep scanning even after a match: a later accessor entry would
        // still force the slow path.
        Object* element_k = dictionary->ValueAt(i);
        if (value->SameValueZero(element_k)) found = true;
      }
    }

    *result = Just(found);
    return true;
  }
| |
  // Array.prototype.includes for dictionary elements. Tries the no-allocation
  // fast path first; otherwise walks indices in order, invoking accessors,
  // and re-validates the backing store after every getter call since user
  // code can mutate the receiver arbitrarily.
  static Maybe<bool> IncludesValueImpl(Isolate* isolate,
                                       Handle<JSObject> receiver,
                                       Handle<Object> value,
                                       uint32_t start_from, uint32_t length) {
    DCHECK(JSObject::PrototypeHasNoElements(isolate, *receiver));
    // Per spec, `includes` treats missing entries like undefined.
    bool search_for_hole = value->IsUndefined(isolate);

    if (!search_for_hole) {
      Maybe<bool> result = Nothing<bool>();
      if (DictionaryElementsAccessor::IncludesValueFastPath(
              isolate, receiver, value, start_from, length, &result)) {
        return result;
      }
    }
    ElementsKind original_elements_kind = receiver->GetElementsKind();
    USE(original_elements_kind);
    Handle<NumberDictionary> dictionary(
        NumberDictionary::cast(receiver->elements()), isolate);
    // Iterate through entire range, as accessing elements out of order is
    // observable
    for (uint32_t k = start_from; k < length; ++k) {
      DCHECK_EQ(receiver->GetElementsKind(), original_elements_kind);
      int entry = dictionary->FindEntry(isolate, k);
      if (entry == NumberDictionary::kNotFound) {
        // A missing index matches when searching for undefined.
        if (search_for_hole) return Just(true);
        continue;
      }

      PropertyDetails details = GetDetailsImpl(*dictionary, entry);
      switch (details.kind()) {
        case kData: {
          Object* element_k = dictionary->ValueAt(entry);
          if (value->SameValueZero(element_k)) return Just(true);
          break;
        }
        case kAccessor: {
          // Invoking the getter may run arbitrary JS; everything below
          // re-checks the receiver's state.
          LookupIterator it(isolate, receiver, k,
                            LookupIterator::OWN_SKIP_INTERCEPTOR);
          DCHECK(it.IsFound());
          DCHECK_EQ(it.state(), LookupIterator::ACCESSOR);
          Handle<Object> element_k;

          ASSIGN_RETURN_ON_EXCEPTION_VALUE(
              isolate, element_k, JSObject::GetPropertyWithAccessor(&it),
              Nothing<bool>());

          if (value->SameValueZero(*element_k)) return Just(true);

          // Bailout to slow path if elements on prototype changed
          if (!JSObject::PrototypeHasNoElements(isolate, *receiver)) {
            return IncludesValueSlowPath(isolate, receiver, value, k + 1,
                                         length);
          }

          // Continue if elements unchanged
          if (*dictionary == receiver->elements()) continue;

          // Otherwise, bailout or update elements

          // If switched to initial elements, return true if searching for
          // undefined, and false otherwise.
          if (receiver->map()->GetInitialElements() == receiver->elements()) {
            return Just(search_for_hole);
          }

          // If switched to fast elements, continue with the correct accessor.
          if (receiver->GetElementsKind() != DICTIONARY_ELEMENTS) {
            ElementsAccessor* accessor = receiver->GetElementsAccessor();
            return accessor->IncludesValue(isolate, receiver, value, k + 1,
                                           length);
          }
          // Still dictionary-mode, but the store object changed: re-fetch it.
          dictionary =
              handle(NumberDictionary::cast(receiver->elements()), isolate);
          break;
        }
      }
    }
    return Just(false);
  }
| |
  // Array.prototype.indexOf for dictionary elements: returns the first index
  // in [start_from, length) whose value is strict-equal to |value|, or -1.
  // Accessors are invoked in order; after each getter call the elements store
  // is re-validated because user code may have mutated the receiver.
  static Maybe<int64_t> IndexOfValueImpl(Isolate* isolate,
                                         Handle<JSObject> receiver,
                                         Handle<Object> value,
                                         uint32_t start_from, uint32_t length) {
    DCHECK(JSObject::PrototypeHasNoElements(isolate, *receiver));

    ElementsKind original_elements_kind = receiver->GetElementsKind();
    USE(original_elements_kind);
    Handle<NumberDictionary> dictionary(
        NumberDictionary::cast(receiver->elements()), isolate);
    // Iterate through entire range, as accessing elements out of order is
    // observable.
    for (uint32_t k = start_from; k < length; ++k) {
      DCHECK_EQ(receiver->GetElementsKind(), original_elements_kind);
      int entry = dictionary->FindEntry(isolate, k);
      // Unlike `includes`, `indexOf` never matches holes.
      if (entry == NumberDictionary::kNotFound) continue;

      PropertyDetails details = GetDetailsImpl(*dictionary, entry);
      switch (details.kind()) {
        case kData: {
          Object* element_k = dictionary->ValueAt(entry);
          if (value->StrictEquals(element_k)) {
            return Just<int64_t>(k);
          }
          break;
        }
        case kAccessor: {
          // The getter may run arbitrary JS; re-check state afterwards.
          LookupIterator it(isolate, receiver, k,
                            LookupIterator::OWN_SKIP_INTERCEPTOR);
          DCHECK(it.IsFound());
          DCHECK_EQ(it.state(), LookupIterator::ACCESSOR);
          Handle<Object> element_k;

          ASSIGN_RETURN_ON_EXCEPTION_VALUE(
              isolate, element_k, JSObject::GetPropertyWithAccessor(&it),
              Nothing<int64_t>());

          if (value->StrictEquals(*element_k)) return Just<int64_t>(k);

          // Bailout to slow path if elements on prototype changed.
          if (!JSObject::PrototypeHasNoElements(isolate, *receiver)) {
            return IndexOfValueSlowPath(isolate, receiver, value, k + 1,
                                        length);
          }

          // Continue if elements unchanged.
          if (*dictionary == receiver->elements()) continue;

          // Otherwise, bailout or update elements.
          if (receiver->GetElementsKind() != DICTIONARY_ELEMENTS) {
            // Otherwise, switch to slow path.
            return IndexOfValueSlowPath(isolate, receiver, value, k + 1,
                                        length);
          }
          // Still dictionary-mode, but a new store object: re-fetch it.
          dictionary =
              handle(NumberDictionary::cast(receiver->elements()), isolate);
          break;
        }
      }
    }
    return Just<int64_t>(-1);
  }
| |
  // Debug-only consistency check for dictionary elements: verifies the
  // dictionary's requires_slow_elements flag and max_number_key bookkeeping
  // against the actual keys. No-op in release builds and unless
  // --enable-slow-asserts is on.
  static void ValidateContents(JSObject* holder, int length) {
    DisallowHeapAllocation no_gc;
#if DEBUG
    DCHECK_EQ(holder->map()->elements_kind(), DICTIONARY_ELEMENTS);
    if (!FLAG_enable_slow_asserts) return;
    Isolate* isolate = holder->GetIsolate();
    NumberDictionary* dictionary = NumberDictionary::cast(holder->elements());
    // Validate the requires_slow_elements and max_number_key values.
    int capacity = dictionary->Capacity();
    bool requires_slow_elements = false;
    int max_key = 0;
    for (int i = 0; i < capacity; ++i) {
      Object* k;
      if (!dictionary->ToKey(isolate, i, &k)) continue;
      DCHECK_LE(0.0, k->Number());
      if (k->Number() > NumberDictionary::kRequiresSlowElementsLimit) {
        requires_slow_elements = true;
      } else {
        max_key = Max(max_key, Smi::ToInt(k));
      }
    }
    if (requires_slow_elements) {
      DCHECK(dictionary->requires_slow_elements());
    } else if (!dictionary->requires_slow_elements()) {
      // max_number_key is only meaningful while the dictionary does not
      // require slow elements.
      DCHECK_LE(max_key, dictionary->max_number_key());
    }
#endif
  }
| }; |
| |
| |
| // Super class for all fast element arrays. |
| template <typename Subclass, typename KindTraits> |
| class FastElementsAccessor : public ElementsAccessorBase<Subclass, KindTraits> { |
| public: |
| explicit FastElementsAccessor(const char* name) |
| : ElementsAccessorBase<Subclass, KindTraits>(name) {} |
| |
| typedef typename KindTraits::BackingStore BackingStore; |
| |
  // Converts a fast backing store into a NumberDictionary containing the
  // same (non-hole) elements, updating the no-elements protector where the
  // object may be a prototype.
  static Handle<NumberDictionary> NormalizeImpl(Handle<JSObject> object,
                                                Handle<FixedArrayBase> store) {
    Isolate* isolate = store->GetIsolate();
    ElementsKind kind = Subclass::kind();

    // Ensure that notifications fire if the array or object prototypes are
    // normalizing.
    if (IsSmiOrObjectElementsKind(kind) ||
        kind == FAST_STRING_WRAPPER_ELEMENTS) {
      isolate->UpdateNoElementsProtectorOnNormalizeElements(object);
    }

    // |capacity| is the number of *used* elements, not the store's length.
    int capacity = object->GetFastElementsUsage();
    Handle<NumberDictionary> dictionary =
        NumberDictionary::New(isolate, capacity);

    PropertyDetails details = PropertyDetails::Empty();
    int j = 0;
    int max_number_key = -1;
    // Note the mixed-index loop: |i| walks the store while |j| counts the
    // non-hole elements copied so far; it stops once all used elements are in.
    for (int i = 0; j < capacity; i++) {
      if (IsHoleyOrDictionaryElementsKind(kind)) {
        if (BackingStore::cast(*store)->is_the_hole(isolate, i)) continue;
      }
      max_number_key = i;
      Handle<Object> value = Subclass::GetImpl(isolate, *store, i);
      dictionary = NumberDictionary::Add(dictionary, i, value, details);
      j++;
    }

    if (max_number_key > 0) {
      dictionary->UpdateMaxNumberKey(static_cast<uint32_t>(max_number_key),
                                     object);
    }
    return dictionary;
  }
| |
| static void DeleteAtEnd(Handle<JSObject> obj, |
| Handle<BackingStore> backing_store, uint32_t entry) { |
| uint32_t length = static_cast<uint32_t>(backing_store->length()); |
| Isolate* isolate = obj->GetIsolate(); |
| for (; entry > 0; entry--) { |
| if (!backing_store->is_the_hole(isolate, entry - 1)) break; |
| } |
| if (entry == 0) { |
| FixedArray* empty = isolate->heap()->empty_fixed_array(); |
| // Dynamically ask for the elements kind here since we manually redirect |
| // the operations for argument backing stores. |
| if (obj->GetElementsKind() == FAST_SLOPPY_ARGUMENTS_ELEMENTS) { |
| SloppyArgumentsElements::cast(obj->elements())->set_arguments(empty); |
| } else { |
| obj->set_elements(empty); |
| } |
| return; |
| } |
| |
| isolate->heap()->RightTrimFixedArray(*backing_store, length - entry); |
| } |
| |
  // Deletes the element at |entry| by writing a hole, then applies heuristics
  // to shrink or normalize sparse backing stores.
  static void DeleteCommon(Handle<JSObject> obj, uint32_t entry,
                           Handle<FixedArrayBase> store) {
    DCHECK(obj->HasSmiOrObjectElements() || obj->HasDoubleElements() ||
           obj->HasFastArgumentsElements() ||
           obj->HasFastStringWrapperElements());
    Handle<BackingStore> backing_store = Handle<BackingStore>::cast(store);
    if (!obj->IsJSArray() &&
        entry == static_cast<uint32_t>(store->length()) - 1) {
      // Deleting the last slot of a non-array: trim instead of holing.
      DeleteAtEnd(obj, backing_store, entry);
      return;
    }

    Isolate* isolate = obj->GetIsolate();
    backing_store->set_the_hole(isolate, entry);

    // TODO(verwaest): Move this out of elements.cc.
    // If an old space backing store is larger than a certain size and
    // has too few used values, normalize it.
    const int kMinLengthForSparsenessCheck = 64;
    if (backing_store->length() < kMinLengthForSparsenessCheck) return;
    if (backing_store->GetHeap()->InNewSpace(*backing_store)) return;
    uint32_t length = 0;
    if (obj->IsJSArray()) {
      JSArray::cast(*obj)->length()->ToArrayLength(&length);
    } else {
      length = static_cast<uint32_t>(store->length());
    }

    // To avoid doing the check on every delete, use a counter-based heuristic.
    const int kLengthFraction = 16;
    // The above constant must be large enough to ensure that we check for
    // normalization frequently enough. At a minimum, it should be large
    // enough to reliably hit the "window" of remaining elements count where
    // normalization would be beneficial.
    STATIC_ASSERT(kLengthFraction >=
                  NumberDictionary::kEntrySize *
                      NumberDictionary::kPreferFastElementsSizeFactor);
    size_t current_counter = isolate->elements_deletion_counter();
    if (current_counter < length / kLengthFraction) {
      isolate->set_elements_deletion_counter(current_counter + 1);
      return;
    }
    // Reset the counter whenever the full check is performed.
    isolate->set_elements_deletion_counter(0);

    if (!obj->IsJSArray()) {
      // If everything after |entry| is a hole, trim the store instead of
      // normalizing.
      uint32_t i;
      for (i = entry + 1; i < length; i++) {
        if (!backing_store->is_the_hole(isolate, i)) break;
      }
      if (i == length) {
        DeleteAtEnd(obj, backing_store, entry);
        return;
      }
    }
    int num_used = 0;
    for (int i = 0; i < backing_store->length(); ++i) {
      if (!backing_store->is_the_hole(isolate, i)) {
        ++num_used;
        // Bail out if a number dictionary wouldn't be able to save much space.
        if (NumberDictionary::kPreferFastElementsSizeFactor *
                NumberDictionary::ComputeCapacity(num_used) *
                NumberDictionary::kEntrySize >
            static_cast<uint32_t>(backing_store->length())) {
          return;
        }
      }
    }
    JSObject::NormalizeElements(obj);
  }
| |
| static void ReconfigureImpl(Handle<JSObject> object, |
| Handle<FixedArrayBase> store, uint32_t entry, |
| Handle<Object> value, |
| PropertyAttributes attributes) { |
| Handle<NumberDictionary> dictionary = JSObject::NormalizeElements(object); |
| entry = dictionary->FindEntry(entry); |
| DictionaryElementsAccessor::ReconfigureImpl(object, dictionary, entry, |
| value, attributes); |
| } |
| |
  // Adds a plain (NONE-attributes) element at |index|, growing/converting
  // the backing store or transitioning the elements kind as needed before
  // storing the value.
  static void AddImpl(Handle<JSObject> object, uint32_t index,
                      Handle<Object> value, PropertyAttributes attributes,
                      uint32_t new_capacity) {
    DCHECK_EQ(NONE, attributes);
    ElementsKind from_kind = object->GetElementsKind();
    ElementsKind to_kind = Subclass::kind();
    // Grow-and-convert when coming from dictionary mode, when the double-ness
    // of the representation changes, or when the capacity must change.
    if (IsDictionaryElementsKind(from_kind) ||
        IsDoubleElementsKind(from_kind) != IsDoubleElementsKind(to_kind) ||
        Subclass::GetCapacityImpl(*object, object->elements()) !=
            new_capacity) {
      Subclass::GrowCapacityAndConvertImpl(object, new_capacity);
    } else {
      // Same store suffices; may still need a kind transition (e.g. packed
      // Smi -> packed Object) or a COW copy before writing.
      if (IsFastElementsKind(from_kind) && from_kind != to_kind) {
        JSObject::TransitionElementsKind(object, to_kind);
      }
      if (IsSmiOrObjectElementsKind(from_kind)) {
        DCHECK(IsSmiOrObjectElementsKind(to_kind));
        JSObject::EnsureWritableFastElements(object);
      }
    }
    Subclass::SetImpl(object, index, *value);
  }
| |
| static void DeleteImpl(Handle<JSObject> obj, uint32_t entry) { |
| ElementsKind kind = KindTraits::Kind; |
| if (IsFastPackedElementsKind(kind)) { |
| JSObject::TransitionElementsKind(obj, GetHoleyElementsKind(kind)); |
| } |
| if (IsSmiOrObjectElementsKind(KindTraits::Kind)) { |
| JSObject::EnsureWritableFastElements(obj); |
| } |
| DeleteCommon(obj, entry, handle(obj->elements())); |
| } |
| |
| static bool HasEntryImpl(Isolate* isolate, FixedArrayBase* backing_store, |
| uint32_t entry) { |
| return !BackingStore::cast(backing_store)->is_the_hole(isolate, entry); |
| } |
| |
| static uint32_t NumberOfElementsImpl(JSObject* receiver, |
| FixedArrayBase* backing_store) { |
| uint32_t max_index = Subclass::GetMaxIndex(receiver, backing_store); |
| if (IsFastPackedElementsKind(Subclass::kind())) return max_index; |
| Isolate* isolate = receiver->GetIsolate(); |
| uint32_t count = 0; |
| for (uint32_t i = 0; i < max_index; i++) { |
| if (Subclass::HasEntryImpl(isolate, backing_store, i)) count++; |
| } |
| return count; |
| } |
| |
| static void AddElementsToKeyAccumulatorImpl(Handle<JSObject> receiver, |
| KeyAccumulator* accumulator, |
| AddKeyConversion convert) { |
| Isolate* isolate = accumulator->isolate(); |
| Handle<FixedArrayBase> elements(receiver->elements(), isolate); |
| uint32_t length = Subclass::GetMaxNumberOfEntries(*receiver, *elements); |
| for (uint32_t i = 0; i < length; i++) { |
| if (IsFastPackedElementsKind(KindTraits::Kind) || |
| HasEntryImpl(isolate, *elements, i)) { |
| accumulator->AddKey(Subclass::GetImpl(isolate, *elements, i), convert); |
| } |
| } |
| } |
| |
  // Debug-only consistency check for fast elements: verifies the backing
  // store map matches the elements kind, and (under slow DCHECKs) that
  // packed stores contain no holes and Smi stores contain only Smis.
  static void ValidateContents(JSObject* holder, int length) {
#if DEBUG
    Isolate* isolate = holder->GetIsolate();
    Heap* heap = isolate->heap();
    FixedArrayBase* elements = holder->elements();
    Map* map = elements->map();
    if (IsSmiOrObjectElementsKind(KindTraits::Kind)) {
      DCHECK_NE(map, heap->fixed_double_array_map());
    } else if (IsDoubleElementsKind(KindTraits::Kind)) {
      DCHECK_NE(map, heap->fixed_cow_array_map());
      // An empty double store may legitimately be the empty FixedArray.
      if (map == heap->fixed_array_map()) DCHECK_EQ(0, length);
    } else {
      UNREACHABLE();
    }
    if (length == 0) return;  // nothing to do!
#if ENABLE_SLOW_DCHECKS
    DisallowHeapAllocation no_gc;
    BackingStore* backing_store = BackingStore::cast(elements);
    if (IsSmiElementsKind(KindTraits::Kind)) {
      HandleScope scope(isolate);
      for (int i = 0; i < length; i++) {
        // Smi kinds may only contain Smis (plus holes when holey).
        DCHECK(BackingStore::get(backing_store, i, isolate)->IsSmi() ||
               (IsHoleyElementsKind(KindTraits::Kind) &&
                backing_store->is_the_hole(isolate, i)));
      }
    } else if (KindTraits::Kind == PACKED_ELEMENTS ||
               KindTraits::Kind == PACKED_DOUBLE_ELEMENTS) {
      // Packed kinds must be hole-free over [0, length).
      for (int i = 0; i < length; i++) {
        DCHECK(!backing_store->is_the_hole(isolate, i));
      }
    } else {
      DCHECK(IsHoleyElementsKind(KindTraits::Kind));
    }
#endif
#endif
  }
| |
  // Array.prototype.pop: remove and return the last element.
  static Handle<Object> PopImpl(Handle<JSArray> receiver) {
    return Subclass::RemoveElement(receiver, AT_END);
  }
| |
  // Array.prototype.shift: remove and return the first element.
  static Handle<Object> ShiftImpl(Handle<JSArray> receiver) {
    return Subclass::RemoveElement(receiver, AT_START);
  }
| |
| static uint32_t PushImpl(Handle<JSArray> receiver, |
| Arguments* args, uint32_t push_size) { |
| Handle<FixedArrayBase> backing_store(receiver->elements()); |
| return Subclass::AddArguments(receiver, backing_store, args, push_size, |
| AT_END); |
| } |
| |
| static uint32_t UnshiftImpl(Handle<JSArray> receiver, |
| Arguments* args, uint32_t unshift_size) { |
| Handle<FixedArrayBase> backing_store(receiver->elements()); |
| return Subclass::AddArguments(receiver, backing_store, args, unshift_size, |
| AT_START); |
| } |
| |
  // Array.prototype.slice: copies [start, end) into a freshly allocated
  // array of the same kind, then tries to repack it.
  static Handle<JSObject> SliceImpl(Handle<JSObject> receiver, uint32_t start,
                                    uint32_t end) {
    Isolate* isolate = receiver->GetIsolate();
    Handle<FixedArrayBase> backing_store(receiver->elements(), isolate);
    int result_len = end < start ? 0u : end - start;
    Handle<JSArray> result_array = isolate->factory()->NewJSArray(
        KindTraits::Kind, result_len, result_len);
    // No allocation below this point: CopyElementsImpl works on raw pointers.
    DisallowHeapAllocation no_gc;
    Subclass::CopyElementsImpl(*backing_store, start, result_array->elements(),
                               KindTraits::Kind, 0, kPackedSizeNotKnown,
                               result_len);
    Subclass::TryTransitionResultArrayToPacked(result_array);
    return result_array;
  }
| |
  // Array.prototype.splice: removes |delete_count| elements at |start|,
  // inserts |add_count| arguments in their place, and returns a new array
  // holding the deleted elements.
  static Handle<JSArray> SpliceImpl(Handle<JSArray> receiver,
                                    uint32_t start, uint32_t delete_count,
                                    Arguments* args, uint32_t add_count) {
    Isolate* isolate = receiver->GetIsolate();
    Heap* heap = isolate->heap();
    uint32_t length = Smi::ToInt(receiver->length());
    uint32_t new_length = length - delete_count + add_count;

    ElementsKind kind = KindTraits::Kind;
    // If we will write into the existing store, make sure it is not COW.
    if (new_length <= static_cast<uint32_t>(receiver->elements()->length()) &&
        IsSmiOrObjectElementsKind(kind)) {
      HandleScope scope(isolate);
      JSObject::EnsureWritableFastElements(receiver);
    }

    Handle<FixedArrayBase> backing_store(receiver->elements(), isolate);

    if (new_length == 0) {
      // Everything is deleted: reuse the old store as the result's elements
      // and give the receiver the canonical empty store.
      receiver->set_elements(heap->empty_fixed_array());
      receiver->set_length(Smi::kZero);
      return isolate->factory()->NewJSArrayWithElements(
          backing_store, KindTraits::Kind, delete_count);
    }

    // Construct the result array which holds the deleted elements.
    Handle<JSArray> deleted_elements = isolate->factory()->NewJSArray(
        KindTraits::Kind, delete_count, delete_count);
    if (delete_count > 0) {
      DisallowHeapAllocation no_gc;
      Subclass::CopyElementsImpl(*backing_store, start,
                                 deleted_elements->elements(), KindTraits::Kind,
                                 0, kPackedSizeNotKnown, delete_count);
    }

    // Delete and move elements to make space for add_count new elements.
    if (add_count < delete_count) {
      Subclass::SpliceShrinkStep(isolate, receiver, backing_store, start,
                                 delete_count, add_count, length, new_length);
    } else if (add_count > delete_count) {
      // May allocate a bigger store; use whatever it returns from here on.
      backing_store =
          Subclass::SpliceGrowStep(isolate, receiver, backing_store, start,
                                   delete_count, add_count, length, new_length);
    }

    // Copy over the arguments.
    Subclass::CopyArguments(args, backing_store, add_count, 3, start);

    receiver->set_length(Smi::FromInt(new_length));
    Subclass::TryTransitionResultArrayToPacked(deleted_elements);
    return deleted_elements;
  }
| |
| static Maybe<bool> CollectValuesOrEntriesImpl( |
| Isolate* isolate, Handle<JSObject> object, |
| Handle<FixedArray> values_or_entries, bool get_entries, int* nof_items, |
| PropertyFilter filter) { |
| Handle<BackingStore> elements(BackingStore::cast(object->elements()), |
| isolate); |
| int count = 0; |
| uint32_t length = elements->length(); |
| for (uint32_t index = 0; index < length; ++index) { |
| if (!HasEntryImpl(isolate, *elements, index)) continue; |
| Handle<Object> value = Subclass::GetImpl(isolate, *elements, index); |
| if (get_entries) { |
| value = MakeEntryPair(isolate, index, value); |
| } |
| values_or_entries->set(count++, *value); |
| } |
| *nof_items = count; |
| return Just(true); |
| } |
| |
  // Moves |len| elements from |src_index| to |dst_index| within the backing
  // store, then fills [hole_start, hole_end) with holes. For large
  // shift-to-front moves it left-trims the store instead of copying.
  static void MoveElements(Isolate* isolate, Handle<JSArray> receiver,
                           Handle<FixedArrayBase> backing_store, int dst_index,
                           int src_index, int len, int hole_start,
                           int hole_end) {
    Heap* heap = isolate->heap();
    Handle<BackingStore> dst_elms = Handle<BackingStore>::cast(backing_store);
    if (len > JSArray::kMaxCopyElements && dst_index == 0 &&
        heap->CanMoveObjectStart(*dst_elms)) {
      // Update all the copies of this backing_store handle.
      // NOTE: writing through .location() intentionally redirects every
      // handle aliasing |backing_store| to the trimmed array.
      *dst_elms.location() =
          BackingStore::cast(heap->LeftTrimFixedArray(*dst_elms, src_index));
      receiver->set_elements(*dst_elms);
      // Adjust the hole offset as the array has been shrunk.
      hole_end -= src_index;
      DCHECK_LE(hole_start, backing_store->length());
      DCHECK_LE(hole_end, backing_store->length());
    } else if (len != 0) {
      if (IsDoubleElementsKind(KindTraits::Kind)) {
        // Doubles are raw bits; a plain memmove suffices (no write barrier).
        MemMove(dst_elms->data_start() + dst_index,
                dst_elms->data_start() + src_index, len * kDoubleSize);
      } else {
        // Tagged values must be moved via the heap to keep barriers correct.
        DisallowHeapAllocation no_gc;
        heap->MoveElements(FixedArray::cast(*dst_elms), dst_index, src_index,
                           len);
      }
    }
    if (hole_start != hole_end) {
      dst_elms->FillWithHoles(hole_start, hole_end);
    }
  }
| |
  // Array.prototype.includes for fast elements. Dispatches on the searched
  // value (undefined / non-number / number / NaN) and the elements kind to
  // pick the cheapest scan; uses SameValueZero semantics throughout.
  static Maybe<bool> IncludesValueImpl(Isolate* isolate,
                                       Handle<JSObject> receiver,
                                       Handle<Object> search_value,
                                       uint32_t start_from, uint32_t length) {
    DCHECK(JSObject::PrototypeHasNoElements(isolate, *receiver));
    DisallowHeapAllocation no_gc;
    FixedArrayBase* elements_base = receiver->elements();
    Object* the_hole = isolate->heap()->the_hole_value();
    Object* undefined = isolate->heap()->undefined_value();
    Object* value = *search_value;

    // Elements beyond the capacity of the backing store treated as undefined.
    if (value == undefined &&
        static_cast<uint32_t>(elements_base->length()) < length) {
      return Just(true);
    }

    if (start_from >= length) return Just(false);

    // Clamp the scan to the actual backing store size.
    length = std::min(static_cast<uint32_t>(elements_base->length()), length);

    if (!value->IsNumber()) {
      if (value == undefined) {
        // Only PACKED_ELEMENTS, HOLEY_ELEMENTS, HOLEY_SMI_ELEMENTS, and
        // HOLEY_DOUBLE_ELEMENTS can have `undefined` as a value.
        if (!IsObjectElementsKind(Subclass::kind()) &&
            !IsHoleyElementsKind(Subclass::kind())) {
          return Just(false);
        }

        // Search for `undefined` or The Hole in PACKED_ELEMENTS,
        // HOLEY_ELEMENTS or HOLEY_SMI_ELEMENTS
        if (IsSmiOrObjectElementsKind(Subclass::kind())) {
          auto elements = FixedArray::cast(receiver->elements());

          for (uint32_t k = start_from; k < length; ++k) {
            Object* element_k = elements->get(k);

            // A hole reads back as undefined per spec.
            if (IsHoleyElementsKind(Subclass::kind()) &&
                element_k == the_hole) {
              return Just(true);
            }
            if (IsObjectElementsKind(Subclass::kind()) &&
                element_k == undefined) {
              return Just(true);
            }
          }
          return Just(false);
        } else {
          // Search for The Hole in HOLEY_DOUBLE_ELEMENTS
          DCHECK_EQ(Subclass::kind(), HOLEY_DOUBLE_ELEMENTS);
          auto elements = FixedDoubleArray::cast(receiver->elements());

          for (uint32_t k = start_from; k < length; ++k) {
            if (IsHoleyElementsKind(Subclass::kind()) &&
                elements->is_the_hole(k)) {
              return Just(true);
            }
          }
          return Just(false);
        }
      } else if (!IsObjectElementsKind(Subclass::kind())) {
        // Search for non-number, non-Undefined value, with either
        // PACKED_SMI_ELEMENTS, PACKED_DOUBLE_ELEMENTS, HOLEY_SMI_ELEMENTS or
        // HOLEY_DOUBLE_ELEMENTS. Guaranteed to return false, since these
        // elements kinds can only contain Number values or undefined.
        return Just(false);
      } else {
        // Search for non-number, non-Undefined value with either
        // PACKED_ELEMENTS or HOLEY_ELEMENTS.
        DCHECK(IsObjectElementsKind(Subclass::kind()));
        auto elements = FixedArray::cast(receiver->elements());

        for (uint32_t k = start_from; k < length; ++k) {
          Object* element_k = elements->get(k);
          if (IsHoleyElementsKind(Subclass::kind()) && element_k == the_hole) {
            continue;
          }

          if (value->SameValueZero(element_k)) return Just(true);
        }
        return Just(false);
      }
    } else {
      if (!value->IsNaN()) {
        double search_value = value->Number();
        if (IsDoubleElementsKind(Subclass::kind())) {
          // Search for non-NaN Number in PACKED_DOUBLE_ELEMENTS or
          // HOLEY_DOUBLE_ELEMENTS --- Skip TheHole, and trust UCOMISD or
          // similar operation for result.
          auto elements = FixedDoubleArray::cast(receiver->elements());

          for (uint32_t k = start_from; k < length; ++k) {
            if (IsHoleyElementsKind(Subclass::kind()) &&
                elements->is_the_hole(k)) {
              continue;
            }
            if (elements->get_scalar(k) == search_value) return Just(true);
          }
          return Just(false);
        } else {
          // Search for non-NaN Number in PACKED_ELEMENTS, HOLEY_ELEMENTS,
          // PACKED_SMI_ELEMENTS or HOLEY_SMI_ELEMENTS --- Skip non-Numbers,
          // and trust UCOMISD or similar operation for result
          auto elements = FixedArray::cast(receiver->elements());

          for (uint32_t k = start_from; k < length; ++k) {
            Object* element_k = elements->get(k);
            if (element_k->IsNumber() && element_k->Number() == search_value) {
              return Just(true);
            }
          }
          return Just(false);
        }
      } else {
        // Search for NaN --- NaN cannot be represented with Smi elements, so
        // abort if ElementsKind is PACKED_SMI_ELEMENTS or HOLEY_SMI_ELEMENTS
        if (IsSmiElementsKind(Subclass::kind())) return Just(false);

        if (IsDoubleElementsKind(Subclass::kind())) {
          // Search for NaN in PACKED_DOUBLE_ELEMENTS or
          // HOLEY_DOUBLE_ELEMENTS --- Skip The Hole and trust
          // std::isnan(elementK) for result
          auto elements = FixedDoubleArray::cast(receiver->elements());

          for (uint32_t k = start_from; k < length; ++k) {
            if (IsHoleyElementsKind(Subclass::kind()) &&
                elements->is_the_hole(k)) {
              continue;
            }
            if (std::isnan(elements->get_scalar(k))) return Just(true);
          }
          return Just(false);
        } else {
          // Search for NaN in PACKED_ELEMENTS, HOLEY_ELEMENTS,
          // PACKED_SMI_ELEMENTS or HOLEY_SMI_ELEMENTS. Return true if
          // elementK->IsHeapNumber() && std::isnan(elementK->Number())
          DCHECK(IsSmiOrObjectElementsKind(Subclass::kind()));
          auto elements = FixedArray::cast(receiver->elements());

          for (uint32_t k = start_from; k < length; ++k) {
            if (elements->get(k)->IsNaN()) return Just(true);
          }
          return Just(false);
        }
      }
    }
  }
| |
| static Handle<FixedArray> CreateListFromArrayLikeImpl(Isolate* isolate, |
| Handle<JSObject> object, |
| uint32_t length) { |
| Handle<FixedArray> result = isolate->factory()->NewFixedArray(length); |
| Handle<FixedArrayBase> elements(object->elements(), isolate); |
| for (uint32_t i = 0; i < length; i++) { |
| if (!Subclass::HasElementImpl(isolate, *object, i, *elements)) continue; |
| Handle<Object> value; |
| value = Subclass::GetImpl(isolate, *elements, i); |
| if (value->IsName()) { |
| value = isolate->factory()->InternalizeName(Handle<Name>::cast(value)); |
| } |
| result->set(i, *value); |
| } |
| return result; |
| } |
| |
| private: |
| // SpliceShrinkStep might modify the backing_store. |
| static void SpliceShrinkStep(Isolate* isolate, Handle<JSArray> receiver, |
| Handle<FixedArrayBase> backing_store, |
| uint32_t start, uint32_t delete_count, |
| uint32_t add_count, uint32_t len, |
| uint32_t new_length) { |
| const int move_left_count = len - delete_count - start; |
| const int move_left_dst_index = start + add_count; |
| Subclass::MoveElements(isolate, receiver, backing_store, |
| move_left_dst_index, start + delete_count, |
| move_left_count, new_length, len); |
| } |
| |
  // SpliceGrowStep might modify the backing_store.
  // Makes room for |add_count| elements at |start|: either shifts the tail
  // right within the existing store, or allocates a bigger store and copies
  // both halves into it. Returns the store to use from now on.
  static Handle<FixedArrayBase> SpliceGrowStep(
      Isolate* isolate, Handle<JSArray> receiver,
      Handle<FixedArrayBase> backing_store, uint32_t start,
      uint32_t delete_count, uint32_t add_count, uint32_t length,
      uint32_t new_length) {
    // Check we do not overflow the new_length.
    DCHECK((add_count - delete_count) <= (Smi::kMaxValue - length));
    // Check if backing_store is big enough.
    if (new_length <= static_cast<uint32_t>(backing_store->length())) {
      Subclass::MoveElements(isolate, receiver, backing_store,
                             start + add_count, start + delete_count,
                             (length - delete_count - start), 0, 0);
      // MoveElements updates the backing_store in-place.
      return backing_store;
    }
    // New backing storage is needed.
    int capacity = JSObject::NewElementsCapacity(new_length);
    // Partially copy all elements up to start.
    Handle<FixedArrayBase> new_elms = Subclass::ConvertElementsWithCapacity(
        receiver, backing_store, KindTraits::Kind, capacity, start);
    // Copy the trailing elements after start + delete_count
    Subclass::CopyElementsImpl(*backing_store, start + delete_count, *new_elms,
                               KindTraits::Kind, start + add_count,
                               kPackedSizeNotKnown,
                               ElementsAccessor::kCopyToEndAndInitializeToHole);
    receiver->set_elements(*new_elms);
    return new_elms;
  }
| |
  // Shared implementation of pop (AT_END) and shift (AT_START): removes one
  // element, shrinks the length by one, and returns the removed value
  // (undefined when the removed slot was a hole).
  static Handle<Object> RemoveElement(Handle<JSArray> receiver,
                                      Where remove_position) {
    Isolate* isolate = receiver->GetIsolate();
    ElementsKind kind = KindTraits::Kind;
    if (IsSmiOrObjectElementsKind(kind)) {
      // The store is about to be mutated; copy it if it is COW.
      HandleScope scope(isolate);
      JSObject::EnsureWritableFastElements(receiver);
    }
    Handle<FixedArrayBase> backing_store(receiver->elements(), isolate);
    uint32_t length = static_cast<uint32_t>(Smi::ToInt(receiver->length()));
    DCHECK_GT(length, 0);
    int new_length = length - 1;
    int remove_index = remove_position == AT_START ? 0 : new_length;
    // Read the value before shifting/truncating destroys it.
    Handle<Object> result =
        Subclass::GetImpl(isolate, *backing_store, remove_index);
    if (remove_position == AT_START) {
      // shift: slide everything one slot towards the front.
      Subclass::MoveElements(isolate, receiver, backing_store, 0, 1, new_length,
                             0, 0);
    }
    Subclass::SetLengthImpl(isolate, receiver, new_length, backing_store);

    if (IsHoleyOrDictionaryElementsKind(kind) && result->IsTheHole(isolate)) {
      return isolate->factory()->undefined_value();
    }
    return result;
  }
| |
| static uint32_t AddArguments(Handle<JSArray> receiver, |
| Handle<FixedArrayBase> backing_store, |
| Arguments* args, uint32_t add_size, |
| Where add_position) { |
| uint32_t length = Smi::ToInt(receiver->length()); |
| DCHECK_LT(0, add_size); |
| uint32_t elms_len = backing_store->length(); |
| // Check we do not overflow the new_length. |
| DCHECK(add_size <= static_cast<uint32_t>(Smi::kMaxValue - length)); |
| uint32_t new_length = length + add_size; |
| |
| if (new_length > elms_len) { |
| // New backing storage is needed. |
| uint32_t capacity = JSObject::NewElementsCapacity(new_length); |
| // If we add arguments to the start we have to shift the existing objects. |
| int copy_dst_index = add_position == AT_START ? add_size : 0; |
| // Copy over all objects to a new backing_store. |
|